content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Clean Player Data For Projections
#'
#' @description For many of the data sources the player column contains more data than
#' needed to identify the player. With the help of regular expressions data such as position,
#' team and injury information is cleaned from the player names.
#' @param playerCol The vector of player data taken from the data table that is returned from
#' the data scrape
#' @return The updated vector of player data, one cleaned name per input element
#' @export getPlayerName
getPlayerName <- function(playerCol){
  # Temporarily rename the 49ers: a later step deletes every non-letter
  # character, which would otherwise strip the digits from the only team
  # name containing them. The name is restored near the end.
  playerCol <- gsub("49ers", "Niners", playerCol, fixed = TRUE)
  # Normalize the various New York / New England / New Orleans spellings to a
  # single nickname BEFORE the generic "New " cleanup below removes the word.
  playerCol <- gsub("New York NYG", "Giants", playerCol, fixed = TRUE)
  playerCol <- gsub("New York NYJ", "Jets", playerCol, fixed = TRUE)
  playerCol <- gsub("New York.+\\(NYG", "Giants", playerCol)
  playerCol <- gsub("New York.+\\(NYJ", "Jets", playerCol)
  playerCol <- gsub("New York Giants", "Giants", playerCol)
  playerCol <- gsub("New York Jets", "Jets", playerCol)
  playerCol <- gsub("New England Patriots", "Patriots", playerCol)
  playerCol <- gsub("New England", "Patriots", playerCol)
  playerCol <- gsub("New Orleans Saints", "Saints", playerCol)
  playerCol <- gsub("New Orleans", "Saints", playerCol)
  # Drop embedded "A. Lastname" note lines, injury-status prefixes fused onto
  # a capitalized name, and duplicated D/ST blocks.
  playerCol <- gsub("\n\\s*[A-Z]\\. [A-Za-z ,.'-]+\n", "", playerCol)
  playerCol <- gsub("^(Q|D|OUT|SUS|IR)([A-Z])", "\\2", playerCol)
  playerCol <- gsub("D/ST\n.*D/ST\n", "", playerCol)
  # Remove injury/status words and site boilerplate.
  playerCol <- gsub("Questionable|Probable|Injured Reserve|Out|SSPD|Final|View|Videos|News|Video|(N|n)ote|(N|n)otes|(P|p)layer|^No new|New ", "", playerCol)
  # Remove team abbreviations in the three positions they occur: followed by a
  # space, preceded by ", ", or at the end of the string; then any remaining
  # bare abbreviations.
  playerCol <- gsub("(B(AL|al)|B(UF|uf)|C(HI|hi)|C(IN|in)|C(LE|le)|D(AL|al)|D(EN|en)|D(ET|et)|GB|H(OU|ou)|I(ND|nd)|J(AC|ac)|J(AX|ax)|KC|K(AN|an)|NO|O(AK|ak)|P(IT|it)|P(HI|hi)|NYG|NYJ|NE|S(EA|ea)|A(TL|tl)|A(RI|ri)|M(IA|ia)|SD|S(T|t)(L|l)|C(AR|ar)|SF|T(EN|en)|W(AS|as)|TB|M(IN|in)|W(SH|sh)) |LAC|LAR|LA", "", playerCol)
  playerCol <- gsub(",\\s(B(AL|al)|B(UF|uf)|C(HI|hi)|C(IN|in)|C(LE|le)|D(AL|al)|D(EN|en)|D(ET|et)|GB|H(OU|ou)|I(ND|nd)|J(AC|ac)|J(AX|ax)|KC|K(AN|an)|NO|O(AK|ak)|P(IT|it)|P(HI|hi)|NYG|NYJ|NE|S(EA|ea)|A(TL|tl)|A(RI|ri)|M(IA|ia)|SD|S(T|t)(L|l)|C(AR|ar)|SF|T(EN|en)|W(AS|as)|TB|M(IN|in)|W(SH|sh))|LAC|LAR|LA", "", playerCol)
  playerCol <- gsub("(B(AL|al)|B(UF|uf)|C(HI|hi)|C(IN|in)|C(LE|le)|D(AL|al)|D(EN|en)|D(ET|et)|GB|H(OU|ou)|I(ND|nd)|J(AC|ac)|J(AX|ax)|KC|K(AN|an)|NO|O(AK|ak)|P(IT|it)|P(HI|hi)|NYG|NYJ|NE|S(EA|ea)|A(TL|tl)|A(RI|ri)|M(IA|ia)|SD|S(T|t)(L|l)|C(AR|ar)|SF|T(EN|en)|W(AS|as)|TB|M(IN|in)|W(SH|sh))$|LAC|LAR|LA", "", playerCol)
  playerCol <- gsub("BAL|BUF|CHI|CIN|CLE|DAL|DEN|DET|GB|HOU|IND|JAC|JAX|KC|KAN|NO|OAK|PIT|PHI|NYG|NYJ|NE|SEA|ATL|ARI|MIA|SD|STL|CAR|SF|TEN|WAS|TB|MIN|WSH", "", playerCol)
  # Remove single-letter injury designations (P/Q/O/D/S) and roster codes
  # (IR, EXE, SUS, PUP, DNP, LP) wherever they appear.
  playerCol <- gsub("\\s+((P|Q|O|D|S)$|IR|EXE|SUS|PUP|DNP|LP)|\\s(P|Q|O|D|S)\\s|^\\[(Q|P|O|D|S)\\]\\s|(P|Q|O|D|S|IR)$", "", playerCol)
  # Drop generational suffixes and matchup text ("at", "vs.").
  playerCol <- gsub(" Jr.| Sr.| Jr| Sr| III", "", playerCol)
  playerCol <- gsub("\\sat|\\svs.","", playerCol)
  # Delete every remaining character that is not a letter, space, dot or hyphen.
  playerCol <- gsub("[^a-zA-Z \\.\\-]", "", playerCol)
  playerCol <- gsub("Niners", "49ers", playerCol, fixed = TRUE)
  # NOTE(review): runs of 2+ spaces are deleted outright (not collapsed to a
  # single space); preserved as original behaviour.
  playerCol <- gsub(" {2,99}", "", playerCol)
  playerCol <- gsub("vs$", "", playerCol)
  playerCol <- gsub("(W|L)$", "", playerCol)
  # Strip trailing position labels (QB/RB/WR/TE/...) and defense markers.
  playerCol <- gsub("RBTE$|RBWR$|TERB$|WRRB$|WRTE$|TEWR$|QBRB$|RBQB$|QBWR$|WRQB$|TEQB$|QBTE$|QB$|RB$|WR$|TE$|K$|DEF$|DST$|FA$|DL$|LB$|DB$| FA|DST D", "", playerCol)
  # Trim surrounding whitespace. Bug fix: the original pattern "\\s$" removed
  # only a single trailing whitespace character; "\\s+$" removes them all,
  # matching the "^\\s+" treatment of leading whitespace.
  playerCol <- gsub("^\\s+|\\s+$", "", playerCol)
  playerCol <- gsub("\\-$", "", playerCol)
  playerCol <- gsub(" - DEF(W|L)$", "", playerCol)
  # Apply the manual name corrections shipped with the package
  # (correct.from/correct.to are expected to exist in the calling environment).
  for(n in seq_along(correct.from)){
    playerCol[playerCol == correct.from[n]] <- correct.to[n]
  }
  # Map team cities (or "City Name" combinations) to plain team names.
  for(n in seq_along(nflTeam.id)){
    playerCol[playerCol == nflTeam.city[n]] <- nflTeam.name[n]
    playerCol[playerCol == paste(nflTeam.city[n], nflTeam.name[n])] <- nflTeam.name[n]
  }
  # Final trim (same bug fix as above).
  playerCol <- gsub("^\\s+|\\s+$", "", playerCol)
  return(playerCol)
}
| /R/getPlayerName.R | no_license | isaactpetersen/ffanalytics | R | false | false | 3,847 | r | #' Clean Player Data For Projections
#'
#' @description For many of the data sources the player column contains more data than
#' needed to identify the player. With the help of regular expression data such as position,
#' team and injury information is cleaned from the player names.
#' @param playerCol The vector of player data taken from the data table that is returned from
#' the data scrape
#' @return The updated vector of player data
#' @export getPlayerName
# Strips positions, team abbreviations, injury codes and site boilerplate
# from scraped player strings, leaving bare player/team names.
getPlayerName <- function(playerCol){
# "49ers" is temporarily renamed so that the non-letter strip further down
# does not remove its digits; it is restored near the end.
playerCol <- gsub("49ers", "Niners", playerCol, fixed = TRUE)
# Normalize New York / New England / New Orleans spellings to a single
# nickname BEFORE the generic "New " cleanup below removes the word.
playerCol <- gsub("New York NYG", "Giants", playerCol, fixed = TRUE)
playerCol <- gsub("New York NYJ", "Jets", playerCol, fixed = TRUE)
playerCol <- gsub("New York.+\\(NYG", "Giants", playerCol)
playerCol <- gsub("New York.+\\(NYJ", "Jets", playerCol)
playerCol <- gsub("New York Giants", "Giants", playerCol)
playerCol <- gsub("New York Jets", "Jets", playerCol)
playerCol <- gsub("New England Patriots", "Patriots", playerCol)
playerCol <- gsub("New England", "Patriots", playerCol)
playerCol <- gsub("New Orleans Saints", "Saints", playerCol)
playerCol <- gsub("New Orleans", "Saints", playerCol)
# Drop embedded "A. Lastname" note lines, injury-status prefixes fused
# onto a capitalized name, and duplicated D/ST blocks.
playerCol <- gsub("\n\\s*[A-Z]\\. [A-Za-z ,.'-]+\n", "", playerCol)
playerCol <- gsub("^(Q|D|OUT|SUS|IR)([A-Z])", "\\2", playerCol)
playerCol <- gsub("D/ST\n.*D/ST\n", "", playerCol)
# Remove injury/status words and site boilerplate.
playerCol <- gsub("Questionable|Probable|Injured Reserve|Out|SSPD|Final|View|Videos|News|Video|(N|n)ote|(N|n)otes|(P|p)layer|^No new|New ", "", playerCol)
# Remove team abbreviations in the three positions they occur: followed by
# a space, preceded by ", ", or at the end of the string; then any
# remaining bare abbreviations.
playerCol <- gsub("(B(AL|al)|B(UF|uf)|C(HI|hi)|C(IN|in)|C(LE|le)|D(AL|al)|D(EN|en)|D(ET|et)|GB|H(OU|ou)|I(ND|nd)|J(AC|ac)|J(AX|ax)|KC|K(AN|an)|NO|O(AK|ak)|P(IT|it)|P(HI|hi)|NYG|NYJ|NE|S(EA|ea)|A(TL|tl)|A(RI|ri)|M(IA|ia)|SD|S(T|t)(L|l)|C(AR|ar)|SF|T(EN|en)|W(AS|as)|TB|M(IN|in)|W(SH|sh)) |LAC|LAR|LA", "", playerCol)
playerCol <- gsub(",\\s(B(AL|al)|B(UF|uf)|C(HI|hi)|C(IN|in)|C(LE|le)|D(AL|al)|D(EN|en)|D(ET|et)|GB|H(OU|ou)|I(ND|nd)|J(AC|ac)|J(AX|ax)|KC|K(AN|an)|NO|O(AK|ak)|P(IT|it)|P(HI|hi)|NYG|NYJ|NE|S(EA|ea)|A(TL|tl)|A(RI|ri)|M(IA|ia)|SD|S(T|t)(L|l)|C(AR|ar)|SF|T(EN|en)|W(AS|as)|TB|M(IN|in)|W(SH|sh))|LAC|LAR|LA", "", playerCol)
playerCol <- gsub("(B(AL|al)|B(UF|uf)|C(HI|hi)|C(IN|in)|C(LE|le)|D(AL|al)|D(EN|en)|D(ET|et)|GB|H(OU|ou)|I(ND|nd)|J(AC|ac)|J(AX|ax)|KC|K(AN|an)|NO|O(AK|ak)|P(IT|it)|P(HI|hi)|NYG|NYJ|NE|S(EA|ea)|A(TL|tl)|A(RI|ri)|M(IA|ia)|SD|S(T|t)(L|l)|C(AR|ar)|SF|T(EN|en)|W(AS|as)|TB|M(IN|in)|W(SH|sh))$|LAC|LAR|LA", "", playerCol)
playerCol <- gsub("BAL|BUF|CHI|CIN|CLE|DAL|DEN|DET|GB|HOU|IND|JAC|JAX|KC|KAN|NO|OAK|PIT|PHI|NYG|NYJ|NE|SEA|ATL|ARI|MIA|SD|STL|CAR|SF|TEN|WAS|TB|MIN|WSH", "", playerCol)
# Remove single-letter injury designations (P/Q/O/D/S) and roster codes
# (IR, EXE, SUS, PUP, DNP, LP) wherever they appear.
playerCol <- gsub("\\s+((P|Q|O|D|S)$|IR|EXE|SUS|PUP|DNP|LP)|\\s(P|Q|O|D|S)\\s|^\\[(Q|P|O|D|S)\\]\\s|(P|Q|O|D|S|IR)$", "", playerCol)
# Drop generational suffixes and matchup text ("at", "vs.").
playerCol <- gsub(" Jr.| Sr.| Jr| Sr| III", "", playerCol)
playerCol <- gsub("\\sat|\\svs.","", playerCol)
# Delete every remaining character that is not a letter, space, dot or hyphen,
# then restore the 49ers name.
playerCol <- gsub("[^a-zA-Z \\.\\-]", "", playerCol)
playerCol <- gsub("Niners", "49ers", playerCol, fixed = TRUE)
# Runs of 2+ spaces are deleted outright (not collapsed to one space).
playerCol <- gsub(" {2,99}", "", playerCol)
playerCol <- gsub("vs$", "", playerCol)
playerCol <- gsub("(W|L)$", "", playerCol)
# Strip trailing position labels (QB/RB/WR/TE/...) and defense markers.
playerCol <- gsub("RBTE$|RBWR$|TERB$|WRRB$|WRTE$|TEWR$|QBRB$|RBQB$|QBWR$|WRQB$|TEQB$|QBTE$|QB$|RB$|WR$|TE$|K$|DEF$|DST$|FA$|DL$|LB$|DB$| FA|DST D", "", playerCol)
# NOTE(review): "\\s$" removes only ONE trailing whitespace character;
# "\\s+$" was probably intended (compare the "^\\s+" leading pattern).
playerCol <- gsub("^\\s+|\\s$", "", playerCol)
playerCol <- gsub("\\-$", "", playerCol)
playerCol <- gsub(" - DEF(W|L)$", "", playerCol)
# Apply the manual name corrections (correct.from/correct.to are expected
# to exist in the calling environment, e.g. package data).
for(n in seq_along(correct.from)){
playerCol[playerCol == correct.from[n]] <- correct.to[n]
}
# Map team cities (or "City Name" combinations) to plain team names.
for(n in seq_along(nflTeam.id)){
playerCol[playerCol == nflTeam.city[n]] <- nflTeam.name[n]
playerCol[playerCol == paste(nflTeam.city[n], nflTeam.name[n])] <- nflTeam.name[n]
}
# Final whitespace trim (same caveat as above).
playerCol <- gsub("^\\s+|\\s$", "", playerCol)
return(playerCol)
}
|
# This file is generated by make.paws. Please do not edit here.
# Smoke tests for the paws.inspector service interface. The idiom
# expect_error(expr, NA) asserts that evaluating expr raises NO error,
# i.e. each list/describe operation can be called with default arguments.
test_that("describe_cross_account_access_role", {
expect_error(describe_cross_account_access_role(), NA)
})
test_that("list_assessment_runs", {
expect_error(list_assessment_runs(), NA)
})
test_that("list_assessment_targets", {
expect_error(list_assessment_targets(), NA)
})
test_that("list_assessment_templates", {
expect_error(list_assessment_templates(), NA)
})
test_that("list_event_subscriptions", {
expect_error(list_event_subscriptions(), NA)
})
test_that("list_findings", {
expect_error(list_findings(), NA)
})
test_that("list_rules_packages", {
expect_error(list_rules_packages(), NA)
})
| /service/paws.inspector/tests/testthat/test_paws.inspector.R | permissive | CR-Mercado/paws | R | false | false | 695 | r | # This file is generated by make.paws. Please do not edit here.
# Smoke tests for the paws.inspector service interface (generated file).
# expect_error(expr, NA) asserts that evaluating expr raises NO error.
test_that("describe_cross_account_access_role", {
expect_error(describe_cross_account_access_role(), NA)
})
test_that("list_assessment_runs", {
expect_error(list_assessment_runs(), NA)
})
test_that("list_assessment_targets", {
expect_error(list_assessment_targets(), NA)
})
test_that("list_assessment_templates", {
expect_error(list_assessment_templates(), NA)
})
test_that("list_event_subscriptions", {
expect_error(list_event_subscriptions(), NA)
})
test_that("list_findings", {
expect_error(list_findings(), NA)
})
test_that("list_rules_packages", {
expect_error(list_rules_packages(), NA)
})
|
##Data processing
# Load the processing helper and the PPI network matrix (hprdAsigH.m) from
# the bundled .Rd data file, then read the GSE52529 FPKM expression matrix.
source('Processdata.R');
load("hprdAsigH-13Jun12.Rd");
counts <- read.table("GSE52529_fpkm_matrix.txt",sep="\t",header=T);
data <- Processdata(counts,hprdAsigH.m);
#Gene ID Conversion
# Map Ensembl IDs on the expression rows to gene symbols; rows whose ID
# could not be mapped (NA symbol) are dropped.
# NOTE(review): mapIds/org.Hs.eg.db require AnnotationDbi and org.Hs.eg.db
# to be attached beforehand -- confirm they are loaded elsewhere.
r <- as.character(rownames(data$exp));
geneIDselect <- mapIds(org.Hs.eg.db,keys=r,column="SYMBOL",keytype="ENSEMBL",multiVals="first");
rownames(data$exp) <- geneIDselect;
data$exp <- data$exp[!is.na(rownames(data$exp)),];
##Calculation of NCG
# Integrate expression with the PPI adjacency matrix, compute edge
# clustering coefficients (ECC), then the NCG score using km (loaded from
# hs_km.Rda).
source('DoIntegPPI.R');
source('CompECC.R');
source('CompNCG.R');
load("hs_km.Rda");
int.o <- DoIntegPPI(data$exp,data$adj);
ECC <- CompECC(int.o$adjMC);
NCG <- CompNCG(ECC,int.o$expMC,km);
##Construction of lineage trajectory
source('DoSCENTalt.R');
# Phenotype labels: four groups of 96/96/96/84 cells (time points below).
pheno.v <- c(rep(1,96),rep(2,96),rep(3,96),rep(4,84));
#0h,24h,48h,72h
scent.o_NCG <- DoSCENTalt(data$exp,sr.v=NCG,pheno.v);
##AUC evaluation
# ROC/AUC using NCG to separate the first group (indices 1:96, label 1)
# from the last group (indices 289:372, label 0).
require("ROCR");
pred <- prediction(c(NCG[1:96],NCG[289:372]),c(rep(1,96),rep(0,84)));
perf <- performance(pred,"tpr","fpr");
auc <- performance(pred,'auc')@y.values;
##P-value evaluation
# One-sided Wilcoxon rank-sum test: is NCG in the first group greater than
# in the last group?
PI <- wilcox.test(NCG[1:96],NCG[289:372],alternative = "greater")
source('Processdata.R');
load("hprdAsigH-13Jun12.Rd");
counts <- read.table("GSE52529_fpkm_matrix.txt",sep="\t",header=T);
data <- Processdata(counts,hprdAsigH.m);
#Gene ID Conversion
r <- as.character(rownames(data$exp));
geneIDselect <- mapIds(org.Hs.eg.db,keys=r,column="SYMBOL",keytype="ENSEMBL",multiVals="first");
rownames(data$exp) <- geneIDselect;
data$exp <- data$exp[!is.na(rownames(data$exp)),];
##Calculation of NCG
source('DoIntegPPI.R');
source('CompECC.R');
source('CompNCG.R');
load("hs_km.Rda");
int.o <- DoIntegPPI(data$exp,data$adj);
ECC <- CompECC(int.o$adjMC);
NCG <- CompNCG(ECC,int.o$expMC,km);
##Construction of lineage trajectory
source('DoSCENTalt.R');
pheno.v <- c(rep(1,96),rep(2,96),rep(3,96),rep(4,84));
#0h,24h,48h,72h
scent.o_NCG <- DoSCENTalt(data$exp,sr.v=NCG,pheno.v);
##AUC evaluation
require("ROCR");
pred <- prediction(c(NCG[1:96],NCG[289:372]),c(rep(1,96),rep(0,84)));
perf <- performance(pred,"tpr","fpr");
auc <- performance(pred,'auc')@y.values;
##P-value evaluation
PI <- wilcox.test(NCG[1:96],NCG[289:372],alternative = "greater")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53_operations.R
\name{route53_delete_reusable_delegation_set}
\alias{route53_delete_reusable_delegation_set}
\title{Deletes a reusable delegation set}
\usage{
route53_delete_reusable_delegation_set(Id)
}
\arguments{
\item{Id}{[required] The ID of the reusable delegation set that you want to delete.}
}
\description{
Deletes a reusable delegation set.
}
\details{
You can delete a reusable delegation set only if it isn\'t associated
with any hosted zones.
To verify that the reusable delegation set is not associated with any
hosted zones, submit a
\href{https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetReusableDelegationSet.html}{GetReusableDelegationSet}
request and specify the ID of the reusable delegation set that you want
to delete.
}
\section{Request syntax}{
\preformatted{svc$delete_reusable_delegation_set(
Id = "string"
)
}
}
\keyword{internal}
| /cran/paws.networking/man/route53_delete_reusable_delegation_set.Rd | permissive | johnnytommy/paws | R | false | true | 960 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53_operations.R
\name{route53_delete_reusable_delegation_set}
\alias{route53_delete_reusable_delegation_set}
\title{Deletes a reusable delegation set}
\usage{
route53_delete_reusable_delegation_set(Id)
}
\arguments{
\item{Id}{[required] The ID of the reusable delegation set that you want to delete.}
}
\description{
Deletes a reusable delegation set.
}
\details{
You can delete a reusable delegation set only if it isn\'t associated
with any hosted zones.
To verify that the reusable delegation set is not associated with any
hosted zones, submit a
\href{https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetReusableDelegationSet.html}{GetReusableDelegationSet}
request and specify the ID of the reusable delegation set that you want
to delete.
}
\section{Request syntax}{
\preformatted{svc$delete_reusable_delegation_set(
Id = "string"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{blis.b}
\alias{blis.b}
\title{Bliss leghorn data series B}
\format{Vector of 21 values}
\source{
D'Agostino and Stephens (1986) page 546.
}
\usage{
blis.b
}
\description{
Weights of 21 leghorn chicks (grams, measured at 21 days). Series A is data \code{leghorn}.
}
\keyword{datasets}
| /man/blis.b.Rd | no_license | nxskok/edfr | R | false | true | 391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{blis.b}
\alias{blis.b}
\title{Bliss leghorn data series B}
\format{Vector of 21 values}
\source{
D'Agostino and Stephens (1986) page 546.
}
\usage{
blis.b
}
\description{
Weights of 21 leghorn chicks (grams, measured at 21 days). Series A is data \code{leghorn}.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findNLDI.R
\name{get_nldi_sources}
\alias{get_nldi_sources}
\title{Get current NLDI offerings}
\usage{
get_nldi_sources()
}
\value{
data.frame
}
\description{
Used to query the current resources available through the NLDI
}
\examples{
\donttest{
get_nldi_sources()
}
}
\keyword{nldi}
| /man/get_nldi_sources.Rd | permissive | dblodgett-usgs/dataRetrieval | R | false | true | 362 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findNLDI.R
\name{get_nldi_sources}
\alias{get_nldi_sources}
\title{Get current NLDI offerings}
\usage{
get_nldi_sources()
}
\value{
data.frame
}
\description{
Used to query the current resources available through the NLDI
}
\examples{
\donttest{
get_nldi_sources()
}
}
\keyword{nldi}
|
# nigeria bivariate
library(BayesX)
library(tidyr)
source("nigeria/plot_legend.r")
source("nigeria/plot_sizes.r")
source("nigeria/basisZ_mbmi.r")
source("nigeria/basisZ_mage.r")
source("nigeria/basisZ_cage.r")
# read results from BayesX
source("nigeria/read_ssvs.r")
source("nigeria/read_nossvs.r")
require(ggplot2)
data <- read.table("nigeria/data/nigeria4.raw", header = TRUE)
par(mar=c(4,4,2,1)+0.1, oma = c(0,0,0,0),ps=10)
mbmi <- unique(data$mbmi)
mbmi <- mbmi[order(mbmi)]
mage <- unique(data$mage)
mage <- mage[order(mage)]
cage <- unique(data$cage)
cage <- cage[order(cage)]
mbmi2 <- unique(data$mbmi2)
mbmi2 <- mbmi2[order(mbmi2)]
mage2 <- unique(data$mage2)
mage2 <- mage2[order(mage2)]
cage2 <- unique(data$cage2)
cage2 <- cage2[order(cage2)]
# splines
cols2 <- c("grey", "darkgrey", "black", "darkgrey", "grey")
cols1 <- c("firebrick1", "firebrick2", "firebrick3", "firebrick2", "firebrick1")
pos <- c(5, 6, 3, 8, 9)
#pdf("nigeria_nonlin.pdf", width = height_90, height = width_90)
# multiply
lin.cage.rho <- wasting.rho.lin[wasting.rho.lin$varname=="cage2",]$pmean* wasting.rho.cage$cage2
lin.mage.rho <- wasting.rho.lin[wasting.rho.lin$varname=="mage2",]$pmean*wasting.rho.mage$mage2
lin.mbmi.rho <- wasting.rho.lin[wasting.rho.lin$varname=="mbmi2",]$pmean*wasting.rho.mbmi$mbmi2
lin.cage.s.sigma <- stunting.sigma.lin[stunting.sigma.lin$varname=="cage2",]$pmean*stunting.sigma.cage$cage
lin.mage.s.sigma <- stunting.sigma.lin[stunting.sigma.lin$varname=="mage2",]$pmean*stunting.sigma.mage$mage
lin.mbmi.s.sigma <- stunting.sigma.lin[stunting.sigma.lin$varname=="mbmi2",]$pmean*stunting.sigma.mbmi$mbmi
lin.cage.w.sigma <- wasting.sigma.lin[wasting.sigma.lin$varname=="cage2",]$pmean * wasting.sigma.cage$cage
lin.mage.w.sigma <- wasting.sigma.lin[wasting.sigma.lin$varname=="mage2",]$pmean * wasting.sigma.mage$mage
lin.mbmi.w.sigma <- wasting.sigma.lin[wasting.sigma.lin$varname=="mbmi2",]$pmean * wasting.sigma.mbmi$mbmi
lin.cage.w.mu <- wasting.mu.lin[wasting.mu.lin$varname=="cage2",]$pmean * wasting.mu.cage$cage
lin.mage.w.mu <- wasting.mu.lin[wasting.mu.lin$varname=="mage2",]$pmean * wasting.mu.mage$mage
lin.mbmi.w.mu <- wasting.mu.lin[wasting.mu.lin$varname=="mbmi2",]$pmean * wasting.mu.mbmi$mbmi
lin.cage.s.mu <- stunting.mu.lin[stunting.mu.lin$varname=="cage2",]$pmean * stunting.mu.cage$cage
lin.mage.s.mu <- stunting.mu.lin[stunting.mu.lin$varname=="mage2",]$pmean * stunting.mu.mage$mage
lin.mbmi.s.mu <- stunting.mu.lin[stunting.mu.lin$varname=="mbmi2",]$pmean * stunting.mu.mbmi$mbmi
wasting.rho.cage.dat <- cbind(x=cage,gather(cbind((wasting.rho.cage[,pos[1:5]]+lin.cage.rho))), source="NBSS prior", var="cage", param="rho - wasting")
wasting.rho.mage.dat <- cbind(x=mage,gather(cbind((wasting.rho.mage[,pos[1:5]]+lin.mage.rho))), source="NBSS prior", var="mage", param="rho - wasting")
wasting.rho.mbmi.dat <- cbind(x=mbmi,gather(cbind((wasting.rho.mbmi[,pos[1:5]]+lin.mbmi.rho))), source="NBSS prior", var="mbmi", param="rho - wasting")
wasting.rho.cage.full.dat <- cbind(x=cage, gather(cbind(wasting.rho.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="rho - wasting")
wasting.rho.mage.full.dat <- cbind(x=mage, gather(cbind(wasting.rho.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="rho - wasting")
wasting.rho.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(wasting.rho.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="rho - wasting")
stunting.sigma.cage.dat <- cbind(x=cage,gather(cbind((stunting.sigma.cage[,pos[1:5]]+lin.cage.s.sigma))), source="NBSS prior", var="cage",param="sigma - stunting")
stunting.sigma.mage.dat <- cbind(x=mage,gather(cbind((stunting.sigma.mage[,pos[1:5]]+lin.mage.s.sigma))), source="NBSS prior", var="mage",param="sigma - stunting")
stunting.sigma.mbmi.dat <- cbind(x=mbmi,gather(cbind((stunting.sigma.mbmi[,pos[1:5]]+lin.mbmi.s.sigma))), source="NBSS prior", var="mbmi",param="sigma - stunting")
stunting.sigma.cage.full.dat <- cbind(x=cage, gather(cbind(stunting.sigma.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="sigma - stunting")
stunting.sigma.mage.full.dat <- cbind(x=mage, gather(cbind(stunting.sigma.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="sigma - stunting")
stunting.sigma.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(stunting.sigma.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="sigma - stunting")
wasting.sigma.cage.dat <- cbind(x=cage,gather(cbind((wasting.sigma.cage+lin.cage.w.sigma)[,pos[1:5]])), source="NBSS prior", var="cage",param="sigma - wasting")
wasting.sigma.mage.dat <- cbind(x=mage,gather(cbind((wasting.sigma.mage+lin.mage.w.sigma)[,pos[1:5]])), source="NBSS prior", var="mage",param="sigma - wasting")
wasting.sigma.mbmi.dat <- cbind(x=mbmi,gather(cbind((wasting.sigma.mbmi+lin.mbmi.w.sigma)[,pos[1:5]])), source="NBSS prior", var="mbmi",param="sigma - wasting")
wasting.sigma.cage.full.dat <- cbind(x=cage, gather(cbind(wasting.sigma.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="sigma - wasting")
wasting.sigma.mage.full.dat <- cbind(x=mage, gather(cbind(wasting.sigma.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="sigma - wasting")
wasting.sigma.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(wasting.sigma.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="sigma - wasting")
wasting.mu.cage.dat <- cbind(x=cage,gather(cbind((wasting.mu.cage+lin.cage.w.mu)[,pos[1:5]])), source="NBSS prior", var="cage",param="mu - wasting")
wasting.mu.mage.dat <- cbind(x=mage,gather(cbind((wasting.mu.mage+lin.mage.w.mu)[,pos[1:5]])), source="NBSS prior", var="mage",param="mu - wasting")
wasting.mu.mbmi.dat <- cbind(x=mbmi,gather(cbind((wasting.mu.mbmi+lin.mbmi.w.mu)[,pos[1:5]])), source="NBSS prior", var="mbmi",param="mu - wasting")
wasting.mu.cage.full.dat <- cbind(x=cage, gather(cbind(wasting.mu.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="mu - wasting")
wasting.mu.mage.full.dat <- cbind(x=mage, gather(cbind(wasting.mu.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="mu - wasting")
wasting.mu.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(wasting.mu.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="mu - wasting")
stunting.mu.cage.dat <- cbind(x=cage,gather(cbind((stunting.mu.cage+lin.cage.s.mu)[,pos[1:5]])), source="NBSS prior", var="cage",param="mu - stunting")
stunting.mu.mage.dat <- cbind(x=mage,gather(cbind((stunting.mu.mage+lin.mage.s.mu)[,pos[1:5]])), source="NBSS prior", var="mage",param="mu - stunting")
stunting.mu.mbmi.dat <- cbind(x=mbmi,gather(cbind((stunting.mu.mbmi+lin.mbmi.s.mu)[,pos[1:5]])), source="NBSS prior", var="mbmi",param="mu - stunting")
stunting.mu.cage.full.dat <- cbind(x=cage, gather(cbind(stunting.mu.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="mu - stunting")
stunting.mu.mage.full.dat <- cbind(x=mage, gather(cbind(stunting.mu.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="mu - stunting")
stunting.mu.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(stunting.mu.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="mu - stunting")
###### Construction of nonlinear effect
Z.mbmi2 <- basisZ_mbmi(mbmi2)
Z.mage2 <- basisZ_mage(mage2)
Z.cage2 <- basisZ_cage(cage2)
params <- c("rho", "sigma", "mu")
vars <- c("wasting2", "stunting2")
kovars <- c("mbmi2", "cage2", "mage2")
effs <- expand.grid(params, vars, kovars)
effs <- effs[!(effs$Var1 == "rho" & effs$Var2 == "stunting2"),]
# setwd("C:/Studies/HypChoice/Nigeria/res/bivn/")
i <-1
# Builds a long-format data frame comparing the (centered) covariate effect
# under the NBSS/SSVS model against the full model, for one row of the
# global grid `effs` (columns: parameter, response, covariate).
# Depends on globals: effs, data (raw nigeria data), the Z.<kovar> basis
# matrices, the covariate grids (mbmi/mage/cage), and the precomputed
# <response>.<param>.<kovar>.full result tables read earlier in the script.
# Returns: rbind of the two sources with columns x, key, value, source,
# kovar, param_var (suitable for ggplot faceting).
ggEffFun <- function(i){
### construct nonlinear effect
param <- as.character(effs[i,1])
var <- as.character(effs[i,2])
kovar <- as.character(effs[i,3])
# Posterior samples of the linear coefficients for this parameter/response.
lin.sample.var.param <- read.table(paste0("nigeria/results/bivn/bivn_MAIN_",param,"_REGRESSION_", var, "_LinearEffects_ssvs_sample.raw"), header=T)
colnames(lin.sample.var.param) <- c("intnr","mbmi2", "mage2", "cage2", "csex", "electricity", "cbirthorder",
"edupartner", "munemployed", "mresidence", "car", "motorcycle",
"radio", "television", "ctwin", "refrigerator", "bicycle")
# Posterior samples of the P-spline coefficients for this covariate.
samp.dat <- read.table(paste0("nigeria/results/bivn/bivn_MAIN_",param,"_REGRESSION_", var, "_nonlinear_pspline_effect_of_", kovar,"_sample.raw"), header=T)
#param.dat <- read.table("bivn_MAIN_",param, "_REGRESSION_", var, "_nonlinear_pspline_effect_of_", kovar, "_param.res", header=T)
#res.dat <- read.table("bivn_MAIN_rho_REGRESSION_wasting2_nonlinear_pspline_effect_of_mbmi2.res", header=T)
# need: 1000 effect samples
# 1000 samples of 60 obs
# mbmi2 <- unique(data$mbmi2)
# mbmi2 <- mbmi2[order(mbmi2)]
# mage2 <- unique(data$mage2)
# mage2 <- mage2[order(mage2)]
# cage2 <- unique(data$cage2)
# cage2<- cage2[order(cage2)]
# Sorted unique covariate values (evaluation grid).
kv <- unique(data[,kovar])
kv <- kv[order(kv)]
# multiply beta coefficient of each sample with covariate to get 1000 samples of linear effects
lin.samp <- lapply(lin.sample.var.param[,kovar], function(x) x* kv)
# 1000 samples in columns
lin.samp <- do.call(cbind, lin.samp)
# 1000 samples of nonlinear effect in columns
# (design basis Z.<kovar> times transposed spline coefficient samples).
sm.samp <- get(paste0("Z.",kovar)) %*% t(samp.dat[,-1])
# Total effect = linear + spline part, column-centered via scale(scale=F).
eff <-scale((lin.samp +sm.samp), scale=F)
#mbmi.res.c <-t(scale(lin.samp, scale=F) + scale(sm.samp, scale=F))
# Pointwise posterior mean and 95% credible band over the samples.
eff.dat <- data.frame(pmean=apply(t(eff), 2, mean),
pqu2p5 =apply(t(eff), 2, function(x) quantile(x, prob=c(0.025))),
pqu97p5 =apply(t(eff), 2, function(x) quantile(x, prob=c(0.975))))
# Original (non-squared) covariate grid, e.g. "mbmi" from "mbmi2".
kv <- get(substr(kovar, 1, nchar(kovar)-1))
eff.dat <- cbind(x=kv, gather(eff.dat), source="NPBSS", kovar=kovar, param_var=paste0(param, " - ", var))
# Look up the full-model result table, e.g. wasting.rho.mbmi.full, and
# center its mean; the quantile bounds are shifted by the same centers.
eff.dat.full <- get(paste0(gsub('[0-9]+', '', var),".",param, ".", gsub('[0-9]+', '', kovar),".full"))
pmean.full <- scale(eff.dat.full$pmean, scale=F)
pqu2p5.full <- eff.dat.full$pqu2p5 - attr(pmean.full, "scaled:center")
pqu97p5.full <- eff.dat.full$pqu97p5 - attr(pmean.full, "scaled:center")
eff.dat.full <- cbind(x = kv, gather(data.frame(pmean= pmean.full, pqu2p5= pqu2p5.full, pqu97p5=pqu97p5.full)), source="Full Model", kovar=kovar, param_var=paste0(param, " - ", var))
# Stack both model variants for faceted plotting.
dat <- rbind(eff.dat, eff.dat.full)
return(dat)
}
gg <- lapply(1:nrow(effs), ggEffFun)
gg.data <- do.call(rbind, gg)
gg.data$param_var <-substr(gg.data$param_var, 1, nchar(as.character(gg.data$param_var))-1)
gg.data$kovar <- substr(gg.data$kovar, 1, nchar(as.character(gg.data$kovar))-1)
# pdf("nigeria_bivn_nonlin.pdf" , width = 2*480, height = 2*480)
ggplot(gg.data, aes(x=x, y=value)) + geom_line(aes(linetype=factor(key),colour=as.factor(source), alpha=as.factor(source))) +
facet_grid(factor(param_var, , levels=c("rho - wasting", "sigma - stunting", "sigma - wasting", "mu - stunting", "mu - wasting")) ~ kovar ,scales="free", labeller=label_parsed)+
scale_colour_manual(values=c("red", "dodgerblue3")) + scale_alpha_manual(values=c(1,0.8)) +
scale_linetype_manual(values=c("solid", "dotted", "dotted")) +
guides(colour=guide_legend(title="Model"), alpha=FALSE, linetype=FALSE) + xlab("") + ylab("")
# ggsave("nigeria/niger_bivn_nonlin.png",width = 7, height = 7)
cols <- c("firebrick3","firebrick1", "firebrick2" , "firebrick2", "firebrick1")
# BayesX retort plots
min = min(wasting.sigma.cage[,c(5,9)])
max = max(wasting.sigma.cage[,c(5,9)])
plot(cage, rep(0, nrow(wasting.sigma.cage)), ylim = c(min, max), main = expression(paste(sigma, " - wasting", sep = "")), type = "l", xlab = "cage", ylab = "")
for(i in 1:5){
lines(cage, wasting.sigma.cage[,pos[i]], col = cols[i], lty = 2)
}
min = min(wasting.sigma.mage[,c(5,9)])
max = max(wasting.sigma.mage[,c(5,9)])
plot(mage, rep(0, nrow(wasting.sigma.mage)), ylim = c(min, max), main = expression(paste(sigma, " - wasting", sep = "")), type = "l", xlab = "mage", ylab = "")
for(i in 1:5){
lines(mage, wasting.sigma.mage[,pos[i]], col = cols[i], lty = 2)
}
min = min(wasting.sigma.mbmi[,c(5,9)])
max = max(wasting.sigma.mbmi[,c(5,9)])
plot(mbmi, rep(0, nrow(wasting.sigma.mbmi)), ylim = c(min, max), main = expression(paste(sigma, " - wasting", sep = "")), type = "l", xlab = "mbmi", ylab = "")
for(i in 1:5){
lines(mbmi, wasting.sigma.mbmi[,pos[i]], col = cols[i], lty = 2)
}
min = min(stunting.mu.cage[,c(5,9)])
max = max(stunting.mu.cage[,c(5,9)])
plot(cage, rep(0, nrow(stunting.mu.cage)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "cage", ylab = "")
for(i in 1:5){
lines(cage, stunting.mu.cage[,pos[i]], col = cols[i], lty = 2)
}
min = min(stunting.mu.mage[,c(5,9)])
max = max(stunting.mu.mage[,c(5,9)])
plot(mage, rep(0, nrow(stunting.mu.mage)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "mage", ylab = "")
for(i in 1:5){
lines(mage, stunting.mu.mage[,pos[i]], col = cols[i], lty = 2)
}
min = min(stunting.mu.mbmi[,c(5,9)])
max = max(stunting.mu.mbmi[,c(5,9)])
plot(mbmi, rep(0, nrow(stunting.mu.mbmi)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "mbmi", ylab = "")
for(i in 1:5){
lines(mbmi, stunting.mu.mbmi[,pos[i]], col = cols[i], lty = 2)
}
min = min(wasting.mu.cage[,c(5,9)])
max = max(wasting.mu.cage[,c(5,9)])
plot(cage, rep(0, nrow(wasting.mu.cage)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "cage", ylab = "")
for(i in 1:5){
lines(cage, wasting.mu.cage[,pos[i]], col = cols[i], lty = 2)
}
min = min(wasting.mu.mage[,c(5,9)])
max = max(wasting.mu.mage[,c(5,9)])
plot(mage, rep(0, nrow(wasting.mu.mage)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "mage", ylab = "")
for(i in 1:5){
lines(mage, wasting.mu.mage[,pos[i]], col = cols[i], lty = 2)
}
min = min(wasting.mu.mbmi[,c(5,9)])
max = max(wasting.mu.mbmi[,c(5,9)])
plot(mbmi, rep(0, nrow(wasting.mu.mbmi)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "mbmi", ylab = "")
for(i in 1:5){
lines(mbmi, wasting.mu.mbmi[,pos[i]], col = cols[i], lty = 2)
}
dev.off()
# linear effects
data.lin <- rbind(wasting.rho.lin,
stunting.sigma.lin,
wasting.sigma.lin,
stunting.mu.lin,
wasting.mu.lin
)
data.lin$varname
data.lin$parameter <- rep(c(1,2,2,3,3), each = 16)#as.character(rep(c("1", "1", "2", "2", "3", "4", "4", "5"), each = 18))
data.lin$choltype <- rep(c(4,5,4,5,4), each = 16)#(rep(c("6", "7", "6", "7", "6", "6", "7", "6"), each = 18))
data.lin$Variable <- data.lin$varname
data.lin$Mean <- data.lin$pmean
labels <- list("1" = expression(rho),
"2" = expression(sigma),
"3" = expression(mu),
"4" = expression(wasting),
"5" = expression(stunting))
new_label <- function(variable,value){
return(labels[value])
}
# pdf("nigeria_bivn_lin.pdf", width = width, height = width_90)
ggplot(data.lin, aes(Variable, Mean) ) +
geom_point() +
geom_errorbar(aes(ymax = pqu97p5, ymin = pqu2p5)) +
coord_flip() +
theme_minimal(base_size = 8) +
facet_grid(parameter ~ choltype, labeller = new_label)
dev.off()
data.lin.nossvs <-rbind(wasting.rho.lin.full,
stunting.sigma.lin.full,
wasting.sigma.lin.full,
stunting.mu.lin.full,
wasting.mu.lin.full
)
data.lin.nossvs <- data.lin.nossvs[data.lin.nossvs$varname != "const",]
data.lin.nossvs$parameter <- rep(c(1,2,2,3,3), each = 13)#as.character(rep(c("1", "1", "2", "2", "3", "4", "4", "5"), each = 18))
data.lin.nossvs$choltype <- rep(c(4,5,4,5,4), each = 13)#(rep(c("6", "7", "6", "7", "6", "6", "7", "6"), each = 18))
data.lin.nossvs$Variable <- data.lin.nossvs$varname
data.lin.nossvs$Mean <- data.lin.nossvs$pmean
data.lin <- data.frame(data.lin, source="ssvs")
data.lin.nossvs <- data.frame(data.lin.nossvs, source="nossvs")
data <- rbind(data.lin, data.lin.nossvs)
data$alpha <- ifelse(data$source == "ssvs", 0.5, 0.45)
# png("nigeria_bivn_lin.png" , width = 2*480, height = 2*480)
# ggsave("niger_bivn_lin.pdf",width = 7, height = 7)
ggplot(data, aes(x =Variable,
y=Mean, colour=as.factor(source))) +
geom_point(size=1)+
geom_errorbar(aes(ymax = pqu97p5, ymin = pqu2p5), lwd=0.05)+
coord_flip()+
theme_minimal(base_size = 8)+
facet_grid(parameter ~ choltype, labeller = new_label)+
# scale_alpha_manual(values = c("ssvs"=1, "nossvs"=0.2), guide = 'none') +
scale_colour_manual(values = c("ssvs"="red", "nossvs"="dodgerblue3"), labels=c("NBSS prior", "Full model"),guide = 'none')+
ylab("Posterior Mean") + xlab("Variable")+
guides(colour=guide_legend(title="Model"))
# ggsave("nigeria/niger_bivn_lin.png",width = 7, height = 7)
########### spatial plot
# Recover the stratum labels (v023) from the DHS child-recode file.
# NOTE(review): the variable name `indiaraw` is a leftover from an India
# analysis; this reads the Nigeria file. read.dta() is from the 'foreign'
# package, which is not attached in this script -- presumably loaded by a
# dependency; confirm.
indiaraw <- read.dta("nigeria/data/NGKR6AFL.DTA")
states <- levels(indiaraw$v023)
states_str <- as.character(states)
# Keep only strata whose label contains "Urban".
# NOTE(review): 73 is hard-coded; presumably length(states_str) -- confirm.
in_it <- rep(FALSE, 73)
for(i in 1:73){
in_it[i] <- length(grep("Urban", states_str[i])) > 0
}
states <- as.factor(states_str[in_it])
# BayesX map object built from the subnational boundary shapefile, with
# polygons matched to the selected stratum names.
map <- shp2bnd(shpname = "nigeria/data/sdr_subnational_boundaries_2015-08-10-2/shps2/sdr_subnational_boundaries2",
regionnames = states)
# Region ordering shared by all subregion effect tables.
ord <- wasting.rho.subregion$subregion
# pdf("apl_nigeria_map_rho.pdf", width = width, height = 5)
# pdf("apl_nigeria_map_rho.pdf")
# postscript(file = paste0("apl_nigeria_map_rho.eps"), horizontal = FALSE)
# ---- Map of the spatial effect on rho (wasting) ----
rho1 <- wasting.rho.subregion$pmean[ord]
limmax <- max(max(rho1))
limmin <- min(min(rho1))
#limmax <- max(max(xi1), max(xi2))
#limmin <- min(min(xi1), min(xi2))
# Round the colour limits outward to multiples of 0.1, then symmetrise them
# about zero so the diverging palette is centred.
# NOTE(review): if limmin and limmax share a sign this can collapse the
# range (sign(limmin)*max(...) may equal limmax) -- confirm the limits are
# expected to straddle zero.
limmax <- 0.1 * ceiling(10*limmax)
limmin <- 0.1 * floor(10*limmin)
limmin <- sign(limmin) * max(limmax, -limmin)
limmax <- sign(limmax) * max(limmax, -limmin)
par(mar=c(4,4,2,1)+0.1, oma = c(0,0,0,0))
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = rho1)
# Layout: map on top, shared colour-legend strip below.
layout(matrix(c(1,2),nrow = 2, byrow = TRUE), widths = c(1), heights = c(1,0.2))
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("wasting")))
par(mar=c(0,0,0,0))
legend_map(c(limmin,limmax), window.scale=4)
par(mar=c(4,4,2,1)+0.1)
dev.off()
# pdf("nigeria/nigeria_map_sigma.pdf", width = width, height = 3.5)
# postscript(file = paste0("apl_nigeria_map_sigma.eps"), horizontal = FALSE)
# ---- Maps of the spatial effect on sigma (wasting + stunting, shared scale) ----
sigma1 <- wasting.sigma.subregion$pmean[ord]
sigma2 <- stunting.sigma.subregion$pmean[ord]
limmax <- max(max(sigma1), max(sigma2))
limmin <- min(min(sigma1), min(sigma2))
limmax <- 0.1 * ceiling(10*limmax)
limmin <- 0.1 * floor(10*limmin)
limmin <- sign(limmin) * max(limmax, -limmin)
limmax <- sign(limmax) * max(limmax, -limmin)
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = sigma1)
# Two maps side by side, legend strip spanning the bottom row.
layout(matrix(c(1,2,3,3),nrow = 2, byrow = TRUE), widths = c(1,1), heights = c(1,0.2))
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("wasting")))
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = sigma2)
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("stunting")))
par(mar=c(0,0,0,0))
legend_map(c(limmin,limmax), window.scale=4)
par(mar=c(4,4,2,1)+0.1)
dev.off()
# pdf("nigeria/nigeria_map_mu.pdf", width = width, height = 3.5)
# postscript(file = paste0("apl_nigeria_map_mu.eps"), horizontal = FALSE)
# ---- Maps of the spatial effect on mu (wasting + stunting, shared scale) ----
mu1 <- wasting.mu.subregion$pmean[ord]
mu2 <- stunting.mu.subregion$pmean[ord]
limmax <- max(max(mu1), max(mu2))
limmin <- min(min(mu1), min(mu2))
limmax <- 0.1 * ceiling(10*limmax)
limmin <- 0.1 * floor(10*limmin)
limmin <- sign(limmin) * max(limmax, -limmin)
limmax <- sign(limmax) * max(limmax, -limmin)
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = mu1)
layout(matrix(c(1,2,3,3),nrow = 2, byrow = TRUE), widths = c(1,1), heights = c(1,0.2))
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("wasting")))
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = mu2)
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("stunting")))
par(mar=c(0,0,0,0))
legend_map(c(limmin,limmax), window.scale=4)
par(mar=c(4,4,2,1)+0.1)
dev.off()
| /nigeria/plots.r | no_license | manucarl/varsel | R | false | false | 21,048 | r | # nigeria bivariate
# ---- Setup: packages, helpers and BayesX results -------------------------
library(BayesX)
library(tidyr)
# Plot helpers plus the P-spline basis constructors used later to rebuild
# nonlinear effects from MCMC samples.
source("nigeria/plot_legend.r")
source("nigeria/plot_sizes.r")
source("nigeria/basisZ_mbmi.r")
source("nigeria/basisZ_mage.r")
source("nigeria/basisZ_cage.r")
# read results from BayesX
source("nigeria/read_ssvs.r")
source("nigeria/read_nossvs.r")
# NOTE(review): require() fails silently if ggplot2 is missing; library()
# would error loudly.
require(ggplot2)
data <- read.table("nigeria/data/nigeria4.raw", header = TRUE)
par(mar=c(4,4,2,1)+0.1, oma = c(0,0,0,0),ps=10)
# Sorted unique covariate grids (raw and the "2"-suffixed transformed
# versions) used as x-axes for the effect plots.
mbmi <- unique(data$mbmi)
mbmi <- mbmi[order(mbmi)]
mage <- unique(data$mage)
mage <- mage[order(mage)]
cage <- unique(data$cage)
cage <- cage[order(cage)]
mbmi2 <- unique(data$mbmi2)
mbmi2 <- mbmi2[order(mbmi2)]
mage2 <- unique(data$mage2)
mage2 <- mage2[order(mage2)]
cage2 <- unique(data$cage2)
cage2 <- cage2[order(cage2)]
# splines
# Colour ramps and the result-table columns picked for plotting
# (posterior summary columns; presumably mean plus quantiles -- confirm
# against the BayesX output layout).
cols2 <- c("grey", "darkgrey", "black", "darkgrey", "grey")
cols1 <- c("firebrick1", "firebrick2", "firebrick3", "firebrick2", "firebrick1")
pos <- c(5, 6, 3, 8, 9)
#pdf("nigeria_nonlin.pdf", width = height_90, height = width_90)
# multiply
# Linear share of each covariate effect: posterior-mean slope times the
# covariate; added back to the spline curves below so the plots show the
# total covariate effect.
lin.cage.rho <- wasting.rho.lin[wasting.rho.lin$varname=="cage2",]$pmean* wasting.rho.cage$cage2
lin.mage.rho <- wasting.rho.lin[wasting.rho.lin$varname=="mage2",]$pmean*wasting.rho.mage$mage2
lin.mbmi.rho <- wasting.rho.lin[wasting.rho.lin$varname=="mbmi2",]$pmean*wasting.rho.mbmi$mbmi2
lin.cage.s.sigma <- stunting.sigma.lin[stunting.sigma.lin$varname=="cage2",]$pmean*stunting.sigma.cage$cage
lin.mage.s.sigma <- stunting.sigma.lin[stunting.sigma.lin$varname=="mage2",]$pmean*stunting.sigma.mage$mage
lin.mbmi.s.sigma <- stunting.sigma.lin[stunting.sigma.lin$varname=="mbmi2",]$pmean*stunting.sigma.mbmi$mbmi
lin.cage.w.sigma <- wasting.sigma.lin[wasting.sigma.lin$varname=="cage2",]$pmean * wasting.sigma.cage$cage
lin.mage.w.sigma <- wasting.sigma.lin[wasting.sigma.lin$varname=="mage2",]$pmean * wasting.sigma.mage$mage
lin.mbmi.w.sigma <- wasting.sigma.lin[wasting.sigma.lin$varname=="mbmi2",]$pmean * wasting.sigma.mbmi$mbmi
lin.cage.w.mu <- wasting.mu.lin[wasting.mu.lin$varname=="cage2",]$pmean * wasting.mu.cage$cage
lin.mage.w.mu <- wasting.mu.lin[wasting.mu.lin$varname=="mage2",]$pmean * wasting.mu.mage$mage
lin.mbmi.w.mu <- wasting.mu.lin[wasting.mu.lin$varname=="mbmi2",]$pmean * wasting.mu.mbmi$mbmi
lin.cage.s.mu <- stunting.mu.lin[stunting.mu.lin$varname=="cage2",]$pmean * stunting.mu.cage$cage
lin.mage.s.mu <- stunting.mu.lin[stunting.mu.lin$varname=="mage2",]$pmean * stunting.mu.mage$mage
lin.mbmi.s.mu <- stunting.mu.lin[stunting.mu.lin$varname=="mbmi2",]$pmean * stunting.mu.mbmi$mbmi
# ---- Long-format effect tables for ggplot --------------------------------
# For each parameter (rho/sigma/mu), response (wasting/stunting) and covariate
# (cage/mage/mbmi): take the posterior summary columns selected by `pos`, add
# the linear share for the NBSS model, gather to long format and tag the
# source/covariate/parameter. The ".full" tables are the full-model results.
# NOTE(review): gather() is superseded by tidyr::pivot_longer().
wasting.rho.cage.dat <- cbind(x=cage,gather(cbind((wasting.rho.cage[,pos[1:5]]+lin.cage.rho))), source="NBSS prior", var="cage", param="rho - wasting")
wasting.rho.mage.dat <- cbind(x=mage,gather(cbind((wasting.rho.mage[,pos[1:5]]+lin.mage.rho))), source="NBSS prior", var="mage", param="rho - wasting")
wasting.rho.mbmi.dat <- cbind(x=mbmi,gather(cbind((wasting.rho.mbmi[,pos[1:5]]+lin.mbmi.rho))), source="NBSS prior", var="mbmi", param="rho - wasting")
wasting.rho.cage.full.dat <- cbind(x=cage, gather(cbind(wasting.rho.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="rho - wasting")
wasting.rho.mage.full.dat <- cbind(x=mage, gather(cbind(wasting.rho.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="rho - wasting")
wasting.rho.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(wasting.rho.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="rho - wasting")
stunting.sigma.cage.dat <- cbind(x=cage,gather(cbind((stunting.sigma.cage[,pos[1:5]]+lin.cage.s.sigma))), source="NBSS prior", var="cage",param="sigma - stunting")
stunting.sigma.mage.dat <- cbind(x=mage,gather(cbind((stunting.sigma.mage[,pos[1:5]]+lin.mage.s.sigma))), source="NBSS prior", var="mage",param="sigma - stunting")
stunting.sigma.mbmi.dat <- cbind(x=mbmi,gather(cbind((stunting.sigma.mbmi[,pos[1:5]]+lin.mbmi.s.sigma))), source="NBSS prior", var="mbmi",param="sigma - stunting")
stunting.sigma.cage.full.dat <- cbind(x=cage, gather(cbind(stunting.sigma.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="sigma - stunting")
stunting.sigma.mage.full.dat <- cbind(x=mage, gather(cbind(stunting.sigma.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="sigma - stunting")
stunting.sigma.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(stunting.sigma.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="sigma - stunting")
wasting.sigma.cage.dat <- cbind(x=cage,gather(cbind((wasting.sigma.cage+lin.cage.w.sigma)[,pos[1:5]])), source="NBSS prior", var="cage",param="sigma - wasting")
wasting.sigma.mage.dat <- cbind(x=mage,gather(cbind((wasting.sigma.mage+lin.mage.w.sigma)[,pos[1:5]])), source="NBSS prior", var="mage",param="sigma - wasting")
wasting.sigma.mbmi.dat <- cbind(x=mbmi,gather(cbind((wasting.sigma.mbmi+lin.mbmi.w.sigma)[,pos[1:5]])), source="NBSS prior", var="mbmi",param="sigma - wasting")
wasting.sigma.cage.full.dat <- cbind(x=cage, gather(cbind(wasting.sigma.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="sigma - wasting")
wasting.sigma.mage.full.dat <- cbind(x=mage, gather(cbind(wasting.sigma.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="sigma - wasting")
wasting.sigma.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(wasting.sigma.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="sigma - wasting")
wasting.mu.cage.dat <- cbind(x=cage,gather(cbind((wasting.mu.cage+lin.cage.w.mu)[,pos[1:5]])), source="NBSS prior", var="cage",param="mu - wasting")
wasting.mu.mage.dat <- cbind(x=mage,gather(cbind((wasting.mu.mage+lin.mage.w.mu)[,pos[1:5]])), source="NBSS prior", var="mage",param="mu - wasting")
wasting.mu.mbmi.dat <- cbind(x=mbmi,gather(cbind((wasting.mu.mbmi+lin.mbmi.w.mu)[,pos[1:5]])), source="NBSS prior", var="mbmi",param="mu - wasting")
wasting.mu.cage.full.dat <- cbind(x=cage, gather(cbind(wasting.mu.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="mu - wasting")
wasting.mu.mage.full.dat <- cbind(x=mage, gather(cbind(wasting.mu.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="mu - wasting")
wasting.mu.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(wasting.mu.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="mu - wasting")
stunting.mu.cage.dat <- cbind(x=cage,gather(cbind((stunting.mu.cage+lin.cage.s.mu)[,pos[1:5]])), source="NBSS prior", var="cage",param="mu - stunting")
stunting.mu.mage.dat <- cbind(x=mage,gather(cbind((stunting.mu.mage+lin.mage.s.mu)[,pos[1:5]])), source="NBSS prior", var="mage",param="mu - stunting")
stunting.mu.mbmi.dat <- cbind(x=mbmi,gather(cbind((stunting.mu.mbmi+lin.mbmi.s.mu)[,pos[1:5]])), source="NBSS prior", var="mbmi",param="mu - stunting")
stunting.mu.cage.full.dat <- cbind(x=cage, gather(cbind(stunting.mu.cage.full[,pos[1:5]])), source="Full Model", var="cage", param="mu - stunting")
stunting.mu.mage.full.dat <- cbind(x=mage, gather(cbind(stunting.mu.mage.full[,pos[1:5]])), source="Full Model", var="mage", param="mu - stunting")
stunting.mu.mbmi.full.dat <- cbind(x=mbmi, gather(cbind(stunting.mu.mbmi.full[,pos[1:5]])), source="Full Model", var="mbmi", param="mu - stunting")
###### Construction of nonlinear effect
# P-spline design matrices evaluated on the sorted unique covariate grids.
Z.mbmi2 <- basisZ_mbmi(mbmi2)
Z.mage2 <- basisZ_mage(mage2)
Z.cage2 <- basisZ_cage(cage2)
# All parameter x response x covariate combinations; the rho-stunting rows
# are dropped -- presumably because the correlation parameter is shared
# between the two responses of a bivariate pair.
params <- c("rho", "sigma", "mu")
vars <- c("wasting2", "stunting2")
kovars <- c("mbmi2", "cage2", "mage2")
effs <- expand.grid(params, vars, kovars)
effs <- effs[!(effs$Var1 == "rho" & effs$Var2 == "stunting2"),]
# setwd("C:/Studies/HypChoice/Nigeria/res/bivn/")
# NOTE(review): leftover from interactive debugging; `i` is shadowed by the
# lapply() argument below and this assignment is never used.
i <-1
# Reconstruct the centred total (linear + nonlinear P-spline) effect of one
# covariate on one parameter/response combination from the saved MCMC
# samples, and return it in long format together with the corresponding
# full-model (no-SSVS) effect for comparison.
#
# i: row index into the global `effs` grid (parameter, response, covariate).
# Relies on globals: effs, data, the Z.<kovar> basis matrices, and the
# <response>.<param>.<kovar>.full summary tables read in earlier.
# Returns a data frame with columns x, key, value, source, kovar, param_var.
ggEffFun <- function(i){
### construct nonlinear effect
param <- as.character(effs[i,1])
var <- as.character(effs[i,2])
kovar <- as.character(effs[i,3])
# MCMC samples of the linear coefficients for this parameter/response.
lin.sample.var.param <- read.table(paste0("nigeria/results/bivn/bivn_MAIN_",param,"_REGRESSION_", var, "_LinearEffects_ssvs_sample.raw"), header=T)
colnames(lin.sample.var.param) <- c("intnr","mbmi2", "mage2", "cage2", "csex", "electricity", "cbirthorder",
"edupartner", "munemployed", "mresidence", "car", "motorcycle",
"radio", "television", "ctwin", "refrigerator", "bicycle")
# MCMC samples of the P-spline coefficients for this covariate.
samp.dat <- read.table(paste0("nigeria/results/bivn/bivn_MAIN_",param,"_REGRESSION_", var, "_nonlinear_pspline_effect_of_", kovar,"_sample.raw"), header=T)
#param.dat <- read.table("bivn_MAIN_",param, "_REGRESSION_", var, "_nonlinear_pspline_effect_of_", kovar, "_param.res", header=T)
#res.dat <- read.table("bivn_MAIN_rho_REGRESSION_wasting2_nonlinear_pspline_effect_of_mbmi2.res", header=T)
# need: 1000 effect samples
# 1000 samples of 60 obs
# mbmi2 <- unique(data$mbmi2)
# mbmi2 <- mbmi2[order(mbmi2)]
# mage2 <- unique(data$mage2)
# mage2 <- mage2[order(mage2)]
# cage2 <- unique(data$cage2)
# cage2<- cage2[order(cage2)]
# Sorted unique grid of the (transformed) covariate.
kv <- unique(data[,kovar])
kv <- kv[order(kv)]
# multiply beta coefficient of each sample with covariate to get 1000 samples of linear effects
lin.samp <- lapply(lin.sample.var.param[,kovar], function(x) x* kv)
# 1000 samples in columns
lin.samp <- do.call(cbind, lin.samp)
# 1000 samples of nonlinear effect in columns
sm.samp <- get(paste0("Z.",kovar)) %*% t(samp.dat[,-1])
# Total effect per sample, column-centred for identifiability.
eff <-scale((lin.samp +sm.samp), scale=F)
#mbmi.res.c <-t(scale(lin.samp, scale=F) + scale(sm.samp, scale=F))
# Pointwise posterior mean and 2.5%/97.5% quantiles across samples.
eff.dat <- data.frame(pmean=apply(t(eff), 2, mean),
pqu2p5 =apply(t(eff), 2, function(x) quantile(x, prob=c(0.025))),
pqu97p5 =apply(t(eff), 2, function(x) quantile(x, prob=c(0.975))))
# Plot against the raw covariate (name without the trailing "2").
kv <- get(substr(kovar, 1, nchar(kovar)-1))
eff.dat <- cbind(x=kv, gather(eff.dat), source="NPBSS", kovar=kovar, param_var=paste0(param, " - ", var))
# Full-model summary table, centred the same way so the curves line up.
eff.dat.full <- get(paste0(gsub('[0-9]+', '', var),".",param, ".", gsub('[0-9]+', '', kovar),".full"))
pmean.full <- scale(eff.dat.full$pmean, scale=F)
pqu2p5.full <- eff.dat.full$pqu2p5 - attr(pmean.full, "scaled:center")
pqu97p5.full <- eff.dat.full$pqu97p5 - attr(pmean.full, "scaled:center")
eff.dat.full <- cbind(x = kv, gather(data.frame(pmean= pmean.full, pqu2p5= pqu2p5.full, pqu97p5=pqu97p5.full)), source="Full Model", kovar=kovar, param_var=paste0(param, " - ", var))
dat <- rbind(eff.dat, eff.dat.full)
return(dat)
}
# Build the long-format table for every combination and stack the results.
gg <- lapply(1:nrow(effs), ggEffFun)
gg.data <- do.call(rbind, gg)
# Strip the trailing "2" from the transformed names for display.
gg.data$param_var <-substr(gg.data$param_var, 1, nchar(as.character(gg.data$param_var))-1)
gg.data$kovar <- substr(gg.data$kovar, 1, nchar(as.character(gg.data$kovar))-1)
# pdf("nigeria_bivn_nonlin.pdf" , width = 2*480, height = 2*480)
# Faceted comparison of NBSS vs full-model effect curves
# (solid = posterior mean, dotted = 2.5%/97.5% quantiles).
ggplot(gg.data, aes(x=x, y=value)) + geom_line(aes(linetype=factor(key),colour=as.factor(source), alpha=as.factor(source))) +
facet_grid(factor(param_var, , levels=c("rho - wasting", "sigma - stunting", "sigma - wasting", "mu - stunting", "mu - wasting")) ~ kovar ,scales="free", labeller=label_parsed)+
scale_colour_manual(values=c("red", "dodgerblue3")) + scale_alpha_manual(values=c(1,0.8)) +
scale_linetype_manual(values=c("solid", "dotted", "dotted")) +
guides(colour=guide_legend(title="Model"), alpha=FALSE, linetype=FALSE) + xlab("") + ylab("")
# ggsave("nigeria/niger_bivn_nonlin.png",width = 7, height = 7)
cols <- c("firebrick3","firebrick1", "firebrick2" , "firebrick2", "firebrick1")
# BayesX retort plots
# Base-graphics overlays: for each covariate, draw a zero reference line and
# the posterior summary curves (columns selected by `pos`), with the y-range
# taken from columns 5 and 9 -- presumably the 2.5%/97.5% quantile columns;
# confirm against the BayesX output layout.
# NOTE(review): `min`/`max` shadow the base functions throughout this section.
min = min(wasting.sigma.cage[,c(5,9)])
max = max(wasting.sigma.cage[,c(5,9)])
plot(cage, rep(0, nrow(wasting.sigma.cage)), ylim = c(min, max), main = expression(paste(sigma, " - wasting", sep = "")), type = "l", xlab = "cage", ylab = "")
for(i in 1:5){
lines(cage, wasting.sigma.cage[,pos[i]], col = cols[i], lty = 2)
}
min = min(wasting.sigma.mage[,c(5,9)])
max = max(wasting.sigma.mage[,c(5,9)])
plot(mage, rep(0, nrow(wasting.sigma.mage)), ylim = c(min, max), main = expression(paste(sigma, " - wasting", sep = "")), type = "l", xlab = "mage", ylab = "")
for(i in 1:5){
lines(mage, wasting.sigma.mage[,pos[i]], col = cols[i], lty = 2)
}
min = min(wasting.sigma.mbmi[,c(5,9)])
max = max(wasting.sigma.mbmi[,c(5,9)])
plot(mbmi, rep(0, nrow(wasting.sigma.mbmi)), ylim = c(min, max), main = expression(paste(sigma, " - wasting", sep = "")), type = "l", xlab = "mbmi", ylab = "")
for(i in 1:5){
lines(mbmi, wasting.sigma.mbmi[,pos[i]], col = cols[i], lty = 2)
}
# mu - stunting panels.
min = min(stunting.mu.cage[,c(5,9)])
max = max(stunting.mu.cage[,c(5,9)])
plot(cage, rep(0, nrow(stunting.mu.cage)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "cage", ylab = "")
for(i in 1:5){
lines(cage, stunting.mu.cage[,pos[i]], col = cols[i], lty = 2)
}
min = min(stunting.mu.mage[,c(5,9)])
max = max(stunting.mu.mage[,c(5,9)])
plot(mage, rep(0, nrow(stunting.mu.mage)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "mage", ylab = "")
for(i in 1:5){
lines(mage, stunting.mu.mage[,pos[i]], col = cols[i], lty = 2)
}
min = min(stunting.mu.mbmi[,c(5,9)])
max = max(stunting.mu.mbmi[,c(5,9)])
plot(mbmi, rep(0, nrow(stunting.mu.mbmi)), ylim = c(min, max), main = expression(paste(mu, " - stunting", sep = "")), type = "l", xlab = "mbmi", ylab = "")
for(i in 1:5){
lines(mbmi, stunting.mu.mbmi[,pos[i]], col = cols[i], lty = 2)
}
# ---- Base-graphics panels: posterior curves for mu (wasting) -------------
# BUGFIX: these three panels plot the wasting.mu.* estimates but were titled
# "mu - stunting" (copy-paste from the stunting block above); titles fixed.
# Also renamed the y-range locals so they no longer shadow base::min/max.
ylim.lo <- min(wasting.mu.cage[, c(5, 9)])
ylim.hi <- max(wasting.mu.cage[, c(5, 9)])
plot(cage, rep(0, nrow(wasting.mu.cage)), ylim = c(ylim.lo, ylim.hi), main = expression(paste(mu, " - wasting", sep = "")), type = "l", xlab = "cage", ylab = "")
for (i in 1:5) {
  # Overlay the posterior summary curves (columns selected via `pos`).
  lines(cage, wasting.mu.cage[, pos[i]], col = cols[i], lty = 2)
}
ylim.lo <- min(wasting.mu.mage[, c(5, 9)])
ylim.hi <- max(wasting.mu.mage[, c(5, 9)])
plot(mage, rep(0, nrow(wasting.mu.mage)), ylim = c(ylim.lo, ylim.hi), main = expression(paste(mu, " - wasting", sep = "")), type = "l", xlab = "mage", ylab = "")
for (i in 1:5) {
  lines(mage, wasting.mu.mage[, pos[i]], col = cols[i], lty = 2)
}
ylim.lo <- min(wasting.mu.mbmi[, c(5, 9)])
ylim.hi <- max(wasting.mu.mbmi[, c(5, 9)])
plot(mbmi, rep(0, nrow(wasting.mu.mbmi)), ylim = c(ylim.lo, ylim.hi), main = expression(paste(mu, " - wasting", sep = "")), type = "l", xlab = "mbmi", ylab = "")
for (i in 1:5) {
  lines(mbmi, wasting.mu.mbmi[, pos[i]], col = cols[i], lty = 2)
}
# Close the graphics device used for the diagnostic panels.
dev.off()
# linear effects
# Stack the SSVS linear-effect tables for all parameter/response equations.
data.lin <- rbind(wasting.rho.lin,
stunting.sigma.lin,
wasting.sigma.lin,
stunting.mu.lin,
wasting.mu.lin
)
# NOTE(review): leftover interactive inspection; prints the variable names.
data.lin$varname
# Facet keys resolved by the `labels` lookup below; 16 rows per equation here
# (the SSVS tables keep more terms than the trimmed full-model tables).
data.lin$parameter <- rep(c(1,2,2,3,3), each = 16)#as.character(rep(c("1", "1", "2", "2", "3", "4", "4", "5"), each = 18))
data.lin$choltype <- rep(c(4,5,4,5,4), each = 16)#(rep(c("6", "7", "6", "7", "6", "6", "7", "6"), each = 18))
data.lin$Variable <- data.lin$varname
data.lin$Mean <- data.lin$pmean
# Facet label lookup: parameter codes 1-3 map to Greek symbols,
# response codes 4-5 to the outcome names.
labels <- list("1" = expression(rho),
"2" = expression(sigma),
"3" = expression(mu),
"4" = expression(wasting),
"5" = expression(stunting))
# Facet labeller for facet_grid(): looks the facet value up in the global
# `labels` list and returns the matching (expression) label.
new_label <- function(variable, value) {
  labels[value]
}
# pdf("nigeria_bivn_lin.pdf", width = width, height = width_90)
# SSVS-only dot-and-interval plot of the linear effects.
# NOTE(review): with the pdf() call above commented out, dev.off() below
# closes whatever device happens to be open -- confirm this is intended.
ggplot(data.lin, aes(Variable, Mean) ) +
geom_point() +
geom_errorbar(aes(ymax = pqu97p5, ymin = pqu2p5)) +
coord_flip() +
theme_minimal(base_size = 8) +
facet_grid(parameter ~ choltype, labeller = new_label)
dev.off()
# Full-model (no spike-and-slab) linear-effect tables, stacked like data.lin.
data.lin.nossvs <-rbind(wasting.rho.lin.full,
stunting.sigma.lin.full,
wasting.sigma.lin.full,
stunting.mu.lin.full,
wasting.mu.lin.full
)
# Drop the intercept rows; 13 covariate effects remain per equation.
data.lin.nossvs <- data.lin.nossvs[data.lin.nossvs$varname != "const",]
# Facet keys resolved by the `labels` facet labeller
# (1 = rho, 2 = sigma, 3 = mu; 4 = wasting, 5 = stunting).
data.lin.nossvs$parameter <- rep(c(1,2,2,3,3), each = 13)#as.character(rep(c("1", "1", "2", "2", "3", "4", "4", "5"), each = 18))
data.lin.nossvs$choltype <- rep(c(4,5,4,5,4), each = 13)#(rep(c("6", "7", "6", "7", "6", "6", "7", "6"), each = 18))
data.lin.nossvs$Variable <- data.lin.nossvs$varname
data.lin.nossvs$Mean <- data.lin.nossvs$pmean
# Tag each table with its source model and stack for a joint plot.
data.lin <- data.frame(data.lin, source="ssvs")
data.lin.nossvs <- data.frame(data.lin.nossvs, source="nossvs")
data <- rbind(data.lin, data.lin.nossvs)
# NOTE(review): `alpha` is currently unused -- the matching scale_alpha_manual
# call below is commented out.
data$alpha <- ifelse(data$source == "ssvs", 0.5, 0.45)
# png("nigeria_bivn_lin.png" , width = 2*480, height = 2*480)
# ggsave("niger_bivn_lin.pdf",width = 7, height = 7)
# Dot-and-interval plot: posterior means with 2.5%/97.5% posterior quantiles,
# faceted by parameter x response, one colour per model.
ggplot(data, aes(x =Variable,
y=Mean, colour=as.factor(source))) +
geom_point(size=1)+
geom_errorbar(aes(ymax = pqu97p5, ymin = pqu2p5), lwd=0.05)+
coord_flip()+
theme_minimal(base_size = 8)+
facet_grid(parameter ~ choltype, labeller = new_label)+
# scale_alpha_manual(values = c("ssvs"=1, "nossvs"=0.2), guide = 'none') +
scale_colour_manual(values = c("ssvs"="red", "nossvs"="dodgerblue3"), labels=c("NBSS prior", "Full model"),guide = 'none')+
ylab("Posterior Mean") + xlab("Variable")+
guides(colour=guide_legend(title="Model"))
# ggsave("nigeria/niger_bivn_lin.png",width = 7, height = 7)
########### spatial plot
# Recover the stratum labels (v023) from the DHS child-recode file.
# NOTE(review): the variable name `indiaraw` is a leftover from an India
# analysis; this reads the Nigeria file. read.dta() is from the 'foreign'
# package, which is not attached in this script -- presumably loaded by a
# dependency; confirm.
indiaraw <- read.dta("nigeria/data/NGKR6AFL.DTA")
states <- levels(indiaraw$v023)
states_str <- as.character(states)
# Keep only strata whose label contains "Urban".
# NOTE(review): 73 is hard-coded; presumably length(states_str) -- confirm.
in_it <- rep(FALSE, 73)
for(i in 1:73){
in_it[i] <- length(grep("Urban", states_str[i])) > 0
}
states <- as.factor(states_str[in_it])
# BayesX map object built from the subnational boundary shapefile, with
# polygons matched to the selected stratum names.
map <- shp2bnd(shpname = "nigeria/data/sdr_subnational_boundaries_2015-08-10-2/shps2/sdr_subnational_boundaries2",
regionnames = states)
# Region ordering shared by all subregion effect tables.
ord <- wasting.rho.subregion$subregion
# pdf("apl_nigeria_map_rho.pdf", width = width, height = 5)
# pdf("apl_nigeria_map_rho.pdf")
# postscript(file = paste0("apl_nigeria_map_rho.eps"), horizontal = FALSE)
# ---- Map of the spatial effect on rho (wasting) ----
rho1 <- wasting.rho.subregion$pmean[ord]
limmax <- max(max(rho1))
limmin <- min(min(rho1))
#limmax <- max(max(xi1), max(xi2))
#limmin <- min(min(xi1), min(xi2))
# Round the colour limits outward to multiples of 0.1, then symmetrise them
# about zero so the diverging palette is centred.
# NOTE(review): if limmin and limmax share a sign this can collapse the
# range (sign(limmin)*max(...) may equal limmax) -- confirm the limits are
# expected to straddle zero.
limmax <- 0.1 * ceiling(10*limmax)
limmin <- 0.1 * floor(10*limmin)
limmin <- sign(limmin) * max(limmax, -limmin)
limmax <- sign(limmax) * max(limmax, -limmin)
par(mar=c(4,4,2,1)+0.1, oma = c(0,0,0,0))
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = rho1)
# Layout: map on top, shared colour-legend strip below.
layout(matrix(c(1,2),nrow = 2, byrow = TRUE), widths = c(1), heights = c(1,0.2))
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("wasting")))
par(mar=c(0,0,0,0))
legend_map(c(limmin,limmax), window.scale=4)
par(mar=c(4,4,2,1)+0.1)
dev.off()
# pdf("nigeria/nigeria_map_sigma.pdf", width = width, height = 3.5)
# postscript(file = paste0("apl_nigeria_map_sigma.eps"), horizontal = FALSE)
# ---- Maps of the spatial effect on sigma (wasting + stunting, shared scale) ----
sigma1 <- wasting.sigma.subregion$pmean[ord]
sigma2 <- stunting.sigma.subregion$pmean[ord]
limmax <- max(max(sigma1), max(sigma2))
limmin <- min(min(sigma1), min(sigma2))
limmax <- 0.1 * ceiling(10*limmax)
limmin <- 0.1 * floor(10*limmin)
limmin <- sign(limmin) * max(limmax, -limmin)
limmax <- sign(limmax) * max(limmax, -limmin)
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = sigma1)
# Two maps side by side, legend strip spanning the bottom row.
layout(matrix(c(1,2,3,3),nrow = 2, byrow = TRUE), widths = c(1,1), heights = c(1,0.2))
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("wasting")))
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = sigma2)
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("stunting")))
par(mar=c(0,0,0,0))
legend_map(c(limmin,limmax), window.scale=4)
par(mar=c(4,4,2,1)+0.1)
dev.off()
# pdf("nigeria/nigeria_map_mu.pdf", width = width, height = 3.5)
# postscript(file = paste0("apl_nigeria_map_mu.eps"), horizontal = FALSE)
# ---- Maps of the spatial effect on mu (wasting + stunting, shared scale) ----
mu1 <- wasting.mu.subregion$pmean[ord]
mu2 <- stunting.mu.subregion$pmean[ord]
limmax <- max(max(mu1), max(mu2))
limmin <- min(min(mu1), min(mu2))
limmax <- 0.1 * ceiling(10*limmax)
limmin <- 0.1 * floor(10*limmin)
limmin <- sign(limmin) * max(limmax, -limmin)
limmax <- sign(limmax) * max(limmax, -limmin)
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = mu1)
layout(matrix(c(1,2,3,3),nrow = 2, byrow = TRUE), widths = c(1,1), heights = c(1,0.2))
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("wasting")))
data <- data.frame(intnr = seq(1, length(states)), states = as.character(states), x = mu2)
drawmap(data = data, map = map, drawnames = FALSE, plotvar = "x", swapcolors = TRUE, limits = c(limmin, limmax), hcl.par = list(h = c(250, 0), c = 80, l = c(50, 95), power = 1.2), legend = FALSE, main = expression(paste("stunting")))
par(mar=c(0,0,0,0))
legend_map(c(limmin,limmax), window.scale=4)
par(mar=c(4,4,2,1)+0.1)
dev.off()
|
# ------------------------------------------------------------------------
# Beast QG MS
# Genetic correlations for iga, ige, igg across all ages, lambs, adults
# AMS, SEJ
# July 2019
# ------------------------------------------------------------------------
# Inputs: the genomic relationship matrix inverse (grminv), the formatted
# phenotype table (BEASTX) and the pedigree used for the A-inverse below.
load("data/20181107_SoayGRM.Rdata")
load("data/20181107_BEAST_data_formatted.RData")
library(asreml)
library(plyr)
library(ggplot2)
library(magrittr)
library(tidyr)
source("r/ASReml.EstEffects.R")
source("r/ASReml.ExtractPredictors.R")
source("r/pin.R")
# Inverse additive relationship matrix from the pedigree (animal model).
ainv <- asreml.Ainverse(pedigree)$ginv
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 1. Make model structures                        #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Age-class-specific copies of each antibody trait: value kept for the
# relevant age class, NA otherwise, so a bivariate model can pair lamb and
# adult measures of the same or different isotypes.
BEASTX$Lamb.IgAmp <- ifelse(BEASTX$LambAdult == "Lambs" , BEASTX$IgAmp, NA)
BEASTX$Lamb.IgEmp <- ifelse(BEASTX$LambAdult == "Lambs" , BEASTX$IgEmp, NA)
BEASTX$Lamb.IgGmp <- ifelse(BEASTX$LambAdult == "Lambs" , BEASTX$IgGmp, NA)
BEASTX$Adult.IgAmp <- ifelse(BEASTX$LambAdult == "Adults", BEASTX$IgAmp, NA)
BEASTX$Adult.IgEmp <- ifelse(BEASTX$LambAdult == "Adults", BEASTX$IgEmp, NA)
BEASTX$Adult.IgGmp <- ifelse(BEASTX$LambAdult == "Adults", BEASTX$IgGmp, NA)
head(BEASTX)
# All unordered pairs of the six trait columns.
x1 <- data.frame(Trait1 = c("Lamb.IgAmp", "Lamb.IgEmp", "Lamb.IgGmp", "Adult.IgAmp", "Adult.IgEmp", "Adult.IgGmp"))
x2 <- x1
names(x2) <- "Trait2"
# Cartesian product, then sort each pair row-wise and deduplicate so (A,B)
# and (B,A) collapse to one row. Note apply() coerces to character here.
x1 <- merge(x1, x2) %>%
apply(1, sort) %>%
t %>%
data.frame %>%
unique()
x1 <- subset(x1, X1 != X2)
# Split "AgeClass.Trait" into separate columns for both pair members.
x1 <- separate(x1, X1, c("LambAdult.1", "Trait.1"), sep = "\\.", remove = F)
x1 <- separate(x1, X2, c("LambAdult.2", "Trait.2"), sep = "\\.", remove = F)
# Model formula (as a string) depends on the age classes involved:
# corgh(trait):ped(ID) fits the genetic correlation; adult-adult models add
# a permanent-environment term idh(trait):ide(ID); lamb models adjust for
# August age and adult models for age.
x1$Model <- NA
x1$Model <- ifelse(x1$LambAdult.1 == "Lamb" & x1$LambAdult.2 == "Lamb",
"trait+trait:Sex+trait:LambAgeAugust,random=~corgh(trait):ped(ID)",
x1$Model)
x1$Model <- ifelse(x1$LambAdult.1 == "Adult" & x1$LambAdult.2 == "Adult",
"trait+trait:Sex+trait:Age,random=~corgh(trait):ped(ID)+idh(trait):ide(ID)",
x1$Model)
x1$Model <- ifelse(x1$LambAdult.1 != x1$LambAdult.2,
"trait+trait:Sex,random=~corgh(trait):ped(ID)",
x1$Model)
# Prepend the bivariate response.
x1$Model <- paste0("cbind(", x1$X1, ",", x1$X2, ") ~ ", x1$Model)
x1$ModelNo <- 1:nrow(x1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 2. Run the Models                 #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Fit each bivariate animal model with the pedigree A-inverse and collect
# the variance components into one table.
restab <- NULL
modlist <- list()
for(i in 1:nrow(x1)){
print(paste0("Running model ", i))
# NOTE(review): eval(parse()) splices the formula string into the asreml
# call -- a common asreml workaround but fragile; consider as.formula().
eval(parse(text = paste0("fit1 <- asreml(fixed = ", x1$Model[i], ",
rcov = ~ units:idh(trait, init = NA),
data = BEASTX,
ginverse = list(ID = ainv),
workspace = 500e+6, pworkspace = 500e+6,
maxiter = 100, na.method.Y = \"include\", na.method.X = \"include\")")))
modlist[[i]] <- fit1
# Append this model's variance components; row names identify the terms.
temp <- cbind(ModelNo = i, summary(fit1)$varcomp)
temp$Effect <- row.names(temp)
restab <- rbind(restab, temp)
rm(fit1, temp)
}
# NOTE(review): leftover inspection; prints the model formulas.
x1$Model
# Switch the pedigree term ped(ID) to giv(ID) so the GRM inverse can be
# supplied via ginverse below.
x1$Model <- gsub("ped", "giv", x1$Model)
#~~ GRM Models
restab.grm <- NULL
# Restrict phenotypes to genotyped individuals present in the GRM.
BEASTX$ID2 <- as.character(BEASTX$ID)
BEASTX <- subset(BEASTX, ID2 %in% dimnames(grminv)[[1]]) %>% droplevels
for(i in 1:nrow(x1)){
print(paste0("Running model ", i))
eval(parse(text = paste0("fit1 <- asreml(fixed = ", x1$Model[i], ",
rcov = ~ units:idh(trait, init = NA),
data = BEASTX,
ginverse = list(ID = grminv),
workspace = 500e+6, pworkspace = 500e+6,
maxiter = 100, na.method.Y = \"include\", na.method.X = \"include\")")))
temp <- cbind(ModelNo = i, summary(fit1)$varcomp)
temp$Effect <- row.names(temp)
restab.grm <- rbind(restab.grm, temp)
# Save each fitted model to disk rather than holding all in memory.
save(fit1, file = paste0("bivar", i, ".RData"))
rm(fit1, temp)
gc()
}
| /1.3_BEAST_Genetic_Correlations.R | no_license | sejlab/Soay_Immune_GWAS | R | false | false | 4,131 | r | # ------------------------------------------------------------------------
# Beast QG MS
# Genetic correlations for iga, ige, igg across all ages, lambs, adults
# AMS, SEJ
# July 2019
# ------------------------------------------------------------------------
# Inputs: the genomic relationship matrix inverse (grminv), the formatted
# phenotype table (BEASTX) and the pedigree used for the A-inverse below.
load("data/20181107_SoayGRM.Rdata")
load("data/20181107_BEAST_data_formatted.RData")
library(asreml)
library(plyr)
library(ggplot2)
library(magrittr)
library(tidyr)
source("r/ASReml.EstEffects.R")
source("r/ASReml.ExtractPredictors.R")
source("r/pin.R")
# Inverse additive relationship matrix from the pedigree (animal model).
ainv <- asreml.Ainverse(pedigree)$ginv
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 1. Make model structures                        #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Age-class-specific copies of each antibody trait: value kept for the
# relevant age class, NA otherwise, so a bivariate model can pair lamb and
# adult measures of the same or different isotypes.
BEASTX$Lamb.IgAmp <- ifelse(BEASTX$LambAdult == "Lambs" , BEASTX$IgAmp, NA)
BEASTX$Lamb.IgEmp <- ifelse(BEASTX$LambAdult == "Lambs" , BEASTX$IgEmp, NA)
BEASTX$Lamb.IgGmp <- ifelse(BEASTX$LambAdult == "Lambs" , BEASTX$IgGmp, NA)
BEASTX$Adult.IgAmp <- ifelse(BEASTX$LambAdult == "Adults", BEASTX$IgAmp, NA)
BEASTX$Adult.IgEmp <- ifelse(BEASTX$LambAdult == "Adults", BEASTX$IgEmp, NA)
BEASTX$Adult.IgGmp <- ifelse(BEASTX$LambAdult == "Adults", BEASTX$IgGmp, NA)
head(BEASTX)
# All unordered pairs of the six trait columns.
x1 <- data.frame(Trait1 = c("Lamb.IgAmp", "Lamb.IgEmp", "Lamb.IgGmp", "Adult.IgAmp", "Adult.IgEmp", "Adult.IgGmp"))
x2 <- x1
names(x2) <- "Trait2"
# Cartesian product, then sort each pair row-wise and deduplicate so (A,B)
# and (B,A) collapse to one row. Note apply() coerces to character here.
x1 <- merge(x1, x2) %>%
apply(1, sort) %>%
t %>%
data.frame %>%
unique()
x1 <- subset(x1, X1 != X2)
# Split "AgeClass.Trait" into separate columns for both pair members.
x1 <- separate(x1, X1, c("LambAdult.1", "Trait.1"), sep = "\\.", remove = F)
x1 <- separate(x1, X2, c("LambAdult.2", "Trait.2"), sep = "\\.", remove = F)
# Model formula (as a string) depends on the age classes involved:
# corgh(trait):ped(ID) fits the genetic correlation; adult-adult models add
# a permanent-environment term idh(trait):ide(ID); lamb models adjust for
# August age and adult models for age.
x1$Model <- NA
x1$Model <- ifelse(x1$LambAdult.1 == "Lamb" & x1$LambAdult.2 == "Lamb",
"trait+trait:Sex+trait:LambAgeAugust,random=~corgh(trait):ped(ID)",
x1$Model)
x1$Model <- ifelse(x1$LambAdult.1 == "Adult" & x1$LambAdult.2 == "Adult",
"trait+trait:Sex+trait:Age,random=~corgh(trait):ped(ID)+idh(trait):ide(ID)",
x1$Model)
x1$Model <- ifelse(x1$LambAdult.1 != x1$LambAdult.2,
"trait+trait:Sex,random=~corgh(trait):ped(ID)",
x1$Model)
# Prepend the bivariate response.
x1$Model <- paste0("cbind(", x1$X1, ",", x1$X2, ") ~ ", x1$Model)
x1$ModelNo <- 1:nrow(x1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# 2. Run the Models                 #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Fit each bivariate animal model with the pedigree A-inverse and collect
# the variance components into one table.
restab <- NULL
modlist <- list()
for(i in 1:nrow(x1)){
print(paste0("Running model ", i))
# NOTE(review): eval(parse()) splices the formula string into the asreml
# call -- a common asreml workaround but fragile; consider as.formula().
eval(parse(text = paste0("fit1 <- asreml(fixed = ", x1$Model[i], ",
rcov = ~ units:idh(trait, init = NA),
data = BEASTX,
ginverse = list(ID = ainv),
workspace = 500e+6, pworkspace = 500e+6,
maxiter = 100, na.method.Y = \"include\", na.method.X = \"include\")")))
modlist[[i]] <- fit1
# Append this model's variance components; row names identify the terms.
temp <- cbind(ModelNo = i, summary(fit1)$varcomp)
temp$Effect <- row.names(temp)
restab <- rbind(restab, temp)
rm(fit1, temp)
}
# NOTE(review): leftover inspection; prints the model formulas.
x1$Model
# Switch the pedigree term ped(ID) to giv(ID) so the GRM inverse can be
# supplied via ginverse below.
x1$Model <- gsub("ped", "giv", x1$Model)
#~~ GRM Models
restab.grm <- NULL
# Restrict phenotypes to genotyped individuals present in the GRM.
BEASTX$ID2 <- as.character(BEASTX$ID)
BEASTX <- subset(BEASTX, ID2 %in% dimnames(grminv)[[1]]) %>% droplevels
for(i in 1:nrow(x1)){
print(paste0("Running model ", i))
eval(parse(text = paste0("fit1 <- asreml(fixed = ", x1$Model[i], ",
rcov = ~ units:idh(trait, init = NA),
data = BEASTX,
ginverse = list(ID = grminv),
workspace = 500e+6, pworkspace = 500e+6,
maxiter = 100, na.method.Y = \"include\", na.method.X = \"include\")")))
temp <- cbind(ModelNo = i, summary(fit1)$varcomp)
temp$Effect <- row.names(temp)
restab.grm <- rbind(restab.grm, temp)
# Save each fitted model to disk rather than holding all in memory.
save(fit1, file = paste0("bivar", i, ".RData"))
rm(fit1, temp)
gc()
}
|
library(shiny)
# Find the slope `bhat` for a simple linear regression y = x*bhat + e such
# that the empirical two-sided p-value of the fitted slope equals `pval`,
# then return the simulated data and fit summary.
#
# Args:
#   pval:     target two-sided p-value for the slope test.
#   n:        sample size (used only for the t-distribution df = n - 2).
#   x, e:     fixed covariate and error vectors (length n).
#   nulldist: if TRUE, skip the search and use bhat = 0 (null model).
#
# Returns a list: t (critical t value), e, x, bhat (generating slope),
# y (simulated response), pval.emp (empirical p-value of the fitted slope).
#
# Cleanup vs. the previous version (behavior unchanged): removed the unused
# `bhat.emp`, a duplicated lm fit for the initial `pval.emp`, and two unused
# lm fits (`pmax`/`pmin`) per bisection iteration; renamed the local `t`
# (which shadowed base::t) to `tcrit`.
mainFun <- function(pval, n, x, e, nulldist) {
  # Critical t value corresponding to the target two-sided p-value.
  tcrit <- qt(pval / 2, df = n - 2, lower.tail = FALSE)
  # Theory-based starting slope: |beta| whose t statistic hits tcrit, using
  # sd(e) as the error sd and the observed spread of x.
  bhat.theory <- tcrit * sd(e) / sqrt(sum((x - mean(x))^2))

  # Empirical slope p-value for a candidate beta (x and e held fixed).
  getp <- function(beta) {
    y <- x * beta + e
    summary(lm(y ~ x))$coeff[2, 4]
  }

  bhat <- bhat.theory
  pval.emp <- getp(bhat)

  # Bracket the target. Larger |beta| yields a smaller p-value, so shrink
  # beta while the p-value is below target, then grow it while above.
  # The count guards cap the search at 100 steps each way.
  bhat1 <- bhat.theory
  count <- 0
  while (pval.emp < pval && count < 100) {
    bhat <- bhat * 0.9
    pval.emp <- getp(bhat)
    count <- count + 1
  }
  bhat2 <- bhat
  count <- 0
  while (pval.emp > pval && count < 100) {
    bhat <- bhat * 1.1
    pval.emp <- getp(bhat)
    count <- count + 1
  }
  bhat3 <- bhat

  # Bisection on [bmin, bmax] to converge on the target p-value
  # (skipped entirely for the null distribution).
  bmin <- min(bhat1, bhat2, bhat3)
  bmax <- max(bhat1, bhat2, bhat3)
  iter <- 0
  while (iter < 25 && !nulldist) {
    iter <- iter + 1
    bmid <- mean(c(bmin, bmax))
    pmid <- getp(bmid)
    if (pval > pmid) {
      # Midpoint's p-value is below target: beta too large, keep lower half.
      bmax <- bmid
    } else {
      bmin <- bmid
    }
  }
  bhat <- mean(c(bmin, bmax))
  if (nulldist) bhat <- 0

  # Final simulated response and its empirical p-value.
  y <- x * bhat + e
  pval.emp <- summary(lm(y ~ x))$coeff[2, 4]
  list(t = tcrit, e = e, x = x, bhat = bhat, y = y, pval.emp = pval.emp)
}
# Define server logic required to plot various variables against mpg
shinyServer(function(input, output) {
# Shared pool of 2000 standard-normal draws for errors and predictors;
# regenerated only when the seed input changes, so the sample size n can
# vary without redrawing the whole pool.
all.xe<-reactive({
set.seed(input$XEseed)
e.all<-rnorm(2000)
x.all<-rnorm(2000)
list(x=x.all,e=e.all)
})
# Target p-value, chosen by the user on a log10 slider.
pval<-reactive({ 10^(input$logp) })
# Break points bracketing the target p-value (+/- 10%).
pbreaks<-reactive({ c(pval()*1.1,pval()*.9) })
# First n draws from the shared pools.
e<-reactive({ all.xe()$e[1:input$n] })
x<-reactive({ all.xe()$x[1:input$n] })
#seedForPlots<-234032
#set.seed(seedForPlots)
# Simulated data whose fitted slope attains (approximately) the target
# p-value; see mainFun above.
data22<-reactive({ mainFun(pval=pval(),n=input$n,x=x(),e=e(),nulldist=input$nulldist) })
# Least-squares coefficients and a lowess smooth of the simulated data.
coefs<-reactive({ lm(data22()$y ~ data22()$x )$coef })
lowessXY<- reactive({ lowess(data22()$x,data22()$y) })
# Scatter plot with optional best-fit line and lowess overlays.
output$outplot<-renderPlot( {
plot(data22()$x,data22()$y,xlab='X',ylab='Y')
if(input$bestFit) abline( coefs() ,col='darkgreen',lwd=2)
if(input$lowess) lines(lowessXY(),col='blue',lwd=2)
})
#
# output$detailTable<-renderTable({
# data.frame(matmat<-matrix(c('Model:', 'Y=α+Xβ+ε',
# 'Generating Parameters:',paste0('α=0; β =',round(data22()$bhat,digits=4)),'Fitted Parameters: ', paste0('α=',round(coefs()[1],digits=4), '; β =',round(coefs()[2],digits=4) )),byrow=TRUE,ncol=2 ))
# })
#
# Two-row table: generating (alpha = 0, beta = bhat) vs fitted parameters.
output$detailTable<-renderTable({
tabtab<- data.frame(matmat<-matrix(c(0,round(data22()$bhat,digits=4),round(coefs()[1],digits=4),round(coefs()[2],digits=4) ),byrow=TRUE,ncol=2))
row.names(tabtab)<-c('Generating Parameters:','Fitted Parameters:')
colnames(tabtab)<-c('α','β')
tabtab
},digits=4)
#note, right now we're showing the empirical pval
output$pval<-renderText(paste('p-value =',prettyNum(data22()$pval.emp)) )
#output$test<-renderText(data22()$pval.emp)
# Static model formula and the generating/fitted parameter summaries.
output$formula<-renderText('Model: Y=α+Xβ+ε')
output$genParams<-renderText(paste0('Generating Parameters: α=0; β =',round(data22()$bhat,digits=4)) )
output$fitParams<-renderText(paste0('Fitted Parameters: α=',round(coefs()[1],digits=4), '; β =',round(coefs()[2],digits=4) ))
}) | /Shiny/EDA Shiny/server.R | no_license | Adamyazori/EDA-Project | R | false | false | 3,838 | r | library(shiny)
#' Find a slope `bhat` so that regressing y = x*bhat + e on x yields
#' (approximately) the requested p-value, then return the simulated data.
#'
#' @param pval Target two-sided p-value for the slope coefficient.
#' @param n Sample size (used only for the t-distribution df = n - 2;
#'   assumes length(x) == length(e) == n — not checked here).
#' @param x Numeric predictor vector.
#' @param e Numeric error vector.
#' @param nulldist If TRUE, skip the search and use bhat = 0 (null model).
#' @return list(t, e, x, bhat, y, pval.emp) where pval.emp is the empirical
#'   p-value of the fitted slope for the returned y.
mainFun <- function(pval, n, x, e, nulldist) {
  # Theory-based starting estimate: critical t-value scaled by the
  # standard error of the slope, sd(e) / sqrt(sum((x - mean(x))^2)).
  t <- qt(pval / 2, df = n - 2, lower.tail = FALSE)
  bhat.theory <- t * sd(e) / sqrt(sum((x - mean(x))^2))

  # Empirical slope p-value for a candidate beta (deterministic given x, e).
  getp <- function(beta) {
    y <- x * beta + e
    summary(lm(y ~ x))$coeff[2, 4]
  }

  bhat <- bhat.theory
  pval.emp <- getp(bhat)

  # Bracket the target: shrink beta until the empirical p rises above the
  # target, then grow it until the p falls below (capped at 100 steps each).
  bhat1 <- bhat.theory
  count <- 0
  while (pval.emp < pval && count < 100) {
    bhat <- bhat * 0.9
    pval.emp <- getp(bhat)
    count <- count + 1
  }
  bhat2 <- bhat
  count <- 0
  while (pval.emp > pval && count < 100) {
    bhat <- bhat * 1.1
    pval.emp <- getp(bhat)
    count <- count + 1
  }
  bhat3 <- bhat

  # Bisection between the extremes of the three candidates: p decreases as
  # beta grows, so a mid-point p below the target means beta is too large.
  bmin <- min(bhat1, bhat2, bhat3)
  bmax <- max(bhat1, bhat2, bhat3)
  iter <- 0
  while (iter < 25 && !nulldist) {
    iter <- iter + 1
    bmid <- mean(c(bmin, bmax))
    pmid <- getp(bmid)
    if (pval > pmid) {
      bmax <- bmid  # p at bmid too small -> slope too large, search lower
    } else {
      bmin <- bmid  # p at bmid too large -> slope too small, search higher
    }
  }

  # Final slope: 0 under the null, otherwise the bisection mid-point.
  bhat <- if (nulldist) 0 else mean(c(bmin, bmax))
  y <- x * bhat + e
  pval.emp <- summary(lm(y ~ x))$coeff[2, 4]
  list(t = t, e = e, x = x, bhat = bhat, y = y, pval.emp = pval.emp)
}
# Define server logic required to plot various variables against mpg
shinyServer(function(input, output) {
all.xe<-reactive({
set.seed(input$XEseed)
e.all<-rnorm(2000)
x.all<-rnorm(2000)
list(x=x.all,e=e.all)
})
pval<-reactive({ 10^(input$logp) })
pbreaks<-reactive({ c(pval()*1.1,pval()*.9) })
e<-reactive({ all.xe()$e[1:input$n] })
x<-reactive({ all.xe()$x[1:input$n] })
#seedForPlots<-234032
#set.seed(seedForPlots)
data22<-reactive({ mainFun(pval=pval(),n=input$n,x=x(),e=e(),nulldist=input$nulldist) })
coefs<-reactive({ lm(data22()$y ~ data22()$x )$coef })
lowessXY<- reactive({ lowess(data22()$x,data22()$y) })
output$outplot<-renderPlot( {
plot(data22()$x,data22()$y,xlab='X',ylab='Y')
if(input$bestFit) abline( coefs() ,col='darkgreen',lwd=2)
if(input$lowess) lines(lowessXY(),col='blue',lwd=2)
})
#
# output$detailTable<-renderTable({
# data.frame(matmat<-matrix(c('Model:', 'Y=α+Xβ+ε',
# 'Generating Parameters:',paste0('α=0; β =',round(data22()$bhat,digits=4)),'Fitted Parameters: ', paste0('α=',round(coefs()[1],digits=4), '; β =',round(coefs()[2],digits=4) )),byrow=TRUE,ncol=2 ))
# })
#
output$detailTable<-renderTable({
tabtab<- data.frame(matmat<-matrix(c(0,round(data22()$bhat,digits=4),round(coefs()[1],digits=4),round(coefs()[2],digits=4) ),byrow=TRUE,ncol=2))
row.names(tabtab)<-c('Generating Parameters:','Fitted Parameters:')
colnames(tabtab)<-c('α','β')
tabtab
},digits=4)
#note, right now we're showing the empirical pval
output$pval<-renderText(paste('p-value =',prettyNum(data22()$pval.emp)) )
#output$test<-renderText(data22()$pval.emp)
output$formula<-renderText('Model: Y=α+Xβ+ε')
output$genParams<-renderText(paste0('Generating Parameters: α=0; β =',round(data22()$bhat,digits=4)) )
output$fitParams<-renderText(paste0('Fitted Parameters: α=',round(coefs()[1],digits=4), '; β =',round(coefs()[2],digits=4) ))
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/closeIssue.R
\name{closeIssue}
\alias{closeIssue}
\title{closeIssue}
\usage{
closeIssue(user, ID, notes = NA, project = ".")
}
\arguments{
\item{user}{string: the person who closed the issue.}
\item{ID}{string: the ID of the issue you want to close.}
\item{notes}{string: any closing notes you want to add to the issue.}
\item{project}{string: the project location}
}
\description{
Close an issue you've previously logged.
Add notes on how you closed it for future reference.
}
| /man/closeIssue.Rd | no_license | stephstammel/consultthat | R | false | true | 559 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/closeIssue.R
\name{closeIssue}
\alias{closeIssue}
\title{closeIssue}
\usage{
closeIssue(user, ID, notes = NA, project = ".")
}
\arguments{
\item{user}{string: the person who closed the issue.}
\item{ID}{string: the ID of the issue you want to close.}
\item{notes}{string: any closing notes you want to add to the issue.}
\item{project}{string: the project location}
}
\description{
Close an issue you've previously logged.
Add notes on how you closed it for future reference.
}
|
# Command-line-style arguments consumed by the permutation analysis driver.
# FIX: a stray bare `debug.Rdata` token (no comma, no quotes) previously sat
# between "--output" and "--slurmN", making this file unparsable; removed.
cmdargs <- c("-m", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/gad_lead.fear/perm.free.gad_lead.fear/mask.nii.gz",
             "--set1", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/setfilenames_fearGTcalm.txt",
             "--setlabels1", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/depanxcov-midpoint5.csv",
             "--model", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/gad_lead.fear/perm.free.gad_lead.fear/permute_free_model.R",
             "--output", "perm.free.gad_lead.fear/perm.free.gad_lead.fear.",
             "--slurmN", "60")
| /gad_lead.fear/perm.free.gad_lead.fear/perm.free.gad_lead.fear/readargs.R | no_license | jflournoy/sea_np_models | R | false | false | 788 | r | cmdargs <- c("-m","/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/gad_lead.fear/perm.free.gad_lead.fear/mask.nii.gz",
"--set1", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/setfilenames_fearGTcalm.txt","--setlabels1", "/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/datafiles/depanxcov-midpoint5.csv",
"--model","/net/holynfs01/srv/export/mclaughlin/share_root/stressdevlab/stress_pipeline/Group/FaceReactivity/NewNeuropoint/gad_lead.fear/perm.free.gad_lead.fear/permute_free_model.R",
"--output","perm.free.gad_lead.fear/perm.free.gad_lead.fear.",
debug.Rdata
"--slurmN", "60"
)
|
## ---------------------------------------------------------------------------------------------------------------------
#
# Bioinformatics Research Group
# http://biorg.cis.fiu.edu/
# Florida International University
#
# This software is a "Camilo Valdes Work" under the terms of the United States Copyright Act.
# Please cite the author(s) in any work or product based on this material.
#
# OBJECTIVE:
# The purpose of this program is to transform a text input matrix in "wide form" into "long form" format. Note
# that this script is focused on expression data in which the first column (in wide format) is the gene_id and
# subsequent columns are samples that hold expression units (TPMs, FPKMs, Read Counts, etc.) for a given gene.
#
#
# NOTES:
# Please see the dependencies section below for the required libraries (if any).
#
# DEPENDENCIES:
#
# • ggplot2
# • RColorBrewer
# • reshape2
#
# The above libraries & modules are required.
#
# AUTHOR: Camilo Valdes (cvalde03@fiu.edu)
# Bioinformatics Research Group,
# School of Computing and Information Sciences,
# Florida International University (FIU)
#
#
# ---------------------------------------------------------------------------------------------------------------------
#
# Import any necessary system or 3rd-party libraries & frameworks here
#
library(ggplot2)      # plotting (loaded for downstream use)
library(RColorBrewer) # color palettes
library(reshape2)     # melt(): wide -> long reshaping

# Set some options to alter the default behavior of the REPL.
options(width = 256, digits = 15, warn = 1, echo = FALSE)

# ------------------------------------------------- Project Setup -----------------------------------------------------

# Print a timestamped log line of the form "[mm/dd/yy HH:MM:SS] <msg>".
# Replaces the repeated paste(..., sep = "") boilerplate; output is identical.
log_msg <- function(msg = "") {
  print(paste0("[", format(Sys.time(), "%m/%d/%y %H:%M:%S"), "] ", msg))
}

log_msg()
log_msg("R Starting... ")
log_msg()

working_directory <- "/path/to/working/directory"
# NOTE(review): setwd() with a hard-coded path is fragile; kept for compatibility.
setwd(file.path(working_directory))

figures_base_dir <- paste0(working_directory, "/figures")
png_output_dir <- paste0(figures_base_dir, "/png")
dir.create(file.path(png_output_dir), showWarnings = FALSE, recursive = TRUE)

# ------------------------------------------------------ Main ---------------------------------------------------------

# File in "wide format": first column is the identifier (gene_id), remaining
# columns are per-sample expression values.
fileToProcess <- paste0(working_directory, "/path/to/input/file/human_expression.txt")
log_msg("Input File:")
print(fileToProcess)

# Read-in the file (tab-delimited, header row, column names kept verbatim).
countsTable <- read.delim(fileToProcess, header = TRUE, sep = "\t", check.names = FALSE)

numberOfRows <- nrow(countsTable)
log_msg(paste0("Number of Rows: ", numberOfRows))
log_msg()

print(head(countsTable, n = 10))
log_msg()

#
# Convert to "long form" using the 'melt' function of the 'reshape2' package.
# Column 1 (the identifier) is the id variable; all other columns are melted
# into (variable, value) pairs.
#
locationIDVariable <- 1
meltedLongDataFrame <- melt(countsTable, id.vars = locationIDVariable)
print(head(meltedLongDataFrame, n = 10))
log_msg()

#
# Output: write the long-form table as tab-separated text.
#
outputFile <- paste0(working_directory, "/path/to/output/file/human_expression-long.txt")
write.table(meltedLongDataFrame, file = outputFile, row.names = FALSE, quote = FALSE, sep = "\t")

# ------------------------------------------------------ END ---------------------------------------------------------
log_msg()
log_msg("Done.")
log_msg()
| /post_processing/long_form_transform.R | permissive | camilo-v/Bioinformatics | R | false | false | 3,875 | r |
## ---------------------------------------------------------------------------------------------------------------------
#
# Bioinformatics Research Group
# http://biorg.cis.fiu.edu/
# Florida International University
#
# This software is a "Camilo Valdes Work" under the terms of the United States Copyright Act.
# Please cite the author(s) in any work or product based on this material.
#
# OBJECTIVE:
# The purpose of this program is to transform a text input matrix in "wide form" into "long form" format. Note
# that this script is focused on expression data in which the first column (in wide format) is the gene_id and
# subsequent columns are samples that hold expression units (TPMs, FPKMs, Read Counts, etc.) for a given gene.
#
#
# NOTES:
# Please see the dependencies section below for the required libraries (if any).
#
# DEPENDENCIES:
#
# • ggplot2
# • RColorBrewer
# • reshape2
#
# The above libraries & modules are required.
#
# AUTHOR: Camilo Valdes (cvalde03@fiu.edu)
# Bioinformatics Research Group,
# School of Computing and Information Sciences,
# Florida International University (FIU)
#
#
# ---------------------------------------------------------------------------------------------------------------------
#
# Import any necessary system or 3rd-party libraries & frameworks here
#
library(ggplot2)
library(RColorBrewer)
library(reshape2)
# Set some options to alter the default behavior of the REPL.
options( width=256, digits=15, warn=1, echo=FALSE )
# ------------------------------------------------- Project Setup -----------------------------------------------------
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] ", sep="") )
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] R Starting... ", sep="") )
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] ", sep="") )
working_directory="/path/to/working/directory"
setwd( file.path( working_directory ) )
figures_base_dir=paste(working_directory, "/figures" , sep="")
png_output_dir=paste( figures_base_dir, "/png", sep="" )
dir.create( file.path( png_output_dir ), showWarnings=FALSE, recursive=TRUE )
# ------------------------------------------------------ Main ---------------------------------------------------------
# File in "wide format"
fileToProcess = paste( working_directory, "/path/to/input/file/human_expression.txt", sep="" )
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] Input File:", sep="") )
print(fileToProcess)
# Read-in the file
countsTable = read.delim( fileToProcess, header=TRUE, sep="\t", check.names=FALSE )
numberOfRows = nrow(countsTable)
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] Number of Rows: ", numberOfRows, sep="") )
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] ", sep="") )
print(head(countsTable, n=10))
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] ", sep="") )
#
# Convert to "long form" using the 'melt' function of the 'reshape2' package.
#
locationIDVariable = 1
meltedLongDataFrame = melt( countsTable, id.vars=locationIDVariable)
print(head(meltedLongDataFrame, n=10))
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] ", sep="") )
#
# Output
#
outputFile = paste(working_directory, "/path/to/output/file/human_expression-long.txt", sep="" )
write.table( meltedLongDataFrame, file=outputFile, row.names=FALSE, quote=FALSE, sep="\t")
# ------------------------------------------------------ END ---------------------------------------------------------
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] ", sep="") )
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] Done.", sep="") )
print( paste( "[", format(Sys.time(), "%m/%d/%y %H:%M:%S"),"] ", sep="") )
|
# Set the working directory
# NOTE(review): hard-coded absolute path; consider a relative path or argument.
setwd("C:/Users/mark__000/Documents/Springboard/Data-Wrangling-Project/UCI HAR Dataset")

# Read in the raw data
activity_labels <- read.table("activity_labels.txt", stringsAsFactors = FALSE)
features <- read.table("features.txt", stringsAsFactors = FALSE)
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
subject_test <- read.table("test/subject_test.txt")

# Label column names as appropriate and fuse training and testing sets
colnames(subject_train) <- c("Subject")
colnames(subject_test) <- c("Subject")
colnames(activity_labels) <- c("ID", "ActivityName")
colnames(features) <- c("ID", "Feature")
colnames(X_train) <- make.names(features$Feature, unique = TRUE)
colnames(X_test) <- make.names(features$Feature, unique = TRUE)
colnames(y_train) <- c("ActivityLabel")
colnames(y_test) <- c("ActivityLabel")

# Extract columns containing mean and std dev for each measurement.
# After make.names(), names like "tBodyAcc-mean()-X" become "tBodyAcc.mean...X",
# so the literal (fixed = TRUE) patterns "mean.." / "std.." select exactly the
# mean/std feature columns.
X_train_mean <- X_train[, grepl("mean..", colnames(X_train), fixed = TRUE)]
X_test_mean <- X_test[, grepl("mean..", colnames(X_test), fixed = TRUE)]
X_train_std <- X_train[, grepl("std..", colnames(X_train), fixed = TRUE)]
X_test_std <- X_test[, grepl("std..", colnames(X_test), fixed = TRUE)]

# Fuse train and test data together including subject and activity label
fused_train <- cbind(subject_train, y_train, X_train_mean, X_train_std)
fused_test <- cbind(subject_test, y_test, X_test_mean, X_test_std)
fused_data <- rbind(fused_train, fused_test)

# Merge fused data with activity labels to include the description of the activity
fused_data <- merge(fused_data, activity_labels, by.x = "ActivityLabel", by.y = "ID")

# Split data by subject and activity
fused_split <- split(fused_data, fused_data[, c("Subject", "ActivityLabel")])

# Average every measurement column within each subject/activity group,
# keeping the identifying columns alongside the per-group means.
fused_apply <- lapply(fused_split, function(x) {
  col_mean <- colMeans(x[, setdiff(colnames(x), c("Subject", "ActivityLabel", "ActivityName"))])
  df <- data.frame(t(col_mean))
  cbind(Subject = x$Subject[1],
        ActivityLabel = x$ActivityLabel[1],
        ActivityName = x$ActivityName[1],
        df)
})

# Combine per-group rows into the final tidy data set
final_data <- do.call(rbind, fused_apply)
| /run_analysis_base_R.R | no_license | mawilliam/Data-Wrangling-Project | R | false | false | 2,275 | r | # Set the working directory
setwd("C:/Users/mark__000/Documents/Springboard/Data-Wrangling-Project/UCI HAR Dataset")
# Read in the raw data
activity_labels <- read.table("activity_labels.txt",stringsAsFactors = F)
features <- read.table("features.txt",stringsAsFactors = F)
X_train <- read.table("train/X_train.txt")
y_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")
X_test <- read.table("test/X_test.txt")
y_test <- read.table("test/y_test.txt")
subject_test <- read.table("test/subject_test.txt")
# Label column names as appropriate and fuse training and testing sets
colnames(subject_train) <- c("Subject")
colnames(subject_test) <- c("Subject")
colnames(activity_labels) <- c("ID","ActivityName")
colnames(features) <- c("ID","Feature")
colnames(X_train) <- make.names(features$Feature, unique = T)
colnames(X_test) <- make.names(features$Feature, unique = T)
colnames(y_train) <- c("ActivityLabel")
colnames(y_test) <- c("ActivityLabel")
# Extract columns containing mean and std dev for each measurement
X_train_mean <- X_train[,grepl("mean..",colnames(X_train),fixed=T)]
X_test_mean <- X_test[,grepl("mean..",colnames(X_test),fixed=T)]
X_train_std <- X_train[,grepl("std..",colnames(X_train),fixed=T)]
X_test_std <- X_test[,grepl("std..",colnames(X_test),fixed=T)]
# Fuse train and test data together including subject and activity label
fused_train <- cbind(subject_train,y_train,X_train_mean,X_train_std)
fused_test <- cbind(subject_test,y_test,X_test_mean,X_test_std)
fused_data <- rbind(fused_train, fused_test)
# Merge fused data with activity labels to include the description of the activity
fused_data <- merge(fused_data,activity_labels,by.x="ActivityLabel",by.y="ID")
# Split data by subject and activity
fused_split <- split(fused_data, fused_data[,c("Subject","ActivityLabel")])
# Apply avg to each column
fused_apply <- lapply(fused_split, function(x){
col_mean <- colMeans(x[,setdiff(colnames(x),c("Subject","ActivityLabel","ActivityName"))])
df <- data.frame(t(col_mean))
df <- cbind(Subject=x$Subject[1],
ActivityLabel=x$ActivityLabel[1],
ActivityName=x$ActivityName[1],
df)
return(df)
})
# Combine data
final_data <- do.call(rbind, fused_apply)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{lsmeans}
\alias{lsmeans}
\alias{wrappers}
\alias{lstrends}
\alias{lsmip}
\alias{lsm}
\alias{lsmobj}
\alias{lsm.options}
\alias{get.lsm.option}
\title{Wrappers for alternative naming of EMMs}
\usage{
lsmeans(...)
lstrends(...)
lsmip(...)
lsm(...)
lsmobj(...)
lsm.options(...)
get.lsm.option(x, default = emm_defaults[[x]])
}
\arguments{
\item{...}{Arguments passed to the corresponding \code{em}\emph{xxxx} function}
\item{x}{Character name of desired option}
\item{default}{default value to return if \code{x} not found}
}
\value{
The result of the call to \code{em}\emph{xxxx}, suitably modified.
\code{get.lsm.option} and \code{lsm.options} remap options from
and to corresponding options in the \pkg{emmeans} options system.
}
\description{
These are wrappers for \code{\link{emmeans}} and related functions to provide
backward compatibility, or for users who may prefer to
use other terminology than \dQuote{estimated marginal means} -- namely
\dQuote{least-squares means}. These functions also provide the functionality
formerly provided by the \pkg{lsmeans} package, which is now just a front-end
for \pkg{emmeans}.
}
\details{
For each function with \code{ls}\emph{xxxx} in its name,
the same function named \code{em}\emph{xxxx} is called. Any estimator names or
list items beginning with \dQuote{em} are replaced with \dQuote{ls}
before the results are returned
}
\examples{
pigs.lm <- lm(log(conc) ~ source + factor(percent), data = pigs)
lsmeans(pigs.lm, "source")
}
\seealso{
\code{\link{emmeans}}, \code{\link{emtrends}}, \code{\link{emmip}},
\code{\link{emm}}, \code{\link{emmobj}}, \code{\link{emm_options}},
\code{\link{get_emm_option}}
}
| /man/wrappers.Rd | no_license | cran/emmeans | R | false | true | 1,844 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{lsmeans}
\alias{lsmeans}
\alias{wrappers}
\alias{lstrends}
\alias{lsmip}
\alias{lsm}
\alias{lsmobj}
\alias{lsm.options}
\alias{get.lsm.option}
\title{Wrappers for alternative naming of EMMs}
\usage{
lsmeans(...)
lstrends(...)
lsmip(...)
lsm(...)
lsmobj(...)
lsm.options(...)
get.lsm.option(x, default = emm_defaults[[x]])
}
\arguments{
\item{...}{Arguments passed to the corresponding \code{em}\emph{xxxx} function}
\item{x}{Character name of desired option}
\item{default}{default value to return if \code{x} not found}
}
\value{
The result of the call to \code{em}\emph{xxxx}, suitably modified.
\code{get.lsm.option} and \code{lsm.options} remap options from
and to corresponding options in the \pkg{emmeans} options system.
}
\description{
These are wrappers for \code{\link{emmeans}} and related functions to provide
backward compatibility, or for users who may prefer to
use other terminology than \dQuote{estimated marginal means} -- namely
\dQuote{least-squares means}. These functions also provide the functionality
formerly provided by the \pkg{lsmeans} package, which is now just a front-end
for \pkg{emmeans}.
}
\details{
For each function with \code{ls}\emph{xxxx} in its name,
the same function named \code{em}\emph{xxxx} is called. Any estimator names or
list items beginning with \dQuote{em} are replaced with \dQuote{ls}
before the results are returned
}
\examples{
pigs.lm <- lm(log(conc) ~ source + factor(percent), data = pigs)
lsmeans(pigs.lm, "source")
}
\seealso{
\code{\link{emmeans}}, \code{\link{emtrends}}, \code{\link{emmip}},
\code{\link{emm}}, \code{\link{emmobj}}, \code{\link{emm_options}},
\code{\link{get_emm_option}}
}
|
#
# Logistic regression training and parameter tuning.
# Expects `train_df` and the trainControl objects `trCtrol`, `trCtrol2`,
# and `trCtrolRepCV` to be defined by an earlier step of the pipeline.
#

# Training Model
set.seed(2015)

# Model 1: cross-validation with subsampling to handle class imbalance.
cat(">>>> Training LogReg 1/3: with only-CV; with subsampling\n")
mdl_logreg <- train(Label ~ .,
                    data = train_df,
                    method = "glm",
                    family = 'binomial',
                    trControl = trCtrol,
                    preProcess = c('center', 'scale'),
                    metric = "ROC"
                    )

# Prediction on validation date set
# pred_lab <- predict.train(mdl_logreg, newdata = valid)
# confusionMatrix(pred_lab, valid_lab_fct)

# Model 2: without using subsampling to deal with unbalanced data.
cat(">>>> Training LogReg 2/3: with only-CV; without subsampling\n")
mdl_logreg_2 <- train(Label ~ .,
                      data = train_df,
                      method = "glm",
                      family = 'binomial',
                      trControl = trCtrol2,
                      preProcess = c('center', 'scale'),
                      metric = "ROC"
                      )

# Model 3: 5-times repeated 10-fold CV with subsampling.
# FIX: corrected "subsampilng" typo in the log message below.
cat(">>>> Training LogReg 3/3: with repeated-CV; with subsampling\n")
mdl_logreg_repcv <- train(Label ~ .,
                          data = train_df,
                          method = "glm",
                          family = 'binomial',
                          trControl = trCtrolRepCV,
                          preProcess = c('center', 'scale'),
                          metric = "ROC"
                          )

# Persist only the repeated-CV model (the one used downstream).
save(mdl_logreg_repcv, file = "Model_logreg.Robj")
| /src/model_logreg.R | no_license | Puriney/ML_Proj | R | false | false | 1,463 | r | #
# Logistics Regression Training and Parameters Tuning
#
# Training Model
set.seed(2015)
cat(">>>> Training LogReg 1/3: with only-CV; with subsampling\n")
mdl_logreg <- train(Label ~ .,
data = train_df,
method = "glm",
family = 'binomial',
trControl = trCtrol,
preProcess = c('center', 'scale'),
metric = "ROC"
)
# Prediction on validation date set
# pred_lab <- predict.train(mdl_logreg, newdata = valid)
# confusionMatrix(pred_lab, valid_lab_fct)
# Without using subsampling to deal with unbalanced data
cat(">>>> Training LogReg 2/3: with only-CV; without subsampling\n")
mdl_logreg_2 <- train(Label ~ .,
data = train_df,
method = "glm",
family = 'binomial',
trControl = trCtrol2,
preProcess = c('center', 'scale'),
metric = "ROC"
)
# 5-times repeated 10-fold cv to train logreg
cat(">>>> Training LogReg 3/3: with repeated-CV; with subsampilng\n")
mdl_logreg_repcv <- train(Label ~ .,
data = train_df,
method = "glm",
family = 'binomial',
trControl = trCtrolRepCV,
preProcess = c('center', 'scale'),
metric = "ROC"
)
save(mdl_logreg_repcv, file = "Model_logreg.Robj")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_odkc_turtledata_2020.R
\name{download_odkc_turtledata_2020}
\alias{download_odkc_turtledata_2020}
\title{Download all turtle data from DBCA's ODK Central 2020}
\usage{
download_odkc_turtledata_2020(
local_dir = here::here("media"),
prod = "https://odkc.dbca.wa.gov.au",
tz = ruODK::get_default_tz(),
download = TRUE,
odkc_version = ruODK::get_default_odkc_version(),
verbose = wastdr::get_wastdr_verbose()
)
}
\arguments{
\item{local_dir}{A local directory to which to save the attachment files.
Attachment filepaths will be relative to the directory.
The directory and its parent folders will be created if not existing.}
\item{prod}{The ODKC PROD server,
default: "https://odkc.dbca.wa.gov.au".}
\item{tz}{The lubridate timezone, default: "Australia/Perth}
\item{download}{(lgl) Whether to download attachments to \code{local_dir} or not,}
\item{odkc_version}{The numeric ODK Central version, e.g. 0.7 or 0.8.}
\item{verbose}{Whether to show debug messages.
Default: wastdr::get_wastdr_verbose()}
}
\value{
An S3 class "odkc_turtledata" with items:
\itemize{
\item downloaded_on An UTC POSIXct timestamp of the data snapshot.
\item tracks The turtle tracks and nests from form
"Turtle Track or Nest 1.0".
\item tracks_dist Individual disturbances recorded against tracks,
one record per disturbance.
\item tracks_log Individual nest tags recorded against nests,
one record per tag.
\item tracks_egg Nest excavation photos, one record per photo.
\item tracks_hatch Turtle hatchling morphometrics, one record per measured
hatchling.
\item tracks_fan_outlier Individual hatchling track outliers recorded
against hatched nests, one record per outlier.
\item tracks_light Individual light sources known at hatchling emergence,
one record per light source.
\item track_tally A line transect tally of turtle tracks from form
"Track Tally 0.6".
\item dist The disturbance and predation records from form
"Predator or Disturbance 1.1".
\item mwi Strandings and rescues from the form
"Marine Wildlife Incident 0.6 ".
\item mwi_dmg Individual injuries recorded against mwi,
one record per injury.
\item mwi_tag Individual tags sighted during an mwi, one record per tag.
\item tsi Turtle Sightings from form \code{Turtle-Sighting},
one record per sighted turtle.
\item tt Individual turtles encountered and tagged after nesting
during night time turtle tagging from form \code{Turtle-Tagging-3-0},
one record per encountered turtle.
\item tt_dmg Characteristic damages or injuries recorded during turtle
tagging, one record per damage.
\item tt_tag Individual tags recorded during turtle tagging,
one record per tag. Tags can be of different type,
re-sighted, applied, or removed.
\item tt_log Individual loggers recorded during turtle tagging,
one record per logger. Loggers can be of different type,
re-sighted, deployed, or removed.
\item tt_fix Turtle Tagging records with missing coordinates. These need to
be backfilled into WAStD by hand from paper datasheets.
\item svs Survey start points from form \code{Site-Visit-Start}.
\item sve Survey end points from form \code{Site-Visit-End}.
\item sites An sf object of known WAStD sites.
\item areas An sf object of known WAStD localities.
}
}
\description{
\lifecycle{maturing}
}
\seealso{
Other odkc:
\code{\link{add_hatching_emergence_success_odkc}()},
\code{\link{add_nest_labels_odkc}()},
\code{\link{exclude_training_species_odkc}()},
\code{\link{filter_odkc_turtledata}()},
\code{\link{general_disturbance_by_season_odkc}()},
\code{\link{hatching_emergence_success_odkc}()},
\code{\link{map_dist_odkc}()},
\code{\link{map_mwi_odkc}()},
\code{\link{map_sv_odkc}()},
\code{\link{map_tracks_odkc}()},
\code{\link{map_tt_odkc}()},
\code{\link{nest_disturbance_by_season_odkc}()},
\code{\link{nesting_type_by_area_season_age_species_odkc}()},
\code{\link{nesting_type_by_area_season_species_odkc}()},
\code{\link{nesting_type_by_season_age_species_odkc}()},
\code{\link{nesting_type_by_season_calendarday_age_species_odkc}()},
\code{\link{nesting_type_by_season_calendarday_species_odkc}()},
\code{\link{nesting_type_by_season_day_species_odkc}()},
\code{\link{nesting_type_by_season_species_odkc}()},
\code{\link{nesting_type_by_season_week_age_species_odkc}()},
\code{\link{nesting_type_by_season_week_site_species_odkc}()},
\code{\link{nesting_type_by_season_week_species_odkc}()},
\code{\link{nesting_type_by_site_season_age_species_odkc}()},
\code{\link{nesting_type_by_site_season_species_odkc}()}
}
\concept{odkc}
| /man/download_odkc_turtledata_2020.Rd | no_license | dbca-wa/wastdr | R | false | true | 4,571 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_odkc_turtledata_2020.R
\name{download_odkc_turtledata_2020}
\alias{download_odkc_turtledata_2020}
\title{Download all turtle data from DBCA's ODK Central 2020}
\usage{
download_odkc_turtledata_2020(
local_dir = here::here("media"),
prod = "https://odkc.dbca.wa.gov.au",
tz = ruODK::get_default_tz(),
download = TRUE,
odkc_version = ruODK::get_default_odkc_version(),
verbose = wastdr::get_wastdr_verbose()
)
}
\arguments{
\item{local_dir}{A local directory to which to save the attachment files.
Attachment filepaths will be relative to the directory.
The directory and its parent folders will be created if not existing.}
\item{prod}{The ODKC PROD server,
default: "https://odkc.dbca.wa.gov.au".}
\item{tz}{The lubridate timezone, default: "Australia/Perth".}
\item{download}{(lgl) Whether to download attachments to \code{local_dir} or not.}
\item{odkc_version}{The numeric ODK Central version, e.g. 0.7 or 0.8.}
\item{verbose}{Whether to show debug messages.
Default: wastdr::get_wastdr_verbose()}
}
\value{
An S3 class "odkc_turtledata" with items:
\itemize{
\item downloaded_on An UTC POSIXct timestamp of the data snapshot.
\item tracks The turtle tracks and nests from form
"Turtle Track or Nest 1.0".
\item tracks_dist Individual disturbances recorded against tracks,
one record per disturbance.
\item tracks_log Individual nest tags recorded against nests,
one record per tag.
\item tracks_egg Nest excavation photos, one record per photo.
\item tracks_hatch Turtle hatchling morphometrics, one record per measured
hatchling.
\item tracks_fan_outlier Individual hatchling track outliers recorded
against hatched nests, one record per outlier.
\item tracks_light Individual light sources known at hatchling emergence,
one record per light source.
\item track_tally A line transect tally of turtle tracks from form
"Track Tally 0.6".
\item dist The disturbance and predation records from form
"Predator or Disturbance 1.1".
\item mwi Strandings and rescues from the form
"Marine Wildlife Incident 0.6".
\item mwi_dmg Individual injuries recorded against mwi,
one record per injury.
\item mwi_tag Individual tags sighted during an mwi, one record per tag.
\item tsi Turtle Sightings from form \code{Turtle-Sighting},
one record per sighted turtle.
\item tt Individual turtles encountered and tagged after nesting
during night time turtle tagging from form \code{Turtle-Tagging-3-0},
one record per encountered turtle.
\item tt_dmg Characteristic damages or injuries recorded during turtle
tagging, one record per damage.
\item tt_tag Individual tags recorded during turtle tagging,
one record per tag. Tags can be of different type,
re-sighted, applied, or removed.
\item tt_log Individual loggers recorded during turtle tagging,
one record per logger. Loggers can be of different type,
re-sighted, deployed, or removed.
\item tt_fix Turtle Tagging records with missing coordinates. These need to
be backfilled into WAStD by hand from paper datasheets.
\item svs Survey start points from form \code{Site-Visit-Start}.
\item sve Survey end points from form \code{Site-Visit-End}.
\item sites An sf object of known WAStD sites.
\item areas An sf object of known WAStD localities.
}
}
\description{
\lifecycle{maturing}
}
\seealso{
Other odkc:
\code{\link{add_hatching_emergence_success_odkc}()},
\code{\link{add_nest_labels_odkc}()},
\code{\link{exclude_training_species_odkc}()},
\code{\link{filter_odkc_turtledata}()},
\code{\link{general_disturbance_by_season_odkc}()},
\code{\link{hatching_emergence_success_odkc}()},
\code{\link{map_dist_odkc}()},
\code{\link{map_mwi_odkc}()},
\code{\link{map_sv_odkc}()},
\code{\link{map_tracks_odkc}()},
\code{\link{map_tt_odkc}()},
\code{\link{nest_disturbance_by_season_odkc}()},
\code{\link{nesting_type_by_area_season_age_species_odkc}()},
\code{\link{nesting_type_by_area_season_species_odkc}()},
\code{\link{nesting_type_by_season_age_species_odkc}()},
\code{\link{nesting_type_by_season_calendarday_age_species_odkc}()},
\code{\link{nesting_type_by_season_calendarday_species_odkc}()},
\code{\link{nesting_type_by_season_day_species_odkc}()},
\code{\link{nesting_type_by_season_species_odkc}()},
\code{\link{nesting_type_by_season_week_age_species_odkc}()},
\code{\link{nesting_type_by_season_week_site_species_odkc}()},
\code{\link{nesting_type_by_season_week_species_odkc}()},
\code{\link{nesting_type_by_site_season_age_species_odkc}()},
\code{\link{nesting_type_by_site_season_species_odkc}()}
}
\concept{odkc}
|
# Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# Top-level Shiny UI definition: two stacked, centered rows —
# (1) the regional-district disposal-rate chart with sort buttons,
# (2) a per-district yearly detail chart with resource/download links.
# NOTE(review): div_css(), translate_px(), p1.w, p1.h, p2.h and max_year are
# assumed to be defined elsewhere (e.g. global.R) — confirm before editing.
shinyUI(
  fluidPage(
    includeCSS("www/style.css"),
    # Ask search-engine crawlers not to index this app.
    tags$head(
      tags$meta(name = "robots", content = "noindex")
    ),
    fixedRow(align = "center",
      div(style = div_css(p1.w, p1.h + 50),
        # Sort controls are only shown once the main plot has rendered.
        conditionalPanel("output.plot_rd",
          h2(paste(max_year, "Regional District Disposal Rates")),
          div(class = "div-link", style = paste0("width:", translate_px(p1.w - 38), ";"),
            HTML(paste0(div("Sort by: ", class = 'msw-label'),
              actionButton("sort_name", "Name", class = 'msw-button'),
              "/",
              actionButton("sort_rate", "Disposal Rate", class = 'msw-button'),
              "/",
              actionButton("sort_population", "Population", class = 'msw-button')
            ))
          )),
        # Interactive (ggiraph) regional-district plot.
        girafeOutput(outputId = 'plot_rd', height = p1.h))
    ),
    fixedRow(align = "center",
      div(style = div_css(p1.w, p1.h),
        # Info text, yearly plot, then inline resource/download links.
        uiOutput("ui_info"),
        girafeOutput(outputId = 'plot_year', height = p2.h),
        br(),
        uiOutput("ui_resources", style = "display: inline-block;"),
        uiOutput("ui_dl", style = "display: inline-block;"))
    ))
)
) | /dataviz/app/ui.R | permissive | bcgov/msw-disposal-indicator | R | false | false | 1,924 | r | # Copyright 2018 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
shinyUI(
fluidPage(
includeCSS("www/style.css"),
tags$head(
tags$meta(name = "robots", content = "noindex")
),
fixedRow(align = "center",
div(style = div_css(p1.w, p1.h + 50),
conditionalPanel("output.plot_rd",
h2(paste(max_year, "Regional District Disposal Rates")),
div(class = "div-link", style = paste0("width:", translate_px(p1.w - 38), ";"),
HTML(paste0(div("Sort by: ", class = 'msw-label'),
actionButton("sort_name", "Name", class = 'msw-button'),
"/",
actionButton("sort_rate", "Disposal Rate", class = 'msw-button'),
"/",
actionButton("sort_population", "Population", class = 'msw-button')
))
)),
girafeOutput(outputId = 'plot_rd', height = p1.h))
),
fixedRow(align = "center",
div(style = div_css(p1.w, p1.h),
uiOutput("ui_info"),
girafeOutput(outputId = 'plot_year', height = p2.h),
br(),
uiOutput("ui_resources", style = "display: inline-block;"),
uiOutput("ui_dl", style = "display: inline-block;"))
))
) |
##' @title Diffuse and remove NA
##' @description Captures the supplied names as expressions, drops any that
##'   are \code{NA}, and returns the survivors as a list of symbols (suitable
##'   for splicing into a tidy-eval call). Relies on \code{enexprs()},
##'   \code{discard()} and \code{syms()} being attached (rlang/purrr).
##' @param ... strings (vector or multiple arguments)
##' @return A list of symbols, one per non-\code{NA} input element.
##' @author Shir Dekel
##' @export
diffuse_non_na <- function(...) {
  # Multiple arguments: each argument was captured individually by enexprs().
  if (length(list(...)) > 1) {
    diffused <-
      enexprs(...) %>%
      discard(is.na) %>%
      syms()
  } else {
    # Single argument (presumably a vector — confirm with callers): flatten
    # it first so discard()/syms() operate element-wise.
    diffused <-
      enexprs(...) %>%
      unlist() %>%
      discard(is.na) %>%
      syms()
  }
  return(diffused)
}
| /R/diffuse_non_na.R | no_license | shirdekel/phd_thesis | R | false | false | 410 | r | ##' @title Diffuse and remove NA
##' @param ... strings (vector or multiple arguments)
##' @return
##' @author Shir Dekel
##' @export
diffuse_non_na <- function(...) {
if (length(list(...)) > 1) {
diffused <-
enexprs(...) %>%
discard(is.na) %>%
syms()
} else {
diffused <-
enexprs(...) %>%
unlist() %>%
discard(is.na) %>%
syms()
}
return(diffused)
}
|
### From Princy: /work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/src/gene_level_v2.R
# Library dependencies for the allele-specific-expression (ASE) analysis.
require(data.table)
require(tidyverse)
require(magrittr)
require(foreach)
require(rstan)
require(doMC)
require(eagle1S)
require(doParallel)
# Register a parallel backend so the foreach %dopar% loop below runs in parallel.
registerDoParallel()
getDoParWorkers()
#inputargs = commandArgs(TRUE)
# Hard-coded inputs: chromosome number and tissue file name.
# (Originally read from the command line — see the commented line above.)
inputargs = c("8", "LIVER.txt")
print(inputargs)
which_chr = paste("chr", inputargs[1], sep = "")  # e.g. "chr8"
tissue = gsub("\\.txt", "", inputargs[2])  # strip ".txt" -> e.g. "LIVER"
cat(tissue, ":", which_chr, "\n")
topresDir = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/results/gene_level_with_env/"
resDir = paste(topresDir, tissue, "/", sep = "")
aseDir = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/ase/"
ciseqtlDir = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/ciseqtls/from_meri/"
checkpoint_dir = paste0(resDir, which_chr, "/")
#ase_fn = paste(aseDir, inputargs[2], sep = "")
ase_fn = "eagle_ase.txt"
meta_fn = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/subject_sex.txt"
phased_fn = paste(
"/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/phased_genotypes/",
which_chr,
".txt.gz",
sep = ""
)
gtex_abbrv = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/etc/gtex_colors.txt"
ciseqtl_tiss = read_tsv(gtex_abbrv) %>% filter(tissue_abbrv == tissue) %>% .$tissue_id
ciseqtl_fn = paste(ciseqtlDir, "independent_eqtls_txt.txt", sep = "")
bb_stan = eagle1S:::stanmodels$bb
min_reads = 1000
min_samples = 10
ase = read_tsv(ase_fn) %>% filter(CHR == which_chr)
ase = ase %>% rename(
CHROM = CHR,
individual = SUBJECT_ID,
REF = REF_ALLELE,
ALT = ALT_ALLELE,
r = REF_COUNT,
a = ALT_COUNT
)
ase = ase %>% filter(r + a >= 10)
ase = ase %>% filter(pmin(r,a) >= 3)
individuals_tissue_chr = unique(ase$individual)
meta = read_tsv(meta_fn) %>% rename(individual = SUBJID, x = SEX)
meta = meta[meta$individual %in% individuals_tissue_chr, ]
if (min(table(meta$x)) < 10 | length(table(meta$x)) == 1) {
print("very few samples")
quit()
} else{
dir.create(resDir, showWarnings = F)
dir.create(checkpoint_dir,
recursive = T,
showWarnings = F)
}
phased = read_tsv(phased_fn, comment = "##") %>% rename(CHROM =
`#CHROM`)
ciseqtl = read.delim(ciseqtl_fn, stringsAsFactors = F)
ciseqtl = ciseqtl %>%
filter(tissue == ciseqtl_tiss) %>%
filter(str_detect(variant_id, paste(which_chr, "_", sep = "")))
# filter(abs(tss_distance) <= 1e6)
ciseqtl$gene_id = sapply(strsplit(ciseqtl$gene_id, "\\."), function(x)
x[1])
# ciseqtl = ciseqtl %>% semi_join(ase, by = c("gene_id" = "GENE_ID"))
### select genes with enough samples
ase_genes = ase %>% group_by(GENE_ID) %>% summarise(n = n()) %>%
filter(n >= min_samples) %>% drop_na %>% .$GENE_ID
### select genes with enough reads
ase_genes = ase %>% filter(GENE_ID %in% ase_genes) %>%
group_by(GENE_ID) %>% summarise(total_count_snp = sum(TOTAL_COUNT)) %>%
filter(total_count_snp >= 1000) %>% .$GENE_ID
ase_cieqtl_genes = intersect(ase_genes, ciseqtl$gene_id)
ciseqtl = ciseqtl %>% filter(gene_id %in% ase_cieqtl_genes)
ase = ase %>% filter(GENE_ID %in% ase_cieqtl_genes)
exonic_snps = unique(ase$VARIANT_ID)
select_snps_phased = data.frame(
variant_id = c(ciseqtl$variant_id, exonic_snps),
stringsAsFactors = F
)
ase = ase %>% select(CHROM, POS, REF, ALT, individual, r, a, GENE_ID)
phased = semi_join(phased, select_snps_phased, by = c("ID" = "variant_id"))
phased_types = c("0|0", "0|1", "1|0", "1|1")
phased_hets = c("0|1", "1|0")
phased = phased %>% unite(pos_alt, POS, ALT, sep = "_", remove =
F) %>%
distinct(pos_alt, .keep_all = TRUE)
class(phased) = "data.frame"
rownames(phased) = phased$pos_alt
ase %<>% mutate(pos_alt = paste(POS, ALT, sep = "_"),
individual = as.character(individual)) %>%
filter(pos_alt %in% rownames(phased)) %>%
mutate(geno = phased[cbind(as.character(pos_alt), individual)])
unique_genes = unique(ase$GENE_ID)
# Log-odds transform: maps a probability in (0, 1) onto the real line.
logit = function(p) {
  odds = p / (1 - p)
  log(odds)
}
# Logistic function (inverse log-odds): maps a real value back into (0, 1).
inv_logit = function(g) {
  denom = 1 + exp(-g)
  1 / denom
}
min_samples = 10
concShape = 1.001
concRate = 0.001
min_reads = 1000
max_iter = 30
allres = foreach(
exonic_snp_pos = unique_genes,
.errorhandling = "remove",
.combine = bind_rows
) %dopar% {
# checkpointing per "gene"
if (!is.null(checkpoint_dir)) {
check_fn = paste0(checkpoint_dir, exonic_snp_pos, ".txt.gz")
if (file.exists(check_fn) & !interactive()) {
return(read_tsv(check_fn) %>%
mutate(exonic_snp_pos = exonic_snp_pos))
}
}
gene_ase = ase %>%
filter(GENE_ID == exonic_snp_pos) %>%
select(CHROM, POS, REF, ALT, r, a, individual, geno)
if (nrow(gene_ase) < min_samples) {
cat(exonic_snp_pos, "sample_size not enough\n")
return(NULL)
}
allelic_count_total = sum(gene_ase$a + gene_ase$r)
cat("Allelic total count ", allelic_count_total, "\n")
if (allelic_count_total < min_reads) {
cat(exonic_snp_pos, "readcounts not enough\n")
return(NULL)
}
cis_snps = ciseqtl %>% filter(gene_id == exonic_snp_pos) %>% .$variant_id
cat("Number of ciseQTLs:", exonic_snp_pos, length(cis_snps), "\n")
if (length(cis_snps) == 0) {
cat("no snps for gene", exonic_snp_pos, "\n")
return(NULL)
}
# iterate over cis SNPs
temp_results = foreach(
snp_pos = cis_snps,
.errorhandling = if (interactive())
"stop"
else
"remove",
.combine = bind_rows
) %do% {
cat(exonic_snp_pos, snp_pos, "\n")
reg_geno = (phased %>% filter(ID == snp_pos))[, 11:ncol(phased)] %>% as.matrix()
if (nrow(reg_geno) != 1) {
print("Skipping >biallelic site")
return(NULL)
}
reg_geno = data_frame(individual = colnames(reg_geno),
reg_geno = as.character(reg_geno))
# join the ASE and cisSNP phased genotypes
ase_temp = gene_ase %>% inner_join(reg_geno, by = "individual") %>%
filter(geno %in% phased_hets, # signal only comes from het exonic SNPs
reg_geno %in% phased_types, # require phased cisSNP
(r + a) > 0) %>% # must have some coverage
mutate(het_x = ifelse(reg_geno %in% phased_hets, # if cisSNP is het
ifelse(geno == reg_geno, 1, -1), # is it in phase with the exonicSNP?
0))
if (min(table(ase_temp$x)) < min_samples) {
cat(exonic_snp_pos, snp_pos, "sample_size not enough\n")
return(NULL)
}
if (nrow(ase_temp) < min_samples) {
cat(exonic_snp_pos, snp_pos, "sample_size not enough\n")
return(NULL)
}
num_het_snps = sum(ase_temp$het_x != 0)
if (num_het_snps < min_samples) {
cat(exonic_snp_pos, snp_pos, "sample_size not enough\n")
return(NULL) # no heterozygous regulatory SNPs
}
ase_temp %<>% left_join(meta, by = "individual")
ase_temp$x <- ifelse(ase_temp$x==2, 0, 1)
# LM for initialization of GLM
coverage = with(ase_temp, a + r)
y = logit(ase_temp$a / coverage)
# Compute starting values for the beta-binomial GLM optimizer.
# Betas come from an ordinary linear model on the logit allelic ratios;
# the concentration parameter from a moment-style estimate.
# NOTE(review): reads `y`, `coverage` and `ase_temp` from the enclosing
# environment (closure over the per-SNP data) — keep in sync with the caller.
get_init = function(x_mat) {
  beta_init = solve(t(x_mat) %*% x_mat, t(x_mat) %*% y) # LM via normal equations
  # now try to get a sensible initialization for the concentration param
  # BB variance is n*p*(1-p)*(conc+n)/(conc+1).
  # np(1-p) is binomial variance.
  # Second term: (conc+1+(n-1))/(conc+1)=1+(n-1)/(conc+1).
  prob = inv_logit(x_mat %*% beta_init)
  fitted_a = coverage * prob           # expected alt-allele counts under the LM fit
  var_a = (ase_temp$a - fitted_a) ^ 2  # observed squared deviations
  bin_var = coverage * prob * (1 - prob)
  #(coverage - 1) / (var_a / bin_var - 1) - 1 # method of moments
  conc_init = mean((coverage + 1) / (var_a / bin_var + 1) - 1) # like adding Gamma(2,2) pseudocounts
  # Fall back to a moderate default when the estimate is implausible.
  if (conc_init < 1.5 | conc_init > 100.)
    conc_init = 10.
  list(beta = as.numeric(beta_init) %>% as.array(),
       conc = conc_init)
}
# are we testing the exonic SNP itself (or a perfectly linked SNP)
testing_self_flag = length(unique(ase_temp$het_x)) == 1
# full model with eqtl and gxe
x_full = if (!testing_self_flag)
model.matrix(~ x + het_x + x:het_x, data = ase_temp) else
model.matrix(~ x, data = ase_temp)
stan_dat = list(
N = nrow(x_full),
P = ncol(x_full),
x = x_full,
ys = ase_temp$a,
ns = ase_temp$a + ase_temp$r,
concShape = concShape,
concRate = concRate
)
fit_full = if (det(t(x_full) %*% x_full) > 0) {
optimizing(bb_stan,
data = stan_dat)
} else {
list(value = NA,
par = rep(NA, 1 + ncol(x_full)))
}
# TODO: does this work when testing the exonic SNP itself?
names(fit_full$par) = c("conc", "intercept", if (!testing_self_flag)
"b_eqtl", "b_gxe", "env")
# eqtl but no gxe model
x_eqtl = if (!testing_self_flag)
model.matrix(~ het_x , data = ase_temp)
else
model.matrix(~ 1, data = ase_temp)
stan_dat$x = x_eqtl
stan_dat$N = nrow(x_eqtl)
stan_dat$P = ncol(x_eqtl)
fit_eqtl = optimizing(bb_stan,
data = stan_dat)$value
# null model, no eQTL or GxE
x_0 = model.matrix(~ 1, data = ase_temp)
stan_dat$x = x_0
stan_dat$N = nrow(x_0)
stan_dat$P = ncol(x_0)
fit_0 = optimizing(bb_stan,
data = stan_dat)$value
df = ncol(x_full) - ncol(x_eqtl)
# if (F) { # debugging code
# ase_temp %>% mutate(het=reg_geno,
# coverage=r+a,
# ar = a/coverage,
# in_phase=geno == reg_geno) %>%
# ggplot(aes( x,ar,size=coverage,col=factor(het_x))) + geom_point() + ylim(0,1) +
# xlab("Environmental factor") + ylab("Phased allelic ratio")
# pchisq( 2.0*(fit_full$value - fit_eqtl), df = df, lower.tail = F)
# pchisq( 2.0*(fit_eqtl - fit_0), df = 1, lower.tail = F)
# }
num_het_ind = length(unique(ase_temp %>% filter(het_x != 0) %>% .$individual)) # for performance analysis later
data_frame(
total_count = sum(coverage),
num_het_snps = num_het_snps,
num_het_ind = num_het_ind,
reg_snp_pos = snp_pos,
df = df,
l0 = fit_0,
l_geno = fit_eqtl,
l_interact = fit_full$value) %>% cbind(as_data_frame(as.list(fit_full$par)))
}
if (!is.null(checkpoint_dir)) {
print("Saving results")
checkpoint_file = gzfile(check_fn, "w")
temp_results %>% write_tsv(checkpoint_file) # write_tsv
close(checkpoint_file)
}
if (is.null(temp_results)) {
return(NULL)
} else{
temp_results %>%
mutate(exonic_snp_pos = exonic_snp_pos)
}
}
#res_file= gzfile( paste0(outputdir,which_chr,".txt.gz"), "w" )
#allres %>% format(digits=5) %>% write.table(res_file, quote = F, row.names = F, col.names = T, sep="\t")
#close(res_file)
# allres = ase %>%
# eagle1S(phased,
# meta,
# cisdist = cisdist,
# checkpoint_dir=checkpoint_dir)
save(allres, file = paste(resDir, which_chr, ".Rdata", sep = ""))
| /Extended_Methods/Downstream_analysis/sn_spMF/ASB_ChIP/eagle_ASB_ASE.R | permissive | heyuan7676/ts_eQTLs | R | false | false | 11,490 | r | ### From Princy: /work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/src/gene_level_v2.R
require(data.table)
require(tidyverse)
require(magrittr)
require(foreach)
require(rstan)
require(doMC)
require(eagle1S)
require(doParallel)
registerDoParallel()
getDoParWorkers()
#inputargs = commandArgs(TRUE)
inputargs = c("8", "LIVER.txt")
print(inputargs)
which_chr = paste("chr", inputargs[1], sep = "")
tissue = gsub("\\.txt", "", inputargs[2])
cat(tissue, ":", which_chr, "\n")
topresDir = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/results/gene_level_with_env/"
resDir = paste(topresDir, tissue, "/", sep = "")
aseDir = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/ase/"
ciseqtlDir = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/ciseqtls/from_meri/"
checkpoint_dir = paste0(resDir, which_chr, "/")
#ase_fn = paste(aseDir, inputargs[2], sep = "")
ase_fn = "eagle_ase.txt"
meta_fn = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/subject_sex.txt"
phased_fn = paste(
"/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/phased_genotypes/",
which_chr,
".txt.gz",
sep = ""
)
gtex_abbrv = "/work-zfs/abattle4/parsana/gtex_gender/variant_level_analysis/data/etc/gtex_colors.txt"
ciseqtl_tiss = read_tsv(gtex_abbrv) %>% filter(tissue_abbrv == tissue) %>% .$tissue_id
ciseqtl_fn = paste(ciseqtlDir, "independent_eqtls_txt.txt", sep = "")
bb_stan = eagle1S:::stanmodels$bb
min_reads = 1000
min_samples = 10
ase = read_tsv(ase_fn) %>% filter(CHR == which_chr)
ase = ase %>% rename(
CHROM = CHR,
individual = SUBJECT_ID,
REF = REF_ALLELE,
ALT = ALT_ALLELE,
r = REF_COUNT,
a = ALT_COUNT
)
ase = ase %>% filter(r + a >= 10)
ase = ase %>% filter(pmin(r,a) >= 3)
individuals_tissue_chr = unique(ase$individual)
meta = read_tsv(meta_fn) %>% rename(individual = SUBJID, x = SEX)
meta = meta[meta$individual %in% individuals_tissue_chr, ]
if (min(table(meta$x)) < 10 | length(table(meta$x)) == 1) {
print("very few samples")
quit()
} else{
dir.create(resDir, showWarnings = F)
dir.create(checkpoint_dir,
recursive = T,
showWarnings = F)
}
phased = read_tsv(phased_fn, comment = "##") %>% rename(CHROM =
`#CHROM`)
ciseqtl = read.delim(ciseqtl_fn, stringsAsFactors = F)
ciseqtl = ciseqtl %>%
filter(tissue == ciseqtl_tiss) %>%
filter(str_detect(variant_id, paste(which_chr, "_", sep = "")))
# filter(abs(tss_distance) <= 1e6)
ciseqtl$gene_id = sapply(strsplit(ciseqtl$gene_id, "\\."), function(x)
x[1])
# ciseqtl = ciseqtl %>% semi_join(ase, by = c("gene_id" = "GENE_ID"))
### select genes with enough samples
ase_genes = ase %>% group_by(GENE_ID) %>% summarise(n = n()) %>%
filter(n >= min_samples) %>% drop_na %>% .$GENE_ID
### select genes with enough reads
ase_genes = ase %>% filter(GENE_ID %in% ase_genes) %>%
group_by(GENE_ID) %>% summarise(total_count_snp = sum(TOTAL_COUNT)) %>%
filter(total_count_snp >= 1000) %>% .$GENE_ID
ase_cieqtl_genes = intersect(ase_genes, ciseqtl$gene_id)
ciseqtl = ciseqtl %>% filter(gene_id %in% ase_cieqtl_genes)
ase = ase %>% filter(GENE_ID %in% ase_cieqtl_genes)
exonic_snps = unique(ase$VARIANT_ID)
select_snps_phased = data.frame(
variant_id = c(ciseqtl$variant_id, exonic_snps),
stringsAsFactors = F
)
ase = ase %>% select(CHROM, POS, REF, ALT, individual, r, a, GENE_ID)
phased = semi_join(phased, select_snps_phased, by = c("ID" = "variant_id"))
phased_types = c("0|0", "0|1", "1|0", "1|1")
phased_hets = c("0|1", "1|0")
phased = phased %>% unite(pos_alt, POS, ALT, sep = "_", remove =
F) %>%
distinct(pos_alt, .keep_all = TRUE)
class(phased) = "data.frame"
rownames(phased) = phased$pos_alt
ase %<>% mutate(pos_alt = paste(POS, ALT, sep = "_"),
individual = as.character(individual)) %>%
filter(pos_alt %in% rownames(phased)) %>%
mutate(geno = phased[cbind(as.character(pos_alt), individual)])
unique_genes = unique(ase$GENE_ID)
logit = function(p) {
log(p / (1 - p))
}
inv_logit = function(g) {
1 / (1 + exp(-g))
}
min_samples = 10
concShape = 1.001
concRate = 0.001
min_reads = 1000
max_iter = 30
allres = foreach(
exonic_snp_pos = unique_genes,
.errorhandling = "remove",
.combine = bind_rows
) %dopar% {
# checkpointing per "gene"
if (!is.null(checkpoint_dir)) {
check_fn = paste0(checkpoint_dir, exonic_snp_pos, ".txt.gz")
if (file.exists(check_fn) & !interactive()) {
return(read_tsv(check_fn) %>%
mutate(exonic_snp_pos = exonic_snp_pos))
}
}
gene_ase = ase %>%
filter(GENE_ID == exonic_snp_pos) %>%
select(CHROM, POS, REF, ALT, r, a, individual, geno)
if (nrow(gene_ase) < min_samples) {
cat(exonic_snp_pos, "sample_size not enough\n")
return(NULL)
}
allelic_count_total = sum(gene_ase$a + gene_ase$r)
cat("Allelic total count ", allelic_count_total, "\n")
if (allelic_count_total < min_reads) {
cat(exonic_snp_pos, "readcounts not enough\n")
return(NULL)
}
cis_snps = ciseqtl %>% filter(gene_id == exonic_snp_pos) %>% .$variant_id
cat("Number of ciseQTLs:", exonic_snp_pos, length(cis_snps), "\n")
if (length(cis_snps) == 0) {
cat("no snps for gene", exonic_snp_pos, "\n")
return(NULL)
}
# iterate over cis SNPs
temp_results = foreach(
snp_pos = cis_snps,
.errorhandling = if (interactive())
"stop"
else
"remove",
.combine = bind_rows
) %do% {
cat(exonic_snp_pos, snp_pos, "\n")
reg_geno = (phased %>% filter(ID == snp_pos))[, 11:ncol(phased)] %>% as.matrix()
if (nrow(reg_geno) != 1) {
print("Skipping >biallelic site")
return(NULL)
}
reg_geno = data_frame(individual = colnames(reg_geno),
reg_geno = as.character(reg_geno))
# join the ASE and cisSNP phased genotypes
ase_temp = gene_ase %>% inner_join(reg_geno, by = "individual") %>%
filter(geno %in% phased_hets, # signal only comes from het exonic SNPs
reg_geno %in% phased_types, # require phased cisSNP
(r + a) > 0) %>% # must have some coverage
mutate(het_x = ifelse(reg_geno %in% phased_hets, # if cisSNP is het
ifelse(geno == reg_geno, 1, -1), # is it in phase with the exonicSNP?
0))
if (min(table(ase_temp$x)) < min_samples) {
cat(exonic_snp_pos, snp_pos, "sample_size not enough\n")
return(NULL)
}
if (nrow(ase_temp) < min_samples) {
cat(exonic_snp_pos, snp_pos, "sample_size not enough\n")
return(NULL)
}
num_het_snps = sum(ase_temp$het_x != 0)
if (num_het_snps < min_samples) {
cat(exonic_snp_pos, snp_pos, "sample_size not enough\n")
return(NULL) # no heterozygous regulatory SNPs
}
ase_temp %<>% left_join(meta, by = "individual")
ase_temp$x <- ifelse(ase_temp$x==2, 0, 1)
# LM for initialization of GLM
coverage = with(ase_temp, a + r)
y = logit(ase_temp$a / coverage)
get_init = function(x_mat) {
beta_init = solve(t(x_mat) %*% x_mat, t(x_mat) %*% y) # LM
# now try to get a sensible initialization for the concentration param
# BB variance is n*p*(1-p)*(conc+n)/(conc+1).
# np(1-p) is binomial variance.
# Second term: (conc+1+(n-1))/(conc+1)=1+(n-1)/(conc+1).
prob = inv_logit(x_mat %*% beta_init)
fitted_a = coverage * prob
var_a = (ase_temp$a - fitted_a) ^ 2
bin_var = coverage * prob * (1 - prob)
#(coverage - 1) / (var_a / bin_var - 1) - 1 # method of moments
conc_init = mean((coverage + 1) / (var_a / bin_var + 1) - 1) # like adding Gamma(2,2) pseudocounts
if (conc_init < 1.5 | conc_init > 100.)
conc_init = 10.
list(beta = as.numeric(beta_init) %>% as.array(),
conc = conc_init)
}
# are we testing the exonic SNP itself (or a perfectly linked SNP)
testing_self_flag = length(unique(ase_temp$het_x)) == 1
# full model with eqtl and gxe
x_full = if (!testing_self_flag)
model.matrix(~ x + het_x + x:het_x, data = ase_temp) else
model.matrix(~ x, data = ase_temp)
stan_dat = list(
N = nrow(x_full),
P = ncol(x_full),
x = x_full,
ys = ase_temp$a,
ns = ase_temp$a + ase_temp$r,
concShape = concShape,
concRate = concRate
)
fit_full = if (det(t(x_full) %*% x_full) > 0) {
optimizing(bb_stan,
data = stan_dat)
} else {
list(value = NA,
par = rep(NA, 1 + ncol(x_full)))
}
# TODO: does this work when testing the exonic SNP itself?
names(fit_full$par) = c("conc", "intercept", if (!testing_self_flag)
"b_eqtl", "b_gxe", "env")
# eqtl but no gxe model
x_eqtl = if (!testing_self_flag)
model.matrix(~ het_x , data = ase_temp)
else
model.matrix(~ 1, data = ase_temp)
stan_dat$x = x_eqtl
stan_dat$N = nrow(x_eqtl)
stan_dat$P = ncol(x_eqtl)
fit_eqtl = optimizing(bb_stan,
data = stan_dat)$value
# null model, no eQTL or GxE
x_0 = model.matrix(~ 1, data = ase_temp)
stan_dat$x = x_0
stan_dat$N = nrow(x_0)
stan_dat$P = ncol(x_0)
fit_0 = optimizing(bb_stan,
data = stan_dat)$value
df = ncol(x_full) - ncol(x_eqtl)
# if (F) { # debugging code
# ase_temp %>% mutate(het=reg_geno,
# coverage=r+a,
# ar = a/coverage,
# in_phase=geno == reg_geno) %>%
# ggplot(aes( x,ar,size=coverage,col=factor(het_x))) + geom_point() + ylim(0,1) +
# xlab("Environmental factor") + ylab("Phased allelic ratio")
# pchisq( 2.0*(fit_full$value - fit_eqtl), df = df, lower.tail = F)
# pchisq( 2.0*(fit_eqtl - fit_0), df = 1, lower.tail = F)
# }
num_het_ind = length(unique(ase_temp %>% filter(het_x != 0) %>% .$individual)) # for performance analysis later
data_frame(
total_count = sum(coverage),
num_het_snps = num_het_snps,
num_het_ind = num_het_ind,
reg_snp_pos = snp_pos,
df = df,
l0 = fit_0,
l_geno = fit_eqtl,
l_interact = fit_full$value) %>% cbind(as_data_frame(as.list(fit_full$par)))
}
if (!is.null(checkpoint_dir)) {
print("Saving results")
checkpoint_file = gzfile(check_fn, "w")
temp_results %>% write_tsv(checkpoint_file) # write_tsv
close(checkpoint_file)
}
if (is.null(temp_results)) {
return(NULL)
} else{
temp_results %>%
mutate(exonic_snp_pos = exonic_snp_pos)
}
}
# NOTE(review): the commented-out lines below are an alternative gzipped-TSV
# output path and an alternative invocation of eagle1S(); kept for reference.
#res_file= gzfile( paste0(outputdir,which_chr,".txt.gz"), "w" )
#allres %>% format(digits=5) %>% write.table(res_file, quote = F, row.names = F, col.names = T, sep="\t")
#close(res_file)
# allres = ase %>%
# eagle1S(phased,
# meta,
# cisdist = cisdist,
# checkpoint_dir=checkpoint_dir)
# Persist the per-chromosome results as an .Rdata file. Assumes `allres`,
# `resDir` and `which_chr` are defined earlier in the script -- TODO confirm.
save(allres, file = paste(resDir, which_chr, ".Rdata", sep = ""))
|
plot3.R
install.packages("readr")
library(readr)
hhpower <- read_delim("household_power_consumption.txt", delim = ";")
str(hhpower)
library(lubridate)
library(dplyr)
hhpower.f1 <- filter(hhpower, Date == "1/2/2007")
str(hhpower.f1)
hhpower.f2 <- filter(hhpower, Date == "2/2/2007")
str(hhpower.f2)
hhpower.feb <- bind_rows(hhpower.f1, hhpower.f2)
str(hhpower.feb)
hhpower.feb$Date <- dmy(hhpower.feb$Date)#crucial - next step creates NA for Date and datetime otherwise
hhpower.feb$datetime <- as.POSIXct(paste(hhpower.feb$Date, hhpower.feb$Time), format="%Y-%m-%d %H:%M:%S")
str(hhpower.feb)
str(hhpower.feb)
head(hhpower.feb)
tail(hhpower.feb)
plot(x=hhpower.feb$datetime, y=hhpower.feb$Sub_metering_1, type="l", xlab = "", ylab = "Energy submetering")
lines(hhpower.feb$datetime, hhpower.feb$Sub_metering_2, col= "red")
lines(hhpower.feb$datetime, hhpower.feb$Sub_metering_3, col= "blue")
legend ("topright", lty=1, col=c('black', 'red', 'blue'), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
| /plot3.R | no_license | j-tamad/project-1-plots | R | false | false | 1,020 | r | plot3.R
install.packages("readr")
library(readr)
hhpower <- read_delim("household_power_consumption.txt", delim = ";")
str(hhpower)
library(lubridate)
library(dplyr)
hhpower.f1 <- filter(hhpower, Date == "1/2/2007")
str(hhpower.f1)
hhpower.f2 <- filter(hhpower, Date == "2/2/2007")
str(hhpower.f2)
hhpower.feb <- bind_rows(hhpower.f1, hhpower.f2)
str(hhpower.feb)
hhpower.feb$Date <- dmy(hhpower.feb$Date)#crucial - next step creates NA for Date and datetime otherwise
hhpower.feb$datetime <- as.POSIXct(paste(hhpower.feb$Date, hhpower.feb$Time), format="%Y-%m-%d %H:%M:%S")
str(hhpower.feb)
str(hhpower.feb)
head(hhpower.feb)
tail(hhpower.feb)
plot(x=hhpower.feb$datetime, y=hhpower.feb$Sub_metering_1, type="l", xlab = "", ylab = "Energy submetering")
lines(hhpower.feb$datetime, hhpower.feb$Sub_metering_2, col= "red")
lines(hhpower.feb$datetime, hhpower.feb$Sub_metering_3, col= "blue")
legend ("topright", lty=1, col=c('black', 'red', 'blue'), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
|
## cacheMatrix.R provides helpers for caching the inverse of a matrix so it
## is computed at most once. makeCacheMatrix() builds a special "matrix"
## object (really a list of closures) and cacheSolve() retrieves the cached
## inverse or computes and stores it.
##
## The list returned by makeCacheMatrix() exposes four functions:
##   set      -- replace the stored matrix (and clear any cached inverse)
##   get      -- return the stored matrix
##   setsolve -- store a computed inverse in the cache
##   getsolve -- return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setsolve() is called
  # Replace the matrix held in the enclosing environment and invalidate
  # any previously cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored input matrix.
  get <- function() x
  # Store / retrieve the cached inverse.
  setsolve <- function(solve) inv <<- solve
  getsolve <- function() inv
  # Expose the four accessors as a named list.
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## (Removed a stray top-level `m` statement here: `m` exists only inside the
## closures of makeCacheMatrix(), so evaluating it at the top level raised
## "object 'm' not found" whenever this file was sourced.)

## cacheSolve() retrieves the inverse of the special "matrix" created by
## makeCacheMatrix(). It first checks whether the inverse is already stored
## in the object's cache; if so, the cached result is returned instead of
## recomputing it.
##
## @param x a list created by makeCacheMatrix()
## @param ... further arguments passed on to solve()
## @return a matrix that is the inverse of the matrix stored in `x`
cacheSolve <- function(x, ...) {
  m <- x$getsolve()
  # Cache hit: reuse the previously computed inverse.
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  # Cache miss: compute the inverse with solve() and store it for next time.
  data <- x$get()
  m <- solve(data, ...)
  x$setsolve(m)
  m
}
| /cachematrix.R | no_license | anusri25/ProgrammingAssignment2 | R | false | false | 1,828 | r | ##The cacheMatrix.R program contains functions to create a matrix,
##and then calculate the inverse of that input matrix.
##The program contains two main functions:makeCacheMatrix and cacheSolve.escription of what your
##The first function, makeCacheMatrix creates a special "vector", which is really a list containing a function to
##set the value of the matrix
##get the value of the matrix
##set the inverse of the matrix
##get the inverse of the matrix
##This function creates a matrix that is invertible
##and also calculates the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
##Defining the "get" function for retrieving the input matrix
get <- function() x
##calculating the inverse of the input matrix and storing it in the variable m
setsolve <- function(solve) m <<- solve
getsolve <- function() m
##creating the list of functions
list(set = set, get = get,
setsolve = setsolve,
getsolve = getsolve)
}
## (Removed a stray top-level `m` statement here: `m` exists only inside the
## closures of makeCacheMatrix(), so evaluating it at the top level raised
## "object 'm' not found" whenever this file was sourced.)

##The cacheSolve function retrieves the inverse of the input matrix
##by using the functions defined in makeCacheMatrix.
##The function first checks if the inverse of the matrix
##is already stored in the cache and if it exists, it displays
##the stored result instead of calculating it again.
##
## @param x a list created by makeCacheMatrix()
## @param ... further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getsolve()
  ## Check if the inverse already exists in cache; reuse it if so.
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  ## If the inverse does not exist, calculate it with solve()
  ## and store it in the cache for subsequent calls.
  data <- x$get()
  m <- solve(data, ...)
  x$setsolve(m)
  m
}
|
#' Order substrings of a character vector
#' @name order_substr
#' @description Split a character vector, reorder the substrings, and paste back together.
#' @param x a character vector.
#' @param split a regular expression by which to split. See 'Details'.
#' @param perl logical, whether to interpret \code{split} as a Perl-style regular expression.
#' @param collapse a character string with which to collapse the re-ordered sub-strings.
#' @param reverse logical, whether to reverse the ordering.
#' @return A character vector with substrings ordered alphabetically and re-collapsed.
#' @details By default, \code{order_substr()} splits each string of a character vector at any commas,
#' forward slashes, hyphens, and whitespace, regardless of whether these characters
#' are surrounded by whitespace.
#'
#' Alternative regular expressions for splitting can be used via \code{split}.
#' Other strings may be used to separate the alphabetized output via \code{collapse}.
#'
#' @note Cleaning with \code{order_substr()} works poorly when substrings are inconsistently separated
#' by the split pattern. For example, the vector of tuna species below hyphenates \code{yellow-fin}
#' but leaves \code{bluefin} as a single word. Therefore, with the default split patterns,
#' \code{order_substr()} splits \code{yellow-fin} prior to alphabetization.
#'
#' @examples
#' tuna <- c("tuna,skipjack", "tuna , bluefin", "yellow-fin - tuna", "tuna, albacore")
#' order_substr(tuna)
#'
#' colors <- c("green/red", "yellow / blue", "orange purple")
#' order_substr(colors, collapse = "-", reverse = TRUE)
#' @export
order_substr <-
  function(x, split = "\\s*,\\s*|\\s*/\\s*|\\s*-\\s*|\\s+",
           perl = TRUE, collapse = " ", reverse = FALSE) {
    # Remember which inputs were NA so they can be restored afterwards
    # (sort() drops NA, which would otherwise collapse them to "").
    na_ind <- which(is.na(x))
    # Split each element into its own character vector at the split pattern.
    parts <- strsplit(x, split = split, perl = perl)
    # Sort each element's substrings alphabetically and paste back together.
    out <- vapply(
      parts,
      function(p) paste(sort(p, decreasing = reverse), collapse = collapse),
      FUN.VALUE = character(1)
    )
    # Trim a single leading/trailing whitespace character, if present.
    # (Canonical gsub(pattern, replacement, x) argument order; the original
    # relied on partial argument matching of `patt`/`rep`.)
    out <- gsub("^\\s|\\s$", "", out)
    # Restore NA inputs.
    out[na_ind] <- NA_character_
    out
  }
| /R/order_substr.R | no_license | coletl/easyr | R | false | false | 2,483 | r | #' Order substrings of a character vector
#' @name order_substr
#' @description Split a character vector, reorder the substrings, and paste back together.
#' @param x a character vector.
#' @param split a regular expressin by which to split. See \link{Details}.
#' @param perl logical, whether to interpret \code{split} as a Perl-style regular expression.
#' @param collapse a character string with which to collapse the re-ordered sub-strings.
#' @param reverse logical, whether to reverse the ordering.
#' @return A character vector with substrings ordered alphabetically and re-collapsed.
#' @details By default, \code{order_substr()} splits each string of a character vector at any commas,
#' forward slashes, hyphens, and whitespace, regardless of whether these characters
#' are surrounded by whitespace.
#'
#' Alternative regular expressions for splitting can be used via \code{split}.
#' Other strings may be used to separate the alphabetized output via \code{collapse}.
#'
#' @note Cleaning with \code{order_substr()} works poorly when substrings are inconsistently separated
#' by the split pattern. For example, the vector of tuna species below hyphenates \code{yellow-fin}
#' but leaves \code{bluefin} as a single word. Therefore, with the default split patterns,
#' \code{order_substr()} splits \code{yellow-fin} prior to alphabetization.
#'
#' @examples
#' tuna <- c("tuna,skipjack", "tuna , bluefin", "yellow-fin - tuna", "tuna, albacore")
#' order_substr(tuna)
#'
#' colors <- c("green/red", "yellow / blue", "orange purple")
#' order_substr(colors, collapse = "-", reverse = TRUE)
#' @export
order_substr <-
function(x, split = "\\s*,\\s*|\\s*/\\s*|\\s*-\\s*|\\s+",
perl = TRUE, collapse = " ", reverse = FALSE) {
# Locate NA values to add back in later
na_ind <- which(is.na(x))
# Split each element into its own character vector at user-defined split patterns
tmp <- strsplit(x, split = split, perl = perl)
# Sort the substrings alphabetically and collapse.
tmp2 <- vapply(tmp,
function(x) paste(sort(x, decreasing = reverse),
collapse = collapse
),
FUN.VALUE = character(1)
)
# Trim leading and trailing whitespace
tmp3 <- gsub(tmp2, patt = "^\\s|\\s$", rep = "")
# Add NA values back in
tmp3[na_ind] <- NA_character_
return(tmp3)
}
|
# ex3-1: simple linear regression of hospital infection risk on length of stay.
library(tidyverse)
hosp_data <- read.table(file = "./hospital.txt", header = TRUE)
head(hosp_data)
x <- hosp_data$Stay
y <- hosp_data$InfctRsk
#a. Use lm() to fit regression line for x and y
reg_line <- lm(y ~ x)
summary(reg_line)
plot(x, y)
abline(reg_line)
# anova(reg_line) prints ANOVA table for reg_line
#b. 95% confidence interval for the mean response at new x values
new_data <- data.frame(x = c(7.5, 10, 12.5))
reg_line_95c <- predict(reg_line, interval = "c", newdata = new_data)
#c. 95% prediction interval at the same x values
reg_line_95p <- predict(reg_line, interval = "p", newdata = new_data)
#d. Unbiased estimator for error variability: MSE = SSE / (n - 2)
n <- length(x)  # sample size; was previously undefined ("object 'n' not found")
b0 <- reg_line$coefficients[1]
b1 <- reg_line$coefficients[2]
yhat <- b0 + b1 * x  # fitted values
MSE <- sum((yhat - y)^2) / (n - 2)
# Standard error of b1, algebraically sqrt(MSE / sum((x - mean(x))^2))
SE_b1 <- (1 / sqrt(n)) * sqrt(MSE) / sqrt(mean((x - mean(x))^2))
| /Exercise-3/ex3-1.R | no_license | zachnguyen03/reg-analysis-r-exercises | R | false | false | 794 | r | library(tidyverse)
hosp_data <- read.table(file='./hospital.txt', header= TRUE)
head(hosp_data)
x <- hosp_data$Stay
y <- hosp_data$InfctRsk
#a. Use lm() to fit regression line for x and y
reg_line <- lm(y~x)
summary(reg_line)
plot(x,y)
abline(reg_line)
# anova(reg_line) prints ANOVA table for reg_line
#b. 95% confidence interval
new_data <- data.frame(x = c(7.5,10, 12.5))
reg_line_95c <- predict(reg_line, interval="c", newdata=new_data)
#c. 95% prediction interval
reg_line_95p <- predict(reg_line, interval="p", newdata=new_data)
#d. Unbiased estimator for error variability
b0 <- reg_line$coefficients[1]
b1 <- reg_line$coefficients[2]
yhat <- b0 + b1*x # fitted yhat
MSE <- (sum((yhat-y)^2)/(n-2))
#Standard Error of b1
SE_b1 <- (1/sqrt(n))*sqrt(MSE) / sqrt(mean((x-mean(x))^2))
|
# factor finder: build a per-column summary (cardinality, integer/binary
# flags, mean/sd/min/max) over the combined train+test feature matrix.
# Assumes `train` and `test` data frames exist in the calling environment,
# and that train's column 371 is the target to drop -- TODO confirm.
train_temp <- train[, -371]
test_temp <- test
full_data <- rbind(train_temp, test_temp)
# Preallocate one summary row per feature (370 features expected).
df_temp <- data.frame(VarName = rep(0, 370), Factors = rep(0, 370), NonReal = rep(0, 370), Mean = rep(0, 370), SD = rep(0, 370), Min = rep(0, 370), Max = rep(0, 370))
# Coerce the name column to character (it was initialized numeric above).
class(df_temp$VarName) <- "character"
for (i in colnames(full_data)) {
j <- which(colnames(full_data) == i)
df_temp[j, 1] <- i
# Column 2: number of distinct values in the column.
df_temp[j, 2] <- NROW(unique(full_data[, i]))
# Column 3 encoding: 0 = real-valued, 1 = integer-valued,
# 2 = binary (two integer values), 3 = constant (single value).
df_temp[j, 3] <- ifelse(sum(floor(full_data[, i]) == full_data[, i]) == nrow(full_data), 1, 0)
df_temp[j, 3] <- ifelse((df_temp[j ,3] == 1) & (df_temp[j, 2] == 2), ifelse(df_temp[j, 2] == 1, 3, 2), df_temp[j, 3])
df_temp[j ,4] <- mean(full_data[, i])
df_temp[j, 5] <- sd(full_data[, i])
df_temp[j, 6] <- min(full_data[, i])
df_temp[j, 7] <- max(full_data[, i])
# Print a one-line summary for the column as it is processed.
cat(i, ifelse(df_temp[j ,3] > 0, ifelse(df_temp[j, 3] == 1, " (Integer Valued)", ifelse(df_temp[j ,3] == 2, " (Binary Valued)", " (Unique)")), " (Real Valued)"), ": ", df_temp[j, 2], " unique values.\n", sep = "")
}
# Re-print all columns sorted by descending cardinality, with stats.
df_temp <- df_temp[order(-df_temp[, 2]), ]
for (i in 1:nrow(df_temp)) {
cat("#", df_temp[i, 1], ifelse(df_temp[i ,3] > 0, ifelse(df_temp[i, 3] == 1, " (Integer Valued)", ifelse(df_temp[i ,3] == 2, " (Binary Valued)", " (Unique)")), " (Real Valued)"), ": ", df_temp[i, 2], " unique values. ", df_temp[i, 4], "+", df_temp[i, 5], " [", df_temp[i, 6], ", ", df_temp[i, 7], "]\n", sep = "")
}
# Distribution of cardinalities across all features.
table(df_temp[, 2])
#ID (Integer Valued): 151838 unique values. 75919.5+43832 [1, 151838]
#var38 (Real Valued): 109982 unique values. 117311+217717.5 [1202.73, 28894396]
#saldo_var30 (Real Valued): 30654 unique values. 13869.89+64512.88 [-4942.26, 4212656]
#saldo_medio_var5_ult3 (Real Valued): 29322 unique values. 1054.428+8164.239 [-1036.08, 544365.6]
#saldo_var42 (Real Valued): 28458 unique values. 7353.417+51614.02 [-4942.26, 4212656]
#saldo_medio_var5_ult1 (Real Valued): 25650 unique values. 1067.076+9701.489 [-1605.15, 656187.1]
#saldo_medio_var5_hace2 (Real Valued): 25270 unique values. 1604.172+12086.77 [-1573.23, 812137.3]
#saldo_var5 (Real Valued): 20454 unique values. 1008.811+9814.646 [-2895.72, 619329.2]
#imp_op_var39_comer_ult3 (Real Valued): 16284 unique values. 121.3306+577.0944 [0, 47943.96]
#imp_op_var41_comer_ult3 (Real Valued): 16003 unique values. 114.2617+526.2707 [0, 28927.89]
#imp_op_var39_ult1 (Real Valued): 14814 unique values. 140.6267+715.3216 [0, 72511.77]
#imp_op_var41_ult1 (Real Valued): 14597 unique values. 137.2287+697.7656 [0, 72511.77]
#imp_op_var39_comer_ult1 (Real Valued): 13414 unique values. 73.33668+351.9674 [0, 21093.96]
#saldo_medio_var5_hace3 (Real Valued): 13249 unique values. 896.861+8767.932 [-32.85, 1542339]
#imp_op_var41_comer_ult1 (Real Valued): 13155 unique values. 69.26854+325.1229 [0, 14784.9]
#saldo_var37 (Real Valued): 7247 unique values. 37.29588+501.355 [0, 90000]
#imp_trans_var37_ult1 (Real Valued): 6808 unique values. 1948.867+28468.85 [0, 3e+06]
#saldo_medio_var12_ult3 (Real Valued): 6801 unique values. 4572.94+37818.87 [0, 3002214]
#saldo_medio_var12_ult1 (Real Valued): 6676 unique values. 5881.09+48741.89 [0, 4356643]
#saldo_var12 (Real Valued): 5858 unique values. 6205.616+50651.96 [0, 4202599]
#saldo_medio_var12_hace2 (Real Valued): 5469 unique values. 4178.141+41806.71 [0, 4500000]
#saldo_var24 (Real Valued): 5041 unique values. 6123.537+50557.84 [0, 4202599]
#saldo_medio_var13_corto_ult3 (Real Valued): 4513 unique values. 3863.91+25401.18 [0, 450000]
#saldo_medio_var8_ult3 (Real Valued): 4364 unique values. 107.4197+1876.621 [-1844.52, 216012]
#saldo_medio_var8_ult1 (Real Valued): 4260 unique values. 121.4081+2222.832 [-3925.92, 375060]
#imp_var43_emit_ult1 (Real Valued): 4240 unique values. 923.3666+19222.83 [0, 2880000]
#saldo_var8 (Real Valued): 3948 unique values. 138.6336+2460.739 [-4942.26, 375060]
#saldo_var26 (Real Valued): 3139 unique values. 77.35887+720.8709 [0, 69756.72]
#saldo_var25 (Real Valued): 3006 unique values. 73.79599+706.5955 [0, 69756.72]
#saldo_medio_var13_corto_hace2 (Real Valued): 2902 unique values. 3610.341+25999.56 [0, 450000]
#saldo_medio_var8_hace2 (Real Valued): 2618 unique values. 64.52489+1715.668 [-287.67, 264702.9]
#saldo_medio_var12_hace3 (Real Valued): 2110 unique values. 659.1445+10643.69 [0, 1202339]
#saldo_medio_var13_corto_ult1 (Real Valued): 1671 unique values. 4880.83+31828.14 [0, 450000]
#saldo_medio_var13_corto_hace3 (Real Valued): 1608 unique values. 528.6911+6848.963 [0, 304838.7]
#saldo_var13 (Real Valued): 1469 unique values. 6516.469+38169.34 [0, 1500000]
#saldo_var13_corto (Real Valued): 1254 unique values. 5027.191+32532.35 [0, 450000]
#saldo_medio_var13_largo_ult3 (Real Valued): 999 unique values. 761.1156+12239.74 [0, 1034483]
#imp_ent_var16_ult1 (Real Valued): 938 unique values. 84.68832+1655.242 [0, 240000]
#saldo_medio_var8_hace3 (Real Valued): 793 unique values. 9.43363+514.7723 [0, 95260.56]
#imp_aport_var13_hace3 (Real Valued): 712 unique values. 2811.729+25297.97 [0, 1008000]
#imp_op_var40_comer_ult3 (Real Valued): 705 unique values. 7.068982+201.3045 [0, 47943.96]
#imp_op_var39_efect_ult3 (Real Valued): 680 unique values. 112.7174+850.4504 [0, 131100]
#imp_op_var41_efect_ult3 (Real Valued): 673 unique values. 112.0976+845.8037 [0, 131100]
#saldo_var14 (Real Valued): 636 unique values. 63.62686+2686.515 [0, 450000]
#imp_op_var40_comer_ult1 (Real Valued): 601 unique values. 4.068145+115.0159 [0, 21093.96]
#saldo_var31 (Real Valued): 567 unique values. 247.4915+17084.24 [0, 6119500]
#saldo_var1 (Real Valued): 565 unique values. 28.64053+7765.61 [-0.9, 3e+06]
#saldo_var40 (Real Valued): 560 unique values. 4.57422+119.446 [-0.9, 9966]
#saldo_medio_var13_largo_hace2 (Real Valued): 547 unique values. 793.9402+13562.28 [0, 1008000]
#imp_op_var39_efect_ult1 (Real Valued): 503 unique values. 68.26483+528.4804 [0, 67500]
#imp_op_var41_efect_ult1 (Real Valued): 497 unique values. 67.82307+524.5429 [0, 67500]
#saldo_medio_var13_largo_hace3 (Real Valued): 495 unique values. 169.372+4660.327 [0, 534000]
#imp_op_var40_ult1 (Real Valued): 462 unique values. 3.397988+113.4237 [0, 23799.96]
#saldo_medio_var13_largo_ult1 (Real Valued): 417 unique values. 971.9136+15732.39 [0, 1500000]
#saldo_var13_largo (Real Valued): 388 unique values. 1488.479+19697.03 [0, 1500000]
#imp_var7_recib_ult1 (Real Valued): 320 unique values. 141.7602+6910.232 [0, 1039260]
#saldo_var20 (Real Valued): 301 unique values. 18.45219+1809.63 [0, 455858.2]
#saldo_medio_var44_ult1 (Real Valued): 288 unique values. 85.98633+4360.172 [0, 681462.9]
#saldo_medio_var44_ult3 (Real Valued): 288 unique values. 63.5501+3094.025 [0, 397884.3]
#imp_aport_var13_ult1 (Real Valued): 287 unique values. 589.4449+10786.61 [0, 450000]
#saldo_var44 (Real Valued): 267 unique values. 102.1546+5082.714 [0, 740006.6]
#var3 (Integer Valued): 231 unique values. -1551.539+39394.01 [-999999, 238]
#saldo_medio_var17_ult1 (Real Valued): 228 unique values. 94.32871+10791.04 [0, 3998687]
#saldo_medio_var17_ult3 (Real Valued): 228 unique values. 78.49367+9422.151 [0, 3525777]
#saldo_var17 (Real Valued): 215 unique values. 129.2718+16259.09 [0, 6119500]
#num_var45_ult3 (Integer Valued): 197 unique values. 13.57063+33.01762 [0, 825]
#saldo_medio_var44_hace2 (Real Valued): 197 unique values. 47.53023+3011.415 [0, 453893.4]
#imp_compra_var44_ult1 (Real Valued): 175 unique values. 83.62604+9895.33 [0, 3410059]
#saldo_medio_var17_hace2 (Real Valued): 164 unique values. 63.78418+11041.35 [-0.03, 4210084]
#saldo_var32 (Real Valued): 150 unique values. 3.562878+136.0992 [0, 13522.89]
#imp_sal_var16_ult1 (Real Valued): 124 unique values. 5.197786+400.4784 [0, 105000]
#num_op_var39_ult3 (Integer Valued): 119 unique values. 4.651741+17.34866 [0, 522]
#num_op_var41_ult3 (Integer Valued): 116 unique values. 4.57832+17.11056 [0, 489]
#num_op_var41_comer_ult3 (Integer Valued): 114 unique values. 3.486143+14.39538 [0, 438]
#num_op_var39_comer_ult3 (Integer Valued): 113 unique values. 3.62516+15.0516 [0, 600]
#saldo_medio_var33_ult1 (Real Valued): 107 unique values. 14.83929+1044.317 [-0.6, 162355.8]
#saldo_medio_var33_ult3 (Real Valued): 107 unique values. 10.72662+738.4301 [-0.6, 138055]
#saldo_var33 (Real Valued): 106 unique values. 16.06515+1076.136 [0, 162964.6]
#num_var45_ult1 (Integer Valued): 105 unique values. 4.331603+14.25032 [0, 510]
#num_var45_hace2 (Integer Valued): 102 unique values. 5.403667+14.623 [0, 426]
#var15 (Integer Valued): 101 unique values. 33.1759+12.94428 [5, 105]
#saldo_medio_var33_hace2 (Real Valued): 90 unique values. 10.1842+750.3009 [0, 146605.3]
#num_op_var39_ult1 (Integer Valued): 81 unique values. 2.924545+11.15037 [0, 468]
#imp_venta_var44_ult1 (Real Valued): 79 unique values. 56.6849+8282.173 [0, 2754476]
#num_op_var41_ult1 (Integer Valued): 78 unique values. 2.871297+10.96082 [0, 468]
#num_med_var45_ult3 (Integer Valued): 77 unique values. 4.000145+10.833 [0, 273]
#num_op_var39_comer_ult1 (Integer Valued): 73 unique values. 2.20433+9.154147 [0, 438]
#num_var45_hace3 (Integer Valued): 71 unique values. 3.835364+10.07737 [0, 339]
#num_op_var41_comer_ult1 (Integer Valued): 69 unique values. 2.131225+8.870698 [0, 438]
#imp_aport_var17_ult1 (Real Valued): 68 unique values. 27.41612+2197.478 [0, 432457.3]
#num_op_var41_hace2 (Integer Valued): 64 unique values. 1.613865+7.404416 [0, 249]
#num_op_var39_hace2 (Integer Valued): 63 unique values. 1.633208+7.459715 [0, 249]
#num_op_var40_comer_ult3 (Integer Valued): 62 unique values. 0.1390166+3.749499 [0, 582]
#saldo_medio_var44_hace3 (Real Valued): 60 unique values. 6.62519+757.6116 [0, 217762.2]
#imp_compra_var44_hace3 (Real Valued): 59 unique values. 31.51976+2856.289 [0, 596253]
#delta_imp_aport_var13_1y3 (Real Valued): 51 unique values. 49197171+699681763 [-1, 1e+10]
#imp_op_var40_efect_ult3 (Real Valued): 50 unique values. 0.6198284+39.5285 [0, 6600]
#imp_reemb_var13_ult1 (Real Valued): 48 unique values. 41.61107+2731.71 [0, 450000]
#num_var43_recib_ult1 (Integer Valued): 48 unique values. 0.8161396+3.688661 [0, 282]
#saldo_medio_var33_hace3 (Real Valued): 45 unique values. 1.346302+144.9454 [0, 40080.6]
#num_op_var41_efect_ult3 (Integer Valued): 43 unique values. 1.216672+5.201159 [0, 156]
#num_op_var39_efect_ult3 (Integer Valued): 43 unique values. 1.220999+5.223692 [0, 156]
#imp_op_var40_efect_ult1 (Real Valued): 42 unique values. 0.4417574+32.35967 [0, 6600]
#num_op_var40_ult3 (Integer Valued): 41 unique values. 0.07342036+2.185707 [0, 351]
#num_var22_ult3 (Integer Valued): 39 unique values. 3.044139+6.206356 [0, 234]
#num_op_var40_comer_ult1 (Integer Valued): 39 unique values. 0.07310423+1.851307 [0, 210]
#saldo_medio_var17_hace3 (Real Valued): 37 unique values. 21.35409+6131.54 [-0.06, 2368559]
#imp_aport_var17_hace3 (Real Valued): 35 unique values. 60.52824+15750.38 [0, 6083692]
#num_op_var40_ult1 (Integer Valued): 34 unique values. 0.05324754+1.561945 [0, 234]
#imp_reemb_var17_ult1 (Real Valued): 31 unique values. 9.928591+1006.98 [0, 211775.6]
#delta_imp_compra_var44_1y3 (Real Valued): 29 unique values. 9747231+312054384 [-1, 1e+10]
#num_op_var39_efect_ult1 (Integer Valued): 28 unique values. 0.7262609+3.244832 [0, 90]
#num_op_var41_hace3 (Integer Valued): 27 unique values. 0.0931585+1.258035 [0, 144]
#num_op_var39_hace3 (Integer Valued): 27 unique values. 0.09398833+1.265537 [0, 144]
#num_var37_0 (Integer Valued): 27 unique values. 0.4137502+2.19774 [0, 114]
#num_var37 (Integer Valued): 27 unique values. 0.4137502+2.19774 [0, 114]
#var21 (Integer Valued): 27 unique values. 32.56892+383.9295 [0, 30000]
#num_op_var41_efect_ult1 (Integer Valued): 27 unique values. 0.7230996+3.226856 [0, 90]
#num_var43_emit_ult1 (Integer Valued): 27 unique values. 0.3921548+2.120021 [0, 180]
#num_op_var40_hace2 (Integer Valued): 26 unique values. 0.01934298+0.8166237 [0, 117]
#imp_aport_var33_hace3 (Real Valued): 25 unique values. 2.665853+222.9601 [0, 36497.67]
#num_var22_hace2 (Integer Valued): 25 unique values. 1.2992+3.444711 [0, 123]
#num_var37_med_ult2 (Integer Valued): 23 unique values. 0.2614563+1.639784 [0, 117]
#num_var22_hace3 (Integer Valued): 21 unique values. 1.18577+3.279359 [0, 108]
#num_trasp_var11_ult1 (Integer Valued): 21 unique values. 0.1221631+1.161938 [0, 93]
#num_ent_var16_ult1 (Integer Valued): 19 unique values. 0.1867714+1.026261 [0, 84]
#num_var22_ult1 (Integer Valued): 19 unique values. 0.5591683+2.086463 [0, 96]
#imp_aport_var33_ult1 (Real Valued): 18 unique values. 0.3153049+66.48285 [0, 24000]
#imp_trasp_var33_in_ult1 (Real Valued): 18 unique values. 1.030191+144.9559 [0, 35310.6]
#num_med_var22_ult3 (Integer Valued): 17 unique values. 0.6380616+1.835578 [0, 78]
#delta_num_compra_var44_1y3 (Real Valued): 15 unique values. 9747231+312054384 [-1, 1e+10]
#num_var35 (Integer Valued): 14 unique values. 3.30198+2.870254 [0, 42]
#imp_trasp_var33_in_hace3 (Real Valued): 14 unique values. 3.297142+511.2715 [0, 149252.1]
#num_var30_0 (Integer Valued): 13 unique values. 3.374057+1.317344 [0, 114]
#delta_imp_aport_var33_1y3 (Real Valued): 13 unique values. 724456.3+85112197 [-1, 1e+10]
#imp_trasp_var17_in_ult1 (Real Valued): 13 unique values. 4.977631+801.9853 [0, 199665.8]
#num_var17_0 (Integer Valued): 12 unique values. 0.01092612+0.3654363 [0, 66]
#num_var31_0 (Integer Valued): 12 unique values. 0.01954056+0.4091737 [0, 66]
#num_compra_var44_ult1 (Integer Valued): 12 unique values. 0.007606791+0.313682 [0, 51]
#num_var39_0 (Integer Valued): 11 unique values. 2.728368+1.143373 [0, 39]
#num_var41_0 (Integer Valued): 11 unique values. 2.701675+1.107408 [0, 39]
#delta_imp_aport_var17_1y3 (Real Valued): 11 unique values. 5005335+223670790 [-1, 1e+10]
#num_venta_var44_ult1 (Integer Valued): 11 unique values. 0.003714485+0.2475391 [0, 45]
#num_var4 (Integer Valued): 10 unique values. 1.080546+0.9105813 [0, 9]
#num_var26_0 (Integer Valued): 10 unique values. 0.09149883+0.6285751 [0, 33]
#num_var26 (Integer Valued): 10 unique values. 0.09149883+0.6285751 [0, 33]
#num_var25_0 (Integer Valued): 10 unique values. 0.08705331+0.6104144 [0, 33]
#num_var25 (Integer Valued): 10 unique values. 0.08705331+0.6104144 [0, 33]
#num_var30 (Integer Valued): 10 unique values. 2.386458+1.644342 [0, 33]
#num_aport_var17_ult1 (Integer Valued): 10 unique values. 0.003418117+0.1902756 [0, 27]
#num_op_var40_efect_ult1 (Integer Valued): 10 unique values. 0.003161264+0.1876135 [0, 33]
#num_op_var40_efect_ult3 (Integer Valued): 10 unique values. 0.00432698+0.2328456 [0, 33]
#num_var17 (Integer Valued): 9 unique values. 0.008080981+0.2603911 [0, 27]
#num_var31 (Integer Valued): 9 unique values. 0.01568777+0.3092885 [0, 27]
#num_var42_0 (Integer Valued): 9 unique values. 3.204475+0.9094442 [0, 114]
#imp_trasp_var17_in_hace3 (Real Valued): 9 unique values. 5.620317+1484.075 [0, 555824.4]
#imp_venta_var44_hace3 (Real Valued): 9 unique values. 5.81531+1286.811 [0, 438202.5]
#num_compra_var44_hace3 (Integer Valued): 9 unique values. 0.002430222+0.1800473 [0, 45]
#num_var13_0 (Integer Valued): 8 unique values. 0.1695821+0.7537059 [0, 21]
#num_var13_largo_0 (Integer Valued): 8 unique values. 0.03850815+0.415315 [0, 21]
#num_var13_largo (Integer Valued): 8 unique values. 0.03647308+0.3928403 [0, 21]
#num_var13 (Integer Valued): 8 unique values. 0.1621531+0.7235146 [0, 21]
#delta_imp_venta_var44_1y3 (Real Valued): 8 unique values. 4807756+219213956 [-1, 1e+10]
#delta_num_aport_var13_1y3 (Real Valued): 8 unique values. 49197171+699681763 [-1, 1e+10]
#num_aport_var13_hace3 (Integer Valued): 8 unique values. 0.07531711+0.5459375 [0, 24]
#saldo_medio_var13_medio_hace2 (Real Valued): 8 unique values. 0.1872255+33.86857 [0, 8129.04]
#saldo_medio_var13_medio_ult3 (Real Valued): 8 unique values. 0.5261697+84.06159 [0, 18870.99]
#saldo_medio_var29_hace2 (Real Valued): 8 unique values. 0.1759405+35.17292 [0, 10430.01]
#num_var12_0 (Integer Valued): 7 unique values. 0.2124172+0.8524591 [0, 111]
#num_var42 (Integer Valued): 7 unique values. 2.219128+1.499438 [0, 18]
#delta_num_aport_var17_1y3 (Real Valued): 7 unique values. 5005335+223670790 [-1, 1e+10]
#delta_num_venta_var44_1y3 (Real Valued): 7 unique values. 4807756+219213956 [-1, 1e+10]
#num_aport_var13_ult1 (Integer Valued): 7 unique values. 0.01792042+0.2764757 [0, 30]
#saldo_medio_var13_medio_ult1 (Real Valued): 7 unique values. 0.8651135+144.3056 [0, 36000]
#saldo_medio_var29_ult1 (Real Valued): 7 unique values. 0.2245624+45.85384 [0, 13793.67]
#saldo_medio_var29_ult3 (Real Valued): 7 unique values. 0.1736297+32.1613 [0, 8030.16]
#num_var5_0 (Integer Valued): 6 unique values. 2.891944+0.6580709 [0, 15]
#num_var5 (Integer Valued): 6 unique values. 1.998966+1.431536 [0, 15]
#num_var14_0 (Integer Valued): 6 unique values. 0.0734006+0.5540104 [0, 111]
#num_op_var40_hace3 (Integer Valued): 6 unique values. 0.0008298318+0.1381505 [0, 48]
#saldo_var13_medio (Integer Valued): 6 unique values. 0.8001949+136.478 [0, 36000]
#imp_var7_emit_ult1 (Real Valued): 6 unique values. 6.805839+1603.794 [0, 526500]
#num_aport_var17_hace3 (Integer Valued): 6 unique values. 0.001580632+0.1337844 [0, 39]
#num_var7_recib_ult1 (Integer Valued): 6 unique values. 0.009385002+0.206228 [0, 24]
#num_reemb_var17_ult1 (Integer Valued): 6 unique values. 0.001106442+0.0961537 [0, 21]
#num_sal_var16_ult1 (Integer Valued): 6 unique values. 0.00470238+0.1439577 [0, 15]
#num_var32_0 (Integer Valued): 5 unique values. 0.004445527+0.1429328 [0, 12]
#num_var32 (Integer Valued): 5 unique values. 0.004445527+0.1429328 [0, 12]
#saldo_var6 (Real Valued): 5 unique values. 0.3562053+71.82753 [0, 19531.8]
#saldo_var29 (Real Valued): 5 unique values. 0.3562053+71.82753 [0, 19531.8]
#saldo_var34 (Integer Valued): 5 unique values. 0.7519857+172.5589 [0, 54000]
#var36 (Integer Valued): 5 unique values. 40.52277+47.3752 [0, 99]
#delta_num_aport_var33_1y3 (Real Valued): 5 unique values. 724456.3+85112197 [-1, 1e+10]
#imp_amort_var34_ult1 (Real Valued): 5 unique values. 0.01985234+4.205855 [0, 1096.02]
#imp_trasp_var17_out_ult1 (Real Valued): 5 unique values. 0.9580809+252.8928 [0, 69622.29]
#num_aport_var33_hace3 (Integer Valued): 5 unique values. 0.001027411+0.06798767 [0, 12]
#num_venta_var44_hace3 (Integer Valued): 5 unique values. 0.0003754001+0.07097997 [0, 24]
#num_var12 (Integer Valued): 4 unique values. 0.1375347+0.6342133 [0, 15]
#num_var14 (Integer Valued): 4 unique values. 0.01584584+0.2207018 [0, 12]
#num_var24_0 (Integer Valued): 4 unique values. 0.1284461+0.6099534 [0, 9]
#num_var33_0 (Integer Valued): 4 unique values. 0.002706832+0.1006407 [0, 12]
#saldo_var18 (Integer Valued): 4 unique values. 23.31432+7761.807 [0, 3e+06]
#imp_amort_var18_ult1 (Real Valued): 4 unique values. 0.133349+41.13473 [0, 15691.8]
#num_aport_var33_ult1 (Integer Valued): 4 unique values. 0.000592737+0.05333671 [0, 12]
#num_meses_var5_ult3 (Integer Valued): 4 unique values. 1.981441+1.297777 [0, 3]
#num_meses_var8_ult3 (Integer Valued): 4 unique values. 0.05445277+0.3366657 [0, 3]
#num_meses_var12_ult3 (Integer Valued): 4 unique values. 0.1016083+0.486021 [0, 3]
#num_meses_var13_corto_ult3 (Integer Valued): 4 unique values. 0.09895415+0.4847579 [0, 3]
#num_meses_var13_largo_ult3 (Integer Valued): 4 unique values. 0.01818385+0.2180135 [0, 3]
#num_meses_var17_ult3 (Integer Valued): 4 unique values. 0.002871481+0.07702166 [0, 3]
#num_meses_var33_ult3 (Integer Valued): 4 unique values. 0.001593804+0.0627367 [0, 3]
#num_meses_var39_vig_ult3 (Integer Valued): 4 unique values. 1.595365+0.7174265 [0, 3]
#num_meses_var44_ult3 (Integer Valued): 4 unique values. 0.003589352+0.08721886 [0, 3]
#num_var1_0 (Integer Valued): 3 unique values. 0.03544567+0.3250755 [0, 6]
#num_var1 (Integer Valued): 3 unique values. 0.01155837+0.1864909 [0, 6]
#num_var8_0 (Integer Valued): 3 unique values. 0.09981691+0.5382623 [0, 6]
#num_var13_corto_0 (Integer Valued): 3 unique values. 0.1309158+0.6191257 [0, 6]
#num_var13_corto (Integer Valued): 3 unique values. 0.1255417+0.601215 [0, 6]
#num_var13_medio_0 (Integer Valued): 3 unique values. 0.0001580632+0.02434576 [0, 6]
#num_var24 (Integer Valued): 3 unique values. 0.1139833+0.5739635 [0, 6]
#num_var33 (Integer Valued): 3 unique values. 0.002272158+0.08875974 [0, 6]
#num_var40_0 (Integer Valued): 3 unique values. 0.03530737+0.3240861 [0, 6]
#num_var44_0 (Integer Valued): 3 unique values. 0.005907612+0.1343268 [0, 6]
#num_var44 (Integer Valued): 3 unique values. 0.005334633+0.1273289 [0, 6]
#delta_imp_reemb_var17_1y3 (Integer Valued): 3 unique values. 2041650+142872190 [-1, 1e+10]
#delta_imp_trasp_var17_in_1y3 (Integer Valued): 3 unique values. 790316+88896498 [-1, 1e+10]
#delta_imp_trasp_var33_in_1y3 (Integer Valued): 3 unique values. 1119614+105806254 [-1, 1e+10]
#delta_num_reemb_var17_1y3 (Integer Valued): 3 unique values. 2041650+142872190 [-1, 1e+10]
#delta_num_trasp_var17_in_1y3 (Integer Valued): 3 unique values. 790316+88896498 [-1, 1e+10]
#delta_num_trasp_var33_in_1y3 (Integer Valued): 3 unique values. 1119614+105806254 [-1, 1e+10]
#imp_trasp_var33_out_ult1 (Real Valued): 3 unique values. 0.05680614+16.36095 [0, 5625.33]
#num_var7_emit_ult1 (Integer Valued): 3 unique values. 0.0001383053+0.02775868 [0, 9]
#num_meses_var29_ult3 (Integer Valued): 3 unique values. 9.220353e-05+0.01257202 [0, 2]
#num_trasp_var17_in_hace3 (Integer Valued): 3 unique values. 0.0002173369+0.03174291 [0, 6]
#num_trasp_var17_in_ult1 (Integer Valued): 3 unique values. 0.0002766106+0.03266278 [0, 6]
#num_trasp_var33_in_ult1 (Integer Valued): 3 unique values. 0.0003754001+0.03692104 [0, 6]
#ind_var1_0 (Binary Valued): 2 unique values. 0.01178229+0.1079053 [0, 1]
#ind_var1 (Binary Valued): 2 unique values. 0.003839619+0.06184578 [0, 1]
#ind_var5_0 (Binary Valued): 2 unique values. 0.9575139+0.201696 [0, 1]
#ind_var5 (Binary Valued): 2 unique values. 0.6637535+0.4724259 [0, 1]
#ind_var6_0 (Binary Valued): 2 unique values. 9.87895e-05+0.009938832 [0, 1]
#ind_var6 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var8_0 (Binary Valued): 2 unique values. 0.03325913+0.1793131 [0, 1]
#ind_var8 (Binary Valued): 2 unique values. 0.02924169+0.1684838 [0, 1]
#ind_var12_0 (Binary Valued): 2 unique values. 0.06806597+0.2518599 [0, 1]
#ind_var12 (Binary Valued): 2 unique values. 0.04541024+0.2082029 [0, 1]
#ind_var13_0 (Binary Valued): 2 unique values. 0.05272725+0.2234892 [0, 1]
#ind_var13_corto_0 (Binary Valued): 2 unique values. 0.04321053+0.2033314 [0, 1]
#ind_var13_corto (Binary Valued): 2 unique values. 0.0418143+0.2001653 [0, 1]
#ind_var13_largo_0 (Binary Valued): 2 unique values. 0.0104651+0.1017627 [0, 1]
#ind_var13_largo (Binary Valued): 2 unique values. 0.01028728+0.1009035 [0, 1]
#ind_var13_medio_0 (Binary Valued): 2 unique values. 4.610177e-05+0.006789694 [0, 1]
#ind_var13_medio (Binary Valued): 2 unique values. 4.610177e-05+0.006789694 [0, 1]
#ind_var13 (Binary Valued): 2 unique values. 0.05136395+0.2207397 [0, 1]
#ind_var14_0 (Binary Valued): 2 unique values. 0.02403219+0.1531496 [0, 1]
#ind_var14 (Binary Valued): 2 unique values. 0.005222672+0.07207933 [0, 1]
#ind_var17_0 (Binary Valued): 2 unique values. 0.001705765+0.04126581 [0, 1]
#ind_var17 (Binary Valued): 2 unique values. 0.001422569+0.03769024 [0, 1]
#ind_var18_0 (Binary Valued): 2 unique values. 1.97579e-05+0.004444957 [0, 1]
#ind_var18 (Binary Valued): 2 unique values. 1.97579e-05+0.004444957 [0, 1]
#ind_var19 (Binary Valued): 2 unique values. 0.003971338+0.0628935 [0, 1]
#ind_var20_0 (Binary Valued): 2 unique values. 0.003523492+0.05925454 [0, 1]
#ind_var20 (Binary Valued): 2 unique values. 0.002568527+0.05061567 [0, 1]
#ind_var24_0 (Binary Valued): 2 unique values. 0.04264413+0.2020542 [0, 1]
#ind_var24 (Binary Valued): 2 unique values. 0.0379681+0.1911198 [0, 1]
#ind_var25_cte (Binary Valued): 2 unique values. 0.02720004+0.1626665 [0, 1]
#ind_var26_0 (Binary Valued): 2 unique values. 0.02526377+0.1569257 [0, 1]
#ind_var26_cte (Binary Valued): 2 unique values. 0.02841845+0.1661656 [0, 1]
#ind_var26 (Binary Valued): 2 unique values. 0.02526377+0.1569257 [0, 1]
#ind_var25_0 (Binary Valued): 2 unique values. 0.02420343+0.1536808 [0, 1]
#ind_var25 (Binary Valued): 2 unique values. 0.02420343+0.1536808 [0, 1]
#ind_var29_0 (Binary Valued): 2 unique values. 9.87895e-05+0.009938832 [0, 1]
#ind_var29 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var30_0 (Binary Valued): 2 unique values. 0.9953964+0.06769363 [0, 1]
#ind_var30 (Binary Valued): 2 unique values. 0.7334001+0.4421829 [0, 1]
#ind_var31_0 (Binary Valued): 2 unique values. 0.004307222+0.06548815 [0, 1]
#ind_var31 (Binary Valued): 2 unique values. 0.003760587+0.06120841 [0, 1]
#ind_var32_cte (Binary Valued): 2 unique values. 0.001323779+0.03635981 [0, 1]
#ind_var32_0 (Binary Valued): 2 unique values. 0.00115913+0.03402637 [0, 1]
#ind_var32 (Binary Valued): 2 unique values. 0.00115913+0.03402637 [0, 1]
#ind_var33_0 (Binary Valued): 2 unique values. 0.0008100739+0.02845036 [0, 1]
#ind_var33 (Binary Valued): 2 unique values. 0.0006981125+0.02641268 [0, 1]
#ind_var34_0 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var34 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var37_cte (Binary Valued): 2 unique values. 0.07151043+0.2576764 [0, 1]
#ind_var37_0 (Binary Valued): 2 unique values. 0.0647664+0.246114 [0, 1]
#ind_var37 (Binary Valued): 2 unique values. 0.0647664+0.246114 [0, 1]
#ind_var39_0 (Binary Valued): 2 unique values. 0.8816502+0.323023 [0, 1]
#ind_var40_0 (Binary Valued): 2 unique values. 0.01174936+0.1077562 [0, 1]
#ind_var40 (Binary Valued): 2 unique values. 0.003806689+0.06158103 [0, 1]
#ind_var41_0 (Binary Valued): 2 unique values. 0.8801025+0.3248427 [0, 1]
#ind_var39 (Binary Valued): 2 unique values. 0.003806689+0.06158103 [0, 1]
#ind_var44_0 (Binary Valued): 2 unique values. 0.001949446+0.04410962 [0, 1]
#ind_var44 (Binary Valued): 2 unique values. 0.001765039+0.04197541 [0, 1]
#num_var6_0 (Binary Valued): 2 unique values. 0.0002963685+0.0298165 [0, 3]
#num_var6 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var8 (Binary Valued): 2 unique values. 0.08772508+0.5054515 [0, 3]
#num_var13_medio (Binary Valued): 2 unique values. 0.0001383053+0.02036908 [0, 3]
#num_var18_0 (Binary Valued): 2 unique values. 5.92737e-05+0.01333487 [0, 3]
#num_var18 (Binary Valued): 2 unique values. 5.92737e-05+0.01333487 [0, 3]
#num_var20_0 (Binary Valued): 2 unique values. 0.01057048+0.1777636 [0, 3]
#num_var20 (Binary Valued): 2 unique values. 0.007705581+0.151847 [0, 3]
#num_var29_0 (Binary Valued): 2 unique values. 0.0002963685+0.0298165 [0, 3]
#num_var29 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var34_0 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var34 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var40 (Binary Valued): 2 unique values. 0.01142007+0.1847431 [0, 3]
#num_var39 (Binary Valued): 2 unique values. 0.01142007+0.1847431 [0, 3]
#delta_imp_amort_var18_1y3 (Binary Valued): 2 unique values. 197579+44449566 [0, 1e+10]
#delta_imp_amort_var34_1y3 (Binary Valued): 2 unique values. 263438.7+51325769 [0, 1e+10]
#delta_imp_reemb_var13_1y3 (Binary Valued): 2 unique values. 4280878+206859225 [0, 1e+10]
#delta_imp_reemb_var33_1y3 (Binary Valued): 2 unique values. 65859.67+25663138 [0, 1e+10]
#delta_imp_trasp_var17_out_1y3 (Binary Valued): 2 unique values. 263438.7+51325769 [0, 1e+10]
#delta_imp_trasp_var33_out_1y3 (Binary Valued): 2 unique values. 131719.3+36293039 [0, 1e+10]
#delta_num_reemb_var13_1y3 (Binary Valued): 2 unique values. 4280878+206859225 [0, 1e+10]
#delta_num_reemb_var33_1y3 (Binary Valued): 2 unique values. 65859.67+25663138 [0, 1e+10]
#delta_num_trasp_var17_out_1y3 (Binary Valued): 2 unique values. 263438.7+51325769 [0, 1e+10]
#delta_num_trasp_var33_out_1y3 (Binary Valued): 2 unique values. 131719.3+36293039 [0, 1e+10]
#imp_reemb_var17_hace3 (Real Valued): 2 unique values. 0.07921041+30.86544 [0, 12027.15]
#imp_reemb_var33_ult1 (Binary Valued): 2 unique values. 0.00790316+3.079577 [0, 1200]
#ind_var7_emit_ult1 (Binary Valued): 2 unique values. 3.292983e-05+0.005738377 [0, 1]
#ind_var7_recib_ult1 (Binary Valued): 2 unique values. 0.002588285+0.05080947 [0, 1]
#ind_var10_ult1 (Binary Valued): 2 unique values. 0.08106008+0.2729283 [0, 1]
#ind_var10cte_ult1 (Binary Valued): 2 unique values. 0.0919862+0.2890074 [0, 1]
#ind_var9_cte_ult1 (Binary Valued): 2 unique values. 0.0969125+0.2958396 [0, 1]
#ind_var9_ult1 (Binary Valued): 2 unique values. 0.08623006+0.2807044 [0, 1]
#ind_var43_emit_ult1 (Binary Valued): 2 unique values. 0.06729541+0.2505337 [0, 1]
#ind_var43_recib_ult1 (Binary Valued): 2 unique values. 0.128479+0.3346235 [0, 1]
#num_meses_var13_medio_ult3 (Binary Valued): 2 unique values. 9.220353e-05+0.01357939 [0, 2]
#num_reemb_var13_ult1 (Binary Valued): 2 unique values. 0.001284263+0.06205777 [0, 3]
#num_reemb_var17_hace3 (Binary Valued): 2 unique values. 1.97579e-05+0.007698941 [0, 3]
#num_reemb_var33_ult1 (Binary Valued): 2 unique values. 1.97579e-05+0.007698941 [0, 3]
#num_trasp_var17_out_ult1 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_trasp_var33_in_hace3 (Binary Valued): 2 unique values. 0.0002568527+0.02775783 [0, 3]
#num_trasp_var33_out_ult1 (Binary Valued): 2 unique values. 3.95158e-05+0.01088791 [0, 3]
#saldo_medio_var29_hace3 (Real Valued): 2 unique values. 0.0009562824+0.3726288 [0, 145.2]
#ind_var2_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var2 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var27_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var28_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var28 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var27 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var41 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var46_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var46 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var27_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var28_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var28 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var27 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var41 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var46_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var46 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var28 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var27 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var41 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var46 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_amort_var18_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_amort_var34_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_reemb_var13_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_reemb_var33_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_trasp_var17_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_trasp_var33_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var2_0_ult1 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var2_ult1 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_reemb_var13_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_reemb_var33_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_trasp_var17_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_trasp_var33_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var2_ult1 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_medio_var13_medio_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 17 18 19 21 23 25 26
# 34 100 23 16 11 10 8 10 6 9 4 3 3 2 1 1 2 2 2 1 2 1
# 27 28 29 31 34 35 37 39 41 42 43 45 48 50 51 59 60 62 63 64 68 69
# 7 1 1 1 1 1 1 2 1 1 2 1 2 1 1 1 1 1 1 1 1 1
# 71 73 77 78 79 81 90 101 102 105 106 107 113 114 116 119 124 150 164 175 197 215
# 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 2 1
# 228 231 267 287 288 301 320 388 417 462 495 497 503 547 560 565 567 601 636 673 680 705
# 2 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 712 793 938 999 1254 1469 1608 1671 2110 2618 2902 3006 3139 3948 4240 4260 4364 4513 5041 5469 5858 6676
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 6801 6808 7247 13155 13249 13414 14597 14814 16003 16284 20454 25270 25650 28458 29322 30654 109982 151838
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# factor finder
# Profile every column of the combined train/test data: record cardinality,
# an encoded type flag, and summary statistics, then print a report sorted
# by descending number of unique values.
#
# Type flag encoding (column 3, "NonReal"):
#   0 = real valued
#   1 = integer valued (every observation equals its floor)
#   2 = binary (integer valued with exactly 2 unique values)
#   3 = constant / unique (integer valued with a single unique value)

# Drop the TARGET column (371) from train so train and test bind row-wise.
train_temp <- train[, -371]
test_temp <- test
full_data <- rbind(train_temp, test_temp)

# Preallocate one row per variable (370 feature columns expected).
df_temp <- data.frame(VarName = rep(0, 370), Factors = rep(0, 370), NonReal = rep(0, 370), Mean = rep(0, 370), SD = rep(0, 370), Min = rep(0, 370), Max = rep(0, 370))
df_temp$VarName <- as.character(df_temp$VarName)
for (i in colnames(full_data)) {
  j <- which(colnames(full_data) == i)
  df_temp[j, 1] <- i
  df_temp[j, 2] <- NROW(unique(full_data[, i]))
  # Integer valued when no observation has a fractional part.
  df_temp[j, 3] <- ifelse(sum(floor(full_data[, i]) == full_data[, i]) == nrow(full_data), 1, 0)
  # BUG FIX: the original condition tested `df_temp[j, 2] == 2`, which made
  # the inner branch (Factors == 1 -> code 3, "Unique") unreachable, so
  # constant columns were mislabeled "(Integer Valued)". Testing <= 2
  # restores the intended encoding: 1 unique value -> 3, 2 unique -> 2.
  df_temp[j, 3] <- ifelse((df_temp[j, 3] == 1) & (df_temp[j, 2] <= 2), ifelse(df_temp[j, 2] == 1, 3, 2), df_temp[j, 3])
  df_temp[j, 4] <- mean(full_data[, i])
  df_temp[j, 5] <- sd(full_data[, i])
  df_temp[j, 6] <- min(full_data[, i])
  df_temp[j, 7] <- max(full_data[, i])
  # Progress line while scanning, one variable at a time.
  cat(i, ifelse(df_temp[j, 3] > 0, ifelse(df_temp[j, 3] == 1, " (Integer Valued)", ifelse(df_temp[j, 3] == 2, " (Binary Valued)", " (Unique)")), " (Real Valued)"), ": ", df_temp[j, 2], " unique values.\n", sep = "")
}

# Final report: sort by descending cardinality and print mean+sd [min, max].
df_temp <- df_temp[order(-df_temp[, 2]), ]
for (i in seq_len(nrow(df_temp))) {
  cat("#", df_temp[i, 1], ifelse(df_temp[i, 3] > 0, ifelse(df_temp[i, 3] == 1, " (Integer Valued)", ifelse(df_temp[i, 3] == 2, " (Binary Valued)", " (Unique)")), " (Real Valued)"), ": ", df_temp[i, 2], " unique values. ", df_temp[i, 4], "+", df_temp[i, 5], " [", df_temp[i, 6], ", ", df_temp[i, 7], "]\n", sep = "")
}
# Distribution of cardinalities across all variables.
table(df_temp[, 2])
#ID (Integer Valued): 151838 unique values. 75919.5+43832 [1, 151838]
#var38 (Real Valued): 109982 unique values. 117311+217717.5 [1202.73, 28894396]
#saldo_var30 (Real Valued): 30654 unique values. 13869.89+64512.88 [-4942.26, 4212656]
#saldo_medio_var5_ult3 (Real Valued): 29322 unique values. 1054.428+8164.239 [-1036.08, 544365.6]
#saldo_var42 (Real Valued): 28458 unique values. 7353.417+51614.02 [-4942.26, 4212656]
#saldo_medio_var5_ult1 (Real Valued): 25650 unique values. 1067.076+9701.489 [-1605.15, 656187.1]
#saldo_medio_var5_hace2 (Real Valued): 25270 unique values. 1604.172+12086.77 [-1573.23, 812137.3]
#saldo_var5 (Real Valued): 20454 unique values. 1008.811+9814.646 [-2895.72, 619329.2]
#imp_op_var39_comer_ult3 (Real Valued): 16284 unique values. 121.3306+577.0944 [0, 47943.96]
#imp_op_var41_comer_ult3 (Real Valued): 16003 unique values. 114.2617+526.2707 [0, 28927.89]
#imp_op_var39_ult1 (Real Valued): 14814 unique values. 140.6267+715.3216 [0, 72511.77]
#imp_op_var41_ult1 (Real Valued): 14597 unique values. 137.2287+697.7656 [0, 72511.77]
#imp_op_var39_comer_ult1 (Real Valued): 13414 unique values. 73.33668+351.9674 [0, 21093.96]
#saldo_medio_var5_hace3 (Real Valued): 13249 unique values. 896.861+8767.932 [-32.85, 1542339]
#imp_op_var41_comer_ult1 (Real Valued): 13155 unique values. 69.26854+325.1229 [0, 14784.9]
#saldo_var37 (Real Valued): 7247 unique values. 37.29588+501.355 [0, 90000]
#imp_trans_var37_ult1 (Real Valued): 6808 unique values. 1948.867+28468.85 [0, 3e+06]
#saldo_medio_var12_ult3 (Real Valued): 6801 unique values. 4572.94+37818.87 [0, 3002214]
#saldo_medio_var12_ult1 (Real Valued): 6676 unique values. 5881.09+48741.89 [0, 4356643]
#saldo_var12 (Real Valued): 5858 unique values. 6205.616+50651.96 [0, 4202599]
#saldo_medio_var12_hace2 (Real Valued): 5469 unique values. 4178.141+41806.71 [0, 4500000]
#saldo_var24 (Real Valued): 5041 unique values. 6123.537+50557.84 [0, 4202599]
#saldo_medio_var13_corto_ult3 (Real Valued): 4513 unique values. 3863.91+25401.18 [0, 450000]
#saldo_medio_var8_ult3 (Real Valued): 4364 unique values. 107.4197+1876.621 [-1844.52, 216012]
#saldo_medio_var8_ult1 (Real Valued): 4260 unique values. 121.4081+2222.832 [-3925.92, 375060]
#imp_var43_emit_ult1 (Real Valued): 4240 unique values. 923.3666+19222.83 [0, 2880000]
#saldo_var8 (Real Valued): 3948 unique values. 138.6336+2460.739 [-4942.26, 375060]
#saldo_var26 (Real Valued): 3139 unique values. 77.35887+720.8709 [0, 69756.72]
#saldo_var25 (Real Valued): 3006 unique values. 73.79599+706.5955 [0, 69756.72]
#saldo_medio_var13_corto_hace2 (Real Valued): 2902 unique values. 3610.341+25999.56 [0, 450000]
#saldo_medio_var8_hace2 (Real Valued): 2618 unique values. 64.52489+1715.668 [-287.67, 264702.9]
#saldo_medio_var12_hace3 (Real Valued): 2110 unique values. 659.1445+10643.69 [0, 1202339]
#saldo_medio_var13_corto_ult1 (Real Valued): 1671 unique values. 4880.83+31828.14 [0, 450000]
#saldo_medio_var13_corto_hace3 (Real Valued): 1608 unique values. 528.6911+6848.963 [0, 304838.7]
#saldo_var13 (Real Valued): 1469 unique values. 6516.469+38169.34 [0, 1500000]
#saldo_var13_corto (Real Valued): 1254 unique values. 5027.191+32532.35 [0, 450000]
#saldo_medio_var13_largo_ult3 (Real Valued): 999 unique values. 761.1156+12239.74 [0, 1034483]
#imp_ent_var16_ult1 (Real Valued): 938 unique values. 84.68832+1655.242 [0, 240000]
#saldo_medio_var8_hace3 (Real Valued): 793 unique values. 9.43363+514.7723 [0, 95260.56]
#imp_aport_var13_hace3 (Real Valued): 712 unique values. 2811.729+25297.97 [0, 1008000]
#imp_op_var40_comer_ult3 (Real Valued): 705 unique values. 7.068982+201.3045 [0, 47943.96]
#imp_op_var39_efect_ult3 (Real Valued): 680 unique values. 112.7174+850.4504 [0, 131100]
#imp_op_var41_efect_ult3 (Real Valued): 673 unique values. 112.0976+845.8037 [0, 131100]
#saldo_var14 (Real Valued): 636 unique values. 63.62686+2686.515 [0, 450000]
#imp_op_var40_comer_ult1 (Real Valued): 601 unique values. 4.068145+115.0159 [0, 21093.96]
#saldo_var31 (Real Valued): 567 unique values. 247.4915+17084.24 [0, 6119500]
#saldo_var1 (Real Valued): 565 unique values. 28.64053+7765.61 [-0.9, 3e+06]
#saldo_var40 (Real Valued): 560 unique values. 4.57422+119.446 [-0.9, 9966]
#saldo_medio_var13_largo_hace2 (Real Valued): 547 unique values. 793.9402+13562.28 [0, 1008000]
#imp_op_var39_efect_ult1 (Real Valued): 503 unique values. 68.26483+528.4804 [0, 67500]
#imp_op_var41_efect_ult1 (Real Valued): 497 unique values. 67.82307+524.5429 [0, 67500]
#saldo_medio_var13_largo_hace3 (Real Valued): 495 unique values. 169.372+4660.327 [0, 534000]
#imp_op_var40_ult1 (Real Valued): 462 unique values. 3.397988+113.4237 [0, 23799.96]
#saldo_medio_var13_largo_ult1 (Real Valued): 417 unique values. 971.9136+15732.39 [0, 1500000]
#saldo_var13_largo (Real Valued): 388 unique values. 1488.479+19697.03 [0, 1500000]
#imp_var7_recib_ult1 (Real Valued): 320 unique values. 141.7602+6910.232 [0, 1039260]
#saldo_var20 (Real Valued): 301 unique values. 18.45219+1809.63 [0, 455858.2]
#saldo_medio_var44_ult1 (Real Valued): 288 unique values. 85.98633+4360.172 [0, 681462.9]
#saldo_medio_var44_ult3 (Real Valued): 288 unique values. 63.5501+3094.025 [0, 397884.3]
#imp_aport_var13_ult1 (Real Valued): 287 unique values. 589.4449+10786.61 [0, 450000]
#saldo_var44 (Real Valued): 267 unique values. 102.1546+5082.714 [0, 740006.6]
#var3 (Integer Valued): 231 unique values. -1551.539+39394.01 [-999999, 238]
#saldo_medio_var17_ult1 (Real Valued): 228 unique values. 94.32871+10791.04 [0, 3998687]
#saldo_medio_var17_ult3 (Real Valued): 228 unique values. 78.49367+9422.151 [0, 3525777]
#saldo_var17 (Real Valued): 215 unique values. 129.2718+16259.09 [0, 6119500]
#num_var45_ult3 (Integer Valued): 197 unique values. 13.57063+33.01762 [0, 825]
#saldo_medio_var44_hace2 (Real Valued): 197 unique values. 47.53023+3011.415 [0, 453893.4]
#imp_compra_var44_ult1 (Real Valued): 175 unique values. 83.62604+9895.33 [0, 3410059]
#saldo_medio_var17_hace2 (Real Valued): 164 unique values. 63.78418+11041.35 [-0.03, 4210084]
#saldo_var32 (Real Valued): 150 unique values. 3.562878+136.0992 [0, 13522.89]
#imp_sal_var16_ult1 (Real Valued): 124 unique values. 5.197786+400.4784 [0, 105000]
#num_op_var39_ult3 (Integer Valued): 119 unique values. 4.651741+17.34866 [0, 522]
#num_op_var41_ult3 (Integer Valued): 116 unique values. 4.57832+17.11056 [0, 489]
#num_op_var41_comer_ult3 (Integer Valued): 114 unique values. 3.486143+14.39538 [0, 438]
#num_op_var39_comer_ult3 (Integer Valued): 113 unique values. 3.62516+15.0516 [0, 600]
#saldo_medio_var33_ult1 (Real Valued): 107 unique values. 14.83929+1044.317 [-0.6, 162355.8]
#saldo_medio_var33_ult3 (Real Valued): 107 unique values. 10.72662+738.4301 [-0.6, 138055]
#saldo_var33 (Real Valued): 106 unique values. 16.06515+1076.136 [0, 162964.6]
#num_var45_ult1 (Integer Valued): 105 unique values. 4.331603+14.25032 [0, 510]
#num_var45_hace2 (Integer Valued): 102 unique values. 5.403667+14.623 [0, 426]
#var15 (Integer Valued): 101 unique values. 33.1759+12.94428 [5, 105]
#saldo_medio_var33_hace2 (Real Valued): 90 unique values. 10.1842+750.3009 [0, 146605.3]
#num_op_var39_ult1 (Integer Valued): 81 unique values. 2.924545+11.15037 [0, 468]
#imp_venta_var44_ult1 (Real Valued): 79 unique values. 56.6849+8282.173 [0, 2754476]
#num_op_var41_ult1 (Integer Valued): 78 unique values. 2.871297+10.96082 [0, 468]
#num_med_var45_ult3 (Integer Valued): 77 unique values. 4.000145+10.833 [0, 273]
#num_op_var39_comer_ult1 (Integer Valued): 73 unique values. 2.20433+9.154147 [0, 438]
#num_var45_hace3 (Integer Valued): 71 unique values. 3.835364+10.07737 [0, 339]
#num_op_var41_comer_ult1 (Integer Valued): 69 unique values. 2.131225+8.870698 [0, 438]
#imp_aport_var17_ult1 (Real Valued): 68 unique values. 27.41612+2197.478 [0, 432457.3]
#num_op_var41_hace2 (Integer Valued): 64 unique values. 1.613865+7.404416 [0, 249]
#num_op_var39_hace2 (Integer Valued): 63 unique values. 1.633208+7.459715 [0, 249]
#num_op_var40_comer_ult3 (Integer Valued): 62 unique values. 0.1390166+3.749499 [0, 582]
#saldo_medio_var44_hace3 (Real Valued): 60 unique values. 6.62519+757.6116 [0, 217762.2]
#imp_compra_var44_hace3 (Real Valued): 59 unique values. 31.51976+2856.289 [0, 596253]
#delta_imp_aport_var13_1y3 (Real Valued): 51 unique values. 49197171+699681763 [-1, 1e+10]
#imp_op_var40_efect_ult3 (Real Valued): 50 unique values. 0.6198284+39.5285 [0, 6600]
#imp_reemb_var13_ult1 (Real Valued): 48 unique values. 41.61107+2731.71 [0, 450000]
#num_var43_recib_ult1 (Integer Valued): 48 unique values. 0.8161396+3.688661 [0, 282]
#saldo_medio_var33_hace3 (Real Valued): 45 unique values. 1.346302+144.9454 [0, 40080.6]
#num_op_var41_efect_ult3 (Integer Valued): 43 unique values. 1.216672+5.201159 [0, 156]
#num_op_var39_efect_ult3 (Integer Valued): 43 unique values. 1.220999+5.223692 [0, 156]
#imp_op_var40_efect_ult1 (Real Valued): 42 unique values. 0.4417574+32.35967 [0, 6600]
#num_op_var40_ult3 (Integer Valued): 41 unique values. 0.07342036+2.185707 [0, 351]
#num_var22_ult3 (Integer Valued): 39 unique values. 3.044139+6.206356 [0, 234]
#num_op_var40_comer_ult1 (Integer Valued): 39 unique values. 0.07310423+1.851307 [0, 210]
#saldo_medio_var17_hace3 (Real Valued): 37 unique values. 21.35409+6131.54 [-0.06, 2368559]
#imp_aport_var17_hace3 (Real Valued): 35 unique values. 60.52824+15750.38 [0, 6083692]
#num_op_var40_ult1 (Integer Valued): 34 unique values. 0.05324754+1.561945 [0, 234]
#imp_reemb_var17_ult1 (Real Valued): 31 unique values. 9.928591+1006.98 [0, 211775.6]
#delta_imp_compra_var44_1y3 (Real Valued): 29 unique values. 9747231+312054384 [-1, 1e+10]
#num_op_var39_efect_ult1 (Integer Valued): 28 unique values. 0.7262609+3.244832 [0, 90]
#num_op_var41_hace3 (Integer Valued): 27 unique values. 0.0931585+1.258035 [0, 144]
#num_op_var39_hace3 (Integer Valued): 27 unique values. 0.09398833+1.265537 [0, 144]
#num_var37_0 (Integer Valued): 27 unique values. 0.4137502+2.19774 [0, 114]
#num_var37 (Integer Valued): 27 unique values. 0.4137502+2.19774 [0, 114]
#var21 (Integer Valued): 27 unique values. 32.56892+383.9295 [0, 30000]
#num_op_var41_efect_ult1 (Integer Valued): 27 unique values. 0.7230996+3.226856 [0, 90]
#num_var43_emit_ult1 (Integer Valued): 27 unique values. 0.3921548+2.120021 [0, 180]
#num_op_var40_hace2 (Integer Valued): 26 unique values. 0.01934298+0.8166237 [0, 117]
#imp_aport_var33_hace3 (Real Valued): 25 unique values. 2.665853+222.9601 [0, 36497.67]
#num_var22_hace2 (Integer Valued): 25 unique values. 1.2992+3.444711 [0, 123]
#num_var37_med_ult2 (Integer Valued): 23 unique values. 0.2614563+1.639784 [0, 117]
#num_var22_hace3 (Integer Valued): 21 unique values. 1.18577+3.279359 [0, 108]
#num_trasp_var11_ult1 (Integer Valued): 21 unique values. 0.1221631+1.161938 [0, 93]
#num_ent_var16_ult1 (Integer Valued): 19 unique values. 0.1867714+1.026261 [0, 84]
#num_var22_ult1 (Integer Valued): 19 unique values. 0.5591683+2.086463 [0, 96]
#imp_aport_var33_ult1 (Real Valued): 18 unique values. 0.3153049+66.48285 [0, 24000]
#imp_trasp_var33_in_ult1 (Real Valued): 18 unique values. 1.030191+144.9559 [0, 35310.6]
#num_med_var22_ult3 (Integer Valued): 17 unique values. 0.6380616+1.835578 [0, 78]
#delta_num_compra_var44_1y3 (Real Valued): 15 unique values. 9747231+312054384 [-1, 1e+10]
#num_var35 (Integer Valued): 14 unique values. 3.30198+2.870254 [0, 42]
#imp_trasp_var33_in_hace3 (Real Valued): 14 unique values. 3.297142+511.2715 [0, 149252.1]
#num_var30_0 (Integer Valued): 13 unique values. 3.374057+1.317344 [0, 114]
#delta_imp_aport_var33_1y3 (Real Valued): 13 unique values. 724456.3+85112197 [-1, 1e+10]
#imp_trasp_var17_in_ult1 (Real Valued): 13 unique values. 4.977631+801.9853 [0, 199665.8]
#num_var17_0 (Integer Valued): 12 unique values. 0.01092612+0.3654363 [0, 66]
#num_var31_0 (Integer Valued): 12 unique values. 0.01954056+0.4091737 [0, 66]
#num_compra_var44_ult1 (Integer Valued): 12 unique values. 0.007606791+0.313682 [0, 51]
#num_var39_0 (Integer Valued): 11 unique values. 2.728368+1.143373 [0, 39]
#num_var41_0 (Integer Valued): 11 unique values. 2.701675+1.107408 [0, 39]
#delta_imp_aport_var17_1y3 (Real Valued): 11 unique values. 5005335+223670790 [-1, 1e+10]
#num_venta_var44_ult1 (Integer Valued): 11 unique values. 0.003714485+0.2475391 [0, 45]
#num_var4 (Integer Valued): 10 unique values. 1.080546+0.9105813 [0, 9]
#num_var26_0 (Integer Valued): 10 unique values. 0.09149883+0.6285751 [0, 33]
#num_var26 (Integer Valued): 10 unique values. 0.09149883+0.6285751 [0, 33]
#num_var25_0 (Integer Valued): 10 unique values. 0.08705331+0.6104144 [0, 33]
#num_var25 (Integer Valued): 10 unique values. 0.08705331+0.6104144 [0, 33]
#num_var30 (Integer Valued): 10 unique values. 2.386458+1.644342 [0, 33]
#num_aport_var17_ult1 (Integer Valued): 10 unique values. 0.003418117+0.1902756 [0, 27]
#num_op_var40_efect_ult1 (Integer Valued): 10 unique values. 0.003161264+0.1876135 [0, 33]
#num_op_var40_efect_ult3 (Integer Valued): 10 unique values. 0.00432698+0.2328456 [0, 33]
#num_var17 (Integer Valued): 9 unique values. 0.008080981+0.2603911 [0, 27]
#num_var31 (Integer Valued): 9 unique values. 0.01568777+0.3092885 [0, 27]
#num_var42_0 (Integer Valued): 9 unique values. 3.204475+0.9094442 [0, 114]
#imp_trasp_var17_in_hace3 (Real Valued): 9 unique values. 5.620317+1484.075 [0, 555824.4]
#imp_venta_var44_hace3 (Real Valued): 9 unique values. 5.81531+1286.811 [0, 438202.5]
#num_compra_var44_hace3 (Integer Valued): 9 unique values. 0.002430222+0.1800473 [0, 45]
#num_var13_0 (Integer Valued): 8 unique values. 0.1695821+0.7537059 [0, 21]
#num_var13_largo_0 (Integer Valued): 8 unique values. 0.03850815+0.415315 [0, 21]
#num_var13_largo (Integer Valued): 8 unique values. 0.03647308+0.3928403 [0, 21]
#num_var13 (Integer Valued): 8 unique values. 0.1621531+0.7235146 [0, 21]
#delta_imp_venta_var44_1y3 (Real Valued): 8 unique values. 4807756+219213956 [-1, 1e+10]
#delta_num_aport_var13_1y3 (Real Valued): 8 unique values. 49197171+699681763 [-1, 1e+10]
#num_aport_var13_hace3 (Integer Valued): 8 unique values. 0.07531711+0.5459375 [0, 24]
#saldo_medio_var13_medio_hace2 (Real Valued): 8 unique values. 0.1872255+33.86857 [0, 8129.04]
#saldo_medio_var13_medio_ult3 (Real Valued): 8 unique values. 0.5261697+84.06159 [0, 18870.99]
#saldo_medio_var29_hace2 (Real Valued): 8 unique values. 0.1759405+35.17292 [0, 10430.01]
#num_var12_0 (Integer Valued): 7 unique values. 0.2124172+0.8524591 [0, 111]
#num_var42 (Integer Valued): 7 unique values. 2.219128+1.499438 [0, 18]
#delta_num_aport_var17_1y3 (Real Valued): 7 unique values. 5005335+223670790 [-1, 1e+10]
#delta_num_venta_var44_1y3 (Real Valued): 7 unique values. 4807756+219213956 [-1, 1e+10]
#num_aport_var13_ult1 (Integer Valued): 7 unique values. 0.01792042+0.2764757 [0, 30]
#saldo_medio_var13_medio_ult1 (Real Valued): 7 unique values. 0.8651135+144.3056 [0, 36000]
#saldo_medio_var29_ult1 (Real Valued): 7 unique values. 0.2245624+45.85384 [0, 13793.67]
#saldo_medio_var29_ult3 (Real Valued): 7 unique values. 0.1736297+32.1613 [0, 8030.16]
#num_var5_0 (Integer Valued): 6 unique values. 2.891944+0.6580709 [0, 15]
#num_var5 (Integer Valued): 6 unique values. 1.998966+1.431536 [0, 15]
#num_var14_0 (Integer Valued): 6 unique values. 0.0734006+0.5540104 [0, 111]
#num_op_var40_hace3 (Integer Valued): 6 unique values. 0.0008298318+0.1381505 [0, 48]
#saldo_var13_medio (Integer Valued): 6 unique values. 0.8001949+136.478 [0, 36000]
#imp_var7_emit_ult1 (Real Valued): 6 unique values. 6.805839+1603.794 [0, 526500]
#num_aport_var17_hace3 (Integer Valued): 6 unique values. 0.001580632+0.1337844 [0, 39]
#num_var7_recib_ult1 (Integer Valued): 6 unique values. 0.009385002+0.206228 [0, 24]
#num_reemb_var17_ult1 (Integer Valued): 6 unique values. 0.001106442+0.0961537 [0, 21]
#num_sal_var16_ult1 (Integer Valued): 6 unique values. 0.00470238+0.1439577 [0, 15]
#num_var32_0 (Integer Valued): 5 unique values. 0.004445527+0.1429328 [0, 12]
#num_var32 (Integer Valued): 5 unique values. 0.004445527+0.1429328 [0, 12]
#saldo_var6 (Real Valued): 5 unique values. 0.3562053+71.82753 [0, 19531.8]
#saldo_var29 (Real Valued): 5 unique values. 0.3562053+71.82753 [0, 19531.8]
#saldo_var34 (Integer Valued): 5 unique values. 0.7519857+172.5589 [0, 54000]
#var36 (Integer Valued): 5 unique values. 40.52277+47.3752 [0, 99]
#delta_num_aport_var33_1y3 (Real Valued): 5 unique values. 724456.3+85112197 [-1, 1e+10]
#imp_amort_var34_ult1 (Real Valued): 5 unique values. 0.01985234+4.205855 [0, 1096.02]
#imp_trasp_var17_out_ult1 (Real Valued): 5 unique values. 0.9580809+252.8928 [0, 69622.29]
#num_aport_var33_hace3 (Integer Valued): 5 unique values. 0.001027411+0.06798767 [0, 12]
#num_venta_var44_hace3 (Integer Valued): 5 unique values. 0.0003754001+0.07097997 [0, 24]
#num_var12 (Integer Valued): 4 unique values. 0.1375347+0.6342133 [0, 15]
#num_var14 (Integer Valued): 4 unique values. 0.01584584+0.2207018 [0, 12]
#num_var24_0 (Integer Valued): 4 unique values. 0.1284461+0.6099534 [0, 9]
#num_var33_0 (Integer Valued): 4 unique values. 0.002706832+0.1006407 [0, 12]
#saldo_var18 (Integer Valued): 4 unique values. 23.31432+7761.807 [0, 3e+06]
#imp_amort_var18_ult1 (Real Valued): 4 unique values. 0.133349+41.13473 [0, 15691.8]
#num_aport_var33_ult1 (Integer Valued): 4 unique values. 0.000592737+0.05333671 [0, 12]
#num_meses_var5_ult3 (Integer Valued): 4 unique values. 1.981441+1.297777 [0, 3]
#num_meses_var8_ult3 (Integer Valued): 4 unique values. 0.05445277+0.3366657 [0, 3]
#num_meses_var12_ult3 (Integer Valued): 4 unique values. 0.1016083+0.486021 [0, 3]
#num_meses_var13_corto_ult3 (Integer Valued): 4 unique values. 0.09895415+0.4847579 [0, 3]
#num_meses_var13_largo_ult3 (Integer Valued): 4 unique values. 0.01818385+0.2180135 [0, 3]
#num_meses_var17_ult3 (Integer Valued): 4 unique values. 0.002871481+0.07702166 [0, 3]
#num_meses_var33_ult3 (Integer Valued): 4 unique values. 0.001593804+0.0627367 [0, 3]
#num_meses_var39_vig_ult3 (Integer Valued): 4 unique values. 1.595365+0.7174265 [0, 3]
#num_meses_var44_ult3 (Integer Valued): 4 unique values. 0.003589352+0.08721886 [0, 3]
#num_var1_0 (Integer Valued): 3 unique values. 0.03544567+0.3250755 [0, 6]
#num_var1 (Integer Valued): 3 unique values. 0.01155837+0.1864909 [0, 6]
#num_var8_0 (Integer Valued): 3 unique values. 0.09981691+0.5382623 [0, 6]
#num_var13_corto_0 (Integer Valued): 3 unique values. 0.1309158+0.6191257 [0, 6]
#num_var13_corto (Integer Valued): 3 unique values. 0.1255417+0.601215 [0, 6]
#num_var13_medio_0 (Integer Valued): 3 unique values. 0.0001580632+0.02434576 [0, 6]
#num_var24 (Integer Valued): 3 unique values. 0.1139833+0.5739635 [0, 6]
#num_var33 (Integer Valued): 3 unique values. 0.002272158+0.08875974 [0, 6]
#num_var40_0 (Integer Valued): 3 unique values. 0.03530737+0.3240861 [0, 6]
#num_var44_0 (Integer Valued): 3 unique values. 0.005907612+0.1343268 [0, 6]
#num_var44 (Integer Valued): 3 unique values. 0.005334633+0.1273289 [0, 6]
#delta_imp_reemb_var17_1y3 (Integer Valued): 3 unique values. 2041650+142872190 [-1, 1e+10]
#delta_imp_trasp_var17_in_1y3 (Integer Valued): 3 unique values. 790316+88896498 [-1, 1e+10]
#delta_imp_trasp_var33_in_1y3 (Integer Valued): 3 unique values. 1119614+105806254 [-1, 1e+10]
#delta_num_reemb_var17_1y3 (Integer Valued): 3 unique values. 2041650+142872190 [-1, 1e+10]
#delta_num_trasp_var17_in_1y3 (Integer Valued): 3 unique values. 790316+88896498 [-1, 1e+10]
#delta_num_trasp_var33_in_1y3 (Integer Valued): 3 unique values. 1119614+105806254 [-1, 1e+10]
#imp_trasp_var33_out_ult1 (Real Valued): 3 unique values. 0.05680614+16.36095 [0, 5625.33]
#num_var7_emit_ult1 (Integer Valued): 3 unique values. 0.0001383053+0.02775868 [0, 9]
#num_meses_var29_ult3 (Integer Valued): 3 unique values. 9.220353e-05+0.01257202 [0, 2]
#num_trasp_var17_in_hace3 (Integer Valued): 3 unique values. 0.0002173369+0.03174291 [0, 6]
#num_trasp_var17_in_ult1 (Integer Valued): 3 unique values. 0.0002766106+0.03266278 [0, 6]
#num_trasp_var33_in_ult1 (Integer Valued): 3 unique values. 0.0003754001+0.03692104 [0, 6]
#ind_var1_0 (Binary Valued): 2 unique values. 0.01178229+0.1079053 [0, 1]
#ind_var1 (Binary Valued): 2 unique values. 0.003839619+0.06184578 [0, 1]
#ind_var5_0 (Binary Valued): 2 unique values. 0.9575139+0.201696 [0, 1]
#ind_var5 (Binary Valued): 2 unique values. 0.6637535+0.4724259 [0, 1]
#ind_var6_0 (Binary Valued): 2 unique values. 9.87895e-05+0.009938832 [0, 1]
#ind_var6 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var8_0 (Binary Valued): 2 unique values. 0.03325913+0.1793131 [0, 1]
#ind_var8 (Binary Valued): 2 unique values. 0.02924169+0.1684838 [0, 1]
#ind_var12_0 (Binary Valued): 2 unique values. 0.06806597+0.2518599 [0, 1]
#ind_var12 (Binary Valued): 2 unique values. 0.04541024+0.2082029 [0, 1]
#ind_var13_0 (Binary Valued): 2 unique values. 0.05272725+0.2234892 [0, 1]
#ind_var13_corto_0 (Binary Valued): 2 unique values. 0.04321053+0.2033314 [0, 1]
#ind_var13_corto (Binary Valued): 2 unique values. 0.0418143+0.2001653 [0, 1]
#ind_var13_largo_0 (Binary Valued): 2 unique values. 0.0104651+0.1017627 [0, 1]
#ind_var13_largo (Binary Valued): 2 unique values. 0.01028728+0.1009035 [0, 1]
#ind_var13_medio_0 (Binary Valued): 2 unique values. 4.610177e-05+0.006789694 [0, 1]
#ind_var13_medio (Binary Valued): 2 unique values. 4.610177e-05+0.006789694 [0, 1]
#ind_var13 (Binary Valued): 2 unique values. 0.05136395+0.2207397 [0, 1]
#ind_var14_0 (Binary Valued): 2 unique values. 0.02403219+0.1531496 [0, 1]
#ind_var14 (Binary Valued): 2 unique values. 0.005222672+0.07207933 [0, 1]
#ind_var17_0 (Binary Valued): 2 unique values. 0.001705765+0.04126581 [0, 1]
#ind_var17 (Binary Valued): 2 unique values. 0.001422569+0.03769024 [0, 1]
#ind_var18_0 (Binary Valued): 2 unique values. 1.97579e-05+0.004444957 [0, 1]
#ind_var18 (Binary Valued): 2 unique values. 1.97579e-05+0.004444957 [0, 1]
#ind_var19 (Binary Valued): 2 unique values. 0.003971338+0.0628935 [0, 1]
#ind_var20_0 (Binary Valued): 2 unique values. 0.003523492+0.05925454 [0, 1]
#ind_var20 (Binary Valued): 2 unique values. 0.002568527+0.05061567 [0, 1]
#ind_var24_0 (Binary Valued): 2 unique values. 0.04264413+0.2020542 [0, 1]
#ind_var24 (Binary Valued): 2 unique values. 0.0379681+0.1911198 [0, 1]
#ind_var25_cte (Binary Valued): 2 unique values. 0.02720004+0.1626665 [0, 1]
#ind_var26_0 (Binary Valued): 2 unique values. 0.02526377+0.1569257 [0, 1]
#ind_var26_cte (Binary Valued): 2 unique values. 0.02841845+0.1661656 [0, 1]
#ind_var26 (Binary Valued): 2 unique values. 0.02526377+0.1569257 [0, 1]
#ind_var25_0 (Binary Valued): 2 unique values. 0.02420343+0.1536808 [0, 1]
#ind_var25 (Binary Valued): 2 unique values. 0.02420343+0.1536808 [0, 1]
#ind_var29_0 (Binary Valued): 2 unique values. 9.87895e-05+0.009938832 [0, 1]
#ind_var29 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var30_0 (Binary Valued): 2 unique values. 0.9953964+0.06769363 [0, 1]
#ind_var30 (Binary Valued): 2 unique values. 0.7334001+0.4421829 [0, 1]
#ind_var31_0 (Binary Valued): 2 unique values. 0.004307222+0.06548815 [0, 1]
#ind_var31 (Binary Valued): 2 unique values. 0.003760587+0.06120841 [0, 1]
#ind_var32_cte (Binary Valued): 2 unique values. 0.001323779+0.03635981 [0, 1]
#ind_var32_0 (Binary Valued): 2 unique values. 0.00115913+0.03402637 [0, 1]
#ind_var32 (Binary Valued): 2 unique values. 0.00115913+0.03402637 [0, 1]
#ind_var33_0 (Binary Valued): 2 unique values. 0.0008100739+0.02845036 [0, 1]
#ind_var33 (Binary Valued): 2 unique values. 0.0006981125+0.02641268 [0, 1]
#ind_var34_0 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var34 (Binary Valued): 2 unique values. 2.634387e-05+0.005132577 [0, 1]
#ind_var37_cte (Binary Valued): 2 unique values. 0.07151043+0.2576764 [0, 1]
#ind_var37_0 (Binary Valued): 2 unique values. 0.0647664+0.246114 [0, 1]
#ind_var37 (Binary Valued): 2 unique values. 0.0647664+0.246114 [0, 1]
#ind_var39_0 (Binary Valued): 2 unique values. 0.8816502+0.323023 [0, 1]
#ind_var40_0 (Binary Valued): 2 unique values. 0.01174936+0.1077562 [0, 1]
#ind_var40 (Binary Valued): 2 unique values. 0.003806689+0.06158103 [0, 1]
#ind_var41_0 (Binary Valued): 2 unique values. 0.8801025+0.3248427 [0, 1]
#ind_var39 (Binary Valued): 2 unique values. 0.003806689+0.06158103 [0, 1]
#ind_var44_0 (Binary Valued): 2 unique values. 0.001949446+0.04410962 [0, 1]
#ind_var44 (Binary Valued): 2 unique values. 0.001765039+0.04197541 [0, 1]
#num_var6_0 (Binary Valued): 2 unique values. 0.0002963685+0.0298165 [0, 3]
#num_var6 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var8 (Binary Valued): 2 unique values. 0.08772508+0.5054515 [0, 3]
#num_var13_medio (Binary Valued): 2 unique values. 0.0001383053+0.02036908 [0, 3]
#num_var18_0 (Binary Valued): 2 unique values. 5.92737e-05+0.01333487 [0, 3]
#num_var18 (Binary Valued): 2 unique values. 5.92737e-05+0.01333487 [0, 3]
#num_var20_0 (Binary Valued): 2 unique values. 0.01057048+0.1777636 [0, 3]
#num_var20 (Binary Valued): 2 unique values. 0.007705581+0.151847 [0, 3]
#num_var29_0 (Binary Valued): 2 unique values. 0.0002963685+0.0298165 [0, 3]
#num_var29 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var34_0 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var34 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_var40 (Binary Valued): 2 unique values. 0.01142007+0.1847431 [0, 3]
#num_var39 (Binary Valued): 2 unique values. 0.01142007+0.1847431 [0, 3]
#delta_imp_amort_var18_1y3 (Binary Valued): 2 unique values. 197579+44449566 [0, 1e+10]
#delta_imp_amort_var34_1y3 (Binary Valued): 2 unique values. 263438.7+51325769 [0, 1e+10]
#delta_imp_reemb_var13_1y3 (Binary Valued): 2 unique values. 4280878+206859225 [0, 1e+10]
#delta_imp_reemb_var33_1y3 (Binary Valued): 2 unique values. 65859.67+25663138 [0, 1e+10]
#delta_imp_trasp_var17_out_1y3 (Binary Valued): 2 unique values. 263438.7+51325769 [0, 1e+10]
#delta_imp_trasp_var33_out_1y3 (Binary Valued): 2 unique values. 131719.3+36293039 [0, 1e+10]
#delta_num_reemb_var13_1y3 (Binary Valued): 2 unique values. 4280878+206859225 [0, 1e+10]
#delta_num_reemb_var33_1y3 (Binary Valued): 2 unique values. 65859.67+25663138 [0, 1e+10]
#delta_num_trasp_var17_out_1y3 (Binary Valued): 2 unique values. 263438.7+51325769 [0, 1e+10]
#delta_num_trasp_var33_out_1y3 (Binary Valued): 2 unique values. 131719.3+36293039 [0, 1e+10]
#imp_reemb_var17_hace3 (Real Valued): 2 unique values. 0.07921041+30.86544 [0, 12027.15]
#imp_reemb_var33_ult1 (Binary Valued): 2 unique values. 0.00790316+3.079577 [0, 1200]
#ind_var7_emit_ult1 (Binary Valued): 2 unique values. 3.292983e-05+0.005738377 [0, 1]
#ind_var7_recib_ult1 (Binary Valued): 2 unique values. 0.002588285+0.05080947 [0, 1]
#ind_var10_ult1 (Binary Valued): 2 unique values. 0.08106008+0.2729283 [0, 1]
#ind_var10cte_ult1 (Binary Valued): 2 unique values. 0.0919862+0.2890074 [0, 1]
#ind_var9_cte_ult1 (Binary Valued): 2 unique values. 0.0969125+0.2958396 [0, 1]
#ind_var9_ult1 (Binary Valued): 2 unique values. 0.08623006+0.2807044 [0, 1]
#ind_var43_emit_ult1 (Binary Valued): 2 unique values. 0.06729541+0.2505337 [0, 1]
#ind_var43_recib_ult1 (Binary Valued): 2 unique values. 0.128479+0.3346235 [0, 1]
#num_meses_var13_medio_ult3 (Binary Valued): 2 unique values. 9.220353e-05+0.01357939 [0, 2]
#num_reemb_var13_ult1 (Binary Valued): 2 unique values. 0.001284263+0.06205777 [0, 3]
#num_reemb_var17_hace3 (Binary Valued): 2 unique values. 1.97579e-05+0.007698941 [0, 3]
#num_reemb_var33_ult1 (Binary Valued): 2 unique values. 1.97579e-05+0.007698941 [0, 3]
#num_trasp_var17_out_ult1 (Binary Valued): 2 unique values. 7.90316e-05+0.01539773 [0, 3]
#num_trasp_var33_in_hace3 (Binary Valued): 2 unique values. 0.0002568527+0.02775783 [0, 3]
#num_trasp_var33_out_ult1 (Binary Valued): 2 unique values. 3.95158e-05+0.01088791 [0, 3]
#saldo_medio_var29_hace3 (Real Valued): 2 unique values. 0.0009562824+0.3726288 [0, 145.2]
#ind_var2_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var2 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var27_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var28_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var28 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var27 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var41 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var46_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#ind_var46 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var27_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var28_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var28 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var27 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var41 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var46_0 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var46 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var28 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var27 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var41 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var46 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_amort_var18_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_amort_var34_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_reemb_var13_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_reemb_var33_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_trasp_var17_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#imp_trasp_var33_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var2_0_ult1 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_var2_ult1 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_reemb_var13_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_reemb_var33_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_trasp_var17_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#num_trasp_var33_out_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_var2_ult1 (Integer Valued): 1 unique values. 0+0 [0, 0]
#saldo_medio_var13_medio_hace3 (Integer Valued): 1 unique values. 0+0 [0, 0]
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 17 18 19 21 23 25 26
# 34 100 23 16 11 10 8 10 6 9 4 3 3 2 1 1 2 2 2 1 2 1
# 27 28 29 31 34 35 37 39 41 42 43 45 48 50 51 59 60 62 63 64 68 69
# 7 1 1 1 1 1 1 2 1 1 2 1 2 1 1 1 1 1 1 1 1 1
# 71 73 77 78 79 81 90 101 102 105 106 107 113 114 116 119 124 150 164 175 197 215
# 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 2 1
# 228 231 267 287 288 301 320 388 417 462 495 497 503 547 560 565 567 601 636 673 680 705
# 2 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 712 793 938 999 1254 1469 1608 1671 2110 2618 2902 3006 3139 3948 4240 4260 4364 4513 5041 5469 5858 6676
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
# 6801 6808 7247 13155 13249 13414 14597 14814 16003 16284 20454 25270 25650 28458 29322 30654 109982 151838
# 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/irr.R
\name{irr_ts_distribution}
\alias{irr_ts_distribution}
\title{Get the permutation distribution of the IRR test statistic}
\usage{
irr_ts_distribution(ratings, obs_ts = NULL, reps = 10000,
keep_dist = FALSE, seed = NULL)
}
\arguments{
\item{ratings}{matrix of dimension [R, Ns].
Each row corresponds to the ratings given by a single rater;
columns correspond to items rated.}
\item{obs_ts}{if \code{NULL}, \code{obs_ts} is calculated as the value
of the test statistic for the original data}
\item{reps}{integer number of random permutations of the elements of each row of ratings}
\item{keep_dist}{Boolean flag for whether to store and return the array
of permutation values of the irr test statistic}
\item{seed}{Random seed for random number generator.
If NULL, the pseudorandom number generator is the instance used by the permutation function.}
}
\value{
A list containing:
\itemize{
\item{obs_ts: observed value of the test statistic for the input data, or the input value of \code{obs_ts} if it was given as input}
\item{geq: integer number of iterations for which the test statistic was greater than or equal to \code{obs_ts}}
\item{reps: number of permutations}
\item{pvalue: geq/reps}
\item{dist: if \code{keep_dist}, the array of values of the irr test statistic from the \code{reps} iterations. Otherwise, NULL.}
}
}
\description{
Simulates the permutation distribution of the irr test statistic for
a matrix of ratings ``ratings``
}
\details{
If \code{obs_ts} is NULL, computes the reference value of the test
statistic before the first permutation. Otherwise, uses the supplied value
\code{obs_ts} for comparison.
If \code{keep_dist}, return the distribution of values of the test statistic;
otherwise, return only the number of permutations for which the value of
the irr test statistic is at least as large as \code{obs_ts}.
}
| /man/irr_ts_distribution.Rd | permissive | spertus/permuter | R | false | true | 1,927 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/irr.R
\name{irr_ts_distribution}
\alias{irr_ts_distribution}
\title{Get the permutation distribution of the IRR test statistic}
\usage{
irr_ts_distribution(ratings, obs_ts = NULL, reps = 10000,
keep_dist = FALSE, seed = NULL)
}
\arguments{
\item{ratings}{matrix of dimension [R, Ns].
Each row corresponds to the ratings given by a single rater;
columns correspond to items rated.}
\item{obs_ts}{if \code{NULL}, \code{obs_ts} is calculated as the value
of the test statistic for the original data}
\item{reps}{integer number of random permutations of the elements of each row of ratings}
\item{keep_dist}{Boolean flag for whether to store and return the array
of permutation values of the irr test statistic}
\item{seed}{Random seed for random number generator.
If NULL, the pseudorandom number generator is the instance used by the permutation function.}
}
\value{
A list containing:
\itemize{
\item{obs_ts: observed value of the test statistic for the input data, or the input value of \code{obs_ts} if it was given as input}
\item{geq: integer number of iterations for which the test statistic was greater than or equal to \code{obs_ts}}
\item{reps: number of permutations}
\item{pvalue: geq/reps}
\item{dist: if \code{keep_dist}, the array of values of the irr test statistic from the \code{reps} iterations. Otherwise, NULL.}
}
}
\description{
Simulates the permutation distribution of the irr test statistic for
a matrix of ratings ``ratings``
}
\details{
If \code{obs_ts} is NULL, computes the reference value of the test
statistic before the first permutation. Otherwise, uses the supplied value
\code{obs_ts} for comparison.
If \code{keep_dist}, return the distribution of values of the test statistic;
otherwise, return only the number of permutations for which the value of
the irr test statistic is at least as large as \code{obs_ts}.
}
|
# Build an RMA-normalised ExpressionSet from Affymetrix data using a custom
# CDF environment filtered to probesets with at least 4 probes and with the
# AFFX control probesets removed.
#
# Input is given EITHER as an existing AffyBatch object (AffyBatchInput) OR
# as a tab-delimited sample-info file (SampleInfoFile) plus the directory
# holding the CEL files (CELfiles_dir, defaults to the working directory).
#
# Arguments:
#   SampleInfoFile    tab-separated file with a header row describing samples
#   CELfiles_dir      directory containing the CEL files (default: getwd())
#   AffyBatchInput    alternatively, an AffyBatch object to normalise
#   custom_cdfname    name of the custom CDF package/environment to use
#   custom_annotation annotation package name; if NULL it is derived from
#                     custom_cdfname by replacing the trailing "cdf" with ".db"
#   arrayNameColumn   column of SampleInfoFile holding the CEL file names
#   sampleNameColumn  column of SampleInfoFile holding the sample names
#   classColumn       column holding the sample class labels (taken from
#                     SampleInfoFile or from pData(AffyBatchInput), depending
#                     on which input form is used)
#
# Value: an ExpressionSet of RMA-normalised expression values.
#
# Side effect: assigns the filtered CDF environment into the global
# environment as "filtered_customcdf_env" -- presumably because affy resolves
# the cdfName by name at normalisation time (TODO confirm).
RMAwithCDFfilter<-function(SampleInfoFile=NULL, CELfiles_dir=NULL, AffyBatchInput=NULL,
custom_cdfname=NULL, custom_annotation=NULL, arrayNameColumn=NULL, sampleNameColumn=NULL, classColumn) {
# input provided either as AffyBatch object or list of cel files in an input file
if (!is.null(AffyBatchInput)) {
if (!(inherits(AffyBatchInput, what="AffyBatch"))) {
stop("the object AffyBatchInput is not an object of class AffyBatch")
}
AffyBatch_formatted_input<-TRUE
} else {
if (is.null(SampleInfoFile)) {
stop("Please provide input data either as a sampleinfo file or as an AffyBatch object")
}
if (is.null(CELfiles_dir)) {
CELfiles_dir<-getwd()
}
AffyBatch_formatted_input<-FALSE
}
# SampleInfoFile<-"/home/francesco/Documents/PREDA/Esempio/SampleInfoFile.txt"
# CELfiles_dir<-"/home/francesco/Documents/PREDA/Esempio"
# custom_cdfname<-"hugene10stv1hsentrezgcdf"
# arrayNameColumn=1
# sampleNameColumn=2
# classColumn=3
require(affy)
# normalise the CDF package name to affy's canonical form
custom_cdfname<-cleancdfname(custom_cdfname)
if (is.null(custom_annotation)) {
custom_annotation<-gsub(pattern="cdf$", replacement=".db", perl=TRUE, x=custom_cdfname)
}
# verify if at least 4 probes per probeset are present in selected custom CDF environment
require(custom_cdfname, character.only = TRUE)
cdfcustom<-as.list(get(custom_cdfname))
# conteggio_probes = probe count per probeset; selezione = keep >= 4 probes
conteggio_probes<-sapply(cdfcustom, FUN=nrow)
selezione_probeset<-(conteggio_probes >= 4)
cdfcustom_filtered<-cdfcustom[selezione_probeset]
# remove Affymetrix control probesets (OPTIONAL STEP)
controls_selection<-(grep(pattern="^AFFX",x=names(cdfcustom_filtered), perl=TRUE))
if( length(controls_selection) > 0) {
cdfcustom_filtered<-cdfcustom_filtered[-controls_selection]
}
# create the new CDF environment from filtered list
filtered_customcdf_env<-new.env()
multiassign(x=cdfcustom_filtered, envir =filtered_customcdf_env)
# make the environment findable by name (see side-effect note above)
assign("filtered_customcdf_env", value=filtered_customcdf_env, envir=.GlobalEnv)
# if input is provided as affybatch object
if (AffyBatch_formatted_input) {
AffyBatchInput@cdfName<-"filtered_customcdf_env"
annotation(AffyBatchInput)<-custom_annotation
my_AnnotatedDataFrame<-new("AnnotatedDataFrame", data=data.frame("Class"=pData(AffyBatchInput)[,classColumn], row.names=sampleNames(AffyBatchInput)), varMetadata=data.frame(labelDescription="Samples classes", row.names="Class"))
phenoData(AffyBatchInput)<-my_AnnotatedDataFrame
my_ExpressionSet<-rma(AffyBatchInput)
} else {
# read sampleinfo file
sampleinfo<-read.table(SampleInfoFile, sep="\t", header=TRUE, stringsAsFactors=FALSE)
# RMA normalize data and obtain ExpressionSet object
# NOTE: mind the cdfname parameter !!  (was: "ATTENTI AL PARAMETRO cdfname")
my_AnnotatedDataFrame<-new("AnnotatedDataFrame", data=data.frame("Class"=sampleinfo[,classColumn], row.names=sampleinfo[,sampleNameColumn]), varMetadata=data.frame(labelDescription="Samples classes", row.names="Class"))
my_ExpressionSet<-justRMA(filenames=sampleinfo[,arrayNameColumn], celfile.path=CELfiles_dir, sampleNames=sampleinfo[,sampleNameColumn], cdfname="filtered_customcdf_env", phenoData=my_AnnotatedDataFrame)
my_ExpressionSet@annotation<-custom_annotation
}
return(my_ExpressionSet)
}
| /R/RMAwithCDFfilter.R | no_license | bernatgel/PREDAsam | R | false | false | 3,142 | r |
# Build an RMA-normalised ExpressionSet from Affymetrix data using a custom
# CDF environment filtered to probesets with at least 4 probes and with the
# AFFX control probesets removed.
#
# Input is given EITHER as an existing AffyBatch object (AffyBatchInput) OR
# as a tab-delimited sample-info file (SampleInfoFile) plus the directory
# holding the CEL files (CELfiles_dir, defaults to the working directory).
#
# Arguments:
#   SampleInfoFile    tab-separated file with a header row describing samples
#   CELfiles_dir      directory containing the CEL files (default: getwd())
#   AffyBatchInput    alternatively, an AffyBatch object to normalise
#   custom_cdfname    name of the custom CDF package/environment to use
#   custom_annotation annotation package name; if NULL it is derived from
#                     custom_cdfname by replacing the trailing "cdf" with ".db"
#   arrayNameColumn   column of SampleInfoFile holding the CEL file names
#   sampleNameColumn  column of SampleInfoFile holding the sample names
#   classColumn       column holding the sample class labels (taken from
#                     SampleInfoFile or from pData(AffyBatchInput), depending
#                     on which input form is used)
#
# Value: an ExpressionSet of RMA-normalised expression values.
#
# Side effect: assigns the filtered CDF environment into the global
# environment as "filtered_customcdf_env" -- presumably because affy resolves
# the cdfName by name at normalisation time (TODO confirm).
RMAwithCDFfilter<-function(SampleInfoFile=NULL, CELfiles_dir=NULL, AffyBatchInput=NULL,
custom_cdfname=NULL, custom_annotation=NULL, arrayNameColumn=NULL, sampleNameColumn=NULL, classColumn) {
# input provided either as AffyBatch object or list of cel files in an input file
if (!is.null(AffyBatchInput)) {
if (!(inherits(AffyBatchInput, what="AffyBatch"))) {
stop("the object AffyBatchInput is not an object of class AffyBatch")
}
AffyBatch_formatted_input<-TRUE
} else {
if (is.null(SampleInfoFile)) {
stop("Please provide input data either as a sampleinfo file or as an AffyBatch object")
}
if (is.null(CELfiles_dir)) {
CELfiles_dir<-getwd()
}
AffyBatch_formatted_input<-FALSE
}
# SampleInfoFile<-"/home/francesco/Documents/PREDA/Esempio/SampleInfoFile.txt"
# CELfiles_dir<-"/home/francesco/Documents/PREDA/Esempio"
# custom_cdfname<-"hugene10stv1hsentrezgcdf"
# arrayNameColumn=1
# sampleNameColumn=2
# classColumn=3
require(affy)
# normalise the CDF package name to affy's canonical form
custom_cdfname<-cleancdfname(custom_cdfname)
if (is.null(custom_annotation)) {
custom_annotation<-gsub(pattern="cdf$", replacement=".db", perl=TRUE, x=custom_cdfname)
}
# verify if at least 4 probes per probeset are present in selected custom CDF environment
require(custom_cdfname, character.only = TRUE)
cdfcustom<-as.list(get(custom_cdfname))
# conteggio_probes = probe count per probeset; selezione = keep >= 4 probes
conteggio_probes<-sapply(cdfcustom, FUN=nrow)
selezione_probeset<-(conteggio_probes >= 4)
cdfcustom_filtered<-cdfcustom[selezione_probeset]
# remove Affymetrix control probesets (OPTIONAL STEP)
controls_selection<-(grep(pattern="^AFFX",x=names(cdfcustom_filtered), perl=TRUE))
if( length(controls_selection) > 0) {
cdfcustom_filtered<-cdfcustom_filtered[-controls_selection]
}
# create the new CDF environment from filtered list
filtered_customcdf_env<-new.env()
multiassign(x=cdfcustom_filtered, envir =filtered_customcdf_env)
# make the environment findable by name (see side-effect note above)
assign("filtered_customcdf_env", value=filtered_customcdf_env, envir=.GlobalEnv)
# if input is provided as affybatch object
if (AffyBatch_formatted_input) {
AffyBatchInput@cdfName<-"filtered_customcdf_env"
annotation(AffyBatchInput)<-custom_annotation
my_AnnotatedDataFrame<-new("AnnotatedDataFrame", data=data.frame("Class"=pData(AffyBatchInput)[,classColumn], row.names=sampleNames(AffyBatchInput)), varMetadata=data.frame(labelDescription="Samples classes", row.names="Class"))
phenoData(AffyBatchInput)<-my_AnnotatedDataFrame
my_ExpressionSet<-rma(AffyBatchInput)
} else {
# read sampleinfo file
sampleinfo<-read.table(SampleInfoFile, sep="\t", header=TRUE, stringsAsFactors=FALSE)
# RMA normalize data and obtain ExpressionSet object
# NOTE: mind the cdfname parameter !!  (was: "ATTENTI AL PARAMETRO cdfname")
my_AnnotatedDataFrame<-new("AnnotatedDataFrame", data=data.frame("Class"=sampleinfo[,classColumn], row.names=sampleinfo[,sampleNameColumn]), varMetadata=data.frame(labelDescription="Samples classes", row.names="Class"))
my_ExpressionSet<-justRMA(filenames=sampleinfo[,arrayNameColumn], celfile.path=CELfiles_dir, sampleNames=sampleinfo[,sampleNameColumn], cdfname="filtered_customcdf_env", phenoData=my_AnnotatedDataFrame)
my_ExpressionSet@annotation<-custom_annotation
}
return(my_ExpressionSet)
}
|
\name{closeViz-methods}
\docType{methods}
\alias{closeViz-methods}
\alias{closeViz,NG_Visualization-method}
\alias{closeViz,NG_Viz2DAxis-method}
\alias{closeViz,NG_Viztk2d-method}
\title{Closing of a Display}
\description{
If a custom visualization class (NG_Visualization) has been defined, this is the method that gets called when navGraph switches away from a graph that links to the visualization class.
See package vignette for more detail.
}
\section{Methods}{
\describe{
\item{\code{signature(obj = "NG_Visualization")}}{}
}}
\keyword{methods}
| /man/closeViz-methods.Rd | no_license | cran/RnavGraph | R | false | false | 562 | rd | \name{closeViz-methods}
\docType{methods}
\alias{closeViz-methods}
\alias{closeViz,NG_Visualization-method}
\alias{closeViz,NG_Viz2DAxis-method}
\alias{closeViz,NG_Viztk2d-method}
\title{Closing of a Display}
\description{
If a custom visualization class (NG_Visualization) has been defined, this is the method that gets called when navGraph switches away from a graph that links to the visualization class.
See package vignette for more detail.
}
\section{Methods}{
\describe{
\item{\code{signature(obj = "NG_Visualization")}}{}
}}
\keyword{methods}
|
library(stats)
args <- commandArgs(trailingOnly = TRUE)
# Container for the three PCA helper functions defined below.
pca <- vector(mode = "list", length = 3)
names(pca) <- c("eigen_data", "pca_normalizer", "pca_transform")
# Eigen-decompose a rows x cols matrix supplied as a flat, row-major vector.
# Returns a list with elements `values` and `vectors` (see ?eigen).
eigen_data <- function(Z, rows, cols) {
  Z <- matrix(Z, nrow = rows, ncol = cols, byrow = TRUE)
  eig <- eigen(Z, symmetric = FALSE)
  c(eig["values"], eig["vectors"])
}
# Centre Z by its row means, then eigen-decompose the covariance matrix of
# the (transposed) centred data. Returns the full eigen() result.
pca_normalizer <- function(Z) {
  row.mean <- apply(Z, 1, mean)
  mat <- Z - row.mean
  V <- cov(t(mat))
  eigen(V, symmetric = FALSE)
}
# Run FactoMineR's PCA on Z and return its eigenvalues plus the variable and
# individual results. Requires the FactoMineR package at call time.
pca_transform <- function(Z) {
  library(FactoMineR)
  res.pca <- PCA(Z)
  c(res.pca["eig"], res.pca["var"], res.pca["ind"])
}
# Populate the named list. The original `pca.eigen_data = eigen_data`
# assignments created stand-alone dot-named variables and left the list
# empty, which defeats the purpose of building `pca` above.
pca$eigen_data <- eigen_data
pca$pca_normalizer <- pca_normalizer
pca$pca_transform <- pca_transform
# Keep the original dot-named bindings for backward compatibility.
pca.eigen_data <- eigen_data
pca.pca_normalizer <- pca_normalizer
pca.pca_transform <- pca_transform
| /pca.R | no_license | aswinvk28/eigen_moment_inertia | R | false | false | 818 | r | library(stats)
args <- commandArgs(trailingOnly = TRUE)
# Container for the three PCA helper functions defined below.
pca <- vector(mode = "list", length = 3)
names(pca) <- c("eigen_data", "pca_normalizer", "pca_transform")
# Eigen-decompose a rows x cols matrix supplied as a flat, row-major vector.
# Returns a list with elements `values` and `vectors` (see ?eigen).
eigen_data <- function(Z, rows, cols) {
  Z <- matrix(Z, nrow = rows, ncol = cols, byrow = TRUE)
  eig <- eigen(Z, symmetric = FALSE)
  c(eig["values"], eig["vectors"])
}
# Centre Z by its row means, then eigen-decompose the covariance matrix of
# the (transposed) centred data. Returns the full eigen() result.
pca_normalizer <- function(Z) {
  row.mean <- apply(Z, 1, mean)
  mat <- Z - row.mean
  V <- cov(t(mat))
  eigen(V, symmetric = FALSE)
}
# Run FactoMineR's PCA on Z and return its eigenvalues plus the variable and
# individual results. Requires the FactoMineR package at call time.
pca_transform <- function(Z) {
  library(FactoMineR)
  res.pca <- PCA(Z)
  c(res.pca["eig"], res.pca["var"], res.pca["ind"])
}
# Populate the named list. The original `pca.eigen_data = eigen_data`
# assignments created stand-alone dot-named variables and left the list
# empty, which defeats the purpose of building `pca` above.
pca$eigen_data <- eigen_data
pca$pca_normalizer <- pca_normalizer
pca$pca_transform <- pca_transform
# Keep the original dot-named bindings for backward compatibility.
pca.eigen_data <- eigen_data
pca.pca_normalizer <- pca_normalizer
pca.pca_transform <- pca_transform
|
#' day 8
#'
#' Advent of Code 2020 day 8: run the handheld-console program.
#' Part 1 is the accumulator value just before any instruction runs twice;
#' part 2 fixes the program by swapping one jmp/nop so it terminates and
#' returns the accumulator at termination.
#'
#' @export
day8 <- function() {
  # dependencies
  `%>%` <- magrittr::`%>%`
  unglue_data <- unglue::unglue_data
  mutate <- dplyr::mutate
  # data
  file <- system.file("extdata/day8.txt", package = "adventofcode2020")
  input <- readLines(file)
  # parse "call value" lines; `passes` counts how often each row has run
  clean_data <- clean_data1 <-
    unglue_data(input, "{call} {value}", convert = TRUE) %>%
    mutate(passes = 0)
  # part 1: execute until some instruction is about to run a second time
  i <- 1
  glob <- 0
  repeat {
    passes <- clean_data1[i, "passes"] + 1
    clean_data1[i, "passes"] <- passes
    if (passes == 2) break
    call <- clean_data1[i, "call"]
    value <- clean_data1[i, "value"]
    if (call == "acc") {
      glob <- glob + value
      i <- i + 1
    } else if (call == "jmp") {
      i <- i + value
    } else {
      i <- i + 1
    }
  }
  part1 <- glob
  # part 2 helper: run the program; return NULL on an infinite loop, or the
  # accumulator once the instruction pointer steps past the last instruction.
  # (The original checked `i == nrow(clean_data)` AFTER incrementing, which
  # returned before executing the final instruction -- off by one.)
  test_code <- function(clean_data) {
    n <- nrow(clean_data)
    i <- 1
    glob <- 0
    repeat {
      # termination: pointer has moved beyond the last row
      if (i > n) return(glob)
      passes <- clean_data[i, "passes"] + 1
      clean_data[i, "passes"] <- passes
      if (passes == 2) return(NULL)  # instruction about to run twice: loop
      call <- clean_data[i, "call"]
      value <- clean_data[i, "value"]
      if (call == "acc") {
        glob <- glob + value
        i <- i + 1
      } else if (call == "jmp") {
        i <- i + value
      } else {
        i <- i + 1
      }
    }
  }
  # try swapping each jmp <-> nop in turn until the program terminates
  res <- NULL
  for (j in seq_len(nrow(clean_data))) {
    call <- clean_data[j, "call"]
    if (call == "acc") next
    clean_data_modif <- clean_data
    clean_data_modif[j, "call"] <- if (call == "jmp") "nop" else "jmp"
    res <- test_code(clean_data_modif)
    if (!is.null(res)) break
  }
  part2 <- res
  list(part1 = part1, part2 = part2)
}
| /R/day8.R | no_license | moodymudskipper/adventofcode2020 | R | false | false | 1,830 | r | #' day 8
#'
#' @export
day8 <- function() {
  # Advent of Code 2020 day 8: part 1 is the accumulator just before any
  # instruction runs twice; part 2 swaps one jmp/nop so the program
  # terminates and returns the accumulator at termination.
  # dependencies
  `%>%` <- magrittr::`%>%`
  unglue_data <- unglue::unglue_data
  mutate <- dplyr::mutate
  # data
  file <- system.file("extdata/day8.txt", package = "adventofcode2020")
  input <- readLines(file)
  # parse "call value" lines; `passes` counts how often each row has run
  clean_data <- clean_data1 <-
    unglue_data(input, "{call} {value}", convert = TRUE) %>%
    mutate(passes = 0)
  # part 1: execute until some instruction is about to run a second time
  i <- 1
  glob <- 0
  repeat {
    passes <- clean_data1[i, "passes"] + 1
    clean_data1[i, "passes"] <- passes
    if (passes == 2) break
    call <- clean_data1[i, "call"]
    value <- clean_data1[i, "value"]
    if (call == "acc") {
      glob <- glob + value
      i <- i + 1
    } else if (call == "jmp") {
      i <- i + value
    } else {
      i <- i + 1
    }
  }
  part1 <- glob
  # part 2 helper: run the program; return NULL on an infinite loop, or the
  # accumulator once the instruction pointer steps past the last instruction.
  # (The original checked `i == nrow(clean_data)` AFTER incrementing, which
  # returned before executing the final instruction -- off by one.)
  test_code <- function(clean_data) {
    n <- nrow(clean_data)
    i <- 1
    glob <- 0
    repeat {
      # termination: pointer has moved beyond the last row
      if (i > n) return(glob)
      passes <- clean_data[i, "passes"] + 1
      clean_data[i, "passes"] <- passes
      if (passes == 2) return(NULL)  # instruction about to run twice: loop
      call <- clean_data[i, "call"]
      value <- clean_data[i, "value"]
      if (call == "acc") {
        glob <- glob + value
        i <- i + 1
      } else if (call == "jmp") {
        i <- i + value
      } else {
        i <- i + 1
      }
    }
  }
  # try swapping each jmp <-> nop in turn until the program terminates
  res <- NULL
  for (j in seq_len(nrow(clean_data))) {
    call <- clean_data[j, "call"]
    if (call == "acc") next
    clean_data_modif <- clean_data
    clean_data_modif[j, "call"] <- if (call == "jmp") "nop" else "jmp"
    res <- test_code(clean_data_modif)
    if (!is.null(res)) break
  }
  part2 <- res
  list(part1 = part1, part2 = part2)
}
|
#One-off analysis script: downloads the Scottish Government's cumulative
#COVID-19 case table per Health Board and derives daily cases and a 7-day
#rolling average per board, for plotting below.
#NOTE(review): rm(list=ls()) at the top of a script is discouraged; left as-is.
rm(list=ls())
library(tidyverse)
library(curl)
library(forcats)
library(readxl)
library(RcppRoll)
library(cowplot)
#Read in data
temp <- tempfile()
source <- "https://raw.githubusercontent.com/DataScienceScotland/COVID-19-Management-Information/master/COVID19%20-%20Daily%20Management%20Information%20-%20Scottish%20Health%20Boards%20-%20Cumulative%20cases.csv"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
#Drop row 136 and the last two columns (presumably a malformed row and
#non-board columns -- TODO confirm against the raw file)
data <- read.csv(temp)[-c(136),-c(16,17)]
#Remove blank rows
data <- data %>% filter_all(all_vars(complete.cases(.)))
data$Date <- as.Date(data$Date)
#Reshape to long format: one row per Health Board per date (columns 2:15
#are the 14 boards)
data_long <- gather(data, HB, cumul_cases, c(2:15))
#Column names arrive with dots for spaces; restore the board names
data_long$HB <- gsub("[.]", " ", data_long$HB)
#Treat supressed numbers as 0
data_long$cumul_cases <- as.numeric(ifelse(data_long$cumul_cases=="*", 0, data_long$cumul_cases))
#Calculate daily cases
data_long <- data_long %>%
arrange(HB, Date) %>%
group_by(HB) %>%
mutate(cases=cumul_cases-lag(cumul_cases,1))
#The first day per board has no lag value, so the difference is NA; treat as 0
data_long$cases <- ifelse(is.na(data_long$cases), 0, data_long$cases)
#Cases data is weirdly missing for 20th July, so assume there were 0 new cases in every HB on that day
#NOTE(review): times=14 sits inside as.Date() by mistake (it is silently
#ignored there); data.frame() recycling masks the bug, so the result is
#still one row per board.
temp <- data.frame(Date=rep(as.Date("2020-07-20", times=14)),
HB=unique(data_long$HB), cases=rep(0, times=14))
data_long <- bind_rows(data_long, temp)
#7-day (right-aligned) rolling average of new cases per board
heatmap <- data_long %>%
arrange(HB, Date) %>%
group_by(HB) %>%
mutate(casesroll_avg=roll_mean(cases, 7, align="right", fill=0))
#Since 15th June 2020, Pillar 2 cases are now included in the Scottish total,
#this means that all Pillar 2 cases *prior* to this date all show up on 15th June.
#To fix this for the time series we *could* redistribute these back across the time
#series, but easier just to leave them out and allocate the moving average from
#14th June as the number of new cases on 15th.
heatmap$cases <- if_else(heatmap$Date=="2020-06-15", lag(heatmap$casesroll_avg, 1),
heatmap$cases)
#Recalculate rolling average
#Also derive, per board: the peak rolling average, the date that peak was
#first reached, and cumulative/total case counts used for ordering and bars
heatmap <- heatmap %>%
group_by(HB) %>%
mutate(casesroll_avg=roll_mean(cases, 7, align="right", fill=0)) %>%
mutate(maxcaserate=max(casesroll_avg), maxcaseday=Date[which(casesroll_avg==maxcaserate)][1],
cumul_cases=cumsum(cases), totalcases=max(cumul_cases))
#Normalise each board's rolling average to its own peak (heatmap fill value)
heatmap$maxcaseprop <- heatmap$casesroll_avg/heatmap$maxcaserate
#Enter dates to plot from and to
plotfrom <- "2020-03-14"
plotto <- max(heatmap$Date)
#Plot case trajectories
# Heatmap of normalised case rates; boards ordered by date of their peak.
casetiles <- ggplot(heatmap, aes(x=Date, y=fct_reorder(HB, maxcaseday), fill=maxcaseprop))+
geom_tile(colour="White", show.legend=FALSE)+
# Vertical marker for 15th June, when Pillar 2 tests entered the series.
geom_segment(aes(x=as.Date("2020-06-15"), xend=as.Date("2020-06-15"), y=0.5, yend=14.5),
colour="grey20")+
annotate("text", x=as.Date("2020-06-15"), y=14.6, label="*", size=5)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
coord_cartesian(clip = 'off')+
labs(title="Timelines for COVID-19 cases in Scottish Health Boards",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases, normalised to the maximum value within the Health Board.\nBoards are ordered by the date at which they reached their peak number of new cases. Bars on the right represent the absolute number of cases in each Health Board.\nData since 15th June (denoted with an asterisk) has included additional tests conducted under the UK Government testing programme (Pillar 2).\nAs a result, data for the 15th June itself is estimated. Data updated to ", plotto,". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Scottish Government | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text.y=element_text(colour="Black"))
# Companion bar chart of total cases, same board ordering as the tiles.
casebars <- ggplot(subset(heatmap, Date==maxcaseday), aes(x=totalcases, y=fct_reorder(HB, maxcaseday), fill=totalcases))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed cases", breaks=c(0,1000, 2000, 3000, 4000))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDScottishLACasesHeatmap.tiff", units="in", width=12, height=5, res=500)
plot_grid(casetiles, casebars, align="h", rel_widths=c(1,0.2))
dev.off()
library(ggridges)
tiff("Outputs/COVIDScottishHBCaseRidges.tiff", units="in", width=12, height=5, res=500)
# Ridgeline version of the same rolling-average series, ordered by total cases.
ggplot(heatmap, aes(x=Date, y=fct_reorder(HB, totalcases), height=casesroll_avg, fill=casesroll_avg))+
geom_density_ridges_gradient(stat="identity", rel_min_height=0.001)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Cases per day\n7-day rolling avg.")+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
scale_y_discrete(name="")+
labs(title="Timelines of confirmed COVID-19 cases in Scottish Health Boards",
caption="Data from Scottish Government | Plot by @VictimOfMaths")
dev.off()
# Quick diagnostic facet plot (displayed only; not written to a file).
ggplot(heatmap)+
geom_col(aes(x=Date, y=cases, fill=cases))+
facet_wrap(~HB)+
scale_fill_distiller(palette="Spectral", name="Cases per day\n7-day rolling avg.")+
theme_classic()
#Download ICU data from https://www.gov.scot/publications/coronavirus-covid-19-trends-in-daily-data/
# NOTE(review): the URL below is pinned to the 7th July 2020 workbook and must
# be updated by hand, along with the cell range on the read_excel() call.
temp <- tempfile()
source <- "https://www.gov.scot/binaries/content/documents/govscot/publications/statistics/2020/04/coronavirus-covid-19-trends-in-daily-data/documents/covid-19-data-by-nhs-board/covid-19-data-by-nhs-board/govscot%3Adocument/COVID-19%2Bdata%2Bby%2BNHS%2BBoard%2B07%2BJuly%2B2020.xlsx"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
#Need to manually increment the numbers at the end of this range: 79 = 1st June
ICUdata <- read_excel(temp, sheet=4, range="A3:O115")
ICUdata_long <- gather(ICUdata, HB, cases, c(2:15))
#Treat suppressed ("*") counts as 0, as above.
ICUdata_long$cases <- as.numeric(ifelse(ICUdata_long$cases=="*", 0, ICUdata_long$cases))
ICUdata_long$Date <- as.Date(ICUdata_long$Date)
# Strip the leading 4 characters (the "NHS " prefix) from board names.
ICUdata_long$HB <- substr(ICUdata_long$HB, 5, 100)
# ICU occupancy is already a daily level, so a 5-day rolling mean is used and
# totalcases is a straight sum rather than a cumulative maximum.
ICUheatmap <- ICUdata_long %>%
arrange(HB, Date) %>%
group_by(HB) %>%
mutate(casesroll_avg=roll_mean(cases, 5, align="right", fill=0)) %>%
mutate(maxcaserate=max(casesroll_avg), maxcaseday=Date[which(casesroll_avg==maxcaserate)][1],
totalcases=sum(cases))
ICUheatmap$maxcaseprop <- ICUheatmap$casesroll_avg/ICUheatmap$maxcaserate
#Enter dates to plot from and to
ICUplotfrom <- "2020-03-14"
ICUplotto <- max(ICUheatmap$Date)
#Plot case trajectories
ICUcasetiles <- ggplot(ICUheatmap, aes(x=Date, y=fct_reorder(HB, maxcaseday), fill=maxcaseprop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral", na.value="White")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(ICUplotfrom, ICUplotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 ICU patients in Scottish Health Boards",
subtitle=paste0("The heatmap represents the 5-day rolling average of the number of ICU inpatients with confirmed or suspected COVID-19, normalised to the maximum value within the Health Board.\nBoards are ordered by the date at which they reached their peak number of ICU cases. Bars on the right represent the absolute number of ICU cases in each Health Board.\nData updated to ", ICUplotto,". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Scottish Government | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text.y=element_text(colour="Black"))
ICUcasebars <- ggplot(subset(ICUheatmap, Date==maxcaseday), aes(x=totalcases, y=fct_reorder(HB, maxcaseday), fill=totalcases))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed cases", breaks=c(0,1000, 2000, 3000))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDScottishLAICUHeatmap.tiff", units="in", width=12, height=5, res=500)
plot_grid(ICUcasetiles, ICUcasebars, align="h", rel_widths=c(1,0.2))
dev.off()
| /Heatmaps/Scottish HB Heatmaps.R | no_license | theosanderson/COVID-19 | R | false | false | 8,600 | r | rm(list=ls())
# Duplicate copy of the script above (this file appears twice in the dataset dump).
# NOTE(review): rm(list=ls()) precedes this block in the original file —
# clearing the global environment at the top of a script is discouraged.
library(tidyverse)
library(curl)
library(forcats)
library(readxl)
library(RcppRoll)
library(cowplot)
#Read in data
temp <- tempfile()
# Download and reshape cumulative cases (duplicate of the section above).
# NOTE(review): `source` shadows base::source() for the rest of this script.
source <- "https://raw.githubusercontent.com/DataScienceScotland/COVID-19-Management-Information/master/COVID19%20-%20Daily%20Management%20Information%20-%20Scottish%20Health%20Boards%20-%20Cumulative%20cases.csv"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
data <- read.csv(temp)[-c(136),-c(16,17)]
#Remove blank rows
data <- data %>% filter_all(all_vars(complete.cases(.)))
data$Date <- as.Date(data$Date)
data_long <- gather(data, HB, cumul_cases, c(2:15))
data_long$HB <- gsub("[.]", " ", data_long$HB)
#Treat supressed numbers as 0
data_long$cumul_cases <- as.numeric(ifelse(data_long$cumul_cases=="*", 0, data_long$cumul_cases))
#Calculate daily cases
data_long <- data_long %>%
arrange(HB, Date) %>%
group_by(HB) %>%
mutate(cases=cumul_cases-lag(cumul_cases,1))
data_long$cases <- ifelse(is.na(data_long$cases), 0, data_long$cases)
#Cases data is weirdly missing for 20th July, so assume there were 0 new cases in every HB on that day
# Fix: `times=14` was previously passed INSIDE as.Date() (where it was silently
# ignored and only data.frame() recycling produced 14 rows); it belongs to rep().
temp <- data.frame(Date=rep(as.Date("2020-07-20"), times=14),
                   HB=unique(data_long$HB), cases=rep(0, times=14))
data_long <- bind_rows(data_long, temp)
# Rolling averages and per-board peak statistics (duplicate of the section above).
heatmap <- data_long %>%
arrange(HB, Date) %>%
group_by(HB) %>%
mutate(casesroll_avg=roll_mean(cases, 7, align="right", fill=0))
#Since 15th June 2020, Pillar 2 cases are now included in the Scottish total,
#this means that all Pillar 2 cases *prior* to this date all show up on 15th June.
#To fix this for the time series we *could* redistribute these back across the time
#series, but easier just to leave them out and allocate the moving average from
#14th June as the number of new cases on 15th.
heatmap$cases <- if_else(heatmap$Date=="2020-06-15", lag(heatmap$casesroll_avg, 1),
heatmap$cases)
#Recalculate rolling average
heatmap <- heatmap %>%
group_by(HB) %>%
mutate(casesroll_avg=roll_mean(cases, 7, align="right", fill=0)) %>%
mutate(maxcaserate=max(casesroll_avg), maxcaseday=Date[which(casesroll_avg==maxcaserate)][1],
cumul_cases=cumsum(cases), totalcases=max(cumul_cases))
heatmap$maxcaseprop <- heatmap$casesroll_avg/heatmap$maxcaserate
#Enter dates to plot from and to
plotfrom <- "2020-03-14"
plotto <- max(heatmap$Date)
#Plot case trajectories
# Heatmap + bar chart + ridgeline plots (duplicate of the section above).
casetiles <- ggplot(heatmap, aes(x=Date, y=fct_reorder(HB, maxcaseday), fill=maxcaseprop))+
geom_tile(colour="White", show.legend=FALSE)+
geom_segment(aes(x=as.Date("2020-06-15"), xend=as.Date("2020-06-15"), y=0.5, yend=14.5),
colour="grey20")+
annotate("text", x=as.Date("2020-06-15"), y=14.6, label="*", size=5)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
coord_cartesian(clip = 'off')+
labs(title="Timelines for COVID-19 cases in Scottish Health Boards",
subtitle=paste0("The heatmap represents the 7-day rolling average of the number of new confirmed cases, normalised to the maximum value within the Health Board.\nBoards are ordered by the date at which they reached their peak number of new cases. Bars on the right represent the absolute number of cases in each Health Board.\nData since 15th June (denoted with an asterisk) has included additional tests conducted under the UK Government testing programme (Pillar 2).\nAs a result, data for the 15th June itself is estimated. Data updated to ", plotto,". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Scottish Government | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text.y=element_text(colour="Black"))
casebars <- ggplot(subset(heatmap, Date==maxcaseday), aes(x=totalcases, y=fct_reorder(HB, maxcaseday), fill=totalcases))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed cases", breaks=c(0,1000, 2000, 3000, 4000))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDScottishLACasesHeatmap.tiff", units="in", width=12, height=5, res=500)
plot_grid(casetiles, casebars, align="h", rel_widths=c(1,0.2))
dev.off()
library(ggridges)
tiff("Outputs/COVIDScottishHBCaseRidges.tiff", units="in", width=12, height=5, res=500)
ggplot(heatmap, aes(x=Date, y=fct_reorder(HB, totalcases), height=casesroll_avg, fill=casesroll_avg))+
geom_density_ridges_gradient(stat="identity", rel_min_height=0.001)+
theme_classic()+
scale_fill_distiller(palette="Spectral", name="Cases per day\n7-day rolling avg.")+
scale_x_date(name="Date", limits=as.Date(c(plotfrom, plotto)), expand=c(0,0))+
scale_y_discrete(name="")+
labs(title="Timelines of confirmed COVID-19 cases in Scottish Health Boards",
caption="Data from Scottish Government | Plot by @VictimOfMaths")
dev.off()
ggplot(heatmap)+
geom_col(aes(x=Date, y=cases, fill=cases))+
facet_wrap(~HB)+
scale_fill_distiller(palette="Spectral", name="Cases per day\n7-day rolling avg.")+
theme_classic()
#Download ICU data from https://www.gov.scot/publications/coronavirus-covid-19-trends-in-daily-data/
# ICU occupancy section (duplicate of the section above); URL and cell range
# are pinned to the 7th July 2020 workbook and must be updated by hand.
temp <- tempfile()
source <- "https://www.gov.scot/binaries/content/documents/govscot/publications/statistics/2020/04/coronavirus-covid-19-trends-in-daily-data/documents/covid-19-data-by-nhs-board/covid-19-data-by-nhs-board/govscot%3Adocument/COVID-19%2Bdata%2Bby%2BNHS%2BBoard%2B07%2BJuly%2B2020.xlsx"
temp <- curl_download(url=source, destfile=temp, quiet=FALSE, mode="wb")
#Need to manually increment the numbers at the end of this range: 79 = 1st June
ICUdata <- read_excel(temp, sheet=4, range="A3:O115")
ICUdata_long <- gather(ICUdata, HB, cases, c(2:15))
ICUdata_long$cases <- as.numeric(ifelse(ICUdata_long$cases=="*", 0, ICUdata_long$cases))
ICUdata_long$Date <- as.Date(ICUdata_long$Date)
ICUdata_long$HB <- substr(ICUdata_long$HB, 5, 100)
ICUheatmap <- ICUdata_long %>%
arrange(HB, Date) %>%
group_by(HB) %>%
mutate(casesroll_avg=roll_mean(cases, 5, align="right", fill=0)) %>%
mutate(maxcaserate=max(casesroll_avg), maxcaseday=Date[which(casesroll_avg==maxcaserate)][1],
totalcases=sum(cases))
ICUheatmap$maxcaseprop <- ICUheatmap$casesroll_avg/ICUheatmap$maxcaserate
#Enter dates to plot from and to
ICUplotfrom <- "2020-03-14"
ICUplotto <- max(ICUheatmap$Date)
#Plot case trajectories
ICUcasetiles <- ggplot(ICUheatmap, aes(x=Date, y=fct_reorder(HB, maxcaseday), fill=maxcaseprop))+
geom_tile(colour="White", show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral", na.value="White")+
scale_y_discrete(name="", expand=c(0,0))+
scale_x_date(name="Date", limits=as.Date(c(ICUplotfrom, ICUplotto)), expand=c(0,0))+
labs(title="Timelines for COVID-19 ICU patients in Scottish Health Boards",
subtitle=paste0("The heatmap represents the 5-day rolling average of the number of ICU inpatients with confirmed or suspected COVID-19, normalised to the maximum value within the Health Board.\nBoards are ordered by the date at which they reached their peak number of ICU cases. Bars on the right represent the absolute number of ICU cases in each Health Board.\nData updated to ", ICUplotto,". Data for most recent days is provisional and may be revised upwards as additional tests are processed."),
caption="Data from Scottish Government | Plot by @VictimOfMaths")+
theme(axis.line.y=element_blank(), plot.subtitle=element_text(size=rel(0.78)), plot.title.position="plot",
axis.text.y=element_text(colour="Black"))
ICUcasebars <- ggplot(subset(ICUheatmap, Date==maxcaseday), aes(x=totalcases, y=fct_reorder(HB, maxcaseday), fill=totalcases))+
geom_col(show.legend=FALSE)+
theme_classic()+
scale_fill_distiller(palette="Spectral")+
scale_x_continuous(name="Total confirmed cases", breaks=c(0,1000, 2000, 3000))+
theme(axis.title.y=element_blank(), axis.line.y=element_blank(), axis.text.y=element_blank(),
axis.ticks.y=element_blank(), axis.text.x=element_text(colour="Black"))
tiff("Outputs/COVIDScottishLAICUHeatmap.tiff", units="in", width=12, height=5, res=500)
plot_grid(ICUcasetiles, ICUcasebars, align="h", rel_widths=c(1,0.2))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catboost.R
\name{catboost.from_data_frame}
\alias{catboost.from_data_frame}
\title{Create pool from data frame}
\usage{
catboost.from_data_frame(data, target = NULL, weight = NULL,
baseline = NULL)
}
\arguments{
\item{data}{data.frame object}
\item{target}{Target vector}
\item{weight}{Optional weight vector (default \code{NULL})}
\item{baseline}{Optional baseline vector (default \code{NULL})}
}
\description{
Create pool from data frame
}
| /catboost/R-package/man/catboost.from_data_frame.Rd | permissive | kyper999/catboost_yandex | R | false | true | 399 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/catboost.R
\name{catboost.from_data_frame}
\alias{catboost.from_data_frame}
\title{Create pool from data frame}
\usage{
catboost.from_data_frame(data, target = NULL, weight = NULL,
baseline = NULL)
}
\arguments{
\item{data}{data.frame object}
\item{target}{Target vector}
\item{weight}{Optional weight vector (default \code{NULL})}
\item{baseline}{Optional baseline vector (default \code{NULL})}
}
\description{
Create pool from data frame
}
|
# Usage example for confInt_sTL() from the 'flood' package ("Estimated
# confidence intervals using sTL"), run on simulated seasonal GEV samples.
library(flood)
### Name: confInt_sTL
### Title: Estimated confidence intervals using sTL
### Aliases: confInt_sTL
### ** Examples
library("evd")   # provides rgev() for drawing GEV-distributed samples
# Seasonal observations of 80 years at one station:
x1 <- rgev(80, 2, 1, 0.2) # observations from season 1
x2 <- rgev(80, 3, 1, 0.3) # observations from season 2
confInt_sTL(x1=x1, x2=x2, p=0.95, alpha=0.05)
# Seasonal observations of 100 years at 4 stations:
x1 <- matrix(rgev(400, 2, 1, 0.3), ncol=4)
x2 <- matrix(rgev(400, 4, 1, 0.2), ncol=4)
confInt_sTL(x1=x1, x2=x2, j=2, p=0.95, alpha=0.05)
| /data/genthat_extracted_code/flood/examples/confInt_sTL.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 550 | r | library(flood)
### Name: confInt_sTL
### Title: Estimated confidence intervals using sTL
### Aliases: confInt_sTL
### ** Examples
# Duplicate copy of the example above (dataset dump repeats each file);
# the leading library(flood) line sits on the fused metadata row.
library("evd")
# Seasonal observations of 80 years at one station:
x1 <- rgev(80, 2, 1, 0.2) # observations from season 1
x2 <- rgev(80, 3, 1, 0.3) # observations from season 2
confInt_sTL(x1=x1, x2=x2, p=0.95, alpha=0.05)
# Seasonal observations of 100 years at 4 stations:
x1 <- matrix(rgev(400, 2, 1, 0.3), ncol=4)
x2 <- matrix(rgev(400, 4, 1, 0.2), ncol=4)
confInt_sTL(x1=x1, x2=x2, j=2, p=0.95, alpha=0.05)
|
# plot1.R: draw a histogram of household global active power and save it to
# plot1.png. The data-loading logic lives in a helper function defined in
# "loadData.R", which is committed alongside this script in the repository.
# Bring the helper (and its configuration) into scope.
source("loadData.R")
# Construct the dataset with the helper function.
dataset <- loadData()
# Open the PNG graphics device.
png(file = "plot1.png")
hist(dataset$Global_active_power, col="red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
# Close the device (dev.off() follows).
dev.off() | /plot1.R | no_license | josewrodgers/ExData_Plotting1 | R | false | false | 507 | r | #I create a Function and I locate thisone in the source.
# Duplicate copy of plot1.R (dataset dump repeats each file); the data-loading
# helper is defined in "loadData.R", committed alongside this script.
# Bring the helper (and its configuration) into scope.
source("loadData.R")
# Construct the dataset with the helper function.
dataset <- loadData()
# Open the PNG graphics device.
png(file = "plot1.png")
hist(dataset$Global_active_power, col="red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
# Close the device (dev.off() follows).
dev.off() |
# Demonstrates the cost of growing a vector inside a loop: every c() call
# reallocates and copies the whole vector, so the loop is O(x^2) overall.
#
# Args:
#   x: number of iterations (non-negative integer).
# Returns: (invisibly) the built vector c(NA, 1, ..., x). The original
#   returned NULL; timing callers that ignore the result are unaffected.
loop_dynamic <- function(x) {
  a <- NA
  # seq_len() (not 1:x) so x = 0 performs zero iterations instead of c(1, 0)
  for (i in seq_len(x)) {
    a <- c(a, i)  # deliberate anti-pattern: append reallocates each pass
  }
  invisible(a)
}
# Same fill as loop_dynamic but with the result vector preallocated, so each
# iteration is a constant-time in-place write (O(x) overall).
#
# Args:
#   x: number of iterations (non-negative integer).
# Returns: (invisibly) the vector 1, 2, ..., x (length 0 when x = 0). The
#   original returned NULL; timing callers that ignore the result are unaffected.
loop_prealloc <- function(x) {
  a <- rep(NA, x)  # reserve the full length up front
  # seq_len() fixes the original 1:x bug: at x = 0 it looped over c(1, 0)
  # and grew the empty vector, defeating the preallocation being demonstrated.
  for (i in seq_len(x)) {
    a[i] <- i
  }
  invisible(a)
}
# Timing comparison: growing the vector dynamically vs. preallocating it.
print("Without pre-allocating memory space, the time taken is:")
print(system.time(loop_dynamic(10000)))
print("When using pre-allocation, the time taken is:")
print(system.time(loop_prealloc(10000))) | /Week3/Code/preallocate.R | no_license | emersonff/CMEECourseWork | R | false | false | 525 | r |
loop_dynamic <-function(x){##relocate memory every iteration
# Duplicate copy (dataset dump repeats each file). c(a, i) copies the whole
# vector on every iteration (O(x^2) total); the cost is the point of the demo.
a <- NA
for (i in 1:x) {
a <- c(a, i)
# print(a)
#print(object.size(a))
}
}
loop_prealloc <- function(x){
a <- rep(NA, x)##preallocate memory space for vector a
# NOTE(review): `1:x` iterates over c(1, 0) when x = 0; seq_len(x) is safer.
for (i in 1:x) {
a[i] <- i
#print(a)
#print(object.size(a))
}
}
# Timing comparison (duplicate copy): dynamic growth vs. preallocation.
print("Without pre-allocating memory space, the time taken is:")
print(system.time(loop_dynamic(10000)))
print("When using pre-allocation, the time taken is:")
print(system.time(loop_prealloc(10000))) |
# Regression-test input for the internal multivariance:::match_rows:
# A is a 1x7 matrix of extreme double values, B is a 1x1 matrix.
testlist <- list(A = structure(c(2.31584307392256e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the unexported function with the list elements as arguments.
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613108594-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 257 | r | testlist <- list(A = structure(c(2.31584307392256e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
# Collaborative Social Sience Data - Pair Assignment 3
# Data
library(countrycode)
library(WDI)
library(plyr)
library(reshape2)
library(zoo)
# Set Working Directory
# Try both collaborators' local paths; whichever exists wins silently.
try(setwd("/Users/Lukas/Documents/Git/PairAssignment3/PairAssignment3_Data"),silent=TRUE)
try(setwd("C:/Users/Dani/Documents/GitHub2/PairAssignment3/PairAssignment3_Data"),silent=TRUE)
getwd()
# natural disasters
# NOTE(review): the stray positional "." argument lands on read.csv's `quote`
# parameter (file, header, sep, quote, ...) — probably meant dec="." instead.
dis <- read.csv("disaster19912015.csv", header = FALSE, sep = ",", ".", stringsAsFactors = FALSE, na.strings = c("", "NA"))
names(dis)[1] <- 'year'
names(dis)[2] <- 'disaster'
names(dis)[3] <- 'iso3c'
names(dis)[4] <- 'country'
names(dis)[5] <- 'occurrence'
names(dis)[6] <- 'deaths'
names(dis)[7] <- 'affected'
names(dis)[8] <- 'injured'
names(dis)[9] <- 'homeless'
names(dis)[10] <- 'total.affected'
names(dis)[11] <- 'total.damage'
# Drop the two header rows that were read as data (header = FALSE).
dis <- dis[-1,]
dis <- dis[-1,]
# Keep only year / disaster / country / occurrence.
dis$iso3c <- NULL
dis$deaths <- NULL
dis$affected <- NULL
dis$injured <- NULL
dis$homeless <- NULL
dis$total.affected <- NULL
dis$total.damage <- NULL
dis$country <- gsub("[^a-zA-Z0-9]","",dis$country) #get rid of special characters
# Re-expand the stripped names to canonical country labels.
dis$country[dis$country=="AzoresIslands"] <- "Azores"
dis$country[dis$country=="CanaryIs"] <- "Canary Islands"
dis$country[dis$country=="CentralAfricanRepublic"] <- "Central African Republic"
dis$country[dis$country=="LaoPeoplesDemocraticRepublicthe"] <- "Laos"
dis$country[dis$country=="Runion"] <- "Reunion"
dis$country[dis$country=="SaintLucia"] <- "Saint Lucia"
dis$country[dis$country=="SerbiaMontenegro"] <- "Serbia"
dis$country[dis$country=="VirginIslandUS"] <- "Virgin Island US"
dis$country[dis$country=="CongotheDemocraticRepublicofthe"] <- "Democratic republic of the Congo"
dis$country[dis$country=="Congothe"] <- "Republic of the Congo"
dis$disaster <- gsub("[^a-zA-Z0-9]","",dis$disaster) #get rid of special characters
dis$occurrence <- as.numeric(dis$occurrence)
dis <- dis[,c(1,3,2,4)]
# These two rows are printed for inspection, then dropped by hard-coded row
# number — brittle if the input file changes.
dis[91, ] #delete GermanyFedRep
dis <- dis[-c(91), ]
dis[793, ] #delete NetherlandsAntilles
dis <- dis[-c(793), ]
# Long -> wide: one column per disaster type, summed occurrences.
aggrtdis <- dcast(dis, country + year ~ disaster, sum) #p317 R for Dummies
disastercc <- aggrtdis$country
aggrtdis$iso2c <- countrycode(disastercc, "country.name", "iso2c")
# Dropping incomplete rows also removes countries countrycode could not match.
aggrtdis <- aggrtdis[complete.cases(aggrtdis),]
aggrtdis$Animalaccident <- NULL
aggrtdis$Extremetemperature <- NULL
aggrtdis$Insectinfestation <- NULL
aggrtdis$Massmovementdry <- NULL
aggrtdis$Volcanicactivity <- NULL
aggrtdis$Impact <- NULL
# Total count across the seven retained disaster types.
aggrtdis$Disaster <- (aggrtdis$Drought + aggrtdis$Earthquake + aggrtdis$Epidemic + aggrtdis$Flood + aggrtdis$Landslide + aggrtdis$Storm + aggrtdis$Wildfire)
rm(dis, disastercc)
# main refinancing operation (ECB)
MRO <- read.csv("MainRefinancingOperations.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
# Drop the five metadata/header rows exported above the data.
MRO <- MRO[-1,]
MRO <- MRO[-1,]
MRO <- MRO[-1,]
MRO <- MRO[-1,]
MRO <- MRO[-1,]
names(MRO)[1] <- 'time'
names(MRO)[2] <- 'ECB.MRO'
MRO$ECB.MRO <- as.numeric(MRO$ECB.MRO)
# Collapse daily dates to year-quarters (zoo::as.yearqtr).
MRO$Date <- as.yearqtr(MRO$time, format = "%Y-%m-%d")
# NOTE(review): this format() result is discarded — the line is a no-op.
format(MRO$Date, format = "%y/0%q")
MRO$Date <- gsub("[^a-zA-Z0-9]","",MRO$Date) #get rid of special characters
# deposit facility (ECB) — same pattern as MRO above.
deposit <- read.csv("DepositFacility.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
deposit <- deposit[-1,]
deposit <- deposit[-1,]
deposit <- deposit[-1,]
deposit <- deposit[-1,]
deposit <- deposit[-1,]
names(deposit)[1] <- 'time'
names(deposit)[2] <- 'ECB.depofacil'
deposit$ECB.depofacil <- as.numeric(deposit$ECB.depofacil)
deposit$Date <- as.yearqtr(deposit$time, format = "%Y-%m-%d")
# NOTE(review): discarded format() result — no-op, as above.
format(deposit$Date, format = "%y/0%q")
deposit$Date <- gsub("[^a-zA-Z0-9]","",deposit$Date) #get rid of special characters
# quarterly GDP growth (OECD)
GDPq <- read.csv("QNA_06042016113157540.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
GDPq <- GDPq[-1,]
# Flag rows using the GPSA measure (growth rate, seasonally adjusted code).
GDPq$GPSA <- 0
GDPq$GPSA[which(GDPq$V5=="GPSA")] <- 1
# Flag rows for the GDP expenditure-approach subject.
GDPq$GDP <- 0
GDPq$GDP[which(GDPq$V4=="Gross domestic product - expenditure approach")] <- 1
# Keep only the year part of the "YYYY-Qx" period string.
GDPq$year <- GDPq$V9
GDPq$year <- gsub("\\-.*","",GDPq$year)
names(GDPq)[1] <- 'iso3c'
names(GDPq)[2] <- 'country'
# Drop the unused OECD export columns.
GDPq$V3 <- NULL
GDPq$V6 <- NULL
GDPq$V7 <- NULL
GDPq$V8 <- NULL
GDPq$V10 <- NULL
GDPq$V11 <- NULL
GDPq$V12 <- NULL
GDPq$V13 <- NULL
GDPq$V14 <- NULL
GDPq$V15 <- NULL
GDPq$V16 <- NULL
GDPq$V18 <- NULL
GDPq$V19 <- NULL
names(GDPq)[5] <- 'Date'
GDPq$Date <- gsub("[^a-zA-Z0-9]","",GDPq$Date) #get rid of special characters
names(GDPq)[6] <- 'GDPq.gr'
# Keep only rows matching both flags.
sub <- subset(GDPq, GPSA > 0)
GPSA <- subset(sub, GDP > 0)
rm(GDPq, sub)
# consumption spending
consume <- read.csv("QNA_06042016174850637.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
consume <- consume[-1,]
names(consume)[1] <- 'iso3c'
names(consume)[2] <- 'country'
# Flag rows for private final consumption expenditure ...
consume$type <- 0
consume$type[which(consume$V4=="Private final consumption expenditure by durability")] <- 1
# ... measured with the CQRSA computation code.
consume$computation <- 0
consume$computation[which(consume$V5=="CQRSA")] <- 1
# Drop the unused OECD export columns.
consume$V3 <- NULL
consume$V5 <- NULL
consume$V6 <- NULL
consume$V7 <- NULL
consume$V8 <- NULL
consume$V10 <- NULL
consume$V12 <- NULL
consume$V13 <- NULL
consume$V14 <- NULL
consume$V15 <- NULL
consume$V16 <- NULL
consume$V18 <- NULL
consume$V19 <- NULL
names(consume)[3] <- 'consumption.type'
names(consume)[4] <- 'Date'
consume$Date <- gsub("[^a-zA-Z0-9]","",consume$Date) #get rid of special characters
names(consume)[5] <- 'currency'
names(consume)[6] <- 'consumption.spending'
# Keep only rows matching both flags.
sub <- subset(consume, type > 0)
prvconsm <- subset(sub, computation > 0)
rm(consume, sub)
# total rate of unemployment (OECD)
unempl <- read.csv("Unemployment.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
unempl <- unempl[-1,]
names(unempl)[1] <- 'iso3c'
# Drop the unused export columns, keeping country code, period, and rate.
unempl$V2 <- NULL
unempl$V3 <- NULL
unempl$V4 <- NULL
unempl$V5 <- NULL
unempl$V8 <- NULL
names(unempl)[2] <- 'Date'
unempl$Date <- gsub("[^a-zA-Z0-9]","",unempl$Date) #get rid of special characters
names(unempl)[3] <- 'unempl'
# Yahoo Finance historical quotes for six stock indices, collapsed to yearly
# means. Each frame has columns: year, <PREFIX>.Open/.High/.Low/.Close/
# .Volume/.Adj.Close — identical to the previous six copy-pasted blocks,
# now factored into one helper.
# NOTE(review): these real-chart Yahoo endpoints have been retired, so the
# downloads will fail until the URLs are replaced with a working source.
#
# Downloads one index's daily CSV and averages every value column by year.
#   url:    Yahoo Finance table.csv download URL
#   prefix: tag used to prefix the value columns (e.g. "DAX")
fetch_index <- function(url, prefix) {
  raw <- read.csv(url)
  colnames(raw) <- paste(prefix, colnames(raw), sep = ".")
  names(raw)[1] <- "year"
  # keep only the year from the "YYYY-MM-DD" date string
  raw$year <- as.character(raw$year)
  raw$year <- substring(raw$year, 1, nchar(raw$year) - 6)
  out <- ddply(raw, .(year), function(d) colMeans(d[, -1]))
  out$year <- as.numeric(out$year)
  out
}
# Germany
DAX <- fetch_index("http://real-chart.finance.yahoo.com/table.csv?s=%5EGDAXI&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv", "DAX")
# Japan
NIKKEI <- fetch_index("http://real-chart.finance.yahoo.com/table.csv?s=%5EN225&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv", "NIK")
# UK
FTSE <- fetch_index("http://real-chart.finance.yahoo.com/table.csv?s=%5EFTSE&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv", "FTSE")
# France
CAC <- fetch_index("http://real-chart.finance.yahoo.com/table.csv?s=%5EFCHI&a=00&b=1&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv", "CAC")
# Brasil
IBOV <- fetch_index("http://real-chart.finance.yahoo.com/table.csv?s=%5EBVSP&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv", "IBOV")
# Russia (this series only starts in 1992)
OMRU <- fetch_index("http://real-chart.finance.yahoo.com/table.csv?s=OMRU.EX&a=00&b=01&c=1992&d=02&e=21&f=2016&g=d&ignore=.csv", "OMRU")
#World Bank Development Indicators
# NOTE(review): the WDI result is assigned over the WDI() function's name,
# shadowing the package function for the rest of the script.
WDI <- WDI(country = "all", indicator = c("CM.MKT.TRAD.GD.ZS", #Stocks traded, total value (% of GDP) (4th column)
"BN.KLT.DINV.CD.ZS", #Foreign direct investment (% of GDP)
"CM.MKT.TRNR", #Stocks traded, turnover ratio (%)
"NY.GDP.PCAP.KD.ZG", #GDP per capita growth (annual %)
"NEGDIKSTKKD", #Estimated Capital stock (real 2005 US$)
"NY.GDP.MKTP.KD.ZG"), #GDP growth (annual %)
start=1991, end=2015)
# Quick missingness checks (counts recorded in the trailing comments).
summary(WDI$CM.MKT.TRAD.GD.ZS) #4339 NA's
summary(WDI$BN.KLT.DINV.CD.ZS) #5996
summary(WDI$CM.MKT.TRNR) # 4313 NA's
summary(WDI$NY.GDP.PCAP.KD.ZG) # 945 NA's
summary(WDI$NY.GDP.MKTP.KD.ZG) # 942 NA's
names(WDI)[4] <- 'stocks'
names(WDI)[5] <- 'fdi'
names(WDI)[6] <- 'sturnover'
names(WDI)[7] <- 'gdp.pc.gr'
names(WDI)[8] <- 'gdp.gr'
# Regions and renamed countries are removed by hard-coded row ranges —
# brittle if the API's row ordering changes.
WDI[1:213, ] #delete non-countries (a.k.a. regions)
WDI <- WDI[-c(1:213), ]
WDI[1126:1147, ] #delete Cape Verde, it's called Cabo Verde now (both have same iso2c)
WDI <- WDI[-c(1126:1147), ]
# Drop remaining aggregate pseudo-countries by their iso2c codes.
sub <- subset(WDI, iso2c != "B8")
sub <- subset(sub, iso2c != "F1")
sub <- subset(sub, iso2c != "S1")
sub <- subset(sub, iso2c != "S2")
sub <- subset(sub, iso2c != "S3")
sub <- subset(sub, iso2c != "S4")
sub <- subset(sub, iso2c != "Z4")
WDI <- subset(sub, iso2c != "Z7")
rm(sub)
# merge the data sets
# Left-join everything onto the WDI country-year panel, then onto the
# quarterly series (by iso3c + Date).
merge1 <- merge(WDI,aggrtdis,by=c("iso2c", "year"), all.x = TRUE)
merge2 <- merge(merge1,CAC,by=c("year"), all.x = TRUE)
merge3 <- merge(merge2,DAX,by=c("year"), all.x = TRUE)
merge4 <- merge(merge3,FTSE,by=c("year"), all.x = TRUE)
merge5 <- merge(merge4,IBOV,by=c("year"), all.x = TRUE)
merge6 <- merge(merge5,NIKKEI,by=c("year"), all.x = TRUE)
merge7 <- merge(merge6,OMRU,by=c("year"), all.x = TRUE)
rm(WDI, aggrtdis, CAC, DAX, FTSE, IBOV, OMRU, NIKKEI, merge1, merge2, merge3, merge4, merge5, merge6)
# Drop the duplicated country column produced by the join and restore the name.
merge7$country.y <- NULL
names(merge7)[3] <- 'country'
merge8 <- merge(GPSA,merge7,by=c("country", "year"), all.x = TRUE)
rm(merge7, GPSA)
merge9 <- merge(merge8,unempl,by=c("iso3c", "Date"), all.x = TRUE)
rm(merge8, unempl)
merge10 <- merge(merge9,prvconsm,by=c("iso3c", "Date"), all.x = TRUE)
merge10$country.y <- NULL
names(merge10)[3] <- 'country'
rm(merge9, prvconsm)
#merge11 <- merge(merge10,deposit,by=c("Date"), all.x = TRUE)
#rm(deposit, merge10)
#merge12 <- merge(merge11,MRO,by=c("Date"), all.x = TRUE) #396 observations too much
#rm(MRO, merge11)
# creating percentage changes
# NOTE(review): this call cannot run as written — `merge12` is never created
# (its construction above is commented out; `merge10` is the last live frame),
# no package providing change() is loaded (presumably DataCombine — verify),
# and Var = 'B' looks like a leftover placeholder column name.
Out <- change(merge12, Var = 'B',
type = 'proportion',
NewVar = 'PercentChange',
slideBy = -2)
| /PairAssignment3_Code/Y2_SS_Collaborative_Session03.R | no_license | DanielLimberg/PairAssignment3 | R | false | false | 12,351 | r | # Collaborative Social Sience Data - Pair Assignment 3
# Data
library(countrycode)
library(WDI)
library(plyr)
library(reshape2)
library(zoo)
# Set Working Directory
try(setwd("/Users/Lukas/Documents/Git/PairAssignment3/PairAssignment3_Data"),silent=TRUE)
try(setwd("C:/Users/Dani/Documents/GitHub2/PairAssignment3/PairAssignment3_Data"),silent=TRUE)
getwd()
# natural disasters
dis <- read.csv("disaster19912015.csv", header = FALSE, sep = ",", ".", stringsAsFactors = FALSE, na.strings = c("", "NA"))
names(dis)[1] <- 'year'
names(dis)[2] <- 'disaster'
names(dis)[3] <- 'iso3c'
names(dis)[4] <- 'country'
names(dis)[5] <- 'occurrence'
names(dis)[6] <- 'deaths'
names(dis)[7] <- 'affected'
names(dis)[8] <- 'injured'
names(dis)[9] <- 'homeless'
names(dis)[10] <- 'total.affected'
names(dis)[11] <- 'total.damage'
dis <- dis[-1,]
dis <- dis[-1,]
dis$iso3c <- NULL
dis$deaths <- NULL
dis$affected <- NULL
dis$injured <- NULL
dis$homeless <- NULL
dis$total.affected <- NULL
dis$total.damage <- NULL
dis$country <- gsub("[^a-zA-Z0-9]","",dis$country) #get rid of special characters
dis$country[dis$country=="AzoresIslands"] <- "Azores"
dis$country[dis$country=="CanaryIs"] <- "Canary Islands"
dis$country[dis$country=="CentralAfricanRepublic"] <- "Central African Republic"
dis$country[dis$country=="LaoPeoplesDemocraticRepublicthe"] <- "Laos"
dis$country[dis$country=="Runion"] <- "Reunion"
dis$country[dis$country=="SaintLucia"] <- "Saint Lucia"
dis$country[dis$country=="SerbiaMontenegro"] <- "Serbia"
dis$country[dis$country=="VirginIslandUS"] <- "Virgin Island US"
dis$country[dis$country=="CongotheDemocraticRepublicofthe"] <- "Democratic republic of the Congo"
dis$country[dis$country=="Congothe"] <- "Republic of the Congo"
dis$disaster <- gsub("[^a-zA-Z0-9]","",dis$disaster) #get rid of special characters
dis$occurrence <- as.numeric(dis$occurrence)
dis <- dis[,c(1,3,2,4)]
dis[91, ] #delete GermanyFedRep
dis <- dis[-c(91), ]
dis[793, ] #delete NetherlandsAntilles
dis <- dis[-c(793), ]
aggrtdis <- dcast(dis, country + year ~ disaster, sum) #p317 R for Dummies
disastercc <- aggrtdis$country
aggrtdis$iso2c <- countrycode(disastercc, "country.name", "iso2c")
aggrtdis <- aggrtdis[complete.cases(aggrtdis),]
aggrtdis$Animalaccident <- NULL
aggrtdis$Extremetemperature <- NULL
aggrtdis$Insectinfestation <- NULL
aggrtdis$Massmovementdry <- NULL
aggrtdis$Volcanicactivity <- NULL
aggrtdis$Impact <- NULL
aggrtdis$Disaster <- (aggrtdis$Drought + aggrtdis$Earthquake + aggrtdis$Epidemic + aggrtdis$Flood + aggrtdis$Landslide + aggrtdis$Storm + aggrtdis$Wildfire)
rm(dis, disastercc)
# main refinancing operation (ECB)
MRO <- read.csv("MainRefinancingOperations.csv",
                header = FALSE, sep = ",",
                stringsAsFactors = FALSE, na.strings = c("", "NA"))
# The first five rows are header/metadata, not observations.
MRO <- MRO[-(1:5), ]
names(MRO)[1] <- "time"
names(MRO)[2] <- "ECB.MRO"
MRO$ECB.MRO <- as.numeric(MRO$ECB.MRO)
# Convert daily dates to year-quarter labels, then strip punctuation
# so e.g. "2001 Q1" becomes "2001Q1" for merging on Date.
MRO$Date <- as.yearqtr(MRO$time, format = "%Y-%m-%d")
format(MRO$Date, format = "%y/0%q")
MRO$Date <- gsub("[^a-zA-Z0-9]", "", MRO$Date)
# deposit facility (ECB)
deposit <- read.csv("DepositFacility.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
deposit <- deposit[-1,]
deposit <- deposit[-1,]
deposit <- deposit[-1,]
deposit <- deposit[-1,]
deposit <- deposit[-1,]
names(deposit)[1] <- 'time'
names(deposit)[2] <- 'ECB.depofacil'
deposit$ECB.depofacil <- as.numeric(deposit$ECB.depofacil)
deposit$Date <- as.yearqtr(deposit$time, format = "%Y-%m-%d")
format(deposit$Date, format = "%y/0%q")
deposit$Date <- gsub("[^a-zA-Z0-9]","",deposit$Date) #get rid of special characters
# quarterly GDP growth (OECD)
GDPq <- read.csv("QNA_06042016113157540.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
GDPq <- GDPq[-1,]
GDPq$GPSA <- 0
GDPq$GPSA[which(GDPq$V5=="GPSA")] <- 1
GDPq$GDP <- 0
GDPq$GDP[which(GDPq$V4=="Gross domestic product - expenditure approach")] <- 1
GDPq$year <- GDPq$V9
GDPq$year <- gsub("\\-.*","",GDPq$year)
names(GDPq)[1] <- 'iso3c'
names(GDPq)[2] <- 'country'
GDPq$V3 <- NULL
GDPq$V6 <- NULL
GDPq$V7 <- NULL
GDPq$V8 <- NULL
GDPq$V10 <- NULL
GDPq$V11 <- NULL
GDPq$V12 <- NULL
GDPq$V13 <- NULL
GDPq$V14 <- NULL
GDPq$V15 <- NULL
GDPq$V16 <- NULL
GDPq$V18 <- NULL
GDPq$V19 <- NULL
names(GDPq)[5] <- 'Date'
GDPq$Date <- gsub("[^a-zA-Z0-9]","",GDPq$Date) #get rid of special characters
names(GDPq)[6] <- 'GDPq.gr'
sub <- subset(GDPq, GPSA > 0)
GPSA <- subset(sub, GDP > 0)
rm(GDPq, sub)
# consumption spending
consume <- read.csv("QNA_06042016174850637.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
consume <- consume[-1,]
names(consume)[1] <- 'iso3c'
names(consume)[2] <- 'country'
consume$type <- 0
consume$type[which(consume$V4=="Private final consumption expenditure by durability")] <- 1
consume$computation <- 0
consume$computation[which(consume$V5=="CQRSA")] <- 1
consume$V3 <- NULL
consume$V5 <- NULL
consume$V6 <- NULL
consume$V7 <- NULL
consume$V8 <- NULL
consume$V10 <- NULL
consume$V12 <- NULL
consume$V13 <- NULL
consume$V14 <- NULL
consume$V15 <- NULL
consume$V16 <- NULL
consume$V18 <- NULL
consume$V19 <- NULL
names(consume)[3] <- 'consumption.type'
names(consume)[4] <- 'Date'
consume$Date <- gsub("[^a-zA-Z0-9]","",consume$Date) #get rid of special characters
names(consume)[5] <- 'currency'
names(consume)[6] <- 'consumption.spending'
sub <- subset(consume, type > 0)
prvconsm <- subset(sub, computation > 0)
rm(consume, sub)
# total rate of unemployment (OECD)
unempl <- read.csv("Unemployment.csv", header = FALSE, sep = ",", stringsAsFactors = FALSE, na.strings = c("", "NA"))
unempl <- unempl[-1,]
names(unempl)[1] <- 'iso3c'
unempl$V2 <- NULL
unempl$V3 <- NULL
unempl$V4 <- NULL
unempl$V5 <- NULL
unempl$V8 <- NULL
names(unempl)[2] <- 'Date'
unempl$Date <- gsub("[^a-zA-Z0-9]","",unempl$Date) #get rid of special characters
names(unempl)[3] <- 'unempl'
# Germany
URL <- "http://real-chart.finance.yahoo.com/table.csv?s=%5EGDAXI&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv"
DAX <- read.csv(URL)
colnames(DAX) <- paste("DAX", colnames(DAX), sep = ".")
names(DAX)[1] <- 'year'
DAX$year <- as.character(DAX$year)
DAX$year <- substring(DAX$year,1,nchar(DAX$year)-6)
DAX <- ddply(DAX, .(year), function(DAX) c(DAX.Open=mean(DAX$DAX.Open), DAX.High=mean(DAX$DAX.High), DAX.Low=mean(DAX$DAX.Low), DAX.Close=mean(DAX$DAX.Close), DAX.Volume=mean(DAX$DAX.Volume), DAX.Adj.Close=mean(DAX$DAX.Adj.Close)))
DAX$year <- as.numeric(DAX$year)
# Japan
URL <- "http://real-chart.finance.yahoo.com/table.csv?s=%5EN225&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv"
NIKKEI <- read.csv(URL)
colnames(NIKKEI) <- paste("NIK", colnames(NIKKEI), sep = ".")
names(NIKKEI)[1] <- 'year'
NIKKEI$year <- as.character(NIKKEI$year)
NIKKEI$year <- substring(NIKKEI$year,1,nchar(NIKKEI$year)-6)
NIKKEI <- ddply(NIKKEI, .(year), function(NIKKEI) c(NIK.Open=mean(NIKKEI$NIK.Open), NIK.High=mean(NIKKEI$NIK.High), NIK.Low=mean(NIKKEI$NIK.Low), NIK.Close=mean(NIKKEI$NIK.Close), NIK.Volume=mean(NIKKEI$NIK.Volume), NIK.Adj.Close=mean(NIKKEI$NIK.Adj.Close)))
NIKKEI$year <- as.numeric(NIKKEI$year)
# UK
URL <- "http://real-chart.finance.yahoo.com/table.csv?s=%5EFTSE&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv"
FTSE <- read.csv(URL)
colnames(FTSE) <- paste("FTSE", colnames(FTSE), sep = ".")
names(FTSE)[1] <- 'year'
FTSE$year <- as.character(FTSE$year)
FTSE$year <- substring(FTSE$year,1,nchar(FTSE$year)-6)
FTSE <- ddply(FTSE, .(year), function(FTSE) c(FTSE.Open=mean(FTSE$FTSE.Open), FTSE.High=mean(FTSE$FTSE.High), FTSE.Low=mean(FTSE$FTSE.Low), FTSE.Close=mean(FTSE$FTSE.Close), FTSE.Volume=mean(FTSE$FTSE.Volume), FTSE.Adj.Close=mean(FTSE$FTSE.Adj.Close)))
FTSE$year <- as.numeric(FTSE$year)
# France
URL <- "http://real-chart.finance.yahoo.com/table.csv?s=%5EFCHI&a=00&b=1&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv"
CAC <- read.csv(URL)
colnames(CAC) <- paste("CAC", colnames(CAC), sep = ".")
names(CAC)[1] <- 'year'
CAC$year <- as.character(CAC$year)
CAC$year <- substring(CAC$year,1,nchar(CAC$year)-6)
CAC <- ddply(CAC, .(year), function(CAC) c(CAC.Open=mean(CAC$CAC.Open), CAC.High=mean(CAC$CAC.High), CAC.Low=mean(CAC$CAC.Low), CAC.Close=mean(CAC$CAC.Close), CAC.Volume=mean(CAC$CAC.Volume), CAC.Adj.Close=mean(CAC$CAC.Adj.Close)))
CAC$year <- as.numeric(CAC$year)
# Brasil
URL <- "http://real-chart.finance.yahoo.com/table.csv?s=%5EBVSP&a=00&b=01&c=1991&d=02&e=21&f=2016&g=d&ignore=.csv"
IBOV <- read.csv(URL)
colnames(IBOV) <- paste("IBOV", colnames(IBOV), sep = ".")
names(IBOV)[1] <- 'year'
IBOV$year <- as.character(IBOV$year)
IBOV$year <- substring(IBOV$year,1,nchar(IBOV$year)-6)
IBOV <- ddply(IBOV, .(year), function(IBOV) c(IBOV.Open=mean(IBOV$IBOV.Open), IBOV.High=mean(IBOV$IBOV.High), IBOV.Low=mean(IBOV$IBOV.Low), IBOV.Close=mean(IBOV$IBOV.Close), IBOV.Volume=mean(IBOV$IBOV.Volume), IBOV.Adj.Close=mean(IBOV$IBOV.Adj.Close)))
IBOV$year <- as.numeric(IBOV$year)
# Russia
URL <- "http://real-chart.finance.yahoo.com/table.csv?s=OMRU.EX&a=00&b=01&c=1992&d=02&e=21&f=2016&g=d&ignore=.csv"
OMRU <- read.csv(URL)
colnames(OMRU) <- paste("OMRU", colnames(OMRU), sep = ".")
names(OMRU)[1] <- 'year'
OMRU$year <- as.character(OMRU$year)
OMRU$year <- substring(OMRU$year,1,nchar(OMRU$year)-6)
OMRU <- ddply(OMRU, .(year), function(OMRU) c(OMRU.Open=mean(OMRU$OMRU.Open), OMRU.High=mean(OMRU$OMRU.High), OMRU.Low=mean(OMRU$OMRU.Low), OMRU.Close=mean(OMRU$OMRU.Close), OMRU.Volume=mean(OMRU$OMRU.Volume), OMRU.Adj.Close=mean(OMRU$OMRU.Adj.Close)))
OMRU$year <- as.numeric(OMRU$year)
rm(URL)
#World Bank Development Indicators
WDI <- WDI(country = "all", indicator = c("CM.MKT.TRAD.GD.ZS", #Stocks traded, total value (% of GDP) (4th column)
"BN.KLT.DINV.CD.ZS", #Foreign direct investment (% of GDP)
"CM.MKT.TRNR", #Stocks traded, turnover ratio (%)
"NY.GDP.PCAP.KD.ZG", #GDP per capita growth (annual %)
"NEGDIKSTKKD", #Estimated Capital stock (real 2005 US$)
"NY.GDP.MKTP.KD.ZG"), #GDP growth (annual %)
start=1991, end=2015)
summary(WDI$CM.MKT.TRAD.GD.ZS) #4339 NA's
summary(WDI$BN.KLT.DINV.CD.ZS) #5996
summary(WDI$CM.MKT.TRNR) # 4313 NA's
summary(WDI$NY.GDP.PCAP.KD.ZG) # 945 NA's
summary(WDI$NY.GDP.MKTP.KD.ZG) # 942 NA's
names(WDI)[4] <- 'stocks'
names(WDI)[5] <- 'fdi'
names(WDI)[6] <- 'sturnover'
names(WDI)[7] <- 'gdp.pc.gr'
names(WDI)[8] <- 'gdp.gr'
WDI[1:213, ] #delete non-countries (a.k.a. regions)
WDI <- WDI[-c(1:213), ]
WDI[1126:1147, ] #delete Cape Verde, it's called Cabo Verde now (both have same iso2c)
WDI <- WDI[-c(1126:1147), ]
sub <- subset(WDI, iso2c != "B8")
sub <- subset(sub, iso2c != "F1")
sub <- subset(sub, iso2c != "S1")
sub <- subset(sub, iso2c != "S2")
sub <- subset(sub, iso2c != "S3")
sub <- subset(sub, iso2c != "S4")
sub <- subset(sub, iso2c != "Z4")
WDI <- subset(sub, iso2c != "Z7")
rm(sub)
# merge the data sets
merge1 <- merge(WDI,aggrtdis,by=c("iso2c", "year"), all.x = TRUE)
merge2 <- merge(merge1,CAC,by=c("year"), all.x = TRUE)
merge3 <- merge(merge2,DAX,by=c("year"), all.x = TRUE)
merge4 <- merge(merge3,FTSE,by=c("year"), all.x = TRUE)
merge5 <- merge(merge4,IBOV,by=c("year"), all.x = TRUE)
merge6 <- merge(merge5,NIKKEI,by=c("year"), all.x = TRUE)
merge7 <- merge(merge6,OMRU,by=c("year"), all.x = TRUE)
rm(WDI, aggrtdis, CAC, DAX, FTSE, IBOV, OMRU, NIKKEI, merge1, merge2, merge3, merge4, merge5, merge6)
merge7$country.y <- NULL
names(merge7)[3] <- 'country'
merge8 <- merge(GPSA,merge7,by=c("country", "year"), all.x = TRUE)
rm(merge7, GPSA)
merge9 <- merge(merge8,unempl,by=c("iso3c", "Date"), all.x = TRUE)
rm(merge8, unempl)
merge10 <- merge(merge9,prvconsm,by=c("iso3c", "Date"), all.x = TRUE)
merge10$country.y <- NULL
names(merge10)[3] <- 'country'
rm(merge9, prvconsm)
#merge11 <- merge(merge10,deposit,by=c("Date"), all.x = TRUE)
#rm(deposit, merge10)
#merge12 <- merge(merge11,MRO,by=c("Date"), all.x = TRUE) #396 observations too much
#rm(MRO, merge11)
# creating percentage changes
# NOTE(review): `merge12` is only created by the commented-out merges above
# (merge11/merge12 are disabled), so this line errors as written; the last
# live data frame is `merge10` — confirm which was intended. `change()` is
# presumably DataCombine::change(), but that package is never loaded here.
Out <- change(merge12, Var = 'B',
              type = 'proportion',
              NewVar = 'PercentChange',
              slideBy = -2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wafregional_operations.R
\name{wafregional_get_permission_policy}
\alias{wafregional_get_permission_policy}
\title{Returns the IAM policy attached to the RuleGroup}
\usage{
wafregional_get_permission_policy(ResourceArn)
}
\arguments{
\item{ResourceArn}{[required] The Amazon Resource Name (ARN) of the RuleGroup for which you want to
get the policy.}
}
\description{
Returns the IAM policy attached to the RuleGroup.
}
\section{Request syntax}{
\preformatted{svc$get_permission_policy(
ResourceArn = "string"
)
}
}
\keyword{internal}
| /paws/man/wafregional_get_permission_policy.Rd | permissive | johnnytommy/paws | R | false | true | 615 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wafregional_operations.R
\name{wafregional_get_permission_policy}
\alias{wafregional_get_permission_policy}
\title{Returns the IAM policy attached to the RuleGroup}
\usage{
wafregional_get_permission_policy(ResourceArn)
}
\arguments{
\item{ResourceArn}{[required] The Amazon Resource Name (ARN) of the RuleGroup for which you want to
get the policy.}
}
\description{
Returns the IAM policy attached to the RuleGroup.
}
\section{Request syntax}{
\preformatted{svc$get_permission_policy(
ResourceArn = "string"
)
}
}
\keyword{internal}
|
#This is a exemple of the R scripts used in the article "Large‐scale genetic
##panmixia in the blue shark (Prionace glauca): A single worldwide population,
##or a genetic lag‐time effect of the “grey zone” of differentiation?" to
##determine the existence and duration of the grey zone, for the graph generation.
#written by Diane Bailleul
#email: diane.bailleul.pro@gmail.com
#Please, do not use (with or witout modifications) without citing
##R and the original article.
##################################################
#data treatment and graph generation for N=100000#
##################################################
#rename data from SimuPop/grey_zone.py into res_simupopN100000.txt
data1 <- read.table("res_simupopN100000.txt", header = FALSE, sep="\t")
names(data1) <- c("gen", "Fst_tot", "rep", "Fst_sub", paste("sim", 1:1000, sep="_"))
head(data1)
data1 <- data1[,-1005]
data1$gen <- data1$gen + 20 #I don't like generation "0"
#pvalues computation
datat <- data1
pval_two <- NULL
for(i in 1:nrow(datat)){
sim <- datat[i,5:1004]
#pval_upper <- c(pval_upper, mean(sim <= datat[i,4]))
pval_two <- c(pval_two, if (2*min(mean(sim <= datat[i,4]), mean(sim >= datat[i,4])) > 1) {1} else {2*min(mean(sim <= datat[i,4]), mean(sim >= datat[i,4]))})
}
#pval_upper <- 1 - pval_upper
#detection % per generation
ndata1 <- cbind(data1[,1:4], pval_two)
reccords <- unlist(lapply(split(ndata1[,5], ndata1[,1]), function(x) mean(x <= 0.05)))
Fst_tot <- unlist(lapply(split(ndata1[,2], ndata1[,1]),unique))
Fst_sub <- split(ndata1[,4], ndata1[,1])
#Fst_subb <- lapply(Fst_sub, function(x) x*10) #if necessary to change scale
#graph, at least
x <- as.numeric(names(reccords))
y <- as.vector(reccords)
lis <- loess(y ~ x, span = 0.5)
z <- predict(lis, x)
preN100000 <- z
atN100000 <- x
par(mar = c(5.1, 4.1, 4.1, 4.1))
plot(Fst_tot~x, type = "l", ylim = c(0,1), xaxt = "n", yaxt = "n",
xlab = "", ylab = "", axes = FALSE)
mtext("Fst", side = 4, line = 3, par(las = 0), col = "darkblue")
mtext("generation", side = 1, line = 3, par(las = 0), col = "black")
mtext("detection capacity", side = 2, line = 3, par(las = 0), col = "darkgreen")
plotdegrad(nb_degrad = 2000, attenua = 700, xleft0 = 0, ybottom0 = 0, xright0 = 1170, ytop0 = 1)
#a <- 2000 #number of rectangles
#b <- 700 #number of shades
#xright0 <- 1170 #total length of the square
#xleft0 <- 0
#ybottom <- 0
#ytop0 <- 1
ytick <- seq(0, 1, by = 0.2)
axis(side = 2, at = ytick, labels = FALSE, col = "darkgreen")
text(x = -180, y = ytick + 0.03, col = "darkgreen", cex = 1,
labels = c("0%","20%","40%","60%","80%","100%"), srt = 90, pos = 1, xpd = TRUE)
ztick <- seq(0, 1, by = 0.2)
axis(side = 4, at = ztick, labels = FALSE, col ="darkblue")
text(x = 2180, y = ztick + 0.02, col ="darkblue", cex = 1,
labels = c("0","0.02","0.04","0.06","0.08","0.1"), srt = 270, pos = 1, xpd = TRUE)
xtick <- seq(0, 2000, by = 200)
axis(side = 1, at = xtick, labels = FALSE)
text(x = xtick, y = -0.1,
labels = xtick, srt = 0, xpd = TRUE, cex = 1)
lines(Fst_tot~x, type = "l", col = "darkblue", lwd = 2)
lines(as.vector(unlist(lapply(Fst_sub, median)))~x, type = "l", col = "royalblue")
lines(as.vector(unlist(lapply(Fst_sub, function(x) quantile(x, probs = 0.025))))~x,
lty = 3, col = "royalblue")
lines(as.vector(unlist(lapply(Fst_sub, function(x) quantile(x, probs = 0.975))))~x,
lty = 3, col = "royalblue")
lines(z~x, type = "l", col = "darkgreen", lwd = 2)
#Legends
#library(gplots)
legend(x = 280, y = 0.95,
c("significant sub-Fst proportion", "Fst", "median sub-Fst", "sub-Fst 95% CI"),
col = c("darkgreen", "darkblue", "royalblue", "royalblue"),
bg = c("white"),
#pch = c(16, 16),
lwd = c(3, 3, 2, 2),
lty = c(1, 1, 1, 3),
inset = 0,
cex = 1,
border = "white")
dev.copy2eps(file="RplotN100000.eps")
| /graph_gz.r | no_license | dbailleul/grey_zone | R | false | false | 3,846 | r | #This is a exemple of the R scripts used in the article "Large‐scale genetic
##panmixia in the blue shark (Prionace glauca): A single worldwide population,
##or a genetic lag‐time effect of the “grey zone” of differentiation?" to
##determine the existence and duration of the grey zone, for the graph generation.
#written by Diane Bailleul
#email: diane.bailleul.pro@gmail.com
#Please, do not use (with or witout modifications) without citing
##R and the original article.
##################################################
#data treatment and graph generation for N=100000#
##################################################
#rename data from SimuPop/grey_zone.py into res_simupopN100000.txt
data1 <- read.table("res_simupopN100000.txt", header = FALSE, sep="\t")
names(data1) <- c("gen", "Fst_tot", "rep", "Fst_sub", paste("sim", 1:1000, sep="_"))
head(data1)
data1 <- data1[,-1005]
data1$gen <- data1$gen + 20 #I don't like generation "0"
#pvalues computation
datat <- data1
pval_two <- NULL
for(i in 1:nrow(datat)){
sim <- datat[i,5:1004]
#pval_upper <- c(pval_upper, mean(sim <= datat[i,4]))
pval_two <- c(pval_two, if (2*min(mean(sim <= datat[i,4]), mean(sim >= datat[i,4])) > 1) {1} else {2*min(mean(sim <= datat[i,4]), mean(sim >= datat[i,4]))})
}
#pval_upper <- 1 - pval_upper
#detection % per generation
ndata1 <- cbind(data1[,1:4], pval_two)
reccords <- unlist(lapply(split(ndata1[,5], ndata1[,1]), function(x) mean(x <= 0.05)))
Fst_tot <- unlist(lapply(split(ndata1[,2], ndata1[,1]),unique))
Fst_sub <- split(ndata1[,4], ndata1[,1])
#Fst_subb <- lapply(Fst_sub, function(x) x*10) #if necessary to change scale
#graph, at least
x <- as.numeric(names(reccords))
y <- as.vector(reccords)
lis <- loess(y ~ x, span = 0.5)
z <- predict(lis, x)
preN100000 <- z
atN100000 <- x
par(mar = c(5.1, 4.1, 4.1, 4.1))
plot(Fst_tot~x, type = "l", ylim = c(0,1), xaxt = "n", yaxt = "n",
xlab = "", ylab = "", axes = FALSE)
mtext("Fst", side = 4, line = 3, par(las = 0), col = "darkblue")
mtext("generation", side = 1, line = 3, par(las = 0), col = "black")
mtext("detection capacity", side = 2, line = 3, par(las = 0), col = "darkgreen")
plotdegrad(nb_degrad = 2000, attenua = 700, xleft0 = 0, ybottom0 = 0, xright0 = 1170, ytop0 = 1)
#a <- 2000 #number of rectangles
#b <- 700 #number of shades
#xright0 <- 1170 #total length of the square
#xleft0 <- 0
#ybottom <- 0
#ytop0 <- 1
ytick <- seq(0, 1, by = 0.2)
axis(side = 2, at = ytick, labels = FALSE, col = "darkgreen")
text(x = -180, y = ytick + 0.03, col = "darkgreen", cex = 1,
labels = c("0%","20%","40%","60%","80%","100%"), srt = 90, pos = 1, xpd = TRUE)
ztick <- seq(0, 1, by = 0.2)
axis(side = 4, at = ztick, labels = FALSE, col ="darkblue")
text(x = 2180, y = ztick + 0.02, col ="darkblue", cex = 1,
labels = c("0","0.02","0.04","0.06","0.08","0.1"), srt = 270, pos = 1, xpd = TRUE)
xtick <- seq(0, 2000, by = 200)
axis(side = 1, at = xtick, labels = FALSE)
text(x = xtick, y = -0.1,
labels = xtick, srt = 0, xpd = TRUE, cex = 1)
lines(Fst_tot~x, type = "l", col = "darkblue", lwd = 2)
lines(as.vector(unlist(lapply(Fst_sub, median)))~x, type = "l", col = "royalblue")
lines(as.vector(unlist(lapply(Fst_sub, function(x) quantile(x, probs = 0.025))))~x,
lty = 3, col = "royalblue")
lines(as.vector(unlist(lapply(Fst_sub, function(x) quantile(x, probs = 0.975))))~x,
lty = 3, col = "royalblue")
lines(z~x, type = "l", col = "darkgreen", lwd = 2)
#Legends
#library(gplots)
legend(x = 280, y = 0.95,
c("significant sub-Fst proportion", "Fst", "median sub-Fst", "sub-Fst 95% CI"),
col = c("darkgreen", "darkblue", "royalblue", "royalblue"),
bg = c("white"),
#pch = c(16, 16),
lwd = c(3, 3, 2, 2),
lty = c(1, 1, 1, 3),
inset = 0,
cex = 1,
border = "white")
dev.copy2eps(file="RplotN100000.eps")
|
#' Build news section
#'
#' @description
#' A `NEWS.md` will be broken up into versions using level one (`#`) or
#' level two headings (`##`) that (partially) match one of the following forms
#' (ignoring case):
#'
#' * `{package name} 1.3.0`
#' * `{package name} v1.3.0`
#' * `Version 1.3.0`
#' * `Changes in 1.3.0`
#' * `Changes in v1.3.0`
#'
#' @details
#' A [common structure](https://style.tidyverse.org/news.html) for news files
#' is to use a top level heading for each release, and use a second level
#' heading to break up individual bullets into sections.
#'
#' ```yaml
#' # foofy 1.0.0
#'
#' ## Major changes
#'
#' * Can now work with all grooveable grobbles!
#'
#' ## Minor improvements and bug fixes
#'
#' * Printing scrobbles no longer errors (@githubusername, #100)
#'
#' * Wibbles are now 55% less jibbly (#200)
#' ```
#'
#' Issues and contributors will be automatically linked to the corresponding
#' pages on GitHub if the GitHub repo can be discovered from the `DESCRIPTION`
#' (typically from a `URL` entry containing `github.com`)
#'
#' If a version is available on CRAN, the release date will automatically
#' be added to the heading (see below for how to suppress); if not
#' available on CRAN, "Unreleased" will be added.
#'
#' @section YAML config:
#'
#' To automatically link to release announcements, include a `releases`
#' section.
#'
#' ```yaml
#' news:
#' releases:
#' - text: "usethis 1.3.0"
#' href: https://www.tidyverse.org/articles/2018/02/usethis-1-3-0/
#' - text: "usethis 1.0.0 (and 1.1.0)"
#' href: https://www.tidyverse.org/articles/2017/11/usethis-1.0.0/
#' ```
#'
#' Control whether news is present on one page or multiple pages with the
#' `one_page` field. The default is `true`.
#'
#' ```yaml
#' news:
#' one_page: false
#' ```
#'
#' Suppress the default addition of CRAN release dates with:
#'
#' ```yaml
#' news:
#' cran_dates: false
#' ```
#'
#' @seealso [Tidyverse style for News](https://style.tidyverse.org/news.html)
#'
#' @inheritParams build_articles
#' @export
build_news <- function(pkg = ".",
                       override = list(),
                       preview = NA) {
  # Render the package changelog into <dst>/news/. A package without a
  # NEWS.md gets no news section at all.
  pkg <- section_init(pkg, depth = 1L, override = override)
  if (!has_news(pkg$src_path)) {
    return()
  }

  rule("Building news")
  dir_create(path(pkg$dst_path, "news"))

  # news_style() only ever yields "single" or "multi".
  if (news_style(pkg$meta) == "single") {
    build_news_single(pkg)
  } else {
    build_news_multi(pkg)
  }

  preview_site(pkg, "news", preview = preview)
}
build_news_single <- function(pkg = ".") {
  # Render the whole changelog onto a single news/index.html page.
  pkg <- as_pkgdown(pkg)
  news <- data_news(pkg)

  data <- list(
    contents = purrr::transpose(news),
    pagetitle = tr_("Changelog"),
    source = repo_source(pkg, "NEWS.md")
  )
  render_page(pkg, "news", data, path("news", "index.html"))
}
build_news_multi <- function(pkg = ".") {
  # Render one changelog page per "major.minor" version (plus a "dev" page),
  # and an index page linking to all of them.
  pkg <- as_pkgdown(pkg)
  news <- data_news(pkg)
  # factor() with explicit levels preserves the order pages first appear in
  # NEWS.md, rather than sorting them alphabetically.
  page <- factor(news$page, levels = unique(news$page))

  # One row per output page, with the release sections belonging to it.
  news_paged <- tibble::tibble(
    version = levels(page),
    file_out = paste0("news-", version, ".html"),
    contents = news[c("html", "version", "anchor")] %>% split(page)
  )

  render_news <- function(version, file_out, contents) {
    render_page(
      pkg,
      "news",
      list(
        version = version,
        # rev() reverses the entry order relative to NEWS.md within the page.
        contents = rev(purrr::transpose(contents)),
        pagetitle = sprintf(tr_("Version %s"), version)
      ),
      path("news", file_out),
    )
  }
  news_paged %>% purrr::pmap(render_news)

  # Index page listing every per-version page.
  render_page(
    pkg,
    "news-index",
    list(
      versions = news_paged %>% purrr::transpose(),
      pagetitle = tr_("News")
    ),
    path("news", "index.html")
  )
}
globalVariables(".")
data_news <- function(pkg = list()) {
  # Parse NEWS.md into a tibble with one row per release section:
  # `version`, the `page` it renders on, its `anchor` id, and its `html`.
  html <- markdown_body(path(pkg$src_path, "NEWS.md"))

  xml <- xml2::read_html(html)
  downlit::downlit_html_node(xml)

  sections <- xml2::xml_find_all(xml, "./body/div")
  footnotes <- has_class(sections, "footnotes")
  if (any(footnotes)) {
    # Fixed typo in user-facing message: "suppoted" -> "supported".
    warn("Footnotes in NEWS.md are not currently supported")
  }
  sections <- sections[!footnotes]

  levels <- sections %>%
    xml2::xml_find_first(".//h1|.//h2|.//h3|.//h4|.//h5") %>%
    xml2::xml_name()
  ulevels <- unique(levels)
  # Top-level headings must be used consistently; mixing levels makes
  # release detection ambiguous.
  if (!identical(ulevels, "h1") && !identical(ulevels, "h2")) {
    abort(c(
      "Invalid NEWS.md: inconsistent use of section headings.",
      i = "Top-level headings must be either all <h1> or all <h2>.",
      i = "See ?build_news for more details."
    ))
  }
  if (ulevels == "h1") {
    # Bump every heading down a level so to get a single <h1> for the page title
    tweak_section_levels(xml)
  }

  titles <- xml2::xml_text(xml2::xml_find_first(sections, ".//h2"), trim = TRUE)
  versions <- news_version(titles, pkg$package)
  # Drop sections whose heading doesn't look like a release.
  sections <- sections[!is.na(versions)]
  versions <- versions[!is.na(versions)]

  show_dates <- purrr::pluck(pkg, "meta", "news", "cran_dates", .default = !is_testing())
  if (show_dates) {
    timeline <- pkg_timeline(pkg$package)
  } else {
    timeline <- NULL
  }

  # Restyle headings in place, then serialise and auto-link issue/user refs.
  html <- sections %>%
    purrr::walk2(
      versions,
      tweak_news_heading,
      timeline = timeline,
      bs_version = pkg$bs_version
    ) %>%
    purrr::map_chr(as.character, options = character()) %>%
    purrr::map_chr(repo_auto_link, pkg = pkg)

  anchors <- xml2::xml_attr(sections, "id")

  news <- tibble::tibble(
    version = versions,
    page = purrr::map_chr(versions, version_page),
    anchor = anchors,
    html = html
  )

  news
}
news_version <- function(x, pkgname) {
  # Extract a version string from a NEWS heading such as "<pkg> 1.2.0",
  # "Version 1.2.0", or "Changes in v1.2.0" (case-insensitive).
  # Non-matching headings yield NA; the usethis literal yields
  # "development version" (parentheses stripped below).
  pattern <- paste0("(?x)
      (?:", pkgname, "|version|changes\\ in)
      \\s+   # whitespace
      v?     # optional v followed by
      (?<version>
        (?:\\d+[.-]\\d+)(?:[.-]\\d+)*   # digits, dots, and dashes
        |                               # OR
        \\(development\\ version\\)     # literal used by usethis
      )
    ")
  pieces <- re_match(x, pattern, ignore.case = TRUE)
  # Strip the surrounding parentheses from "(development version)".
  gsub("^[(]|[)]$", "", pieces$version)
}
version_page <- function(x) {
  # Map a version string to the changelog page it belongs on:
  # in-development versions go to "dev", releases to their "major.minor" page.
  if (x == "development version") {
    return("dev")
  }

  parts <- unclass(package_version(x))[[1]]
  # A positive fourth component (e.g. 1.2.3.9000) marks a dev version.
  if (length(parts) == 4 && parts[[4]] > 0) {
    "dev"
  } else {
    sprintf("%d.%d", parts[[1]], parts[[2]])
  }
}
navbar_news <- function(pkg) {
  # Build the navbar entry for news: a "News" dropdown when the site config
  # lists release announcements, otherwise a plain "Changelog" link.
  # Implicitly returns NULL when the package has no NEWS.md.
  releases_meta <- pkg$meta$news$releases
  if (!is.null(releases_meta)) {
    # Dropdown: "Releases" header, the configured links, a spacer, then the
    # changelog link.
    menu(tr_("News"),
      c(
        list(menu_text(tr_("Releases"))),
        releases_meta,
        list(
          menu_spacer(),
          menu_link(tr_("Changelog"), "news/index.html")
        )
      )
    )
  } else if (has_news(pkg$src_path)) {
    menu_link(tr_("Changelog"), "news/index.html")
  }
}
has_news <- function(path = ".") {
  # TRUE when the package root contains a NEWS.md file.
  file_exists(path(path, "NEWS.md"))
}
pkg_timeline <- function(package) {
  # Look up the CRAN release dates for `package` from crandb.
  # Returns a data frame with `version` and `date` columns, or NULL when the
  # lookup is impossible (offline, HTTP error, or no timeline in the payload).
  if (!has_internet()) {
    return(NULL)
  }

  url <- paste0("https://crandb.r-pkg.org/", package, "/all")
  resp <- httr::RETRY("GET", url, quiet = TRUE)
  if (httr::http_error(resp)) {
    return(NULL)
  }

  content <- httr::content(resp)
  timeline <- content$timeline
  # Guard against a well-formed response without a timeline field (e.g. a
  # package never released to CRAN); data.frame(version = NULL, ...) would
  # otherwise build a malformed frame.
  if (is.null(timeline)) {
    return(NULL)
  }

  data.frame(
    version = names(timeline),
    date = as.Date(unlist(timeline)),
    stringsAsFactors = FALSE,
    row.names = NULL
  )
}
tweak_news_heading <- function(html, version, timeline, bs_version) {
  # Mutate a release section in place: style its <h2> for the active
  # Bootstrap version, record the version for the table of contents, append
  # the CRAN release date when known, and de-duplicate its anchor ids.
  class <- if (bs_version == 3) "page-header" else "pkg-version"

  h2 <- xml2::xml_find_all(html, ".//h2")
  xml2::xml_set_attr(h2, "class", class)
  xml2::xml_set_attr(h2, "data-toc-text", version)

  # Add release date, if known
  if (!is.null(timeline)) {
    date <- timeline$date[match(version, timeline$version)]
    if (!is.na(date)) {
      if (bs_version == 3) {
        # BS3: the date goes inside the heading as a <small> element.
        release_str <- paste0(" <small>", date, "</small>")
        release_html <- xml2::xml_find_first(xml2::read_html(release_str), ".//small")
        xml2::xml_add_child(h2, release_html, .where = 1)
      } else {
        # BS4+: the date goes in a muted paragraph after the heading.
        release_date <- sprintf(tr_("CRAN release: %s"), date)
        release_str <- paste0("<p class='text-muted'>", release_date, "</p>")
        release_html <- xml2::xml_find_first(xml2::read_html(release_str), ".//p")
        xml2::xml_add_sibling(h2, release_html, .where = "after")
      }
    }
  }

  tweak_news_anchor(html, version)
  invisible()
}
# Manually de-duplicate repeated section anchors using version
#
# Rewrites the id of every div with class "section" to "<base-id>-<version>"
# (with non-alphanumerics in the version collapsed to "-", since "." breaks
# scrollspy). Mutates `html` in place; returns invisibly.
#
# NOTE(review): the "-[0-9]+" pattern is unanchored, so it removes every
# "-<digits>" run in the id, not only a trailing pandoc de-duplication
# suffix -- confirm base ids never legitimately contain digits.
tweak_news_anchor <- function(html, version) {
  div <- xml2::xml_find_all(html, ".//div")
  div <- div[has_class(div, "section")]
  id <- xml2::xml_attr(div, "id")
  id <- gsub("-[0-9]+", "", id) # remove pandoc de-duplication suffixes
  id <- paste0(id, "-", gsub("[^a-z0-9]+", "-", version)) # . breaks scrollspy
  xml2::xml_attr(div, "id") <- id
  invisible()
}
# Demote every heading inside pandoc "section level" divs by one level.
#
# Used when NEWS.md titles versions with <h1>: bumping everything down one
# level leaves the page title as the only <h1>. Headings are renamed from
# the deepest level up (h5->h6 first, h1->h2 last) so a freshly demoted
# heading is not caught and demoted again by a later pass. Mutates `html`
# in place; returns invisibly.
tweak_section_levels <- function(html) {
  sections <- xml2::xml_find_all(html, ".//div[contains(@class, 'section level')]")
  # Update headings (deepest first -- see note above)
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h5"), "h6")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h4"), "h5")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h3"), "h4")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h2"), "h3")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h1"), "h2")
  # Update the section class to match the new heading depth
  xml2::xml_attr(sections, "class") <- paste0("section level", get_section_level(sections) + 1)
  invisible()
}
# Which layout should the changelog use?
#
# Reads `news: one_page` from the site metadata (also accepting the older
# nested form `news: [one_page: ...]`), defaulting to TRUE. Returns
# "single" for a one-page changelog, "multi" for one page per version.
news_style <- function(meta) {
  configured <- purrr::pluck(meta, "news", "one_page")
  if (is.null(configured)) {
    configured <- purrr::pluck(meta, "news", 1, "one_page")
  }
  if (is.null(configured)) {
    configured <- TRUE
  }
  if (configured) "single" else "multi"
}
| /R/build-news.R | permissive | datacamp/pkgdown | R | false | false | 9,341 | r | #' Build news section
#'
#' @description
#' A `NEWS.md` will be broken up into versions using level one (`#`) or
#' level two headings (`##`) that (partially) match one of the following forms
#' (ignoring case):
#'
#' * `{package name} 1.3.0`
#' * `{package name} v1.3.0`
#' * `Version 1.3.0`
#' * `Changes in 1.3.0`
#' * `Changes in v1.3.0`
#'
#' @details
#' A [common structure](https://style.tidyverse.org/news.html) for news files
#' is to use a top level heading for each release, and use a second level
#' heading to break up individual bullets into sections.
#'
#' ```yaml
#' # foofy 1.0.0
#'
#' ## Major changes
#'
#' * Can now work with all grooveable grobbles!
#'
#' ## Minor improvements and bug fixes
#'
#' * Printing scrobbles no longer errors (@githubusername, #100)
#'
#' * Wibbles are now 55% less jibbly (#200)
#' ```
#'
#' Issues and contributors will be automatically linked to the corresponding
#' pages on GitHub if the GitHub repo can be discovered from the `DESCRIPTION`
#' (typically from a `URL` entry containing `github.com`)
#'
#' If a version is available on CRAN, the release date will automatically
#' be added to the heading (see below for how to suppress); if not
#' available on CRAN, "Unreleased" will be added.
#'
#' @section YAML config:
#'
#' To automatically link to release announcements, include a `releases`
#' section.
#'
#' ```yaml
#' news:
#' releases:
#' - text: "usethis 1.3.0"
#' href: https://www.tidyverse.org/articles/2018/02/usethis-1-3-0/
#' - text: "usethis 1.0.0 (and 1.1.0)"
#' href: https://www.tidyverse.org/articles/2017/11/usethis-1.0.0/
#' ```
#'
#' Control whether news is present on one page or multiple pages with the
#' `one_page` field. The default is `true`.
#'
#' ```yaml
#' news:
#' one_page: false
#' ```
#'
#' Suppress the default addition of CRAN release dates with:
#'
#' ```yaml
#' news:
#' cran_dates: false
#' ```
#'
#' @seealso [Tidyverse style for News](https://style.tidyverse.org/news.html)
#'
#' @inheritParams build_articles
#' @export
build_news <- function(pkg = ".",
                       override = list(),
                       preview = NA) {
  pkg <- section_init(pkg, depth = 1L, override = override)
  # Nothing to do for packages without a NEWS.md
  if (!has_news(pkg$src_path))
    return()
  rule("Building news")
  dir_create(path(pkg$dst_path, "news"))
  # Layout is controlled by `news: one_page` in the site metadata
  switch(news_style(pkg$meta),
    single = build_news_single(pkg),
    multi = build_news_multi(pkg)
  )
  preview_site(pkg, "news", preview = preview)
}
# Render the whole changelog as a single page: news/index.html.
build_news_single <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  news <- data_news(pkg)
  render_page(
    pkg,
    "news",
    list(
      # one list element per version section, in NEWS.md order
      contents = purrr::transpose(news),
      pagetitle = tr_("Changelog"),
      source = repo_source(pkg, "NEWS.md")
    ),
    path("news", "index.html")
  )
}
# Render the changelog as one page per `version_page()` group (e.g. "1.2",
# "dev"), plus an index page linking to each of them.
build_news_multi <- function(pkg = ".") {
  pkg <- as_pkgdown(pkg)
  news <- data_news(pkg)
  # factor levels preserve the first-seen order of pages from NEWS.md
  page <- factor(news$page, levels = unique(news$page))
  news_paged <- tibble::tibble(
    version = levels(page),
    file_out = paste0("news-", version, ".html"),
    contents = news[c("html", "version", "anchor")] %>% split(page)
  )
  # Render one page per version group.
  # (Fixed: removed a trailing comma after the path argument, which passed
  # an extra empty argument to render_page().)
  render_news <- function(version, file_out, contents) {
    render_page(
      pkg,
      "news",
      list(
        version = version,
        contents = rev(purrr::transpose(contents)),
        pagetitle = sprintf(tr_("Version %s"), version)
      ),
      path("news", file_out)
    )
  }
  news_paged %>% purrr::pmap(render_news)
  # Index page listing all versions
  render_page(
    pkg,
    "news-index",
    list(
      versions = news_paged %>% purrr::transpose(),
      pagetitle = tr_("News")
    ),
    path("news", "index.html")
  )
}
globalVariables(".")

# Parse NEWS.md into a tibble with one row per version section.
#
# Columns:
#   * version: version string extracted from the section heading
#   * page:    output page the section belongs to (see version_page())
#   * anchor:  id attribute of the section <div>
#   * html:    rendered HTML for the section
# Sections whose heading contains no recognizable version are dropped.
# Aborts when NEWS.md mixes <h1> and <h2> top-level headings.
data_news <- function(pkg = list()) {
  html <- markdown_body(path(pkg$src_path, "NEWS.md"))
  xml <- xml2::read_html(html)
  downlit::downlit_html_node(xml)
  sections <- xml2::xml_find_all(xml, "./body/div")
  # Footnotes can't be attributed to a single version section, so drop them
  footnotes <- has_class(sections, "footnotes")
  if (any(footnotes)) {
    # Fixed typo in the user-facing warning ("suppoted" -> "supported")
    warn("Footnotes in NEWS.md are not currently supported")
  }
  sections <- sections[!footnotes]
  # Determine the heading level used for versions; must be all h1 or all h2
  levels <- sections %>%
    xml2::xml_find_first(".//h1|.//h2|.//h3|.//h4|.//h5") %>%
    xml2::xml_name()
  ulevels <- unique(levels)
  if (!identical(ulevels, "h1") && !identical(ulevels, "h2")) {
    abort(c(
      "Invalid NEWS.md: inconsistent use of section headings.",
      i = "Top-level headings must be either all <h1> or all <h2>.",
      i = "See ?build_news for more details."
    ))
  }
  if (identical(ulevels, "h1")) {
    # Bump every heading down a level so to get a single <h1> for the page title
    tweak_section_levels(xml)
  }
  titles <- xml2::xml_text(xml2::xml_find_first(sections, ".//h2"), trim = TRUE)
  versions <- news_version(titles, pkg$package)
  # Keep only sections with a parseable version
  sections <- sections[!is.na(versions)]
  versions <- versions[!is.na(versions)]
  # Optionally annotate headings with CRAN release dates
  show_dates <- purrr::pluck(pkg, "meta", "news", "cran_dates", .default = !is_testing())
  if (show_dates) {
    timeline <- pkg_timeline(pkg$package)
  } else {
    timeline <- NULL
  }
  # walk2() mutates the headings in place, then each section is serialized
  # to HTML and issue/contributor references are auto-linked
  html <- sections %>%
    purrr::walk2(
      versions,
      tweak_news_heading,
      timeline = timeline,
      bs_version = pkg$bs_version
    ) %>%
    purrr::map_chr(as.character, options = character()) %>%
    purrr::map_chr(repo_auto_link, pkg = pkg)
  anchors <- xml2::xml_attr(sections, "id")
  news <- tibble::tibble(
    version = versions,
    page = purrr::map_chr(versions, version_page),
    anchor = anchors,
    html = html
  )
  news
}
# Extract version strings from NEWS section titles (NA when no match).
# Recognizes "<pkgname> 1.2.3", "version 1.2.3", "changes in v1.2.3" and
# "(development version)", case-insensitively; strips surrounding parens.
# NOTE(review): `pkgname` is interpolated into the regex unescaped --
# confirm behavior for names containing regex metacharacters.
news_version <- function(x, pkgname) {
  pattern <- paste0("(?x)
    (?:", pkgname, "|version|changes\\ in)
    \\s+ # whitespace
    v? # optional v followed by
    (?<version>
      (?:\\d+[.-]\\d+)(?:[.-]\\d+)* # digits, dots, and dashes
      | # OR
      \\(development\\ version\\) # literal used by usethis
    )
  ")
  pieces <- re_match(x, pattern, ignore.case = TRUE)
  # Strip the literal parens around "(development version)"
  gsub("^[(]|[)]$", "", pieces$version)
}
# Map a version string to its news page: "dev" for the development version
# and for x.y.z.9000-style dev builds, otherwise "major.minor".
version_page <- function(x) {
  if (x == "development version") {
    return("dev")
  }
  parts <- unclass(package_version(x))[[1]]
  if (length(parts) == 4 && parts[[4]] > 0) {
    return("dev")
  }
  paste(parts[[1]], parts[[2]], sep = ".")
}
# Build the "News" navbar entry: a releases dropdown when `news: releases`
# is configured, a plain "Changelog" link when only NEWS.md exists,
# otherwise NULL.
navbar_news <- function(pkg) {
  releases_meta <- pkg$meta$news$releases
  if (!is.null(releases_meta)) {
    menu(tr_("News"),
      c(
        list(menu_text(tr_("Releases"))),
        releases_meta,
        list(
          menu_spacer(),
          menu_link(tr_("Changelog"), "news/index.html")
        )
      )
    )
  } else if (has_news(pkg$src_path)) {
    menu_link(tr_("Changelog"), "news/index.html")
  }
}
# Does the package at `path` have a top-level NEWS.md?
has_news <- function(path = ".") {
  file_exists(path(path, "NEWS.md"))
}
# Fetch the CRAN release timeline (version + date) from crandb; NULL when
# offline or on HTTP error.
pkg_timeline <- function(package) {
  if (!has_internet()) {
    return(NULL)
  }
  url <- paste0("https://crandb.r-pkg.org/", package, "/all")
  # RETRY guards against transient network failures
  resp <- httr::RETRY("GET", url, quiet = TRUE)
  if (httr::http_error(resp)) {
    return(NULL)
  }
  content <- httr::content(resp)
  timeline <- content$timeline
  data.frame(
    version = names(timeline),
    date = as.Date(unlist(timeline)),
    stringsAsFactors = FALSE,
    row.names = NULL
  )
}
# Decorate a version section's <h2>: set the CSS class for the Bootstrap
# version, shorten the TOC label to the version, and insert the CRAN
# release date when known. Mutates `html` in place; returns invisibly.
tweak_news_heading <- function(html, version, timeline, bs_version) {
  class <- if (bs_version == 3) "page-header" else "pkg-version"
  h2 <- xml2::xml_find_all(html, ".//h2")
  xml2::xml_set_attr(h2, "class", class)
  xml2::xml_set_attr(h2, "data-toc-text", version)
  # Add release date, if known
  if (!is.null(timeline)) {
    date <- timeline$date[match(version, timeline$version)]
    if (!is.na(date)) {
      if (bs_version == 3) {
        # BS3: inline <small> inside the heading
        release_str <- paste0(" <small>", date, "</small>")
        release_html <- xml2::xml_find_first(xml2::read_html(release_str), ".//small")
        xml2::xml_add_child(h2, release_html, .where = 1)
      } else {
        # BS4+: muted paragraph directly after the heading
        release_date <- sprintf(tr_("CRAN release: %s"), date)
        release_str <- paste0("<p class='text-muted'>", release_date, "</p>")
        release_html <- xml2::xml_find_first(xml2::read_html(release_str), ".//p")
        xml2::xml_add_sibling(h2, release_html, .where = "after")
      }
    }
  }
  tweak_news_anchor(html, version)
  invisible()
}
# Manually de-duplicate repeated section anchors using version
# Rewrites each section div's id to "<base-id>-<version>"; mutates in place.
# NOTE(review): "-[0-9]+" is unanchored and strips every digit run, not
# just a trailing pandoc suffix -- confirm base ids never contain digits.
tweak_news_anchor <- function(html, version) {
  div <- xml2::xml_find_all(html, ".//div")
  div <- div[has_class(div, "section")]
  id <- xml2::xml_attr(div, "id")
  id <- gsub("-[0-9]+", "", id) # remove pandoc de-duplication suffixes
  id <- paste0(id, "-", gsub("[^a-z0-9]+", "-", version)) # . breaks scrollspy
  xml2::xml_attr(div, "id") <- id
  invisible()
}
# Demote all headings in "section level" divs by one (h1->h2 ... h5->h6),
# renaming deepest-first so a demoted heading isn't demoted twice.
# Mutates `html` in place; returns invisibly.
tweak_section_levels <- function(html) {
  sections <- xml2::xml_find_all(html, ".//div[contains(@class, 'section level')]")
  # Update headings (deepest level first)
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h5"), "h6")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h4"), "h5")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h3"), "h4")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h2"), "h3")
  xml2::xml_set_name(xml2::xml_find_all(sections, ".//h1"), "h2")
  # Update the section class to match the new heading depth
  xml2::xml_attr(sections, "class") <- paste0("section level", get_section_level(sections) + 1)
  invisible()
}
# Read `news: one_page` (or the older nested form) from the site metadata,
# defaulting to TRUE; return "single" or "multi" accordingly.
news_style <- function(meta) {
  one_page <- purrr::pluck(meta, "news", "one_page") %||%
    purrr::pluck(meta, "news", 1, "one_page") %||%
    TRUE
  if (one_page) "single" else "multi"
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/argo.R
\name{argo}
\alias{argo}
\alias{argo_search}
\alias{argo_files}
\alias{argo_qwmo}
\alias{argo_plan}
\alias{argo_buoy_files}
\title{Get Argo buoy data}
\usage{
argo_search(
func = NULL,
of = NULL,
qwmo = NULL,
wmo = NULL,
box = NULL,
area = NULL,
around = NULL,
year = NULL,
yearmin = NULL,
yearmax = NULL,
month = NULL,
monthmin = NULL,
monthmax = NULL,
lr = NULL,
from = NULL,
to = NULL,
dmode = NULL,
pres_qc = NULL,
temp_qc = NULL,
psal_qc = NULL,
doxy_qc = NULL,
ticket = NULL,
limit = 10,
...
)
argo_files(wmo = NULL, cyc = NULL, ...)
argo_qwmo(qwmo, limit = 10, ...)
argo_plan(...)
argo_buoy_files(dac, id, ...)
argo(dac, id, cycle, dtype, ...)
}
\arguments{
\item{func}{A function, one of n, np, nf, coord, fullcoord, list, ftplist, ticket, version}
\item{of}{of string}
\item{qwmo}{qwmo string}
\item{wmo}{wmo string. mandatory when using \code{argo_files}}
\item{box}{Bounding box, of the form: A) lon, lat for geographical coordinates of
the center of a box, or B) min lon, min lat, width, height, for geographical
coordinates of the center of the box and its width and height. The longitude must be
given between -180W and 180E. Width and height are in degrees of longitude and latitude.}
\item{area}{(integer/character), One of 0, 1, or 2, but can be in combination. e.g. 0, '0,2'
See Details.}
\item{around}{(character) Selects profiles located around a given center point. List of
3 or 4 numerical values depending on how the center point need to be specified:
e.g., '-40,35,100', '6900678,2,200', '6900678,2,200,30'. See Details}
\item{year}{restrict profiles sampled on a single, or a list of given years. One or a
comma separated list of numerical value(s) higher than 0 and lower than 9999.}
\item{yearmin, yearmax}{restrict profiles sampled before (yearmax) and/or after (yearmin)
a given year. A numerical value higher than 0 and lower than 9999. cannot be applied with
the other restriction parameter \code{year}}
\item{month}{restrict profiles sampled on a single, or a list of given month(s). One or a
comma separated list of numerical value(s) higher or equal to 1 and lower or equal to 12.
The month convention is a standard one: January is 1, February is 2, ... December is 12.}
\item{monthmin, monthmax}{restrict profiles sampled before (monthmax) and/or after (monthmin)
a given month. Higher or equal to 1 and lower or equal to 12. The month convention is a
standard one: January is 1, February is 2, ... December is 12. These restrictions cannot be
applied with the other restriction parameter month. At this time, these parameters are not
circular, so that the restriction chain monthmin=12&monthmax=2 will throw an error
and not select December to February profiles. To do so, you need to use a comma-separated
list of months using the month restriction parameter.}
\item{lr}{restriction allows you to impose the last report (hence lr) date in days. A
numerical value in days between 1 (profiles sampled yesterday) and 60 (profiles sampled
over the last 60 days). This restriction allows a simple selection of the so-called
'active' floats, ie those which reported a profiles over the last 30 days.}
\item{from, to}{select profiles sampled before (to) and/or after (from) an explicit date
(included). The date is specified following the format: YYYYMMDD, ie the year, month
and day numbers.}
\item{dmode}{(character) imposes a restriction on the Data Mode of profiles. A single value or
a comma-separated list of characters defining the Data Mode to select. It can be: R for
"Real Time", A for "Real Time with Adjusted value" and D for "Delayed Mode". See Details.}
\item{pres_qc, temp_qc, psal_qc, doxy_qc}{Quality control. Imposes a restriction on the profile
data quality flag. For a given variable PARAM which can be: pres (pressure),
temp (temperature), psal (salinity) or doxy (oxygen), this restriction selects profiles
having one or a comma-separated list of data quality flags. See Details.}
\item{ticket}{(numeric) select profiles with or without a ticket filled in the database. A
value: 0 (no ticket) or 1 (has a ticket). See
http://www.ifremer.fr/lpo/naarc/m/docs/api/database.html for more details.}
\item{limit}{(integer) number to return}
\item{...}{Curl options passed on to \code{\link[crul]{HttpClient}}. Optional}
\item{cyc}{a cycle number}
\item{dac}{(character) Data assembly center code}
\item{id}{(numeric) Buoy identifier}
\item{cycle}{(numeric) Cycle number}
\item{dtype}{(character) Data type, one of \code{D} for delayed, or \code{R} for real-time}
}
\description{
THE ARGO API IS DOWN. WE'VE BEEN UNSUCCESSFUL FIGURING OUT WHERE
THE API MOVED TO OR IF IT STILL EXISTS. SEE "API Status" SECTION
BELOW FOR MORE DETAILS.
}
\details{
\code{area} parameter definitions:
\itemize{
\item Value 0 selects profiles located in the North-Atlantic ocean north of 20S
and not in areas 1 and 2.
\item Value 1 selects profiles located in the Mediterranean Sea.
\item Value 2 selects profiles located in the Nordic Seas.
}
\code{around} parameter definitions:
\itemize{
\item Specification 1: The location is specified with specific geographical
coordinates in the following format: around=longitude,latitude,distance - The longitude
must be given between -180W and 180E and the distance is in kilometers.
\item Specification 2: The location is the one of an existing profile in the database.
It is thus specified with a float WMO and a cycle number: around=wmo,cyc,distance
This specification can take an optional fourth value specifying the time range in days
around the specified profile.
}
\code{dmode} parameter definitions:
\itemize{
\item Data from Argo floats are transmitted from the float, passed through processing and
automatic quality control procedures. These profiles have a Data Mode called: real-time data.
\item The data are also issued to the Principal Investigators who apply other procedures to
check data quality returned to the global data centre within 6 to 12 months. These profiles
have a Data Mode called: delayed mode data.
\item The adjustments applied to delayed-data may also be applied to real-time data, to
correct sensor drifts for real-time users. These profiles have a Data Mode called: real
time data with adjusted values.
}
\code{*_qc} parameter definitions:
This information was extracted from the netcdf profile variable PROFILE_PARAM_QC. Once
quality control procedures have been applied, a synthetic flag is assigned for each
parameter of each profile under this variable in netcdf files. It indicates the fraction
n of profile levels with good data. It can take one of the following values:
\itemize{
\item A or F: All (n=100\%) or none (n=0\%) of the profile levels contain good data,
\item B,C,D,E: n is in one of the intermediate range: 75-100, 50-75, 25-50 or 0-25
\item empty: No QC was performed.
}
}
\section{File storage}{
We use \pkg{rappdirs} to store files, see
\code{\link[rappdirs]{user_cache_dir}} for how we determine the directory on
your machine to save files to, and run
\code{rappdirs::user_cache_dir("rnoaa/argo")} to get that directory.
The \code{path} parameter used to be used to set where files are stored
on your machine.
}
\section{API Status}{
The API is down as of 2019-11-07, and probably some time before that. The
following functions won't work anymore (future package versions may bring
them back if the API comes back):
\itemize{
\item argo_search
\item argo_files
\item argo_qwmo
\item argo_plan
}
The following functions still work as they are based off the FTP server
that still exists:
\itemize{
\item argo_buoy_files
\item argo
}
}
\examples{
\dontrun{
# Search Argo metadata
## Number of profiles
# argo_search("np", limit = 3)
## Number of floats
# argo_search("nf", limit = 3)
## Number of both profiles and floats
# argo_search("n", limit = 3)
## return the coordinates in time and space of profiles
# argo_search("coord", limit = 3)
## return the coordinates in time and space of profiles, plus other metadata
# argo_search("fullcoord", limit = 3)
## List various things, e.g,...
### data assembly centers
# argo_search("list", "dac")
### data modes
# argo_search("list", "dmode", limit = 5)
### World Meteorological Organization unique float ID's
# argo_search("list", "wmo", limit = 5)
### Profile years
# argo_search("list", "year", limit = 5)
## coord or fullcoord with specific buoy id
# argo_search("coord", wmo = 13857, limit = 3)
# argo_search("fullcoord", wmo = 13857, limit = 3)
# Spatial search
### search by bounding box (see param def above)
# argo_search("coord", box = c(-40, 35, 3, 2))
### search by area
# argo_search("coord", area = 0)
### search by around
# argo_search("coord", around = '-40,35,100')
# Time based search
### search by year
# argo_search("coord", year = 2006)
### search by yearmin and yearmax
# argo_search("coord", yearmin = 2007)
# argo_search("coord", yearmin = 2007, yearmax = 2009)
### search by month
# argo_search("coord", month = '12,1,2')
### search by from or to
# argo_search("coord", from = 20090212)
# argo_search("coord", to = 20051129)
# Data mode search
# argo_search("coord", dmode = "R")
# argo_search("coord", dmode = "R,A")
# Data quality based search
# argo_search("coord", pres_qc = "A,B")
# argo_search("coord", temp_qc = "A")
# argo_search("coord", pres_qc = "A", temp_qc = "A")
# Ticket search
# argo_search("coord", ticket = 0)
## Search on partial float id number
# argo_qwmo(qwmo = 49)
# argo_qwmo(qwmo = 49, limit = 2)
## Get files
# argo_files(wmo = 13857)
# argo_files(wmo = 13857, cyc = 12)
# argo_files(wmo = 13857, cyc = 45)
## Get planned buoys data, accepts no parameters
# argo_plan()
# Get files for a buoy, must specify data assembly center (dac)
argo_buoy_files(dac = "bodc", id = 1901309)
argo_buoy_files(dac = "kma", id = 2900308)
# Get data
x <- argo_buoy_files(dac = "meds", id = 4900881)
argo(dac = "meds", id = 4900881, cycle = 127, dtype = "D")
}
}
\references{
http://www.ifremer.fr/lpo/naarc/m/docs/api/howto.html
}
| /man/argo.Rd | permissive | mpettis/rnoaa | R | false | true | 10,113 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/argo.R
\name{argo}
\alias{argo}
\alias{argo_search}
\alias{argo_files}
\alias{argo_qwmo}
\alias{argo_plan}
\alias{argo_buoy_files}
\title{Get Argo buoy data}
\usage{
argo_search(
func = NULL,
of = NULL,
qwmo = NULL,
wmo = NULL,
box = NULL,
area = NULL,
around = NULL,
year = NULL,
yearmin = NULL,
yearmax = NULL,
month = NULL,
monthmin = NULL,
monthmax = NULL,
lr = NULL,
from = NULL,
to = NULL,
dmode = NULL,
pres_qc = NULL,
temp_qc = NULL,
psal_qc = NULL,
doxy_qc = NULL,
ticket = NULL,
limit = 10,
...
)
argo_files(wmo = NULL, cyc = NULL, ...)
argo_qwmo(qwmo, limit = 10, ...)
argo_plan(...)
argo_buoy_files(dac, id, ...)
argo(dac, id, cycle, dtype, ...)
}
\arguments{
\item{func}{A function, one of n, np, nf, coord, fullcoord, list, ftplist, ticket, version}
\item{of}{of string}
\item{qwmo}{qwmo string}
\item{wmo}{wmo string. mandatory when using \code{argo_files}}
\item{box}{Bounding box, of the form: A) lon, lat for geographical coordinates of
the center of a box, or B) min lon, min lat, width, height, for geographical
coordinates of the center of the box and its width and height. The longitude must be
given between -180W and 180E. Width and height are in degrees of longitude and latitude.}
\item{area}{(integer/character), One of 0, 1, or 2, but can be in combination. e.g. 0, '0,2'
See Details.}
\item{around}{(character) Selects profiles located around a given center point. List of
3 or 4 numerical values depending on how the center point need to be specified:
e.g., '-40,35,100', '6900678,2,200', '6900678,2,200,30'. See Details}
\item{year}{restrict profiles sampled on a single, or a list of given years. One or a
comma separated list of numerical value(s) higher than 0 and lower than 9999.}
\item{yearmin, yearmax}{restrict profiles sampled before (yearmax) and/or after (yearmin)
a given year. A numerical value higher than 0 and lower than 9999. cannot be applied with
the other restriction parameter \code{year}}
\item{month}{restrict profiles sampled on a single, or a list of given month(s). One or a
comma separated list of numerical value(s) higher or equal to 1 and lower or equal to 12.
The month convention is a standard one: January is 1, February is 2, ... December is 12.}
\item{monthmin, monthmax}{restrict profiles sampled before (monthmax) and/or after (monthmin)
a given month. Higher or equal to 1 and lower or equal to 12. The month convention is a
standard one: January is 1, February is 2, ... December is 12. These restrictions cannot be
applied with the other restriction parameter month. At this time, these parameters are not
circular, so that the restriction chain monthmin=12&monthmax=2 will throw an error
and not select December to February profiles. To do so, you need to use a comma-separated
list of months using the month restriction parameter.}
\item{lr}{restriction allows you to impose the last report (hence lr) date in days. A
numerical value in days between 1 (profiles sampled yesterday) and 60 (profiles sampled
over the last 60 days). This restriction allows a simple selection of the so-called
'active' floats, ie those which reported a profiles over the last 30 days.}
\item{from, to}{select profiles sampled before (to) and/or after (from) an explicit date
(included). The date is specified following the format: YYYYMMDD, ie the year, month
and day numbers.}
\item{dmode}{(character) imposes a restriction on the Data Mode of profiles. A single value or
a comma-separated list of characters defining the Data Mode to select. It can be: R for
"Real Time", A for "Real Time with Adjusted value" and D for "Delayed Mode". See Details.}
\item{pres_qc, temp_qc, psal_qc, doxy_qc}{Quality control. Imposes a restriction on the profile
data quality flag. For a given variable PARAM which can be: pres (pressure),
temp (temperature), psal (salinity) or doxy (oxygen), this restriction selects profiles
having one or a comma-separated list of data quality flags. See Details.}
\item{ticket}{(numeric) select profiles with or without a ticket filled in the database. A
value: 0 (no ticket) or 1 (has a ticket). See
http://www.ifremer.fr/lpo/naarc/m/docs/api/database.html for more details.}
\item{limit}{(integer) number to return}
\item{...}{Curl options passed on to \code{\link[crul]{HttpClient}}. Optional}
\item{cyc}{a cycle number}
\item{dac}{(character) Data assembly center code}
\item{id}{(numeric) Buoy identifier}
\item{cycle}{(numeric) Cycle number}
\item{dtype}{(character) Data type, one of \code{D} for delayed, or \code{R} for real-time}
}
\description{
THE ARGO API IS DOWN. WE'VE BEEN UNSUCCESSFUL FIGURING OUT WHERE
THE API MOVED TO OR IF IT STILL EXISTS. SEE "API Status" SECTION
BELOW FOR MORE DETAILS.
}
\details{
\code{area} parameter definitions:
\itemize{
\item Value 0 selects profiles located in the North-Atlantic ocean north of 20S
and not in areas 1 and 2.
\item Value 1 selects profiles located in the Mediterranean Sea.
\item Value 2 selects profiles located in the Nordic Seas.
}
\code{around} parameter definitions:
\itemize{
\item Specification 1: The location is specified with specific geographical
coordinates in the following format: around=longitude,latitude,distance - The longitude
must be given between -180W and 180E and the distance is in kilometers.
\item Specification 2: The location is the one of an existing profile in the database.
It is thus specified with a float WMO and a cycle number: around=wmo,cyc,distance
This specification can take an optional fourth value specifying the time range in days
around the specified profile.
}
\code{dmode} parameter definitions:
\itemize{
\item Data from Argo floats are transmitted from the float, passed through processing and
automatic quality control procedures. These profiles have a Data Mode called: real-time data.
\item The data are also issued to the Principal Investigators who apply other procedures to
check data quality returned to the global data centre within 6 to 12 months. These profiles
have a Data Mode called: delayed mode data.
\item The adjustments applied to delayed-data may also be applied to real-time data, to
correct sensor drifts for real-time users. These profiles have a Data Mode called: real
time data with adjusted values.
}
\code{*_qc} parameter definitions:
This information was extracted from the netcdf profile variable PROFILE_PARAM_QC. Once
quality control procedures have been applied, a synthetic flag is assigned for each
parameter of each profile under this variable in netcdf files. It indicates the fraction
n of profile levels with good data. It can take one of the following values:
\itemize{
\item A or F: All (n=100\%) or none (n=0\%) of the profile levels contain good data,
\item B,C,D,E: n is in one of the intermediate range: 75-100, 50-75, 25-50 or 0-25
\item empty: No QC was performed.
}
}
\section{File storage}{
We use \pkg{rappdirs} to store files, see
\code{\link[rappdirs]{user_cache_dir}} for how we determine the directory on
your machine to save files to, and run
\code{rappdirs::user_cache_dir("rnoaa/argo")} to get that directory.
The \code{path} parameter used to be used to set where files are stored
on your machine.
}
\section{API Status}{
The API is down as of 2019-11-07, and probably some time before that. The
following functions won't work anymore (future package versions may bring
them back if the API comes back):
\itemize{
\item argo_search
\item argo_files
\item argo_qwmo
\item argo_plan
}
The following functions still work as they are based off the FTP server
that still exists:
\itemize{
\item argo_buoy_files
\item argo
}
}
\examples{
\dontrun{
# Search Argo metadata
## Number of profiles
# argo_search("np", limit = 3)
## Number of floats
# argo_search("nf", limit = 3)
## Number of both profiles and floats
# argo_search("n", limit = 3)
## return the coordinates in time and space of profiles
# argo_search("coord", limit = 3)
## return the coordinates in time and space of profiles, plus other metadata
# argo_search("fullcoord", limit = 3)
## List various things, e.g,...
### data assembly centers
# argo_search("list", "dac")
### data modes
# argo_search("list", "dmode", limit = 5)
### World Meteorological Organization unique float ID's
# argo_search("list", "wmo", limit = 5)
### Profile years
# argo_search("list", "year", limit = 5)
## coord or fullcoord with specific buoy id
# argo_search("coord", wmo = 13857, limit = 3)
# argo_search("fullcoord", wmo = 13857, limit = 3)
# Spatial search
### search by bounding box (see param def above)
# argo_search("coord", box = c(-40, 35, 3, 2))
### search by area
# argo_search("coord", area = 0)
### search by around
# argo_search("coord", around = '-40,35,100')
# Time based search
### search by year
# argo_search("coord", year = 2006)
### search by yearmin and yearmax
# argo_search("coord", yearmin = 2007)
# argo_search("coord", yearmin = 2007, yearmax = 2009)
### search by month
# argo_search("coord", month = '12,1,2')
### search by from or to
# argo_search("coord", from = 20090212)
# argo_search("coord", to = 20051129)
# Data mode search
# argo_search("coord", dmode = "R")
# argo_search("coord", dmode = "R,A")
# Data quality based search
# argo_search("coord", pres_qc = "A,B")
# argo_search("coord", temp_qc = "A")
# argo_search("coord", pres_qc = "A", temp_qc = "A")
# Ticket search
# argo_search("coord", ticket = 0)
## Search on partial float id number
# argo_qwmo(qwmo = 49)
# argo_qwmo(qwmo = 49, limit = 2)
## Get files
# argo_files(wmo = 13857)
# argo_files(wmo = 13857, cyc = 12)
# argo_files(wmo = 13857, cyc = 45)
## Get planned buoys data, accepts no parameters
# argo_plan()
# Get files for a buoy, must specify data assembly center (dac)
argo_buoy_files(dac = "bodc", id = 1901309)
argo_buoy_files(dac = "kma", id = 2900308)
# Get data
x <- argo_buoy_files(dac = "meds", id = 4900881)
argo(dac = "meds", id = 4900881, cycle = 127, dtype = "D")
}
}
\references{
http://www.ifremer.fr/lpo/naarc/m/docs/api/howto.html
}
|
# store types
# `meta`: columns that identify a loss record rather than measure it
meta <- c("origin", "dev", "id", "calendar")
# `detail`: categorization of the remaining columns; `dollar` groups the
# monetary columns into losses and recoveries, and "desc" marks
# descriptive columns
detail <- list(dollar = list(
  loss = c("paid", "incurred"),
  recovery = c("paid_recovery", "incurred_recovery")),
  "desc"
)
# Return columns of `ldf` selected either by column name or -- for dollar
# detail categories -- as their sum_dollar() totals. Columns are returned
# in the order given by `values`; multiple columns may be returned, e.g.
#   select_ldf(ldf_data, c("origin", "dev", "paid"))
#   select_ldf(ldf_data, c("paid_excess250", "sal_sub", "paid"))
select_ldf <- function(ldf, values) {
  # return summed type for dollar detail categories
  dollar_detail <- intersect(values, unlist(detail$dollar))
  if (length(dollar_detail) > 0) {
    # NOTE(review): sapply's return shape depends on what sum_dollar()
    # returns; if it always yields a fixed-length result, vapply() would be
    # a type-stable replacement -- confirm before changing.
    totals <- sapply(dollar_detail, function(x) sum_dollar(ldf, dollar = x))
  }
  # return columns for values supplied by column name
  col_names <- setdiff(intersect(names(ldf), values), unlist(detail$dollar))
  out <- ldf[, col_names, drop = FALSE]
  # combine with totals for dollar columns supplied in values argument.
  # Fixed: inherits = FALSE restricts the lookup to this function's
  # environment, so a stray `totals` in a parent environment (e.g. the
  # global env) can no longer be picked up when no dollar columns were
  # requested.
  if (exists("totals", inherits = FALSE)) {
    out <- data.frame(out, totals)
  }
  # order columns as supplied in values argument
  out[, match(values, names(out)), drop = FALSE]
}
#' merge wrapper for comparing two calendar periods of a loss data frame
#'
#' Splits `df` by its `calendar` column into the `calendar1` and `calendar2`
#' groups (dropping `exclude` columns), then full-outer-joins them on `by`,
#' suffixing overlapping value columns with the calendar labels.
#'
#' @param df data frame containing a `calendar` column
#' @param calendar1 calendar value identifying the first group
#' @param calendar2 calendar value identifying the second group
#' @param by vector of column names to merge by
#' @param exclude column names to be excluded from the merge
merge_ldf <- function(df, calendar1, calendar2, by, exclude) {
  group1 <- df[df$calendar == calendar1, setdiff(names(df), exclude)]
  group2 <- df[df$calendar == calendar2, setdiff(names(df), exclude)]
  comparison <- merge(group1, group2, by = by,
                      all.x = TRUE, all.y = TRUE,
                      suffixes = c(paste0("_", calendar1), paste0("_", calendar2)))
  # NAs arise from unmatched rows in the outer join; treat them as zero
  comparison[is.na(comparison)] <- 0
  comparison
}
# Column-type registry: `meta` lists the identifier/metadata columns of an
# ldf data frame (repeated chunk in this dump).
meta <- c("origin", "dev", "id", "calendar")
# Dollar-valued columns grouped by category; the bare "desc" entry marks
# free-text descriptor columns.
detail <- list(dollar = list(
loss = c("paid", "incurred"),
recovery = c("paid_recovery", "incurred_recovery")),
"desc"
)
# return columns for column names or by sum of dollar column
# can return multiple columns
# select_ldf(ldf_data, c("origin", "dev", "paid"))
# select_ldf(ldf_data, c("paid_excess250", "sal_sub", "paid"))
# Select ldf columns by name and/or by summed dollar category.
# `values` may mix plain column names with dollar detail categories
# (from the file-level `detail` list); categories are summed via
# sum_dollar(). Result columns follow the order of `values`.
select_ldf <- function(ldf, values) {
# return summed type for dollar detail categories
dollar_detail <- intersect(values, unlist(detail$dollar))
if (length(dollar_detail) > 0) {
totals <- sapply(dollar_detail, function(x) sum_dollar(ldf, dollar = x))
}
# note: Probably need to find replacement for sapply as it can be inconsistent
# return columns for values supplied by column name
col_names <- setdiff(intersect(names(ldf), values), unlist(detail$dollar))
out <- ldf[, col_names, drop = FALSE]
# combine with totals for dollar columns supplied in values argument
# NOTE(review): exists("totals") also matches a `totals` object in a parent
# environment; a NULL sentinel would be safer -- confirm before changing.
if (exists("totals")) {
out <- data.frame(out, totals)
}
# order columns as supplied in values argument
out[, match(values, names(out)), drop = FALSE]
}
#' Compare two calendar periods of a loss data frame.
#'
#' @param df data frame containing a `calendar` column
#' @param calendar1,calendar2 calendar periods to extract and compare
#' @param by vector of column names to merge by
#' @param exclude column names dropped from `df` before merging
merge_ldf <- function(df, calendar1, calendar2, by, exclude) {
  keep <- setdiff(names(df), exclude)
  lhs <- df[df$calendar == calendar1, keep]
  rhs <- df[df$calendar == calendar2, keep]
  merged <- merge(
    lhs, rhs,
    by = by, all = TRUE,
    suffixes = paste0("_", c(calendar1, calendar2))
  )
  # cells unmatched in one period become 0 rather than NA
  merged[is.na(merged)] <- 0
  merged
}
# Auto-generated fuzz fixture (valgrind regression input) for
# meteor:::ET0_PenmanMonteith. The zero-length vectors and extreme values
# are intentional: they probe edge-case handling, not physical plausibility.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.53611177638955e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Invoke the target with the fixture arguments matched by name.
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615839003-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 826 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.53611177638955e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Re-run of the fuzz call (duplicated chunk); str() prints the structure
# of the returned value for the regression log.
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.r
\name{parse_description}
\alias{parse_description}
\title{parse description of ScanImage TIFF file, converting it to key-value list}
\usage{
parse_description(x, raw = FALSE)
}
\arguments{
\item{x}{Path to a TIFF file, one or more slices returned by
\code{\link{read.scanimage}} or a raw data block.}
\item{raw}{Whether to return the raw description field as a single string or
when \code{FALSE} (the default) to return it is as a list containing parsed
R data types.}
}
\value{
a named \code{list} or, when \code{raw=TRUE}, a character vector of
length 1.
}
\description{
parse description of ScanImage TIFF file, converting it to key-value list
}
\details{
ScanImage TIFF files contain a single description field, which is a
CR delimited values of the form \code{key=value}.
}
\examples{
desc=parse_description(system.file(
'extdata/Blank-IPA_1s_16r_032.tif',package='scanimage'))
desc$state.configName
# [1] "ajdm_piezo"
desc$state.acq.frameRate
# [1] 8.138021 (Hz)
}
\seealso{
\code{\link{read.scanimage}}
}
| /man/parse_description.Rd | no_license | jefferis/scanimage | R | false | true | 1,104 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.r
\name{parse_description}
\alias{parse_description}
\title{parse description of ScanImage TIFF file, converting it to key-value list}
\usage{
parse_description(x, raw = FALSE)
}
\arguments{
\item{x}{Path to a TIFF file, one or more slices returned by
\code{\link{read.scanimage}} or a raw data block.}
\item{raw}{Whether to return the raw description field as a single string or
when \code{FALSE} (the default) to return it is as a list containing parsed
R data types.}
}
\value{
a named \code{list} or, when \code{raw=TRUE}, a character vector of
length 1.
}
\description{
parse description of ScanImage TIFF file, converting it to key-value list
}
\details{
ScanImage TIFF files contain a single description field, which is a
CR delimited values of the form \code{key=value}.
}
\examples{
desc=parse_description(system.file(
'extdata/Blank-IPA_1s_16r_032.tif',package='scanimage'))
desc$state.configName
# [1] "ajdm_piezo"
desc$state.acq.frameRate
# [1] 8.138021 (Hz)
}
\seealso{
\code{\link{read.scanimage}}
}
|
library(caret)
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(rattle)
library(randomForest)
set.seed(012345)
trainUrl <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
testUrl <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
training <- read.csv(url(trainUrl), na.strings=c("NA","#DIV/0!",""))
testing <- read.csv(url(testUrl), na.strings=c("NA","#DIV/0!",""))
inTrain <- createDataPartition(y=training$classe, p=0.6, list=FALSE)
myTraining <- training[inTrain, ]; myTesting <- training[-inTrain, ]
dim(myTraining)
dim(myTesting)
myDataNZV <- nearZeroVar(myTraining, saveMetrics=TRUE)
myNZVvars <- names(myTraining) %in% c("new_window", "kurtosis_roll_belt", "kurtosis_picth_belt",
"kurtosis_yaw_belt", "skewness_roll_belt", "skewness_roll_belt.1", "skewness_yaw_belt",
"max_yaw_belt", "min_yaw_belt", "amplitude_yaw_belt", "avg_roll_arm", "stddev_roll_arm",
"var_roll_arm", "avg_pitch_arm", "stddev_pitch_arm", "var_pitch_arm", "avg_yaw_arm",
"stddev_yaw_arm", "var_yaw_arm", "kurtosis_roll_arm", "kurtosis_picth_arm",
"kurtosis_yaw_arm", "skewness_roll_arm", "skewness_pitch_arm", "skewness_yaw_arm",
"max_roll_arm", "min_roll_arm", "min_pitch_arm", "amplitude_roll_arm", "amplitude_pitch_arm",
"kurtosis_roll_dumbbell", "kurtosis_picth_dumbbell", "kurtosis_yaw_dumbbell", "skewness_roll_dumbbell",
"skewness_pitch_dumbbell", "skewness_yaw_dumbbell", "max_yaw_dumbbell", "min_yaw_dumbbell",
"amplitude_yaw_dumbbell", "kurtosis_roll_forearm", "kurtosis_picth_forearm", "kurtosis_yaw_forearm",
"skewness_roll_forearm", "skewness_pitch_forearm", "skewness_yaw_forearm", "max_roll_forearm",
"max_yaw_forearm", "min_roll_forearm", "min_yaw_forearm", "amplitude_roll_forearm",
"amplitude_yaw_forearm", "avg_roll_forearm", "stddev_roll_forearm", "var_roll_forearm",
"avg_pitch_forearm", "stddev_pitch_forearm", "var_pitch_forearm", "avg_yaw_forearm",
"stddev_yaw_forearm", "var_yaw_forearm")
myTraining <- myTraining[!myNZVvars]
dim(myTraining)
myTraining <- myTraining[c(-1)]
trainingV3 <- myTraining #creating another subset to iterate in loop
for(i in 1:length(myTraining)) { #for every column in the training dataset
if( sum( is.na( myTraining[, i] ) ) /nrow(myTraining) >= .6 ) { #if n?? NAs > 60% of total observations
for(j in 1:length(trainingV3)) {
if( length( grep(names(myTraining[i]), names(trainingV3)[j]) ) ==1) { #if the columns are the same:
trainingV3 <- trainingV3[ , -j] #Remove that column
}
}
}
}
dim(trainingV3)
myTraining <- trainingV3
rm(trainingV3)
clean1 <- colnames(myTraining)
clean2 <- colnames(myTraining[, -58])
myTesting <- myTesting[clean1]
testing <- testing[clean2]
dim(myTesting)
dim(testing)
for (i in 1:length(testing) ) {
for(j in 1:length(myTraining)) {
if( length( grep(names(myTraining[i]), names(testing)[j]) ) ==1) {
class(testing[j]) <- class(myTraining[i])
}
}
}
testing <- rbind(myTraining[2, -58] , testing)
testing <- testing[-1,]
modFitA1 <- rpart(classe ~ ., data=myTraining, method="class")
fancyRpartPlot(modFitA1)
predictionsA1 <- predict(modFitA1, myTesting, type = "class")
confusionMatrix(predictionsA1, myTesting$classe)
modFitB1 <- randomForest(classe ~. , data=myTraining)
predictionsB1 <- predict(modFitB1, myTesting, type = "class")
confusionMatrix(predictionsB1, myTesting$classe)
predictionsB2 <- predict(modFitB1, testing, type = "class")
# Write each prediction to its own text file ("problem_id_<i>.txt"),
# the submission format expected by the course autograder.
# seq_len() (rather than 1:n) keeps the loop empty when x has length 0;
# 1:n would iterate over c(1, 0) and write a bogus NA file.
pml_write_files = function(x){
  n = length(x)
  for(i in seq_len(n)){
    filename = paste0("problem_id_",i,".txt")
    write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
  }
}
pml_write_files(predictionsB2)
| /DS-PML-Course-Assignment.R | no_license | Rich20152016/DS-Practical-Machine-Learning | R | false | false | 4,254 | r | library(caret)
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(rattle)
library(randomForest)
set.seed(012345)
trainUrl <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv"
testUrl <- "https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv"
training <- read.csv(url(trainUrl), na.strings=c("NA","#DIV/0!",""))
testing <- read.csv(url(testUrl), na.strings=c("NA","#DIV/0!",""))
inTrain <- createDataPartition(y=training$classe, p=0.6, list=FALSE)
myTraining <- training[inTrain, ]; myTesting <- training[-inTrain, ]
dim(myTraining)
dim(myTesting)
myDataNZV <- nearZeroVar(myTraining, saveMetrics=TRUE)
myNZVvars <- names(myTraining) %in% c("new_window", "kurtosis_roll_belt", "kurtosis_picth_belt",
"kurtosis_yaw_belt", "skewness_roll_belt", "skewness_roll_belt.1", "skewness_yaw_belt",
"max_yaw_belt", "min_yaw_belt", "amplitude_yaw_belt", "avg_roll_arm", "stddev_roll_arm",
"var_roll_arm", "avg_pitch_arm", "stddev_pitch_arm", "var_pitch_arm", "avg_yaw_arm",
"stddev_yaw_arm", "var_yaw_arm", "kurtosis_roll_arm", "kurtosis_picth_arm",
"kurtosis_yaw_arm", "skewness_roll_arm", "skewness_pitch_arm", "skewness_yaw_arm",
"max_roll_arm", "min_roll_arm", "min_pitch_arm", "amplitude_roll_arm", "amplitude_pitch_arm",
"kurtosis_roll_dumbbell", "kurtosis_picth_dumbbell", "kurtosis_yaw_dumbbell", "skewness_roll_dumbbell",
"skewness_pitch_dumbbell", "skewness_yaw_dumbbell", "max_yaw_dumbbell", "min_yaw_dumbbell",
"amplitude_yaw_dumbbell", "kurtosis_roll_forearm", "kurtosis_picth_forearm", "kurtosis_yaw_forearm",
"skewness_roll_forearm", "skewness_pitch_forearm", "skewness_yaw_forearm", "max_roll_forearm",
"max_yaw_forearm", "min_roll_forearm", "min_yaw_forearm", "amplitude_roll_forearm",
"amplitude_yaw_forearm", "avg_roll_forearm", "stddev_roll_forearm", "var_roll_forearm",
"avg_pitch_forearm", "stddev_pitch_forearm", "var_pitch_forearm", "avg_yaw_forearm",
"stddev_yaw_forearm", "var_yaw_forearm")
myTraining <- myTraining[!myNZVvars]
dim(myTraining)
myTraining <- myTraining[c(-1)]
trainingV3 <- myTraining #creating another subset to iterate in loop
for(i in 1:length(myTraining)) { #for every column in the training dataset
if( sum( is.na( myTraining[, i] ) ) /nrow(myTraining) >= .6 ) { #if n?? NAs > 60% of total observations
for(j in 1:length(trainingV3)) {
if( length( grep(names(myTraining[i]), names(trainingV3)[j]) ) ==1) { #if the columns are the same:
trainingV3 <- trainingV3[ , -j] #Remove that column
}
}
}
}
dim(trainingV3)
myTraining <- trainingV3
rm(trainingV3)
clean1 <- colnames(myTraining)
clean2 <- colnames(myTraining[, -58])
myTesting <- myTesting[clean1]
testing <- testing[clean2]
dim(myTesting)
dim(testing)
for (i in 1:length(testing) ) {
for(j in 1:length(myTraining)) {
if( length( grep(names(myTraining[i]), names(testing)[j]) ) ==1) {
class(testing[j]) <- class(myTraining[i])
}
}
}
testing <- rbind(myTraining[2, -58] , testing)
testing <- testing[-1,]
modFitA1 <- rpart(classe ~ ., data=myTraining, method="class")
fancyRpartPlot(modFitA1)
predictionsA1 <- predict(modFitA1, myTesting, type = "class")
confusionMatrix(predictionsA1, myTesting$classe)
modFitB1 <- randomForest(classe ~. , data=myTraining)
predictionsB1 <- predict(modFitB1, myTesting, type = "class")
confusionMatrix(predictionsB1, myTesting$classe)
predictionsB2 <- predict(modFitB1, testing, type = "class")
# Write each element of x to its own "problem_id_<i>.txt" file
# (course submission format): one unquoted value per file, no headers.
pml_write_files = function(x){
  total <- length(x)
  for (idx in 1:total) {
    out_path <- sprintf("problem_id_%d.txt", idx)
    write.table(x[idx], file = out_path, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
}
pml_write_files(predictionsB2)
|
#! /usr/bin/env Rscript
###################################################################
# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #
###################################################################
###################################################################
# * Generalised filter function for FC processing
# * Utility function for xcpEngine
###################################################################
###################################################################
# Load required libraries
###################################################################
suppressMessages(suppressWarnings(library(optparse)))
suppressMessages(suppressWarnings(library(signal)))
suppressMessages(suppressWarnings(library(RNifti)))
###################################################################
# Parse arguments to script, and ensure that the required arguments
# have been passed.
###################################################################
option_list = list(
make_option(c("-i", "--img"), action="store", default=NA, type='character',
help="Path to the BOLD timeseries to be filtered"),
make_option(c("-o", "--out"), action="store", default=NA, type='character',
help="Output path"),
make_option(c("-f", "--filter"), action="store", default='butterworth', type='character',
help="The class of filter to be applied to the timeseries.
Valid options include:
* butterworth [default]
* chebyshev1
* chebyshev2
* elliptic"),
make_option(c("-m", "--mask"), action="store", default=NA, type='character',
help="Spatial mask indicating the voxels of the input image
to which the filter should be applied."),
make_option(c("-c", "--hipass"), action="store", default=0.01, type='numeric',
help="The lower bound on frequencies permitted by the filter.
Any frequencies below the highpass cutoff will be
attenuated. [default 0.01]"),
make_option(c("-l", "--lopass"), action="store", default="nyquist", type='character',
help="The upper bound on frequencies permitted by the filter.
Any frequencies above the lowpass cutoff will be
attenuated. [default Nyquist]"),
make_option(c("-r", "--order"), action="store", default=1, type='numeric',
help="The filter order indicates the number of input samples
taken under consideration when generating an output
signal. In general, using a higher-order filter will
result in a sharper cutoff between accepted and
attenuated frequencies. For a gentler filter, use a
lower order."),
make_option(c("-d", "--direction"), action="store", default=2, type='numeric',
help="The filter direction indicates whether the input signal
should be processed in the forward direction only [-d 1]
or in both forward and reverse directions [-d 2]."),
make_option(c("-p", "--rpass"), action="store", default=1, type='numeric',
help="Chebyshev I and elliptic filters allow for sharper
discrimination between accepted and attenuated
frequencies at the cost of a 'ripple' in the pass band.
This ripple results in somewhat uneven retention of
pass-band frequencies."),
make_option(c("-s", "--rstop"), action="store", default=1, type='numeric',
help="Chebyshev II and elliptic filters allow for sharper
discrimination between accepted and attenuated
frequencies at the cost of a 'ripple' in the stop band.
This ripple results in somewhat uneven removal of
stop-band frequencies.")
)
opt = parse_args(OptionParser(option_list=option_list))
if (is.na(opt$img)) {
cat('User did not specify an input timeseries.\n')
cat('Use genfilter.R -h for an expanded usage menu.\n')
quit()
}
if (is.na(opt$out)) {
cat('User did not specify an output path.\n')
cat('Use genfilter.R -h for an expanded usage menu.\n')
quit()
}
impath <- opt$img
outpath <- opt$out
type <- opt$filter
maskpath <- opt$mask
order <- opt$order
direction <- opt$direction
hpf <- opt$hipass
lpf <- opt$lopass
ripple <- opt$rpass
ripple2 <- opt$rstop
###################################################################
# Compute the sequence's repetition time
###################################################################
hdr <- dumpNifti(impath)
tr <- hdr$pixdim[5]
###################################################################
# 1. Construct the filter
# a. Compute Nyquist
###################################################################
nyquist <- 1/(2*tr)
###################################################################
# b. Convert input frequencies to percent Nyquist
# low pass
###################################################################
if (lpf=="nyquist"){
lpnorm <- 1
} else {
lpf <- as.numeric(lpf)
lpnorm <- lpf/nyquist
}
if (lpnorm > 1){
lpnorm <- 1
}
###################################################################
# high pass
###################################################################
hpnorm <- hpf/nyquist
if (hpnorm < 0){
hpnorm <- 0
}
###################################################################
# c. Generate the filter
###################################################################
if (type=='butterworth'){
filt <- signal::butter(order, c(hpnorm,lpnorm),
"pass", "z")
} else if (type=='chebyshev1'){
filt <- signal::cheby1(order, ripple,
c(hpnorm,lpnorm), "pass", "z")
} else if (type=='chebyshev2'){
filt <- signal::cheby2(order, ripple2,
c(hpnorm,lpnorm), "pass", "z")
} else if (type=='elliptic') {
filt <- signal::ellip(order, ripple, ripple2,
c(hpnorm,lpnorm), "pass", "z")
}
sink("/dev/null")
###################################################################
# 2. Load in the image
###################################################################
img <- readNifti(impath)
out <- img
if (!is.na(maskpath)){
mask <- readNifti(maskpath)
logmask <- (mask == 1)
img <- img[logmask]
dim(img) <- c(sum(logmask),hdr$dim[5])
img <- t(img)
} else {
img <- as.array(img)
dim(img) <- c(prod(hdr$dim[2:4]),hdr$dim[5])
img <- t(img)
}
nvol <- dim(img)[1]
nvox <- dim(img)[2]
sink(NULL)
###################################################################
# 3. Apply the filter
###################################################################
# Apply the filter to every voxel timeseries (columns of img).
# The direction branch is hoisted out of the loop: it is loop-invariant,
# and an unsupported value now fails fast instead of silently leaving
# img_filt full of NA.
if (direction == 1) {
  apply_filt <- function(ts) signal::filter(filt, ts)
} else if (direction == 2) {
  apply_filt <- function(ts) signal::filtfilt(filt, ts)
} else {
  stop("direction must be 1 (forward) or 2 (forward and reverse)")
}
img_filt <- matrix(nrow = nvol, ncol = nvox)
for (vox in seq_len(nvox)) {
  img_filt[, vox] <- apply_filt(img[, vox])
}
###################################################################
# 4. Write out the image
###################################################################
# Write the filtered timeseries back into the output image.
if (!is.na(maskpath)) {
  # Masked case: update only in-mask voxels, one volume at a time.
  for (i in seq_len(nvol)) {
    out[, , , i][logmask] <- img_filt[i, ]
  }
} else {
  # Unmasked case: a single whole-array assignment replaces every finite
  # voxel (column-major order matches t(img_filt)). The original repeated
  # this identical assignment nvol times inside a loop; once is
  # sufficient and equivalent.
  out[out > -Inf] <- t(img_filt)
}
sink("/dev/null")
writeNifti(out,outpath,template=impath,datatype='float')
sink(NULL)
| /utils/genfilter.R | no_license | mattcieslak/xcpEngine | R | false | false | 8,115 | r | #! /usr/bin/env Rscript
###################################################################
# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #
###################################################################
###################################################################
# * Generalised filter function for FC processing
# * Utility function for xcpEngine
###################################################################
###################################################################
# Load required libraries
###################################################################
suppressMessages(suppressWarnings(library(optparse)))
suppressMessages(suppressWarnings(library(signal)))
suppressMessages(suppressWarnings(library(RNifti)))
###################################################################
# Parse arguments to script, and ensure that the required arguments
# have been passed.
###################################################################
option_list = list(
make_option(c("-i", "--img"), action="store", default=NA, type='character',
help="Path to the BOLD timeseries to be filtered"),
make_option(c("-o", "--out"), action="store", default=NA, type='character',
help="Output path"),
make_option(c("-f", "--filter"), action="store", default='butterworth', type='character',
help="The class of filter to be applied to the timeseries.
Valid options include:
* butterworth [default]
* chebyshev1
* chebyshev2
* elliptic"),
make_option(c("-m", "--mask"), action="store", default=NA, type='character',
help="Spatial mask indicating the voxels of the input image
to which the filter should be applied."),
make_option(c("-c", "--hipass"), action="store", default=0.01, type='numeric',
help="The lower bound on frequencies permitted by the filter.
Any frequencies below the highpass cutoff will be
attenuated. [default 0.01]"),
make_option(c("-l", "--lopass"), action="store", default="nyquist", type='character',
help="The upper bound on frequencies permitted by the filter.
Any frequencies above the lowpass cutoff will be
attenuated. [default Nyquist]"),
make_option(c("-r", "--order"), action="store", default=1, type='numeric',
help="The filter order indicates the number of input samples
taken under consideration when generating an output
signal. In general, using a higher-order filter will
result in a sharper cutoff between accepted and
attenuated frequencies. For a gentler filter, use a
lower order."),
make_option(c("-d", "--direction"), action="store", default=2, type='numeric',
help="The filter direction indicates whether the input signal
should be processed in the forward direction only [-d 1]
or in both forward and reverse directions [-d 2]."),
make_option(c("-p", "--rpass"), action="store", default=1, type='numeric',
help="Chebyshev I and elliptic filters allow for sharper
discrimination between accepted and attenuated
frequencies at the cost of a 'ripple' in the pass band.
This ripple results in somewhat uneven retention of
pass-band frequencies."),
make_option(c("-s", "--rstop"), action="store", default=1, type='numeric',
help="Chebyshev II and elliptic filters allow for sharper
discrimination between accepted and attenuated
frequencies at the cost of a 'ripple' in the stop band.
This ripple results in somewhat uneven removal of
stop-band frequencies.")
)
opt = parse_args(OptionParser(option_list=option_list))
if (is.na(opt$img)) {
cat('User did not specify an input timeseries.\n')
cat('Use genfilter.R -h for an expanded usage menu.\n')
quit()
}
if (is.na(opt$out)) {
cat('User did not specify an output path.\n')
cat('Use genfilter.R -h for an expanded usage menu.\n')
quit()
}
impath <- opt$img
outpath <- opt$out
type <- opt$filter
maskpath <- opt$mask
order <- opt$order
direction <- opt$direction
hpf <- opt$hipass
lpf <- opt$lopass
ripple <- opt$rpass
ripple2 <- opt$rstop
###################################################################
# Compute the sequence's repetition time
###################################################################
hdr <- dumpNifti(impath)
tr <- hdr$pixdim[5]
###################################################################
# 1. Construct the filter
# a. Compute Nyquist
###################################################################
nyquist <- 1/(2*tr)
###################################################################
# b. Convert input frequencies to percent Nyquist
# low pass
###################################################################
if (lpf=="nyquist"){
lpnorm <- 1
} else {
lpf <- as.numeric(lpf)
lpnorm <- lpf/nyquist
}
if (lpnorm > 1){
lpnorm <- 1
}
###################################################################
# high pass
###################################################################
hpnorm <- hpf/nyquist
if (hpnorm < 0){
hpnorm <- 0
}
###################################################################
# c. Generate the filter
###################################################################
if (type=='butterworth'){
filt <- signal::butter(order, c(hpnorm,lpnorm),
"pass", "z")
} else if (type=='chebyshev1'){
filt <- signal::cheby1(order, ripple,
c(hpnorm,lpnorm), "pass", "z")
} else if (type=='chebyshev2'){
filt <- signal::cheby2(order, ripple2,
c(hpnorm,lpnorm), "pass", "z")
} else if (type=='elliptic') {
filt <- signal::ellip(order, ripple, ripple2,
c(hpnorm,lpnorm), "pass", "z")
}
sink("/dev/null")
###################################################################
# 2. Load in the image
###################################################################
img <- readNifti(impath)
out <- img
if (!is.na(maskpath)){
mask <- readNifti(maskpath)
logmask <- (mask == 1)
img <- img[logmask]
dim(img) <- c(sum(logmask),hdr$dim[5])
img <- t(img)
} else {
img <- as.array(img)
dim(img) <- c(prod(hdr$dim[2:4]),hdr$dim[5])
img <- t(img)
}
nvol <- dim(img)[1]
nvox <- dim(img)[2]
sink(NULL)
###################################################################
# 3. Apply the filter
###################################################################
# Apply the filter to every voxel timeseries (columns of img).
# The direction branch is hoisted out of the loop: it is loop-invariant,
# and an unsupported value now fails fast instead of silently leaving
# img_filt full of NA.
if (direction == 1) {
  apply_filt <- function(ts) signal::filter(filt, ts)
} else if (direction == 2) {
  apply_filt <- function(ts) signal::filtfilt(filt, ts)
} else {
  stop("direction must be 1 (forward) or 2 (forward and reverse)")
}
img_filt <- matrix(nrow = nvol, ncol = nvox)
for (vox in seq_len(nvox)) {
  img_filt[, vox] <- apply_filt(img[, vox])
}
###################################################################
# 4. Write out the image
###################################################################
# Write the filtered timeseries back into the output image.
if (!is.na(maskpath)) {
  # Masked case: update only in-mask voxels, one volume at a time.
  for (i in seq_len(nvol)) {
    out[, , , i][logmask] <- img_filt[i, ]
  }
} else {
  # Unmasked case: a single whole-array assignment replaces every finite
  # voxel (column-major order matches t(img_filt)). The original repeated
  # this identical assignment nvol times inside a loop; once is
  # sufficient and equivalent.
  out[out > -Inf] <- t(img_filt)
}
sink("/dev/null")
writeNifti(out,outpath,template=impath,datatype='float')
sink(NULL)
|
context("test_phe_fun_prop_plot")
# test calculations
test_that("confidence limits calculate correctly",{
expect_equal(data.frame(phe_fun_prop_plot((test_fs)[2:3], numerator, denominator)),
data.frame(test_fp),check.attributes=FALSE, check.names=FALSE, info="test default")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,9:16)[1:3], Numerator, Denominator,
# multiplier = 100, type="full")),
# data.frame(select(slice(test_Prop,9:16),1:9)),check.attributes=FALSE, check.names=FALSE, info="test full, percentage")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator,
# multiplier = 1, type="standard")),
# data.frame(select(slice(test_Prop,1:8),1:6)),check.attributes=FALSE, check.names=FALSE, info="test standard")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,17:24)[1:3], Numerator, Denominator,
# type="full", confidence=99.8)),
# data.frame(select(slice(test_Prop,17:24),1:9)),check.attributes=FALSE, check.names=FALSE, info="test confidence")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator, type="value")),
# data.frame(select(slice(test_Prop,1:8),1:4)),check.attributes=FALSE, check.names=FALSE, info="test value")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator, type="lower")),
# data.frame(select(slice(test_Prop,1:8),1:3,5)),check.attributes=FALSE, check.names=FALSE, info="test lower")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator, type="upper")),
# data.frame(select(slice(test_Prop,1:8),1:3,6)),check.attributes=FALSE, check.names=FALSE, info="test upper")
# expect_equal(arrange(data.frame(phe_proportion(filter(test_Prop,Area %in% c("Area09","Area10","Area11"))[1:3], Numerator, Denominator, type="full")), Area),
# arrange(data.frame(select(filter(test_Prop,Area %in% c("Area09","Area10","Area11")),1:9)), Area),check.attributes=FALSE, check.names=FALSE, info="test NAs")
# expect_equal(arrange(data.frame(phe_proportion(slice(test_Prop_g,1:8)[1:3], Numerator, Denominator, type="standard")), Area),
# arrange(data.frame(test_Prop_g_results[1:6]),Area),check.attributes=FALSE, check.names=FALSE, info="test grouped")
})
# test error handling
# check required arguments present
# stop("function phe_proportion requires at least 3 arguments: data, x, n")
# stop("numerators must be greater than or equal to zero")
# stop("denominators must be greater than zero")
# stop("numerators must be less than or equal to denominator for a proportion statistic")
test_that("denominators must be greater than zero",{
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,100,0)), obs,pop),
"denominators must be greater than zero", info="check your denominators")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,-80,30),
pop=c(100,100,200)
), obs, pop),
"numerators must be greater than or equal to zero", info="error num < 0")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,0,100)), obs, pop),
"denominators must be greater than zero", info="error denom = 0")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,100,-100)), obs, pop),
"denominators must be greater than zero", info="error denom < 0")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,100,20)), obs, pop),
"numerators must be less than or equal to denominator for a proportion statistic", info="error numerator>denominator")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
pop =c(100,100,20)), pop),
"function phe_proportion requires at least 3 arguments: data, x, n", info="check the parameters passed into the function")
})
| /tests/testthat/test_fun_prop_plot.R | no_license | matthew-francis/PHEindicatormethods | R | false | false | 4,756 | r | context("test_phe_fun_prop_plot")
# test calculations
test_that("confidence limits calculate correctly",{
expect_equal(data.frame(phe_fun_prop_plot((test_fs)[2:3], numerator, denominator)),
data.frame(test_fp),check.attributes=FALSE, check.names=FALSE, info="test default")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,9:16)[1:3], Numerator, Denominator,
# multiplier = 100, type="full")),
# data.frame(select(slice(test_Prop,9:16),1:9)),check.attributes=FALSE, check.names=FALSE, info="test full, percentage")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator,
# multiplier = 1, type="standard")),
# data.frame(select(slice(test_Prop,1:8),1:6)),check.attributes=FALSE, check.names=FALSE, info="test standard")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,17:24)[1:3], Numerator, Denominator,
# type="full", confidence=99.8)),
# data.frame(select(slice(test_Prop,17:24),1:9)),check.attributes=FALSE, check.names=FALSE, info="test confidence")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator, type="value")),
# data.frame(select(slice(test_Prop,1:8),1:4)),check.attributes=FALSE, check.names=FALSE, info="test value")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator, type="lower")),
# data.frame(select(slice(test_Prop,1:8),1:3,5)),check.attributes=FALSE, check.names=FALSE, info="test lower")
# expect_equal(data.frame(phe_proportion(slice(test_Prop,1:8)[1:3], Numerator, Denominator, type="upper")),
# data.frame(select(slice(test_Prop,1:8),1:3,6)),check.attributes=FALSE, check.names=FALSE, info="test upper")
# expect_equal(arrange(data.frame(phe_proportion(filter(test_Prop,Area %in% c("Area09","Area10","Area11"))[1:3], Numerator, Denominator, type="full")), Area),
# arrange(data.frame(select(filter(test_Prop,Area %in% c("Area09","Area10","Area11")),1:9)), Area),check.attributes=FALSE, check.names=FALSE, info="test NAs")
# expect_equal(arrange(data.frame(phe_proportion(slice(test_Prop_g,1:8)[1:3], Numerator, Denominator, type="standard")), Area),
# arrange(data.frame(test_Prop_g_results[1:6]),Area),check.attributes=FALSE, check.names=FALSE, info="test grouped")
})
# test error handling
# check required arguments present
# stop("function phe_proportion requires at least 3 arguments: data, x, n")
# stop("numerators must be greater than or equal to zero")
# stop("denominators must be greater than zero")
# stop("numerators must be less than or equal to denominator for a proportion statistic")
test_that("denominators must be greater than zero",{
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,100,0)), obs,pop),
"denominators must be greater than zero", info="check your denominators")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,-80,30),
pop=c(100,100,200)
), obs, pop),
"numerators must be greater than or equal to zero", info="error num < 0")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,0,100)), obs, pop),
"denominators must be greater than zero", info="error denom = 0")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,100,-100)), obs, pop),
"denominators must be greater than zero", info="error denom < 0")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
obs =c(65,80,30),
pop =c(100,100,20)), obs, pop),
"numerators must be less than or equal to denominator for a proportion statistic", info="error numerator>denominator")
expect_error(phe_fun_prop_sig(data.frame(area=c("Area1","Area2","Area3"),
pop =c(100,100,20)), pop),
"function phe_proportion requires at least 3 arguments: data, x, n", info="check the parameters passed into the function")
})
|
library(readxl)
library(digest)
library(haven)
library(dplyr)
library(VIM)
library(naniar)
library(smoothmest)
library(fastDummies)
# Exercise 1
# Import the file, then inspect and fix the column formats that look wrong
DF <- read_excel("SBAnational.xlsx")
str(DF)
names(DF)
# fix formats
# numeric columns to integer
DF[,c(10,11,12,14,15)] <- apply(DF[,c(10,11,12,14,15)], 2, as.integer)
# everything else to factor
# factor: factors are how R stores categorical variables
for (i in c(4,5,7,8,13,16,17,18,19,24)) {DF[,i]<-as_factor(DF[,i])}
# the identifier must not be numeric, because we should never do arithmetic on it
DF[,1] <- as.character(DF$LoanNr_ChkDgt)
DF$LoanNr_ChkDgt
# POSIXct to Date
# NOTE(review): apply() coerces the selection to a matrix before calling
# as.Date and strips the Date class from the result -- verify the assigned
# columns really end up as Date and not numeric/character (lapply would
# preserve the class).
DF[,c(9,20,21)] <- apply(DF[,c(9,20,21)], 2, as.Date)
# columns 22, 23, 25, 26 and 27 are numeric and stay numeric
# columns 2, 3 and 6 are character and are kept as character
# Exercise 2
# anonymisation
DF <- as.data.frame(mutate(DF, Name=sapply(Name, digest, c("md5"))))
str(DF)
# the variable to anonymise is Name, the identifier of the company requesting the loan
# of the methods we studied, this one gives the most convenient output
# format (although not the most secure one)
# also, I make sure the variable is replaced completely,
# since otherwise this would only be pseudonymisation
# data discovery
# drop duplicate rows
DF <- DF[!duplicated(DF), ] # none found
DF
# start with the variable domains
# the identifiers (columns 1, 2, 3 and 6) do not need analysing
# factor variables
# Frequency table for a vector, returned as a data frame with the observed
# values in column `vector` and their counts in column `Freq`.
tabla_frecuencias <- function(vector) {
  as.data.frame(table(vector))
}
# Frequency tables for every categorical variable.
frec_State <- tabla_frecuencias(DF$State)
frec_Zip <- tabla_frecuencias(DF$Zip)
frec_BankState <- tabla_frecuencias(DF$BankState)
frec_NAICS <- tabla_frecuencias(DF$NAICS)
frec_NewExist <- tabla_frecuencias(DF$NewExist)
frec_FranchiseCode <- tabla_frecuencias(DF$FranchiseCode)
frec_UrbanRural <- tabla_frecuencias(DF$UrbanRural)
frec_RevLineCr <- tabla_frecuencias(DF$RevLineCr)
frec_LowDoc <- tabla_frecuencias(DF$LowDoc)
frec_Status <- tabla_frecuencias(DF$MIS_Status)
# variables without a code mapping cannot be checked
# fix the FranchiseCode and LowDoc variables because some of their
# values are badly coded
# NOTE(review): the recodes below actually touch RevLineCr, LowDoc, Zip and
# NAICS -- not FranchiseCode; the comment and the code disagree.
# recode the values I can; drop the rows I cannot
DF$RevLineCr[DF$RevLineCr == "1"] <- "Y"
DF$RevLineCr[DF$RevLineCr == "0"] <- "N"
DF$LowDoc[DF$LowDoc == "1"] <- "Y"
DF$LowDoc[DF$LowDoc == "0"] <- "N"
DF$Zip[DF$Zip == "0"] <- ""
DF$NAICS[DF$NAICS == "0"] <- ""
# keep only rows with valid LowDoc, RevLineCr and NewExist codes
DF <- DF[(DF$LowDoc=="Y" | DF$LowDoc=="N") & (DF$RevLineCr=="Y" | DF$RevLineCr=="N") & (DF$NewExist=="1" | DF$NewExist=="2") ,]
# continuous variables
numeric <- as.data.frame(select_if(DF, is.numeric))
desc_numeric <- as.data.frame(summary(numeric))
# missing values, per variable and per case
ausencias <- as.data.frame(miss_var_summary(DF))
ausencias_casos <- miss_case_table(DF)
# the variable with the most missings is ChgOffDate, but it cannot be
# removed because the missings are structural: loans with no charge-off
# finally, visualise the outliers of the numeric variables
# except for the loan approval year: although it is an integer,
# checking its range is enough
boxplot(numeric[-c(1)], las = 2)
# the value ranges clearly differ,
# so split the visualisation
boxplot(numeric[c(1,2,3,4)], las = 2)
boxplot(numeric[c(5,7,8,9)], las = 2)
boxplot(numeric[c(14)], las = 2)
# the outlier in ChgOffPrinGr makes sense and is consistent with the
# ChgOffDate missings, because there is only one charge-off
# otherwise one company stands out in GrAppv,
# but on inspection it is simply a large company overall
# Exercise 4: imputation
# impute the missing values of the numeric variables via kNN
DF <- kNN(DF, variable=colnames(DF[c(22,23,27,25,26)]), dist_var=colnames(DF[c(12,13,14,15,17,18,19)]))
miss_var_summary(DF[c(22,23,27,25,26)])
# Exercise 5: capping
# cap the continuous variables
# Winsorization: values outside mean +/- 3 standard deviations are capped at
# the corresponding bound; values inside the band pass through unchanged.
acotacion <- function(var) {
  lo <- mean(var) - 3 * sd(var)
  hi <- mean(var) + 3 * sd(var)
  pmin(pmax(var, lo), hi)
}
# Winsorize each numeric column at its own mean +/- 3*SD.
# BUG FIX: the original used MARGIN = 1 (rows), which capped each set of five
# values within a row against the row's mean/SD and returned a transposed
# matrix that was silently recycled column-wise into DF, scrambling the data.
# Column-wise capping (MARGIN = 2) is what the surrounding comments intend.
DF[,c(22,23,25,26,27)] <- apply(DF[,c(22,23,25,26,27)], 2, acotacion)
summary(DF[,c(22,23,25,26,27)])
#Ejercicio 6
#anonimizaci?n consulta
epsilon <- 0.1
max <- max(DF[c(23)])
min <- min(DF[c(23)])
n <- nrow(DF)
gs <- (max-min)/n
consulta <- DF %>%
group_by(Zip) %>%
summarize(mean(BalanceGross))
consulta_anonimizada <- cbind(consulta[,1], rdoublex(16503, consulta[,2], gs/epsilon))
#Ejercicio 7
#genero dummies
any_na(DF[,17])
levels(DF[,17])
#los missings los imputo a valores 0 que equivale a desconocido
DF$UrbanRural[is.na(DF[17])==TRUE] <- "0"
dummies <- dummy_cols(DF[,17])[-c(1,2)]
names(dummies) <- c("Urban", "Rural")
DF <- cbind(DF[-c(17)], dummies)
#solo hacen falta 2 variables dummies para no perder informaci?n
| /codigo (1).R | no_license | asier-aranoa/prueba | R | false | false | 4,913 | r |
library(readxl)
library(digest)
library(haven)
library(dplyr)
library(VIM)
library(naniar)
library(smoothmest)
library(fastDummies)
# Exercise 1
# Import the file, then inspect and fix the column formats that look wrong
DF <- read_excel("SBAnational.xlsx")
str(DF)
names(DF)
# fix formats
# numeric columns to integer
DF[,c(10,11,12,14,15)] <- apply(DF[,c(10,11,12,14,15)], 2, as.integer)
# everything else to factor
# factor: factors are how R stores categorical variables
for (i in c(4,5,7,8,13,16,17,18,19,24)) {DF[,i]<-as_factor(DF[,i])}
# the identifier must not be numeric, because we should never do arithmetic on it
DF[,1] <- as.character(DF$LoanNr_ChkDgt)
DF$LoanNr_ChkDgt
# POSIXct to Date
# NOTE(review): apply() coerces the selection to a matrix before calling
# as.Date and strips the Date class from the result -- verify the assigned
# columns really end up as Date and not numeric/character (lapply would
# preserve the class).
DF[,c(9,20,21)] <- apply(DF[,c(9,20,21)], 2, as.Date)
# columns 22, 23, 25, 26 and 27 are numeric and stay numeric
# columns 2, 3 and 6 are character and are kept as character
# Exercise 2
# anonymisation
DF <- as.data.frame(mutate(DF, Name=sapply(Name, digest, c("md5"))))
str(DF)
# the variable to anonymise is Name, the identifier of the company requesting the loan
# of the methods we studied, this one gives the most convenient output
# format (although not the most secure one)
# also, I make sure the variable is replaced completely,
# since otherwise this would only be pseudonymisation
# data discovery
# drop duplicate rows
DF <- DF[!duplicated(DF), ] # none found
DF
# start with the variable domains
# the identifiers (columns 1, 2, 3 and 6) do not need analysing
# factor variables
# Frequency table for a vector, returned as a data frame with the observed
# values in column `vector` and their counts in column `Freq`.
tabla_frecuencias <- function(vector) {
  as.data.frame(table(vector))
}
# Frequency tables for every categorical variable.
frec_State <- tabla_frecuencias(DF$State)
frec_Zip <- tabla_frecuencias(DF$Zip)
frec_BankState <- tabla_frecuencias(DF$BankState)
frec_NAICS <- tabla_frecuencias(DF$NAICS)
frec_NewExist <- tabla_frecuencias(DF$NewExist)
frec_FranchiseCode <- tabla_frecuencias(DF$FranchiseCode)
frec_UrbanRural <- tabla_frecuencias(DF$UrbanRural)
frec_RevLineCr <- tabla_frecuencias(DF$RevLineCr)
frec_LowDoc <- tabla_frecuencias(DF$LowDoc)
frec_Status <- tabla_frecuencias(DF$MIS_Status)
# variables without a code mapping cannot be checked
# fix the FranchiseCode and LowDoc variables because some of their
# values are badly coded
# NOTE(review): the recodes below actually touch RevLineCr, LowDoc, Zip and
# NAICS -- not FranchiseCode; the comment and the code disagree.
# recode the values I can; drop the rows I cannot
DF$RevLineCr[DF$RevLineCr == "1"] <- "Y"
DF$RevLineCr[DF$RevLineCr == "0"] <- "N"
DF$LowDoc[DF$LowDoc == "1"] <- "Y"
DF$LowDoc[DF$LowDoc == "0"] <- "N"
DF$Zip[DF$Zip == "0"] <- ""
DF$NAICS[DF$NAICS == "0"] <- ""
# keep only rows with valid LowDoc, RevLineCr and NewExist codes
DF <- DF[(DF$LowDoc=="Y" | DF$LowDoc=="N") & (DF$RevLineCr=="Y" | DF$RevLineCr=="N") & (DF$NewExist=="1" | DF$NewExist=="2") ,]
# continuous variables
numeric <- as.data.frame(select_if(DF, is.numeric))
desc_numeric <- as.data.frame(summary(numeric))
# missing values, per variable and per case
ausencias <- as.data.frame(miss_var_summary(DF))
ausencias_casos <- miss_case_table(DF)
# the variable with the most missings is ChgOffDate, but it cannot be
# removed because the missings are structural: loans with no charge-off
# finally, visualise the outliers of the numeric variables
# except for the loan approval year: although it is an integer,
# checking its range is enough
boxplot(numeric[-c(1)], las = 2)
# the value ranges clearly differ,
# so split the visualisation
boxplot(numeric[c(1,2,3,4)], las = 2)
boxplot(numeric[c(5,7,8,9)], las = 2)
boxplot(numeric[c(14)], las = 2)
# the outlier in ChgOffPrinGr makes sense and is consistent with the
# ChgOffDate missings, because there is only one charge-off
# otherwise one company stands out in GrAppv,
# but on inspection it is simply a large company overall
# Exercise 4: imputation
# impute the missing values of the numeric variables via kNN
DF <- kNN(DF, variable=colnames(DF[c(22,23,27,25,26)]), dist_var=colnames(DF[c(12,13,14,15,17,18,19)]))
miss_var_summary(DF[c(22,23,27,25,26)])
# Exercise 5: capping
# cap the continuous variables
# Winsorization: values outside mean +/- 3 standard deviations are capped at
# the corresponding bound; values inside the band pass through unchanged.
acotacion <- function(var) {
  lo <- mean(var) - 3 * sd(var)
  hi <- mean(var) + 3 * sd(var)
  pmin(pmax(var, lo), hi)
}
# Winsorize each numeric column at its own mean +/- 3*SD.
# BUG FIX: the original used MARGIN = 1 (rows), which capped each set of five
# values within a row against the row's mean/SD and returned a transposed
# matrix that was silently recycled column-wise into DF, scrambling the data.
# Column-wise capping (MARGIN = 2) is what the surrounding comments intend.
DF[,c(22,23,25,26,27)] <- apply(DF[,c(22,23,25,26,27)], 2, acotacion)
summary(DF[,c(22,23,25,26,27)])
# Exercise 6
# anonymising a query: add double-exponential (Laplace-style) noise to the
# per-Zip mean of BalanceGross
epsilon <- 0.1
# NOTE(review): these assignments shadow base::max and base::min for the rest
# of the script; dedicated names (e.g. max_bal/min_bal) would be safer.
max <- max(DF[c(23)])
min <- min(DF[c(23)])
n <- nrow(DF)
gs <- (max-min)/n
consulta <- DF %>%
group_by(Zip) %>%
summarize(mean(BalanceGross))
# NOTE(review): 16503 is a hard-coded group count; nrow(consulta) would keep
# the noise vector in sync with the actual number of Zip groups.
consulta_anonimizada <- cbind(consulta[,1], rdoublex(16503, consulta[,2], gs/epsilon))
# Exercise 7
# build dummy variables from UrbanRural (column 17)
any_na(DF[,17])
levels(DF[,17])
# impute missings to the value 0, which means unknown
DF$UrbanRural[is.na(DF[17])==TRUE] <- "0"
dummies <- dummy_cols(DF[,17])[-c(1,2)]
names(dummies) <- c("Urban", "Rural")
DF <- cbind(DF[-c(17)], dummies)
# only 2 dummy variables are needed to avoid losing information
|
###Final Project Prototype
###Julia Bowling
###Data Visualization
library("ggplot2")
# NOTE(review): ggplot2 is loaded twice; the second call is redundant.
library("ggplot2")
library("tidyr")
library("dplyr")
###Deportations: http://www.dhs.gov/publication/yearbook-immigration-statistics-2013-enforcement-actions
###Imprisonment: http://www.bjs.gov/index.cfm?ty=nps
# Load the deportation data and derive per-border totals from the
# BP/HS/ER columns.
migrant <- read.csv("~/Desktop/juliacbowling.github.io/data/migrant.csv")
View(migrant)
head(migrant)
migrant$northborder <- migrant$BP.North+migrant$HS.North+migrant$ER.North
migrant$southborder <- migrant$BP.South+migrant$HS.South+migrant$ER.South
head(migrant)
# Line chart: program total vs. north- and south-border totals.
plot1 <- ggplot(migrant, aes(x=Year)) +
geom_line(aes(y=PROGRAM.Total), color="#E63227") +
geom_line(aes(y=northborder), color="#0C5B15") +
geom_line(aes(y=southborder), color="#293BE5") + ggtitle("Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=18, hjust=0))
print(plot1)
ggsave("border.pdf", plot1)
# Line chart: total vs. North/South America origin counts.
plot2 <- ggplot(migrant, aes(x=Year)) +
geom_line(aes(y=Total), color="#E63227") +
geom_line(aes(y=North.America), color="#0C5B15") +
geom_line(aes(y=South.America), color="#293BE5") + ggtitle("Nationalities of Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=18, hjust=0))
print(plot2)
ggsave("nation.pdf", plot2)
# Share of each origin region in the yearly total.
migrant <- mutate(migrant, na_percent = (North.America/Total))
migrant <- mutate(migrant, sa_percent = (South.America/Total))
migrant <- mutate(migrant, af_percent = (Africa/Total))
migrant <- mutate(migrant, as_percent = (Asia/Total))
migrant <- mutate(migrant, eu_percent = (Europe/Total))
migrant <- mutate(migrant, oc_percent = (Oceania/Total))
migrant <- mutate(migrant, ukn_percent = (Unknown.1/Total))
migrant <- mutate(migrant, am_percent = ((North.America+South.America)/Total))
migrant2013 <- subset(migrant, Year==2013, drop=FALSE)
head(migrant2013)
# One-bar chart: 2013 North-American share over a full-width grey bar.
# NOTE(review): colorRamp() is called with no arguments and passed as an
# unused argument to ggplot() -- it has no effect.
plot3 <- ggplot(migrant2013, aes(x=Year), colorRamp()) +
geom_bar(stat="identity", aes(y=1), fill='grey') +
geom_bar(stat="identity", aes(y=na_percent), fill='red') + coord_flip() + ggtitle("Origins of Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot3)
###new??? plot3 below is wrong
# BUG FIX: the original select() was missing the comma between af_percent and
# as_percent ("af_percentas_percent" names no column), and gather() was called
# without a value-column name, so aes(y = na_percent) pointed at the key
# column. Gather the region names into `continent` and the shares into
# `na_percent` so the existing aesthetic mapping still works; geom_bar()
# needs stat = "identity" when a y aesthetic is supplied.
chart_data <- select(migrant2013, Year, na_percent, sa_percent, af_percent, as_percent, eu_percent, oc_percent, ukn_percent, am_percent)
chart_data <- gather(chart_data, continent, na_percent, -Year)
plot3 <- ggplot(chart_data, aes(x=Year, y=na_percent)) + coord_flip() + geom_bar(stat = "identity")
print(plot3)
library(dplyr)
# Long-format versions of the continent and top-country columns.
continent <- select(migrant, Year, Total, North.America, South.America, Africa, Asia, Europe, Oceania)
contdeport <- gather(continent, name, deportations, North.America:Oceania)
topcountry <- select(migrant, Year, Total, Mexico, Guatemala, Honduras, El.Salvador)
topdeport <- gather(topcountry, country, deportations, Mexico:El.Salvador)
###na_percent == 0.9507957
ggsave("percent_northamerican.pdf", plot3, width = 8, height = 4)
# Same one-bar chart for the combined (North + South) American share.
plot4 <- ggplot(migrant2013, aes(x=Year), colorRamp()) +
geom_bar(stat="identity", aes(y=1), fill='grey') +
geom_bar(stat="identity", aes(y=am_percent), fill='red') + coord_flip() + ggtitle("Origins of Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot4)
ggsave("percent_american.pdf", plot4, width = 8, height = 4)
###am_percent == 0.9696022
# Overlaid area charts for the top origin countries.
# NOTE(review): geom_area's `color=` only colours the outline (fill= is
# probably intended), and overlaid rather than stacked areas hide the
# smaller series.
plot6 <- ggplot(migrant, aes(x=Year)) +
geom_area(aes(y=Mexico), color="purple") +
geom_area(aes(y=Guatemala), color="green") +
geom_area(aes(y=Honduras), color="yellow") +
geom_area(aes(y=El.Salvador), color="blue") +
geom_area(aes(y=Ecuador), color="red") + ggtitle("Deportations: Top Countries of Origin, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot6)
ggsave("top_deporations.pdf", plot6)
plot7 <- ggplot(migrant, aes(x=Year)) +
geom_area(aes(y=Guatemala), color="green") +
geom_area(aes(y=Honduras), color="yellow") +
geom_area(aes(y=El.Salvador), color="blue") +
geom_area(aes(y=Ecuador), color="red") + ggtitle("Deportations: Top Rising Countries of Origin, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot7)
###alternative for plot 7
# Tidy alternative: gather the four countries and let ggplot stack them.
chart_data <- select(migrant, Year, Guatemala, Honduras, El.Salvador, Ecuador)
chart_data <- gather(chart_data, country, deported, -Year)
plot7 <- ggplot(chart_data, aes(x=Year, y=deported, group=country, fill = country)) + geom_area()
print(plot7)
ggsave("top_risingdeporations.pdf", plot7)
####how to facet wrap, fill color, stack, and gather
###NEW DEC 2
# One single-country line chart (and PDF) per destination country.
plot8 <- ggplot(migrant, aes(x=Year, y=Canada))+geom_line()+
ggtitle("U.S. Deportations to Canada, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot8)
ggsave("Canada.pdf", plot8)
plot9 <- ggplot(migrant, aes(x=Year, y=Mexico))+geom_line()+
ggtitle("U.S. Deportations to Mexico, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot9)
ggsave("Mexico.pdf", plot9)
plot10 <- ggplot(migrant, aes(x=Year, y=Guatemala))+geom_line()+
ggtitle("U.S. Deportations to Guatemala, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot10)
ggsave("Guatemala.pdf", plot10)
plot11 <- ggplot(migrant, aes(x=Year, y=Honduras))+geom_line()+
ggtitle("U.S. Deportations to Honduras, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot11)
ggsave("Honduras.pdf", plot11)
# NOTE(review): gather() here puts bare column names in the key/value
# positions, so wrap_data does not contain the `deported`/`country` columns
# mapped on the next line; plot11 and plot12 as written will not render
# (`country`/`deportations` exist in topdeport, not migrant).
wrap_data <- gather(migrant, Year, Canada, Mexico, Guatemala, Honduras)
plot11 <- ggplot(wrap_data, aes(x=Year, y=deported, group=country, fill = country)) + geom_area()
plot12 <- ggplot(migrant, aes(country, deportations)) + geom_line() + facet_grid(country ~ .) +
ggtitle("U.S. Deportations 2004-2013") +
theme(plot.title = element_text(family="Trebuchet MS", face ="bold", size=20, hjust=0, color="#555555"))
# NOTE(review): t() turns the one-row data frame into a character matrix;
# ggplot() cannot take a matrix, and the aes() below is left empty.
migrant2013 <- t(migrant2013)
plot2013 <- ggplot(migrant2013, aes(x=, y=)) + geom_line() + facet_grid(country ~ .) +
ggtitle("U.S. Deportations 2004-2013") +
theme(plot.title = element_text(family="Trebuchet MS", face ="bold", size=20, hjust=0, color="#555555"))
# Historical removals/returns and presidential-term data.
removal_history <- read.csv("~/Desktop/juliacbowling.github.io/data/removal_history.csv")
presidents <- read.csv("~/Desktop/juliacbowling.github.io/data/presidents.csv")
View(presidents)
View(removal_history)
head(removal_history)
removal_history$Deportations <- removal_history$Removals + removal_history$Returns
# NOTE(review): this geom_bar maps y without stat="identity" (unlike the
# second plot5 below, which does it correctly).
plot5 <- ggplot() +
geom_line(data=removal_history, aes(y=Deportations, x=Year), color="red") +
geom_bar(data = presidents, aes(x = TO_YYYY, y = count)) +
ggtitle("Deportations 1892-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot5)
ggsave("dep1892.pdf", plot5)
# Restrict to 1940 onward; imprisonment counts overlaid from 1978 onward.
r1950 <- subset(removal_history, Year>1939, drop = FALSE)
p1950 <- subset(presidents, TO_YYYY>1939, drop = FALSE)
i1977 <- subset(removal_history, Year>1977, drop = FALSE)
plot5 <- ggplot() +
geom_area(data=r1950, aes(y=Deportations, x=Year), fill="red") +
geom_area(data=i1977, aes(y=im_count, x=Year), fill="blue") +
geom_bar(data = p1950, aes(x = TO_YYYY, y = count), stat='identity', width = .1) +
ggtitle("Deportations 1940-2014") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot5)
ggsave("dep1940.pdf", plot5)
# NOTE(review): the next two lines run before their inputs exist --
# select() is missing its data argument (uspop is only read from CSV further
# below) and removal1978 is used in the left_join before it is defined on
# the following line.
uspop <- select(Year, uspop)
removal1978 <- left_join(uspop, removal1978, "Year")
removal1978 <- subset(removal_history, Year>1977, drop = FALSE)
plot5 <- ggplot(removal1978, aes(x=Year)) +
geom_line(aes(y=Deportations), color="purple")+
geom_line(aes(y=im_count), color="blue") +
geom_line(aes(y=uspop), color="green") +
ggtitle("Imprisonment & Deportation 1977-2014") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot5)
ggsave("im_dep2.pdf", plot5)
######
# Per-capita rates: join U.S. population and derive deportation and
# imprisonment rates.
uspop <- read.csv("~/Desktop/juliacbowling.github.io/data/uspop20thcent.csv")
uspop <- select(Year, uspop)
rates <- left_join(uspop, removal_history, "Year")
View(rates)
rates <- mutate(rates, drate = Deportations/uspop, irate = im_count/uspop)
######
######for 2013 maps
dep1 <- read.csv("~/Desktop/juliacbowling.github.io/data/dep1.csv")
popinc <- read.csv("~/Desktop/juliacbowling.github.io/data/popinc.csv")
# NOTE(review): left_join() is called with no arguments; this errors.
left_join()
##map of cities involved
##map of states where deporations happen, what % where?
# NOTE(review): readOGR comes from rgdal/sp, which is never loaded in this
# script, and `languages` is used in the join before it is defined on the
# final line.
tracts <- readOGR(dsn = 'counties', layer = 'cb_2014_us_county_20m')
names(tracts)
tracts <- fortify(tracts, region='AFFGEOID')
head(tracts)
mapData <- left_join(tracts, languages, by=c('id' = 'Id'))
languages <- arrange(languages, -sp_percent) | /data/MigrantDec9.R | no_license | juliacbowling/juliacbowling.github.io | R | false | false | 8,890 | r | ###Final Project Prototype
###Julia Bowling
###Data Visualization
library("ggplot2")
# NOTE(review): ggplot2 is loaded twice; the second call is redundant.
library("ggplot2")
library("tidyr")
library("dplyr")
###Deportations: http://www.dhs.gov/publication/yearbook-immigration-statistics-2013-enforcement-actions
###Imprisonment: http://www.bjs.gov/index.cfm?ty=nps
# Load the deportation data and derive per-border totals from the
# BP/HS/ER columns.
migrant <- read.csv("~/Desktop/juliacbowling.github.io/data/migrant.csv")
View(migrant)
head(migrant)
migrant$northborder <- migrant$BP.North+migrant$HS.North+migrant$ER.North
migrant$southborder <- migrant$BP.South+migrant$HS.South+migrant$ER.South
head(migrant)
# Line chart: program total vs. north- and south-border totals.
plot1 <- ggplot(migrant, aes(x=Year)) +
geom_line(aes(y=PROGRAM.Total), color="#E63227") +
geom_line(aes(y=northborder), color="#0C5B15") +
geom_line(aes(y=southborder), color="#293BE5") + ggtitle("Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=18, hjust=0))
print(plot1)
ggsave("border.pdf", plot1)
# Line chart: total vs. North/South America origin counts.
plot2 <- ggplot(migrant, aes(x=Year)) +
geom_line(aes(y=Total), color="#E63227") +
geom_line(aes(y=North.America), color="#0C5B15") +
geom_line(aes(y=South.America), color="#293BE5") + ggtitle("Nationalities of Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=18, hjust=0))
print(plot2)
ggsave("nation.pdf", plot2)
# Share of each origin region in the yearly total.
migrant <- mutate(migrant, na_percent = (North.America/Total))
migrant <- mutate(migrant, sa_percent = (South.America/Total))
migrant <- mutate(migrant, af_percent = (Africa/Total))
migrant <- mutate(migrant, as_percent = (Asia/Total))
migrant <- mutate(migrant, eu_percent = (Europe/Total))
migrant <- mutate(migrant, oc_percent = (Oceania/Total))
migrant <- mutate(migrant, ukn_percent = (Unknown.1/Total))
migrant <- mutate(migrant, am_percent = ((North.America+South.America)/Total))
migrant2013 <- subset(migrant, Year==2013, drop=FALSE)
head(migrant2013)
# One-bar chart: 2013 North-American share over a full-width grey bar.
# NOTE(review): colorRamp() is called with no arguments and passed as an
# unused argument to ggplot() -- it has no effect.
plot3 <- ggplot(migrant2013, aes(x=Year), colorRamp()) +
geom_bar(stat="identity", aes(y=1), fill='grey') +
geom_bar(stat="identity", aes(y=na_percent), fill='red') + coord_flip() + ggtitle("Origins of Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot3)
###new??? plot3 below is wrong
# BUG FIX: the original select() was missing the comma between af_percent and
# as_percent ("af_percentas_percent" names no column), and gather() was called
# without a value-column name, so aes(y = na_percent) pointed at the key
# column. Gather the region names into `continent` and the shares into
# `na_percent` so the existing aesthetic mapping still works; geom_bar()
# needs stat = "identity" when a y aesthetic is supplied.
chart_data <- select(migrant2013, Year, na_percent, sa_percent, af_percent, as_percent, eu_percent, oc_percent, ukn_percent, am_percent)
chart_data <- gather(chart_data, continent, na_percent, -Year)
plot3 <- ggplot(chart_data, aes(x=Year, y=na_percent)) + coord_flip() + geom_bar(stat = "identity")
print(plot3)
library(dplyr)
# Long-format versions of the continent and top-country columns.
continent <- select(migrant, Year, Total, North.America, South.America, Africa, Asia, Europe, Oceania)
contdeport <- gather(continent, name, deportations, North.America:Oceania)
topcountry <- select(migrant, Year, Total, Mexico, Guatemala, Honduras, El.Salvador)
topdeport <- gather(topcountry, country, deportations, Mexico:El.Salvador)
###na_percent == 0.9507957
ggsave("percent_northamerican.pdf", plot3, width = 8, height = 4)
# Same one-bar chart for the combined (North + South) American share.
plot4 <- ggplot(migrant2013, aes(x=Year), colorRamp()) +
geom_bar(stat="identity", aes(y=1), fill='grey') +
geom_bar(stat="identity", aes(y=am_percent), fill='red') + coord_flip() + ggtitle("Origins of Undocumented People Deported 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot4)
ggsave("percent_american.pdf", plot4, width = 8, height = 4)
###am_percent == 0.9696022
# Overlaid area charts for the top origin countries.
# NOTE(review): geom_area's `color=` only colours the outline (fill= is
# probably intended), and overlaid rather than stacked areas hide the
# smaller series.
plot6 <- ggplot(migrant, aes(x=Year)) +
geom_area(aes(y=Mexico), color="purple") +
geom_area(aes(y=Guatemala), color="green") +
geom_area(aes(y=Honduras), color="yellow") +
geom_area(aes(y=El.Salvador), color="blue") +
geom_area(aes(y=Ecuador), color="red") + ggtitle("Deportations: Top Countries of Origin, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot6)
ggsave("top_deporations.pdf", plot6)
plot7 <- ggplot(migrant, aes(x=Year)) +
geom_area(aes(y=Guatemala), color="green") +
geom_area(aes(y=Honduras), color="yellow") +
geom_area(aes(y=El.Salvador), color="blue") +
geom_area(aes(y=Ecuador), color="red") + ggtitle("Deportations: Top Rising Countries of Origin, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot7)
###alternative for plot 7
# Tidy alternative: gather the four countries and let ggplot stack them.
chart_data <- select(migrant, Year, Guatemala, Honduras, El.Salvador, Ecuador)
chart_data <- gather(chart_data, country, deported, -Year)
plot7 <- ggplot(chart_data, aes(x=Year, y=deported, group=country, fill = country)) + geom_area()
print(plot7)
ggsave("top_risingdeporations.pdf", plot7)
####how to facet wrap, fill color, stack, and gather
###NEW DEC 2
# One single-country line chart (and PDF) per destination country.
plot8 <- ggplot(migrant, aes(x=Year, y=Canada))+geom_line()+
ggtitle("U.S. Deportations to Canada, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot8)
ggsave("Canada.pdf", plot8)
plot9 <- ggplot(migrant, aes(x=Year, y=Mexico))+geom_line()+
ggtitle("U.S. Deportations to Mexico, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot9)
ggsave("Mexico.pdf", plot9)
plot10 <- ggplot(migrant, aes(x=Year, y=Guatemala))+geom_line()+
ggtitle("U.S. Deportations to Guatemala, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot10)
ggsave("Guatemala.pdf", plot10)
plot11 <- ggplot(migrant, aes(x=Year, y=Honduras))+geom_line()+
ggtitle("U.S. Deportations to Honduras, 2004-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot11)
ggsave("Honduras.pdf", plot11)
# NOTE(review): gather() here puts bare column names in the key/value
# positions, so wrap_data does not contain the `deported`/`country` columns
# mapped on the next line; plot11 and plot12 as written will not render
# (`country`/`deportations` exist in topdeport, not migrant).
wrap_data <- gather(migrant, Year, Canada, Mexico, Guatemala, Honduras)
plot11 <- ggplot(wrap_data, aes(x=Year, y=deported, group=country, fill = country)) + geom_area()
plot12 <- ggplot(migrant, aes(country, deportations)) + geom_line() + facet_grid(country ~ .) +
ggtitle("U.S. Deportations 2004-2013") +
theme(plot.title = element_text(family="Trebuchet MS", face ="bold", size=20, hjust=0, color="#555555"))
# NOTE(review): t() turns the one-row data frame into a character matrix;
# ggplot() cannot take a matrix, and the aes() below is left empty.
migrant2013 <- t(migrant2013)
plot2013 <- ggplot(migrant2013, aes(x=, y=)) + geom_line() + facet_grid(country ~ .) +
ggtitle("U.S. Deportations 2004-2013") +
theme(plot.title = element_text(family="Trebuchet MS", face ="bold", size=20, hjust=0, color="#555555"))
# Historical removals/returns and presidential-term data.
removal_history <- read.csv("~/Desktop/juliacbowling.github.io/data/removal_history.csv")
presidents <- read.csv("~/Desktop/juliacbowling.github.io/data/presidents.csv")
View(presidents)
View(removal_history)
head(removal_history)
removal_history$Deportations <- removal_history$Removals + removal_history$Returns
# NOTE(review): this geom_bar maps y without stat="identity" (unlike the
# second plot5 below, which does it correctly).
plot5 <- ggplot() +
geom_line(data=removal_history, aes(y=Deportations, x=Year), color="red") +
geom_bar(data = presidents, aes(x = TO_YYYY, y = count)) +
ggtitle("Deportations 1892-2013") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot5)
ggsave("dep1892.pdf", plot5)
# Restrict to 1940 onward; imprisonment counts overlaid from 1978 onward.
r1950 <- subset(removal_history, Year>1939, drop = FALSE)
p1950 <- subset(presidents, TO_YYYY>1939, drop = FALSE)
i1977 <- subset(removal_history, Year>1977, drop = FALSE)
plot5 <- ggplot() +
geom_area(data=r1950, aes(y=Deportations, x=Year), fill="red") +
geom_area(data=i1977, aes(y=im_count, x=Year), fill="blue") +
geom_bar(data = p1950, aes(x = TO_YYYY, y = count), stat='identity', width = .1) +
ggtitle("Deportations 1940-2014") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot5)
ggsave("dep1940.pdf", plot5)
# NOTE(review): the next two lines run before their inputs exist --
# select() is missing its data argument (uspop is only read from CSV further
# below) and removal1978 is used in the left_join before it is defined on
# the following line.
uspop <- select(Year, uspop)
removal1978 <- left_join(uspop, removal1978, "Year")
removal1978 <- subset(removal_history, Year>1977, drop = FALSE)
plot5 <- ggplot(removal1978, aes(x=Year)) +
geom_line(aes(y=Deportations), color="purple")+
geom_line(aes(y=im_count), color="blue") +
geom_line(aes(y=uspop), color="green") +
ggtitle("Imprisonment & Deportation 1977-2014") +
theme(plot.title = element_text(family="Helvetica", face ="bold", size=16, hjust=0))
print(plot5)
ggsave("im_dep2.pdf", plot5)
######
# Per-capita rates: join U.S. population and derive deportation and
# imprisonment rates.
uspop <- read.csv("~/Desktop/juliacbowling.github.io/data/uspop20thcent.csv")
uspop <- select(Year, uspop)
rates <- left_join(uspop, removal_history, "Year")
View(rates)
rates <- mutate(rates, drate = Deportations/uspop, irate = im_count/uspop)
######
######for 2013 maps
dep1 <- read.csv("~/Desktop/juliacbowling.github.io/data/dep1.csv")
popinc <- read.csv("~/Desktop/juliacbowling.github.io/data/popinc.csv")
# NOTE(review): left_join() is called with no arguments; this errors.
left_join()
##map of cities involved
##map of states where deporations happen, what % where?
# NOTE(review): readOGR comes from rgdal/sp, which is never loaded in this
# script, and `languages` is used in the join before it is defined on the
# final line.
tracts <- readOGR(dsn = 'counties', layer = 'cb_2014_us_county_20m')
names(tracts)
tracts <- fortify(tracts, region='AFFGEOID')
head(tracts)
mapData <- left_join(tracts, languages, by=c('id' = 'Id'))
languages <- arrange(languages, -sp_percent) |
# Auto-generated example file (extracted from the Rd help page for `strange`
# in the `stranger` package); the example body is inside a "Not run" guard,
# so only the library() call executes.
library(stranger)
### Name: strange
### Title: Computes anomaly metrics by invoking specific method(s) with
### associated sets of parameters
### Aliases: strange stranger
### ** Examples
## Not run:
##D library(stranger)
##D data(iris)
##D crazydata <- crazyfy(iris[,1:4])
##D curious <- strange(crazydata, weird="knn")
## End(Not run)
| /data/genthat_extracted_code/stranger/examples/stranger.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 347 | r | library(stranger)
### Name: strange
### Title: Computes anomaly metrics by invoking specific method(s) with
### associated sets of parameters
### Aliases: strange stranger
### ** Examples
## Not run:
##D library(stranger)
##D data(iris)
##D crazydata <- crazyfy(iris[,1:4])
##D curious <- strange(crazydata, weird="knn")
## End(Not run)
|
## Sensitivity Analysis for an Unobserved Confounder
# using the VanderWeele and Arah (2011) bias formulas
# gamma = E(Y|a,x,U=1) - E(Y|a,x,U=0)
# delta = P(U=1|a1,x) - P(U=1|a0,x)
# Corrected lower confidence bound after allowing for an unobserved binary
# confounder U: the bias term gamma * delta is added to the naive bound.
#   d      - delta, P(U=1|a1,x) - P(U=1|a0,x)
#   g      - gamma, E(Y|a,x,U=1) - E(Y|a,x,U=0)
#   lbound - naive (unadjusted) lower confidence bound to correct; defaults
#            to the Model 1 estimate (-0.008054) so existing two-argument
#            calls behave exactly as before.  Passing a different lbound
#            generalizes this to the other models as well.
unobs_conf <- function(d, g, lbound = -0.008054) {
  bias <- g * d
  lbound + bias
}
# Same correction for Model 2: the naive lower bound (-0.006079) shifted by
# the confounding bias gamma * delta (VanderWeele & Arah 2011).
unobs_conf_ad2 <- function(d, g) {
  -0.006079 + g * d
}
# Same correction for Model 3: the naive lower bound (-0.005249) shifted by
# the confounding bias gamma * delta (VanderWeele & Arah 2011).
unobs_conf_ad3 <- function(d, g) {
  -0.005249 + g * d
}
#ignoring the unobserved confounder, the effect sizes for cortrate are
#-0.02469, -0.02376, -0.02196; if an unobserved confounder tripled the
#effect, 0.07716 would be a reasonable upper bound for gamma
gamma<-seq(0,.18,0.005)
delta<-seq(.05,.25,0.05)
ngam<-length(gamma)
ndelt<-length(delta)
# Corrected lower bounds over the whole (delta, gamma) grid.
# outer() replaces the original three pairs of nested for-loops: the
# unobs_conf* functions are plain vectorized arithmetic, so each
# ndelt-by-ngam matrix (rows = delta, columns = gamma, the same layout as
# before) is built in a single call; the unused lisa* temporaries are gone.
sam <- outer(delta, gamma, unobs_conf)
sam_ad2 <- outer(delta, gamma, unobs_conf_ad2)
sam_ad3 <- outer(delta, gamma, unobs_conf_ad3)
# Base-graphics version: one curve of corrected lower bounds per delta,
# plotted against gamma; a curve crossing zero means the effect is no longer
# significant at that (gamma, delta) combination.
xrange<-range(gamma)
yrange<-range(sam)
colors<-c("slategray1", "steelblue3", "steelblue4", "thistle", "violet")
plot(xrange, yrange, type="n",
xlab="E(Y|a,x,U=1) - E(Y|a,x,U=0)",
ylab="Corrected lower confidence bound")
for(i in 1:ndelt){
lines(gamma, sam[i,], type="l", lwd=2, col=colors[i])
}
# Reference line at zero (the significance threshold for the lower bound).
abline(h=0)
legend("topleft", title="delta", as.character(delta),
fill=colors, bty="n", horiz=FALSE, ncol=2)
#with delta=0.1, gamma would only have to be >0.08, >0.06, >.05 to render the effect non-significant
#this is equivalent to saying that the presence of an unobserved confounder would have to change the average effect by more than 224%, 152%, 127% to make the
# Long-format data for the ggplot version: one row per (gamma, delta, model)
# with the corrected lower bound in `y`.
# NOTE(review): the first dat assignment is immediately overwritten by the
# second and has no effect.
dat<-data.frame(rbind(sam, sam_ad2, sam_ad3))
dat<-data.frame( x=rep(gamma, 15), y=c(sam[1,], sam[2,], sam[3,], sam[4,], sam[5,], sam_ad2[1,], sam_ad2[2,], sam_ad2[3,], sam_ad2[4,], sam_ad2[5,],sam_ad3[1,], sam_ad3[2,], sam_ad3[3,], sam_ad3[4,], sam_ad3[5,]),
delta=factor(rep(c(.05,.1, .15,.2,.25), each=length(gamma))),
graph=factor(rep(c("Model 1", "Model 2", "Model 3"), each=length(delta)*length(gamma))))
#dat$excl<-as.factor(c(rep("primary analysis", 5), rep("current smokers excluded", 5), rep("current smokers, drug users excluded", 5)))
# Faceted ggplot version of the sensitivity curves, written to both an EPS
# and a PDF file.
# BUG FIX: the original opened two graphics devices (postscript, then pdf)
# but called dev.off() only once, so the .eps device stayed open and the file
# was left empty; it also used opts(), which was removed from ggplot2
# (replaced by theme()), and geom_hline(y=0), whose argument is yintercept.
p <- ggplot(data=dat, aes(x=x, y=y)) + facet_wrap(~graph, nrow=1) +
  geom_line(aes(colour=delta, group=delta)) + geom_hline(yintercept=0) +
  labs(x="gamma", y="Corrected lower confidence bound", colour="") +
  theme_bw(base_size=10) + scale_colour_discrete(name="delta") +
  theme(axis.text.x=element_text(size=10)) +
  theme(axis.text.y=element_text(size=10)) +
  theme(strip.text.x=element_text(size=10)) +
  theme(legend.position="bottom", legend.direction="horizontal") +
  theme(legend.text=element_text(size=10)) +
  theme(legend.title=element_text(size=10))
postscript("eFig_rgb.eps", paper="special", height=6, width=8, horizontal=FALSE, colormodel="rgb")
print(p)
dev.off()
pdf("unobsc.pdf")
print(p)
dev.off()
| /SensitivityAnalysis4.R | no_license | kararudolph/Aim2 | R | false | false | 3,186 | r | ## Sensitivity Analysis for an Unobserved Confounder
# using the VanderWeele and Arah (2011) bias formulas
# gamma = E(Y|a,x,U=1) - E(Y|a,x,U=0)
# delta = P(U=1|a1,x) - P(U=1|a0,x)
# Corrected lower confidence bound after allowing for an unobserved binary
# confounder U: the bias term gamma * delta is added to the naive bound.
#   d      - delta, P(U=1|a1,x) - P(U=1|a0,x)
#   g      - gamma, E(Y|a,x,U=1) - E(Y|a,x,U=0)
#   lbound - naive (unadjusted) lower confidence bound to correct; defaults
#            to the Model 1 estimate (-0.008054) so existing two-argument
#            calls behave exactly as before.  Passing a different lbound
#            generalizes this to the other models as well.
unobs_conf <- function(d, g, lbound = -0.008054) {
  bias <- g * d
  lbound + bias
}
# Same correction for Model 2: the naive lower bound (-0.006079) shifted by
# the confounding bias gamma * delta (VanderWeele & Arah 2011).
unobs_conf_ad2 <- function(d, g) {
  -0.006079 + g * d
}
# Same correction for Model 3: the naive lower bound (-0.005249) shifted by
# the confounding bias gamma * delta (VanderWeele & Arah 2011).
unobs_conf_ad3 <- function(d, g) {
  -0.005249 + g * d
}
#ignoring the unobserved confounder, the effect size for cortrate is: -0.02469, -0.02376, -0.02196, If an unobserved confounder triples the effect size, 0.07716 may be reasonable for an upper bound of gamma
# Sensitivity grids over the two bias parameters: rows indexed by delta,
# columns by gamma.
gamma <- seq(0, .18, 0.005)
delta <- seq(.05, .25, 0.05)
ngam <- length(gamma)
ndelt <- length(delta)
# Each ndelt x ngam matrix holds the corrected lower confidence bound for
# every (delta, gamma) pair.  The bias formulas are vectorized, so outer()
# fills each matrix in one call, replacing the original element-by-element
# double for-loops (and their leftover `lisa*` temporaries).
sam     <- outer(delta, gamma, unobs_conf)
sam_ad2 <- outer(delta, gamma, unobs_conf_ad2)
sam_ad3 <- outer(delta, gamma, unobs_conf_ad3)
# Base-graphics spot check of the Model 1 sensitivity surface: corrected
# lower bound vs gamma, one line per delta value.
xrange<-range(gamma)
yrange<-range(sam)
# One line colour per delta value (5 deltas).
colors<-c("slategray1", "steelblue3", "steelblue4", "thistle", "violet")
# Empty frame sized to the data; per-delta lines are layered on below.
plot(xrange, yrange, type="n",
xlab="E(Y|a,x,U=1) - E(Y|a,x,U=0)",
ylab="Corrected lower confidence bound")
for(i in 1:ndelt){
lines(gamma, sam[i,], type="l", lwd=2, col=colors[i])
}
# Zero line: where a curve crosses it, the corrected bound loses significance.
abline(h=0)
legend("topleft", title="delta", as.character(delta),
fill=colors, bty="n", horiz=FALSE, ncol=2)
#with delta=0.1, gamma would only have to be >0.08, >0.06, >.05 to render the effect non-significant
#this is equivalent to saying that the presence of an unobserved confounder would have to change the average effect by more than 224%, 152%, 127% to make the
# Long-format table for ggplot: one row per (gamma, delta, model) point.
# c(t(M)) concatenates a matrix row by row -- all gammas at delta = .05,
# then delta = .1, ... -- matching rep(gamma, 15) and the delta/graph
# factors (delta recycles across the three models).  The original also
# built an intermediate rbind() data frame that was never used; dropped.
dat <- data.frame(
  x = rep(gamma, 15),
  y = c(t(sam), t(sam_ad2), t(sam_ad3)),
  delta = factor(rep(c(.05, .1, .15, .2, .25), each = length(gamma))),
  graph = factor(rep(c("Model 1", "Model 2", "Model 3"),
                     each = length(delta) * length(gamma)))
)
#dat$excl<-as.factor(c(rep("primary analysis", 5), rep("current smokers excluded", 5), rep("current smokers, drug users excluded", 5)))
# Build the figure once, then print it to each output device in turn.
# FIX: geom_hline() takes `yintercept`, not `y`; opts() was removed from
# ggplot2 long ago, so its legend settings now live in theme().
p <- ggplot(data = dat, aes(x = x, y = y)) +
  facet_wrap(~graph, nrow = 1) +
  geom_line(aes(colour = delta, group = delta)) +
  geom_hline(yintercept = 0) +
  labs(x = "gamma", y = "Corrected lower confidence bound", colour = "") +
  theme_bw(base_size = 10) +
  scale_colour_discrete(name = "delta") +
  theme(axis.text.x = element_text(size = 10),
        axis.text.y = element_text(size = 10),
        strip.text.x = element_text(size = 10),
        legend.position = "bottom",
        legend.direction = "horizontal",
        legend.text = element_text(size = 10),
        legend.title = element_text(size = 10))
# FIX: the original opened the postscript and pdf devices simultaneously
# but called dev.off() only once, so the figure reached one device and the
# other was left dangling.  Each device is opened, printed to, and closed.
postscript("eFig_rgb.eps", paper = "special", height = 6, width = 8,
           horizontal = FALSE, colormodel = "rgb")
print(p)
dev.off()
pdf("unobsc.pdf")
print(p)
dev.off()
|
library(BMTME)
### Define OBJFFUNC for Multitrait Fitness calculation
gbs_bme <- function(PHENO, MARKERS, OBJFUNC.ARGS, CROSSVAL, SEEDRNG, LMD) {
PHENO <- as.data.frame(PHENO)
MARKERS <- as.matrix(MARKERS)
A <- tcrossprod(MARKERS)/ncol(MARKERS)
LG <- cholesky(A)
ZG <- model.matrix(~0 + row.names(PHENO))
Z.G <- ZG %*% LG
Y <- as.matrix(PHENO)
#
pheno <- data.frame(GID = row.names(PHENO), Response = PHENO$yield)
CrossV <- CV.KFold(pheno, DataSetID = 'GID', K = CROSSVAL)
#
pm <- BME(Y = Y, Z1 = Z.G, nIter = OBJFUNC.ARGS$nIter, burnIn = OBJFUNC.ARGS$burnIn, thin = OBJFUNC.ARGS$thin, bs = OBJFUNC.ARGS$bs, testingSet = CrossV)
#
accuracy <- summary(pm)
return(accuracy$Pearson[which(as.vector(accuracy$Trait) == 'yield')]*(1 - LMD*ncol(MARKERS)))
} | /gbs_functions/gbs_bme.R | no_license | aho25/GS_BDE | R | false | false | 781 | r | library(BMTME)
### Define OBJFFUNC for Multitrait Fitness calculation
# Objective ("fitness") function for multitrait genomic selection via BMTME.
#
# Args:
#   PHENO        - phenotype table (rows = genotypes; must contain `yield`)
#   MARKERS      - marker matrix whose rows match row.names(PHENO)
#   OBJFUNC.ARGS - list of BME sampler settings: nIter, burnIn, thin, bs
#   CROSSVAL     - number of folds passed to CV.KFold
#   SEEDRNG      - NOTE(review): accepted but not used here -- confirm
#                  whether RNG seeding was intended
#   LMD          - penalty weight (lambda) applied per marker
#
# Returns the Pearson prediction accuracy for the 'yield' trait, penalized
# by (1 - LMD * number of markers).
gbs_bme <- function(PHENO, MARKERS, OBJFUNC.ARGS, CROSSVAL, SEEDRNG, LMD) {
  PHENO <- as.data.frame(PHENO)
  MARKERS <- as.matrix(MARKERS)
  # Genomic relationship matrix and its factorization (BMTME::cholesky),
  # used to rotate the genotype incidence matrix.
  grm <- tcrossprod(MARKERS) / ncol(MARKERS)
  grm.factor <- cholesky(grm)
  incidence <- model.matrix(~0 + row.names(PHENO))
  z.geno <- incidence %*% grm.factor
  # K-fold cross-validation partition keyed on genotype ID, built from yield.
  cv.pheno <- data.frame(GID = row.names(PHENO), Response = PHENO$yield)
  folds <- CV.KFold(cv.pheno, DataSetID = 'GID', K = CROSSVAL)
  # Fit the Bayesian multi-trait model with the requested sampler settings.
  fit <- BME(Y = as.matrix(PHENO), Z1 = z.geno,
             nIter = OBJFUNC.ARGS$nIter, burnIn = OBJFUNC.ARGS$burnIn,
             thin = OBJFUNC.ARGS$thin, bs = OBJFUNC.ARGS$bs,
             testingSet = folds)
  fit.summary <- summary(fit)
  # Yield accuracy, shrunk by the marker-count penalty.
  yield.accuracy <-
    fit.summary$Pearson[which(as.vector(fit.summary$Trait) == 'yield')]
  yield.accuracy * (1 - LMD * ncol(MARKERS))
}
library(data.table)
library(dplyr)
library(caret)
library(e1071)
library(Rlof)
library(DMwR2)
#1)load data
# Windows-specific absolute path to the labelled two-class dataset.
fp <- file.path('C:/Users/research/Documents/RStudio/workspace/oneclass.test',
'twoclass.label.csv')
data <- fread(input = fp, showProgress = FALSE)
# Class / threat labels are categorical.
data$classLabel <- as.factor(data$classLabel)
data$threatLabel <- as.factor(data$threatLabel)
summary(data$classLabel)
#parameters to tune:
##kernel: svm kernel
# ascore.thresh.vec <- c(0.7,0.8,0.9)
kernel.vec <- c('linear','polynomial','radial')
# Candidate sizes for the false-positive update chunks.
FP.chunk.size.vec <- c(20,40,60,80,100)
set.seed(42)
getwd()
setwd('C:/Users/research/Documents/RStudio/workspace/oneclass.test')
# Defines ocsvm.update.FP.chunk.oversample() used by the grid search below.
source('ocsvm.update.FP.chunk.oversample.ver2.R')
# SMOTE-style oversampling percentage passed to every run.
perc.over <- 200
library(foreach)
library(doParallel)
# Parallel backend: all cores but one, workers logging to check.txt.
cores <- detectCores()
cl <- makeCluster(cores[1] - 1, outfile = 'check.txt', type = 'PSOCK') #not to overload your computer
registerDoParallel(cl)
#clusterExport(cl, 'data')
#clusterEvalQ(cl, library(rms))
# Grid search over kernels (columns, via %:%) and FP chunk sizes (rows).
# FIX: the original kept a counter (`i <- 1` outside, `i <- i + 1` as the
# LAST expression of the %dopar% body).  Each worker only ever saw its own
# copy of i (so print(i) was always 1) and -- worse -- the body's return
# value became the counter, so the ocsvm results were silently discarded.
# The model call is now the final expression and the combined grid is kept.
grid.results <-
  foreach(kernel = kernel.vec, .combine = cbind) %:%
  foreach(FP.chunk.size = FP.chunk.size.vec, .combine = 'c') %dopar% {
    print(paste(kernel, FP.chunk.size, perc.over, sep = ','))
    ocsvm.update.FP.chunk.oversample(data, kernel, FP.chunk.size, perc.over)
  }
#stop cluster
stopCluster(cl)
# for(kernel in kernel.vec)
# for(FP.chunk.size in FP.chunk.size.vec)
# {
# print(paste(kernel,FP.chunk.size,perc.over,sep=','))
# ocsvm.update.FP.chunk.oversample(data,kernel,FP.chunk.size,perc.over)
# }
| /Chapter 5 E-base-OU/RStudio/ocsvm-OU/test.ocsvm.update.FP.chunk.oversample.ver2.R | no_license | DianaHaidar/PhD-Project | R | false | false | 1,564 | r | library(data.table)
library(dplyr)
library(caret)
library(e1071)
library(Rlof)
library(DMwR2)
#1)load data
fp <- file.path('C:/Users/research/Documents/RStudio/workspace/oneclass.test',
'twoclass.label.csv')
data <- fread(input = fp, showProgress = FALSE)
data$classLabel <- as.factor(data$classLabel)
data$threatLabel <- as.factor(data$threatLabel)
summary(data$classLabel)
#parameters to tune:
##kernel: svm kernel
# ascore.thresh.vec <- c(0.7,0.8,0.9)
kernel.vec <- c('linear','polynomial','radial')
FP.chunk.size.vec <- c(20,40,60,80,100)
set.seed(42)
getwd()
setwd('C:/Users/research/Documents/RStudio/workspace/oneclass.test')
source('ocsvm.update.FP.chunk.oversample.ver2.R')
perc.over <- 200
library(foreach)
library(doParallel)
#setup parallel backend to use many processors
cores <- detectCores()
cl <- makeCluster(cores[1]-1, outfile='check.txt',type='PSOCK') #not to overload your computer
registerDoParallel(cl)
#clusterExport(cl, 'data')
#clusterEvalQ(cl, library(rms))
i<-1
foreach(kernel= kernel.vec, .combine=cbind) %:%
foreach(FP.chunk.size= FP.chunk.size.vec, .combine='c') %dopar% {
print(i)
print(paste(kernel,FP.chunk.size,perc.over,sep=','))
ocsvm.update.FP.chunk.oversample(data,kernel,FP.chunk.size,perc.over)
i<-i+1
}
#stop cluster
stopCluster(cl)
# for(kernel in kernel.vec)
# for(FP.chunk.size in FP.chunk.size.vec)
# {
# print(paste(kernel,FP.chunk.size,perc.over,sep=','))
# ocsvm.update.FP.chunk.oversample(data,kernel,FP.chunk.size,perc.over)
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_barcode_table.R
\name{get_barcode_table}
\alias{get_barcode_table}
\title{Get Barcode Dataframe}
\usage{
get_barcode_table()
}
\value{
Returns a dataframe of barcode identifiers and reference barcodes
}
\description{
\code{get_barcode_table} reads in the fasta file that is included with this package.
}
\examples{
barcodes_df <- get_barcode_table()
}
| /man/get_barcode_table.Rd | no_license | Benji-Wagner/SequenceMapper | R | false | true | 434 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_barcode_table.R
\name{get_barcode_table}
\alias{get_barcode_table}
\title{Get Barcode Dataframe}
\usage{
get_barcode_table()
}
\value{
Returns a dataframe of barcode identifiers and reference barcodes
}
\description{
\code{get_barcode_table} reads in the fasta file that is included with this package.
}
\examples{
barcodes_df <- get_barcode_table()
}
|
library(tidyr)
library(maps)
library(dplyr)
library(ggplot2)
library(tools)
#clear workspace
# NOTE(review): rm(list = ls()) only clears the global environment (loaded
# packages/options persist) and is discouraged in scripts; prefer restarting
# the R session for a truly clean state.
rm(list = ls())
#read in data, trim and recode proper variables
df <- read.csv("data/cc-est2017-alldata-41.csv")
# Census AGEGRP codes 0-18 are "Total" plus eighteen 5-year age buckets.
# A lookup vector indexed by code + 1 replaces the original chain of
# 26 sequential mutate(ifelse(...)) calls, with identical output.
agegrp_labels <- c("Total",
                   "0 to 4", "5 to 9", "10 to 14", "15 to 19", "20 to 24",
                   "25 to 29", "30 to 34", "35 to 39", "40 to 44",
                   "45 to 49", "50 to 54", "55 to 59", "60 to 64",
                   "65 to 69", "70 to 74", "75 to 79", "80 to 84",
                   "85 or older")
df_clean <- df %>%
  # Keep YEAR codes 3-9: the July 1 population estimates for 2010-2016.
  filter(YEAR > 2) %>%
  filter(YEAR < 10) %>%
  # Code-to-calendar-year mapping is a constant shift: 3 -> 2010, ..., 9 -> 2016.
  mutate(YEAR = YEAR + 2007,
         AGEGRP = agegrp_labels[AGEGRP + 1])
#Calculate the median age by county and year
#using a cumulative sum to find when the
#running total tips past the midpoint, thus
#marking the median age bucket
oregon_median_age <- df_clean %>%
  filter(AGEGRP != "Total") %>%
  group_by(CTYNAME, YEAR) %>%
  # Half the county-year population = position of the "median person".
  mutate(MEDIAN_COUNT = sum(TOT_POP)/2) %>%
  group_by(CTYNAME, YEAR) %>%
  # Running population total across age buckets; assumes rows are ordered by
  # ascending age within each county-year -- TODO confirm upstream order.
  mutate(RUNNING_TOTAL = cumsum(TOT_POP)) %>%
  mutate(MEDIAN_GROUP = ifelse(RUNNING_TOTAL >= MEDIAN_COUNT, TRUE, FALSE)) %>%
  group_by(CTYNAME, YEAR) %>%
  # The median bucket is the first row where MEDIAN_GROUP flips FALSE->TRUE.
  # NOTE(review): lag() is NA on the first bucket, so MEDIAN is NA there and
  # gets dropped by the filter -- this silently assumes the first age bucket
  # never contains the median; verify for counties with very young populations.
  mutate(MEDIAN = ifelse(MEDIAN_GROUP != lag(MEDIAN_GROUP), TRUE, FALSE)) %>%
  filter(MEDIAN == TRUE) %>%
  select(CTYNAME, AGEGRP, YEAR)
#join age data to spatial mapping data
# Oregon county polygons from the maps package, with a CTYNAME key built to
# match the census naming convention ("Xxx County") for joining.
oregon_map_data <- map_data("county") %>%
  filter(region == "oregon") %>%
  mutate(CTYNAME = paste(toTitleCase(subregion), "County"))
# left_join with no explicit `by` joins on the shared column (CTYNAME); each
# polygon-vertex row is repeated once per YEAR of median-age data.
or_median_map <- left_join(oregon_map_data, oregon_median_age)
#map the data for 2010 and 2016
#quick visual to just spot check the data
#final maps were made in Carto
# Both years share identical styling, so one helper builds the choropleth;
# only the year (and its matching title) differs between the two calls.
#
# Args:
#   map_df      - joined polygon + median-age table (needs YEAR, long, lat,
#                 group, AGEGRP columns)
#   target_year - the single YEAR to draw
# Returns a ggplot object.
plot_median_age_map <- function(map_df, target_year) {
  ggplot() +
    geom_polygon(data = filter(map_df, YEAR == target_year),
                 mapping = aes(x = long, y = lat, group = group, fill = AGEGRP),
                 color = "white",
                 alpha = I(.9)) +
    theme_bw() +
    theme(panel.grid.major = element_blank(),
          panel.border = element_blank(),
          axis.ticks = element_blank(),
          axis.text = element_blank(),
          axis.title.x = element_blank(),
          axis.title.y = element_blank()) +
    scale_fill_brewer("Age Group") +
    ggtitle(paste0("Median Age in Oregon Counties, ", target_year))
}
map2010 <- plot_median_age_map(or_median_map, 2010)
map2016 <- plot_median_age_map(or_median_map, 2016)
#spread and reshape the data frame so it
#is suited for mapping in Carto
# pivot_wider() supersedes tidyr::spread(): one column per YEAR, each cell
# holding that county's median AGEGRP label for the year.
oregon_median_age_for_carto <- oregon_median_age %>%
  pivot_wider(names_from = YEAR, values_from = AGEGRP)
#code to write CSV
write.csv(oregon_median_age_for_carto, "Oregon_median_age_carto.csv")
| /Oregon Median Age.R | no_license | anniema15/oregon-census | R | false | false | 4,234 | r | library(tidyr)
library(maps)
library(dplyr)
library(ggplot2)
library(tools)
#clear workspace
rm(list = ls())
#read in data, trim and recode proper variables
df <- read.csv("data/cc-est2017-alldata-41.csv")
df_clean <- df %>%
filter(YEAR > 2) %>%
filter(YEAR < 10) %>%
mutate(YEAR = ifelse(YEAR == 3, 2010, YEAR)) %>%
mutate(YEAR = ifelse(YEAR == 4, 2011, YEAR)) %>%
mutate(YEAR = ifelse(YEAR == 5, 2012, YEAR)) %>%
mutate(YEAR = ifelse(YEAR == 6, 2013, YEAR)) %>%
mutate(YEAR = ifelse(YEAR == 7, 2014, YEAR)) %>%
mutate(YEAR = ifelse(YEAR == 8, 2015, YEAR)) %>%
mutate(YEAR = ifelse(YEAR == 9, 2016, YEAR)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 0, "Total", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 1, "0 to 4", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 2, "5 to 9", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 3, "10 to 14", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 4, "15 to 19", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 5, "20 to 24", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 6, "25 to 29", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 7, "30 to 34", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 8, "35 to 39", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 9, "40 to 44", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 10, "45 to 49", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 11, "50 to 54", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 12, "55 to 59", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 13, "60 to 64", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 14, "65 to 69", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 15, "70 to 74", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 16, "75 to 79", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 17, "80 to 84", AGEGRP)) %>%
mutate(AGEGRP = ifelse(AGEGRP == 18, "85 or older", AGEGRP))
#Calculate the median age by county and year
#using a cumulative sum to find when the
#running total tips past the midpoint, thus
#marking the median age bucket
oregon_median_age <- df_clean %>%
filter(AGEGRP != "Total") %>%
group_by(CTYNAME, YEAR) %>%
mutate(MEDIAN_COUNT = sum(TOT_POP)/2) %>%
group_by(CTYNAME, YEAR) %>%
mutate(RUNNING_TOTAL = cumsum(TOT_POP)) %>%
mutate(MEDIAN_GROUP = ifelse(RUNNING_TOTAL >= MEDIAN_COUNT, TRUE, FALSE)) %>%
group_by(CTYNAME, YEAR) %>%
mutate(MEDIAN = ifelse(MEDIAN_GROUP != lag(MEDIAN_GROUP), TRUE, FALSE)) %>%
filter(MEDIAN == TRUE) %>%
select(CTYNAME, AGEGRP, YEAR)
#join age data to spatial mapping data
oregon_map_data <- map_data("county") %>%
filter(region == "oregon") %>%
mutate(CTYNAME = paste(toTitleCase(subregion), "County"))
or_median_map <- left_join(oregon_map_data, oregon_median_age)
#map the data for 2010 and 2016
#quick visual to just spot check the data
#final maps were made in Carto
map2010 <- ggplot() +
geom_polygon(data = filter(or_median_map, YEAR ==2010),
mapping = aes(x = long, y = lat, group = group, fill = AGEGRP),
color = "white",
alpha = I(.9)) +
theme_bw()+
theme(panel.grid.major = element_blank(),
panel.border = element_blank(),
axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank()) +
scale_fill_brewer("Age Group") +
ggtitle("Median Age in Oregon Counties, 2010")
map2016 <- ggplot() +
geom_polygon(data = filter(or_median_map, YEAR ==2016),
mapping = aes(x = long, y = lat, group = group, fill = AGEGRP),
color = "white",
alpha = I(.9)) +
theme_bw()+
theme(panel.grid.major = element_blank(),
panel.border = element_blank(),
axis.ticks = element_blank(),
axis.text = element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank()) +
scale_fill_brewer("Age Group") +
ggtitle("Median Age in Oregon Counties, 2016")
#spread and reshape the data frame so it
#is suited for mapping in Carto
oregon_median_age_for_carto <- oregon_median_age %>%
spread(YEAR, AGEGRP)
#code to write CSV
write.csv(oregon_median_age_for_carto, "Oregon_median_age_carto.csv")
|
# 23 Jul 2021
# function for plotting gene tracks only for Fig 2C
# VPS45 paper Fig 1D
# init
library(Gviz)
# data("cpgIslands")
library(rtracklayer)
library(BSgenome)
library(BSgenome.Hsapiens.UCSC.hg38)
library(TxDb.Hsapiens.UCSC.hg38.knownGene)
library(ensembldb)
library(org.Hs.eg.db)
library(grDevices)
library(gridExtra)
library(GenomicRanges)
library(readr)
library(readxl)
###
# Allow non-UCSC-style chromosome names in Gviz tracks.
options(ucscChromosomeNames = F)
# function for plotting rs2027349 site
# revised from plot_anywhere
# Use OverlayTrack to combine data tracks
# load data
###
# list all files in /Fig2C (VPS45)
# import_file_list <-
# list.files(path = "Fig2C/",
# all.files = F,
# recursive = F)
# One list slot per gRNA sheet of the Fig 2C workbook.
VPS45_data_list <-
vector(mode = "list",
length = 3)
proximal_gene_list <-
scan(file = "Fig2C/proximal_genes.txt",
what = character(),
quote = "")
# sort and unique
proximal_gene_list <- unique(sort(proximal_gene_list))
gene_coordination_data <-
read_delim("Fig2C/VPS45-1-gene_table.txt",
delim = "\t", escape_double = FALSE,
trim_ws = TRUE)
# Drop columns 2-7, keeping Gene_Symbol plus the remaining columns
# (presumably the start/stop coordinates used downstream -- confirm).
gene_coordination_data <-
gene_coordination_data[, c(-(2:7))]
# NOTE(review): this assignment is redundant -- the for() below rebinds i.
i <- 1
for (i in 1:3) {
# Sheet i = beta values for gRNA i.
VPS45_data_list[[i]] <-
read_excel("Fig2C/Fig_2C_data.xlsx",
sheet = i)
# Inner merge on gene symbol: genes absent from the coordinate table are
# silently dropped from the sheet.
VPS45_data_list[[i]] <-
base::merge(x = VPS45_data_list[[i]],
y = gene_coordination_data,
by.x = "gene",
by.y = "Gene_Symbol")
## select only genes in the list
# VPS45_data_list[[i]] <-
# VPS45_data_list[[i]][VPS45_data_list[[i]]$Gene_Symbol %in%
# proximal_gene_list, ]
}
# Plot a genomic region around a SNP with Gviz.  Draws, top to bottom:
# chromosome ideogram, genome axis, the three VPS45 gRNA beta tracks
# (from the module-level VPS45_data_list), a single-position SNP
# annotation, and NCBI RefSeq gene models fetched from UCSC.
#
# Args:
#   chr, start, end        - region to display (hg38 coordinates)
#   SNP_position           - coordinate of the SNP to annotate
#   SNP_id                 - label used for the SNP track and its annotation
#   mcols, strand          - metadata column / strand for the region GRanges
#   x_offset_1, x_offset_2 - extra bp drawn left / right of the region
#   title_name             - main title of the assembled plot
plot_gene_track <- function(chr, start, end,
                            SNP_position = 10000,
                            SNP_id = "SNP_id",
                            mcols = 100, strand = "+",
                            x_offset_1 = 0, x_offset_2 = 0,
                            title_name = "") {
  # Region of interest as a GRanges on hg38.
  cell_type <-
    GRanges(seqnames = Rle(chr),
            seqinfo = Seqinfo(seqnames = chr,
                              genome = "hg38"),
            ranges = IRanges(start = start,
                             end = end,
                             names = chr),
            mcols = as.data.frame(mcols),
            strand = Rle(strand(strand)))
  print(as.character(unique(seqnames(cell_type))))
  # Ideogram and coordinate-axis tracks.
  iTrack <- IdeogramTrack(genome = genome(cell_type),
                          chromosome = as.character(unique(seqnames(cell_type))),
                          fontcolor = "black",
                          fontsize = 18)
  gTrack <- GenomeAxisTrack(col = "black",
                            fontcolor = "black",
                            fontsize = 14,
                            scale = 0.1)
  # Single-position SNP annotation.
  # FIX: `id` previously hard-coded "rs2027349" and ignored the SNP_id
  # argument; it now honours SNP_id like the other label fields.
  snpTrack <- AnnotationTrack(start = SNP_position, end = SNP_position,
                              chromosome = chr,
                              id = SNP_id, shape = "box",
                              name = SNP_id,
                              strand = "*",
                              group = c(SNP_id),
                              fontcolor.group = "black",
                              fontcolor.item = "black",
                              fontsize = 16,
                              col = "black", col.title = "black",
                              just.group = "below",
                              showID = TRUE,
                              cex.group = 1,
                              groupAnnotation = "id")
  # VPS45 CRISPRi beta values: one series per gRNA, shown as points plus a
  # per-group average line.
  VPS45_plot <-
    DataTrack(start = VPS45_data_list[[1]]$start,
              end = VPS45_data_list[[1]]$stop,
              chromosome = "chr1",
              data = as.matrix(rbind(VPS45_data_list[[1]]$beta,
                                     VPS45_data_list[[2]]$beta,
                                     VPS45_data_list[[3]]$beta)),
              genome = "hg38",
              groups = c("VPS45-1-gRNA",
                         "VPS45-2-gRNA",
                         "VPS45-3-gRNA"),
              type = c("p", "a"),
              col.title = "black",
              col.axis = "black",
              just.group = "below",
              legend = TRUE)
  # RefSeq gene models from UCSC; transcript accessions are translated to
  # gene symbols via org.Hs.eg.db for readable labels.
  ucscGenes <- UcscTrack(genome = genome(cell_type),
                         table = "ncbiRefSeq",
                         track = 'NCBI RefSeq',
                         trackType = "GeneRegionTrack",
                         chromosome = as.character(unique(seqnames(cell_type))),
                         rstarts = "exonStarts",
                         rends = "exonEnds",
                         gene = "name",
                         symbol = 'name',
                         transcript = "name",
                         strand = "strand",
                         stacking = 'pack',
                         showID = TRUE,
                         geneSymbol = TRUE)
  z <- ranges(ucscGenes)
  # FIX: strip the full version suffix (".12" etc.); the original pattern
  # "\\.[1-9]$" missed ".0" and multi-digit transcript versions, leaving
  # those accessions unmapped.
  mcols(z)$transcript <- as.vector(mapIds(org.Hs.eg.db,
                                          gsub("\\.[0-9]+$",
                                               "",
                                               mcols(z)$symbol),
                                          "SYMBOL",
                                          "REFSEQ"))
  grTrack <- ucscGenes
  ranges(grTrack) <- z
  # Restyle the gene track: black text/lines, larger group labels.
  grTrack@dp@pars$col.line <- "black"
  grTrack@dp@pars$fontcolor <- "black"
  grTrack@name <- paste("RefSeq", "Gene", collapse = "\n")
  grTrack@dp@pars$fontcolor.title <- "black"
  grTrack@dp@pars$fontcolor.item <- "black"
  grTrack@dp@pars$fontcolor.group <- "black"
  grTrack@dp@pars$fontsize.group <- 18
  # Assemble the stacked figure over the (optionally padded) region.
  # FIX: title_name was accepted but never used; it is now the plot title.
  plotTracks(list(iTrack, gTrack,
                  VPS45_plot,
                  snpTrack,
                  grTrack),
             sizes = c(0.5, 0.5, 2, 0.5, 4),
             chromosome = cell_type@ranges@NAMES,
             from = (cell_type@ranges@start - x_offset_1),
             to = (cell_type@ranges@start + cell_type@ranges@width + x_offset_2),
             transcriptAnnotation = "transcript",
             collapseTranscripts = "transcript",
             main = title_name)
}
# Example call over a narrow 1-bp window (kept for reference):
# plot_gene_track(chr = "chr1",
# start = 150067620,
# end = 150067621,
# SNP_position = 150067621,
# SNP_id = "rs2027349",
# x_offset_1 = 250000, x_offset_2 = 250000,
# title_name = "NGN2-Glut")
# Final figure: ~700 kb window on chr1 spanning the VPS45 locus, annotating
# rs2027349 at chr1:150,067,621 (hg38).
plot_gene_track(chr = "chr1",
start = 149850000,
end = 150550000,
SNP_position = 150067621,
SNP_id = "rs2027349",
# x_offset_1 = 250000, x_offset_2 = 250000,
title_name = "NGN2-Glut")
# 150067621
# Data-extent sanity checks: first/last gene coordinates in sheet 1.
min(VPS45_data_list[[1]]$start) #149884459
max(VPS45_data_list[[1]]$stop) #150513789
| /Siwei_analysis/code_R/VPS45/code_plot_Fig_2C.R | no_license | endeneon/VPS45_repo | R | false | false | 7,213 | r | # 23 Jul 2021
# function for plotting gene tracks only for Fig 2C
# VPS45 paper Fig 1D
# init
library(Gviz)
# data("cpgIslands")
library(rtracklayer)
library(BSgenome)
library(BSgenome.Hsapiens.UCSC.hg38)
library(TxDb.Hsapiens.UCSC.hg38.knownGene)
library(ensembldb)
library(org.Hs.eg.db)
library(grDevices)
library(gridExtra)
library(GenomicRanges)
library(readr)
library(readxl)
###
options(ucscChromosomeNames = F)
# function for plotting rs2027349 site
# revised from plot_anywhere
# Use OverlayTrack to combine data tracks
# load data
###
# list all files in /Fig2C (VPS45)
# import_file_list <-
# list.files(path = "Fig2C/",
# all.files = F,
# recursive = F)
VPS45_data_list <-
vector(mode = "list",
length = 3)
proximal_gene_list <-
scan(file = "Fig2C/proximal_genes.txt",
what = character(),
quote = "")
# sort and unique
proximal_gene_list <- unique(sort(proximal_gene_list))
gene_coordination_data <-
read_delim("Fig2C/VPS45-1-gene_table.txt",
delim = "\t", escape_double = FALSE,
trim_ws = TRUE)
gene_coordination_data <-
gene_coordination_data[, c(-(2:7))]
i <- 1
for (i in 1:3) {
VPS45_data_list[[i]] <-
read_excel("Fig2C/Fig_2C_data.xlsx",
sheet = i)
VPS45_data_list[[i]] <-
base::merge(x = VPS45_data_list[[i]],
y = gene_coordination_data,
by.x = "gene",
by.y = "Gene_Symbol")
## select only genes in the list
# VPS45_data_list[[i]] <-
# VPS45_data_list[[i]][VPS45_data_list[[i]]$Gene_Symbol %in%
# proximal_gene_list, ]
}
plot_gene_track <- function(chr, start, end,
SNP_position = 10000,
SNP_id = "SNP_id",
mcols = 100, strand = "+",
x_offset_1 = 0, x_offset_2 = 0,
title_name = "") {
# cell.type.selector <- 1L
cell_type <-
GRanges(seqnames = Rle(chr),
seqinfo = Seqinfo(seqnames = chr,
genome = "hg38"),
ranges = IRanges(start = start,
end = end,
names = chr),
mcols = as.data.frame(mcols),
strand = Rle(strand(strand)))
print(as.character(unique(seqnames(cell_type))))
iTrack <- IdeogramTrack(genome = genome(cell_type),
chromosome = as.character(unique(seqnames(cell_type))),
fontcolor = "black",
fontsize = 18)
gTrack <- GenomeAxisTrack(col = "black",
fontcolor = "black",
fontsize = 14,
scale = 0.1)
snpTrack <- AnnotationTrack(start = SNP_position, end = SNP_position,
chromosome = chr,
id = "rs2027349", shape = "box",
name = SNP_id,
strand = "*",
group = c(SNP_id),
fontcolor.group = "black",
fontcolor.item = "black",
fontsize = 16,
col = "black", col.title = "black",
just.group = "below",
showID = TRUE,
cex.group = 1,
groupAnnotation = "id")
### VPS45 Track
## construct Gviz instance
VPS45_plot <-
DataTrack(start = VPS45_data_list[[1]]$start,
end = VPS45_data_list[[1]]$stop,
chromosome = "chr1",
data = as.matrix(rbind(VPS45_data_list[[1]]$beta,
VPS45_data_list[[2]]$beta,
VPS45_data_list[[3]]$beta)),
genome = "hg38",
groups = c("VPS45-1-gRNA",
"VPS45-2-gRNA",
"VPS45-3-gRNA"),
type = c("p", "a"),
# col = "black",
col.title = "black",
col.axis = "black",
just.group = "below",
legend = T)
########### plotting
ucscGenes <- UcscTrack(genome = genome(cell_type),
table = "ncbiRefSeq",
track = 'NCBI RefSeq',
trackType = "GeneRegionTrack",
chromosome = as.character(unique(seqnames(cell_type))),
rstarts = "exonStarts",
rends = "exonEnds",
gene = "name",
symbol = 'name',
transcript = "name",
strand = "strand",
stacking = 'pack',
showID = T,
geneSymbol = T)
z <- ranges(ucscGenes)
mcols(z)$transcript <- as.vector(mapIds(org.Hs.eg.db,
gsub("\\.[1-9]$",
"",
mcols(z)$symbol),
"SYMBOL",
"REFSEQ"))
grTrack <- ucscGenes
ranges(grTrack) <- z
grTrack@dp@pars$col.line <- "black"
grTrack@dp@pars$fontcolor <- "black"
grTrack@name <- paste("RefSeq", "Gene", collapse = "\n")
grTrack@dp@pars$fontcolor.title <- "black"
grTrack@dp@pars$fontcolor.item <- "black"
grTrack@dp@pars$fontcolor.group <- "black"
grTrack@dp@pars$fontsize.group <- 18
######
# htTrack <- HighlightTrack(trackList = list(alTrack_CN, alTrack_NPC, alTrack_DN, alTrack_GA, alTrack_iPS),
# start = c(26212000, 26241000),
# width = c(15000, 2000)
# chromosome = as.character(unique(seqnames(cell_type))))
plotTracks( list(iTrack, gTrack,
VPS45_plot,
snpTrack,
grTrack),
sizes = c(0.5, 0.5, 2, 0.5, 4),
chromosome = cell_type@ranges@NAMES,
from = (cell_type@ranges@start - x_offset_1),
to = (cell_type@ranges@start + cell_type@ranges@width + x_offset_2),
transcriptAnnotation = "transcript",
collapseTranscripts = "transcript")#,
}
# plot_gene_track(chr = "chr1",
# start = 150067620,
# end = 150067621,
# SNP_position = 150067621,
# SNP_id = "rs2027349",
# x_offset_1 = 250000, x_offset_2 = 250000,
# title_name = "NGN2-Glut")
plot_gene_track(chr = "chr1",
start = 149850000,
end = 150550000,
SNP_position = 150067621,
SNP_id = "rs2027349",
# x_offset_1 = 250000, x_offset_2 = 250000,
title_name = "NGN2-Glut")
# 150067621
min(VPS45_data_list[[1]]$start) #149884459
max(VPS45_data_list[[1]]$stop) #150513789
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attach.all.R
\name{attach.all}
\alias{attach.all}
\alias{detach.all}
\alias{attach.bugs}
\alias{detach.bugs}
\title{Attach / detach elements of (bugs) objects to search path}
\usage{
attach.all(x, overwrite = NA, name = "attach.all")
}
\arguments{
\item{x}{An object, which must be of class \code{bugs} for
\code{attach.bugs}.}
\item{overwrite}{If \code{TRUE}, objects with identical names in the
Workspace (.GlobalEnv) that are masking objects in the database to be
attached will be deleted. If \code{NA} (the default) and an interactive
session is running, a dialog box asks the user whether masking objects
should be deleted. In non-interactive mode, behaviour is identical to
\code{overwrite=FALSE}, i.e. nothing will be deleted.}
\item{name}{The name of the environment where \code{x} will be attached /
which will be detached.}
}
\value{
\code{attach.all} and \code{attach.bugs} invisibly return the
\code{\link{environment}}(s).
\code{detach.all} and \code{detach.bugs} detach the \code{environment}(s)
named \code{name} created by \code{attach.all}.
}
\description{
The database is attached/detached to the search path. See
\code{\link{attach}} for details.
}
\details{
While \code{attach.all} attaches all elements of an object \code{x} to a
database called \code{name}, \code{attach.bugs} attaches all elements of
\code{x$sims.list} to the database \code{bugs.sims} itself making use of
\code{attach.all}.
\code{detach.all} and \code{detach.bugs} remove the databases
mentioned above.
\code{attach.all} also attaches \code{n.sims} (the number of simulations
saved from the MCMC runs) to the database.
Each scalar parameter in the model is attached as vectors of length
\code{n.sims}, each vector is attached as a 2-way array (with first
dimension equal to \code{n.sims}), each matrix is attached as a 3-way array,
and so forth.
}
\note{
Without detaching, do not use \code{attach.all} or \code{attach.bugs}
on another (\code{bugs}) object, because instead of the given name, an
object called \code{name} is attached. Therefore strange things may happen
\ldots{}
}
\examples{
# An example model file is given in:
model.file <- system.file('model', 'schools.txt', package='R2MultiBUGS')
# Some example data (see ?schools for details):
data(schools)
J <- nrow(schools)
y <- schools$estimate
sigma.y <- schools$sd
data <- list ('J', 'y', 'sigma.y')
inits <- function(){
list(theta = rnorm(J, 0, 100), mu.theta = rnorm(1, 0, 100),
sigma.theta = runif(1, 0, 100))
}
parameters <- c('theta', 'mu.theta', 'sigma.theta')
\dontrun{
## See ?bugs if the following fails:
schools.sim <- bugs(data, inits, parameters, model.file,
n.chains = 3, n.iter = 1000,
working.directory = NULL)
# Do some inferential summaries
attach.bugs(schools.sim)
# posterior probability that the coaching program in school A
# is better than in school C:
print(mean(theta[,1] > theta[,3]))
# 50\% posterior interval for the difference between school A's
# and school C's program:
print(quantile(theta[,1] - theta[,3], c(.25, .75)))
plot(theta[,1], theta[,3])
detach.bugs()
}
}
\seealso{
\code{\link{bugs}}, \code{\link{attach}}, \code{\link{detach}}
}
\keyword{data}
| /man/attach.all.Rd | no_license | MultiBUGS/R2MultiBUGS | R | false | true | 3,256 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attach.all.R
\name{attach.all}
\alias{attach.all}
\alias{detach.all}
\alias{attach.bugs}
\alias{detach.bugs}
\title{Attach / detach elements of (bugs) objects to search path}
\usage{
attach.all(x, overwrite = NA, name = "attach.all")
}
\arguments{
\item{x}{An object, which must be of class \code{bugs} for
\code{attach.bugs}.}
\item{overwrite}{If \code{TRUE}, objects with identical names in the
Workspace (.GlobalEnv) that are masking objects in the database to be
attached will be deleted. If \code{NA} (the default) and an interactive
session is running, a dialog box asks the user whether masking objects
should be deleted. In non-interactive mode, behaviour is identical to
\code{overwrite=FALSE}, i.e. nothing will be deleted.}
\item{name}{The name of the environment where \code{x} will be attached /
which will be detached.}
}
\value{
\code{attach.all} and \code{attach.bugs} invisibly return the
\code{\link{environment}}(s).
\code{detach.all} and \code{detach.bugs} detach the \code{environment}(s)
named \code{name} created by \code{attach.all}.
}
\description{
The database is attached/detached to the search path. See
\code{\link{attach}} for details.
}
\details{
While \code{attach.all} attaches all elements of an object \code{x} to a
database called \code{name}, \code{attach.bugs} attaches all elements of
\code{x$sims.list} to the database \code{bugs.sims} itself making use of
\code{attach.all}.
\code{detach.all} and \code{detach.bugs} are removing the databases
mentioned above.
\code{attach.all} also attaches \code{n.sims} (the number of simulations
saved from the MCMC runs) to the database.
Each scalar parameter in the model is attached as vectors of length
\code{n.sims}, each vector is attached as a 2-way array (with first
dimension equal to \code{n.sims}), each matrix is attached as a 3-way array,
and so forth.
}
\note{
Without detaching, do not use \code{attach.all} or \code{attach.bugs}
on another (\code{bugs}) object, because instead of the given name, an
object called \code{name} is attached. Therefore strange things may happen
\ldots{}
}
\examples{
# An example model file is given in:
model.file <- system.file('model', 'schools.txt', package='R2MultiBUGS')
# Some example data (see ?schools for details):
data(schools)
J <- nrow(schools)
y <- schools$estimate
sigma.y <- schools$sd
data <- list ('J', 'y', 'sigma.y')
inits <- function(){
list(theta = rnorm(J, 0, 100), mu.theta = rnorm(1, 0, 100),
sigma.theta = runif(1, 0, 100))
}
parameters <- c('theta', 'mu.theta', 'sigma.theta')
\dontrun{
## See ?bugs if the following fails:
schools.sim <- bugs(data, inits, parameters, model.file,
n.chains = 3, n.iter = 1000,
working.directory = NULL)
# Do some inferential summaries
attach.bugs(schools.sim)
# posterior probability that the coaching program in school A
# is better than in school C:
print(mean(theta[,1] > theta[,3]))
# 50\% posterior interval for the difference between school A's
# and school C's program:
print(quantile(theta[,1] - theta[,3], c(.25, .75)))
plot(theta[,1], theta[,3])
detach.bugs()
}
}
\seealso{
\code{\link{bugs}}, \code{\link{attach}}, \code{\link{detach}}
}
\keyword{data}
|
# Collapse per-probe TIMBR eQTL results: each file under results/ contains a
# `results.crp` object; collect its posterior P(K | y) keyed by the probe
# name parsed from the filename.
file.list <- list.files("results/")
# lapply replaces the original list grown one element at a time via
# output[[length(output) + 1]] (quadratic-time append); order is preserved.
output <- lapply(file.list, function(result_file) {
  # Probe = third underscore-delimited token, with the file extension
  # (everything after the first dot) removed.
  probe <- strsplit(strsplit(result_file, "_", fixed = TRUE)[[1]][3],
                    ".", fixed = TRUE)[[1]][1]
  print(probe)
  # load() binds `results.crp` into this function's environment.
  load(paste0("results/", result_file))
  list(probe = probe, p.K.given.y = results.crp$p.K.given.y)
})
save(output, file = "eqtl_full_results.RData")
| /eqtl/TIMBR_eqtl_collapse_results.R | no_license | wesleycrouse/TIMBR_data | R | false | false | 320 | r | file.list <- list.files("results/")
output <- list()
for (i in file.list){
probe <- unlist(strsplit(unlist(strsplit(i, "_"))[3], "[.]"))[1]
print(probe)
load(paste0("results/", i))
output[[length(output)+1]] <- list(probe=probe, p.K.given.y=results.crp$p.K.given.y)
}
save(output, file="eqtl_full_results.RData")
|
library(qtl)
library(snowfall)
# Load the F2 cross (43 traits) and precompute genotype imputations and
# probabilities at 1 cM steps, as required by scantwo.
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
# snowfall cluster: 16 workers, each given the cross object and the qtl pkg.
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
# Permutation test for the two-QTL scan on phenotype columns 3-4
# (Haley-Knott regression, 10 permutations, 16-way parallel).
set.seed(8)
system.time(
scantwo.perm.imp.2.8 <-
scantwo(LG.f2.after.crossover,pheno.col=3:4,method="hk",n.perm=10,n.cluster = 16)
)
# NOTE(review): this assigns ALL phenotype column names to the permutation
# object although only pheno.col 3:4 were scanned -- a length mismatch here
# would error or mislabel; confirm the intended names.
names(scantwo.perm.imp.2.8) <- colnames(LG.f2.after.crossover$pheno)
sfStop()
# save output
save(scantwo.perm.imp.2.8, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.2.8.Rdata")
| /F2/scantwo/scantwo_perm_2.8.R | no_license | leejimmy93/KIAT_cabernet | R | false | false | 744 | r | library(qtl)
library(snowfall)
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
set.seed(8)
system.time(
scantwo.perm.imp.2.8 <-
scantwo(LG.f2.after.crossover,pheno.col=3:4,method="hk",n.perm=10,n.cluster = 16)
)
names(scantwo.perm.imp.2.8) <- colnames(LG.f2.after.crossover$pheno)
sfStop()
# save output
save(scantwo.perm.imp.2.8, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.2.8.Rdata")
|
### Ch. 3 Exploring EML
##############################
# Step 0: load packages
##############################
library(devtools)
library(dataone)
library(datapack)
library(EML)
library(remotes)
library(XML)
library(arcticdatautils)
library(datamgmt)
##############################
# Step 1: Need to be in member node to explore file
##############################
# Connect to the DataONE STAGING coordinating node and grab the test
# member node; the EML document below is fetched from there.
cn_staging <- CNode('STAGING')
adc_test <- getMNode(cn_staging, 'urn:node:mnTestARCTIC')
##############################
# Step 2: read in and view doc for crude view of the EML file
##############################
# getObject() returns the raw bytes for the given PID; read_eml() parses
# them into a nested emld list.
doc <- read_eml(getObject(adc_test, "urn:uuid:558eabf1-1e91-4881-8ba3-ef8684d8f6a1"))
View(doc)
# explore further
doc$dataset # view data set element
doc$dataset$creator # view data set creator
# pressing tab now will bring up a list since creator is a series-type of object
#doc$dataset$creator[[1]]$
##############################
# Step 3: use the eml_get() function to explore EML
# takes any chunk of EML and returns all instances of the specified element (examples below)
# eml_get(doc, "entity")
##############################
# NOTE: `doc` is reassigned here -- from this point on the examples run
# against the example EML bundled with arcticdatautils, not the remote doc.
doc <- read_eml(system.file("example-eml.xml", package = "arcticdatautils"))
eml_get(doc, "creator")
eml_get(doc, "boundingCoordinates")
eml_get(doc, "url")
##############################
# Step 3: or use the eml_get_simple() function as a simplified alternative
# eml_get_simple(doc$dataset$otherEntity, "entityName")
##############################
# Practice question: Which creators have a surName "Mecum"?
# example using which_in_eml():
n <- which_in_eml(doc$dataset$creator, "surName", "Mecum")
doc$dataset$creator[[n]] # answer
# example using combo of eml_get_simple() and which():
ent_names <- eml_get_simple(doc$dataset$creator, "surName")
i <- which(ent_names == "Mecum")
doc$dataset$creator[[i]] # answer
| /code/Ch3_Exploring_EML.R | no_license | samanthacsik/NCEAS-DataTeamTraining | R | false | false | 1,844 | r | ### Ch. 3 Exploring EML
##############################
# Step 0: load packages
##############################
library(devtools)
library(dataone)
library(datapack)
library(EML)
library(remotes)
library(XML)
library(arcticdatautils)
library(datamgmt)
##############################
# Step 1: Need to be in member node to explore file
# (connects to the DataONE staging coordinating node and the Arctic test
# member node; presumably requires a valid DataONE auth token -- confirm
# before running)
##############################
cn_staging <- CNode('STAGING')
adc_test <- getMNode(cn_staging, 'urn:node:mnTestARCTIC')
##############################
# Step 2: read in and view doc for crude view of the EML file
##############################
doc <- read_eml(getObject(adc_test, "urn:uuid:558eabf1-1e91-4881-8ba3-ef8684d8f6a1"))
View(doc)
# explore further
doc$dataset # view data set element
doc$dataset$creator # view data set creator
# pressing tab now will bring up a list since creator is a series-type of object
#doc$dataset$creator[[1]]$
##############################
# Step 3: use the eml_get() function to explore EML
# takes any chunk of EML and returns all instances of the specified element (examples below)
# eml_get(doc, "entity")
##############################
# switch to the packaged example EML file so the remaining steps work offline
doc <- read_eml(system.file("example-eml.xml", package = "arcticdatautils"))
eml_get(doc, "creator")
eml_get(doc, "boundingCoordinates")
eml_get(doc, "url")
##############################
# Step 4: or use the eml_get_simple() function as a simplified alternative
# eml_get_simple(doc$dataset$otherEntity, "entityName")
##############################
# Practice question: Which creators have a surName "Mecum"?
# example using which_in_eml():
n <- which_in_eml(doc$dataset$creator, "surName", "Mecum")
doc$dataset$creator[[n]] # answer
# example using combo of eml_get_simple() and which():
ent_names <- eml_get_simple(doc$dataset$creator, "surName")
i <- which(ent_names == "Mecum")
doc$dataset$creator[[i]] # answer
|
#' Make video from still image sequence
#'
#' Make a video file from a sequence of still images using FFmpeg.
#'
#' @details
#' \code{ffmpeg} is a wrapper function around the popular \href{https://www.ffmpeg.org/}{FFmpeg command line multimedia framework}.
#' It translates arguments provided by the user in familiar R syntax into a system call to the \code{ffmpeg} command line tool, which must be installed on the system.
#'
#' \code{ffmpeg} does not provide complete flexibility to allow making every possible valid call to FFmpeg,
#' but users who are that well versed in the use of FFmpeg can use the command line utility directly or pass their custom calls directly to \code{system} from within R.
#' The \code{ffmpeg} R function is primarily useful to users not well versed in the use of the FFmpeg multimedia framework who will be satisfied with the level of flexibility provided.
#' Since this function is provided in the context of the \code{mapmate} package, it is aimed at assisting with converting still image sequences to video.
#' While additional uses may be incorporated into \code{ffmpeg} in future, the FFmpeg multimedia framework itself provides a far broader suite of tools and functionality than is needed here.
#'
#' Keep in mind that the purpose of \code{mapmate} is not to generate animations directly from R. See packages like \code{animation} if that is more the goal.
#' The goal that \code{mapmate} attempts to fulfill is strictly that of animation pre-production and it does so by focusing on the generation of still image sequences.
#' Any animation is expected to be done later by the user via software dedicated to video editing and production.
#' \code{ffmpeg} is provided in \code{mapmate} as an exception to the rule for users who wish to trade the full control and flexibility over video editing and production
#' that \code{mapmate} aims to avoid entangling itself with for the convenience of generating relatively basic video output directly from an R session.
#'
#' Ultimately, if you want an incredibly fancy video, do not rely on \code{ffmpeg} to splice and merge and overlay and juxtapose all your layers together,
#' to crop and pan and rotate, to apply effects and transitions and every other form of video processing to your image sequences; finish the production outside of R, because that is what makes sense.
#' If you are an FFmpeg expert, you don't need to use \code{ffmpeg} at all (but perhaps consider helping to improve this code!).
#' If you are not an FFmpeg expert, use other video editing software.
#'
#' There always comes a point where it makes the most sense to transition from one application to another.
#' When external solutions exist, it does not make sense to port the solution to every problem into R.
#' Future package versions may provide more and more functionality and control over video production directly from R through \code{ffmpeg} or other functions,
#' but at this time this should not be a primary development goal for \code{mapmate}.
#'
#' \subsection{Input Files}{
#' A common way to specify a set of input image files when using FFmpeg directly is to provide something like \code{myimages\%04d.png},
#' which requires specifying the entire, non-changing file name with the only substitution being for the unique, ordered, consecutive integer file numbering component of the file name.
#' The pattern used indicates how many places are occupied by the file indices, which should be constant. In this example, \code{\%04d} represents the file numbering \code{0000, 0001, 0002, ..., 9999}.
#' If using Windows, you must use this approach. Any image sequences generated by \code{mapmate} will follow this kind of file naming convention.
#' If you want to make videos from image sequences not made by \code{mapmate}, they will also commonly follow this convention, but not always, in which case you will have to rename your files.
#'
#' An alternative and often convenient way to provide a general pattern for matching to a set of input files is with globbing. However, globbing is not available on Windows.
#' Linux users may find this additional option helpful in cases where file naming is not quite as described above or, for example, if there are multiple sequences of files in one directory.
#' If \code{glob=TRUE}, wildcards can be used in the \code{pattern} argument, e.g., \code{pattern="*png"}, \code{pattern="myimages*png"}, or \code{pattern="*images0*.png"}.
#' The default is \code{glob=FALSE} and \code{glob} is simply ignored on Windows.
#'
#' The current package version of \code{ffmpeg} allows merging more than two sequences without error,
#' but testing has not confirmed this is actually working correctly, as all layers do not always appear in the output video.
#' }
#'
#' \subsection{Merging multiple image sequences}{
#'
#' \emph{Merging is experimental and under development. It does not yet work as intended.}
#'
#' \code{pattern} may be a vector referring to multiple image sequences. This is for merging or blending layers into one output video file.
#' The first vector element refers to the top layer among image sequences.
#' All files do not need to be in the same directory; \code{dir} can be vectorized to match with \code{pattern} if sequences are in different locations.
#' Similarly, \code{rate}, \code{delay}, and \code{start} can be vectors. If nothing but \code{pattern} is a vector, the other arguments are duplicated.
#' Vectors should be of equal length.
#'
#' Merging capabilities are limited. An expert in the use of FFmpeg should use it directly and not via this wrapper function.
#' If merging sequences with this function, it is recommended they be the same number of frames, begin from the same starting frame, and proceed at the same frame rate, though this is not strictly required.
#' Also, merging only two sequences at a time is recommended or they may not all display.
#' Sequences must be very similar in a variety of respects. For example, images must be the same dimensions across sequences.
#' For greater control, use FFmpeg directly from the command line and consult official FFmpeg documentation, or help improve this wrapper function via Github issues and pull requests.
#'
#' Remember that \code{mapmate} generates still image sequences that are intended for later use in a dedicated video editing program, one with a GUI, unlike FFmpeg which is a command line application.
#' In such a program, it is assumed the user may be dropping multiple image sequences on different tracks of a project timeline, layering the tracks together,
#' and for this reason the default background png color is transparent.
#' In the default case, using \code{alpha} less than \code{1.0} is generally unnecessary when merging two image sequences into a video with FFmpeg.
#' If not using defaults, \code{alpha} may not provide the flexibility desired.
#' }
#'
#' \subsection{Framerates}{
#' For \code{rate}, non-integer numeric values are rounded. Character options may be a valid abbreviation such as \code{"ntsc"} or a quoted ratio such as \code{"30000/1001"}.
#' Note that this is the familiar "29.97" (or, 29.97003, to be exact) but FFmpeg does not accept values like these.
#' Using \code{delay} instead of \code{rate} is more limiting since \code{delay} is converted back to rate (\eqn{delay=1/rate}), but must then be rounded to an integer.
#' Using \code{rate} is recommended. Arbitrary, non-standard framerates may lead to rendered videos that do not play properly in many media players.
#' For common settings and character abbreviations, see \href{http://ffmpeg.org/ffmpeg-utils.html#Video-rate}{FFmpeg standard video rates}.
#'
#' \code{rate} technically refers to the assumed or intended framerate of the input image file sequence.
#' This is important to mention because of the distinction between input and output framerates in FFmpeg.
#' See the details below on \code{min.rate} and \code{fps.out} to understand the differences and how to avoid some common problems.
#' }
#'
#' \subsection{Output Scaling}{
#' If \code{size} is not set to \code{"source"}, the output video is scaled.
#' \code{size} can be a character string of dimensions in length by height format such as \code{"720x480"} or an abbreviated standard such as \code{"ntsc"}.
#' See \href{http://ffmpeg.org/ffmpeg-utils.html#Video-size}{FFmpeg standard video sizes} for common dimensions and available abbreviations.
#' }
#'
#' \subsection{Presets, Codecs, Pixel Formats, Lossless Encoding, and minimum framerates}{
#' Presets provide a certain encoding speed to compression ratio.
#' Available presets include \code{ultrafast}, \code{superfast}, \code{veryfast}, \code{faster}, \code{fast}, \code{medium}, \code{slow}, \code{slower}, \code{veryslow}.
#' Faster speed corresponds to greater file size. Slower speeds are due to greater compression.
#'
#' \code{codec} is ignored if the file name in \code{pattern} ends with \code{.gif}.
#' For other video output file types a default codec is used depending on the file extension in \code{pattern} when \code{codec="default"}.
#' These can be overridden with options like \code{codec="h264"}, \code{"libx264"}, \code{"libvpx"}, \code{"prores"}, \code{"qtrle"}, etc.,
#' but the user needs to be knowledgeable regarding which codecs can be used for which output types or errors will be thrown.
#'
#' \code{format} is ignored if the file name in \code{pattern} ends with \code{.gif}.
#' The default is \code{"yuv420p"}, which performs 4:2:0 chroma subsampling.
#' This pixel format can reduce video quality, but it is the default because it ensures compatibility with most media players, many of which still cannot play 4:4:4 video.
#' For valid alternatives, run \code{system("ffmpeg -pix_fmts")}.
#'
#' \code{lossless} is ignored except for relevant \code{codec} settings, e.g., \code{h264} or \code{libx264}.
#' If \code{TRUE}, recommended \code{preset} values are \code{ultrafast} or \code{veryslow}. See \code{https://trac.ffmpeg.org/wiki/Encode/H.264} for more information.
#'
#' \code{min.rate} applies only to non-\code{gif} video output. Video files typically have framerates of 25 fps or 30 fps or higher.
#' In the case of creating gifs from an image file sequence, low framerates on the order of 10 fps or lower, even 1 fps, are often desired.
#' If such a low framerate is desired for video file output, many media players may not be able to play, or play properly, such a video file.
#' For example, the popular VLC media player can have difficulties with playback of video files created with a framerate of less than 10 fps, particularly with rates closer to 1.
#'
#' \code{min.rate} sets a lower bound on the framerate of the output file.
#' The intended frame rate given by \code{rate} or derived from \code{delay}, of the input image file sequence specified in \code{pattern},
#' is still preserved in the output playback. However, if \code{rate} is less than \code{min.rate}, the output file will achieve \code{min.rate} fps by duplicating frames.
#' For example, if \code{rate=1} and \code{min.rate=10}, a sequence consisting of 60 images will be converted to a 10 fps video containing 600 frames and taking the intended 60 seconds to play.
#' The tradeoff for compatibility with various media players is increased video file size, but depending on the codec, should not increase file size linearly,
#' e.g., not likely a ten times increase for the given example.
#'
#' Nevertheless, control is given to the user over the video output fps lower bound via \code{min.rate}. Just know that too low a value can cause problems.
#' If \code{rate} is greater than \code{min.rate}, the output file framerate will match the specified \code{rate} of the input image sequence.
#' This also may not be desired if \code{rate} is an atypical number for video framerates.
#' This matching can be overridden by specifying \code{fps.out} as something other than \code{rate}.
#' }
#'
#' @param dir directory containing images, defaults to working directory.
#' @param pattern character, for matching a set of input image files. See details for acceptable and possible alternative patterns.
#' @param output character, output file name.
#' @param output_dir character, output directory. Defaults to working directory.
#' @param rate integer or character, intended framerate of input image sequence in Hertz (Hz) or frames per second (fps). See details.
#' @param delay numeric, time delay between frames in output video. Alternative to \code{rate}. See details.
#' @param start integer, frame to start from in input image sequence. Defaults to \code{start=1}.
#' @param size character, the dimensions of the video output. Defaults to \code{"source"}, which is equal to the dimensions of the input files. Otherwise scaling is performed on the output. See details.
#' @param preset character, encoding presets available in FFmpeg. Defaults to \code{ultrafast}. See details.
#' @param codec character, the video codec used. See details.
#' @param format character, the pixel format. See details.
#' @param lossless logical, use lossless H.264 encoding if applicable. Defaults to \code{FALSE}. See details.
#' Set to zero if your image sequence has file names beginning from zero or a higher number if you want to skip frames.
#' @param min.rate integer, the minimum frame rate for non-\code{gif} video output (\code{mp4}, \code{mov}, \code{mkv}, \code{webm}). Defaults to \code{10}. See details.
#' @param fps.out integer or character, framerate of output video. This can be given in the same ways as \code{rate}. Defaults to \code{rate}. See details.
#' @param alpha numeric, from 0 to 1.0. Only applicable when \code{pattern} is vectorized, referring to layering of multiple image sequences.
#' Defaults to \code{1.0} (non-transparent) since \code{mapmate} produces transparent-background png sequences by default with subsequent layering in mind.
#' @param overwrite logical, overwrite existing output file.
#' @param glob logical, defaults to \code{FALSE}. Globbing is not available on Windows. Linux users, see details on how \code{glob} affects \code{pattern}.
#' @param details logical, whether to show FFmpeg output on the R console.
#'
#' @return returns the system call to FFmpeg as a character string.
#' @export
#'
#' @examples
#' \dontrun{
#' data(borders)
#' library(dplyr)
#' n <- 90
#' borders <- map(1:n, ~mutate(borders, id = .x)) %>% bind_rows()
#' args <- list(width=300, height=300, res=300, bg="black")
#' save_seq(borders, id="id", n.frames=n, col="white",
#' type="maplines", file="images", png.args=args)
#' ffmpeg(pattern="images_%04d.png", output="video.mp4", rate=10)}
ffmpeg <- function(dir=".", pattern, output, output_dir=".", rate="ntsc", delay=1, start=1, size="source",
                   preset="ultrafast", codec="default", format="yuv420p", lossless=FALSE, min.rate=10,
                   fps.out=rate, alpha=1.0, overwrite=FALSE, glob=FALSE, details=FALSE){
  # 'rate' and 'delay' are mutually exclusive ways to give the input framerate.
  if (!missing(rate) && !missing(delay)) stop("specify 'rate' or 'delay' but not both")
  if (!missing(delay)) rate <- round(1 / delay)
  # input files ----
  # FFmpeg's glob pattern matching is unavailable on Windows. Note that
  # .Platform$OS.type is only ever "unix" or "windows" -- the original
  # comparison against "linux" could never be TRUE, so globbing silently
  # never engaged; test against "unix" instead.
  is_unix <- .Platform$OS.type == "unix"
  iglob <- "-pattern_type glob -i "
  input <- file.path(dir, pattern)
  # more than one input sequence means the layers are blended together
  blend <- length(input) > 1
  input <- if (is_unix && glob) paste0(iglob, "\"", input, "\"") else paste("-i", input)
  inrate <- paste("-framerate", rate)
  start <- paste("-start_number", start)
  input <- paste0(paste(start, inrate, input), collapse=" ")
  # output file ----
  ext <- strsplit(output, "\\.")[[1]]
  ext_stop <- "'output' must end in '.mp4', '.mov', '.mkv', '.webm', or '.gif'"
  if (length(ext) == 1) stop(ext_stop) else ext <- utils::tail(ext, 1)
  if (!ext %in% c("mp4", "mov", "mkv", "webm", "gif")) stop(ext_stop)
  output <- file.path(output_dir, output)
  # video filter chain ----
  format <- paste0("format=", format)
  if (size == "source") {
    size <- ""                                      # keep source dimensions
  } else if (ext != "gif") {
    size <- paste0(",scale=", size, ",setsar=1:1")  # scale filter for video
  } else size <- paste("-s", size)                  # plain -s flag for gif
  if (blend) {
    blend_prefix <- "-filter_complex \"blend=all_mode='overlay':all_opacity="
    if (ext == "gif") {
      vf <- paste0(blend_prefix, alpha, "\"")
    } else {
      vf <- paste0(blend_prefix, alpha, ",", format, size, "\"")
    }
  } else if (ext == "gif") {
    vf <- size
  } else vf <- paste0("-vf ", "\"", format, size, "\"")
  output <- paste(vf, output)
  # Enforce the minimum output framerate only for numeric fps.out; max() on a
  # character rate (e.g. "ntsc" or "12") coerces to character and compares
  # lexically, which could silently pick the wrong value.
  outrate <- if (is.numeric(fps.out)) max(fps.out, min.rate) else fps.out
  outrate <- paste("-r", outrate)
  output <- paste(outrate, output, ifelse(overwrite, "-y", "-n"))
  # video codec ----
  if (ext == "gif") {
    vc <- " "                                       # gif output takes no codec/preset flags
  } else {
    if (codec == "default") codec <- switch(ext, mp4="libx264", mov="libx264", mkv="libx264", webm="libvpx")
    vc <- paste0(" -c:v ", codec, " -preset ", preset, " ")
    # -qp 0 gives lossless H.264 encoding
    if (lossless && codec %in% c("h264", "libx264")) vc <- paste0(vc, "-qp 0 ")
  }
  # Collapse any runs of spaces in the assembled command (the original
  # gsub(" ", " ", ...) replaced a space with a space, a no-op).
  x <- gsub(" +", " ", paste0("ffmpeg ", input, vc, output))
  if (details) system(x) else system(x, show.output.on.console=FALSE)
  x  # return the system call string so the caller can inspect/reuse it
}
| /R/ffmpeg.R | no_license | cassljx/mapmate | R | false | false | 16,918 | r | #' Make video from still image sequence
#'
#' Make a video file from a sequence of still images using FFmpeg.
#'
#' @details
#' \code{ffmpeg} is a wrapper function around the popular \href{https://www.ffmpeg.org/}{FFmpeg command line multimedia framework}.
#' It translates arguments provided by the user in familiar R syntax into a system call to the \code{ffmpeg} command line tool, which must be installed on the system.
#'
#' \code{ffmpeg} does not provide complete flexibility to allow making every possible valid call to FFmpeg,
#' but users who are that well versed in the use of FFmpeg can use the command line utility directly or pass their custom calls directly to \code{system} from within R.
#' The \code{ffmpeg} R function is primarily useful to users not well versed in the use of the FFmpeg multimedia framework who will be satisfied with the level of flexibility provided.
#' Since this function is provided in the context of the \code{mapmate} package, it is aimed at assisting with converting still image sequences to video.
#' While additional uses may be incorporated into \code{ffmpeg} in future, the FFmpeg multimedia framework itself provides a far broader suite of tools and functionality than is needed here.
#'
#' Keep in mind that the purpose of \code{mapmate} is not to generate animations directly from R. See packages like \code{animation} if that is more the goal.
#' The goal that \code{mapmate} attempts to fulfill is strictly that of animation pre-production and it does so by focusing on the generation of still image sequences.
#' Any animation is expected to be done later by the user via software dedicated to video editing and production.
#' \code{ffmpeg} is provided in \code{mapmate} as an exception to the rule for users who wish to trade the full control and flexibility over video editing and production
#' that \code{mapmate} aims to avoid entangling itself with for the convenience of generating relatively basic video output directly from an R session.
#'
#' Ultimately, if you want an incredibly fancy video, do not rely on \code{ffmpeg} to splice and merge and overlay and juxtapose all your layers together,
#' to crop and pan and rotate, to apply effects and transitions and every other form of video processing to your image sequences; finish the production outside of R, because that is what makes sense.
#' If you are an FFmpeg expert, you don't need to use \code{ffmpeg} at all (but perhaps consider helping to improve this code!).
#' If you are not an FFmpeg expert, use other video editing software.
#'
#' There always comes a point where it makes the most sense to transition from one application to another.
#' When external solutions exist, it does not make sense to port the solution to every problem into R.
#' Future package versions may provide more and more functionality and control over video production directly from R through \code{ffmpeg} or other functions,
#' but at this time this should not be a primary development goal for \code{mapmate}.
#'
#' \subsection{Input Files}{
#' A common way to specify a set of input image files when using FFmpeg directly is to provide something like \code{myimages\%04d.png},
#' which requires specifying the entire, non-changing file name with the only substitution being for the unique, ordered, consecutive integer file numbering component of the file name.
#' The pattern used indicates how many places are occupied by the file indices, which should be constant. In this example, \code{\%04d} represents the file numbering \code{0000, 0001, 0002, ..., 9999}.
#' If using Windows, you must use this approach. Any image sequences generated by \code{mapmate} will follow this kind of file naming convention.
#' If you want to make videos from image sequences not made by \code{mapmate}, they will also commonly follow this convention, but not always, in which case you will have to rename your files.
#'
#' An alternative and often convenient way to provide a general pattern for matching to a set of input files is with globbing. However, globbing is not available on Windows.
#' Linux users may find this additional option helpful in cases where file naming is not quite as described above or, for example, if there are multiple sequences of files in one directory.
#' If \code{glob=TRUE}, wildcards can be used in the \code{pattern} argument, e.g., \code{pattern="*png"}, \code{pattern="myimages*png"}, or \code{pattern="*images0*.png"}.
#' The default is \code{glob=FALSE} and \code{glob} is simply ignored on Windows.
#'
#' The current package version of \code{ffmpeg} allows merging more than two sequences without error,
#' but testing has not confirmed this is actually working correctly, as all layers do not always appear in the output video.
#' }
#'
#' \subsection{Merging multiple image sequences}{
#'
#' \emph{Merging is experimental and under development. It does not yet work as intended.}
#'
#' \code{pattern} may be a vector referring to multiple image sequences. This is for merging or blending layers into one output video file.
#' The first vector element refers to the top layer among image sequences.
#' All files do not need to be in the same directory; \code{dir} can be vectorized to match with \code{pattern} if sequences are in different locations.
#' Similarly, \code{rate}, \code{delay}, and \code{start} can be vectors. If nothing but \code{pattern} is a vector, the other arguments are duplicated.
#' Vectors should be of equal length.
#'
#' Merging capabilities are limited. An expert in the use of FFmpeg should use it directly and not via this wrapper function.
#' If merging sequences with this function, it is recommended they be the same number of frames, begin from the same starting frame, and proceed at the same frame rate, though this is not strictly required.
#' Also, merging only two sequences at a time is recommended or they may not all display.
#' Sequences must be very similar in a variety of respects. For example, images must be the same dimensions across sequences.
#' For greater control, use FFmpeg directly from the command line and consult official FFmpeg documentation, or help improve this wrapper function via Github issues and pull requests.
#'
#' Remember that \code{mapmate} generates still image sequences that are intended for later use in a dedicated video editing program, one with a GUI, unlike FFmpeg which is a command line application.
#' In such a program, it is assumed the user may be dropping multiple image sequences on different tracks of a project timeline, layering the tracks together,
#' and for this reason the default background png color is transparent.
#' In the default case, using \code{alpha} less than \code{1.0} is generally unnecessary when merging two image sequences into a video with FFmpeg.
#' If not using defaults, \code{alpha} may not provide the flexibility desired.
#' }
#'
#' \subsection{Framerates}{
#' For \code{rate}, non-integer numeric values are rounded. Character options may be a valid abbreviation such as \code{"ntsc"} or a quoted ratio such as \code{"30000/1001"}.
#' Note that this is the familiar "29.97" (or, 29.97003, to be exact) but FFmpeg does not accept values like these.
#' Using \code{delay} instead of \code{rate} is more limiting since \code{delay} is converted back to rate (\eqn{delay=1/rate}), but must then be rounded to an integer.
#' Using \code{rate} is recommended. Arbitrary, non-standard framerates may lead to rendered videos that do not play properly in many media players.
#' For common settings and character abbreviations, see \href{http://ffmpeg.org/ffmpeg-utils.html#Video-rate}{FFmpeg standard video rates}.
#'
#' \code{rate} technically refers to the assumed or intended framerate of the input image file sequence.
#' This is important to mention because of the distinction between input and output framerates in FFmpeg.
#' See the details below on \code{min.rate} and \code{fps.out} to understand the differences and how to avoid some common problems.
#' }
#'
#' \subsection{Output Scaling}{
#' If \code{size} is not set to \code{"source"}, the output video is scaled.
#' \code{size} can be a character string of dimensions in length by height format such as \code{"720x480"} or an abbreviated standard such as \code{"ntsc"}.
#' See \href{http://ffmpeg.org/ffmpeg-utils.html#Video-size}{FFmpeg standard video sizes} for common dimensions and available abbreviations.
#' }
#'
#' \subsection{Presets, Codecs, Pixel Formats, Lossless Encoding, and minimum framerates}{
#' Presets provide a certain encoding speed to compression ratio.
#' Available presets include \code{ultrafast}, \code{superfast}, \code{veryfast}, \code{faster}, \code{fast}, \code{medium}, \code{slow}, \code{slower}, \code{veryslow}.
#' Faster speed corresponds to greater file size. Slower speeds are due to greater compression.
#'
#' \code{codec} is ignored if the file name in \code{pattern} ends with \code{.gif}.
#' For other video output file types a default codec is used depending on the file extension in \code{pattern} when \code{codec="default"}.
#' These can be overridden with options like \code{codec="h264"}, \code{"libx264"}, \code{"libvpx"}, \code{"prores"}, \code{"qtrle"}, etc.,
#' but the user needs to be knowledgeable regarding which codecs can be used for which output types or errors will be thrown.
#'
#' \code{format} is ignored if the file name in \code{pattern} ends with \code{.gif}.
#' The default is \code{"yuv420p"}, which performs 4:2:0 chroma subsampling.
#' This pixel format can reduce video quality, but it is the default because it ensures compatibility with most media players, many of which still cannot play 4:4:4 video.
#' For valid alternatives, run \code{system("ffmpeg -pix_fmts")}.
#'
#' \code{lossless} is ignored except for relevant \code{codec} settings, e.g., \code{h264} or \code{libx264}.
#' If \code{TRUE}, recommended \code{preset} values are \code{ultrafast} or \code{veryslow}. See \code{https://trac.ffmpeg.org/wiki/Encode/H.264} for more information.
#'
#' \code{min.rate} applies only to non-\code{gif} video output. Video files typically have framerates of 25 fps or 30 fps or higher.
#' In the case of creating gifs from an image file sequence, low framerates on the order of 10 fps or lower, even 1 fps, are often desired.
#' If such a low framerate is desired for video file output, many media players may not be able to play, or play properly, such a video file.
#' For example, the popular VLC media player can have difficulties with playback of video files created with a framerate of less than 10 fps, particularly with rates closer to 1.
#'
#' \code{min.rate} sets a lower bound on the framerate of the output file.
#' The intended frame rate given by \code{rate} or derived from \code{delay}, of the input image file sequence specified in \code{pattern},
#' is still preserved in the output playback. However, if \code{rate} is less than \code{min.rate}, the output file will achieve \code{min.rate} fps by duplicating frames.
#' For example, if \code{rate=1} and \code{min.rate=10}, a sequence consisting of 60 images will be converted to a 10 fps video containing 600 frames and taking the intended 60 seconds to play.
#' The tradeoff for compatibility with various media players is increased video file size, but depending on the codec, should not increase file size linearly,
#' e.g., not likely a ten times increase for the given example.
#'
#' Nevertheless, control is given to the user over the video output fps lower bound via \code{min.rate}. Just know that too low a value can cause problems.
#' If \code{rate} is greater than \code{min.rate}, the output file framerate will match the specified \code{rate} of the input image sequence.
#' This also may not be desired if \code{rate} is an atypical number for video framerates.
#' This matching can be overridden by specifying \code{fps.out} as something other than \code{rate}.
#' }
#'
#' @param dir directory containing images, defaults to working directory.
#' @param pattern character, for matching a set of input image files. See details for acceptable and possible alternative patterns.
#' @param output character, output file name.
#' @param output_dir character, output directory. Defaults to working directory.
#' @param rate integer or character, intended framerate of input image sequence in Hertz (Hz) or frames per second (fps). See details.
#' @param delay numeric, time delay between frames in output video. Alternative to \code{rate}. See details.
#' @param start integer, frame to start from in input image sequence. Defaults to \code{start=1}.
#' @param size character, the dimensions of the video output. Defaults to \code{"source"}, which is equal to the dimensions of the input files. Otherwise scaling is performed on the output. See details.
#' @param preset character, encoding presets available in FFmpeg. Defaults to \code{ultrafast}. See details.
#' @param codec character, the video codec used. See details.
#' @param format character, the pixel format. See details.
#' @param lossless logical, use lossless H.264 encoding if applicable. Defaults to \code{FALSE}. See details.
#' Set to zero if your image sequence has file names beginning from zero or a higher number if you want to skip frames.
#' @param min.rate integer, the minimum frame rate for non-\code{gif} video output (\code{mp4}, \code{mov}, \code{mkv}, \code{webm}). Defaults to \code{10}. See details.
#' @param fps.out integer or character, framerate of output video. This can be given in the same ways as \code{rate}. Defaults to \code{rate}. See details.
#' @param alpha numeric, from 0 to 1.0. Only applicable when \code{pattern} is vectorized, referring to layering of multiple image sequences.
#' Defaults to \code{1.0} (non-transparent) since \code{mapmate} produces transparent-background png sequences by default with subsequent layering in mind.
#' @param overwrite logical, overwrite existing output file.
#' @param glob logical, defaults to \code{FALSE}. Globbing is not available on Windows. Linux users, see details on how \code{glob} affects \code{pattern}.
#' @param details logical, whether to show FFmpeg output on the R console.
#'
#' @return returns the system call to FFmpeg as a character string.
#' @export
#'
#' @examples
#' \dontrun{
#' data(borders)
#' library(dplyr)
#' n <- 90
#' borders <- map(1:n, ~mutate(borders, id = .x)) %>% bind_rows()
#' args <- list(width=300, height=300, res=300, bg="black")
#' save_seq(borders, id="id", n.frames=n, col="white",
#' type="maplines", file="images", png.args=args)
#' ffmpeg(pattern="images_%04d.png", output="video.mp4", rate=10)}
ffmpeg <- function(dir=".", pattern, output, output_dir=".", rate="ntsc", delay=1, start=1, size="source",
                   preset="ultrafast", codec="default", format="yuv420p", lossless=FALSE, min.rate=10,
                   fps.out=rate, alpha=1.0, overwrite=FALSE, glob=FALSE, details=FALSE){
  # 'rate' and 'delay' are mutually exclusive ways to give the input framerate.
  if (!missing(rate) && !missing(delay)) stop("specify 'rate' or 'delay' but not both")
  if (!missing(delay)) rate <- round(1 / delay)
  # input files ----
  # FFmpeg's glob pattern matching is unavailable on Windows. Note that
  # .Platform$OS.type is only ever "unix" or "windows" -- the original
  # comparison against "linux" could never be TRUE, so globbing silently
  # never engaged; test against "unix" instead.
  is_unix <- .Platform$OS.type == "unix"
  iglob <- "-pattern_type glob -i "
  input <- file.path(dir, pattern)
  # more than one input sequence means the layers are blended together
  blend <- length(input) > 1
  input <- if (is_unix && glob) paste0(iglob, "\"", input, "\"") else paste("-i", input)
  inrate <- paste("-framerate", rate)
  start <- paste("-start_number", start)
  input <- paste0(paste(start, inrate, input), collapse=" ")
  # output file ----
  ext <- strsplit(output, "\\.")[[1]]
  ext_stop <- "'output' must end in '.mp4', '.mov', '.mkv', '.webm', or '.gif'"
  if (length(ext) == 1) stop(ext_stop) else ext <- utils::tail(ext, 1)
  if (!ext %in% c("mp4", "mov", "mkv", "webm", "gif")) stop(ext_stop)
  output <- file.path(output_dir, output)
  # video filter chain ----
  format <- paste0("format=", format)
  if (size == "source") {
    size <- ""                                      # keep source dimensions
  } else if (ext != "gif") {
    size <- paste0(",scale=", size, ",setsar=1:1")  # scale filter for video
  } else size <- paste("-s", size)                  # plain -s flag for gif
  if (blend) {
    blend_prefix <- "-filter_complex \"blend=all_mode='overlay':all_opacity="
    if (ext == "gif") {
      vf <- paste0(blend_prefix, alpha, "\"")
    } else {
      vf <- paste0(blend_prefix, alpha, ",", format, size, "\"")
    }
  } else if (ext == "gif") {
    vf <- size
  } else vf <- paste0("-vf ", "\"", format, size, "\"")
  output <- paste(vf, output)
  # Enforce the minimum output framerate only for numeric fps.out; max() on a
  # character rate (e.g. "ntsc" or "12") coerces to character and compares
  # lexically, which could silently pick the wrong value.
  outrate <- if (is.numeric(fps.out)) max(fps.out, min.rate) else fps.out
  outrate <- paste("-r", outrate)
  output <- paste(outrate, output, ifelse(overwrite, "-y", "-n"))
  # video codec ----
  if (ext == "gif") {
    vc <- " "                                       # gif output takes no codec/preset flags
  } else {
    if (codec == "default") codec <- switch(ext, mp4="libx264", mov="libx264", mkv="libx264", webm="libvpx")
    vc <- paste0(" -c:v ", codec, " -preset ", preset, " ")
    # -qp 0 gives lossless H.264 encoding
    if (lossless && codec %in% c("h264", "libx264")) vc <- paste0(vc, "-qp 0 ")
  }
  # Collapse any runs of spaces in the assembled command (the original
  # gsub(" ", " ", ...) replaced a space with a space, a no-op).
  x <- gsub(" +", " ", paste0("ffmpeg ", input, vc, output))
  if (details) system(x) else system(x, show.output.on.console=FALSE)
  x  # return the system call string so the caller can inspect/reuse it
}
|
context("Epw Class")
# Test suite for the Epw class in eplusr: EPW (EnergyPlus Weather) file
# parsing, header getters/setters, weather-data getters/setters/taggers,
# saving, cloning, printing, S3 methods and weather-file downloading.
# NOTE(review): most tests require a local EnergyPlus v8.8 install (installed
# on demand) and are skipped on CRAN. Several tests reach into R6 private
# state via .__enclos_env__ / get_priv_env() to fabricate conditions -- these
# are tightly coupled to eplusr internals; confirm on internal refactors.
# Expectations are order-dependent: setters mutate 'epw' used by later getters.
eplusr_option(verbose_info = FALSE)
# IDD {{{
test_that("IDD", {
# parsing the bundled EPW IDD should yield a printable Idd object
expect_is(idd <- EpwIdd$new(system.file("extdata/epw.idd", package = "eplusr")), "Idd")
expect_output(idd$print())
})
# }}}
# META {{{
test_that("Meta info", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
# both read_epw() and Epw$new() construct an Epw from a file path
expect_is(epw <- read_epw(path_epw), "Epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
# can update the path after saved
expect_equal(epw$path(), normalizePath(path_epw))
# can get definition
expect_is(epw$definition("LOCATION"), "IddObject")
})
# }}}
# HEADER {{{
test_that("Header getter and setter", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
# $location() {{{
# setting all LOCATION fields at once returns the updated header as a list
expect_equal(
epw$location(city = "Chongqing", state_province = "Chongqing", country = "China",
data_source = "TMY", wmo_number = "724944", latitude = 20.0,
longitude = -120.0, time_zone = 8L, elevation = 100
),
list(city = "Chongqing",
state_province = "Chongqing",
country = "China",
data_source = "TMY",
wmo_number = "724944",
latitude = 20.0,
longitude = -120.0,
time_zone = 8L,
elevation = 100
)
)
expect_equal(epw$location(city = "chongqing")$city, "chongqing")
expect_error(epw$location(city = 1))
# }}}
# $design_condition() {{{
expect_is(epw$design_condition(), "list")
expect_equal(names(epw$design_condition()), c("source", "heating", "cooling", "extremes"))
# }}}
# $typical_extreme_period() {{{
expect_is(epw$typical_extreme_period(), "data.table")
expect_equal(names(epw$typical_extreme_period()), c("index", "name", "type", "start_day", "end_day"))
expect_equal(nrow(epw$typical_extreme_period()), 6L)
# }}}
# $ground_temperature {{{
expect_is(epw$ground_temperature(), "data.table")
expect_equal(names(epw$ground_temperature()), c(
"index", "depth",
"soil_conductivity", "soil_density", "soil_specific_heat",
"January", "February", "March",
"April", "May", "June",
"July", "August", "September",
"October", "November", "December"
))
expect_equal(nrow(epw$ground_temperature()), 3L)
# }}}
# NOTE(review): the following $ground_temperature block duplicates the one
# directly above verbatim -- likely a copy-paste artifact; harmless since it
# only exercises getters, but one copy could be removed.
# $ground_temperature {{{
expect_is(epw$ground_temperature(), "data.table")
expect_equal(names(epw$ground_temperature()), c(
"index", "depth",
"soil_conductivity", "soil_density", "soil_specific_heat",
"January", "February", "March",
"April", "May", "June",
"July", "August", "September",
"October", "November", "December"
))
expect_equal(nrow(epw$ground_temperature()), 3L)
# }}}
# $holiday {{{
expect_silent(epw <- Epw$new(path_epw))
expect_is(epw$holiday(), "list")
expect_equal(names(epw$holiday()), c("leapyear", "dst", "holiday"))
# leapyear
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_equal(epw$holiday()$leapyear, FALSE)
expect_error(epw$holiday(TRUE), class = "eplusr_error_epw_header")
# change to leapyear
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_warning(d <- epw$data(1, start_year = 2016, align_wday = FALSE))
# fabricate a Feb 29 by duplicating Feb 28, then flip the header flag and
# inject the data directly into private state to simulate a leap-year file
feb29 <- d[month == 2 & day == 28][, day := 29L]
d <- rbindlist(list(d, feb29))[order(month, day)]
get_priv_env(epw)$idf_env()$value[object_id == 5, value_chr := {value_chr[1] <- "Yes";value_chr}]
epw$.__enclos_env__$private$m_data <- d
expect_true(epw$holiday()$leapyear)
expect_error(epw$holiday(FALSE))
# dst
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_equal(epw$holiday(dst = c(1, 2))$dst, epw_date(1:2))
expect_equal(epw$holiday(dst = c(as.Date("2008-01-01"), as.Date("2008-02-01")))$dst, epw_date(c("Jan 01", "Feb 01")))
# holiday
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(hol <- epw$holiday(holiday = list(name = "New Year", day = "Jan 01")), "list")
expect_equal(hol$holiday,
data.table(index = 1L, name = "New Year", day = epw_date("1/1"))
)
# can restore the original data
expect_error(epw$holiday(holiday = list(name = "New Year", day = "Jan 41")))
expect_is(hol <- epw$holiday(), "list")
expect_equal(hol$holiday,
data.table(index = 1L, name = "New Year", day = epw_date("1/1"))
)
# }}}
# $comment() {{{
# comment1/comment2 act as getter when called empty, setter otherwise;
# empty string or NULL clears the comment
expect_is(epw$comment1(), "character")
expect_equal(epw$comment1("comment1"), "comment1")
expect_equal(epw$comment1(), "comment1")
expect_is(epw$comment2(), "character")
expect_equal(epw$comment2("comment2"), "comment2")
expect_equal(epw$comment2(), "comment2")
expect_null(epw$comment2(""))
expect_null(epw$comment2())
expect_null(epw$comment1(NULL))
expect_null(epw$comment1())
# }}}
# $num_period {{{
expect_equal(epw$num_period(), 1L)
# }}}
# $interval {{{
expect_equal(epw$interval(), 1L)
# }}}
# $period {{{
expect_is(epw$period(), "data.table")
expect_is(epw$period(1), "data.table")
expect_error(epw$period(2), class = "eplusr_error_epw_data_period_index")
expect_equal(epw$period(1, name = "test")$name, "test")
expect_error(epw$period(1, start_day_of_week = "test"), class = "eplusr_error_validity_check")
# start day of week accepts integer or (abbreviated) weekday name
expect_equal(epw$period(1, start_day_of_week = 3)$start_day_of_week, "Wednesday")
expect_equal(epw$period(1, start_day_of_week = "Wed")$start_day_of_week, "Wednesday")
expect_error(epw$period(1, start_day_of_week = "NoDay"))
expect_equal(epw$period(1)$start_day_of_week, "Wednesday")
# }}}
})
# }}}
# CONSTANTS {{{
test_that("Constant data", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
# EPW-format constants exposed by the class; lengths pin the spec tables
expect_is(epw$missing_code(), "list")
expect_equal(length(epw$missing_code()), 29L)
expect_is(epw$initial_missing_value(), "list")
expect_equal(length(epw$initial_missing_value()), 14L)
expect_is(epw$range_exist(), "list")
expect_equal(length(epw$range_exist()), 28L)
expect_is(epw$range_valid(), "list")
expect_equal(length(epw$range_valid()), 28L)
expect_is(epw$fill_action(), "list")
expect_equal(names(epw$fill_action()), c("use_previous", "use_zero"))
})
# }}}
# SAVE {{{
test_that("$save() & $is_unsaved()", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d_ori <- epw$data(), "data.table")
# $is_unsaved() {{{
expect_false(epw$is_unsaved())
# }}}
# $save() {{{
expect_error(epw$save(".idf"))
unlink(file.path(tempdir(), "test_save.epw"), force = TRUE)
expect_is(epw$save(file.path(tempdir(), "test_save.epw")), "character")
# saving to an existing file errors unless overwrite = TRUE
expect_error(epw$save(file.path(tempdir(), "test_save.epw")), class = "eplusr_error")
expect_is(epw$save(overwrite = TRUE), "character")
# round trip: re-reading the saved file reproduces the data
expect_is(epw1 <- Epw$new(file.path(tempdir(), "test_save.epw")), "Epw")
expect_equal(epw1$data(), d_ori)
# }}}
})
# }}}
# DATA GETTER {{{
test_that("Data Getter", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
# $data() {{{
# can get weather data
expect_error(epw$data(2), class = "eplusr_error_epw_data_period_index")
expect_equal(ncol(epw$data()), 36L)
expect_equal(nrow(epw$data()), 8760L)
# can use the origial datetime
expect_equal(year(epw$data(align_wday = FALSE)$datetime[8760]), 1998)
# can change year in datetime column
expect_equal(
epw$data(start_year = 2018, tz = "GMT")$datetime,
seq(as.POSIXct("2018-01-01 01:00:00", tz = "GMT"),
as.POSIXct("2019-01-01 00:00:00", tz = "GMT"),
by = "1 hour"
)
)
# can change the year column
expect_equal(epw$data(start_year = 2018, update = TRUE)$year, c(rep(2018L, times = 8759), 2019L))
# can detect if leap year mismatch found
expect_warning(epw$data(start_year = 2016))
expect_warning(epw$data(start_year = 2016, align_wday = FALSE))
# can change the time zone of datetime column in the returned weather data
expect_error(attr(epw$data(tz = "America/Chicago")$datetime, "tzone"), class = "eplusr_error_epw_data")
expect_equal(attr(epw$data(start_year = 2019, tz = "Etc/GMT+8")$datetime, "tzone"), "Etc/GMT+8")
# }}}
# $abnormal_data() {{{
expect_equal(nrow(epw$abnormal_data()), 8760)
expect_equal(nrow(epw$abnormal_data(type = "missing")), 8760)
expect_equal(nrow(epw$abnormal_data(type = "out_of_range")), 0L)
expect_true("line" %in% names(epw$abnormal_data()))
expect_equal(ncol(epw$abnormal_data()), 37L)
expect_equal(ncol(epw$abnormal_data(keep_all = FALSE)), 12L)
expect_equal(nrow(epw$abnormal_data(cols = "albedo")), 2160L)
expect_equal(ncol(epw$abnormal_data(cols = "albedo", keep_all = FALSE)), 8L)
expect_equal(nrow(epw$abnormal_data(cols = "albedo", type = "out_of_range")), 0L)
# }}}
# $redudant_data() {{{
expect_equal(nrow(epw$redundant_data()), 0L)
# }}}
})
# }}}
# DATA TAGGER {{{
test_that("Data Tagger", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
# $make_na() {{{
# missing codes become NA; calling twice only messages "already"
expect_true({
epw$make_na(missing = TRUE, out_of_range = TRUE)
all(is.na(epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo))
})
expect_message(with_verbose(epw$make_na(missing = TRUE, out_of_range = TRUE)), "already")
# }}}
# $fill_abnormal() {{{
expect_equal(
{
epw$fill_abnormal(missing = TRUE, out_of_range = TRUE, special = TRUE)
epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo
}, rep(999, 2160)
)
expect_message(with_verbose(epw$fill_abnormal(missing = TRUE, out_of_range = TRUE)), "already")
# }}}
# $add_unit() & $drop_unit() {{{
expect_is(class = "units",
{
epw$add_unit()
rad <- epw$data()$direct_normal_radiation
}
)
expect_true(all(units(rad)$numerator %in% c("W", "h")))
expect_equal(units(rad)$denominator, c("m", "m"))
expect_message(with_verbose(epw$add_unit()), "already")
expect_is(epw$drop_unit()$data()$dry_bulb_temperature, "numeric")
expect_message(with_verbose(epw$drop_unit()), "already")
# }}}
# $purge() {{{
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$purge(), "Epw")
# duplicate the private data to force purge() to actually remove rows
epw$.__enclos_env__$private$m_data <- rbindlist(list(get_priv_env(epw)$m_data, epw$data()))
expect_message(with_verbose(epw$purge()))
# }}}
})
# }}}
# DATA SETTER {{{
test_that("Data Setter", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- read_epw(path_epw), "Epw")
# $set() {{{
expect_is(d <- epw$data(), "data.table")
expect_output(with_verbose(epw$set(d, realyear = TRUE)))
expect_equal(epw$period(),
data.table(index = 1L, name = "Data", start_day_of_week = "Sunday",
start_day = epw_date("2017/1/1"), end_day = epw_date("2017/12/31")
)
)
expect_warning(epw$set(d, realyear = TRUE, start_day_of_week = "Monday"))
expect_warning(epw$set(d, realyear = TRUE))
expect_equal(epw$period()$start_day_of_week, "Sunday")
expect_is(epw$set(d[1:48]), "Epw")
expect_equal(epw$period(),
data.table(index = 1L, name = "Data", start_day_of_week = "Sunday",
start_day = epw_date("1/1", F), end_day = epw_date("1/2", F)
)
)
expect_equal(nrow(epw$data()), 48L)
# can remove extra columns
set(d, NULL, "extra_column", 1)
expect_is(epw$set(d), "Epw")
expect_equal(ncol(epw$data()), 36)
expect_error({
epw <- read_epw(path_epw)
suppressWarnings(d <- epw$data(start_year = 2020))
epw$set(d, TRUE)
})
expect_error({
epw <- read_epw(path_epw)
suppressWarnings(d <- epw$data(start_year = 2020))
d[100L, datetime := lubridate::as_datetime("2020-01-01 00:00:00")]
epw$set(d)
})
# }}}
# $add() {{{
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_error(epw$add(epw$data()), class = "eplusr_error_parse_epw")
# after 0L
expect_output(with_verbose(epw$add(epw$data(start_year = 2017), realyear = TRUE)))
expect_equal(epw$period()$name, c("Data1", "Data"))
expect_equal(lubridate::year(epw$data(1, align_wday = FALSE)$datetime[1]), 2017)
expect_equal(get_priv_env(epw)$m_log$matched,
data.table(index = 1:2, row = c(1L, 8761L), num = rep(8760L, 2))
)
# after N
expect_warning(d <- epw$data(start_year = 2014, align_wday = FALSE))
expect_is(epw$add(d, after = 10, realyear = TRUE), "Epw")
expect_equal(epw$period()$name, c("Data1", "Data", "Data2"))
expect_equal(lubridate::year(epw$data(3, align_wday = FALSE)$datetime[1]), 2014)
expect_equal(get_priv_env(epw)$m_log$matched,
data.table(index = 1:3, row = c(1L, 8761L, 17521L), num = rep(8760L, 3))
)
# between
expect_warning(d <- epw$data(1, start_year = 2013, align_wday = FALSE))
expect_is(epw$add(d, after = 2, realyear = TRUE), "Epw")
expect_equal(lubridate::year(epw$data(3, align_wday = FALSE)$datetime[1]), 2013)
expect_equal(get_priv_env(epw)$m_log$matched,
data.table(index = 1:4, row = c(1L, 8761L, 17521L, 26281L), num = rep(8760L, 4))
)
# the next four cases check all unit/NA-state combinations between the
# existing object and the data being added
# unit + no unit
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$add_unit(), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(u <- units(epw$data()$liquid_precip_rate)$numerator)
expect_equal(u, "h")
# unit + unit
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$add_unit(), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(u <- units(epw$data()$liquid_precip_rate)$numerator)
expect_equal(u, "h")
# no unit + unit
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$add_unit(), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$drop_unit(), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(u <- epw$data()$liquid_precip_rate)
expect_is(u, "numeric")
# no na + na
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$make_na(TRUE, TRUE), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$fill_abnormal(TRUE, TRUE), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_true(all(!is.na(epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo)))
# na + no na
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$make_na(TRUE, TRUE), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_true(all(is.na(epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo)))
# can remove extra columns
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
set(d, NULL, "extra_column", 1)
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(d <- epw$data())
expect_equal(ncol(d), 36)
# }}}
# $del() {{{
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_error(epw$del())
expect_error(epw$del(1))
expect_is(epw$add(epw$data(start_year = 2017), realyear = TRUE), "Epw")
expect_message(with_verbose(epw$del(1)))
# }}}
})
# }}}
# CLONE {{{
test_that("$clone()", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw1 <- Epw$new(path_epw), "Epw")
# a clone must be a deep copy: mutating the clone leaves the original intact
epw2 <- epw1$clone()
epw2$period(1, name = "Data2")
expect_equal(epw1$period()$name, "Data")
expect_equal(epw2$period()$name, "Data2")
})
# }}}
# PRINT {{{
test_that("$print()", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
# $print() {{{
expect_output(epw$print())
# }}}
})
# }}}
# S3 FORMAT {{{
test_that("str.Epw & format.Epw", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
expect_output(str(epw))
expect_is(format(epw), "character")
})
# }}}
# S3 EQUALITY {{{
test_that("==.Epw & !=.Epw", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
# equality is identity-based: two objects from the same file are NOT equal
expect_true(epw == epw)
expect_false(epw == Epw$new(path_epw))
expect_false(epw == 1)
expect_false(epw != epw)
expect_true(epw != Epw$new(path_epw))
})
# }}}
# DOWNLOAD_WEATHER {{{
test_that("download_weather()", {
skip_on_cran()
# download weather
# NOTE(review): these hit the network (EnergyPlus weather repository);
# consider skip_if_offline() -- TODO confirm policy for this package.
expect_message({path_epw <- with_verbose(
download_weather("USA_CA_San.Francisco.Intl.AP.724940_TMY3", ask = FALSE, type = "epw", dir = tempdir()))}
)
expect_message({path_epw <- with_verbose(
download_weather("USA_CA_San.Francisco.Intl.AP.724940_TMY3", ask = FALSE, type = "all", dir = tempdir()))}
)
expect_message({path_epw <- with_verbose(
download_weather("USA_CA_San.Francisco.Intl.AP.724940_TMY3", ask = FALSE, type = "ddy", dir = tempdir()))}
)
})
# }}}
| /tests/testthat/test-epw.R | permissive | thongle3103/eplusr | R | false | false | 18,989 | r | context("Epw Class")
# NOTE(review): everything below is a verbatim duplicate of the test-epw.R
# content that appears earlier in this dump (a repeated dataset row, not a
# second test file). It is annotated lightly; see the first copy for fuller
# section notes. The trailing "S3 FORMAT" test of this copy is truncated by
# the dump and is left untouched below this block.
eplusr_option(verbose_info = FALSE)
# IDD {{{
test_that("IDD", {
expect_is(idd <- EpwIdd$new(system.file("extdata/epw.idd", package = "eplusr")), "Idd")
expect_output(idd$print())
})
# }}}
# META {{{
test_that("Meta info", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- read_epw(path_epw), "Epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
# can update the path after saved
expect_equal(epw$path(), normalizePath(path_epw))
# can get definition
expect_is(epw$definition("LOCATION"), "IddObject")
})
# }}}
# HEADER {{{
test_that("Header getter and setter", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
# $location() {{{
expect_equal(
epw$location(city = "Chongqing", state_province = "Chongqing", country = "China",
data_source = "TMY", wmo_number = "724944", latitude = 20.0,
longitude = -120.0, time_zone = 8L, elevation = 100
),
list(city = "Chongqing",
state_province = "Chongqing",
country = "China",
data_source = "TMY",
wmo_number = "724944",
latitude = 20.0,
longitude = -120.0,
time_zone = 8L,
elevation = 100
)
)
expect_equal(epw$location(city = "chongqing")$city, "chongqing")
expect_error(epw$location(city = 1))
# }}}
# $design_condition() {{{
expect_is(epw$design_condition(), "list")
expect_equal(names(epw$design_condition()), c("source", "heating", "cooling", "extremes"))
# }}}
# $typical_extreme_period() {{{
expect_is(epw$typical_extreme_period(), "data.table")
expect_equal(names(epw$typical_extreme_period()), c("index", "name", "type", "start_day", "end_day"))
expect_equal(nrow(epw$typical_extreme_period()), 6L)
# }}}
# $ground_temperature {{{
expect_is(epw$ground_temperature(), "data.table")
expect_equal(names(epw$ground_temperature()), c(
"index", "depth",
"soil_conductivity", "soil_density", "soil_specific_heat",
"January", "February", "March",
"April", "May", "June",
"July", "August", "September",
"October", "November", "December"
))
expect_equal(nrow(epw$ground_temperature()), 3L)
# }}}
# NOTE(review): duplicated $ground_temperature block (copy-paste artifact).
# $ground_temperature {{{
expect_is(epw$ground_temperature(), "data.table")
expect_equal(names(epw$ground_temperature()), c(
"index", "depth",
"soil_conductivity", "soil_density", "soil_specific_heat",
"January", "February", "March",
"April", "May", "June",
"July", "August", "September",
"October", "November", "December"
))
expect_equal(nrow(epw$ground_temperature()), 3L)
# }}}
# $holiday {{{
expect_silent(epw <- Epw$new(path_epw))
expect_is(epw$holiday(), "list")
expect_equal(names(epw$holiday()), c("leapyear", "dst", "holiday"))
# leapyear
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_equal(epw$holiday()$leapyear, FALSE)
expect_error(epw$holiday(TRUE), class = "eplusr_error_epw_header")
# change to leapyear
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_warning(d <- epw$data(1, start_year = 2016, align_wday = FALSE))
feb29 <- d[month == 2 & day == 28][, day := 29L]
d <- rbindlist(list(d, feb29))[order(month, day)]
get_priv_env(epw)$idf_env()$value[object_id == 5, value_chr := {value_chr[1] <- "Yes";value_chr}]
epw$.__enclos_env__$private$m_data <- d
expect_true(epw$holiday()$leapyear)
expect_error(epw$holiday(FALSE))
# dst
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_equal(epw$holiday(dst = c(1, 2))$dst, epw_date(1:2))
expect_equal(epw$holiday(dst = c(as.Date("2008-01-01"), as.Date("2008-02-01")))$dst, epw_date(c("Jan 01", "Feb 01")))
# holiday
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(hol <- epw$holiday(holiday = list(name = "New Year", day = "Jan 01")), "list")
expect_equal(hol$holiday,
data.table(index = 1L, name = "New Year", day = epw_date("1/1"))
)
# can restore the original data
expect_error(epw$holiday(holiday = list(name = "New Year", day = "Jan 41")))
expect_is(hol <- epw$holiday(), "list")
expect_equal(hol$holiday,
data.table(index = 1L, name = "New Year", day = epw_date("1/1"))
)
# }}}
# $comment() {{{
expect_is(epw$comment1(), "character")
expect_equal(epw$comment1("comment1"), "comment1")
expect_equal(epw$comment1(), "comment1")
expect_is(epw$comment2(), "character")
expect_equal(epw$comment2("comment2"), "comment2")
expect_equal(epw$comment2(), "comment2")
expect_null(epw$comment2(""))
expect_null(epw$comment2())
expect_null(epw$comment1(NULL))
expect_null(epw$comment1())
# }}}
# $num_period {{{
expect_equal(epw$num_period(), 1L)
# }}}
# $interval {{{
expect_equal(epw$interval(), 1L)
# }}}
# $period {{{
expect_is(epw$period(), "data.table")
expect_is(epw$period(1), "data.table")
expect_error(epw$period(2), class = "eplusr_error_epw_data_period_index")
expect_equal(epw$period(1, name = "test")$name, "test")
expect_error(epw$period(1, start_day_of_week = "test"), class = "eplusr_error_validity_check")
expect_equal(epw$period(1, start_day_of_week = 3)$start_day_of_week, "Wednesday")
expect_equal(epw$period(1, start_day_of_week = "Wed")$start_day_of_week, "Wednesday")
expect_error(epw$period(1, start_day_of_week = "NoDay"))
expect_equal(epw$period(1)$start_day_of_week, "Wednesday")
# }}}
})
# }}}
# CONSTANTS {{{
test_that("Constant data", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
expect_is(epw$missing_code(), "list")
expect_equal(length(epw$missing_code()), 29L)
expect_is(epw$initial_missing_value(), "list")
expect_equal(length(epw$initial_missing_value()), 14L)
expect_is(epw$range_exist(), "list")
expect_equal(length(epw$range_exist()), 28L)
expect_is(epw$range_valid(), "list")
expect_equal(length(epw$range_valid()), 28L)
expect_is(epw$fill_action(), "list")
expect_equal(names(epw$fill_action()), c("use_previous", "use_zero"))
})
# }}}
# SAVE {{{
test_that("$save() & $is_unsaved()", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d_ori <- epw$data(), "data.table")
# $is_unsaved() {{{
expect_false(epw$is_unsaved())
# }}}
# $save() {{{
expect_error(epw$save(".idf"))
unlink(file.path(tempdir(), "test_save.epw"), force = TRUE)
expect_is(epw$save(file.path(tempdir(), "test_save.epw")), "character")
expect_error(epw$save(file.path(tempdir(), "test_save.epw")), class = "eplusr_error")
expect_is(epw$save(overwrite = TRUE), "character")
expect_is(epw1 <- Epw$new(file.path(tempdir(), "test_save.epw")), "Epw")
expect_equal(epw1$data(), d_ori)
# }}}
})
# }}}
# DATA GETTER {{{
test_that("Data Getter", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
# $data() {{{
# can get weather data
expect_error(epw$data(2), class = "eplusr_error_epw_data_period_index")
expect_equal(ncol(epw$data()), 36L)
expect_equal(nrow(epw$data()), 8760L)
# can use the origial datetime
expect_equal(year(epw$data(align_wday = FALSE)$datetime[8760]), 1998)
# can change year in datetime column
expect_equal(
epw$data(start_year = 2018, tz = "GMT")$datetime,
seq(as.POSIXct("2018-01-01 01:00:00", tz = "GMT"),
as.POSIXct("2019-01-01 00:00:00", tz = "GMT"),
by = "1 hour"
)
)
# can change the year column
expect_equal(epw$data(start_year = 2018, update = TRUE)$year, c(rep(2018L, times = 8759), 2019L))
# can detect if leap year mismatch found
expect_warning(epw$data(start_year = 2016))
expect_warning(epw$data(start_year = 2016, align_wday = FALSE))
# can change the time zone of datetime column in the returned weather data
expect_error(attr(epw$data(tz = "America/Chicago")$datetime, "tzone"), class = "eplusr_error_epw_data")
expect_equal(attr(epw$data(start_year = 2019, tz = "Etc/GMT+8")$datetime, "tzone"), "Etc/GMT+8")
# }}}
# $abnormal_data() {{{
expect_equal(nrow(epw$abnormal_data()), 8760)
expect_equal(nrow(epw$abnormal_data(type = "missing")), 8760)
expect_equal(nrow(epw$abnormal_data(type = "out_of_range")), 0L)
expect_true("line" %in% names(epw$abnormal_data()))
expect_equal(ncol(epw$abnormal_data()), 37L)
expect_equal(ncol(epw$abnormal_data(keep_all = FALSE)), 12L)
expect_equal(nrow(epw$abnormal_data(cols = "albedo")), 2160L)
expect_equal(ncol(epw$abnormal_data(cols = "albedo", keep_all = FALSE)), 8L)
expect_equal(nrow(epw$abnormal_data(cols = "albedo", type = "out_of_range")), 0L)
# }}}
# $redudant_data() {{{
expect_equal(nrow(epw$redundant_data()), 0L)
# }}}
})
# }}}
# DATA TAGGER {{{
test_that("Data Tagger", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- Epw$new(path_epw), "Epw")
# $make_na() {{{
expect_true({
epw$make_na(missing = TRUE, out_of_range = TRUE)
all(is.na(epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo))
})
expect_message(with_verbose(epw$make_na(missing = TRUE, out_of_range = TRUE)), "already")
# }}}
# $fill_abnormal() {{{
expect_equal(
{
epw$fill_abnormal(missing = TRUE, out_of_range = TRUE, special = TRUE)
epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo
}, rep(999, 2160)
)
expect_message(with_verbose(epw$fill_abnormal(missing = TRUE, out_of_range = TRUE)), "already")
# }}}
# $add_unit() & $drop_unit() {{{
expect_is(class = "units",
{
epw$add_unit()
rad <- epw$data()$direct_normal_radiation
}
)
expect_true(all(units(rad)$numerator %in% c("W", "h")))
expect_equal(units(rad)$denominator, c("m", "m"))
expect_message(with_verbose(epw$add_unit()), "already")
expect_is(epw$drop_unit()$data()$dry_bulb_temperature, "numeric")
expect_message(with_verbose(epw$drop_unit()), "already")
# }}}
# $purge() {{{
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$purge(), "Epw")
epw$.__enclos_env__$private$m_data <- rbindlist(list(get_priv_env(epw)$m_data, epw$data()))
expect_message(with_verbose(epw$purge()))
# }}}
})
# }}}
# DATA SETTER {{{
test_that("Data Setter", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw <- read_epw(path_epw), "Epw")
# $set() {{{
expect_is(d <- epw$data(), "data.table")
expect_output(with_verbose(epw$set(d, realyear = TRUE)))
expect_equal(epw$period(),
data.table(index = 1L, name = "Data", start_day_of_week = "Sunday",
start_day = epw_date("2017/1/1"), end_day = epw_date("2017/12/31")
)
)
expect_warning(epw$set(d, realyear = TRUE, start_day_of_week = "Monday"))
expect_warning(epw$set(d, realyear = TRUE))
expect_equal(epw$period()$start_day_of_week, "Sunday")
expect_is(epw$set(d[1:48]), "Epw")
expect_equal(epw$period(),
data.table(index = 1L, name = "Data", start_day_of_week = "Sunday",
start_day = epw_date("1/1", F), end_day = epw_date("1/2", F)
)
)
expect_equal(nrow(epw$data()), 48L)
# can remove extra columns
set(d, NULL, "extra_column", 1)
expect_is(epw$set(d), "Epw")
expect_equal(ncol(epw$data()), 36)
expect_error({
epw <- read_epw(path_epw)
suppressWarnings(d <- epw$data(start_year = 2020))
epw$set(d, TRUE)
})
expect_error({
epw <- read_epw(path_epw)
suppressWarnings(d <- epw$data(start_year = 2020))
d[100L, datetime := lubridate::as_datetime("2020-01-01 00:00:00")]
epw$set(d)
})
# }}}
# $add() {{{
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_error(epw$add(epw$data()), class = "eplusr_error_parse_epw")
# after 0L
expect_output(with_verbose(epw$add(epw$data(start_year = 2017), realyear = TRUE)))
expect_equal(epw$period()$name, c("Data1", "Data"))
expect_equal(lubridate::year(epw$data(1, align_wday = FALSE)$datetime[1]), 2017)
expect_equal(get_priv_env(epw)$m_log$matched,
data.table(index = 1:2, row = c(1L, 8761L), num = rep(8760L, 2))
)
# after N
expect_warning(d <- epw$data(start_year = 2014, align_wday = FALSE))
expect_is(epw$add(d, after = 10, realyear = TRUE), "Epw")
expect_equal(epw$period()$name, c("Data1", "Data", "Data2"))
expect_equal(lubridate::year(epw$data(3, align_wday = FALSE)$datetime[1]), 2014)
expect_equal(get_priv_env(epw)$m_log$matched,
data.table(index = 1:3, row = c(1L, 8761L, 17521L), num = rep(8760L, 3))
)
# between
expect_warning(d <- epw$data(1, start_year = 2013, align_wday = FALSE))
expect_is(epw$add(d, after = 2, realyear = TRUE), "Epw")
expect_equal(lubridate::year(epw$data(3, align_wday = FALSE)$datetime[1]), 2013)
expect_equal(get_priv_env(epw)$m_log$matched,
data.table(index = 1:4, row = c(1L, 8761L, 17521L, 26281L), num = rep(8760L, 4))
)
# unit + no unit
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$add_unit(), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(u <- units(epw$data()$liquid_precip_rate)$numerator)
expect_equal(u, "h")
# unit + unit
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$add_unit(), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(u <- units(epw$data()$liquid_precip_rate)$numerator)
expect_equal(u, "h")
# no unit + unit
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$add_unit(), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$drop_unit(), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(u <- epw$data()$liquid_precip_rate)
expect_is(u, "numeric")
# no na + na
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(epw$make_na(TRUE, TRUE), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$fill_abnormal(TRUE, TRUE), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_true(all(!is.na(epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo)))
# na + no na
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
expect_is(epw$make_na(TRUE, TRUE), "Epw")
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_true(all(is.na(epw$abnormal_data(cols = "albedo", keep_all = FALSE, type = "missing")$albedo)))
# can remove extra columns
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_is(d <- epw$data(start_year = 2017), "data.table")
set(d, NULL, "extra_column", 1)
expect_is(epw$add(d, realyear = TRUE), "Epw")
expect_warning(d <- epw$data())
expect_equal(ncol(d), 36)
# }}}
# $del() {{{
expect_is(epw <- Epw$new(path_epw), "Epw")
expect_error(epw$del())
expect_error(epw$del(1))
expect_is(epw$add(epw$data(start_year = 2017), realyear = TRUE), "Epw")
expect_message(with_verbose(epw$del(1)))
# }}}
})
# }}}
# CLONE {{{
test_that("$clone()", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_is(epw1 <- Epw$new(path_epw), "Epw")
epw2 <- epw1$clone()
epw2$period(1, name = "Data2")
expect_equal(epw1$period()$name, "Data")
expect_equal(epw2$period()$name, "Data2")
})
# }}}
# PRINT {{{
test_that("$print()", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
# $print() {{{
expect_output(epw$print())
# }}}
})
# }}}
# S3 FORMAT {{{
test_that("str.Epw & format.Epw", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
expect_output(str(epw))
expect_is(format(epw), "character")
})
# }}}
# S3 EQUALITY {{{
test_that("==.Epw & !=.Epw", {
skip_on_cran()
if (!is_avail_eplus(8.8)) install_eplus(8.8)
path_epw <- file.path(eplus_config(8.8)$dir, "WeatherData", "USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw")
expect_silent(epw <- Epw$new(path_epw))
expect_true(epw == epw)
expect_false(epw == Epw$new(path_epw))
expect_false(epw == 1)
expect_false(epw != epw)
expect_true(epw != Epw$new(path_epw))
})
# }}}
# DOWNLOAD_WEATHER {{{
test_that("download_weather()", {
skip_on_cran()
# download weather
expect_message({path_epw <- with_verbose(
download_weather("USA_CA_San.Francisco.Intl.AP.724940_TMY3", ask = FALSE, type = "epw", dir = tempdir()))}
)
expect_message({path_epw <- with_verbose(
download_weather("USA_CA_San.Francisco.Intl.AP.724940_TMY3", ask = FALSE, type = "all", dir = tempdir()))}
)
expect_message({path_epw <- with_verbose(
download_weather("USA_CA_San.Francisco.Intl.AP.724940_TMY3", ask = FALSE, type = "ddy", dir = tempdir()))}
)
})
# }}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/session_run_hooks_builtin_wrappers.R
\name{hook_nan_tensor}
\alias{hook_nan_tensor}
\title{NaN Loss Monitor}
\usage{
hook_nan_tensor(loss_tensor, fail_on_nan_loss = TRUE)
}
\arguments{
\item{loss_tensor}{The loss tensor.}
\item{fail_on_nan_loss}{A boolean indicating whether to raise an exception when the loss is NaN.}
}
\description{
Monitors loss and stops training if loss is NaN. Can either fail with
exception or just stop training.
}
\seealso{
Other session_run_hook wrappers: \code{\link{hook_checkpoint_saver}},
\code{\link{hook_global_step_waiter}},
\code{\link{hook_history_saver}},
\code{\link{hook_logging_tensor}},
\code{\link{hook_progress_bar}},
\code{\link{hook_step_counter}},
\code{\link{hook_stop_at_step}},
\code{\link{hook_summary_saver}},
\code{\link{session_run_hook}}
}
| /man/hook_nan_tensor.Rd | no_license | MhAmine/tfestimators | R | false | true | 883 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/session_run_hooks_builtin_wrappers.R
\name{hook_nan_tensor}
\alias{hook_nan_tensor}
\title{NaN Loss Monitor}
\usage{
hook_nan_tensor(loss_tensor, fail_on_nan_loss = TRUE)
}
\arguments{
\item{loss_tensor}{The loss tensor.}
\item{fail_on_nan_loss}{A boolean indicating whether to raise an exception when the loss is NaN.}
}
\description{
Monitors loss and stops training if loss is NaN. Can either fail with
exception or just stop training.
}
\seealso{
Other session_run_hook wrappers: \code{\link{hook_checkpoint_saver}},
\code{\link{hook_global_step_waiter}},
\code{\link{hook_history_saver}},
\code{\link{hook_logging_tensor}},
\code{\link{hook_progress_bar}},
\code{\link{hook_step_counter}},
\code{\link{hook_stop_at_step}},
\code{\link{hook_summary_saver}},
\code{\link{session_run_hook}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{inference}
\alias{inference}
\title{Given a test market, analyze the impact of an intervention}
\usage{
inference(matched_markets=NULL,
bsts_modelargs=NULL,
test_market=NULL,
end_post_period=NULL,
alpha=0.05,
prior_level_sd=0.01,
control_matches=5,
analyze_betas=FALSE,
nseasons=NULL)
}
\arguments{
\item{matched_markets}{A matched_market object created by the market_matching function}
\item{bsts_modelargs}{A list() that passes model parameters directly to bsts -- such as list(niter = 1000, nseasons = 52, prior.level.sd=0.1)
This parameter will overwrite the values specified in prior_level_sd and nseasons. ONLY use this if you're using intricate bsts settings
For most use-cases, using the prior_level_sd and nseasons parameters should be sufficient}
\item{test_market}{The name of the test market (character)}
\item{end_post_period}{The end date of the post period. Must be a character of format "YYYY-MM-DD" -- e.g., "2015-11-01"}
\item{alpha}{Desired tail-area probability for posterior intervals. For example, 0.05 yields 0.95 intervals}
\item{prior_level_sd}{Prior SD for the local level term (Gaussian random walk). Default is 0.01. The bigger this number is, the more wiggliness is allowed for the local level term.
Note that more wiggly local level terms also translate into larger posterior intervals
This parameter will be overwritten if you're using the bsts_modelargs parameter}
\item{control_matches}{Number of matching control markets to use in the analysis (default is 5)}
\item{analyze_betas}{Controls whether to test the model under a variety of different values for prior_level_sd.}
\item{nseasons}{Seasonality for the bsts model -- e.g., 52 for weekly seasonality}
}
\value{
Returns an object of type \code{inference}. The object has the
following elements:
\item{\code{AbsoluteEffect}}{The estimated absolute effect of the intervention}
\item{\code{AbsoluteEffectLower}}{The lower limit of the estimated absolute effect of the intervention.
This is based on the posterior interval of the counterfactual predictions.
The width of the interval is determined by the \code{alpha} parameter.}
\item{\code{AbsoluteEffectUpper}}{The upper limit of the estimated absolute effect of the intervention.
This is based on the posterior interval of the counterfactual predictions.
The width of the interval is determined by the \code{alpha} parameter.}
\item{\code{RelativeEffectLower}}{Same as the above, just for relative (percentage) effects}
\item{\code{RelativeEffectUpper}}{Same as the above, just for relative (percentage) effects}
\item{\code{TailProb}}{Posterior probability of a non-zero effect}
\item{\code{PrePeriodMAPE}}{Pre-intervention period MAPE}
\item{\code{DW}}{Durbin-Watson statistic. Should be close to 2.}
\item{\code{PlotActualVersusExpected}}{Plot of actual versus expected using \code{ggplot2}}
\item{\code{PlotCumulativeEffect}}{Plot of the cumulative effect using \code{ggplot2}}
\item{\code{PlotPointEffect}}{Plot of the pointwise effect using \code{ggplot2}}
\item{\code{PlotActuals}}{Plot of the actual values for the test and control markets using \code{ggplot2}}
\item{\code{PlotPriorLevelSdAnalysis}}{Plot of DW and MAPE for different values of the local level SE using \code{ggplot2}}
\item{\code{PlotLocalLevel}}{Plot of the local level term using \code{ggplot2}}
\item{\code{TestData}}{A \code{data.frame} with the test market data}
\item{\code{ControlData}}{A \code{data.frame} with the data for the control markets}
\item{\code{PlotResiduals}}{Plot of the residuals using \code{ggplot2}}
\item{\code{TestName}}{The name of the test market}
\item{\code{ControlName}}{The name of the control market}
\item{\code{zooData}}{A \code{zoo} time series object with the test and control data}
\item{\code{Predictions}}{Actual versus predicted values}
\item{\code{CausalImpactObject}}{The CausalImpact object created}
\item{\code{Coefficients}}{The average posterior coefficients}
}
\description{
\code{inference} Analyzes the causal impact of an intervention using the CausalImpact package, given a test market and a matched_market object from the best_matches function.
The function returns an object of type "market_inference" which contains the estimated impact of the intervention (absolute and relative).
}
\examples{
library(MarketMatching)
##-----------------------------------------------------------------------
## Analyze causal impact of a made-up weather intervention in Copenhagen
## Since this is weather data it is a not a very meaningful example.
## This is merely to demonstrate the function.
##-----------------------------------------------------------------------
data(weather, package="MarketMatching")
mm <- best_matches(data=weather,
id="Area",
markets_to_be_matched=c("CPH", "SFO"),
date_variable="Date",
matching_variable="Mean_TemperatureF",
parallel=FALSE,
warping_limit=1, # warping limit=1
dtw_emphasis=1, # rely only on dtw for pre-screening
matches=5, # request 5 matches
start_match_period="2014-01-01",
end_match_period="2014-10-01")
library(CausalImpact)
results <- inference(matched_markets=mm,
test_market="CPH",
analyze_betas=FALSE,
control_matches=5, # use all 5 matches for inference
end_post_period="2015-12-15",
prior_level_sd=0.002)
}
| /man/inference.Rd | permissive | qwert666/MarketMatching | R | false | true | 5,699 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{inference}
\alias{inference}
\title{Given a test market, analyze the impact of an intervention}
\usage{
inference(matched_markets=NULL,
bsts_modelargs=NULL,
test_market=NULL,
end_post_period=NULL,
alpha=0.05,
prior_level_sd=0.01,
control_matches=5,
analyze_betas=FALSE,
nseasons=NULL)
}
\arguments{
\item{matched_markets}{A matched_market object created by the market_matching function}
\item{bsts_modelargs}{A list() that passes model parameters directly to bsts -- such as list(niter = 1000, nseasons = 52, prior.level.sd=0.1)
This parameter will overwrite the values specified in prior_level_sd and nseasons. ONLY use this if you're using intricate bsts settings
For most use-cases, using the prior_level_sd and nseasons parameters should be sufficient}
\item{test_market}{The name of the test market (character)}
\item{end_post_period}{The end date of the post period. Must be a character of format "YYYY-MM-DD" -- e.g., "2015-11-01"}
\item{alpha}{Desired tail-area probability for posterior intervals. For example, 0.05 yields 0.95 intervals}
\item{prior_level_sd}{Prior SD for the local level term (Gaussian random walk). Default is 0.01. The bigger this number is, the more wiggliness is allowed for the local level term.
Note that more wiggly local level terms also translate into larger posterior intervals
This parameter will be overwritten if you're using the bsts_modelargs parameter}
\item{control_matches}{Number of matching control markets to use in the analysis (default is 5)}
\item{analyze_betas}{Controls whether to test the model under a variety of different values for prior_level_sd.}
\item{nseasons}{Seasonality for the bsts model -- e.g., 52 for weekly seasonality}
}
\value{
Returns an object of type \code{inference}. The object has the
following elements:
\item{\code{AbsoluteEffect}}{The estimated absolute effect of the intervention}
\item{\code{AbsoluteEffectLower}}{The lower limit of the estimated absolute effect of the intervention.
This is based on the posterior interval of the counterfactual predictions.
The width of the interval is determined by the \code{alpha} parameter.}
\item{\code{AbsoluteEffectUpper}}{The upper limit of the estimated absolute effect of the intervention.
This is based on the posterior interval of the counterfactual predictions.
The width of the interval is determined by the \code{alpha} parameter.}
\item{\code{RelativeEffectLower}}{Same as the above, just for relative (percentage) effects}
\item{\code{RelativeEffectUpper}}{Same as the above, just for relative (percentage) effects}
\item{\code{TailProb}}{Posterior probability of a non-zero effect}
\item{\code{PrePeriodMAPE}}{Pre-intervention period MAPE}
\item{\code{DW}}{Durbin-Watson statistic. Should be close to 2.}
\item{\code{PlotActualVersusExpected}}{Plot of actual versus expected using \code{ggplot2}}
\item{\code{PlotCumulativeEffect}}{Plot of the cumulative effect using \code{ggplot2}}
\item{\code{PlotPointEffect}}{Plot of the pointwise effect using \code{ggplot2}}
\item{\code{PlotActuals}}{Plot of the actual values for the test and control markets using \code{ggplot2}}
\item{\code{PlotPriorLevelSdAnalysis}}{Plot of DW and MAPE for different values of the local level SE using \code{ggplot2}}
\item{\code{PlotLocalLevel}}{Plot of the local level term using \code{ggplot2}}
\item{\code{TestData}}{A \code{data.frame} with the test market data}
\item{\code{ControlData}}{A \code{data.frame} with the data for the control markets}
\item{\code{PlotResiduals}}{Plot of the residuals using \code{ggplot2}}
\item{\code{TestName}}{The name of the test market}
\item{\code{ControlName}}{The name of the control market}
\item{\code{zooData}}{A \code{zoo} time series object with the test and control data}
\item{\code{Predictions}}{Actual versus predicted values}
\item{\code{CausalImpactObject}}{The CausalImpact object created}
\item{\code{Coefficients}}{The average posterior coefficients}
}
\description{
\code{inference} Analyzes the causal impact of an intervention using the CausalImpact package, given a test market and a matched_market object from the best_matches function.
The function returns an object of type "market_inference" which contains the estimated impact of the intervention (absolute and relative).
}
\examples{
library(MarketMatching)
##-----------------------------------------------------------------------
## Analyze causal impact of a made-up weather intervention in Copenhagen
## Since this is weather data it is a not a very meaningful example.
## This is merely to demonstrate the function.
##-----------------------------------------------------------------------
data(weather, package="MarketMatching")
mm <- best_matches(data=weather,
id="Area",
markets_to_be_matched=c("CPH", "SFO"),
date_variable="Date",
matching_variable="Mean_TemperatureF",
parallel=FALSE,
warping_limit=1, # warping limit=1
dtw_emphasis=1, # rely only on dtw for pre-screening
matches=5, # request 5 matches
start_match_period="2014-01-01",
end_match_period="2014-10-01")
library(CausalImpact)
results <- inference(matched_markets=mm,
test_market="CPH",
analyze_betas=FALSE,
control_matches=5, # use all 5 matches for inference
end_post_period="2015-12-15",
prior_level_sd=0.002)
}
|
# Shiny server: renders a stock closing-price chart and a Corona measurement
# chart, each with a hover tooltip and brush-then-double-click zooming.
server <- function(input, output, session) {
  ############################################################# Stocks
  # Stock data for the selected ticker(s) and date window.
  # NOTE(review): stock_dataset_DE() is defined elsewhere — based on the
  # plot below it appears to return Date, Close. and name columns; confirm.
  stockdata_DE <- reactive({
    req(input$Stock)
    stock_dataset_DE(input$Stock, input$dates[1], input$dates[2])
  })
  # Reset button clears the current stock selection
  observeEvent(input$reset, {
    updateSelectizeInput(session, "Stock", selected = "")
  })
  # Line plot of closing prices; honours the zoom ranges captured below
  output$plot_DE <- renderPlot({
    req(input$Stock)
    if (!is.null(ranges$x)) {
      # Brush coordinates arrive as plain numerics; restore Date class so
      # they work as x-axis limits in coord_cartesian()
      ranges$x <- as.Date(ranges$x, origin = "1970-01-01")
    }
    ggplot(stockdata_DE(), aes(Date, Close., color = name)) +
      geom_line() +
      theme_classic() +
      coord_cartesian(xlim = ranges$x, ylim = ranges$y, expand = FALSE)
  })
  # Hover info box shown next to the cursor (helper defined elsewhere)
  output$hover_info_DE <- renderUI({
    req(input$hovering)
    create_hover_info_DE(input$plot_hover_DE, stockdata_DE())
  })
  # Zoom state: double-click with an active brush zooms to the brushed
  # rectangle; double-click without a brush resets to the full extent
  ranges <- reactiveValues(x = NULL, y = NULL)
  observeEvent(input$plot1_dblclick, {
    brush <- input$plot1_brush
    if (!is.null(brush)) {
      ranges$x <- c(brush$xmin, brush$xmax)
      ranges$y <- c(brush$ymin, brush$ymax)
    } else {
      ranges$x <- NULL
      ranges$y <- NULL
    }
  })
  #####################################################################
  ##################################################################### Corona
  # Corona data for the selected countries and date window.
  # NOTE(review): CORONA() is defined elsewhere; the plot expects "date",
  # "location" and the user-chosen measurement column — confirm schema.
  corona_data <- reactive({
    CORONA(input$CoronaCountry, input$dates_corona[1], input$dates_corona[2])
  })
  output$corona_plot <- renderPlot({
    if (!is.null(ranges2$x)) {
      # Restore Date class on brushed x-limits, as in the stock plot
      ranges2$x <- as.Date(ranges2$x, origin = "1970-01-01")
    }
    # aes_string() because the y-variable name is supplied as a string input
    ggplot(corona_data(), aes_string("date", input$corona_measurement, color = "location")) +
      geom_line() +
      theme_classic() +
      coord_cartesian(xlim = ranges2$x, ylim = ranges2$y, expand = FALSE)
  })
  # Hover info box for the corona chart
  output$hover_info_corona <- renderUI({
    req(input$hovering_corona)
    create_hover_info_corona(input$plot_hover_corona, corona_data(), input$corona_measurement)
  })
  # Zoom state for the corona chart (same brush + double-click pattern)
  ranges2 <- reactiveValues(x = NULL, y = NULL)
  observeEvent(input$plot_corona_dblclick, {
    brush <- input$plot_corona_brush
    if (!is.null(brush)) {
      ranges2$x <- c(brush$xmin, brush$xmax)
      ranges2$y <- c(brush$ymin, brush$ymax)
    } else {
      ranges2$x <- NULL
      ranges2$y <- NULL
    }
  })
}
| /test_simon/test_kai/inst_test/server.R | no_license | lubrunn/DSP_App | R | false | false | 2,378 | r | server <- function(input, output, session) {
############################################################# Stocks
# load stock dataset
stockdata_DE <- reactive({
req(input$Stock)
stock_dataset_DE(input$Stock,input$dates[1],input$dates[2])
})
# reset button for stock selection
observeEvent(input$reset,{
updateSelectizeInput(session,"Stock",selected = "")
})
# plot of the stocks
output$plot_DE <- renderPlot({
req(input$Stock)
if (!is.null(ranges$x)) {
ranges$x <- as.Date(ranges$x, origin = "1970-01-01")
}
ggplot(stockdata_DE(),aes(Date,Close.,color = name))+
geom_line()+
theme_classic()+
coord_cartesian(xlim = ranges$x, ylim = ranges$y, expand = FALSE)
})
# hover info box
output$hover_info_DE <- renderUI({
req(input$hovering)
create_hover_info_DE(input$plot_hover_DE,stockdata_DE())
})
# zoom functionality
ranges <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$plot1_dblclick, {
brush <- input$plot1_brush
if (!is.null(brush)) {
ranges$x <- c(brush$xmin, brush$xmax)
ranges$y <- c(brush$ymin, brush$ymax)
} else {
ranges$x <- NULL
ranges$y <- NULL
}
})
#####################################################################
##################################################################### Corona
corona_data <- reactive({
CORONA(input$CoronaCountry,input$dates_corona[1],input$dates_corona[2])
})
output$corona_plot <- renderPlot({
if (!is.null(ranges2$x)) {
ranges2$x <- as.Date(ranges2$x, origin = "1970-01-01")
}
ggplot(corona_data(), aes_string("date",input$corona_measurement,color = "location"))+
geom_line() +
theme_classic() +
coord_cartesian(xlim = ranges2$x, ylim = ranges2$y, expand = FALSE)
})
# hover info box
output$hover_info_corona <- renderUI({
req(input$hovering_corona)
create_hover_info_corona(input$plot_hover_corona, corona_data(),input$corona_measurement)
})
# zoom functionality
ranges2 <- reactiveValues(x = NULL, y = NULL)
observeEvent(input$plot_corona_dblclick, {
brush <- input$plot_corona_brush
if (!is.null(brush)) {
ranges2$x <- c(brush$xmin, brush$xmax)
ranges2$y <- c(brush$ymin, brush$ymax)
} else {
ranges2$x <- NULL
ranges2$y <- NULL
}
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis_guess.R
\name{vis_guess}
\alias{vis_guess}
\title{Visualise type guess in a data.frame}
\usage{
vis_guess(x, palette = "default")
}
\arguments{
\item{x}{a data.frame object}
\item{palette}{character "default", "qual" or "cb_safe". "default" (the
default) provides the stock ggplot scale for separating the colours.
"qual" uses an experimental qualitative colour scheme for providing
distinct colours for each Type. "cb_safe" is a set of colours that are
appropriate for those with colourblindness. "qual" and "cb_safe" are drawn
from http://colorbrewer2.org/.}
}
\value{
\code{ggplot2} object displaying the guess of the type of values in the
data frame and the position of any missing values.
}
\description{
\code{vis_guess} visualises the class of every single individual cell in a
dataframe and displays it as ggplot object, similar to \code{vis_dat}. Cells
are coloured according to what class they are and whether the values are
missing. \code{vis_guess} estimates the class of individual elements using
\code{readr::guess_parser}. It may currently be slow on larger datasets.
}
\examples{
messy_vector <- c(TRUE,
"TRUE",
"T",
"01/01/01",
"01/01/2001",
NA,
NaN,
"NA",
"Na",
"na",
"10",
10,
"10.1",
10.1,
"abc",
"$\%TG")
set.seed(1114)
messy_df <- data.frame(var1 = messy_vector,
var2 = sample(messy_vector),
var3 = sample(messy_vector))
vis_guess(messy_df)
}
\seealso{
\code{\link[=vis_miss]{vis_miss()}} \code{\link[=vis_dat]{vis_dat()}} \code{\link[=vis_miss_ly]{vis_miss_ly()}} \code{\link[=vis_compare]{vis_compare()}}
}
| /man/vis_guess.Rd | no_license | maskegger/visdat | R | false | true | 1,917 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis_guess.R
\name{vis_guess}
\alias{vis_guess}
\title{Visualise type guess in a data.frame}
\usage{
vis_guess(x, palette = "default")
}
\arguments{
\item{x}{a data.frame object}
\item{palette}{character "default", "qual" or "cb_safe". "default" (the
default) provides the stock ggplot scale for separating the colours.
"qual" uses an experimental qualitative colour scheme for providing
distinct colours for each Type. "cb_safe" is a set of colours that are
appropriate for those with colourblindness. "qual" and "cb_safe" are drawn
from http://colorbrewer2.org/.}
}
\value{
\code{ggplot2} object displaying the guess of the type of values in the
data frame and the position of any missing values.
}
\description{
\code{vis_guess} visualises the class of every single individual cell in a
dataframe and displays it as ggplot object, similar to \code{vis_dat}. Cells
are coloured according to what class they are and whether the values are
missing. \code{vis_guess} estimates the class of individual elements using
\code{readr::guess_parser}. It may currently be slow on larger datasets.
}
\examples{
messy_vector <- c(TRUE,
"TRUE",
"T",
"01/01/01",
"01/01/2001",
NA,
NaN,
"NA",
"Na",
"na",
"10",
10,
"10.1",
10.1,
"abc",
"$\%TG")
set.seed(1114)
messy_df <- data.frame(var1 = messy_vector,
var2 = sample(messy_vector),
var3 = sample(messy_vector))
vis_guess(messy_df)
}
\seealso{
\code{\link[=vis_miss]{vis_miss()}} \code{\link[=vis_dat]{vis_dat()}} \code{\link[=vis_miss_ly]{vis_miss_ly()}} \code{\link[=vis_compare]{vis_compare()}}
}
|
source("./src/global.R")
source("./src/functions.R")
### UI-SIDE --------------------------------------------------------------------
#### Dashboard sidebar elements
# Dashboard sidebar: an About tab plus four collapsible menu sections that
# hold all the inputs driving the climate response surface plot. All inner
# controls are rendered server-side via uiOutput() placeholders.
sidebar = dashboardSidebar(
  width = 400,
  useShinyjs(),
  includeCSS("www/style.css"),
  sidebarMenu(
    br(),
    #uiOutput('demoButtonUI'),
    convertMenuItem(menuItem("About", tabName="Page1", selected = TRUE), tabName="Page1"),
    # Section 1: stress-test data upload and x/y/z variable pickers
    convertMenuItem(menuItem("1. Create Response Surface", tabName="Page2", icon=NULL,
      useShinyjs(),
      uiOutput('stressTestDataUploadUI'),
      #bs_modal(id = "modal1", title = "Stress test data", body = DTOutput('stressTestDataTbl')),
      #bs_attach_modal(uiOutput("stressTestDataTblBttnUI"), id_modal = "modal1"),
      uiOutput('variable.xUI'),
      uiOutput('variable.yUI'),
      uiOutput('variable.zUI'),
      uiOutput('pthresholdUI')
    ), tabName="Page2"),
    # Section 2: GCM projection upload plus scenario/model filters
    convertMenuItem(menuItem("2. Overlay Climate Information", tabName="Page2",
      #uiOutput('GCMDataTblBttn_DefaultUI'),
      uiOutput('GCMDataUI'),
      #bs_modal(id = "modal2", title = "GCM projections", body = DTOutput('GCMDataTbl')),
      #bs_attach_modal(uiOutput("GCMDataTblBttnUI"), id_modal = "modal2"),
      uiOutput('scenariosUI'),
      uiOutput('modelsUI'),
      uiOutput('gcm.marginalUI')
    ), tabName="Page2"),
    # Section 3: z-axis scale limits, bin count, colour/legend toggles
    convertMenuItem(menuItem("3. Adjust scales", tabName="Page2", icon=NULL,
      uiOutput('variable.z.minUI'),
      uiOutput('variable.z.maxUI'),
      uiOutput('variable.z.binUI'),
      uiOutput("plot.colorsUI"),
      uiOutput("plot.legendUI")
    ), tabName="Page2"),
    # Section 4: plot title and axis label text inputs
    convertMenuItem(menuItem("4. Labels", tabName="Page2", icon=NULL,
      uiOutput('plot.titleUI'),
      uiOutput('variable.x.labelUI'),
      uiOutput('variable.y.labelUI'),
      uiOutput('variable.z.labelUI')
    ), tabName="Page2")
  ), # sidebarMenu close
  # Pin a contact link to the bottom of the sidebar
  tags$style(type = 'text/css',
    "footer{position: absolute; bottom:2%; left: 5%; padding:6px; color:gray}"),
  HTML('<footer> <a href="mailto:umit.taner@deltares.nl">Contact</a> </footer>')
)#sidebar close
#### Dashboard body elements
# Dashboard body: Page1 holds static About content; Page2 shows the climate
# response surface plot (with a loading spinner) and a download button.
# NOTE(review): customOneNote is defined elsewhere (likely in global.R) —
# presumably a theme/CSS object; confirm.
body <- dashboardBody(
  customOneNote,
  useShinyjs(),
  tabItems(
    tabItem(
      tabName = "Page1",
      includeHTML("www/About.html")
    ),
    tabItem(
      tabName = "Page2",
      fluidPage(
        br(), br(),
        column(12, align="center",
          #plotlyOutput("SurfacePlotUI", height = "600px", width = "750px") %>% withSpinner(),
          plotOutput("SurfacePlotUI", height = "600px", width = "700px") %>% withSpinner(),
          br(),
          uiOutput("downloadPlotUI")
        )
      ) #fluidpage close
    ) #tabitem close
  ) #tabitems close
) # body close
loadingState()
#### Define the dashboard
# Assemble the full shinydashboard page from the header, the sidebar and
# body objects defined above.
appUI <- dashboardPage(
  header = dashboardHeader(
    title = "Climate Surface Viz Tool",
    titleWidth = 360),
  sidebar = sidebar,
  body = body
)
################################################################################
### SERVER-SIDE ----------------------------------------------------------------
#demoButtonUI /// default data button UI
#demoButton /// default data button
#stressTestDataUpload ///Upload data
#stressTestDataUploadUI ///Upload Buttion
#stressTestData() //// processed stressTest dataframe
#stressTestDataTbl()
#stressTestDataTbl_button
#stressTestDataTblBttnUI
#GCMDataFileUpload
#GCMDataFileUploadUI
#GCMDataDF()
appServer <- function(input, output, session) {
## Stress test results upload (UI element)
output$stressTestDataUploadUI = renderUI({
fileInput("stressTestDataUpload", label = "Upload stress test datafile (csv)", multiple = F, accept = ".csv", width = '95%')
})
# ### Stress Test Data (user interactive)
# stressTestData <- eventReactive(input$demoButton, {
# read.csv("./data/stress_test_sample_data_default.csv", header = T, sep = ",", stringsAsFactors = T, row.names = NULL)
# })
stressTestData <- reactive({
# if (input$demoButton == TRUE) {
# read.csv("./data/stress_test_sample_data_default.csv", header = T, sep = ",", stringsAsFactors = T, row.names = NULL)
# } else {
if (!is.null(input$stressTestDataUpload)) {
read.csv(input$stressTestDataUpload$datapath, header = T, sep = ",", stringsAsFactors = T, row.names = NULL)
}
# }
})
output$demoButtonUI = renderUI({
actionBttn(
inputId = "demoButton",
label = "Live Demo",
style = "material-flat",
size = "xs",
color = "default"
)
})
# ### Stress Test Data results viewing
# output$stressTestDataTbl = renderDataTable({
#
# if (!is.null(stressTestData())) {
# datatable(stressTestData(),
# options = list(lengthChange = FALSE, searching = FALSE, columnDefs = list(list(className = 'dt-left', targets = "_all"))),
# class = 'cell-border stripe') %>%
# formatRound(columns = sapply(stressTestData(), is.numeric), digits = 2) %>%
# formatRound(columns = 0, digits = 0)
# }
# })
# output$stressTestDataTblBttnUI = renderUI({
# req(input$stressTestDataUpload)
# actionBttn(
# inputId = "stressTestDataTblBttn",
# label = "View Data",
# style = "material-flat",
# size = "xs",
# color = "default"
# )
# })
### RENDER CLIMATE RESPONSE SURFACE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
output$variable.xUI = renderUI({
req(stressTestData())
pickerInput("variable.x", label = "X-Axis variable: ", choices = colnames(stressTestData()),
selected = colnames(stressTestData())[1], width = '95%')
})
output$variable.yUI = renderUI({
req(stressTestData())
pickerInput("variable.y", label = "Y-Axis variable: ", choices = colnames(stressTestData()),
selected = colnames(stressTestData())[2], width = '95%')
})
output$variable.zUI = renderUI({
req(stressTestData())
pickerInput("variable.z", label = "Z-Axis: variable", choices = colnames(stressTestData()),
selected = colnames(stressTestData())[3], width = '95%')
})
output$plot.titleUI = renderUI({
req(stressTestData())
textInput("plot.title", label = "Plot title", width = '95%')
})
output$variable.x.labelUI = renderUI({
req(stressTestData())
textInput("variable.x.label", label = "X-label", width = '95%')
})
output$variable.y.labelUI = renderUI({
req(stressTestData())
textInput("variable.y.label", label = "Y-label", width = '95%')
})
output$variable.z.labelUI = renderUI({
req(stressTestData())
textInput("variable.z.label", label = "Z-label", width = '95%')
})
output$pthresholdUI = renderUI({
req(stressTestData())
sliderInput(inputId = "pthreshold",
label = "Performance Threshold",
ticks = FALSE,
step = NULL, #this needs to be fixed
min = stressTestData() %>% pull(input$variable.z) %>% min() %>% floor(),
max = stressTestData() %>% pull(input$variable.z) %>% max() %>% ceiling(),
value = stressTestData() %>% pull(input$variable.z) %>% mean() %>% round(),
round = 0,
width = '95%'
)
})
#### CHeck Boxes
output$plot.colorsUI = renderUI({
req(stressTestData())
awesomeCheckbox("plot.colors", label="Flip colors", value = FALSE)
})
output$plot.legendUI = renderUI({
req(stressTestData())
awesomeCheckbox("plot.legend", label= "Hide legend", value = FALSE)
})
output$variable.z.minUI = renderUI({
req(stressTestData())
numericInput("variable.z.min", label = "Min. value", width = '95%',
value = stressTestData() %>% pull(input$variable.z) %>% min() %>% floor())
})
output$variable.z.maxUI = renderUI({
req(stressTestData())
numericInput("variable.z.max", label = "Max. value", width = '95%',
value = stressTestData() %>% pull(input$variable.z) %>% max() %>% ceiling())
})
output$variable.z.binUI = renderUI({
req(stressTestData())
numericInput("variable.z.bin", label = "Bin num.", width = '95%', value = 20)
})
color.low <- col1
color.high <- col2
SurfacePlot <- reactive({
req(input$pthreshold, input$variable.x, input$variable.y, input$variable.z)
# Set color scheme
if(!is.null(input$plot.colors)) {
if(input$plot.colors == TRUE) {
color.high <- col1; color.low <- col2
} else {
color.high <- col2; color.low <- col1
}
}
# Extract x, y, and z dimensions from the data matrix
x_data <- stressTestData() %>% pull(input$variable.x)
y_data <- stressTestData() %>% pull(input$variable.y)
z_data <- stressTestData() %>% pull(input$variable.z)
z_mid <- input$pthreshold
z_min <- min(z_data) %>% floor()
z_max <- max(z_data) %>% ceiling()
z_bin <- 20
if(!is.null(input$variable.z.min)) {
z_min <- input$variable.z.min
}
if(!is.null(input$variable.z.max)) {
z_max <- input$variable.z.max
}
if(!is.null(input$variable.z.bin)) {
z_bin <- input$variable.z.bin
}
# Specify x, y breaks
x_breaks <- unique(x_data)
y_breaks <- unique(y_data)
z_breaks <- seq(z_min,z_max,length.out = z_bin)
z_breaks_legend <- pretty(range(z_breaks),5)
df = tibble(x = x_data, y = y_data, z = z_data)
# Core climate response surface
p <- ggplot(df, aes(x = x, y = y)) +
# Define theme
theme_light(base_size = 14) +
# Place z dimension
geom_contour_fill(aes(z=z), breaks = z_breaks) +
# Set scales
scale_x_continuous(expand = c(0, 0), breaks = x_breaks) +
scale_y_continuous(expand = c(0, 0), breaks = y_breaks) +
scale_fill_divergent(low = color.low, mid = "white", high = color.high, midpoint = z_mid, limits = range(z_breaks_legend), breaks = z_breaks_legend) +
# Set labs
labs(x = input$variable.x.label,y = input$variable.y.label,color = NULL, fill = input$variable.z.label,title = input$plot.title) +
# Set guides
guides(fill = guide_colorbar(raster=F, barwidth=1.5, ticks = TRUE, barheight = 20, order = 1), color = guide_legend(order = 2)) +
# Threshold line
geom_contour(aes(z = z), breaks = z_mid, color = "black", size = 1.5)
if(!is.null(input$plot.legend)) {
if(input$plot.legend == TRUE) {
p <- p + theme(legend.position = "none")
}
}
# GCM Dots
if(!is.null(GCMDataDF())) {
# Find positions of scenarios
scenario_ind <- which(scenarios_list() %in% unique(GCMDataDF()$scenario))
scenario_col <- rcp_col[scenario_ind]
#if(input$GCM.scenarios == TRUE) {
p <- p + scale_color_manual(values = scenario_col) +
geom_point(aes(color = scenario), data = GCMDataDF(), shape = 1, stroke = 2, size = 3, alpha = 0.75)
}
if(!is.null(input$gcm.marginal)) {
if(input$gcm.marginal == TRUE) {
p_top <- ggplot(GCMDataDF(), aes(x))+
scale_fill_manual(values = scenario_col) +
scale_color_manual(values = scenario_col) +
geom_density(aes(fill = scenario, color = scenario), alpha = 0.4, position="identity") +
scale_x_continuous(expand = c(0, 0), limits = c(0,5)) +
gg_blank + guides(fill=FALSE, color = FALSE) #+
#theme(legend.position = "none") #, panel.border = element_rect(color = "gray80", fill=NA))
p_left <- ggplot(GCMDataDF(), aes(y))+
scale_fill_manual(values = scenario_col) +
scale_color_manual(values = scenario_col) +
geom_density(aes(fill = scenario, color = scenario), alpha = 0.4, position="identity") +
scale_x_continuous(expand = c(0, 0), limits = c(-60,40)) +
coord_flip() + gg_blank + guides(fill=FALSE, color = FALSE)
#theme(legend.position = "none") #, panel.border = element_rect(color = "gray80", fill=NA))
p <- ggarrange(empty, p_top, p_left, p, ncol=2, nrow=2, widths=c(1, 7), heights=c(1, 7))
#p <- subplot(empty, p_top, p_left, p, nrows=2, widths=c(0.15, 0.85), heights=c(0.15, 0.85),
# shareX = TRUE, shareY = TRUE)
}
}
p #+ theme(plot.margin=grid::unit(c(1,1,1,1), "mm"))
})
#output$SurfacePlotUI <- renderPlotly(SurfacePlot())
# Render the response surface as a static plot; the plotly variant above is
# retained (disabled) from an earlier interactive version.
output$SurfacePlotUI <- renderPlot(SurfacePlot())
### GCM PROJECTIONS TAB ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Climate Projections Data Processing ###
# ### GCM results output table
# output$GCMDataTbl = renderDataTable({
#
# datatable(GCMData(),
# options = list(lengthChange = FALSE, searching = FALSE, columnDefs = list(list(className = 'dt-left', targets = "_all"))),
# class = 'cell-border stripe') %>%
# formatRound(columns = sapply(GCMData(), is.numeric), digits = 2) %>%
# formatRound(columns = 0, digits = 0)
# })
# output$GCMDataTblBttn_DefaultUI = renderUI({
# actionBttn(
# inputId = "GCMDataTblBttn_Default",
# label = "Demo dataset2",
# style = "material-flat",
# size = "sm",
# color = "default"
# )
# })
#
# File-upload control for the climate-projections CSV; only offered once the
# stress-test data exists (the surface must be available before an overlay).
output$GCMDataUI <- renderUI({
  req(stressTestData())
  fileInput("GCMDataUpload",
            label = "Upload climate projections datafile (csv)",
            multiple = FALSE, accept = ".csv", width = "95%")
})
# GCM delta-change projections: the user's upload when present, otherwise the
# bundled demo dataset so the overlay controls always have data to show.
# (Fix: spell out TRUE/FALSE instead of the reassignable T/F shorthands.)
GCMData <- reactive({
  if (!is.null(input$GCMDataUpload)) {
    read.csv(input$GCMDataUpload$datapath, header = TRUE, sep = ",",
             stringsAsFactors = TRUE, row.names = NULL)
  } else {
    read.csv("./data/gcm_delta_change_2050_default.csv", header = TRUE,
             sep = ",", stringsAsFactors = TRUE, row.names = NULL)
  }
})
# GCM projections reshaped for plotting: scenario/model labels plus the x/y
# delta-change columns currently selected for the surface axes, filtered to
# the scenarios and models picked in the sidebar.
GCMDataDF <- reactive({
  dat <- GCMData()
  df <- dat %>% select(scenario, model)
  df$x <- dat %>% pull(input$variable.x)
  df$y <- dat %>% pull(input$variable.y)
  # Return the filtered frame explicitly; the original used magrittr's
  # two-way pipe (%<>%), whose invisible assignment obscured the fact that
  # its result is the reactive's value.
  df %>%
    filter(scenario %in% input$scenarios) %>%
    filter(model %in% input$models)
})
# Unique scenario labels found in the projections file (e.g. RCP names).
# NOTE: the original named the vector and then stripped the names again with
# as.vector(); the net result was an unnamed vector, reproduced here directly.
scenarios_list <- reactive({
  GCMData() %>% pull(scenario) %>% unique() %>% as.vector()
})
# Scenario picker, shown once a projections file has been uploaded.
# (Fix: use `<-` consistently for output assignments; the file mixed `=`/`<-`.)
output$scenariosUI <- renderUI({
  req(input$GCMDataUpload)
  pickerInput(
    inputId = "scenarios",
    label = "Scenarios",
    choices = scenarios_list(),
    selected = scenarios_list(),
    options = list(`actions-box` = TRUE),
    multiple = TRUE,
    width = "95%"
  )
})
# Unique climate-model labels found in the projections file.
models_list <- reactive({
  GCMData() %>% pull(model) %>% unique() %>% as.vector()
})
# Climate-model picker, shown once a projections file has been uploaded.
output$modelsUI <- renderUI({
  req(input$GCMDataUpload)
  pickerInput(
    inputId = "models",
    label = "Climate Models",
    choices = models_list(),
    selected = models_list(),
    options = list(`actions-box` = TRUE),
    multiple = TRUE,
    width = "95%"
  )
})
# Toggle for drawing marginal GCM density panels around the surface plot.
output$gcm.marginalUI <- renderUI({
  req(input$GCMDataUpload)
  awesomeCheckbox("gcm.marginal", label = "Display marginal distributions", value = FALSE)
})
# #### Download response surface
# Base filename for the downloaded plot. textInput() returns "" (not NULL)
# before the user types anything, so a blank title must also fall back to the
# default — otherwise the download would be named just ".png". The original
# ifelse(is.null(...)) never triggered for that reason (and scalar ifelse is
# an anti-pattern anyway).
plot_name <- reactive({
  title <- input$plot.title
  if (is.null(title) || !nzchar(trimws(title))) "surfaceplot" else title
})
# Serve the current surface as a PNG at a fixed 10x8in size.
output$downloadPlot <- downloadHandler(
  filename = function() paste0(plot_name(), ".png"),
  content = function(file) {
    ggsave(file, plot = SurfacePlot(), height = 8, width = 10, units = "in")
  }
)
# Download button; only rendered once a surface plot exists.
output$downloadPlotUI = renderUI({
  req(SurfacePlot())
  downloadBttn(
    outputId = "downloadPlot",
    label = "Download plot",
    style = "material-flat",
    color = "default",
    size = "sm"
  )
})
# Stop the R process when the browser session ends (single-user app).
session$onSessionEnded(stopApp)
}
################################################################################
### APPLICATON -----------------------------------------------------------------
shinyApp(ui = appUI, server = appServer)
| /app.R | no_license | tanerumit/climateStressTest | R | false | false | 16,016 | r |
source("./src/global.R")
source("./src/functions.R")
### UI-SIDE --------------------------------------------------------------------
#### Dashboard sidebar elements
#### Dashboard sidebar elements
# Left-hand navigation: the About page plus four collapsible control groups
# that drive the response surface (data upload/axes, climate overlay, scale
# adjustments, labels). convertMenuItem() keeps each menuItem both clickable
# (tab switch) and expandable (revealing its inputs). Commented-out entries
# are retained from earlier modal/demo-button experiments.
sidebar = dashboardSidebar(
  width = 400,
  useShinyjs(),
  includeCSS("www/style.css"),
  sidebarMenu(
    br(),
    #uiOutput('demoButtonUI'),
    convertMenuItem(menuItem("About", tabName="Page1", selected = TRUE), tabName="Page1"),
    # Group 1: stress-test upload and the three surface axes + threshold.
    convertMenuItem(menuItem("1. Create Response Surface", tabName="Page2", icon=NULL,
      useShinyjs(),
      uiOutput('stressTestDataUploadUI'),
      #bs_modal(id = "modal1", title = "Stress test data", body = DTOutput('stressTestDataTbl')),
      #bs_attach_modal(uiOutput("stressTestDataTblBttnUI"), id_modal = "modal1"),
      uiOutput('variable.xUI'),
      uiOutput('variable.yUI'),
      uiOutput('variable.zUI'),
      uiOutput('pthresholdUI')
    ),tabName="Page2"),
    # Group 2: GCM projections upload and scenario/model filters.
    convertMenuItem(menuItem("2. Overlay Climate Information", tabName="Page2",
      #uiOutput('GCMDataTblBttn_DefaultUI'),
      uiOutput('GCMDataUI'),
      #bs_modal(id = "modal2", title = "GCM projections", body = DTOutput('GCMDataTbl')),
      #bs_attach_modal(uiOutput("GCMDataTblBttnUI"), id_modal = "modal2"),
      uiOutput('scenariosUI'),
      uiOutput('modelsUI'),
      uiOutput('gcm.marginalUI')
    ),tabName="Page2"),
    # Group 3: z-axis range/binning and color/legend toggles.
    convertMenuItem(menuItem("3. Adjust scales", tabName="Page2", icon=NULL,
      uiOutput('variable.z.minUI'),
      uiOutput('variable.z.maxUI'),
      uiOutput('variable.z.binUI'),
      uiOutput("plot.colorsUI"),
      uiOutput("plot.legendUI")
    ),tabName="Page2"),
    # Group 4: plot title and axis labels.
    convertMenuItem(menuItem("4. Labels", tabName="Page2", icon=NULL,
      uiOutput('plot.titleUI'),
      uiOutput('variable.x.labelUI'),
      uiOutput('variable.y.labelUI'),
      uiOutput('variable.z.labelUI')
    ),tabName="Page2")
  ), # sidebarMenu close
  # Pinned footer with a contact link.
  tags$style(type = 'text/css',
             "footer{position: absolute; bottom:2%; left: 5%; padding:6px; color:gray}"),
  HTML('<footer> <a href="mailto:umit.taner@deltares.nl">Contact</a> </footer>')
)#sidebar close
#### Dashboard body elements
#### Dashboard body elements
# Two tabs: a static About page and the plotting page holding the surface
# plot output plus the download button.
body <- dashboardBody(
  customOneNote,
  useShinyjs(),
  tabItems(
    tabItem(
      tabName = "Page1",
      includeHTML("www/About.html")
    ),
    tabItem(
      tabName = "Page2",
      fluidPage(
        br(), br(),
        column(12, align="center",
          #plotlyOutput("SurfacePlotUI", height = "600px", width = "750px") %>% withSpinner(),
          plotOutput("SurfacePlotUI", height = "600px", width = "700px") %>% withSpinner(),
          br(),
          uiOutput("downloadPlotUI")
        )
      ) #fluidpage close
    ) #tabitem close
  ) #tabitems close
) # body close
# NOTE(review): loadingState() is called at the top level here, outside any
# UI container — its effect at this position is unclear; verify it is needed.
loadingState()
#### Define the dashboard
appUI <- dashboardPage(
  header = dashboardHeader(
    title = "Climate Surface Viz Tool",
    titleWidth = 360),
  sidebar = sidebar,
  body = body
)
################################################################################
### SERVER-SIDE ----------------------------------------------------------------
#demoButtonUI /// default data button UI
#demoButton /// default data button
#stressTestDataUpload ///Upload data
#stressTestDataUploadUI ///Upload Buttion
#stressTestData() //// processed stressTest dataframe
#stressTestDataTbl()
#stressTestDataTbl_button
#stressTestDataTblBttnUI
#GCMDataFileUpload
#GCMDataFileUploadUI
#GCMDataDF()
# Server logic: wires the sidebar inputs to the stress-test data, the GCM
# overlay, and the rendered response surface.
appServer <- function(input, output, session) {
  ## Stress test results upload (UI element)
  output$stressTestDataUploadUI = renderUI({
    fileInput("stressTestDataUpload", label = "Upload stress test datafile (csv)", multiple = F, accept = ".csv", width = '95%')
  })
  # ### Stress Test Data (user interactive)
  # stressTestData <- eventReactive(input$demoButton, {
  #   read.csv("./data/stress_test_sample_data_default.csv", header = T, sep = ",", stringsAsFactors = T, row.names = NULL)
  # })
  # Parsed stress-test table. Returns NULL (the implicit value of the
  # untaken if-branch) until a file has been uploaded; downstream UI gates
  # on that via req().
  stressTestData <- reactive({
    # if (input$demoButton == TRUE) {
    # read.csv("./data/stress_test_sample_data_default.csv", header = T, sep = ",", stringsAsFactors = T, row.names = NULL)
    # } else {
    if (!is.null(input$stressTestDataUpload)) {
      read.csv(input$stressTestDataUpload$datapath, header = T, sep = ",", stringsAsFactors = T, row.names = NULL)
    }
    # }
  })
  # "Live Demo" button (currently unused — its uiOutput in the sidebar is
  # commented out).
  output$demoButtonUI = renderUI({
    actionBttn(
      inputId = "demoButton",
      label = "Live Demo",
      style = "material-flat",
      size = "xs",
      color = "default"
    )
  })
# ### Stress Test Data results viewing
# output$stressTestDataTbl = renderDataTable({
#
# if (!is.null(stressTestData())) {
# datatable(stressTestData(),
# options = list(lengthChange = FALSE, searching = FALSE, columnDefs = list(list(className = 'dt-left', targets = "_all"))),
# class = 'cell-border stripe') %>%
# formatRound(columns = sapply(stressTestData(), is.numeric), digits = 2) %>%
# formatRound(columns = 0, digits = 0)
# }
# })
# output$stressTestDataTblBttnUI = renderUI({
# req(input$stressTestDataUpload)
# actionBttn(
# inputId = "stressTestDataTblBttn",
# label = "View Data",
# style = "material-flat",
# size = "xs",
# color = "default"
# )
# })
  ### RENDER CLIMATE RESPONSE SURFACE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Axis pickers: default to the first three columns of the uploaded table.
  output$variable.xUI = renderUI({
    req(stressTestData())
    pickerInput("variable.x", label = "X-Axis variable: ", choices = colnames(stressTestData()),
                selected = colnames(stressTestData())[1], width = '95%')
  })
  output$variable.yUI = renderUI({
    req(stressTestData())
    pickerInput("variable.y", label = "Y-Axis variable: ", choices = colnames(stressTestData()),
                selected = colnames(stressTestData())[2], width = '95%')
  })
  output$variable.zUI = renderUI({
    req(stressTestData())
    pickerInput("variable.z", label = "Z-Axis: variable", choices = colnames(stressTestData()),
                selected = colnames(stressTestData())[3], width = '95%')
  })
  # Free-text plot/axis labels (empty string until the user types).
  output$plot.titleUI = renderUI({
    req(stressTestData())
    textInput("plot.title", label = "Plot title", width = '95%')
  })
  output$variable.x.labelUI = renderUI({
    req(stressTestData())
    textInput("variable.x.label", label = "X-label", width = '95%')
  })
  output$variable.y.labelUI = renderUI({
    req(stressTestData())
    textInput("variable.y.label", label = "Y-label", width = '95%')
  })
  output$variable.z.labelUI = renderUI({
    req(stressTestData())
    textInput("variable.z.label", label = "Z-label", width = '95%')
  })
  # Performance-threshold slider spanning the observed z-range, starting at
  # the mean; becomes the midpoint of the diverging fill scale.
  output$pthresholdUI = renderUI({
    req(stressTestData())
    sliderInput(inputId = "pthreshold",
                label = "Performance Threshold",
                ticks = FALSE,
                step = NULL, #this needs to be fixed
                min = stressTestData() %>% pull(input$variable.z) %>% min() %>% floor(),
                max = stressTestData() %>% pull(input$variable.z) %>% max() %>% ceiling(),
                value = stressTestData() %>% pull(input$variable.z) %>% mean() %>% round(),
                round = 0,
                width = '95%'
    )
  })
  #### Check boxes
  output$plot.colorsUI = renderUI({
    req(stressTestData())
    awesomeCheckbox("plot.colors", label="Flip colors", value = FALSE)
  })
  output$plot.legendUI = renderUI({
    req(stressTestData())
    awesomeCheckbox("plot.legend", label= "Hide legend", value = FALSE)
  })
  # Manual overrides for the z (fill) scale range and bin count.
  output$variable.z.minUI = renderUI({
    req(stressTestData())
    numericInput("variable.z.min", label = "Min. value", width = '95%',
                 value = stressTestData() %>% pull(input$variable.z) %>% min() %>% floor())
  })
  output$variable.z.maxUI = renderUI({
    req(stressTestData())
    numericInput("variable.z.max", label = "Max. value", width = '95%',
                 value = stressTestData() %>% pull(input$variable.z) %>% max() %>% ceiling())
  })
  output$variable.z.binUI = renderUI({
    req(stressTestData())
    numericInput("variable.z.bin", label = "Bin num.", width = '95%', value = 20)
  })
  # Default diverging-scale endpoints (col1/col2 come from global.R).
  color.low <- col1
  color.high <- col2
  # Build the climate response surface: a filled contour of z over the x/y
  # stress-test grid, a bold contour at the performance threshold, optional
  # GCM projection dots, and optional marginal densities.
  SurfacePlot <- reactive({
    req(input$pthreshold, input$variable.x, input$variable.y, input$variable.z)
    # Set color scheme (the "Flip colors" checkbox swaps the endpoints).
    if(!is.null(input$plot.colors)) {
      if(input$plot.colors == TRUE) {
        color.high <- col1; color.low <- col2
      } else {
        color.high <- col2; color.low <- col1
      }
    }
    # Extract x, y, and z dimensions from the data matrix
    x_data <- stressTestData() %>% pull(input$variable.x)
    y_data <- stressTestData() %>% pull(input$variable.y)
    z_data <- stressTestData() %>% pull(input$variable.z)
    z_mid <- input$pthreshold
    # Defaults for the fill scale; overridden below by the sidebar inputs
    # once those controls have rendered.
    z_min <- min(z_data) %>% floor()
    z_max <- max(z_data) %>% ceiling()
    z_bin <- 20
    if(!is.null(input$variable.z.min)) {
      z_min <- input$variable.z.min
    }
    if(!is.null(input$variable.z.max)) {
      z_max <- input$variable.z.max
    }
    if(!is.null(input$variable.z.bin)) {
      z_bin <- input$variable.z.bin
    }
    # Specify x, y breaks
    x_breaks <- unique(x_data)
    y_breaks <- unique(y_data)
    z_breaks <- seq(z_min,z_max,length.out = z_bin)
    z_breaks_legend <- pretty(range(z_breaks),5)
    df = tibble(x = x_data, y = y_data, z = z_data)
    # Core climate response surface
    p <- ggplot(df, aes(x = x, y = y)) +
      # Define theme
      theme_light(base_size = 14) +
      # Place z dimension
      geom_contour_fill(aes(z=z), breaks = z_breaks) +
      # Set scales
      scale_x_continuous(expand = c(0, 0), breaks = x_breaks) +
      scale_y_continuous(expand = c(0, 0), breaks = y_breaks) +
      # Diverging fill centered on the performance threshold.
      scale_fill_divergent(low = color.low, mid = "white", high = color.high, midpoint = z_mid, limits = range(z_breaks_legend), breaks = z_breaks_legend) +
      # Set labs
      labs(x = input$variable.x.label,y = input$variable.y.label,color = NULL, fill = input$variable.z.label,title = input$plot.title) +
      # Set guides
      guides(fill = guide_colorbar(raster=F, barwidth=1.5, ticks = TRUE, barheight = 20, order = 1), color = guide_legend(order = 2)) +
      # Threshold line
      geom_contour(aes(z = z), breaks = z_mid, color = "black", size = 1.5)
    # "Hide legend" checkbox.
    if(!is.null(input$plot.legend)) {
      if(input$plot.legend == TRUE) {
        p <- p + theme(legend.position = "none")
      }
    }
    # GCM Dots: one open circle per projection, colored by scenario.
    if(!is.null(GCMDataDF())) {
      # Find positions of scenarios (rcp_col comes from global.R).
      scenario_ind <- which(scenarios_list() %in% unique(GCMDataDF()$scenario))
      scenario_col <- rcp_col[scenario_ind]
      #if(input$GCM.scenarios == TRUE) {
      p <- p + scale_color_manual(values = scenario_col) +
        geom_point(aes(color = scenario), data = GCMDataDF(), shape = 1, stroke = 2, size = 3, alpha = 0.75)
    }
    # Marginal densities of the projections along each axis.
    # NOTE(review): scenario_col is only defined inside the branch above, so
    # this block assumes GCMDataDF() is non-NULL; also the axis limits
    # c(0,5) / c(-60,40) look dataset-specific — confirm against the data.
    if(!is.null(input$gcm.marginal)) {
      if(input$gcm.marginal == TRUE) {
        p_top <- ggplot(GCMDataDF(), aes(x))+
          scale_fill_manual(values = scenario_col) +
          scale_color_manual(values = scenario_col) +
          geom_density(aes(fill = scenario, color = scenario), alpha = 0.4, position="identity") +
          scale_x_continuous(expand = c(0, 0), limits = c(0,5)) +
          gg_blank + guides(fill=FALSE, color = FALSE) #+
        #theme(legend.position = "none") #, panel.border = element_rect(color = "gray80", fill=NA))
        p_left <- ggplot(GCMDataDF(), aes(y))+
          scale_fill_manual(values = scenario_col) +
          scale_color_manual(values = scenario_col) +
          geom_density(aes(fill = scenario, color = scenario), alpha = 0.4, position="identity") +
          scale_x_continuous(expand = c(0, 0), limits = c(-60,40)) +
          coord_flip() + gg_blank + guides(fill=FALSE, color = FALSE)
        #theme(legend.position = "none") #, panel.border = element_rect(color = "gray80", fill=NA))
        # Assemble surface + marginals into a 2x2 grid (empty top-left cell).
        p <- ggarrange(empty, p_top, p_left, p, ncol=2, nrow=2, widths=c(1, 7), heights=c(1, 7))
        #p <- subplot(empty, p_top, p_left, p, nrows=2, widths=c(0.15, 0.85), heights=c(0.15, 0.85),
        # shareX = TRUE, shareY = TRUE)
      }
    }
    p #+ theme(plot.margin=grid::unit(c(1,1,1,1), "mm"))
  })
#output$SurfacePlotUI <- renderPlotly(SurfacePlot())
output$SurfacePlotUI <- renderPlot(SurfacePlot())
### GCM PROJECTIONS TAB ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Climate Projections Data Processing ###
# ### GCM results output table
# output$GCMDataTbl = renderDataTable({
#
# datatable(GCMData(),
# options = list(lengthChange = FALSE, searching = FALSE, columnDefs = list(list(className = 'dt-left', targets = "_all"))),
# class = 'cell-border stripe') %>%
# formatRound(columns = sapply(GCMData(), is.numeric), digits = 2) %>%
# formatRound(columns = 0, digits = 0)
# })
# output$GCMDataTblBttn_DefaultUI = renderUI({
# actionBttn(
# inputId = "GCMDataTblBttn_Default",
# label = "Demo dataset2",
# style = "material-flat",
# size = "sm",
# color = "default"
# )
# })
#
output$GCMDataUI = renderUI({
req(!is.null(stressTestData()))
fileInput("GCMDataUpload", label = "Upload climate projections datafile (csv)", multiple = F, accept = ".csv", width = '95%')
})
GCMData <- reactive({
if (!is.null(input$GCMDataUpload)) {
read.csv(input$GCMDataUpload$datapath, header = TRUE, sep = ",", stringsAsFactors = T, row.names = NULL)
} else {
#if(input$GCMDataTblBttn_Default == T) {
read.csv("./data/gcm_delta_change_2050_default.csv", header = T, sep = ",", stringsAsFactors = T, row.names = NULL)
#}
}
})
GCMDataDF <- reactive({
# Extract x, y, and z dimensions from the data matrix
df <- GCMData() %>% select(scenario, model)
df$x <- GCMData() %>% pull(input$variable.x)
df$y <- GCMData() %>% pull(input$variable.y)
df %<>%
filter(scenario %in% input$scenarios) %>%
filter(model %in% input$models)
})
scenarios_list <- reactive({
dat <- GCMData() %>% pull(scenario) %>% unique()
names(dat) <- dat
as.vector(dat)
})
output$scenariosUI <- renderUI({
req(input$GCMDataUpload)
pickerInput(
inputId = "scenarios",
label = "Scenarios",
choices = scenarios_list(),
selected = scenarios_list(),
options = list(`actions-box` = TRUE),
multiple = TRUE,
width = '95%'
)
})
models_list <- reactive({
dat <- GCMData() %>% pull(model) %>% unique()
names(dat) <- dat
as.vector(dat)
})
output$modelsUI <- renderUI({
req(input$GCMDataUpload)
pickerInput(
inputId = "models",
label = "Climate Models",
choices = models_list(),
selected = models_list(),
options = list(`actions-box` = TRUE),
multiple = TRUE,
width = '95%'
)
})
output$gcm.marginalUI = renderUI({
req(input$GCMDataUpload)
awesomeCheckbox("gcm.marginal", label= "Display marginal distributions", value = FALSE)
})
# #### Dowload response surface
plot_name <- reactive({ifelse(is.null(input$plot.title), "surfaceplot", input$plot.title)})
output$downloadPlot <- downloadHandler(
filename = function() {paste(plot_name(),'.png',sep='')},
content = function(file){
ggsave(file, plot = SurfacePlot(), height = 8, width = 10, units = "in")
}
)
output$downloadPlotUI = renderUI({
req(SurfacePlot())
downloadBttn(
outputId = "downloadPlot",
label = "Download plot",
style = "material-flat",
color = "default",
size = "sm"
)
})
session$onSessionEnded(stopApp)
}
################################################################################
### APPLICATON -----------------------------------------------------------------
shinyApp(ui = appUI, server = appServer)
|
## PostgreSQL.R
## Last Modified: $Date$
## This package was developed as a part of Summer of Code program organized by Google.
## Thanks to David A. James & Saikat DebRoy, the authors of RMySQL package.
## Code from RMySQL package was reused with the permission from the authors.
## Also Thanks to my GSoC mentor Dirk Eddelbuettel for helping me in the development.
##
## Constants
##
##.PostgreSQLRCS <- "$Id: PostgreSQL.R,v 0.1 2008/06/10 14:00:00$"
# Package-level constants used throughout the driver.
.PostgreSQLPkgName <- "RPostgreSQL"
.PostgreSQLVersion <- "0.1-0" ##package.description(.PostgreSQLPkgName, fields = "Version")
.PostgreSQL.NA.string <- "\\N" ## on input, PostgreSQL interprets \N as NULL (NA)
setOldClass("data.frame") ## to appease setMethod's signature warnings...
## ------------------------------------------------------------------
## Begin DBI extensions:
##
## dbBeginTransaction
##
# Generic for explicitly opening a transaction (not part of DBI 0.1-4).
setGeneric("dbBeginTransaction",
           def = function(conn, ...)
             standardGeneric("dbBeginTransaction"),
           valueClass = "logical"
)
##
## End DBI extensions
## ------------------------------------------------------------------
##
## Class: DBIObject
##
# Virtual base class shared by the driver, connection, and result classes.
setClass("PostgreSQLObject", representation("DBIObject", "dbObjectId", "VIRTUAL"))
##
## Class: dbDriver
##
# User-facing constructor for the driver object; wraps postgresqlInitDriver.
# max.con: maximum simultaneous connections; fetch.default.rec: default rows
# per fetch; force.reload: re-initialize the driver even if already loaded.
PostgreSQL <- function(max.con=16, fetch.default.rec = 500, force.reload=FALSE) {
  postgresqlInitDriver(max.con = max.con, fetch.default.rec = fetch.default.rec,
                       force.reload = force.reload)
}
##
## Class: DBIDriver
##
# Driver class and its DBI methods; all delegate to postgresql* helpers.
setClass("PostgreSQLDriver", representation("DBIDriver", "PostgreSQLObject"))
## coerce (extract) any PostgreSQLObject into a PostgreSQLDriver
setAs("PostgreSQLObject", "PostgreSQLDriver",
      def = function(from) new("PostgreSQLDriver", Id = as(from, "integer")[1:2])
)
setMethod("dbUnloadDriver", "PostgreSQLDriver",
          def = function(drv, ...) postgresqlCloseDriver(drv, ...),
          valueClass = "logical"
)
setMethod("dbGetInfo", "PostgreSQLDriver",
          def = function(dbObj, ...) postgresqlDriverInfo(dbObj, ...)
)
# All connection ids currently managed by this driver.
setMethod("dbListConnections", "PostgreSQLDriver",
          def = function(drv, ...) dbGetInfo(drv, "connectionIds")[[1]]
)
setMethod("summary", "PostgreSQLDriver",
          def = function(object, ...) postgresqlDescribeDriver(object, ...)
)
##
## Class: DBIConnection
##
# Connection class and its DBI methods.
setClass("PostgreSQLConnection", representation("DBIConnection", "PostgreSQLObject"))
# Open a new connection from a driver object.
setMethod("dbConnect", "PostgreSQLDriver",
          def = function(drv, ...) postgresqlNewConnection(drv, ...),
          valueClass = "PostgreSQLConnection"
)
# Convenience: dbConnect("PostgreSQL", ...) instantiates the driver first.
setMethod("dbConnect", "character",
          def = function(drv, ...) postgresqlNewConnection(dbDriver(drv), ...),
          valueClass = "PostgreSQLConnection"
)
## clone a connection
setMethod("dbConnect", "PostgreSQLConnection",
          def = function(drv, ...) postgresqlCloneConnection(drv, ...),
          valueClass = "PostgreSQLConnection"
)
setMethod("dbDisconnect", "PostgreSQLConnection",
          def = function(conn, ...) postgresqlCloseConnection(conn, ...),
          valueClass = "logical"
)
# Escape string values for safe inclusion in SQL literals.
setGeneric("dbEscapeStrings", def = function(conn, string, ...) standardGeneric("dbEscapeStrings"))
setMethod("dbEscapeStrings",
          signature(conn="PostgreSQLConnection", string="character"),
          def = function(conn, string, ...) postgresqlEscapeStrings(conn, string, ...),
          valueClass = "character"
)
# Submit a statement and return a result-set handle (fetch rows separately).
setMethod("dbSendQuery",
          signature(conn = "PostgreSQLConnection", statement = "character"),
          def = function(conn, statement,...) postgresqlExecStatement(conn, statement,...),
          valueClass = "PostgreSQLResult"
)
# Submit a statement and fetch all of its output in one call.
setMethod("dbGetQuery",
          signature(conn = "PostgreSQLConnection", statement = "character"),
          def = function(conn, statement, ...) postgresqlQuickSQL(conn, statement, ...)
)
# Last error/exception recorded on this connection (errors if expired).
setMethod("dbGetException", "PostgreSQLConnection",
          def = function(conn, ...){
            if(!isIdCurrent(conn))
              stop(paste("expired", class(conn)))
            .Call("RS_PostgreSQL_getException", as(conn, "integer"),
                  PACKAGE = .PostgreSQLPkgName)
          },
          valueClass = "list"
)
setMethod("dbGetInfo", "PostgreSQLConnection",
          def = function(dbObj, ...) postgresqlConnectionInfo(dbObj, ...)
)
# Ids of result sets currently open on this connection.
setMethod("dbListResults", "PostgreSQLConnection",
          def = function(conn, ...) dbGetInfo(conn, "rsId")[[1]]
)
setMethod("summary", "PostgreSQLConnection",
          def = function(object, ...) postgresqlDescribeConnection(object, ...)
)
## convenience methods
# User tables only: system schemas are excluded. Extra ... args are pasted
# onto the catalog query verbatim.
setMethod("dbListTables", "PostgreSQLConnection",
          def = function(conn, ...){
            out <- dbGetQuery(conn,
                              paste("select tablename from pg_tables where schemaname !='information_schema'",
                                    "and schemaname !='pg_catalog'", ...))
            if (is.null(out) || nrow(out) == 0)
              out <- character(0)
            else
              out <- out[, 1]
            out
          },
          valueClass = "character"
)
# Read an entire table into a data.frame.
setMethod("dbReadTable", signature(conn="PostgreSQLConnection", name="character"),
          def = function(conn, name, ...) postgresqlReadTable(conn, name, ...),
          valueClass = "data.frame"
)
# Write a data.frame to a table.
setMethod("dbWriteTable",
          signature(conn="PostgreSQLConnection", name="character", value="data.frame"),
          def = function(conn, name, value, ...){
            postgresqlWriteTable(conn, name, value, ...)
          },
          valueClass = "logical"
)
## write table from filename (TODO: connections)
setMethod("dbWriteTable",
          signature(conn="PostgreSQLConnection", name="character", value="character"),
          def = function(conn, name, value, ...){
            postgresqlImportFile(conn, name, value, ...)
          },
          valueClass = "logical"
)
# Does a user table with the given name exist? Accepts both "table" and
# "schema.table" forms (fusion of patches by Joe Conway and Prasenjit Kapat,
# Edd 09 Oct 2009). System schemas are excluded from the search.
setMethod("dbExistsTable",
          signature(conn="PostgreSQLConnection", name="character"),
          def = function(conn, name, ...){
            names <- strsplit(name, ".", fixed=TRUE)[[1]]
            if (length(names) == 2) { # format was "public.sometable"
              ## Escape the schema name as well as the table name before
              ## splicing into SQL; the original interpolated the schema
              ## verbatim, inconsistent with the table name and an
              ## injection hazard for hostile identifiers.
              res <- dbGetQuery(conn,
                                paste("select schemaname,tablename from pg_tables where ",
                                      "schemaname !='information_schema' ",
                                      "and schemaname !='pg_catalog' and schemaname='",
                                      postgresqlEscapeStrings(conn, names[1]),
                                      "' and tablename='",
                                      postgresqlEscapeStrings(conn, names[2]), "'", sep=""))
            } else {
              res <- dbGetQuery(conn,
                                paste("select tablename from pg_tables where ",
                                      "schemaname !='information_schema' and schemaname !='pg_catalog' ",
                                      "and tablename='", postgresqlEscapeStrings(conn, names[1]), "'", sep=""))
            }
            ## TRUE iff at least one matching row came back.
            return(as.logical(dim(res)[1]))
          },
          valueClass = "logical"
)
# Drop a table if it exists; returns TRUE on success, FALSE if the table is
# absent or the DROP failed. The name is quoted via postgresqlQuoteId.
# NOTE(review): ErrorClass is a package-level object defined elsewhere in
# RPostgreSQL — confirm it is in scope wherever this file is sourced.
setMethod("dbRemoveTable",
          signature(conn="PostgreSQLConnection", name="character"),
          def = function(conn, name, ...){
            if(dbExistsTable(conn, name)){
              rc <- try(dbGetQuery(conn, paste("DROP TABLE", postgresqlQuoteId(name))))
              !inherits(rc, ErrorClass)
            }
            else FALSE
          },
          valueClass = "logical"
)
## return field names (no metadata)
# Column names of a table, looked up from the pg_class/pg_attribute catalogs.
setMethod("dbListFields",
          signature(conn="PostgreSQLConnection", name="character"),
          def = function(conn, name, ...){
            ## Escape the table name before splicing it into the catalog
            ## query, matching dbExistsTable/dbEscapeStrings usage; the
            ## original pasted the raw name into SQL (injection hazard and
            ## breakage on names containing quotes).
            flds <- dbGetQuery(conn,
                               paste("SELECT a.attname FROM pg_class c,pg_attribute a,pg_type t WHERE c.relname = '",
                                     postgresqlEscapeStrings(conn, name),
                                     "' and a.attnum > 0 and a.attrelid = c.oid and a.atttypid = t.oid", sep=""))[,1]
            if(length(flds)==0)
              flds <- character()
            flds
          },
          valueClass = "character"
)
# Stored-procedure calls are not supported by this driver.
setMethod("dbCallProc", "PostgreSQLConnection",
          def = function(conn, ...) .NotYetImplemented()
)
setMethod("dbCommit", "PostgreSQLConnection",
          def = function(conn, ...) postgresqlTransactionStatement(conn, "COMMIT")
)
# Rollback first clears the (at most one) open result set on the
# connection, since ROLLBACK cannot be issued while a query is pending.
setMethod("dbRollback", "PostgreSQLConnection",
          def = function(conn, ...) {
            rsList <- dbListResults(conn)
            if (length(rsList))
              dbClearResult(rsList[[1]])
            postgresqlTransactionStatement(conn, "ROLLBACK")
          }
)
setMethod("dbBeginTransaction", "PostgreSQLConnection",
          def = function(conn, ...) postgresqlTransactionStatement(conn, "BEGIN")
)
##
## Class: DBIResult
##
# Result-set class; its Id embeds the driver, connection, and result ids,
# which the coercions below slice back out.
setClass("PostgreSQLResult", representation("DBIResult", "PostgreSQLObject"))
setAs("PostgreSQLResult", "PostgreSQLConnection",
      def = function(from) new("PostgreSQLConnection", Id = as(from, "integer")[1:3])
)
setAs("PostgreSQLResult", "PostgreSQLDriver",
      def = function(from) new("PostgreSQLDriver", Id = as(from, "integer")[1:2])
)
setMethod("dbClearResult", "PostgreSQLResult",
          def = function(res, ...) postgresqlCloseResult(res, ...),
          valueClass = "logical"
)
# Fetch n rows (normalizing a NULL result to an empty data.frame).
setMethod("fetch", signature(res="PostgreSQLResult", n="numeric"),
          def = function(res, n, ...){
            out <- postgresqlFetch(res, n, ...)
            if(is.null(out))
              out <- data.frame(out)
            out
          },
          valueClass = "data.frame"
)
# Fetch with n missing: n=0 requests the driver's default chunk size.
setMethod("fetch",
          signature(res="PostgreSQLResult", n="missing"),
          def = function(res, n, ...){
            out <- postgresqlFetch(res, n=0, ...)
            if(is.null(out))
              out <- data.frame(out)
            out
          },
          valueClass = "data.frame"
)
# Result-set metadata accessors; thin wrappers over dbGetInfo fields.
setMethod("dbGetInfo", "PostgreSQLResult",
          def = function(dbObj, ...) postgresqlResultInfo(dbObj, ...),
          valueClass = "list"
)
# The SQL text of the statement that produced this result (or character(0)).
setMethod("dbGetStatement", "PostgreSQLResult",
          def = function(res, ...){
            st <- dbGetInfo(res, "statement")[[1]]
            if(is.null(st))
              st <- character()
            st
          },
          valueClass = "character"
)
# Column names of the result set (or character(0) when unknown).
setMethod("dbListFields",
          signature(conn="PostgreSQLResult", name="missing"),
          def = function(conn, name, ...){
            flds <- dbGetInfo(conn, "fields")$fields$name
            if(is.null(flds))
              flds <- character()
            flds
          },
          valueClass = "character"
)
setMethod("dbColumnInfo", "PostgreSQLResult",
          def = function(res, ...) postgresqlDescribeFields(res, ...),
          valueClass = "data.frame"
)
setMethod("dbGetRowsAffected", "PostgreSQLResult",
          def = function(res, ...) dbGetInfo(res, "rowsAffected")[[1]],
          valueClass = "numeric"
)
setMethod("dbGetRowCount", "PostgreSQLResult",
          def = function(res, ...) dbGetInfo(res, "rowCount")[[1]],
          valueClass = "numeric"
)
# TRUE once all rows of the result have been fetched.
setMethod("dbHasCompleted", "PostgreSQLResult",
          def = function(res, ...) dbGetInfo(res, "completed")[[1]] == 1,
          valueClass = "logical"
)
# Exception info, queried via the result's connection id slice.
setMethod("dbGetException", "PostgreSQLResult",
          def = function(conn, ...){
            id <- as(conn, "integer")[1:2]
            .Call("RS_PostgreSQL_getException", id, PACKAGE = .PostgreSQLPkgName)
          },
          valueClass = "list" ## TODO: should be a DBIException?
)
setMethod("summary", "PostgreSQLResult",
          def = function(object, ...) postgresqlDescribeResult(object, ...)
)
# Map an R object to the corresponding PostgreSQL column type name.
setMethod("dbDataType",
          signature(dbObj = "PostgreSQLObject", obj = "ANY"),
          def = function(dbObj, obj, ...) postgresqlDataType(obj, ...),
          valueClass = "character"
)
## MODIFIED : -- sameer
# Sanitize R names into legal PostgreSQL identifiers, checking against the
# package's keyword list.
setMethod("make.db.names",
          signature(dbObj="PostgreSQLObject", snames = "character"),
          def = function(dbObj, snames,keywords,unique, allow.keywords,...){
            make.db.names.default(snames, keywords = .PostgreSQLKeywords,unique, allow.keywords)
          },
          valueClass = "character"
)
setMethod("SQLKeywords", "PostgreSQLObject",
          def = function(dbObj, ...) .PostgreSQLKeywords,
          valueClass = "character"
)
# Is each name a reserved SQL keyword?
# NOTE(review): isSQLKeyword.default returns a logical vector, so the
# declared valueClass = "character" looks wrong — confirm against DBI's
# generic before changing it.
setMethod("isSQLKeyword",
          signature(dbObj="PostgreSQLObject", name="character"),
          def = function(dbObj, name,keywords,case, ...){
            isSQLKeyword.default(name, keywords = .PostgreSQLKeywords)
          },
          valueClass = "character"
)
## extension to the DBI 0.1-4
# Apply a function over groups of rows in a result set; implemented by
# postgresqlDBApply.
setGeneric("dbApply", def = function(res, ...) standardGeneric("dbApply"))
setMethod("dbApply", "PostgreSQLResult",
          def = function(res, ...) postgresqlDBApply(res, ...)
)
## Fix: removed the stray trailing comma after the `def =` argument, which
## passed an extra empty argument to setMethod().
| /R/PostgreSQL.R | no_license | jcdny/RPostgreSQL | R | false | false | 13,409 | r |
## PostgreSQL.R
## Last Modified: $Date$
## This package was developed as a part of Summer of Code program organized by Google.
## Thanks to David A. James & Saikat DebRoy, the authors of RMySQL package.
## Code from RMySQL package was reused with the permission from the authors.
## Also Thanks to my GSoC mentor Dirk Eddelbuettel for helping me in the development.
##
## Constants
##
##.PostgreSQLRCS <- "$Id: PostgreSQL.R,v 0.1 2008/06/10 14:00:00$"
.PostgreSQLPkgName <- "RPostgreSQL"
.PostgreSQLVersion <- "0.1-0" ##package.description(.PostgreSQLPkgName, fields = "Version")
.PostgreSQL.NA.string <- "\\N" ## on input, PostgreSQL interprets \N as NULL (NA)
setOldClass("data.frame") ## to appease setMethod's signature warnings...
## ------------------------------------------------------------------
## Begin DBI extensions:
##
## dbBeginTransaction
##
setGeneric("dbBeginTransaction",
def = function(conn, ...)
standardGeneric("dbBeginTransaction"),
valueClass = "logical"
)
##
## End DBI extensions
## ------------------------------------------------------------------
##
## Class: DBIObject
##
setClass("PostgreSQLObject", representation("DBIObject", "dbObjectId", "VIRTUAL"))
##
## Class: dbDriver
##
PostgreSQL <- function(max.con=16, fetch.default.rec = 500, force.reload=FALSE) {
postgresqlInitDriver(max.con = max.con, fetch.default.rec = fetch.default.rec,
force.reload = force.reload)
}
##
## Class: DBIDriver
##
## Concrete driver class; instances are created by PostgreSQL() above.
setClass("PostgreSQLDriver", representation("DBIDriver", "PostgreSQLObject"))
## coerce (extract) any PostgreSQLObject into a PostgreSQLDriver
## (the first two integers of the internal Id identify the driver)
setAs("PostgreSQLObject", "PostgreSQLDriver",
def = function(from) new("PostgreSQLDriver", Id = as(from, "integer")[1:2])
)
## Release the driver and its resources; returns a logical success flag.
setMethod("dbUnloadDriver", "PostgreSQLDriver",
def = function(drv, ...) postgresqlCloseDriver(drv, ...),
valueClass = "logical"
)
setMethod("dbGetInfo", "PostgreSQLDriver",
def = function(dbObj, ...) postgresqlDriverInfo(dbObj, ...)
)
## All open connections, pulled out of the driver's info list.
setMethod("dbListConnections", "PostgreSQLDriver",
def = function(drv, ...) dbGetInfo(drv, "connectionIds")[[1]]
)
setMethod("summary", "PostgreSQLDriver",
def = function(object, ...) postgresqlDescribeDriver(object, ...)
)
##
## Class: DBIConnection
##
setClass("PostgreSQLConnection", representation("DBIConnection", "PostgreSQLObject"))
## Open a new connection; connection parameters are forwarded via ...
setMethod("dbConnect", "PostgreSQLDriver",
def = function(drv, ...) postgresqlNewConnection(drv, ...),
valueClass = "PostgreSQLConnection"
)
## Convenience: dbConnect("PostgreSQL", ...) instantiates the driver first.
setMethod("dbConnect", "character",
def = function(drv, ...) postgresqlNewConnection(dbDriver(drv), ...),
valueClass = "PostgreSQLConnection"
)
## clone a connection
setMethod("dbConnect", "PostgreSQLConnection",
def = function(drv, ...) postgresqlCloneConnection(drv, ...),
valueClass = "PostgreSQLConnection"
)
setMethod("dbDisconnect", "PostgreSQLConnection",
def = function(conn, ...) postgresqlCloseConnection(conn, ...),
valueClass = "logical"
)
## Package-specific generic: escape string values for safe inclusion
## inside SQL string literals.
setGeneric("dbEscapeStrings", def = function(conn, string, ...) standardGeneric("dbEscapeStrings"))
setMethod("dbEscapeStrings",
signature(conn="PostgreSQLConnection", string="character"),
def = function(conn, string, ...) postgresqlEscapeStrings(conn, string, ...),
valueClass = "character"
)
## Submit a statement and return a result-set object without fetching.
setMethod("dbSendQuery",
signature(conn = "PostgreSQLConnection", statement = "character"),
def = function(conn, statement,...) postgresqlExecStatement(conn, statement,...),
valueClass = "PostgreSQLResult"
)
## Submit, fetch and clear in one call.
setMethod("dbGetQuery",
signature(conn = "PostgreSQLConnection", statement = "character"),
def = function(conn, statement, ...) postgresqlQuickSQL(conn, statement, ...)
)
## Last exception recorded on this connection; errors out if the
## connection handle has already expired.
setMethod("dbGetException", "PostgreSQLConnection",
def = function(conn, ...){
if(!isIdCurrent(conn))
stop(paste("expired", class(conn)))
.Call("RS_PostgreSQL_getException", as(conn, "integer"),
PACKAGE = .PostgreSQLPkgName)
},
valueClass = "list"
)
setMethod("dbGetInfo", "PostgreSQLConnection",
def = function(dbObj, ...) postgresqlConnectionInfo(dbObj, ...)
)
## Result sets currently open on this connection.
setMethod("dbListResults", "PostgreSQLConnection",
def = function(conn, ...) dbGetInfo(conn, "rsId")[[1]]
)
setMethod("summary", "PostgreSQLConnection",
def = function(object, ...) postgresqlDescribeConnection(object, ...)
)
## convenience methods
## List user tables by querying pg_tables, excluding the two system
## schemas; any extra arguments are appended to the SQL text by paste().
setMethod("dbListTables", "PostgreSQLConnection",
def = function(conn, ...){
sql <- paste("select tablename from pg_tables where schemaname !='information_schema'",
"and schemaname !='pg_catalog'", ...)
tables <- dbGetQuery(conn, sql)
if (is.null(tables) || nrow(tables) == 0) {
character(0)
} else {
tables[, 1]
}
},
valueClass = "character"
)
## Read a whole table into a data.frame.
setMethod("dbReadTable", signature(conn="PostgreSQLConnection", name="character"),
def = function(conn, name, ...) postgresqlReadTable(conn, name, ...),
valueClass = "data.frame"
)
## Write a data.frame out as a table; returns a logical success flag.
setMethod("dbWriteTable",
signature(conn="PostgreSQLConnection", name="character", value="data.frame"),
def = function(conn, name, value, ...){
postgresqlWriteTable(conn, name, value, ...)
},
valueClass = "logical"
)
## write table from filename (TODO: connections)
## When `value` is a character scalar it is treated as the path of a
## delimited file to bulk-import.
setMethod("dbWriteTable",
signature(conn="PostgreSQLConnection", name="character", value="character"),
def = function(conn, name, value, ...){
postgresqlImportFile(conn, name, value, ...)
},
valueClass = "logical"
)
## Does `name` (optionally "schema.table") exist?  Checks the pg_tables
## catalog, ignoring the system schemas.
setMethod("dbExistsTable",
signature(conn="PostgreSQLConnection", name="character"),
def = function(conn, name, ...){
## Edd 09 Oct 2009: Fusion of patches by Joe Conway and Prasenjit Kapat
## Both the schema and the table component are now passed through
## postgresqlEscapeStrings so a quote character in the name cannot
## break out of the SQL string literal (previously only the table
## part was escaped).
names <- strsplit(name, ".", fixed=TRUE)[[1]]
if (length(names) == 2) { # format was "public.sometable"
res <- dbGetQuery(conn,
paste("select schemaname,tablename from pg_tables where ",
"schemaname !='information_schema' ",
"and schemaname !='pg_catalog' and schemaname='",
postgresqlEscapeStrings(conn, names[1]), "' and tablename='", postgresqlEscapeStrings(conn, names[2]), "'", sep=""))
} else {
res <- dbGetQuery(conn,
paste("select tablename from pg_tables where ",
"schemaname !='information_schema' and schemaname !='pg_catalog' ",
"and tablename='", postgresqlEscapeStrings(conn, names[1]), "'", sep=""))
}
## zero matching rows -> FALSE, one or more -> TRUE
return(as.logical(dim(res)[1]))
},
valueClass = "logical"
)
## Drop `name` if it exists; TRUE on success, FALSE if the table is
## absent or the DROP failed.
setMethod("dbRemoveTable",
signature(conn="PostgreSQLConnection", name="character"),
def = function(conn, name, ...){
if(dbExistsTable(conn, name)){
## ErrorClass is defined elsewhere in the package -- presumably
## "try-error", so this tests whether try() caught a failure;
## TODO confirm.
rc <- try(dbGetQuery(conn, paste("DROP TABLE", postgresqlQuoteId(name))))
!inherits(rc, ErrorClass)
}
else FALSE
},
valueClass = "logical"
)
## return field names (no metadata)
## NOTE(review): `name` is interpolated into the SQL string without
## escaping here (unlike dbExistsTable), so a name containing a quote
## breaks the query -- consider postgresqlEscapeStrings(conn, name).
setMethod("dbListFields",
signature(conn="PostgreSQLConnection", name="character"),
def = function(conn, name, ...){
flds <- dbGetQuery(conn, paste("SELECT a.attname FROM pg_class c,pg_attribute a,pg_type t WHERE c.relname = '",
name,"' and a.attnum > 0 and a.attrelid = c.oid and a.atttypid = t.oid",sep=""))[,1]
if(length(flds)==0)
flds <- character()
flds
},
valueClass = "character"
)
## Stored-procedure calls are not supported by this driver.
setMethod("dbCallProc", "PostgreSQLConnection",
def = function(conn, ...) .NotYetImplemented()
)
setMethod("dbCommit", "PostgreSQLConnection",
def = function(conn, ...) postgresqlTransactionStatement(conn, "COMMIT")
)
## Rollback clears any pending result set first so the ROLLBACK
## statement can actually be issued.
setMethod("dbRollback", "PostgreSQLConnection",
def = function(conn, ...) {
rsList <- dbListResults(conn)
if (length(rsList))
dbClearResult(rsList[[1]])
postgresqlTransactionStatement(conn, "ROLLBACK")
}
)
setMethod("dbBeginTransaction", "PostgreSQLConnection",
def = function(conn, ...) postgresqlTransactionStatement(conn, "BEGIN")
)
##
## Class: DBIResult
##
setClass("PostgreSQLResult", representation("DBIResult", "PostgreSQLObject"))
## A result's Id embeds its connection (first 3 integers) and driver
## (first 2 integers), so both can be recovered by coercion.
setAs("PostgreSQLResult", "PostgreSQLConnection",
def = function(from) new("PostgreSQLConnection", Id = as(from, "integer")[1:3])
)
setAs("PostgreSQLResult", "PostgreSQLDriver",
def = function(from) new("PostgreSQLDriver", Id = as(from, "integer")[1:2])
)
setMethod("dbClearResult", "PostgreSQLResult",
def = function(res, ...) postgresqlCloseResult(res, ...),
valueClass = "logical"
)
## Fetch records from a result set; a NULL from the C layer is
## normalised to an (empty) data.frame so callers always get one.
setMethod("fetch", signature(res="PostgreSQLResult", n="numeric"),
def = function(res, n, ...){
out <- postgresqlFetch(res, n, ...)
if(is.null(out))
out <- data.frame(out)
out
},
valueClass = "data.frame"
)
## Missing n: forwards n = 0 to postgresqlFetch.
setMethod("fetch",
signature(res="PostgreSQLResult", n="missing"),
def = function(res, n, ...){
out <- postgresqlFetch(res, n=0, ...)
if(is.null(out))
out <- data.frame(out)
out
},
valueClass = "data.frame"
)
setMethod("dbGetInfo", "PostgreSQLResult",
def = function(dbObj, ...) postgresqlResultInfo(dbObj, ...),
valueClass = "list"
)
## SQL text of the statement that produced this result; empty character
## vector when unavailable.
setMethod("dbGetStatement", "PostgreSQLResult",
def = function(res, ...){
st <- dbGetInfo(res, "statement")[[1]]
if(is.null(st))
st <- character()
st
},
valueClass = "character"
)
## Field names of a result set, taken from the result's own metadata
## rather than a catalog query; empty character vector when unknown.
setMethod("dbListFields",
signature(conn="PostgreSQLResult", name="missing"),
def = function(conn, name, ...){
field_names <- dbGetInfo(conn, "fields")$fields$name
if (is.null(field_names)) character() else field_names
},
valueClass = "character"
)
## Per-column metadata (names, types, ...) of the result set.
setMethod("dbColumnInfo", "PostgreSQLResult",
def = function(res, ...) postgresqlDescribeFields(res, ...),
valueClass = "data.frame"
)
setMethod("dbGetRowsAffected", "PostgreSQLResult",
def = function(res, ...) dbGetInfo(res, "rowsAffected")[[1]],
valueClass = "numeric"
)
setMethod("dbGetRowCount", "PostgreSQLResult",
def = function(res, ...) dbGetInfo(res, "rowCount")[[1]],
valueClass = "numeric"
)
## TRUE once the whole result set has been fetched (the C layer
## reports completion as the integer 1).
setMethod("dbHasCompleted", "PostgreSQLResult",
def = function(res, ...) dbGetInfo(res, "completed")[[1]] == 1,
valueClass = "logical"
)
## Exception info is looked up via the driver part of the result's Id
## (its first two integers).
setMethod("dbGetException", "PostgreSQLResult",
def = function(conn, ...){
id <- as(conn, "integer")[1:2]
.Call("RS_PostgreSQL_getException", id, PACKAGE = .PostgreSQLPkgName)
},
valueClass = "list" ## TODO: should be a DBIException?
)
setMethod("summary", "PostgreSQLResult",
def = function(object, ...) postgresqlDescribeResult(object, ...)
)
## Map an R object to the corresponding PostgreSQL column type name.
setMethod("dbDataType",
signature(dbObj = "PostgreSQLObject", obj = "ANY"),
def = function(dbObj, obj, ...) postgresqlDataType(obj, ...),
valueClass = "character"
)
## MODIFIED : -- sameer
## Legalise names for use as SQL identifiers.  Note the `keywords`
## argument is accepted but ignored: the PostgreSQL keyword list is
## always used.
setMethod("make.db.names",
signature(dbObj="PostgreSQLObject", snames = "character"),
def = function(dbObj, snames,keywords,unique, allow.keywords,...){
make.db.names.default(snames, keywords = .PostgreSQLKeywords,unique, allow.keywords)
},
valueClass = "character"
)
setMethod("SQLKeywords", "PostgreSQLObject",
def = function(dbObj, ...) .PostgreSQLKeywords,
valueClass = "character"
)
## NOTE(review): isSQLKeyword.default returns a logical, yet valueClass
## is declared "character" -- likely should be "logical"; confirm before
## changing.  `keywords` and `case` are likewise accepted but ignored.
setMethod("isSQLKeyword",
signature(dbObj="PostgreSQLObject", name="character"),
def = function(dbObj, name,keywords,case, ...){
isSQLKeyword.default(name, keywords = .PostgreSQLKeywords)
},
valueClass = "character"
)
## extension to the DBI 0.1-4
## dbApply: apply a function over a result set as rows are fetched;
## implemented by postgresqlDBApply.  The stray trailing comma after
## the def argument (which left an empty trailing argument in the
## setMethod() call) has been removed.
setGeneric("dbApply", def = function(res, ...) standardGeneric("dbApply"))
setMethod("dbApply", "PostgreSQLResult",
def = function(res, ...) postgresqlDBApply(res, ...)
)
|
## plot3: draw the three energy sub-metering series for 1-2 Feb 2007
## from the UCI household power consumption data and save the figure as
## plot3.png in the working directory.  Requires sqldf (for
## read.csv.sql) to be attached by the caller; downloads and unzips the
## raw data on first use.
plot3 <- function(){
## Only if the file household_power_consumption.txt isn't on the working directory
if(!file.exists("./household_power_consumption.txt")){
##downloading the zip file
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","./data_file.zip",method="auto")
##unzip the file
unzip("./data_file.zip")
}
##Extracting only 1/2/2007 and 2/2/2007 (filtered in SQL so the full
##file is never loaded into memory)
data <- read.csv.sql("./household_power_consumption.txt",sql = 'select * from file where Date = "1/2/2007" OR Date = "2/2/2007"', sep = ";")
##replacing ? (the file's missing-value marker) by NA
data[data == "?"] <- NA
##creating a new combined variable date + time
data$datetime<-as.POSIXct(paste(data$Date,data$Time), format="%d/%m/%Y %H:%M:%S")
##open the png device before plotting so sizes and legends are laid
##out for the 480x480 canvas
png(file = "plot3.png",width=480,height=480,units="px")
##creating the plot: empty frame first, then one line per sub-meter
plot(data$datetime,data$Sub_metering_1,xlab="",ylab="Energy sub metering",type="n")
lines(data$datetime,data$Sub_metering_1,col="black")
lines(data$datetime,data$Sub_metering_2,col="red")
lines(data$datetime,data$Sub_metering_3,col="blue")
legend("topright", lwd =.75,cex=.75, col = c("black","red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
dev.off()
} | /plot3.R | no_license | sergiofbertolin/ExData_Plotting1 | R | false | false | 1,338 | r | plot3 <- function(){
## Only if the file household_power_consumption.txt isn't on the working directory
if(!file.exists("./household_power_consumption.txt")){
##downloading the zip file
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","./data_file.zip",method="auto")
##unzip the file
unzip("./data_file.zip")
}
##Exctracting only 1/2/2007 and 2/2/2007
data <- read.csv.sql("./household_power_consumption.txt",sql = 'select * from file where Date = "1/2/2007" OR Date = "2/2/2007"', sep = ";")
##replacing ? by NA
data[data == "?"] <- NA
##creating a new combined variable date + time
data$datetime<-as.POSIXct(paste(data$Date,data$Time), format="%d/%m/%Y %H:%M:%S")
##call function png before plot to correct adjusting the plots and legends
png(file = "plot3.png",width=480,height=480,units="px")
##creating the plot
plot(data$datetime,data$Sub_metering_1,xlab="",ylab="Energy sub metering",type="n")
lines(data$datetime,data$Sub_metering_1,col="black")
lines(data$datetime,data$Sub_metering_2,col="red")
lines(data$datetime,data$Sub_metering_3,col="blue")
legend("topright", lwd =.75,cex=.75, col = c("black","red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
dev.off()
} |
# load libraries
library(shiny)
library(tidyverse)
library(tidytext)
library(glue)
library(plotly)
library(dplyr)
library(janitor)
library(shinyWidgets)
# creating user interface UI: a single link to the pre-rendered EDA
# report.
# NOTE(review): the href is an absolute file:// path on the author's
# machine, so the link only works locally -- serve the report as a
# static asset (e.g. from www/) for deployment.
ui<- fluidPage(
tags$div(class = "submit",
tags$a(href = "file:///C:/Users/noura/Documents/Data-Anaytics/content/post/2021-06-16-a05-exploratory-data-analysis/report.html",
"Link to the exploratory data analysis report",
target="_blank")
))
# no server-side logic is needed for a static link
server <- function(input, output,session) {}
# Shiny App
shinyApp(ui = ui, server = server)
| /public/post/2021-06-16-a05-exploratory-data-analysis/A05.R | no_license | N0o0UR/Data-Anaytics | R | false | false | 579 | r |
# load libraries
library(shiny)
library(tidyverse)
library(tidytext)
library(glue)
library(plotly)
library(dplyr)
library(janitor)
library(shinyWidgets)
# creating user interface UI
ui<- fluidPage(
tags$div(class = "submit",
tags$a(href = "file:///C:/Users/noura/Documents/Data-Anaytics/content/post/2021-06-16-a05-exploratory-data-analysis/report.html",
"Link to the exploratory data analysis report",
target="_blank")
))
server <- function(input, output,session) {}
# Shiny App
shinyApp(ui = ui, server = server)
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Load packages
library(shiny)
library(shinythemes)
library(tidyverse)
library(parlitools)
# Load data: the scheduled-polls CSV must sit next to app.R
#elections_general <- read_csv("data/test1.csv")
#elections_local <- read_csv("data/test2.csv")
polls <- read_csv("Scheduled_polls.csv")
# drop the two columns that are never displayed in the table
polls2 <- polls %>% select(-`Authority Type`,-`County Council Name`)
ui <- fluidPage(
# drop-down: parliamentary vs local elections
selectInput(inputId = "type", label = strong("Type of Election"),
choices = c("Parliamentary","Local"), #unique(trend_data$type),
selected = "Parliamentary"),
# drop-down: which nation(s) to show
selectInput(inputId = "countries", label = strong("Countries of Interest"),
choices = c("UK","GB","England","Scotland","Wales"), #unique(trend_data$type),
selected = "UK"),
# drop-down: year of interest (the only input the server currently uses
# for filtering)
selectInput(inputId = "year", label = strong("Year Interest"),
choices = c("2018","2019","2020"), #unique(trend_data$type),
selected = "2020"),
# Select date range to be plotted
dateRangeInput("date", strong("Date range"), start = "2019-10-01", end = "2020-12-31",
min = "2019-10-01", max = "2020-12-31"),
tableOutput("polls"),
# drop-down: column to sort the table by (consumed by arrange() in the
# server)
selectInput(inputId = "sort", label = strong("Sort By"),
choices = c("Authority Name","Year","Poll"), #unique(trend_data$type),
selected = "Year")
)
## Server: render the polls table filtered to the selected year and
## ordered by the chosen column.  The original code called
## arrange(input$sort), which sorts by a constant character string and
## therefore never reorders the rows; .data[[input$sort]] sorts by the
## actual column named by the input.
server <- function(input, output) {
output$polls <- renderTable(
polls2 %>%
filter(Year == input$year) %>%
arrange(.data[[input$sort]])
)
}
shinyApp(ui = ui, server = server)
#
# # Define UI for application that draws a histogram
# ui <- fluidPage(theme = shinytheme("lumen"),
# titlePanel("Elections Calendar"), # Application title
# sidebarLayout(
# sidebarPanel(
#
# # Sidebar with a slider input for number of bins
# selectInput(inputId = "type", label = strong("Type of Election"),
# choices = c("Parliamentary","Local"), #unique(trend_data$type),
# selected = "Parliamentary"),
#
# # Sidebar with a slider input for number of bins
# selectInput(inputId = "countries", label = strong("Countries of Interest"),
# choices = c("UK","GB","England","Scotland","Wales"), #unique(trend_data$type),
# selected = "UK"),
#
# # Select date range to be plotted
# dateRangeInput("date", strong("Date range"), start = "2019-10-01", end = "2020-12-31",
# min = "2019-10-01", max = "2020-12-31"),
#
# # Select whether to overlay smooth trend line
# checkboxInput(inputId = "smoother", label = strong("This is a future check box choice"), value = FALSE)
# #
# # # Show a plot of the generated distribution
# ),
#
# mainPanel(
# renderDataTable(iris, options = list(
# pageLength = 5,
# initComplete = I('function(setting, json) { alert("done"); }')
# )) )
# )
# )
#
#
# # Define server logic required to draw a histogram
# server <- function(input, output) {
# output$dto <- renderDataTable({polls})
# # output$distPlot <- renderPlot({
# # # generate bins based on input$bins from ui.R
# # x <- faithful[, 2]
# # bins <- seq(min(x), max(x), length.out = input$bins + 1)
# #
# # # draw the histogram with the specified number of bins
# # hist(x, breaks = bins, col = 'darkgray', border = 'white')
# }
#
# # Create Shiny object
# shinyApp(ui = ui, server = server)
#
#
# #
# # # Define UI
# # ui <- fluidPage(theme = shinytheme("lumen"),
# # titlePanel("Google Trend Index"),
# # sidebarLayout(
# # sidebarPanel(
# #
# # # Select type of trend to plot
# # selectInput(inputId = "type", label = strong("Trend index"),
# # choices = unique(trend_data$type),
# # selected = "Travel"),
# #
# # # Select date range to be plotted
# # dateRangeInput("date", strong("Date range"), start = "2007-01-01", end = "2017-07-31",
# # min = "2007-01-01", max = "2017-07-31"),
# #
# # # Select whether to overlay smooth trend line
# # checkboxInput(inputId = "smoother", label = strong("Overlay smooth trend line"), value = FALSE),
# #
# # # Display only if the smoother is checked
# # conditionalPanel(condition = "input.smoother == true",
# # sliderInput(inputId = "f", label = "Smoother span:",
# # min = 0.01, max = 1, value = 0.67, step = 0.01,
# # animate = animationOptions(interval = 100)),
# # HTML("Higher values give more smoothness.")
# # )
# # ),
# #
# # # Output: Description, lineplot, and reference
# # mainPanel(
# # plotOutput(outputId = "lineplot", height = "300px"),
# # textOutput(outputId = "desc"),
# # tags$a(href = "https://www.google.com/finance/domestic_trends", "Source: Google Domestic Trends", target = "_blank")
# # )
# # )
# # )
# #
# # # Define server function
# # server <- function(input, output) {
# #
# # # Subset data
# # selected_trends <- reactive({
# # req(input$date)
# # validate(need(!is.na(input$date[1]) & !is.na(input$date[2]), "Error: Please provide both a start and an end date."))
# # validate(need(input$date[1] < input$date[2], "Error: Start date should be earlier than end date."))
# # trend_data %>%
# # filter(
# # type == input$type,
# # date > as.POSIXct(input$date[1]) & date < as.POSIXct(input$date[2]
# # ))
# # })
# #
# #
# # # Create scatterplot object the plotOutput function is expecting
# # output$lineplot <- renderPlot({
# # color = "#434343"
# # par(mar = c(4, 4, 1, 1))
# # plot(x = selected_trends()$date, y = selected_trends()$close, type = "l",
# # xlab = "Date", ylab = "Trend index", col = color, fg = color, col.lab = color, col.axis = color)
# # # Display only if smoother is checked
# # if(input$smoother){
# # smooth_curve <- lowess(x = as.numeric(selected_trends()$date), y = selected_trends()$close, f = input$f)
# # lines(smooth_curve, col = "#E6553A", lwd = 3)
# # }
# # })
# #
# # # Pull in description of trend
# # output$desc <- renderText({
# # trend_text <- filter(trend_description, type == input$type) %>% pull(text)
# # paste(trend_text, "The index is set to 1.0 on January 1, 2004 and is calculated only for US search traffic.")
# # })
# # }
# #
# # # Create Shiny object
# # shinyApp(ui = ui, server = server) | /app.R | no_license | CharlotteOwen93/Election-Calendar | R | false | false | 8,352 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Load packages
library(shiny)
library(shinythemes)
library(tidyverse)
library(parlitools)
# Load data
#elections_general <- read_csv("data/test1.csv")
#elections_local <- read_csv("data/test2.csv")
polls <- read_csv("Scheduled_polls.csv")
polls2 <- polls %>% select(-`Authority Type`,-`County Council Name`)
ui <- fluidPage(
# Sidebar with a slider input for number of bins
selectInput(inputId = "type", label = strong("Type of Election"),
choices = c("Parliamentary","Local"), #unique(trend_data$type),
selected = "Parliamentary"),
# Sidebar with a slider input for number of bins
selectInput(inputId = "countries", label = strong("Countries of Interest"),
choices = c("UK","GB","England","Scotland","Wales"), #unique(trend_data$type),
selected = "UK"),
# Sidebar with a slider input for number of bins
selectInput(inputId = "year", label = strong("Year Interest"),
choices = c("2018","2019","2020"), #unique(trend_data$type),
selected = "2020"),
# Select date range to be plotted
dateRangeInput("date", strong("Date range"), start = "2019-10-01", end = "2020-12-31",
min = "2019-10-01", max = "2020-12-31"),
tableOutput("polls"),
# Sidebar with a slider input for how to arrange data
selectInput(inputId = "sort", label = strong("Sort By"),
choices = c("Authority Name","Year","Poll"), #unique(trend_data$type),
selected = "Year")
)
server <- function(input, output) {
output$polls <- renderTable(
polls_small <- polls2 %>%
filter(Year==input$year) %>%
arrange(input$sort)
)
}
shinyApp(ui = ui, server = server)
#
# # Define UI for application that draws a histogram
# ui <- fluidPage(theme = shinytheme("lumen"),
# titlePanel("Elections Calendar"), # Application title
# sidebarLayout(
# sidebarPanel(
#
# # Sidebar with a slider input for number of bins
# selectInput(inputId = "type", label = strong("Type of Election"),
# choices = c("Parliamentary","Local"), #unique(trend_data$type),
# selected = "Parliamentary"),
#
# # Sidebar with a slider input for number of bins
# selectInput(inputId = "countries", label = strong("Countries of Interest"),
# choices = c("UK","GB","England","Scotland","Wales"), #unique(trend_data$type),
# selected = "UK"),
#
# # Select date range to be plotted
# dateRangeInput("date", strong("Date range"), start = "2019-10-01", end = "2020-12-31",
# min = "2019-10-01", max = "2020-12-31"),
#
# # Select whether to overlay smooth trend line
# checkboxInput(inputId = "smoother", label = strong("This is a future check box choice"), value = FALSE)
# #
# # # Show a plot of the generated distribution
# ),
#
# mainPanel(
# renderDataTable(iris, options = list(
# pageLength = 5,
# initComplete = I('function(setting, json) { alert("done"); }')
# )) )
# )
# )
#
#
# # Define server logic required to draw a histogram
# server <- function(input, output) {
# output$dto <- renderDataTable({polls})
# # output$distPlot <- renderPlot({
# # # generate bins based on input$bins from ui.R
# # x <- faithful[, 2]
# # bins <- seq(min(x), max(x), length.out = input$bins + 1)
# #
# # # draw the histogram with the specified number of bins
# # hist(x, breaks = bins, col = 'darkgray', border = 'white')
# }
#
# # Create Shiny object
# shinyApp(ui = ui, server = server)
#
#
# #
# # # Define UI
# # ui <- fluidPage(theme = shinytheme("lumen"),
# # titlePanel("Google Trend Index"),
# # sidebarLayout(
# # sidebarPanel(
# #
# # # Select type of trend to plot
# # selectInput(inputId = "type", label = strong("Trend index"),
# # choices = unique(trend_data$type),
# # selected = "Travel"),
# #
# # # Select date range to be plotted
# # dateRangeInput("date", strong("Date range"), start = "2007-01-01", end = "2017-07-31",
# # min = "2007-01-01", max = "2017-07-31"),
# #
# # # Select whether to overlay smooth trend line
# # checkboxInput(inputId = "smoother", label = strong("Overlay smooth trend line"), value = FALSE),
# #
# # # Display only if the smoother is checked
# # conditionalPanel(condition = "input.smoother == true",
# # sliderInput(inputId = "f", label = "Smoother span:",
# # min = 0.01, max = 1, value = 0.67, step = 0.01,
# # animate = animationOptions(interval = 100)),
# # HTML("Higher values give more smoothness.")
# # )
# # ),
# #
# # # Output: Description, lineplot, and reference
# # mainPanel(
# # plotOutput(outputId = "lineplot", height = "300px"),
# # textOutput(outputId = "desc"),
# # tags$a(href = "https://www.google.com/finance/domestic_trends", "Source: Google Domestic Trends", target = "_blank")
# # )
# # )
# # )
# #
# # # Define server function
# # server <- function(input, output) {
# #
# # # Subset data
# # selected_trends <- reactive({
# # req(input$date)
# # validate(need(!is.na(input$date[1]) & !is.na(input$date[2]), "Error: Please provide both a start and an end date."))
# # validate(need(input$date[1] < input$date[2], "Error: Start date should be earlier than end date."))
# # trend_data %>%
# # filter(
# # type == input$type,
# # date > as.POSIXct(input$date[1]) & date < as.POSIXct(input$date[2]
# # ))
# # })
# #
# #
# # # Create scatterplot object the plotOutput function is expecting
# # output$lineplot <- renderPlot({
# # color = "#434343"
# # par(mar = c(4, 4, 1, 1))
# # plot(x = selected_trends()$date, y = selected_trends()$close, type = "l",
# # xlab = "Date", ylab = "Trend index", col = color, fg = color, col.lab = color, col.axis = color)
# # # Display only if smoother is checked
# # if(input$smoother){
# # smooth_curve <- lowess(x = as.numeric(selected_trends()$date), y = selected_trends()$close, f = input$f)
# # lines(smooth_curve, col = "#E6553A", lwd = 3)
# # }
# # })
# #
# # # Pull in description of trend
# # output$desc <- renderText({
# # trend_text <- filter(trend_description, type == input$type) %>% pull(text)
# # paste(trend_text, "The index is set to 1.0 on January 1, 2004 and is calculated only for US search traffic.")
# # })
# # }
# #
# # # Create Shiny object
# # shinyApp(ui = ui, server = server) |
library(rethinking)
##6.1 -- hominin species data: brain volume (cc) and body mass (kg)
sppnames <- c("afarensis", "africanus", "habilis", "boisei", "rudolfensis", "ergaster", "sapiens")
brainvolcc <- c(438, 452, 612, 521, 752, 871, 1350)
masskg <- c(37.0, 35.5, 34.5, 41.5, 55.5, 61.0, 53.5)
d <- data.frame(species = sppnames, brain = brainvolcc, mass = masskg)
##6.2 -- simple linear model of brain volume on body mass
m6.1 <- lm(brain ~ mass, data = d)
##6.3 -- R^2 computed by hand from the residual variance
1 - var(resid(m6.1))/var(d$brain)
#6.4 -- increasingly flexible polynomial fits (overfitting demonstration)
m6.2 <- lm(brain ~ mass + I(mass^2), data = d)
##6.5
m6.3 <- lm(brain ~ mass + I(mass^2) + I(mass^3), data = d)
m6.4 <- lm(brain ~ mass + I(mass^2) + I(mass^3) + I(mass^4), data = d)
m6.5 <- lm(brain ~ mass + I(mass^2) + I(mass^3) + I(mass^4) + I(mass^5), data = d)
m6.6 <- lm(brain ~ mass + I(mass^2) + I(mass^3) + I(mass^4) + I(mass^5) + I(mass^6), data = d)
#6.6 -- intercept-only (underfitting) model
m6.7 <- lm(brain ~ 1, data = d)
##6.8 -- leave-one-out sensitivity check: refit the linear model with
## each species dropped in turn and overlay the fitted lines.
plot(brain~mass, d, col = "slateblue")
## seq_len() is safe even for zero-row data, unlike 1:nrow(d)
for(i in seq_len(nrow(d))){
d.new <- d[-i,]
m0 <- lm(brain ~ mass, d.new)
abline(m0, col = col.alpha("black", 0.5))
}
#6.10 -- deviance of the lm fit: -2 * log-likelihood
m6.1 <- lm(brain ~ mass, d)
(-2) * logLik(m6.1)
#6.11 -- same model fit with rethinking::map on standardised mass,
# deviance then computed by hand from the MAP estimates
d$mass.s <- (d$mass - mean(d$mass))/sd(d$mass)
m6.8 <- map(
alist(
brain ~ dnorm(mu, sigma),
mu <- a + b * mass.s
), data = d,
start = list(a = mean(d$brain), b=0, sigma = sd(d$brain)),
method = "Nelder-Mead"
)
## MAP estimates -> per-observation normal log-likelihood -> deviance
## (log = TRUE spelled out; T is a reassignable shorthand)
theta <- coef(m6.8)
dev <- (-2) * sum( dnorm(
d$brain,
mean = theta[1] + theta[2] * d$mass.s,
sd = theta[3],
log = TRUE
))
dev
##6.12 -- simulate in-sample vs out-of-sample deviance for models with
## k parameters (1e4 replicates per k); each column of dev holds
## mean-in, mean-out, sd-in, sd-out for one k
N <- 20
kseq <- 1:5
dev <- sapply(kseq, function(k){
print(k);
r <- replicate(1e4, sim.train.test(N = N, k = k));
c(mean(r[1,]), mean(r[2,]), sd(r[1,]), sd(r[2,]) );
})
##6.14 -- plot in-sample (filled points) vs out-of-sample (open points,
## offset by 0.1) deviance with +/- 1 SD whiskers
plot(1:5, dev[1,], ylim = c(min(dev[1:2,])-5, max(dev[1:2,]) + 10),
xlim = c(1,5.1), xlab = "number of parameters", ylab = "deviance",
pch=16, col = rangi2)
mtext(concat( "N =", N))
points( (1:5) + 0.1, dev[2,])
for(i in kseq){
pts_in <- dev[1,i] + c(-1,+1) * dev[3,i]
pts_out <- dev[2,i] + c(-1, +1) * dev[4,i]
lines(c(i,i), pts_in, col = rangi2)
lines(c(i,i) +.1, pts_out)
}
##6.15 -- MAP regression of stopping distance on speed (cars data),
## then draw 1000 samples from the posterior
data(cars)
m <- map(
alist(
dist ~ dnorm(mu, sigma),
mu <- a + b * speed,
a ~ dnorm(0,100),
b ~ dunif(0,10),
sigma ~ dunif(0,30)
), data = cars
)
post <- extract.samples(m, n = 1000)
##6.16 -- pointwise log-likelihood: rows = observations, cols = samples
n_samples <- 1000
ll <- sapply(seq_len(n_samples),
function(s){
mu <- post$a[s] + post$b[s] * cars$speed
## use the s-th posterior draw of sigma.  The original transcript
## passed the whole post$sigma vector, recycling 1000 sigmas
## against 50 observations and silently producing a misshapen
## 1000-row ll matrix (the book's code 6.16 uses post$sigma[s]).
dnorm(cars$dist, mu, post$sigma[s], log = TRUE)
})
##6.17 -- log pointwise predictive density per observation
n_cases <- nrow(cars)
lppd <- sapply(seq_len(n_cases), function(i) log_sum_exp(ll[i,]) - log(n_samples))
##6.18 -- effective number of parameters: variance of the log-likelihood
pWAIC <- sapply(seq_len(n_cases), function(i) var(ll[i,]))
##6.19 -- WAIC = -2 * (lppd - penalty)
-2 * (sum(lppd) - sum(pWAIC))
##6.20 -- standard error of the WAIC estimate
waic_vec <- -2 * (lppd - pWAIC)
sqrt(n_cases * var(waic_vec))
##6.21 -- milk data, complete cases only; rescale neocortex percent to
## a proportion
data(milk)
d <- milk[complete.cases(milk),]
d$neocortex <- d$neocortex.perc/100
dim(d)
##6.22 -- four nested models of milk energy; sigma is fit on the log
## scale (exp(log.sigma)) so it stays positive
a.start <- mean(d$kcal.per.g)
sigma.start <- log(sd(d$kcal.per.g))
## intercept only
m6.11 <- map(
alist(
kcal.per.g ~ dnorm(a, exp(log.sigma))
), data=d, start = list(a = a.start, log.sigma = sigma.start)
)
## neocortex only
m6.12 <- map(
alist(
kcal.per.g ~ dnorm(mu, exp(log.sigma)),
mu <- a + bn * neocortex
), data=d, start = list(a = a.start, bn = 0, log.sigma = sigma.start)
)
## log(body mass) only
m6.13 <- map(
alist(
kcal.per.g ~ dnorm(mu, exp(log.sigma)),
mu <- a + bm * log(mass)
), data=d, start = list(a = a.start, bm = 0, log.sigma = sigma.start)
)
## both predictors
m6.14 <- map(
alist(
kcal.per.g ~ dnorm(mu, exp(log.sigma)),
mu <- a + bn * neocortex + bm*log(mass)
), data=d, start = list(a = a.start,bn = 0, bm = 0, log.sigma = sigma.start)
)
##6.23 -- WAIC of the full model
WAIC(m6.14)
##6.24 -- rank all four models by WAIC
(milk.models <- compare(m6.11, m6.12, m6.13, m6.14))
##6.25
plot(milk.models, SE = TRUE, dSE = TRUE)
##6.26 -- probability that a WAIC difference of 6.7 (SE 7.26) is
## actually negative
diff <- rnorm(1e5, 6.7, 7.26)
sum(diff<0)/1e5
##6.27 -- coefficient tables across models
coeftab(m6.11,m6.12,m6.13,m6.14)
###6.28
plot(coeftab(m6.11, m6.12, m6.13, m6.14))
##6.29 -- counterfactual predictions over neocortex at fixed mass = 4.5
nc.seq <- seq(from = 0.5, to = 0.8, length.out = 30)
d.predict <- list(
kcal.per.g = rep(0,30),
neocortex = nc.seq,
mass = rep(4.5, 30)
)
pred.m6.14 <- link(m6.14, data = d.predict)
mu <- apply(pred.m6.14,2,mean)
mu.PI <- apply(pred.m6.14,2,PI)
plot(kcal.per.g ~ neocortex, d, col = rangi2)
lines(nc.seq, mu, lty = 2)
lines(nc.seq, mu.PI[1,], lty = 2)
lines(nc.seq,mu.PI[2,], lty = 2)
##6.30 -- ensemble (WAIC-weighted) predictions over the same grid
milk.ensemble <- ensemble(m6.11, m6.12, m6.13, m6.14, data = d.predict)
mu <- apply(milk.ensemble$link, 2, mean)
mu.PI <- apply(milk.ensemble$link,2,PI)
lines(nc.seq,mu)
shade(mu.PI, nc.seq)
| /Chapter 6/CH6.R | no_license | grassdeerdeer/Statistical-Rethinking | R | false | false | 4,418 | r | library(rethinking)
## Statistical Rethinking, Chapter 6 (overfitting and information criteria).
## The "##6.x" tags reference the R code boxes in the book. Functions such as
## map(), col.alpha(), sim.train.test(), concat() and the colour rangi2 come
## from the rethinking package (loaded elsewhere in this file).
##6.1
# Hominin species data: brain volume (cc) and body mass (kg).
sppnames <- c("afarensis", "africanus", "habilis", "boisei", "rudolfensis", "ergaster", "sapiens")
brainvolcc <- c(438, 452, 612, 521, 752, 871, 1350)
masskg <- c(37.0, 35.5, 34.5, 41.5, 55.5, 61.0, 53.5)
d <- data.frame(species = sppnames, brain = brainvolcc, mass = masskg)
##6.2
# Simple linear model of brain volume on body mass.
m6.1 <- lm(brain ~ mass, data = d)
##6.3
# R^2 computed by hand: 1 - residual variance / outcome variance.
1 - var(resid(m6.1))/var(d$brain)
#6.4
m6.2 <- lm(brain ~ mass + I(mass^2), data = d)
##6.5
# Increasingly flexible polynomial models (degrees 3 through 6) to
# demonstrate overfitting; with 7 data points the degree-6 fit is exact.
m6.3 <- lm(brain ~ mass + I(mass^2) + I(mass^3), data = d)
m6.4 <- lm(brain ~ mass + I(mass^2) + I(mass^3) + I(mass^4), data = d)
m6.5 <- lm(brain ~ mass + I(mass^2) + I(mass^3) + I(mass^4) + I(mass^5), data = d)
m6.6 <- lm(brain ~ mass + I(mass^2) + I(mass^3) + I(mass^4) + I(mass^5) + I(mass^6), data = d)
#6.6
# Intercept-only ("underfit") model for contrast.
m6.7 <- lm(brain ~ 1, data = d)
##6.8
# Sensitivity of the linear fit: refit dropping one row at a time and
# overlay each regression line.
plot(brain~mass, d, col = "slateblue")
for(i in 1:nrow(d)){
    d.new <- d[-i,]
    m0 <- lm(brain ~ mass, d.new)
    abline(m0, col = col.alpha("black", 0.5))
}
#6.10
# Deviance of the linear model: -2 * log-likelihood.
m6.1 <- lm(brain ~ mass, d)
(-2) * logLik(m6.1)
#6.11
# Same deviance computed by hand from a MAP fit on standardized mass.
d$mass.s <- (d$mass - mean(d$mass))/sd(d$mass)
m6.8 <- map(
    alist(
        brain ~ dnorm(mu, sigma),
        mu <- a + b * mass.s
    ), data = d,
    start = list(a = mean(d$brain), b=0, sigma = sd(d$brain)),
    method = "Nelder-Mead"
)
theta <- coef(m6.8)
dev <- (-2) * sum( dnorm(
    d$brain,
    mean = theta[1] + theta[2] * d$mass.s,
    sd = theta[3],
    log = T
))
dev
##6.12
# Simulate in-sample vs out-of-sample deviance for models with k = 1..5
# parameters (1e4 replicates per k -- this is slow).
N <- 20
kseq <- 1:5
dev <- sapply(kseq, function(k){
    print(k);
    r <- replicate(1e4, sim.train.test(N = N, k = k));
    c(mean(r[1,]), mean(r[2,]), sd(r[1,]), sd(r[2,]) );
})
##6.14
# Plot train (blue) and test (open) deviance with +/- 1 SD bars.
plot(1:5, dev[1,], ylim = c(min(dev[1:2,])-5, max(dev[1:2,]) + 10),
    xlim = c(1,5.1), xlab = "number of parameters", ylab = "deviance",
    pch=16, col = rangi2)
mtext(concat( "N =", N))
points( (1:5) + 0.1, dev[2,])
for(i in kseq){
    pts_in <- dev[1,i] + c(-1,+1) * dev[3,i]
    pts_out <- dev[2,i] + c(-1, +1) * dev[4,i]
    lines(c(i,i), pts_in, col = rangi2)
    lines(c(i,i) +.1, pts_out)
}
##6.15
# WAIC computed "by hand" for a MAP regression of stopping distance on speed.
data(cars)
m <- map(
    alist(
        dist ~ dnorm(mu, sigma),
        mu <- a + b * speed,
        a ~ dnorm(0,100),
        b ~ dunif(0,10),
        sigma ~ dunif(0,30)
    ), data = cars
)
post <- extract.samples(m, n = 1000)
##6.16
# Log-likelihood of each observation under each posterior sample:
# ll is an n_cases x n_samples matrix.
n_samples <- 1000
ll <- sapply(1:n_samples,
    function(s){
        mu <- post$a[s] + post$b[s] * cars$speed
        # NOTE(review): the book's code 6.16 uses post$sigma[s]; passing the
        # whole sigma vector recycles it across observations -- likely a
        # transcription bug, confirm against the book.
        dnorm(cars$dist, mu, post$sigma, log = T)
    })
##6.17
# Log pointwise predictive density per observation.
n_cases <- nrow(cars)
lppd <- sapply(1:n_cases, function(i) log_sum_exp(ll[i,]) - log(n_samples))
##6.18
# Effective number of parameters: variance of the log-likelihood per case.
pWAIC <- sapply(1:n_cases, function(i) var(ll[i,]))
##6.19
# WAIC itself.
-2 * (sum(lppd) - sum(pWAIC))
##6.20
# Standard error of WAIC from the pointwise values.
waic_vec <- -2 * (lppd - pWAIC)
sqrt(n_cases * var(waic_vec))
##6.21
# Primate milk data, complete cases only; neocortex as a proportion.
data(milk)
d <- milk[complete.cases(milk),]
d$neocortex <- d$neocortex.perc/100
dim(d)
##6.22
# Four nested models of milk energy: intercept-only, neocortex,
# log(mass), and both predictors. sigma fit on the log scale.
a.start <- mean(d$kcal.per.g)
sigma.start <- log(sd(d$kcal.per.g))
m6.11 <- map(
    alist(
        kcal.per.g ~ dnorm(a, exp(log.sigma))
    ), data=d, start = list(a = a.start, log.sigma = sigma.start)
)
m6.12 <- map(
    alist(
        kcal.per.g ~ dnorm(mu, exp(log.sigma)),
        mu <- a + bn * neocortex
    ), data=d, start = list(a = a.start, bn = 0, log.sigma = sigma.start)
)
m6.13 <- map(
    alist(
        kcal.per.g ~ dnorm(mu, exp(log.sigma)),
        mu <- a + bm * log(mass)
    ), data=d, start = list(a = a.start, bm = 0, log.sigma = sigma.start)
)
m6.14 <- map(
    alist(
        kcal.per.g ~ dnorm(mu, exp(log.sigma)),
        mu <- a + bn * neocortex + bm*log(mass)
    ), data=d, start = list(a = a.start,bn = 0, bm = 0, log.sigma = sigma.start)
)
##6.23
WAIC(m6.14)
##6.24
# WAIC model comparison table.
(milk.models <- compare(m6.11, m6.12, m6.13, m6.14))
##6.25
plot(milk.models, SE = TRUE, dSE = TRUE)
##6.26
# Probability that a WAIC difference of 6.7 (SE 7.26) is below zero.
diff <- rnorm(1e5, 6.7, 7.26)
sum(diff<0)/1e5
##6.27
coeftab(m6.11,m6.12,m6.13,m6.14)
###6.28
plot(coeftab(m6.11, m6.12, m6.13, m6.14))
##6.29
# Counterfactual predictions over neocortex at fixed mass = 4.5.
nc.seq <- seq(from = 0.5, to = 0.8, length.out = 30)
d.predict <- list(
    kcal.per.g = rep(0,30),
    neocortex = nc.seq,
    mass = rep(4.5, 30)
)
pred.m6.14 <- link(m6.14, data = d.predict)
mu <- apply(pred.m6.14,2,mean)
mu.PI <- apply(pred.m6.14,2,PI)
plot(kcal.per.g ~ neocortex, d, col = rangi2)
lines(nc.seq, mu, lty = 2)
lines(nc.seq, mu.PI[1,], lty = 2)
lines(nc.seq,mu.PI[2,], lty = 2)
##6.30
# Model-averaged (Akaike-weighted ensemble) predictions over the same grid.
milk.ensemble <- ensemble(m6.11, m6.12, m6.13, m6.14, data = d.predict)
mu <- apply(milk.ensemble$link, 2, mean)
mu.PI <- apply(milk.ensemble$link,2,PI)
lines(nc.seq,mu)
shade(mu.PI, nc.seq)
|
# Parse the DC Department of Human Resources salary PDF (September 2015
# release) into a tidy CSV of employee salary records.
# NOTE(review): hard-coded absolute setwd() makes this machine-specific.
setwd("C:/Users/Benjamin/Desktop/Projects/dchr_salary/")
# Source the first helper script in code/ (presumably defines outersect()
# and disclose_date() used below -- confirm against the repo).
source(paste0(getwd(),"/code/",dir(paste0(getwd(),"/code"))[1]))
# Second PDF in pdf/ is the release being processed here.
pdf <- dir(paste0(getwd(),"/pdf"))[2]
link <- paste0(getwd(),"/pdf/",pdf)
# Extract one table per page in parallel; rbind into a list of data frames.
salary <- foreach(i=1:get_n_pages(link),.combine='rbind') %dopar% tabulizer::extract_tables(link,pages=i,method="data.frame",header=FALSE)
# Some pages come back with fewer than the expected 8 columns; pad the
# missing V1..V8 columns with NA and remember which pages were patched.
tmp <- c()
for(a in 1:length(salary)){
    if(length(salary[[a]]) != 8) {
        for(b in outersect(names(salary[[a]]),c("V1","V2","V3","V4","V5","V6","V7","V8"))) {
            tmp <- c(tmp,a)
            salary[[a]][paste(b)] <- NA
        }
    }
}
# Patched pages have their columns in a different order; rename so that
# rbind lines everything up (V6 is the padded/unused column).
for(d in tmp){
    names(salary[[d]]) <- c("V1","V2","V3","V4","V5","V7","V8","V6")
}
salary <- do.call(rbind,salary)
names(salary) <- c("Type_of_Appointment","Agency_Name","Last_Name","First_Name","Position_Title","Annual_Rate","Hire_Date","V6")
# Drop repeated header rows, sanitize text fields (replace non-ASCII bytes
# with "-"), parse dates and dollar amounts, then write the deduplicated CSV.
salary %>%
    filter(Type_of_Appointment!="Type Appt ") %>%
    mutate(V6=NULL,
        Export_Date=disclose_date(2015,3),
        Hire_Date=as.Date(Hire_Date,"%m/%d/%Y"),
        Type_of_Appointment=gsub("[^\\x{00}-\\x{7f}]","-",Type_of_Appointment,perl=TRUE),
        Type_of_Appointment=gsub("---","-",Type_of_Appointment),
        Last_Name=gsub("[^\\x{00}-\\x{7f}]","-",Last_Name,perl=TRUE),
        Last_Name=gsub("---","-",Last_Name),
        First_Name=gsub("[^\\x{00}-\\x{7f}]","-",First_Name,perl=TRUE),
        First_Name=gsub("---","-",First_Name),
        Position_Title=gsub("[^\\x{00}-\\x{7f}]","-",Position_Title,perl=TRUE),
        Position_Title=gsub("---","-",Position_Title),
        Annual_Rate=gsub("\\$","",Annual_Rate),
        Annual_Rate=as.numeric(gsub(",","",Annual_Rate))) %>%
    distinct %>%
    write.csv(.,"csv/2015_09.csv",row.names=F)
source(paste0(getwd(),"/code/",dir(paste0(getwd(),"/code"))[1]))
pdf <- dir(paste0(getwd(),"/pdf"))[2]
link <- paste0(getwd(),"/pdf/",pdf)
salary <- foreach(i=1:get_n_pages(link),.combine='rbind') %dopar% tabulizer::extract_tables(link,pages=i,method="data.frame",header=FALSE)
tmp <- c()
for(a in 1:length(salary)){
if(length(salary[[a]]) != 8) {
for(b in outersect(names(salary[[a]]),c("V1","V2","V3","V4","V5","V6","V7","V8"))) {
tmp <- c(tmp,a)
salary[[a]][paste(b)] <- NA
}
}
}
for(d in tmp){
names(salary[[d]]) <- c("V1","V2","V3","V4","V5","V7","V8","V6")
}
salary <- do.call(rbind,salary)
names(salary) <- c("Type_of_Appointment","Agency_Name","Last_Name","First_Name","Position_Title","Annual_Rate","Hire_Date","V6")
salary %>%
filter(Type_of_Appointment!="Type Appt ") %>%
mutate(V6=NULL,
Export_Date=disclose_date(2015,3),
Hire_Date=as.Date(Hire_Date,"%m/%d/%Y"),
Type_of_Appointment=gsub("[^\\x{00}-\\x{7f}]","-",Type_of_Appointment,perl=TRUE),
Type_of_Appointment=gsub("---","-",Type_of_Appointment),
Last_Name=gsub("[^\\x{00}-\\x{7f}]","-",Last_Name,perl=TRUE),
Last_Name=gsub("---","-",Last_Name),
First_Name=gsub("[^\\x{00}-\\x{7f}]","-",First_Name,perl=TRUE),
First_Name=gsub("---","-",First_Name),
Position_Title=gsub("[^\\x{00}-\\x{7f}]","-",Position_Title,perl=TRUE),
Position_Title=gsub("---","-",Position_Title),
Annual_Rate=gsub("\\$","",Annual_Rate),
Annual_Rate=as.numeric(gsub(",","",Annual_Rate))) %>%
distinct %>%
write.csv(.,"csv/2015_09.csv",row.names=F) |
## File Name: frm_append_list.R
## File Version: 0.13
frm_append_list <- function(list1, list2, overwrite=TRUE)
{
    ## Copy every named entry of list2 into list1. When overwrite is FALSE,
    ## entries whose names already exist in list1 are left untouched.
    keys <- names(list2)
    if (!overwrite) {
        keys <- setdiff(keys, names(list1))
    }
    # Assign element-by-element so NULL values behave exactly as before
    # (assigning NULL via [[<- removes the entry).
    for (key in keys) {
        list1[[key]] <- list2[[key]]
    }
    return(list1)
}
| /R/frm_append_list.R | no_license | alexanderrobitzsch/mdmb | R | false | false | 446 | r | ## File Name: frm_append_list.R
## File Version: 0.13
frm_append_list <- function(list1, list2, overwrite=TRUE)
{
list2_names <- names(list2)
if ( ! overwrite ){
list1_names <- names(list1)
list2_names <- setdiff(list2_names, list1_names)
}
N2 <- length(list2_names)
if (N2>0){
for (nn in 1:N2){
list1[[ list2_names[nn] ]] <- list2[[ list2_names[nn] ]]
}
}
return(list1)
}
|
## SPvis: fetch daily price history for an ASX-listed stock from Yahoo
## Finance and draw an interactive ggvis chart of the closing share price
## together with 50-day simple (SMA) and exponential (EMA) moving averages.
##
## Args:
##   Ticker: ASX ticker symbol without the ".AX" suffix (e.g. "BHP").
##   months: number of most recent months of history to display (default 6).
##
## Returns:
##   A ggvis object (rendered when printed at top level).
SPvis <- function(Ticker, months = 6){
    library(ggplot2)
    library(quantmod)
    library(ggvis)
    library(dplyr)
    library(grid)
    library(TTR)
    library(lubridate)
    library(scales)
    # Download daily OHLC data from Yahoo; ASX tickers need the ".AX" suffix.
    histdata <- getSymbols.yahoo(paste(Ticker, ".AX", sep = ""), env = .GlobalEnv, return.class = "data.frame", auto.assign = FALSE)
    # Dates come back as row names; move them into a proper column (V7).
    histdata[, 7] <- row.names(histdata)
    histdata$V7 <- as.POSIXlt(histdata$V7, format = "%Y-%m-%d")
    histdata$V7 <- as.Date(histdata$V7)
    rownames(histdata) <- NULL
    # Keep only the date (col 7) and the close price (col 4).
    histdata <- histdata[, c(7, 4)]
    colnames(histdata) <- c("Date", "SharePrice")
    histdata$Date <- as.Date(histdata$Date)
    data <- histdata
    rownames(data) <- NULL
    # 50-day simple and exponential moving averages of the close.
    data$MA50 <- SMA(x = data$SharePrice, n = 50)
    data$EMA50 <- EMA(x = data$SharePrice, n = 50)
    # Restrict to the requested window; lubridate's months() gives the offset.
    data <- data[data$Date > Sys.Date() - months(months), ]
    data <- na.exclude(data)
    # Kept for compatibility with the old ggplot version (unused by ggvis).
    data$min <- min(data$SharePrice) * 0.8
    ## Share Price Chart: price line, both moving averages, shaded ribbon.
    b <- data %>%
        ggvis(x = ~Date, y = ~SharePrice) %>%
        layer_lines(stroke := "darkorange", strokeWidth := 1) %>%
        # BUG FIX: removed a trailing comma here that produced an empty
        # argument and made the call error at runtime.
        layer_lines(x = ~Date, y = ~MA50, stroke := "red", strokeWidth := 1) %>%
        layer_lines(x = ~Date, y = ~EMA50, stroke := "black", strokeWidth := 1) %>%
        layer_ribbons(x = ~Date, y = ~SharePrice, fill := "orange", fillOpacity := 0.5, y2 = 0) %>%
        scale_datetime("x", expand = c(0, 0)) %>%
        # NOTE(review): 0:0.6 evaluates to just 0 -- confirm the intended
        # y-axis scale specification.
        add_axis("y", scale = 0:0.6)
    b
}
SPvis<- function(Ticker,months=6){
library(ggplot2)
library(quantmod)
library(ggvis)
library(dplyr)
library(grid)
library(TTR)
library(lubridate)
library(scales)
histdata <- getSymbols.yahoo(paste(Ticker,".AX",sep = ""),env = .GlobalEnv,return.class = "data.frame",auto.assign=FALSE)
histdata[,7] <- row.names(histdata)
histdata$V7 <- as.POSIXlt(histdata$V7,format = "%Y-%m-%d")
histdata$V7 <- as.Date(histdata$V7)
rownames(histdata) <- NULL
histdata <- histdata[,c(7,4)]
colnames(histdata) <- c("Date","SharePrice")
histdata$Date <- as.Date(histdata$Date)
data <- histdata
rownames(data) <- NULL
data$MA50 <- SMA(x = data$SharePrice,n=50)
data$EMA50 <- EMA(x = data$SharePrice,n = 50)
data <- data[data$Date > Sys.Date() - months(months),]
data <- na.exclude(data)
data$min <- min(data$SharePrice)*0.8
##Share Price Chart
b <- data %>%
ggvis(x = ~Date,y = ~SharePrice) %>%
layer_lines(stroke := "darkorange", strokeWidth := 1) %>%
layer_lines(x = ~Date,y = ~MA50,stroke := "red",strokeWidth := 1,)%>%
layer_lines(x = ~Date,y = ~EMA50,stroke := "black",strokeWidth := 1)%>%
layer_ribbons(x = ~Date, y = ~SharePrice, fill := "orange",fillOpacity := 0.5, y2 = 0) %>%
scale_datetime("x",expand = c(0,0)) %>%
add_axis("y",scale = 0:0.6)
#layer_ribbons(aes(x = Date,ymin=min, ymax=SharePrice), fill="orange",alpha=0.5)+
#geom_line(aes(x = Date,y = SharePrice),color = "darkorange",lwd=0.5) +
#geom_line(aes(x = Date,y = MA50,color="MA50"),lwd=0.5) +
#geom_line(aes(x = Date,y = EMA50,color = "EMA50"),lwd=0.5) +
#scale_color_manual(values=c("MA50" = "red","EMA50"="black"))+
#theme(legend.title=element_blank(),
#plot.title = element_text(lineheight=1,face="bold",vjust = 0.25,hjust = 0.0),
#legend.justification=c(0,0),
# legend.position=c(0,0),
# legend.background = element_rect(colour = 'lightgrey', fill = 'lightgrey'),
# plot.margin=unit(c(0,10,1,3),"mm"))+
# scale_x_date(expand=c(0,0)) +
#scale_y_continuous(labels = dollar_format(largest_with_cents = 5),
# limits = c(min(data$SharePrice)*0.8,max(data$SharePrice)*1.1),expand = c(0,0))+
#labs(title = paste(Ticker," Share Price",sep = ""),x = NULL,y = NULL)+
#geom_text(data = subset(data[nrow(data),]),aes(x = Date,y = SharePrice, label = SharePrice),hjust=1, vjust=0,size=4,colour = "darkgreen") +
#geom_text(data = subset(data[nrow(data),]),aes(x = Date,y = SharePrice, label = Date),hjust=1, vjust=-1.5,size=4,colour = "darkgreen")
b
} |
\name{recluster}
\alias{recluster}
\title{Re-clustering micro-clusters}
\description{Use a macro clustering algorithm to recluster micro-clusters into a final clustering.
}
\usage{
recluster(macro, dsc, type="auto", ...)
}
\arguments{
\item{macro}{a macro clustering algorithm (class "DSC_Macro")}
\item{dsc}{a DSC object containing micro-clusters. }
\item{type}{controls which clustering is used from \code{dsc}
(typically micro-clusters).}
\item{...}{additional arguments passed on.}
}
\details{
Takes centers and weights of the micro-clusters and applies the macro clustering algorithm.
}
\value{
The object \code{macro} is altered in place and afterwards contains the resulting clustering.
}
%\seealso{
%}
\examples{
set.seed(0)
### create a data stream and a micro-clustering
stream <- DSD_Gaussians(k=3, d=3)
sample <- DSC_Sample(k=50)
update(sample, stream, 500)
sample
### recluster using k-means
kmeans <- DSC_Kmeans(k=3)
recluster(kmeans, sample)
### plot clustering
plot(kmeans, stream, main="Macro-clusters (Sampling + k-means)")
}
| /man/recluster.Rd | no_license | grobins/stream | R | false | false | 1,033 | rd | \name{recluster}
\alias{recluster}
\title{Re-clustering micro-clusters}
\description{Use a macro clustering algorithm to recluster micro-clusters into a final clustering.
}
\usage{
recluster(macro, dsc, type="auto", ...)
}
\arguments{
\item{macro}{a macro clustering algorithm (class "DSC_Macro")}
\item{dsc}{a DSC object containing micro-clusters. }
\item{type}{controls which clustering is used from \code{dsc}
(typically micro-clusters).}
\item{...}{additional arguments passed on.}
}
\details{
Takes centers and weights of the micro-clusters and applies the macro clustering algorithm.
}
\value{
The object macro is altered and contains the clustering.
}
%\seealso{
%}
\examples{
set.seed(0)
### create a data stream and a micro-clustering
stream <- DSD_Gaussians(k=3, d=3)
sample <- DSC_Sample(k=50)
update(sample, stream, 500)
sample
### recluster using k-means
kmeans <- DSC_Kmeans(k=3)
recluster(kmeans, sample)
### plot clustering
plot(kmeans, stream, main="Macro-clusters (Sampling + k-means)")
}
|
# Compute hidden-layer activation probabilities from a visible layer (or
# from the previous hidden layer when stacking RBMs into a DBN).
#
# @keyword internal
#
# Args:
#   vis: visible layer as a matrix (rows = cases) or a plain vector
#   weights: trained weight matrix, bias terms included (see RBM)
#   y: optional binary label matrix, for classification RBMs only
#   y.weights: optional label weight/bias matrix, paired with y
#
# Returns:
#   Matrix of hidden-unit probabilities computed with the trained weights.
VisToHid <- function(vis, weights, y, y.weights) {
    layer <- vis
    if (is.null(dim(layer))) {
        # Promote a bare vector to a matrix so %*% conforms.
        # NOTE(review): this yields a length(vis) x 1 matrix, which only
        # conforms when weights has a single row -- confirm intended usage.
        layer <- matrix(layer, nrow = length(layer))
    }
    # Pre-activation: data term, plus the label term in the supervised case.
    if (missing(y) && missing(y.weights)) {
        activation <- layer %*% weights
    } else {
        activation <- layer %*% weights + y %*% y.weights
    }
    # Elementwise logistic squashing to probabilities.
    return(1 / (1 + exp(-activation)))
}
# Reconstruct the visible layer (or, when label weights are supplied, the
# label units) of an RBM from a hidden layer.
#
# @keyword internal
#
# Args:
#   inv: hidden ("invisible") layer, one row per case
#   weights: trained weight matrix including bias terms (use RBM)
#   y.weights: optional label weight matrix; when given, labels are
#     reconstructed instead of the visible layer
#
# Returns:
#   Matrix of reconstructed visible-unit (or label-unit) probabilities.
HidToVis <- function(inv, weights, y.weights) {
    # Choose which weight matrix drives the reconstruction.
    w <- if (missing(y.weights)) weights else y.weights
    1 / (1 + exp(-(inv %*% t(w))))
}
# Logistic (sigmoid) activation function
#
# @keyword internal
# Vectorized: maps any real-valued input elementwise into (0, 1).
logistic <- function(x) {
    1/(1+exp(-x))
}
# Energy of an RBM configuration
#
# @keyword internal
# BUG FIX: the original read `Energy <- Energy <- function(...)` -- a
# redundant double assignment, reduced to a single one.
#
# Computes E = -v W h' (minus the label term y Wy h' when supervised).
#
# Args:
#   vis: visible layer (row vector / matrix), bias unit included
#   inv: hidden ("invisible") layer (row vector / matrix), bias unit included
#   weights: weight matrix including the bias terms
#   y: optional binary label row vector
#   y.weights: optional trained label weights (use RBM), bias terms included
#
# Returns:
#   The energy of the RBM for the given configuration (1 x 1 matrix).
Energy <- function(vis, inv, weights, y, y.weights) {
    if (!missing(y) & !missing(y.weights)) {
        # Supervised energy includes the label-to-hidden interaction.
        E <- -(vis %*% weights %*% t(inv)) - (y %*% y.weights %*% t(inv))
    } else {
        # Unsupervised energy.
        E <- -(vis %*% weights %*% t(inv))
    }
    return(E)
}
# Single-step (k = 1) contrastive divergence for training an RBM
#
# @keyword internal
CD <- function(vis, weights, y, y.weights) {
    # Run one Gibbs step (visible -> hidden -> reconstructed visible ->
    # hidden) and return the CD-1 gradients.
    #
    # Args:
    #   vis: visible layer matrix (rows = cases); column 1 is the bias unit
    #   weights: weight matrix of shape n_features * n_hidden, incl. bias
    #   y: optional binary label matrix (supervised training only)
    #   y.weights: optional label weight matrix of shape n_labels * n_hidden
    #
    # Returns:
    #   A list with 'grad.weights' (and 'grad.y.weights' when supervised).
    #
    # Positive phase: hidden probabilities driven by the data.
    if (missing(y) & missing(y.weights)) {
        H0 <- VisToHid(vis, weights)
        H0[,1] <- 1
    } else {
        # Supervised: labels also contribute to the hidden activations.
        H0 <- VisToHid(vis, weights, y, y.weights)
        H0[,1] <- 1
    }
    # Stochastically binarize the hidden layer. This consumes nrow*ncol
    # uniforms; the draw order matters for reproducibility under set.seed().
    unif <- runif(nrow(H0) * (ncol(H0)))
    H0.states <- H0 > matrix(unif, nrow=nrow(H0), ncol= ncol(H0))
    # Positive associations always use the probabilities, not the states.
    pos.phase <- t(vis) %*% H0
    if (!missing(y)) {
        pos.phase.y <- t(y) %*% H0
    }
    # Negative phase: reconstruct the visible layer from the sampled states.
    V1 <- HidToVis(H0.states, weights)
    # Re-pin the bias unit to 1 after reconstruction.
    V1[,1] <- 1
    if (missing(y) & missing(y.weights) ) {
        H1 <- VisToHid(V1, weights)
    } else {
        # Reconstruct the labels, then the hidden layer given both.
        Y1 <- HidToVis(H0, weights, y.weights )
        Y1[,1] <- 1
        H1 <- VisToHid(V1, weights, Y1, y.weights)
    }
    # Negative associations, again from the probabilities.
    neg.phase <- t(V1) %*% H1
    if (!missing(y) & !missing(y.weights)) {
        neg.phase.y <- t(Y1) %*% H1
    }
    # CD-1 gradients: data-driven minus model-driven associations.
    grad.weights <- pos.phase - neg.phase
    if (!missing(y) & !missing(y.weights)) {
        grad.y.weights <- pos.phase.y - neg.phase.y
        return(list('grad.weights' = grad.weights,'grad.y.weights' = grad.y.weights))
    } else {
        return(list('grad.weights' = grad.weights ))
    }
}
# Binarize (one-hot encode) a vector of class labels
#
# @keyword internal
#
# Args:
#   labels: vector of class labels (e.g. digits 0-9). The distinct values
#     need not be contiguous or start at zero.
#
# Returns:
#   A matrix with one row per label and one column per distinct class
#   (columns ordered by sorted unique label value), containing a single 1
#   in the column of each row's class. Usable in the RBM function.
LabelBinarizer <- function(labels) {
    classes <- sort(unique(labels))
    y <- matrix(0, length(labels), length(classes))
    # Vectorized one-hot assignment via a two-column index matrix. This
    # replaces the old per-row loop, which indexed column labels[i] + 1 and
    # went out of bounds for non-contiguous label sets; for the usual
    # complete 0..k label set the output is unchanged.
    y[cbind(seq_along(labels), match(labels, classes))] <- 1
    return(y)
}
| /R/InternalFun.R | no_license | danvdm/RBM | R | false | false | 6,019 | r | # Function to calculate hidden layer from data
#
# @keyword internal
#
# Function for calculating hidden layer:
VisToHid <- function(vis, weights, y, y.weights) {
# Function for calculating a hidden layer.
#
# Args:
# vis: Visual layer, or hidden layer from previous layer in DBN
# weights: Trained weights including the bias terms (use RBM)
# y: Label vector if only when training an RBM for classification
# y.weights: Label weights and bias matrix, only neccessary when training a RBM for classification
#
# Returns:
# Returns a hidden layer calculated with the trained RBM weights and bias terms.
#
# Initialize the visual, or i-1 layer
V0 <- vis
if ( is.null(dim(V0))) {
# If visual is a vector create matrix
V0 <- matrix(V0, nrow= length(V0))
}
if(missing(y) & missing(y.weights)) {
# Calculate the hidden layer with the trained weights and bias
H <- 1/(1 + exp(-( V0 %*% weights)))
} else {
Y0 <- y
H <- 1/(1 + exp(- ( V0 %*% weights + Y0 %*% y.weights)))
}
return(H)
}
# Function for reconstructing data from a hidden layer
#
# @keyword internal
# Function for reconstructing visible layer:
HidToVis <- function(inv, weights, y.weights) {
# Function for reconstructing a visible layer.
#
# Args:
# inv: Invisible layer
# vis.bias: Trained visible layer bias (use RBM)
# weights: Trained weights (use RBM)
# y.weights: Label weights, only nessecessary when training a classification RBM.
#
# Returns:
# Returns a vector with reconstructed visible layer or reconstructed labels.
#
if(missing(y.weights)) {
# Reconstruct only the visible layer when y.weights is missing
V <- 1/(1 + exp(-( inv %*% t(weights)) ))
return(V)
} else {
# Reconstruct visible and labels if y.weights
Y <- 1/(1 + exp(-( inv %*% t(y.weights))))
return(Y)
}
}
# Logistic function
#
# @keyword internal
# Logistic function
logistic <- function(x) {
1/(1+exp(-x))
}
# Function to calculate the energy of a RBM
#
# @keyword internal
# Function for calculating the energy of the machine:
Energy <- Energy <- function(vis, inv, weights, y, y.weights) {
# Function for calculating the energy of a trained RBM
#
# Args:
# vis: visible layer
# weights: the weights matrix including the bias terms
# inv: invisible layer
# y: label vector (binary)
# y.weights: trained label weights (use RBM), including bias terms
#
# Returns:
# The energy of the RBM machine for label y
#
# Calculate the energy if supervised
if(!missing(y) & !missing(y.weights)){
E <- -(vis %*% weights %*% t(inv)) - (y %*% y.weights %*% t(inv))
} else {
# Calculate the energy if unsupervised
E <- -(vis %*% weights %*% t(inv))
}
# Return the energy:
return(E)
}
# Function for doing contrastive divergence CD
#
# @keyword internal
CD <- function(vis, weights, y, y.weights) {
# Function for doing k=1 contrastive divergence
#
# Args:
# vis: visible layer values vector of shape n_features * 1
# weights: weights vector of shape n_features * n_hidden
# vis.bias: bias of the visible layer
# inv.bias: bias of the invisible layer
# y: labels, only used when provided
# y.weigths: label weights of shape n_labels * n_hidden, only used when provided
# y.bias: bias term for the labels of shape n_features * 1, only used when provided
#
# Returns:
# A list with all gradients for the bias and weights; adds label bias and weights if y is provided
#
# Start positive phase
if (missing(y) & missing(y.weights)) {
# Calculate hidden layer
H0 <- VisToHid(vis, weights)
H0[,1] <- 1
} else {
# Add a layer with labels if y is provided
H0 <- VisToHid(vis, weights, y, y.weights)
H0[,1] <- 1
}
# Binarize the hidden layer:
unif <- runif(nrow(H0) * (ncol(H0)))
H0.states <- H0 > matrix(unif, nrow=nrow(H0), ncol= ncol(H0))
# Calculate positive phase, we always use the probabilities for this
pos.phase <- t(vis) %*% H0
if (!missing(y)) {
pos.phase.y <- t(y) %*% H0
}
# Start negative phase
# Reconstruct visible layer
V1 <- HidToVis(H0.states, weights)
# Set the bias unit to 1
V1[,1] <- 1
if (missing(y) & missing(y.weights) ) {
# Reconstruct hidden layer unsupervised, no need to fix the bias anymore
H1 <- VisToHid(V1, weights)
} else {
# Reconstruct labels if y is provided
Y1 <- HidToVis(H0, weights, y.weights )
# Set the bias unit to 1
Y1[,1] <- 1
# Reconstruct hidden layer supervised, no need to fix the bias anymore
H1 <- VisToHid(V1, weights, Y1, y.weights)
}
# Calculate negative associations, we alway use the probabilities for this:
neg.phase <- t(V1) %*% H1
if (!missing(y) & !missing(y.weights)) {
# Calculate negative phase y
neg.phase.y <- t(Y1) %*% H1
}
## Calculate the gradients
# Calculate gradients for the weights:
grad.weights <- pos.phase - neg.phase
if (!missing(y) & !missing(y.weights)) {
# Calculate gradients for y.weigths
grad.y.weights <- pos.phase.y - neg.phase.y
# Return list with gradients supervised
return(list('grad.weights' = grad.weights,'grad.y.weights' = grad.y.weights))
} else {
# Return list with gradients unsupervised
return(list('grad.weights' = grad.weights ))
}
}
# Function for binarizing label data
#
# TODO: Replace loop by C++ loop (rcpp?)
# @keyword internal
# Function for binarizing labels:
LabelBinarizer <- function(labels) {
# This function takes as input the labels of the trainset.
# Args:
# Labels: has to be numerical data vector from 1 to 9.
#
# Returns:
# Matrix with binarized vectors for the labels that can be used in the RBM function
#
# Initialize matrix to save label vectors:
y <- matrix(0, length(labels), length(unique(labels)))
for (i in 1:length(labels)) {
# Put a one on position of the number in vector:
y[i, labels[i] + 1] <- 1
}
return(y)
}
|
/Prática 02/Davi/pratica2.R | no_license | taciomoreira/teste | R | false | false | 1,861 | r | ||
# Configuration for a caret-based neural network training run; these
# globals are read by the training script that sources this file.
# Model parameters
model_method = "neuralnet"
#model_grid = expand.grid(layer1=2,layer2=1,layer3=1)
# Tuning grid: three hidden layers of 30, 30 and 1 units.
model_grid <- expand.grid(layer1 = 30, layer2 = 30, layer3 = 1)
#model_grid = NULL
#extra_params = list(MaxNWts = 100000, linout = TRUE)
#extra_params = list(linear.output = TRUE )
extra_params = NULL
# Cross-validation parameters
do_cv = TRUE
partition_ratio = .8 # train/validation split ratio for cross-validation
cv_folds = 2 # number of cross-validation folds
verbose_on = TRUE # print per-fold CV results?
metric = 'MAE' # metric used for evaluating cross-validation
# Misc parameters
subset_ratio = 0.01 # fraction of data used, for testing (set to 1 for full data)
create_submission = FALSE # create a submission file for Kaggle?
use_log = TRUE # take the log transform of the response?
model_method = "neuralnet"
#model_grid = expand.grid(layer1=2,layer2=1,layer3=1)
model_grid <- expand.grid(layer1 = 30, layer2 = 30, layer3 = 1)
#model_grid = NULL
#extra_params = list(MaxNWts = 100000, linout = TRUE)
#extra_params = list(linear.output = TRUE )
extra_params = NULL
# Cross-validation parameters
do_cv = TRUE
partition_ratio = .8 # for cross-validation
cv_folds = 2 # for cross-validation
verbose_on = TRUE # output cv folds results?
metric = 'MAE' # metric use for evaluating cross-validation
# Misc parameters
subset_ratio = 0.01 # for testing purposes (set to 1 for full data)
create_submission = FALSE # create a submission for Kaggle?
use_log = TRUE # take the log transform of the response?
|
\name{IsaacEtAl}
\alias{IsaacEtAl}
\alias{chiroptera.tree}
\alias{carnivora.tree}
\alias{primates.tree}
\alias{marsupialia.tree}
\alias{chiroptera.data}
\alias{carnivora.data}
\alias{primates.data}
\alias{marsupialia.data}
\title{Example dataset for the caper package}
\description{
This data set contains four species-level comparative datasets used in Isaac et al (2005)
}
\usage{
data(IsaacEtAl)
}
\format{
The datafile contains species level phylogenies and accompanying data frames of nine variables for each of four mammalian orders (Primates, Carnivora, Chiroptera and Marsupialia). The data were published in supplementary material for Isaac et al. (2005) as CAIC format files and text data files and have been converted for use in 'caper'. The data files are incomplete, with some variables having little or no data for some orders.
The variables (all saved as natural log values) are:
\describe{
\item{species.rich}{Species richness at the tips - all are set to 1 for use in \code{macrocaic}}
\item{body.mass}{The average body mass in grams}
\item{age.sexual.maturity}{Age at sexual maturity in months}
\item{gestation}{Gestation length in days}
\item{interbirth.interval}{Interbirth interval in months}
\item{litter.size}{The average number of offspring in a litter}
\item{population.density}{Population density}
\item{group.size}{Number of individuals in a typical group}
\item{mass.dimorphism}{Male mass /female mass}
\item{length.dimorphism}{Male length / female length}
}
}
\examples{
data(IsaacEtAl)
chiroptera <- comparative.data(chiroptera.tree, chiroptera.data, 'binomial', na.omit=FALSE)
carnivora <- comparative.data(carnivora.tree, carnivora.data, 'binomial', na.omit=FALSE)
primates <- comparative.data(primates.tree, primates.data, 'binomial', na.omit=FALSE)
marsupialia <- comparative.data(marsupialia.tree, marsupialia.data, 'binomial', na.omit=FALSE)
}
\references{Isaac, N., Jones, K., Gittleman, J., and Purvis, A. (2005). Correlates of species richness in mammals: Body size, life history, and ecology. American Naturalist, 165(5):600-607.}
\seealso{ caic, pgls }
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{datasets}
| /man/IsaacEtAl.Rd | no_license | cran/caper | R | false | false | 2,219 | rd | \name{IsaacEtAl}
\alias{IsaacEtAl}
\alias{chiroptera.tree}
\alias{carnivora.tree}
\alias{primates.tree}
\alias{marsupialia.tree}
\alias{chiroptera.data}
\alias{carnivora.data}
\alias{primates.data}
\alias{marsupialia.data}
\title{Example dataset for the caper package}
\description{
This data set contains four species-level comparative datasets used in Isaac et al (2005)
}
\usage{
data(IsaacEtAl)
}
\format{
The datafile contains species level phylogenies and accompanying data frames of nine variables for each of four mammalian orders (Primates, Carnivora, Chiroptera and Marsupialia). The data were published in supplementary material for Isaac et al. (2005) as CAIC format files and text data files and have been converted for use in 'caper'. The data files are incomplete, with some variables having little or no data for some orders.
The variables (all saved as natural log values) are:
\describe{
\item{species.rich}{Species richness at the tips - all are set to 1 for use in \code{macrocaic}}
\item{body.mass}{The average body mass in grams}
\item{age.sexual.maturity}{Age at sexual maturity in months}
\item{gestation}{Gestation length in days}
\item{interbirth.interval}{Interbirth interval in months}
\item{litter.size}{The average number of offspring in a litter}
\item{population.density}{Population density}
\item{group.size}{Number of individuals in a typical group}
\item{mass.dimorphism}{Male mass /female mass}
\item{length.dimorphism}{Male length / female length}
}
}
\examples{
data(IsaacEtAl)
chiroptera <- comparative.data(chiroptera.tree, chiroptera.data, 'binomial', na.omit=FALSE)
carnivora <- comparative.data(carnivora.tree, carnivora.data, 'binomial', na.omit=FALSE)
primates <- comparative.data(primates.tree, primates.data, 'binomial', na.omit=FALSE)
marsupialia <- comparative.data(marsupialia.tree, marsupialia.data, 'binomial', na.omit=FALSE)
}
\references{Isaac, N., Jones, K., Gittleman, J., and Purvis, A. (2005). Correlates of species richness in mammals: Body size, life history, and ecology. American Naturalist, 165(5):600-607.}
\seealso{ caic, pgls }
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{datasets}
|
# https://github.com/yihui/knitr-examples/blob/master/113-externalization.Rmd
# ---- pkgchk-scrap ----
#' Check whether the package contains any useless files like `.DS_Store`.
#'
#' Files currently considered "scrap" are:
#'
#' 1. ".DS_Store"
#' 2. "Thumbs.db"
#' 3. ".vscode"
#' 4. ".o" files
#'
#' @param checks A 'pkgcheck' object with full \pkg{pkgstats} summary and
#' \pkg{goodpractice} results.
#' @return Names of any items which should not be present; otherwise an empty
#' character.
#' @noRd
pkgchk_has_scrap <- function (checks) {
    # Regex alternation of file names considered "scrap".
    scrap_patterns <- paste0 (
        c (
            "^\\.DS_Store$",
            "^Thumbs.db$",
            "^\\.vscode$",
            "\\.o$"
        ),
        collapse = "|"
    )
    # gert errors anywhere other than a git repo, so scrap can only be
    # detected in git repos; fall back to an empty result elsewhere.
    tracked <- tryCatch (
        gert::git_ls (repo = checks$pkg$path)$path,
        error = function (e) NULL
    )
    if (is.null (tracked)) {
        return (character (0))
    }
    # Reduce each tracked path to its final component before matching.
    basenames <- vapply (
        decompose_path (tracked),
        function (p) utils::tail (p, 1L),
        character (1)
    )
    return (tracked [grep (scrap_patterns, basenames)])
}
# ---- output-pkgchk-scrap ----
#' Format the result of the scrap-file check for reporting.
#'
#' @param checks A 'pkgcheck' object; `checks$checks$has_scrap` holds the
#' names of any scrap files identified in the package.
#' @return A list with 'check_pass' (logical), 'summary', and 'print'
#' components; 'print' is only populated when the check fails.
#' @noRd
output_pkgchk_has_scrap <- function (checks) {

    scrap_files <- checks$checks$has_scrap
    passed <- (length (scrap_files) == 0L)

    out <- list (
        check_pass = passed,
        summary = "",
        print = ""
    )

    if (passed) {
        return (out)
    }

    out$summary <- "Package contains unexpected files."
    out$print <- list (
        msg_pre = paste0 (
            "Package contains the ",
            "following unexpected files:"
        ),
        obj = scrap_files,
        msg_post = character (0)
    )

    return (out)
}
| /R/check-scrap.R | no_license | ropensci-review-tools/pkgcheck | R | false | false | 1,838 | r | # https://github.com/yihui/knitr-examples/blob/master/113-externalization.Rmd
# ---- pkgchk-scrap ----
#' Check whether the package contains any useless files like `.DS_Store`.
#'
#' Files currently considered "scrap" are:
#'
#' 1. ".DS_Store"
#' 2. "Thumbs.db"
#' 3. ".vscode"
#' 4. ".o" files
#'
#' @param checks A 'pkgcheck' object with full \pkg{pkgstats} summary and
#' \pkg{goodpractice} results.
#' @return Names of any items which should not be present; otherwise an empty
#' character.
#' @noRd
# Identify "scrap" files (.DS_Store, Thumbs.db, .vscode, *.o) tracked in the
# package's git repository. Returns the full paths of any such files, or an
# empty character vector when none are found or the package is not a git repo.
pkgchk_has_scrap <- function (checks) {
# Have to tryCatch because gert errors anywhere other than a git repo. This
# means scrap can only be detected in git repos.
contents <- tryCatch (gert::git_ls (repo = checks$pkg$path)$path,
error = function (e) NULL
)
if (is.null (contents)) {
return (character (0))
} # not NULL!
# Keep only the terminal file/directory name of each path, so the anchored
# patterns below match regardless of directory depth.
contents_short <- vapply (
decompose_path (contents),
function (i) utils::tail (i, 1L),
character (1)
)
# Regex alternation of scrap patterns, matched against terminal names only.
# NOTE(review): the "." in "Thumbs.db" is unescaped and so matches any single
# character -- confirm whether "^Thumbs\\.db$" was intended.
scrap <- function () {
paste0 (c (
"^\\.DS_Store$",
"^Thumbs.db$",
"^\\.vscode$",
"\\.o$"
),
collapse = "|"
)
}
# Filter on the short names, but return the corresponding full paths.
return (contents [grep (scrap (), contents_short)])
}
# ---- output-pkgchk-scrap ----
# Convert the result of the scrap-file check into the standard output format:
# a list with 'check_pass' (logical), 'summary', and 'print' components.
# 'print' is only populated when scrap files were found.
output_pkgchk_has_scrap <- function (checks) {
out <- list (
check_pass = length (checks$checks$has_scrap) == 0L,
summary = "",
print = ""
)
# On failure, fill in the human-readable summary and the print payload
# listing the offending files.
if (!out$check_pass) {
out$summary <- "Package contains unexpected files."
out$print <- list (
msg_pre = paste0 (
"Package contains the ",
"following unexpected files:"
),
obj = checks$checks$has_scrap,
msg_post = character (0)
)
}
return (out)
}
|
# Baumann et al (2021) GCB Mixed Effects Model Code #
#By: Justin Baumann
#####################################################
#load packages
library(tidyverse)
library(ggplot2)
library(lme4)
library(car) #For checking multicollinearity, using performance instead
library(performance) #for checking R2 or ICC, and comparing model performances
library(ggeffects)
#library(cowplot)
#library(egg)
#library(ggpubr)
library(patchwork)
library(ggrepel)
library(ggsci)
library(parameters) #get model params
library(effects)
library(broom)
library(devtools)
#devtools::install_github("m-clark/visibly")
library(visibly)
library(rgdal)
library(raster)
library(spdep)
######################################################
#data prep
#read in data
a=read.csv('coral_recov_master_2021.csv')
a1<-a %>% mutate_if(is.character, as.factor)
nrow(a1)
head(a1)
str(a1)
########## Build recovery datasets#########
##building a recovery dataset
#remove Arabian Gulf sample (n=1)
levels(a1$region)
a2<-subset(a1, region != "Arabian Gulf")
# NOTE(review): the result of droplevels() below is printed but not assigned,
# so unused levels remain in a2/a3/a4; the final droplevels() on the full data
# frame (when building 'recov' below) is what actually drops them.
droplevels(a2$region)
#remove disturbance= disease, dredging (n=1) & COTS, Storm (n=1)
levels(a2$disturbance)
a3<-subset(a2, disturbance != "Disease, Dredging")
droplevels(a3$disturbance)
a4<-subset(a3, disturbance != "COTS, Storm")
droplevels(a4$disturbance)
#remove NAs from data
recov<-a4 %>% drop_na(calculated.recovery.rate) %>% droplevels()
tail(recov)
nrow(recov) #182 rows of data for recovery!
str(recov)
levels(recov$disturbance)
#exploring the structure of the data
#HISTOGRAM OF RECOVERY RATES
hist(recov$calculated.recovery.rate) #positively skewed normal
ggplot(recov, aes(x=calculated.recovery.rate))+
geom_histogram(binwidth=5)
#center (standardise) explanatory variable(s) (mean of zero=centering, sd=1 = scaling --doing both here)
recov$hii100km2<-scale(recov$hii100km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(recov$hii100km2)
recov$hii500km2<-scale(recov$hii500km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(recov$hii500km2)
recov$recovery_time2<-scale(recov$recovery_time, center=TRUE, scale=TRUE)
hist(recov$recovery_time2)
hist(recov$distance_to_shore_m)
recov$distance_to_shore_m2<-scale(recov$distance_to_shore_m, center=TRUE, scale=TRUE)
hist(recov$distance_to_shore_m2)
recov$dist_to_riv_m2<-scale(recov$dist_to_riv_m, center=TRUE, scale=TRUE)
hist(recov$dist_to_riv_m2)
hist(recov$grav_NC)
recov$grav_NC2<-scale(recov$grav_NC, center=TRUE, scale=TRUE)
hist(recov$grav_NC2)
hist(recov$cml_scr)
recov$cml_scr2<-scale(recov$cml_scr, center=TRUE, scale=TRUE)
hist(recov$cml_scr2)
hist(recov$gravity.Grav_tot)
recov$gravity.Grav_tot2<-scale(recov$gravity.Grav_tot, center=TRUE, scale=TRUE)
hist(recov$gravity.Grav_tot2)
hist(recov$travel_time.tt_pop)
recov$travel_time.tt_pop2<-scale(recov$travel_time.tt_pop, center=TRUE, scale=TRUE)
hist(recov$travel_time.tt_pop2)
head(recov)
#How many studies are in the recov dataset?
length(unique(recov$study)) #57 studies
#write.csv(recov, 'recov_2021.csv')
####EXPLORING DATA BEFORE MODEL TESTING####
# Simple (non-mixed) linear models of recovery rate against each scaled
# human-impact predictor, with diagnostic plots; motivates the move to
# mixed models below by showing non-independence within region/disturbance.
recov<-read.csv('recov_2021.csv')
#basic lm
#how many unique lat/longs are there?
nrow(recov) #182
head(recov)
lm1<-lm(calculated.recovery.rate ~ hii100km2, data=recov)
summary(lm1)
lm2<-lm(calculated.recovery.rate ~ cml_scr2, data=recov)
summary(lm2)
lm3<-lm(calculated.recovery.rate ~ grav_NC2, data=recov)
summary(lm3)
plot1<-ggplot(recov, aes(x=hii100km, y=calculated.recovery.rate))+
geom_point()+
geom_smooth(method="lm")
plot1
plot2<-ggplot(recov, aes(x=cml_scr2, y=calculated.recovery.rate))+
geom_point()+
geom_smooth(method="lm")
plot2
plot3<-ggplot(recov, aes(x=grav_NC2, y=calculated.recovery.rate))+
geom_point()+
geom_smooth(method="lm")
plot3
plot1+plot2+plot3
#are assumptions met?
#residuals
plot(lm1, which=1) #gray line is flat, red should be nearly flat (mimicking gray)
#looks ok
plot(lm2, which=1) # looks ok
plot(lm3, which=1) # looks ok
#qq
plot(lm1, which=2) #points should be close to the line. They diverge at the ends a little
plot(lm2, which=2) #same as above
plot(lm3, which=2) #same as above
#check for observation independence (use categorical vars here)
#if data from within each category are more similar to each other than to data from different categories then they are correlated!
#region
boxplot(calculated.recovery.rate ~ region, data=recov) #maybe correlated? Possibly not.
#disturbance
boxplot(calculated.recovery.rate ~ disturbance, data= recov) #most likely correlated though sample sizes might be small
#plot w/ colors by category to see
color1<-ggplot(recov, aes(x=hii100km2, y=calculated.recovery.rate, color=region))+
geom_point(size=2)+
theme_bw()
color1
#regions vary by recovery rate and hii, so observations within are not independent
color2<-ggplot(recov, aes(x=hii100km2, y=calculated.recovery.rate, color=disturbance))+
geom_point(size=2)+
theme_bw()
color2
#disturbances vary by recovery rate AND hii100km, so observations within these are NOT INDEPENDENT
#add fixed effects into the model
lm2.1<-lm(calculated.recovery.rate ~ hii100km2 + region + disturbance, data=recov)
summary(lm2.1)
######MODEL TESTING#######
###GLM MODELS FOR RECOVERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that: var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
# Models r1-r5 step down from the full model; r6/r7 are deliberately simpler
# comparators. All use (1|study) as a random intercept and ML (REML=FALSE)
# fitting so AIC/BIC comparisons across fixed-effect structures are valid.
#with all vars in
r1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r1)
performance::check_model(r1)
check_outliers(r1)
summary(r1)
#distance to river has high VIF (18.52). So does region. So let's remove dist to river and try again
r2<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r2) #region still high, so something colinear w/ that...
performance::check_model(r2) #this look pretty good
check_outliers(r2) #1 outlier detected
summary(r2)
#cut gravity
r3<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r3)
performance::check_model(r3)
check_outliers(r3)
summary(r3)
#cut distance from shore (and not gravity)
r4<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4)
performance::check_model(r4)
check_outliers(r4) #no outliers
summary(r4)
#cut both distance from shore and gravity
r4.1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #max VIF is region at 4.92
performance::check_model(r4.1)
check_outliers(r4.1)
summary(r4.1)
#cut travel time
r4.2<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.2) #max VIF region at 5.35
performance::check_model(r4.2)
check_outliers(r4.2) #none
summary(r4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
r4.3<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.3) #VERY low VIF (nothing about 2.06--we like this)
performance::check_model(r4.3)
check_outliers(r4.3) #none
summary(r4.3)
#original final model
r5<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r5) #all VIF below 2.38 :)
performance::check_model(r5)
check_outliers(r5) #1 outlier
summary(r5)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5, rank=TRUE)
perf
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
r6<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r6) #all VIF are low
performance::check_model(r6)
check_outliers(r6) #3 outliers!
summary(r6)
#only region, disturbance, and hii
r7<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r7) #all VIF are low
performance::check_model(r7)
check_outliers(r7) #5 outliers!
summary(r7)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r6,r7, rank=TRUE)
perf
#old models (from model building)
# Earlier model-building path: start from full interaction models (rank
# deficient / highly collinear), then drop interaction terms one at a time
# until VIFs are acceptable; finally select and refit the chosen model.
r2.0<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2*MPA_during_recov + (1|study), data = recov, REML=FALSE)
summary(r2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
r2.1<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2*MPA_during_recov*recovery_time2 + (1|study), data = recov, REML=FALSE)# rank deficient
r2.2<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2*MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_model(r2.2)
check_collinearity(r2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the car package to see the VIFS as well
vif(r2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
r2.2.1<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.1) #still high
performance::check_model(r2.2.1)
r2.2.2<-lmer(calculated.recovery.rate ~ region*disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.2) #still high
performance::check_model(r2.2.2)
r2.2.3<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.3) #This finally looks good!
performance::check_model(r2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(r2.2.3)
#add in both distance to nearest river and distance to shore
r2.2.4<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.4) #multicollinearity is high
check_collinearity(r2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
r2.2.5<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.5) #multicollinearity is fine
check_collinearity(r2.2.5)
#remove recovery time
r2.2.6<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
#remove distance to shore
r2.2.7<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove hii
r2.2.8<-lmer(calculated.recovery.rate ~ region+disturbance+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove MPA status
r2.2.9<-lmer(calculated.recovery.rate ~ region+disturbance+ (1|study), data = recov, REML=FALSE)
#remove region
r2.2.10<-lmer(calculated.recovery.rate ~ disturbance+ (1|study), data = recov, REML=FALSE)
##COMPARE model performance
perf1<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r2.0,r2.1,r2.2,r2.2.1,r2.2.2,r2.2.3,r2.2.4,r2.2.5,r2.2.6,r2.2.7,r2.2.8,r2.2.9,r2.2.10, rank= TRUE)
perf1
perf2<-performance::compare_performance(r1,r2,r3,r4,r5,r4.1,r4.2,r4.3,rank=TRUE)
perf2
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#model r4.1 is best AIC and BIC. IT does not contain gravity or distance to shore
r4.1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #highest VIF=4.92, suggests some multicolinearity
performance::check_model(r4.1)
check_outliers(r4.1) # none
summary(r4.1)
#replace travel time w/ dist to shore
r4.1.2<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1.2) #highest VIF = 2.06, so this is better than the above
performance::check_model(r4.1.2)
check_outliers(r4.1.2) #none
summary(r4.1.2)
perf3<-performance::compare_performance(r4.1,r4.1.2,rank=TRUE)
perf3
##4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(r4.1)
# ICC by hand: random-effect variance / total variance
0.883/(0.883+3.152) #21.88%
summary(r4.1.2)
3.978/(3.978+3.171) #55.64%
#add REML in
finr4.1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=TRUE)
check_collinearity(finr4.1)#VIF max =3.85
performance::check_model(finr4.1)
performance::check_outliers(finr4.1, method=c("cook","pareto")) #none
summary(finr4.1)
2.148/(2.148+3.334) #39.18% of variance
#HII estimate = 0.80077 +/-0.41651
#cml_scr estimate = -0.02998 +/- 0.38993
#travel time estimate = 0.347 +/- 0.33291
compare_performance(r4.1,finr4.1, rank=TRUE)
#finr4.1 has lower AIC :)
###CHECK FOR SPATIAL AUTOCORRELATION###
# Two approaches: (1) performance::check_autocorrelation() on the final model;
# (2) Moran's I on model residuals and on the raw response, using a
# Delaunay-triangulation neighbour list (spdep::tri2nb) over study coordinates.
#method 1: use the check_autocorrelation() feature of the performance package
check_autocorrelation(finr4.1)# OK: Residuals appear to be independent and not autocorrelated (p = 0.118).
#method 2:
#based on: https://datascienceplus.com/spatial-regression-in-r-part-1-spamm-vs-glmmtmb/
ggplot(recov, aes(x=long, y=lat, size=calculated.recovery.rate))+
geom_point()+
theme_bw()
#maybe size looks very close to the same throughout but maybe a pattern? let's investigate.
#We will check this using our "non-spatial" model (finr4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residuals
# FIX(review): this assignment errors -- resid(finr4.1) has only as many
# elements as rows used in the fit (109; NAs dropped), while recov has 182
# rows. Commented out to match the resistance section; option 2 (augment +
# merge) below is the working approach.
#recov$resid<-resid(finr4.1)
resids<-as.data.frame(resid(finr4.1))
resids
#option 2: augment from the broom package makes a df of all model stuff
lmerresid<-broom::augment(finr4.1)
head(lmerresid)
nrow(lmerresid)#109
nrow(recov)#182
#problem: neither contains lat/long so a join or bind is needed.
#luckily column X.2 on recov and column .rownames on the augmented data are the same! So we can bind by that if we name the columns the same thing
recov1<-rename(recov, .rownames = X.2)
head(recov1)
spatialdf<-merge(recov1, lmerresid, by='.rownames')
head(spatialdf)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat
spatialdf$longlat<-paste(spatialdf$long, spatialdf$lat, sep="_")
#make a df with no duplicate lat/long (tri2nb requires unique coordinates)
spdf2<-spatialdf %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2)
#make the merged data into a spatial object
WGScoor<-spdf2
coordinates(WGScoor)=~long+lat
proj4string(WGScoor)<-CRS("+proj=longlat +datum=WGS84")
#raster::shapefile(WGScoor, "recovshape.shp")
#WGScoor is a spatialpointsdataframe and was saved as a shape file
coords<-coordinates(WGScoor)
#find nearest neighbors
tri.nb<-tri2nb(coords, row.names=rownames(WGScoor))
tri.nb #THIS WORKED AND MADE nearest neighbors!
summary(tri.nb)
nb2listw(tri.nb) #this also worked!
head(spdf2)
vect=spdf2$.resid #vector of model residuals
vect
vect1=spdf2$calculated.recovery.rate.y #vector of response var
vect1
#MORANS test for spatial autocorrelation!
moran.test(vect, nb2listw(tri.nb))
# Moran I statistic standard deviate = -1.8338, p-value = 0.9667
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# -0.122120186 -0.010989011 0.003672439
moran.test(vect1, nb2listw(tri.nb))
# Moran I statistic standard deviate = 0.21191, p-value = 0.4161
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.001747535 -0.010989011 0.003612590
#######################################################################################################
#######################################################################################################
#######################################################################################################
###RESISTANCE####
# Mirror of the recovery data prep above, but for the resistance response:
# read raw data, drop singleton regions/disturbances, drop NA responses,
# then centre+scale the numeric predictors.
#read in data
a=read.csv('coral_recov_master_2021.csv')
a1<-a %>% mutate_if(is.character, as.factor)
nrow(a1)
head(a1)
str(a1)
#BUILDING RESISTANCE DATASET#
#remove Arabian Gulf sample (n=1)
levels(a1$region)
a2<-subset(a1, region != "Arabian Gulf")
droplevels(a2$region)
#remove disturbance= disease, dredging (n=1) & COTS, Storm (n=1)
levels(a2$disturbance)
a3<-subset(a2, disturbance != "Disease, Dredging")
droplevels(a3$disturbance)
a4<-subset(a3, disturbance != "COTS, Storm")
droplevels(a4$disturbance)
#remove disturbance = Bleaching, Disease (n=1 for resistance only)
a5<-subset(a4, disturbance != "Bleaching, Disease")
droplevels(a5$disturbance)
#remove NAs from data
resist<-a5 %>% drop_na(resistance) %>% droplevels()
tail(resist)
nrow(resist) #184 rows of data
str(resist)
levels(resist$region)
#exploring the structure of the data
#HISTOGRAM OF RESISTANCE
hist(resist$resistance) #slight negative skew but approx normal
ggplot(resist, aes(x=resistance))+
geom_histogram(binwidth=5)
#center (standardise) explanatory variable(s) (mean of zero=centering, sd=1 = scaling --doing both here)
resist$hii100km2<-scale(resist$hii100km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii100km2)
resist$hii500km2<-scale(resist$hii500km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii500km2)
resist$resistance_time_2<-scale(resist$resistance.time, center=TRUE, scale=TRUE)
# FIX(review): was hist(resist$resistance_time2) -- nonexistent column
# (missing underscore), which errors on hist(NULL).
hist(resist$resistance_time_2)
hist(resist$distance_to_shore_m)
resist$distance_to_shore_m2<-scale(resist$distance_to_shore_m, center=TRUE, scale=TRUE)
hist(resist$distance_to_shore_m2)
# FIX(review): these two lines operated on 'recov' (copy-paste from the
# recovery section); changed to 'resist', and the duplicate scaling further
# below was removed.
resist$dist_to_riv_m2<-scale(resist$dist_to_riv_m, center=TRUE, scale=TRUE)
hist(resist$dist_to_riv_m2)
hist(resist$grav_NC)
resist$grav_NC2<-scale(resist$grav_NC, center=TRUE, scale=TRUE)
hist(resist$grav_NC2)
hist(resist$cml_scr)
resist$cml_scr2<-scale(resist$cml_scr, center=TRUE, scale=TRUE)
hist(resist$cml_scr2)
hist(resist$gravity.Grav_tot)
resist$gravity.Grav_tot2<-scale(resist$gravity.Grav_tot, center=TRUE, scale=TRUE)
hist(resist$gravity.Grav_tot2)
hist(resist$travel_time.tt_pop)
resist$travel_time.tt_pop2<-scale(resist$travel_time.tt_pop, center=TRUE, scale=TRUE)
hist(resist$travel_time.tt_pop2)
head(resist)
#How many studies are in the resist dataset?
length(unique(resist$study)) #59 studies
#write.csv(resist, 'resist_2021.csv')
######MODEL TESTING#######
###GLM MODELS FOR RESISTANCE
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that: var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
# Models s1-s7 mirror the recovery models r1-r7 above, with 'resistance' as
# the response; all use (1|study) as random intercept and ML fitting.
resist<-read.csv('resist_2021.csv')
#with all vars in
s1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s1) #dist to riv is HIGH correlation. Will need to remove
performance::check_model(s1)
check_outliers(s1)#none
summary(s1)
#distance to river has high VIF (12.28). So does region. So let's remove dist to river and try again
s2<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s2) #Looks like we are good on this
performance::check_model(s2) #this look pretty good
check_outliers(s2) #none
summary(s2)
#cut gravity since it built into cml_scr
s3<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s3) #good
performance::check_model(s3)
check_outliers(s3) #none
summary(s3)
#cut distance from shore (and not gravity)-- since travel time is used for same thing
s4<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4) #good
performance::check_model(s4)
check_outliers(s4) #no outliers
summary(s4)
#cut both distance from shore and gravity
s4.1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1)
#cut travel time (see if this differs from using distance from shore)
s4.2<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.2) #good
performance::check_model(s4.2)
check_outliers(s4.2) #none
summary(s4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
s4.3<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.3) #good
performance::check_model(s4.3)
check_outliers(s4.3) #none
summary(s4.3)
#original final model
s5<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s5) #good)
performance::check_model(s5)
check_outliers(s5) #1 outlier
summary(s5)
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
s6<-lmer(resistance ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s6) #all VIF are low
performance::check_model(s6)
check_outliers(s6) #3 outliers!
summary(s6)
#only region, disturbance, and hii
s7<-lmer(resistance ~ region+disturbance+hii100km2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s7) #all VIF are low
performance::check_model(s7)
check_outliers(s7) #5 outliers!
summary(s7)
rperf<-compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s6,s7, rank=TRUE)
rperf
#old models (from model building)
# Mirror of the r2.x recovery model-building path, for resistance.
# FIX(review): the check_collinearity()/check_model()/summary() calls in this
# section referenced the RECOVERY models (r2.2.1-r2.2.5) instead of the
# resistance models fitted here -- a copy-paste error which only "worked"
# because the r2.2.x objects existed in the session. Corrected to s2.2.x.
s2.0<-lmer(resistance ~ region*disturbance*hii100km2*MPA_during_resist + (1|study), data = resist, REML=FALSE)
summary(s2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
s2.1<-lmer(resistance ~ region*disturbance*hii100km2*MPA_during_resist*resistance_time_2 + (1|study), data = resist, REML=FALSE)# rank deficient
s2.2<-lmer(resistance ~ region*disturbance*hii100km2*MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_model(s2.2)
check_collinearity(s2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the car package to see the VIFS as well
vif(s2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
s2.2.1<-lmer(resistance ~ region*disturbance*hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.1) #still high
performance::check_model(s2.2.1)
s2.2.2<-lmer(resistance ~ region*disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.2) #still high
performance::check_model(s2.2.2)
s2.2.3<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.3) #This finally looks good!
performance::check_model(s2.2.3)
summary(s2.2.3)
#add in both distance to nearest river and distance to shore
s2.2.4<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.4) #multicollinearity is high
check_collinearity(s2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
s2.2.5<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.5) #multicollinearity is fine
check_collinearity(s2.2.5)
#remove resistance time
s2.2.6<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
#remove distance to shore
s2.2.7<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove hii
s2.2.8<-lmer(resistance ~ region+disturbance+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove MPA status
s2.2.9<-lmer(resistance ~ region+disturbance+ (1|study), data = resist, REML=FALSE)
#remove region
s2.2.10<-lmer(resistance ~ disturbance+ (1|study), data = resist, REML=FALSE)
##COMPARE model performance
# Rank all candidate resistance models, then refit the selected model (s4.1)
# with REML=TRUE for final parameter estimates.
perfs1<-performance::compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s2.0,s2.1,s2.2,s2.2.1,s2.2.2,s2.2.3,s2.2.4,s2.2.5,s2.2.6,s2.2.7,s2.2.8,s2.2.9,s2.2.10, rank= TRUE)
perfs1 #Model S3 (w/ distance to shore and travel time performs best but S2, S4.1, S4, S4.2, and S1 all VERY similar--essentially not different)
#WE WILL SELECT s4.1 as it matches what we did for recovery! s4.1 uses travel time in place of distance to shore as a proxy for remoteness and includes cml_scr from the WCS pre-print in addition to HII
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#does not contain gravity or distance to shore
s4.1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1)
#replace travel time w/ dist to shore
s4.1.2<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1.2) #good
performance::check_model(s4.1.2)
check_outliers(s4.1.2) #none
summary(s4.1.2)
perf3<-performance::compare_performance(s4.1,s4.1.2,rank=TRUE)
perf3
##S4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(s4.1)
# ICC by hand: random-effect variance / total variance
38.72/(38.72+75) #34.05%
summary(s4.1.2)
39.05/(39.05+84.87) #31.51%
#add REML in
fins4.1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=TRUE)
check_collinearity(fins4.1)#good
performance::check_model(fins4.1)
check_outliers(fins4.1) #none
summary(fins4.1)
69.98/(69.98+77.00) #47.61% of variance
#HII estimate = -.57795 +/-1.90827
#cml_scr estimate = 1.70562 +/- 1.66645
#travel time estimate = -0.72707 +/- 1.27091
####SPATIAL AUTOCORRELATION FOR RESISTANCE
#Two checks: (1) performance::check_autocorrelation on model residuals,
#(2) Moran's I on residuals and response, using Delaunay-triangulation neighbors (spdep)
#Method 1: use the check_autocorrelation() function in the performance package
check_autocorrelation(fins4.1) #OK: Residuals appear to be independent and not autocorrelated (p = 0.610).
#method 2:
#quick visual: bubble plot of resistance over lat/long
ggplot(resist, aes(x=long, y=lat, size=resistance))+
geom_point()+
theme_bw()
#Doesn't look like much but we should investigate residuals
#We will check this using our "non-spatial" model (fins4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residuals
#resist$resid<-resid(fins4.1)
resids<-as.data.frame(resid(fins4.1))
resids
#option 2: augment from the broom package makes a df of all model stuff
lmerresidres<-broom::augment(fins4.1)
head(lmerresidres)
#row counts differ because lmer drops rows with NAs in any model variable
nrow(lmerresidres)#134
nrow(resist)#184
#problem: neither contains lat/long so a join or bind is needed.
#luckily the row-id column on resist and column .rownames on the augmented data are the same! So we can bind by that if we name the columns the same thing
#NOTE(review): the recovery section used X.2 as the row-id column; here X.1 is used -- confirm this matches the resist csv
head(resist)
resist1<-rename(resist, .rownames = X.1)
head(resist1)
spatialdfres<-merge(resist1, lmerresidres, by='.rownames')
head(spatialdfres)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat (used to drop duplicate coordinates, which tri2nb cannot handle)
spatialdfres$longlat<-paste(spatialdfres$long, spatialdfres$lat, sep="_")
#make a df with no duplicate lat/long
spdf2res<-spatialdfres %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2res)
#make the de-duplicated data into a spatial object (sp SpatialPointsDataFrame, WGS84)
WGScoorres<-spdf2res
coordinates(WGScoorres)=~long+lat
proj4string(WGScoorres)<-CRS("+proj=longlat +datum=WGS84")
raster::shapefile(WGScoorres, "resistshape.shp")
#WGScoor is a spatialpointsdataframe and was saved as a shape file
coords<-coordinates(WGScoorres)
#find nearest neighbors via Delaunay triangulation
tri.nbres<-tri2nb(coords, row.names=rownames(WGScoorres))
tri.nbres #THIS WORKED AND MADE nearest neighbors!
summary(tri.nbres)
nb2listw(tri.nbres) #this also worked!
head(spdf2res)
vectr=spdf2res$.resid #vector of model residuals
vectr
vectr1=spdf2res$resistance.y #vector of response var (.y suffix comes from the merge)
vectr1
#MORANS test for spatial autocorrelation!
moran.test(vectr, nb2listw(tri.nbres))
# Moran I statistic standard deviate = 0.60469, p-value = 0.2727
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.024899265 -0.009523810 0.003240714
moran.test(vectr1, nb2listw(tri.nbres))
# Moran I statistic standard deviate = 0.47645, p-value = 0.3169
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.017786786 -0.009523810 0.003285649
#############################################################
#############################################################
### IGR ######################################################
# Read in data
recov<-read.csv('recov_2021.csv')
head(recov)
### Instantaneous Growth Rate (IGR) from Ortiz et al (2018)
#calculated as: r = LN ((recovery coral cover + 5) / (post dist coral cover + 5))/recovery time
#histogram of IGR
ggplot(recov, aes(x=IGR))+
geom_histogram(binwidth=1) #actually appears approx normal
#using same model structure we used for other vars:
######MODEL TESTING#######
###GLM MODELS FOR RECOVERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that: var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
#with all vars in
r1<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r1)
performance::check_model(r1)
check_outliers(r1) #1 outlier
summary(r1)
#dist to riv and region have HIGH VIF
r2<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r2) #looks good actually
performance::check_model(r2) #this look pretty good
check_outliers(r2) #1 outlier detected
summary(r2)
#cut gravity
r3<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r3) #looks good
performance::check_model(r3)
check_outliers(r3) # 1 outlier
summary(r3)
#cut distance from shore (and not gravity)
r4<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4) #looks good
performance::check_model(r4)
check_outliers(r4) #no outliers
summary(r4)
#cut both distance from shore and gravity
r4.1<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #looks good
performance::check_model(r4.1)
check_outliers(r4.1) #none
summary(r4.1)
#cut travel time
r4.2<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.2) #looks good
performance::check_model(r4.2)
check_outliers(r4.2) #none
summary(r4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
r4.3<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.3) #VERY low VIF
performance::check_model(r4.3)
check_outliers(r4.3) #1 outlier
summary(r4.3)
#original final model
r5<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r5) #all VIF below 2.38 :)
performance::check_model(r5)
check_outliers(r5) #1 outlier
summary(r5)
compare_performance(r1,r2)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5, rank=TRUE)
perf
#model r4 wins, though lowest AIC and BIC are model r4.1
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
r6<-lmer(IGR ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r6) #all VIF are low
performance::check_model(r6)
check_outliers(r6) #1 outlier!
summary(r6)
#only region, disturbance, and hii
r7<-lmer(IGR ~ region+disturbance+hii100km2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r7) #all VIF are low
performance::check_model(r7)
check_outliers(r7) #1 outlier!
summary(r7)
#re-rank with the simpler models included
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r6,r7, rank=TRUE)
perf #still r4.1 and r4
#old models (from model building) -- kept for the final compare_performance() ranking
r2.0<-lmer(IGR ~ region*disturbance*hii100km2*MPA_during_recov + (1|study), data = recov, REML=FALSE)
summary(r2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
r2.1<-lmer(IGR ~ region*disturbance*hii100km2*MPA_during_recov*recovery_time2 + (1|study), data = recov, REML=FALSE)# rank deficient
r2.2<-lmer(IGR ~ region*disturbance*hii100km2*MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_model(r2.2) #singular fit
check_collinearity(r2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the cars package to see the VIFS as well
vif(r2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
r2.2.1<-lmer(IGR ~ region*disturbance*hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.1) #still high
performance::check_model(r2.2.1) #rank deficient
r2.2.2<-lmer(IGR ~ region*disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.2) #still high
performance::check_model(r2.2.2) #rank deficient
r2.2.3<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.3) #This finally looks good!
performance::check_model(r2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(r2.2.3)
#add in both distance to nearest river and distance to shore
r2.2.4<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.4) #multicollinearity is high
check_collinearity(r2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
r2.2.5<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.5) #multicollinearity is fine
check_collinearity(r2.2.5)
#remove recovery time
r2.2.6<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
#remove distance to shore
r2.2.7<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove hii
r2.2.8<-lmer(IGR ~ region+disturbance+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove MPA status
r2.2.9<-lmer(IGR ~ region+disturbance+ (1|study), data = recov, REML=FALSE)
#remove region
r2.2.10<-lmer(IGR ~ disturbance+ (1|study), data = recov, REML=FALSE)
##COMPARE model performance across every candidate fitted above
perf1<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r2.0,r2.1,r2.2,r2.2.1,r2.2.2,r2.2.3,r2.2.4,r2.2.5,r2.2.6,r2.2.7,r2.2.8,r2.2.9,r2.2.10, rank= TRUE)
perf1
perf2<-performance::compare_performance(r1,r2,r3,r4,r5,r4.1,r4.2,r4.3,rank=TRUE)
perf2 #r4 and 4.1 still the best! Let's use 4.1 as it is the best for recov and resist
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#model r4.1 is best AIC and BIC. IT does not contain gravity or distance to shore
r4.1<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #all good
performance::check_model(r4.1)
check_outliers(r4.1) #NONE
summary(r4.1)
#alternative candidate: replace travel time w/ dist to shore
r4.1.2<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1.2) #good
performance::check_model(r4.1.2)
check_outliers(r4.1.2) #1 outlier
summary(r4.1.2)
perf3<-performance::compare_performance(r4.1,r4.1.2,rank=TRUE)
perf3
##4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(r4.1)
#study-level variance / (study + residual variance), values hand-copied from summary() output above
0.1348/(0.1348+0.1580) #46.04%
#refit the chosen model with REML=TRUE for the final variance estimates
finr4.1igr<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=TRUE)
check_collinearity(finr4.1igr)#ALL good
performance::check_model(finr4.1igr)
performance::check_outliers(finr4.1igr, method=c("cook","pareto")) #none
summary(finr4.1igr)
0.2619/(0.2619+0.1659) #61.22% of variance
compare_performance(r4.1,finr4.1igr, rank=TRUE)
#finr4.1 has lower AIC :)
##########################################################################################################
# Relative Recovery and Resistance ########################################################################
####EXPLORING DATA BEFORE MODEL TESTING####
relrecov<-read.csv('rel_recov_2021.csv')
#basic lm: single-predictor sanity checks before mixed modeling
#how many unique lat/longs are there?
nrow(relrecov) #151 (182 in the non-relative recov df--had to cut when we didn't have a pre-dist value to standardize to)
head(relrecov)
lm1<-lm(rel_rec_rate ~ hii100km2, data=relrecov)
summary(lm1)
lm2<-lm(rel_rec_rate ~ cml_scr2, data=relrecov)
summary(lm2)
lm3<-lm(rel_rec_rate ~ grav_NC2, data=relrecov)
summary(lm3)
#NOTE(review): plot1 uses the raw hii100km column while lm1 used the scaled hii100km2 -- confirm this is intentional
plot1<-ggplot(relrecov, aes(x=hii100km, y=rel_rec_rate))+
geom_point()+
geom_smooth(method="lm")
plot1
plot2<-ggplot(relrecov, aes(x=cml_scr2, y=rel_rec_rate))+
geom_point()+
geom_smooth(method="lm")
plot2
plot3<-ggplot(relrecov, aes(x=grav_NC2, y=rel_rec_rate))+
geom_point()+
geom_smooth(method="lm")
plot3
#patchwork-style side-by-side layout of the three scatterplots
plot1+plot2+plot3
#are assumptions met?
#residuals vs fitted
plot(lm1, which=1) #gray line is flat, red should be nearly flat (mimicking gray)
#looks ok
plot(lm2, which=1) # looks ok
plot(lm3, which=1) # looks ok
#qq plots for residual normality
plot(lm1, which=2) #points should be close to the line. They diverge at the ends a little
plot(lm2, which=2) #same as above
plot(lm3, which=2) #same as above
#check for observation independence (use categorical vars here)
#if data from within each category are more similar to each other than to data from different categories then they are correlated!
#region
boxplot(rel_rec_rate ~ region, data=relrecov) #maybe correlated? Possibly not.
#disturbance
boxplot(rel_rec_rate ~ disturbance, data= relrecov) #most likely correlated though sample sizes might be small
#plot w/ colors by category to see
color1<-ggplot(relrecov, aes(x=hii100km2, y=rel_rec_rate, color=region))+
geom_point(size=2)+
theme_bw()
color1
#regions vary by relrecovery rate and hii, so observations within are not independent
color2<-ggplot(relrecov, aes(x=hii100km2, y=rel_rec_rate, color=disturbance))+
geom_point(size=2)+
theme_bw()
color2
#disturbances vary by relrecovery rate AND hii100km, so observations within these are NOT INDEPENDENT
#add fixed effects into the model
lm2.1<-lm(rel_rec_rate ~ hii100km2 + region + disturbance, data=relrecov)
summary(lm2.1)
######MODEL TESTING#######
###GLM MODELS FOR RELATIVE RECOVERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that: var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
#with all vars in
r1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r1)
performance::check_model(r1)
check_outliers(r1) #1 outlier
summary(r1)
#Singular fit -- so need to simplify. VIFs for region and dist to riv are very high. CUT dist to riv
r2<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2) #region still high, so something colinear w/ that...
performance::check_model(r2) #this look pretty good
check_outliers(r2) #1 outlier detected
summary(r2)
#cut gravity
r3<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r3) #still moderate multicollinearity (VIF for region = 7.05, disturbance=5.34)
performance::check_model(r3)
check_outliers(r3)#1 outlier
summary(r3)
#cut distance from shore (and not gravity)
r4<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4) #still moderate VIF for region and dist
performance::check_model(r4)
check_outliers(r4) #1 outlier
summary(r4)
#cut both distance from shore and gravity
r4.1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.1) #still moderate VIFS (region =6.79, dist=5.16)
performance::check_model(r4.1)
check_outliers(r4.1) #no outliers
summary(r4.1)
#cut travel time
r4.2<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.2) #Still moderate Vifs for region and dist
performance::check_model(r4.2)
check_outliers(r4.2) #none
summary(r4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
r4.3<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.3) #Vifs are less bad BUT SINGULAR FIT. So cannot use
performance::check_model(r4.3)
check_outliers(r4.3) #none
summary(r4.3)
#original final model
r5<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r5) #VIFS are low but singular fit!
performance::check_model(r5)
check_outliers(r5) #none
summary(r5)
perf<-performance::compare_performance(r4.1,r4.2,r4.3,r5)
perf
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
r6<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r6) #all VIF are low but SINGULAR FIT
performance::check_model(r6)
check_outliers(r6) #none
summary(r6)
#only region, disturbance, and hii
r7<-lmer(rel_rec_rate ~ region+disturbance+hii100km2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r7) #all VIF are low but SINGULAR FIT
performance::check_model(r7)
check_outliers(r7) #1 outlier
summary(r7)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r6,r7, rank=TRUE)
perf
#old models (from model building) -- kept for the final compare_performance() ranking
r2.0<-lmer(rel_rec_rate ~ region*disturbance*hii100km2*MPA_during_recov + (1|study), data = relrecov, REML=FALSE)
summary(r2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
r2.1<-lmer(rel_rec_rate ~ region*disturbance*hii100km2*MPA_during_recov*recovery_time2 + (1|study), data = relrecov, REML=FALSE)# rank deficient
r2.2<-lmer(rel_rec_rate ~ region*disturbance*hii100km2*MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_model(r2.2)
check_collinearity(r2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the cars package to see the VIFS as well
vif(r2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
r2.2.1<-lmer(rel_rec_rate ~ region*disturbance*hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2.2.1) #still high
performance::check_model(r2.2.1)
r2.2.2<-lmer(rel_rec_rate ~ region*disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2.2.2) #still high
performance::check_model(r2.2.2)
r2.2.3<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2.2.3) #This finally looks good!
performance::check_model(r2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(r2.2.3)
#add in both distance to nearest river and distance to shore
r2.2.4<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = relrecov, REML=FALSE)
performance::check_model(r2.2.4) #multicollinearity is high
check_collinearity(r2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
r2.2.5<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2 + (1|study), data = relrecov, REML=FALSE)
performance::check_model(r2.2.5) #multicollinearity is fine
check_collinearity(r2.2.5)
#remove recovery time
r2.2.6<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+distance_to_shore_m2 + (1|study), data = relrecov, REML=FALSE)
#remove distance to shore
r2.2.7<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+ (1|study), data = relrecov, REML=FALSE)
#remove hii
r2.2.8<-lmer(rel_rec_rate ~ region+disturbance+MPA_during_recov+ (1|study), data = relrecov, REML=FALSE)
#remove MPA status
r2.2.9<-lmer(rel_rec_rate ~ region+disturbance+ (1|study), data = relrecov, REML=FALSE)
#remove region
r2.2.10<-lmer(rel_rec_rate ~ disturbance+ (1|study), data = relrecov, REML=FALSE)
##COMPARE model performance across every candidate fitted above
perf1<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r2.0,r2.1,r2.2,r2.2.1,r2.2.2,r2.2.3,r2.2.4,r2.2.5,r2.2.6,r2.2.7,r2.2.8,r2.2.9,r2.2.10, rank= TRUE)
perf1
perf2<-performance::compare_performance(r1,r2,r3,r4,r5,r4.1,r4.2,r4.3,rank=TRUE)
perf2
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#model r4.1 is best AIC and BIC. IT does not contain gravity or distance to shore
r4.1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.1) #highest VIF=6.79, suggest some multicollinearity!
performance::check_model(r4.1) #these look ok though except for multicollinearity
check_outliers(r4.1) #none
summary(r4.1) #AIC = 776.1, BIC=824.6
#alternative candidate: replace travel time w/ dist to shore
r4.1.2<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.1.2) #VIFs a little lower but SINGULAR FIT.
performance::check_model(r4.1.2)
check_outliers(r4.1.2) #none
summary(r4.1.2)
perf3<-performance::compare_performance(r4.1,r4.1.2,rank=TRUE)
perf3
##4.1 has lowest AIC and is not a singular fit. In spite of collinearity concerns, let's use it.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(r4.1)
#study-level variance / (study + residual variance), values hand-copied from summary() output above
3.932/(3.932+48.775) #7.46%
#refit the chosen model with REML=TRUE for the final variance estimates
finr4.1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=TRUE)
check_collinearity(finr4.1)#VIF max =5.29 (region). We can live with that I think
performance::check_model(finr4.1) #looks good for (note 1 VIF > 5--region)
performance::check_outliers(finr4.1, method=c("cook","pareto")) #none
summary(finr4.1)
11.60/(11.60+53.68) #17.76961% of variance
#HII estimate = 2.456 +/-1.34970
#cml_scr estimate = 0.22960 +/- 1.40278
#travel time estimate = 1.12155 +/- 1.10504
compare_performance(r4.1,finr4.1, rank=TRUE)
#finr4.1 has lower AIC :)
###CHECK FOR SPATIAL AUTOCORRELATION###
#Two checks: (1) performance::check_autocorrelation on model residuals,
#(2) Moran's I on residuals and response, using Delaunay-triangulation neighbors (spdep)
#method 1: use the check_autocorrelation() feature of the performance package
check_autocorrelation(finr4.1)# OK: Residuals appear to be independent and not autocorrelated (p = 0.812).
#method 2:
#based on: https://datascienceplus.com/spatial-regression-in-r-part-1-spamm-vs-glmmtmb/
ggplot(relrecov, aes(x=long, y=lat, size=rel_rec_rate))+
geom_point()+
theme_bw()
#maybe size looks very close to the same throughout but maybe a pattern? let's investigate.
#We will check this using our "non-spatial" model (finr4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residuals
relrecov$resid<-resid(finr4.1)
resids<-as.data.frame(resid(finr4.1))
resids
#option 2: augment from the broom package makes a df of all model stuff
#due to updates and changes we now need the broom.mixed package
library(broom.mixed)
lmerresid<-broom.mixed::augment(finr4.1)
head(lmerresid)
#row counts differ because lmer drops rows with NAs in any model variable
nrow(lmerresid)#109
nrow(relrecov)#151
#problem: neither contains lat/long so a join or bind is needed.
#luckily column X.2 on relrecov and column .rownames on the augmented data are the same! So we can bind by that if we name the columns the same thing
relrecov1<-rename(relrecov, .rownames = X.2)
head(relrecov1)
spatialdf<-merge(relrecov1, lmerresid, by='.rownames')
head(spatialdf)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat (used to drop duplicate coordinates, which tri2nb cannot handle)
spatialdf$longlat<-paste(spatialdf$long, spatialdf$lat, sep="_")
#make a df with no duplicate lat/long
spdf2<-spatialdf %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2)
#make the de-duplicated data into a spatial object (sp SpatialPointsDataFrame, WGS84)
WGScoor<-spdf2
coordinates(WGScoor)=~long+lat
proj4string(WGScoor)<-CRS("+proj=longlat +datum=WGS84")
#raster::shapefile(WGScoor, "relrecovshape.shp")
#WGScoor is a spatialpointsdataframe and was saved as a shape file
coords<-coordinates(WGScoor)
#find nearest neighbors via Delaunay triangulation
tri.nb<-tri2nb(coords, row.names=rownames(WGScoor))
tri.nb #THIS WORKED AND MADE nearest neighbors!
summary(tri.nb)
nb2listw(tri.nb) #this also worked!
head(spdf2)
vect=spdf2$.resid #vector of model residuals
vect
vect1=spdf2$rel_rec_rate.y #vector of response var (.y suffix comes from the merge)
vect1
#MORANS test for spatial autocorrelation!
moran.test(vect, nb2listw(tri.nb))
# Moran I statistic standard deviate = -0.39564, p-value = 0.6538
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# -0.037737131 -0.012500000 0.004068959
moran.test(vect1, nb2listw(tri.nb))
# Moran I statistic standard deviate = 1.2743, p-value = 0.1013
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.067806433 -0.012500000 0.003971795
#######################################################################################################
#######################################################################################################
#######################################################################################################
###RESISTANCE####
#read in the master dataset and convert character columns to factors
a=read.csv('coral_relrecov_master_2021.csv')
a1<-a %>% mutate_if(is.character, as.factor)
nrow(a1)
head(a1)
str(a1)
#BUILDING RESISTANCE DATASET#
#remove Arabian Gulf sample (n=1)
levels(a1$region)
a2<-subset(a1, region != "Arabian Gulf")
#NOTE(review): the droplevels() calls below are not assigned back, so they only print the
#dropped-level factor for inspection; the kept data frame gets droplevels() applied when
#resist is created below, so the final dataset is unaffected
droplevels(a2$region)
#remove disturbance= disease, dredging (n=1) & COTS, Storm (n=1)
levels(a2$disturbance)
a3<-subset(a2, disturbance != "Disease, Dredging")
droplevels(a3$disturbance)
a4<-subset(a3, disturbance != "COTS, Storm")
droplevels(a4$disturbance)
#remove disturbance = Bleaching, Disease (n=1 for resistance only)
a5<-subset(a4, disturbance != "Bleaching, Disease")
droplevels(a5$disturbance)
#remove NAs from data (keep only rows with a resistance value) and drop unused factor levels
resist<-a5 %>% drop_na(resistance) %>% droplevels()
tail(resist)
nrow(resist) #184 rows of data
str(resist)
levels(resist$region)
#exploring the structure of the data
#HISTOGRAM OF RESISTANCE VALUES
hist(resist$resistance) #slight negative skew but approx normal
ggplot(resist, aes(x=resistance))+
geom_histogram(binwidth=5)
#### STANDARDIZE (CENTER + SCALE) PREDICTORS FOR THE RESISTANCE DATASET ####
#center (standardise) explanatory variable(s) (mean of zero=centering, sd=1 = scaling --doing both here)
#each scaled predictor gets a "2" suffix and a histogram to eyeball the result
resist$hii100km2<-scale(resist$hii100km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii100km2)
resist$hii500km2<-scale(resist$hii500km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii500km2)
resist$resistance_time_2<-scale(resist$resistance.time, center=TRUE, scale=TRUE)
#FIX: was hist(resist$resistance_time2) -- no such column, so `$` returned NULL and hist() errored
hist(resist$resistance_time_2)
hist(resist$distance_to_shore_m)
resist$distance_to_shore_m2<-scale(resist$distance_to_shore_m, center=TRUE, scale=TRUE)
hist(resist$distance_to_shore_m2)
#NOTE(review): the next two lines operate on relrecov, not resist -- they look like a
#copy-paste leftover from the relative-recovery section (resist$dist_to_riv_m2 is created
#below). Kept as-is to preserve behavior; confirm and remove if unneeded.
relrecov$dist_to_riv_m2<-scale(relrecov$dist_to_riv_m, center=TRUE, scale=TRUE)
hist(relrecov$dist_to_riv_m2)
hist(resist$grav_NC)
resist$grav_NC2<-scale(resist$grav_NC, center=TRUE, scale=TRUE)
hist(resist$grav_NC2)
hist(resist$cml_scr)
resist$cml_scr2<-scale(resist$cml_scr, center=TRUE, scale=TRUE)
hist(resist$cml_scr2)
hist(resist$gravity.Grav_tot)
resist$gravity.Grav_tot2<-scale(resist$gravity.Grav_tot, center=TRUE, scale=TRUE)
hist(resist$gravity.Grav_tot2)
hist(resist$travel_time.tt_pop)
resist$travel_time.tt_pop2<-scale(resist$travel_time.tt_pop, center=TRUE, scale=TRUE)
hist(resist$travel_time.tt_pop2)
resist$dist_to_riv_m2<-scale(resist$dist_to_riv_m, center=TRUE, scale=TRUE)
#FIX: was hist(resist$dist_to_rivm2) -- typo (missing underscore); plot the column just created
hist(resist$dist_to_riv_m2)
head(resist)
#How many studies are in the resistance dataset?
length(unique(resist$study)) #59 studies
#write.csv(resist, 'rel_resist_2021.csv')
######MODEL TESTING#######
###GLM MODELS FOR relrecovERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects fo after the ~, random go after that var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
resist<-read.csv('rel_resist_2021.csv')
#with all vars in
s1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s1) #dist to riv is HIGH correlation. Will need to remove
performance::check_model(s1)
check_outliers(s1)#none
summary(s1)
#distance to river has high VIF (13.25). So does region. So let's remove dist to river and try again
s2<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s2) #Looks like we are good on this
performance::check_model(s2) #this look pretty good
check_outliers(s2) #none
summary(s2)
#cut gravity since it built into cml_scr
s3<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s3) #good
performance::check_model(s3)
check_outliers(s3) #1 outlier
summary(s3)
#cut distance from shore (and not gravity)-- since travel time is used for same thing
s4<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4) #good
performance::check_model(s4)
check_outliers(s4) #no outliers
summary(s4)
#cut both distance from shore and gravity
s4.1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1) #AIC = 1275.1, BIC=1324.4
#cut travel time (see if this differs from using distance from shore)
s4.2<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.2) #good
performance::check_model(s4.2)
check_outliers(s4.2) #1 outlier
summary(s4.2) #AIC=1277.1, BIC=1329.3
#cut travel time and gravity, ends up being just the old model +cml_scr
s4.3<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.3) #good
performance::check_model(s4.3)
check_outliers(s4.3) #none
summary(s4.3)
#original final model
#s5 = previously selected model (no gravity, cml_scr, or travel time terms)
s5<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s5) #good
performance::check_model(s5)
check_outliers(s5) #none
summary(s5)
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
s6<-lmer(rel_res ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s6) #all VIF are low
performance::check_model(s6)
check_outliers(s6) #3 outliers!
summary(s6)
#only region, disturbance, and hii
s7<-lmer(rel_res ~ region+disturbance+hii100km2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s7) #all VIF are low
performance::check_model(s7)
check_outliers(s7) #5 outliers!
summary(s7)
#rank all candidate models on AIC/BIC/R2/RMSE etc. (performance package)
rperf<-compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s6,s7, rank=TRUE)
rperf
#old models (from model building)
#Fully crossed interaction models kept here for comparison; the * operator expands to
#all main effects plus all interactions, which is what drives the high VIFs below.
s2.0<-lmer(rel_res ~ region*disturbance*hii100km2*MPA_during_resist + (1|study), data = resist, REML=FALSE)
summary(s2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
s2.1<-lmer(rel_res ~ region*disturbance*hii100km2*MPA_during_resist*resistance_time_2 + (1|study), data = resist, REML=FALSE)# rank deficient
s2.2<-lmer(rel_res ~ region*disturbance*hii100km2*MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_model(s2.2)
check_collinearity(s2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the car package to see the VIFs as well
vif(s2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
# NOTE(fix): the diagnostic calls below originally referenced r2.2.1-r2.2.5 -- the
# *recovery*-section models defined elsewhere in this file -- instead of the s2.2.1-s2.2.5
# models fitted here (a copy-paste slip). That ran without error but silently diagnosed
# the wrong models. All references corrected to the s2.2.x models.
#drop the 4-way interaction with MPA status first
s2.2.1<-lmer(rel_res ~ region*disturbance*hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.1) #still high
performance::check_model(s2.2.1)
#keep only the region x disturbance interaction
s2.2.2<-lmer(rel_res ~ region*disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.2) #still high
performance::check_model(s2.2.2)
#fully additive model -- no interactions left
s2.2.3<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.3) #This finally looks good!
performance::check_model(s2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(s2.2.3)
#add in both distance to nearest river and distance to shore
s2.2.4<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.4) #multicollinearity is high
check_collinearity(s2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
s2.2.5<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.5) #multicollinearity is fine
check_collinearity(s2.2.5)
#remove resistance time
s2.2.6<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
#remove distance to shore
s2.2.7<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove hii
s2.2.8<-lmer(rel_res ~ region+disturbance+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove MPA status
s2.2.9<-lmer(rel_res ~ region+disturbance+ (1|study), data = resist, REML=FALSE)
#remove region
s2.2.10<-lmer(rel_res ~ disturbance+ (1|study), data = resist, REML=FALSE)
##COMPARE model performance
#rank all candidate rel_res models at once on AIC/BIC/R2/RMSE
perfs1<-performance::compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s2.0,s2.1,s2.2,s2.2.1,s2.2.2,s2.2.3,s2.2.4,s2.2.5,s2.2.6,s2.2.7,s2.2.8,s2.2.9,s2.2.10, rank= TRUE)
perfs1 #Model S3 (w/ distance to shore and travel time performs best but S2, S4.1, S4, S4.2, and S1 all VERY similar--essentially not different)
#WE WILL SELECT s4.1 as it matches what we did for relrecovery! s4.1 uses travel time in place of distance to shore as a proxy for remoteness and includes cml_scr from the WCS pre-print in addition to HII
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#does not contain gravity or distance to shore
#(re-fit of s4.1 for clarity; identical formula to the s4.1 defined above)
s4.1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1) #AIC=1275.1, BIC=1324.4
#replace travel time w/ dist to shore
s4.1.2<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1.2) #good
performance::check_model(s4.1.2)
check_outliers(s4.1.2) #none
summary(s4.1.2) #AIC = 1722.3, BIC=1776.3
perf3<-performance::compare_performance(s4.1,s4.1.2,rank=TRUE)
perf3
##S4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(s4.1)
#random-effect (study) variance / total variance, values taken from summary(s4.1) output
191.9/(191.9+498.1) #27.81%
#add REML in
#final model: same fixed effects as s4.1 but fit with REML for unbiased variance estimates
fins4.1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=TRUE)
check_collinearity(fins4.1)#good
performance::check_model(fins4.1)
check_outliers(fins4.1) #none
summary(fins4.1)
#study variance / total variance from the REML fit
400.3/(400.3+502.9) #44.32% of variance
#HII estimate = -5.1338 +/-4.7084
#cml_scr estimate = 8.9376 +/- 4.1876
#travel time estimate = -0.1736 +/-3.1860
####SPATIAL AUTOCORRELATION FOR RESISTANCE
#Method 1: use the check_autocorrelation() function in the performance package
check_autocorrelation(fins4.1) #OK: Residuals appear to be independent and not autocorrelated (p = 0.344).
#method 2:
#visual check: plot points sized by resistance to eyeball spatial clustering
ggplot(resist, aes(x=long, y=lat, size=resistance))+
geom_point()+
theme_bw()
#Doesn't look like much but we should investigate residuals
#We will check this using our "non-spatial" model (fins4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residuals
#(direct assignment is disabled: the model drops NA rows, so lengths differ)
#resist$resid<-resid(fins4.1)
resids<-as.data.frame(resid(fins4.1))
resids
#option 2: augment from the broom.mixed package makes a df of all model stuff
lmerresidres<-broom.mixed::augment(fins4.1)
head(lmerresidres)
nrow(lmerresidres)#134
nrow(resist)#184
#problem: neither contains lat/long so a join or bind is needed.
#column X.1 on the resistance data and column .rownames on the augmented data correspond, so we can merge after renaming them to match (TODO confirm X.1 really is the original row id in rel_resist_2021.csv)
head(resist)
resist1<-rename(resist, .rownames = X.1)
head(resist1)
spatialdfres<-merge(resist1, lmerresidres, by='.rownames')
head(spatialdfres)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat
spatialdfres$longlat<-paste(spatialdfres$long, spatialdfres$lat, sep="_")
#make a df with no duplicate lat/long (tri2nb requires unique point locations)
spdf2res<-spatialdfres %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2res)
#make the deduplicated resistance data into a spatial object
WGScoorres<-spdf2res
coordinates(WGScoorres)=~long+lat
proj4string(WGScoorres)<-CRS("+proj=longlat +datum=WGS84")
raster::shapefile(WGScoorres, "resistshape.shp")
#WGScoorres is a SpatialPointsDataFrame and was saved as a shape file
coords<-coordinates(WGScoorres)
#find nearest neighbors via Delaunay triangulation
tri.nbres<-tri2nb(coords, row.names=rownames(WGScoorres))
tri.nbres #THIS WORKED AND MADE nearest neighbors!
summary(tri.nbres)
nb2listw(tri.nbres) #this also worked!
head(spdf2res)
vectr=spdf2res$.resid #vector of model residuals
vectr
vectr1=spdf2res$resistance.y #vector of response var
vectr1
#MORANS test for spatial autocorrelation!
moran.test(vectr, nb2listw(tri.nbres))
# Moran I statistic standard deviate = 0.47746, p-value = 0.3165
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.017704076 -0.009523810 0.003252007
moran.test(vectr1, nb2listw(tri.nbres))
#FAILS- is not a numeric vector
#NOTE(review): resistance.y suggests the merge produced duplicate-suffixed columns; verify the correct response column name before re-running this test
| /code/Baumann_et_al(2021)_GCB_model_code.R | no_license | jbaumann3/Baumann-et-al-coral-recovery- | R | false | false | 73,755 | r | # Baumann et al (2021) GCB Mixed Effects Model Code #
#By: Justin Baumann
#####################################################
#load packages
library(tidyverse)
library(ggplot2)
library(lme4)
library(car) #For checking multicollinearity, using performance instead
library(performance) #for checking R2 or ICC, and comparing model performances
library(ggeffects)
#library(cowplot)
#library(egg)
#library(ggpubr)
library(patchwork)
library(ggrepel)
library(ggsci)
library(parameters) #get model params
library(effects)
library(broom)
library(devtools)
#devtools::install_github("m-clark/visibly")
library(visibly)
library(rgdal)
library(raster)
library(spdep)
######################################################
#data prep
#read in data
a=read.csv('coral_recov_master_2021.csv')
#coerce all character columns to factors for modeling
a1<-a %>% mutate_if(is.character, as.factor)
nrow(a1)
head(a1)
str(a1)
########## Build recovery datasets#########
##building a recovery dataset
#remove Arabian Gulf sample (n=1)
levels(a1$region)
a2<-subset(a1, region != "Arabian Gulf")
droplevels(a2$region)
#NOTE(review): the droplevels() calls above/below are not assigned back, so their
#results are discarded; the final droplevels() in the recov pipeline handles it
#remove disturbance= disease, dredging (n=1) & COTS, Storm (n=1)
levels(a2$disturbance)
a3<-subset(a2, disturbance != "Disease, Dredging")
droplevels(a3$disturbance)
a4<-subset(a3, disturbance != "COTS, Storm")
droplevels(a4$disturbance)
#remove NAs from data
recov<-a4 %>% drop_na(calculated.recovery.rate) %>% droplevels()
tail(recov)
nrow(recov) #182 rows of data for recovery!
str(recov)
levels(recov$disturbance)
#exploring the structure of the data
#HISTOGRAM OF RECOVERY RATES
hist(recov$calculated.recovery.rate) #positively skewed normal
ggplot(recov, aes(x=calculated.recovery.rate))+
geom_histogram(binwidth=5)
#center (standardise) explanatory variable(s) (mean of zero=centering, sd=1 = scaling --doing both here)
recov$hii100km2<-scale(recov$hii100km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(recov$hii100km2)
recov$hii500km2<-scale(recov$hii500km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(recov$hii500km2)
recov$recovery_time2<-scale(recov$recovery_time, center=TRUE, scale=TRUE)
hist(recov$recovery_time2)
hist(recov$distance_to_shore_m)
recov$distance_to_shore_m2<-scale(recov$distance_to_shore_m, center=TRUE, scale=TRUE)
hist(recov$distance_to_shore_m2)
recov$dist_to_riv_m2<-scale(recov$dist_to_riv_m, center=TRUE, scale=TRUE)
hist(recov$dist_to_riv_m2)
hist(recov$grav_NC)
recov$grav_NC2<-scale(recov$grav_NC, center=TRUE, scale=TRUE)
hist(recov$grav_NC2)
hist(recov$cml_scr)
recov$cml_scr2<-scale(recov$cml_scr, center=TRUE, scale=TRUE)
hist(recov$cml_scr2)
hist(recov$gravity.Grav_tot)
recov$gravity.Grav_tot2<-scale(recov$gravity.Grav_tot, center=TRUE, scale=TRUE)
hist(recov$gravity.Grav_tot2)
hist(recov$travel_time.tt_pop)
recov$travel_time.tt_pop2<-scale(recov$travel_time.tt_pop, center=TRUE, scale=TRUE)
hist(recov$travel_time.tt_pop2)
head(recov)
#How many studies are in the recov dataset?
length(unique(recov$study)) #57 studies
#write.csv(recov, 'recov_2021.csv')
####EXPLORING DATA BEFORE MODEL TESTING####
recov<-read.csv('recov_2021.csv')
#basic lm
#how many unique lat/longs are there?
nrow(recov) #182
head(recov)
#simple linear models of recovery rate vs. single human-impact metrics
lm1<-lm(calculated.recovery.rate ~ hii100km2, data=recov)
summary(lm1)
lm2<-lm(calculated.recovery.rate ~ cml_scr2, data=recov)
summary(lm2)
lm3<-lm(calculated.recovery.rate ~ grav_NC2, data=recov)
summary(lm3)
plot1<-ggplot(recov, aes(x=hii100km, y=calculated.recovery.rate))+
geom_point()+
geom_smooth(method="lm")
plot1
plot2<-ggplot(recov, aes(x=cml_scr2, y=calculated.recovery.rate))+
geom_point()+
geom_smooth(method="lm")
plot2
plot3<-ggplot(recov, aes(x=grav_NC2, y=calculated.recovery.rate))+
geom_point()+
geom_smooth(method="lm")
plot3
#side-by-side via patchwork
plot1+plot2+plot3
#are assumptions met?
#residuals
plot(lm1, which=1) #gray line is flat, red should be nearly flat (mimicking gray)
#looks ok
plot(lm2, which=1) # looks ok
plot(lm3, which=1) # looks ok
#qq
plot(lm1, which=2) #points should be close to the line. They diverge at the ends a little
plot(lm2, which=2) #same as above
plot(lm3, which=2) #same as above
#check for observation independence (use categorical vars here)
#if data from within each category are more similar to each other than to data from different categories then they are correlated!
#region
boxplot(calculated.recovery.rate ~ region, data=recov) #maybe correlated? Possibly not.
#disturbance
boxplot(calculated.recovery.rate ~ disturbance, data= recov) #most likely correlated though sample sizes might be small
#plot w/ colors by category to see
color1<-ggplot(recov, aes(x=hii100km2, y=calculated.recovery.rate, color=region))+
geom_point(size=2)+
theme_bw()
color1
#regions vary by recovery rate and hii, so observations within are not independent
color2<-ggplot(recov, aes(x=hii100km2, y=calculated.recovery.rate, color=disturbance))+
geom_point(size=2)+
theme_bw()
color2
#disturbances vary by recovery rate AND hii100km, so observations within these are NOT INDEPENDENT
#add fixed effects into the model
lm2.1<-lm(calculated.recovery.rate ~ hii100km2 + region + disturbance, data=recov)
summary(lm2.1)
######MODEL TESTING#######
###GLM MODELS FOR RECOVERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that: var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
#with all vars in
r1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r1)
performance::check_model(r1)
check_outliers(r1)
summary(r1)
#distance to river has high VIF (18.52). So does region. So let's remove dist to river and try again
r2<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r2) #region still high, so something colinear w/ that...
performance::check_model(r2) #this look pretty good
check_outliers(r2) #1 outlier detected
summary(r2)
#cut gravity
r3<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r3)
performance::check_model(r3)
check_outliers(r3)
summary(r3)
#cut distance from shore (and not gravity)
r4<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4)
performance::check_model(r4)
check_outliers(r4) #no outliers
summary(r4)
#cut both distance from shore and gravity
r4.1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #max VIF is region at 4.92
performance::check_model(r4.1)
check_outliers(r4.1)
summary(r4.1)
#cut travel time
r4.2<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.2) #max VIF region at 5.35
performance::check_model(r4.2)
check_outliers(r4.2) #none
summary(r4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
r4.3<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.3) #VERY low VIF (nothing about 2.06--we like this)
performance::check_model(r4.3)
check_outliers(r4.3) #none
summary(r4.3)
#original final model
r5<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r5) #all VIF below 2.38 :)
performance::check_model(r5)
check_outliers(r5) #1 outlier
summary(r5)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5, rank=TRUE)
perf
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
r6<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r6) #all VIF are low
performance::check_model(r6)
check_outliers(r6) #3 outliers!
summary(r6)
#only region, disturbance, and hii
r7<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r7) #all VIF are low
performance::check_model(r7)
check_outliers(r7) #5 outliers!
summary(r7)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r6,r7, rank=TRUE)
perf
#old models (from model building)
#fully crossed interaction model; * expands to all main effects + interactions
r2.0<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2*MPA_during_recov + (1|study), data = recov, REML=FALSE)
summary(r2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
r2.1<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2*MPA_during_recov*recovery_time2 + (1|study), data = recov, REML=FALSE)# rank deficient
r2.2<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2*MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_model(r2.2)
check_collinearity(r2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the car package to see the VIFs as well
vif(r2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
r2.2.1<-lmer(calculated.recovery.rate ~ region*disturbance*hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.1) #still high
performance::check_model(r2.2.1)
r2.2.2<-lmer(calculated.recovery.rate ~ region*disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.2) #still high
performance::check_model(r2.2.2)
#fully additive model -- no interactions left
r2.2.3<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.3) #This finally looks good!
performance::check_model(r2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(r2.2.3)
#add in both distance to nearest river and distance to shore
r2.2.4<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.4) #multicollinearity is high
check_collinearity(r2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
r2.2.5<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.5) #multicollinearity is fine
check_collinearity(r2.2.5)
#remove recovery time
r2.2.6<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
#remove distance to shore
r2.2.7<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove hii
r2.2.8<-lmer(calculated.recovery.rate ~ region+disturbance+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove MPA status
r2.2.9<-lmer(calculated.recovery.rate ~ region+disturbance+ (1|study), data = recov, REML=FALSE)
#remove region
r2.2.10<-lmer(calculated.recovery.rate ~ disturbance+ (1|study), data = recov, REML=FALSE)
##COMPARE model performance
perf1<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r2.0,r2.1,r2.2,r2.2.1,r2.2.2,r2.2.3,r2.2.4,r2.2.5,r2.2.6,r2.2.7,r2.2.8,r2.2.9,r2.2.10, rank= TRUE)
perf1
perf2<-performance::compare_performance(r1,r2,r3,r4,r5,r4.1,r4.2,r4.3,rank=TRUE)
perf2
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#model r4.1 is best AIC and BIC. IT does not contain gravity or distance to shore
r4.1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #highest VIF=4.92, suggests some multicolinearity
performance::check_model(r4.1)
check_outliers(r4.1) #none
summary(r4.1)
#replace travel time w/ dist to shore
r4.1.2<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1.2) #highest VIF = 2.06, so this is better than the above
performance::check_model(r4.1.2)
check_outliers(r4.1.2) #none
summary(r4.1.2)
perf3<-performance::compare_performance(r4.1,r4.1.2,rank=TRUE)
perf3
##4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(r4.1)
#random-effect (study) variance / total variance, values from summary output
0.883/(0.883+3.152) #21.88%
summary(r4.1.2)
3.978/(3.978+3.171) #55.64%
#add REML in
#final model: same fixed effects as r4.1, refit with REML for unbiased variance estimates
finr4.1<-lmer(calculated.recovery.rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=TRUE)
check_collinearity(finr4.1)#VIF max =3.85
performance::check_model(finr4.1)
performance::check_outliers(finr4.1, method=c("cook","pareto")) #none
summary(finr4.1)
#study variance / total variance from the REML fit
2.148/(2.148+3.334) #39.18% of variance
#HII estimate = 0.80077 +/-0.41651
#cml_scr estimate = -0.02998 +/- 0.38993
#travel time estimate = 0.347 +/- 0.33291
#NOTE(review): comparing an ML fit (r4.1) to a REML fit (finr4.1) by AIC is not a
#like-for-like likelihood comparison -- confirm this comparison is intended
compare_performance(r4.1,finr4.1, rank=TRUE)
#finr4.1 has lower AIC :)
###CHECK FOR SPATIAL AUTOCORRELATION###
#method 1: use the check_autocorrelation() feature of the performance package
check_autocorrelation(finr4.1)# OK: Residuals appear to be independent and not autocorrelated (p = 0.118).
#method 2:
#based on: https://datascienceplus.com/spatial-regression-in-r-part-1-spamm-vs-glmmtmb/
ggplot(recov, aes(x=long, y=lat, size=calculated.recovery.rate))+
geom_point()+
theme_bw()
#maybe size looks very close to the same throughout but maybe a pattern? let's investigate.
#We will check this using our "non-spatial" model (finr4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residuals
# NOTE(fix): direct assignment is disabled -- finr4.1 drops NA rows, so resid() returns
# 109 values (see nrow counts below) while recov has 182 rows; the assignment would
# error at runtime. This matches the resistance section, which also comments it out.
#recov$resid<-resid(finr4.1)
resids<-as.data.frame(resid(finr4.1))
resids
#option 2: augment from the broom.mixed package makes a df of all model stuff
# NOTE(fix): was broom::augment() -- mixed-model (merMod) tidiers live in broom.mixed,
# and the resistance section already uses broom.mixed::augment; made consistent.
lmerresid<-broom.mixed::augment(finr4.1)
head(lmerresid)
nrow(lmerresid)#109
nrow(recov)#182
#problem: neither contains lat/long so a join or bind is needed.
#luckily column X.2 on recov and column .rownames on the augmented data are the same! So we can bind by that is we name the columns the same thing
recov1<-rename(recov, .rownames = X.2)
head(recov1)
spatialdf<-merge(recov1, lmerresid, by='.rownames')
head(spatialdf)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat
spatialdf$longlat<-paste(spatialdf$long, spatialdf$lat, sep="_")
#make a df with no duplicate lat/long (tri2nb requires unique point locations)
spdf2<-spatialdf %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2)
#make recov2 into a spatial object
WGScoor<-spdf2
coordinates(WGScoor)=~long+lat
proj4string(WGScoor)<-CRS("+proj=longlat +datum=WGS84")
#raster::shapefile(WGScoor, "recovshape.shp")
#WGScoor is a spatialpointsdataframe and was saved as a shape file
coords<-coordinates(WGScoor)
#find nearest neighbors via Delaunay triangulation
tri.nb<-tri2nb(coords, row.names=rownames(WGScoor))
tri.nb #THIS WORKED AND MADE nearest neighbors!
summary(tri.nb)
nb2listw(tri.nb) #this also worked!
head(spdf2)
vect=spdf2$.resid #vector of model residuals
vect
vect1=spdf2$calculated.recovery.rate.y #vector of response var
vect1
#MORANS test for spatial autocorrelation!
moran.test(vect, nb2listw(tri.nb))
# Moran I statistic standard deviate = -1.8338, p-value = 0.9667
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# -0.122120186 -0.010989011 0.003672439
moran.test(vect1, nb2listw(tri.nb))
# Moran I statistic standard deviate = 0.21191, p-value = 0.4161
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.001747535 -0.010989011 0.003612590
#######################################################################################################
#######################################################################################################
#######################################################################################################
###RESISTANCE####
#read in data
a=read.csv('coral_recov_master_2021.csv')
#coerce all character columns to factors for modeling
a1<-a %>% mutate_if(is.character, as.factor)
nrow(a1)
head(a1)
str(a1)
#BUILDING RESISTANCE DATASET#
#remove Arabian Gulf sample (n=1)
levels(a1$region)
a2<-subset(a1, region != "Arabian Gulf")
droplevels(a2$region)
#remove disturbance= disease, dredging (n=1) & COTS, Storm (n=1)
levels(a2$disturbance)
a3<-subset(a2, disturbance != "Disease, Dredging")
droplevels(a3$disturbance)
a4<-subset(a3, disturbance != "COTS, Storm")
droplevels(a4$disturbance)
#remove disturbance = Bleaching, Disease (n=1 for resistance only)
a5<-subset(a4, disturbance != "Bleaching, Disease")
droplevels(a5$disturbance)
#remove NAs from data
resist<-a5 %>% drop_na(resistance) %>% droplevels()
tail(resist)
nrow(resist) #184 rows of data
str(resist)
levels(resist$region)
#exploring the structure of the data
#HISTOGRAM OF RECOVERY RATES
hist(resist$resistance) #slight negative skew but approx normal
ggplot(resist, aes(x=resistance))+
geom_histogram(binwidth=5)
#center (standardise) explanatory variable(s) (mean of zero=centering, sd=1 = scaling --doing both here)
resist$hii100km2<-scale(resist$hii100km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii100km2)
resist$hii500km2<-scale(resist$hii500km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii500km2)
resist$resistance_time_2<-scale(resist$resistance.time, center=TRUE, scale=TRUE)
hist(resist$resistance_time_2) #FIX: was resist$resistance_time2 (missing underscore), which is NULL and makes hist() error
hist(resist$distance_to_shore_m)
resist$distance_to_shore_m2<-scale(resist$distance_to_shore_m, center=TRUE, scale=TRUE)
hist(resist$distance_to_shore_m2)
#NOTE(review): the next two lines operate on recov, not resist (copy-paste leftover
#from the recovery section); harmless recomputation -- the resist version is below
recov$dist_to_riv_m2<-scale(recov$dist_to_riv_m, center=TRUE, scale=TRUE)
hist(recov$dist_to_riv_m2)
hist(resist$grav_NC)
resist$grav_NC2<-scale(resist$grav_NC, center=TRUE, scale=TRUE)
hist(resist$grav_NC2)
hist(resist$cml_scr)
resist$cml_scr2<-scale(resist$cml_scr, center=TRUE, scale=TRUE)
hist(resist$cml_scr2)
hist(resist$gravity.Grav_tot)
resist$gravity.Grav_tot2<-scale(resist$gravity.Grav_tot, center=TRUE, scale=TRUE)
hist(resist$gravity.Grav_tot2)
hist(resist$travel_time.tt_pop)
resist$travel_time.tt_pop2<-scale(resist$travel_time.tt_pop, center=TRUE, scale=TRUE)
hist(resist$travel_time.tt_pop2)
resist$dist_to_riv_m2<-scale(resist$dist_to_riv_m, center=TRUE, scale=TRUE)
hist(resist$dist_to_riv_m2) #FIX: was resist$dist_to_rivm2 (typo), which is NULL and makes hist() error
head(resist)
#How many studies are in the resist dataset?
length(unique(resist$study)) #59 studies
#write.csv(resist, 'resist_2021.csv')
######MODEL TESTING#######
###GLM MODELS FOR RECOVERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
# RESISTANCE response: candidate models s1-s7, all with (1|study) random intercept.
# Workflow per model: fit -> check_collinearity (VIF) -> check_model -> check_outliers -> summary
resist<-read.csv('resist_2021.csv')
#with all vars in
s1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s1) #dist to riv is HIGH correlation. Will need to remove
performance::check_model(s1)
check_outliers(s1)#none
summary(s1)
#distance to river has high VIF (12.28). So does region. So let's remove dist to river and try again
s2<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s2) #Looks like we are good on this
performance::check_model(s2) #this look pretty good
check_outliers(s2) #none
summary(s2)
#cut gravity since it built into cml_scr
s3<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s3) #good
performance::check_model(s3)
check_outliers(s3) #none
summary(s3)
#cut distance from shore (and not gravity)-- since travel time is used for same thing
s4<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4) #good
performance::check_model(s4)
check_outliers(s4) #no outliers
summary(s4)
#cut both distance from shore and gravity
s4.1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1)
#cut travel time (see if this differs from using distance from shore)
s4.2<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.2) #good
performance::check_model(s4.2)
check_outliers(s4.2) #none
summary(s4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
s4.3<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.3) #good
performance::check_model(s4.3)
check_outliers(s4.3) #none
summary(s4.3)
#original final model
s5<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s5) #good)
performance::check_model(s5)
check_outliers(s5) #1 outlier
summary(s5)
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
s6<-lmer(resistance ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s6) #all VIF are low
performance::check_model(s6)
check_outliers(s6) #3 outliers!
summary(s6)
#only region, disturbance, and hii
s7<-lmer(resistance ~ region+disturbance+hii100km2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s7) #all VIF are low
performance::check_model(s7)
check_outliers(s7) #5 outliers!
summary(s7)
# Rank all resistance candidates by overall performance score (AIC/BIC/R2/RMSE etc.)
rperf<-compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s6,s7, rank=TRUE)
rperf
#old models (from model building)
# Full four-way interaction model (kept for comparison with additive models)
s2.0<-lmer(resistance ~ region*disturbance*hii100km2*MPA_during_resist + (1|study), data = resist, REML=FALSE)
summary(s2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
s2.1<-lmer(resistance ~ region*disturbance*hii100km2*MPA_during_resist*resistance_time_2 + (1|study), data = resist, REML=FALSE)# rank deficient
s2.2<-lmer(resistance ~ region*disturbance*hii100km2*MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_model(s2.2)
check_collinearity(s2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the cars package to see the VIFS as well
vif(s2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
# BUG FIX: the diagnostic calls below previously referenced r2.2.1-r2.2.5 (the
# recovery-section models, which are not created until later in this script).
# In a fresh session that errors ("object not found"); in a stale session it
# silently diagnoses the wrong models. They now reference the s2.2.x models
# fitted here, matching the names used in the comparison further down.
s2.2.1<-lmer(resistance ~ region*disturbance*hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.1) #still high
performance::check_model(s2.2.1)
s2.2.2<-lmer(resistance ~ region*disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.2) #still high
performance::check_model(s2.2.2)
s2.2.3<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.3) #This finally looks good!
performance::check_model(s2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(s2.2.3)
#add in both distance to nearest river and distance to shore
s2.2.4<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.4) #multicollinearity is high
check_collinearity(s2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
s2.2.5<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.5) #multicollinearity is fine
check_collinearity(s2.2.5)
#remove resistance time
s2.2.6<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
#remove distance to shore
s2.2.7<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove hii
s2.2.8<-lmer(resistance ~ region+disturbance+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove MPA status
s2.2.9<-lmer(resistance ~ region+disturbance+ (1|study), data = resist, REML=FALSE)
#remove region
s2.2.10<-lmer(resistance ~ disturbance+ (1|study), data = resist, REML=FALSE)
##COMPARE model performance
perfs1<-performance::compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s2.0,s2.1,s2.2,s2.2.1,s2.2.2,s2.2.3,s2.2.4,s2.2.5,s2.2.6,s2.2.7,s2.2.8,s2.2.9,s2.2.10, rank= TRUE)
perfs1 #Model S3 (w/ distance to shore and travel time performs best but S2, S4.1, S4, S4.2, and S1 all VERY similar--essentially not different)
#WE WILL SELECT s4.1 as it matches what we did for recovery! s4.1 uses travel time in place of distance to shore as a proxy for remoteness and includes cml_scr from the WCS pre-print in addition to HII
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#does not contain gravity or distance to shore
# Refit of the selected candidate (same formula as s4.1 above) for final checks
s4.1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1)
#replace travel time w/ dist to shore
s4.1.2<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1.2) #good
performance::check_model(s4.1.2)
check_outliers(s4.1.2) #none
summary(s4.1.2)
perf3<-performance::compare_performance(s4.1,s4.1.2,rank=TRUE)
perf3
##S4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(s4.1)
# ICC by hand: random-effect variance / (random + residual variance), from summary(s4.1)
38.72/(38.72+75) #34.05%
summary(s4.1.2)
39.05/(39.05+84.87) #31.51%
#add REML in
# Final resistance model, refit with REML=TRUE for unbiased variance estimates
fins4.1<-lmer(resistance ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=TRUE)
check_collinearity(fins4.1)#good
performance::check_model(fins4.1)
check_outliers(fins4.1) #none
summary(fins4.1)
69.98/(69.98+77.00) #47.61% of variance
#HII estimate = -.57795 +/-1.90827
#cml_scr estimate = 1.70562 +/- 1.66645
#travel time estimate = -0.72707 +/- 1.27091
####SPATIAL AUTOCORRELATION FOR RESISTANCE
#Method 1: use the check_autocorrelation() function in the performance package
check_autocorrelation(fins4.1) #OK: Residuals appear to be independent and not autocorrelated (p = 0.610).
#method 2:
# Visual check: bubble map of the raw response by coordinate
ggplot(resist, aes(x=long, y=lat, size=resistance))+
geom_point()+
theme_bw()
#Doesn't look like much but we should investigate residuals
#We will check this using our "non-spatial" model (fins4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residiuals
#resist$resid<-resid(fins4.1)
resids<-as.data.frame(resid(fins4.1))
resids
#option 2: augment from the broom package makes a df of all model stuff
lmerresidres<-broom::augment(fins4.1)
head(lmerresidres)
nrow(lmerresidres)#134
nrow(resist)#184
#problem: neither contains lat/long so a join or bind is needed.
#luckily column X.1 on resist and column .rownames on the augmented data are the same! So we can bind by that if we name the columns the same thing
head(resist)
resist1<-rename(resist, .rownames = X.1)
head(resist1)
spatialdfres<-merge(resist1, lmerresidres, by='.rownames')
head(spatialdfres)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat
spatialdfres$longlat<-paste(spatialdfres$long, spatialdfres$lat, sep="_")
#make a df with no duplicate lat/long
# tri2nb requires unique coordinates, so drop duplicate sites first
spdf2res<-spatialdfres %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2res)
#make recov2 into a spatial object
WGScoorres<-spdf2res
coordinates(WGScoorres)=~long+lat
proj4string(WGScoorres)<-CRS("+proj=longlat +datum=WGS84")
raster::shapefile(WGScoorres, "resistshape.shp")
#WGScoor is a spatialpointsdataframe and was saved as a shape file
coords<-coordinates(WGScoorres)
#find nearest neighbors
tri.nbres<-tri2nb(coords, row.names=rownames(WGScoorres))
tri.nbres #THIS WORKED AND MADE nearest neighbors!
summary(tri.nbres)
nb2listw(tri.nbres) #this also worked!
head(spdf2res)
vectr=spdf2res$.resid #vector of model residuals
vectr
vectr1=spdf2res$resistance.y #vector of response var
vectr1
#MORANS test for spatial autocorrelation!
# Test residuals first (the key assumption), then the raw response
moran.test(vectr, nb2listw(tri.nbres))
# Moran I statistic standard deviate = 0.60469, p-value = 0.2727
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.024899265 -0.009523810 0.003240714
moran.test(vectr1, nb2listw(tri.nbres))
# Moran I statistic standard deviate = 0.47645, p-value = 0.3169
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.017786786 -0.009523810 0.003285649
#############################################################
#############################################################
### IGR ######################################################
# Read in data
recov<-read.csv('recov_2021.csv')
head(recov)
### Instantaneous Growth Rate (IGR) from Ortiz et al (2018)
#calculated as: r = LN ((recovery coral cover + 5) / (post dist coral cover + 5))/recovery time
#histogram of IGR
ggplot(recov, aes(x=IGR))+
geom_histogram(binwidth=1) #actually appears approx normal
#using same model structure we used for other vars:
######MODEL TESTING#######
###GLM MODELS FOR RECOVERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
# Candidate models r1-r7 mirror the resistance workflow, with IGR as response
#with all vars in
r1<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r1)
performance::check_model(r1)
check_outliers(r1) #1 outlier
summary(r1)
#dist to riv and region have HIGH VIF
r2<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r2) #looks good actually
performance::check_model(r2) #this look pretty good
check_outliers(r2) #1 outlier detected
summary(r2)
#cut gravity
r3<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r3) #looks good
performance::check_model(r3)
check_outliers(r3) # 1 outlier
summary(r3)
#cut distance from shore (and not gravity)
r4<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4) #looks good
performance::check_model(r4)
check_outliers(r4) #no outliers
summary(r4)
#cut both distance from shore and gravity
r4.1<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #looks good
performance::check_model(r4.1)
check_outliers(r4.1) #none
summary(r4.1)
#cut travel time
r4.2<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.2) #looks good
performance::check_model(r4.2)
check_outliers(r4.2) #none
summary(r4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
r4.3<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.3) #VERY low VIF
performance::check_model(r4.3)
check_outliers(r4.3) #1 outlier
summary(r4.3)
#original final model
r5<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r5) #all VIF below 2.38 :)
performance::check_model(r5)
check_outliers(r5) #1 outlier
summary(r5)
compare_performance(r1,r2)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5, rank=TRUE)
perf
#model r4 wins, though lowest AIC and BIC are model r4.1
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
r6<-lmer(IGR ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r6) #all VIF are low
performance::check_model(r6)
check_outliers(r6) #1 outlier!
summary(r6)
#only region, disturbance, and hii
r7<-lmer(IGR ~ region+disturbance+hii100km2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r7) #all VIF are low
performance::check_model(r7)
check_outliers(r7) #1 outlier!
summary(r7)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r6,r7, rank=TRUE)
perf #still r4.1 and r4
#old models (from model building)
r2.0<-lmer(IGR ~ region*disturbance*hii100km2*MPA_during_recov + (1|study), data = recov, REML=FALSE)
summary(r2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
r2.1<-lmer(IGR ~ region*disturbance*hii100km2*MPA_during_recov*recovery_time2 + (1|study), data = recov, REML=FALSE)# rank deficient
r2.2<-lmer(IGR ~ region*disturbance*hii100km2*MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_model(r2.2) #singular fit
check_collinearity(r2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the cars package to see the VIFS as well
vif(r2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
r2.2.1<-lmer(IGR ~ region*disturbance*hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.1) #still high
performance::check_model(r2.2.1) #rank deficient
r2.2.2<-lmer(IGR ~ region*disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.2) #still high
performance::check_model(r2.2.2) #rank deficient
r2.2.3<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = recov, REML=FALSE)
check_collinearity(r2.2.3) #This finally looks good!
performance::check_model(r2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(r2.2.3)
#add in both distance to nearest river and distance to shore
r2.2.4<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.4) #multicollinearity is high
check_collinearity(r2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
r2.2.5<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
performance::check_model(r2.2.5) #multicollinearity is fine
check_collinearity(r2.2.5)
#remove recovery time
r2.2.6<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+distance_to_shore_m2 + (1|study), data = recov, REML=FALSE)
#remove distance to shore
r2.2.7<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove hii
r2.2.8<-lmer(IGR ~ region+disturbance+MPA_during_recov+ (1|study), data = recov, REML=FALSE)
#remove MPA status
r2.2.9<-lmer(IGR ~ region+disturbance+ (1|study), data = recov, REML=FALSE)
#remove region
r2.2.10<-lmer(IGR ~ disturbance+ (1|study), data = recov, REML=FALSE)
##COMPARE model performance
perf1<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r2.0,r2.1,r2.2,r2.2.1,r2.2.2,r2.2.3,r2.2.4,r2.2.5,r2.2.6,r2.2.7,r2.2.8,r2.2.9,r2.2.10, rank= TRUE)
perf1
perf2<-performance::compare_performance(r1,r2,r3,r4,r5,r4.1,r4.2,r4.3,rank=TRUE)
perf2 #r4 and 4.1 still the best! Let's use 4.1 as it is the best for recov and resist
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#model r4.1 is best AIC and BIC. IT does not contain gravity or distance to shore
# Refit of the selected candidate (same formula as r4.1 above) for final checks
r4.1<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1) #all good
performance::check_model(r4.1)
check_outliers(r4.1) #NONE
summary(r4.1)
#replace travel time w/ dist to shore
r4.1.2<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = recov, REML=FALSE)
check_collinearity(r4.1.2) #good
performance::check_model(r4.1.2)
check_outliers(r4.1.2) #1 outlier
summary(r4.1.2)
perf3<-performance::compare_performance(r4.1,r4.1.2,rank=TRUE)
perf3
##4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(r4.1)
# ICC by hand: random-effect variance / (random + residual variance), from summary(r4.1)
0.1348/(0.1348+0.1580) #46.04%
#add REML in
# Final IGR model, refit with REML=TRUE for unbiased variance estimates
finr4.1igr<-lmer(IGR ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = recov, REML=TRUE)
check_collinearity(finr4.1igr)#ALL good
performance::check_model(finr4.1igr)
performance::check_outliers(finr4.1igr, method=c("cook","pareto")) #none
summary(finr4.1igr)
0.2619/(0.2619+0.1659) #61.22% of variance
compare_performance(r4.1,finr4.1igr, rank=TRUE)
#finr4.1 has lower AIC :)
##########################################################################################################
# Relative Recovery and Resistance ########################################################################
####EXPLORING DATA BEFORE MODEL TESTING####
relrecov<-read.csv('rel_recov_2021.csv')
#basic lm
#how many unique lat/longs are there?
nrow(relrecov) #151 (182 in the non-relative recov df--had to cut when we didn't have a pre-dist value to standardize to)
head(relrecov)
# Simple single-predictor linear models before moving to mixed models
lm1<-lm(rel_rec_rate ~ hii100km2, data=relrecov)
summary(lm1)
lm2<-lm(rel_rec_rate ~ cml_scr2, data=relrecov)
summary(lm2)
lm3<-lm(rel_rec_rate ~ grav_NC2, data=relrecov)
summary(lm3)
# NOTE(review): x=hii100km here, but lm1 and the later plots use hii100km2 --
# likely a typo; confirm which column exists in rel_recov_2021.csv
plot1<-ggplot(relrecov, aes(x=hii100km, y=rel_rec_rate))+
geom_point()+
geom_smooth(method="lm")
plot1
plot2<-ggplot(relrecov, aes(x=cml_scr2, y=rel_rec_rate))+
geom_point()+
geom_smooth(method="lm")
plot2
plot3<-ggplot(relrecov, aes(x=grav_NC2, y=rel_rec_rate))+
geom_point()+
geom_smooth(method="lm")
plot3
# Side-by-side layout (patchwork syntax)
plot1+plot2+plot3
#are assumptions met?
#residuals
plot(lm1, which=1) #gray line is flat, red should be nearly flat (mimicking gray)
#looks ok
plot(lm2, which=1) # looks ok
plot(lm3, which=1) # looks ok
#qq
plot(lm1, which=2) #points should be close to the line. They diverge at the ends a little
plot(lm2, which=2) #same as above
plot(lm3, which=2) #same as above
#check for observation independence (use categorical vars here)
#if data from within each category are more similar to each other than to data from different categories then they are correlated!
#region
boxplot(rel_rec_rate ~ region, data=relrecov) #maybe correlated? Possibly not.
#disturbance
boxplot(rel_rec_rate ~ disturbance, data= relrecov) #most likely correlated though sample sizes might be small
#plot w/ colors by category to see
color1<-ggplot(relrecov, aes(x=hii100km2, y=rel_rec_rate, color=region))+
geom_point(size=2)+
theme_bw()
color1
#regions vary by relrecovery rate and hii, so observations within are not independent
color2<-ggplot(relrecov, aes(x=hii100km2, y=rel_rec_rate, color=disturbance))+
geom_point(size=2)+
theme_bw()
color2
#disturbances vary by relrecovery rate AND hii100km, so observations within these are NOT INDEPENDENT
#add fixed effects into the model
lm2.1<-lm(rel_rec_rate ~ hii100km2 + region + disturbance, data=relrecov)
summary(lm2.1)
######MODEL TESTING#######
###GLM MODELS FOR relrecovERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects go after the ~, random go after that var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
# Candidate models r1-r7 for relative recovery rate; note r1-r7 REUSE the IGR
# object names above, so the IGR fits are overwritten from here on
#with all vars in
r1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r1)
performance::check_model(r1)
check_outliers(r1) #1 outlier
summary(r1)
#Singular fit -- so need to simplify. VIFs for region and dist to riv are very high. CUT dist to riv
r2<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2) #region still high, so something colinear w/ that...
performance::check_model(r2) #this look pretty good
check_outliers(r2) #1 outlier detected
summary(r2)
#cut gravity
r3<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r3) #still moderate multicollinearity (VIF for region = 7.05, disturbance=5.34)
performance::check_model(r3)
check_outliers(r3)#1 outlier
summary(r3)
#cut distance from shore (and not gravity)
r4<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4) #still moderate VIF for region and dist
performance::check_model(r4)
check_outliers(r4) #1 outlier
summary(r4)
#cut both distance from shore and gravity
r4.1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.1) #still moderate VIFS (region =6.79, dist=5.16)
performance::check_model(r4.1)
check_outliers(r4.1) #no outliers
summary(r4.1)
#cut travel time
r4.2<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.2) #Still moderate Vifs for region and dist
performance::check_model(r4.2)
check_outliers(r4.2) #none
summary(r4.2)
#cut travel time and gravity, ends up being just the old model +cml_scr
r4.3<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.3) #Vifs are less bad BUT SINGULAR FIT. So cannot use
performance::check_model(r4.3)
check_outliers(r4.3) #none
summary(r4.3)
#original final model
r5<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r5) #VIFS are low but singular fit!
performance::check_model(r5)
check_outliers(r5) #none
summary(r5)
perf<-performance::compare_performance(r4.1,r4.2,r4.3,r5)
perf
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
r6<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r6) #all VIF are low but SINGULAR FIT
performance::check_model(r6)
check_outliers(r6) #none
summary(r6)
#only region, disturbance, and hii
r7<-lmer(rel_rec_rate ~ region+disturbance+hii100km2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r7) #all VIF are low but SINGULAR FIT
performance::check_model(r7)
check_outliers(r7) #1 outlier
summary(r7)
perf<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r6,r7, rank=TRUE)
perf
#old models (from model building)
r2.0<-lmer(rel_rec_rate ~ region*disturbance*hii100km2*MPA_during_recov + (1|study), data = relrecov, REML=FALSE)
summary(r2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
r2.1<-lmer(rel_rec_rate ~ region*disturbance*hii100km2*MPA_during_recov*recovery_time2 + (1|study), data = relrecov, REML=FALSE)# rank deficient
r2.2<-lmer(rel_rec_rate ~ region*disturbance*hii100km2*MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_model(r2.2)
check_collinearity(r2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the cars package to see the VIFS as well
vif(r2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
r2.2.1<-lmer(rel_rec_rate ~ region*disturbance*hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2.2.1) #still high
performance::check_model(r2.2.1)
r2.2.2<-lmer(rel_rec_rate ~ region*disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2.2.2) #still high
performance::check_model(r2.2.2)
r2.2.3<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2 + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r2.2.3) #This finally looks good!
performance::check_model(r2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(r2.2.3)
#add in both distance to nearest river and distance to shore
r2.2.4<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = relrecov, REML=FALSE)
performance::check_model(r2.2.4) #multicollinearity is high
check_collinearity(r2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
r2.2.5<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+distance_to_shore_m2 + (1|study), data = relrecov, REML=FALSE)
performance::check_model(r2.2.5) #multicollinearity is fine
check_collinearity(r2.2.5)
#remove relrecovery time
r2.2.6<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+distance_to_shore_m2 + (1|study), data = relrecov, REML=FALSE)
#remove distance to shore
r2.2.7<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+ (1|study), data = relrecov, REML=FALSE)
#remove hii
r2.2.8<-lmer(rel_rec_rate ~ region+disturbance+MPA_during_recov+ (1|study), data = relrecov, REML=FALSE)
#remove MPA status
r2.2.9<-lmer(rel_rec_rate ~ region+disturbance+ (1|study), data = relrecov, REML=FALSE)
#remove region
r2.2.10<-lmer(rel_rec_rate ~ disturbance+ (1|study), data = relrecov, REML=FALSE)
##COMPARE model performance
perf1<-performance::compare_performance(r1,r2,r3,r4,r4.1,r4.2,r4.3,r5,r2.0,r2.1,r2.2,r2.2.1,r2.2.2,r2.2.3,r2.2.4,r2.2.5,r2.2.6,r2.2.7,r2.2.8,r2.2.9,r2.2.10, rank= TRUE)
perf1
perf2<-performance::compare_performance(r1,r2,r3,r4,r5,r4.1,r4.2,r4.3,rank=TRUE)
perf2
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 considering the fixed factors only
#R2 conditional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#model r4.1 is best AIC and BIC. IT does not contain gravity or distance to shore
# Refit of the selected candidate (same formula as r4.1 above) for final checks
r4.1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.1) #highest VIF=6.79, suggest some multicollinearity!
performance::check_model(r4.1) #these look ok though except for multicollinearity
check_outliers(r4.1) #none
summary(r4.1) #AIC = 776.1, BIC=824.6
#replace travel time w/ dist to shore
r4.1.2<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = relrecov, REML=FALSE)
check_collinearity(r4.1.2) #VIFs a little lower but SINGULAR FIT.
performance::check_model(r4.1.2)
check_outliers(r4.1.2) #none
summary(r4.1.2)
perf3<-performance::compare_performance(r4.1,r4.1.2,rank=TRUE)
perf3
##4.1 has lowest AIC and is not a singular fit. In spite of collinearity concerns, let's use it.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(r4.1)
# ICC by hand: random-effect variance / (random + residual variance), from summary(r4.1)
3.932/(3.932+48.775) #7.46%
#add REML in
# Final relative-recovery model, refit with REML=TRUE for unbiased variance estimates
finr4.1<-lmer(rel_rec_rate ~ region+disturbance+hii100km2+MPA_during_recov+recovery_time2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = relrecov, REML=TRUE)
check_collinearity(finr4.1)#VIF max =5.29 (region). We can live with that I think
performance::check_model(finr4.1) #looks good for (note 1 VIF > 5--region)
performance::check_outliers(finr4.1, method=c("cook","pareto")) #none
summary(finr4.1)
11.60/(11.60+53.68) #17.76961% of variance
#HII estimate = 2.456 +/-1.34970
#cml_scr estimate = 0.22960 +/- 1.40278
#travel time estimate = 1.12155 +/- 1.10504
compare_performance(r4.1,finr4.1, rank=TRUE)
#finr4.1 has lower AIC :)
###CHECK FOR SPATIAL AUTOCORRELATION###
#method 1: use the check_autocorrelation() feature of the performance package
check_autocorrelation(finr4.1)# OK: Residuals appear to be independent and not autocorrelated (p = 0.812).
#method 2:
#based on: https://datascienceplus.com/spatial-regression-in-r-part-1-spamm-vs-glmmtmb/
# Visual check: bubble map of the raw response by coordinate
ggplot(relrecov, aes(x=long, y=lat, size=rel_rec_rate))+
geom_point()+
theme_bw()
#maybe size looks very close to the same throughout but maybe a pattern? let's investigate.
#We will check this using our "non-spatial" model (finr4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residiuals
relrecov$resid<-resid(finr4.1)
resids<-as.data.frame(resid(finr4.1))
resids
#option 2: augment from the broom package makes a df of all model stuff
#due to updates and changes we now need the broom.mixed package
library(broom.mixed)
lmerresid<-broom.mixed::augment(finr4.1)
head(lmerresid)
nrow(lmerresid)#109
nrow(relrecov)#151
#problem: neither contains lat/long so a join or bind is needed.
#luckily column X.2 on relrecov and column .rownames on the augmented data are the same! So we can bind by that if we name the columns the same thing
relrecov1<-rename(relrecov, .rownames = X.2)
head(relrecov1)
spatialdf<-merge(relrecov1, lmerresid, by='.rownames')
head(spatialdf)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat
spatialdf$longlat<-paste(spatialdf$long, spatialdf$lat, sep="_")
#make a df with no duplicate lat/long
# tri2nb requires unique coordinates, so drop duplicate sites first
spdf2<-spatialdf %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2)
#make relrecov2 into a spatial object
WGScoor<-spdf2
coordinates(WGScoor)=~long+lat
proj4string(WGScoor)<-CRS("+proj=longlat +datum=WGS84")
#raster::shapefile(WGScoor, "relrecovshape.shp")
#WGScoor is a spatialpointsdataframe and was saved as a shape file
coords<-coordinates(WGScoor)
#find nearest neighbors
tri.nb<-tri2nb(coords, row.names=rownames(WGScoor))
tri.nb #THIS WORKED AND MADE nearest neighbors!
summary(tri.nb)
nb2listw(tri.nb) #this also worked!
head(spdf2)
vect=spdf2$.resid #vector of model residuals
vect
vect1=spdf2$rel_rec_rate.y #vector of response var
vect1
#MORANS test for spatial autocorrelation!
# Test residuals first (the key assumption), then the raw response
moran.test(vect, nb2listw(tri.nb))
# Moran I statistic standard deviate = -0.39564, p-value = 0.6538
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# -0.037737131 -0.012500000 0.004068959
moran.test(vect1, nb2listw(tri.nb))
# Moran I statistic standard deviate = 1.2743, p-value = 0.1013
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.067806433 -0.012500000 0.003971795
#######################################################################################################
#######################################################################################################
#######################################################################################################
###RESISTANCE####
#read in data
a=read.csv('coral_relrecov_master_2021.csv')
# Convert every character column to a factor so subset()/lmer() treat them
# as categorical variables
a1<-a %>% mutate_if(is.character, as.factor)
nrow(a1)
head(a1)
str(a1)
#BUILDING RESISTANCE DATASET#
#remove Arabian Gulf sample (n=1)
levels(a1$region)
a2<-subset(a1, region != "Arabian Gulf")
# NOTE(review): this droplevels() call (and the ones below) returns a
# modified copy that is never assigned, so each call only *prints* the
# remaining levels for inspection. The unused levels are actually removed
# by the droplevels() at the end of the `resist` pipeline below.
droplevels(a2$region)
#remove disturbance= disease, dredging (n=1) & COTS, Storm (n=1)
levels(a2$disturbance)
a3<-subset(a2, disturbance != "Disease, Dredging")
droplevels(a3$disturbance)
a4<-subset(a3, disturbance != "COTS, Storm")
droplevels(a4$disturbance)
#remove distrubance = Bleaching, Disease (n=1 for resistance only)
a5<-subset(a4, disturbance != "Bleaching, Disease")
droplevels(a5$disturbance)
#remove NAs from data
# Drop rows missing the response, then discard now-empty factor levels
resist<-a5 %>% drop_na(resistance) %>% droplevels()
tail(resist)
nrow(resist) #184 rows of data
str(resist)
levels(resist$region)
#exploring the structure of the data
#HISTOGRAM OF relrecovERY RATES
hist(resist$resistance) #slight negative skew but approx normal
ggplot(resist, aes(x=resistance))+
geom_histogram(binwidth=5)
#center (standardise) explanatory variable(s) (mean of zero=centering, sd=1 = scaling --doing both here)
resist$hii100km2<-scale(resist$hii100km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii100km2)
resist$hii500km2<-scale(resist$hii500km, center=TRUE, scale=TRUE) #only works for numeric variables
hist(resist$hii500km2)
resist$resistance_time_2<-scale(resist$resistance.time, center=TRUE, scale=TRUE)
# BUG FIX: was hist(resist$resistance_time2) -- that column does not exist
# (the scaled column created above is "resistance_time_2", the name the
# models below use), so hist() failed on a NULL argument.
hist(resist$resistance_time_2)
hist(resist$distance_to_shore_m)
resist$distance_to_shore_m2<-scale(resist$distance_to_shore_m, center=TRUE, scale=TRUE)
hist(resist$distance_to_shore_m2)
# BUG FIX: two stray copy-paste lines here scaled relrecov$dist_to_riv_m,
# mutating the *recovery* data frame in the middle of building the
# resistance data set. The resist version of this column is created below,
# so the relrecov lines were removed.
hist(resist$grav_NC)
resist$grav_NC2<-scale(resist$grav_NC, center=TRUE, scale=TRUE)
hist(resist$grav_NC2)
hist(resist$cml_scr)
resist$cml_scr2<-scale(resist$cml_scr, center=TRUE, scale=TRUE)
hist(resist$cml_scr2)
hist(resist$gravity.Grav_tot)
resist$gravity.Grav_tot2<-scale(resist$gravity.Grav_tot, center=TRUE, scale=TRUE)
hist(resist$gravity.Grav_tot2)
hist(resist$travel_time.tt_pop)
resist$travel_time.tt_pop2<-scale(resist$travel_time.tt_pop, center=TRUE, scale=TRUE)
hist(resist$travel_time.tt_pop2)
resist$dist_to_riv_m2<-scale(resist$dist_to_riv_m, center=TRUE, scale=TRUE)
# BUG FIX: was hist(resist$dist_to_rivm2) (missing underscore) -- NULL column.
hist(resist$dist_to_riv_m2)
head(resist)
#How many studies are in the relrecov dataset?
length(unique(resist$study)) #59 studies
#write.csv(resist, 'rel_resist_2021.csv')
######MODEL TESTING#######
###GLM MODELS FOR relrecovERY
#RUN ONCE! JUST READ .csv IN FOR GRAPHING!
# mixed effect model run using lmer()
#model testing (REML=FALSE for this step)
#REML=restricted (or residual) maximum likelihood
#fixed effects fo after the ~, random go after that var ~ fixed + (1| random)
#When VIFs are very high, collinearity is an issue. Remove VIFs > ~ 2 and retest.
#######
#Building the "full" model. Can remove variables one by one from here to find optimal model
#######
resist<-read.csv('rel_resist_2021.csv')
#with all vars in
s1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s1) #dist to riv is HIGH correlation. Will need to remove
performance::check_model(s1)
check_outliers(s1)#none
summary(s1)
#distance to river has high VIF (13.25). So does region. So let's remove dist to river and try again
s2<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s2) #Looks like we are good on this
performance::check_model(s2) #this look pretty good
check_outliers(s2) #none
summary(s2)
#cut gravity since it built into cml_scr
s3<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s3) #good
performance::check_model(s3)
check_outliers(s3) #1 outlier
summary(s3)
#cut distance from shore (and not gravity)-- since travel time is used for same thing
s4<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+gravity.Grav_tot2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4) #good
performance::check_model(s4)
check_outliers(s4) #no outliers
summary(s4)
#cut both distance from shore and gravity
s4.1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1) #AIC = 1275.1, BIC=1324.4
#cut travel time (see if this differs from using distance from shore)
s4.2<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+gravity.Grav_tot2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.2) #good
performance::check_model(s4.2)
check_outliers(s4.2) #1 outlier
summary(s4.2) #AIC=1277.1, BIC=1329.3
#cut travel time and gravity, ends up being just the old model +cml_scr
s4.3<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+cml_scr2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.3) #good
performance::check_model(s4.3)
check_outliers(s4.3) #none
summary(s4.3)
#original final model
s5<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s5) #good)
performance::check_model(s5)
check_outliers(s5) #none
summary(s5)
#testing some simpler models for comparisons sake
#only region, dist, hii, and cml_scr
s6<-lmer(rel_res ~ region+disturbance+hii100km2+cml_scr2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s6) #all VIF are low
performance::check_model(s6)
check_outliers(s6) #3 outliers!
summary(s6)
#only region, disturbance, and hii
s7<-lmer(rel_res ~ region+disturbance+hii100km2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s7) #all VIF are low
performance::check_model(s7)
check_outliers(s7) #5 outliers!
summary(s7)
rperf<-compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s6,s7, rank=TRUE)
rperf
#old models (from model building)
s2.0<-lmer(rel_res ~ region*disturbance*hii100km2*MPA_during_resist + (1|study), data = resist, REML=FALSE)
summary(s2.0)
#if modeled slope is < modeled error, then the effect cannot be distinguished from zero!
#variance from random effects / total variance #Tells us how much var left over AFTER fixed effects is explained by random effects
#rank deficient
s2.1<-lmer(rel_res ~ region*disturbance*hii100km2*MPA_during_resist*resistance_time_2 + (1|study), data = resist, REML=FALSE)# rank deficient
s2.2<-lmer(rel_res ~ region*disturbance*hii100km2*MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_model(s2.2)
check_collinearity(s2.2) #VIFs are very high, indicating collinearity is an issue. Remove VIFs > ~ 2 and retest.
#can use the vif function in the car package to see the VIFS as well
vif(s2.2)
#REMOVE THE INTERACTION TERMS 1 at a time and compare VIFs
# BUG FIX: the diagnostics below originally referenced r2.2.1 ... r2.2.5 --
# the *recovery* models from the earlier section -- instead of the s2.2.x
# resistance models fit here (copy-paste from the recovery code). Every
# check_collinearity()/check_model()/summary() now points at the model it
# is meant to describe.
s2.2.1<-lmer(rel_res ~ region*disturbance*hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.1) #still high
performance::check_model(s2.2.1)
s2.2.2<-lmer(rel_res ~ region*disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.2) #still high
performance::check_model(s2.2.2)
s2.2.3<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2 + (1|study), data = resist, REML=FALSE)
check_collinearity(s2.2.3) #This finally looks good!
performance::check_model(s2.2.3) #THIS LOOKS GOOD. Use this model moving forward.
summary(s2.2.3)
#add in both distance to nearest river and distance to shore
s2.2.4<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+dist_to_riv_m2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.4) #multicollinearity is high
check_collinearity(s2.2.4) #region and dist to river are highly correlated. Remove dist to river
#just distance to shore added
s2.2.5<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
performance::check_model(s2.2.5) #multicollinearity is fine
check_collinearity(s2.2.5)
#remove resistance time
s2.2.6<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+distance_to_shore_m2 + (1|study), data = resist, REML=FALSE)
#remove distance to shore
s2.2.7<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove hii
s2.2.8<-lmer(rel_res ~ region+disturbance+MPA_during_resist+ (1|study), data = resist, REML=FALSE)
#remove MPA status
s2.2.9<-lmer(rel_res ~ region+disturbance+ (1|study), data = resist, REML=FALSE)
#remove region
s2.2.10<-lmer(rel_res ~ disturbance+ (1|study), data = resist, REML=FALSE)
##COMPARE model performance
perfs1<-performance::compare_performance(s1,s2,s3,s4,s4.1,s4.2,s4.3,s5,s2.0,s2.1,s2.2,s2.2.1,s2.2.2,s2.2.3,s2.2.4,s2.2.5,s2.2.6,s2.2.7,s2.2.8,s2.2.9,s2.2.10, rank= TRUE)
perfs1 #Model S3 (w/ distance to shore and travel time performs best but S2, S4.1, S4, S4.2, and S1 all VERY similar--essentially not different)
#WE WILL SELECT s4.1 as it matches what we did for relrecovery! s4.1 uses travel time in place of distance to shore as a proxy for remoteness and includes cml_scr from the WCS pre-print in addition to HII
#compare_performance explained
#AIC, BIC are both criterion for model selection-> smaller is better
#R2_marginal is R2 consdiering the fixed factors only
#R2 condtiional takes both fixed and random factors into account
#ICC is Intraclass correlation coefficient - "the proportion of the variance explained by the grouping structure in the population" (Hox 2010)
#RMSE= root mean squared error (smaller is generally better)
#BF is bayes factor (not sure it is relevant)
#does not contain gravity or distance to shore
s4.1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1) #good
performance::check_model(s4.1)
check_outliers(s4.1) #none
summary(s4.1) #AIC=1275.1, BIC=1324.4
#replace travel time w/ dist to shore
s4.1.2<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+distance_to_shore_m2+pre.dist.cc + (1|study), data = resist, REML=FALSE)
check_collinearity(s4.1.2) #good
performance::check_model(s4.1.2)
check_outliers(s4.1.2) #none
summary(s4.1.2) #AIC = 1722.3, BIC=1776.3
perf3<-performance::compare_performance(s4.1,s4.1.2,rank=TRUE)
perf3
##S4.1 has lowest AIC, so let's move forward with that one.
#Differs from previous final model because cml_scr is added and dist to shore replaced w/ travel time
summary(s4.1)
191.9/(191.9+498.1) #27.81%
#add REML in
fins4.1<-lmer(rel_res ~ region+disturbance+hii100km2+MPA_during_resist+resistance_time_2+cml_scr2+travel_time.tt_pop2+pre.dist.cc + (1|study), data = resist, REML=TRUE)
check_collinearity(fins4.1)#good
performance::check_model(fins4.1)
check_outliers(fins4.1) #none
summary(fins4.1)
400.3/(400.3+502.9) #44.32% of variance
#HII estimate = -5.1338 +/-4.7084
#cml_scr estimate = 8.9376 +/- 4.1876
#travel time estimate = -0.1736 +/-3.1860
####SPATIAL AUTOCORRELATION FOR RESISTANCE
#Method 1: use the check_autocorrelation() function in the performance package
check_autocorrelation(fins4.1) #OK: Residuals appear to be independent and not autocorrelated (p = 0.344).
#method 2:
ggplot(resist, aes(x=long, y=lat, size=resistance))+
geom_point()+
theme_bw()
#Doesn't look like much but we should investigate residuals
#We will check this using our "non-spatial" model (fins4.1) above
#first we need the residuals from the model
#option 1-> the resid() function generates just a list of residiuals
#resist$resid<-resid(fins4.1)
resids<-as.data.frame(resid(fins4.1))
resids
#option 2: augment from the broom package makes a df of all model stuff
lmerresidres<-broom.mixed::augment(fins4.1)
head(lmerresidres)
nrow(lmerresidres)#134
nrow(resist)#184
#problem: neither contains lat/long so a join or bind is needed.
#luckily column X.2 on relrecov and column .rownames on the augmented data are the same! So we can bind by that is we name the columns the same thing
head(resist)
resist1<-rename(resist, .rownames = X.1)
head(resist1)
spatialdfres<-merge(resist1, lmerresidres, by='.rownames')
head(spatialdfres)
#now we have resids and data together so we can carry on
#using tri2nb in the spdep package to find nearest neighbors of points
#https://rdrr.io/rforge/spdep/man/tri2nb.html
#make new column for longlat
spatialdfres$longlat<-paste(spatialdfres$long, spatialdfres$lat, sep="_")
#make a df with no duplicate lat/long
spdf2res<-spatialdfres %>%
distinct(longlat, .keep_all=TRUE)
head(spdf2res)
#make relrecov2 into a spatial object
WGScoorres<-spdf2res
coordinates(WGScoorres)=~long+lat
proj4string(WGScoorres)<-CRS("+proj=longlat +datum=WGS84")
raster::shapefile(WGScoorres, "resistshape.shp")
#WGScoor is a spatialpointsdataframe and was saved as a shape file
coords<-coordinates(WGScoorres)
#find nearest neighbors
tri.nbres<-tri2nb(coords, row.names=rownames(WGScoorres))
tri.nbres #THIS WORKED AND MADE nearest neighbors!
summary(tri.nbres)
nb2listw(tri.nbres) #this also worked!
head(spdf2res)
vectr=spdf2res$.resid #vector of model residuals
vectr
vectr1=spdf2res$resistance.y #vector of response var
vectr1
#MORANS test for spatial autocorrelation!
moran.test(vectr, nb2listw(tri.nbres))
# Moran I statistic standard deviate = 0.47746, p-value = 0.3165
# alternative hypothesis: greater
# sample estimates:
# Moran I statistic Expectation Variance
# 0.017704076 -0.009523810 0.003252007
moran.test(vectr1, nb2listw(tri.nbres))
#FAILS- is not a numeric vector
|
#robustness
library(MASS)
#generates a data set, mean vector (0, 10):
# NOTE(review): no set.seed() before mvrnorm, so d differs on every run --
# add a seed if reproducible output is wanted.
n <-20
A <-matrix(c(4,2,2,3), ncol=2) # 2x2 covariance matrix for the bivariate normal draw
d <- mvrnorm(n, c(0,10), A)
#lets take a look
d
###########################
#BPmean
############################
#just saves the data as a new variable:
dc<- d
#lets take a look, dc is equal to d
dc
#lets make one of the points very nasty:
dc[n,1]<-10000000000
dc[n,2]<-10000000000
#lets take a look at our contaminated data
dc
#lets check Edistance
# Euclidean distance between the clean and contaminated mean vectors:
# a single extreme observation drags the mean arbitrarily far, which is
# the point of this breakdown-point (BP) demonstration.
bdmean <- sqrt(sum((colMeans(d)- colMeans(dc))^2))
bdmean
#Based on this, is mean robust?
###########################
#empiricalIFmean
############################
#Empirical influence function for mean
# Empirical influence function of the sample mean: the scaled change in the
# column means when a single observation x is appended to the data set d.
IF <- function(x, d, n) {
  augmented <- rbind(d, x)  # the sample with the probe point added
  (n + 1) * (colMeans(augmented) - colMeans(d))
}
#point (0,0)
# Preallocate the result matrix: row 1 is the starting point (0, 0) and the
# remaining 200 rows hold IF evaluated at the probe points (1,1)...(200,200).
# Filling preallocated rows replaces the original rbind-in-a-loop, which
# grows the matrix on every iteration (O(n^2) copying).
Y <- matrix(0, nrow = 201, ncol = 2)
#lets take a quick look
Y[1, , drop = FALSE]
for (i in seq_len(200)) {
  x <- matrix(c(i, i), ncol = 2) #new points (1,1), (2,2), (3,3) added
  Y[i + 1, ] <- IF(x, d, n) #influence function calculated (with points (i,i))
}
#Y now contains differences of the means
plot(Y[,1], type="l", col="blue")#first column
lines(Y[,2], col="red")#second column
#Based on this, is mean robust?
| /Ex 4/data/Lecture4_BP_and_IF_O.R | no_license | imsrgadich/MS-E2112-Multivariate-Statistical-Analysis | R | false | false | 1,310 | r | #robustness
library(MASS)
#generates a data set, mean vector (0, 10):
n <-20
A <-matrix(c(4,2,2,3), ncol=2)
d <- mvrnorm(n, c(0,10), A)
#lets take a look
d
###########################
#BPmean
############################
#just saves the data as a new variable:
dc<- d
#lets take a look, dc is equal to d
dc
#lets make one of the points very nasty:
dc[n,1]<-10000000000
dc[n,2]<-10000000000
#lets take a look at our contaminated data
dc
#lets check Edistance
bdmean <- sqrt(sum((colMeans(d)- colMeans(dc))^2))
bdmean
#Based on this, is mean robust?
###########################
#empiricalIFmean
############################
#Empirical influence function for mean
# Empirical influence function for the mean: how far the column means move,
# scaled by (n + 1), when one extra point x joins the sample d.
IF <- function(x, d, n) {
  (n + 1) * (colMeans(rbind(d, x)) - colMeans(d))
}
#point (0,0)
Y<-matrix(c(0,0), ncol=2)
#lets take a quick look
Y
for(i in 1:200){
x<- matrix(c(i,i), ncol=2)#new points (1,1), (2,2), (3,3) added
Y<-rbind(Y,IF(x,d,n))#influence function calculated (with points (i,i))
}
#Y now contains differences of the means
plot(Y[,1], type="l", col="blue")#first column
lines(Y[,2], col="red")#second column
#Based on this, is mean robust?
|
try({
system("git pull")
system("git add *")
system("git commit -m 'Ensemble V2'")
system("git push")
Sys.sleep(900)
system("init 0")
}) | /ensembles/ensemblev2/git.R | no_license | MarcosGrzeca/drunktweets | R | false | false | 142 | r | try({
system("git pull")
system("git add *")
system("git commit -m 'Ensemble V2'")
system("git push")
Sys.sleep(900)
system("init 0")
}) |
library(reshape2)
##Before starting out - making sure that the working directory is right
print ("checking if the file is installed in the correct directory")
# Guard: the script must run from the directory containing the unzipped
# "UCI HAR Dataset" folder, otherwise the read.table() calls below cannot
# find their input files.
result <- sum(as.numeric(list.files() %in% "UCI HAR Dataset"))
if (result == 0) {
  # BUG FIX: the original called warning() followed by STOP(); R is
  # case-sensitive and STOP() does not exist, so the script crashed with
  # "could not find function" instead of halting cleanly. stop() raises
  # the intended message and aborts in one step.
  stop("The 'run_analysis.R' file seems to be installed in the
wrong directory. Please install it in the directory in which the 'UCI HAR Dataset'
directory is installed")
}
print ("executing")
##First thing - read the files into data frames
x_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
names(y_train) <- "Activity"
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
names(subject_train) <- "Subject"
x_test <- read.table("UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
names(y_test) <- "Activity"
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
names(subject_test) <- "Subject"
##Second - merge all train and test data to one data frame
Test <- cbind(y_test, subject_test, x_test) #a dataframe containing all the Test data
Train <- cbind(y_train, subject_train, x_train) #a dataframe containing all the Train data
MergedDF <- rbind(Test, Train) #a dataframe containing both previous dataframes
##Third - extract only the measures regarding mean and standard deviation
features <- read.table("UCI HAR Dataset/features.txt")
# NOTE(review): "mean()" is used here as a *regular expression*, where "()"
# is an empty capture group -- so this matches any feature name containing
# "mean", including "meanFreq()", not only names ending in "mean()" as the
# comment states. If only the -mean() measurements are wanted, pass
# fixed = TRUE (or escape: "mean\\(\\)"). The same caveat applies to
# "std()" below. Confirm which selection is intended before changing it.
MeanCOLS <- which(grepl("mean()", features[,2])) # returns all measures which
#end with "mean()"
StdCOLS <- which(grepl("std()", features[,2])) # returns all measures which end
# with "std()"
InterestCols <- sort(unique(as.vector(rbind(MeanCOLS, StdCOLS)))) #one vector that
#units both of the above
ToExtract <- c(1,2,InterestCols+2) #correction due to the fact that the data frame
#has two id variables before the measurements themselves
NewDF <- MergedDF[,ToExtract] #a new DF with only desired measurements
#extracted, plus the two id variables
##Fourth - use descriptive activity names to name the activities in the data set
act_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
names(act_labels) <- c("Activity", "Activity_Name")
NewDF <- merge(act_labels, NewDF)
##Fifth - label the data set with descriptive variable names #(as set earlier in
#the interesting columns inside the "features" data frame
VarNames <- as.character(features[InterestCols, 2])
names(NewDF) <- c("Activity", "Activity_Name", "Subject", VarNames)
##Sixth - reshape the DF to a new, tidy, dataset (long form)
MeltDF <- melt(NewDF, id=c("Activity_Name", "Subject"), measure.vars=VarNames)
| /run_analysis.R | no_license | 50stuck/Getting-and-Cleaning-Data | R | false | false | 2,909 | r | library(reshape2)
##Before starting out - making sure that the working directory is right
print ("checking if the file is installed in the correct directory")
result <- sum(as.numeric(list.files() %in% "UCI HAR Dataset"))
if (result == 0) {warning("The 'run_analysis.R' file seems to be installed in the
wrong directory. Please install it in the directory in which the 'UCI HAR Dataset'
directory is installed")
STOP()
}
print ("executing")
##First thing - read the files into data frames
x_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
names(y_train) <- "Activity"
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
names(subject_train) <- "Subject"
x_test <- read.table("UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
names(y_test) <- "Activity"
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
names(subject_test) <- "Subject"
##Second - merge all train and test data to one data frame
Test <- cbind(y_test, subject_test, x_test) #a dataframe containing all the Test data
Train <- cbind(y_train, subject_train, x_train) #a dataframe containing all the Train data
MergedDF <- rbind(Test, Train) #a dataframe containing both previous dataframes
##Third - extract only the measures regarding mean and standard deviation
features <- read.table("UCI HAR Dataset/features.txt")
MeanCOLS <- which(grepl("mean()", features[,2])) # returns all measures which
#end with "mean()"
StdCOLS <- which(grepl("std()", features[,2])) # returns all measures which end
# with "std()"
InterestCols <- sort(unique(as.vector(rbind(MeanCOLS, StdCOLS)))) #one vector that
#units both of the above
ToExtract <- c(1,2,InterestCols+2) #correction due to the fact that the data frame
#has two id variables before the measurements themselves
NewDF <- MergedDF[,ToExtract] #a new DF with only desired measurements
#extracted, plus the two id variables
##Fourth - use descriptive activity names to name the activities in the data set
act_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
names(act_labels) <- c("Activity", "Activity_Name")
NewDF <- merge(act_labels, NewDF)
##Fifth - label the data set with descriptive variable names #(as set earlier in
#the interesting columns inside the "features" data frame
VarNames <- as.character(features[InterestCols, 2])
names(NewDF) <- c("Activity", "Activity_Name", "Subject", VarNames)
##Sixth - reshape the DF to a new, tidy, dataset (long form)
MeltDF <- melt(NewDF, id=c("Activity_Name", "Subject"), measure.vars=VarNames)
|
library(faraway)
attach(diabetes)
# let's get some summary stats of the data:
# ?diabetes
summary(diabetes)
# note:
# 1. bp.2s and bp.2d have 262 NA's... we should probably just remove
# those columns.
# 2. Other columns have more reasonable numbers of NA's. Let's clean the data
# up and just remove rows with NA's.
# 3. id's is unique for each individual, so there's no point keeping that.
# 4. ratio is the ratio of chol/hdl, so for prediction purposes, we're going to
# drop that too.
df <- diabetes[, !(colnames(diabetes) %in% c("id", "bp.2s", "bp.2d", "ratio"))]
# Keep only rows with no missing values. complete.cases() works directly on
# the data frame; the original apply(df, 1, ...) coerced every row to a
# character matrix first (an apply-on-data.frame anti-pattern) to reach the
# same result.
df <- df[complete.cases(df), ] # 403 -> 366 observations
summary(df) # no more row's with NA's.
set.seed(1234)
holdoutIndices <- sample(seq.int(nrow(df)), size=72, replace=FALSE)
holdoutData <- df[holdoutIndices, ]
df <- df[-holdoutIndices, ]
# normally we'd do some EDA, but for the sake of time, let's just explore the
# relationship between chol and the other variables
library(ggplot2)
library(reshape2)
melted.df <- melt(data=df, id.vars=c("chol", "location", "gender", "frame"))
responsePlot <- ggplot(data=melted.df, aes(x=value, y=chol)) +
geom_point(shape=1, alpha=0.5) +
stat_smooth(color='red', method='loess', se=FALSE, alpha=0.5) +
stat_smooth(color='blue', method='lm', se=FALSE, alpha=0.5) +
facet_wrap(~variable, scales="free_x", ncol=2) +
labs(x=NULL, title=NULL) +
theme_bw()
responsePlot # these all seem to be pretty crappy predictors of chol (not surprising)
# let's do some brute force subset selection:
library(leaps)
leaps(x=df[, colnames(df) != "chol"], y=df[, "chol"], nbest=3) # ERROR!
# leaps can't handle handle factor variables:
df2 <- df[, !(colnames(df) %in% c("gender", "location", "frame"))]
leaps(x=as.matrix(df2[, colnames(df2) != "chol"]), y=as.vector(df2[, "chol"]), nbest=3)
# one way around this is to re-incode df:
# Expand a factor into one 0/1 indicator (dummy) column per level.
#
# Args:
#   factorVar: the factor to unroll.
#   factorVarName: optional prefix for the output column names; when NULL
#     the bare level names are used. The default, names(factorVar), is NULL
#     for a typical unnamed factor, preserving the original behavior.
# Returns:
#   A numeric matrix with length(factorVar) rows and one column per level;
#   entries are 1 where the observation has that level, 0 otherwise (NA
#   observations propagate NA).
unrollFactorVar <- function(factorVar, factorVarName=names(factorVar)) {
  factorVarLevels <- levels(factorVar)
  # vapply is type-stable (unlike sapply), and the explicit matrix() wrapper
  # guards the single-observation case, where sapply() collapsed to a bare
  # vector and the subsequent colnames<- call errored.
  out <- matrix(
    vapply(factorVarLevels,
           function(lvl) as.numeric(factorVar == lvl),
           numeric(length(factorVar))),
    nrow = length(factorVar),
    ncol = length(factorVarLevels)
  )
  colnames(out) <- if (is.null(factorVarName)) {
    factorVarLevels
  } else {
    paste0(factorVarName, ".", factorVarLevels)
  }
  return(out)
}
df3 <- lapply(df, function(col) {
if (inherits(col, "factor")) {
unrollFactorVar(col)
} else {
col
}
})
df3 <- as.data.frame(df3)
leaps(x=df3[, colnames(df3) != "chol"], y=df3[, "chol"], nbest=3) # ERROR again
leapsOutput <- regsubsets(chol ~ 0 + ., data=df3, nbest=10, nvmax=19)
leapsSummary <- summary(leapsOutput)
modelSize <- as.vector(sapply(seq.int(14), function(i) rep.int(i, 10)))
modelStats <- cbind(modelSize=modelSize, as.data.frame(leapsSummary[2:6]))
modelStatsExtrema <- as.data.frame(lapply(modelStats[-1],
function(col) {
c(min=modelStats$modelSize[which.min(col)],
max=modelStats$modelSize[which.max(col)])
}))
modelStatsExtrema <- merge(melt(modelStatsExtrema[1,], value.name="min"),
melt(modelStatsExtrema[2,], value.name="max"))
subsetStatsPlot <- ggplot(data=melt(modelStats, id.vars="modelSize"),
aes(x=as.factor(modelSize), y=value)) +
geom_boxplot() +
geom_point(shape=1, alpha=0.25) +
facet_wrap(~variable, scales="free_y") +
geom_vline(data=modelStatsExtrema, aes(xintercept=min), color='blue') +
geom_vline(data=modelStatsExtrema, aes(xintercept=max), color='red') +
labs(x="Number of Predictors", y=NULL, title=NULL) +
theme_bw()
subsetStatsPlot
modelStatsExtrema
# Cp and adjR^2 both imply a 7 variable model is best
# not surprisingly, R^2 and RSS both imply full models.
# BIC says take a 5 variable model.
modelStats
# Check out models 69 and 70, and 41
colnames(df3[, leapsSummary$which[69, ]]) # leapsSummary$adjr2[69] == 0.1732
colnames(df3[, leapsSummary$which[51, ]]) # leapsSummary$cp[70] == 2.057817
# BIC chosen model:
colnames(df3[, leapsSummary$which[41, ]])
# ^ heavy overlap with the models above.
# build models
m69 <- lm(chol ~ 0 + . , data=df3[, leapsSummary$which[51, ]])
m70 <- lm(chol ~ 0 + . , data=df3[, leapsSummary$which[52, ]]) # <- has lower adjR^2
# these models were built using an exhaustive search. Let's see what kind of
# full model would have been made had we use LASSO, instead.
# see: http://www.stanford.edu/~hastie/glmnet/glmnet_alpha.html
library(glmnet)
library(doParallel)
registerDoParallel(cores=detectCores())
# note that alpha=1 <=> LASSO and alpha=0 <=> Ridge inside glmnet
cvLasso <- cv.glmnet(type.measure="mse", parallel=TRUE, nfolds=10, alpha=1,
x=as.matrix(df3[, colnames(df3) != "chol"]), y=df3[, "chol"])
plot(cvLasso)
colnames(df3)[as.logical(coef(cvLasso, s="lambda.min"))] # 10 predictors
colnames(df3)[as.logical(coef(cvLasso, s="lambda.1se"))] # 2 predictors
minLassoMSE <- cvLasso$cvm[which(cvLasso$lambda == cvLasso$lambda.min)] #1747
# build the models
lassoPreds <- predict(cvLasso, newx=as.matrix(df3[,-1]),
s=c(cvLasso$lambda.min, cvLasso$lambda.1se))
# Adjusted R^2 for predictions y_M from a model with p predictors.
#
# Args:
#   y_M: numeric vector of fitted/predicted values.
#   p: number of predictors used by the model.
#   y: observed response. Defaults to df3$chol (the global training
#     response used throughout this script) so existing two-argument calls
#     behave exactly as before, but the response can now be passed
#     explicitly instead of relying on a hidden global.
# Returns: adjusted R^2 as a single numeric value.
calcAdjR2 <- function(y_M, p, y = df3$chol) {
  n <- length(y)
  sigma2_hat <- var(y)
  1 - sum((y_M - y)^2) / ((n - p - 1) * sigma2_hat)
}
calcAdjR2(y_M=lassoPreds[,1], p=10) # 0.1467
calcAdjR2(y_M=lassoPreds[,2], p=2) # 0.0467
# now consider ridge:
cvRidge <- cv.glmnet(type.measure="mse", parallel=TRUE, nfolds=10, alpha=0,
x=as.matrix(df3[, colnames(df3) != "chol"]), y=df3[, "chol"])
plot(cvRidge) # note that this does little model selection
coef(cvRidge, s="lambda.min")
coef(cvRidge, s="lambda.1se")
minRidgeMSE <- cvRidge$cvm[which(cvRidge$lambda == cvRidge$lambda.min)] #1664.5
ridgePreds <- predict(cvRidge, newx=as.matrix(df3[,-1]),
s=c(cvRidge$lambda.min, cvRidge$lambda.1se))
calcAdjR2(y_M=ridgePreds[,1], p=18) # 0.118
calcAdjR2(y_M=ridgePreds[,2], p=18) # -0.065
## test these models on the holdout data
holdoutData <-lapply(holdoutData, function(col) {
if (inherits(col, "factor")) {
unrollFactorVar(col)
} else {
col
}
})
holdoutData <- as.data.frame(holdoutData)
lassoPreds <- predict(cvLasso, newx=as.matrix(holdoutData[,-1]),
s=c(cvLasso$lambda.min, cvLasso$lambda.1se))
ridgePreds <- predict(cvRidge, newx=as.matrix(holdoutData[,-1]),
s=c(cvRidge$lambda.min, cvRidge$lambda.1se))
MSE_hat <- within(data.frame(lasso.Min=0), {
lasso.Min <- mean( (lassoPreds[,1] - holdoutData$chol)^2 )
lasso.1se <- mean( (lassoPreds[,2] - holdoutData$chol)^2 )
ridge.Min <- mean( (ridgePreds[,1] - holdoutData$chol)^2 )
ridge.1se <- mean( (ridgePreds[,2] - holdoutData$chol)^2 )
# m69 <- mean( (predict(m69, newdata=holdoutData) - holdoutData$chol)^2 )
# m70 <- mean( (predict(m70, newdata=holdoutData) - holdoutData$chol)^2 )
})
msePlot <- ggplot(data=melt(MSE_hat), aes(x=variable, y=value, size=1/value, fill=value)) +
geom_point(shape=21) +
theme_bw() +
labs(x="model", y="MSE", title=NULL) +
scale_fill_gradient("MSE", high="blue", low="red")
msePlot
detach(diabetes)
| /From_Lab/April-9-2014.R | no_license | timmytyo/stat151a | R | false | false | 7,189 | r | library(faraway)
attach(diabetes)
# let's get some summary stats of the data:
# ?diabetes
summary(diabetes)
# note:
# 1. bp.2s and bp.2d have 262 NA's... we should probably just remove
# those columns.
# 2. Other columns have more reasonable numbers of NA's. Let's clean the data
# up and just remove rows with NA's.
# 3. id's is unique for each individual, so there's no point keeping that.
# 4. ratio is the ratio of chol/hdl, so for prediction purposes, we're going to
# drop that too.
df <- diabetes[, !(colnames(diabetes) %in% c("id", "bp.2s", "bp.2d", "ratio"))]
df <- df[!apply(df, 1, function(x) any(is.na(x)) ), ] # 403 -> 366 observations
summary(df) # no more row's with NA's.
set.seed(1234)
holdoutIndices <- sample(seq.int(nrow(df)), size=72, replace=FALSE)
holdoutData <- df[holdoutIndices, ]
df <- df[-holdoutIndices, ]
# normally we'd do some EDA, but for the sake of time, let's just explore the
# relationship between chol and the other variables
library(ggplot2)
library(reshape2)
melted.df <- melt(data=df, id.vars=c("chol", "location", "gender", "frame"))
responsePlot <- ggplot(data=melted.df, aes(x=value, y=chol)) +
geom_point(shape=1, alpha=0.5) +
stat_smooth(color='red', method='loess', se=FALSE, alpha=0.5) +
stat_smooth(color='blue', method='lm', se=FALSE, alpha=0.5) +
facet_wrap(~variable, scales="free_x", ncol=2) +
labs(x=NULL, title=NULL) +
theme_bw()
responsePlot # these all seem to be pretty crappy predictors of chol (not surprising)
# let's do some brute force subset selection:
library(leaps)
leaps(x=df[, colnames(df) != "chol"], y=df[, "chol"], nbest=3) # ERROR!
# leaps can't handle handle factor variables:
df2 <- df[, !(colnames(df) %in% c("gender", "location", "frame"))]
leaps(x=as.matrix(df2[, colnames(df2) != "chol"]), y=as.vector(df2[, "chol"]), nbest=3)
# one way around this is to re-incode df:
# Expand a factor into a matrix of 0/1 indicator columns, one per level.
#
# factorVar:     the factor to expand
# factorVarName: optional prefix for the indicator column names; when NULL,
#                the bare level labels are used as the column names
#
# Returns a numeric matrix with one row per observation and one column per
# factor level. NA observations propagate as NA in every indicator column.
unrollFactorVar <- function(factorVar, factorVarName = names(factorVar)) {
  lvls <- levels(factorVar)
  out <- sapply(lvls, function(lvl) as.numeric(factorVar == lvl))
  prefix_missing <- is.null(factorVarName)
  colnames(out) <- if (prefix_missing) lvls else paste0(factorVarName, ".", lvls)
  out
}
df3 <- lapply(df, function(col) {
if (inherits(col, "factor")) {
unrollFactorVar(col)
} else {
col
}
})
df3 <- as.data.frame(df3)
leaps(x=df3[, colnames(df3) != "chol"], y=df3[, "chol"], nbest=3) # ERROR again
leapsOutput <- regsubsets(chol ~ 0 + ., data=df3, nbest=10, nvmax=19)
leapsSummary <- summary(leapsOutput)
modelSize <- as.vector(sapply(seq.int(14), function(i) rep.int(i, 10)))
modelStats <- cbind(modelSize=modelSize, as.data.frame(leapsSummary[2:6]))
modelStatsExtrema <- as.data.frame(lapply(modelStats[-1],
function(col) {
c(min=modelStats$modelSize[which.min(col)],
max=modelStats$modelSize[which.max(col)])
}))
modelStatsExtrema <- merge(melt(modelStatsExtrema[1,], value.name="min"),
melt(modelStatsExtrema[2,], value.name="max"))
subsetStatsPlot <- ggplot(data=melt(modelStats, id.vars="modelSize"),
aes(x=as.factor(modelSize), y=value)) +
geom_boxplot() +
geom_point(shape=1, alpha=0.25) +
facet_wrap(~variable, scales="free_y") +
geom_vline(data=modelStatsExtrema, aes(xintercept=min), color='blue') +
geom_vline(data=modelStatsExtrema, aes(xintercept=max), color='red') +
labs(x="Number of Predictors", y=NULL, title=NULL) +
theme_bw()
subsetStatsPlot
modelStatsExtrema
# Cp and adjR^2 both imply a 7 variable model is best
# not surprisingly, R^2 and RSS both imply full models.
# BIC says take a 5 variable model.
modelStats
# Check out models 69 and 70, and 41
colnames(df3[, leapsSummary$which[69, ]]) # leapsSummary$adjr2[69] == 0.1732
colnames(df3[, leapsSummary$which[51, ]]) # leapsSummary$cp[70] == 2.057817
# BIC chosen model:
colnames(df3[, leapsSummary$which[41, ]])
# ^ heavy overlap with the models above.
# build models
m69 <- lm(chol ~ 0 + . , data=df3[, leapsSummary$which[51, ]])
m70 <- lm(chol ~ 0 + . , data=df3[, leapsSummary$which[52, ]]) # <- has lower adjR^2
# these models were built using an exhaustive search. Let's see what kind of
# full model would have been made had we use LASSO, instead.
# see: http://www.stanford.edu/~hastie/glmnet/glmnet_alpha.html
library(glmnet)
library(doParallel)
registerDoParallel(cores=detectCores())
# note that alpha=1 <=> LASSO and alpha=0 <=> Ridge inside glmnet
cvLasso <- cv.glmnet(type.measure="mse", parallel=TRUE, nfolds=10, alpha=1,
x=as.matrix(df3[, colnames(df3) != "chol"]), y=df3[, "chol"])
plot(cvLasso)
colnames(df3)[as.logical(coef(cvLasso, s="lambda.min"))] # 10 predictors
colnames(df3)[as.logical(coef(cvLasso, s="lambda.1se"))] # 2 predictors
minLassoMSE <- cvLasso$cvm[which(cvLasso$lambda == cvLasso$lambda.min)] #1747
# build the models
lassoPreds <- predict(cvLasso, newx=as.matrix(df3[,-1]),
s=c(cvLasso$lambda.min, cvLasso$lambda.1se))
# Adjusted R^2 for a vector of model predictions.
#
# y_M: fitted/predicted values from the model
# p:   number of predictors used by the model
# y:   observed response; defaults to df3$chol for backward compatibility
#      with the existing calls below, but can now be supplied explicitly
#      (e.g. to evaluate on the holdout data instead of relying on the
#      global df3)
#
# Computes 1 - [RSS/(n-p-1)] / [TSS/(n-1)], the usual adjusted R^2,
# since var(y) = TSS/(n-1).
calcAdjR2 <- function(y_M, p, y = df3$chol) {
  n <- length(y)
  sigma2_hat <- var(y)  # TSS / (n - 1)
  1 - sum((y_M - y)^2) / ((n - p - 1) * sigma2_hat)
}
calcAdjR2(y_M=lassoPreds[,1], p=10) # 0.1467
calcAdjR2(y_M=lassoPreds[,2], p=2) # 0.0467
# now consider ridge:
cvRidge <- cv.glmnet(type.measure="mse", parallel=TRUE, nfolds=10, alpha=0,
x=as.matrix(df3[, colnames(df3) != "chol"]), y=df3[, "chol"])
plot(cvRidge) # note that this does little model selection
coef(cvRidge, s="lambda.min")
coef(cvRidge, s="lambda.1se")
minRidgeMSE <- cvRidge$cvm[which(cvRidge$lambda == cvRidge$lambda.min)] #1664.5
ridgePreds <- predict(cvRidge, newx=as.matrix(df3[,-1]),
s=c(cvRidge$lambda.min, cvRidge$lambda.1se))
calcAdjR2(y_M=ridgePreds[,1], p=18) # 0.118
calcAdjR2(y_M=ridgePreds[,2], p=18) # -0.065
## test these models on the holdout data
holdoutData <-lapply(holdoutData, function(col) {
if (inherits(col, "factor")) {
unrollFactorVar(col)
} else {
col
}
})
holdoutData <- as.data.frame(holdoutData)
lassoPreds <- predict(cvLasso, newx=as.matrix(holdoutData[,-1]),
s=c(cvLasso$lambda.min, cvLasso$lambda.1se))
ridgePreds <- predict(cvRidge, newx=as.matrix(holdoutData[,-1]),
s=c(cvRidge$lambda.min, cvRidge$lambda.1se))
MSE_hat <- within(data.frame(lasso.Min=0), {
lasso.Min <- mean( (lassoPreds[,1] - holdoutData$chol)^2 )
lasso.1se <- mean( (lassoPreds[,2] - holdoutData$chol)^2 )
ridge.Min <- mean( (ridgePreds[,1] - holdoutData$chol)^2 )
ridge.1se <- mean( (ridgePreds[,2] - holdoutData$chol)^2 )
# m69 <- mean( (predict(m69, newdata=holdoutData) - holdoutData$chol)^2 )
# m70 <- mean( (predict(m70, newdata=holdoutData) - holdoutData$chol)^2 )
})
msePlot <- ggplot(data=melt(MSE_hat), aes(x=variable, y=value, size=1/value, fill=value)) +
geom_point(shape=21) +
theme_bw() +
labs(x="model", y="MSE", title=NULL) +
scale_fill_gradient("MSE", high="blue", low="red")
msePlot
detach(diabetes)
|
####### This tests deviance for poisson, tweedie and gamma distributions in deep learning by comparing with expected results ######
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
# Fits small deep-learning models in H2O and checks that the
# mean_residual_deviance H2O reports equals a deviance computed by hand in R,
# for the gamma, tweedie, and poisson distributions.
# h: an H2O connection object (passed through to h2o.uploadFile below).
test <- function(h) {
# MEPS health-expenditure data; keep only rows with positive inpatient spend
Hexpend = read.csv(locate("smalldata/glm_test/HealthExpend.csv"))
MEPS=subset(Hexpend,EXPENDIP>0)
hdata = as.h2o(MEPS,destination_frame = "MEPS")
# categorical predictors must be explicit factors on the H2O frame
hdata$RACE = as.factor(hdata$RACE)
hdata$REGION = as.factor(hdata$REGION)
hdata$EDUC = as.factor(hdata$EDUC)
hdata$PHSTAT = as.factor(hdata$PHSTAT)
hdata$INCOME = as.factor(hdata$INCOME)
print("gamma")
myX = c("COUNTIP","AGE", "GENDER", "RACE" ,"REGION", "EDUC","PHSTAT","MNHPOOR" ,"ANYLIMIT","INCOME","insure")
# training and validation frames are deliberately the same, so the train and
# valid deviance metrics should agree with each other and with the hand check
hh = h2o.deeplearning(x = myX,y = "EXPENDIP",training_frame = hdata,hidden = c(25,25),epochs = 100,
train_samples_per_iteration = -1,validation_frame = hdata,activation = "Tanh",distribution = "gamma", score_training_samples=0)
# pr[,1] holds log(mu_i) after the log() below (predictions are on the
# response scale when returned by h2o.predict)
pr = as.data.frame(h2o.predict(hh,newdata = hdata))
pr = log(pr)
# hand-computed mean gamma deviance: 2*sum(y_i/mu_i + log(mu_i)) / 157
# NOTE(review): 157 is presumably nrow(MEPS) -- confirm against the data
mean_deviance = (sum(MEPS$EXPENDIP*exp(-pr[,1])+pr[,1])*2)/157
#print(mean_deviance)
#print(hh@model$training_metrics@metrics$mean_residual_deviance)
#print(hh@model$validation_metrics@metrics$mean_residual_deviance)
expect_equal(mean_deviance,hh@model$training_metrics@metrics$mean_residual_deviance, 1e-8)
expect_equal(mean_deviance,hh@model$validation_metrics@metrics$mean_residual_deviance, 1e-8)
print("tweedie")
# smaller predictor set for the tweedie fit
myX = c("COUNTIP","AGE", "insure")
hh = h2o.deeplearning(x = myX,y = "EXPENDIP",training_frame = hdata,hidden = c(25,25),epochs = 100,
train_samples_per_iteration = -1,validation_frame = hdata,activation = "Tanh",distribution = "tweedie", score_training_samples=0)
pr = as.data.frame(h2o.predict(hh,newdata = hdata))
pr = log(pr)
# tweedie variance-power parameter; 1.5 matches H2O's default for 'tweedie'
# NOTE(review): confirm H2O's default tweedie_power is 1.5 for this build
dPower=1.5
mean_deviance = (sum((MEPS$EXPENDIP^(2.0-dPower)/((1.0-dPower)*(2.0-dPower)) -
MEPS$EXPENDIP*exp(pr[,1]*(1.0-dPower))/(1.0-dPower) + exp(pr[,1]*(2.0-dPower))/(2.0-dPower) ))*2) /157 ## tweedie deviance
#print(mean_deviance)
#print(hh@model$training_metrics@metrics$mean_residual_deviance)
#print(hh@model$validation_metrics@metrics$mean_residual_deviance)
expect_equal(mean_deviance,hh@model$training_metrics@metrics$mean_residual_deviance, 1e-8)
expect_equal(mean_deviance,hh@model$validation_metrics@metrics$mean_residual_deviance, 1e-8)
print("poisson")
# French motor third-party-liability claim counts for the poisson check
fre = h2o.uploadFile(locate("smalldata/glm_test/freMTPL2freq.csv.zip"),conn = h,destination_frame = "fre")
fre$VehPower = as.factor(fre$VehPower)
hh = h2o.deeplearning(x = 4:12,y = "ClaimNb",training_frame = fre,hidden = c(5,5),epochs = 1,
train_samples_per_iteration = -1,validation_frame = fre,activation = "Tanh",distribution = "poisson", score_training_samples=0)
# p is an H2OFrame column of predicted means; the sum/log below run in H2O
p = h2o.predict(hh,newdata = fre)[,1]
mean_deviance = -2*sum(fre$ClaimNb*log(p) - p)/nrow(p) ## Poisson deviance
#print(mean_deviance)
#print(hh@model$training_metrics@metrics$mean_residual_deviance)
#print(hh@model$validation_metrics@metrics$mean_residual_deviance)
expect_equal(mean_deviance,hh@model$training_metrics@metrics$mean_residual_deviance, 1e-8)
expect_equal(mean_deviance,hh@model$validation_metrics@metrics$mean_residual_deviance, 1e-8)
testEnd()
}
doTest("DL residual deviance Test: DL deviance for poisson/gamma/tweedie distributions", test)
| /h2o-r/tests/testdir_algos/deeplearning/runit_deeplearning_res_deviance.R | permissive | mrgloom/h2o-3 | R | false | false | 3,458 | r | ####### This tests deviance for poisson, tweedie and gamma distributions in deeplearing by comparing with expected results ######
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
# Fits small deep-learning models in H2O and checks that the
# mean_residual_deviance H2O reports equals a deviance computed by hand in R,
# for the gamma, tweedie, and poisson distributions.
# h: an H2O connection object (passed through to h2o.uploadFile below).
test <- function(h) {
# MEPS health-expenditure data; keep only rows with positive inpatient spend
Hexpend = read.csv(locate("smalldata/glm_test/HealthExpend.csv"))
MEPS=subset(Hexpend,EXPENDIP>0)
hdata = as.h2o(MEPS,destination_frame = "MEPS")
# categorical predictors must be explicit factors on the H2O frame
hdata$RACE = as.factor(hdata$RACE)
hdata$REGION = as.factor(hdata$REGION)
hdata$EDUC = as.factor(hdata$EDUC)
hdata$PHSTAT = as.factor(hdata$PHSTAT)
hdata$INCOME = as.factor(hdata$INCOME)
print("gamma")
myX = c("COUNTIP","AGE", "GENDER", "RACE" ,"REGION", "EDUC","PHSTAT","MNHPOOR" ,"ANYLIMIT","INCOME","insure")
# training and validation frames are deliberately the same, so the train and
# valid deviance metrics should agree with each other and with the hand check
hh = h2o.deeplearning(x = myX,y = "EXPENDIP",training_frame = hdata,hidden = c(25,25),epochs = 100,
train_samples_per_iteration = -1,validation_frame = hdata,activation = "Tanh",distribution = "gamma", score_training_samples=0)
# pr[,1] holds log(mu_i) after the log() below (predictions are on the
# response scale when returned by h2o.predict)
pr = as.data.frame(h2o.predict(hh,newdata = hdata))
pr = log(pr)
# hand-computed mean gamma deviance: 2*sum(y_i/mu_i + log(mu_i)) / 157
# NOTE(review): 157 is presumably nrow(MEPS) -- confirm against the data
mean_deviance = (sum(MEPS$EXPENDIP*exp(-pr[,1])+pr[,1])*2)/157
#print(mean_deviance)
#print(hh@model$training_metrics@metrics$mean_residual_deviance)
#print(hh@model$validation_metrics@metrics$mean_residual_deviance)
expect_equal(mean_deviance,hh@model$training_metrics@metrics$mean_residual_deviance, 1e-8)
expect_equal(mean_deviance,hh@model$validation_metrics@metrics$mean_residual_deviance, 1e-8)
print("tweedie")
# smaller predictor set for the tweedie fit
myX = c("COUNTIP","AGE", "insure")
hh = h2o.deeplearning(x = myX,y = "EXPENDIP",training_frame = hdata,hidden = c(25,25),epochs = 100,
train_samples_per_iteration = -1,validation_frame = hdata,activation = "Tanh",distribution = "tweedie", score_training_samples=0)
pr = as.data.frame(h2o.predict(hh,newdata = hdata))
pr = log(pr)
# tweedie variance-power parameter; 1.5 matches H2O's default for 'tweedie'
# NOTE(review): confirm H2O's default tweedie_power is 1.5 for this build
dPower=1.5
mean_deviance = (sum((MEPS$EXPENDIP^(2.0-dPower)/((1.0-dPower)*(2.0-dPower)) -
MEPS$EXPENDIP*exp(pr[,1]*(1.0-dPower))/(1.0-dPower) + exp(pr[,1]*(2.0-dPower))/(2.0-dPower) ))*2) /157 ## tweedie deviance
#print(mean_deviance)
#print(hh@model$training_metrics@metrics$mean_residual_deviance)
#print(hh@model$validation_metrics@metrics$mean_residual_deviance)
expect_equal(mean_deviance,hh@model$training_metrics@metrics$mean_residual_deviance, 1e-8)
expect_equal(mean_deviance,hh@model$validation_metrics@metrics$mean_residual_deviance, 1e-8)
print("poisson")
# French motor third-party-liability claim counts for the poisson check
fre = h2o.uploadFile(locate("smalldata/glm_test/freMTPL2freq.csv.zip"),conn = h,destination_frame = "fre")
fre$VehPower = as.factor(fre$VehPower)
hh = h2o.deeplearning(x = 4:12,y = "ClaimNb",training_frame = fre,hidden = c(5,5),epochs = 1,
train_samples_per_iteration = -1,validation_frame = fre,activation = "Tanh",distribution = "poisson", score_training_samples=0)
# p is an H2OFrame column of predicted means; the sum/log below run in H2O
p = h2o.predict(hh,newdata = fre)[,1]
mean_deviance = -2*sum(fre$ClaimNb*log(p) - p)/nrow(p) ## Poisson deviance
#print(mean_deviance)
#print(hh@model$training_metrics@metrics$mean_residual_deviance)
#print(hh@model$validation_metrics@metrics$mean_residual_deviance)
expect_equal(mean_deviance,hh@model$training_metrics@metrics$mean_residual_deviance, 1e-8)
expect_equal(mean_deviance,hh@model$validation_metrics@metrics$mean_residual_deviance, 1e-8)
testEnd()
}
doTest("DL residual deviance Test: DL deviance for poisson/gamma/tweedie distributions", test)
|
#~~~
# Mixed effects models for linear growth rates
#
# By: R. A. Johnson
# Email: robert.a.johnson@wisc.edu
#~~~
## Run "data_processing" script first.
# Load packages and install if necessary
if (!require(nlme)) install.packages('nlme')
library(nlme)
# Linear growth model dataset
# Join per-plot growth measurements with temperature/salinity covariates.
# left_join() is called without an explicit `by`, so it matches on all
# column names shared between the two tables (natural join)
# NOTE(review): confirm the shared key columns are exactly the intended ones
mdat_linear = length_growth %>%
select(-interval) %>%
left_join(temp_sal %>%
select(-date) %>%
# adjust the week when it is offset by 1 from the reference treatment linear growth data nearest date
mutate(exp_week = if_else(treatment=="reference" & exp_week==75, exp_week - 1, exp_week))) %>%
# add a "time" variable (for autocorrelation)
# (within each plot, observations are ordered by date and numbered 1..n;
# this index is what a corAR1() structure would use later)
mutate(plot = as.factor(plot)) %>%
group_by(plot) %>%
arrange(date, .by_group=TRUE) %>%
mutate(time = seq_len(n())) %>%
ungroup()
#--
# Effects of temperature and salinity on linear growth
#--
#_Full Model
# start with the most complex model:
# temperature, salinity, and treatment as fixed effects
# include "plot" as a random effect to account for repeated measures sampling
# include an autocorrelation function for "time" (numeric data; grouped within plot) to account for temporal autocorrelation in data
f1 = lme(gr_length ~ mean_temp + salinity + treatment + mean_temp:treatment + salinity:treatment,
random = ~ 1 | plot, data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")), method="ML")
# is the model significantly better with an AR(1) autocorrelation structure?
f2 = update(f1, correlation = corAR1())
anova(f1, f2) # ns; f1 is better model without autocorrelation
# is the model significantly better without random effects?
f3 = gls(gr_length ~ mean_temp + salinity + treatment + mean_temp:treatment + salinity:treatment,
data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")), method="ML")
anova(f1, f3) # no; f1 is the better model
# is the model better without interaction terms?
f4 = update(f1, . ~ . - mean_temp:treatment - salinity:treatment)
anova(f1, f4) # ns; delta_AIC < 1; use simpler model f4 without interactions
summary(update(f4, method="REML"))
# without random effects
f5 = gls(gr_length ~ mean_temp + salinity + treatment,
data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")), method="ML")
anova(f4, f5) # sig.; f4 is better model
# AR1
f6 = update(f4, correlation = corAR1())
anova(f4, f6) # ns; f4 is better model
#_Temperature only
# is the model significantly different if salinity is removed?
t1 = update(f4, . ~ . - salinity)
anova(f4, t1) # yes, significantly different when salinity is removed; f4 is better model
#_Salinity only
# is the model significantly different if temperature is removed?
s1 = update(f4, . ~ . - mean_temp)
anova(f4, s1) # yes, significantly different when temperature is removed; f4 is better model
#--
# Best-fit Model
#--
#_Best model for linear growth
# model f4
lme.linear = lme(gr_length ~ mean_temp + salinity + treatment,
random = ~ 1 | plot,
data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")),
method="REML")
# model output
summary(lme.linear)
broom.mixed::tidy(lme.linear)
# R-squared values: marginal (on fixed effects only) and conditional (on fixed and random effects)
MuMIn::r.squaredGLMM(lme.linear)
#--
# Table of model AIC comparisons
#--
aic.linear = anova(f2, f1, f3, f4, t1, s1) %>%
rownames_to_column() %>%
as_tibble() %>%
select(rowname, df, AIC) %>%
rename(model = rowname)
## remove objects
rm(f1,f2,f3,f4,f5,f6,t1,s1)
##
| /stats_linear-growth.R | no_license | johnson-robert3/SG-Temp-Grazing | R | false | false | 3,685 | r | #~~~
# Mixed effects models for linear growth rates
#
# By: R. A. Johnson
# Email: robert.a.johnson@wisc.edu
#~~~
## Run "data_processing" script first.
# Load packages and install if necessary
if (!require(nlme)) install.packages('nlme')
library(nlme)
# Linear growth model dataset
# Join per-plot growth measurements with temperature/salinity covariates.
# left_join() is called without an explicit `by`, so it matches on all
# column names shared between the two tables (natural join)
# NOTE(review): confirm the shared key columns are exactly the intended ones
mdat_linear = length_growth %>%
select(-interval) %>%
left_join(temp_sal %>%
select(-date) %>%
# adjust the week when it is offset by 1 from the reference treatment linear growth data nearest date
mutate(exp_week = if_else(treatment=="reference" & exp_week==75, exp_week - 1, exp_week))) %>%
# add a "time" variable (for autocorrelation)
# (within each plot, observations are ordered by date and numbered 1..n;
# this index is what a corAR1() structure would use later)
mutate(plot = as.factor(plot)) %>%
group_by(plot) %>%
arrange(date, .by_group=TRUE) %>%
mutate(time = seq_len(n())) %>%
ungroup()
#--
# Effects of temperature and salinity on linear growth
#--
#_Full Model
# start with the most complex model:
# temperature, salinity, and treatment as fixed effects
# include "plot" as a random effect to account for repeated measures sampling
# include an autocorrelation function for "time" (numeric data; grouped within plot) to account for temporal autocorrelation in data
f1 = lme(gr_length ~ mean_temp + salinity + treatment + mean_temp:treatment + salinity:treatment,
random = ~ 1 | plot, data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")), method="ML")
# is the model significantly better with an AR(1) autocorrelation structure?
f2 = update(f1, correlation = corAR1())
anova(f1, f2) # ns; f1 is better model without autocorrelation
# is the model significantly better without random effects?
f3 = gls(gr_length ~ mean_temp + salinity + treatment + mean_temp:treatment + salinity:treatment,
data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")), method="ML")
anova(f1, f3) # no; f1 is the better model
# is the model better without interaction terms?
f4 = update(f1, . ~ . - mean_temp:treatment - salinity:treatment)
anova(f1, f4) # ns; delta_AIC < 1; use simpler model f4 without interactions
summary(update(f4, method="REML"))
# without random effects
f5 = gls(gr_length ~ mean_temp + salinity + treatment,
data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")), method="ML")
anova(f4, f5) # sig.; f4 is better model
# AR1
f6 = update(f4, correlation = corAR1())
anova(f4, f6) # ns; f4 is better model
#_Temperature only
# is the model significantly different if salinity is removed?
t1 = update(f4, . ~ . - salinity)
anova(f4, t1) # yes, significantly different when salinity is removed; f4 is better model
#_Salinity only
# is the model significantly different if temperature is removed?
s1 = update(f4, . ~ . - mean_temp)
anova(f4, s1) # yes, significantly different when temperature is removed; f4 is better model
#--
# Best-fit Model
#--
#_Best model for linear growth
# model f4
lme.linear = lme(gr_length ~ mean_temp + salinity + treatment,
random = ~ 1 | plot,
data = mdat_linear %>% mutate(treatment = fct_relevel(treatment, "reference")),
method="REML")
# model output
summary(lme.linear)
broom.mixed::tidy(lme.linear)
# R-squared values: marginal (on fixed effects only) and conditional (on fixed and random effects)
MuMIn::r.squaredGLMM(lme.linear)
#--
# Table of model AIC comparisons
#--
aic.linear = anova(f2, f1, f3, f4, t1, s1) %>%
rownames_to_column() %>%
as_tibble() %>%
select(rowname, df, AIC) %>%
rename(model = rowname)
## remove objects
rm(f1,f2,f3,f4,f5,f6,t1,s1)
##
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sjTabPropTable.R
\name{sjt.xtab}
\alias{sjt.xtab}
\title{Summary of contingency tables as HTML table}
\usage{
sjt.xtab(var.row, var.col, weight.by = NULL, title = NULL,
var.labels = NULL, value.labels = NULL, wrap.labels = 20,
show.obs = TRUE, show.cell.prc = FALSE, show.row.prc = FALSE,
show.col.prc = FALSE, show.exp = FALSE, show.legend = FALSE,
show.na = FALSE, show.summary = TRUE, drop.empty = TRUE,
statistics = c("auto", "cramer", "phi", "spearman", "kendall",
"pearson", "fisher"), string.total = "Total", digits = 1,
tdcol.n = "black", tdcol.expected = "#339999",
tdcol.cell = "#993333", tdcol.row = "#333399",
tdcol.col = "#339933", emph.total = FALSE, emph.color = "#f8f8f8",
prc.sign = " %", hundret = "100.0", CSS = NULL,
encoding = NULL, file = NULL, use.viewer = TRUE,
remove.spaces = TRUE, ...)
}
\arguments{
\item{var.row}{Variable that should be displayed in the table rows.}
\item{var.col}{Variable that should be displayed in the table columns.}
\item{weight.by}{Vector of weights that will be applied to weight all cases.
Must be a vector of same length as the input vector. Default is
\code{NULL}, so no weights are used.}
\item{title}{String, will be used as table caption.}
\item{var.labels}{Character vector with variable names, which will be used
to label variables in the output.}
\item{value.labels}{Character vector (or \code{list} of character vectors)
with value labels of the supplied variables, which will be used
to label variable values in the output.}
\item{wrap.labels}{Numeric, determines how many chars of the value, variable
or axis labels are displayed in one line and when a line break is inserted.}
\item{show.obs}{Logical, if \code{TRUE}, observed values are shown}
\item{show.cell.prc}{Logical, if \code{TRUE}, cell percentage values are shown}
\item{show.row.prc}{Logical, if \code{TRUE}, row percentage values are shown}
\item{show.col.prc}{Logical, if \code{TRUE}, column percentage values are shown}
\item{show.exp}{Logical, if \code{TRUE}, expected values are also shown}
\item{show.legend}{logical, if \code{TRUE}, and depending on plot type and
function, a legend is added to the plot.}
\item{show.na}{logical, if \code{TRUE}, \code{\link{NA}}'s (missing values)
are added to the output.}
\item{show.summary}{Logical, if \code{TRUE}, a summary row with
chi-squared statistics, degrees of freedom and Cramer's V or Phi
coefficient and p-value for the chi-squared statistics.}
\item{drop.empty}{Logical, if \code{TRUE} and the variable's values are labelled,
values that have no observations are still printed in the table (with
frequency \code{0}). If \code{FALSE}, values / factor levels with no occurence
in the data are omitted from the output.}
\item{statistics}{Name of measure of association that should be computed. May
be one of \code{"auto"}, \code{"cramer"}, \code{"phi"}, \code{"spearman"},
\code{"kendall"}, \code{"pearson"} or \code{"fisher"}. See
\code{\link[sjstats]{xtab_statistics}}.}
\item{string.total}{Character label for the total column / row header}
\item{digits}{Amount of decimals for estimates}
\item{tdcol.n}{Color for highlighting count (observed) values in table cells. Default is black.}
\item{tdcol.expected}{Color for highlighting expected values in table cells. Default is cyan.}
\item{tdcol.cell}{Color for highlighting cell percentage values in table cells. Default is red.}
\item{tdcol.row}{Color for highlighting row percentage values in table cells. Default is blue.}
\item{tdcol.col}{Color for highlighting column percentage values in table cells. Default is green.}
\item{emph.total}{Logical, if \code{TRUE}, the total column and row will be emphasized with a
different background color. See \code{emph.color}.}
\item{emph.color}{Logical, if \code{emph.total = TRUE}, this color value will be used
for painting the background of the total column and row. Default is a light grey.}
\item{prc.sign}{The percentage sign that is printed in the table cells, in HTML-format.
Default is \code{" \%"}, hence the percentage sign has a non-breaking-space after
the percentage value.}
\item{hundret}{Default value that indicates the 100-percent column-sums (since rounding values
may lead to non-exact results). Default is \code{"100.0"}.}
\item{CSS}{A \code{\link{list}} with user-defined style-sheet-definitions,
according to the \href{http://www.w3.org/Style/CSS/}{official CSS syntax}.
See 'Details' or \href{../doc/table_css.html}{this package-vignette}.}
\item{encoding}{String, indicating the charset encoding used for variable and
value labels. Default is \code{NULL}, so encoding will be auto-detected
depending on your platform (e.g., \code{"UTF-8"} for Unix and \code{"Windows-1252"} for
Windows OS). Change encoding if specific chars are not properly displayed (e.g. German umlauts).}
\item{file}{Destination file, if the output should be saved as file.
If \code{NULL} (default), the output will be saved as temporary file and
opened either in the IDE's viewer pane or the default web browser.}
\item{use.viewer}{Logical, if \code{TRUE}, the HTML table is shown in the IDE's
viewer pane. If \code{FALSE} or no viewer available, the HTML table is
opened in a web browser.}
\item{remove.spaces}{Logical, if \code{TRUE}, leading spaces are removed from all lines in the final string
that contains the html-data. Use this, if you want to remove parentheses for html-tags. The html-source
may look less pretty, but it may help when exporting html-tables to office tools.}
\item{...}{Other arguments, currently passed down to the test statistics functions
\code{chisq.test()} or \code{fisher.test()}.}
}
\value{
Invisibly returns
\itemize{
\item the web page style sheet (\code{page.style}),
\item the web page content (\code{page.content}),
\item the complete html-output (\code{page.complete}) and
\item the html-table with inline-css for use with knitr (\code{knitr})
}
for further use.
}
\description{
Shows contingency tables as HTML file in browser or viewer pane, or saves them as file.
}
\examples{
# prepare sample data set
data(efc)
# print simple cross table with labels
\dontrun{
sjt.xtab(efc$e16sex, efc$e42dep)
# print cross table with manually set
# labels and expected values
sjt.xtab(
efc$e16sex,
efc$e42dep,
var.labels = c("Elder's gender", "Elder's dependency"),
show.exp = TRUE
)
# print minimal cross table with labels, total col/row highlighted
sjt.xtab(efc$e16sex, efc$e42dep, show.cell.prc = FALSE, emph.total = TRUE)
# User defined style sheet
sjt.xtab(efc$e16sex, efc$e42dep,
CSS = list(css.table = "border: 2px solid;",
css.tdata = "border: 1px solid;",
css.horline = "border-bottom: double blue;"))}
# ordinal data, use Kendall's tau
sjt.xtab(efc$e42dep, efc$quol_5, statistics = "kendall")
# calculate Spearman's rho, with continuity correction
sjt.xtab(
efc$e42dep,
efc$quol_5,
statistics = "spearman",
exact = FALSE,
continuity = TRUE
)
}
| /man/sjt.xtab.Rd | no_license | AnxietyVendor/sjPlot | R | false | true | 7,157 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sjTabPropTable.R
\name{sjt.xtab}
\alias{sjt.xtab}
\title{Summary of contingency tables as HTML table}
\usage{
sjt.xtab(var.row, var.col, weight.by = NULL, title = NULL,
var.labels = NULL, value.labels = NULL, wrap.labels = 20,
show.obs = TRUE, show.cell.prc = FALSE, show.row.prc = FALSE,
show.col.prc = FALSE, show.exp = FALSE, show.legend = FALSE,
show.na = FALSE, show.summary = TRUE, drop.empty = TRUE,
statistics = c("auto", "cramer", "phi", "spearman", "kendall",
"pearson", "fisher"), string.total = "Total", digits = 1,
tdcol.n = "black", tdcol.expected = "#339999",
tdcol.cell = "#993333", tdcol.row = "#333399",
tdcol.col = "#339933", emph.total = FALSE, emph.color = "#f8f8f8",
prc.sign = " %", hundret = "100.0", CSS = NULL,
encoding = NULL, file = NULL, use.viewer = TRUE,
remove.spaces = TRUE, ...)
}
\arguments{
\item{var.row}{Variable that should be displayed in the table rows.}
\item{var.col}{Variable that should be displayed in the table columns.}
\item{weight.by}{Vector of weights that will be applied to weight all cases.
Must be a vector of same length as the input vector. Default is
\code{NULL}, so no weights are used.}
\item{title}{String, will be used as table caption.}
\item{var.labels}{Character vector with variable names, which will be used
to label variables in the output.}
\item{value.labels}{Character vector (or \code{list} of character vectors)
with value labels of the supplied variables, which will be used
to label variable values in the output.}
\item{wrap.labels}{Numeric, determines how many chars of the value, variable
or axis labels are displayed in one line and when a line break is inserted.}
\item{show.obs}{Logical, if \code{TRUE}, observed values are shown}
\item{show.cell.prc}{Logical, if \code{TRUE}, cell percentage values are shown}
\item{show.row.prc}{Logical, if \code{TRUE}, row percentage values are shown}
\item{show.col.prc}{Logical, if \code{TRUE}, column percentage values are shown}
\item{show.exp}{Logical, if \code{TRUE}, expected values are also shown}
\item{show.legend}{logical, if \code{TRUE}, and depending on plot type and
function, a legend is added to the plot.}
\item{show.na}{logical, if \code{TRUE}, \code{\link{NA}}'s (missing values)
are added to the output.}
\item{show.summary}{Logical, if \code{TRUE}, a summary row with
chi-squared statistics, degrees of freedom and Cramer's V or Phi
coefficient and p-value for the chi-squared statistics.}
\item{drop.empty}{Logical, if \code{TRUE} and the variable's values are labelled,
values that have no observations are still printed in the table (with
frequency \code{0}). If \code{FALSE}, values / factor levels with no occurence
in the data are omitted from the output.}
\item{statistics}{Name of measure of association that should be computed. May
be one of \code{"auto"}, \code{"cramer"}, \code{"phi"}, \code{"spearman"},
\code{"kendall"}, \code{"pearson"} or \code{"fisher"}. See
\code{\link[sjstats]{xtab_statistics}}.}
\item{string.total}{Character label for the total column / row header}
\item{digits}{Amount of decimals for estimates}
\item{tdcol.n}{Color for highlighting count (observed) values in table cells. Default is black.}
\item{tdcol.expected}{Color for highlighting expected values in table cells. Default is cyan.}
\item{tdcol.cell}{Color for highlighting cell percentage values in table cells. Default is red.}
\item{tdcol.row}{Color for highlighting row percentage values in table cells. Default is blue.}
\item{tdcol.col}{Color for highlighting column percentage values in table cells. Default is green.}
\item{emph.total}{Logical, if \code{TRUE}, the total column and row will be emphasized with a
different background color. See \code{emph.color}.}
\item{emph.color}{Logical, if \code{emph.total = TRUE}, this color value will be used
for painting the background of the total column and row. Default is a light grey.}
\item{prc.sign}{The percentage sign that is printed in the table cells, in HTML-format.
Default is \code{" \%"}, hence the percentage sign has a non-breaking-space after
the percentage value.}
\item{hundret}{Default value that indicates the 100-percent column-sums (since rounding values
may lead to non-exact results). Default is \code{"100.0"}.}
\item{CSS}{A \code{\link{list}} with user-defined style-sheet-definitions,
according to the \href{http://www.w3.org/Style/CSS/}{official CSS syntax}.
See 'Details' or \href{../doc/table_css.html}{this package-vignette}.}
\item{encoding}{String, indicating the charset encoding used for variable and
value labels. Default is \code{NULL}, so encoding will be auto-detected
depending on your platform (e.g., \code{"UTF-8"} for Unix and \code{"Windows-1252"} for
Windows OS). Change encoding if specific chars are not properly displayed (e.g. German umlauts).}
\item{file}{Destination file, if the output should be saved as file.
If \code{NULL} (default), the output will be saved as temporary file and
opened either in the IDE's viewer pane or the default web browser.}
\item{use.viewer}{Logical, if \code{TRUE}, the HTML table is shown in the IDE's
viewer pane. If \code{FALSE} or no viewer available, the HTML table is
opened in a web browser.}
\item{remove.spaces}{Logical, if \code{TRUE}, leading spaces are removed from all lines in the final string
that contains the html-data. Use this, if you want to remove parentheses for html-tags. The html-source
may look less pretty, but it may help when exporting html-tables to office tools.}
\item{...}{Other arguments, currently passed down to the test statistics functions
\code{chisq.test()} or \code{fisher.test()}.}
}
\value{
Invisibly returns
\itemize{
\item the web page style sheet (\code{page.style}),
\item the web page content (\code{page.content}),
\item the complete html-output (\code{page.complete}) and
\item the html-table with inline-css for use with knitr (\code{knitr})
}
for further use.
}
\description{
Shows contingency tables as HTML file in browser or viewer pane, or saves them as file.
}
\examples{
# prepare sample data set
data(efc)
# print simple cross table with labels
\dontrun{
sjt.xtab(efc$e16sex, efc$e42dep)
# print cross table with manually set
# labels and expected values
sjt.xtab(
efc$e16sex,
efc$e42dep,
var.labels = c("Elder's gender", "Elder's dependency"),
show.exp = TRUE
)
# print minimal cross table with labels, total col/row highlighted
sjt.xtab(efc$e16sex, efc$e42dep, show.cell.prc = FALSE, emph.total = TRUE)
# User defined style sheet
sjt.xtab(efc$e16sex, efc$e42dep,
CSS = list(css.table = "border: 2px solid;",
css.tdata = "border: 1px solid;",
css.horline = "border-bottom: double blue;"))}
# ordinal data, use Kendall's tau
sjt.xtab(efc$e42dep, efc$quol_5, statistics = "kendall")
# calculate Spearman's rho, with continuity correction
sjt.xtab(
efc$e42dep,
efc$quol_5,
statistics = "spearman",
exact = FALSE,
continuity = TRUE
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ML_POLRModel.R
\name{POLRModel}
\alias{POLRModel}
\title{Ordered Logistic or Probit Regression Model}
\usage{
POLRModel(method = c("logistic", "probit", "loglog", "cloglog", "cauchit"))
}
\arguments{
\item{method}{logistic or probit or (complementary) log-log or cauchit
(corresponding to a Cauchy latent variable).}
}
\value{
\code{MLModel} class object.
}
\description{
Fit a logistic or probit regression model to an ordered factor response.
}
\details{
\describe{
\item{Response Types:}{\code{ordered}}
}
Further model details can be found in the source link below.
In calls to \code{\link{varimp}} for \code{POLRModel}, numeric argument
\code{base} may be specified for the (negative) logarithmic transformation of
p-values [default: \code{exp(1)}]. Transformed p-values are automatically
scaled in the calculation of variable importance to range from 0 to 100. To
obtain unscaled importance values, set \code{scale = FALSE}.
}
\examples{
data(Boston, package = "MASS")
df <- within(Boston,
medv <- cut(medv,
breaks = c(0, 10, 15, 20, 25, 50),
ordered = TRUE))
fit(medv ~ ., data = df, model = POLRModel)
}
\seealso{
\code{\link[MASS]{polr}}, \code{\link{fit}}, \code{\link{resample}}
}
| /man/POLRModel.Rd | no_license | Drbuxie/MachineShop | R | false | true | 1,340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ML_POLRModel.R
\name{POLRModel}
\alias{POLRModel}
\title{Ordered Logistic or Probit Regression Model}
\usage{
POLRModel(method = c("logistic", "probit", "loglog", "cloglog", "cauchit"))
}
\arguments{
\item{method}{logistic or probit or (complementary) log-log or cauchit
(corresponding to a Cauchy latent variable).}
}
\value{
\code{MLModel} class object.
}
\description{
Fit a logistic or probit regression model to an ordered factor response.
}
\details{
\describe{
\item{Response Types:}{\code{ordered}}
}
Further model details can be found in the source link below.
In calls to \code{\link{varimp}} for \code{POLRModel}, numeric argument
\code{base} may be specified for the (negative) logarithmic transformation of
p-values [default: \code{exp(1)}]. Transformed p-values are automatically
scaled in the calculation of variable importance to range from 0 to 100. To
obtain unscaled importance values, set \code{scale = FALSE}.
}
\examples{
data(Boston, package = "MASS")
df <- within(Boston,
medv <- cut(medv,
breaks = c(0, 10, 15, 20, 25, 50),
ordered = TRUE))
fit(medv ~ ., data = df, model = POLRModel)
}
\seealso{
\code{\link[MASS]{polr}}, \code{\link{fit}}, \code{\link{resample}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_promoters}
\alias{get_promoters}
\title{Define promoter regions}
\usage{
get_promoters(upstream = 500, downstream = 50)
}
\arguments{
\item{upstream}{bp upstream to TSS}
\item{downstream}{bp downstream from TSS}
}
\description{
Define promoter regions
}
| /man/get_promoters.Rd | no_license | tanaylab/gpwm | R | false | true | 351 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_promoters}
\alias{get_promoters}
\title{Define promoter regions}
\usage{
get_promoters(upstream = 500, downstream = 50)
}
\arguments{
\item{upstream}{bp upstream to TSS}
\item{downstream}{bp downstread from tss}
}
\description{
Define promoter regions
}
|
#' Stock biomarker discovery pipeline for microarray data
#'
#' A ready-made \code{Pipeline} object providing a default biomarker
#' discovery workflow for microarray data; load it with
#' \code{data(stockPipeline)}.
#'
#' @docType data
#'
#' @usage data(stockPipeline)
#'
#' @format An object of class \code{Pipeline}
#'
#' @keywords datasets
#'
#'
#' @examples
#' data(stockPipeline)
"stockPipeline" | /R/stockPipeline.R | no_license | jperezrogers/rabbit | R | false | false | 274 | r | #' Stock biomarker discovery pipeline for microarray data
#'
#' A ready-made \code{Pipeline} object providing a default biomarker
#' discovery workflow for microarray data; load it with
#' \code{data(stockPipeline)}.
#'
#' @docType data
#'
#' @usage data(stockPipeline)
#'
#' @format An object of class \code{Pipeline}
#'
#' @keywords datasets
#'
#'
#' @examples
#' data(stockPipeline)
"stockPipeline" |
## Bayesian particle filtering codes
##
## in annotation L&W AGM == Liu & West "A General Algorithm"
## (the combined state/parameter particle filter of Liu & West, 2001)
##
## params = the initial particles for the parameter values;
##   these should be drawn from the prior distribution for the parameters
## est = names of parameters to estimate; other parameters are not updated.
## smooth = parameter 'h' from AGM
## tol = likelihood tolerance: a particle whose measurement density falls
##   below this (or is non-finite) is treated as lost
## max.fail = maximum number of complete filtering failures tolerated
## transform = if TRUE, parameters are filtered on the estimation scale
##
## Returns a 'bsmcd.pomp' object carrying the prior and posterior particle
## swarms, per-observation effective sample sizes, and the log evidence.
bsmc2.internal <- function (object, params, Np, est,
                            smooth, tol, seed = NULL,
                            verbose = getOption("verbose"),
                            max.fail, transform, .getnativesymbolinfo = TRUE,
                            ...) {
  ep <- paste0("in ",sQuote("bsmc2"),": ")  # common error-message prefix
  object <- as(object,"pomp")
  pompLoad(object)
  ## native-symbol lookups are cached: these flags request a lookup on the
  ## first call to each basic model component only
  gnsi.rproc <- gnsi.dmeas <- as.logical(.getnativesymbolinfo)
  ptsi.inv <- ptsi.for <- TRUE
  transform <- as.logical(transform)
  if (!is.null(seed))
    warning(ep,"argument ",sQuote("seed"),
            " now has no effect. Consider using ",
            sQuote("freeze"),".",call.=FALSE)
  ## default: use the parameter vector stored in the model object
  if (missing(params)) {
    if (length(coef(object))>0) {
      params <- coef(object)
    } else {
      stop(ep,sQuote("params")," must be supplied",call.=FALSE)
    }
  }
  if (missing(Np)) Np <- NCOL(params)
  else if (is.matrix(params)&&(Np!=ncol(params)))
    warning(ep,sQuote("Np")," is ignored when ",sQuote("params"),
            " is a matrix",call.=FALSE)
  ## a single parameter vector is expanded into Np draws from the prior
  if ((!is.matrix(params)) && (Np > 1)) {
    params <- tryCatch(
      rprior(object,params=parmat(params,Np)),
      error = function (e) {
        stop(ep,sQuote("rprior")," error: ",conditionMessage(e),call.=FALSE)
      }
    )
  }
  if (transform)
    params <- partrans(object,params,dir="toEstimationScale",
                       .getnativesymbolinfo=ptsi.inv)
  ptsi.inv <- FALSE
  params <- as.matrix(params)
  ntimes <- length(time(object))
  npars <- nrow(params)
  paramnames <- rownames(params)
  prior <- params   # retain a copy of the prior particle swarm
  ## by default, estimate exactly those parameters that vary across particles
  if (missing(est))
    est <- paramnames[apply(params,1,function(x)diff(range(x))>0)]
  estind <- match(est,paramnames)
  npars.est <- length(estind)
  if (npars.est<1)
    stop(ep,"no parameters to estimate",call.=FALSE)
  if (is.null(paramnames))
    stop(ep,sQuote("params")," must have rownames",call.=FALSE)
  if ((length(smooth)!=1)||(smooth>1)||(smooth<=0))
    stop(ep,sQuote("smooth")," must be a scalar in [0,1)",call.=FALSE)
  hsq <- smooth^2 # see Liu & West eq(10.3.12)
  shrink <- sqrt(1-hsq) # 'a' parameter of Liu & West
  ## draw the initial state particles (on the natural parameter scale)
  xstart <- init.state(
    object,
    params=if (transform) {
      partrans(object,params,dir="fromEstimationScale",
               .getnativesymbolinfo=ptsi.for)
    } else {
      params
    }
  )
  nvars <- nrow(xstart)
  ptsi.for <- FALSE
  times <- time(object,t0=TRUE)
  x <- xstart
  evidence <- as.numeric(rep(NA,ntimes))        # conditional log evidence, per step
  eff.sample.size <- as.numeric(rep(NA,ntimes)) # effective sample size, per step
  nfail <- 0L                                   # running count of filtering failures
  mu <- array(data=NA,dim=c(nvars,Np,1))
  rownames(mu) <- rownames(xstart)
  m <- array(data=NA,dim=c(npars,Np))
  rownames(m) <- rownames(params)
  for (nt in seq_len(ntimes)) {
    ## calculate particle means ; as per L&W AGM (1)
    params.mean <- apply(params,1,mean)
    ## calculate particle covariances : as per L&W AGM (1)
    params.var <- cov(t(params[estind,,drop=FALSE]))
    if (verbose) {
      cat("at step ",nt," (time =",times[nt+1],")\n",sep="")
      print(
        rbind(
          prior.mean=params.mean[estind],
          prior.sd=sqrt(diag(params.var))
        )
      )
    }
    ## kernel-shrinkage location: pull particles toward the swarm mean
    m <- shrink*params+(1-shrink)*params.mean
    ## sample new parameter vector as per L&W AGM (3) and Liu & West eq(3.2)
    pert <- tryCatch(
      rmvnorm(
        n=Np,
        mean=rep(0,npars.est),
        sigma=hsq*params.var,
        method="svd"
      ),
      error = function (e) {
        stop(ep,"in ",sQuote("rmvnorm"),": ",conditionMessage(e),call.=FALSE)
      }
    )
    if (!all(is.finite(pert)))
      stop(ep,"extreme particle depletion",call.=FALSE)
    params[estind,] <- m[estind,]+t(pert)
    if (transform)
      tparams <- partrans(object,params,dir="fromEstimationScale",
                          .getnativesymbolinfo=ptsi.for)
    ## propagate state particles one observation time forward
    xpred <- rprocess(
      object,
      xstart=x,
      times=times[c(nt,nt+1)],
      params=if (transform) {
        tparams
      } else {
        params
      },
      offset=1,
      .getnativesymbolinfo=gnsi.rproc
    )
    gnsi.rproc <- FALSE
    ## evaluate likelihood of observation given xpred (from L&W AGM (4))
    weights <- tryCatch(
      dmeasure(
        object,
        y=object@data[,nt,drop=FALSE],
        x=xpred,
        times=times[nt+1],
        params=if (transform) {
          tparams
        } else {
          params
        },
        .getnativesymbolinfo=gnsi.dmeas
      ),
      error = function (e) {
        stop(ep,sQuote("dmeasure")," error: ",conditionMessage(e),call.=FALSE)
      }
    )
    gnsi.dmeas <- FALSE
    ## evaluate weights as per L&W AGM (5)
    ## (record the mean weight before any failure handling alters 'weights')
    storeForEvidence <- log(mean(weights))
    x[,] <- xpred
    ## test for failure to filter
    dim(weights) <- NULL ### needed?
    failures <- ((weights<tol)|(!is.finite(weights))) # test for NA weights
    all.fail <- all(failures)
    if (all.fail) { # all particles are lost
      if (verbose) {
        message("filtering failure at time t = ",times[nt+1])
      }
      nfail <- nfail+1
      if (nfail > max.fail)
        stop(ep,"too many filtering failures",call.=FALSE)
      evidence[nt] <- log(tol) # worst log-likelihood
      weights <- rep(1/Np,Np)  # fall back on uniform weights
      eff.sample.size[nt] <- 0
    } else { # not all particles are lost
      ## compute log-likelihood
      evidence[nt] <- storeForEvidence
      weights[failures] <- 0
      weights <- weights/sum(weights)
      ## compute effective sample-size
      eff.sample.size[nt] <- 1/crossprod(weights)
    }
    if (verbose) {
      cat("effective sample size =",round(eff.sample.size[nt],1),"\n")
    }
    ## Matrix with samples (columns) from filtering distribution theta.t | Y.t
    if (!all.fail) {
      smp <- .Call(systematic_resampling,weights)
      x <- x[,smp,drop=FALSE]
      params[estind,] <- params[estind,smp,drop=FALSE]
    }
    .getnativesymbolinfo <- FALSE
  }
  if (nfail>0)
    warning(
      ep,nfail,
      ngettext(
        nfail,
        msg1=" filtering failure occurred.",
        msg2=" filtering failures occurred."
      ),
      call.=FALSE
    )
  ## replace parameters with point estimate (posterior median)
  coef(object,transform=transform) <- apply(params,1,median)
  pompUnload(object)
  ## bundle results; 'bsmcd.pomp' extends 'pomp' with posterior information
  new(
    "bsmcd.pomp",
    object,
    transform=transform,
    post=params,
    prior=prior,
    est=as.character(est),
    eff.sample.size=eff.sample.size,
    smooth=smooth,
    nfail=as.integer(nfail),
    cond.log.evidence=evidence,
    log.evidence=sum(evidence)
  )
}
## S4 method implementing the user-facing 'bsmc2' generic for 'pomp'
## objects.  Supplies the documented defaults (smooth = 0.1, tol = 1e-17,
## max.fail = 0, transform = FALSE) and delegates all computation to
## bsmc2.internal() above.  Missingness of 'params', 'Np' and 'est'
## propagates through the forwarding call, so the internal function's
## own fallbacks for them still apply.
setMethod(
  "bsmc2",
  signature=signature(object="pomp"),
  definition = function (object, params, Np, est,
                         smooth = 0.1, tol = 1e-17,
                         verbose = getOption("verbose"),
                         max.fail = 0, transform = FALSE,
                         ...) {
    bsmc2.internal(
      object=object,
      params=params,
      Np=Np,
      est=est,
      smooth=smooth,
      tol=tol,
      verbose=verbose,
      max.fail=max.fail,
      transform=transform,
      ...
    )
  }
)
| /R/bsmc2.R | no_license | ashtonbaker/pomp | R | false | false | 8,605 | r | ## Bayesian particle filtering codes
##
## in annotation L&W AGM == Liu & West "A General Algorithm"
##
## params = the initial particles for the parameter values;
## these should be drawn from the prior distribution for the parameters
## est = names of parameters to estimate; other parameters are not updated.
## smooth = parameter 'h' from AGM
## tol = likelihood tolerance: a particle whose measurement density falls
##   below this (or is non-finite) is treated as lost
## max.fail = maximum number of complete filtering failures tolerated
## transform = if TRUE, parameters are filtered on the estimation scale
## Returns a 'bsmcd.pomp' object carrying the prior and posterior particle
## swarms, per-observation effective sample sizes, and the log evidence.
bsmc2.internal <- function (object, params, Np, est,
                            smooth, tol, seed = NULL,
                            verbose = getOption("verbose"),
                            max.fail, transform, .getnativesymbolinfo = TRUE,
                            ...) {
  ep <- paste0("in ",sQuote("bsmc2"),": ")  # common error-message prefix
  object <- as(object,"pomp")
  pompLoad(object)
  ## native-symbol lookups are cached: these flags request a lookup on the
  ## first call to each basic model component only
  gnsi.rproc <- gnsi.dmeas <- as.logical(.getnativesymbolinfo)
  ptsi.inv <- ptsi.for <- TRUE
  transform <- as.logical(transform)
  if (!is.null(seed))
    warning(ep,"argument ",sQuote("seed"),
            " now has no effect. Consider using ",
            sQuote("freeze"),".",call.=FALSE)
  ## default: use the parameter vector stored in the model object
  if (missing(params)) {
    if (length(coef(object))>0) {
      params <- coef(object)
    } else {
      stop(ep,sQuote("params")," must be supplied",call.=FALSE)
    }
  }
  if (missing(Np)) Np <- NCOL(params)
  else if (is.matrix(params)&&(Np!=ncol(params)))
    warning(ep,sQuote("Np")," is ignored when ",sQuote("params"),
            " is a matrix",call.=FALSE)
  ## a single parameter vector is expanded into Np draws from the prior
  if ((!is.matrix(params)) && (Np > 1)) {
    params <- tryCatch(
      rprior(object,params=parmat(params,Np)),
      error = function (e) {
        stop(ep,sQuote("rprior")," error: ",conditionMessage(e),call.=FALSE)
      }
    )
  }
  if (transform)
    params <- partrans(object,params,dir="toEstimationScale",
                       .getnativesymbolinfo=ptsi.inv)
  ptsi.inv <- FALSE
  params <- as.matrix(params)
  ntimes <- length(time(object))
  npars <- nrow(params)
  paramnames <- rownames(params)
  prior <- params   # retain a copy of the prior particle swarm
  ## by default, estimate exactly those parameters that vary across particles
  if (missing(est))
    est <- paramnames[apply(params,1,function(x)diff(range(x))>0)]
  estind <- match(est,paramnames)
  npars.est <- length(estind)
  if (npars.est<1)
    stop(ep,"no parameters to estimate",call.=FALSE)
  if (is.null(paramnames))
    stop(ep,sQuote("params")," must have rownames",call.=FALSE)
  if ((length(smooth)!=1)||(smooth>1)||(smooth<=0))
    stop(ep,sQuote("smooth")," must be a scalar in [0,1)",call.=FALSE)
  hsq <- smooth^2 # see Liu & West eq(10.3.12)
  shrink <- sqrt(1-hsq) # 'a' parameter of Liu & West
  ## draw the initial state particles (on the natural parameter scale)
  xstart <- init.state(
    object,
    params=if (transform) {
      partrans(object,params,dir="fromEstimationScale",
               .getnativesymbolinfo=ptsi.for)
    } else {
      params
    }
  )
  nvars <- nrow(xstart)
  ptsi.for <- FALSE
  times <- time(object,t0=TRUE)
  x <- xstart
  evidence <- as.numeric(rep(NA,ntimes))        # conditional log evidence, per step
  eff.sample.size <- as.numeric(rep(NA,ntimes)) # effective sample size, per step
  nfail <- 0L                                   # running count of filtering failures
  mu <- array(data=NA,dim=c(nvars,Np,1))
  rownames(mu) <- rownames(xstart)
  m <- array(data=NA,dim=c(npars,Np))
  rownames(m) <- rownames(params)
  for (nt in seq_len(ntimes)) {
    ## calculate particle means ; as per L&W AGM (1)
    params.mean <- apply(params,1,mean)
    ## calculate particle covariances : as per L&W AGM (1)
    params.var <- cov(t(params[estind,,drop=FALSE]))
    if (verbose) {
      cat("at step ",nt," (time =",times[nt+1],")\n",sep="")
      print(
        rbind(
          prior.mean=params.mean[estind],
          prior.sd=sqrt(diag(params.var))
        )
      )
    }
    ## kernel-shrinkage location: pull particles toward the swarm mean
    m <- shrink*params+(1-shrink)*params.mean
    ## sample new parameter vector as per L&W AGM (3) and Liu & West eq(3.2)
    pert <- tryCatch(
      rmvnorm(
        n=Np,
        mean=rep(0,npars.est),
        sigma=hsq*params.var,
        method="svd"
      ),
      error = function (e) {
        stop(ep,"in ",sQuote("rmvnorm"),": ",conditionMessage(e),call.=FALSE)
      }
    )
    if (!all(is.finite(pert)))
      stop(ep,"extreme particle depletion",call.=FALSE)
    params[estind,] <- m[estind,]+t(pert)
    if (transform)
      tparams <- partrans(object,params,dir="fromEstimationScale",
                          .getnativesymbolinfo=ptsi.for)
    ## propagate state particles one observation time forward
    xpred <- rprocess(
      object,
      xstart=x,
      times=times[c(nt,nt+1)],
      params=if (transform) {
        tparams
      } else {
        params
      },
      offset=1,
      .getnativesymbolinfo=gnsi.rproc
    )
    gnsi.rproc <- FALSE
    ## evaluate likelihood of observation given xpred (from L&W AGM (4))
    weights <- tryCatch(
      dmeasure(
        object,
        y=object@data[,nt,drop=FALSE],
        x=xpred,
        times=times[nt+1],
        params=if (transform) {
          tparams
        } else {
          params
        },
        .getnativesymbolinfo=gnsi.dmeas
      ),
      error = function (e) {
        stop(ep,sQuote("dmeasure")," error: ",conditionMessage(e),call.=FALSE)
      }
    )
    gnsi.dmeas <- FALSE
    ## evaluate weights as per L&W AGM (5)
    ## (record the mean weight before any failure handling alters 'weights')
    storeForEvidence <- log(mean(weights))
    x[,] <- xpred
    ## test for failure to filter
    dim(weights) <- NULL ### needed?
    failures <- ((weights<tol)|(!is.finite(weights))) # test for NA weights
    all.fail <- all(failures)
    if (all.fail) { # all particles are lost
      if (verbose) {
        message("filtering failure at time t = ",times[nt+1])
      }
      nfail <- nfail+1
      if (nfail > max.fail)
        stop(ep,"too many filtering failures",call.=FALSE)
      evidence[nt] <- log(tol) # worst log-likelihood
      weights <- rep(1/Np,Np)  # fall back on uniform weights
      eff.sample.size[nt] <- 0
    } else { # not all particles are lost
      ## compute log-likelihood
      evidence[nt] <- storeForEvidence
      weights[failures] <- 0
      weights <- weights/sum(weights)
      ## compute effective sample-size
      eff.sample.size[nt] <- 1/crossprod(weights)
    }
    if (verbose) {
      cat("effective sample size =",round(eff.sample.size[nt],1),"\n")
    }
    ## Matrix with samples (columns) from filtering distribution theta.t | Y.t
    if (!all.fail) {
      smp <- .Call(systematic_resampling,weights)
      x <- x[,smp,drop=FALSE]
      params[estind,] <- params[estind,smp,drop=FALSE]
    }
    .getnativesymbolinfo <- FALSE
  }
  if (nfail>0)
    warning(
      ep,nfail,
      ngettext(
        nfail,
        msg1=" filtering failure occurred.",
        msg2=" filtering failures occurred."
      ),
      call.=FALSE
    )
  ## replace parameters with point estimate (posterior median)
  coef(object,transform=transform) <- apply(params,1,median)
  pompUnload(object)
  ## bundle results; 'bsmcd.pomp' extends 'pomp' with posterior information
  new(
    "bsmcd.pomp",
    object,
    transform=transform,
    post=params,
    prior=prior,
    est=as.character(est),
    eff.sample.size=eff.sample.size,
    smooth=smooth,
    nfail=as.integer(nfail),
    cond.log.evidence=evidence,
    log.evidence=sum(evidence)
  )
}
## S4 method implementing the user-facing 'bsmc2' generic for 'pomp'
## objects.  Supplies the documented defaults (smooth = 0.1, tol = 1e-17,
## max.fail = 0, transform = FALSE) and delegates all computation to
## bsmc2.internal() above.  Missingness of 'params', 'Np' and 'est'
## propagates through the forwarding call, so the internal function's
## own fallbacks for them still apply.
setMethod(
  "bsmc2",
  signature=signature(object="pomp"),
  definition = function (object, params, Np, est,
                         smooth = 0.1, tol = 1e-17,
                         verbose = getOption("verbose"),
                         max.fail = 0, transform = FALSE,
                         ...) {
    bsmc2.internal(
      object=object,
      params=params,
      Np=Np,
      est=est,
      smooth=smooth,
      tol=tol,
      verbose=verbose,
      max.fail=max.fail,
      transform=transform,
      ...
    )
  }
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{sub_curves}
\alias{sub_curves}
\title{Get subset of curves/group curves columns.}
\usage{
sub_curves(x, stat, name, size = 0.6, lty = 1)
}
\arguments{
\item{x}{ZCurvesDataFrame or ZGroupCurvesDataFrame object}
\item{stat}{character string of the statistic used ['min', 'mean', 'max',
'w.mean', 'ext2'].}
\item{name}{character name of a group/feature.}
\item{size}{numeric defining line width.}
\item{lty}{integer defining line type.}
}
\value{
data frame for a single column in curves / group curves data.
}
\description{
Function gets a single column (feature/group/stat) from curves or group
curves. Useful for constructing melt-type data frames for plotting.
}
\author{
Joona Lehtomaki \email{joona.lehtomaki@gmail.com}
}
\keyword{internal}
| /man/sub_curves.Rd | no_license | hmorzaria/zonator | R | false | true | 845 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{sub_curves}
\alias{sub_curves}
\title{Get subset of curves/group curves columns.}
\usage{
sub_curves(x, stat, name, size = 0.6, lty = 1)
}
\arguments{
\item{x}{ZCurvesDataFrame or ZGroupCurvesDataFrame object}
\item{stat}{character string of the statistic used ['min', 'mean', 'max',
'w.mean', 'ext2'].}
\item{name}{character name of a group/feature.}
\item{size}{numeric defining line width.}
\item{lty}{integer defining line type.}
}
\value{
data frame for a single column in curves / group curves data.
}
\description{
Function gets a single column (feature/group/stat) from curves or group
curves. Useful for constructing melt-type data frames for plotting.
}
\author{
Joona Lehtomaki \email{joona.lehtomaki@gmail.com}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{safeNull}
\alias{safeNull}
\title{A function to replace null and length 0 value with NA}
\usage{
safeNull(x, replace = NA)
}
\arguments{
\item{x}{A variable}
\item{replace}{The value to be returned if NULL. Default = NA.}
}
\value{
Returns x or replace.
}
\description{
Returns \code{replace} (default \code{NA}) when \code{x} is \code{NULL}
or has length 0; otherwise returns \code{x} unchanged.
}
| /man/safeNull.Rd | no_license | beatnaut/remaputils | R | false | true | 389 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{safeNull}
\alias{safeNull}
\title{A function to replace null and length 0 value with NA}
\usage{
safeNull(x, replace = NA)
}
\arguments{
\item{x}{A variable}
\item{replace}{The value to be returned if NULL. Default = NA.}
}
\value{
Returns x or replace.
}
\description{
Returns \code{replace} (default \code{NA}) when \code{x} is \code{NULL}
or has length 0; otherwise returns \code{x} unchanged.
}
|
\name{unmarkedFrame-class}
\Rdversion{1.1}
\docType{class}
\alias{unmarkedFrame-class}
\alias{coordinates,unmarkedFrame-method}
\alias{coordinates}
\alias{coords}
\alias{getY}
\alias{getY,unmarkedFrame-method}
\alias{head,unmarkedFrame-method}
\alias{hist,unmarkedFrameDS-method}
\alias{mapInfo}
\alias{numSites}
\alias{numSites,unmarkedFrame-method}
\alias{numY}
\alias{numY,unmarkedFrame-method}
\alias{obsCovs,unmarkedFrame-method}
\alias{obsCovs<-,unmarkedFrame-method}
\alias{obsCovs}
\alias{obsCovs<-}
\alias{obsNum,unmarkedFrame-method}
\alias{obsNum}
\alias{obsToY,unmarkedFrame-method}
\alias{obsToY<-,unmarkedFrame-method}
\alias{obsToY}
\alias{obsToY<-}
\alias{plot,unmarkedFrame,missing-method}
\alias{plot,unmarkedFrameOccuMulti,missing-method}
\alias{plot,unmarkedFrameOccuTTD,missing-method}
\alias{powerAnalysis}
\alias{powerAnalysis,formula,unmarkedFramePCount,numeric-method}
\alias{projection,unmarkedFrame-method}
\alias{projection}
\alias{siteCovs,unmarkedFrame-method}
\alias{siteCovs<-,unmarkedFrame-method}
\alias{siteCovs}
\alias{siteCovs<-}
\alias{unmarkedFrameOccu-class}
\alias{unmarkedFrameOccuMulti-class}
\alias{unmarkedFrameOccuMS-class}
\alias{unmarkedFrameOccuTTD-class}
\alias{unmarkedFrameMPois-class}
\alias{unmarkedFramePCount-class}
\alias{unmarkedFrameDS-class}
\alias{unmarkedMultFrame-class}
\alias{unmarkedFramePCO-class}
\alias{unmarkedFrameGMM-class}
\alias{unmarkedFrameGDS-class}
\alias{unmarkedFrameGPC-class}
\alias{unmarkedFrameDSO-class}
\alias{unmarkedFrameMMO-class}
\alias{show,unmarkedFrame-method}
\alias{show,unmarkedFrameOccuMulti-method}
\alias{show,unmarkedFrameOccuTTD-method}
\alias{show,unmarkedMultFrame-method}
\alias{summary,unmarkedFrame-method}
\alias{summary,unmarkedFrameDS-method}
\alias{summary,unmarkedMultFrame-method}
\alias{summary,unmarkedFrameOccuMulti-method}
\alias{summary,unmarkedFrameOccuTTD-method}
\alias{[,unmarkedFrameOccuMulti,missing,numeric,missing-method}
\alias{[,unmarkedFrameOccuTTD,missing,numeric,missing-method}
\alias{[,unmarkedFrameGDR,missing,numeric,missing-method}
\alias{[,unmarkedFrameOccuMS,numeric,missing,missing-method}
\alias{[,unmarkedFrameOccuTTD,numeric,missing,missing-method}
\alias{[,unmarkedFrameOccuMulti,numeric,missing,missing-method}
\alias{[,unmarkedFrameDSO,numeric,missing,missing-method}
\alias{[,unmarkedFrameGDR,numeric,missing,missing-method}
\alias{[,unmarkedFrameGDR,logical,missing,missing-method}
\title{Class "unmarkedFrame" }
\description{Methods for manipulating, summarizing and viewing
unmarkedFrames}
\section{Objects from the Class}{
Objects can be created by calls to the constructor function
\code{\link{unmarkedFrame}}. These objects are passed to the data
argument of the fitting functions.
}
\section{Slots}{
\describe{
\item{\code{y}:}{Object of class \code{"matrix"}}
\item{\code{obsCovs}:}{Object of class \code{"optionalDataFrame"}}
\item{\code{siteCovs}:}{Object of class \code{"optionalDataFrame"}}
\item{\code{mapInfo}:}{Object of class \code{"optionalMapInfo"}}
\item{\code{obsToY}:}{Object of class \code{"optionalMatrix"}}
}
}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "unmarkedFrame", i = "numeric", j =
"missing", drop = "missing")}: ... }
\item{[}{\code{signature(x = "unmarkedFrame", i = "numeric", j =
"numeric", drop = "missing")}: ... }
\item{[}{\code{signature(x = "unmarkedFrame", i = "missing", j =
"numeric", drop = "missing")}: ... }
\item{coordinates}{\code{signature(object = "unmarkedFrame")}: extract
coordinates }
\item{getY}{\code{signature(object = "unmarkedFrame")}: extract y
matrix }
\item{numSites}{\code{signature(object = "unmarkedFrame")}: extract M }
\item{numY}{\code{signature(object = "unmarkedFrame")}: extract
ncol(y) }
\item{obsCovs}{\code{signature(object = "unmarkedFrame")}: extract
observation-level covariates }
\item{obsCovs<-}{\code{signature(object = "unmarkedFrame")}: add or
modify observation-level covariates }
\item{obsNum}{\code{signature(object = "unmarkedFrame")}: extract
number of observations }
\item{obsToY}{\code{signature(object = "unmarkedFrame")}: }
\item{obsToY<-}{\code{signature(object = "unmarkedFrame")}: ... }
\item{plot}{\code{signature(x = "unmarkedFrame", y = "missing")}:
visualize response variable.
Takes additional argument \code{panels} which specifies how many
panels data should be split over.}
\item{projection}{\code{signature(object = "unmarkedFrame")}: extract
projection information }
\item{show}{\code{signature(object = "unmarkedFrame")}: view data as
data.frame }
\item{siteCovs}{\code{signature(object = "unmarkedFrame")}: extract
site-level covariates }
\item{siteCovs<-}{\code{signature(object = "unmarkedFrame")}: add or
modify site-level covariates }
\item{summary}{\code{signature(object = "unmarkedFrame")}: summarize
data }
}
}
\note{ This is a superclass with child classes for each fitting function }
\seealso{\code{\link{unmarkedFrame}}, \code{\linkS4class{unmarkedFit}},
\code{\link{unmarked-package}}
}
\examples{
# Organize data for pcount()
data(mallard)
mallardUMF <- unmarkedFramePCount(mallard.y, siteCovs = mallard.site,
obsCovs = mallard.obs)
# Visualize it
plot(mallardUMF)
mallardUMF
# Summarize it
summary(mallardUMF)
str(mallardUMF)
numSites(mallardUMF)
numY(mallardUMF)
obsNum(mallardUMF)
# Extract components of data
getY(mallardUMF)
obsCovs(mallardUMF)
obsCovs(mallardUMF, matrices = TRUE)
siteCovs(mallardUMF)
mallardUMF[1:5,] # First 5 rows in wide format
mallardUMF[,1:2] # First 2 observations
}
\keyword{classes}
| /man/unmarkedFrame-class.Rd | no_license | adamdsmith/unmarked | R | false | false | 5,689 | rd | \name{unmarkedFrame-class}
\Rdversion{1.1}
\docType{class}
\alias{unmarkedFrame-class}
\alias{coordinates,unmarkedFrame-method}
\alias{coordinates}
\alias{coords}
\alias{getY}
\alias{getY,unmarkedFrame-method}
\alias{head,unmarkedFrame-method}
\alias{hist,unmarkedFrameDS-method}
\alias{mapInfo}
\alias{numSites}
\alias{numSites,unmarkedFrame-method}
\alias{numY}
\alias{numY,unmarkedFrame-method}
\alias{obsCovs,unmarkedFrame-method}
\alias{obsCovs<-,unmarkedFrame-method}
\alias{obsCovs}
\alias{obsCovs<-}
\alias{obsNum,unmarkedFrame-method}
\alias{obsNum}
\alias{obsToY,unmarkedFrame-method}
\alias{obsToY<-,unmarkedFrame-method}
\alias{obsToY}
\alias{obsToY<-}
\alias{plot,unmarkedFrame,missing-method}
\alias{plot,unmarkedFrameOccuMulti,missing-method}
\alias{plot,unmarkedFrameOccuTTD,missing-method}
\alias{powerAnalysis}
\alias{powerAnalysis,formula,unmarkedFramePCount,numeric-method}
\alias{projection,unmarkedFrame-method}
\alias{projection}
\alias{siteCovs,unmarkedFrame-method}
\alias{siteCovs<-,unmarkedFrame-method}
\alias{siteCovs}
\alias{siteCovs<-}
\alias{unmarkedFrameOccu-class}
\alias{unmarkedFrameOccuMulti-class}
\alias{unmarkedFrameOccuMS-class}
\alias{unmarkedFrameOccuTTD-class}
\alias{unmarkedFrameMPois-class}
\alias{unmarkedFramePCount-class}
\alias{unmarkedFrameDS-class}
\alias{unmarkedMultFrame-class}
\alias{unmarkedFramePCO-class}
\alias{unmarkedFrameGMM-class}
\alias{unmarkedFrameGDS-class}
\alias{unmarkedFrameGPC-class}
\alias{unmarkedFrameDSO-class}
\alias{unmarkedFrameMMO-class}
\alias{show,unmarkedFrame-method}
\alias{show,unmarkedFrameOccuMulti-method}
\alias{show,unmarkedFrameOccuTTD-method}
\alias{show,unmarkedMultFrame-method}
\alias{summary,unmarkedFrame-method}
\alias{summary,unmarkedFrameDS-method}
\alias{summary,unmarkedMultFrame-method}
\alias{summary,unmarkedFrameOccuMulti-method}
\alias{summary,unmarkedFrameOccuTTD-method}
\alias{[,unmarkedFrameOccuMulti,missing,numeric,missing-method}
\alias{[,unmarkedFrameOccuTTD,missing,numeric,missing-method}
\alias{[,unmarkedFrameGDR,missing,numeric,missing-method}
\alias{[,unmarkedFrameOccuMS,numeric,missing,missing-method}
\alias{[,unmarkedFrameOccuTTD,numeric,missing,missing-method}
\alias{[,unmarkedFrameOccuMulti,numeric,missing,missing-method}
\alias{[,unmarkedFrameDSO,numeric,missing,missing-method}
\alias{[,unmarkedFrameGDR,numeric,missing,missing-method}
\alias{[,unmarkedFrameGDR,logical,missing,missing-method}
\title{Class "unmarkedFrame" }
\description{Methods for manipulating, summarizing and viewing
unmarkedFrames}
\section{Objects from the Class}{
Objects can be created by calls to the constructor function
\code{\link{unmarkedFrame}}. These objects are passed to the data
argument of the fitting functions.
}
\section{Slots}{
\describe{
\item{\code{y}:}{Object of class \code{"matrix"}}
\item{\code{obsCovs}:}{Object of class \code{"optionalDataFrame"}}
\item{\code{siteCovs}:}{Object of class \code{"optionalDataFrame"}}
\item{\code{mapInfo}:}{Object of class \code{"optionalMapInfo"}}
\item{\code{obsToY}:}{Object of class \code{"optionalMatrix"}}
}
}
\section{Methods}{
\describe{
\item{[}{\code{signature(x = "unmarkedFrame", i = "numeric", j =
"missing", drop = "missing")}: ... }
\item{[}{\code{signature(x = "unmarkedFrame", i = "numeric", j =
"numeric", drop = "missing")}: ... }
\item{[}{\code{signature(x = "unmarkedFrame", i = "missing", j =
"numeric", drop = "missing")}: ... }
\item{coordinates}{\code{signature(object = "unmarkedFrame")}: extract
coordinates }
\item{getY}{\code{signature(object = "unmarkedFrame")}: extract y
matrix }
\item{numSites}{\code{signature(object = "unmarkedFrame")}: extract M }
\item{numY}{\code{signature(object = "unmarkedFrame")}: extract
ncol(y) }
\item{obsCovs}{\code{signature(object = "unmarkedFrame")}: extract
observation-level covariates }
\item{obsCovs<-}{\code{signature(object = "unmarkedFrame")}: add or
modify observation-level covariates }
\item{obsNum}{\code{signature(object = "unmarkedFrame")}: extract
number of observations }
\item{obsToY}{\code{signature(object = "unmarkedFrame")}: }
\item{obsToY<-}{\code{signature(object = "unmarkedFrame")}: ... }
\item{plot}{\code{signature(x = "unmarkedFrame", y = "missing")}:
visualize response variable.
Takes additional argument \code{panels} which specifies how many
panels data should be split over.}
\item{projection}{\code{signature(object = "unmarkedFrame")}: extract
projection information }
\item{show}{\code{signature(object = "unmarkedFrame")}: view data as
data.frame }
\item{siteCovs}{\code{signature(object = "unmarkedFrame")}: extract
site-level covariates }
\item{siteCovs<-}{\code{signature(object = "unmarkedFrame")}: add or
modify site-level covariates }
\item{summary}{\code{signature(object = "unmarkedFrame")}: summarize
data }
}
}
\note{ This is a superclass with child classes for each fitting function }
\seealso{\code{\link{unmarkedFrame}}, \code{\linkS4class{unmarkedFit}},
\code{\link{unmarked-package}}
}
\examples{
# Organize data for pcount()
data(mallard)
mallardUMF <- unmarkedFramePCount(mallard.y, siteCovs = mallard.site,
obsCovs = mallard.obs)
# Visualize it
plot(mallardUMF)
mallardUMF
# Summarize it
summary(mallardUMF)
str(mallardUMF)
numSites(mallardUMF)
numY(mallardUMF)
obsNum(mallardUMF)
# Extract components of data
getY(mallardUMF)
obsCovs(mallardUMF)
obsCovs(mallardUMF, matrices = TRUE)
siteCovs(mallardUMF)
mallardUMF[1:5,] # First 5 rows in wide format
mallardUMF[,1:2] # First 2 observations
}
\keyword{classes}
|
## Exploratory analysis of customer brand preference (Acer vs. Sony)
## in the "Complete Responses" survey data.
##
## Steps: read the raw survey, fix column types, bin the continuous
## variables, then draw exploratory scatter/bar/box plots.
##
## Fixes relative to the original script:
##  * removed a stray `age` token that aborted the script with
##    "object 'age' not found";
##  * `is.na(Dataset)` printed the full logical matrix; report the
##    count of missing values instead;
##  * the per-elevel/zipcode/car mean-salary bar charts referenced
##    columns that did not exist in the brand-only summary `dataset2`;
##    those summaries are now grouped by the second variable as well;
##  * the final bar chart plotted the undefined object `age`; it now
##    plots the binned age column.

library(ggplot2)
library(dplyr)
library(gcookbook)  # loaded by the original script; kept for compatibility

Dataset <- read.csv("CompleteResponses.csv")
attributes(Dataset)
str(Dataset)

# Count missing values (the data set is expected to be complete).
sum(is.na(Dataset))

# Change datatypes ----
# elevel is ordinal; car, zipcode and brand are nominal categories.
Dataset$elevel <- as.ordered(Dataset$elevel)
Dataset$car <- as.factor(Dataset$car)
Dataset$zipcode <- as.factor(Dataset$zipcode)
Dataset$brand <- as.factor(Dataset$brand)
str(Dataset)

# Human-readable brand label (0 = Acer, 1 = Sony).
Dataset$brand_name <- if_else(Dataset$brand == "0", "Acer", "Sony")

# Create ranges for salary, age and credit ----
# Stored as columns of Dataset so every later plot can reference them.
Dataset$salarycat <- cut(Dataset$salary, 5)
Dataset$agecat <- cut(Dataset$age, 4)
Dataset$creditcat <- cut(Dataset$credit, 5)

# Scatter / jitter plots ----
ggplot(Dataset, aes(x = age, y = salary)) + geom_point()
ggplot(Dataset, aes(x = zipcode, y = elevel)) + geom_point()
ggplot(Dataset, aes(x = agecat, y = brand)) +
  geom_point(position = "jitter")
ggplot(Dataset, aes(x = salarycat, y = brand)) +
  geom_point(position = "jitter")
ggplot(Dataset, aes(x = zipcode, y = salarycat)) +
  geom_point(position = "jitter")
ggplot(Dataset, aes(x = zipcode, y = elevel)) +
  geom_point(position = "jitter") +
  facet_wrap("brand")
ggplot(Dataset, aes(x = creditcat, y = salary)) +
  geom_point(position = "jitter") +
  facet_wrap("brand")
ggplot(Dataset, aes(x = age, y = salary)) +
  geom_point(position = "jitter") +
  facet_wrap("brand")
ggplot(Dataset, aes(x = car, y = zipcode)) +
  geom_point(position = "jitter") +
  facet_wrap("brand")
ggplot(Dataset, aes(x = brand, y = salary)) +
  geom_boxplot()

# Bar graphs of group means ----
# Per-brand summaries.
dataset2 <- Dataset %>%
  group_by(brand) %>%
  summarize(mean_salary = mean(salary, na.rm = TRUE)) %>%
  ungroup()
dataset3 <- Dataset %>%
  group_by(brand) %>%
  summarize(mean_credit = mean(credit, na.rm = TRUE)) %>%
  ungroup()
dataset4 <- Dataset %>%
  group_by(brand) %>%
  summarize(mean_age = mean(age, na.rm = TRUE)) %>%
  ungroup()

ggplot(dataset2, aes(x = brand, y = mean_salary)) +
  geom_col()
ggplot(dataset3, aes(x = brand, y = mean_credit)) +
  geom_col()
ggplot(dataset4, aes(x = brand, y = mean_age)) +
  geom_col()

# Mean salary by a second grouping variable and brand: the summary must
# be grouped by that variable too, otherwise the plotted column does not
# exist in the summary data frame.
Dataset %>%
  group_by(brand, elevel) %>%
  summarize(mean_salary = mean(salary, na.rm = TRUE)) %>%
  ungroup() %>%
  ggplot(aes(x = elevel, y = mean_salary, fill = brand)) +
  geom_col(position = "dodge")
Dataset %>%
  group_by(brand, zipcode) %>%
  summarize(mean_salary = mean(salary, na.rm = TRUE)) %>%
  ungroup() %>%
  ggplot(aes(x = zipcode, y = mean_salary, fill = brand)) +
  geom_col(position = "dodge")
Dataset %>%
  group_by(brand, car) %>%
  summarize(mean_salary = mean(salary, na.rm = TRUE)) %>%
  ungroup() %>%
  ggplot(aes(x = car, y = mean_salary, fill = brand)) +
  geom_col(position = "dodge")

# Bar graphs of counts ----
ggplot(Dataset, aes(x = brand)) + geom_bar()
ggplot(Dataset, aes(x = car)) + geom_bar()
ggplot(Dataset, aes(x = zipcode)) + geom_bar()
# Was ggplot(age, ...): 'age' was never defined as a data object; plot
# the binned age column instead.
ggplot(Dataset, aes(x = agecat)) + geom_bar()
ggplot(Dataset, aes(x = agecat)) +
geom_bar()
# Create histogram ####
hist(Dataset$salary)
hist(Dataset$age)
hist(Dataset$credit)
hist(Dataset$zipcode)
hist(Dataset$credit)
hist(Dataset$brand)
hist(Dataset$elevel)
hist(Dataset$brand)
ggplot(Dataset, aes(x = salary)) +
geom_histogram(binwidth = 20000, fill = "white", colour = "black")
ggplot(Dataset, aes(x = age)) +
geom_histogram(binwidth = 5, fill = "white", colour = "black")
ggplot(Dataset, aes(x = credit)) +
geom_histogram(binwidth = 50000, fill = "white", colour = "black")
summary(Dataset)
# Train C5.0 model ####
str(Dataset)
library(caret)
library(lattice)
install.packages("C50")
library(C50)
install.packages("inum")
library(inum)
set.seed(123)
inTrain <- createDataPartition(y = Dataset$brand, p=.75,
list=FALSE)
training <- Dataset[ inTrain,]
testing <- Dataset[-inTrain,]
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
C5.0Fit <- train(brand ~ .,
data = training,
method = "C5.0",
tuneLength = 2,
trControl = ctrl)
C5.0Fit
modelLookup("C5.0")
# check errors on testing C5.0
predictions <- predict(object = C5.0Fit, newdata = testing)
testing$pred <- predictions
head(testing)
# metrics
PostResample(pred = predictions, obs = testing$brand)
# Train RF model ####
library(randomForest)
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
mtryGrid <- expand.grid(mtry = 6)
RFFit <- train(brand ~ .,
data = training,
method = "rf",
tunegrid = mtryGrid,
trControl = ctrl)
RFFit
# check errors on testing RF
predictions <- predict(object = RFFit, newdata = testing)
testing$pred <- predictions
head(testing)
testing$brand_name_pred. <- apply(testing["pred"],
MARGIN = 2,
function(x) if_else(x == 0, "Acer", "Sony"))
RF_Plot<- testing %>% ggplot(aes(x = age, y = salary)) +
geom_point(aes(color = testing$brand_name_pred.)) +
labs(title = "RF model with predictors Age and Salary") +
scale_color_manual(values = c("royalblue4", "red")) +
theme(element_blank())
RF_Plot
# metrics
postResample(pred = predictions, obs = testing$brand)
#Train knn model####
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
set.seed(123)
knnFit <- train(brand ~ .,
data = training,
method = "knn",
tuneLength = 15,
preProcess =c("center", "scale"),
trControl = ctrl)
knnFit
# check errors on testing knnFit
predictions <- predict(object = knnFit, newdata = testing)
testing$pred <- predictions
head(testing)
# metrics
postResample(pred = predictions, obs = testing$brand)
# Train svm model####
install.packages("e1071")
library("e1071")
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
set.seed(123)
SvmFit <- train(brand ~ .,
data = training,
method = "svmRadial",
preProcess =c("center", "scale"),
tuneLength = 15,
trControl = ctrl)
SvmFit
# check errors on testing SvmFit
predictions <- predict(object = SvmFit, newdata = testing)
testing$pred <- predictions
head(testing)
# metrics
postResample(pred = predictions, obs = testing$brand)
# Estimate variable importance####
C5.0Imp<- varImp(C5.0Fit, scale = FALSE)
C5.0Imp
RFFitImp<- varImp(RFFit, scale = TRUE)
RFFitImp
plot(RFFitImp, top = 10)
varImp
#Plot training model####
plot(C5.0Fit)
plot(RFFit)
#Predict model####
Dataset<- read.csv("CompleteResponses.csv")
attributes(Dataset)
str(Dataset)
is.na(Dataset)
# Change datatypes for prediction model####
DataInc<- read.csv("SurveyIncomplete.csv")
attributes(DataSurveyIncomplete)
str(DataSurveyIncomplete)
is.na(DataSurveyIncomplete)
DataInc$elevel<- as.ordered(DataInc$elevel)
DataInc$car<- as.factor(DataInc$car)
DataInc$zipcode<- as.factor(DataInc$zipcode)
DataInc$brand<- as.factor(DataInc$brand)
summary(DataInc)
predictions <- predict(object = RFFit, newdata = DataInc)
DataInc$brand <- predictions
head(DataInc)
# metrics
# postResample(pred = predictions, obs = DataInc$brand)
summary(Dataset)
rbind() | /Brand_Preference.R | no_license | gwesterbeek/Customer-Brand-Preferences | R | false | false | 7,141 | r | library(ggplot2)
library(dplyr)
Dataset<- read.csv("CompleteResponses.csv")
attributes(Dataset)
str(Dataset)
is.na(Dataset)
# Change datatypes ####
Dataset$elevel<- as.ordered(Dataset$elevel)
str(Dataset)
Dataset$car<- as.factor(Dataset$car)
str(Dataset)
Dataset$zipcode<- as.factor(Dataset$zipcode)
str(Dataset)
Dataset$brand<- as.factor(Dataset$brand)
Dataset$brand_name <- apply(Dataset["brand"],
MARGIN = 2,
function(x) if_else(x == 0, "Acer", "Sony"))
#create ranges for age and salary ####
salarycat<- Dataset$salary
salarycat<- cut(salarycat,5)
agecat<- Dataset$age
agecat<- cut(agecat,4)
creditcat<- Dataset$credit
creditcat <- cut(creditcat, 5)
age# Create plot ####
library("ggplot2")
ggplot(Dataset, aes(x = age, y = salary)) + geom_point()
ggplot(Dataset, aes(x = zipcode, y = elevel)) + geom_point()
ggplot(Dataset, aes(x = agecat, y = brand))+
geom_point(position = "jitter")
ggplot(Dataset, aes(x = salarycat, y = brand))+
geom_point(position = "jitter")
ggplot(Dataset, aes(x = zipcode, y = salarycat))+
geom_point(position = "jitter")
ggplot(Dataset, aes(x = zipcode, y = elevel))+
geom_point(position = "jitter") +
facet_wrap("brand")
ggplot(Dataset, aes(x = creditcat, y = salary))+
geom_point(position = "jitter") +
facet_wrap("brand")
ggplot(Dataset, aes(x = age, y = salary))+
geom_point(position = "jitter") +
facet_wrap("brand")
ggplot(Dataset, aes(x = car, y = zipcode))+
geom_point(position = "jitter") +
facet_wrap("brand")
ggplot(Dataset, aes(x = brand, y = salary)) +
geom_boxplot()
#Create bar graph ####
library (gcookbook)
Dataset %>%
group_by(brand) %>%
summarize(mean_salary = mean(salary, na.rm = TRUE)) -> dataset2
Dataset %>%
group_by(brand) %>%
summarize(mean_credit = mean(credit, na.rm = TRUE)) -> dataset3
Dataset %>%
group_by(brand) %>%
summarize(mean_age = mean(age, na.rm = TRUE)) -> dataset4
ggplot(dataset2, aes(x = elevel , y = mean_salary, fill = brand)) +
geom_col(position = "dodge")
ggplot(dataset2, aes(x = brand , y = mean_salary)) +
geom_col()
ggplot(dataset2, aes(x = zipcode , y = mean_salary, fill = brand)) +
geom_col()
ggplot(dataset2, aes(x = car , y = mean_salary, fill = brand)) +
geom_col()
ggplot(dataset2, aes(x = zipcode , y = mean_salary, fill = brand)) +
geom_col()
# Create bargraph of count ####
ggplot(Dataset, aes(x = brand)) +
geom_histogram(stat = "count")
ggplot(Dataset, aes(x = car)) +
geom_bar()
ggplot(Dataset, aes(x = zipcode)) +
geom_bar()
ggplot(age, aes(x = age)) +
geom_bar()
ggplot(Dataset, aes(x = agecat)) +
geom_bar()
# Create histogram ####
hist(Dataset$salary)
hist(Dataset$age)
hist(Dataset$credit)
hist(Dataset$zipcode)
hist(Dataset$credit)
hist(Dataset$brand)
hist(Dataset$elevel)
hist(Dataset$brand)
ggplot(Dataset, aes(x = salary)) +
geom_histogram(binwidth = 20000, fill = "white", colour = "black")
ggplot(Dataset, aes(x = age)) +
geom_histogram(binwidth = 5, fill = "white", colour = "black")
ggplot(Dataset, aes(x = credit)) +
geom_histogram(binwidth = 50000, fill = "white", colour = "black")
summary(Dataset)
# Train C5.0 model ####
str(Dataset)
library(caret)
library(lattice)
install.packages("C50")
library(C50)
install.packages("inum")
library(inum)
set.seed(123)
inTrain <- createDataPartition(y = Dataset$brand, p=.75,
list=FALSE)
training <- Dataset[ inTrain,]
testing <- Dataset[-inTrain,]
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
C5.0Fit <- train(brand ~ .,
data = training,
method = "C5.0",
tuneLength = 2,
trControl = ctrl)
C5.0Fit
modelLookup("C5.0")
# check errors on testing C5.0
predictions <- predict(object = C5.0Fit, newdata = testing)
testing$pred <- predictions
head(testing)
# metrics
PostResample(pred = predictions, obs = testing$brand)
# Train RF model ####
library(randomForest)
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
mtryGrid <- expand.grid(mtry = 6)
RFFit <- train(brand ~ .,
data = training,
method = "rf",
tunegrid = mtryGrid,
trControl = ctrl)
RFFit
# check errors on testing RF
predictions <- predict(object = RFFit, newdata = testing)
testing$pred <- predictions
head(testing)
testing$brand_name_pred. <- apply(testing["pred"],
MARGIN = 2,
function(x) if_else(x == 0, "Acer", "Sony"))
RF_Plot<- testing %>% ggplot(aes(x = age, y = salary)) +
geom_point(aes(color = testing$brand_name_pred.)) +
labs(title = "RF model with predictors Age and Salary") +
scale_color_manual(values = c("royalblue4", "red")) +
theme(element_blank())
RF_Plot
# metrics
postResample(pred = predictions, obs = testing$brand)
#Train knn model####
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
set.seed(123)
knnFit <- train(brand ~ .,
data = training,
method = "knn",
tuneLength = 15,
preProcess =c("center", "scale"),
trControl = ctrl)
knnFit
# check errors on testing knnFit
predictions <- predict(object = knnFit, newdata = testing)
testing$pred <- predictions
head(testing)
# metrics
postResample(pred = predictions, obs = testing$brand)
# Train svm model####
install.packages("e1071")
library("e1071")
ctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 1)
set.seed(123)
SvmFit <- train(brand ~ .,
data = training,
method = "svmRadial",
preProcess =c("center", "scale"),
tuneLength = 15,
trControl = ctrl)
SvmFit
# check errors on testing SvmFit
predictions <- predict(object = SvmFit, newdata = testing)
testing$pred <- predictions
head(testing)
# metrics
postResample(pred = predictions, obs = testing$brand)
# Estimate variable importance####
C5.0Imp<- varImp(C5.0Fit, scale = FALSE)
C5.0Imp
RFFitImp<- varImp(RFFit, scale = TRUE)
RFFitImp
plot(RFFitImp, top = 10)
varImp
#Plot training model####
plot(C5.0Fit)
plot(RFFit)
#Predict model####
Dataset<- read.csv("CompleteResponses.csv")
attributes(Dataset)
str(Dataset)
is.na(Dataset)
# Change datatypes for prediction model####
DataInc<- read.csv("SurveyIncomplete.csv")
attributes(DataSurveyIncomplete)
str(DataSurveyIncomplete)
is.na(DataSurveyIncomplete)
DataInc$elevel<- as.ordered(DataInc$elevel)
DataInc$car<- as.factor(DataInc$car)
DataInc$zipcode<- as.factor(DataInc$zipcode)
DataInc$brand<- as.factor(DataInc$brand)
summary(DataInc)
predictions <- predict(object = RFFit, newdata = DataInc)
DataInc$brand <- predictions
head(DataInc)
# metrics
# postResample(pred = predictions, obs = DataInc$brand)
summary(Dataset)
rbind() |
### libraries# #################################################################
library(cowplot)
library(tidyverse)
library(quickpsy)
### parameters #################################################################
oneColumnWidth <- 3.42
onehalfColumnWidth <- 4.5
twoColumnWidth <- 7
sizeLine1 <- .25
sizePoint1 <- 1
sizePoint2 <- 1.2
B <- 20
### read and prepare the data ##################################################
# second participants cc -> mc, ad: le he visto no haciendo la tarea en algunos trials
# participants that did the task poorly: eb, hw, lm, ym
first_participants <- c('pa', 'da', 'cc', 'cd', 'ja', 'al')
second_participants <- c('ac', 'dl', 'if', 'ld', 'ls', 'mc', 'mp', 'nv', 'xf',
'zg', 'ad', 'eb', 'hw', 'lm', 'ym')
participants <- c(first_participants, second_participants)
dat <- quickreadfiles(path = 'data',
subject = second_participants,
cond = 'angle', session = as.character(1:6))
dat <- dat %>%
select(subject, session, orLarge, orSmall, task, response) %>%
mutate(response = ifelse(
(orLarge == 0 & response == 'right') |
(orLarge == 90 & response == 'down') |
(orLarge == 180 & response == 'left') |
(orLarge == 270 & response == 'up') |
(response == 'm'), 1, 0),
vertical = ifelse(orLarge==0 | orLarge==180, TRUE, FALSE))
#dat$subject <- factor(dat$subject,
# labels = paste0('Participant ', 1:length(levelsSubj)))
dat$orLarge <- factor(dat$orLarge,
levels = c(0, 90, 180, 270),
labels = c('Top', 'Right', 'Botton', 'Left'))
datcomp <- dat %>% filter(task == 'comp')
datequ <- dat %>% filter(task == 'equ')
### comp preliminary ###########################################################
fitcomp <- quickpsy(datcomp, orSmall, response,
grouping = .(subject, orLarge),
guess = TRUE, lapses = TRUE, xmax = -4, xmin = 4,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'none')
fitcomp %>% plot(xpanel = subject, ypanel = orLarge)
participants_ok <- participants %>% setdiff(c('eb', 'hw', 'lm', 'ym'))
dat <- dat %>% filter(subject %in% participants_ok)
### removing participants that could not do the task ###########################
participants_ok <- participants %>% setdiff(c('eb', 'hw', 'lm', 'ym'))
dat <- dat %>% filter(subject %in% participants_ok)
fitcomp <- quickpsy(datcomp, orSmall, response,
grouping = .(subject, orLarge, vertical),
guess = TRUE, lapses = TRUE, xmax = -4, xmin = 4,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = B)
### frequency of button presses
datcomp %>% group_by(subject,orLarge) %>% summarise(p = mean(response))
### comparisons
fitcomp$thresholdcomparisons %>%
filter(subject==subject2, vertical, vertical2)
### choice bias prediction
choicebiascurves <- fitcomp$curves %>%
filter(orLarge=='Top' | orLarge=='Right' ) %>%
merge(fitcomp$par %>% filter(parn == 'p1')) %>%
mutate(x = x - 2*par) #%>%
filter(x > -2.1, x < 2.1)
### thresholds
fitcompthrelong <- fitcomp$thresholds %>%
select(-threinf, -thresup, -vertical) %>% spread(orLarge, thre)
fitcompthrelonginf <- fitcomp$thresholds %>%
select(-thre, -thresup, -vertical) %>% spread(orLarge, threinf) %>%
rename(Topinf = Top, Rightinf = Right, Bottoninf = Botton, Leftinf = Left)
fitcompthrelongsup <- fitcomp$thresholds %>%
select(-thre, -threinf, -vertical) %>% spread(orLarge, thresup) %>%
rename(Topsup = Top, Rightsup = Right, Bottonsup = Botton, Leftsup = Left)
fitcompthrelongwithci <- merge(fitcompthrelong,
merge(fitcompthrelonginf, fitcompthrelongsup))
### correlations
topbot <- fitcompthrelongwithci %>% select(subject, Top, Botton) %>%
rename(x = Top, y = Botton)
riglef <- fitcompthrelongwithci %>% select(subject, Right, Left) %>%
rename(x = Right, y = Left)
topbotriglef <- rbind(topbot, riglef)
cor.test(topbotriglef$x, topbotriglef$y)
toprig <- fitcompthrelongwithci %>% select(subject, Top, Right) %>%
rename(x = Top, y = Right)
lefbot <- fitcompthrelongwithci %>% select(subject, Left, Botton) %>%
rename(x = Left, y = Botton)
topriglefbot <- rbind(toprig, lefbot)
cor.test(topriglefbot$x, topriglefbot$y)
### figure comp
textReference <- 'Reference'
textProb <- 'Prob. responding clockwise'
theme_bias <- theme_set(theme_classic(10))
#theme_bias <- theme_update(axis.line = element_line(size = sizeLine1),
# axis.ticks= element_line(size = sizeLine1))
theme_bias <- theme_update(axis.line.x = element_line(colour = 'black',
size=sizeLine1,
linetype='solid'),
axis.line.y = element_line(colour = 'black',
size=sizeLine1,
linetype='solid'),
axis.ticks= element_line(size = sizeLine1))
funpsychocomp <- function(flagVertical, flagOrder) {
colorcurves1 <- ifelse(flagVertical,'#e41a1c','#4daf4a')
colorcurves2 <- ifelse(flagVertical,'#377eb8','#984ea3')
colorchoicebiascurves <- ifelse(flagVertical,'#377eb8','#984ea3')
ggplot(fitcomp$averages %>% filter(vertical==flagVertical),
aes(x = orSmall, y = prob, color = orLarge, shape=orLarge)) +
facet_wrap(~subject,scales = 'free_x') +
geom_vline(xintercept = 0, lty = 2, size = sizeLine1)+
geom_point(size = sizePoint1) +
geom_line(data = fitcomp$curves %>% filter(vertical==flagVertical),
aes(x = x, y = y),
size = sizeLine1) +
geom_line(data = choicebiascurves %>% filter(vertical==flagVertical),
aes(x = x, y = y),
size = sizeLine1, lty =2, color = colorchoicebiascurves) +
geom_segment(data = fitcomp$thresholds %>%
filter(vertical==flagVertical),
aes(x=threinf,xend = thresup, y = .5, yend = 0.5,
color=orLarge),
size = sizeLine1) +
scale_color_manual(values = c(colorcurves1,colorcurves2)) +
guides(color = guide_legend(reverse=flagOrder),
shape = guide_legend(reverse=flagOrder)) +
labs(x = 'Orientation (deg)', y = textProb,
color = textReference, shape = textReference) +
scale_y_continuous(breaks = c(0,.5,1)) +
coord_cartesian(xlim=c(-2.1, 2.1),ylim=c(0,1)) +
theme(strip.background = element_blank())
}
plotcomp0 <- funpsychocomp(TRUE, FALSE)
plotcomp90 <- funpsychocomp(FALSE, TRUE)
pcor2 <- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Right, color ='Top-Right'),
size = sizePoint2) +
geom_segment(aes(x = Topinf, xend = Topsup, y = Right, yend = Right,
color ='Top-Right'), size = sizeLine1) +
geom_segment(aes(x = Top, xend = Top, y = Rightinf, yend = Rightsup,
color ='Top-Right'), size = sizeLine1) +
geom_point(aes(x=Left,y=Botton, color='Left-Bottom', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Leftinf, xend = Leftsup, y = Botton, yend = Botton,
color ='Left-Bottom'), size = sizeLine1) +
geom_segment(aes(x = Left, xend = Left, y = Bottoninf, yend = Bottonsup,
color ='Left-Bottom'), size = sizeLine1) +
guides(shape = FALSE) +
scale_color_manual(values = c('#a65628','#f781bf')) +
scale_shape_discrete(solid=F) +
labs(x = 'PND (deg)', y = 'PND (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1'))
# coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
pcor1 <- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Botton, color ='Top-Bottom'),
size = sizePoint2) +
# geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
# color ='Top-Bottom'), size = sizeLine1) +
# geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
# color ='Top-Bottom'), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, color='Right-Left'),
size = sizePoint2) +
# geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
# color ='Right-Left'), size = sizeLine1) +
# geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
# color ='Right-Left'), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PND (deg)', y = 'PND (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1, 1), ylim = c(-1, 1))
coord_equal()
pcomppsy <- plot_grid(plotcomp90, plotcomp0, labels = c('A','B'),
ncol = 1, hjust = 0, vjust = 1)
pcor <- plot_grid(pcor1, labels = 'C', hjust = 0)
pcomp <- plot_grid(pcomppsy, pcor, ncol =1, rel_heights = c(2.6,.8))
save_plot('figures/comp.pdf', pcomp,
base_width = onehalfColumnWidth,
base_height = 2.5 * onehalfColumnWidth)
### fit two cumulative normal ##################################################
f <- function(x, p) pnorm(x, p[1] - p[3], p[2]) - pnorm(x, p[1] + p[3], p[2])
fitequcumnorm <- quickpsy(datequ, orSmall, response,
grouping = .(subject,orLarge,vertical),
B = 20, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
### calculating the maximum
equcumnormmax <- fitequcumnorm$curves %>%
summarise(maxi=approx(x=y,y=x,xout=max(y))[[1]])
### comparisons
fitequcumnorm$parcomparisons %>%
filter(parn =='p1', subject==subject2, vertical, vertical2)
### pse
pse <- fitequcumnorm$par %>% filter(parn=='p1') %>% merge(equcumnormmax)
fiteqpselong <- pse %>%
select(-parinf, -parsup, -vertical, -maxi) %>% spread(orLarge, par)
fiteqpselonginf <- pse %>%
select(-par, -parsup, -vertical,-maxi) %>% spread(orLarge, parinf) %>%
rename(Topinf = Top, Rightinf = Right, Bottoninf = Botton, Leftinf = Left)
fiteqpselongsup <- pse %>%
select(-par, -parinf, -vertical,-maxi) %>% spread(orLarge, parsup) %>%
rename(Topsup = Top, Rightsup = Right, Bottonsup = Botton, Leftsup = Left)
fiteqpselongwithci <- merge(fiteqpselong,
merge(fiteqpselonginf, fiteqpselongsup))
### correlations
topboteq <- fiteqpselong %>% select(subject, Top, Botton) %>%
rename(x = Top, y = Botton)
riglefeq <- fiteqpselong %>% select(subject, Right, Left) %>%
rename(x = Right, y = Left)
topbotriglefeq <- rbind(topboteq, riglefeq)
cor.test(topbotriglefeq$x, topbotriglefeq$y)
toprigeq <- fiteqpselong %>% select(subject, Top, Right) %>%
rename(x = Top, y = Right)
lefboteq <- fiteqpselong %>% select(subject, Left, Botton) %>%
rename(x = Left, y = Botton)
topriglefboteq <- rbind(toprigeq, lefboteq)
cor.test(topriglefboteq$x, topriglefboteq$y)
### figure eq
textProb2 <- 'Prob. responding aligned'
funpsychoeq <- function(flagVertical, flagOrder) {
colorcurves1 <- ifelse(flagVertical,'#e41a1c','#4daf4a')
colorcurves2 <- ifelse(flagVertical,'#377eb8','#984ea3')
colorchoicebiascurves <- ifelse(flagVertical,'#377eb8','#984ea3')
ggplot() +
facet_wrap(~subject,scales = 'free_x') +
geom_vline(xintercept = 0, lty = 2, size = sizeLine1)+
geom_rect(data = fitcomp$thresholds %>%
filter(vertical==flagVertical),
aes(xmin=threinf,xmax=thresup,ymin=0, ymax=1, fill=orLarge),
show.legend = FALSE,alpha = .25) +
# geom_segment(data = fitcomp$thresholds %>%
# filter(vertical==flagVertical),
# aes(x=threinf,xend = thresup, y = 0, yend = 0,
# color=orLarge), show.legend = FALSE,
# alpha = .25, size = 2.25) +
geom_point(data=fitequcumnorm$averages %>% filter(vertical==flagVertical),
aes(x = orSmall, y = prob, color = orLarge, shape=orLarge),
size = sizePoint1) +
geom_segment(data = pse %>% filter(vertical==flagVertical),
aes(x = parinf, xend = parsup, y = 0, yend = 0,
color = orLarge),size = sizeLine1) +
geom_segment(data = pse %>% filter(vertical==flagVertical),
aes(x = par, xend = par, y = 0, yend = maxi,
color = orLarge),size = sizeLine1) +
geom_line(data = fitequcumnorm$curves %>% filter(vertical==flagVertical),
aes(x = x, y = y, color = orLarge),
size = sizeLine1) +
scale_color_manual(values = c(colorcurves1,colorcurves2)) +
guides(color = guide_legend(reverse=flagOrder),
shape = guide_legend(reverse=flagOrder)) +
labs(x = 'Orientation (deg)', y = textProb2,
color = textReference, shape = textReference) +
scale_y_continuous(breaks = c(0,.5,1)) +
coord_cartesian(xlim=c(-2.1, 2.1),ylim=c(0,1)) +
theme(strip.background = element_blank())
}
ploteq0 <- funpsychoeq(TRUE, FALSE)
ploteq90 <- funpsychoeq(FALSE, TRUE)
pcoreq2 <- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Right, color ='Top-Right', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Topinf, xend = Topsup, y = Right, yend = Right,
color ='Top-Right', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Top, xend = Top, y = Rightinf, yend = Rightsup,
color ='Top-Right', shape = subject), size = sizeLine1) +
geom_point(aes(x=Left,y=Botton, color='Left-Bottom', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Leftinf, xend = Leftsup, y = Botton, yend = Botton,
color ='Left-Bottom', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Left, xend = Left, y = Bottoninf, yend = Bottonsup,
color ='Left-Bottom', shape = subject), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#a65628','#f781bf')) +
labs(x = 'PMR (deg)', y = 'PMR (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
pcoreq1 <- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Botton, color ='Top-Bottom'),
size = sizePoint2) +
# geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
# color ='Top-Bottom'), size = sizeLine1) +
# geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
# color ='Top-Bottom'), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, color='Right-Left'),
size = sizePoint2) +
# geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
# color ='Right-Left'), size = sizeLine1) +
# geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
# color ='Right-Left'), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PMR (deg)', y = 'PMR (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
peqpsy <- plot_grid(ploteq90, ploteq0, labels = c('A','B'),
ncol = 1, hjust = 0, vjust = 1)
pcoreq <- plot_grid(pcoreq1, labels = 'C', hjust = 0)
peq <- plot_grid(peqpsy, pcoreq, ncol =1, rel_heights = c(2.6,.8))
save_plot('figures/eq.pdf', peq,
base_width = onehalfColumnWidth,
base_height = 2.5 * onehalfColumnWidth)
### fig sup ###################################################################
psup <-plot_grid(pcor2, pcoreq2, labels = c('A','B'), ncol=1, hjust = 0)
save_plot('figures/sup.pdf', psup,
base_width = oneColumnWidth,
base_height = 1.5*oneColumnWidth)
### fig all ####################################################################
all <- merge(pse, fitcomp$thresholds)
confint(lm(all$thre~all$par))
cor.test(all$par,all$thre)
pcorcompeq <- ggplot(data = all )+ #facet_wrap(~orLarge)+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=par,y=thre,color=orLarge))+
# geom_segment(aes(x = parinf, xend = parsup, y = thre, yend = thre,
# color =orLarge), size = sizeLine1) +
# geom_segment(aes(x = par, xend = par, y = threinf, yend = thresup,
# color =orLarge), size = sizeLine1) +
scale_shape_discrete(solid=F) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))+
labs(x = 'PMR asymmetric task (deg)', y = 'PND symmetric task (deg)') +
guides(shape = FALSE) +
scale_colour_brewer(palette = 'Set1')+
labs(color = textReference) +
theme(legend.key.size = unit(1,'line'),
plot.margin = unit(c(-5,0,0,0), 'line'))
save_plot('figures/all.pdf', pcorcompeq,
base_width = oneColumnWidth,
base_height = oneColumnWidth)
#### comp and eq comp ##########################################################
compeq <- fitequcumnorm$parbootstrap %>% filter(parn == 'p1') %>%
select(-parn) %>% merge(fitcomp$thresholdsbootstrap %>% select(-prob)) %>%
mutate(dif = par - thre) %>% group_by(subject, orLarge) %>%
summarise(inf = quantile(dif, .025), sup = quantile(dif,.975),
sign = ifelse(inf * sup > 0, 1, 0))
################################################################################
### serial effects #############################################################
################################################################################
#### orLarge == lagorLarge, task == lagTask
datser <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge, task == lagTask) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife <- fitcompser$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompser, color = sign)
datser %>% group_by(sign, orSmall) %>% summarise(n=n())
fitcompserind <- quickpsy(datserind, orSmall, response,
grouping = .(sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
difeind <- fitcompserind$thresholdcomparisons
plot(fitcompserind, color = sign)
datserpse <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge, task == lagTask) %>%
merge(fitcomp$thresholds) %>%
mutate(sign = ifelse(lagorSmall > thre, 1, -1)) %>%
filter(task == 'comp')
datserpse %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompserpse <- quickpsy(datserpse, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
difepse <- fitcompserpse$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompserpse, color = sign)
datser2 <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(task == lagTask,
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')
) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser2 %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompser2 <- quickpsy(datser2, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife2 <- fitcompser2$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompser2, color = sign)
datser2 %>% group_by(sign, orSmall) %>% summarise(n=n())
fitcompser2ind <- quickpsy(datser2, orSmall, response,
grouping = .(sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife2ind <- fitcompser2ind$thresholdcomparisons #%>% filter(subject==subject2)
plot(fitcompser2ind, color = sign)
### Comparison trials preceded, within the same task, by the SAME or the
### OPPOSITE large orientation (union of the two previous selections).
datser3 <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall),
         lagTask = lag(task)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(task == lagTask,
         orLarge == lagorLarge |
           (orLarge == 'Top' & lagorLarge == 'Botton') |
           (orLarge == 'Botton' & lagorLarge == 'Top') |
           # BUG FIX: 'Rigth' -> 'Right' (the typo made this clause match
           # nothing, silently dropping Left-after-Right transitions).
           (orLarge == 'Left' & lagorLarge == 'Right') |
           (orLarge == 'Right' & lagorLarge == 'Left')
  ) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
# Cell counts before fitting.
datser3 %>% group_by(subject, sign, orSmall) %>% summarise(n = n())
fitcompser3 <- quickpsy(datser3, orSmall, response,
                        grouping = .(subject, sign),
                        guess = TRUE, lapses = TRUE,
                        parini = list(c(-2, 2), c(0.1, 3), c(0, .4), c(0, .4)),
                        bootstrap = 'nonparametric',
                        B = 1000)
dife3 <- fitcompser3$thresholdcomparisons %>% filter(subject == subject2)
plot(fitcompser3, color = sign)
# Pooled-subjects version of the datser3 fit (grouping by sign only).
fitcompser3ind <- quickpsy(datser3, orSmall, response,
grouping = .(sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife3ind <- fitcompser3ind$thresholdcomparisons #%>% filter(subject==subject2)
plot(fitcompser3ind, color = sign)
#### orLarge == lagorLarge
# Comparison trials preceded by the SAME large orientation; note that, unlike
# the sections above, there is no task == lagTask restriction here.
# NOTE(review): lag() runs over the whole data frame (no group_by(subject)),
# so trial pairs can straddle subject boundaries -- confirm this is intended.
datserc <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
# Cell counts before fitting.
datserc %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompserc <- quickpsy(datserc, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
difec <- fitcompserc$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompserc, color = sign)
### Opposite preceding large orientation, WITHOUT restricting the previous
### trial to the same task (contrast with datser2 above).
datser2c <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall),
         lagTask = lag(task)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(
    (orLarge == 'Top' & lagorLarge == 'Botton') |
      (orLarge == 'Botton' & lagorLarge == 'Top') |
      # BUG FIX: 'Rigth' -> 'Right' (typo made this clause match nothing).
      (orLarge == 'Left' & lagorLarge == 'Right') |
      (orLarge == 'Right' & lagorLarge == 'Left')
  ) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
# Cell counts before fitting.
datser2c %>% group_by(subject, sign, orSmall) %>% summarise(n = n())
fitcompser2c <- quickpsy(datser2c, orSmall, response,
                         grouping = .(subject, sign),
                         guess = TRUE, lapses = TRUE,
                         parini = list(c(-2, 2), c(0.1, 3), c(0, .4), c(0, .4)),
                         bootstrap = 'nonparametric',
                         B = 1000)
dife2c <- fitcompser2c$thresholdcomparisons %>% filter(subject == subject2)
plot(fitcompser2c, color = sign)
### Same OR opposite preceding large orientation, without restricting the
### previous trial to the same task.
datser3c <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall),
         lagTask = lag(task)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(orLarge == lagorLarge |
           (orLarge == 'Top' & lagorLarge == 'Botton') |
           (orLarge == 'Botton' & lagorLarge == 'Top') |
           # BUG FIX: 'Rigth' -> 'Right' (typo made this clause match nothing).
           (orLarge == 'Left' & lagorLarge == 'Right') |
           (orLarge == 'Right' & lagorLarge == 'Left')
  ) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
# Cell counts before fitting.
datser3c %>% group_by(subject, sign, orSmall) %>% summarise(n = n())
fitcompser3c <- quickpsy(datser3c, orSmall, response,
                         grouping = .(subject, sign),
                         guess = TRUE, lapses = TRUE,
                         parini = list(c(-2, 2), c(0.1, 3), c(0, .4), c(0, .4)),
                         bootstrap = 'nonparametric',
                         B = 1000)
dife3c <- fitcompser3c$thresholdcomparisons %>% filter(subject == subject2)
plot(fitcompser3c, color = sign)
#### orLarge == lagorLarge, task == lagTask
# Equality-task trials preceded, within the same task, by the SAME large
# orientation.
datsereq <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge, task == lagTask) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
# Cell counts before fitting.
datsereq %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
# Equality-task model: difference of two cumulative Gaussians with common
# sd p[2], centred at p[1] - p[3] and p[1] + p[3].
f <- function(x, p) {
  pnorm(x, mean = p[1] - p[3], sd = p[2]) -
    pnorm(x, mean = p[1] + p[3], sd = p[2])
}
# Fit the difference-of-Gaussians model f to the equality task, split by the
# previous-trial sign (no thresholds: PSE is parameter p1).
fitequcumnormser <- quickpsy(datsereq, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
# Between-sign differences in the PSE parameter p1, within subject.
fitequcumnormser$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormser, color = sign)
### Equality-task trials preceded, within the same task, by the OPPOSITE
### large orientation.
datsereq2 <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall),
         lagTask = lag(task)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(task == lagTask,
         (orLarge == 'Top' & lagorLarge == 'Botton') |
           (orLarge == 'Botton' & lagorLarge == 'Top') |
           # BUG FIX: 'Rigth' -> 'Right' (typo made this clause match nothing).
           (orLarge == 'Left' & lagorLarge == 'Right') |
           (orLarge == 'Right' & lagorLarge == 'Left')
  ) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'equ')
# Cell counts before fitting.
datsereq2 %>% group_by(subject, sign, orSmall) %>% summarise(n = n())
fitequcumnormser2 <- quickpsy(datsereq2, orSmall, response,
                              grouping = .(subject, sign),
                              B = 1000, fun = f,
                              parini = list(c(-2, 2), c(0.1, 3), c(0.1, 3)),
                              bootstrap = 'nonparametric', thresholds = F)
# Between-sign differences in the PSE parameter p1, within subject.
fitequcumnormser2$parcomparisons %>% filter(parn == 'p1', subject == subject2)
plot(fitequcumnormser2, color = sign)
### Equality-task trials preceded, within the same task, by the SAME or the
### OPPOSITE large orientation.
datsereq3 <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall),
         lagTask = lag(task)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(task == lagTask,
         orLarge == lagorLarge |
           (orLarge == 'Top' & lagorLarge == 'Botton') |
           (orLarge == 'Botton' & lagorLarge == 'Top') |
           # BUG FIX: 'Rigth' -> 'Right' (typo made this clause match nothing).
           (orLarge == 'Left' & lagorLarge == 'Right') |
           (orLarge == 'Right' & lagorLarge == 'Left')) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'equ')
# Cell counts before fitting.
datsereq3 %>% group_by(subject, sign, orSmall) %>% summarise(n = n())
fitequcumnormser3 <- quickpsy(datsereq3, orSmall, response,
                              grouping = .(subject, sign),
                              B = 1000, fun = f,
                              parini = list(c(-2, 2), c(0.1, 3), c(0.1, 3)),
                              bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser3$parcomparisons %>% filter(parn == 'p1', subject == subject2)
plot(fitequcumnormser3, color = sign)
#### orLarge == lagorLarge
# Equality-task trials preceded by the SAME large orientation (previous trial
# may come from either task: no lagTask restriction here).
datsereqc <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
# Cell counts before fitting.
datsereqc %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitequcumnormserc <- quickpsy(datsereqc, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
fitequcumnormserc$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormserc, color = sign)
### Equality-task trials preceded by the OPPOSITE large orientation (previous
### trial may come from either task).
datsereq2c <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  filter((orLarge == 'Top' & lagorLarge == 'Botton') |
           (orLarge == 'Botton' & lagorLarge == 'Top') |
           # BUG FIX: 'Rigth' -> 'Right' (typo made this clause match nothing).
           (orLarge == 'Left' & lagorLarge == 'Right') |
           (orLarge == 'Right' & lagorLarge == 'Left')) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'equ')
# Cell counts before fitting.
datsereq2c %>% group_by(subject, sign, orSmall) %>% summarise(n = n())
fitequcumnormser2c <- quickpsy(datsereq2c, orSmall, response,
                               grouping = .(subject, sign),
                               B = 1000, fun = f,
                               parini = list(c(-2, 2), c(0.1, 3), c(0.1, 3)),
                               bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser2c$parcomparisons %>% filter(parn == 'p1', subject == subject2)
plot(fitequcumnormser2c, color = sign)
### Equality-task trials preceded by the SAME or the OPPOSITE large
### orientation (previous trial may come from either task).
datsereq3c <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(orLarge == lagorLarge |
           (orLarge == 'Top' & lagorLarge == 'Botton') |
           (orLarge == 'Botton' & lagorLarge == 'Top') |
           # BUG FIX: 'Rigth' -> 'Right' (typo made this clause match nothing).
           (orLarge == 'Left' & lagorLarge == 'Right') |
           (orLarge == 'Right' & lagorLarge == 'Left')) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'equ')
# Cell counts before fitting.
datsereq3c %>% group_by(subject, sign, orSmall) %>% summarise(n = n())
fitequcumnormser3c <- quickpsy(datsereq3c, orSmall, response,
                               grouping = .(subject, sign),
                               B = 1000, fun = f,
                               parini = list(c(-2, 2), c(0.1, 3), c(0.1, 3)),
                               bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser3c$parcomparisons %>% filter(parn == 'p1', subject == subject2)
plot(fitequcumnormser3c, color = sign)
# Quick (B = 1) fits of the current datser selection, then two single-pair
# selections (Left after Left, Left after Right) compared side by side.
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject,sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B=1)
# t-test on the within-subject threshold differences between signs.
dife <- fitcompser$thresholdcomparisons %>%
filter(subject==subject2)
t.test(dife$dif)
plot(fitcompser, color = sign)
### comp orLarge
# Left trials preceded by Left.
datser <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge =='Left', lagorLarge=='Left') %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject, orLarge, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B=1)
dife <- fitcompser$thresholdcomparisons %>%
filter(subject==subject2,orLarge==orLarge2)
t.test(dife$dif)
p1<- plot(fitcompser, color = sign)
# Left trials preceded by Right.
datser <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge =='Left', lagorLarge=='Right') %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject, orLarge, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B=1)
dife <- fitcompser$thresholdcomparisons %>%
filter(subject==subject2,orLarge==orLarge2)
t.test(dife$dif)
p2<- plot(fitcompser, color = sign)
# Stack the Left-after-Left and Left-after-Right panels.
plot_grid(p1,p2, ncol = 1)
### comp orLarge
# Top trials preceded by Top, split by the sign of the previous small
# orientation; overlay the baseline (fitcomp) curves and thresholds.
datser <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(lagorLarge == 'Top', orLarge == 'Top') %>%
  #filter(vertical == lagvertical) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
datser %>% group_by(subject, orLarge, sign, orSmall) %>% summarise(n = n())
fitcompser <- quickpsy(datser, orSmall, response,
                       grouping = .(subject, orLarge, sign),
                       guess = TRUE, lapses = TRUE,
                       parini = list(c(-2, 2), c(0.1, 3), c(0, .4), c(0, .4)),
                       bootstrap = 'nonparametric',
                       B = 1)
# BUG FIX: the '+' previously started a NEW line after plot(...), so the
# chain was broken -- the plot appeared without the overlays and the orphaned
# '+ geom_vline(...)' expression errored.  The '+' must end each line.
plot(fitcompser, color = sign) +
  geom_vline(xintercept = 0, lty = 2) +
  geom_line(data = fitcomp$curves, aes(x = x, y = y)) +
  geom_vline(data = fitcomp$thresholds, aes(xintercept = thre)) +
  geom_point(data = fitcomp$averages, aes(x = orSmall, y = prob)) +
  xlim(-2, 2)
### comp orLarge
### comp orLarge
# Trials preceded by the same large orientation, sign from the raw previous
# small orientation.
datser <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge) %>%
#filter(vertical == lagvertical) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
# NOTE(review): this fit uses 'datsercomp', but the pipeline just above
# builds 'datser' (which the count line uses).  Probably a stale object from
# another section -- confirm whether 'datser' was intended here.
fitcompser <- quickpsy(datsercomp, orSmall, response,
grouping = .(subject, orLarge,sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1)
fitcompser$thresholdcomparisons %>% filter(subject==subject2,orLarge==orLarge2)
plot(fitcompser, color = sign) +
geom_vline(xintercept = 0,lty=2) +
geom_line(data=fitcomp$curves, aes(x=x,y=y))+
geom_vline(data=fitcomp$thresholds,aes(xintercept=thre)) +
geom_point(data=fitcomp$averages,aes(x=orSmall,y=prob)) +xlim(-2,2)
### comp orLarge
# Same-verticality pairs; sign defined relative to each condition's fitted
# threshold (thre merged in from fitcomp).
datser <- dat %>% merge(fitcomp$thresholds) %>%
mutate(lagvertical = lag(vertical), lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(vertical == lagvertical) %>%
mutate(sign = ifelse(lagorSmall > thre, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
# NOTE(review): again fits 'datsercomp' rather than the 'datser' built above.
fitcompser <- quickpsy(datsercomp, orSmall, response,
grouping = .(subject, orLarge,sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 50)
fitcompser$thresholdcomparisons %>% filter(subject==subject2,orLarge==orLarge2)
plot(fitcompser, color = sign) +
geom_vline(xintercept = 0,lty=2) +
geom_line(data=fitcomp$curves, aes(x=x,y=y))+
geom_vline(data=fitcomp$thresholds,aes(xintercept=thre)) +
geom_point(data=fitcomp$averages,aes(x=orSmall,y=prob)) +xlim(-2,2)
# Thresholds in wide form (one column per large orientation) for the
# correlation scatterplots below.
fitcompserthrelong <- fitcompser$thresholds %>%
select(-threinf, -thresup) %>% spread(orLarge, thre)
p1<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p2<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p3<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p4<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
plot_grid(p1,p2,p3,p4)
### comp orLarge
# Same-verticality consecutive pairs (lag taken within subject here), split
# by the sign of the previous small orientation.
datsercomp <- dat %>% group_by(subject) %>%
  mutate(lagvertical = lag(vertical), lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  group_by(vertical, lagvertical) %>%
  filter(vertical == lagvertical) %>%
  filter(lagorSmall != 0) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
fitcompser <- quickpsy(datsercomp, orSmall, response,
                       grouping = .(subject, orLarge, sign),
                       # BUG FIX: xmin/xmax were swapped (xmax = -4, xmin = 4),
                       # inverting the explanatory-variable range.
                       guess = TRUE, lapses = TRUE, xmin = -4, xmax = 4,
                       parini = list(c(-2, 2), c(0.1, 3), c(0, .4), c(0, .4)),
                       bootstrap = 'nonparametric',
                       B = 1)
plot(fitcompser, color = sign)
plotthresholds(fitcompser, color = sign)
# Wide threshold table and the four-panel PSE correlation figure for the
# comparison task; then the same for the equality task, and finally the two
# tasks combined.
fitcompserthrelong <- fitcompser$thresholds %>%
select(-threinf, -thresup) %>% spread(orLarge, thre)
p1<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p2<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p3<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p4<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
plot_grid(p1,p2,p3,p4)
### equ orLarge
# Equality task, same-verticality and same-task consecutive pairs (lag within
# subject).
datsereq <- dat %>% group_by(subject) %>%
mutate(lagvertical = lag(vertical), lagtask=lag(task),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
group_by(vertical, lagvertical) %>%
filter(vertical==lagvertical,task==lagtask) %>%
filter(lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
# Difference-of-Gaussians model used for the equality task.
f <- function(x, p) pnorm(x, p[1] - p[3], p[2]) - pnorm(x, p[1] + p[3], p[2])
fitequcumnormser <- quickpsy(datsereq, orSmall, response,
grouping = .(subject,orLarge,sign),
B = 1, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
plot(fitequcumnormser,color=sign)
pp<-plot_grid(plot(fitcompser, color = sign),plot(fitequcumnormser,color=sign))
save_plot('figures/pp.pdf',pp, base_height = 10,base_width = 30)
# PSE (parameter p1) in wide form for the equality task.
fitequserthrelong <- fitequcumnormser$par %>% filter(parn=='p1') %>%
select(-parinf, -parsup) %>% spread(orLarge, par)
pe1<- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
pe2<- ggplot(data = fitequserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
pe3<- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
pe4<- ggplot(data = fitequserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
plot_grid(pe1,pe2,pe3,pe4)
# Combine tasks: long PSE table, comp-vs-equ scatter, and between-sign
# PSE differences per subject/task/orientation.
fitequserthrelong <-fitequserthrelong %>% mutate(task='equ') %>%
select(-parn)
fitcompserthrelong <- fitcompserthrelong %>% mutate(task='comp') %>%
select(-prob)
allser <- rbind(fitequserthrelong,fitcompserthrelong)
allserlong <- allser %>% gather(orLarge,pse,-subject,-task,-sign)
allserwide <- allserlong %>% spread(task,pse)
ggplot(data = allserwide ) + facet_grid(subject~orLarge)+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=comp,y=equ, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
allserlongdif <- allserlong %>% group_by(subject,task,orLarge) %>%
summarise(dif=diff(pse))
allserlongdiflong<- allserlongdif %>% spread(task,dif)
ggplot(data = allserlongdiflong ) +# facet_grid(~orLarge)+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_abline(slope = 0, lty =2, size = sizeLine1)+
# slope = 99999 draws an (effectively) vertical reference line.
geom_abline(slope = 99999, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=comp,y=equ, shape=subject,color = factor(orLarge))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
# NOTE(review): the block below (down to the first coord_equal) is an
# ORPHANED ggplot fragment -- it starts mid-chain with geom_segment() and is
# not attached to any ggplot() object, so sourcing it errors.  It looks like
# a leftover from an earlier figure with confidence-interval segments.
geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
color ='Top-Bottom', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
color ='Top-Bottom', shape = subject), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, color='Right-Left', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
color ='Right-Left', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
color ='Right-Left', shape = subject), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PSE (deg)', y = 'PSE (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
# Sequential-effect PSE correlations (Top/Botton and Right/Left in the same
# panel), compared against the baseline figure pcor1 below.
pcorcompser1 <- ggplot(data = fitcompserthrelong )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
# geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
# color ='Top-Bottom', shape = subject), size = sizeLine1) +
# geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
# color ='Top-Bottom', shape = subject), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, shape=subject, color = factor(sign))) +
# geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
# color ='Right-Left', shape = subject), size = sizeLine1) +
# geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
# color ='Right-Left', shape = subject), size = sizeLine1) +
# guides(shape = FALSE) +
# scale_shape_discrete(solid=F) +
# scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PSE (deg)', y = 'PSE (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
plot_grid(pcor1,pcorcompser1)
# Same large orientation on consecutive trials (lag within subject), pooled
# over orientations in the fit.
datsercomp <- dat %>% group_by(subject) %>%
mutate(lagorLarge = lag(orLarge), lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagorLarge)) %>%
group_by(orLarge, lagorLarge) %>%
filter(orLarge==lagorLarge) %>%
filter(lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
fitcompser <- quickpsy(datsercomp, orSmall, response,
grouping = .(subject, sign),
# guess = TRUE, lapses = TRUE,
# parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1)
plot(fitcompser,panel=subject)
### comp vertical
# Same-verticality pairs; refit grouping by vertical (Top/Botton) vs
# horizontal (Right/Left) rather than by individual orientation.
datsercomp <- dat %>% group_by(subject) %>%
mutate(lagvertical = lag(vertical), lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
group_by(vertical, lagvertical) %>%
filter(vertical==lagvertical) %>%
filter(lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp') %>%
group_by(subject,orLarge,sign) %>%
mutate(vertical = ifelse(orLarge=='Top' | orLarge=='Botton', TRUE, FALSE))
fitcompser <- quickpsy(datsercomp, orSmall, response,
grouping = .(subject, vertical,sign),
# guess = TRUE, lapses = TRUE, xmax = -4, xmin = 4,
# parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 100)
plot(fitcompser,xpanel=subject,fill=sign)
plotthresholds(fitcompser, color=sign)
plot(fitcompser,xpanel=subject,ypanel=orLarge)
# Averages and fitted curves restricted to the vertical orientations.
ggplot(fitcompser$averages %>% filter(orLarge=='Top' | orLarge=='Botton'),
aes(x=orSmall,y=prob,lty=orLarge,color=factor(sign)))+
facet_wrap(~subject)+
geom_point()+
geom_line(data=fitcompser$curves%>% filter(orLarge=='Top' | orLarge=='Botton'),
aes(x=x,y=y,lty=orLarge,color=factor(sign)))
ggplot(fitcompser$averages %>% filter(orLarge=='Top' | orLarge=='Botton'),
aes(x=orSmall,y=prob,lty=orLarge,color=factor(sign)))+
facet_wrap(~subject)+
geom_point()+
geom_line(data=fitcompser$averages%>% filter(orLarge=='Top' | orLarge=='Botton'))
# Condition on the PREVIOUS RESPONSE rather than on the previous stimulus:
# fits are split by lagresponse (lag taken within subject).
datsercomp <- dat %>% group_by(subject) %>%
  mutate(lagvertical = lag(vertical), lagresponse = lag(response)) %>%
  filter(!is.na(lagvertical)) %>%
  group_by(vertical, lagvertical) %>%
  filter(vertical == lagvertical) %>%
  filter(task == 'comp')
fitcompser <- quickpsy(datsercomp, orSmall, response,
                       grouping = .(subject, orLarge, lagresponse),
                       # BUG FIX: xmin/xmax were swapped (xmax = -4, xmin = 4).
                       guess = TRUE, lapses = TRUE, xmin = -4, xmax = 4,
                       parini = list(c(-2, 2), c(0.1, 3), c(0, .4), c(0, .4)),
                       bootstrap = 'nonparametric',
                       B = 1)
plot(fitcompser, xpanel = subject, ypanel = orLarge)
### comparisons
# Threshold differences for matching subject/orientation pairs.
fitcompser$thresholdcomparisons %>%
filter(subject==subject2, orLarge==orLarge2)
# NOTE(review): the lines below are ORPHANED pipeline fragments (they start
# mid-chain, are not assigned, and reference '.' outside do()); sourcing
# them errors.  They look like leftovers from interactive debugging.
do(print(head(.)))
filter(!is.na(lagvertical)) %>% do(print(as.data.frame(.)))
group_by(lagvertical) %>% do(print(.))
filter(!is.na(lagorSmall), lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1))
# Simplest sequential split: sign of the previous small orientation only
# (zero previous orientations dropped), ignoring orientation transitions.
datser <- dat %>% ungroup() %>% mutate(lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagorSmall), lagorSmall != 0) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1))
# Comparison-task subset (a no-op empty mutate() was removed here).
datcompser <- datser %>% filter(task == 'comp')
fitcompser <- quickpsy(datcompser, orSmall, response,
                       grouping = .(subject, orLarge, sign),
                       # BUG FIX: xmin/xmax were swapped (xmax = -4, xmin = 4).
                       guess = TRUE, lapses = TRUE, xmin = -4, xmax = 4,
                       parini = list(c(-2, 2), c(0.1, 3), c(0, .4), c(0, .4)),
                       bootstrap = 'nonparametric',
                       B = 1000)
plot(fitcompser, xpanel = subject, ypanel = orLarge)
# Presumably a motion-coherence data set: label each trial by the sign of the
# previous coherence ('Preferred'/'Null'), keep previous response, and drop
# zero-coherence current trials.  (Column semantics inferred from names --
# confirm against where datm is built.)
datm <- datm %>% group_by(name) %>%
mutate(lagcoh=lag(cohwithsign), lagresp=lag(resp),
signcoh=ifelse(lagcoh>0,'Preferred','Null')) %>%
filter(!is.na(lagcoh),!is.na(lagresp),coh != 0)
### HHT NO SYM psi = .5 ######################################################
# HHT psychometric function (asymmetric-criteria variant of this section).
# x: stimulus level(s); p: c(intercept, slope, upper criterion, mixing
# weight, lower criterion); reference: 0 or 1 selects the response mapping.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  if (reference == 0) {
    pnorm(mu - p[3]) + p[4] * (pnorm(p[3] - mu) - pnorm(p[5] - mu))
  } else if (reference == 1) {
    pnorm(mu + p[5]) + (1 - p[4]) * (pnorm(-p[5] - mu) - pnorm(p[5] - mu))
  }
}
# Maximum-likelihood fit of the 5-parameter HHT model to one group of
# aggregated data d (columns orLarge, orSmall, response, n).
# Returns a long data.frame with one row per parameter (parn = 'p1'..'p5').
hhtparfun <- function(d) {
  create_nll <- function(d){
    # Binomial negative log-likelihood; reference 0 for Top/Right rows,
    # reference 1 for Botton/Left rows.
    function(p) {
      d1 <- d %>% filter(orLarge == 'Top' | orLarge == 'Right')
      d2 <- d %>% filter(orLarge == 'Botton' | orLarge == 'Left')
      phi1 <- hhtfun(d1$orSmall, p, 0)
      phi2 <- hhtfun(d2$orSmall, p, 1)
      # Clamp probabilities away from 0/1 so log() stays finite.
      phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
      phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
      phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      sum1 <- -sum(d1$response * log(phi1) +
                     (d1$n - d1$response) * log(1 - phi1))
      sum2 <- -sum(d2$response * log(phi2) +
                     (d2$n - d2$response) * log(1 - phi2))
      sum1 + sum2
    }
  }
  nll <- create_nll(d)
  # BUG FIX: the bounds previously had length 4 for 5 parameters, so optim()
  # silently recycled them, misaligning every constraint, and the start
  # values violated the recycled box.  Bounds now match the 5 parameters in
  # the same layout as the 'HHT NO SYM' fit later in this file (p3 <= 0,
  # p4 in [0, 1], p5 <= 0), with start values inside the box; '$par' replaces
  # the fragile partial match '$p'.
  p <- optim(c(0, 1, -1, 0.5, -1), nll, method = 'L-BFGS-B',
             lower = c(-3, .1, -3, 0, -3),
             upper = c( 3,  3,  0, 1,  0))$par
  data.frame(p, parn = paste0('p', seq(1, length(p))))
}
# Fit the HHT model per subject x vertical split of the averaged data.
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
# Model curves over the fitted x-range, labelled with the orientation that
# corresponds to each reference (vertical: Top/Botton, otherwise Right/Left).
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
# Wide parameter table: one column per parameter.
hhtparlong <- hhtpar %>% spread(parn,p)
# Data, fitted curves, the p5 criterion, and the -p1/p2 bias line (dashed).
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
#geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))+
geom_vline(data = hhtparlong, aes(xintercept = -p1/p2),lty=2)
### pru ########################################################################
# Reference-0 branch of the HHT model as a standalone curve for exploration.
# p = c(intercept, slope, upper criterion, mixing weight, lower criterion).
garciafun <- function(x, p) {
  mu <- p[1] + p[2] * x
  pnorm(mu - p[3]) + p[4] * (pnorm(p[3] - mu) - pnorm(p[5] - mu))
}
# Visual exploration of garciafun for two hand-picked parameter sets.
# NOTE(review): p1/p2 here clobber the ggplot objects p1/p2 created by the
# threshold-correlation figures earlier in the script.
xseq <- seq(-2,2,.01)
p1 <- c(0, 4.4, 1.5, 0.6, -1)
p2 <- c(0, 4.4, 1, .4, -1.5)
#p1 <- c(2, 4.4, 1, 0.6, -1)
#p2 <- c(2, 4.4, 1, .4, -1)
yseq <- garciafun(xseq,p1)
yseq2 <- garciafun(xseq,p2)
dd <- data.frame(x=xseq,y=yseq,y2=yseq2)
# Solid: first parameter set; dashed: second; vertical lines at the criteria.
ggplot(dd) +
geom_line(aes(x=x,y=y)) +
geom_line(aes(x=x,y=y2),lty=2) +
geom_vline(xintercept = c(p1[3],p1[5]))+
ylim(0,1)
### HHT NO SYM xi = .5 ########################################################
# HHT variant (xi = .5): criteria are p3/p4 plus a common shift p5; the
# mixing weight is fixed at .5.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  hi <- p[3] + p[5]  # shifted upper criterion
  lo <- p[4] + p[5]  # shifted lower criterion
  if (reference == 0) {
    pnorm(mu - hi) + .5 * (pnorm(hi - mu) - pnorm(lo - mu))
  } else if (reference == 1) {
    pnorm(mu + lo) + .5 * (pnorm(-lo - mu) - pnorm(-hi - mu))
  }
}
# ML fit of the shifted-criteria HHT variant (see hhtfun above) to one group
# of aggregated data d (columns orLarge, orSmall, response, n).
hhtparfun <- function(d) {
create_nll <- function(d){
# Binomial negative log-likelihood; reference 0 for Top/Right rows,
# reference 1 for Botton/Left rows.
function(p) {
d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
phi1 <- hhtfun(d1$orSmall, p, 0)
phi2 <- hhtfun(d2$orSmall, p, 1)
# Clamp probabilities away from 0/1 so log() stays finite.
phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
sum1 <- -sum(d1$response * log(phi1) +
(d1$n - d1$response) * log(1 - phi1) )
sum2 <- -sum(d2$response * log(phi2) +
(d2$n - d2$response) * log(1 - phi2) )
sum1 + sum2
}
}
nll <- create_nll(d)
#p <- optim(c(0,1,-1,1, 0.5), nll)$p
# NOTE(review): the start value for p4 (0.5) lies above its upper bound (0);
# confirm the intended start/bounds for L-BFGS-B.  Also '$p' relies on
# partial matching of optim()'s '$par' component.
p <- optim(c(0,1,0, 0.5,1), nll, method = 'L-BFGS-B',
lower = c(-3,.1, 0,-3,-3),
upper = c( 3,10, 3, 0,3))$p
data.frame(p,parn=paste0('p', seq(1, length(p))))
}
# Fit the shifted-criteria variant per subject x vertical split.
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
# Model curves over the fitted x-range, labelled per reference.
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
hhtparlong <- hhtpar %>% spread(parn,p)
# Data, curves, and vertical lines at the p3/p4 criteria and the p5 shift.
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p4'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))
### HHT NO SYM xi = .5 ########################################################
# HHT variant (xi = .5, no shift): upper criterion p3, lower criterion p4,
# mixing weight fixed at .5.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  if (reference == 0) {
    pnorm(mu - p[3]) + .5 * (pnorm(p[3] - mu) - pnorm(p[4] - mu))
  } else if (reference == 1) {
    pnorm(mu + p[4]) + .5 * (pnorm(-p[4] - mu) - pnorm(-p[3] - mu))
  }
}
# ML fit of the 4-parameter xi = .5 variant (see hhtfun above) to one group
# of aggregated data d (columns orLarge, orSmall, response, n).
hhtparfun <- function(d) {
create_nll <- function(d){
# Binomial negative log-likelihood; reference 0 for Top/Right rows,
# reference 1 for Botton/Left rows.
function(p) {
d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
phi1 <- hhtfun(d1$orSmall, p, 0)
phi2 <- hhtfun(d2$orSmall, p, 1)
# Clamp probabilities away from 0/1 so log() stays finite.
phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
sum1 <- -sum(d1$response * log(phi1) +
(d1$n - d1$response) * log(1 - phi1) )
sum2 <- -sum(d2$response * log(phi2) +
(d2$n - d2$response) * log(1 - phi2) )
sum1 + sum2
}
}
nll <- create_nll(d)
#p <- optim(c(0,1,-1,1, 0.5), nll)$p
# NOTE(review): the start vector has 5 entries although this hhtfun only
# uses p[1..4] -- p5 is a free, unused dimension -- and the length-4 bounds
# are recycled to length 5 by optim().  Confirm whether a 4-element start
# (and the removal of the p5 rows downstream) was intended.
p <- optim(c(0,1,0, 0.5,1), nll, method = 'L-BFGS-B',
lower = c(-3,.1, -3,-3),
upper = c( 3,10, 3, 3))$p
data.frame(p,parn=paste0('p', seq(1, length(p))))
}
# Fit per subject x vertical split and draw data, curves and criterion lines
# (the p5 line reflects the unused parameter flagged above).
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
hhtparlong <- hhtpar %>% spread(parn,p)
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))+
geom_vline(data = hhtparlong, aes(xintercept = -p1/p2),lty=2)
### HHT SYM ####################################################################
# Symmetric HHT: criteria at +/- p[3]; p[4] is the mixing weight for
# reference 0, with the complementary weight 1 - p[4] for reference 1.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  base <- pnorm(mu - p[3])
  window <- pnorm(p[3] - mu) - pnorm(-p[3] - mu)
  if (reference == 0) {
    base + p[4] * window
  } else if (reference == 1) {
    base + (1 - p[4]) * window
  }
}
# ML fit of the SYMMETRIC HHT model (4 parameters; see hhtfun above) to one
# group of aggregated data d.  Unlike the earlier variants, the start values
# and bounds here are mutually consistent (4 starts, 4 bounds, starts inside
# the box).
hhtparfun <- function(d) {
create_nll <- function(d){
# Binomial negative log-likelihood; reference 0 for Top/Right rows,
# reference 1 for Botton/Left rows.
function(p) {
d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
phi1 <- hhtfun(d1$orSmall, p, 0)
phi2 <- hhtfun(d2$orSmall, p, 1)
# Clamp probabilities away from 0/1 so log() stays finite.
phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
sum1 <- -sum(d1$response * log(phi1) +
(d1$n - d1$response) * log(1 - phi1) )
sum2 <- -sum(d2$response * log(phi2) +
(d2$n - d2$response) * log(1 - phi2) )
sum1 + sum2
}
}
nll <- create_nll(d)
#p <- optim(c(0,1,-1,1, 0.5), nll)$p
# '$p' relies on partial matching of optim()'s '$par' component.
p <- optim(c(0,1,0, 0.55), nll, method = 'L-BFGS-B',
lower = c(-3,.1, -10, 0),
upper = c(3, 10, 10,1))$p
data.frame(p,parn=paste0('p', seq(1, length(p))))
}
# Fit per subject x vertical split and overlay the model curves on the data.
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))
### HHT NO SYM #################################################################
hhtfun <-function(x, p, reference) {
mu <- p[1] + p[2] * x
if (reference==0)
return(pnorm(mu - p[3]) + p[4] * (pnorm(p[3]- mu) - pnorm(p[5] - mu)))
if (reference==1)
return(pnorm(mu + p[5]) + (1 - p[4]) *(pnorm(-p[5]- mu) - pnorm(-p[3] - mu)))
}
hhtparfun <- function(d) {
create_nll <- function(d){
function(p) {
d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
phi1 <- hhtfun(d1$orSmall, p, 0)
phi2 <- hhtfun(d2$orSmall, p, 1)
phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
sum1 <- -sum(d1$response * log(phi1) +
(d1$n - d1$response) * log(1 - phi1) )
sum2 <- -sum(d2$response * log(phi2) +
(d2$n - d2$response) * log(1 - phi2) )
sum1 + sum2
}
}
nll <- create_nll(d)
#p <- optim(c(0,1,-1,1, 0.5), nll)$p
p <- optim(c(0,1,0, 0.5,1), nll, method = 'L-BFGS-B',
lower = c(-3,.1, -3, 0,-3),
upper = c( 3,10, 0, 1, 0))$p
data.frame(p,parn=paste0('p', seq(1, length(p))))
}
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
hhtparlong <- hhtpar %>% spread(parn,p)
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))+
geom_vline(data = hhtparlong, aes(xintercept = -p1/p2),lty=2)
hhtfun <-function(x, p, reference) {
mu <- p[1] + p[2] * x
if (reference==0)
return(pnorm(mu - p[3]) + p[5] *(pnorm(p[3]- mu) - pnorm(p[4] - mu)))
if (reference==1)
return(pnorm(mu + p[4]) + (1 - p[5]) *(pnorm(-p[4]- mu) - pnorm(-p[3] - mu)))
}
hhtparfun <- function(d) {
create_nll <- function(d){
function(p) {
d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
phi1 <- hhtfun(d1$orSmall, p, 0)
phi2 <- hhtfun(d2$orSmall, p, 1)
phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
sum1 <- -sum(d1$response * log(phi1) +
(d1$n - d1$response) * log(1 - phi1) )
sum2 <- -sum(d2$response * log(phi2) +
(d2$n - d2$response) * log(1 - phi2) )
sum1 + sum2
}
}
nll <- create_nll(d)
#p <- optim(c(0,1,-1,1, 0.5), nll)$p
p <- optim(c(0,1,-2,2, 0.55), nll, method = 'L-BFGS-B',
lower = c(-3,.1, -10, -10, 0),
upper = c(3, 10, 10, 10,1))$p
data.frame(p,parn=paste0('p', seq(1, length(p))))
}
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))
+
geom_vline(xintercept = 0, lty = 2) +
geom_hline(yintercept = 0.5, lty = 2) +
geom_vline(data = sdtperceptualbiasparlong, aes(xintercept = -(p3-p2)/p1)) +
geom_vline(data = sdtperceptualbiasparlong, aes(xintercept = -(p3+p2)/p1)) +
geom_vline(data = sdtperceptualbiasparlong, aes(xintercept = -p3/p1))
| /angularbias14.R | no_license | danilinares/2016LinaresAguilarLopezmoliner | R | false | false | 64,836 | r | ### libraries# #################################################################
library(cowplot)
library(tidyverse)
library(quickpsy)
### parameters #################################################################
oneColumnWidth <- 3.42
onehalfColumnWidth <- 4.5
twoColumnWidth <- 7
sizeLine1 <- .25
sizePoint1 <- 1
sizePoint2 <- 1.2
B <- 20
### read and prepare the data ##################################################
# second participants cc -> mc, ad: le he visto no haciendo la tarea en algunos trials
# participants that did the task poorly: eb, hw, lm, ym
first_participants <- c('pa', 'da', 'cc', 'cd', 'ja', 'al')
second_participants <- c('ac', 'dl', 'if', 'ld', 'ls', 'mc', 'mp', 'nv', 'xf',
'zg', 'ad', 'eb', 'hw', 'lm', 'ym')
participants <- c(first_participants, second_participants)
dat <- quickreadfiles(path = 'data',
subject = second_participants,
cond = 'angle', session = as.character(1:6))
dat <- dat %>%
select(subject, session, orLarge, orSmall, task, response) %>%
mutate(response = ifelse(
(orLarge == 0 & response == 'right') |
(orLarge == 90 & response == 'down') |
(orLarge == 180 & response == 'left') |
(orLarge == 270 & response == 'up') |
(response == 'm'), 1, 0),
vertical = ifelse(orLarge==0 | orLarge==180, TRUE, FALSE))
#dat$subject <- factor(dat$subject,
# labels = paste0('Participant ', 1:length(levelsSubj)))
dat$orLarge <- factor(dat$orLarge,
levels = c(0, 90, 180, 270),
labels = c('Top', 'Right', 'Botton', 'Left'))
datcomp <- dat %>% filter(task == 'comp')
datequ <- dat %>% filter(task == 'equ')
### comp preliminary ###########################################################
fitcomp <- quickpsy(datcomp, orSmall, response,
grouping = .(subject, orLarge),
guess = TRUE, lapses = TRUE, xmax = -4, xmin = 4,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'none')
fitcomp %>% plot(xpanel = subject, ypanel = orLarge)
participants_ok <- participants %>% setdiff(c('eb', 'hw', 'lm', 'ym'))
dat <- dat %>% filter(subject %in% participants_ok)
### removing participants that could not do the task ###########################
participants_ok <- participants %>% setdiff(c('eb', 'hw', 'lm', 'ym'))
dat <- dat %>% filter(subject %in% participants_ok)
fitcomp <- quickpsy(datcomp, orSmall, response,
grouping = .(subject, orLarge, vertical),
guess = TRUE, lapses = TRUE, xmax = -4, xmin = 4,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = B)
### frequency of button presses
datcomp %>% group_by(subject,orLarge) %>% summarise(p = mean(response))
### comparisons
fitcomp$thresholdcomparisons %>%
filter(subject==subject2, vertical, vertical2)
### choice bias prediction
choicebiascurves <- fitcomp$curves %>%
filter(orLarge=='Top' | orLarge=='Right' ) %>%
merge(fitcomp$par %>% filter(parn == 'p1')) %>%
mutate(x = x - 2*par) #%>%
filter(x > -2.1, x < 2.1)
### thresholds
fitcompthrelong <- fitcomp$thresholds %>%
select(-threinf, -thresup, -vertical) %>% spread(orLarge, thre)
fitcompthrelonginf <- fitcomp$thresholds %>%
select(-thre, -thresup, -vertical) %>% spread(orLarge, threinf) %>%
rename(Topinf = Top, Rightinf = Right, Bottoninf = Botton, Leftinf = Left)
fitcompthrelongsup <- fitcomp$thresholds %>%
select(-thre, -threinf, -vertical) %>% spread(orLarge, thresup) %>%
rename(Topsup = Top, Rightsup = Right, Bottonsup = Botton, Leftsup = Left)
fitcompthrelongwithci <- merge(fitcompthrelong,
merge(fitcompthrelonginf, fitcompthrelongsup))
### correlations
topbot <- fitcompthrelongwithci %>% select(subject, Top, Botton) %>%
rename(x = Top, y = Botton)
riglef <- fitcompthrelongwithci %>% select(subject, Right, Left) %>%
rename(x = Right, y = Left)
topbotriglef <- rbind(topbot, riglef)
cor.test(topbotriglef$x, topbotriglef$y)
toprig <- fitcompthrelongwithci %>% select(subject, Top, Right) %>%
rename(x = Top, y = Right)
lefbot <- fitcompthrelongwithci %>% select(subject, Left, Botton) %>%
rename(x = Left, y = Botton)
topriglefbot <- rbind(toprig, lefbot)
cor.test(topriglefbot$x, topriglefbot$y)
### figure comp
textReference <- 'Reference'
textProb <- 'Prob. responding clockwise'
theme_bias <- theme_set(theme_classic(10))
#theme_bias <- theme_update(axis.line = element_line(size = sizeLine1),
# axis.ticks= element_line(size = sizeLine1))
theme_bias <- theme_update(axis.line.x = element_line(colour = 'black',
size=sizeLine1,
linetype='solid'),
axis.line.y = element_line(colour = 'black',
size=sizeLine1,
linetype='solid'),
axis.ticks= element_line(size = sizeLine1))
funpsychocomp <- function(flagVertical, flagOrder) {
colorcurves1 <- ifelse(flagVertical,'#e41a1c','#4daf4a')
colorcurves2 <- ifelse(flagVertical,'#377eb8','#984ea3')
colorchoicebiascurves <- ifelse(flagVertical,'#377eb8','#984ea3')
ggplot(fitcomp$averages %>% filter(vertical==flagVertical),
aes(x = orSmall, y = prob, color = orLarge, shape=orLarge)) +
facet_wrap(~subject,scales = 'free_x') +
geom_vline(xintercept = 0, lty = 2, size = sizeLine1)+
geom_point(size = sizePoint1) +
geom_line(data = fitcomp$curves %>% filter(vertical==flagVertical),
aes(x = x, y = y),
size = sizeLine1) +
geom_line(data = choicebiascurves %>% filter(vertical==flagVertical),
aes(x = x, y = y),
size = sizeLine1, lty =2, color = colorchoicebiascurves) +
geom_segment(data = fitcomp$thresholds %>%
filter(vertical==flagVertical),
aes(x=threinf,xend = thresup, y = .5, yend = 0.5,
color=orLarge),
size = sizeLine1) +
scale_color_manual(values = c(colorcurves1,colorcurves2)) +
guides(color = guide_legend(reverse=flagOrder),
shape = guide_legend(reverse=flagOrder)) +
labs(x = 'Orientation (deg)', y = textProb,
color = textReference, shape = textReference) +
scale_y_continuous(breaks = c(0,.5,1)) +
coord_cartesian(xlim=c(-2.1, 2.1),ylim=c(0,1)) +
theme(strip.background = element_blank())
}
plotcomp0 <- funpsychocomp(TRUE, FALSE)
plotcomp90 <- funpsychocomp(FALSE, TRUE)
pcor2 <- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Right, color ='Top-Right'),
size = sizePoint2) +
geom_segment(aes(x = Topinf, xend = Topsup, y = Right, yend = Right,
color ='Top-Right'), size = sizeLine1) +
geom_segment(aes(x = Top, xend = Top, y = Rightinf, yend = Rightsup,
color ='Top-Right'), size = sizeLine1) +
geom_point(aes(x=Left,y=Botton, color='Left-Bottom', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Leftinf, xend = Leftsup, y = Botton, yend = Botton,
color ='Left-Bottom'), size = sizeLine1) +
geom_segment(aes(x = Left, xend = Left, y = Bottoninf, yend = Bottonsup,
color ='Left-Bottom'), size = sizeLine1) +
guides(shape = FALSE) +
scale_color_manual(values = c('#a65628','#f781bf')) +
scale_shape_discrete(solid=F) +
labs(x = 'PND (deg)', y = 'PND (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1'))
# coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
pcor1 <- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Botton, color ='Top-Bottom'),
size = sizePoint2) +
# geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
# color ='Top-Bottom'), size = sizeLine1) +
# geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
# color ='Top-Bottom'), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, color='Right-Left'),
size = sizePoint2) +
# geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
# color ='Right-Left'), size = sizeLine1) +
# geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
# color ='Right-Left'), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PND (deg)', y = 'PND (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1, 1), ylim = c(-1, 1))
coord_equal()
pcomppsy <- plot_grid(plotcomp90, plotcomp0, labels = c('A','B'),
ncol = 1, hjust = 0, vjust = 1)
pcor <- plot_grid(pcor1, labels = 'C', hjust = 0)
pcomp <- plot_grid(pcomppsy, pcor, ncol =1, rel_heights = c(2.6,.8))
save_plot('figures/comp.pdf', pcomp,
base_width = onehalfColumnWidth,
base_height = 2.5 * onehalfColumnWidth)
### fit two cumulative normal ##################################################
f <- function(x, p) pnorm(x, p[1] - p[3], p[2]) - pnorm(x, p[1] + p[3], p[2])
fitequcumnorm <- quickpsy(datequ, orSmall, response,
grouping = .(subject,orLarge,vertical),
B = 20, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
### calculating the maximum
equcumnormmax <- fitequcumnorm$curves %>%
summarise(maxi=approx(x=y,y=x,xout=max(y))[[1]])
### comparisons
fitequcumnorm$parcomparisons %>%
filter(parn =='p1', subject==subject2, vertical, vertical2)
### pse
pse <- fitequcumnorm$par %>% filter(parn=='p1') %>% merge(equcumnormmax)
fiteqpselong <- pse %>%
select(-parinf, -parsup, -vertical, -maxi) %>% spread(orLarge, par)
fiteqpselonginf <- pse %>%
select(-par, -parsup, -vertical,-maxi) %>% spread(orLarge, parinf) %>%
rename(Topinf = Top, Rightinf = Right, Bottoninf = Botton, Leftinf = Left)
fiteqpselongsup <- pse %>%
select(-par, -parinf, -vertical,-maxi) %>% spread(orLarge, parsup) %>%
rename(Topsup = Top, Rightsup = Right, Bottonsup = Botton, Leftsup = Left)
fiteqpselongwithci <- merge(fiteqpselong,
merge(fiteqpselonginf, fiteqpselongsup))
### correlations
topboteq <- fiteqpselong %>% select(subject, Top, Botton) %>%
rename(x = Top, y = Botton)
riglefeq <- fiteqpselong %>% select(subject, Right, Left) %>%
rename(x = Right, y = Left)
topbotriglefeq <- rbind(topboteq, riglefeq)
cor.test(topbotriglefeq$x, topbotriglefeq$y)
toprigeq <- fiteqpselong %>% select(subject, Top, Right) %>%
rename(x = Top, y = Right)
lefboteq <- fiteqpselong %>% select(subject, Left, Botton) %>%
rename(x = Left, y = Botton)
topriglefboteq <- rbind(toprigeq, lefboteq)
cor.test(topriglefboteq$x, topriglefboteq$y)
### figure eq
textProb2 <- 'Prob. responding aligned'
funpsychoeq <- function(flagVertical, flagOrder) {
colorcurves1 <- ifelse(flagVertical,'#e41a1c','#4daf4a')
colorcurves2 <- ifelse(flagVertical,'#377eb8','#984ea3')
colorchoicebiascurves <- ifelse(flagVertical,'#377eb8','#984ea3')
ggplot() +
facet_wrap(~subject,scales = 'free_x') +
geom_vline(xintercept = 0, lty = 2, size = sizeLine1)+
geom_rect(data = fitcomp$thresholds %>%
filter(vertical==flagVertical),
aes(xmin=threinf,xmax=thresup,ymin=0, ymax=1, fill=orLarge),
show.legend = FALSE,alpha = .25) +
# geom_segment(data = fitcomp$thresholds %>%
# filter(vertical==flagVertical),
# aes(x=threinf,xend = thresup, y = 0, yend = 0,
# color=orLarge), show.legend = FALSE,
# alpha = .25, size = 2.25) +
geom_point(data=fitequcumnorm$averages %>% filter(vertical==flagVertical),
aes(x = orSmall, y = prob, color = orLarge, shape=orLarge),
size = sizePoint1) +
geom_segment(data = pse %>% filter(vertical==flagVertical),
aes(x = parinf, xend = parsup, y = 0, yend = 0,
color = orLarge),size = sizeLine1) +
geom_segment(data = pse %>% filter(vertical==flagVertical),
aes(x = par, xend = par, y = 0, yend = maxi,
color = orLarge),size = sizeLine1) +
geom_line(data = fitequcumnorm$curves %>% filter(vertical==flagVertical),
aes(x = x, y = y, color = orLarge),
size = sizeLine1) +
scale_color_manual(values = c(colorcurves1,colorcurves2)) +
guides(color = guide_legend(reverse=flagOrder),
shape = guide_legend(reverse=flagOrder)) +
labs(x = 'Orientation (deg)', y = textProb2,
color = textReference, shape = textReference) +
scale_y_continuous(breaks = c(0,.5,1)) +
coord_cartesian(xlim=c(-2.1, 2.1),ylim=c(0,1)) +
theme(strip.background = element_blank())
}
ploteq0 <- funpsychoeq(TRUE, FALSE)
ploteq90 <- funpsychoeq(FALSE, TRUE)
pcoreq2 <- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Right, color ='Top-Right', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Topinf, xend = Topsup, y = Right, yend = Right,
color ='Top-Right', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Top, xend = Top, y = Rightinf, yend = Rightsup,
color ='Top-Right', shape = subject), size = sizeLine1) +
geom_point(aes(x=Left,y=Botton, color='Left-Bottom', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Leftinf, xend = Leftsup, y = Botton, yend = Botton,
color ='Left-Bottom', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Left, xend = Left, y = Bottoninf, yend = Bottonsup,
color ='Left-Bottom', shape = subject), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#a65628','#f781bf')) +
labs(x = 'PMR (deg)', y = 'PMR (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
pcoreq1 <- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Botton, color ='Top-Bottom'),
size = sizePoint2) +
# geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
# color ='Top-Bottom'), size = sizeLine1) +
# geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
# color ='Top-Bottom'), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, color='Right-Left'),
size = sizePoint2) +
# geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
# color ='Right-Left'), size = sizeLine1) +
# geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
# color ='Right-Left'), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PMR (deg)', y = 'PMR (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
peqpsy <- plot_grid(ploteq90, ploteq0, labels = c('A','B'),
ncol = 1, hjust = 0, vjust = 1)
pcoreq <- plot_grid(pcoreq1, labels = 'C', hjust = 0)
peq <- plot_grid(peqpsy, pcoreq, ncol =1, rel_heights = c(2.6,.8))
save_plot('figures/eq.pdf', peq,
base_width = onehalfColumnWidth,
base_height = 2.5 * onehalfColumnWidth)
### fig sup ###################################################################
psup <-plot_grid(pcor2, pcoreq2, labels = c('A','B'), ncol=1, hjust = 0)
save_plot('figures/sup.pdf', psup,
base_width = oneColumnWidth,
base_height = 1.5*oneColumnWidth)
### fig all ####################################################################
all <- merge(pse, fitcomp$thresholds)
confint(lm(all$thre~all$par))
cor.test(all$par,all$thre)
pcorcompeq <- ggplot(data = all )+ #facet_wrap(~orLarge)+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=par,y=thre,color=orLarge))+
# geom_segment(aes(x = parinf, xend = parsup, y = thre, yend = thre,
# color =orLarge), size = sizeLine1) +
# geom_segment(aes(x = par, xend = par, y = threinf, yend = thresup,
# color =orLarge), size = sizeLine1) +
scale_shape_discrete(solid=F) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))+
labs(x = 'PMR asymmetric task (deg)', y = 'PND symmetric task (deg)') +
guides(shape = FALSE) +
scale_colour_brewer(palette = 'Set1')+
labs(color = textReference) +
theme(legend.key.size = unit(1,'line'),
plot.margin = unit(c(-5,0,0,0), 'line'))
save_plot('figures/all.pdf', pcorcompeq,
base_width = oneColumnWidth,
base_height = oneColumnWidth)
#### comp and eq comp ##########################################################
compeq <- fitequcumnorm$parbootstrap %>% filter(parn == 'p1') %>%
select(-parn) %>% merge(fitcomp$thresholdsbootstrap %>% select(-prob)) %>%
mutate(dif = par - thre) %>% group_by(subject, orLarge) %>%
summarise(inf = quantile(dif, .025), sup = quantile(dif,.975),
sign = ifelse(inf * sup > 0, 1, 0))
################################################################################
### serial effects #############################################################
################################################################################
#### orLarge == lagorLarge, task == lagTask
datser <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge, task == lagTask) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife <- fitcompser$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompser, color = sign)
datser %>% group_by(sign, orSmall) %>% summarise(n=n())
fitcompserind <- quickpsy(datserind, orSmall, response,
grouping = .(sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
difeind <- fitcompserind$thresholdcomparisons
plot(fitcompserind, color = sign)
datserpse <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge, task == lagTask) %>%
merge(fitcomp$thresholds) %>%
mutate(sign = ifelse(lagorSmall > thre, 1, -1)) %>%
filter(task == 'comp')
datserpse %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompserpse <- quickpsy(datserpse, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
difepse <- fitcompserpse$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompserpse, color = sign)
datser2 <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(task == lagTask,
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')
) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser2 %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompser2 <- quickpsy(datser2, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife2 <- fitcompser2$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompser2, color = sign)
datser2 %>% group_by(sign, orSmall) %>% summarise(n=n())
fitcompser2ind <- quickpsy(datser2, orSmall, response,
grouping = .(sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife2ind <- fitcompser2ind$thresholdcomparisons #%>% filter(subject==subject2)
plot(fitcompser2ind, color = sign)
datser3 <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter( task == lagTask,
orLarge==lagorLarge |
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')
) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser3 %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompser3 <- quickpsy(datser3, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife3 <- fitcompser3$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompser3, color = sign)
fitcompser3ind <- quickpsy(datser3, orSmall, response,
grouping = .(sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife3ind <- fitcompser3ind$thresholdcomparisons
plot(fitcompser3ind, color = sign)
#### orLarge == lagorLarge
datserc <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datserc %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompserc <- quickpsy(datserc, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
difec <- fitcompserc$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompserc, color = sign)
datser2c <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')
) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser2c %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompser2c <- quickpsy(datser2c, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife2c <- fitcompser2c$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompser2c, color = sign)
datser3c <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter( orLarge==lagorLarge |
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')
) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser3c %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitcompser3c <- quickpsy(datser3c, orSmall, response,
grouping = .(subject, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1000)
dife3c <- fitcompser3c$thresholdcomparisons %>% filter(subject==subject2)
plot(fitcompser3c, color = sign)
#### orLarge == lagorLarge, task == lagTask
datsereq <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge, task == lagTask) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
datsereq %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
f <- function(x, p) pnorm(x, p[1] - p[3], p[2]) - pnorm(x, p[1] + p[3], p[2])
fitequcumnormser <- quickpsy(datsereq, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormser, color = sign)
datsereq2 <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter( task == lagTask,
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')
) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
datsereq2 %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitequcumnormser2 <- quickpsy(datsereq2, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser2$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormser2, color = sign)
datsereq3 <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall),
lagTask = lag(task)) %>%
filter(!is.na(lagvertical)) %>%
filter(task == lagTask,
orLarge==lagorLarge |
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
datsereq3 %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitequcumnormser3 <- quickpsy(datsereq3, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser3$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormser3, color = sign)
#### orLarge == lagorLarge
datsereqc <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge == lagorLarge) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
datsereqc %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitequcumnormserc <- quickpsy(datsereqc, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
fitequcumnormserc$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormserc, color = sign)
datsereq2c <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter((orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
datsereq2c %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitequcumnormser2c <- quickpsy(datsereq2c, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser2c$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormser2c, color = sign)
datsereq3c <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge==lagorLarge |
(orLarge=='Top' & lagorLarge=='Botton') |
(orLarge=='Botton' & lagorLarge=='Top') |
(orLarge=='Left' & lagorLarge=='Rigth') |
(orLarge=='Right' & lagorLarge=='Left')) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
datsereq3c %>% group_by(subject, sign, orSmall) %>% summarise(n=n())
fitequcumnormser3c <- quickpsy(datsereq3c, orSmall, response,
grouping = .(subject,sign),
B = 1000, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
fitequcumnormser3c$parcomparisons %>% filter(parn=='p1', subject==subject2)
plot(fitequcumnormser3c, color = sign)
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject,sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B=1)
dife <- fitcompser$thresholdcomparisons %>%
filter(subject==subject2)
t.test(dife$dif)
plot(fitcompser, color = sign)
### comp orLarge
datser <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge =='Left', lagorLarge=='Left') %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject, orLarge, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B=1)
dife <- fitcompser$thresholdcomparisons %>%
filter(subject==subject2,orLarge==orLarge2)
t.test(dife$dif)
p1<- plot(fitcompser, color = sign)
datser <- dat %>%
mutate(lagvertical = lag(vertical),
lagorLarge = lag(orLarge),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
filter(orLarge =='Left', lagorLarge=='Right') %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
grouping = .(subject, orLarge, sign),
guess = TRUE, lapses = TRUE,
parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B=1)
dife <- fitcompser$thresholdcomparisons %>%
filter(subject==subject2,orLarge==orLarge2)
t.test(dife$dif)
p2<- plot(fitcompser, color = sign)
plot_grid(p1,p2, ncol = 1)
### comp orLarge
# Serial-dependence subset: current and previous large grating both 'Top';
# sign encodes whether the previous test orientation was positive.
datser <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(lagorLarge == 'Top', orLarge == 'Top') %>%
  #filter(vertical == lagvertical) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
# Trial counts per cell (sanity check)
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
fitcompser <- quickpsy(datser, orSmall, response,
                       grouping = .(subject, orLarge,sign),
                       guess = TRUE, lapses = TRUE,
                       parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
                       bootstrap = 'nonparametric',
                       B = 1)
# Overlay the serial-dependence fits on the overall fitcomp curves/thresholds.
# Fix: the original left a bare '+' on its own line after plot(), so the
# layers below were never added (cf. the correctly chained copy further down).
plot(fitcompser, color = sign) +
  geom_vline(xintercept = 0,lty=2) +
  geom_line(data=fitcomp$curves, aes(x=x,y=y))+
  geom_vline(data=fitcomp$thresholds,aes(xintercept=thre)) +
  geom_point(data=fitcomp$averages,aes(x=orSmall,y=prob)) +xlim(-2,2)
### comp orLarge
### comp orLarge
# Same-orientation serial dependence: keep trials whose large-grating
# orientation repeats the previous trial's.
datser <- dat %>%
  mutate(lagvertical = lag(vertical),
         lagorLarge = lag(orLarge),
         lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(orLarge == lagorLarge) %>%
  #filter(vertical == lagvertical) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
# Fix: the fit originally used `datsercomp`, a stale object from an earlier
# section, while the `datser` built just above went unused.
fitcompser <- quickpsy(datser, orSmall, response,
                       grouping = .(subject, orLarge,sign),
                       guess = TRUE, lapses = TRUE,
                       parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
                       bootstrap = 'nonparametric',
                       B = 1)
# Paired threshold comparisons within subject and large-grating orientation
fitcompser$thresholdcomparisons %>% filter(subject==subject2,orLarge==orLarge2)
plot(fitcompser, color = sign) +
  geom_vline(xintercept = 0,lty=2) +
  geom_line(data=fitcomp$curves, aes(x=x,y=y))+
  geom_vline(data=fitcomp$thresholds,aes(xintercept=thre)) +
  geom_point(data=fitcomp$averages,aes(x=orSmall,y=prob)) +xlim(-2,2)
### comp orLarge
# Here the previous-trial sign is computed RELATIVE to each cell's fitted
# threshold (thre, merged in from fitcomp) instead of relative to zero.
datser <- dat %>% merge(fitcomp$thresholds) %>%
  mutate(lagvertical = lag(vertical), lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  filter(vertical == lagvertical) %>%
  mutate(sign = ifelse(lagorSmall > thre, 1, -1)) %>%
  filter(task == 'comp')
datser %>% group_by(subject,orLarge,sign,orSmall) %>% summarise(n=n())
# Fix: the fit originally used the stale `datsercomp`; use the `datser`
# computed just above (with threshold-relative sign), which was unused.
fitcompser <- quickpsy(datser, orSmall, response,
                       grouping = .(subject, orLarge,sign),
                       guess = TRUE, lapses = TRUE,
                       parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
                       bootstrap = 'nonparametric',
                       B = 50)
fitcompser$thresholdcomparisons %>% filter(subject==subject2,orLarge==orLarge2)
plot(fitcompser, color = sign) +
  geom_vline(xintercept = 0,lty=2) +
  geom_line(data=fitcomp$curves, aes(x=x,y=y))+
  geom_vline(data=fitcomp$thresholds,aes(xintercept=thre)) +
  geom_point(data=fitcomp$averages,aes(x=orSmall,y=prob)) +xlim(-2,2)
# Wide table of thresholds: one column per large-grating orientation.
# 'Botton' is the dataset's spelling of the bottom level (used throughout).
fitcompserthrelong <- fitcompser$thresholds %>%
select(-threinf, -thresup) %>% spread(orLarge, thre)
# p1/p3: overall thresholds (fitcompthrelongwithci, built earlier in the
# file); p2/p4: serial-dependence thresholds split by previous-trial sign.
p1<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p2<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p3<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p4<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
plot_grid(p1,p2,p3,p4)
### comp orLarge
# Serial-dependence subset keyed on repeated verticality: keep trials whose
# vertical/horizontal arrangement matches the previous trial's, with the
# previous test orientation non-zero.
datsercomp <- dat %>% group_by(subject) %>%
  mutate(lagvertical = lag(vertical), lagorSmall = lag(orSmall)) %>%
  filter(!is.na(lagvertical)) %>%
  group_by(vertical, lagvertical) %>%
  filter(vertical==lagvertical) %>%
  filter(lagorSmall != 0) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
  filter(task == 'comp')
# Fix: the fitting range was specified as xmax = -4, xmin = 4 (swapped);
# it should run from -4 to 4.
fitcompser <- quickpsy(datsercomp, orSmall, response,
                       grouping = .(subject, orLarge,sign),
                       guess = TRUE, lapses = TRUE, xmin = -4, xmax = 4,
                       parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
                       bootstrap = 'nonparametric',
                       B = 1)
plot(fitcompser, color = sign)
plotthresholds(fitcompser, color=sign)
# Same scatter-plot panel as above, regenerated for the verticality-based
# serial-dependence fit ('Botton' is the dataset's bottom-level spelling).
fitcompserthrelong <- fitcompser$thresholds %>%
select(-threinf, -thresup) %>% spread(orLarge, thre)
p1<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p2<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p3<- ggplot(data = fitcompthrelongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
p4<- ggplot(data = fitcompserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
plot_grid(p1,p2,p3,p4)
### equ orLarge
# Equality-task analogue of the serial-dependence subset: same verticality
# AND same task as the previous trial, previous test orientation non-zero.
datsereq <- dat %>% group_by(subject) %>%
mutate(lagvertical = lag(vertical), lagtask=lag(task),
lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
group_by(vertical, lagvertical) %>%
filter(vertical==lagvertical,task==lagtask) %>%
filter(lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'equ')
# Difference-of-cumulative-normals model for the equality task: two Gaussian
# CDFs centred p[3] below and p[3] above p[1], with common sd p[2]. The
# difference is bell-shaped in x, peaking near p[1].
f <- function(x, p) {
  lower_cdf <- pnorm(x, mean = p[1] - p[3], sd = p[2])
  upper_cdf <- pnorm(x, mean = p[1] + p[3], sd = p[2])
  lower_cdf - upper_cdf
}
# Fit the equality-task data with the custom difference-of-CDFs function f;
# thresholds = F because f is not monotonic so quickpsy thresholds are
# undefined.
fitequcumnormser <- quickpsy(datsereq, orSmall, response,
grouping = .(subject,orLarge,sign),
B = 1, fun = f,
parini=list(c(-2,2),c(0.1,3),c(0.1,3)),
bootstrap = 'nonparametric', thresholds = F)
plot(fitequcumnormser,color=sign)
pp<-plot_grid(plot(fitcompser, color = sign),plot(fitequcumnormser,color=sign))
save_plot('figures/pp.pdf',pp, base_height = 10,base_width = 30)
# PSE for the equality task is parameter p1 of f (the centre of the band)
fitequserthrelong <- fitequcumnormser$par %>% filter(parn=='p1') %>%
select(-parinf, -parsup) %>% spread(orLarge, par)
# pe1/pe3 use the overall equality PSEs (fiteqpselongwithci, built earlier);
# pe2/pe4 split by previous-trial sign.
pe1<- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
pe2<- ggplot(data = fitequserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
pe3<- ggplot(data = fiteqpselongwithci )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, color ='Top-Bottom', shape = subject)) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
pe4<- ggplot(data = fitequserthrelong ) +
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=Left,y=Right, shape =subject, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
plot_grid(pe1,pe2,pe3,pe4)
# Combine equality and comparison PSEs into one long table and scatter
# comparison vs equality per subject/orientation.
fitequserthrelong <-fitequserthrelong %>% mutate(task='equ') %>%
select(-parn)
fitcompserthrelong <- fitcompserthrelong %>% mutate(task='comp') %>%
select(-prob)
allser <- rbind(fitequserthrelong,fitcompserthrelong)
allserlong <- allser %>% gather(orLarge,pse,-subject,-task,-sign)
allserwide <- allserlong %>% spread(task,pse)
ggplot(data = allserwide ) + facet_grid(subject~orLarge)+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(size=3,aes(x=comp,y=equ, color = factor(sign))) +
coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
# PSE shift between the two previous-trial signs per subject/task/orientation,
# then a comparison-vs-equality scatter of those shifts.
allserlongdif <- allserlong %>% group_by(subject,task,orLarge) %>%
  summarise(dif=diff(pse))
allserlongdiflong<- allserlongdif %>% spread(task,dif)
ggplot(data = allserlongdiflong ) +# facet_grid(~orLarge)+
  geom_abline(slope = 1, lty =2, size = sizeLine1)+
  # Reference lines through the origin. The originals used
  # geom_abline(slope = 0) and a slope = 99999 hack for the vertical line;
  # geom_hline/geom_vline are the idiomatic equivalents.
  geom_hline(yintercept = 0, lty =2, size = sizeLine1)+
  geom_vline(xintercept = 0, lty =2, size = sizeLine1)+
  geom_point(size=3,aes(x=comp,y=equ, shape=subject,color = factor(orLarge))) +
  coord_equal(xlim = c(-1.5, 1), ylim = c(-1.5,1))
# NOTE(review): this layer chain is ORPHANED -- it starts with a bare
# geom_segment() that is not added to any ggplot object (the plot above ends
# at its coord_equal), so sourcing this section errors. It references CI
# columns (Topinf/Topsup/...) from an earlier with-CI plot and appears to be
# leftover code from that plot; consider deleting or re-attaching it.
geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
color ='Top-Bottom', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
color ='Top-Bottom', shape = subject), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, color='Right-Left', shape = subject),
size = sizePoint2) +
geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
color ='Right-Left', shape = subject), size = sizeLine1) +
geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
color ='Right-Left', shape = subject), size = sizeLine1) +
guides(shape = FALSE) +
scale_shape_discrete(solid=F) +
scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PSE (deg)', y = 'PSE (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
# Publication-style PSE correlation plot for the serial-dependence fit
# (the commented-out segments are the CI bars of the original figure).
pcorcompser1 <- ggplot(data = fitcompserthrelong )+
geom_abline(slope = 1, lty =2, size = sizeLine1)+
geom_point(aes(x=Top,y=Botton, shape =subject, color = factor(sign))) +
# geom_segment(aes(x = Topinf, xend = Topsup, y = Botton, yend = Botton,
# color ='Top-Bottom', shape = subject), size = sizeLine1) +
# geom_segment(aes(x = Top, xend = Top, y = Bottoninf, yend = Bottonsup,
# color ='Top-Bottom', shape = subject), size = sizeLine1) +
geom_point(aes(x=Right,y=Left, shape=subject, color = factor(sign))) +
# geom_segment(aes(x = Rightinf, xend = Rightsup, y = Left, yend = Left,
# color ='Right-Left', shape = subject), size = sizeLine1) +
# geom_segment(aes(x = Right, xend = Right, y = Leftinf, yend = Leftsup,
# color ='Right-Left', shape = subject), size = sizeLine1) +
# guides(shape = FALSE) +
# scale_shape_discrete(solid=F) +
# scale_color_manual(values = c('#ff7f00','#999999')) +
labs(x = 'PSE (deg)', y = 'PSE (deg)') +
theme(legend.title = element_blank()) +
scale_x_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
scale_y_continuous(breaks =seq(-1,1,.5),
labels = c('-1','-0.5','0','0.5','1')) +
coord_equal(xlim = c(-1.2, 1.2), ylim = c(-1.2, 1.2))
# Side-by-side with pcor1 (overall fit, built earlier in the file)
plot_grid(pcor1,pcorcompser1)
# Collapse across orientations: keep repeated-orLarge trials and fit only by
# subject and previous-trial sign (parameter settings left commented out).
datsercomp <- dat %>% group_by(subject) %>%
mutate(lagorLarge = lag(orLarge), lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagorLarge)) %>%
group_by(orLarge, lagorLarge) %>%
filter(orLarge==lagorLarge) %>%
filter(lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp')
fitcompser <- quickpsy(datsercomp, orSmall, response,
grouping = .(subject, sign),
# guess = TRUE, lapses = TRUE,
# parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 1)
plot(fitcompser,panel=subject)
### comp vertical
# Group by vertical vs horizontal arrangement instead of orientation.
# NOTE(review): the final mutate OVERWRITES the vertical column, recoding it
# from orLarge ('Top'/'Botton' -> TRUE) -- confirm this matches the original
# vertical flag.
datsercomp <- dat %>% group_by(subject) %>%
mutate(lagvertical = lag(vertical), lagorSmall = lag(orSmall)) %>%
filter(!is.na(lagvertical)) %>%
group_by(vertical, lagvertical) %>%
filter(vertical==lagvertical) %>%
filter(lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1)) %>%
filter(task == 'comp') %>%
group_by(subject,orLarge,sign) %>%
mutate(vertical = ifelse(orLarge=='Top' | orLarge=='Botton', TRUE, FALSE))
fitcompser <- quickpsy(datsercomp, orSmall, response,
grouping = .(subject, vertical,sign),
# guess = TRUE, lapses = TRUE, xmax = -4, xmin = 4,
# parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
bootstrap = 'nonparametric',
B = 100)
plot(fitcompser,xpanel=subject,fill=sign)
plotthresholds(fitcompser, color=sign)
plot(fitcompser,xpanel=subject,ypanel=orLarge)
# Manual plots of averages and curves restricted to the vertical conditions
ggplot(fitcompser$averages %>% filter(orLarge=='Top' | orLarge=='Botton'),
aes(x=orSmall,y=prob,lty=orLarge,color=factor(sign)))+
facet_wrap(~subject)+
geom_point()+
geom_line(data=fitcompser$curves%>% filter(orLarge=='Top' | orLarge=='Botton'),
aes(x=x,y=y,lty=orLarge,color=factor(sign)))
ggplot(fitcompser$averages %>% filter(orLarge=='Top' | orLarge=='Botton'),
aes(x=orSmall,y=prob,lty=orLarge,color=factor(sign)))+
facet_wrap(~subject)+
geom_point()+
geom_line(data=fitcompser$averages%>% filter(orLarge=='Top' | orLarge=='Botton'))
# Condition the fit on the PREVIOUS RESPONSE rather than the previous
# stimulus sign.
datsercomp <- dat %>% group_by(subject) %>%
  mutate(lagvertical = lag(vertical), lagresponse = lag(response)) %>%
  filter(!is.na(lagvertical)) %>%
  group_by(vertical, lagvertical) %>%
  filter(vertical==lagvertical) %>%
  filter(task == 'comp')
# Fix: the fitting range was specified as xmax = -4, xmin = 4 (swapped);
# it should run from -4 to 4.
fitcompser <- quickpsy(datsercomp, orSmall, response,
                       grouping = .(subject, orLarge, lagresponse),
                       guess = TRUE, lapses = TRUE, xmin = -4, xmax = 4,
                       parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
                       bootstrap = 'nonparametric',
                       B = 1)
plot(fitcompser,xpanel=subject,ypanel=orLarge)
### comparisons
fitcompser$thresholdcomparisons %>%
  filter(subject==subject2, orLarge==orLarge2)
# NOTE(review): the lines below are DETACHED pipeline fragments (debugging
# leftovers with no data frame on their left-hand side); sourcing them
# errors. They appear to be scratch snippets and can likely be deleted.
do(print(head(.)))
filter(!is.na(lagvertical)) %>% do(print(as.data.frame(.)))
group_by(lagvertical) %>% do(print(.))
filter(!is.na(lagorSmall), lagorSmall != 0) %>%
mutate(sign = ifelse(lagorSmall > 0, 1, -1))
# Previous-trial sign computed over the UNGROUPED data.
# NOTE(review): this lag crosses subject boundaries -- confirm intended.
datser <- dat %>% ungroup() %>% mutate(lagorSmall=lag(orSmall)) %>%
  filter(!is.na(lagorSmall), lagorSmall != 0) %>%
  mutate(sign = ifelse(lagorSmall > 0, 1, -1))
# (Dropped a trailing no-op mutate() that the original carried.)
datcompser <- datser %>% filter(task=='comp')
# Fix: the fitting range was specified as xmax = -4, xmin = 4 (swapped);
# it should run from -4 to 4.
fitcompser <- quickpsy(datcompser, orSmall, response,
                       grouping = .(subject, orLarge, sign),
                       guess = TRUE, lapses = TRUE, xmin = -4, xmax = 4,
                       parini = list(c(-2, 2), c(0.1,3), c(0,.4), c(0,.4)),
                       bootstrap = 'nonparametric',
                       B = 1000)
plot(fitcompser,xpanel=subject,ypanel=orLarge)
# Motion-coherence dataset: add previous-trial coherence/response per unit
# ('name'), code the previous coherence sign, and drop zero-coherence trials.
datm <- datm %>% group_by(name) %>%
mutate(lagcoh=lag(cohwithsign), lagresp=lag(resp),
signcoh=ifelse(lagcoh>0,'Preferred','Null')) %>%
filter(!is.na(lagcoh),!is.na(lagresp),coh != 0)
### HHT NO SYM psi = . 5 ######################################################
# HHT-style psychometric function with asymmetric parameters and mixing
# weight p[4]. mu = p[1] + p[2] * x is the internal response; p[3] and p[5]
# are presumed decision criteria (confirm against the model write-up).
# reference selects the comparison order of the trial (0 or 1); any other
# value falls through and returns NULL, as in the original.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  if (reference == 0) {
    pnorm(mu - p[3]) + p[4] * (pnorm(p[3] - mu) - pnorm(p[5] - mu))
  } else if (reference == 1) {
    pnorm(mu + p[5]) + (1 - p[4]) * (pnorm(-p[5] - mu) - pnorm(p[5] - mu))
  }
}
# Fit the HHT (no-symmetry, psi = .5) parameters for one subject/condition
# cell by joint maximum likelihood over the two reference assignments.
# d must contain orLarge, orSmall, response (successes) and n (trials).
# Returns a long data.frame with columns p (estimate) and parn ("p1".."p5").
hhtparfun <- function(d) {
  # Binomial negative log-likelihood under hhtfun; probabilities are clamped
  # away from 0/1 so log() stays finite.
  create_nll <- function(d){
    function(p) {
      d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
      d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
      phi1 <- hhtfun(d1$orSmall, p, 0)
      phi2 <- hhtfun(d2$orSmall, p, 1)
      phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
      phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
      phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      sum1 <- -sum(d1$response * log(phi1) +
                   (d1$n - d1$response) * log(1 - phi1))
      sum2 <- -sum(d2$response * log(phi2) +
                   (d2$n - d2$response) * log(1 - phi2))
      sum1 + sum2
    }
  }
  nll <- create_nll(d)
  # Fix: extract `$par` explicitly (the original's `$p` only resolved to
  # `par` via partial matching).
  # NOTE(review): 5 starting values but only 4 lower/upper bounds -- optim
  # recycles the bounds to length 5, and the recycled upper bound for p[4]
  # is 0 while its start value is 0.5. Confirm the intended bounds.
  p <- optim(c(0,1,0, 0.5,1), nll, method = 'L-BFGS-B',
             lower = c(-3,.1, -3,-3),
             upper = c( 3,3, 0, 0))$par
  data.frame(p, parn = paste0('p', seq(1, length(p))))
}
# Fit the HHT model per subject and verticality on the averaged data
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
# Predicted curves for one fitted cell; d$p is the parameter column of
# hhtparfun's output. Vertical cells map to Top/Botton, horizontal to
# Right/Left ('Botton' is the dataset's spelling).
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
hhtparlong <- hhtpar %>% spread(parn,p)
# Data + fitted curves; vlines mark the p5 criterion and the internal-response
# zero crossing -p1/p2 (dashed).
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
#geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))+
geom_vline(data = hhtparlong, aes(xintercept = -p1/p2),lty=2)
### pru ########################################################################
# Scratch psychometric function (same form as hhtfun's reference == 0
# branch): a cumulative normal plus a guessing band between criteria p[3]
# and p[5], entered with weight p[4]. mu = p[1] + p[2] * x.
garciafun <- function(x, p) {
  internal <- p[1] + p[2] * x
  band <- pnorm(p[3] - internal) - pnorm(p[5] - internal)
  pnorm(internal - p[3]) + p[4] * band
}
# Visual exploration of garciafun for two hand-picked parameter sets
# (solid vs dashed); vlines mark the p1 criteria.
xseq <- seq(-2,2,.01)
p1 <- c(0, 4.4, 1.5, 0.6, -1)
p2 <- c(0, 4.4, 1, .4, -1.5)
#p1 <- c(2, 4.4, 1, 0.6, -1)
#p2 <- c(2, 4.4, 1, .4, -1)
yseq <- garciafun(xseq,p1)
yseq2 <- garciafun(xseq,p2)
dd <- data.frame(x=xseq,y=yseq,y2=yseq2)
ggplot(dd) +
geom_line(aes(x=x,y=y)) +
geom_line(aes(x=x,y=y2),lty=2) +
geom_vline(xintercept = c(p1[3],p1[5]))+
ylim(0,1)
### HHT NO SYM xi = .5 ########################################################
# HHT variant with the mixing weight fixed at 0.5 and both criteria shifted
# by a common offset p[5]. mu = p[1] + p[2] * x; p[3] and p[4] are presumed
# criterion locations (confirm against the model write-up). reference picks
# the comparison order; values other than 0/1 return NULL as before.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  hi <- p[3] + p[5]
  lo <- p[4] + p[5]
  if (reference == 0) {
    pnorm(mu - hi) + 0.5 * (pnorm(hi - mu) - pnorm(lo - mu))
  } else if (reference == 1) {
    pnorm(mu + lo) + 0.5 * (pnorm(-lo - mu) - pnorm(-hi - mu))
  }
}
# ML fit of the xi = .5 HHT variant (shifted criteria) for one cell.
# d must contain orLarge, orSmall, response (successes) and n (trials).
# Returns a long data.frame with columns p (estimate) and parn ("p1".."p5").
hhtparfun <- function(d) {
  # Binomial negative log-likelihood under hhtfun; probabilities clamped
  # away from 0/1 so log() stays finite.
  create_nll <- function(d){
    function(p) {
      d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
      d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
      phi1 <- hhtfun(d1$orSmall, p, 0)
      phi2 <- hhtfun(d2$orSmall, p, 1)
      phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
      phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
      phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      sum1 <- -sum(d1$response * log(phi1) +
                   (d1$n - d1$response) * log(1 - phi1))
      sum2 <- -sum(d2$response * log(phi2) +
                   (d2$n - d2$response) * log(1 - phi2))
      sum1 + sum2
    }
  }
  nll <- create_nll(d)
  # Fix: extract `$par` explicitly (the original's `$p` only resolved to
  # `par` via partial matching).
  # NOTE(review): the start value for p[4] (0.5) lies above its upper bound
  # (0) -- confirm the intended bounds/start.
  p <- optim(c(0,1,0, 0.5,1), nll, method = 'L-BFGS-B',
             lower = c(-3,.1, 0,-3,-3),
             upper = c( 3,10, 3, 0,3))$par
  data.frame(p, parn = paste0('p', seq(1, length(p))))
}
# Fit per subject/verticality, build predicted curves, and plot data +
# curves with vlines at the p3/p4/p5 parameter estimates.
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
hhtparlong <- hhtpar %>% spread(parn,p)
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p4'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))
### HHT NO SYM xi = .5 ########################################################
# Simpler xi = .5 variant: two independent criteria p[3] and p[4], no common
# shift. mu = p[1] + p[2] * x; reference picks the comparison order, and
# values other than 0/1 return NULL as in the original.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  switch(as.character(reference),
    "0" = pnorm(mu - p[3]) + 0.5 * (pnorm(p[3] - mu) - pnorm(p[4] - mu)),
    "1" = pnorm(mu + p[4]) + 0.5 * (pnorm(-p[4] - mu) - pnorm(-p[3] - mu)))
}
# ML fit of the simpler xi = .5 HHT variant for one cell.
# d must contain orLarge, orSmall, response (successes) and n (trials).
# Returns a long data.frame with columns p (estimate) and parn ("p1".."p5").
hhtparfun <- function(d) {
  # Binomial negative log-likelihood under hhtfun; probabilities clamped
  # away from 0/1 so log() stays finite.
  create_nll <- function(d){
    function(p) {
      d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
      d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
      phi1 <- hhtfun(d1$orSmall, p, 0)
      phi2 <- hhtfun(d2$orSmall, p, 1)
      phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
      phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
      phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      sum1 <- -sum(d1$response * log(phi1) +
                   (d1$n - d1$response) * log(1 - phi1))
      sum2 <- -sum(d2$response * log(phi2) +
                   (d2$n - d2$response) * log(1 - phi2))
      sum1 + sum2
    }
  }
  nll <- create_nll(d)
  # Fix: extract `$par` explicitly (the original's `$p` only resolved to
  # `par` via partial matching).
  # NOTE(review): 5 starting values but only 4 lower/upper bounds; optim
  # recycles them to length 5 -- confirm the intended bound for p[5].
  p <- optim(c(0,1,0, 0.5,1), nll, method = 'L-BFGS-B',
             lower = c(-3,.1, -3,-3),
             upper = c( 3,10, 3, 3))$par
  data.frame(p, parn = paste0('p', seq(1, length(p))))
}
# Fit per subject/verticality, build predicted curves, and plot data +
# curves; vlines mark p3 and p5 and the dashed line the zero crossing -p1/p2.
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
hhtparlong <- hhtpar %>% spread(parn,p)
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))+
geom_vline(data = hhtparlong, aes(xintercept = -p1/p2),lty=2)
### HHT SYM ####################################################################
# Symmetric HHT variant: a single criterion p[3] mirrored about zero, with
# mixing weight p[4] for reference 0 and (1 - p[4]) for reference 1.
# mu = p[1] + p[2] * x; values of reference other than 0/1 return NULL.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  base <- pnorm(mu - p[3])
  band <- pnorm(p[3] - mu) - pnorm(-p[3] - mu)
  if (reference == 0) {
    base + p[4] * band
  } else if (reference == 1) {
    base + (1 - p[4]) * band
  }
}
# ML fit of the symmetric HHT variant (4 parameters) for one cell.
# d must contain orLarge, orSmall, response (successes) and n (trials).
# Returns a long data.frame with columns p (estimate) and parn ("p1".."p4").
hhtparfun <- function(d) {
  # Binomial negative log-likelihood under hhtfun; probabilities clamped
  # away from 0/1 so log() stays finite.
  create_nll <- function(d){
    function(p) {
      d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
      d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
      phi1 <- hhtfun(d1$orSmall, p, 0)
      phi2 <- hhtfun(d2$orSmall, p, 1)
      phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
      phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
      phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      sum1 <- -sum(d1$response * log(phi1) +
                   (d1$n - d1$response) * log(1 - phi1))
      sum2 <- -sum(d2$response * log(phi2) +
                   (d2$n - d2$response) * log(1 - phi2))
      sum1 + sum2
    }
  }
  nll <- create_nll(d)
  # Fix: extract `$par` explicitly (the original's `$p` only resolved to
  # `par` via partial matching).
  p <- optim(c(0,1,0, 0.55), nll, method = 'L-BFGS-B',
             lower = c(-3,.1, -10, 0),
             upper = c(3, 10, 10,1))$par
  data.frame(p, parn = paste0('p', seq(1, length(p))))
}
# Fit per subject/verticality, build predicted curves, and overlay on the
# averaged data (no criterion vlines for the symmetric variant).
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))
### HHT NO SYM #################################################################
# Asymmetric HHT variant: criteria p[3] and p[5], mixing weight p[4] for
# reference 0 and (1 - p[4]) for reference 1. mu = p[1] + p[2] * x; values
# of reference other than 0/1 return NULL as in the original.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  if (reference == 0) {
    pnorm(mu - p[3]) + p[4] * (pnorm(p[3] - mu) - pnorm(p[5] - mu))
  } else if (reference == 1) {
    pnorm(mu + p[5]) + (1 - p[4]) * (pnorm(-p[5] - mu) - pnorm(-p[3] - mu))
  }
}
# ML fit of the asymmetric HHT variant (5 parameters) for one cell.
# d must contain orLarge, orSmall, response (successes) and n (trials).
# Returns a long data.frame with columns p (estimate) and parn ("p1".."p5").
hhtparfun <- function(d) {
  # Binomial negative log-likelihood under hhtfun; probabilities clamped
  # away from 0/1 so log() stays finite.
  create_nll <- function(d){
    function(p) {
      d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
      d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
      phi1 <- hhtfun(d1$orSmall, p, 0)
      phi2 <- hhtfun(d2$orSmall, p, 1)
      phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
      phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
      phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      sum1 <- -sum(d1$response * log(phi1) +
                   (d1$n - d1$response) * log(1 - phi1))
      sum2 <- -sum(d2$response * log(phi2) +
                   (d2$n - d2$response) * log(1 - phi2))
      sum1 + sum2
    }
  }
  nll <- create_nll(d)
  # Fix: extract `$par` explicitly (the original's `$p` only resolved to
  # `par` via partial matching).
  # NOTE(review): the start value for p[5] (1) lies above its upper bound
  # (0) -- confirm the intended bounds/start.
  p <- optim(c(0,1,0, 0.5,1), nll, method = 'L-BFGS-B',
             lower = c(-3,.1, -3, 0,-3),
             upper = c( 3,10, 0, 1, 0))$par
  data.frame(p, parn = paste0('p', seq(1, length(p))))
}
# Fit per subject/verticality, build predicted curves, and plot; vlines mark
# the p3/p5 criteria, dashed line the internal-response zero crossing -p1/p2.
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
hhtparlong <- hhtpar %>% spread(parn,p)
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))+
geom_vline(data=hhtpar %>% filter(parn=='p3'),aes(xintercept = p))+
geom_vline(data=hhtpar %>% filter(parn=='p5'),aes(xintercept = p))+
geom_vline(data = hhtparlong, aes(xintercept = -p1/p2),lty=2)
# Final HHT variant: criteria p[3] and p[4], mixing weight p[5] for
# reference 0 and (1 - p[5]) for reference 1. mu = p[1] + p[2] * x; values
# of reference other than 0/1 return NULL as in the original.
hhtfun <- function(x, p, reference) {
  mu <- p[1] + p[2] * x
  if (reference == 0) {
    pnorm(mu - p[3]) + p[5] * (pnorm(p[3] - mu) - pnorm(p[4] - mu))
  } else if (reference == 1) {
    pnorm(mu + p[4]) + (1 - p[5]) * (pnorm(-p[4] - mu) - pnorm(-p[3] - mu))
  }
}
# ML fit of the final HHT variant (criteria p3/p4, weight p5) for one cell.
# d must contain orLarge, orSmall, response (successes) and n (trials).
# Returns a long data.frame with columns p (estimate) and parn ("p1".."p5").
hhtparfun <- function(d) {
  # Binomial negative log-likelihood under hhtfun; probabilities clamped
  # away from 0/1 so log() stays finite.
  create_nll <- function(d){
    function(p) {
      d1 <- d %>% filter(orLarge=='Top' | orLarge=='Right')
      d2 <- d %>% filter(orLarge=='Botton' | orLarge=='Left')
      phi1 <- hhtfun(d1$orSmall, p, 0)
      phi2 <- hhtfun(d2$orSmall, p, 1)
      phi1[phi1 < .Machine$double.eps] <- .Machine$double.eps
      phi1[phi1 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      phi2[phi2 < .Machine$double.eps] <- .Machine$double.eps
      phi2[phi2 > (1 - .Machine$double.eps)] <- 1 - .Machine$double.eps
      sum1 <- -sum(d1$response * log(phi1) +
                   (d1$n - d1$response) * log(1 - phi1))
      sum2 <- -sum(d2$response * log(phi2) +
                   (d2$n - d2$response) * log(1 - phi2))
      sum1 + sum2
    }
  }
  nll <- create_nll(d)
  # Fix: extract `$par` explicitly (the original's `$p` only resolved to
  # `par` via partial matching).
  p <- optim(c(0,1,-2,2, 0.55), nll, method = 'L-BFGS-B',
             lower = c(-3,.1, -10, -10, 0),
             upper = c(3, 10, 10, 10,1))$par
  data.frame(p, parn = paste0('p', seq(1, length(p))))
}
# Fit per subject/verticality, build predicted curves, and overlay on the
# averaged data.
hhtpar <- fitcomp$averages %>% group_by(subject, vertical) %>% do(hhtparfun(.))
hhtcurvesfun <- function(d) {
x <- seq(min(fitcomp$curves$x),max(fitcomp$curves$x),.01)
y1 <- hhtfun(x, d$p, 0)
y2 <- hhtfun(x, d$p, 1)
cond1 <- ifelse(first(d$vertical), 'Top','Right')
cond2 <- ifelse(first(d$vertical), 'Botton','Left')
rbind(data.frame(x, y = y1) %>% mutate(orLarge = cond1),
data.frame(x, y = y2) %>% mutate(orLarge = cond2))
}
hhtcurves <- hhtpar %>% do(hhtcurvesfun(.))
ggplot()+ facet_grid(subject~vertical)+
geom_point(data=fitcomp$averages, aes(x=orSmall,y=prob,color=orLarge))+
geom_line(data=hhtcurves,aes(x=x,y=y,color=orLarge))
# NOTE(review): detached layer chain -- the leading bare '+' means these
# layers are NOT added to the plot above and sourcing this errors. They
# reference sdtperceptualbiasparlong from a different (SDT) analysis and
# look like leftovers; consider deleting or re-attaching to the right plot.
+
geom_vline(xintercept = 0, lty = 2) +
geom_hline(yintercept = 0.5, lty = 2) +
geom_vline(data = sdtperceptualbiasparlong, aes(xintercept = -(p3-p2)/p1)) +
geom_vline(data = sdtperceptualbiasparlong, aes(xintercept = -(p3+p2)/p1)) +
geom_vline(data = sdtperceptualbiasparlong, aes(xintercept = -p3/p1))
|
# Clear out the R-Environment
# NOTE(review): rm(list=ls()) in a sourced script wipes the caller's
# workspace; generally discouraged -- prefer running in a fresh session.
rm(list=ls())
# Load packages
library(metafor)
library(plyr)
# Function to calculate the effect sizes both ways
# Compute log response-ratio (lnRR) effect sizes with several strategies for
# handling missing SDs:
#   yi_miss / vi_miss : each row's own CVs (NA where the SD is missing)
#   yi_1B  / vi_1B    : the study-size-weighted pooled CV^2 for every row
#   yi_1A  / vi_1A    : row-level values, falling back to 1B where SDs are missing
# Returns a 2-element list: [[1]] a data.frame of effect sizes/variances,
# [[2]] a diagonal matrix of the 1B sampling variances (for "method 2" models).
# Base R (split/vapply) replaces the previous in-function require(plyr) +
# ddply; the pooled means are identical because only overall weighted means
# of the per-study values are used.
my_calc_es <- function(c_mean, c_sd, c_n, t_mean, t_sd, t_n, study_id){
  # Bind the inputs into one data frame, one row per effect size
  data <- data.frame(c_mean = c_mean, c_sd = c_sd, c_n = c_n,
                     t_mean = t_mean, t_sd = t_sd, t_n = t_n,
                     study_id = study_id)
  # Coefficients of variation (NA wherever the SD is missing)
  data$c_cv <- data$c_sd / data$c_mean
  data$t_cv <- data$t_sd / data$t_mean
  # Pool CV^2 within each study (weighted by sample size), then average the
  # study-level values weighted by each study's mean sample size
  by_study <- split(data, data$study_id)
  c_cv2_study <- vapply(by_study,
                        function(g) weighted.mean(g$c_cv^2, g$c_n, na.rm = TRUE),
                        numeric(1))
  t_cv2_study <- vapply(by_study,
                        function(g) weighted.mean(g$t_cv^2, g$t_n, na.rm = TRUE),
                        numeric(1))
  c_n_study <- vapply(by_study, function(g) mean(g$c_n, na.rm = TRUE), numeric(1))
  t_n_study <- vapply(by_study, function(g) mean(g$t_n, na.rm = TRUE), numeric(1))
  mean_c_cv2 <- weighted.mean(c_cv2_study, c_n_study, na.rm = TRUE)
  mean_t_cv2 <- weighted.mean(t_cv2_study, t_n_study, na.rm = TRUE)
  # Row-level lnRR and sampling variance (second-order bias corrections)
  yi_miss <- log(data$t_mean / data$c_mean) +
    0.5 * (data$t_cv^2 / data$t_n - data$c_cv^2 / data$c_n)
  vi_miss <- data$t_cv^2 / data$t_n + data$c_cv^2 / data$c_n +
    data$t_cv^4 / (2 * data$t_n^2) + data$c_cv^4 / (2 * data$c_n^2)
  # Method 1B: same formulas but with the pooled CV^2 for every row
  yi_1B <- log(data$t_mean / data$c_mean) +
    0.5 * (mean_t_cv2 / data$t_n - mean_c_cv2 / data$c_n)
  vi_1B <- mean_t_cv2 / data$t_n + mean_c_cv2 / data$c_n +
    mean_t_cv2^2 / (2 * data$t_n^2) + mean_c_cv2^2 / (2 * data$c_n^2)
  # Method 1A: row-level values where SDs are known, 1B values where missing
  yi_1A <- yi_miss
  vi_1A <- vi_miss
  missing <- which(is.na(data$c_sd))
  yi_1A[missing] <- yi_1B[missing]
  vi_1A[missing] <- vi_1B[missing]
  # Method 2 matrix: method 1B variances on the diagonal, named "1".."k"
  Vf <- diag(vi_1B)
  row.names(Vf) <- seq_len(length(yi_1A))
  colnames(Vf) <- seq_len(length(yi_1A))
  # First element: effect sizes; second: the method-2 matrix
  output <- list()
  output[[1]] <- data.frame(ES_ID = seq_len(length(yi_1A)),
                            yi_miss = yi_miss, vi_miss = vi_miss,
                            yi_1A = yi_1A, vi_1A = vi_1A,
                            yi_1B = yi_1B, vi_1B = vi_1B)
  output[[2]] <- Vf
  return(output)
}
# Load the data
data<-read.csv("./example/worked1/data_test.csv")
head(data)
# Lets follow their lead and split by grazing type as per the paper. Here I will only re-analyse the CG group
data<-data[which(data$Comparison == "CG-SRG"),]
# Double check some stuff
# How many effect sizes
dim(data)[1]
# 173
# How many studies
length(unique(data$Study))
# 67 Studies
# How many grazing organisms
length(unique(data$Stock.type))
# 4, which are
unique(data$Stock.type)
# Is there a common control issue
# (control_ID identifies a unique control group within a study)
data$control_ID<-paste0(data$Study, "_", data$Common.control)
length(unique(data$control_ID))
length((data$control_ID))
# Yes some studies have the issue
# Which are they
check<-ddply(data, .(Study), summarise, n_controls=length(unique(control_ID)), n_effects=length(control_ID))
shared<-check[which(check$n_controls != check$n_effects),]
shared
dim(shared)[1] / dim(check)[1]
# 13.4% have a shared control issue to some degree
# Double check they really are shared control - should be no variance within shared control
check<-ddply(data, .(control_ID), summarise, length(CN), sd(CN, na.rm=T), sd(CM, na.rm=T))
check[which(check[,2] > 1),]
# Yep looks good
# Can also check number of effect sizes with shared control issue here
sum(check[which(check[,2] > 1),2]) / dim(data)[1]
# 13.9% of effect sizes
# How much missing SD data?
plot(is.na(data$CSD), is.na(data$TSD))
# If control is missing, so is treatment - always here
sum(is.na(data$CSD)) / dim(data)[1]
# 35.8% of effects sizes have missing sds
# Function to calculate the effect sizes the four different ways
# Originally Macdonald et al handle the missing SD data by calculating the mean CV (for each grazing type, CG-SRG and UG-SRG) for the present data, then use this with the known mean to impute the SD. Essentially impute as a function of the mean assuming a linear assocation between mean and SD. This is very similar to our method 1A, but without weighted average and not on CV^2
# Now calculate effect sizes
data<-cbind(data, my_calc_es(c_mean=data$CM, c_sd=data$CSD, c_n=data$CN, t_mean=data$TM, t_sd=data$TSD, t_n=data$TN, study_id=data$Study)[[1]])
data$ES_ID<-as.factor(seq(1, nrow(data), 1))
# Model 1A: MLMA of missing SD effect sizes using pooled CV2
MLMA1A<-rma.mv(yi = yi_1A, V = vi_1A, random=list(~1|Study, ~1|ES_ID), data=data)
summary(MLMA1A)
# Model 1B: MLMA of all effect sizes using pooled CV2
MLMA1B<-rma.mv(yi = yi_1B, V = vi_1B, random=list(~1|Study, ~1|ES_ID), data=data)
summary(MLMA1B)
# Model 2: weighted regression of all effect sizes using pooled CV2
# Note here and below I analyse here with effect sizes estimated as the mix of known CV and mean CV - doesn't have to be though
# V = 0: the sampling variances enter instead via the ES_ID2 random effect
# with known correlation matrix Vf (Rscale=F keeps Vf unscaled).
Vf<-my_calc_es(c_mean=data$CM, c_sd=data$CSD, c_n=data$CN, t_mean=data$TM, t_sd=data$TSD, t_n=data$TN, study_id=data$Study)[[2]]
data$ES_ID2<-rownames(Vf)
MLMA2<-rma.mv(yi = yi_1A, V = 0, random=list(~1|Study, ~1|ES_ID, ~1|ES_ID2), data=data, R=list(ES_ID2=Vf), Rscale=F)
summary(MLMA2)
# Model 3: add 0s in the matrix where SDs are not missing, and to vi where they are
# NOTE(review): if no effect sizes were missing, `missing` would be empty and
# `diag(Vf)[-missing]` would zero nothing (empty negative index); fine for
# this dataset (35.8% missing) but not general -- confirm if reused.
missing<-which(is.na(data$yi_miss) == T)
diag(Vf)[-missing]<-0
data$vi_3<-data$vi_miss
data$vi_3[missing]<-0
MLMA3<-rma.mv(yi = yi_1A, V = vi_3, random=list(~1|Study, ~1|ES_ID, ~1|ES_ID2), data=data, R=list(ES_ID2=Vf), Rscale=F)
summary(MLMA3)
# Package up as table
results<-data.frame(method=c("1A", "1B", "2", "3"), b=c(MLMA1A$b, MLMA1B$b, MLMA2$b, MLMA3$b), ci.lb=c(MLMA1A$ci.lb, MLMA1B$ci.lb, MLMA2$ci.lb, MLMA3$ci.lb), ci.ub=c(MLMA1A$ci.ub, MLMA1B$ci.ub, MLMA2$ci.ub, MLMA3$ci.ub), tau2=c(sum(MLMA1A$sigma2), sum(MLMA1B$sigma2), sum(MLMA2$sigma2), sum(MLMA3$sigma2)))
write.table(results, file="Example.results.csv", sep=",", row.names=F, col.names=names(results))
| /example/worked2/Worked_Example.R | no_license | AlistairMcNairSenior/Miss_SD_Sim | R | false | false | 5,740 | r |
# Clear out the R-Environment
# NOTE(review): rm(list = ls()) wipes the caller's global environment; acceptable for an
# interactive worked example, but do not source() this file from another live session.
rm(list=ls())
# Load packages: metafor supplies the multilevel meta-analytic models (rma.mv);
# plyr supplies the grouped summaries (ddply) used in the checks below
library(metafor)
library(plyr)
# Function to calculate the effect sizes both ways.
#
# Computes log response-ratio (lnRR) effect sizes and sampling variances under several
# strategies for handling missing SDs:
#   yi_miss / vi_miss : lnRR using each row's own CVs (NA where SDs are missing); the
#                       yi includes a second-order correction term based on CV^2/n
#   yi_1B  / vi_1B    : lnRR for ALL rows using the across-study weighted mean CV^2
#   yi_1A  / vi_1A    : yi_miss/vi_miss where SDs are known, filled in from 1B where missing
# Also builds Vf, a diagonal matrix of vi_1B used as the R matrix for "method 2".
#
# Args:
#   c_mean, c_sd, c_n : control-group mean, SD and sample size (one element per effect)
#   t_mean, t_sd, t_n : treatment-group mean, SD and sample size
#   study_id          : study identifier; CV^2 is pooled within study (n-weighted) before
#                       taking the n-weighted mean across studies
# Returns:
#   A list: [[1]] data.frame of effect sizes/variances (one row per input effect),
#           [[2]] the diagonal matrix Vf with row/col names "1".."n".
my_calc_es <- function(c_mean, c_sd, c_n, t_mean, t_sd, t_n, study_id){
  # bind all together
  data <- data.frame(c_mean = c_mean, c_sd = c_sd, c_n = c_n,
                     t_mean = t_mean, t_sd = t_sd, t_n = t_n,
                     study_id = study_id)

  # Coefficient of variation for each arm
  data$c_cv <- data$c_sd / data$c_mean
  data$t_cv <- data$t_sd / data$t_mean

  # Pool CV^2 within each study (weighted by n), then take the n-weighted mean across
  # studies. Implemented with base split()/vapply() so the function no longer needs
  # plyr::ddply() and is dependency-free.
  by_study <- split(data, data$study_id)
  pool_cv2 <- function(d, cv_col, n_col) {
    weighted.mean(d[[cv_col]]^2, d[[n_col]], na.rm = TRUE)
  }
  c_cv2   <- vapply(by_study, pool_cv2, numeric(1), cv_col = "c_cv", n_col = "c_n")
  t_cv2   <- vapply(by_study, pool_cv2, numeric(1), cv_col = "t_cv", n_col = "t_n")
  c_n_bar <- vapply(by_study, function(d) mean(d$c_n, na.rm = TRUE), numeric(1))
  t_n_bar <- vapply(by_study, function(d) mean(d$t_n, na.rm = TRUE), numeric(1))
  mean_c_cv2 <- weighted.mean(c_cv2, c_n_bar, na.rm = TRUE)
  mean_t_cv2 <- weighted.mean(t_cv2, t_n_bar, na.rm = TRUE)

  # lnRR and sampling variance from each row's own CVs (NA where SDs are missing)
  yi_miss <- log(data$t_mean / data$c_mean) +
    0.5 * (data$t_cv^2 / data$t_n - data$c_cv^2 / data$c_n)
  vi_miss <- data$t_cv^2 / data$t_n + data$c_cv^2 / data$c_n +
    data$t_cv^4 / (2 * data$t_n^2) + data$c_cv^4 / (2 * data$c_n^2)

  # Same quantities computed from the pooled mean CV^2, available for every row
  yi_1B <- log(data$t_mean / data$c_mean) +
    0.5 * (mean_t_cv2 / data$t_n - mean_c_cv2 / data$c_n)
  vi_1B <- mean_t_cv2 / data$t_n + mean_c_cv2 / data$c_n +
    mean_t_cv2^2 / (2 * data$t_n^2) + mean_c_cv2^2 / (2 * data$c_n^2)

  # Method 1A: observed-SD version where possible, pooled version where SDs are missing
  yi_1A <- yi_miss
  vi_1A <- vi_miss
  missing <- which(is.na(data$c_sd))
  yi_1A[missing] <- yi_1B[missing]
  vi_1A[missing] <- vi_1B[missing]

  # Matrix for method 2 - just vi_1B on the diagonal. nrow is given explicitly because
  # diag(x) with a length-1 x would otherwise build an x-by-x identity matrix.
  n <- length(yi_1A)
  Vf <- diag(vi_1B, nrow = n)
  rownames(Vf) <- seq_len(n)
  colnames(Vf) <- seq_len(n)

  # Package up and return as list - first object is effect sizes, second is matrix for method 2
  list(
    data.frame(ES_ID = seq_len(n), yi_miss = yi_miss, vi_miss = vi_miss,
               yi_1A = yi_1A, vi_1A = vi_1A, yi_1B = yi_1B, vi_1B = vi_1B),
    Vf
  )
}
# ---- Data loading and sanity checks ----
# This section is interactive/diagnostic: each bare expression prints to the console and
# the hard-coded comments below record the values observed for this dataset.
# Load the data
data<-read.csv("./example/worked1/data_test.csv")
head(data)
# Lets follow their lead and split by grazing type as per the paper. Here I will only re-analyse the CG group
data<-data[which(data$Comparison == "CG-SRG"),]
# Double check some stuff
# How many effect sizes
dim(data)[1]
# 173
# How many studies
length(unique(data$Study))
# 67 Studies
# How many grazing organisms
length(unique(data$Stock.type))
# 4, which are
unique(data$Stock.type)
# Is there a common control issue
# control_ID uniquely identifies a control group within a study; if several rows share one
# control_ID, those effect sizes share a control arm (non-independent sampling errors)
data$control_ID<-paste0(data$Study, "_", data$Common.control)
length(unique(data$control_ID))
length((data$control_ID))
# Yes some studies have the issue
# Which are they
check<-ddply(data, .(Study), summarise, n_controls=length(unique(control_ID)), n_effects=length(control_ID))
shared<-check[which(check$n_controls != check$n_effects),]
shared
dim(shared)[1] / dim(check)[1]
# 13.4% have a shared control issue to some degree
# Double check they really are shared control - should be no variance within shared control
# NOTE(review): the summarise expressions below are unnamed, so the resulting columns get
# auto-generated names and must be addressed positionally (check[,2] = group size)
check<-ddply(data, .(control_ID), summarise, length(CN), sd(CN, na.rm=T), sd(CM, na.rm=T))
check[which(check[,2] > 1),]
# Yep looks good
# Can also check number of effect sizes with shared control issue here
sum(check[which(check[,2] > 1),2]) / dim(data)[1]
# 13.9% of effect sizes
# How much missing SD data?
# Visual check: missingness in control vs treatment SDs should co-occur
plot(is.na(data$CSD), is.na(data$TSD))
# If control is missing, so is treatment - always here
sum(is.na(data$CSD)) / dim(data)[1]
# 35.8% of effects sizes have missing sds
# Function to calculate the effect sizes the four different ways
# Originally Macdonald et al handle the missing SD data by calculating the mean CV (for each
# grazing type, CG-SRG and UG-SRG) for the present data, then use this with the known mean to
# impute the SD. Essentially impute as a function of the mean assuming a linear association
# between mean and SD. This is very similar to our method 1A, but without weighted average and
# not on CV^2.

# Now calculate effect sizes. Call my_calc_es() once and reuse both elements of the returned
# list: [[1]] is the per-row effect-size data frame, [[2]] the Vf matrix for method 2.
# (The original code re-ran the whole calculation a second time just to get [[2]].)
es <- my_calc_es(c_mean = data$CM, c_sd = data$CSD, c_n = data$CN,
                 t_mean = data$TM, t_sd = data$TSD, t_n = data$TN,
                 study_id = data$Study)
data <- cbind(data, es[[1]])
data$ES_ID <- as.factor(seq_len(nrow(data)))

# Model 1A: MLMA of missing SD effect sizes using pooled CV2
MLMA1A <- rma.mv(yi = yi_1A, V = vi_1A, random = list(~1|Study, ~1|ES_ID), data = data)
summary(MLMA1A)

# Model 1B: MLMA of all effect sizes using pooled CV2
MLMA1B <- rma.mv(yi = yi_1B, V = vi_1B, random = list(~1|Study, ~1|ES_ID), data = data)
summary(MLMA1B)

# Model 2: weighted regression of all effect sizes using pooled CV2
# Note here and below I analyse here with effect sizes estimated as the mix of known CV and
# mean CV - doesn't have to be though
Vf <- es[[2]]
data$ES_ID2 <- rownames(Vf)
MLMA2 <- rma.mv(yi = yi_1A, V = 0, random = list(~1|Study, ~1|ES_ID, ~1|ES_ID2),
                data = data, R = list(ES_ID2 = Vf), Rscale = FALSE)
summary(MLMA2)

# Model 3: add 0s in the matrix where SDs are not missing, and to vi where they are.
# A logical mask is used instead of negative indexing so the "no missing SDs" edge case
# still zeroes the whole diagonal (x[-integer(0)] selects nothing, silently doing no work).
is_missing <- is.na(data$yi_miss)
diag(Vf)[!is_missing] <- 0
data$vi_3 <- data$vi_miss
data$vi_3[is_missing] <- 0
MLMA3 <- rma.mv(yi = yi_1A, V = vi_3, random = list(~1|Study, ~1|ES_ID, ~1|ES_ID2),
                data = data, R = list(ES_ID2 = Vf), Rscale = FALSE)
summary(MLMA3)

# Package up the pooled estimates, 95% CIs and summed variance components as a table
results <- data.frame(
  method = c("1A", "1B", "2", "3"),
  b = c(MLMA1A$b, MLMA1B$b, MLMA2$b, MLMA3$b),
  ci.lb = c(MLMA1A$ci.lb, MLMA1B$ci.lb, MLMA2$ci.lb, MLMA3$ci.lb),
  ci.ub = c(MLMA1A$ci.ub, MLMA1B$ci.ub, MLMA2$ci.ub, MLMA3$ci.ub),
  tau2 = c(sum(MLMA1A$sigma2), sum(MLMA1B$sigma2), sum(MLMA2$sigma2), sum(MLMA3$sigma2))
)
write.table(results, file = "Example.results.csv", sep = ",", row.names = FALSE, col.names = TRUE)
|
#' GMLAbstractGeneralOperationParameter
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO GML abstract general operation parameter
#' @return Object of \code{\link{R6Class}} for modelling an GMLAbstractGeneralOperationParameter
#' @format \code{\link{R6Class}} object.
#'
#' @references
#' ISO 19136:2007 Geographic Information -- Geographic Markup Language.
#' http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=32554
#'
#' OGC Geography Markup Language. http://www.opengeospatial.org/standards/gml
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
GMLAbstractGeneralOperationParameter <- R6Class("GMLAbstractGeneralOperationParameter",
  inherit = GMLDefinition,
  # XML binding metadata: element name and namespace prefix used during (de)serialization
  private = list(
    xmlElement = "AbstractGeneralOperationParameter",
    xmlNamespacePrefix = "GML"
  ),
  public = list(
    #'@field minimumOccurs minimumOccurs [0..1]: integer
    minimumOccurs = NULL,
    #'@description Set minimum occurs
    #'@param minimumOccurs object of class \link{integer}
    setMinimumOccurs = function(minimumOccurs){
      # Coerce non-integer input (e.g. a double or numeric string);
      # as.integer() yields NA on failed coercion, which is rejected below
      if(!is(minimumOccurs, "integer")){
        minimumOccurs <- as.integer(minimumOccurs)
        if(is.na(minimumOccurs)){
          stop("The argument value should be an object of class 'integer'")
        }
      }
      # Wrap in a GMLElement - presumably serialized as a gml:minimumOccurs
      # element; confirm against GMLElement$create
      self$minimumOccurs <- GMLElement$create("minimumOccurs", value = minimumOccurs)
    }
  )
) | /R/GMLAbstractGeneralOperationParameter.R | no_license | cran/geometa | R | false | false | 1,494 | r | #' GMLAbstractGeneralOperationParameter
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO GML abstract general operation parameter
#' @return Object of \code{\link{R6Class}} for modelling an GMLAbstractGeneralOperationParameter
#' @format \code{\link{R6Class}} object.
#'
#' @references
#' ISO 19136:2007 Geographic Information -- Geographic Markup Language.
#' http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=32554
#'
#' OGC Geography Markup Language. http://www.opengeospatial.org/standards/gml
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
GMLAbstractGeneralOperationParameter <- R6Class("GMLAbstractGeneralOperationParameter",
  inherit = GMLDefinition,
  # XML binding metadata used during (de)serialization
  private = list(
    xmlElement = "AbstractGeneralOperationParameter",
    xmlNamespacePrefix = "GML"
  ),
  public = list(
    #'@field minimumOccurs minimumOccurs [0..1]: integer
    minimumOccurs = NULL,
    #'@description Sets the minimum number of occurrences
    #'@param minimumOccurs object of class \link{integer}
    setMinimumOccurs = function(minimumOccurs){
      value <- minimumOccurs
      if (!is(value, "integer")) {
        # attempt coercion; a failed coercion yields NA and is rejected
        value <- as.integer(value)
        if (is.na(value)) {
          stop("The argument value should be an object of class 'integer'")
        }
      }
      self$minimumOccurs <- GMLElement$create("minimumOccurs", value = value)
    }
  )
)
library(shiny)
library(leaflet)
library(tidyverse)
library(plotly)
source("read_data.R")
# Per-meter daily kWh figures: join meter metadata onto the daily readings, keep only
# readings that increase monotonically within a meter, difference consecutive values,
# and drop outliers. CurrentValue appears to be a cumulative meter reading - confirm
# against read_data.R.
diffData <- dataDaily %>%
  # NOTE(review): `config %>% drop` pipes a data frame into base::drop(), which is meant
  # for arrays - possibly a leftover from a dplyr::select(-...) call; confirm intent.
  left_join((config %>% drop), by=c("MeterID", "Units")) %>%
  filter(.$Units == "kWh") %>%
  # arrange(-Time) %>%
  group_by(MeterID) %>%
  # NOTE(review): the lag/lead filter pair is duplicated - possibly a deliberate second
  # pass to re-check values exposed by the first pass, possibly a paste error; confirm.
  filter(CurrentValue > lag(CurrentValue)) %>%
  filter(lead(CurrentValue) > CurrentValue) %>%
  filter(CurrentValue > lag(CurrentValue)) %>%
  filter(lead(CurrentValue) > CurrentValue) %>%
  # Daily consumption = difference of consecutive readings within a meter
  mutate(Diff = CurrentValue - lag(CurrentValue)) %>%
  drop_na(Diff) %>%
  # Drop outliers more than 1.5 SD from the per-meter median (groups still active here)
  filter(!(abs(Diff - median(Diff)) > 1.5*sd(Diff)))
shinyServer(function(input, output, session){
  # Map with one marker per building; layerId carries BuildingID so a click can be
  # identified via input$map_marker_click$id below
  output$map <- renderLeaflet({
    leaflet() %>%
      addTiles() %>% # Add default OpenStreetMap map tiles
      addMarkers(lng=config$Longitude, lat=config$Latitude, popup=config$BuildingName, layerId = config$BuildingID)
  })
  # Scatter plot of daily usage (Diff over Time) for the clicked building;
  # renders nothing until a marker has been clicked
  output$building_graph_ui <- renderPlot({
    if (is.null(input$map_marker_click$id )) {
      return()
    }
    # print("Rendering new graph")
    data <- diffData %>% filter(BuildingID == input$map_marker_click$id)
    # print(data)
    ggplot(data, aes(x = Time, y = Diff, color = Description.x)) + geom_point()
    # return(plotOutput())
    #
    # ggplotly(p)
  })
}) | /server.R | no_license | nickgros/hackOHI.O | R | false | false | 1,240 | r | library(shiny)
library(leaflet)
library(tidyverse)
library(plotly)
source("read_data.R")
diffData <- dataDaily %>%
left_join((config %>% drop), by=c("MeterID", "Units")) %>%
filter(.$Units == "kWh") %>%
# arrange(-Time) %>%
group_by(MeterID) %>%
filter(CurrentValue > lag(CurrentValue)) %>%
filter(lead(CurrentValue) > CurrentValue) %>%
filter(CurrentValue > lag(CurrentValue)) %>%
filter(lead(CurrentValue) > CurrentValue) %>%
mutate(Diff = CurrentValue - lag(CurrentValue)) %>%
drop_na(Diff) %>%
filter(!(abs(Diff - median(Diff)) > 1.5*sd(Diff)))
# Shiny server: draws the building map and, on marker click, a per-building
# daily-usage scatter plot.
shinyServer(function(input, output, session) {

  # Leaflet map with one marker per building; the marker's layerId carries the
  # BuildingID so clicks can be matched back to the usage data.
  output$map <- renderLeaflet({
    leaflet() %>%
      addTiles() %>%
      addMarkers(
        lng = config$Longitude,
        lat = config$Latitude,
        popup = config$BuildingName,
        layerId = config$BuildingID
      )
  })

  # Scatter plot of daily usage (Diff over Time) for the clicked building;
  # renders nothing until a marker has been clicked.
  output$building_graph_ui <- renderPlot({
    clicked_id <- input$map_marker_click$id
    if (is.null(clicked_id)) {
      return()
    }
    building_data <- diffData %>% filter(BuildingID == clicked_id)
    ggplot(building_data, aes(x = Time, y = Diff, color = Description.x)) +
      geom_point()
  })
})
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.