content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Unroot a phylogenetic tree read from a Newick file (requires the `ape` package).
library(ape)
# Read the input tree; the Newick file is expected in the working directory.
testtree <- read.tree("5873_8.txt")
# Remove the root so downstream tools receive an unrooted topology.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5873_8_unrooted.txt") | /codeml_files/newick_trees_processed/5873_8/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
# Load the tree from its Newick file, drop the root, and write the
# unrooted topology to a companion output file.
phylo <- read.tree("5873_8.txt")
write.tree(unroot(phylo), file = "5873_8_unrooted.txt")
##### Template R code for mapping of woodland units from known points.
##### Colin Mahony, UBC Forestry, 778-288-4008, c_mahony@alumni.ubc.ca
##### November 4th, 2016
# NOTE(review): rm(list=ls()) silently wipes the caller's entire workspace;
# this is a harmful side effect for a script that may be sourced interactively.
rm(list=ls()) #clean the workspace so all previous objects are deleted
#Download projects that are not currently downloaded and load packages
# Install any of the requested packages that are not yet present, then
# attach all of them. Returns sapply()'s named logical vector reporting
# whether each package could be attached (the require() result).
ipak <- function(pkg) {
  not_installed <- pkg[!pkg %in% installed.packages()[, "Package"]]
  if (length(not_installed) > 0) {
    install.packages(not_installed, dependencies = TRUE)
  }
  sapply(pkg, require, character.only = TRUE)
}
# Packages needed by the workflow (spatial stack + randomForest classifier).
pkgs = c("scales","MASS", "stats", "rgl", "RColorBrewer", "FNN", "igraph", "raster", "maps"
, "maptools", "sp", "colorRamps", "rgeos", "rgdal", "foreign", "randomForest")
ipak(pkgs)
# get Parameters
# NOTE(review): arc.progress_label()/arc.progress_pos() and the in_params/
# out_params lists are supplied by the arcgisbinding script-tool runtime;
# this script is presumably meant to run inside ArcGIS, not stand-alone --
# TODO confirm.
arc.progress_label("Reading parameters...")
arc.progress_pos(0)
Workspace = in_params[[1]]       # folder used as the working directory
ClimateBC_CSV = in_params[[2]]   # ClimateBC/ClimateNA grid-point CSV
Training_Pts = in_params[[3]]    # training points dataset
Output_Raster = out_params[[1]]  # path of the classified raster to write
## need to create this folder and a "Results" and "InputData" folder in it.
setwd(Workspace)
### generate a data frame of analysis variables for the grid points.
grid.ref <- read.csv(ClimateBC_CSV, strip.white = TRUE, na.strings = c("NA","",-9999) )
# Column 6 is NA for DEM cells outside the ClimateNA extent; keep those row
# indices so they can be excluded from every later object.
nonCNA <- which(is.na(grid.ref[,6])) # dem cells outside climateNA extent. need this for later
arc.progress_label("Beginning ")
arc.progress_pos(0)
#######WILLS CODE FOR CLIMATE BC VARIABLES TO USE####
##Expects data with PlotNo, BGC, Lat, long, Elev as first 5 columns and ALL Variable output from ClimateWNA
####modify
# NOTE(review): X1 is never created in this file -- this section assumes a
# data frame X1 already exists in the workspace, so the script fails here
# when run top-to-bottom. Confirm where X1 is built.
colnames(X1)[1]=c("BGC")
colnames(X1)[2]=c("_")
records <- nrow(X1)
##X1$PlotNo <- as.integer(seq(from = 1, to = records, by =1))
attr(X1, "row.names") <- (X1$PlotNo)
# Keep a copy of the identifier/location columns before they are dropped.
X2 <- X1 [, c("PlotNo", "BGC", "Latitude", "Longitude", "Elevation")]
X1=X1[,-c(1,3:5)]
# Drop
X1$BGC <- as.factor(X1$BGC)
# NOTE(review): `fname` is undefined in this file -- TODO confirm its origin.
save(X1,file=paste(fname,".Rda",sep=""))
# Derive named seasonal/monthly climate variables from the ClimateWNA output.
X1$CMDMax <- X1$CMD07
X1$PPTJune <- X1$PPT06
X1$CMDJune <- X1$CMD06
X1$TMaxJune <- X1$TMax06
X1$DD_18June <- X1$DD_18_06
X1$DD5May <- X1$DD5_05
X1$CMD.grow <- X1$CMD05 + X1$CMD06 +X1$CMD07 +X1$CMD08 +X1$CMD09
X1$PPT.dormant <- X1$PPT_at + X1$PPT_wt
# Dormant-season precipitation below 500 mm counts toward the moisture deficit;
# negative deficits are clamped to zero.
X1$CMD.def <- 500 - (X1$PPT.dormant)
X1$CMD.def [X1$CMD.def < 0] <- 0
X1$CMD.total <- X1$CMD.def + X1$CMD
X1save = X1
#### Choose Biologically Interpretable Variables Only:
# Candidate predictors grouped by theme: temperature, precipitation, and
# moisture/other indices.
TEMP.list=c("Tmax_sp","Tmax_sm","Tmin_wt","Tmin_sp","Tmin_sm","Tmin_at","DD_0_sp", "DD_0_at",
"DD5_sp","DD5_sm","DD5_at","DD_18_sp","DD_18_sm","TMaxJune",
"MAT","MWMT","MCMT","DD5","EMT","EXT", "DD_18June", "DD5May")
PPT.list=c("PPTJune", "PPT_sp", "PPT_sm", "PPT_at", "PPT_wt" ,"MSP", "MAP","PAS", "PPT.dormant")
OTHER.list=c("CMD_sp","CMD_sm","CMDMax","AHM","SHM","NFFD","bFFP","FFP","CMD", "CMD.grow", "CMDJune", "CMD.def", "CMD.total")
ClimateVar=c(TEMP.list,PPT.list,OTHER.list)
List=c("BGC")
X1save = X1
#############Create final data set based on Options selected above
X1$BGC <- as.factor(X1$BGC)
# Keep only the response (BGC) plus the selected climate variables.
X1=X1[,names(X1) %in% c(List,ClimateVar)]
# NOTE(review): `fname` is undefined in this file -- TODO confirm.
save(X1,file=paste(fname,"_DataSet",".Rda",sep=""))
####################
#########END OF WILLS CODE
###########################
#select predictor variables
predictors <- names(grid.ref)[-grep("id|tude|Elev|MAR|RH|18",names(grid.ref))] #remove selected variables from the variable set (could keep elevation in... perhaps it will be a good predictor)
X.grid.ref <- grid.ref[-nonCNA,which(names(grid.ref)%in%predictors)] #data frame of analysis variables. removes NA values
sum(is.na(X.grid.ref)) #check if there are any NA values. the answer should be "0".
##log-transform zero-limited variables
# Columns whose values are bounded below by zero (precip, degree-days, etc.).
zerolim <- grep("MAP|MSP|PAS|DD|CMD|FF|ref",names(X.grid.ref))
for(i in zerolim){X.grid.ref[which(X.grid.ref[,i]==0),i] <- 1} #set zero values to one, to facilitate log-transformation
X.grid.ref[,zerolim] <- log(X.grid.ref[,zerolim]) #log-transform
# NOTE(review): the backslash path is Windows-only; file.path("InputData", ...)
# would be portable.
write.csv(X.grid.ref,"InputData\\X.grid.ref.csv", row.names=FALSE)
############
## Analysis
############
# NOTE(review): BGCv10.pts is not created in this file -- it is assumed to
# exist in the workspace. TODO confirm where it is loaded.
BGCv10.pts <- droplevels(BGCv10.pts[-nonCNA,]) #remove NA points and unused factor levels
BGC <- BGCv10.pts$MAP_LABEL
## create three classes: subalpine, parkland, and alpine
# NOTE(review): the name `class` masks base::class() for the rest of the script.
class <- rep(NA, length(BGC))
# Explicit parkland subzone labels -> "parkland".
class[BGC%in%c("ESSFmcp", "ESSFmkp", "ESSFmvp", "ESSFunp", "ESSFwvp", "MHunp", "MHwhp")] <- "parkland"
# Any BAFA/CMA zone -> "alpine".
class[grep("BAFA|CMA", BGC)] <- "alpine"
# Everything else -> "subalpine".
class[is.na(class)] <- "subalpine"
class <- factor(class)
# # map the three classes
# par(mfrow=c(1,1))
# par(mar=c(0,0,0,0))
# ColScheme <- c("dodgerblue", "yellow", "black")
# Build the output raster: use the DEM grid as a template, blank it, then
# write the factor codes of `class` into the land cells.
# NOTE(review): `dem` and `land` are not defined in this file -- they are
# assumed to exist in the workspace (dem = template RasterLayer, land =
# indices of land cells). TODO confirm where they are created.
X <- dem #uses dem as a template raster
values(X) <- NA
values(X)[land] <- class
# Bug fix: the original line was `write.GDAL(x,Output_Raster, TFW = YES)`,
# which fails three ways -- write.GDAL() is not a function in any attached
# package, `x` (lower case) is undefined (the raster is `X`), and YES is an
# unquoted symbol. Export with raster::writeRaster(), requesting a world
# file via the GDAL creation option TFW=YES.
writeRaster(X, Output_Raster, options = "TFW=YES", overwrite = TRUE)
# plot(X, col=ColScheme, xaxt="n", yaxt="n", legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# plot(X, col=ColScheme, xaxt="n", yaxt="n", xlim=c(-132, -127.5), ylim=c(56,58), legend=FALSE, legend.mar=0, maxpixels=ncell(X))
#
##################
#### Parkland classification and mapping Trial 1: balanced data
##################
#
# trial <- "FirstApprox"
# create a fake set of "known points" by subsampling the grid. in reality, these points will be provided by Will and Erica (and will also require a separte climateNA file)
training <- arc.open(Training_Pts)
# NOTE(review): arc.open() returns an arc dataset object, not a vector of row
# indices, yet `training` is used below to index `class` and X.grid.ref --
# this likely needs arc.select()/row indices instead. TODO confirm.
table(class[training])
# classify zone based on plant community
rf <- randomForest(X.grid.ref[training,], class[training], strata=class[training], sampsize=rep(min(table(class[training])), length(levels(class[training])))) #train the RF model. the strata and sampsize arguments are used for tree-level downsampling to balance the training set
rf.pred <- predict(rf, X.grid.ref) #predict back to the whole grid.
# Confusion table of mapped class vs RF prediction, and accuracy summaries.
ct <- table(group=class,class=rf.pred)
ClassCorrect <- diag(prop.table(ct, 1))   # per-class proportion correct
AllCorrect <- sum(diag(ct))/sum(ct)       # overall proportion correct
#Unsure if need to write another raster
#
# png(filename=paste("Results\\WoodlandPrediction_",trial,".png",sep=""), type="cairo", units="in", width=12, height=6, pointsize=12, res=400)
# par(mfrow=c(1,2))
# par(mar=c(0,0,1.5,1))
# ColScheme <- c("dodgerblue", "yellow", "black")
#
# #BGCv10 map
# par(plt = c(0, 1, 0, 0.93), new = F)
# X <- dem
# values(X) <- NA
# values(X)[land][-nonCNA] <- class
# plot(X, col=ColScheme, xaxt="n", yaxt="n", legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# box(col="black", lwd=1.5)
# mtext("BGC mapping v10", 3, adj=0.5, padj=0, cex=1.1, line=0.2)
# legend(extent(X)[1]-0.25, 58, legend=paste(levels(class), ": n=", table(class[training]), " (", round(100*table(class[training])/table(class),0), "%)", sep=""),
# title=paste("Training sample: n=", sum(table(class[training])), sep="") ,
# fill=ColScheme, bg="white", col="lightgrey", box.col="white", cex=1.1, inset=0.01)
#inset zoom map
# xlim=c(-131, -128)
# ylim=c(56.25,57.76)
# rect(xlim[1], ylim[1], xlim[2], ylim[2], col=(alpha("lightgrey", 0.3)))
# par(plt = c(0.01, 0.54, 0.01, 0.5), new = TRUE)
# plot(X, xaxt="n", yaxt="n", xlim=xlim, ylim=ylim, col=ColScheme, legend=FALSE, legend.mar=0, maxpixels=ncell(X))
#
# par(plt = c(0, 1, 0, 0.93), new = F)
# values(X)[land][-nonCNA] <- rf.pred
# plot(X, col=ColScheme, xaxt="n", yaxt="n", legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# box(col="black", lwd=1.5)
# mtext("Random Forest Prediction", 3, adj=0.5, padj=0, cex=1.1, line=0.2)
# legend(extent(X)[1], 58.2, legend=paste(levels(class), ": ", as.integer((1-ClassCorrect)*100), "% error", sep=""),
# title=paste("Total error: ", round((1-AllCorrect)*100, 1), "%", sep="") ,
# cex=1.1, inset=0.01, bty="n")
# text(extent(X)[1], 56.5, paste("change in parkland area: ", round(100*(sum(rf.pred=="parkland")-sum(class=="parkland"))/sum(class=="parkland"),0), "%", sep=""), pos=4)
#inset zoom map
# xlim=c(-131, -128)
# ylim=c(56.25,57.76)
# rect(xlim[1], ylim[1], xlim[2], ylim[2], col=(alpha("lightgrey", 0.3)))
# par(plt = c(0.01, 0.54, 0.01, 0.5), new = TRUE)
# plot(X, xaxt="n", yaxt="n", xlim=xlim, ylim=ylim, col=ColScheme, legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# dev.off()
| /BEC_Automation_part2_Analysis.R | no_license | Brian-Hearnden/BEC_Automation | R | false | false | 8,083 | r | ##### Template R code for mapping of woodland units from known points.
##### Colin Mahony, UBC Forestry, 778-288-4008, c_mahony@alumni.ubc.ca
##### November 4th, 2016
# NOTE(review): rm(list=ls()) silently wipes the caller's entire workspace;
# this is a harmful side effect for a script that may be sourced interactively.
rm(list=ls()) #clean the workspace so all previous objects are deleted
#Download projects that are not currently downloaded and load packages
# Helper: ensure every package in `pkg` is installed, installing whatever is
# missing, then attach them all. The value is the named logical vector of
# require() attach results produced by sapply().
ipak <- function(pkg) {
  needs_install <- pkg[!pkg %in% installed.packages()[, "Package"]]
  if (length(needs_install))
    install.packages(needs_install, dependencies = TRUE)
  sapply(pkg, require, character.only = TRUE)
}
# Packages needed by the workflow (spatial stack + randomForest classifier).
pkgs = c("scales","MASS", "stats", "rgl", "RColorBrewer", "FNN", "igraph", "raster", "maps"
, "maptools", "sp", "colorRamps", "rgeos", "rgdal", "foreign", "randomForest")
ipak(pkgs)
# get Parameters
# NOTE(review): arc.progress_label()/arc.progress_pos() and the in_params/
# out_params lists are supplied by the arcgisbinding script-tool runtime;
# this script is presumably meant to run inside ArcGIS, not stand-alone --
# TODO confirm.
arc.progress_label("Reading parameters...")
arc.progress_pos(0)
Workspace = in_params[[1]]       # folder used as the working directory
ClimateBC_CSV = in_params[[2]]   # ClimateBC/ClimateNA grid-point CSV
Training_Pts = in_params[[3]]    # training points dataset
Output_Raster = out_params[[1]]  # path of the classified raster to write
## need to create this folder and a "Results" and "InputData" folder in it.
setwd(Workspace)
### generate a data frame of analysis variables for the grid points.
grid.ref <- read.csv(ClimateBC_CSV, strip.white = TRUE, na.strings = c("NA","",-9999) )
# Column 6 is NA for DEM cells outside the ClimateNA extent; keep those row
# indices so they can be excluded from every later object.
nonCNA <- which(is.na(grid.ref[,6])) # dem cells outside climateNA extent. need this for later
arc.progress_label("Beginning ")
arc.progress_pos(0)
#######WILLS CODE FOR CLIMATE BC VARIABLES TO USE####
##Expects data with PlotNo, BGC, Lat, long, Elev as first 5 columns and ALL Variable output from ClimateWNA
####modify
# NOTE(review): X1 is never created in this file -- this section assumes a
# data frame X1 already exists in the workspace, so the script fails here
# when run top-to-bottom. Confirm where X1 is built.
colnames(X1)[1]=c("BGC")
colnames(X1)[2]=c("_")
records <- nrow(X1)
##X1$PlotNo <- as.integer(seq(from = 1, to = records, by =1))
attr(X1, "row.names") <- (X1$PlotNo)
# Keep a copy of the identifier/location columns before they are dropped.
X2 <- X1 [, c("PlotNo", "BGC", "Latitude", "Longitude", "Elevation")]
X1=X1[,-c(1,3:5)]
# Drop
X1$BGC <- as.factor(X1$BGC)
# NOTE(review): `fname` is undefined in this file -- TODO confirm its origin.
save(X1,file=paste(fname,".Rda",sep=""))
# Derive named seasonal/monthly climate variables from the ClimateWNA output.
X1$CMDMax <- X1$CMD07
X1$PPTJune <- X1$PPT06
X1$CMDJune <- X1$CMD06
X1$TMaxJune <- X1$TMax06
X1$DD_18June <- X1$DD_18_06
X1$DD5May <- X1$DD5_05
X1$CMD.grow <- X1$CMD05 + X1$CMD06 +X1$CMD07 +X1$CMD08 +X1$CMD09
X1$PPT.dormant <- X1$PPT_at + X1$PPT_wt
# Dormant-season precipitation below 500 mm counts toward the moisture deficit;
# negative deficits are clamped to zero.
X1$CMD.def <- 500 - (X1$PPT.dormant)
X1$CMD.def [X1$CMD.def < 0] <- 0
X1$CMD.total <- X1$CMD.def + X1$CMD
X1save = X1
#### Choose Biologically Interpretable Variables Only:
# Candidate predictors grouped by theme: temperature, precipitation, and
# moisture/other indices.
TEMP.list=c("Tmax_sp","Tmax_sm","Tmin_wt","Tmin_sp","Tmin_sm","Tmin_at","DD_0_sp", "DD_0_at",
"DD5_sp","DD5_sm","DD5_at","DD_18_sp","DD_18_sm","TMaxJune",
"MAT","MWMT","MCMT","DD5","EMT","EXT", "DD_18June", "DD5May")
PPT.list=c("PPTJune", "PPT_sp", "PPT_sm", "PPT_at", "PPT_wt" ,"MSP", "MAP","PAS", "PPT.dormant")
OTHER.list=c("CMD_sp","CMD_sm","CMDMax","AHM","SHM","NFFD","bFFP","FFP","CMD", "CMD.grow", "CMDJune", "CMD.def", "CMD.total")
ClimateVar=c(TEMP.list,PPT.list,OTHER.list)
List=c("BGC")
X1save = X1
#############Create final data set based on Options selected above
X1$BGC <- as.factor(X1$BGC)
# Keep only the response (BGC) plus the selected climate variables.
X1=X1[,names(X1) %in% c(List,ClimateVar)]
# NOTE(review): `fname` is undefined in this file -- TODO confirm.
save(X1,file=paste(fname,"_DataSet",".Rda",sep=""))
####################
#########END OF WILLS CODE
###########################
#select predictor variables
predictors <- names(grid.ref)[-grep("id|tude|Elev|MAR|RH|18",names(grid.ref))] #remove selected variables from the variable set (could keep elevation in... perhaps it will be a good predictor)
X.grid.ref <- grid.ref[-nonCNA,which(names(grid.ref)%in%predictors)] #data frame of analysis variables. removes NA values
sum(is.na(X.grid.ref)) #check if there are any NA values. the answer should be "0".
##log-transform zero-limited variables
# Columns whose values are bounded below by zero (precip, degree-days, etc.).
zerolim <- grep("MAP|MSP|PAS|DD|CMD|FF|ref",names(X.grid.ref))
for(i in zerolim){X.grid.ref[which(X.grid.ref[,i]==0),i] <- 1} #set zero values to one, to facilitate log-transformation
X.grid.ref[,zerolim] <- log(X.grid.ref[,zerolim]) #log-transform
# NOTE(review): the backslash path is Windows-only; file.path("InputData", ...)
# would be portable.
write.csv(X.grid.ref,"InputData\\X.grid.ref.csv", row.names=FALSE)
############
## Analysis
############
# NOTE(review): BGCv10.pts is not created in this file -- it is assumed to
# exist in the workspace. TODO confirm where it is loaded.
BGCv10.pts <- droplevels(BGCv10.pts[-nonCNA,]) #remove NA points and unused factor levels
BGC <- BGCv10.pts$MAP_LABEL
## create three classes: subalpine, parkland, and alpine
# NOTE(review): the name `class` masks base::class() for the rest of the script.
class <- rep(NA, length(BGC))
# Explicit parkland subzone labels -> "parkland".
class[BGC%in%c("ESSFmcp", "ESSFmkp", "ESSFmvp", "ESSFunp", "ESSFwvp", "MHunp", "MHwhp")] <- "parkland"
# Any BAFA/CMA zone -> "alpine".
class[grep("BAFA|CMA", BGC)] <- "alpine"
# Everything else -> "subalpine".
class[is.na(class)] <- "subalpine"
class <- factor(class)
# # map the three classes
# par(mfrow=c(1,1))
# par(mar=c(0,0,0,0))
# ColScheme <- c("dodgerblue", "yellow", "black")
# Build the output raster: use the DEM grid as a template, blank it, then
# write the factor codes of `class` into the land cells.
# NOTE(review): `dem` and `land` are not defined in this file -- they are
# assumed to exist in the workspace (dem = template RasterLayer, land =
# indices of land cells). TODO confirm where they are created.
X <- dem #uses dem as a template raster
values(X) <- NA
values(X)[land] <- class
# Bug fix: the original line was `write.GDAL(x,Output_Raster, TFW = YES)`,
# which fails three ways -- write.GDAL() is not a function in any attached
# package, `x` (lower case) is undefined (the raster is `X`), and YES is an
# unquoted symbol. Export with raster::writeRaster(), requesting a world
# file via the GDAL creation option TFW=YES.
writeRaster(X, Output_Raster, options = "TFW=YES", overwrite = TRUE)
# plot(X, col=ColScheme, xaxt="n", yaxt="n", legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# plot(X, col=ColScheme, xaxt="n", yaxt="n", xlim=c(-132, -127.5), ylim=c(56,58), legend=FALSE, legend.mar=0, maxpixels=ncell(X))
#
##################
#### Parkland classification and mapping Trial 1: balanced data
##################
#
# trial <- "FirstApprox"
# create a fake set of "known points" by subsampling the grid. in reality, these points will be provided by Will and Erica (and will also require a separte climateNA file)
training <- arc.open(Training_Pts)
# NOTE(review): arc.open() returns an arc dataset object, not a vector of row
# indices, yet `training` is used below to index `class` and X.grid.ref --
# this likely needs arc.select()/row indices instead. TODO confirm.
table(class[training])
# classify zone based on plant community
rf <- randomForest(X.grid.ref[training,], class[training], strata=class[training], sampsize=rep(min(table(class[training])), length(levels(class[training])))) #train the RF model. the strata and sampsize arguments are used for tree-level downsampling to balance the training set
rf.pred <- predict(rf, X.grid.ref) #predict back to the whole grid.
# Confusion table of mapped class vs RF prediction, and accuracy summaries.
ct <- table(group=class,class=rf.pred)
ClassCorrect <- diag(prop.table(ct, 1))   # per-class proportion correct
AllCorrect <- sum(diag(ct))/sum(ct)       # overall proportion correct
#Unsure if need to write another raster
#
# png(filename=paste("Results\\WoodlandPrediction_",trial,".png",sep=""), type="cairo", units="in", width=12, height=6, pointsize=12, res=400)
# par(mfrow=c(1,2))
# par(mar=c(0,0,1.5,1))
# ColScheme <- c("dodgerblue", "yellow", "black")
#
# #BGCv10 map
# par(plt = c(0, 1, 0, 0.93), new = F)
# X <- dem
# values(X) <- NA
# values(X)[land][-nonCNA] <- class
# plot(X, col=ColScheme, xaxt="n", yaxt="n", legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# box(col="black", lwd=1.5)
# mtext("BGC mapping v10", 3, adj=0.5, padj=0, cex=1.1, line=0.2)
# legend(extent(X)[1]-0.25, 58, legend=paste(levels(class), ": n=", table(class[training]), " (", round(100*table(class[training])/table(class),0), "%)", sep=""),
# title=paste("Training sample: n=", sum(table(class[training])), sep="") ,
# fill=ColScheme, bg="white", col="lightgrey", box.col="white", cex=1.1, inset=0.01)
#inset zoom map
# xlim=c(-131, -128)
# ylim=c(56.25,57.76)
# rect(xlim[1], ylim[1], xlim[2], ylim[2], col=(alpha("lightgrey", 0.3)))
# par(plt = c(0.01, 0.54, 0.01, 0.5), new = TRUE)
# plot(X, xaxt="n", yaxt="n", xlim=xlim, ylim=ylim, col=ColScheme, legend=FALSE, legend.mar=0, maxpixels=ncell(X))
#
# par(plt = c(0, 1, 0, 0.93), new = F)
# values(X)[land][-nonCNA] <- rf.pred
# plot(X, col=ColScheme, xaxt="n", yaxt="n", legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# box(col="black", lwd=1.5)
# mtext("Random Forest Prediction", 3, adj=0.5, padj=0, cex=1.1, line=0.2)
# legend(extent(X)[1], 58.2, legend=paste(levels(class), ": ", as.integer((1-ClassCorrect)*100), "% error", sep=""),
# title=paste("Total error: ", round((1-AllCorrect)*100, 1), "%", sep="") ,
# cex=1.1, inset=0.01, bty="n")
# text(extent(X)[1], 56.5, paste("change in parkland area: ", round(100*(sum(rf.pred=="parkland")-sum(class=="parkland"))/sum(class=="parkland"),0), "%", sep=""), pos=4)
#inset zoom map
# xlim=c(-131, -128)
# ylim=c(56.25,57.76)
# rect(xlim[1], ylim[1], xlim[2], ylim[2], col=(alpha("lightgrey", 0.3)))
# par(plt = c(0.01, 0.54, 0.01, 0.5), new = TRUE)
# plot(X, xaxt="n", yaxt="n", xlim=xlim, ylim=ylim, col=ColScheme, legend=FALSE, legend.mar=0, maxpixels=ncell(X))
# dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_context_freq.R
\name{get_context_freq}
\alias{get_context_freq}
\title{Extract occurrence of tri-nucleotide contexts}
\usage{
get_context_freq(genome, region = NULL)
}
\arguments{
\item{genome}{a BSgenome object}
\item{region}{a GRanges object, path, URL, connection or BEDFile object.}
}
\value{
matrix containing the frequencies of the trinucleotide contexts
}
\description{
Extracts the frequencies of the tri-nucleotide contexts in a given region
of the genome. These frequencies are needed to normalize a mutational
catalogue. The output can be input to normalize().
}
\examples{
gr<-GenomicRanges::GRanges(seqnames=c("chr1"),
ranges=IRanges::IRanges(start=c(100000),end=c(1000000)),
strand=c("+"))
get_context_freq(BSgenome.Hsapiens.UCSC.hg19::BSgenome.Hsapiens.UCSC.hg19, gr)
get_context_freq(BSgenome.Hsapiens.UCSC.hg19::BSgenome.Hsapiens.UCSC.hg19)
\dontrun{
get_context_freq(BSgenome.Hsapiens.UCSC.hg19::BSgenome.Hsapiens.UCSC.hg19, 'example.bed')
}
}
| /man/get_context_freq.Rd | no_license | bihealth/SigsPack | R | false | true | 1,068 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_context_freq.R
\name{get_context_freq}
\alias{get_context_freq}
\title{Extract occurrence of tri-nucleotide contexts}
\usage{
get_context_freq(genome, region = NULL)
}
\arguments{
\item{genome}{a BSgenome object}
\item{region}{a GRanges object, path, URL, connection or BEDFile object.}
}
\value{
matrix containing the frequencies of the trinucleotide contexts
}
\description{
Extracts the frequencies of the tri-nucleotide contexts in a given region
of the genome. These frequencies are needed to normalize a mutational
catalogue. The output can be input to normalize().
}
\examples{
gr<-GenomicRanges::GRanges(seqnames=c("chr1"),
ranges=IRanges::IRanges(start=c(100000),end=c(1000000)),
strand=c("+"))
get_context_freq(BSgenome.Hsapiens.UCSC.hg19::BSgenome.Hsapiens.UCSC.hg19, gr)
get_context_freq(BSgenome.Hsapiens.UCSC.hg19::BSgenome.Hsapiens.UCSC.hg19)
\dontrun{
get_context_freq(BSgenome.Hsapiens.UCSC.hg19::BSgenome.Hsapiens.UCSC.hg19, 'example.bed')
}
}
|
#############################################################
# #
# ♥ FAIRE DES CARTES AVEC R ♥ #
# APPLICATION A LA FRONTIERE ETATS-UNIS / MEXIQUE #
# NICOLAS LAMBERT, RONAN YSEBAERT, UMS RIATE, NOV.2019 #
# #
#############################################################
# This program produces every graphic used in the presentation.
# Attribution string reused in every map's layout frame.
authors <- "N. Lambert & R. Ysebaert, 2019\nData source: IOM, Didelon, Vandermotten, Dessouroux, (c) OpenStreetMap contributors, 2019"
######################################################################
# 0 Préparation des données et création des modèles cartographiques
#####################################################################
# Appel des librairies ------
library("sf")
library("rnaturalearth")
library("geojsonsf")
library("cartography")
library("cartogram")
library("leaflet")
library("SpatialPosition")
library("units")
library("OECD")
library("ggplot2")
library("ggthemes")
library("osmdata")
library("htmlwidgets")
library("animation")
# Import geometries ------
# Countries - Natural Earth (1:50m), keeping only Mexico and the USA
countries <- ne_countries(scale = 50, type = "countries", continent = NULL,
country = NULL, geounit = NULL, sovereignty = NULL,
returnclass = "sf")
countries <- countries[countries$adm0_a3 %in% c("MEX","USA"),]
# Rivers - Natural Earth
rivers <- ne_download(scale = 50, type = "rivers_lake_centerlines",
category = "physical", returnclass = "sf")
# Coastline - Natural Earth
coastline <- ne_download(scale = 50, type = "coastline",
category = "physical", returnclass = "sf")
# Oceans - Natural Earth
ocean <- ne_download(scale = 50, type = "ocean", category = "physical",
returnclass = "sf")
# -- Source: "Cartographier le monde a l'echelle infranationale" (CIST) --
subregions <- st_read(dsn = "data/regions/mex_us_admin.shp" ,
options = "ENCODING=UTF-8", stringsAsFactors = FALSE)
# -- Source: data.world (https://data.world/carlvlewis/border-fence-boundaries-u-s-mexico)
# US-Mexico border fence segments
fences <- geojson_sf("data/data.world/border-fence.geojson")
# Reproject the layers and build map template 1 [Albers projection] ------
# Chosen projection: Albers equal-area centred on the border region
albers <- "+proj=aea +lat_1=14.5 +lat_2=32.5 +lat_0=24 +lon_0=-105 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"
countries_aea <- st_transform(countries,crs = albers)
fences_aea <- st_transform(fences,crs = albers)
rivers_aea <- st_transform(rivers,crs = albers)
coastline_aea <- st_transform(coastline,crs = albers)
ocean_aea <- st_transform(ocean,crs = albers)
subregions_aea <- st_transform(subregions,crs = albers)
subregions_all <- st_transform(subregions,crs = albers)
# Map extent (hand-tuned so the PNG export is framed nicely)
bb_aea <- c(-1342784.0, -739750.5, 793341.2, 1317849.8)
d <- 100000
# NOTE(review): ymin is built from bb_aea[4] and ymax from bb_aea[2], so the
# named values appear swapped -- presumably harmless given st_bbox handling,
# but confirm.
bbox_aea <- st_as_sfc(st_bbox(c(xmin = bb_aea[1]-2*d , xmax = bb_aea[3]+2*d,
ymin = bb_aea[4]+d, ymax = bb_aea[2]-d),
crs = albers))
# Clip the regional layers to the bounding box
subregions_aea <- st_intersection(x = subregions_aea, st_geometry(bbox_aea))
coastline_aea <- st_intersection(x = coastline_aea, y = bbox_aea)
rivers_aea <- st_intersection(x = rivers_aea, y = bbox_aea)
countries_aea <- st_intersection(x = countries_aea, y = bbox_aea)
# Build the Albers map template: ocean background, drop shadow, regions,
# coastline/rivers, the border fence, then the cartography layout frame.
# Relies on the *_aea layers, bb_aea and `authors` created above; the plot
# order is the layer stacking order, so it must not be changed.
lay_aea <- function(title = ""){
par(mar = c(0,0,1.2,0))
plot(st_geometry(ocean_aea), col= "#b8d5e3", border = NA, xlim = bb_aea[c(1,3)],
ylim = bb_aea[c(2,4)])
# A copy of the regions shifted 10 km SW and drawn in translucent grey
# first acts as a cheap drop shadow.
plot(st_geometry(subregions_aea) + c(-10000, -10000), col ="#827e6c50",
border = NA, add = T)
plot(st_geometry(subregions_aea), col= "#ede6bb", border = "white",
cex = 0.5, add=T)
plot(st_geometry(coastline_aea), col= "#6d9cb3",lwd = 1 ,add= T)
plot(st_geometry(rivers_aea), col= "#6d9cb3",lwd = 1 ,add= T)
plot(st_geometry(fences_aea), col= "#3d3c3c",lwd = 3 ,add= T)
layoutLayer(title = title,
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
}
# Figure 1: export the bare Albers template as a PNG sized to the bbox.
sizes_aea <- getFigDim(x = bbox_aea, width = 1500,mar = c(0,0,1.2,0), res = 150)
png("img/fig01.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
lay_aea("Template cartographique 1 (projection Albers)")
dev.off()
# Reproject the layers and build map template 2 [Orthographic projection] ------
# Chosen projection: orthographic view centred south of the border
ortho <- "+proj=ortho +lat_0=-35 +lon_0=-104 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs"
countries_ortho <- st_transform(countries,crs = ortho)
fences_ortho <- st_transform(fences,crs = ortho)
rivers_ortho <- st_transform(rivers,crs = ortho)
coastline_ortho <- st_transform(coastline,crs = ortho)
ocean_ortho <- st_transform(ocean,crs = ortho)
subregions_ortho <- st_transform(subregions,crs = ortho)
# Map extent (hand-tuned so the PNG export is framed nicely)
bb_ortho <- c(-1668000, 5100000, 934863, 5900000)
d <- 100000
bbox_ortho <- st_as_sfc(st_bbox(c(xmin = bb_ortho[1]-2*d , xmax = bb_ortho[3]+2*d,
ymin = bb_ortho[4]+3.5*d, ymax = bb_ortho[2]-3.5*d),
crs = ortho))
# Clip the regional layers to the bounding box
subregions_ortho <- st_intersection(x = subregions_ortho, st_geometry(bbox_ortho))
rivers_ortho <- st_intersection(x = rivers_ortho, y = bbox_ortho)
coastline_ortho <- st_intersection(x = coastline_ortho, y = bbox_ortho)
# Build the orthographic map template (same layer stack as lay_aea, using
# the *_ortho layers); plot order is stacking order.
lay_ortho <- function(title = ""){
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
plot(st_geometry(subregions_ortho) + c(-10000, -10000), col ="#827e6c50", border = NA, add = T)
plot(st_geometry(subregions_ortho), col= "#ede6bb", border = "white", cex = 0.5, add=T)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
plot(st_geometry(rivers_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 3 ,add= T)
layoutLayer(title = title,
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
}
# Figure 2: export the bare orthographic template.
sizes_ortho <- getFigDim(x = bbox_ortho, width = 1500,mar = c(0,0,1.2,0), res = 150)
png("img/fig02.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Template cartographique 2 (projection orthographique)")
dev.off()
# Template variant with the border fence drawn as a 2.5D wall ------
lay_ortho2 <- function(title = ""){
# NOTE(review): this local `authors` shadows the identical global one.
authors <- "N. Lambert & R. Ysebaert, 2019\nData source: IOM, Didelon, Vandermotten, Dessouroux, (c) OpenStreetMap contributors, 2019"
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
plot(st_geometry(subregions_ortho) + c(-10000, -10000), col ="#827e6c50", border = NA, add = T)
plot(st_geometry(subregions_ortho), col= "#ede6bb", border = "white", cex = 0.5, add=T)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
plot(st_geometry(rivers_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 2 ,add= T)
# Replot the fence 20 times, shifted 5 km north each step, to fake a wall
# in 2.5D; the final black line is the top edge of the wall.
line <- st_geometry(fences_ortho)
for (i in 1:20){
line <- st_geometry(line) + c(0,5000)
plot(st_geometry(line), col= "#565b6380",lwd = 2 ,add= T)
}
plot(st_geometry(line), col= "#3d3c3c",lwd = 2 ,add= T)
layoutLayer(title = title,
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
}
# Figure 3: export the 2.5D-wall variant of the orthographic template.
sizes_ortho <- getFigDim(x = bbox_ortho, width = 1500,mar = c(0,0,1.2,0), res = 150)
png("img/fig03.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho2("Template cartographique 2 (projection orthographique)")
dev.off()
# Import the socio-economic data and join it to the regional layers ------
# GDP (PIB) and population tables keyed by admin unit id; joined onto all
# three copies of the regional layer (Albers, ortho, un-clipped).
pib <- read.csv("data/regions/PIB.csv", sep = "\t",encoding = "UTF-8", dec = ",",
stringsAsFactors=FALSE)
pop <- read.csv("data/regions/POP.csv", sep = "\t", encoding = "UTF-8", dec = ",",
stringsAsFactors=FALSE)
subregions_aea <- merge (x = subregions_aea, y = pib,
by.x = "ID_ADMIN_1",
by.y = "ID_ADMIN",
all.x = TRUE)
subregions_aea <- merge (x = subregions_aea, y = pop,
by.x = "ID_ADMIN_1",
by.y = "ID_ADMIN",
all.x = TRUE)
subregions_ortho <- merge (x = subregions_ortho, y = pib,
by.x = "ID_ADMIN_1",
by.y = "ID_ADMIN",
all.x = TRUE)
subregions_ortho <- merge (x = subregions_ortho, y = pop,
by.x = "ID_ADMIN_1",
by.y = "ID_ADMIN",
all.x = TRUE)
subregions_all <- merge (x = subregions_all, y = pib,
by.x = "ID_ADMIN_1",
by.y = "ID_ADMIN",
all.x = TRUE)
subregions_all <- merge (x = subregions_all, y = pop,
by.x = "ID_ADMIN_1",
by.y = "ID_ADMIN",
all.x = TRUE)
##############################################################
# 1 - Comparative statistics using OECD data
###############################################################
# GDP per capita ------
# Download table PDB_LV for the USA, Mexico and the OECD aggregate.
df <- get_dataset(dataset = "PDB_LV",
                  filter = list(c("MEX", "USA","OECD"), "T_GDPPOP", "CPC"))
# Convert the observation date to numeric for the x axis.
df$obsTime <- as.numeric(df$obsTime)
# Graph. Bug fix: ggplot objects are only rendered by their print method;
# when this script is run via source(), an unprinted ggplot chain draws
# nothing and fig05/fig06 come out blank. Wrap the chains in print().
png("img/fig05.png", width = 1500, height = 1000, res = 150)
print(
  ggplot(data = df, aes(x = obsTime, y = obsValue, color = LOCATION)) +
    geom_line(size = 1) +
    labs(x = NULL, y = "Dollars, prix courant", color = NULL,
         title = "Évolution comparée du PIB par habitant (Mexique - USA - OCDE)") +
    theme_hc() +
    scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))
)
dev.off()
# Population under 20 ------
df <- as.data.frame(get_dataset(dataset = "POP_PROJ",
                                filter = list(c("MEX","USA","OECD"),"TT","D1TTR5Y4")))
df$obsTime <- as.numeric(df$obsTime)
# Graph (explicit print for the same reason as above).
png("img/fig06.png", width = 1500, height = 1000, res = 150)
print(
  ggplot(data = df, aes(x = obsTime, y = obsValue, color = LOCATION)) +
    geom_line(size = 1) +
    labs(x = NULL, y = "Part de la population totale (%)", color = NULL,
         title = "Évolution comparée des moins de 20 ans (Mexique - USA - OCDE)") +
    theme_hc() +
    scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))
)
dev.off()
################################################
# 2 - Mapping spatial discontinuities
################################################
# Demographic ageing (version 1): Albers template ------
# Figure 7: ageing-index choropleth plus discontinuity lines on borders.
png("img/fig07.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_aea), col= "#b8d5e3", border = NA, xlim = bb_aea[c(1,3)], ylim = bb_aea[c(2,4)])
choroLayer(x = subregions_aea, var = "POP65_POP15",
breaks = c(min(subregions_aea$POP65_POP15, na.rm = T),
20,25,35,50,65, max(subregions_aea$POP65_POP15, na.rm = T)),
col = carto.pal(pal1 = "green.pal", n1 = 3, pal2 = "red.pal", n2 = 3),
legend.pos = c(-1400000 , -600000),
legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
legend.title.txt = "Rapport entre la population âgée de plus de 65 ans\net la population âgée de moins de 15 ans\nen 2015 (%)",
border = NA, add = TRUE)
# Lines on inter-unit borders sized by the absolute difference in the index.
subregions.borders <- getBorders(subregions_aea)
discLayer(x = subregions.borders, df = subregions_aea,
var = "POP65_POP15", col="black", nclass=3,
method="equal", threshold = 0.3, sizemin = 0.5,
sizemax = 10, type = "abs",legend.values.rnd = 0,
legend.title.txt = "Discontinuités sur l'indice de veillissement 2015\n(différences absolues)",
legend.pos = c(-1400000 , -300000), legend.title.cex = 0.7, legend.values.cex = 0.5,
add = TRUE)
plot(st_geometry(coastline_aea), col= "#6d9cb3",lwd = 1 ,add= T)
layoutLayer(title = "Une barrière démographique...",
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
dev.off()
# Demographic ageing (version 2): same map on the orthographic template ------
# Figure 8; the discLayer result is kept for the extruded version below.
png("img/fig08.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "POP65_POP15",
breaks = c(min(subregions_ortho$POP65_POP15, na.rm = T),
20,25,35,50,65, max(subregions_ortho$POP65_POP15, na.rm = T)),
col = carto.pal(pal1 = "green.pal", n1 = 3, pal2 = "red.pal", n2 = 3),
legend.pos = c(-1700000, 5000000),
legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
legend.title.txt = "Rapport entre la population âgée de plus de 65 ans\net la population âgée de moins de 15 ans\nen 2015 (%)",
border = NA, add = TRUE)
subregions.borders <- getBorders(subregions_ortho)
discontinuities <- discLayer(x = subregions.borders, df = subregions_ortho,
var = "POP65_POP15", col = "black", nclass=3,
method = "equal", threshold = 0.3, sizemin = 0.5,
sizemax = 10, type = "abs",legend.values.rnd = 0,
legend.title.txt = "Discontinuités sur l'indice\nde veillissement 2015\n(différences\nabsolues)",
legend.pos = c(-1700000, 5300000), legend.title.cex = 0.7,
legend.values.cex = 0.5, add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
layoutLayer(title = "Une barrière démographique...",
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
dev.off()
# Demographic ageing (version 3) ------
# Keep only the strongest 30% of the discontinuities computed by version 2,
# then define extrude(), which draws one discontinuity segment as a
# pseudo-3D "wall" whose height encodes the size of the gap.
threshold <- 0.3
minvar <- as.numeric(quantile(discontinuities$disc, probs = c(1 - threshold)))
discontinuities <- discontinuities[discontinuities$disc >= minvar,]
# Number of 5 km extrusion steps drawn per segment.
discontinuities$height <- round(discontinuities$disc / 2,0)
# extrude(): plotting side effect only; reads `discontinuities` from the
# enclosing (global) environment rather than taking it as an argument.
extrude <- function(id){
  line <- st_geometry(discontinuities[id,])
  plot(line, col = "black",lwd = 2 ,add = TRUE)
  # NOTE(review): assumes as.numeric() on this one-row sf selection yields
  # the height value -- confirm with the sf version in use.
  nb <- as.numeric(discontinuities[id,"height"])[1]
  for (j in 1:nb){
    # Shift the line 5 km north each step to fake the wall's elevation.
    line <- st_geometry(line) + c(0,5000)
    plot(st_geometry(line), col= "#ebd23490",lwd = 2 ,add = TRUE)
  }
  plot(line, col= "black",lwd = 2 ,add = TRUE)
}
# Ageing map (fig09): choropleth plus extruded "walls" along the strongest
# discontinuities (extrude() defined above).
png("img/fig09.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "POP65_POP15",
           breaks = c(min(subregions_ortho$POP65_POP15, na.rm = T),
                      20,25,35,50,65, max(subregions_ortho$POP65_POP15, na.rm = T)),
           col = carto.pal(pal1 = "green.pal", n1 = 3, pal2 = "red.pal", n2 = 3),
           legend.pos = c(-1700000, 5000000),
           legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
           legend.title.txt = "Rapport entre la population âgée de plus de 65 ans\net la population âgée de moins de 15 ans\nen 2015 (%)",
           border = NA, add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
# Draw each retained discontinuity as a wall.
for (i in 1:length(discontinuities$disc)) {
  extrude(i)
}
legtxt <- "Sur cette carte, la hauteur\ndes barrières est proportionnelle\nà la valeur des discontinuités\nabsolues sur l'indice du\nvieillissement en 2015."
text(-1700000, y = 5400000, legtxt , cex = 0.9, pos = 4, font = 2)
layoutLayer(title = "Une barrière démographique...",
            author = authors,
            scale = 300, south = TRUE, frame = TRUE,
            col = "#6d9cb3", coltitle = "white")
dev.off()
# GDP per capita (version 1) ------
# Choropleth of GDP per capita (indexed on the world average = 100) on the
# Albers template, with discontinuity lines between neighbouring regions.
png("img/fig10.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_aea), col= "#b8d5e3", border = NA, xlim = bb_aea[c(1,3)], ylim = bb_aea[c(2,4)])
choroLayer(x = subregions_aea, var = "PIB100_2017",
           breaks = c(min(subregions_aea$PIB100_2017, na.rm = T),
                      75,90,100,125,150,200, max(subregions_aea$PIB100_2017, na.rm = T)),
           col = carto.pal(pal1 = "red.pal", n1 = 3, pal2 = "green.pal", n2 = 4),
           legend.pos = c(-1400000 , -600000),
           legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
           legend.title.txt = "PIB par habitant 2017\n(100 = moyenne mondiale)",
           border = NA,
           add = TRUE)
plot(st_geometry(coastline_aea), col= "#6d9cb3",lwd = 1 ,add= T)
# Discontinuities: top 20% of absolute GDP gaps between neighbours.
subregions.borders <- getBorders(subregions_aea)
discLayer(x = subregions.borders, df = subregions_aea,
          var = "PIB100_2017", col="black", nclass=3,
          method="equal", threshold = 0.2, sizemin = 0.5,
          sizemax = 10, type = "abs",legend.values.rnd = 0,
          legend.title.txt = "Discontinuités de PIB par habitant 2017\n(différences absolues)",
          legend.pos = c(-1400000 , -300000), legend.title.cex = 0.7, legend.values.cex = 0.5,
          add = TRUE)
layoutLayer(title = "Doublée d'un mur de richesse... Mais quelles conséquences ?",
            author = authors,
            scale = 300, south = TRUE, frame = TRUE,
            col = "#6d9cb3", coltitle = "white")
dev.off()
# GDP per capita (version 2) ------
# Same GDP map on the orthographic template; the discLayer() result is kept
# in `discontinuities` for reuse by version 3 below.
png("img/fig11.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "PIB100_2017",
           breaks = c(min(subregions_ortho$PIB100_2017, na.rm = T),
                      75,90,100,125,150,200, max(subregions_ortho$PIB100_2017, na.rm = T)),
           col = carto.pal(pal1 = "red.pal", n1 = 3, pal2 = "green.pal", n2 = 4),
           legend.pos = c(-1700000, 5000000),
           legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
           legend.title.txt = "PIB par habitant 2017\n(100 = moyenne mondiale)",
           border = NA,
           add = TRUE)
# Discontinuities between neighbouring regions.
subregions.borders <- getBorders(subregions_ortho)
# FIX: the original passed df = subregions_aea here although the borders were
# derived from subregions_ortho. Both layers carry the same attribute table,
# so values are unchanged, but the ortho layer is used for consistency with
# the rest of this map (and with the ageing version 2 above).
discontinuities <- discLayer(x = subregions.borders, df = subregions_ortho,
                             var = "PIB100_2017", col="black", nclass=3,
                             method="equal", threshold = 0.2, sizemin = 0.5,
                             sizemax = 10, type = "abs",legend.values.rnd = 0,
                             legend.title.txt = "Discontinuités de PIB par habitant 2017\n(différences absolues)",
                             legend.pos = c(-1700000, 5300000), legend.title.cex = 0.7, legend.values.cex = 0.5,
                             add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
layoutLayer(title = "Doublée d'un mur de richesse... Mais quelles conséquences ?",
            author = authors,
            scale = 300, south = TRUE, frame = TRUE,
            col = "#6d9cb3", coltitle = "white")
dev.off()
# GDP per capita (version 3) ------
# Keep only the strongest 30% of the GDP discontinuities, then extrude them
# as pseudo-3D "walls" whose height encodes the gap size.
threshold <- 0.3
minvar <- as.numeric(quantile(discontinuities$disc, probs = c(1 - threshold)))
discontinuities <- discontinuities[discontinuities$disc >= minvar,]
# Number of 5 km extrusion steps drawn per segment.
discontinuities$height <- round(discontinuities$disc / 8,0)
# extrude(): draw one discontinuity segment as a wall by re-plotting the line
# shifted 5 km north `height` times. Plotting side effect only; reads
# `discontinuities` from the enclosing environment.
# FIX: the original source defined this identical function twice in a row;
# the redundant duplicate definition has been removed.
extrude <- function(id){
  line <- st_geometry(discontinuities[id,])
  plot(line, col= "black",lwd = 2 ,add= T)
  nb <- as.numeric(discontinuities[id,"height"])[1]
  for (j in 1:nb){
    line <- st_geometry(line) + c(0,5000)
    plot(st_geometry(line), col= "#ebd23490",lwd = 2 ,add= T)
  }
  plot(line, col= "black",lwd = 2 ,add= T)
}
# GDP map (fig12): choropleth plus extruded "walls" along the strongest
# GDP discontinuities (extrude() defined above).
png("img/fig12.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "PIB100_2017",
           breaks = c(min(subregions_ortho$PIB100_2017, na.rm = T),
                      75,90,100,125,150,200, max(subregions_ortho$PIB100_2017, na.rm = T)),
           col = carto.pal(pal1 = "red.pal", n1 = 3, pal2 = "green.pal", n2 = 4),
           legend.pos = c(-1700000, 5000000),
           legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
           legend.title.txt = "PIB par habitant 2017\n(100 = moyenne mondiale)",
           border = NA,
           add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
# Draw each retained discontinuity as a wall.
for (i in 1:length(discontinuities$disc))
{
  extrude(i)
}
legtxt <- "Sur cette carte, la hauteur\ndes barrières est proportionnelle\nà la valeur des discontinuités\nabsolues sur le PIB par habitant\nen 2017."
text(-1700000, y = 5400000, legtxt , cex = 0.9, pos = 4, font = 2)
layoutLayer(title = "Doublée d'un mur de richesse... Mais quelles conséquences ?",
            author = authors,
            scale = 300, south = TRUE, frame = TRUE,
            col = "#6d9cb3", coltitle = "white")
dev.off()
############
# 3 - Border Control - Visualising border-control checkpoints
#############
# Data extraction ------
# Convert the bounding box to WGS 84 (required by the Overpass API).
bbox <- st_transform(bbox_aea, 4326)
# Build the query (OSM key/value pair over the bounding box).
opqbox <- opq(bbox = bbox , timeout = 5000)
opquery <- add_osm_feature(opq = opqbox, key = "barrier", value = "border_control")
feat <- osmdata_sf(opquery)
# Extract the points matching the query.
featpt <- st_transform(feat$osm_points, albers)
featpt <- featpt[featpt[["barrier"]] %in% "border_control", ]
# Extract the polygons matching the query, reduced to their centroids.
featpo <- st_transform(feat$osm_polygons, albers)
st_geometry(featpo) <- st_centroid(st_geometry(featpo))
featpo$osm_id <- row.names(featpo)
# Combine points and polygons, intersect them with the study area.
featpt <- rbind(featpt[, c("osm_id", "geometry")], featpo[, c("osm_id", "geometry")])
poi_osm <- st_intersection(x = featpt, st_geometry(subregions_aea))
# Build a regular grid (50 km cells) over the study area.
grid <- st_make_grid(subregions_aea, cellsize = 50000)
grid <- st_sf(grid)
# Count the number of checkpoints per grid cell.
grid$ncops <- lengths(st_covers(grid, poi_osm))
# Share of the study-area total (%), then drop empty cells.
grid$dens <- (grid$ncops / sum(grid$ncops)) * 100
grid <- grid[grid$ncops != 0, ]
# Visualisation 1 (points) ------
# One red dot per checkpoint recorded in OpenStreetMap.
png("img/fig13.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
par(mar = c(0,0,1.2,0))
lay_aea(paste0("Au moins ", sum(grid$ncops)," postes frontières..."))
plot(st_geometry(poi_osm), bg = "red", col = NA, pch = 21, cex = 0.8, add = TRUE)
legtxt <- "Chaque point rouge\nreprésente un poste\nfrontalier recensé\ndans OpenStreetMap."
text(-1400000, y = -100000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Visualisation 2 (proportional symbols) ------
png("img/fig14.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
lay_aea("Les hot-spots du contrôle frontalier - dénombrement")
# NOTE(review): cellsize = 50000 m gives 50 km x 50 km cells (2500 km2);
# the legend text says "50km²" -- confirm the intended wording.
propSymbolsLayer(grid, var = "ncops", col = "red", symbols = "square", add = T,
                 legend.pos = "left",
                 legend.title.cex = 0.7, legend.values.cex = 0.6,
                 legend.title.txt = "Nombre de postes frontière\n(zones de 50km²)")
dev.off()
# Visualisation 3 (grid) ------
png("img/fig15.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
lay_aea("Les hot-spots du contrôle frontalier - Part du total")
choroLayer(x = grid, var = "dens",
           breaks = c(min(grid$dens), 1, 2, 5, 10, max(grid$dens)),
           col = carto.pal(pal1 = "brown.pal", n1 = 5),
           legend.pos = "left", legend.title.cex = 0.7, legend.values.cex = 0.6,
           legend.title.txt = "Part des postes frontaliers\nde l'espace d'étude (%)",
           border = NA, add = TRUE)
plot(st_geometry(fences_aea), col= "#3d3c3c",lwd = 3 ,add= T)
dev.off()
# Visualisation 4 (control towers) ------
# Refine the grid resolution (20 km cells) and switch to the ortho template.
grid <- st_make_grid(subregions_ortho, cellsize = 20000)
grid <- st_sf(grid)
poi_osm <- st_transform(poi_osm,ortho)
grid$ncops <- lengths(st_covers(grid, poi_osm))
grid <- grid[grid$ncops>0,]
png("img/fig16.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho(title = "Sacrées tours de contrôle !")
# NOTE(review): cellsize = 20000 m gives 20 km x 20 km cells (400 km2);
# the legend text says "20 km²" -- confirm the intended wording.
propSymbolsLayer(x = grid, var = "ncops", col = "darkred",
                 symbols = "bar",
                 inches = 1.3,
                 border = "white", lwd = 1, legend.pos = c(-1700000, 5000000),
                 legend.title.txt = "Nombre de postes frontières\n(zones de 20 km²)",
                 legend.style = "e")
# Pseudo-3D wall effect on the border fence (15 steps of 5 km).
line <- st_geometry(fences_ortho)
for (i in 1:15){
  line <- st_geometry(line) + c(0,5000)
  plot(st_geometry(line), col= "#565b6380",lwd = 2 ,add= T)
}
plot(st_geometry(line), col= "#3d3c3c",lwd = 2 ,add= T)
dev.off()
###########################
# 4 - Deaths at the border
###########################
# Data import and preparation ------
# Raw export of the IOM "Missing Migrants" project.
iom <- read.csv("data/iom/MissingMigrants-Global-2019-10-29T14-11-50.csv", stringsAsFactors = F)
# Coordinate handling: drop rows without coordinates, then split the
# "lat, lon" strings into a two-column numeric matrix.
iom <- iom[(iom$Location.Coordinates)!="",]
latlon <- matrix(as.numeric(unlist(strsplit(iom$Location.Coordinates, split = ", "))), ncol = 2, byrow = T)
colnames(latlon) <- c("lat", 'lon')
iom <- cbind(iom, latlon)
# Select and rename variables; convert numeric fields.
iom <- iom[,c("Web.ID","Reported.Year","Total.Dead.and.Missing","Number.of.Survivors","Region.of.Incident","lat","lon")]
colnames(iom) <- c("id","year","deads","survivors","region","latitude","longitude")
iom$deads <- as.numeric(iom$deads)
iom <- iom[!is.na(iom$deads),]
iom$latitude <- as.numeric(iom$latitude)
iom$longitude <- as.numeric(iom$longitude)
# Convert to sf objects and reproject to both map templates.
iom_sf <- st_as_sf(iom, coords = c("longitude", "latitude"), crs = 4326, agr = "constant")
iom_aea <- st_transform(iom_sf,crs = albers)
iom_ortho <- st_transform(iom_sf,crs = ortho)
# Clip to each template's bounding box.
iom_ortho <- st_intersection(x = iom_ortho, st_geometry(bbox_ortho))
iom_aea <- st_intersection(x = iom_aea, st_geometry(bbox_aea))
# Regional comparisons ------
png("img/fig17.png", width = 1500, height = 1000, res = 150)
par(mar=c(8,8,4,2))
# Aggregate deaths by geographical region.
med <- aggregate(iom_sf$deads,list(iom_sf$region), sum, simplify = TRUE )
# Rename columns; rounded grand total (computed but not shown on this figure).
colnames(med) <- c("region","nb")
total <- round(sum(med$nb),-2)
# Order regions by decreasing number of deaths.
med <- med[order(med$nb,decreasing = TRUE),]
# Colours: one bar highlighted in red.
# NOTE(review): index 3 is assumed to be the US-Mexico Border row after the
# sort -- this hard-coded position breaks silently if the data changes.
cols <- rep("#ffbaba",length(med$region))
cols[c(3)] <- "red"
# Barplot
barplot(med$nb, ylab = "Nombre de personnes", names.arg = med$region, las = 2,
        border="#991313",col = cols, cex.names = 0.8, cex.axis = 0.8)
dev.off()
# Temporal evolution ------
# Restrict the dataset to the US-Mexico border area (note: this overwrites
# iom_sf for the rest of the script).
iom_sf <- iom_sf[iom_sf$region =="US-Mexico Border",]
png("img/fig18.png", width = 1500, height = 1000, res = 150)
par(mar=c(5,8,4,2))
# Aggregate deaths by year.
med <- aggregate(iom_sf$deads,list(iom_sf$year), sum, simplify = TRUE )
# Rename columns; rounded total used in the x-axis label.
colnames(med) <- c("year","nb")
total <- round(sum(med$nb),-1)
# Special label for 2019 (partial, ongoing year).
med[med$year==2019,"year"] <- "2019*"
# Barplot: full years in red, the partial 2019 in pale pink.
barplot(med$nb, xlab=paste0("Total sur la période: ",total,"\n(*) Du 1er janvier au 29 octobre 2019"),
        ylab="Nombre de personnes", names.arg=med$year,
        border="#991313",col=c("red","red","red","red","red","#ffbaba"),
        cex.names = 0.8, cex.axis = 0.8)
dev.off()
# Location map ------
# fig19: one dot per deadly event along the border.
png("img/fig19.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
plot(st_geometry(iom_ortho), pch=20, col= "#eb3850", cex = 0.5, add=T)
plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 3 ,add= T)
legtxt <- "Sur cette carte, chaque point\ncorrespond à un évenement\nayant donné la mort d'au\nmoins une personne\nsur la période 2014 - 2019"
text(-1700000, y = 5200000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Location and number of people ------
# fig20: circle area proportional to the number of dead/missing per event.
png("img/fig20.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
propSymbolsLayer(x = iom_ortho, var = "deads",
                 symbols = "circle", col = "#eb3850",
                 legend.pos = "left", border = "#ede6bb", lwd = 0.5,
                 legend.title.txt = "Nombre de morts\net portés disparus\nsur la période\n2014 - 2019",
                 legend.style = "e")
plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 3 ,add= T)
dev.off()
# Dorling cartogram ------
# m_weight controls how much each symbol may move during the Dorling layout:
# single-death events move freely (1), larger events less (0.5), the biggest
# (>= 25 deaths) stay anchored (0).
iom_ortho$m_weight <- 1
iom_ortho$m_weight[iom_ortho$deads > 1] <- 0.5
iom_ortho$m_weight[iom_ortho$deads >= 25] <- 0
deathsdor <- cartogram_dorling(x = st_jitter(iom_ortho),
                               weight = "deads",
                               m_weight = iom_ortho$m_weight, k = .4)
png("img/fig21.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
plot(st_geometry(deathsdor), pch=20, col= "#eb3850", border ="#ede6bb", cex = 0.1, add=T)
plot(st_geometry(fences_ortho), col= "#3d3c3c", lwd = 3 ,add= T)
legtxt <- "Sur cette carte, chaque cercle\ncorrespond à un évenement\nayant donné la mort d'au\nmoins une personne\nsur la période 2014 - 2019.\nLa surface des cercles\nest proportionnelle\nau nombre de personnes."
text(-1700000, y = 5200000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Dorling cartogram (with disaggregation) ------
# Expand each multi-death event into `deads` rows of one death each, so that
# every circle on the final map represents exactly one person.
all <- iom_ortho[,c("id","deads","year","geometry")]
iom_unique <- all[all$deads == 1,]
iom_multi <- all[all$deads > 1,]
# FIX: seq_len(nrow(...)) instead of 1:dim(...)[1] (safe when the table is
# empty), and the rows are replicated in one indexing step instead of the
# original one-row-per-rbind inner loop (which grew the object quadratically).
for (i in seq_len(nrow(iom_multi))){
  nb <- as.numeric(iom_multi[i,"deads"])[1]
  tmp <- iom_multi[i,]
  tmp$deads <- 1
  iom_unique <- rbind(iom_unique, tmp[rep(1, nb), ])
}
png("img/fig22.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
deathsdor2 <- cartogram_dorling(x = st_jitter(iom_unique),weight = "deads", k = .003)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
plot(st_geometry(deathsdor2), pch=20, col= "#eb3850", border ="#ede6bb", cex = 0.1, add=T)
plot(st_geometry(fences_ortho), col= "#3d3c3c", lwd = 3 ,add= T)
# FIX: the original legend text contained "\nnsur" (stray doubled "n").
legtxt <- "Sur cette carte, chaque point\ncorrespond à une personne morte\nou portée disparue\nsur la période 2014 - 2019"
text(-1700000, y = 5200000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Animated map ------
# Import & handling: re-import the raw IOM file, this time keeping the month.
iom <- read.csv("data/iom/MissingMigrants-Global-2019-10-29T14-11-50.csv", stringsAsFactors = F)
iom <- iom[(iom$Location.Coordinates)!="",]
latlon <- matrix(as.numeric(unlist(strsplit(iom$Location.Coordinates, split = ", "))), ncol = 2, byrow = T)
colnames(latlon) <- c("lat", 'lon')
iom <- cbind(iom, latlon)
colnames(iom)  # leftover interactive inspection; only prints the column names
iom <- iom[,c("Web.ID","Reported.Year","Reported.Month","Total.Dead.and.Missing","Region.of.Incident","lat","lon")]
colnames(iom) <- c("id","year","month","deads","region","latitude","longitude")
iom$deads <- as.numeric(iom$deads)
iom <- iom[!is.na(iom$deads),]
iom$latitude <- as.numeric(iom$latitude)
iom$longitude <- as.numeric(iom$longitude)
# Convert to an sf object, reproject and clip to the ortho template.
iom_sf <- st_as_sf(iom, coords = c("longitude", "latitude"), crs = 4326, agr = "constant")
iom_ortho <- st_transform(iom_sf,crs = ortho)
iom_ortho <- st_intersection(x = iom_ortho, st_geometry(bbox_ortho))
# Aggregate deaths by "Month Year" label.
iom_ortho$date <- paste0(iom_ortho$month," ",iom_ortho$year)
bymonth <- aggregate(iom_ortho$deads,list(iom_ortho$date), sum, simplify = TRUE )
colnames(bymonth) <- c("date","deads")
# Build the complete, ordered month sequence Jan 2014 .. Dec 2019 so that
# months without any event still get a frame.
m <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
y <- c("2014","2015","2016","2017","2018","2019")
for (i in 1:length(y)){
  d <- paste0(m," ",y[i])
  if (i == 1) { all <- d } else {all <- c(all,d)}
}
all <- as.data.frame(all)
all$id <- as.numeric(row.names(all))
colnames(all) <- c("date","id")
all <- merge (x = all, y = bymonth,
              by.x = "date",
              by.y = "date",
              all.x = T)
all <- all[order(all$id),]
# Attach the sequential frame id to every event.
iom_ortho <- merge (x = iom_ortho, y = all[,c("date","id")],
                    by.x = "date",
                    by.y = "date",
                    all.x = T)
iom_ortho <- iom_ortho[,c("id.y","date","deads","geometry")]
colnames(iom_ortho) <- c("id","date","deads","geometry")
# Cartography: one PNG frame per month; frames are assembled into a GIF below.
inches <- 0.8
fixmax <- max(iom_ortho$deads)  # common scale so symbols are comparable across frames
# FIX: `xmin` (left edge of the running-counter bar) was used below but never
# defined anywhere in the script; it is now set explicitly. -1900000 matches
# the left edge of the title rectangle -- TODO confirm against the intended
# original rendering.
xmin <- -1900000
for (i in seq_along(all$id)){
  mapdate <- as.character(all$date[i])
  # Zero-pad the frame number so files sort correctly for ImageMagick.
  if(i < 10) { num <- paste0("0",i) } else { num <- i }
  file <- paste0("tmp/",num,".png")
  png(file, width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
  par(mar = c(0,0,0,0))
  # Dark base map (ocean, drop shadow, regions, rivers).
  plot(st_geometry(bbox_ortho), col= "#555760", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
  plot(st_geometry(subregions_ortho) + c(-10000, -10000), col ="#303135", border = NA, add = T)
  plot(st_geometry(subregions_ortho), col= "#4d4e54", border = NA, cex = 0.5, add=T)
  plot(st_geometry(rivers_ortho), col= "#858585",lwd = 1 ,add= T)
  # Texts
  title <- "DEAD AND MISSING MIGRANTS, 2014 - 2019"
  source <- "Data source: IOM, 2019"
  # NOTE(review): this reassigns the global `authors` used by the lay_*()
  # templates; nothing after this loop reads it, but beware when reordering.
  authors <- "Map designed by Nicolas Lambert and Ronan Ysebaert, 2019"
  rect(-1900000, 6040000, -1900000 + 3000000, 6040000 + 150000, border = NA, col = "#00000080")
  text(-1700000, y = 6100000, title , cex = 2.5, pos = 4, font = 2, col="#ffe100")
  text(-1700000, y = 4900000, source , cex = 0.5, pos = 4, font = 2, col="#ffe100")
  text(-1700000, y = 4850000, authors , cex = 0.5, pos = 4, font = 2, col="#ffe100")
  text(-1700000, y = 5200000, substr(mapdate,1,3) , cex = 2, pos = 4, font = 2, col="#ffe100")
  text(-1500000, y = 5200000, substr(mapdate,5,9) , cex = 4, pos = 4, font = 2, col="#ffe100")
  # Running counter: cumulated deaths up to the current frame, drawn as a bar.
  yref <- 4980000
  height <- 100000
  total <- sum(iom_ortho$deads[iom_ortho$id <= i])
  val <- total *920  # bar length in map units per person
  rect(xmin, yref, xmin + val, yref + height, border = NA, col = "#ffe100")
  text(-1680000, y = 5020000, paste0(total, " people since January 1, 2014") , cex = 1.6, pos = 4, font = 2, col="#eb3850")
  layer1 <- iom_ortho[iom_ortho$id <= i,]  # all events so far (faded)
  layer2 <- iom_ortho[iom_ortho$id == i,]  # this month's events (highlighted)
  propSymbolsLayer(x = layer1, var = "deads",
                   symbols = "circle", col = "#eb385040",
                   inches = inches, fixmax = fixmax,
                   legend.pos = NA, border = NULL, lwd = 0.5)
  plot(st_geometry(fences_ortho), col= "#bfbfbf",lwd = 1.5 ,add= T)
  # Pseudo-3D wall effect for the border fence.
  # FIX: the original inner loop reused `i`, shadowing the frame index of the
  # outer loop; renamed to `k` to avoid the clobber.
  line <- st_geometry(fences_ortho)
  for (k in 1:30){
    line <- st_geometry(line) + c(0,2000)
    plot(st_geometry(line), col= "#bfbfbf70",lwd = 1 ,add= T)
  }
  plot(st_geometry(line), col= "#bfbfbf",lwd = 1.5 ,add= T)
  if (length(layer2$deads) > 0){ propSymbolsLayer(x = layer2, var = "deads",
                                                  symbols = "circle", col = "#ffe100",
                                                  inches = inches, fixmax = fixmax,
                                                  legend.pos = NA, border = "black", lwd = 1)
  }
  dev.off()
}
# Assemble all frames into one GIF using ImageMagick (`convert` must be on PATH).
system("convert -loop 1 -delay 40 tmp/*.png img/animate.gif")
# Interactive map ------
# NOTE(review): this section reads a different (older) export of the IOM file
# than the rest of the script (2019-09-04 vs 2019-10-29) -- confirm intended.
iom <- read.csv("data/iom/MissingMigrants-Global-2019-09-04T11-59-55.csv", stringsAsFactors = F)
iom <- iom[(iom$Location.Coordinates)!="",]
iom <- iom[iom$Region.of.Incident =="US-Mexico Border",]
latlon <- matrix(as.numeric(unlist(strsplit(iom$Location.Coordinates, split = ", "))), ncol = 2, byrow = T)
colnames(latlon) <- c("lat", 'lon')
iom <- cbind(iom, latlon)
iom <- iom[,c("Reported.Year","Total.Dead.and.Missing","lat","lon","Location.Description","Cause.of.Death","Information.Source")]
colnames(iom) <- c("year","deads","lat","lon","location","cause","source")
fences <- geojson_sf("data/data.world/border-fence.geojson")
# Disaggregation: one row per person (same approach as fig22).
iom_unique <- iom[iom$deads == 1,]
iom_multi <- iom[iom$deads > 1,]
for (i in 1:dim(iom_multi)[1]){
  nb <- as.numeric(iom_multi[i,"deads"])[1]
  tmp <- iom_multi[i,]
  tmp$deads <- 1
  for (j in 1:nb){ iom_unique <- rbind(iom_unique,tmp)}
}
iom <- iom_unique
# Custom marker icon.
pins <- makeIcon(
  iconUrl = "data/pin.svg",
  iconWidth = 30, iconHeight = 30,
  iconAnchorX = 15, iconAnchorY = 15
)
# HTML popup content for each marker.
iom$label <- paste0(
"<h1>",iom$cause,"</h1>
<h3>year: </b>",iom$year,"<br/>
location: ",iom$location,"</h3>
<i>Source: ",iom$source,"</i>"
)
# Build the leaflet widget and save it as a standalone HTML page.
m <- leaflet(iom) %>%
  addProviderTiles(providers$Esri.WorldStreetMap) %>%
  setView(lng = -104, lat = 30, zoom = 06) %>%
  addMarkers(~lon, ~lat, popup = ~label, clusterOptions = markerClusterOptions(), icon = pins ) %>%
  addScaleBar(position = "bottomleft") %>%
  addPolylines(data = fences, color = "black", weight = 7, opacity = 1)
saveWidget(m, file="leaflet.html", title = "The Border Kills", selfcontained = TRUE)
# #
# ♥ FAIRE DES CARTES AVEC R ♥ #
# APPLICATION A LA FRONTIERE ETATS-UNIS / MEXIQUE #
# NICOLAS LAMBERT, RONAN YSEBAERT, UMS RIATE, NOV.2019 #
# #
#############################################################
# This program produces all the graphical outputs of the presentation.
# Shared attribution string used by the layout/template functions below.
authors <- "N. Lambert & R. Ysebaert, 2019\nData source: IOM, Didelon, Vandermotten, Dessouroux, (c) OpenStreetMap contributors, 2019"
######################################################################
# 0 Préparation des données et création des modèles cartographiques
#####################################################################
# Appel des librairies ------
library("sf")
library("rnaturalearth")
library("geojsonsf")
library("cartography")
library("cartogram")
library("leaflet")
library("SpatialPosition")
library("units")
library("OECD")
library("ggplot2")
library("ggthemes")
library("osmdata")
library("htmlwidgets")
library("animation")
# Import geometries ------
# Countries - Natural Earth (kept: Mexico and the USA only)
countries <- ne_countries(scale = 50, type = "countries", continent = NULL,
                          country = NULL, geounit = NULL, sovereignty = NULL,
                          returnclass = "sf")
countries <- countries[countries$adm0_a3 %in% c("MEX","USA"),]
# Rivers - Natural Earth
rivers <- ne_download(scale = 50, type = "rivers_lake_centerlines",
                      category = "physical", returnclass = "sf")
# Coastline - Natural Earth
coastline <- ne_download(scale = 50, type = "coastline",
                         category = "physical", returnclass = "sf")
# Oceans - Natural Earth
ocean <- ne_download(scale = 50, type = "ocean", category = "physical",
                     returnclass = "sf")
# -- Source: "Cartographier le monde à l'échelle infranationale" (CIST) --
subregions <- st_read(dsn = "data/regions/mex_us_admin.shp" ,
                      options = "ENCODING=UTF-8", stringsAsFactors = FALSE)
# -- Source: data.world (https://data.world/carlvlewis/border-fence-boundaries-u-s-mexico)
# Border fence / separation wall
fences <- geojson_sf("data/data.world/border-fence.geojson")
# Shape the geometries and build a map template [Albers projection] ------
# Projection choice: Albers equal-area centred on the border region.
albers <- "+proj=aea +lat_1=14.5 +lat_2=32.5 +lat_0=24 +lon_0=-105 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"
countries_aea <- st_transform(countries,crs = albers)
fences_aea <- st_transform(fences,crs = albers)
rivers_aea <- st_transform(rivers,crs = albers)
coastline_aea <- st_transform(coastline,crs = albers)
ocean_aea <- st_transform(ocean,crs = albers)
subregions_aea <- st_transform(subregions,crs = albers)
subregions_all <- st_transform(subregions,crs = albers)
# Extent choice (somewhat manual step, tuned to optimise the template extent
# for the PNG exports).
bb_aea <- c(-1342784.0, -739750.5, 793341.2, 1317849.8)
d <- 100000
bbox_aea <- st_as_sfc(st_bbox(c(xmin = bb_aea[1]-2*d , xmax = bb_aea[3]+2*d,
                                ymin = bb_aea[4]+d, ymax = bb_aea[2]-d),
                              crs = albers))
# Intersect the layers with the bbox.
subregions_aea <- st_intersection(x = subregions_aea, st_geometry(bbox_aea))
coastline_aea <- st_intersection(x = coastline_aea, y = bbox_aea)
rivers_aea <- st_intersection(x = rivers_aea, y = bbox_aea)
countries_aea <- st_intersection(x = countries_aea, y = bbox_aea)
# Template builder: draws the base map (ocean, drop shadow, regions, coast,
# rivers, fence) plus the layout; callers then add thematic layers on top.
lay_aea <- function(title = ""){
  par(mar = c(0,0,1.2,0))
  plot(st_geometry(ocean_aea), col= "#b8d5e3", border = NA, xlim = bb_aea[c(1,3)],
       ylim = bb_aea[c(2,4)])
  plot(st_geometry(subregions_aea) + c(-10000, -10000), col ="#827e6c50",
       border = NA, add = T)
  plot(st_geometry(subregions_aea), col= "#ede6bb", border = "white",
       cex = 0.5, add=T)
  plot(st_geometry(coastline_aea), col= "#6d9cb3",lwd = 1 ,add= T)
  plot(st_geometry(rivers_aea), col= "#6d9cb3",lwd = 1 ,add= T)
  plot(st_geometry(fences_aea), col= "#3d3c3c",lwd = 3 ,add= T)
  layoutLayer(title = title,
              author = authors,
              scale = 300, south = TRUE, frame = TRUE,
              col = "#6d9cb3", coltitle = "white")
}
sizes_aea <- getFigDim(x = bbox_aea, width = 1500,mar = c(0,0,1.2,0), res = 150)
png("img/fig01.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
lay_aea("Template cartographique 1 (projection Albers)")
dev.off()
# Shape the geometries and build a map template [orthographic projection] ------
# Projection choice: orthographic, tilted to give a "globe view" of the border.
ortho <- "+proj=ortho +lat_0=-35 +lon_0=-104 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs"
countries_ortho <- st_transform(countries,crs = ortho)
fences_ortho <- st_transform(fences,crs = ortho)
rivers_ortho <- st_transform(rivers,crs = ortho)
coastline_ortho <- st_transform(coastline,crs = ortho)
ocean_ortho <- st_transform(ocean,crs = ortho)
subregions_ortho <- st_transform(subregions,crs = ortho)
# Extent choice (manual step to optimise the template extent for PNG export).
bb_ortho <- c(-1668000, 5100000, 934863, 5900000)
d <- 100000
bbox_ortho <- st_as_sfc(st_bbox(c(xmin = bb_ortho[1]-2*d , xmax = bb_ortho[3]+2*d,
                                  ymin = bb_ortho[4]+3.5*d, ymax = bb_ortho[2]-3.5*d),
                                crs = ortho))
# Intersect the layers with the bbox.
subregions_ortho <- st_intersection(x = subregions_ortho, st_geometry(bbox_ortho))
rivers_ortho <- st_intersection(x = rivers_ortho, y = bbox_ortho)
coastline_ortho <- st_intersection(x = coastline_ortho, y = bbox_ortho)
# Template builder (same layering as lay_aea, on the ortho geometries).
lay_ortho <- function(title = ""){
  par(mar = c(0,0,1.2,0))
  plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
  plot(st_geometry(subregions_ortho) + c(-10000, -10000), col ="#827e6c50", border = NA, add = T)
  plot(st_geometry(subregions_ortho), col= "#ede6bb", border = "white", cex = 0.5, add=T)
  plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
  plot(st_geometry(rivers_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
  plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 3 ,add= T)
  layoutLayer(title = title,
              author = authors,
              scale = 300, south = TRUE, frame = TRUE,
              col = "#6d9cb3", coltitle = "white")
}
sizes_ortho <- getFigDim(x = bbox_ortho, width = 1500,mar = c(0,0,1.2,0), res = 150)
png("img/fig02.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Template cartographique 2 (projection orthographique)")
dev.off()
# Template variant with the fence drawn in 2.5D ------
lay_ortho2 <- function(title = ""){
  # Local copy of the attribution string (shadows the global `authors`).
  authors <- "N. Lambert & R. Ysebaert, 2019\nData source: IOM, Didelon, Vandermotten, Dessouroux, (c) OpenStreetMap contributors, 2019"
  par(mar = c(0,0,1.2,0))
  plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
  plot(st_geometry(subregions_ortho) + c(-10000, -10000), col ="#827e6c50", border = NA, add = T)
  plot(st_geometry(subregions_ortho), col= "#ede6bb", border = "white", cex = 0.5, add=T)
  plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
  plot(st_geometry(rivers_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
  plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 2 ,add= T)
  # Extrude the fence line upwards (20 steps of 5 km) to fake a 3D wall.
  line <- st_geometry(fences_ortho)
  for (i in 1:20){
    line <- st_geometry(line) + c(0,5000)
    plot(st_geometry(line), col= "#565b6380",lwd = 2 ,add= T)
  }
  plot(st_geometry(line), col= "#3d3c3c",lwd = 2 ,add= T)
  layoutLayer(title = title,
              author = authors,
              scale = 300, south = TRUE, frame = TRUE,
              col = "#6d9cb3", coltitle = "white")
}
sizes_ortho <- getFigDim(x = bbox_ortho, width = 1500,mar = c(0,0,1.2,0), res = 150)
png("img/fig03.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho2("Template cartographique 2 (projection orthographique)")
dev.off()
# Import socio-economic data and join to the geometries ------
# GDP and population tables, keyed by regional admin id.
pib <- read.csv("data/regions/PIB.csv", sep = "\t",encoding = "UTF-8", dec = ",",
                stringsAsFactors=FALSE)
pop <- read.csv("data/regions/POP.csv", sep = "\t", encoding = "UTF-8", dec = ",",
                stringsAsFactors=FALSE)
# Left-join both tables onto each projected regional layer.
subregions_aea <- merge (x = subregions_aea, y = pib,
                         by.x = "ID_ADMIN_1",
                         by.y = "ID_ADMIN",
                         all.x = TRUE)
subregions_aea <- merge (x = subregions_aea, y = pop,
                         by.x = "ID_ADMIN_1",
                         by.y = "ID_ADMIN",
                         all.x = TRUE)
subregions_ortho <- merge (x = subregions_ortho, y = pib,
                           by.x = "ID_ADMIN_1",
                           by.y = "ID_ADMIN",
                           all.x = TRUE)
subregions_ortho <- merge (x = subregions_ortho, y = pop,
                           by.x = "ID_ADMIN_1",
                           by.y = "ID_ADMIN",
                           all.x = TRUE)
subregions_all <- merge (x = subregions_all, y = pib,
                         by.x = "ID_ADMIN_1",
                         by.y = "ID_ADMIN",
                         all.x = TRUE)
subregions_all <- merge (x = subregions_all, y = pop,
                         by.x = "ID_ADMIN_1",
                         by.y = "ID_ADMIN",
                         all.x = TRUE)
##############################################################
# 1 - Comparative statistics using OECD data
###############################################################
# GDP per capita ------
# Download PDB_LV data for the USA, Mexico and the OECD aggregate.
df <- get_dataset(dataset = "PDB_LV",
                  filter = list(c("MEX", "USA","OECD"), "T_GDPPOP", "CPC"))
# Convert the date to numeric.
df$obsTime <- as.numeric(df$obsTime)
# Graph
# NOTE(review): the ggplot object is not wrapped in print(); this renders at
# the top level of an interactive session but produces an empty PNG when the
# file is source()d -- confirm intended usage.
png("img/fig05.png", width = 1500, height = 1000, res = 150)
ggplot(data = df, aes(x = obsTime, y = obsValue, color = LOCATION)) +
  geom_line(size = 1) +
  labs(x = NULL, y = "Dollars, prix courant", color = NULL,
       title = "Évolution comparée du PIB par habitant (Mexique - USA - OCDE)") +
  theme_hc() +
  scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))
dev.off()
# Population under 20 ------
df <- as.data.frame(get_dataset(dataset = "POP_PROJ",
                                filter = list(c("MEX","USA","OECD"),"TT","D1TTR5Y4")))
df$obsTime <- as.numeric(df$obsTime)
# Graph (same print() caveat as above).
png("img/fig06.png", width = 1500, height = 1000, res = 150)
ggplot(data = df, aes(x = obsTime, y = obsValue, color = LOCATION)) +
  geom_line(size = 1) +
  labs(x = NULL, y = "Part de la population totale (%)", color = NULL,
       title = "Évolution comparée des moins de 20 ans (Mexique - USA - OCDE)") +
  theme_hc() +
  scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))
dev.off()
################################################
# 2 - Mapping spatial discontinuities
################################################
# Demographic ageing (version 1) ------
# Choropleth of the ageing index on the Albers template, with discontinuity
# lines between neighbouring regions.
png("img/fig07.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_aea), col= "#b8d5e3", border = NA, xlim = bb_aea[c(1,3)], ylim = bb_aea[c(2,4)])
choroLayer(x = subregions_aea, var = "POP65_POP15",
           breaks = c(min(subregions_aea$POP65_POP15, na.rm = T),
                      20,25,35,50,65, max(subregions_aea$POP65_POP15, na.rm = T)),
           col = carto.pal(pal1 = "green.pal", n1 = 3, pal2 = "red.pal", n2 = 3),
           legend.pos = c(-1400000 , -600000),
           legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
           legend.title.txt = "Rapport entre la population âgée de plus de 65 ans\net la population âgée de moins de 15 ans\nen 2015 (%)",
           border = NA, add = TRUE)
subregions.borders <- getBorders(subregions_aea)
discLayer(x = subregions.borders, df = subregions_aea,
          var = "POP65_POP15", col="black", nclass=3,
          method="equal", threshold = 0.3, sizemin = 0.5,
          sizemax = 10, type = "abs",legend.values.rnd = 0,
          legend.title.txt = "Discontinuités sur l'indice de veillissement 2015\n(différences absolues)",
          legend.pos = c(-1400000 , -300000), legend.title.cex = 0.7, legend.values.cex = 0.5,
          add = TRUE)
plot(st_geometry(coastline_aea), col= "#6d9cb3",lwd = 1 ,add= T)
layoutLayer(title = "Une barrière démographique...",
            author = authors,
            scale = 300, south = TRUE, frame = TRUE,
            col = "#6d9cb3", coltitle = "white")
dev.off()
# Vieillisement démographique (version 2) ------
png("img/fig08.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "POP65_POP15",
breaks = c(min(subregions_ortho$POP65_POP15, na.rm = T),
20,25,35,50,65, max(subregions_ortho$POP65_POP15, na.rm = T)),
col = carto.pal(pal1 = "green.pal", n1 = 3, pal2 = "red.pal", n2 = 3),
legend.pos = c(-1700000, 5000000),
legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
legend.title.txt = "Rapport entre la population âgée de plus de 65 ans\net la population âgée de moins de 15 ans\nen 2015 (%)",
border = NA, add = TRUE)
subregions.borders <- getBorders(subregions_ortho)
discontinuities <- discLayer(x = subregions.borders, df = subregions_ortho,
var = "POP65_POP15", col = "black", nclass=3,
method = "equal", threshold = 0.3, sizemin = 0.5,
sizemax = 10, type = "abs",legend.values.rnd = 0,
legend.title.txt = "Discontinuités sur l'indice\nde veillissement 2015\n(différences\nabsolues)",
legend.pos = c(-1700000, 5300000), legend.title.cex = 0.7,
legend.values.cex = 0.5, add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
layoutLayer(title = "Une barrière démographique...",
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
dev.off()
# Vieillisement démographique (version 3) ------
threshold <- 0.3
minvar <- as.numeric(quantile(discontinuities$disc, probs = c(1 - threshold)))
discontinuities <- discontinuities[discontinuities$disc >= minvar,]
discontinuities$height <- round(discontinuities$disc / 2,0)
extrude <- function(id){
line <- st_geometry(discontinuities[id,])
plot(line, col = "black",lwd = 2 ,add = TRUE)
nb <- as.numeric(discontinuities[id,"height"])[1]
for (j in 1:nb){
line <- st_geometry(line) + c(0,5000)
plot(st_geometry(line), col= "#ebd23490",lwd = 2 ,add = TRUE)
}
plot(line, col= "black",lwd = 2 ,add = TRUE)
}
png("img/fig09.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "POP65_POP15",
breaks = c(min(subregions_ortho$POP65_POP15, na.rm = T),
20,25,35,50,65, max(subregions_ortho$POP65_POP15, na.rm = T)),
col = carto.pal(pal1 = "green.pal", n1 = 3, pal2 = "red.pal", n2 = 3),
legend.pos = c(-1700000, 5000000),
legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
legend.title.txt = "Rapport entre la population âgée de plus de 65 ans\net la population âgée de moins de 15 ans\nen 2015 (%)",
border = NA, add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
for (i in 1:length(discontinuities$disc)) {
extrude(i)
}
legtxt <- "Sur cette carte, la hauteur\ndes barrières est proportionnelle\nà la valeur des discontinuités\nabsolues sur l'indice du\nvieillissement en 2015."
text(-1700000, y = 5400000, legtxt , cex = 0.9, pos = 4, font = 2)
layoutLayer(title = "Une barrière démographique...",
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
dev.off()
# PIB par habitant (version 1) ------
png("img/fig10.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_aea), col= "#b8d5e3", border = NA, xlim = bb_aea[c(1,3)], ylim = bb_aea[c(2,4)])
choroLayer(x = subregions_aea, var = "PIB100_2017",
breaks = c(min(subregions_aea$PIB100_2017, na.rm = T),
75,90,100,125,150,200, max(subregions_aea$PIB100_2017, na.rm = T)),
col = carto.pal(pal1 = "red.pal", n1 = 3, pal2 = "green.pal", n2 = 4),
legend.pos = c(-1400000 , -600000),
legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
legend.title.txt = "PIB par habitant 2017\n(100 = moyenne mondiale)",
border = NA,
add = TRUE)
plot(st_geometry(coastline_aea), col= "#6d9cb3",lwd = 1 ,add= T)
subregions.borders <- getBorders(subregions_aea)
discLayer(x = subregions.borders, df = subregions_aea,
var = "PIB100_2017", col="black", nclass=3,
method="equal", threshold = 0.2, sizemin = 0.5,
sizemax = 10, type = "abs",legend.values.rnd = 0,
legend.title.txt = "Discontinuités de PIB par habitant 2017\n(différences absolues)",
legend.pos = c(-1400000 , -300000), legend.title.cex = 0.7, legend.values.cex = 0.5,
add = TRUE)
layoutLayer(title = "Doublée d'un mur de richesse... Mais quelles conséquences ?",
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
dev.off()
# PIB par habitant (version 2) ------
png("img/fig11.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "PIB100_2017",
breaks = c(min(subregions_ortho$PIB100_2017, na.rm = T),
75,90,100,125,150,200, max(subregions_ortho$PIB100_2017, na.rm = T)),
col = carto.pal(pal1 = "red.pal", n1 = 3, pal2 = "green.pal", n2 = 4),
legend.pos = c(-1700000, 5000000),
legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
legend.title.txt = "PIB par habitant 2017\n(100 = moyenne mondiale)",
border = NA,
add = TRUE)
subregions.borders <- getBorders(subregions_ortho)
discontinuities <- discLayer(x = subregions.borders, df = subregions_aea,
var = "PIB100_2017", col="black", nclass=3,
method="equal", threshold = 0.2, sizemin = 0.5,
sizemax = 10, type = "abs",legend.values.rnd = 0,
legend.title.txt = "Discontinuités de PIB par habitant 2017\n(différences absolues)",
legend.pos = c(-1700000, 5300000), legend.title.cex = 0.7, legend.values.cex = 0.5,
add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
layoutLayer(title = "Doublée d'un mur de richesse... Mais quelles conséquences ?",
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
dev.off()
# PIB par habitant (version 3) ------
threshold <- 0.3
minvar <- as.numeric(quantile(discontinuities$disc, probs = c(1 - threshold)))
discontinuities <- discontinuities[discontinuities$disc >= minvar,]
discontinuities$height <- round(discontinuities$disc / 8,0)
extrude <- function(id){
line <- st_geometry(discontinuities[id,])
plot(line, col= "black",lwd = 2 ,add= T)
nb <- as.numeric(discontinuities[id,"height"])[1]
for (j in 1:nb){
line <- st_geometry(line) + c(0,5000)
plot(st_geometry(line), col= "#ebd23490",lwd = 2 ,add= T)
}
plot(line, col= "black",lwd = 2 ,add= T)
}
extrude <- function(id){
line <- st_geometry(discontinuities[id,])
plot(line, col= "black",lwd = 2 ,add= T)
nb <- as.numeric(discontinuities[id,"height"])[1]
for (j in 1:nb){
line <- st_geometry(line) + c(0,5000)
plot(st_geometry(line), col= "#ebd23490",lwd = 2 ,add= T)
}
plot(line, col= "black",lwd = 2 ,add= T)
}
png("img/fig12.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,1.2,0))
plot(st_geometry(bbox_ortho), col= "#b8d5e3", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
choroLayer(x = subregions_ortho, var = "PIB100_2017",
breaks = c(min(subregions_ortho$PIB100_2017, na.rm = T),
75,90,100,125,150,200, max(subregions_ortho$PIB100_2017, na.rm = T)),
col = carto.pal(pal1 = "red.pal", n1 = 3, pal2 = "green.pal", n2 = 4),
legend.pos = c(-1700000, 5000000),
legend.horiz = TRUE, legend.title.cex = 0.7, legend.values.cex = 0.5,
legend.title.txt = "PIB par habitant 2017\n(100 = moyenne mondiale)",
border = NA,
add = TRUE)
plot(st_geometry(coastline_ortho), col= "#6d9cb3",lwd = 1 ,add= T)
for (i in 1:length(discontinuities$disc))
{
extrude(i)
}
legtxt <- "Sur cette carte, la hauteur\ndes barrières est proportionnelle\nà la valeur des discontinuités\nabsolues sur le PIB par habitant\nen 2017."
text(-1700000, y = 5400000, legtxt , cex = 0.9, pos = 4, font = 2)
layoutLayer(title = "Doublée d'un mur de richesse... Mais quelles conséquences ?",
author = authors,
scale = 300, south = TRUE, frame = TRUE,
col = "#6d9cb3", coltitle = "white")
dev.off()
############
# 3 - Border Control - Visualisation des postes de contrôle
#############
# Extraction des données ------
# Convertir la bounding box en WGS 84
bbox <- st_transform(bbox_aea, 4326)
# Définir la requête (clé/valeur OSM sur bounding box)
opqbox <- opq(bbox = bbox , timeout = 5000)
opquery <- add_osm_feature(opq = opqbox, key = "barrier", value = "border_control")
feat <- osmdata_sf(opquery)
# Extraire les points qui répondent à la requête
featpt <- st_transform(feat$osm_points, albers)
featpt <- featpt[featpt[["barrier"]] %in% "border_control", ]
# Extraire les polygones qui répondent à la requête
featpo <- st_transform(feat$osm_polygons, albers)
st_geometry(featpo) <- st_centroid(st_geometry(featpo))
featpo$osm_id <- row.names(featpo)
# Combiner points et polygones, les intersecter avec la bounding box
featpt <- rbind(featpt[, c("osm_id", "geometry")], featpo[, c("osm_id", "geometry")])
poi_osm <- st_intersection(x = featpt, st_geometry(subregions_aea))
# Créer une grille sur l'espace d'étude
grid <- st_make_grid(subregions_aea, cellsize = 50000)
grid <- st_sf(grid)
# Compter le nombre de postes de police par points de grille
grid$ncops <- lengths(st_covers(grid, poi_osm))
# Calcul de la part du total sur l'espace d'étude
grid$dens <- (grid$ncops / sum(grid$ncops)) * 100
grid <- grid[grid$ncops != 0, ]
# Visualisation 1 (points) ------
png("img/fig13.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
par(mar = c(0,0,1.2,0))
lay_aea(paste0("Au moins ", sum(grid$ncops)," postes frontières..."))
plot(st_geometry(poi_osm), bg = "red", col = NA, pch = 21, cex = 0.8, add = TRUE)
legtxt <- "Chaque point rouge\nreprésente un poste\nfrontalier recensé\ndans OpenStreetMap."
text(-1400000, y = -100000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Visualisation 2 (figurés proportionnels) ------
png("img/fig14.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
lay_aea("Les hot-spots du contrôle frontalier - dénombrement")
propSymbolsLayer(grid, var = "ncops", col = "red", symbols = "square", add = T,
legend.pos = "left",
legend.title.cex = 0.7, legend.values.cex = 0.6,
legend.title.txt = "Nombre de postes frontière\n(zones de 50km²)")
dev.off()
# Visualisation 3 (Grille) ------
png("img/fig15.png", width = sizes_aea[1], height = sizes_aea[2], res = 150)
lay_aea("Les hot-spots du contrôle frontalier - Part du total")
choroLayer(x = grid, var = "dens",
breaks = c(min(grid$dens), 1, 2, 5, 10, max(grid$dens)),
col = carto.pal(pal1 = "brown.pal", n1 = 5),
legend.pos = "left", legend.title.cex = 0.7, legend.values.cex = 0.6,
legend.title.txt = "Part des postes frontaliers\nde l'espace d'étude (%)",
border = NA, add = TRUE)
plot(st_geometry(fences_aea), col= "#3d3c3c",lwd = 3 ,add= T)
dev.off()
# Visualisation 4 (Tours de contrôle) ------
# On affine la résolution des grilles (20 km)
grid <- st_make_grid(subregions_ortho, cellsize = 20000)
grid <- st_sf(grid)
poi_osm <- st_transform(poi_osm,ortho)
grid$ncops <- lengths(st_covers(grid, poi_osm))
grid <- grid[grid$ncops>0,]
png("img/fig16.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho(title = "Sacrées tours de contrôle !")
propSymbolsLayer(x = grid, var = "ncops", col = "darkred",
symbols = "bar",
inches = 1.3,
border = "white", lwd = 1, legend.pos = c(-1700000, 5000000),
legend.title.txt = "Nombre de postes frontières\n(zones de 20 km²)",
legend.style = "e")
line <- st_geometry(fences_ortho)
for (i in 1:15){
line <- st_geometry(line) + c(0,5000)
plot(st_geometry(line), col= "#565b6380",lwd = 2 ,add= T)
}
plot(st_geometry(line), col= "#3d3c3c",lwd = 2 ,add= T)
dev.off()
###########################
# 4 - Morts aux frontières
###########################
# Import et mise en forme des données ------
# Import du fichier brut (OIM)
iom <- read.csv("data/iom/MissingMigrants-Global-2019-10-29T14-11-50.csv", stringsAsFactors = F)
# Gestion des coordonnées
iom <- iom[(iom$Location.Coordinates)!="",]
latlon <- matrix(as.numeric(unlist(strsplit(iom$Location.Coordinates, split = ", "))), ncol = 2, byrow = T)
colnames(latlon) <- c("lat", 'lon')
iom <- cbind(iom, latlon)
# Sélection et renommage des variables, conversion au format numérique des champs
iom <- iom[,c("Web.ID","Reported.Year","Total.Dead.and.Missing","Number.of.Survivors","Region.of.Incident","lat","lon")]
colnames(iom) <- c("id","year","deads","survivors","region","latitude","longitude")
iom$deads <- as.numeric(iom$deads)
iom <- iom[!is.na(iom$deads),]
iom$latitude <- as.numeric(iom$latitude)
iom$longitude <- as.numeric(iom$longitude)
# Conversion en objet sf et reprojection
iom_sf <- st_as_sf(iom, coords = c("longitude", "latitude"), crs = 4326, agr = "constant")
iom_aea <- st_transform(iom_sf,crs = albers)
iom_ortho <- st_transform(iom_sf,crs = ortho)
# Découpage
iom_ortho <- st_intersection(x = iom_ortho, st_geometry(bbox_ortho))
iom_aea <- st_intersection(x = iom_aea, st_geometry(bbox_aea))
# Comparaisons régionales ------
png("img/fig17.png", width = 1500, height = 1000, res = 150)
par(mar=c(8,8,4,2))
# Agréger pae zone géographique
med <- aggregate(iom_sf$deads,list(iom_sf$region), sum, simplify = TRUE )
# Sélection des colonnes utiles, arrondis
colnames(med) <- c("region","nb")
total <- round(sum(med$nb),-2)
# Ordonner nombre de morts par zone géographique
med <- med[order(med$nb,decreasing = TRUE),]
# Gestion des couleurs
cols <- rep("#ffbaba",length(med$region))
cols[c(3)] <- "red"
# Barplot
barplot(med$nb, ylab = "Nombre de personnes", names.arg = med$region, las = 2,
border="#991313",col = cols, cex.names = 0.8, cex.axis = 0.8)
dev.off()
# Evolution temporelle------
# Extraction de la zone USA-Mexique
iom_sf <- iom_sf[iom_sf$region =="US-Mexico Border",]
png("img/fig18.png", width = 1500, height = 1000, res = 150)
par(mar=c(5,8,4,2))
# Agréger par années
med <- aggregate(iom_sf$deads,list(iom_sf$year), sum, simplify = TRUE )
# Gestion des labels, arrondis
colnames(med) <- c("year","nb")
total <- round(sum(med$nb),-1)
# Un petit label spécial pour 2019 (année en cours)
med[med$year==2019,"year"] <- "2019*"
# Barplot
barplot(med$nb, xlab=paste0("Total sur la période: ",total,"\n(*) Du 1er janvier au 29 octobre 2019"),
ylab="Nombre de personnes", names.arg=med$year,
border="#991313",col=c("red","red","red","red","red","#ffbaba"),
cex.names = 0.8, cex.axis = 0.8)
dev.off()
# Carte de localisation ------
png("img/fig19.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
plot(st_geometry(iom_ortho), pch=20, col= "#eb3850", cex = 0.5, add=T)
plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 3 ,add= T)
legtxt <- "Sur cette carte, chaque point\ncorrespond à un évenement\nayant donné la mort d'au\nmoins une personne\nsur la période 2014 - 2019"
text(-1700000, y = 5200000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Localisation et nombre de personnes ------
png("img/fig20.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
propSymbolsLayer(x = iom_ortho, var = "deads",
symbols = "circle", col = "#eb3850",
legend.pos = "left", border = "#ede6bb", lwd = 0.5,
legend.title.txt = "Nombre de morts\net portés disparus\nsur la période\n2014 - 2019",
legend.style = "e")
plot(st_geometry(fences_ortho), col= "#3d3c3c",lwd = 3 ,add= T)
dev.off()
# Cartogramme de Dorling ------
iom_ortho$m_weight <- 1
iom_ortho$m_weight[iom_ortho$deads > 1] <- 0.5
iom_ortho$m_weight[iom_ortho$deads >= 25] <- 0
deathsdor <- cartogram_dorling(x = st_jitter(iom_ortho),
weight = "deads",
m_weight = iom_ortho$m_weight, k = .4)
png("img/fig21.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
plot(st_geometry(deathsdor), pch=20, col= "#eb3850", border ="#ede6bb", cex = 0.1, add=T)
plot(st_geometry(fences_ortho), col= "#3d3c3c", lwd = 3 ,add= T)
legtxt <- "Sur cette carte, chaque cercle\ncorrespond à un évenement\nayant donné la mort d'au\nmoins une personne\nsur la période 2014 - 2019.\nLa surface des cercles\nest proportionnelle\nau nombre de personnes."
text(-1700000, y = 5200000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Cartogramme de Dorling (avec désagrégation) ------
all <- iom_ortho[,c("id","deads","year","geometry")]
iom_unique <- all[all$deads == 1,]
iom_multi <- all[all$deads > 1,]
for (i in 1:dim(iom_multi)[1]){
nb <- as.numeric(iom_multi[i,"deads"])[1]
tmp <- iom_multi[i,]
tmp$deads <- 1
for (j in 1:nb){ iom_unique <- rbind(iom_unique,tmp)}
}
png("img/fig22.png", width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
deathsdor2 <- cartogram_dorling(x = st_jitter(iom_unique),weight = "deads", k = .003)
lay_ortho("Migrants morts et portés disparus à la frontière USA-Mexique, 2014 - 2019")
plot(st_geometry(deathsdor2), pch=20, col= "#eb3850", border ="#ede6bb", cex = 0.1, add=T)
plot(st_geometry(fences_ortho), col= "#3d3c3c", lwd = 3 ,add= T)
legtxt <- "Sur cette carte, chaque point\ncorrespond à une personne morte\nou portée disparue\nnsur la période 2014 - 2019"
text(-1700000, y = 5200000, legtxt , cex = 0.9, pos = 4, font = 2)
dev.off()
# Carte animée ------
# Import & handeling
iom <- read.csv("data/iom/MissingMigrants-Global-2019-10-29T14-11-50.csv", stringsAsFactors = F)
iom <- iom[(iom$Location.Coordinates)!="",]
latlon <- matrix(as.numeric(unlist(strsplit(iom$Location.Coordinates, split = ", "))), ncol = 2, byrow = T)
colnames(latlon) <- c("lat", 'lon')
iom <- cbind(iom, latlon)
colnames(iom)
iom <- iom[,c("Web.ID","Reported.Year","Reported.Month","Total.Dead.and.Missing","Region.of.Incident","lat","lon")]
colnames(iom) <- c("id","year","month","deads","region","latitude","longitude")
iom$deads <- as.numeric(iom$deads)
iom <- iom[!is.na(iom$deads),]
iom$latitude <- as.numeric(iom$latitude)
iom$longitude <- as.numeric(iom$longitude)
# Conversion en objet sf, reprojection et découpage
iom_sf <- st_as_sf(iom, coords = c("longitude", "latitude"), crs = 4326, agr = "constant")
iom_ortho <- st_transform(iom_sf,crs = ortho)
iom_ortho <- st_intersection(x = iom_ortho, st_geometry(bbox_ortho))
# Aggregete & barplot
iom_ortho$date <- paste0(iom_ortho$month," ",iom_ortho$year)
bymonth <- aggregate(iom_ortho$deads,list(iom_ortho$date), sum, simplify = TRUE )
colnames(bymonth) <- c("date","deads")
m <- c("Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec")
y <- c("2014","2015","2016","2017","2018","2019")
for (i in 1:length(y)){
d <- paste0(m," ",y[i])
if (i == 1) { all <- d } else {all <- c(all,d)}
}
all <- as.data.frame(all)
all$id <- as.numeric(row.names(all))
colnames(all) <- c("date","id")
all <- merge (x = all, y = bymonth,
by.x = "date",
by.y = "date",
all.x = T)
all <- all[order(all$id),]
iom_ortho <- merge (x = iom_ortho, y = all[,c("date","id")],
by.x = "date",
by.y = "date",
all.x = T)
iom_ortho <- iom_ortho[,c("id.y","date","deads","geometry")]
colnames(iom_ortho) <- c("id","date","deads","geometry")
# Cartography
inches <- 0.8
fixmax <- max(iom_ortho$deads)
for (i in 1:length(all$id)){
mapdate <- as.character(all$date[i])
if(i < 10) { num <- paste0("0",i) } else { num <- i }
file <- paste0("tmp/",num,".png")
png(file, width = sizes_ortho[1], height = sizes_ortho[2], res = 150)
par(mar = c(0,0,0,0))
plot(st_geometry(bbox_ortho), col= "#555760", border = NA, xlim = bb_ortho[c(1,3)], ylim = bb_ortho[c(2,4)])
plot(st_geometry(subregions_ortho) + c(-10000, -10000), col ="#303135", border = NA, add = T)
plot(st_geometry(subregions_ortho), col= "#4d4e54", border = NA, cex = 0.5, add=T)
plot(st_geometry(rivers_ortho), col= "#858585",lwd = 1 ,add= T)
# Textes
title <- "DEAD AND MISSING MIGRANTS, 2014 - 2019"
source <- "Data source: IOM, 2019"
authors <- "Map designed by Nicolas Lambert and Ronan Ysebaert, 2019"
rect(-1900000, 6040000, -1900000 + 3000000, 6040000 + 150000, border = NA, col = "#00000080")
text(-1700000, y = 6100000, title , cex = 2.5, pos = 4, font = 2, col="#ffe100")
text(-1700000, y = 4900000, source , cex = 0.5, pos = 4, font = 2, col="#ffe100")
text(-1700000, y = 4850000, authors , cex = 0.5, pos = 4, font = 2, col="#ffe100")
text(-1700000, y = 5200000, substr(mapdate,1,3) , cex = 2, pos = 4, font = 2, col="#ffe100")
text(-1500000, y = 5200000, substr(mapdate,5,9) , cex = 4, pos = 4, font = 2, col="#ffe100")
# compteur
yref <- 4980000
height <- 100000
total <- sum(iom_ortho$deads[iom_ortho$id <= i])
val <- total *920
rect(xmin, yref, xmin + val, yref + height, border = NA, col = "#ffe100")
text(-1680000, y = 5020000, paste0(total, " people since January 1, 2014") , cex = 1.6, pos = 4, font = 2, col="#eb3850")
layer1 <- iom_ortho[iom_ortho$id <= i,]
layer2 <- iom_ortho[iom_ortho$id == i,]
propSymbolsLayer(x = layer1, var = "deads",
symbols = "circle", col = "#eb385040",
inches = inches, fixmax = fixmax,
legend.pos = NA, border = NULL, lwd = 0.5)
plot(st_geometry(fences_ortho), col= "#bfbfbf",lwd = 1.5 ,add= T)
line <- st_geometry(fences_ortho)
for (i in 1:30){
line <- st_geometry(line) + c(0,2000)
plot(st_geometry(line), col= "#bfbfbf70",lwd = 1 ,add= T)
}
plot(st_geometry(line), col= "#bfbfbf",lwd = 1.5 ,add= T)
if (length(layer2$deads) > 0){ propSymbolsLayer(x = layer2, var = "deads",
symbols = "circle", col = "#ffe100",
inches = inches, fixmax = fixmax,
legend.pos = NA, border = "black", lwd = 1)
}
dev.off()
}
# convert pngs to one gif using ImageMagick
system("convert -loop 1 -delay 40 tmp/*.png img/animate.gif")
# Carte interactive ------
iom <- read.csv("data/iom/MissingMigrants-Global-2019-09-04T11-59-55.csv", stringsAsFactors = F)
iom <- iom[(iom$Location.Coordinates)!="",]
iom <- iom[iom$Region.of.Incident =="US-Mexico Border",]
latlon <- matrix(as.numeric(unlist(strsplit(iom$Location.Coordinates, split = ", "))), ncol = 2, byrow = T)
colnames(latlon) <- c("lat", 'lon')
iom <- cbind(iom, latlon)
iom <- iom[,c("Reported.Year","Total.Dead.and.Missing","lat","lon","Location.Description","Cause.of.Death","Information.Source")]
colnames(iom) <- c("year","deads","lat","lon","location","cause","source")
fences <- geojson_sf("data/data.world/border-fence.geojson")
# Disaggregation
iom_unique <- iom[iom$deads == 1,]
iom_multi <- iom[iom$deads > 1,]
for (i in 1:dim(iom_multi)[1]){
nb <- as.numeric(iom_multi[i,"deads"])[1]
tmp <- iom_multi[i,]
tmp$deads <- 1
for (j in 1:nb){ iom_unique <- rbind(iom_unique,tmp)}
}
iom <- iom_unique
pins <- makeIcon(
iconUrl = "data/pin.svg",
iconWidth = 30, iconHeight = 30,
iconAnchorX = 15, iconAnchorY = 15
)
iom$label <- paste0(
"<h1>",iom$cause,"</h1>
<h3>year: </b>",iom$year,"<br/>
location: ",iom$location,"</h3>
<i>Source: ",iom$source,"</i>"
)
m <- leaflet(iom) %>%
addProviderTiles(providers$Esri.WorldStreetMap) %>%
setView(lng = -104, lat = 30, zoom = 06) %>%
addMarkers(~lon, ~lat, popup = ~label, clusterOptions = markerClusterOptions(), icon = pins ) %>%
addScaleBar(position = "bottomleft") %>%
addPolylines(data = fences, color = "black", weight = 7, opacity = 1)
saveWidget(m, file="leaflet.html", title = "The Border Kills", selfcontained = TRUE) |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% Verbose.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{str.Verbose}
\alias{str.Verbose}
\alias{Verbose.str}
\alias{str,Verbose-method}
\title{Prints the structure of an object if above threshold}
\description{
Prints the structure of an object if above threshold.
The output is \emph{not} indented.
}
\usage{
\method{str}{Verbose}(object, ..., level=this$defaultLevel)
}
\arguments{
\item{...}{Objects to be passed to \code{\link[utils]{str}}.}
\item{level}{A \code{\link[base]{numeric}} value to be compared to the threshold.}
}
\value{
Returns nothing.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{Verbose}}.
}
\keyword{internal}
\keyword{methods}
\keyword{programming}
| /man/str.Verbose.Rd | no_license | HenrikBengtsson/R.utils | R | false | false | 960 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% Verbose.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{str.Verbose}
\alias{str.Verbose}
\alias{Verbose.str}
\alias{str,Verbose-method}
\title{Prints the structure of an object if above threshold}
\description{
Prints the structure of an object if above threshold.
The output is \emph{not} indented.
}
\usage{
\method{str}{Verbose}(object, ..., level=this$defaultLevel)
}
\arguments{
\item{...}{Objects to be passed to \code{\link[utils]{str}}.}
\item{level}{A \code{\link[base]{numeric}} value to be compared to the threshold.}
}
\value{
Returns nothing.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{Verbose}}.
}
\keyword{internal}
\keyword{methods}
\keyword{programming}
|
#' Initialize a new Trajectory object.
#'
#' @param input trajectory to model cell progression. Wrapped result
#' of a trajectory inference by the dynverse/dynwrap library
#' @return Trajectory object
Trajectory <- function(input) {
# Create the adjacency matrix
network <- as.data.frame(input$milestone_network)
milestone_ids <- union(unique(network$from), unique(network$to))
adjMat <- matrix(0.0, nrow = length(milestone_ids), ncol = length(milestone_ids),
dimnames = list(milestone_ids, milestone_ids))
for (i in seq(nrow(network))){
from <- as.character(network[i, "from"])
to <- as.character(network[i, "to"])
edgeLength <- network[i, "length"]
adjMat[from, to] <- edgeLength
adjMat[to, from] <- edgeLength
}
progressions <- as.data.frame(input$progressions)
rownames(progressions) <- progressions$cell_id
progressions$cell_id <- NULL
colnames(progressions) <- gsub("percentage", "position",
colnames(progressions))
if (!("from" %in% colnames(progressions))){
stop("input Trajectory missing 'from' column")
}
if (!("to" %in% colnames(progressions))){
stop("input Trajectory missing 'to' column")
}
if (length(setdiff(unique(progressions$from), milestone_ids)) > 0) {
stop("milestones in progressions$from don't match those in milestone_network")
}
if (length(setdiff(unique(progressions$to), milestone_ids)) > 0) {
stop("milestones in progressions$to don't match those in milestone_network")
}
.Object <- new("Trajectory", adjMat = adjMat, progressions = progressions)
return(.Object)
}
#' Compute KNN weights based on geodesic distances for Trajectory objects
#' @importFrom stats quantile
#' @importFrom Matrix rowSums
#' @importFrom Matrix sparseMatrix
#' @importFrom matrixStats rowMaxs
#' @param object a Trajectory object
#' @param K Number of neighbors to consider.
#' @return a list of two items:
#' indices: matrix, cells X neighbors
#' Each row specifies indices of nearest neighbors
#' weights: matrix, cells X neighbors
#' Corresponding weights to nearest neighbors
setMethod("computeKNNWeights", signature(object = "Trajectory"),
function(object, K = round(sqrt(nrow(object)))) {
edgePos <- object@progressions$position
names(edgePos) <- rownames(object@progressions)
edgeAssoc <- t(object@progressions[, c("from", "to")])
colnames(edgeAssoc) <- rownames(object@progressions)
distmat <- calculateTrajectoryDistances(adjMat = object@adjMat,
edgeAssoc = edgeAssoc,
edgePos = edgePos)
kQuantile <- K / nrow(object@progressions)
knnmat <- apply(distmat, 1, function(d) {
partition <- quantile(d, kQuantile, na.rm=T)
d[d > partition] <- Inf
return(d)
})
nn <- t(apply(distmat, 1, function(r) {
order(r)[1:K]
}))
d <- lapply(seq(nrow(nn)), function(i) {
distmat[i, nn[i, ]]
})
d <- do.call(rbind, d)
d[is.na(d)] = 0
sigma <- rowMaxs(d)
sigma[sigma == 0] <- 1.0 # occurs if all nearest neighbors at same point
sparse_weights <- exp(-1 * (d * d) / sigma ^ 2)
# Normalize row sums = 1
weightsNormFactor <- rowSums(sparse_weights)
weightsNormFactor[weightsNormFactor == 0] <- 1.0
sparse_weights <- sparse_weights / weightsNormFactor
rownames(nn) <- rownames(object)
rownames(d) <- rownames(object)
return(list(indices = nn, weights = sparse_weights))
})
#' Generate meta-data associated with this trajectory
#'
#' Creates a categorical variable mapping cells to edges
#' and numeric variables for their position along edges
#'
#' @importFrom igraph graph_from_adjacency_matrix
#' @importFrom igraph ends
#' @importFrom igraph E
#'
#' @param trajectory Trajectory on which to operate
#' @return metaData dataframe with meta-data
createTrajectoryMetaData <- function(trajectory){
adjMat <- trajectory@adjMat
progressions <- trajectory@progressions
net <- igraph::graph_from_adjacency_matrix(adjMat, weighted = TRUE,
mode = "undirected")
edges <- igraph::ends(net, igraph::E(net), names = TRUE)
meta <- data.frame(row.names = rownames(progressions))
meta[, "TrajectoryEdge"] <- ""
for (i in seq(nrow(edges))) {
from <- edges[i, 1]
to <- edges[i, 2]
cells <- progressions[
(progressions$from == from) & (progressions$to == to),
, drop = FALSE
]
cells_i <- progressions[
(progressions$from == to) & (progressions$to == from),
, drop = FALSE
]
cells_i$position <- 1 - cells_i$position
cells <- rbind(cells, cells_i)
edge_name <- paste(from, to, sep = "->")
position_var <- paste0("Position: ", edge_name)
meta[rownames(cells), "TrajectoryEdge"] <- edge_name
meta[, position_var] <- NA
meta[rownames(cells), position_var] <- cells$position
}
meta$TrajectoryEdge <- as.factor(meta$TrajectoryEdge)
return(meta)
}
#' Generate 2d representations of a trajectory model
#'
#' Creates 2d layouts of the milestone network and translates the
#' cell positions into these layouts
#'
#' @importFrom igraph graph_from_adjacency_matrix
#' @importFrom igraph ends
#' @importFrom igraph layout_with_fr
#' @importFrom igraph layout_with_dh
#' @importFrom igraph layout_as_tree
#' @importFrom igraph layout_with_mds
#' @importFrom igraph E
#' @importFrom igraph V
#'
#' @param trajectory Trajectory on which to operate
#' @return trajectoryProjections list of TrajectoryProjection
generateTrajectoryProjections <- function(trajectory) {
progressions <- trajectory@progressions
adjMat <- trajectory@adjMat
net <- igraph::graph_from_adjacency_matrix(adjMat, weighted = TRUE,
mode = "undirected")
adjMatInv <- adjMat ** -1
adjMatInv[is.infinite(adjMatInv)] <- 0
# some algorithms use weights to represent 'inverse' distance
invnet <- igraph::graph_from_adjacency_matrix(adjMatInv, weighted = TRUE,
mode = "undirected")
edges <- igraph::ends(net, igraph::E(net), names = TRUE)
adjMatBinary <- (adjMat > 0) * 1
tp_list <- list()
# layout with Fruchterman-Reingold algorithm
vData <- igraph::layout_with_fr(invnet)
rownames(vData) <- igraph::V(net)$name
pData <- translateCellPositions(progressions, vData, edges)
tp <- TrajectoryProjection(name = "FR", pData = pData, vData = vData,
adjMat = adjMatBinary)
tp_list <- c(tp_list, tp)
# layout with Davidson-Harel
vData <- igraph::layout_with_dh(net)
rownames(vData) <- igraph::V(net)$name
pData <- translateCellPositions(progressions, vData, edges)
tp <- TrajectoryProjection(name = "DH", pData = pData, vData = vData,
adjMat = adjMatBinary)
tp_list <- c(tp_list, tp)
# layout with Davidson-Harel
vData <- igraph::layout_as_tree(net)
rownames(vData) <- igraph::V(net)$name
pData <- translateCellPositions(progressions, vData, edges)
tp <- TrajectoryProjection(name = "Tree", pData = pData, vData = vData,
adjMat = adjMatBinary)
tp_list <- c(tp_list, tp)
# layout with MDS
vData <- igraph::layout_with_mds(net)
rownames(vData) <- igraph::V(net)$name
pData <- translateCellPositions(progressions, vData, edges)
tp <- TrajectoryProjection(name = "MDS", pData = pData, vData = vData,
adjMat = adjMatBinary)
tp_list <- c(tp_list, tp)
names(tp_list) <- vapply(tp_list, function(x) x@name, FUN.VALUE = "")
return(tp_list)
}
#' Translate cell positions
#'
#' Maps the 1-d position of each cell along a milestone edge into the 2-d
#' layout coordinates of that edge. Cells are placed along the straight line
#' between the edge's two milestone endpoints, with a small random jitter
#' perpendicular to the edge so overlapping cells remain distinguishable.
#'
#' @importFrom stats rnorm
#' @param progressions data.frame with columns `from`, `to` and `position`
#'   (fraction along the edge) describing cell positions between milestones;
#'   rownames are cell identifiers
#' @param vData Mx2 matrix mapping milestones into 2d; rownames are
#'   milestone names
#' @param edges Edges x 2 matrix of milestone-name pairs describing
#'   connectivity between milestones
#' @return pData Cells x 2 matrix with cell positions in 2d, row-ordered to
#'   match `progressions`
translateCellPositions <- function(progressions, vData, edges) {
    pData <- lapply(seq(nrow(edges)), function(i) {
        from <- edges[i, 1]
        to <- edges[i, 2]
        # Euclidean length of this edge in the 2-d layout.
        # NOTE(review): if both endpoints share a layout position this is 0
        # and sinTheta/cosTheta below become NaN -- assumed not to occur.
        edge_dist <- sum( (vData[from, ] - vData[to, ]) ** 2) ** .5
        # Cells annotated on this edge in its stored orientation...
        cells <- progressions[
            (progressions$from == from) & (progressions$to == to),
            , drop = FALSE
        ]
        # ...plus cells annotated on the reversed edge: flip their
        # positions so 0 always corresponds to `from`.
        cells_i <- progressions[
            (progressions$from == to) & (progressions$to == from),
            , drop = FALSE
        ]
        cells_i$position <- 1 - cells_i$position
        cells <- rbind(cells, cells_i)
        # Lay cells out along the x-axis first, jittering y for visibility.
        coordinates <- matrix(numeric(nrow(cells) * 2),
                              nrow = nrow(cells), ncol = 2,
                              dimnames = list(rownames(cells), c("x", "y")))
        coordinates[, "x"] <- cells$position * edge_dist
        coordinates[, "y"] <- rnorm(nrow(coordinates), sd = .1)
        # Rotate the laid-out cells onto the edge direction, then translate
        # to the `from` endpoint.
        dx <- vData[to, 1] - vData[from, 1]
        dy <- vData[to, 2] - vData[from, 2]
        sinTheta <- dy / edge_dist
        cosTheta <- dx / edge_dist
        R <- matrix(c(cosTheta, sinTheta, -1 * sinTheta, cosTheta), nrow = 2)
        coordinates <- coordinates %*% t(R) # rotation
        coordinates <- t(t(coordinates) + vData[from, ]) # offset
        return(coordinates)
    })
    pData <- do.call(rbind, pData)
    # Restore the original cell order of the input.
    pData <- pData[rownames(progressions), , drop = FALSE]
    return(pData)
}
| /R/methods-Trajectory.R | permissive | YosefLab/VISION | R | false | false | 10,180 | r | #' Initialize a new Trajectory object.
#'
#' Builds a symmetric, weighted milestone adjacency matrix and a cleaned,
#' cell-indexed progressions table from a wrapped trajectory-inference
#' result, then wraps them in a Trajectory S4 object.
#'
#' @param input trajectory to model cell progression. Wrapped result
#' of a trajectory inference by the dynverse/dynwrap library; must supply
#' `milestone_network` (columns `from`, `to`, `length`) and `progressions`
#' (columns `cell_id`, `from`, `to`, `percentage`)
#' @return Trajectory object with slots `adjMat` and `progressions`
Trajectory <- function(input) {
    # Build the undirected milestone adjacency matrix from the edge list
    network <- as.data.frame(input$milestone_network)
    milestone_ids <- union(unique(network$from), unique(network$to))
    adjMat <- matrix(0.0, nrow = length(milestone_ids), ncol = length(milestone_ids),
                     dimnames = list(milestone_ids, milestone_ids))
    # seq_len() is safe for a zero-row network (no iterations)
    for (i in seq_len(nrow(network))){
        from <- as.character(network[i, "from"])
        to <- as.character(network[i, "to"])
        edgeLength <- network[i, "length"]
        # Edges are undirected: mirror the length across the diagonal
        adjMat[from, to] <- edgeLength
        adjMat[to, from] <- edgeLength
    }
    # Re-index progressions by cell id and rename 'percentage' -> 'position'
    progressions <- as.data.frame(input$progressions)
    rownames(progressions) <- progressions$cell_id
    progressions$cell_id <- NULL
    colnames(progressions) <- gsub("percentage", "position",
                                   colnames(progressions))
    # Validate the progressions table against the milestone network
    if (!("from" %in% colnames(progressions))){
        stop("input Trajectory missing 'from' column")
    }
    if (!("to" %in% colnames(progressions))){
        stop("input Trajectory missing 'to' column")
    }
    if (length(setdiff(unique(progressions$from), milestone_ids)) > 0) {
        stop("milestones in progressions$from don't match those in milestone_network")
    }
    if (length(setdiff(unique(progressions$to), milestone_ids)) > 0) {
        stop("milestones in progressions$to don't match those in milestone_network")
    }
    .Object <- new("Trajectory", adjMat = adjMat, progressions = progressions)
    return(.Object)
}
#' Compute KNN weights based on geodesic distances for Trajectory objects
#'
#' For each cell, finds its K nearest cells by geodesic distance along the
#' trajectory graph and assigns Gaussian-kernel weights normalized to sum
#' to 1 per cell.
#'
#' @importFrom Matrix rowSums
#' @importFrom Matrix sparseMatrix
#' @importFrom matrixStats rowMaxs
#' @param object a Trajectory object
#' @param K Number of neighbors to consider.
#' @return a list of two items:
#'     indices: matrix, cells X neighbors
#'         Each row specifies indices of nearest neighbors
#'     weights: matrix, cells X neighbors
#'         Corresponding weights to nearest neighbors
setMethod("computeKNNWeights", signature(object = "Trajectory"),
          function(object, K = round(sqrt(nrow(object)))) {
              # Per-cell 1-d position along its edge, named by cell id
              edgePos <- object@progressions$position
              names(edgePos) <- rownames(object@progressions)
              # 2 x cells matrix: the (from, to) milestones of each cell's edge
              edgeAssoc <- t(object@progressions[, c("from", "to")])
              colnames(edgeAssoc) <- rownames(object@progressions)
              # Pairwise geodesic distances between cells along the trajectory
              distmat <- calculateTrajectoryDistances(adjMat = object@adjMat,
                                                      edgeAssoc = edgeAssoc,
                                                      edgePos = edgePos)
              # NOTE: a previous version also computed a quantile-thresholded
              # copy of distmat here; it was never used, so that O(n^2) dead
              # work has been removed.
              # Indices of the K nearest cells per cell (row-wise)
              nn <- t(apply(distmat, 1, function(r) {
                  order(r)[seq_len(K)]
              }))
              # Distances to those neighbors, one row per cell
              d <- lapply(seq_len(nrow(nn)), function(i) {
                  distmat[i, nn[i, ]]
              })
              d <- do.call(rbind, d)
              d[is.na(d)] <- 0
              # Gaussian kernel; per-cell bandwidth = distance to the
              # farthest of the K neighbors
              sigma <- rowMaxs(d)
              sigma[sigma == 0] <- 1.0 # occurs if all nearest neighbors at same point
              sparse_weights <- exp(-1 * (d * d) / sigma ^ 2)
              # Normalize row sums = 1
              weightsNormFactor <- rowSums(sparse_weights)
              weightsNormFactor[weightsNormFactor == 0] <- 1.0
              sparse_weights <- sparse_weights / weightsNormFactor
              rownames(nn) <- rownames(object)
              rownames(d) <- rownames(object)
              return(list(indices = nn, weights = sparse_weights))
          })
#' Generate meta-data associated with this trajectory
#'
#' Builds a per-cell data.frame with a categorical column mapping each cell
#' to its milestone edge, plus one numeric column per edge holding the
#' cell's position along that edge (NA for cells on other edges).
#'
#' @importFrom igraph graph_from_adjacency_matrix
#' @importFrom igraph ends
#' @importFrom igraph E
#'
#' @param trajectory Trajectory on which to operate
#' @return metaData dataframe with meta-data
createTrajectoryMetaData <- function(trajectory){
    prog <- trajectory@progressions
    graph <- igraph::graph_from_adjacency_matrix(trajectory@adjMat,
                                                 weighted = TRUE,
                                                 mode = "undirected")
    edgeEnds <- igraph::ends(graph, igraph::E(graph), names = TRUE)
    metaData <- data.frame(row.names = rownames(prog))
    metaData[, "TrajectoryEdge"] <- ""
    for (e in seq(nrow(edgeEnds))) {
        mFrom <- edgeEnds[e, 1]
        mTo <- edgeEnds[e, 2]
        # Cells stored in this edge's orientation...
        fwd <- prog[(prog$from == mFrom) & (prog$to == mTo), , drop = FALSE]
        # ...and cells stored against it, whose positions must be flipped
        bwd <- prog[(prog$from == mTo) & (prog$to == mFrom), , drop = FALSE]
        bwd$position <- 1 - bwd$position
        onEdge <- rbind(fwd, bwd)
        edgeLabel <- paste(mFrom, mTo, sep = "->")
        posColumn <- paste0("Position: ", edgeLabel)
        # Tag the cells on this edge and record their positions; cells on
        # other edges keep NA in this edge's position column
        metaData[rownames(onEdge), "TrajectoryEdge"] <- edgeLabel
        metaData[, posColumn] <- NA
        metaData[rownames(onEdge), posColumn] <- onEdge$position
    }
    metaData$TrajectoryEdge <- as.factor(metaData$TrajectoryEdge)
    return(metaData)
}
#' Generate 2d representations of a trajectory model
#'
#' Creates 2d layouts of the milestone network (Fruchterman-Reingold,
#' Davidson-Harel, tree, and MDS) and translates the cell positions
#' into each of these layouts
#'
#' @importFrom igraph graph_from_adjacency_matrix
#' @importFrom igraph ends
#' @importFrom igraph layout_with_fr
#' @importFrom igraph layout_with_dh
#' @importFrom igraph layout_as_tree
#' @importFrom igraph layout_with_mds
#' @importFrom igraph E
#' @importFrom igraph V
#'
#' @param trajectory Trajectory on which to operate
#' @return trajectoryProjections named list of TrajectoryProjection
generateTrajectoryProjections <- function(trajectory) {
    progressions <- trajectory@progressions
    adjMat <- trajectory@adjMat
    net <- igraph::graph_from_adjacency_matrix(adjMat, weighted = TRUE,
                                               mode = "undirected")
    # Some layout algorithms treat weights as attraction ('inverse'
    # distance), so provide a graph with inverted edge lengths as well.
    # (`^` replaces the deprecated `**` operator; behavior is identical.)
    adjMatInv <- adjMat ^ -1
    adjMatInv[is.infinite(adjMatInv)] <- 0
    invnet <- igraph::graph_from_adjacency_matrix(adjMatInv, weighted = TRUE,
                                                  mode = "undirected")
    edges <- igraph::ends(net, igraph::E(net), names = TRUE)
    adjMatBinary <- (adjMat > 0) * 1
    milestoneNames <- igraph::V(net)$name
    # Shared steps for every layout: name the milestone coordinates,
    # project the cells into the layout, and wrap the result.
    makeProjection <- function(name, vData) {
        rownames(vData) <- milestoneNames
        pData <- translateCellPositions(progressions, vData, edges)
        TrajectoryProjection(name = name, pData = pData, vData = vData,
                             adjMat = adjMatBinary)
    }
    tp_list <- list(
        makeProjection("FR", igraph::layout_with_fr(invnet)),  # Fruchterman-Reingold
        makeProjection("DH", igraph::layout_with_dh(net)),     # Davidson-Harel
        makeProjection("Tree", igraph::layout_as_tree(net)),   # tree (Reingold-Tilford)
        makeProjection("MDS", igraph::layout_with_mds(net))    # multidimensional scaling
    )
    names(tp_list) <- vapply(tp_list, function(x) x@name, FUN.VALUE = "")
    return(tp_list)
}
#' Translate cell positions
#'
#' Maps the 1-d position of each cell along a milestone edge into the 2-d
#' layout coordinates of that edge. Cells are placed along the straight line
#' between the edge's two milestone endpoints, with a small random jitter
#' perpendicular to the edge so overlapping cells remain distinguishable.
#'
#' @importFrom stats rnorm
#' @param progressions data.frame with columns `from`, `to` and `position`
#'   (fraction along the edge) describing cell positions between milestones;
#'   rownames are cell identifiers
#' @param vData Mx2 matrix mapping milestones into 2d; rownames are
#'   milestone names
#' @param edges Edges x 2 matrix of milestone-name pairs describing
#'   connectivity between milestones
#' @return pData Cells x 2 matrix with cell positions in 2d, row-ordered to
#'   match `progressions`
translateCellPositions <- function(progressions, vData, edges) {
    pData <- lapply(seq_len(nrow(edges)), function(i) {
        from <- edges[i, 1]
        to <- edges[i, 2]
        # Euclidean length of this edge in the 2-d layout.
        # NOTE(review): if both endpoints share a layout position this is 0
        # and sinTheta/cosTheta below become NaN -- assumed not to occur.
        edge_dist <- sqrt(sum((vData[from, ] - vData[to, ]) ^ 2))
        # Cells annotated on this edge in its stored orientation...
        cells <- progressions[
            (progressions$from == from) & (progressions$to == to),
            , drop = FALSE
        ]
        # ...plus cells annotated on the reversed edge: flip their
        # positions so 0 always corresponds to `from`.
        cells_i <- progressions[
            (progressions$from == to) & (progressions$to == from),
            , drop = FALSE
        ]
        cells_i$position <- 1 - cells_i$position
        cells <- rbind(cells, cells_i)
        # Lay cells out along the x-axis first, jittering y for visibility.
        coordinates <- matrix(numeric(nrow(cells) * 2),
                              nrow = nrow(cells), ncol = 2,
                              dimnames = list(rownames(cells), c("x", "y")))
        coordinates[, "x"] <- cells$position * edge_dist
        coordinates[, "y"] <- rnorm(nrow(coordinates), sd = .1)
        # Rotate the laid-out cells onto the edge direction, then translate
        # to the `from` endpoint.
        dx <- vData[to, 1] - vData[from, 1]
        dy <- vData[to, 2] - vData[from, 2]
        sinTheta <- dy / edge_dist
        cosTheta <- dx / edge_dist
        R <- matrix(c(cosTheta, sinTheta, -1 * sinTheta, cosTheta), nrow = 2)
        coordinates <- coordinates %*% t(R) # rotation
        t(t(coordinates) + vData[from, ]) # offset
    })
    pData <- do.call(rbind, pData)
    # Restore the original cell order of the input.
    pData[rownames(progressions), , drop = FALSE]
}
|
# Date   : 2021/11/16
# Author : Park Sihyun
# Topic  : R correlation analysis practice
install.packages('corrplot')
library(corrplot)
data(iris)
df_iris <- iris
View(df_iris)
# Pairwise correlation coefficients
cor(df_iris$Sepal.Length, df_iris$Sepal.Width)
cor(df_iris$Petal.Length, df_iris$Petal.Width)
# NOTE(review): exact duplicate of the line above -- possibly a different
# variable pair was intended; confirm with the author
cor(df_iris$Petal.Length, df_iris$Petal.Width)
# Correlation matrix
corr_iris <- cor(df_iris[, 1:4], method = 'pearson')
corr_iris
# Visualization
corrplot(corr_iris) # darker/larger circles indicate stronger correlation
corrplot(corr_iris, type='lower') # darker/larger circles indicate stronger correlation
corrplot(corr_iris, method='number') # values at or below 0.2 indicate no correlation
################################################################################
# Correlation analysis of the product data
df_product <- read.csv('../file/product.csv')
df_product
# Pairwise correlation coefficients between variables
# (columns: product familiarity / suitability / satisfaction)
cor(df_product$제품_친밀도, df_product$제품_적절성)
cor(df_product$제품_친밀도, df_product$제품_만족도)
cor(df_product$제품_적절성, df_product$제품_만족도)
# Correlation matrix
corr_product <- cor(df_product, method = 'pearson')
corr_product
# Visualization
corrplot(corr_product)
corrplot(corr_product, type='lower')
corrplot(corr_product, method = 'number')
| /Ch10/2_상관관계 분석 실습.R | no_license | SihyunPark01/R | R | false | false | 1,259 | r |
# Date   : 2021/11/16
# Author : Park Sihyun
# Topic  : R correlation analysis practice
install.packages('corrplot')
library(corrplot)
data(iris)
df_iris <- iris
View(df_iris)
# Pairwise correlation coefficients
cor(df_iris$Sepal.Length, df_iris$Sepal.Width)
cor(df_iris$Petal.Length, df_iris$Petal.Width)
# NOTE(review): exact duplicate of the line above -- possibly a different
# variable pair was intended; confirm with the author
cor(df_iris$Petal.Length, df_iris$Petal.Width)
# Correlation matrix
corr_iris <- cor(df_iris[, 1:4], method = 'pearson')
corr_iris
# Visualization
corrplot(corr_iris) # darker/larger circles indicate stronger correlation
corrplot(corr_iris, type='lower') # darker/larger circles indicate stronger correlation
corrplot(corr_iris, method='number') # values at or below 0.2 indicate no correlation
################################################################################
# Correlation analysis of the product data
df_product <- read.csv('../file/product.csv')
df_product
# Pairwise correlation coefficients between variables
# (columns: product familiarity / suitability / satisfaction)
cor(df_product$제품_친밀도, df_product$제품_적절성)
cor(df_product$제품_친밀도, df_product$제품_만족도)
cor(df_product$제품_적절성, df_product$제품_만족도)
# Correlation matrix
corr_product <- cor(df_product, method = 'pearson')
corr_product
# Visualization
corrplot(corr_product)
corrplot(corr_product, type='lower')
corrplot(corr_product, method = 'number')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stan_confidenceRegion.R
\name{stan_confidenceRegion}
\alias{stan_confidenceRegion}
\title{Extract functions of multiple variables from a stanfit object}
\usage{
stan_confidenceRegion(stanfit, parstrings, prefuncstring = "(",
joinfuncstring = " + ", postfuncstring = ")")
}
\arguments{
\item{stanfit}{object of class stanfit.}
\item{parstrings}{vector of strings containing partial (or full) matches of parameter names.
When more than one string is passed, functions are computed based on the combination of the first match for
each string, then the second match for each string, etc. The first match of the first string is only ever combined
with the first match of the second, similarly for the 2nd match, etc.}
\item{prefuncstring}{string containing front element of function. E.g., 'exp(' for an exponential region.}
\item{joinfuncstring}{string used to join the (possibly) multiple parameters involved.}
\item{postfuncstring}{string containing end element of function. E.g., ') *2' to multiply the result by 2.}
}
\value{
matrix of values of the specified interactions at each iteration.
}
\description{
Can be useful for determining quantiles or plotting multidimensional regions --
for instance in case of colinearity of predictors.
}
\examples{
temp<-stan_confidenceRegion(stanfit=ctstantestfit$stanfit,
parstrings=c('drift_eta1_eta2','drift_eta2_eta1'))
t(apply(temp,2,quantile))
}
| /man/stan_confidenceRegion.Rd | no_license | karchjd/ctsem | R | false | true | 1,478 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stan_confidenceRegion.R
\name{stan_confidenceRegion}
\alias{stan_confidenceRegion}
\title{Extract functions of multiple variables from a stanfit object}
\usage{
stan_confidenceRegion(stanfit, parstrings, prefuncstring = "(",
joinfuncstring = " + ", postfuncstring = ")")
}
\arguments{
\item{stanfit}{object of class stanfit.}
\item{parstrings}{vector of strings containing partial (or full) matches of parameter names.
When more than one string is passed, functions are computed based on the combination of the first match for
each string, then the second match for each string, etc. The first match of the first string is only ever combined
with the first match of the second, similarly for the 2nd match, etc.}
\item{prefuncstring}{string containing front element of function. E.g., 'exp(' for an exponential region.}
\item{joinfuncstring}{string used to join the (possibly) multiple parameters involved.}
\item{postfuncstring}{string containing end element of function. E.g., ') *2' to multiply the result by 2.}
}
\value{
matrix of values of the specified interactions at each iteration.
}
\description{
Can be useful for determining quantiles or plotting multidimensional regions --
for instance in case of colinearity of predictors.
}
\examples{
temp<-stan_confidenceRegion(stanfit=ctstantestfit$stanfit,
parstrings=c('drift_eta1_eta2','drift_eta2_eta1'))
t(apply(temp,2,quantile))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textFunction.R
\name{splitData}
\alias{splitData}
\title{Elongate data.frame with column split by comma}
\usage{
splitData(df, colname)
}
\arguments{
\item{df}{a data.frame}
\item{colname}{column name}
}
\value{
An elongated data.frame
}
\description{
Elongate data.frame with column split by comma
}
| /man/splitData.Rd | no_license | hsh2001/ggplotAssist | R | false | true | 380 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textFunction.R
\name{splitData}
\alias{splitData}
\title{Elongate data.frame with column split by comma}
\usage{
splitData(df, colname)
}
\arguments{
\item{df}{a data.frame}
\item{colname}{column name}
}
\value{
An elongated data.frame
}
\description{
Elongate data.frame with column split by comma
}
|
# Date: July 31st, 2018
# Processes WellSoft data:
# - AUTOMATICALLY SORT COLUMNS from heuristics into empty columns, all zero and dates
# - CLEAN ERRONEOUS INPUT based on patterns noticed by inspection
# - PROCESS DATE COLUMNS where date columns are automatically derived and assembled from inspection
# - MANUALLY CLEAN REMAINING VARIABLES FROM INSPECTION
#
# Sort columns into factors, natural language, blobs, numeric, dates, practically empty, entirely empty,
# sensitive personal health information
#
# Input: raw_wellSoft.csv
# Output:
# - saves cleaned_wellSoft.csv
# - returns cleaned wellSoft data
cleanWellSoft <- function(data, file.path) {
print(dim(data))
# replace white space with NA
print("Replacing white spaces")
data <- data[ , lapply(.SD, function(x) stri_replace_all_regex(x, "^$|^ $|^Na$", NA))]
# ================================= CLEAN ERRONEOUS INPUT ================================= #
all.columns <- colnames(data)
cat("\nProcessing errors\n")
pattern <- "\\\005\\(\\(\\(\\(|\\\005\\.\\(\\(\\(|\\\0051\\(\\(\\(|\\\005\\,\\(\\(\\(|\\\005\\)\\(\\(\\(|\\\005\\)\\(\\(\\(|\\\005\\-\\(\\(\\(|\\\005\\=\\(\\(\\(|\\\005\\*\\(\\(\\(|\\\0058\\(\\(\\("
#pattern <- "\\\005\(\(\(\(|\\\005\.\(\(\(|\\0051\(\(\(|\\\005\\,\(\(\(|\\\005\)\(\(\(|\\\005\)\(\(\(|\\\005\\-\(\(\(|\\\005\=\(\(\(|\\005\*\(\(\(|\\0058\(\(\("
pattern.matched <- sapply(data[,c(all.columns), with=FALSE], function(y) any(grep(pattern, y)))
sum(pattern.matched=='TRUE')
pattern.matched <- names(pattern.matched[pattern.matched=='TRUE'])
i <- 1
for (name in pattern.matched) {
print(paste0(i, ": ", name))
eval(parse(text=paste0('data$', name, ' <- gsub(pattern, "", data$', name, ')')))
i <- i + 1
}
# ================================= MANUALLY CLEAN REMAINING VARIABLES FROM INSPECTION ================================= #
print("Manually Cleaning Variables")
data$Ctas_Index_11 <- gsub("N/A", "", data$Ctas_Index_11)
data$T2_Priority_12 <- gsub("Team Triage", "Triage Team", data$T2_Priority_12)
data$Method_Of_Arrival_Indexed_S_33 <- gsub("Xxxx", "", data$Method_Of_Arrival_Indexed_S_33)
data$Method_Of_Arrival_321 <- gsub(".*Land Ambulance.*", "Land Ambulance", data$Method_Of_Arrival_321)
data$Method_Of_Arrival_321 <- gsub("Air Ambulance.*", "Air Ambulance", data$Method_Of_Arrival_321)
data$Method_Of_Arrival_321 <- gsub(".*Ambulatory.*", "Ambulatory", data$Method_Of_Arrival_321)
data$Method_Of_Arrival_321 <- gsub(".*DTS.*", "DTS", data$Method_Of_Arrival_321)
data$Alias_38 <- gsub("\\\0059\\(\\(\\(", "", data$Alias_38)
data$Sex_41 <- gsub("m", "M", data$Sex_41)
data$Sex_41 <- gsub("0", "", data$Sex_41)
data$Hc_Issuing_Prov_70 <- gsub("XX", "", data$Hc_Issuing_Prov_70)
data$Reg_Status_75 <- gsub("XXXX", "", data$Reg_Status_75)
data$Permanent_Address_77 <- gsub("\\`", "", data$Permanent_Address_77)
data$Relationship_93 <- gsub("Not Available", "", data$Relationship_93)
data$Relationship_93 <- gsub("Grandmothe|Grandmom|G\\'Mother", "Grandmother", data$Relationship_93)
data$Relationship_93 <- gsub("^Motherr$|Mother\\,|Motherwr|Motherr\\,|Motherr|Mothrr|Mother\\,|Mothewr|Mother1|Motheran|Motehr|Mothedr|Mom|Mtoher|Mother Mo|Mother\\`|Motherd|Mothe|Mtother|Mohter", "Mother", data$Relationship_93)
data$Relationship_93 <- gsub("Dad|Fahter", "Father", data$Relationship_93)
data$Relationship_93 <- gsub("\\`Other", "Other", data$Relationship_93)
data$Relationship_93 <- gsub("Foster Mot", "Fostermom", data$Relationship_93)
data$Relationship_93 <- gsub("Legal\\ Guardian", "Guardian", data$Relationship_93)
data$Relationship_93 <- gsub("Unknown", "", data$Relationship_93)
data$Emp_Status_110 <- gsub("^Grandmotherr$|Grandmnother|Maternal Gm|G\\-Mother|Grand Mother|Grandmohter|Nana|Grannie|Paternal Gm|G\\'Mother|G Mother|Granmother|Paternal Grandm|Grandma|Grandmom|Garndmother|Grandmothe", "Grandmother", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("Foster$", "Foster Parent", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("G\\'Parents|Gparents|Grandparents|Grandparen$", "Grandparent", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("Grandpa|Grand\\-Dad|Grandfathe|Granddad|Grandad", "Grandfather", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("^Grandmotherr$", "Grandmother", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("^Not Applicable$|^Not Available$|N/A", "", data$Emp_Status_110)
data$Relationship_120 <- gsub("Faather|Fahter|Faher|Fatherr|Fatherf|Fther|Dad|Ather|Father\\`|Fatherq|Fathe|Fahter|Father\\,", "Father", data$Relationship_120)
data$Relationship_120 <- gsub("C\\.C\\.A\\.S\\.|Cas Worker|Cas", "C.A.S.", data$Relationship_120)
data$Relationship_120 <- gsub("G\\'Mother|G\\-Mother|Grandmothe|Grandmom", "Grandmother", data$Relationship_120)
data$Relationship_120 <- gsub("Grandpa|Granddad|Grandfathe", "Grandfather", data$Relationship_120)
data$Relationship_120 <- gsub("Unckle", "Uncle", data$Relationship_120)
data$Relationship_120 <- gsub("Applicable|Applic", "Available", data$Relationship_120)
data$Relationship_120 <- gsub("Step Fatherr|Stepdad|Stepfather", "Step Father", data$Relationship_120)
data$Relationship_120 <- gsub("Grandfatherrent", "Grandfather", data$Relationship_120)
data$Relationship_120 <- gsub("Fatherr", "Father", data$Relationship_120)
data$Relationship_120 <- gsub("Stepmom|Stepmother", "Step Mother", data$Relationship_120)
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(H|h)ospital|Hospita|(H|h)osp|Urgent Care|General|Gen|general", y), "Hospital", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("^(D|d)r|(D|d)octor|(O|o)ffice|Family|Medical Centre|^[0-9]+", y), "Doctor", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(E|e)merg|ER|Emergency", y), "Emerg", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(C|c)linic", y), "Clinic", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(H|h)ealth (C|c)entre|health center", y), "Health Centre", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("Dentist|Dental", y), "Dentist", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(H|h)ome", y), "Home", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("Bloorveiw|Bloorview|bloorview|BLOORVIEW|(R|r)ehab", y), "Rehab", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(A|a)fter (H|h)our|after hour", y), "After Hours", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(C|c)redit (V|v)alley|Creit Valley|Credit Vallley", y), "Hospital", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("Danforth", y), "Doctor", y))) # Danforth Paediatrics is a family doctor
unique.locations <- names(which(table(data$Referring_Location_148) == 1))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(y %in% unique.locations, "Other", y)))
data$Admit_Service_186 <- gsub("\\\0053\\(\\(\\(|\\\0056\\(\\(\\(", "", data$Admit_Service_186)
data$R_A_Override_Minutes_295 <- as.numeric(as.character(data$R_A_Override_Minutes_295))
data$Ctas_326 <- gsub("\\\005(\\:|\\+|\\;)\\(\\(\\(|\\\005A\\(\\(\\(","", data$Ctas_326)
data$Ctas_326 <- gsub("N\\/A|NA","", data$Ctas_326)
data$Potential_Study_343 <- gsub("\\?rivr|RIVR\\ \\?|\\?\\ rivr\\ study|rivr\\ study", "RIVR", data$Potential_Study_343)
data$Potential_Study_343 <- gsub("Bronch\\ Study\\?\\?|Bronchiolitis\\ study\\,\\ Richard\\ Paged|\\?bronch\\ study", "Bronchiolitis", data$Potential_Study_343)
data$Potential_Study_343 <- gsub("flugene study", "flugene", data$Potential_Study_343)
data$Potential_Study_343 <- gsub("WR\\-N|wr\\ n", "WR\\ N", data$Potential_Study_343)
data$Potential_Study_343 <- gsub("^Wr$|wr", "WR", data$Potential_Study_343)
data$Pt_Weight_350 <- gsub("kg", "", data$Pt_Weight_350)
data$Pt_Weight_350 <- as.numeric(data$Pt_Weight_350)
data$Triage_Intervention_354 <- gsub("\\\0057\\(\\(\\(","", data$Triage_Intervention_354)
data$Isolation_363 <- gsub("\\\005(\\+|\\:|A|3)\\(\\(\\(", "", data$Isolation_363)
data$Rn_Tmplt_Athr_376 <- gsub("Sick\\ Kids", "SickKids", data$Rn_Tmplt_Athr_376)
data$Private_Cc_377 <- gsub("Xxxxxxx|Xxxxxxx\\;", "", data$Private_Cc_377)
data$Cpso_385 <- as.factor(data$Cpso_385)
data$H_P_Template_402 <- gsub("neonate", "Neonate", data$H_P_Template_402)
data$H_P_Template_402 <- gsub("L\\,\\ ", "", data$H_P_Template_402)
data$Billing_Status_431 <- gsub("\\\005(\\+|7)\\(\\(\\(|\\ \\-\\ (TRENT|KATHLEEN)", "", data$Billing_Status_431)
data$Billing_Status_431 <- gsub("Complete", "complete", data$Billing_Status_431)
data$Billing_Status_431 <- gsub("Billing\\ Clerk", "billing\\ clerk", data$Billing_Status_431)
data$Disposition_480 <- gsub("home", "Home", data$Disposition_480)
data$Condition_At_Disposition_481 <- gsub("GOod.*|.*Good.*|.*good.*|^ood|^god$|^(G|g)oo$|^g$|GOOD.*|^G$", "Good", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub("Fair.*|^f$|fair.*|FAIR.*", "Fair", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub(".*Stable.*|.*stable.*|Satble", "Stable", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub(".*Well.*|.*well.*", "Well", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub("Ok|ok", "OK", data$Condition_At_Disposition_481)
data$Bed_Type_565 <- gsub("\\\0054\\(\\(\\(", "", data$Bed_Type_565)
data$Bed_Type_565 <- gsub("yes|^y$", "Yes", data$Bed_Type_565)
data$Bed_Type_565 <- gsub("^n$|no", "No", data$Bed_Type_565)
data$Hc_Swipe_573 <- gsub("\\`\\`\\`", "", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub("\\`", "", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub("0", "", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub(".*Swipe", "Swipe", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub(".*Scan", "Scan", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub("^[Swipe|Scan]", "", data$Hc_Swipe_573)
data$Arrival_Fda_627 <- gsub("\\\005\\/\\(\\(\\(|\\\0057\\(\\(\\(", "", data$Arrival_Fda_627)
data$Language_56 <- gsub("^Amachric$|^Amaric$|Ethopian|^Amharic/English$|Amhric|^Ahmaric$|^Ethiopian$|^Aderic/English$|^Amrrk$",
"Amharic", data$Language_56)
data$Language_56 <- gsub("^Alabian$|^Albainian$|Albamian|^Albania$|Albanian|^Albaninan$|^Albanion$|^Albian$|^Albina$|^Labanian$",
"Albanian", data$Language_56)
data$Language_56 <- gsub("^Armanian$|^Armanian/Russian$|Armeian|^Armenian$|Armian|^Arminaian$",
"Armenian", data$Language_56)
data$Language_56 <- gsub("^Aramic$",
"Arabic", data$Language_56)
data$Language_56 <- gsub("^Asirian$|^Asserion$|^Assryain$|^Assryan$|Assryial|^Assyarn$|^Assryial$|^Assyarn$|^Assyrian$",
"Assyrian", data$Language_56)
data$Language_56 <- gsub("^Bangalalesh$|^Bangali$|^Bangely$|^Bangla$|Bangladash|^Bangladeshi$|^Bangladi$|^Bangli$|^Bangoli$|^Bengala$|^Bengali$|^Bangoli$|^Bengeli$|^Bengla$|^Bengli$|^Bengoli$|^Bengoly$|^Bengali/Some English$|^Bingoli$",
"Bengali", data$Language_56)
data$Language_56 <- gsub("^Bosnian$|^Boznian$",
"Bosnian", data$Language_56)
data$Language_56 <- gsub("^Corean$",
"Korean", data$Language_56)
data$Language_56 <- gsub("^Cough/Congestion$",
"", data$Language_56)
data$Language_56 <- gsub("^Creatian$|^Croatia$|^Croatian$|^Croation$|^Creatia$",
"Croatian", data$Language_56)
data$Language_56 <- gsub("^Dahri$|^Dari$|^Dira$",
"Dari", data$Language_56)
data$Language_56 <- gsub("^E$",
"English", data$Language_56)
data$Language_56 <- gsub("^Dagalog/ And English$",
"Dagalog", data$Language_56)
data$Language_56 <- gsub("^English/Portuguese$",
"Portuguese", data$Language_56)
data$Language_56 <- gsub("^English/Spanish$",
"Spanish", data$Language_56)
data$Language_56 <- gsub("^English/Chinese$",
"Chinese", data$Language_56)
data$Language_56 <- gsub("^French/Russian$",
"French", data$Language_56)
data$Language_56 <- gsub("^Gana$",
"Twi", data$Language_56)
data$Language_56 <- gsub("^Gejarti$|^Jujarati$|^Gudjarti$|^Gugarat$|^Gugarati$|^Gujarati$|^Gujerati$|^Gujraki$|^Gujrathi$|^Gujrati$|^Gurati$|^Gujrathi$|^Gujrati$|^Gurati$|^Gusurati$|^Kujerati$|^Kurshrati$",
"Gujarati", data$Language_56)
data$Language_56 <- gsub("^Haka$",
"Hakka", data$Language_56)
data$Language_56 <- gsub("^Haraic$|^Harar$|^Harari$|^Hararic$|^Hararie$|^Harie$|^Hariha$|^Harry$",
"Harar", data$Language_56)
data$Language_56 <- gsub("^Hearing Inpaired$",
"Hearing Impaired", data$Language_56)
data$Language_56 <- gsub("^Hindu,Panjabi$|^Hiri$|^Sindhi$",
"Hindi", data$Language_56)
data$Language_56 <- gsub("^Kinyarwa$|^Kinyarwanda$|^Kinyrwanda$",
"Kinyarwanda", data$Language_56)
data$Language_56 <- gsub("^Kiswahili$|^Swaheli$|^Swahilee$",
"Swahili", data$Language_56)
data$Language_56 <- gsub("^Kurdish/Farsi$",
"Kurdish", data$Language_56)
data$Language_56 <- gsub("^Lebonise$",
"Lebonese", data$Language_56)
data$Language_56 <- gsub("^Lithiwanian$|^Lithuania$|^Lithuaniain$|^Lithuanian$|^Lithunian$|^Lithunrian$",
"Lithuanian", data$Language_56)
data$Language_56 <- gsub("^Macedonean$|^Macedonia$|^Macedonian$|^Masedonian$|^Masadonian$|^Masedonian$|^Masidonian$|^Masodonian$|^Masadoian$",
"Macedonian", data$Language_56)
data$Language_56 <- gsub("^Malayalam$|^Malayalan$|^Malayam$|^Malyalam$",
"Malayalam", data$Language_56)
data$Language_56 <- gsub("^Mandarin$|^Mandarin/English$|^Manderine$|^Malyalam$",
"Mandarin", data$Language_56)
data$Language_56 <- gsub("^Marthi$",
"Marathi", data$Language_56)
data$Language_56 <- gsub("^Mongol$|^Mongolia$|^Mongolian$",
"Mongolian", data$Language_56)
data$Language_56 <- gsub("^Myanmavnese$|^Myanmer$",
"Burmese", data$Language_56)
data$Language_56 <- gsub("^Napali$|^Napaly$|^Nepal$|^Nepalese$|^Nipali$|^Nupali$",
"Napali", data$Language_56)
data$Language_56 <- gsub("^Ojibway$",
"Ojibwe", data$Language_56)
data$Language_56 <- gsub("^Ordu$",
"Urdu", data$Language_56)
data$Language_56 <- gsub("^Orumo$",
"Oromo", data$Language_56)
data$Language_56 <- gsub("^Oyiherero$",
"Otjiherero", data$Language_56)
data$Language_56 <- gsub("^Pashato$|^Pashda$|^Pashtoo$|^Pashtu$|^Pastu$|^Pershdu$|^Pershta$|^Pshto$|^Pushto$|^Purshdu$|^Poshto$",
"Pashto", data$Language_56)
data$Language_56 <- gsub("^Persain$|^Persan$|^Persian/Dari$|^Persian/French$|^Persion$|^Pirson$",
"Persian", data$Language_56)
data$Language_56 <- gsub("^Philipino$|^Philippino$|^Phillipino$|^Philopeno$|^Philopino$",
"Filipino", data$Language_56)
data$Language_56 <- gsub("^Portuguese/English$",
"Portuguese", data$Language_56)
data$Language_56 <- gsub("^Polish/English$",
"Polish", data$Language_56)
data$Language_56 <- gsub("^Segrenia$|^Serb-Croatia$|^Serb/Croatian$|^Serb/Croation$|^Serbia$|^Serbien$|^Serbin$|^Serian$",
"Serbian", data$Language_56)
data$Language_56 <- gsub("^Botswana$",
"Setswana", data$Language_56)
data$Language_56 <- gsub("^Shana$",
"Shona", data$Language_56)
data$Language_56 <- gsub("^Sign$|^Asl$",
"Sign Language", data$Language_56)
data$Language_56 <- gsub("^Sihalies$|^Singalese$|^Sinhalese$|^Sinhalasese$|^Sinhalese$|^Sinhaless$|^Sinhalish$|^Sinhanles$|^Srilaken$",
"Sinhala", data$Language_56)
data$Language_56 <- gsub("^Ska$|^Skova$|^Slovaka$|^Slovakia$|^Slovakian$|^Slovac$",
"Slovak", data$Language_56)
data$Language_56 <- gsub("^Somali/English$",
"Somali", data$Language_56)
data$Language_56 <- gsub("^Tagalag$|^Tagalio$|^Tagallo$|^Tagalo$|^Tagalo/English$|^Taglog$|^Tagolog$|^Talalog$|^Talgalog$|^Tigalog$|^Tagalog$|^Talogh$|^Tegalo$|^Tegelo$|^Tigalo$|^Thigalo$|^Tangalo$",
"Filipino", data$Language_56)
data$Language_56 <- gsub("^Tebrena$|^Tegrigna$|^Tegrina$|^Tegrino$|^Tgirinia$|^Tgrinia$|^Tigera$|^Tigeran$|^Tighna$|^Tighrina$|^Tigisty$|^Tigma$|^Tigrana$|^Tigrani$|^Tigre$|^Tigregna$",
"Tigrigna", data$Language_56)
data$Language_56 <- gsub("^Tigregne$|^Tigrena$|^Tigrenia$|^Tigrgina$|^Tigrgna$|^Tigri$|^Tigria$|^Tigrian$|^Tigrigna$|^Tigrignha$|^Tigrina$",
"Tigrigna", data$Language_56)
data$Language_56 <- gsub("^Tigrigna$|^Tigrinea$|^Tigrna$|^Tigrnea$|^Tirgcha$|^Tirgiary$|^Tirnayia$|^Triring$|^Tugrnia$|^Tigrigna$|^Tigring$|^Tigrinia$|^Tigrinya$",
"Tigrigna", data$Language_56)
data$Language_56 <- gsub("^Trgir$|^Tirge$",
"Tigre", data$Language_56)
data$Language_56 <- gsub("^Talugu$|^Telegu$|^Telugiu$",
"Telugu", data$Language_56)
data$Language_56 <- gsub("^Teibetan$|^Tibetian$|^Tibetin$|^Tibian$",
"Tibetan", data$Language_56)
data$Language_56 <- gsub("^Turkis$",
"Turkish", data$Language_56)
data$Language_56 <- gsub("^Teranian$",
"Iranian", data$Language_56)
data$Language_56 <- gsub("^Yoruda$",
"Yoruba", data$Language_56)
data$Language_56 <- gsub("^Varsi$",
"Farsi", data$Language_56)
data$Language_56 <- gsub("^Uzbec$|^Uzbeki$",
"Uzbek", data$Language_56)
data$Language_56 <- gsub("^Shangu$",
"Sangu", data$Language_56)
# numerics into numerics
print("Convert numeric columns into numbers")
# numeric (or need to be converted) from manual inspection
dataNumeric <- c("Override_Et_Status_Cascadi_261",
"R_A_Override_Minutes_295",
"Pt_Weight_350")
for (fac in dataNumeric) {
print(fac)
eval(parse(text=paste0("data$", fac, "<- as.numeric(as.character(data$", fac, "))")))
}
# ================================= AUTOMATICALLY SORT COLUMNS ================================= #
print("Examining Missingness")
# check % of missingness in columns
na_count <-sapply(data, function(y) round((sum(length(which(is.na(y))))/nrow(data))*100, 5))
na_count <- data.frame(na_count);
na_count$colName <- rownames(na_count)
rownames(na_count) <- NULL
print(paste("There are", nrow(na_count %>% dplyr::filter(na_count == 100)), "columns with 100% missingness"))
print(paste("There are", nrow(na_count %>% dplyr::filter(na_count == 0)), "columns with 0% missingness"))
all.zeros <- as.character(unlist((na_count %>% dplyr::filter(na_count == 100))$colName))
# check which columns are dates
print("Matching Dates")
date.pattern <-"^(19|(2(0|1)))([0-9]{6}|[0-9]{10}|[0-9]{12})((?!([0-9]+|\ |[A-Za-z])))"
possible_dates <- sapply(data , function(y) any(grep(date.pattern, y, perl=T)))
possible_dates <- names(possible_dates[possible_dates==TRUE])
print(length(possible_dates))
# From manual examinatin, exclude these values
possible_dates <- possible_dates[!possible_dates=='Pt_Accnt_5']
possible_dates <- possible_dates[!possible_dates=='Hsc_7']
possible_dates <- possible_dates[!possible_dates=='Ems_Id_32']
possible_dates <- possible_dates[!possible_dates=='Health_Card_68']
possible_dates <- possible_dates[!possible_dates=='Acct_Label_2_197']
possible_dates <- possible_dates[!possible_dates=='Acct_Label_3_198']
possible_dates <- possible_dates[!possible_dates=='Status_Admt_246']
possible_dates <- possible_dates[!possible_dates=='Meds_Review_Prntd_278']
possible_dates <- possible_dates[!possible_dates=='Pt_Lock_Boxed_D_T_282']
possible_dates <- possible_dates[!possible_dates=='Glass_Broken_D_T_283']
possible_dates <- possible_dates[!possible_dates=='Cpso_396']
possible_dates <- possible_dates[!possible_dates=='Lic_549']
possible_dates <- possible_dates[!possible_dates=='Post_It_Note_Arrival_612']
possible_dates <- possible_dates[!possible_dates=='Vs_Acknowledge_618']
possible_dates <- possible_dates[!possible_dates=='String_016_711']
possible_dates <- possible_dates[!possible_dates=='String_021_712']
possible_dates <- possible_dates[!possible_dates=='Emar_Trigger_745']
print("saving dates")
write.csv(possible_dates, "dates_colnames.csv")
# BLOB
print("Retrieving blob names")
blob.pattern <- "\\(BLOB\\)"
blob.pattern.matched <- sapply(data, function(y) any(grep(blob.pattern, y)))
sum(blob.pattern.matched=='TRUE')
BLOBS <- names(blob.pattern.matched[blob.pattern.matched=='TRUE'])
all.columns <- colnames(data)
all.columns <- all.columns[!all.columns %in% c(all.zeros, possible_dates, BLOBS)]
write.csv(all.columns, "other_colnames.csv")
# ================================= PROCESS DATE COLUMNS ================================= #
cat("\nProcessing dates\n")
j <- 1
length(possible_dates)
for (date.col in possible_dates) {
print(paste0(j, ": ", date.col))
proc.date <- as.character(unlist(data[,c(date.col), with=FALSE]))
proc.date <-as.character(sapply(proc.date, function(x) str_extract(x, date.pattern)))
print(paste("Before Processing: ", unique(na.exclude(proc.date)[1])))
if (nchar(unique(na.exclude(proc.date)[1]))==8) {
proc.date <- as.POSIXct(strptime(proc.date, format="%Y%m%d"), tz="EST")
} else if (nchar(unique(na.exclude(proc.date)[1]))==12) {
proc.date <- as.POSIXct(strptime(proc.date, format="%Y%m%d%H%M"), tz="EST")
} else {
paste("Not Valid String Format")
}
print(paste("After Processing: ", unique(na.exclude(proc.date)[1])))
eval(parse(text=paste0("data$", date.col, " <- proc.date")))
j <- j + 1
}
# save processed wellSoft data to file for reading in future
cat("\nWriting file\n") # remove BLOBS and empty columns when writing
print(dim(data[,c(all.columns, possible_dates), with=FALSE]))
fwrite(data[,c(all.columns, possible_dates), with=FALSE], paste0(file.path, "cleaned_wellSoft.csv"), dateTimeAs="write.csv")
cat("\nDone\n")
return(data)
}
| /wellSoft/cleanWellSoft.R | no_license | dhidru/ED_code | R | false | false | 24,066 | r | # Date: July 31st, 2018
# Processes Well Soft data :
# - AUTOMATICALLY SORT COLUMNS from heuristics into empty columns, all zero and dates
# - CLEAN ERRONEOUS INPUT based on patterns noticed by inspection
# - PROCESS DATE COLUMNS where date columns are automatically derived and assembled from inspection
# - MANUALLY CLEAN REMAINING VARIABLES FROM INSPECTION
#
# Sort columns into factors, natural language, blobs, numeric, dates, practically empty, entirely empty,
# sensitive personal health information
#
# Input: raw_wellSoft.csv
# Output:
# - saves cleaned_wellSoft.csv
# - returns cleaned wellSoft data
cleanWellSoft <- function(data, file.path) {
# Cleans the raw wellSoft extract and writes the result to
# paste0(file.path, "cleaned_wellSoft.csv").
#
# 'data' is expected to be a data.table (the body relies on with=FALSE and .SD)
# holding the raw wellSoft export, all columns read as character.
# 'file.path' is the output directory/prefix for the cleaned CSV.
# Returns the cleaned data (all columns, including BLOB/empty ones that are
# excluded only from the written file).
print(dim(data))
# replace white space with NA
# Empty strings, a single space, and the literal token "Na" are all treated as missing.
print("Replacing white spaces")
data <- data[ , lapply(.SD, function(x) stri_replace_all_regex(x, "^$|^ $|^Na$", NA))]
# ================================= CLEAN ERRONEOUS INPUT ================================= #
all.columns <- colnames(data)
cat("\nProcessing errors\n")
# 'pattern' matches garbage prefixes of the form \005<char>((( (a control
# character followed by punctuation and three open parens) that appear in the
# raw export; matching values have the garbage stripped below.
pattern <- "\\\005\\(\\(\\(\\(|\\\005\\.\\(\\(\\(|\\\0051\\(\\(\\(|\\\005\\,\\(\\(\\(|\\\005\\)\\(\\(\\(|\\\005\\)\\(\\(\\(|\\\005\\-\\(\\(\\(|\\\005\\=\\(\\(\\(|\\\005\\*\\(\\(\\(|\\\0058\\(\\(\\("
#pattern <- "\\\005\(\(\(\(|\\\005\.\(\(\(|\\0051\(\(\(|\\\005\\,\(\(\(|\\\005\)\(\(\(|\\\005\)\(\(\(|\\\005\\-\(\(\(|\\\005\=\(\(\(|\\005\*\(\(\(|\\0058\(\(\("
# Identify which columns contain at least one garbage match.
pattern.matched <- sapply(data[,c(all.columns), with=FALSE], function(y) any(grep(pattern, y)))
sum(pattern.matched=='TRUE')  # NOTE(review): result is discarded; informational only when run interactively
pattern.matched <- names(pattern.matched[pattern.matched=='TRUE'])
i <- 1
for (name in pattern.matched) {
print(paste0(i, ": ", name))
# Builds and evaluates 'data$<name> <- gsub(pattern, "", data$<name>)' for a
# dynamic column name.  NOTE(review): data[[name]] <- gsub(...) would be the
# safer idiom than eval(parse(...)).
eval(parse(text=paste0('data$', name, ' <- gsub(pattern, "", data$', name, ')')))
i <- i + 1
}
# ================================= MANUALLY CLEAN REMAINING VARIABLES FROM INSPECTION ================================= #
# Each recode below was derived from manual inspection of the raw values:
# misspellings and variant spellings are collapsed to a canonical label, and
# placeholder values (N/A, Xxxx, ...) are blanked out.  Order matters within a
# column: later gsub calls see the output of earlier ones.
print("Manually Cleaning Variables")
data$Ctas_Index_11 <- gsub("N/A", "", data$Ctas_Index_11)
data$T2_Priority_12 <- gsub("Team Triage", "Triage Team", data$T2_Priority_12)
data$Method_Of_Arrival_Indexed_S_33 <- gsub("Xxxx", "", data$Method_Of_Arrival_Indexed_S_33)
data$Method_Of_Arrival_321 <- gsub(".*Land Ambulance.*", "Land Ambulance", data$Method_Of_Arrival_321)
data$Method_Of_Arrival_321 <- gsub("Air Ambulance.*", "Air Ambulance", data$Method_Of_Arrival_321)
data$Method_Of_Arrival_321 <- gsub(".*Ambulatory.*", "Ambulatory", data$Method_Of_Arrival_321)
data$Method_Of_Arrival_321 <- gsub(".*DTS.*", "DTS", data$Method_Of_Arrival_321)
data$Alias_38 <- gsub("\\\0059\\(\\(\\(", "", data$Alias_38)
data$Sex_41 <- gsub("m", "M", data$Sex_41)
data$Sex_41 <- gsub("0", "", data$Sex_41)
data$Hc_Issuing_Prov_70 <- gsub("XX", "", data$Hc_Issuing_Prov_70)
data$Reg_Status_75 <- gsub("XXXX", "", data$Reg_Status_75)
data$Permanent_Address_77 <- gsub("\\`", "", data$Permanent_Address_77)
data$Relationship_93 <- gsub("Not Available", "", data$Relationship_93)
data$Relationship_93 <- gsub("Grandmothe|Grandmom|G\\'Mother", "Grandmother", data$Relationship_93)
data$Relationship_93 <- gsub("^Motherr$|Mother\\,|Motherwr|Motherr\\,|Motherr|Mothrr|Mother\\,|Mothewr|Mother1|Motheran|Motehr|Mothedr|Mom|Mtoher|Mother Mo|Mother\\`|Motherd|Mothe|Mtother|Mohter", "Mother", data$Relationship_93)
data$Relationship_93 <- gsub("Dad|Fahter", "Father", data$Relationship_93)
data$Relationship_93 <- gsub("\\`Other", "Other", data$Relationship_93)
data$Relationship_93 <- gsub("Foster Mot", "Fostermom", data$Relationship_93)
data$Relationship_93 <- gsub("Legal\\ Guardian", "Guardian", data$Relationship_93)
data$Relationship_93 <- gsub("Unknown", "", data$Relationship_93)
data$Emp_Status_110 <- gsub("^Grandmotherr$|Grandmnother|Maternal Gm|G\\-Mother|Grand Mother|Grandmohter|Nana|Grannie|Paternal Gm|G\\'Mother|G Mother|Granmother|Paternal Grandm|Grandma|Grandmom|Garndmother|Grandmothe", "Grandmother", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("Foster$", "Foster Parent", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("G\\'Parents|Gparents|Grandparents|Grandparen$", "Grandparent", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("Grandpa|Grand\\-Dad|Grandfathe|Granddad|Grandad", "Grandfather", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("^Grandmotherr$", "Grandmother", data$Emp_Status_110)
data$Emp_Status_110 <- gsub("^Not Applicable$|^Not Available$|N/A", "", data$Emp_Status_110)
data$Relationship_120 <- gsub("Faather|Fahter|Faher|Fatherr|Fatherf|Fther|Dad|Ather|Father\\`|Fatherq|Fathe|Fahter|Father\\,", "Father", data$Relationship_120)
data$Relationship_120 <- gsub("C\\.C\\.A\\.S\\.|Cas Worker|Cas", "C.A.S.", data$Relationship_120)
data$Relationship_120 <- gsub("G\\'Mother|G\\-Mother|Grandmothe|Grandmom", "Grandmother", data$Relationship_120)
data$Relationship_120 <- gsub("Grandpa|Granddad|Grandfathe", "Grandfather", data$Relationship_120)
data$Relationship_120 <- gsub("Unckle", "Uncle", data$Relationship_120)
data$Relationship_120 <- gsub("Applicable|Applic", "Available", data$Relationship_120)
data$Relationship_120 <- gsub("Step Fatherr|Stepdad|Stepfather", "Step Father", data$Relationship_120)
data$Relationship_120 <- gsub("Grandfatherrent", "Grandfather", data$Relationship_120)
data$Relationship_120 <- gsub("Fatherr", "Father", data$Relationship_120)
data$Relationship_120 <- gsub("Stepmom|Stepmother", "Step Mother", data$Relationship_120)
# Referring_Location_148: bucket free-text referral sources into broad
# categories; the sapply/ifelse form applies the recode elementwise.
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(H|h)ospital|Hospita|(H|h)osp|Urgent Care|General|Gen|general", y), "Hospital", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("^(D|d)r|(D|d)octor|(O|o)ffice|Family|Medical Centre|^[0-9]+", y), "Doctor", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(E|e)merg|ER|Emergency", y), "Emerg", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(C|c)linic", y), "Clinic", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(H|h)ealth (C|c)entre|health center", y), "Health Centre", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("Dentist|Dental", y), "Dentist", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(H|h)ome", y), "Home", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("Bloorveiw|Bloorview|bloorview|BLOORVIEW|(R|r)ehab", y), "Rehab", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(A|a)fter (H|h)our|after hour", y), "After Hours", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("(C|c)redit (V|v)alley|Creit Valley|Credit Vallley", y), "Hospital", y)))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(grepl("Danforth", y), "Doctor", y))) # Danforth Paediatrics is a family doctor
# Any category that occurs exactly once after bucketing is collapsed to "Other".
unique.locations <- names(which(table(data$Referring_Location_148) == 1))
data$Referring_Location_148 <- as.vector(sapply(data$Referring_Location_148, function(y) ifelse(y %in% unique.locations, "Other", y)))
data$Admit_Service_186 <- gsub("\\\0053\\(\\(\\(|\\\0056\\(\\(\\(", "", data$Admit_Service_186)
data$R_A_Override_Minutes_295 <- as.numeric(as.character(data$R_A_Override_Minutes_295))
data$Ctas_326 <- gsub("\\\005(\\:|\\+|\\;)\\(\\(\\(|\\\005A\\(\\(\\(","", data$Ctas_326)
data$Ctas_326 <- gsub("N\\/A|NA","", data$Ctas_326)
data$Potential_Study_343 <- gsub("\\?rivr|RIVR\\ \\?|\\?\\ rivr\\ study|rivr\\ study", "RIVR", data$Potential_Study_343)
data$Potential_Study_343 <- gsub("Bronch\\ Study\\?\\?|Bronchiolitis\\ study\\,\\ Richard\\ Paged|\\?bronch\\ study", "Bronchiolitis", data$Potential_Study_343)
data$Potential_Study_343 <- gsub("flugene study", "flugene", data$Potential_Study_343)
# NOTE(review): the replacement "WR\\ N" inserts a literal backslash before the
# space; "WR N" may have been intended -- confirm against downstream use.
data$Potential_Study_343 <- gsub("WR\\-N|wr\\ n", "WR\\ N", data$Potential_Study_343)
data$Potential_Study_343 <- gsub("^Wr$|wr", "WR", data$Potential_Study_343)
data$Pt_Weight_350 <- gsub("kg", "", data$Pt_Weight_350)
data$Pt_Weight_350 <- as.numeric(data$Pt_Weight_350)
data$Triage_Intervention_354 <- gsub("\\\0057\\(\\(\\(","", data$Triage_Intervention_354)
data$Isolation_363 <- gsub("\\\005(\\+|\\:|A|3)\\(\\(\\(", "", data$Isolation_363)
data$Rn_Tmplt_Athr_376 <- gsub("Sick\\ Kids", "SickKids", data$Rn_Tmplt_Athr_376)
data$Private_Cc_377 <- gsub("Xxxxxxx|Xxxxxxx\\;", "", data$Private_Cc_377)
data$Cpso_385 <- as.factor(data$Cpso_385)
data$H_P_Template_402 <- gsub("neonate", "Neonate", data$H_P_Template_402)
data$H_P_Template_402 <- gsub("L\\,\\ ", "", data$H_P_Template_402)
data$Billing_Status_431 <- gsub("\\\005(\\+|7)\\(\\(\\(|\\ \\-\\ (TRENT|KATHLEEN)", "", data$Billing_Status_431)
data$Billing_Status_431 <- gsub("Complete", "complete", data$Billing_Status_431)
data$Billing_Status_431 <- gsub("Billing\\ Clerk", "billing\\ clerk", data$Billing_Status_431)
data$Disposition_480 <- gsub("home", "Home", data$Disposition_480)
data$Condition_At_Disposition_481 <- gsub("GOod.*|.*Good.*|.*good.*|^ood|^god$|^(G|g)oo$|^g$|GOOD.*|^G$", "Good", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub("Fair.*|^f$|fair.*|FAIR.*", "Fair", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub(".*Stable.*|.*stable.*|Satble", "Stable", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub(".*Well.*|.*well.*", "Well", data$Condition_At_Disposition_481)
data$Condition_At_Disposition_481 <- gsub("Ok|ok", "OK", data$Condition_At_Disposition_481)
data$Bed_Type_565 <- gsub("\\\0054\\(\\(\\(", "", data$Bed_Type_565)
data$Bed_Type_565 <- gsub("yes|^y$", "Yes", data$Bed_Type_565)
data$Bed_Type_565 <- gsub("^n$|no", "No", data$Bed_Type_565)
data$Hc_Swipe_573 <- gsub("\\`\\`\\`", "", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub("\\`", "", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub("0", "", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub(".*Swipe", "Swipe", data$Hc_Swipe_573)
data$Hc_Swipe_573 <- gsub(".*Scan", "Scan", data$Hc_Swipe_573)
# NOTE(review): "^[Swipe|Scan]" is a character class (strips one leading char
# from the set S,w,i,p,e,|,c,a,n), not the alternation "^(Swipe|Scan)" --
# confirm this is the intended behaviour.
data$Hc_Swipe_573 <- gsub("^[Swipe|Scan]", "", data$Hc_Swipe_573)
data$Arrival_Fda_627 <- gsub("\\\005\\/\\(\\(\\(|\\\0057\\(\\(\\(", "", data$Arrival_Fda_627)
# Language_56: normalize free-text language spellings (one canonical label per
# gsub call).  Patterns are anchored (^...$) so only exact variant spellings
# are recoded; some mother tongues are intentionally mapped to a standard name
# (e.g. Tagalog variants -> Filipino).
data$Language_56 <- gsub("^Amachric$|^Amaric$|Ethopian|^Amharic/English$|Amhric|^Ahmaric$|^Ethiopian$|^Aderic/English$|^Amrrk$",
                         "Amharic", data$Language_56)
data$Language_56 <- gsub("^Alabian$|^Albainian$|Albamian|^Albania$|Albanian|^Albaninan$|^Albanion$|^Albian$|^Albina$|^Labanian$",
                         "Albanian", data$Language_56)
data$Language_56 <- gsub("^Armanian$|^Armanian/Russian$|Armeian|^Armenian$|Armian|^Arminaian$",
                         "Armenian", data$Language_56)
data$Language_56 <- gsub("^Aramic$",
                         "Arabic", data$Language_56)
data$Language_56 <- gsub("^Asirian$|^Asserion$|^Assryain$|^Assryan$|Assryial|^Assyarn$|^Assryial$|^Assyarn$|^Assyrian$",
                         "Assyrian", data$Language_56)
data$Language_56 <- gsub("^Bangalalesh$|^Bangali$|^Bangely$|^Bangla$|Bangladash|^Bangladeshi$|^Bangladi$|^Bangli$|^Bangoli$|^Bengala$|^Bengali$|^Bangoli$|^Bengeli$|^Bengla$|^Bengli$|^Bengoli$|^Bengoly$|^Bengali/Some English$|^Bingoli$",
                         "Bengali", data$Language_56)
data$Language_56 <- gsub("^Bosnian$|^Boznian$",
                         "Bosnian", data$Language_56)
data$Language_56 <- gsub("^Corean$",
                         "Korean", data$Language_56)
data$Language_56 <- gsub("^Cough/Congestion$",
                         "", data$Language_56)
data$Language_56 <- gsub("^Creatian$|^Croatia$|^Croatian$|^Croation$|^Creatia$",
                         "Croatian", data$Language_56)
data$Language_56 <- gsub("^Dahri$|^Dari$|^Dira$",
                         "Dari", data$Language_56)
data$Language_56 <- gsub("^E$",
                         "English", data$Language_56)
data$Language_56 <- gsub("^Dagalog/ And English$",
                         "Dagalog", data$Language_56)
data$Language_56 <- gsub("^English/Portuguese$",
                         "Portuguese", data$Language_56)
data$Language_56 <- gsub("^English/Spanish$",
                         "Spanish", data$Language_56)
data$Language_56 <- gsub("^English/Chinese$",
                         "Chinese", data$Language_56)
data$Language_56 <- gsub("^French/Russian$",
                         "French", data$Language_56)
data$Language_56 <- gsub("^Gana$",
                         "Twi", data$Language_56)
data$Language_56 <- gsub("^Gejarti$|^Jujarati$|^Gudjarti$|^Gugarat$|^Gugarati$|^Gujarati$|^Gujerati$|^Gujraki$|^Gujrathi$|^Gujrati$|^Gurati$|^Gujrathi$|^Gujrati$|^Gurati$|^Gusurati$|^Kujerati$|^Kurshrati$",
                         "Gujarati", data$Language_56)
data$Language_56 <- gsub("^Haka$",
                         "Hakka", data$Language_56)
data$Language_56 <- gsub("^Haraic$|^Harar$|^Harari$|^Hararic$|^Hararie$|^Harie$|^Hariha$|^Harry$",
                         "Harar", data$Language_56)
data$Language_56 <- gsub("^Hearing Inpaired$",
                         "Hearing Impaired", data$Language_56)
data$Language_56 <- gsub("^Hindu,Panjabi$|^Hiri$|^Sindhi$",
                         "Hindi", data$Language_56)
data$Language_56 <- gsub("^Kinyarwa$|^Kinyarwanda$|^Kinyrwanda$",
                         "Kinyarwanda", data$Language_56)
data$Language_56 <- gsub("^Kiswahili$|^Swaheli$|^Swahilee$",
                         "Swahili", data$Language_56)
data$Language_56 <- gsub("^Kurdish/Farsi$",
                         "Kurdish", data$Language_56)
data$Language_56 <- gsub("^Lebonise$",
                         "Lebonese", data$Language_56)
data$Language_56 <- gsub("^Lithiwanian$|^Lithuania$|^Lithuaniain$|^Lithuanian$|^Lithunian$|^Lithunrian$",
                         "Lithuanian", data$Language_56)
data$Language_56 <- gsub("^Macedonean$|^Macedonia$|^Macedonian$|^Masedonian$|^Masadonian$|^Masedonian$|^Masidonian$|^Masodonian$|^Masadoian$",
                         "Macedonian", data$Language_56)
data$Language_56 <- gsub("^Malayalam$|^Malayalan$|^Malayam$|^Malyalam$",
                         "Malayalam", data$Language_56)
data$Language_56 <- gsub("^Mandarin$|^Mandarin/English$|^Manderine$|^Malyalam$",
                         "Mandarin", data$Language_56)
data$Language_56 <- gsub("^Marthi$",
                         "Marathi", data$Language_56)
data$Language_56 <- gsub("^Mongol$|^Mongolia$|^Mongolian$",
                         "Mongolian", data$Language_56)
data$Language_56 <- gsub("^Myanmavnese$|^Myanmer$",
                         "Burmese", data$Language_56)
data$Language_56 <- gsub("^Napali$|^Napaly$|^Nepal$|^Nepalese$|^Nipali$|^Nupali$",
                         "Napali", data$Language_56)
data$Language_56 <- gsub("^Ojibway$",
                         "Ojibwe", data$Language_56)
data$Language_56 <- gsub("^Ordu$",
                         "Urdu", data$Language_56)
data$Language_56 <- gsub("^Orumo$",
                         "Oromo", data$Language_56)
data$Language_56 <- gsub("^Oyiherero$",
                         "Otjiherero", data$Language_56)
data$Language_56 <- gsub("^Pashato$|^Pashda$|^Pashtoo$|^Pashtu$|^Pastu$|^Pershdu$|^Pershta$|^Pshto$|^Pushto$|^Purshdu$|^Poshto$",
                         "Pashto", data$Language_56)
data$Language_56 <- gsub("^Persain$|^Persan$|^Persian/Dari$|^Persian/French$|^Persion$|^Pirson$",
                         "Persian", data$Language_56)
data$Language_56 <- gsub("^Philipino$|^Philippino$|^Phillipino$|^Philopeno$|^Philopino$",
                         "Filipino", data$Language_56)
data$Language_56 <- gsub("^Portuguese/English$",
                         "Portuguese", data$Language_56)
data$Language_56 <- gsub("^Polish/English$",
                         "Polish", data$Language_56)
data$Language_56 <- gsub("^Segrenia$|^Serb-Croatia$|^Serb/Croatian$|^Serb/Croation$|^Serbia$|^Serbien$|^Serbin$|^Serian$",
                         "Serbian", data$Language_56)
data$Language_56 <- gsub("^Botswana$",
                         "Setswana", data$Language_56)
data$Language_56 <- gsub("^Shana$",
                         "Shona", data$Language_56)
data$Language_56 <- gsub("^Sign$|^Asl$",
                         "Sign Language", data$Language_56)
data$Language_56 <- gsub("^Sihalies$|^Singalese$|^Sinhalese$|^Sinhalasese$|^Sinhalese$|^Sinhaless$|^Sinhalish$|^Sinhanles$|^Srilaken$",
                         "Sinhala", data$Language_56)
data$Language_56 <- gsub("^Ska$|^Skova$|^Slovaka$|^Slovakia$|^Slovakian$|^Slovac$",
                         "Slovak", data$Language_56)
data$Language_56 <- gsub("^Somali/English$",
                         "Somali", data$Language_56)
data$Language_56 <- gsub("^Tagalag$|^Tagalio$|^Tagallo$|^Tagalo$|^Tagalo/English$|^Taglog$|^Tagolog$|^Talalog$|^Talgalog$|^Tigalog$|^Tagalog$|^Talogh$|^Tegalo$|^Tegelo$|^Tigalo$|^Thigalo$|^Tangalo$",
                         "Filipino", data$Language_56)
data$Language_56 <- gsub("^Tebrena$|^Tegrigna$|^Tegrina$|^Tegrino$|^Tgirinia$|^Tgrinia$|^Tigera$|^Tigeran$|^Tighna$|^Tighrina$|^Tigisty$|^Tigma$|^Tigrana$|^Tigrani$|^Tigre$|^Tigregna$",
                         "Tigrigna", data$Language_56)
data$Language_56 <- gsub("^Tigregne$|^Tigrena$|^Tigrenia$|^Tigrgina$|^Tigrgna$|^Tigri$|^Tigria$|^Tigrian$|^Tigrigna$|^Tigrignha$|^Tigrina$",
                         "Tigrigna", data$Language_56)
data$Language_56 <- gsub("^Tigrigna$|^Tigrinea$|^Tigrna$|^Tigrnea$|^Tirgcha$|^Tirgiary$|^Tirnayia$|^Triring$|^Tugrnia$|^Tigrigna$|^Tigring$|^Tigrinia$|^Tigrinya$",
                         "Tigrigna", data$Language_56)
data$Language_56 <- gsub("^Trgir$|^Tirge$",
                         "Tigre", data$Language_56)
data$Language_56 <- gsub("^Talugu$|^Telegu$|^Telugiu$",
                         "Telugu", data$Language_56)
data$Language_56 <- gsub("^Teibetan$|^Tibetian$|^Tibetin$|^Tibian$",
                         "Tibetan", data$Language_56)
data$Language_56 <- gsub("^Turkis$",
                         "Turkish", data$Language_56)
data$Language_56 <- gsub("^Teranian$",
                         "Iranian", data$Language_56)
data$Language_56 <- gsub("^Yoruda$",
                         "Yoruba", data$Language_56)
data$Language_56 <- gsub("^Varsi$",
                         "Farsi", data$Language_56)
data$Language_56 <- gsub("^Uzbec$|^Uzbeki$",
                         "Uzbek", data$Language_56)
data$Language_56 <- gsub("^Shangu$",
                         "Sangu", data$Language_56)
# numerics into numerics
print("Convert numeric columns into numbers")
# numeric (or need to be converted) from manual inspection
dataNumeric <- c("Override_Et_Status_Cascadi_261",
                 "R_A_Override_Minutes_295",
                 "Pt_Weight_350")
for (fac in dataNumeric) {
print(fac)
# Coerce via character first so factor levels are not converted to level codes.
# NOTE(review): data[[fac]] <- as.numeric(as.character(data[[fac]])) would
# avoid eval(parse(...)).
eval(parse(text=paste0("data$", fac, "<- as.numeric(as.character(data$", fac, "))")))
}
# ================================= AUTOMATICALLY SORT COLUMNS ================================= #
print("Examining Missingness")
# check % of missingness in columns
na_count <-sapply(data, function(y) round((sum(length(which(is.na(y))))/nrow(data))*100, 5))
na_count <- data.frame(na_count);
na_count$colName <- rownames(na_count)
rownames(na_count) <- NULL
print(paste("There are", nrow(na_count %>% dplyr::filter(na_count == 100)), "columns with 100% missingness"))
print(paste("There are", nrow(na_count %>% dplyr::filter(na_count == 0)), "columns with 0% missingness"))
# Columns that are 100% missing are dropped from the written output below.
all.zeros <- as.character(unlist((na_count %>% dplyr::filter(na_count == 100))$colName))
# check which columns are dates
print("Matching Dates")
# Matches 8-, 12- or 14-digit timestamps starting with 19/20/21 (YYYYMMDD,
# YYYYMMDDHHMM, YYYYMMDDHHMMSS) not followed by further digits/letters/space.
date.pattern <-"^(19|(2(0|1)))([0-9]{6}|[0-9]{10}|[0-9]{12})((?!([0-9]+|\ |[A-Za-z])))"
possible_dates <- sapply(data , function(y) any(grep(date.pattern, y, perl=T)))
possible_dates <- names(possible_dates[possible_dates==TRUE])
print(length(possible_dates))
# From manual examinatin, exclude these values
# (columns that merely look like timestamps -- IDs, health-card numbers, etc.)
possible_dates <- possible_dates[!possible_dates=='Pt_Accnt_5']
possible_dates <- possible_dates[!possible_dates=='Hsc_7']
possible_dates <- possible_dates[!possible_dates=='Ems_Id_32']
possible_dates <- possible_dates[!possible_dates=='Health_Card_68']
possible_dates <- possible_dates[!possible_dates=='Acct_Label_2_197']
possible_dates <- possible_dates[!possible_dates=='Acct_Label_3_198']
possible_dates <- possible_dates[!possible_dates=='Status_Admt_246']
possible_dates <- possible_dates[!possible_dates=='Meds_Review_Prntd_278']
possible_dates <- possible_dates[!possible_dates=='Pt_Lock_Boxed_D_T_282']
possible_dates <- possible_dates[!possible_dates=='Glass_Broken_D_T_283']
possible_dates <- possible_dates[!possible_dates=='Cpso_396']
possible_dates <- possible_dates[!possible_dates=='Lic_549']
possible_dates <- possible_dates[!possible_dates=='Post_It_Note_Arrival_612']
possible_dates <- possible_dates[!possible_dates=='Vs_Acknowledge_618']
possible_dates <- possible_dates[!possible_dates=='String_016_711']
possible_dates <- possible_dates[!possible_dates=='String_021_712']
possible_dates <- possible_dates[!possible_dates=='Emar_Trigger_745']
print("saving dates")
write.csv(possible_dates, "dates_colnames.csv")
# BLOB
# Columns whose values contain the literal marker "(BLOB)" are binary payloads
# and are excluded from the written output.
print("Retrieving blob names")
blob.pattern <- "\\(BLOB\\)"
blob.pattern.matched <- sapply(data, function(y) any(grep(blob.pattern, y)))
sum(blob.pattern.matched=='TRUE')
BLOBS <- names(blob.pattern.matched[blob.pattern.matched=='TRUE'])
all.columns <- colnames(data)
all.columns <- all.columns[!all.columns %in% c(all.zeros, possible_dates, BLOBS)]
write.csv(all.columns, "other_colnames.csv")
# ================================= PROCESS DATE COLUMNS ================================= #
cat("\nProcessing dates\n")
j <- 1
length(possible_dates)
for (date.col in possible_dates) {
print(paste0(j, ": ", date.col))
proc.date <- as.character(unlist(data[,c(date.col), with=FALSE]))
proc.date <-as.character(sapply(proc.date, function(x) str_extract(x, date.pattern)))
print(paste("Before Processing: ", unique(na.exclude(proc.date)[1])))
# The format is inferred from the first non-NA value only.
# NOTE(review): 14-digit (YYYYMMDDHHMMSS) values fall through unconverted, and
# the tz="EST" is a fixed offset, not Eastern local time -- confirm intended.
if (nchar(unique(na.exclude(proc.date)[1]))==8) {
proc.date <- as.POSIXct(strptime(proc.date, format="%Y%m%d"), tz="EST")
} else if (nchar(unique(na.exclude(proc.date)[1]))==12) {
proc.date <- as.POSIXct(strptime(proc.date, format="%Y%m%d%H%M"), tz="EST")
} else {
paste("Not Valid String Format")
}
print(paste("After Processing: ", unique(na.exclude(proc.date)[1])))
eval(parse(text=paste0("data$", date.col, " <- proc.date")))
j <- j + 1
}
# save processed wellSoft data to file for reading in future
cat("\nWriting file\n") # remove BLOBS and empty columns when writing
print(dim(data[,c(all.columns, possible_dates), with=FALSE]))
fwrite(data[,c(all.columns, possible_dates), with=FALSE], paste0(file.path, "cleaned_wellSoft.csv"), dateTimeAs="write.csv")
cat("\nDone\n")
# Note: the returned table still contains the BLOB/empty columns that were
# excluded from the written CSV.
return(data)
}
|
#' Stock returns of 18 US tech companies
#'
#' Dataset containing stock returns of 18 important US tech firms (see \href{https://finance.yahoo.com/u/yahoo-finance/watchlists/tech-stocks-that-move-the-market/}{Yahoo Finance})
#' and the NASDAQ return.
#' The dataset covers all trading days between 2015-11-03 and 2019-11-30 (i.e. 1,045 observations).
#'
#' @docType data
#'
# NOTE(review): the usage line previously referenced package "estudy2car";
# confirm the installed package name before release.
#' @usage data("tech_returns", package="estudy2car")
#'
#' @keywords datasets stock firms
#'
#' @format An object of class \code{zoo} containing 1045 observations and 19 variables.
# NOTE(review): the per-variable ranges originally read "2015-11-03 to
# 2017-11-29", contradicting the 2019-11-30 end date stated above; the series
# below are documented for the full sample period -- confirm against the data.
#' \describe{
#'   \item{^NDX}{NASDAQ index return over the sample period.}
#'   \item{MSFT}{Microsoft's stock return over the sample period.}
#'   \item{AMZ}{Amazon's stock return over the sample period.}
#'   \item{AAPL}{Apple's stock return over the sample period.}
#'   \item{GOOG}{Google's stock return over the sample period.}
#'   \item{FB}{Facebook's stock return over the sample period.}
#'   \item{BABA}{Alibaba's stock return over the sample period.}
#'   \item{INTC}{Intel's stock return over the sample period.}
#'   \item{PYPL}{PayPal's stock return over the sample period.}
#'   \item{NVDA}{NVIDIA's stock return over the sample period.}
#'   \item{TSLA}{Tesla's stock return over the sample period.}
#'   \item{ATVI}{Activision Blizzard's stock return over the sample period.}
#'   \item{AMD}{Advanced Micro Devices' stock return over the sample period.}
#'   \item{EA}{Electronic Arts' stock return over the sample period.}
#'   \item{MTCH}{Match Group's stock return over the sample period.}
#'   \item{TTD}{The Trade Desk's stock return over the sample period.}
#'   \item{ZG}{Zillow Group's stock return over the sample period.}
#'   \item{YELP}{Yelp's stock return over the sample period.}
#'   \item{TIVO}{TiVo's stock return over the sample period.}
#' ...
#' }
#' @source \href{https://finance.yahoo.com/}{Yahoo Finance}
"tech_returns"
| /R/tech_returns.R | no_license | LisaLechner/event2car | R | false | false | 1,792 | r | #' Stock returns of 18 US tech companies
#'
#' Datset containing stock returns of 18 important US tech firms (See \href{https://finance.yahoo.com/u/yahoo-finance/watchlists/tech-stocks-that-move-the-market/}{Yahoo Finance})
#' and the NASDAQ return.
#' The dataset covers all trading days between 2015-11-03 and 2019-11-30 (e.g. 1045).
#'
#' @docType data
#'
#' @usage data("tech_returns", package="estudy2car")
#'
#' @keywords datasets stock firms
#'
#' @format An objects of class \code{zoo} containing 1045 observations and 19 variables.
#' \describe{
#' \item{^NDX}{NASDAQ return from 2015-11-03 to 2017-11-29.}
#' \item{MSFT}{Microsoft's from 2015-11-03 to 2017-11-29.}
#' \item{AMZ}{Amazon's from 2015-11-03 to 2017-11-29.}
#' \item{AAPL}{Apple's from 2015-11-03 to 2017-11-29.}
#' \item{GOOG}{Google's from 2015-11-03 to 2017-11-29.}
#' \item{FB}{Facebook's from 2015-11-03 to 2017-11-29.}
#' \item{BABA}{Alibaba's from 2015-11-03 to 2017-11-29.}
#' \item{INTC}{Intel's from 2015-11-03 to 2017-11-29.}
#' \item{PYPL}{PayPal's from 2015-11-03 to 2017-11-29.}
#' \item{NVDA}{NVIDIA's from 2015-11-03 to 2017-11-29.}
#' \item{TSLA}{Tesla's from 2015-11-03 to 2017-11-29.}
#' \item{ATVI}{Activision Blizzard's from 2015-11-03 to 2017-11-29.}
#' \item{AMD}{Advanced Micro's from 2015-11-03 to 2017-11-29.}
#' \item{EA}{Electronic Arts's from 2015-11-03 to 2017-11-29.}
#' \item{MTCH}{Match Group's from 2015-11-03 to 2017-11-29.}
#' \item{TTD}{The Trade Desk's from 2015-11-03 to 2017-11-29.}
#' \item{ZG}{Zillow Group's from 2015-11-03 to 2017-11-29.}
#' \item{YELP}{Yelp's from 2015-11-03 to 2017-11-29.}
#' \item{TIVO}{TiVo's from 2015-11-03 to 2017-11-29.}
#' ...
#' }
#' @source \href{https://finance.yahoo.com/}{Yahoo Finance}
"tech_returns"
|
#**************************************************************************************************************************************
#Early pregnancy outcome analysis
#This file conducts the primary and sensitivity analysis for infection and miscarriage (with historical uninfected group):
# 1. Prepares data including grouping outcome variable for analysis
# 2. Looks at distribution of covariates in infected and uninfected group
# 3. Looks at distribution of pregnancy outcomes at 19+6 weeks gestation in infected and uninfected group
# 4. Conducts crude analysis of association between infection and miscarriage (only accounting for matching factors)
# 5. Conducts adjusted analysis of association between infection and miscarriage (accounting for all covariates of interest)
#**************************************************************************************************************************************
####HOUSEKEEPING####
library(survival)
library(nnet)
library(hablar)
library(dplyr)
library(expss)
library(readr)
library(janitor)
library(ggplot2)
setwd("x")
folder_temp_data <- "x"
folder_results <- "x"
v.misc.inf.data <- readRDS(paste0(folder_temp_data, "matched_miscarriage_infection_cohort_one.rds"))
ls(v.misc.inf.data)
####CREATE VARIABLES REQUIRED FOR ANALYSIS####
#create a variable to capture infected versus uninfected
table(v.misc.inf.data$inf_or_uninf)
# Human-readable cohort labels (the Ns are hard-coded into the label text).
v.misc.inf.data <-
  v.misc.inf.data %>%
  mutate(
    inf_descrip = case_when(
      inf_or_uninf == "uninf" ~ "Uninfected historical cohort (N=9,198)",
      inf_or_uninf == "inf" ~ "Infected cohort (N=3,066)"
    ))
# Two factor orderings: inf_cat puts the uninfected group first (reference
# level for the models); inf_cat_graph reverses the order for plotting.
v.misc.inf.data$inf_cat <- factor(v.misc.inf.data$inf_descrip, levels=c("Uninfected historical cohort (N=9,198)", "Infected cohort (N=3,066)"))
v.misc.inf.data$inf_cat_graph <- factor(v.misc.inf.data$inf_descrip, levels=c("Infected cohort (N=3,066)", "Uninfected historical cohort (N=9,198)"))
table(v.misc.inf.data$inf_descrip)
table(v.misc.inf.data$miscarriage_gestation_at_index_date)
#create a variable to capture gestational age at matching
table(v.misc.inf.data$miscarriage_gestation_at_index_date)
# Round gestation down to completed weeks.
v.misc.inf.data$miscarriage_gestation_at_index_date <- floor(v.misc.inf.data$miscarriage_gestation_at_index_date)
# Within each matched set ("index"), take a single set-level gestation at
# matching. max_() is hablar's NA-aware max -- TODO confirm its NA handling
# matches what is wanted when a whole set has missing gestation.
v.misc.inf.data <- v.misc.inf.data %>%
  arrange(index) %>%
  group_by(index) %>%
  mutate(gest_at_match = max_(miscarriage_gestation_at_index_date)) %>%
  ungroup()
addmargins(table(v.misc.inf.data$gest_at_match))
#data check: there shouldn't be anyone with a gestation at outcome that is less than the gestation at matching
check <- v.misc.inf.data[ , c("inf_or_uninf", "index", "miscarriage_gestation_at_index_date", "gest_at_match", "index_date_covid_infection_1", "est_conception_date", "pregnancy_end_date", "gestation_at_outcome")]
check2 <- v.misc.inf.data %>% filter(gestation_at_outcome<gest_at_match)
#create outcome variables
#for multinomial analysis
#note that livebirths are categorised as miscarriages given early gestation precluding survival
#molar pregnancies also grouped with miscarriages
table(v.misc.inf.data$miscarriage_study_outcome)
v.misc.inf.data$outcomes_cat <- dplyr::recode(v.misc.inf.data$miscarriage_study_outcome, "Termination"="Termination",
                                              "Molar pregnancy"="Miscarriage",
                                              "Miscarriage"="Miscarriage",
                                              "Ectopic pregnancy"="Ectopic pregnancy",
                                              "Live birth"="Miscarriage",
                                              "Ongoing wk 19"="Ongoing pregnancy")
# "Ongoing pregnancy" first so it becomes the reference category in multinom();
# the _forgraph ordering only controls stacking order in plots.
v.misc.inf.data$outcomes_cat <- factor(v.misc.inf.data$outcomes_cat, levels=c("Ongoing pregnancy", "Miscarriage", "Termination", "Ectopic pregnancy"))
v.misc.inf.data$outcomes_cat_forgraph <- factor(v.misc.inf.data$outcomes_cat, levels=c("Ectopic pregnancy", "Termination", "Miscarriage", "Ongoing pregnancy"))
# Cross-check the grouping against the raw outcome labels.
addmargins(table(v.misc.inf.data$outcomes_cat, v.misc.inf.data$miscarriage_study_outcome))
#Tidy up covariates as needed
addmargins(table(v.misc.inf.data$ethnicity_cat))
addmargins(table(v.misc.inf.data$cv_clinical_vulnerability_category))
addmargins(table(v.misc.inf.data$UR2_categories, exclude=NULL))
addmargins(table(v.misc.inf.data$bmi_cat))
addmargins(table(v.misc.inf.data$diabetes_cat))
addmargins(table(v.misc.inf.data$overall_smoking_status))
addmargins(table(v.misc.inf.data$simd))
# Collapse missing/sentinel SIMD and smoking values into an explicit category.
# NOTE(review): simd==9 is treated as a missing-data sentinel here -- confirm
# against the SIMD coding used upstream.
v.misc.inf.data$simd[is.na(v.misc.inf.data$simd)] <- "Unknown/Missing"
v.misc.inf.data$simd[v.misc.inf.data$simd==9] <- "Unknown/Missing"
v.misc.inf.data$overall_smoking_status[is.na(v.misc.inf.data$overall_smoking_status)] <- "Unknown/Missing"
# Re-level bmi_cat so NA becomes an explicit level labelled "88" (missing code).
v.misc.inf.data$bmi_cat <- factor(v.misc.inf.data$bmi_cat, levels=c(levels(v.misc.inf.data$bmi_cat), NA), labels = c(levels(v.misc.inf.data$bmi_cat), 88), exclude=NULL)
####Descriptive for each group: key characteristics####
#age median and range
# NOTE(review): object is named "_mean" but it actually reports the median and
# range of maternal age at conception, by infection status.
infection_age_mean <- v.misc.inf.data %>%
  group_by(inf_or_uninf) %>%
  summarise(age_median=median(mother_age_at_conception),
            age_min = min(mother_age_at_conception),
            age_max = max(mother_age_at_conception))
infection_age_mean
#Covariate distributions by infection status (infected vs uninfected).
#The original code repeated the identical tabyl/adorn pipeline seven times,
#once per covariate; it is factored out into a single helper below. The two
#previous comments ("Look at outcomes over time"/"by pregnancy outcome year")
#described a different script and have been corrected.
#' Cross-tabulate one covariate against infection status and write it to CSV.
#'
#' Produces counts with column percentages (janitor adorn_* formatting) and
#' writes the table to folder_results as primary_infected_by_<file_stem>.csv.
#'
#' @param dat data frame containing `var` and `inf_or_uninf`
#' @param var unquoted covariate column to tabulate
#' @param file_stem suffix used in the output file name
#' @return the adorned cross-tabulation, invisibly
crosstab_by_infection <- function(dat, var, file_stem) {
  xt <- dat %>%
    tabyl({{ var }}, inf_or_uninf) %>%
    adorn_totals(where="col") %>%
    adorn_totals(where="row") %>%
    adorn_percentages(denominator="col") %>%
    adorn_pct_formatting() %>%
    adorn_ns(position = "front")
  write.csv(xt, paste(folder_results, "primary_infected_by_", file_stem, ".csv", sep = ''))
  invisible(xt)
}
crosstab_by_infection(v.misc.inf.data, ethnicity_cat, "ethnicity")
crosstab_by_infection(v.misc.inf.data, UR6_categories, "urban_rural_6cat")
crosstab_by_infection(v.misc.inf.data, simd, "simd")
crosstab_by_infection(v.misc.inf.data, bmi_cat, "bmi_cat")
crosstab_by_infection(v.misc.inf.data, overall_smoking_status, "overall_smoking_status")
crosstab_by_infection(v.misc.inf.data, cv_clinical_vulnerability_category, "cv_clinical_vulnerability_category")
crosstab_by_infection(v.misc.inf.data, diabetes_cat, "diabetes_cat")
#look at some descriptives for infected cohort
#timing of first infection, grouped by gestation at matching
# NOTE(review): rows with gest_at_match >= 20 (or NA) fall through case_when()
# and get NA -- confirm that no such rows are expected in the infected cohort.
v.misc.inf.data.infected <- v.misc.inf.data %>%
  filter(inf_or_uninf=="inf") %>%
  mutate(gest_group = case_when(
    (gest_at_match<2) ~ "1. pre-conception",
    (gest_at_match>=2 & gest_at_match<6) ~ "2. 2-5 weeks",
    (gest_at_match>=6 & gest_at_match<11) ~ "3. 6-10 weeks",
    (gest_at_match>=11 & gest_at_match<16) ~ "4. 11-15 weeks",
    (gest_at_match>=16 & gest_at_match<20) ~ "5. 16-20 weeks"
  ))
addmargins(table(v.misc.inf.data.infected$gest_group))
#number of infections
#A recorded infection "counts" for this pregnancy if it occurred on/after 6
#weeks (42 days) before the estimated conception date and on/before both the
#study censoring date and the pregnancy end date. The same rule applies to all
#four recorded infection dates, so the four copy-pasted ifelse() blocks are
#collapsed into one loop creating infection1..infection4 (1/0 flags).
for (inf_k in 1:4) {
  inf_date <- v.misc.inf.data.infected[[paste0("index_date_covid_infection_", inf_k)]]
  v.misc.inf.data.infected[[paste0("infection", inf_k)]] <-
    ifelse(!is.na(inf_date) &
             as.numeric(inf_date - v.misc.inf.data.infected$est_conception_date) >= -6*7 &
             inf_date <= v.misc.inf.data.infected$miscarriage_study_pregnancy_end_date &
             inf_date <= v.misc.inf.data.infected$pregnancy_end_date, 1, 0)
}
table(v.misc.inf.data.infected$infection1)
table(v.misc.inf.data.infected$infection2)
table(v.misc.inf.data.infected$infection3)
table(v.misc.inf.data.infected$infection4)
#any qualifying infection during the pregnancy window
v.misc.inf.data.infected$infection <- ifelse(v.misc.inf.data.infected$infection1==1 | v.misc.inf.data.infected$infection2==1 | v.misc.inf.data.infected$infection3==1 | v.misc.inf.data.infected$infection4==1, 1, 0)
addmargins(table(v.misc.inf.data.infected$infection, exclude=NULL))
#number of qualifying infections per pregnancy
#BUG FIX: the original case_when() enumerated combinations of infection1-3
#only, so a fourth qualifying infection was never counted and a pregnancy
#whose only qualifying infection was the fourth was left as NA. Summing the
#four 1/0 flags handles every combination; 0 infections stays NA to match the
#original behaviour for unmatched rows.
v.misc.inf.data.infected <- v.misc.inf.data.infected %>%
  mutate(infection_no = ifelse(infection1 + infection2 + infection3 + infection4 > 0,
                               as.character(infection1 + infection2 + infection3 + infection4),
                               NA_character_))
table(v.misc.inf.data.infected$infection_no)
#symptomatic or non symptomatic
# Flags a pregnancy as "Symptomatic" if any qualifying infection was recorded
# as symptomatic; everything else (asymptomatic or unknown) is left NA.
# NOTE(review): infection4 is not considered here even though it is counted in
# the infection flags above -- confirm whether final_symptomatic_covid_infection_4
# exists in the data and should be added as a fourth condition.
# NOTE(review): the comparison is against the string "true" -- confirm the
# final_symptomatic_* columns are character, not logical.
v.misc.inf.data.infected <- v.misc.inf.data.infected %>%
  mutate(symptomatic_infection = case_when(
    (infection1==1 & final_symptomatic_covid_infection_1=="true") ~ "Symptomatic",
    (infection2==1 & final_symptomatic_covid_infection_2=="true") ~ "Symptomatic",
    (infection3==1 & final_symptomatic_covid_infection_3=="true") ~ "Symptomatic"
  ))
table(v.misc.inf.data.infected$symptomatic_infection)
####Primary analysis: miscarriage in infected versus uninfected (pre-pandemic)####
#Description of infected and uninfected
#Look at outcomes in infected and uninfected
outcomes_by_inf_status <- v.misc.inf.data %>%
  tabyl(outcomes_cat, inf_cat_graph) %>%
  adorn_totals(where="col") %>%
  adorn_totals(where="row") %>%
  adorn_percentages(denominator="col") %>%
  adorn_pct_formatting() %>%
  adorn_ns(position = "front")
write.csv(outcomes_by_inf_status, paste(folder_results, "Pregnancy_events_by_inf_cohort.csv", sep = ''))
#Look at outcomes in infected and uninfected - detailed
# NOTE(review): inf_detail is not defined anywhere in this script -- this line
# will fail unless the column is already present in the RDS; confirm.
outcomes_by_inf_status <- v.misc.inf.data %>%
  tabyl(outcomes_cat, inf_detail) %>%
  adorn_totals(where="col") %>%
  adorn_totals(where="row") %>%
  adorn_percentages(denominator="col") %>%
  adorn_pct_formatting() %>%
  adorn_ns(position = "front")
write.csv(outcomes_by_inf_status, paste(folder_results, "Pregnancy_events_by_inf_cohort_detail.csv", sep = ''))
#Graph of outcomes by infection status
# Per-row counter so sum(count) gives group sizes below.
v.misc.inf.data$count <-1
outcome_distribution <- v.misc.inf.data %>%
  group_by(inf_cat_graph, outcomes_cat_forgraph) %>%
  summarise(count.sum = sum(count))
# Percentage of each outcome within each cohort, saved for the plotting script.
outcome_distribution_primary <- outcome_distribution %>%
  group_by(inf_cat_graph) %>%
  mutate(denominator = sum(count.sum)) %>%
  mutate(prop_outcome = (count.sum / denominator) * 100)
outcome_distribution_primary %>% write_rds(paste0(folder_temp_data, "outcome_distribution_primary_infection_miscarriage.rds"), compress = "gz")
#Crude analysis
#multinomial regression adjusting for matching factors - baseline all non miscarriage outcomes
# Reference outcome is "Ongoing pregnancy" (first factor level); exponentiated
# coefficients are relative risk ratios vs that baseline.
model3 <- multinom(outcomes_cat ~ inf_cat + gest_at_match + mother_age_at_conception + conception_quarter, data=v.misc.inf.data)
summary(model3)
exp(coef(model3))
exp(confint(model3))
# Number of rows actually used in the fit (rows with NA are dropped).
nrow(fitted(model3))
# Wald z statistics and two-sided p-values (multinom does not report p-values).
z <- summary(model3)$coefficients/summary(model3)$standard.errors
p <- (1-pnorm(abs(z), 0, 1))*2
p
#multinomial regression adjusting for matching factors + other covariates (excluding ethnicity)
model5 <- multinom(outcomes_cat ~ inf_cat + gest_at_match + mother_age_at_conception + conception_quarter
                   + UR6_categories + simd + cv_clinical_vulnerability_category, data=v.misc.inf.data)
summary(model5)
exp(coef(model5))
exp(confint(model5))
z3 <- summary(model5)$coefficients/summary(model5)$standard.errors
p3 <- (1-pnorm(abs(z3), 0, 1))*2
p3
#check removing those with missing SIMD or area
# Sensitivity check: refit the adjusted model on complete cases only.
model_check <- v.misc.inf.data %>%
  filter(simd!="Unknown/Missing" & UR6_categories!="Unknown/Missing")
table(model_check$simd)
table(model_check$UR6_categories)
modelx <- multinom(outcomes_cat ~ inf_cat + gest_at_match + mother_age_at_conception + conception_quarter
                   + UR6_categories + simd + cv_clinical_vulnerability_category, data=model_check)
summary(modelx)
exp(coef(modelx))
exp(confint(modelx))
| /Early pregnancy paper scripts/06a_miscarriage_infection_primary_analysis.R | no_license | Public-Health-Scotland/COPS-public | R | false | false | 15,687 | r | #**************************************************************************************************************************************
#Early pregnancy outcome analysis
#This file conducts the primary and sensitivity analysis for infection and miscarriage (with historical uninfected group):
# 1. Prepares data including grouping outcome variable for analysis
# 2. Looks at distribution of covariates in infected and uninfected group
# 3. Looks at distribution of pregnancy outcomes at 19+6 weeks gestation in infected and uninfected group
# 4. Conducts crude analysis of association between infection and miscarriage (only accounting for matching factors)
# 5. Conducts adjusted analysis of association between infection and miscarriage (accounting for all covariates of interest)
#**************************************************************************************************************************************
####HOUSEKEEPING####
library(survival)
library(nnet)
library(hablar)
library(dplyr)
library(expss)
library(readr)
library(janitor)
library(ggplot2)
setwd("x")
folder_temp_data <- "x"
folder_results <- "x"
v.misc.inf.data <- readRDS(paste0(folder_temp_data, "matched_miscarriage_infection_cohort_one.rds"))
ls(v.misc.inf.data)
####CREATE VARIABLES REQUIRED FOR ANALYSIS####
#create a variable to capture infected versus uninfected
table(v.misc.inf.data$inf_or_uninf)
v.misc.inf.data <-
v.misc.inf.data %>%
mutate(
inf_descrip = case_when(
inf_or_uninf == "uninf" ~ "Uninfected historical cohort (N=9,198)",
inf_or_uninf == "inf" ~ "Infected cohort (N=3,066)"
))
v.misc.inf.data$inf_cat <- factor(v.misc.inf.data$inf_descrip, levels=c("Uninfected historical cohort (N=9,198)", "Infected cohort (N=3,066)"))
v.misc.inf.data$inf_cat_graph <- factor(v.misc.inf.data$inf_descrip, levels=c("Infected cohort (N=3,066)", "Uninfected historical cohort (N=9,198)"))
table(v.misc.inf.data$inf_descrip)
table(v.misc.inf.data$miscarriage_gestation_at_index_date)
#create a variable to capture gestational age at matching
table(v.misc.inf.data$miscarriage_gestation_at_index_date)
v.misc.inf.data$miscarriage_gestation_at_index_date <- floor(v.misc.inf.data$miscarriage_gestation_at_index_date)
v.misc.inf.data <- v.misc.inf.data %>%
arrange(index) %>%
group_by(index) %>%
mutate(gest_at_match = max_(miscarriage_gestation_at_index_date)) %>%
ungroup()
addmargins(table(v.misc.inf.data$gest_at_match))
#data check: there shouldn't be anyone with a gestation at outcome that is less than the gestation at matching
check <- v.misc.inf.data[ , c("inf_or_uninf", "index", "miscarriage_gestation_at_index_date", "gest_at_match", "index_date_covid_infection_1", "est_conception_date", "pregnancy_end_date", "gestation_at_outcome")]
check2 <- v.misc.inf.data %>% filter(gestation_at_outcome<gest_at_match)
#create outcome variables
#for multinomial analysis
#note that livebirths are categorised as miscarriages given early gestation precluding survival
#molar pregnancies also grouped with miscarriages
table(v.misc.inf.data$miscarriage_study_outcome)
v.misc.inf.data$outcomes_cat <- dplyr::recode(v.misc.inf.data$miscarriage_study_outcome, "Termination"="Termination",
"Molar pregnancy"="Miscarriage",
"Miscarriage"="Miscarriage",
"Ectopic pregnancy"="Ectopic pregnancy",
"Live birth"="Miscarriage",
"Ongoing wk 19"="Ongoing pregnancy")
v.misc.inf.data$outcomes_cat <- factor(v.misc.inf.data$outcomes_cat, levels=c("Ongoing pregnancy", "Miscarriage", "Termination", "Ectopic pregnancy"))
v.misc.inf.data$outcomes_cat_forgraph <- factor(v.misc.inf.data$outcomes_cat, levels=c("Ectopic pregnancy", "Termination", "Miscarriage", "Ongoing pregnancy"))
addmargins(table(v.misc.inf.data$outcomes_cat, v.misc.inf.data$miscarriage_study_outcome))
#Tidy up covariates as needed
addmargins(table(v.misc.inf.data$ethnicity_cat))
addmargins(table(v.misc.inf.data$cv_clinical_vulnerability_category))
addmargins(table(v.misc.inf.data$UR2_categories, exclude=NULL))
addmargins(table(v.misc.inf.data$bmi_cat))
addmargins(table(v.misc.inf.data$diabetes_cat))
addmargins(table(v.misc.inf.data$overall_smoking_status))
addmargins(table(v.misc.inf.data$simd))
v.misc.inf.data$simd[is.na(v.misc.inf.data$simd)] <- "Unknown/Missing"
v.misc.inf.data$simd[v.misc.inf.data$simd==9] <- "Unknown/Missing"
v.misc.inf.data$overall_smoking_status[is.na(v.misc.inf.data$overall_smoking_status)] <- "Unknown/Missing"
v.misc.inf.data$bmi_cat <- factor(v.misc.inf.data$bmi_cat, levels=c(levels(v.misc.inf.data$bmi_cat), NA), labels = c(levels(v.misc.inf.data$bmi_cat), 88), exclude=NULL)
####Descriptive for each group: key characteristics####
#age median and range
infection_age_mean <- v.misc.inf.data %>%
group_by(inf_or_uninf) %>%
summarise(age_median=median(mother_age_at_conception),
age_min = min(mother_age_at_conception),
age_max = max(mother_age_at_conception))
infection_age_mean
#Look at outcomes over time
#by pregnancy outcome year
vaccination_by_ethnicity <- v.misc.inf.data %>%
tabyl(ethnicity_cat, inf_or_uninf) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(vaccination_by_ethnicity, paste(folder_results, "primary_infected_by_ethnicity.csv", sep = ''))
vaccination_by_urban_rural_cat <- v.misc.inf.data %>%
tabyl(UR6_categories, inf_or_uninf) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(vaccination_by_urban_rural_cat, paste(folder_results, "primary_infected_by_urban_rural_6cat.csv", sep = ''))
vaccination_by_simd <- v.misc.inf.data %>%
tabyl(simd, inf_or_uninf) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(vaccination_by_simd, paste(folder_results, "primary_infected_by_simd.csv", sep = ''))
vaccination_by_bmi_cat <- v.misc.inf.data %>%
tabyl(bmi_cat, inf_or_uninf) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(vaccination_by_bmi_cat, paste(folder_results, "primary_infected_by_bmi_cat.csv", sep = ''))
vaccination_by_overall_smoking_status <- v.misc.inf.data %>%
tabyl(overall_smoking_status, inf_or_uninf) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(vaccination_by_overall_smoking_status, paste(folder_results, "primary_infected_by_overall_smoking_status.csv", sep = ''))
vaccination_by_cv_clinical_vulnerability_category <- v.misc.inf.data %>%
tabyl(cv_clinical_vulnerability_category, inf_or_uninf) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(vaccination_by_cv_clinical_vulnerability_category, paste(folder_results, "primary_infected_by_cv_clinical_vulnerability_category.csv", sep = ''))
vaccination_by_diabetes_cat <- v.misc.inf.data %>%
tabyl(diabetes_cat, inf_or_uninf) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(vaccination_by_diabetes_cat, paste(folder_results, "primary_infected_by_diabetes_cat.csv", sep = ''))
#look at some descriptives for infected cohort
#timing of fist infection
v.misc.inf.data.infected <- v.misc.inf.data %>%
filter(inf_or_uninf=="inf") %>%
mutate(gest_group = case_when(
(gest_at_match<2) ~ "1. pre-conception",
(gest_at_match>=2 & gest_at_match<6) ~ "2. 2-5 weeks",
(gest_at_match>=6 & gest_at_match<11) ~ "3. 6-10 weeks",
(gest_at_match>=11 & gest_at_match<16) ~ "4. 11-15 weeks",
(gest_at_match>=16 & gest_at_match<20) ~ "5. 16-20 weeks"
))
addmargins(table(v.misc.inf.data.infected$gest_group))
#number of infections
v.misc.inf.data.infected$infection1 <- ifelse(!is.na(v.misc.inf.data.infected$index_date_covid_infection_1) &
as.numeric(v.misc.inf.data.infected$index_date_covid_infection_1-v.misc.inf.data.infected$est_conception_date)>=-6*7 &
v.misc.inf.data.infected$index_date_covid_infection_1<=v.misc.inf.data.infected$miscarriage_study_pregnancy_end_date &
v.misc.inf.data.infected$index_date_covid_infection_1<=v.misc.inf.data.infected$pregnancy_end_date, 1,0)
v.misc.inf.data.infected$infection2 <- ifelse(!is.na(v.misc.inf.data.infected$index_date_covid_infection_2) &
as.numeric(v.misc.inf.data.infected$index_date_covid_infection_2-v.misc.inf.data.infected$est_conception_date)>=-6*7 &
v.misc.inf.data.infected$index_date_covid_infection_2<=v.misc.inf.data.infected$miscarriage_study_pregnancy_end_date &
v.misc.inf.data.infected$index_date_covid_infection_2<=v.misc.inf.data.infected$pregnancy_end_date, 1,0)
v.misc.inf.data.infected$infection3 <- ifelse(!is.na(v.misc.inf.data.infected$index_date_covid_infection_3) &
as.numeric(v.misc.inf.data.infected$index_date_covid_infection_3-v.misc.inf.data.infected$est_conception_date)>=-6*7 &
v.misc.inf.data.infected$index_date_covid_infection_3<=v.misc.inf.data.infected$miscarriage_study_pregnancy_end_date &
v.misc.inf.data.infected$index_date_covid_infection_3<=v.misc.inf.data.infected$pregnancy_end_date, 1,0)
v.misc.inf.data.infected$infection4 <- ifelse(!is.na(v.misc.inf.data.infected$index_date_covid_infection_4) &
as.numeric(v.misc.inf.data.infected$index_date_covid_infection_4-v.misc.inf.data.infected$est_conception_date)>=-6*7 &
v.misc.inf.data.infected$index_date_covid_infection_4<=v.misc.inf.data.infected$miscarriage_study_pregnancy_end_date &
v.misc.inf.data.infected$index_date_covid_infection_4<=v.misc.inf.data.infected$pregnancy_end_date, 1,0)
table(v.misc.inf.data.infected$infection1)
table(v.misc.inf.data.infected$infection2)
table(v.misc.inf.data.infected$infection3)
table(v.misc.inf.data.infected$infection4)
v.misc.inf.data.infected$infection <- ifelse(v.misc.inf.data.infected$infection1==1 | v.misc.inf.data.infected$infection2==1 | v.misc.inf.data.infected$infection3==1 | v.misc.inf.data.infected$infection4==1, 1, 0)
addmargins(table(v.misc.inf.data.infected$infection, exclude=NULL))
v.misc.inf.data.infected <- v.misc.inf.data.infected %>%
mutate(infection_no = case_when(
(infection1==1 & infection2==1 & infection3==1) ~ "3",
(infection1==1 & infection2==1 & infection3==0) ~ "2",
(infection1==1 & infection2==0 & infection3==1) ~ "2",
(infection1==0 & infection2==1 & infection3==1) ~ "2",
(infection1==1 & infection2==0 & infection3==0) ~ "1",
(infection1==0 & infection2==1 & infection3==0) ~ "1",
(infection1==0 & infection2==0 & infection3==1) ~ "1"
))
table(v.misc.inf.data.infected$infection_no)
#symptomatic or non sypmtomatic
v.misc.inf.data.infected <- v.misc.inf.data.infected %>%
mutate(symptomatic_infection = case_when(
(infection1==1 & final_symptomatic_covid_infection_1=="true") ~ "Symptomatic",
(infection2==1 & final_symptomatic_covid_infection_2=="true") ~ "Symptomatic",
(infection3==1 & final_symptomatic_covid_infection_3=="true") ~ "Symptomatic"
))
table(v.misc.inf.data.infected$symptomatic_infection)
####Primary analysis: miscarriage in infected versus uninfected (pre-pandemic)####
#Decription of infected and uninfected
#Look at outcomes in infected and uninfected
outcomes_by_inf_status <- v.misc.inf.data %>%
tabyl(outcomes_cat, inf_cat_graph) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(outcomes_by_inf_status, paste(folder_results, "Pregnancy_events_by_inf_cohort.csv", sep = ''))
#Look at outcomes in infected and uninfected - detailed
outcomes_by_inf_status <- v.misc.inf.data %>%
tabyl(outcomes_cat, inf_detail) %>%
adorn_totals(where="col") %>%
adorn_totals(where="row") %>%
adorn_percentages(denominator="col") %>%
adorn_pct_formatting() %>%
adorn_ns(position = "front")
write.csv(outcomes_by_inf_status, paste(folder_results, "Pregnancy_events_by_inf_cohort_detail.csv", sep = ''))
#Graph of outcomes by infection status
v.misc.inf.data$count <-1
outcome_distribution <- v.misc.inf.data %>%
group_by(inf_cat_graph, outcomes_cat_forgraph) %>%
summarise(count.sum = sum(count))
outcome_distribution_primary <- outcome_distribution %>%
group_by(inf_cat_graph) %>%
mutate(denominator = sum(count.sum)) %>%
mutate(prop_outcome = (count.sum / denominator) * 100)
outcome_distribution_primary %>% write_rds(paste0(folder_temp_data, "outcome_distribution_primary_infection_miscarriage.rds"), compress = "gz")
#Crude analysis
#multinomial regression adjusting for matching factors - baseline all non miscarriage outcomes
model3 <- multinom(outcomes_cat ~ inf_cat + gest_at_match + mother_age_at_conception + conception_quarter, data=v.misc.inf.data)
summary(model3)
exp(coef(model3))
exp(confint(model3))
nrow(fitted(model3))
z <- summary(model3)$coefficients/summary(model3)$standard.errors
p <- (1-pnorm(abs(z), 0, 1))*2
p
#multinomial regression adjusting for matching factors + other covariates (excluding ethnicity)
model5 <- multinom(outcomes_cat ~ inf_cat + gest_at_match + mother_age_at_conception + conception_quarter
+ UR6_categories + simd + cv_clinical_vulnerability_category, data=v.misc.inf.data)
summary(model5)
exp(coef(model5))
exp(confint(model5))
z3 <- summary(model5)$coefficients/summary(model5)$standard.errors
p3 <- (1-pnorm(abs(z3), 0, 1))*2
p3
#check removing those with missing SIMD or area
model_check <- v.misc.inf.data %>%
filter(simd!="Unknown/Missing" & UR6_categories!="Unknown/Missing")
table(model_check$simd)
table(model_check$UR6_categories)
modelx <- multinom(outcomes_cat ~ inf_cat + gest_at_match + mother_age_at_conception + conception_quarter
+ UR6_categories + simd + cv_clinical_vulnerability_category, data=model_check)
summary(modelx)
exp(coef(modelx))
exp(confint(modelx))
|
#' Convert weights, as COO matrix, to CSC matrix and weights vector
#'
#' \code{convert} Takes a weights triple (COO matrix) and converts it to a sparse edge-incidence matrix (CSC matrix) and weights vector.
#'
#' @param W COO matrix of weights: (i,j,w[ij])
#' @import Matrix
#' @export
#' @examples
#' W <- matrix(0,3,3)
#' W[1,] <- c(1,2,1)
#' W[2,] <- c(1,3,2)
#' W[3,] <- c(2,3,3)
#'
#' sol <- convert(W)
convert <- function(W) {
  nEdges <- nrow(W)
  ## Edge e contributes +1 in the column of its first node and -1 in the
  ## column of its second node; assemble all 2*nEdges triplets in one call.
  rows <- rep(seq_len(nEdges), 2)
  cols <- c(W[, 1], W[, 2])
  vals <- c(rep(1, nEdges), rep(-1, nEdges))
  list(Phi = sparseMatrix(i = rows, j = cols, x = vals), w = W[, 3])
}
## Clusterpath preprocessing
## Map a pair (i, j) with i < j to its position in dictionary order among the
## n*(n-1)/2 unordered pairs of n points.
tri2vec <- function(i, j, n) {
  (i - 1) * n - i * (i - 1) / 2 + (j - i)
}
## Inverse of tri2vec: recover the pair (i, j), i < j, from its dictionary
## order index k; returns a two-column matrix with columns named i and j.
vec2tri <- function(k, n) {
  row <- ceiling(0.5 * (2 * n - 1 - sqrt((2 * n - 1)^2 - 8 * k)))
  col <- k - n * (row - 1) + row * (row - 1) / 2 + row
  cbind(i = row, j = col)
}
#' Construct indices matrices
#'
#' \code{compactify_edges} constructs M1, M2, and ix index matrices.
#'
#' From the sparse weights vector it recovers the edge list \code{ix} and, for
#' every node i, the (zero-padded) lists of edges in which i appears as the
#' first endpoint (\code{M1}) and as the second endpoint (\code{M2}).
#'
#' @param w weights vector (sparse one-column Matrix; \code{w@i} holds the
#'   0-based positions of its nonzero entries in dictionary order)
#' @param n number of points to cluster
#' @return list with edge list \code{ix}, per-node edge-index matrices
#'   \code{M1}/\code{M2}, and per-node edge counts \code{s1}/\code{s2}
compactify_edges <- function(w,n) {
  sizes1 <- double(n)
  sizes2 <- double(n)
  # Decode the (i,j) pairs of the nonzero weights; @i is 0-based, hence +1.
  P <- vec2tri(w@i+1,n)
  nEdge <- nrow(P)
  M1 <- matrix(0,nEdge,n)
  M2 <- matrix(0,nEdge,n)
  for (i in 1:n) {
    # Edges where node i is the first (smaller-index) endpoint.
    group1 <- which(P[,1] == i)
    sizes1[i] <- length(group1)
    if (sizes1[i] > 0) {
      M1[1:sizes1[i],i] <- group1
    }
    # Edges where node i is the second (larger-index) endpoint.
    group2 <- which(P[,2] == i)
    sizes2[i] <- length(group2)
    if (sizes2[i] > 0) {
      M2[1:sizes2[i],i] <- group2
    }
  }
  # Trim trailing all-zero padding rows down to the longest per-node list.
  M1 <- M1[1:max(sizes1),,drop=FALSE]
  M2 <- M2[1:max(sizes2),,drop=FALSE]
  return(list(ix=P,M1=M1,M2=M2,s1=sizes1,s2=sizes2))
}
#' Gaussian Kernel + k-Nearest Neighbor Weights
#'
#' \code{gkn_weights} combines Gaussian kernel weights with k-nearest neighbor weights
#'
#' @param X The data matrix to be clustered. The rows are the features, and the columns are the samples.
#' @param phi The nonnegative parameter that controls the scale of kernel weights
#' @param k_row The number of row nearest neighbors
#' @param k_col The number of column nearest neighbors
#' @export
gkn_weights <- function(X,phi=0.5,k_row=5,k_col=5) {
  p <- nrow(X)
  n <- ncol(X)
  ## --- Row graph: Gaussian kernel weights thinned to k nearest neighbors ---
  w_row <- knn_weights(kernel_weights(t(X),phi/n),k_row,p)
  ## Normalize to sum to 1, then rescale so row/column penalties are commensurate.
  w_row <- (w_row/sum(w_row))/sqrt(n)
  E_row <- create_edge_incidence(w_row,p)
  nRowComp <- length(find_clusters(weights_graph(w = w_row,p))$size)
  ## --- Column graph: same construction on the untransposed data ---
  w_col <- knn_weights(kernel_weights(X,phi/p),k_col,n)
  w_col <- (w_col/sum(w_col))/sqrt(p)
  E_col <- create_edge_incidence(w_col,n)
  nColComp <- length(find_clusters(weights_graph(w = w_col,n))$size)
  ## Return the nonzero weight values (@x slot), the edge-incidence matrices,
  ## and the number of connected components of each weights graph.
  list(w_row=w_row@x,w_col=w_col@x,E_row=E_row,E_col=E_col,
       nRowComp=nRowComp,nColComp=nColComp)
}
#' "Thin" a weight vector to be positive only for its k-nearest neighbors
#'
#' \code{knn_weights} takes a weight vector \code{w} and sets the ith
#' component \code{w[i]} to zero if either of the two corresponding nodes
#' is not among the other's \code{k} nearest neighbors.
#'
#' @param w A vector of nonnegative weights. The ith entry \code{w[i]} denotes the weight used between the ith pair of centroids. The weights are in dictionary order.
#' @param k The number of nearest neighbors
#' @param n The number of data points.
#' @return A vector \cite{w} of weights for convex clustering.
knn_weights <- function(w,k,n) {
i <- 1
neighbors <- tri2vec(i,(i+1):n,n)
keep <- neighbors[sort(w[neighbors],decreasing=TRUE,index.return=TRUE)$ix[1:k]]
for (i in 2:(n-1)) {
group_A <- tri2vec(i,(i+1):n,n)
group_B <- tri2vec(1:(i-1),i,n)
neighbors <- c(group_A,group_B)
knn <- neighbors[sort(w[neighbors],decreasing=TRUE,index.return=TRUE)$ix[1:k]]
keep <- union(knn,keep)
}
i <- n
neighbors <- tri2vec(1:(i-1),i,n)
knn <- neighbors[sort(w[neighbors],decreasing=TRUE,index.return=TRUE)$ix[1:k]]
keep <- union(knn,keep)
if (length(keep) > 0)
w[-keep] <- 0
return(Matrix(data=w,ncol=1,sparse=TRUE))
}
#' Compute Gaussian Kernel Weights
#'
#' \code{kernel_weights} computes Gaussian kernel weights given a data matrix \code{X} and a scale parameter \code{phi}. Namely,
#' the lth weight \code{w[l]} is given by
#' \deqn{
#' w[l] = exp(-phi ||X[,i]-X[,j]||^2)
#' }, where the lth pair of nodes is (\code{i},\code{j}).
#' @param X The data matrix to be clustered. The rows are the features, and the columns are the samples.
#' @param phi The nonnegative parameter that controls the scale of kernel weights
#' @useDynLib cvxbiclustr
#' @return A vector \cite{w} of weights for convex clustering.
kernel_weights <- function(X,phi=1) {
  # Coerce arguments to the storage modes the registered C routine expects.
  storage.mode(X) <- "double"
  p <- as.integer(nrow(X))
  q <- as.integer(ncol(X))  # placeholder comment removed; see n below
  n <- as.integer(ncol(X))
  phi <- as.double(phi)
  # One weight per unordered pair of columns, in dictionary order.
  w <- double(n*(n-1)/2)
  sol <- .C('kernel_weights',X=X,p=p,n=n,phi=phi,w=w)
  # The "weights" name is ignored by return(); a plain numeric vector comes back.
  return(weights=sol$w)
}
#' Edge-Incidence Matrix of Weights Graph
#'
#' Construct the edge-incidence matrix of the weights graph: one row per edge,
#' with +1 in the column of the edge's first node and -1 in that of its second.
#'
#' @param w Weights vector
#' @param n Number of points being clustered
#' @import Matrix
create_edge_incidence <- function(w,n) {
  ## Decode the edge list from the positions of the nonzero weights
  ## (w@i holds 0-based indices into the dictionary-ordered pairs).
  edges <- vec2tri(w@i+1,n)
  nEdges <- nrow(edges)
  E <- Matrix(data=0,nrow=nEdges,ncol=n,sparse=TRUE)
  rowIdx <- seq_len(nEdges)
  ## Linear (column-major) indexing: +1 at the first endpoint ...
  E[(edges[,1]-1)*nEdges + rowIdx] <- 1
  ## ... and -1 at the second endpoint of every edge.
  E[(edges[,2]-1)*nEdges + rowIdx] <- -1
  E
}
#' Create adjacency matrix from V
#'
#' \code{create_adjacency} creates an n-by-n sparse adjacency matrix from the matrix of centroid differences.
#' Entry A[i,j] = 1 when the edge (i,j) has a zero difference column, i.e. the
#' two centroids have fused.
#'
#' @param V Matrix of centroid differences (one column per edge)
#' @param Phi Edge-incidence matrix (one row per edge: a single +1 and a single -1)
#' @import Matrix
#' @export
create_adjacency <- function(V,Phi) {
  # An edge is "fused" when its centroid-difference column is exactly zero.
  differences <- apply(V,2,FUN=function(x) {norm(as.matrix(x),'f')})
  connected_ix <- which(differences == 0)
  n <- ncol(Phi)
  m <- length(connected_ix)
  A <- Matrix(0, nrow = n, ncol = n, sparse = TRUE)
  if (m > 0) {
    # Recover both endpoints of all fused edges at once instead of scanning
    # one incidence row at a time (the previous loop was O(m * n) with two
    # which() calls per edge).
    sub <- as.matrix(Phi[connected_ix, , drop = FALSE])
    pos <- which(sub == 1, arr.ind = TRUE)    # (edge row, first-node column)
    neg <- which(sub == -1, arr.ind = TRUE)   # (edge row, second-node column)
    ix <- integer(m)
    jx <- integer(m)
    ix[pos[, 1]] <- pos[, 2]
    jx[neg[, 1]] <- neg[, 2]
    A[(jx-1)*n + ix] <- 1
  }
  return(A)
}
#' Find clusters
#'
#' \code{find_clusters} uses breadth-first search to identify the connected components of the corresponding
#' adjacency graph of the centroid differences vectors.
#'
#' @param A adjacency matrix
#' @return A list with \code{cluster} (component label per node, 1..nClusters)
#'   and \code{size} (number of nodes in each component).
#' @export
#' @import igraph
find_clusters <- function(A) {
  # Read only the upper triangle of A as an undirected graph.
  G <- graph.adjacency(A, mode = 'upper')
  n <- nrow(A)
  node_seen <- logical(n)
  cluster <- integer(n)
  k <- 1
  # Repeated BFS: each unvisited node seeds a new component label.
  for (i in 1:n) {
    if (!node_seen[i]) {
      # unreachable = FALSE restricts the traversal to i's component.
      # NOTE(review): this assumes $order contains only the visited vertices;
      # newer igraph versions may pad the order with NA -- confirm against the
      # igraph version this package targets.
      connected_set <- graph.bfs(G, root=i, unreachable = FALSE)$order
      node_seen[connected_set] <- TRUE
      cluster[connected_set] <- k
      k <- k + 1
    }
  }
  nClusters <- k - 1
  size <- integer(nClusters)
  # Tally component sizes from the label vector.
  for (j in 1:nClusters) {
    size[j] <- length(which(cluster == j))
  }
  return(list(cluster=cluster, size=size))
}
#' Weights Graph Adjacency Matrix
#'
#' Constructs the adjacency matrix of the weights graph. This is useful to determine the connectivity of the weights graph.
#'
#' @param w Weights vector (sparse); its stored slots index the kept edges
#' @param n Number of points being clustered
#' @import Matrix
weights_graph <- function(w,n) {
  # Map each stored weight back to its (i,j) node pair.
  node_pairs <- vec2tri(w@i+1,n)
  A <- Matrix(0, nrow = n, ncol = n, sparse = TRUE)
  # Mark the upper-triangular entry (i,j) of every kept edge.
  A[(node_pairs[,2]-1)*n + node_pairs[,1]] <- 1
  return(A)
}
| /cvxbiclustr/R/util.r | no_license | ingted/R-Examples | R | false | false | 7,573 | r | #' Convert weights, as COO matrix, to CSC matrix and weights vector
#'
#' \code{convert} Takes a weights triple (COO matrix) and converts it to a sparse edge-incidence matrix (CSC matrix) and weights vector.
#'
#' @param W COO matrix of weights: one row (i,j,w[ij]) per edge
#' @return A list with \code{Phi}, the edge-incidence matrix (+1/-1 per
#'   edge row), and \code{w}, the vector of edge weights.
#' @import Matrix
#' @export
#' @examples
#' W <- matrix(0,3,3)
#' W[1,] <- c(1,2,1)
#' W[2,] <- c(1,3,2)
#' W[3,] <- c(2,3,3)
#'
#' sol <- convert(W)
convert <- function(W) {
  m <- nrow(W)
  # Each edge contributes two incidence entries:
  # +1 at (edge, first node) and -1 at (edge, second node).
  edge_ix <- rep(seq_len(m), 2)
  node_ix <- c(W[,1], W[,2])
  vals <- rep(c(1, -1), each = m)
  Phi <- sparseMatrix(i = edge_ix, j = node_ix, x = vals)
  return(list(Phi = Phi, w = W[,3]))
}
## Clusterpath preprocessing
tri2vec <- function(i,j,n) {
  # Dictionary-order linear index of the upper-triangular pair (i,j), i < j.
  n*(i-1) - i*(i-1)/2 + j - i
}
vec2tri <- function(k,n) {
  # Invert tri2vec: recover the node pair (i,j) from the linear index k.
  # The row index i solves the quadratic bound on cumulative pair counts;
  # the remaining offset within row i gives the column index j.
  i <- ceiling(0.5*(2*n-1 - sqrt((2*n-1)^2 - 8*k)))
  j <- k - n*(i-1) + i*(i-1)/2 + i
  as.matrix(cbind(i,j))
}
#' Construct indices matrices
#'
#' \code{compactify_edges} constructs M1, M2, and ix index matrices: for
#' every node, which edge rows have it as first endpoint (M1) or second
#' endpoint (M2), zero-padded to a common height.
#' @param w weights vector (sparse)
#' @param n number of points to cluster
compactify_edges <- function(w,n) {
  P <- vec2tri(w@i+1,n)
  nEdge <- nrow(P)
  sizes1 <- double(n)
  sizes2 <- double(n)
  M1 <- matrix(0,nEdge,n)
  M2 <- matrix(0,nEdge,n)
  for (node in seq_len(n)) {
    # Edges where this node is the smaller-indexed endpoint.
    head_edges <- which(P[,1] == node)
    sizes1[node] <- length(head_edges)
    if (sizes1[node] > 0) {
      M1[seq_along(head_edges),node] <- head_edges
    }
    # Edges where this node is the larger-indexed endpoint.
    tail_edges <- which(P[,2] == node)
    sizes2[node] <- length(tail_edges)
    if (sizes2[node] > 0) {
      M2[seq_along(tail_edges),node] <- tail_edges
    }
  }
  # Trim the padding down to the busiest node's edge count.
  M1 <- M1[1:max(sizes1),,drop=FALSE]
  M2 <- M2[1:max(sizes2),,drop=FALSE]
  return(list(ix=P,M1=M1,M2=M2,s1=sizes1,s2=sizes2))
}
#' Gaussian Kernel + k-Nearest Neighbor Weights
#'
#' \code{gkn_weights} combines Gaussian kernel weights with k-nearest neighbor weights.
#' The pipeline is order-sensitive: kernel weights are thinned to k-nearest
#' neighbors, normalized to sum to one, and then rescaled so row and column
#' penalties are commensurate.
#'
#' @param X The data matrix to be clustered. The rows are the features, and the columns are the samples.
#' @param phi The nonnegative parameter that controls the scale of kernel weights
#' @param k_row The number of row nearest neighbors
#' @param k_col The number of column nearest neighbors
#' @return A list with thinned/normalized row and column weight values,
#'   their edge-incidence matrices, and the number of connected components
#'   of each weights graph.
#' @export
gkn_weights <- function(X,phi=0.5,k_row=5,k_col=5) {
p <- nrow(X); n <- ncol(X)
## Construct Gaussian kernel weights; the scale is divided by the opposite dimension
w_row <- kernel_weights(t(X),phi/n)
w_col <- kernel_weights(X,phi/p)
## Thin weights to k-nearest neighbors
w_row <- knn_weights(w_row,k_row,p)
w_col <- knn_weights(w_col,k_col,n)
## Normalize weights to sum to 1
w_row <- w_row/sum(w_row)
w_col <- w_col/sum(w_col)
## Rescale weights to make column and row penalties commensurate
w_row <- w_row/sqrt(n)
w_col <- w_col/sqrt(p)
## Construct edge-incidence matrices
E_row <- create_edge_incidence(w_row,p)
E_col <- create_edge_incidence(w_col,n)
## Get connectivity information: number of connected components of each weights graph
nRowComp <- length(find_clusters(weights_graph(w = w_row,p))$size)
nColComp <- length(find_clusters(weights_graph(w = w_col,n))$size)
## The @x slot returns the stored numeric values of the sparse weight vectors
return(list(w_row=w_row@x,w_col=w_col@x,E_row=E_row,E_col=E_col,
nRowComp=nRowComp,nColComp=nColComp))
}
#' "Thin" a weight vector to be positive only for its k-nearest neighbors
#'
#' \code{knn_weights} takes a weight vector \code{w} and sets the ith
#' component \code{w[i]} to zero if either of the two corresponding nodes
#' is not among the other's \code{k} nearest neighbors.
#'
#' @param w A vector of nonnegative weights. The ith entry \code{w[i]} denotes the weight used between the ith pair of centroids. The weights are in dictionary order.
#' @param k The number of nearest neighbors
#' @param n The number of data points.
#' @return A vector \cite{w} of weights for convex clustering.
knn_weights <- function(w,k,n) {
i <- 1
neighbors <- tri2vec(i,(i+1):n,n)
keep <- neighbors[sort(w[neighbors],decreasing=TRUE,index.return=TRUE)$ix[1:k]]
for (i in 2:(n-1)) {
group_A <- tri2vec(i,(i+1):n,n)
group_B <- tri2vec(1:(i-1),i,n)
neighbors <- c(group_A,group_B)
knn <- neighbors[sort(w[neighbors],decreasing=TRUE,index.return=TRUE)$ix[1:k]]
keep <- union(knn,keep)
}
i <- n
neighbors <- tri2vec(1:(i-1),i,n)
knn <- neighbors[sort(w[neighbors],decreasing=TRUE,index.return=TRUE)$ix[1:k]]
keep <- union(knn,keep)
if (length(keep) > 0)
w[-keep] <- 0
return(Matrix(data=w,ncol=1,sparse=TRUE))
}
#' Compute Gaussian Kernel Weights
#'
#' \code{kernel_weights} computes Gaussian kernel weights given a data matrix \code{X} and a scale parameter \code{phi}. Namely,
#' the lth weight \code{w[l]} is given by
#' \deqn{
#' w[l] = exp(-phi ||X[,i]-X[,j]||^2)
#' }, where the lth pair of nodes is (\code{i},\code{j}), enumerated in dictionary order.
#' @param X The data matrix to be clustered. The rows are the features, and the columns are the samples.
#' @param phi The nonnegative parameter that controls the scale of kernel weights
#' @useDynLib cvxbiclustr
#' @return A vector \code{w} of weights for convex clustering, of length n*(n-1)/2.
kernel_weights <- function(X,phi=1) {
# Coerce inputs to the exact C types expected by the compiled routine.
storage.mode(X) <- "double"
p <- as.integer(nrow(X))
n <- as.integer(ncol(X))
phi <- as.double(phi)
# One weight per unordered pair of columns, in dictionary order.
w <- double(n*(n-1)/2)
# The pairwise kernel evaluation is done in C ('kernel_weights' in the package dynlib).
sol <- .C('kernel_weights',X=X,p=p,n=n,phi=phi,w=w)
return(weights=sol$w)
}
#' Edge-Incidence Matrix of Weights Graph
#'
#' Construct the edge-incidence matrix of the weights graph: one row per
#' edge, holding +1 in the column of the edge's first node and -1 in the
#' column of its second node.
#'
#' @param w Weights vector (sparse); its stored slots index the edges
#' @param n Number of points being clustered
#' @import Matrix
create_edge_incidence <- function(w,n) {
  # Recover the (i,j) node pair for every stored edge.
  node_pairs <- vec2tri(w@i+1,n)
  m <- nrow(node_pairs)
  E <- Matrix(data=0,nrow=m,ncol=n,sparse=TRUE)
  # Fill both endpoint columns via linear (column-major) indexing.
  edge_rows <- seq_len(m)
  E[(node_pairs[,1]-1)*m + edge_rows] <- 1
  E[(node_pairs[,2]-1)*m + edge_rows] <- -1
  return(E)
}
#' Create adjacency matrix from V
#'
#' \code{create_adjacency} creates an n-by-n sparse adjacency matrix from the matrix of centroid differences.
#' Two nodes are adjacent exactly when their centroid-difference column is zero.
#'
#' @param V Matrix of centroid differences (one column per edge)
#' @param Phi Edge-incidence matrix (+1/-1 per edge row)
#' @import Matrix
#' @export
create_adjacency <- function(V,Phi) {
  # Frobenius norm of each difference column; a zero norm marks a fused pair.
  col_norms <- apply(V,2,FUN=function(v) {norm(as.matrix(v),'f')})
  fused_edges <- which(col_norms == 0)
  n <- ncol(Phi)
  A <- Matrix(0, nrow = n, ncol = n, sparse = TRUE)
  for (e in fused_edges) {
    # The incidence row of edge e identifies its two endpoints.
    i <- which(Phi[e,]==1)
    j <- which(Phi[e,]==-1)
    A[(j-1)*n + i] <- 1
  }
  return(A)
}
#' Find clusters
#'
#' \code{find_clusters} uses breadth-first search to identify the connected components of the corresponding
#' adjacency graph of the centroid differences vectors.
#'
#' @param A adjacency matrix
#' @return A list with \code{cluster} (component label per node) and
#'   \code{size} (number of nodes in each component).
#' @export
#' @import igraph
find_clusters <- function(A) {
G <- graph.adjacency(A, mode = 'upper')
n <- nrow(A)
node_seen <- logical(n)
cluster <- integer(n)
k <- 0L
# seq_len() (rather than 1:n) keeps the loop empty-safe when n == 0.
for (i in seq_len(n)) {
if (!node_seen[i]) {
k <- k + 1L
# BFS from i visits exactly i's connected component.
connected_set <- graph.bfs(G, root=i, unreachable = FALSE)$order
node_seen[connected_set] <- TRUE
cluster[connected_set] <- k
}
}
# tabulate() counts members per label in one pass, replacing the original
# O(n * k) which()-per-cluster loop, and is safe when k == 0.
size <- tabulate(cluster, nbins = k)
return(list(cluster=cluster, size=size))
}
#' Weights Graph Adjacency Matrix
#'
#' Constructs the adjacency matrix of the weights graph. This is useful to determine the connectivity of the weights graph.
#'
#' @param w Weights vector (sparse); its stored slots index the kept edges
#' @param n Number of points being clustered
#' @import Matrix
weights_graph <- function(w,n) {
  # Map each stored weight back to its (i,j) node pair.
  node_pairs <- vec2tri(w@i+1,n)
  A <- Matrix(0, nrow = n, ncol = n, sparse = TRUE)
  # Mark the upper-triangular entry (i,j) of every kept edge.
  A[(node_pairs[,2]-1)*n + node_pairs[,1]] <- 1
  return(A)
}
|
#load data for the plot
source("load_data.R")
#open PNG device
png(filename="plot1.png")
with(data_plot,
hist(as.double(as.character(Global_active_power)),
main="Global Active Power",
xlab="Global Active Power (Kilowatts)",
col="red"
)
)
#Close the PNG device
dev.off()
| /plot1.R | no_license | dsinibaldi77/ExData_Plotting1 | R | false | false | 322 | r |
#load data for the plot
source("load_data.R")
#open PNG device
png(filename="plot1.png")
with(data_plot,
hist(as.double(as.character(Global_active_power)),
main="Global Active Power",
xlab="Global Active Power (Kilowatts)",
col="red"
)
)
#Close the PNG device
dev.off()
|
# Accessing the FlyMine lists for testing the multiple-query ranking.
# The two sourced files define consensus_rank, pergene_rank, runsum_rank
# and the `pergene` data object used below.
source("ranking_FUNCTIONS_TPM.R")
source("mulq_genes_FUNCTIONS.R")
# Loading in fly atlas brain info (pre-made FlyMine gene list, tab-separated)
setwd("Test_fly_lists/")
class3b_top <- read.delim("Class_IIIb_18_20hrs.tsv", stringsAsFactors = F, header = F)
colnames(class3b_top) <- c("Secondary_identifier", "Gene_symbol", "GeneDB_Identifier", "Organism_name")
# Function to compare the two ranking methods (consensus vs per-gene)
compare_methods <- function(tlist, X){
# tlist = data frame of a pre-made FlyMine gene list (only GeneDB_Identifier is used)
# X = binarised counts matrix with genes as rownames
# Returns a data frame with one row per found gene: name, rank under each
# method, and whether it belonged to the 90% input set or the 10% held out.
#Setting up the test list
#How many rows make up 90% of the list
n = round(0.9 * nrow(tlist))
#Which rows are going to make up the 90%
ind <- sample(1:nrow(tlist), n)
#Find the selected 90% genes in the pergene dataset
list_names <- tlist[ind,"GeneDB_Identifier"]
findgenes <- which(rownames(X) %in% list_names == TRUE)
#10% left out, held back to test how well each method recovers them
list_10 <- tlist[-ind, "GeneDB_Identifier"]
find10 <- which(rownames(X) %in% list_10 == TRUE)
#Store the results
foundgenes <- c(rownames(X)[findgenes], rownames(X)[find10])
fin <- data.frame(Genes=foundgenes)
####### CONSENSUS RANKING
#Do consensus ranking (consensus_rank() comes from ranking_FUNCTIONS_TPM.R)
r <- consensus_rank(N=NA, X=X, method="random", gene=findgenes,
type = "null")
#Rankings of the 90% inputted
res90 <- r$Initial_rankings
#Find where the 10% left out sit in the ranked list
y <- r$Ranked_list[which(r$Ranked_list[,"rn"] %in% list_10 == TRUE), c("rn","Rank")]
res10 <- y$Rank; names(res10) <- y$rn ##Create named vector
#Match the order of the gene names and insert into fin
# NOTE(review): assumes names(all) is a permutation of fin$Genes — verify
# against the consensus_rank output (Initial_rankings must be named).
all <- c(res90, res10)
fin$Rank_consensus <- all[match(names(all), fin$Genes)]
####### PERGENE RANKING
pg <- pergene_rank(N=NA, X=X, gene = findgenes,
type = "null")
#Put 10% through runsum
# NOTE(review): X[find10,] drops to a vector when only one gene is held
# out — confirm runsum_rank accepts that shape.
cand <- X[find10,]
runrank <- runsum_rank(df = setDF(pg$ranked_genes), q = cand,
col_start = 2+length(findgenes)+1)
#Put 90% through runsum
runrank90 <- runsum_rank(df = setDF(pg$ranked_genes), q=pg$queries,
col_start = 2+length(findgenes)+1)
#Match the order of the gene names and insert into fin
all2 <- c(runrank90, runrank)
fin$Rank_pergene <- all2[match(names(all2), fin$Genes)]
#Add to dataframe which ones are 10% and which ones are 90%
fin$Set <- c(rep(90, length(findgenes)), rep(10, length(find10)))
return(fin)
}
# Run the replicates (the original comment said 20, but the loop runs 10)
complete <- list()
system.time({
for(i in 1:10){
complete[[i]] <- compare_methods(tlist = class3b_top, X = pergene$reclassified)
}
})
save(complete, file="class3b_10rep.Rdata") | /Assessment of multiple query/Class3b_list_rep10.R | no_license | oknox/Research-project | R | false | false | 2,669 | r | #Accessing the flymine lists for testing the multiple query ranking
source("ranking_FUNCTIONS_TPM.R")
source("mulq_genes_FUNCTIONS.R")
#Loading in fly atlas brain info
setwd("Test_fly_lists/")
class3b_top <- read.delim("Class_IIIb_18_20hrs.tsv", stringsAsFactors = F, header = F)
colnames(class3b_top) <- c("Secondary_identifier", "Gene_symbol", "GeneDB_Identifier", "Organism_name")
#Make function to compare methods (consensus ranking and per gene ranking)
compare_methods <- function(tlist, X){
# tlist = data frame of a pre-made FlyMine gene list (only GeneDB_Identifier is used)
# X = binarised counts matrix with genes as rownames
# Returns a data frame with one row per found gene: name, rank under each
# method, and whether it belonged to the 90% input set or the 10% held out.
#Setting up the test list
#How many rows make up 90% of the list
n = round(0.9 * nrow(tlist))
#Which rows are going to make up the 90%
ind <- sample(1:nrow(tlist), n)
#Find the selected 90% genes in the pergene dataset
list_names <- tlist[ind,"GeneDB_Identifier"]
findgenes <- which(rownames(X) %in% list_names == TRUE)
#10% left out, held back to test how well each method recovers them
list_10 <- tlist[-ind, "GeneDB_Identifier"]
find10 <- which(rownames(X) %in% list_10 == TRUE)
#Store the results
foundgenes <- c(rownames(X)[findgenes], rownames(X)[find10])
fin <- data.frame(Genes=foundgenes)
####### CONSENSUS RANKING
#Do consensus ranking (consensus_rank() comes from ranking_FUNCTIONS_TPM.R)
r <- consensus_rank(N=NA, X=X, method="random", gene=findgenes,
type = "null")
#Rankings of the 90% inputted
res90 <- r$Initial_rankings
#Find where the 10% left out sit in the ranked list
y <- r$Ranked_list[which(r$Ranked_list[,"rn"] %in% list_10 == TRUE), c("rn","Rank")]
res10 <- y$Rank; names(res10) <- y$rn ##Create named vector
#Match the order of the gene names and insert into fin
# NOTE(review): assumes names(all) is a permutation of fin$Genes — verify
# against the consensus_rank output (Initial_rankings must be named).
all <- c(res90, res10)
fin$Rank_consensus <- all[match(names(all), fin$Genes)]
####### PERGENE RANKING
pg <- pergene_rank(N=NA, X=X, gene = findgenes,
type = "null")
#Put 10% through runsum
# NOTE(review): X[find10,] drops to a vector when only one gene is held
# out — confirm runsum_rank accepts that shape.
cand <- X[find10,]
runrank <- runsum_rank(df = setDF(pg$ranked_genes), q = cand,
col_start = 2+length(findgenes)+1)
#Put 90% through runsum
runrank90 <- runsum_rank(df = setDF(pg$ranked_genes), q=pg$queries,
col_start = 2+length(findgenes)+1)
#Match the order of the gene names and insert into fin
all2 <- c(runrank90, runrank)
fin$Rank_pergene <- all2[match(names(all2), fin$Genes)]
#Add to dataframe which ones are 10% and which ones are 90%
fin$Set <- c(rep(90, length(findgenes)), rep(10, length(find10)))
return(fin)
}
# Run the replicates (the original comment said 20, but the loop runs 10)
complete <- list()
system.time({
for(i in 1:10){
complete[[i]] <- compare_methods(tlist = class3b_top, X = pergene$reclassified)
}
})
# Persist the replicate results
save(complete, file="class3b_10rep.Rdata")
library(readxl) # Load the readxl package
exdata1 <- read_excel("C:/Rstudy/Sample1.xlsx") # Read the Sample1 Excel file into the exdata1 data set
exdata1 # Inspect the exdata1 data set
stem(exdata1$AGE) # exdata1의 AGE 변수에 대한 줄기 잎 그림 | /Code/Chapter05/p136.R | no_license | newstars/HelloR | R | false | false | 264 | r | library(readxl) # readxl 패키지 로드
exdata1 <- read_excel("C:/Rstudy/Sample1.xlsx") # Read the Sample1 Excel file into the exdata1 data set
exdata1 # Inspect the exdata1 data set
stem(exdata1$AGE) # Stem-and-leaf plot of the AGE variable in exdata1
##########################
# Code for figure 2 in the Extended data figures section
#
# Author: Hanmo Li & Mengyang Gu
#
# Email: mengyang at pstat.ucsb.edu
##########################
# ODE solver and epidemic-estimation dependencies, plus project helpers
# (SIRDC, find_root_beta, calculate_Beta_t_directly, data_seven_day_smoothing).
library(deSolve)
library(EpiEstim)
source(file = "./Reproducing results in the paper/Codes/functions_8th_version.R")
# Simulation switches and SIRDC model constants
noise_index = F # TRUE adds Gaussian noise to the simulated transmission rate
gamma = 0.2 # rate out of the infectious compartment I
theta = 0.1 # rate out of the resolving compartment R
delta = 0.0066 # fraction of the theta*R outflow that becomes deaths
n = 100 # number of simulated days
N = 10^7 # population size
I_0 = 1000 # initial I
R_0 = 1000 # initial R
D_0 = 0 # initial deaths
C_0 = 0 # initial C
set.seed(3)
# Simulated transmission rate beta(t): exponential decay, optionally with noise
if(noise_index){
beta_t_simulation = exp(-0.7 * seq(1,10,length.out = n))+ rnorm(n, 0, 0.2)
beta_t_simulation[beta_t_simulation<0] = 0
} else{
beta_t_simulation = exp(-0.7 * seq(1,10,length.out = n))
}
plot(beta_t_simulation)
# Step-function form of beta(t) handed to the ODE solver
betafun = stepfun(2:(n),beta_t_simulation)
plot(betafun)
# Pack the SIRDC parameters in the positional order expected by SIRDC():
# gamma, theta, delta, N, beta(t) step function.
parameters = list(1:5)
parameters[[1]] = gamma
parameters[[2]] = theta
parameters[[3]] = delta
parameters[[4]] = N
parameters[[5]] = betafun
# Initial state: everyone not already in I/R/D/C is susceptible
init = c(
S = N - (I_0 + R_0 + D_0 + C_0) ,
I = I_0,
R = R_0,
D = D_0,
C = C_0
)
# Reference solution with deSolve's default integrator (lsoda)
out_ode = ode(
y = init,
times = 1:n,
func = SIRDC,
parms = parameters,
)
# Column 1 is time; columns 2..6 are S, I, R, D, C
death_selected = out_ode[,5]
S_t_seq = out_ode[,2]
# Cumulative ever-infected count implied by the susceptible trajectory
confirm_selected_smoothed = N - S_t_seq
plot(death_selected)
# Same system solved with fixed-step RK4, step size 1 day
out_ode_step_size_1 = ode(
y = init,
times = 1:n,
func = SIRDC,
parms = parameters,
method = "rk4",
hini=1,
)
death_selected_rk4_step_1 = out_ode_step_size_1[,5]
infective_rk4_step_1 = out_ode_step_size_1[,3]
susceptive_rk4_step_1 = out_ode_step_size_1[,2]
plot(death_selected_rk4_step_1)
lines(death_selected)
# And again with the finer RK4 step size of 0.1 day
out_ode_step_size_0.1 = ode(
y = init,
times = 1:n,
func = SIRDC,
parms = parameters,
method = "rk4",
hini=0.1,
)
death_selected_rk4_step_0.1 = out_ode_step_size_0.1[,5]
infective_rk4_step_0.1 = out_ode_step_size_0.1[,3]
susceptive_rk4_step_0.1 = out_ode_step_size_0.1[,2]
plot(death_selected_rk4_step_0.1)
lines(death_selected)
#########################
# Part 1: Parameter Estimation via our approximation algorithm
#########################
I_0_approx = I_0
R_0_approx = R_0
# Scale factor aligning the cumulative-infection curve with the assumed
# initial I + R + D total
ratio = confirm_selected_smoothed[1]/(I_0_approx+ R_0_approx+ death_selected[1])
# use ratio to adjust smoothed confirmed cases and get susceptible cases S_t
estimated_confirm = confirm_selected_smoothed/ratio
S_t_seq = N - estimated_confirm
init_for_beta = c(S_t_seq[1], I_0_approx, R_0_approx, death_selected[1], 0)
param_record_approx_for_beta = matrix(0, 5, n) # 5 rows: S_t, I_t, R_t, D_t, C_t
param_record_approx_for_beta[,1] = init_for_beta
param_record_approx_for_beta[1,] = S_t_seq
approx_beta_seq = rep(0, n-1) # record the value of transmission rate
# system.time({
# One day at a time: root-find the beta reproducing the observed S step,
# then advance I, R, D, C using endpoint-averaged (trapezoid-style) updates.
for (i in 1:(n-1)){
S_t_1 = param_record_approx_for_beta[1,i]
S_t_2 = param_record_approx_for_beta[1,i+1]
I_t_1 = param_record_approx_for_beta[2,i]
R_t_1 = param_record_approx_for_beta[3,i]
D_t_1 = param_record_approx_for_beta[4,i]
C_t_1 = param_record_approx_for_beta[5,i]
# Guard against a vanishing infectious compartment
if(I_t_1<1){
I_t_1 = 1
}
# find_root_beta comes from the sourced helper file
beta_t_1_2 = uniroot(find_root_beta, c(0, 10^6), tol = 0.0001, param = c(S_t_1, S_t_2, I_t_1), N = N, gamma=gamma)
I_t_2 = I_t_1 * exp(beta_t_1_2$root*(S_t_1 + S_t_2)/(2*N) - gamma)
R_t_2 = (2-theta)/(2+theta)*R_t_1 + gamma/(2+theta)*(I_t_1+I_t_2)
D_t_2 = D_t_1 + delta*theta*(R_t_1+R_t_2)/2
C_t_2 = C_t_1 + (1-delta)*theta*(R_t_1+R_t_2)/2
param_record_approx_for_beta[2:5, i+1] = c(I_t_2, R_t_2, D_t_2, C_t_2)
approx_beta_seq[i] = beta_t_1_2$root
}
# Fitted trajectories, eyeballed against the simulated truth
death_fit = param_record_approx_for_beta[4, ]
infectious_fit = param_record_approx_for_beta[2,]
beta_t_fit = approx_beta_seq
plot(beta_t_fit)
lines(beta_t_simulation)
plot(death_fit)
lines(death_selected)
#######################################
#Part 2: Euler method
#######################################
# Transmission rate recovered directly from the death curve (helper from
# the sourced functions file), clamped to the range [0, 10]
beta_t_from_euler = calculate_Beta_t_directly(death_selected, N)
beta_t_from_euler[beta_t_from_euler<0] = 0
beta_t_from_euler[beta_t_from_euler>10] = 10
# Interpolant so the ODE solver can evaluate this beta at any time
betafun_euler = approxfun(beta_t_from_euler)
plot(beta_t_from_euler)
# SIRDC right-hand side with a time-varying transmission rate.
# Reads betafun_euler, N, gamma, theta, delta from the enclosing script;
# the `parameters` argument required by deSolve is unused here.
SIRDC_multi_beta <- function(time, state, parameters) {
par <- as.list(c(state))
with(par, {
# Transmission rate at the current time from the fitted interpolant
beta = betafun_euler(time)
dS <- -beta/N * I * S
dI <- beta/N * I * S - gamma * I
dR <- gamma * I - theta * R
dD <- delta * theta * R
dC <- (1-delta) * theta * R
list(c(dS, dI, dR, dD, dC))
})
}
t_euler <- (1):(length(beta_t_from_euler))
# Same initial conditions as the simulation, refit under the Euler-derived beta
I_0_euler = I_0
R_0_euler = R_0
D_0 = 0
C_0 = 0
init_fitted_euler <- c(S = N-I_0_euler-R_0_euler-D_0-C_0 , I = I_0_euler, R = R_0_euler, D =0, C = 0)
fit_euler <- as.data.frame(ode(y = init_fitted_euler, times = t_euler, func = SIRDC_multi_beta, parms = NULL))
# Common y-range for the death-toll comparison panel
y_limit_death = c(min(death_selected, fit_euler$D, death_fit, na.rm=T), max(death_selected, fit_euler$D, death_fit , na.rm=T))
par(mfrow=c(1,1))
# Title fragment; defined for completeness but not referenced below
if(noise_index){
noise_text = "with noise"
}else{
noise_text = "without noise"
}
#######################################
#Part 2: Draw and save figures (three panels in one EPS file)
#######################################
file_path = "./Reproducing results in the paper/Results/Extended_fig_2/"
cairo_ps( file = paste0(file_path, "Simulation_results_compare_with_Euler_noise_free.eps"), onefile = FALSE, fallback_resolution = 600, width =3.6 * 5.5, height = 3.6)
par(mfrow=c(1,3))
par( mai=c(0.55,0.55,0.2,0.2),mgp = c(1.9,0.65,0), cex.lab=2, cex.axis=1.8, cex.main=2, cex.sub = 2) # , mgp = c(1.5,0.2,0)
# Effective reproduction number beta(t)/gamma * S(t)/N for each method
# (0.2 is the hard-coded gamma from the setup above)
approx_R_eff = beta_t_fit/0.2 * S_t_seq[1: (length(S_t_seq)-1)]/N
euler_R_eff = beta_t_from_euler[1:(length(beta_t_from_euler)-1)]/0.2 * fit_euler$S[1:(length(fit_euler$S)-1)] / N
lsoda_R_eff = beta_t_simulation/0.2 * out_ode[,2] / N
RK_4_step_1_R_eff = beta_t_simulation/0.2 * susceptive_rk4_step_1/N
RK_4_step_0.1_R_eff = beta_t_simulation/0.2 * susceptive_rk4_step_0.1/N
# Seven-day moving averages (helper from the sourced functions file)
approx_R_eff_smoothed = data_seven_day_smoothing(approx_R_eff)
real_R_eff_smoothed = data_seven_day_smoothing(lsoda_R_eff)
euler_R_eff_smoothed = data_seven_day_smoothing(euler_R_eff)
RK_4_step_1_R_eff_smoothed = data_seven_day_smoothing(RK_4_step_1_R_eff)
RK_4_step_0.1_R_eff_smoothed = data_seven_day_smoothing(RK_4_step_0.1_R_eff)
# Panel 1: effective reproduction number
y_limit_R_t = range(approx_R_eff_smoothed,real_R_eff_smoothed ,euler_R_eff_smoothed, na.rm=T)
plot(approx_R_eff_smoothed, type="l",ylim = y_limit_R_t, ylab = "Effective reproduction number", xlab = "Days", col="blue") #, main = paste0(county_names[each_index], ", population=", round(N/10^6,2),"M", ", Ratio = ", round(ratio_real,3)))
lines(real_R_eff_smoothed, col = "black", type="p")
lines(approx_R_eff_smoothed, col = "blue", lwd = 1)
lines( euler_R_eff_smoothed, col = "red", lty=2, lwd = 2)
lines(RK_4_step_1_R_eff_smoothed, col = "green", lwd=2)
lines(RK_4_step_0.1_R_eff_smoothed, col = "green", lty=2, lwd=2)
abline(h=1, lty=2,col="black") # epidemic threshold R_eff = 1
# Panel 2: active infectious individuals
y_limit_infectious = range(fit_euler$I/10^5, infectious_fit/10^5, na.rm=T)
plot(infectious_fit/10^5,ylim = y_limit_infectious, type = "l",lwd = 2, col = "blue",xlab = "Days", ylab = expression(paste("Active infectious individuals ", (10^5)))) #,main = paste0(county_name_selected, ", population = ", round(N/10^6,2), "M"))
lines(fit_euler$I/10^5~t_euler, col = "red", lty=2, lwd = 2)
lines(out_ode[,3]/10^5, type = "p")
lines(infective_rk4_step_1/10^5, col ="green", lwd=2)
lines(infective_rk4_step_0.1/10^5, col ="green", lwd=2, lty=2)
# Panel 3: cumulative death toll
plot(death_selected/10^3,ylim = y_limit_death/10^3, type = "p",pch = 2, cex = 1.2, col = "black",xlab = "Days", ylab = expression(paste("Cumulative death toll ", (10^3)))) #,main = paste0(county_name_selected, ", population = ", round(N/10^6,2), "M"))
lines(fit_euler$D/10^3~t_euler, col = "red", lty=2, lwd = 2)
lines(death_fit/10^3, col = "blue", lwd = 4)
lines(death_selected_rk4_step_1/10^3, col = "green", lwd=2)
lines(death_selected_rk4_step_0.1/10^3, col = "green", lty = 2, lwd = 2)
# legend(
#   "bottomright",
#   legend = c("Config 1: Robust estimation", "Config 2: F&J", "Config 3: RK4, step size=1", "Config 4: RK4, step size=0.1", "Config 5: lsoda"),
#   # type = c("l", "l", "p"),
#   pch = c(NA, NA, NA, NA, 2),
#   lty = c(1, 2, 1,2, NA),
#   col = c("blue", "red", "green","green", "black"),
#   lwd = c(4,2,2,2,1.2),
#   cex = 1.5
# )
dev.off()
| /Reproducing results in the paper/Codes/Extend_fig_2_Simulation_for_comparing_Euler_and_our_algorithm.R | no_license | UncertaintyQuantification/Robust-estimation-of-SARS-CoV-2-epidemic-in-US-counties | R | false | false | 8,326 | r | ##########################
# Code for figure 2 in the Extended data figures section
#
# Author: Hanmo Li & Mengyang Gu
#
# Email: mengyang at pstat.ucsb.edu
##########################
library(deSolve)
library(EpiEstim)
source(file = "./Reproducing results in the paper/Codes/functions_8th_version.R")
noise_index = F
gamma = 0.2
theta = 0.1
delta = 0.0066
n = 100
N = 10^7
I_0 = 1000
R_0 = 1000
D_0 = 0
C_0 = 0
set.seed(3)
if(noise_index){
beta_t_simulation = exp(-0.7 * seq(1,10,length.out = n))+ rnorm(n, 0, 0.2)
beta_t_simulation[beta_t_simulation<0] = 0
} else{
beta_t_simulation = exp(-0.7 * seq(1,10,length.out = n))
}
plot(beta_t_simulation)
betafun = stepfun(2:(n),beta_t_simulation)
plot(betafun)
parameters = list(1:5)
parameters[[1]] = gamma
parameters[[2]] = theta
parameters[[3]] = delta
parameters[[4]] = N
parameters[[5]] = betafun
init = c(
S = N - (I_0 + R_0 + D_0 + C_0) ,
I = I_0,
R = R_0,
D = D_0,
C = C_0
)
out_ode = ode(
y = init,
times = 1:n,
func = SIRDC,
parms = parameters,
)
death_selected = out_ode[,5]
S_t_seq = out_ode[,2]
confirm_selected_smoothed = N - S_t_seq
plot(death_selected)
out_ode_step_size_1 = ode(
y = init,
times = 1:n,
func = SIRDC,
parms = parameters,
method = "rk4",
hini=1,
)
death_selected_rk4_step_1 = out_ode_step_size_1[,5]
infective_rk4_step_1 = out_ode_step_size_1[,3]
susceptive_rk4_step_1 = out_ode_step_size_1[,2]
plot(death_selected_rk4_step_1)
lines(death_selected)
out_ode_step_size_0.1 = ode(
y = init,
times = 1:n,
func = SIRDC,
parms = parameters,
method = "rk4",
hini=0.1,
)
death_selected_rk4_step_0.1 = out_ode_step_size_0.1[,5]
infective_rk4_step_0.1 = out_ode_step_size_0.1[,3]
susceptive_rk4_step_0.1 = out_ode_step_size_0.1[,2]
plot(death_selected_rk4_step_0.1)
lines(death_selected)
#########################
# Part 1: Parameter Estimation via our approximation algorithm
#########################
I_0_approx = I_0
R_0_approx = R_0
ratio = confirm_selected_smoothed[1]/(I_0_approx+ R_0_approx+ death_selected[1])
# use ratio to adjust smoothed confirmed cases and get susceptible cases S_t
estimated_confirm = confirm_selected_smoothed/ratio
S_t_seq = N - estimated_confirm
init_for_beta = c(S_t_seq[1], I_0_approx, R_0_approx, death_selected[1], 0)
param_record_approx_for_beta = matrix(0, 5, n) # 5 rows: S_t, I_t, R_t, D_t, C_t
param_record_approx_for_beta[,1] = init_for_beta
param_record_approx_for_beta[1,] = S_t_seq
approx_beta_seq = rep(0, n-1) # record the value of transmission rate
# system.time({
for (i in 1:(n-1)){
S_t_1 = param_record_approx_for_beta[1,i]
S_t_2 = param_record_approx_for_beta[1,i+1]
I_t_1 = param_record_approx_for_beta[2,i]
R_t_1 = param_record_approx_for_beta[3,i]
D_t_1 = param_record_approx_for_beta[4,i]
C_t_1 = param_record_approx_for_beta[5,i]
if(I_t_1<1){
I_t_1 = 1
}
beta_t_1_2 = uniroot(find_root_beta, c(0, 10^6), tol = 0.0001, param = c(S_t_1, S_t_2, I_t_1), N = N, gamma=gamma)
I_t_2 = I_t_1 * exp(beta_t_1_2$root*(S_t_1 + S_t_2)/(2*N) - gamma)
R_t_2 = (2-theta)/(2+theta)*R_t_1 + gamma/(2+theta)*(I_t_1+I_t_2)
D_t_2 = D_t_1 + delta*theta*(R_t_1+R_t_2)/2
C_t_2 = C_t_1 + (1-delta)*theta*(R_t_1+R_t_2)/2
param_record_approx_for_beta[2:5, i+1] = c(I_t_2, R_t_2, D_t_2, C_t_2)
approx_beta_seq[i] = beta_t_1_2$root
}
death_fit = param_record_approx_for_beta[4, ]
infectious_fit = param_record_approx_for_beta[2,]
beta_t_fit = approx_beta_seq
plot(beta_t_fit)
lines(beta_t_simulation)
plot(death_fit)
lines(death_selected)
#######################################
#Part 2: Euler method
#######################################
beta_t_from_euler = calculate_Beta_t_directly(death_selected, N)
beta_t_from_euler[beta_t_from_euler<0] = 0
beta_t_from_euler[beta_t_from_euler>10] = 10
betafun_euler = approxfun(beta_t_from_euler)
plot(beta_t_from_euler)
SIRDC_multi_beta <- function(time, state, parameters) {
par <- as.list(c(state))
with(par, {
beta = betafun_euler(time)
dS <- -beta/N * I * S
dI <- beta/N * I * S - gamma * I
dR <- gamma * I - theta * R
dD <- delta * theta * R
dC <- (1-delta) * theta * R
list(c(dS, dI, dR, dD, dC))
})
}
# ---- Forward-solve the SIRDC system with the Euler-derived beta(t) ----
t_euler <- (1):(length(beta_t_from_euler))
I_0_euler = I_0
R_0_euler = R_0
D_0 = 0
C_0 = 0
# NOTE(review): D and C are initialised with literal 0 even though D_0/C_0
# are defined just above (same value, just inconsistent).
init_fitted_euler <- c(S = N-I_0_euler-R_0_euler-D_0-C_0 , I = I_0_euler, R = R_0_euler, D =0, C = 0)
fit_euler <- as.data.frame(ode(y = init_fitted_euler, times = t_euler, func = SIRDC_multi_beta, parms = NULL))
# Shared y-axis range so the death curves from all methods are comparable
y_limit_death = c(min(death_selected, fit_euler$D, death_fit, na.rm=T), max(death_selected, fit_euler$D, death_fit , na.rm=T))
par(mfrow=c(1,1))
# Label describing whether observation noise was simulated (used in titles)
if(noise_index){
noise_text = "with noise"
}else{
noise_text = "without noise"
}
#######################################
#Part 2: Draw and save figures
#######################################
# Three-panel EPS figure (Extended Fig. 2): R_eff(t), active infectious, deaths
file_path = "./Reproducing results in the paper/Results/Extended_fig_2/"
cairo_ps( file = paste0(file_path, "Simulation_results_compare_with_Euler_noise_free.eps"), onefile = FALSE, fallback_resolution = 600, width =3.6 * 5.5, height = 3.6)
par(mfrow=c(1,3))
par( mai=c(0.55,0.55,0.2,0.2),mgp = c(1.9,0.65,0), cex.lab=2, cex.axis=1.8, cex.main=2, cex.sub = 2) # , mgp = c(1.5,0.2,0)
# Effective reproduction number R_eff(t) = beta(t)/0.2 * S(t)/N for each
# estimation scheme (0.2 is presumably the recovery rate gamma -- confirm)
approx_R_eff = beta_t_fit/0.2 * S_t_seq[1: (length(S_t_seq)-1)]/N
euler_R_eff = beta_t_from_euler[1:(length(beta_t_from_euler)-1)]/0.2 * fit_euler$S[1:(length(fit_euler$S)-1)] / N
lsoda_R_eff = beta_t_simulation/0.2 * out_ode[,2] / N
RK_4_step_1_R_eff = beta_t_simulation/0.2 * susceptive_rk4_step_1/N
RK_4_step_0.1_R_eff = beta_t_simulation/0.2 * susceptive_rk4_step_0.1/N
# Seven-day smoothing before plotting
approx_R_eff_smoothed = data_seven_day_smoothing(approx_R_eff)
real_R_eff_smoothed = data_seven_day_smoothing(lsoda_R_eff)
euler_R_eff_smoothed = data_seven_day_smoothing(euler_R_eff)
RK_4_step_1_R_eff_smoothed = data_seven_day_smoothing(RK_4_step_1_R_eff)
RK_4_step_0.1_R_eff_smoothed = data_seven_day_smoothing(RK_4_step_0.1_R_eff)
# Panel 1: effective reproduction number for all five configurations
y_limit_R_t = range(approx_R_eff_smoothed,real_R_eff_smoothed ,euler_R_eff_smoothed, na.rm=T)
plot(approx_R_eff_smoothed, type="l",ylim = y_limit_R_t, ylab = "Effective reproduction number", xlab = "Days", col="blue") #, main = paste0(county_names[each_index], ", population=", round(N/10^6,2),"M", ", Ratio = ", round(ratio_real,3)))
lines(real_R_eff_smoothed, col = "black", type="p")
lines(approx_R_eff_smoothed, col = "blue", lwd = 1)
lines( euler_R_eff_smoothed, col = "red", lty=2, lwd = 2)
lines(RK_4_step_1_R_eff_smoothed, col = "green", lwd=2)
lines(RK_4_step_0.1_R_eff_smoothed, col = "green", lty=2, lwd=2)
abline(h=1, lty=2,col="black") # epidemic threshold R_eff = 1
# Panel 2: active infectious individuals (in units of 10^5)
y_limit_infectious = range(fit_euler$I/10^5, infectious_fit/10^5, na.rm=T)
plot(infectious_fit/10^5,ylim = y_limit_infectious, type = "l",lwd = 2, col = "blue",xlab = "Days", ylab = expression(paste("Active infectious individuals ", (10^5)))) #,main = paste0(county_name_selected, ", population = ", round(N/10^6,2), "M"))
lines(fit_euler$I/10^5~t_euler, col = "red", lty=2, lwd = 2)
lines(out_ode[,3]/10^5, type = "p")
lines(infective_rk4_step_1/10^5, col ="green", lwd=2)
lines(infective_rk4_step_0.1/10^5, col ="green", lwd=2, lty=2)
# Panel 3: cumulative death toll (in units of 10^3)
plot(death_selected/10^3,ylim = y_limit_death/10^3, type = "p",pch = 2, cex = 1.2, col = "black",xlab = "Days", ylab = expression(paste("Cumulative death toll ", (10^3)))) #,main = paste0(county_name_selected, ", population = ", round(N/10^6,2), "M"))
lines(fit_euler$D/10^3~t_euler, col = "red", lty=2, lwd = 2)
lines(death_fit/10^3, col = "blue", lwd = 4)
lines(death_selected_rk4_step_1/10^3, col = "green", lwd=2)
lines(death_selected_rk4_step_0.1/10^3, col = "green", lty = 2, lwd = 2)
# Legend intentionally left commented out in the published figure
# legend(
#   "bottomright",
#   legend = c("Config 1: Robust estimation", "Config 2: F&J", "Config 3: RK4, step size=1", "Config 4: RK4, step size=0.1", "Config 5: lsoda"),
#   # type = c("l", "l", "p"),
#   pch = c(NA, NA, NA, NA, 2),
#   lty = c(1, 2, 1,2, NA),
#   col = c("blue", "red", "green","green", "black"),
#   lwd = c(4,2,2,2,1.2),
#   cex = 1.5
# )
dev.off()
|
library(PRROC)
# Working copy of the raw data with human-readable (Spanish) factor labels
demos <- odata
levels(demos$MARRIAGE) <- c("Casado","Otro","Soltero")
levels(demos$EDUCATION) <- c("Otro","Licenciatura","Maestría/Doctorado","Otro","Bachillerato")
# Apply the same relabelling to the modelling data set
levels(fdata$MARRIAGE) <- c("Casado","Otro","Soltero")
levels(fdata$EDUCATION) <- c("Otro","Licenciatura","Maestría/Doctorado","Otro","Bachillerato")
# Drop column 14 before training. NOTE(review): positional removal is fragile;
# confirm which variable this is if fdata's layout ever changes.
fdata <- fdata[,-14]
# Final single-hidden-layer neural network (nnet), 9-fold CV, precision/recall
# summary, fixed tuning point (size=14, decay=0.1), inputs centered and scaled.
# NOTE(review): metric="ROC" is presumably not among the metrics prSummary
# produces (AUC/Precision/Recall/F) -- verify caret does not fall back here.
finalmodel <- train(default~.,data=fdata,method="nnet",trControl=trainControl(method="cv", number=9, summaryFunction = prSummary, classProbs = TRUE, savePredictions = TRUE),preProcess=c("center","scale"),tuneGrid=expand.grid(size=c(14), decay=c(0.1)),metric="ROC",maxit=900)
# Observed class (0/1) paired with the model's in-sample fitted probabilities
results <- data.frame(real=as.numeric(fdata$default)-1,pred=finalmodel$finalModel$fitted.values)
# ROC and precision-recall curves: scores.class0 = positives (real==1),
# scores.class1 = negatives (real==0)
rocdata <- as.data.frame(roc.curve(scores.class1 = filter(results,real==0)[,2],scores.class0 = filter(results,real==1)[,2],curve=T)$curve)
prdata <- as.data.frame(pr.curve(scores.class1 = filter(results,real==0)[,2],scores.class0 = filter(results,real==1)[,2],curve=T)$curve)
# Thin the curves for plotting: round the threshold column (V3) to 2 decimals
# and keep one (V1, V2) point per rounded threshold
rocdata[,3] <- round(rocdata[,3],2)
rocdata <- group_by(rocdata,V3) %>% summarise(V1=head(V1,1),V2=head(V2,1))
prdata[,3] <- round(prdata[,3],2)
prdata <- group_by(prdata,V3) %>% summarise(V1=head(V1,1),V2=head(V2,1))
| /ShinyObjects.R | no_license | MiguelAngel243/CreditCardPredictor | R | false | false | 1,225 | r | library(PRROC)
demos <- odata
levels(demos$MARRIAGE) <- c("Casado","Otro","Soltero")
levels(demos$EDUCATION) <- c("Otro","Licenciatura","Maestría/Doctorado","Otro","Bachillerato")
levels(fdata$MARRIAGE) <- c("Casado","Otro","Soltero")
levels(fdata$EDUCATION) <- c("Otro","Licenciatura","Maestría/Doctorado","Otro","Bachillerato")
fdata <- fdata[,-14]
finalmodel <- train(default~.,data=fdata,method="nnet",trControl=trainControl(method="cv", number=9, summaryFunction = prSummary, classProbs = TRUE, savePredictions = TRUE),preProcess=c("center","scale"),tuneGrid=expand.grid(size=c(14), decay=c(0.1)),metric="ROC",maxit=900)
results <- data.frame(real=as.numeric(fdata$default)-1,pred=finalmodel$finalModel$fitted.values)
rocdata <- as.data.frame(roc.curve(scores.class1 = filter(results,real==0)[,2],scores.class0 = filter(results,real==1)[,2],curve=T)$curve)
prdata <- as.data.frame(pr.curve(scores.class1 = filter(results,real==0)[,2],scores.class0 = filter(results,real==1)[,2],curve=T)$curve)
rocdata[,3] <- round(rocdata[,3],2)
rocdata <- group_by(rocdata,V3) %>% summarise(V1=head(V1,1),V2=head(V2,1))
prdata[,3] <- round(prdata[,3],2)
prdata <- group_by(prdata,V3) %>% summarise(V1=head(V1,1),V2=head(V2,1))
|
################################################################################
### R BASICS WORKSHOP                                                        ###
### EXERCISE 1.1: A sample R session                                         ###
###                                                                          ###
### Bioinformatics Services Unit                                             ###
### National Institute of Genomic Medicine                                   ###
### Website: github.com/hachepunto/R_Basics_workshop                         ###
################################################################################

## OBJECTIVE:
## The purpose of this exercise is to familiarize you with R and with the way
## one interacts with the command line.

## PART 1 ##

# Here you are introduced to some important concepts which we will look at in
# more detail during the rest of the presentations.

# To browse the online manuals, references and other material you can use
help.start()

# The symbol '<-' indicates assignment. It is used to store information
# in an object:
x <- 50

# In this first command you assigned the value 50 to the object named 'x'.
# Objects in R are used to store information. To find out what is stored in
# an object, just type its name:
x

# R does not run lines that begin with '#'. This is used to write comments.

# R has built-in values for some fundamental constants.
# For example, to find the value of Pi just type:
pi

# You can copy the value of Pi into another object. For example:
y <- pi
y
pi

# In R, actions, data manipulation, plots and analyses are carried out using
# functions, which are elements of R that perform specific actions.
# For example, the function 'rnorm' generates random values from a normal
# distribution:
rnorm(50)

# Functions act on, or are modified by, arguments. Arguments define how a
# function works. In this example, the function 'rnorm' is modified by an
# argument with the value 50. As a result, you get 50 random values from a
# normal distribution. You can request as many values as you want:
rnorm(50)
rnorm(5)
rnorm(1)

# You can store the result of a function in an object to use it later for
# another purpose:
x <- rnorm(50)

# You can do things with the values stored in 'x'. For example, you can use
# the function 'mean' to compute the mean of the values inside 'x':
mean(x)

# You can compute other statistics, such as the standard deviation, or simply
# create a summary of the values in x:
sd(x)
summary(x)

# You can change the order of the values:
sort(x)
sort(x, decreasing=TRUE)

# Note, for example, that the function 'sort' takes two arguments: one is the
# set of values in 'x', the other is the value 'TRUE'. We will learn more
# about arguments later.

# You can also make a histogram of these values:
hist(x)

# R also has 'operators' that perform a multitude of actions. The most common
# are the arithmetic operators for addition '+', subtraction '-',
# multiplication '*' and division '/'. For example, we can multiply the
# values in 'x' by a constant:
x*2

# We can also write a single line of code that performs multiple actions and
# stores the output. For example:
y <- rnorm(50)*2

# This creates 50 random values from a normal distribution, then multiplies
# each value by 2, and finally stores the result in an object called 'y'.

# We can also build more complicated sequences of actions, for example:
y <- 0.5 + 1.5*x + rnorm(50)

# This 1) creates a set of 50 random values from a normal distribution, 2)
# multiplies the values in 'x' by 1.5, 3) adds, element by element, the
# results of (1) and (2), 4) adds 0.5 to each value in the result of (3),
# and 5) stores the results of this computation in 'y'. To see the values
# inside 'y' just type:
y

# Now you can use the values in the objects 'x' and 'y' for many things. For
# example, to make a scatterplot you can use the function 'plot':
plot(x, y)

# This should automatically open a graphics window. To find the correlation
# between 'x' and 'y':
cor(x, y)

# To produce a boxplot:
boxplot(x,y)

# To run a t-test:
t.test(x, y)

# To run a one-tailed t-test:
t.test(x, y, alternative="greater")

# Note the difference between the p-values.

# To see what is in your workspace so far, simply type:
ls()

# Note that the objects you have created are listed ('x' and 'y')
## PART 2 ##

# In this second part you will keep playing with several elements of R.
# Just run the code and look at the results. It would be better if you typed
# the code instead of copy-pasting it:

# To make several plots of sin(theta):
theta <- seq(0, 2*pi, length=100)
plot(theta, sin(theta))
par(new=TRUE)
plot(theta, sin(theta), type="h")
plot(theta, sin(theta), type="l")
plot(theta, sin(theta), type="s")

theta <- seq(0, 2*pi, length=10)
plot(theta, sin(theta), type="l")

# To see what a command means, type:
help(plot)

# For simple arithmetic and repeated sequences, type:
c(1:25)
seq(1, 25)
seq(25, 1, -1)
seq(1, 25, 2)
seq(1, 25, length=6)
seq(0, 2, 0.1)
rep(0, 25)
rep(1, 25)

# Generate a vector of integers from 1 to 25:
n <- c(1:25)

# Make a vector equal to the square root of n:
w <- sqrt(n)

# Simulate a response variable and display it in a table:
r <- n + rnorm(n) * w
data.frame(n, r)

# Run a linear regression, display the results, create a scatterplot, and
# draw the regression line in red on the plot:
regress.rn <- lm(r ~ n)
summary(regress.rn)
plot(n, r)
abline(regress.rn, col="red")

# Note that the order of r and n in the regression line is the opposite of
# the order in the plot.

# Plot the residuals and label the axes (the axis labels are Spanish string
# literals and are kept as-is):
plot(fitted(regress.rn), resid(regress.rn), xlab="ValoresAjustados",
ylab="Residuos", main="Residuos vs Ajustados")

# Simulate 100 coin flips and look at the results:
x <- rbinom(100,1,0.5)
x

# Then store the running total of the number of heads and plot the results
# with steps (type = "s"):
c <- cumsum(x)
plot(c, type="s")

# Roll a die 1000 times and look at a summary:
fair <- sample(c(1:6), 1000, replace=TRUE)
summary(fair)

# Roll a biased die 1000 times and look at a summary:
biased <- sample(c(1:6), 1000, replace=TRUE, prob=c(1/12,1/12,1/12,1/4,1/4,1/4))
summary(biased)

# The following data set comes from the famous Michelson-Morley experiment.
# There are five experiments (column 'Expt'), each with 20 runs (column
# 'Run'), and 'Speed' is the recorded speed of light minus 290,000 km/sec.
# To see the data set, type:
morley

# The first two columns are labels. Make the experiment number a factor:
morley$Expt <- factor(morley$Expt)

# Now make a boxplot of the speeds in column 3 (the title and labels are
# Spanish string literals and are kept as-is):
boxplot(morley[ ,3] ~ morley$Expt, main="Datos de la velocidad de la luz",
xlab="Experimento", ylab="Velocidad")

# Run an analysis of variance to see whether the measured speeds differ
# significantly between experiments.
anova.mm <- aov(Speed ~ Expt, data=morley)
summary(anova.mm)

# Draw a cubic:
x <- seq(-2, 2, 0.01)
plot(x, x^3-3*x, type="l")

# Draw a bell curve:
curve(dnorm(x), -3, 3)

# Check the probability mass function of a binomial distribution:
x <- c(0:100)
prob <- dbinom(x, 100, 0.5)
plot(x, prob, type="h")

# To draw a parameterized curve, start with a sequence and give the values
# of 'x' and 'y':
angle <- seq(-pi, pi, 0.01)
x <- sin(3*angle)
y <- cos(4*angle)
plot(x, y, type="l")

# Now let's draw contour lines and a surface. First, provide a sequence of
# values; this time we specify the number of terms:
x <- seq(-pi, pi, len=50)
y <- x

# Then define a function of these 'x' and 'y' values and draw a contour map:
f <- outer(x, y, function(x, y) (cos(3*x) + cos(y)) / (1 + x^2 + y^2))
contour(x,y,f)

# To draw a surface plot:
persp(x,y,f,col="orange")

# To change the viewing angle:
persp(x, y, f, col="orange", theta=-30, phi=45)
| /1-2_introduccion.R | no_license | hachepunto/R_Basics_workshop | R | false | false | 8,837 | r | ################################################################################
### R BASICS WORKSHOP ###
### EJERCISIO 1.1: Una muestra de una sesión de R ###
### ###
### Unida de Servicios Bioinformáticos ###
### Instituto Nacional de Medicina Genómica ###
### Website: github.com/hachepunto/R_Basics_workshop ###
################################################################################
## OBJECTIVE:
## El propósito de este ejercicio es familiarizarte con R y con la forma de
## interactuar con la linea de comandos
## PARTE 1 ##
# Aquí te introduciré a algunos conceptos importantes los cuales veremos con más
# detalle durante el resto de las presentaciones.
# Para buscar en los manuales en linea, las referencias y otros materiales puedes usar
help.start()
# el símbolo '<-' se usa para indicar asignación. También se usa para guardar información
# en un objeto:
x <- 50
# En este primer comando asignaste el valor 50 al objeto llamado 'x'.
# Los objetos en R se usan para guardar información. Para encontrar que está almacenado
# en un objeto solo es necesario escribir su nombre:
x
# R no corre lineas que comiencen con '#'. Esto se usa para crear comentarios.
# R ya tiene integrados algunos valores de algunas constantes fundamentales.
# Por ejemplo para encontrar el valor de Pi, solo escribe:
pi
# Puedes copiar el valor de Pi en otro objeto. Por ejemplo:
y <- pi
y
pi
# En R, las acciones, la manipulación de datos, las gráficas y los análisis se llevan
# a cabo usando funciones, las cuales son elementos de R que hacen acciones en específico.
# Por ejemplo, la función 'rnorm' genera valores a azar con distribución normal:
rnorm(50)
# Las funciones actúan en o son modificadas por argumentos. Los argumentos definen como
# trabajará una función. En este ejemplo, la función 'rnorm' es modificada por un
# argumento que tiene el valor de 50. Como resultado, obtienes 50 valores al azar de una
# distribución normal. Puedes pedir el número de valores que quieras:
rnorm(50)
rnorm(5)
rnorm(1)
# Puedes almacenar el resultado de una función en un objeto para usarlo después con
# otro propósito:
x <- rnorm(50)
# Puedes hacer cosas con los valores almacenados en 'x'. Por ejemplo, puedes usar la
# función 'mean' para calcular la media de los valores dentro de 'x':
mean(x)
# Puedes calcular otros estadísticos, como la desviación estándar, o simplemente crear
# un resumen de los valores en x:
sd(x)
summary(x)
# Puedes cambiar el orden de los valores:
sort(x)
sort(x, decreasing=TRUE)
# Por ejemplo, nota que la función 'sort' toma dos argumentos, uno son los valores
# en 'x', el otro es el valor 'TRUE'. ya aprenderemos más acerca de los argumentos.
# También puedes hacer un histograma de estos valores:
hist(x)
# R también tiene 'operadores' que efectúan una multitud de acciones. Los más comunes
# son los operadores aritméticos de suma '+', resta '-', multiplicación '*', y
# división '/'. Por ejemplo, podemos multiplicar los valores en 'x' por una constante:
x*2
# Tambien podemos escribir una sola linea de código que haga múltiples acciones y
# guarde la salida. Por ejemplo:
y <- rnorm(50)*2
# Esto crea 50 valores al azar de una distribución normal, Luego multiplica cada valor
# por 2 y finalmente guarda el resultado en un objeto llamado 'y'.
# Podemos crear también secuencias de acciones más complicadas, por ejemplo:
y <- 0.5 + 1.5*x + rnorm(50)
# Esto 1) crea un conjunto de 50 valores al azar de una distribución normal, 2)
# multiplica los valores en 'x' por 1.5, 3) suma elemento por elemento los resultados
# por (1) y (2), 4) suma 0.5 a cada valor en el resultado de (3), y 5) guarda los
# resultados de este cómputo dentro de 'y'. Para ver el valor dentro de 'y' solo escribe:
y
# Ahora puedes usar los valores en los objetos 'x' y 'y' para muchas cosas. Por ejemplo,
# para hacer un scatterplot puedes usar la función 'plot':
plot(x, y)
# Esto tendría que abrir automáticamente una ventana de gráficos. Para hallar la
# correlación entre 'x' y 'y':
cor(x, y)
# Para producir un boxplot y hacer una prueba de t:
boxplot(x,y)
# Para hacer una prueba de t:
t.test(x, y)
# Para hacer una prueba de t de una cola:
t.test(x, y, alternative="greater")
# Nota la diferencia entre los valores de p.
# Para ver que hay en tu espacio de trabajo hasta ahora, simplemente escribe:
ls()
# Nota que los objetos que has creado están enlistados ('x' y 'y')
## PARTE 2 ##
# En esta segunda parte seguirás jugando con varios elementos de R.
# Solamente ejecuta el código y mira los resultados. Sería mejor si trataras de escribir
# el código en vez de solo copiar y pegar:
# Para hacer varias gráficas del sin(theta):
theta <- seq(0, 2*pi, length=100)
plot(theta, sin(theta))
par(new=TRUE)
plot(theta, sin(theta), type="h")
plot(theta, sin(theta), type="l")
plot(theta, sin(theta), type="s")
theta <- seq(0, 2*pi, length=10)
plot(theta, sin(theta), type="l")
# Para ver que significa un comando, escribe:
help(plot)
# Para hacer simple aritmética y repetir secuencias escribe:
c(1:25)
seq(1, 25)
seq(25, 1, -1)
seq(1, 25, 2)
seq(1, 25, length=6)
seq(0, 2, 0.1)
rep(0, 25)
rep(1, 25)
# Genera un vector de enteros del 1 al 25:
n <- c(1:25)
# Haz una columna de vectores igual a la raíz cuadrada de n:
w <- sqrt(n)
# Simula alguna respuesta de variables, y despliégalas en una tabla:
r <- n + rnorm(n) * w
data.frame(n, r)
# Ejecuta una regresión lineal, despliega los resultados, crea un scatterplot, y dibuja
# la regresión lineal en rojo en la gráfica:
regress.rn <- lm(r ~ n)
summary(regress.rn)
plot(n, r)
abline(regress.rn, col="red")
# Nota que el orden de r y n de la linea de regresión es opuesta al del orden en
# la gráfica.
# Grafica los residuos y ponle leyendas a los ejes:
plot(fitted(regress.rn), resid(regress.rn), xlab="ValoresAjustados",
ylab="Residuos", main="Residuos vs Ajustados")
# Simula 100 lanzamientos de una moneda y ve los resultados:
x <- rbinom(100,1,0.5)
x
# Luego, guarda el total acumulado de número de caras, grafica los resultados
# con pasos (type = "s"):
c <- cumsum(x)
plot(c, type="s")
# Tira un dado 1000 veces y mira un resumen:
fair <- sample(c(1:6), 1000, replace=TRUE)
summary(fair)
# Tira un dado sesgado 1000 veces y ver un resumen:
biased <- sample(c(1:6), 1000, replace=TRUE, prob=c(1/12,1/12,1/12,1/4,1/4,1/4))
summary(biased)
# El siguiente conjunto de datos surgen del famoso experimento de Michelson-Morley.
# Hay cinco experimentos (columna 'Expt') y cada uno tiene 20 corridas (columna 'Run')
# y 'Speed' es la velocidad de la luz grabada menos 290,000 km/sec.
# Para ver el conjunto de datos, escribe:
morley
# Los datos de las primeras dos columnas son leyendas. Haz el número de experimento un
# factor:
morley$Expt <- factor(morley$Expt)
# Ahora, haz un boxplot con las leyendas de la velocidad en la columna 3:
boxplot(morley[ ,3] ~ morley$Expt, main="Datos de la velocidad de la luz",
xlab="Experimento", ylab="Velocidad")
# Ejecuta un análisis de varianza para ver si las velocidades medidas son
# significativamente distintas entre experimentos.
anova.mm <- aov(Speed ~ Expt, data=morley)
summary(anova.mm)
# Dibuja una cúbica:
x <- seq(-2, 2, 0.01)
plot(x, x^3-3*x, type="l")
# Dibuja una curva de campana:
curve(dnorm(x), -3, 3)
# Checa la función de masa de probabilidad de una distribución binomial:
x <- c(0:100)
prob <- dbinom(x, 100, 0.5)
plot(x, prob, type="h")
# Para trazar una curva parametrizada, comenzar con una secuencia y dar los valores
# de 'x' y 'y':
angle <- seq(-pi, pi, 0.01)
x <- sin(3*angle)
y <- cos(4*angle)
plot(x, y, type="l")
# Ahora vamos a trazar curvas de nivel y una superficie. En primer lugar, le damos una
# secuencia de valores. Esta vez especificamos el número de términos:
x <- seq(-pi, pi, len=50)
y <- x
# Entonces, definimos una función de estos valores 'x' y 'y' y dibujamos un mapa
# de contornos.
f <- outer(x, y, function(x, y) (cos(3*x) + cos(y)) / (1 + x^2 + y^2))
contour(x,y,f)
# Para dibujar un gráfico de superficie:
persp(x,y,f,col="orange")
# Para cambiar el ángulo de visión:
persp(x, y, f, col="orange", theta=-30, phi=45)
|
library(mapview)
library(sp)
library(RColorBrewer)
library(ggplot2)

## ---- Load data --------------------------------------------------------------
# GIS overlap points and NLA lake-chloride survey data (2007 & 2012 visits)
overlaps<-read.csv("data/gis for nick.csv", header=TRUE)
chloride_2007_2012<-read.csv("data/NLA_chloride_2007_2012.csv", header=TRUE)
pract_data<-read.csv("data/practice model 2007 data.csv", header=TRUE)

## ---- Plotting overlaps -------------------------------------------------------
# WGS84 lon/lat points (EPSG:4326)
overlap_sp<-SpatialPoints(overlaps[,c("long", "lat")], proj4string = CRS("+init=epsg:4326"))
mapview(overlap_sp)

## ---- Plotting chloride ---------------------------------------------------------
# Columns 35/36 hold the coordinates; the rest become point attributes
chloride_spatial<-SpatialPointsDataFrame(coords = chloride_2007_2012[,c("LON_DD83", "LAT_DD83")], data = chloride_2007_2012[,c(-35,-36)], proj4string = CRS("+init=epsg:4326") )
chloride_spatial@data$log_X2012_Chloride<-log(chloride_spatial@data$X2012_Chloride)
chloride_spatial@data$log_X2007_Chloride<-log(chloride_spatial@data$X2007_Chloride)
# NOTE: despite the "log_" prefix this column is NOT log-transformed (the
# year-to-year difference can be negative); kept as-is for behaviour parity.
chloride_spatial@data$log_chloride_Difference<-chloride_spatial@data$chloride_Difference
# 2012 chloride map
pal <- colorRampPalette(rev(brewer.pal(9, "Spectral")))
chlr_data<-chloride_spatial[!is.na(chloride_spatial$X2012_Chloride),]
mapview(chlr_data, zcol="log_X2012_Chloride", col.regions=pal)
# 2007 chloride map
chlr_data<-chloride_spatial[!is.na(chloride_spatial$X2007_Chloride),]
mapview(chlr_data, zcol="log_X2007_Chloride", col.regions=pal)
# Difference between 2007 and 2012.
# BUGFIX: the second condition previously re-tested X2007_Chloride, so lakes
# missing the 2012 measurement slipped through; require BOTH years present.
chlr_data<-chloride_spatial[!is.na(chloride_spatial$X2007_Chloride) & !is.na(chloride_spatial$X2012_Chloride) ,]
mapview(chlr_data[chlr_data@data$chloride_Difference<10 & chlr_data@data$chloride_Difference>-10,], zcol="chloride_Difference", col.regions=pal, map.types="OpenStreetMap")

## ---- Catchment size ----------------------------------------------------------
pract_spatial<-SpatialPointsDataFrame(coords = pract_data[,c("LON_DD", "LAT_DD")], data = pract_data[,c(-24,-25)], proj4string = CRS("+init=epsg:4326") )
pract_spatial@data$log_Relative.Catchment.Size<-log(pract_spatial@data$Relative.Catchment.Size)
# NOTE(review): the trailing bare `labels` argument looks accidental (it
# passes the base function `labels` positionally); kept for behaviour parity
# -- verify whether it should be removed or given a value.
mapview(pract_spatial, zcol="log_Relative.Catchment.Size", col.regions=pal, legend=TRUE, labels)

## ---- Histograms of chloride ----------------------------------------------------
# 2012 chloride, log10 x-axis
ggplot() + geom_histogram(data=chloride_spatial@data, aes(x=X2012_Chloride), fill="red") + scale_x_continuous("Chloride 2012", trans="log10")
# 2007 chloride, log10 x-axis
ggplot() + geom_histogram(data=chloride_spatial@data, aes(x=X2007_Chloride), fill="blue") + scale_x_continuous("Chloride 2007", trans="log10")
# Difference in chloride, restricted to (-100, 100) to drop extreme outliers
ggplot() + geom_histogram(data=chloride_spatial@data[chloride_spatial@data$chloride_Difference<100 & chloride_spatial@data$chloride_Difference>-100,], aes(x=chloride_Difference), fill="black") + scale_x_continuous("Chloride diff")
# Lake/watershed relative catchment size, log10 x-axis
ggplot() + geom_histogram(data=pract_data, aes(x=Relative.Catchment.Size), fill="black") + scale_x_continuous("Relative Catchment Size", trans="log10") + theme(
plot.margin = margin(12, 12, 12, 12))
| /lake_chloride_maps_histograms.R | no_license | nskaff/gleon_predict_salt | R | false | false | 2,698 | r | library(mapview)
library(sp)
library(RColorBrewer)
library(ggplot2)
#loading in data
overlaps<-read.csv("data/gis for nick.csv", header=T)
chloride_2007_2012<-read.csv("data/NLA_chloride_2007_2012.csv", header=T)
pract_data<-read.csv("data/practice model 2007 data.csv", header=T)
#plotting overlaps
overlap_sp<-SpatialPoints(overlaps[,c("long", "lat")], proj4string = CRS("+init=epsg:4326"))
mapview(overlap_sp)
chloride_spatial<-SpatialPointsDataFrame(coords = chloride_2007_2012[,c("LON_DD83", "LAT_DD83")], data = chloride_2007_2012[,c(-35,-36)], proj4string = CRS("+init=epsg:4326") )
#plotting chloride
chloride_spatial@data$log_X2012_Chloride<-log(chloride_spatial@data$X2012_Chloride)
chloride_spatial@data$log_X2007_Chloride<-log(chloride_spatial@data$X2007_Chloride)
chloride_spatial@data$log_chloride_Difference<-chloride_spatial@data$chloride_Difference
#2012
pal <- colorRampPalette(rev(brewer.pal(9, "Spectral")))
chlr_data<-chloride_spatial[!is.na(chloride_spatial$X2012_Chloride),]
mapview(chlr_data, zcol="log_X2012_Chloride", col.regions=pal)
#2007
chlr_data<-chloride_spatial[!is.na(chloride_spatial$X2007_Chloride),]
mapview(chlr_data, zcol="log_X2007_Chloride", col.regions=pal)
#dif between 2007 2012
chlr_data<-chloride_spatial[!is.na(chloride_spatial$X2007_Chloride) & !is.na(chloride_spatial$X2007_Chloride) ,]
mapview(chlr_data[chlr_data@data$chloride_Difference<10 & chlr_data@data$chloride_Difference>-10,], zcol="chloride_Difference", col.regions=pal, map.types="OpenStreetMap")
#catchment size
pract_spatial<-SpatialPointsDataFrame(coords = pract_data[,c("LON_DD", "LAT_DD")], data = pract_data[,c(-24,-25)], proj4string = CRS("+init=epsg:4326") )
pract_spatial@data$log_Relative.Catchment.Size<-log(pract_spatial@data$Relative.Catchment.Size)
mapview(pract_spatial, zcol="log_Relative.Catchment.Size", col.regions=pal, legend=T, labels)
##histograms of chloride
#chlor 2012
ggplot() + geom_histogram(data=chloride_spatial@data, aes(x=X2012_Chloride), fill="red") + scale_x_continuous("Chloride 2012", trans="log10")
#chlor 2007
ggplot() + geom_histogram(data=chloride_spatial@data, aes(x=X2007_Chloride), fill="blue") + scale_x_continuous("Chloride 2007", trans="log10")
#diff in chlor
ggplot() + geom_histogram(data=chloride_spatial@data[chloride_spatial@data$chloride_Difference<100 & chloride_spatial@data$chloride_Difference>-100,], aes(x=chloride_Difference), fill="black") + scale_x_continuous("Chloride diff")
#lake/watershed area hist
ggplot() + geom_histogram(data=pract_data, aes(x=Relative.Catchment.Size), fill="black") + scale_x_continuous("Relative Catchment Size", trans="log10") + theme(
plot.margin = margin(12, 12, 12, 12))
|
##################################################################################################
####
#' Simple version of DeClust with linear regression and without iterative optimization
#'
#' This function is a simple version of DeClust. It uses linear regression instead of
#' nonlinear optimization, assuming the gene expression follows a normal distribution
#' instead of a log-normal distribution. In addition, unlike the function
#' deClustFromMarker(), it does not use iterative optimization.
#'
#' @param exprM a gene by sample expression matrix (in the original expression scale, not log-transformed, has to be non-negative)
#' @param k number of clusters
#' @param seed the random seed set for kmeans clustering
#' @return A named list with three components: subtype, subtypeprofileM and subtypefractionM. subtype is a vector with length equal to the sample size storing the sample clustering results; subtypeprofileM is a gene by compartment matrix storing the inferred expression profile for each cancer subtype, immune and stromal compartment; subtypefractionM is a sample by compartment matrix storing the estimated fraction of each compartment for each sample.
#' @examples data(exprM);
#' r<-simpleFromMarker(exprM,3);
#' @export
simpleFromMarker<-function(exprM,k,seed=1)
{
# Stromal (row 1) and immune (row 2) marker gene sets shipped with the package
data(SI_geneset)
stromal<-as.matrix(SI_geneset)[1,-1];
immune<-as.matrix(SI_geneset)[2,-1];
if(sum(rownames(exprM)%in%immune)<10|sum(rownames(exprM)%in%stromal)<10)stop("too few markers for immune and stromal")
# Per-sample marker score: median marker-gene expression (computed on the log
# scale), rescaled by its 99th percentile so scores are roughly in [0, 1]
stromalMean<-exp(apply(log(exprM)[rownames(exprM)%in%stromal,],2,median))
immuneMean<-exp(apply(log(exprM)[rownames(exprM)%in%immune,],2,median))
markerM<-cbind(stromal=stromalMean/quantile(stromalMean,0.99),immune=immuneMean/quantile(immuneMean,0.99))
# Grid search over global scaling factors for the stromal (rates) and immune
# (ratei) fractions; the outer loop runs in parallel via foreach/%dopar%
totalrM<-foreach( rates = seq(0.1,1,by=0.1))%dopar%
{
print(rates)
rM<-c()
for(ratei in seq(0.1,1,by=0.1))
{
print(ratei)
# Candidate fractions: scaled marker scores plus cancer = 1 - stromal - immune
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
if(mean(fractionM[,"cancer"]<0)>0.05)
{
############we stop when there are more than 5% of samples with purity<0
next();
}else{
# Clamp fractions to [0, 1], deconvolve, and score this (rates, ratei)
# pair by the mean squared reconstruction error on the log scale
# NOTE(review): `seed` is not forwarded to simpleFromCC here (it is in the
# final call below) -- confirm whether that is intentional.
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
r<-simpleFromCC(exprM,fractionM,k)
reM<-r$subtypeprofileM%*%t(r$subtypefractionM);
reM[reM<1]<-1
rM<-rbind(rM,c(rates=rates,ratei=ratei,MSE=mean(as.vector((log(reM)-log(exprM))^2))))
}
}
rM;
}
# Keep the (rates, ratei) pair with the smallest MSE and rerun the
# deconvolution once with those scaling factors and the user-supplied seed
rM<-do.call("rbind",totalrM)
rM<-rM[which.min(rM[,"MSE"]),]
rates<-rM["rates"]
ratei<-rM["ratei"]
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
r<-simpleFromCC(exprM,fractionM,k,seed=seed)
r;
}
#######################################################################################
########################################################################################
#### Use marker genes to infer the immune/stromal fractions, then deconvolve
#' Simple version of DeClust with linear regression
#'
#' This function is a simple version of DeClust. It uses linear regression instead of
#' nonlinear optimization, assuming the gene expression follows a normal distribution
#' instead of a log-normal distribution. Unlike simpleFromMarker, it uses iterative
#' optimization.
#'
#' @param exprM a gene by sample expression matrix (in the original expression scale, not log-transformed, has to be non-negative)
#' @param k number of clusters
#' @param maxsubtypestep the maximum number of iterations in the inner layer of optimization for sample clustering
#' @param maxstep the maximum number of iterations in the outer layer of optimization for the fraction of each compartment
#' @param seed the random seed set for kmeans clustering
#' @return A named list with three components: subtype, subtypeprofileM and subtypefractionM. subtype is a vector with length equal to the sample size storing the sample clustering results; subtypeprofileM is a gene by compartment matrix storing the inferred expression profile for each cancer subtype, immune and stromal compartment; subtypefractionM is a sample by compartment matrix storing the estimated fraction of each compartment for each sample.
#' @examples data(exprM);
#' r<-deClustFromMarker(exprM,3);
#' @export
deClustFromMarker<-function(exprM,k,maxsubtypestep=100,maxstep=100,seed=1)
{
# Stromal (row 1) and immune (row 2) marker gene sets shipped with the package
data(SI_geneset)
stromal<-as.matrix(SI_geneset)[1,-1];
immune<-as.matrix(SI_geneset)[2,-1];
if(sum(rownames(exprM)%in%immune)<10|sum(rownames(exprM)%in%stromal)<10)stop("too few markers for immune and stromal")
# Per-sample marker score: median marker-gene expression (computed on the log
# scale), rescaled by its 99th percentile so scores are roughly in [0, 1]
stromalMean<-exp(apply(log(exprM)[rownames(exprM)%in%stromal,],2,median))
immuneMean<-exp(apply(log(exprM)[rownames(exprM)%in%immune,],2,median))
markerM<-cbind(stromal=stromalMean/quantile(stromalMean,0.99),immune=immuneMean/quantile(immuneMean,0.99))
# Grid search over global scaling factors for the stromal (rates) and immune
# (ratei) fractions; the outer loop runs in parallel via foreach/%dopar%
totalrM<-foreach( rates = seq(0.1,1,by=0.1))%dopar%
{
print(rates)
rM<-c()
for(ratei in seq(0.1,1,by=0.1))
{
print(ratei)
# Candidate fractions: scaled marker scores plus cancer = 1 - stromal - immune
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
if(mean(fractionM[,"cancer"]<0)>0.05)
{
############we stop when there are more than 5% of samples with purity<0
next();
}else{
# Clamp fractions to [0, 1], deconvolve, and score this (rates, ratei)
# pair by the mean squared reconstruction error on the log scale
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
r<-deClustFromCC(exprM,fractionM,k=k,maxsubtypestep=maxsubtypestep,seed=seed)
reM<-r$subtypeprofileM%*%t(r$subtypefractionM);
reM[reM<1]<-1
rM<-rbind(rM,c(rates=rates,ratei=ratei,MSE=mean(as.vector((log(reM)-log(exprM))^2))))
}
}
rM;
}
# Keep the (rates, ratei) pair with the smallest MSE, rebuild the fraction
# matrix, and run the full iterative deconvolution with those factors
rM<-do.call("rbind",totalrM)
rM<-rM[which.min(rM[,"MSE"]),]
rates<-rM["rates"]
ratei<-rM["ratei"]
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
#r0<-simpleDeconv(exprM,fractionM,k=k,maxsubtypestep=maxsubtypestep)
r<-deClustFromCCiterative(exprM,fractionM,k=k,maxstep=maxstep,maxsubtypestep=maxsubtypestep,seed=seed)
r;
}
##############################################################################################################################from marker: exprM in the original scale, it has to be a matrix
###################
#' Full version of DeClust
#'
#' This function is a full version of DeClust. It uses nonlinear optimization assuming the gene expression follows log-normal distribution. It also contains iteractive optimization procedures.
#'
#' @param exprM a gene by sample expression matrix (in the original expression scale, not log-transformed, has to be non-negative)
#' @param k number of clusters
#' @param maxsubtypestep the maximum number of iterations in the inner lay of optimization for sample clustering
#' @param maxstep the maximum number of iterations in the outer lay of optimization for the fraction of each compartment
#' @param seed the random seed set for kmeans clustering
#' @return A named list with three components: subtype, subtypeprofileM and subtypefratioM. Subtype is a vector with length equal to the sample size, and it stores the sample clustering results;subtypeprofileM is a gene by compartment matrix, and it stores the inferred expression profile for each cancer subtype, immune and stromal compartment;subtypefractionM is a sample by compartment matrix, and it stores the estimated fraction of each compartment for each sample.
#' @examples data(exprM);
#' library("doParallel");
#' cl<-makeCluster(5,type="FORK",outfile="");
#' registerDoParallel(cl);
#' r<-deClustFromMarkerlognormal(exprM,3);
#' @export
deClustFromMarkerlognormal<-function(exprM,k,maxsubtypestep=20,maxstep=20,seed=1)
{
## Suppress warnings from the inner nonlinear optimizations.
options(warn=-1)
## Stromal/immune marker gene sets shipped with the package
## (SI_geneset row 1 = stromal markers, row 2 = immune markers).
data(SI_geneset)
stromal<-as.matrix(SI_geneset)[1,-1];
immune<-as.matrix(SI_geneset)[2,-1];
## Require at least 10 markers of each type to be present in exprM.
if(sum(rownames(exprM)%in%immune)<10|sum(rownames(exprM)%in%stromal)<10)stop("too few markers for immune and stromal")
## Per-sample marker score: median on the log scale, back-transformed,
## then scaled by its 99th percentile so scores are roughly in [0, 1].
stromalMean<-exp(apply(log(exprM)[rownames(exprM)%in%stromal,],2,median))
immuneMean<-exp(apply(log(exprM)[rownames(exprM)%in%immune,],2,median))
markerM<-cbind(stromal=stromalMean/quantile(stromalMean,0.99),immune=immuneMean/quantile(immuneMean,0.99))
#registerDoMC(10)
rates<-c()
ratei<-c()
## Grid search over global scaling rates (parallel over the stromal
## rate); each candidate is scored with the fast linear-regression
## solver (deClustFromCC) before the expensive lognormal refinement.
totalrM<-foreach( rates = seq(0.1,1,by=0.1))%dopar%
{
# source("~/projects/Li/TCGAtools/Rcode/metaDeconvolution_simulationfunctions.r")
print(rates)
rM<-c()
for(ratei in seq(0.1,1,by=0.1))
{
print(ratei)
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
if(mean(fractionM[,"cancer"]<0)>0.05)
{
############we stop when there are more than 5% of samples with purity<0
next();
}else{
## Clip fractions into [0, 1]; +1 pseudo-count keeps log() finite.
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
r<-deClustFromCC(exprM+1,fractionM,k=k,maxsubtypestep=maxsubtypestep,seed=seed)
reM<-r$subtypeprofileM%*%t(r$subtypefractionM);
reM[reM<1]<-1
rM<-rbind(rM,c(rates=rates,ratei=ratei,MSE=mean(as.vector((log(reM)-log(exprM+1))^2))))
}
}
rM;
}
## Pick the (rates, ratei) pair with the smallest reconstruction MSE
## and rebuild the corresponding initial fraction matrix.
rM<-do.call("rbind",totalrM)
rM<-rM[which.min(rM[,"MSE"]),]
rates<-rM["rates"]
ratei<-rM["ratei"]
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
# save(rates,ratei,fractionM,file=paste(outputDir,label,"_bestrate.rda",sep=""))
## Full lognormal iterative refinement from the best initialization.
r<-deClustFromCCiterativelognormal(exprM,fractionM,k,maxstep=maxstep,maxsubtypestep=maxsubtypestep,seed=seed)
r;
}
#########################################################################################
#' calculate BIC from the output of DeClust
#'
#'
#'
#' @param exprM the bulky expression data as input for deClust functions
#' @param deClustoutput the output of deClust functions
#'
#' @return BIC value
#' @examples data(exprM);
#' library("doParallel");
#' cl<-makeCluster(5,type="FORK",outfile="");
#' registerDoParallel(cl);
#' r<-deClustFromMarkerlognormal(exprM,3);
#' BIC<-calculateBIC(exprM,r)
#' @export
calculateBIC<-function(exprM,deClustoutput)
{
# Reconstruct the bulk expression from the inferred profiles and
# fractions, and score the fit as mean squared error on log(x+1) scale.
reconstructed <- deClustoutput$subtypeprofileM %*% t(deClustoutput$subtypefractionM)
sq_err <- (log(reconstructed + 1) - log(exprM + 1))^2
fit_mse <- mean(as.vector(sq_err))
# BIC-style criterion: n * log(MSE) + log(n) * (number of subtypes);
# more subtypes are penalized by the complexity term.
n_samples <- length(deClustoutput$subtype)
n_subtypes <- length(unique(deClustoutput$subtype))
n_samples * log(fit_mse) + log(n_samples) * n_subtypes
}
##################################################################################################internal function
############################################################################################
#######################################################################################################optimizefractionM
## Re-estimate the per-sample compartment fractions given fixed profiles
## and subtype labels. Both exprM and profileM are on the log(x+1) scale
## here (the callers pass log(exprM+1) / log(subtypeprofileM+1)).
## Returns a sample-by-compartment matrix in subtype-expanded form
## (columns stromal, immune, subtype1, subtype2, ...).
optimizefractionM<-function(exprM,profileM,subtype,initfractionM)
{
###########for samples with unknown initfractionM, assign to the subtype with the most similar profile of top 10% cancer genes, assign initfractionM randomly
if(any(is.na(subtype)))
{
## Cancer-specific genes: high in every subtype profile relative to the
## stromal/immune profiles (top 10% of that contrast).
FC<-apply(profileM[,grep("subtype",colnames(profileM)),drop=FALSE],1,min)-apply(profileM[,c("stromal","immune")],1,max)
cancergenes<-which(FC>quantile(FC,0.9))
ms<-names(subtype)[is.na(subtype)]
for(ss in ms)
{
## Assign the unlabeled sample to the best-correlated subtype and give
## it a flat 1/3-1/3-1/3 starting fraction.
subtypenames<-setdiff(unique(subtype),NA)
subtype[ss]<-subtypenames[which.max(cor(exprM[cancergenes,ss],profileM[cancergenes,subtypenames]))[1]]
initfractionM[ss,]<-0;
initfractionM[ss,c("stromal","immune",subtype[ss])]<-1/3
}
}
#####when geneV is too small(or even zero), it will cause problem, so remove extreme genes
## Per-gene residual variance used as weights in the per-sample fits.
geneV<-apply((log((exp(profileM)-1)%*%t(initfractionM)+1)-exprM)^2,1,mean)
# gg<-which(geneV<quantile(geneV,0.95)&geneV>quantile(geneV,0.05))
## Optimize each sample's 3 fractions independently (in parallel).
r<-foreach(i = 1:ncol(exprM))%dopar%{
# print(i)
initfractionV<-initfractionM[i,c("stromal","immune",subtype[i])];
######when the initfractionM has close to zero fraction,the optimization may have issues
initfractionV[initfractionV<0.0001]<-0.0001;
t<-optimizefractionV(exprM[,i],profileM[,c("stromal","immune",subtype[i])],geneV,initfractionV)
names(t)[3]<-"cancer"
t;
}
# proc.time()-pt
## Assemble the optimized (stromal, immune, cancer) triples and expand
## the cancer column into per-subtype columns.
fractionM<-do.call("rbind",r)
fractionM<-as.matrix(fractionM[,1:3])
rownames(fractionM)<-rownames(initfractionM);
colnames(fractionM)<-c("stromal","immune","cancer");
fractionM<-getsubtypefractionM(fractionM,subtype)
fractionM;
}
##########################################################################################################
optimizefractionV<-function(exprV,profileM,geneV,initfractionV)
{
# Variance-weighted least squares on the log(x+1) scale: estimate the
# compartment fractions of a single sample given fixed log-profiles.
# geneV holds per-gene residual variances that down-weight noisy genes.
objective <- function(fractionV, profileM, exprV, geneV) {
  fitted <- log((exp(profileM) - 1) %*% fractionV + 1)
  mean((fitted - exprV)^2 / geneV)
}
# Analytic gradient of the objective with respect to fractionV.
gradient <- function(fractionV, profileM, exprV, geneV) {
  linear <- (exp(profileM) - 1) %*% fractionV + 1
  resid <- log(linear) - exprV
  t(exp(profileM) - 1) %*% (resid * 2 / geneV / linear) / nrow(profileM)
}
# Box-constrained quasi-Newton search: fractions must stay >= 0.
optimx(par = initfractionV, fn = objective, gr = gradient,
       lower = 0, upper = Inf, itnmax = NULL, hessian = FALSE,
       method = "L-BFGS-B",
       profileM = profileM, exprV = exprV, geneV = geneV)
}
########################################################################################################getsubtypefractionM
getsubtypefractionM<-function(fractionM,subtype)
{
# Build a sample-by-subtype 0/1 indicator matrix from the labels.
subtype_names <- unique(subtype)
indicator <- matrix(0, nrow = length(subtype), ncol = length(subtype_names),
                    dimnames = list(names(subtype), subtype_names))
for (lab in subtype_names) {
  indicator[names(subtype)[subtype == lab], lab] <- 1
}
# The labelled samples must be exactly the rows of fractionM.
if (any(sort(rownames(indicator)) != sort(rownames(fractionM)))) {
  stop("subtype samples don't match")
}
# Align row order, then put each sample's cancer fraction in its own
# subtype column (all other subtype columns stay zero).
indicator <- indicator[rownames(fractionM), , drop = FALSE]
indicator <- sweep(indicator, 1, fractionM[, "cancer"], "*")
out <- cbind(fractionM[, c("stromal", "immune")], indicator)
out[out < 0] <- 0
out
}
#######################################################################################################optimizeprofileM
## Estimate a gene-by-compartment profile matrix (log(x+1) scale) for
## fixed per-sample fractions: one box-constrained L-BFGS-B fit per gene,
## run in parallel. exprM is expected on the log(x+1) scale here.
optimizeprofileM<-function(exprM,fractionM)
{
#registerDoMC()
## Objective: MSE between observed log expression of one gene and the
## log of the fraction-weighted mixture of its compartment profiles.
fn1<-function(profilei,expri,fractionM)
{
mean((log(fractionM%*%(exp(profilei)-1)+1)-expri)^2)
}
## Analytic gradient of fn1 with respect to profilei.
gr1<-function(profilei,expri,fractionM)
{
t<-2*(log(fractionM%*%(exp(profilei)-1)+1)-expri)/(fractionM%*%(exp(profilei)-1)+1)
t(t)%*%fractionM/nrow(fractionM)*exp(profilei)
}
print("start optimize profileM")
#pt<-proc.time()
r<-foreach(k=1:nrow(exprM))%dopar%{
# print(k)
options(warn=-1)
## Start from the gene's mean expression for every compartment.
p<-optimx(par=rep(mean(exprM[k,]),ncol(fractionM)), fn=fn1, gr=gr1, lower=0, upper=Inf, itnmax=NULL, hessian=FALSE, method="L-BFGS-B",expri=exprM[k,],fractionM=fractionM)
####sometimes optimx outputs NA for unclear reason, it might be fixed by changing initial value.
if(any(is.na(p[1:ncol(fractionM)])))p<-optimx(par=rep(mean(exprM[k,])+1,ncol(fractionM)), fn=fn1, gr=gr1, lower=0, upper=Inf, itnmax=NULL, hessian=FALSE, method="L-BFGS-B",expri=exprM[k,],fractionM=fractionM)
p;
}#
# proc.time()-pt
## First ncol(fractionM) columns of the optimx result are the fitted
## parameters; clamp any tiny negatives introduced by the optimizer.
r<-do.call("rbind",r)
curprofileM<-as.matrix(r[,1:ncol(fractionM)])
rownames(curprofileM)<-rownames(exprM);
colnames(curprofileM)<-colnames(fractionM);
curprofileM[curprofileM<0]<-0;
curprofileM;
}
########################################################################################
#############################################
###calculate mse
calculateMSE<-function(fractionM,exprM,profileM)
{
# Compartment columns must line up between fractions and profiles.
if (any(colnames(fractionM) != colnames(profileM))) {
  stop("fraction names need to match")
}
# profileM holds log(x+1) profiles; undo the transform, mix by the
# per-sample fractions, and compare to exprM (already log-scale) as MSE.
mixed <- (exp(profileM) - 1) %*% t(fractionM)
residual <- log(mixed + 1) - exprM
mean(as.vector(residual^2))
}
################################################################################################from CC
#########################################################################################
#####################################################################################################interatively updating fractionM
## Outer optimization loop of the lognormal DeClust model: alternate
## between (a) fitting profiles and subtype labels for fixed fractions
## and (b) re-estimating per-sample compartment fractions, stopping when
## the fraction matrix changes by < 0.01 everywhere or maxstep is hit.
##
## FIX: the original defaults were `maxstep=maxstep` and
## `maxsubtypestep=maxsubtypestep` -- self-referential promises that
## error with "promise already under evaluation" whenever a caller omits
## them. They now default to 20, matching deClustFromMarkerlognormal().
deClustFromCCiterativelognormal<-function(exprM,fractionM,subtypeN,maxstep=20,maxsubtypestep=20,seed=1)
{
if(sum(is.na(exprM))>0)stop("NA is not allowed in exprM")
if(any(colnames(exprM)!=rownames(fractionM)))stop("sample Name doesn't match")
stepi<-1
while(stepi<=maxstep)
{
print(paste("step",stepi))
## (a) profiles + subtype assignment for the current fractions.
r<-deClustFromCClognormal(exprM=exprM,fractionM=fractionM,subtypeNum=subtypeN,maxsubtypestep=maxsubtypestep,seed=seed)
## (b) re-optimize per-sample fractions on the log(x+1) scale, then
## collapse the subtype columns back into a single cancer fraction.
newsubtypefractionM<-optimizefractionM(log(exprM+1),log(r$subtypeprofileM+1),r$subtype,r$subtypefractionM)
newfractionM<-cbind(newsubtypefractionM[,c("stromal","immune")],cancer=apply(newsubtypefractionM[,grep("subtype",colnames(newsubtypefractionM)),drop=FALSE],1,sum))
print(max(abs(newfractionM-fractionM)))
## Converged when no fraction moved by 0.01 or more.
if(all(abs(newfractionM-fractionM)<0.01))break;
fractionM<-newfractionM;
stepi<-stepi+1;
}
return(r)
}
##################################################################################################################################################################################
################## deconvolution from fractionM lognormal
####################fractionM has 3 columns: stromal,immune and cancer
## Lognormal-model deconvolution with subtype discovery for a FIXED
## fraction matrix (columns stromal, immune, cancer). exprM is taken on
## the original scale and converted to log(x+1) internally; the returned
## subtypeprofileM is converted back (exp(.)-1) to the original scale.
deClustFromCClognormal<-function(exprM,fractionM,subtypeNum,maxsubtypestep=100,seed=1)
{
exprM<-log(exprM+1)
#####initial ressubtype using logresidual without subtypes
profileM<-optimizeprofileM(exprM,fractionM)
if(subtypeNum==1){
## Single-subtype case: no clustering needed, just rename the cancer
## column to "subtype1". Note this branch also returns the mse while
## the multi-subtype branch below does not.
mse<-calculateMSE(fractionM,exprM,profileM)
subtypefractionM<-fractionM;
colnames(subtypefractionM)[colnames(subtypefractionM)=="cancer"]<-"subtype1"
subtypeprofileM<-profileM;
colnames(subtypeprofileM)[colnames(subtypeprofileM)=="cancer"]<-"subtype1"
subtype<-rep("subtype1",nrow(subtypefractionM))
names(subtype)<-rownames(subtypefractionM)
return(list(mse=mse,subtypefractionM=subtypefractionM,subtypeprofileM=exp(subtypeprofileM)-1,subtype=subtype))
}
if(subtypeNum>1)
{
## Initial clusters: kmeans on the log residuals after removing the
## subtype-free reconstruction.
logresM<-exprM-log((exp(profileM)-1)%*%t(fractionM)+1)
set.seed(seed)
r<-kmeans(t(logresM),subtypeNum,nstart=5)
ressubtype<-r$cluster;
ressubtype<-paste("subtype",ressubtype,sep="")
names(ressubtype)<-names(r$cluster)
ressubtype<-ressubtype[rownames(fractionM)]
##################
## Alternate between refitting profiles for the current labels and
## reassigning each sample to its best-fitting subtype.
## NOTE(review): if maxsubtypestep<=1 this loop never runs and
## subtypefractionM/subtypeprofileM below would be undefined -- callers
## should pass maxsubtypestep>=2.
oldsubtype<-ressubtype
step<-1
while(step<maxsubtypestep)
{
print(paste("subtypestep",step))
subtypefractionM<-getsubtypefractionM(fractionM,oldsubtype)
subtypeprofileM<-optimizeprofileM(exprM,subtypefractionM)
mse<-calculateMSE(subtypefractionM,exprM,subtypeprofileM)
print(mse)
####reassign cluster
## Per-sample reconstruction MSE under each candidate subtype profile.
subtypeNames<-unique(oldsubtype)
msesubtype<-sapply(subtypeNames,function(subtype)apply((exprM-log((exp(subtypeprofileM[,c("stromal","immune",subtype)])-1)%*%t(fractionM)+1))^2,2,mean))
newsubtype<-apply(msesubtype,1,function(x) subtypeNames[which.min(x)])
if(all(newsubtype==oldsubtype))break;
print(sum(newsubtype!=oldsubtype))
oldsubtype<-newsubtype
step<-step+1;
}
#####
subtype<-oldsubtype
return(list(subtypefractionM=subtypefractionM,subtypeprofileM=exp(subtypeprofileM)-1,subtype=subtype))
}
}
########################################################################################################################################################################
#########################
simpleFromCC<-function(exprM,fractionM,k,seed=1)
{
# Linear-regression deconvolution + one-shot clustering (no iteration).
# exprM: gene-by-sample matrix (original scale); fractionM: samples x
# (stromal, immune, cancer); k: number of cancer subtypes.
if (any(colnames(exprM) != rownames(fractionM))) {
  stop("sample doesn't match")
}
# Fit per-gene linear compartment profiles, then strip the (floored)
# reconstruction to obtain log residuals.
fit <- lm.fit(x = fractionM, y = t(exprM))
reconstructed <- t(fit$coefficients) %*% t(fractionM)
reconstructed[reconstructed < 1] <- 1
logres <- log(exprM) - log(reconstructed)
# Cluster the samples on the residual expression.
set.seed(seed)
km <- kmeans(t(logres), k, nstart = 5)
labels <- paste("subtype", km$cluster, sep = "")
names(labels) <- names(km$cluster)
labels <- labels[rownames(fractionM)]
# Re-fit profiles with subtype-specific cancer columns; floor at 1.
subtypefractionM <- getsubtypefractionM(fractionM, labels)
refit <- lm.fit(x = subtypefractionM, y = t(exprM))
subtypeprofileM <- t(refit$coefficients)
subtypeprofileM[subtypeprofileM < 1] <- 1
list(subtype = labels, subtypeprofileM = subtypeprofileM,
     subtypefractionM = subtypefractionM)
}
########################################################################################
#############################
## Linear-regression (normal-model) deconvolution with subtype discovery.
##
## Args:
##   exprM          gene-by-sample expression matrix (original scale)
##   fractionM      samples x (stromal, immune, cancer) fraction matrix
##   k              number of cancer subtypes for kmeans
##   maxsubtypestep cap on cluster-reassignment iterations
##   seed           random seed for kmeans
## Returns: list(subtype, subtypeprofileM, subtypefractionM).
##
## Fixes vs the original implementation:
##   * outputs are always defined, even when maxsubtypestep<=1 (the
##     original `while(step<maxsubtypestep)` loop could exit before ever
##     assigning newsubtype/subtypeprofileM/subtypefractionM);
##   * the returned profile/fraction matrices now always correspond to
##     the returned subtype labels (the original could return labels
##     from one extra reassignment the final fit never used).
deClustFromCC<-function(exprM,fractionM,k,maxsubtypestep=100,seed=1)
{
## Initial clustering: regress expression on the 3 compartment
## fractions, remove the reconstruction (floored at 1), and run kmeans
## on the log residuals.
model<-lm.fit(x=fractionM,y=t(exprM))
reM<-t(model$coefficients)%*%t(fractionM)
reM[reM<1]<-1
adjexprM<-log(exprM)-log(reM)
set.seed(seed)
kmres<-kmeans(t(adjexprM),k,nstart=5)
oldsubtype<-paste("subtype",kmres$cluster,sep="")
names(oldsubtype)<-names(kmres$cluster)
oldsubtype<-oldsubtype[rownames(fractionM)]
step<-1
repeat
{
print(paste("subtypestep",step))
## Fit compartment + subtype-specific cancer profiles for the current
## labels; floor profiles at 1 so log() below stays finite.
subtypefractionM<-getsubtypefractionM(fractionM,oldsubtype)
model<-lm.fit(x=subtypefractionM,y=t(exprM))
subtypeprofileM<-t(model$coefficients)
subtypeprofileM[subtypeprofileM<1]<-1;
if(step>=maxsubtypestep)
{
## Iteration budget exhausted: keep the labels this fit was based on.
newsubtype<-oldsubtype
break;
}
## Reassign every sample to the subtype whose profile reconstructs it
## with the smallest per-sample MSE on the log scale.
subtypeNames<-unique(oldsubtype)
msesubtype<-sapply(subtypeNames,function(subtype)
{
reM<-subtypeprofileM[,c("stromal","immune",subtype)]%*%t(fractionM[,c("stromal","immune","cancer")])
reM[reM<1]<-1;
apply((log(exprM)-log(reM))^2,2,mean)
})
newsubtype<-apply(msesubtype,1,function(x) subtypeNames[which.min(x)])
if(all(newsubtype==oldsubtype))break;
print(sum(newsubtype!=oldsubtype))
oldsubtype<-newsubtype
step<-step+1;
}
list(subtype=newsubtype,subtypeprofileM=subtypeprofileM,subtypefractionM=subtypefractionM)
}
#####################################################################################
########################################################################################
## Outer optimization loop of the linear-regression DeClust model:
## alternate between (a) fitting profiles/labels for fixed fractions via
## deClustFromCC and (b) re-estimating per-sample fractions by (gene-
## variance-weighted) regression of each sample on its subtype's
## profiles, until the fraction matrix changes by < 0.01 everywhere or
## maxstep is reached. Returns the last deClustFromCC result.
deClustFromCCiterative<-function(exprM,fractionM,k=2,weight=TRUE,maxstep=100,maxsubtypestep=100,seed=1)
{
stepi<-1
while(stepi<=maxstep)
{
print(paste("step",stepi))
#if(stepi==20)browser()
## (a) profiles + subtype labels for the current fractions.
r<-deClustFromCC(exprM,fractionM,maxsubtypestep=maxsubtypestep,k=k,seed=seed)
subtype<-r$subtype
## Per-gene residual variance, used as 1/geneV regression weights.
geneV<-apply((r$subtypeprofileM%*%t(r$subtypefractionM)-exprM)^2,1,mean)
subtypeName<-unique(subtype)
## (b) for each subtype, regress its samples on (stromal, immune, that
## subtype's cancer profile) to get new per-sample fractions.
subtypefractionML<-lapply(subtypeName,function(st)
{
if(weight) model<-lm.wfit(x=r$subtypeprofileM[,c("stromal","immune",st)],y=exprM[,which(subtype==st)],w=1/geneV)
if(!weight) model<-lm.fit(x=r$subtypeprofileM[,c("stromal","immune",st)],y=exprM[,which(subtype==st)])
subtypefractionM<-t(model$coefficients)
###when there is only one sample there, the rownames is lost
rownames(subtypefractionM)<-colnames(exprM)[which(subtype==st)]
subtypefractionM[subtypefractionM<0]<-0;
## Pad zero columns for the other subtypes and put the columns in the
## canonical order used by subtypeprofileM.
subtypefractionM<-cbind(subtypefractionM,matrix(0,nrow(subtypefractionM),length(subtypeName)-1,dimnames=list(rownames(subtypefractionM),setdiff(subtypeName,st))))
subtypefractionM[,colnames(r$subtypeprofileM),drop=FALSE];
})
subtypefractionM<-do.call("rbind",subtypefractionML)
subtypefractionM<-subtypefractionM[colnames(exprM),]
## Collapse the subtype columns back into one cancer fraction.
newfractionM<-cbind(subtypefractionM[,c("stromal","immune")],cancer=apply(subtypefractionM[,grep("subtype",colnames(subtypefractionM)),drop=FALSE],1,sum))
if(all(abs(newfractionM-fractionM)<0.01))break;
print(max(abs(newfractionM-fractionM)))
fractionM<-newfractionM;
stepi<-stepi+1;
}
r;
}
| /DeClust/R/metaDeconvolution_simulationfunctions.r | no_license | integrativenetworkbiology/DeClust | R | false | false | 23,688 | r |
##################################################################################################
####
#' Simple version of DeClust with linear regression and without iterative optimization
#'
#' This function is a simple version of DeClust. It uses linear regression instead of nonlinear optimization assuming the gene expression follows normal distribution instead of log-normal distribution. In addition, unlike the function deClustFromMarker(), it doesn't use iteractive optimization.
#'
#' @param exprM a gene by sample expression matrix (in the original expression scale, not log-transformed,has to be non-negative)
#' @param k number of clusters
#' @param seed the random seed set for kmeans clustering
#' @return A named list with three components: subtype, subtypeprofileM and subtypefractionM. Subtype is a vector with length equal to the sample size, and it stores the sample clustering results;subtypeprofileM is a gene by compartment matrix, and it stores the inferred expression profile for each cancer subtype, immune and stromal compartment;subtypefractionM is a sample by compartment matrix, and it stores the estimated fraction of each compartment for each sample.
#' @examples data(exprM);
#' r<-simpleFromMarker(exprM,3);
#' @export
simpleFromMarker<-function(exprM,k,seed=1)
{
## Stromal/immune marker gene sets shipped with the package
## (SI_geneset row 1 = stromal markers, row 2 = immune markers).
data(SI_geneset)
stromal<-as.matrix(SI_geneset)[1,-1];
immune<-as.matrix(SI_geneset)[2,-1];
## Require at least 10 markers of each type to be present in exprM.
if(sum(rownames(exprM)%in%immune)<10|sum(rownames(exprM)%in%stromal)<10)stop("too few markers for immune and stromal")
## Per-sample marker score: median on the log scale, back-transformed,
## then scaled by its 99th percentile so scores are roughly in [0, 1].
stromalMean<-exp(apply(log(exprM)[rownames(exprM)%in%stromal,],2,median))
immuneMean<-exp(apply(log(exprM)[rownames(exprM)%in%immune,],2,median))
markerM<-cbind(stromal=stromalMean/quantile(stromalMean,0.99),immune=immuneMean/quantile(immuneMean,0.99))
## Grid search over global scaling rates (parallel over the stromal
## rate) scoring each candidate by log-scale reconstruction MSE.
totalrM<-foreach( rates = seq(0.1,1,by=0.1))%dopar%
{
print(rates)
rM<-c()
for(ratei in seq(0.1,1,by=0.1))
{
print(ratei)
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
if(mean(fractionM[,"cancer"]<0)>0.05)
{
############we stop when there are more than 5% of samples with purity<0
next();
}else{
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
## NOTE(review): this inner call omits seed= (uses simpleFromCC's
## default of 1) while the final call below passes seed=seed -- confirm
## whether that asymmetry is intended.
r<-simpleFromCC(exprM,fractionM,k)
reM<-r$subtypeprofileM%*%t(r$subtypefractionM);
reM[reM<1]<-1
rM<-rbind(rM,c(rates=rates,ratei=ratei,MSE=mean(as.vector((log(reM)-log(exprM))^2))))
}
}
rM;
}
## Pick the (rates, ratei) pair with the smallest reconstruction MSE
## and rebuild the corresponding fraction matrix.
rM<-do.call("rbind",totalrM)
rM<-rM[which.min(rM[,"MSE"]),]
rates<-rM["rates"]
ratei<-rM["ratei"]
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
## Final fit (single pass, no iterative refinement in this variant).
r<-simpleFromCC(exprM,fractionM,k,seed=seed)
r;
}
#######################################################################################
########################################################################################
####Use marker to infer fraction
#' Simple version of DeClust with linear regression
#'
#' This function is a simple version of DeClust. It uses linear regression instead of nonlinear optimization assuming the gene expression follows normal distribution instead of log-normal distribution. Unlike simpleFromMarker, it uses iteractive optimization.
#'
#' @param exprM a gene by sample expression matrix (in the original expression scale, not log-transformed, has to be non-negative)
#' @param k number of clusters
#' @param maxsubtypestep the maximum number of iterations in the inner lay of optimization for sample clustering
#' @param maxstep the maximum number of iterations in the outer lay of optimization for the fraction of each compartment
#' @param seed the random seed set for kmeans clustering
#' @return A named list with three components: subtype, subtypeprofileM and subtypefractionM. Subtype is a vector with length equal to the sample size, and it stores the sample clustering results;subtypeprofileM is a gene by compartment matrix, and it stores the inferred expression profile for each cancer subtype, immune and stromal compartment;subtypefractionM is a sample by compartment matrix, and it stores the estimated fraction of each compartment for each sample.
#' @examples data(exprM);
#' r<-deClustFromMarker(exprM,3);
#' @export
deClustFromMarker<-function(exprM,k,maxsubtypestep=100,maxstep=100,seed=1)
{
## Stromal/immune marker gene sets shipped with the package
## (SI_geneset row 1 = stromal markers, row 2 = immune markers).
data(SI_geneset)
stromal<-as.matrix(SI_geneset)[1,-1];
immune<-as.matrix(SI_geneset)[2,-1];
## Require at least 10 markers of each type to be present in exprM.
if(sum(rownames(exprM)%in%immune)<10|sum(rownames(exprM)%in%stromal)<10)stop("too few markers for immune and stromal")
## Per-sample marker score: median on the log scale, back-transformed,
## then scaled by its 99th percentile so scores are roughly in [0, 1].
stromalMean<-exp(apply(log(exprM)[rownames(exprM)%in%stromal,],2,median))
immuneMean<-exp(apply(log(exprM)[rownames(exprM)%in%immune,],2,median))
markerM<-cbind(stromal=stromalMean/quantile(stromalMean,0.99),immune=immuneMean/quantile(immuneMean,0.99))
## Grid search over global scaling rates (parallel over the stromal
## rate) that turn marker scores into compartment fractions; each
## candidate is scored by the log-scale reconstruction MSE.
totalrM<-foreach( rates = seq(0.1,1,by=0.1))%dopar%
{
print(rates)
rM<-c()
for(ratei in seq(0.1,1,by=0.1))
{
print(ratei)
## Candidate fractions: stromal/immune from scaled markers, cancer is
## the remainder.
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
if(mean(fractionM[,"cancer"]<0)>0.05)
{
############we stop when there are more than 5% of samples with purity<0
next();
}else{
## Clip fractions into [0, 1] and score this candidate.
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
r<-deClustFromCC(exprM,fractionM,k=k,maxsubtypestep=maxsubtypestep,seed=seed)
reM<-r$subtypeprofileM%*%t(r$subtypefractionM);
reM[reM<1]<-1
rM<-rbind(rM,c(rates=rates,ratei=ratei,MSE=mean(as.vector((log(reM)-log(exprM))^2))))
}
}
rM;
}
## Pick the (rates, ratei) pair with the smallest reconstruction MSE
## and rebuild the corresponding initial fraction matrix.
rM<-do.call("rbind",totalrM)
rM<-rM[which.min(rM[,"MSE"]),]
rates<-rM["rates"]
ratei<-rM["ratei"]
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
#r0<-simpleDeconv(exprM,fractionM,k=k,maxsubtypestep=maxsubtypestep)
## Refine fractions, profiles and sample clusters iteratively starting
## from the best grid-search initialization.
r<-deClustFromCCiterative(exprM,fractionM,k=k,maxstep=maxstep,maxsubtypestep=maxsubtypestep,seed=seed)
r;
}
##############################################################################################################################from marker: exprM in the original scale, it has to be a matrix
###################
#' Full version of DeClust
#'
#' This function is a full version of DeClust. It uses nonlinear optimization assuming the gene expression follows log-normal distribution. It also contains iteractive optimization procedures.
#'
#' @param exprM a gene by sample expression matrix (in the original expression scale, not log-transformed, has to be non-negative)
#' @param k number of clusters
#' @param maxsubtypestep the maximum number of iterations in the inner lay of optimization for sample clustering
#' @param maxstep the maximum number of iterations in the outer lay of optimization for the fraction of each compartment
#' @param seed the random seed set for kmeans clustering
#' @return A named list with three components: subtype, subtypeprofileM and subtypefratioM. Subtype is a vector with length equal to the sample size, and it stores the sample clustering results;subtypeprofileM is a gene by compartment matrix, and it stores the inferred expression profile for each cancer subtype, immune and stromal compartment;subtypefractionM is a sample by compartment matrix, and it stores the estimated fraction of each compartment for each sample.
#' @examples data(exprM);
#' library("doParallel");
#' cl<-makeCluster(5,type="FORK",outfile="");
#' registerDoParallel(cl);
#' r<-deClustFromMarkerlognormal(exprM,3);
#' @export
deClustFromMarkerlognormal<-function(exprM,k,maxsubtypestep=20,maxstep=20,seed=1)
{
## Suppress warnings from the inner nonlinear optimizations.
options(warn=-1)
## Stromal/immune marker gene sets shipped with the package
## (SI_geneset row 1 = stromal markers, row 2 = immune markers).
data(SI_geneset)
stromal<-as.matrix(SI_geneset)[1,-1];
immune<-as.matrix(SI_geneset)[2,-1];
## Require at least 10 markers of each type to be present in exprM.
if(sum(rownames(exprM)%in%immune)<10|sum(rownames(exprM)%in%stromal)<10)stop("too few markers for immune and stromal")
## Per-sample marker score: median on the log scale, back-transformed,
## then scaled by its 99th percentile so scores are roughly in [0, 1].
stromalMean<-exp(apply(log(exprM)[rownames(exprM)%in%stromal,],2,median))
immuneMean<-exp(apply(log(exprM)[rownames(exprM)%in%immune,],2,median))
markerM<-cbind(stromal=stromalMean/quantile(stromalMean,0.99),immune=immuneMean/quantile(immuneMean,0.99))
#registerDoMC(10)
rates<-c()
ratei<-c()
## Grid search over global scaling rates (parallel over the stromal
## rate); each candidate is scored with the fast linear-regression
## solver (deClustFromCC) before the expensive lognormal refinement.
totalrM<-foreach( rates = seq(0.1,1,by=0.1))%dopar%
{
# source("~/projects/Li/TCGAtools/Rcode/metaDeconvolution_simulationfunctions.r")
print(rates)
rM<-c()
for(ratei in seq(0.1,1,by=0.1))
{
print(ratei)
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
if(mean(fractionM[,"cancer"]<0)>0.05)
{
############we stop when there are more than 5% of samples with purity<0
next();
}else{
## Clip fractions into [0, 1]; +1 pseudo-count keeps log() finite.
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
r<-deClustFromCC(exprM+1,fractionM,k=k,maxsubtypestep=maxsubtypestep,seed=seed)
reM<-r$subtypeprofileM%*%t(r$subtypefractionM);
reM[reM<1]<-1
rM<-rbind(rM,c(rates=rates,ratei=ratei,MSE=mean(as.vector((log(reM)-log(exprM+1))^2))))
}
}
rM;
}
## Pick the (rates, ratei) pair with the smallest reconstruction MSE
## and rebuild the corresponding initial fraction matrix.
rM<-do.call("rbind",totalrM)
rM<-rM[which.min(rM[,"MSE"]),]
rates<-rM["rates"]
ratei<-rM["ratei"]
fractionM<-cbind(stromal=markerM[,"stromal"]*rates,immune=markerM[,"immune"]*ratei)
fractionM<-cbind(fractionM,cancer=1-fractionM[,"stromal"]-fractionM[,"immune"])
fractionM[fractionM>1]<-1;
fractionM[fractionM<0]<-0;
# save(rates,ratei,fractionM,file=paste(outputDir,label,"_bestrate.rda",sep=""))
## Full lognormal iterative refinement from the best initialization.
r<-deClustFromCCiterativelognormal(exprM,fractionM,k,maxstep=maxstep,maxsubtypestep=maxsubtypestep,seed=seed)
r;
}
#########################################################################################
#' calculate BIC from the output of DeClust
#'
#'
#'
#' @param exprM the bulky expression data as input for deClust functions
#' @param deClustoutput the output of deClust functions
#'
#' @return BIC value
#' @examples data(exprM);
#' library("doParallel");
#' cl<-makeCluster(5,type="FORK",outfile="");
#' registerDoParallel(cl);
#' r<-deClustFromMarkerlognormal(exprM,3);
#' BIC<-calculateBIC(exprM,r)
#' @export
calculateBIC<-function(exprM,deClustoutput)
{
# Reconstruct the bulk expression from the inferred profiles and
# fractions, and score the fit as mean squared error on log(x+1) scale.
reconstructed <- deClustoutput$subtypeprofileM %*% t(deClustoutput$subtypefractionM)
sq_err <- (log(reconstructed + 1) - log(exprM + 1))^2
fit_mse <- mean(as.vector(sq_err))
# BIC-style criterion: n * log(MSE) + log(n) * (number of subtypes);
# more subtypes are penalized by the complexity term.
n_samples <- length(deClustoutput$subtype)
n_subtypes <- length(unique(deClustoutput$subtype))
n_samples * log(fit_mse) + log(n_samples) * n_subtypes
}
##################################################################################################internal function
############################################################################################
#######################################################################################################optimizefractionM
## Re-estimate the per-sample compartment fractions given fixed profiles
## and subtype labels. Both exprM and profileM are on the log(x+1) scale
## here (the callers pass log(exprM+1) / log(subtypeprofileM+1)).
## Returns a sample-by-compartment matrix in subtype-expanded form
## (columns stromal, immune, subtype1, subtype2, ...).
optimizefractionM<-function(exprM,profileM,subtype,initfractionM)
{
###########for samples with unknown initfractionM, assign to the subtype with the most similar profile of top 10% cancer genes, assign initfractionM randomly
if(any(is.na(subtype)))
{
## Cancer-specific genes: high in every subtype profile relative to the
## stromal/immune profiles (top 10% of that contrast).
FC<-apply(profileM[,grep("subtype",colnames(profileM)),drop=FALSE],1,min)-apply(profileM[,c("stromal","immune")],1,max)
cancergenes<-which(FC>quantile(FC,0.9))
ms<-names(subtype)[is.na(subtype)]
for(ss in ms)
{
## Assign the unlabeled sample to the best-correlated subtype and give
## it a flat 1/3-1/3-1/3 starting fraction.
subtypenames<-setdiff(unique(subtype),NA)
subtype[ss]<-subtypenames[which.max(cor(exprM[cancergenes,ss],profileM[cancergenes,subtypenames]))[1]]
initfractionM[ss,]<-0;
initfractionM[ss,c("stromal","immune",subtype[ss])]<-1/3
}
}
#####when geneV is too small(or even zero), it will cause problem, so remove extreme genes
## Per-gene residual variance used as weights in the per-sample fits.
geneV<-apply((log((exp(profileM)-1)%*%t(initfractionM)+1)-exprM)^2,1,mean)
# gg<-which(geneV<quantile(geneV,0.95)&geneV>quantile(geneV,0.05))
## Optimize each sample's 3 fractions independently (in parallel).
r<-foreach(i = 1:ncol(exprM))%dopar%{
# print(i)
initfractionV<-initfractionM[i,c("stromal","immune",subtype[i])];
######when the initfractionM has close to zero fraction,the optimization may have issues
initfractionV[initfractionV<0.0001]<-0.0001;
t<-optimizefractionV(exprM[,i],profileM[,c("stromal","immune",subtype[i])],geneV,initfractionV)
names(t)[3]<-"cancer"
t;
}
# proc.time()-pt
## Assemble the optimized (stromal, immune, cancer) triples and expand
## the cancer column into per-subtype columns.
fractionM<-do.call("rbind",r)
fractionM<-as.matrix(fractionM[,1:3])
rownames(fractionM)<-rownames(initfractionM);
colnames(fractionM)<-c("stromal","immune","cancer");
fractionM<-getsubtypefractionM(fractionM,subtype)
fractionM;
}
##########################################################################################################
optimizefractionV<-function(exprV,profileM,geneV,initfractionV)
{
# Variance-weighted least squares on the log(x+1) scale: estimate the
# compartment fractions of a single sample given fixed log-profiles.
# geneV holds per-gene residual variances that down-weight noisy genes.
objective <- function(fractionV, profileM, exprV, geneV) {
  fitted <- log((exp(profileM) - 1) %*% fractionV + 1)
  mean((fitted - exprV)^2 / geneV)
}
# Analytic gradient of the objective with respect to fractionV.
gradient <- function(fractionV, profileM, exprV, geneV) {
  linear <- (exp(profileM) - 1) %*% fractionV + 1
  resid <- log(linear) - exprV
  t(exp(profileM) - 1) %*% (resid * 2 / geneV / linear) / nrow(profileM)
}
# Box-constrained quasi-Newton search: fractions must stay >= 0.
optimx(par = initfractionV, fn = objective, gr = gradient,
       lower = 0, upper = Inf, itnmax = NULL, hessian = FALSE,
       method = "L-BFGS-B",
       profileM = profileM, exprV = exprV, geneV = geneV)
}
########################################################################################################getsubtypefractionM
getsubtypefractionM<-function(fractionM,subtype)
{
# Build a sample-by-subtype 0/1 indicator matrix from the labels.
subtype_names <- unique(subtype)
indicator <- matrix(0, nrow = length(subtype), ncol = length(subtype_names),
                    dimnames = list(names(subtype), subtype_names))
for (lab in subtype_names) {
  indicator[names(subtype)[subtype == lab], lab] <- 1
}
# The labelled samples must be exactly the rows of fractionM.
if (any(sort(rownames(indicator)) != sort(rownames(fractionM)))) {
  stop("subtype samples don't match")
}
# Align row order, then put each sample's cancer fraction in its own
# subtype column (all other subtype columns stay zero).
indicator <- indicator[rownames(fractionM), , drop = FALSE]
indicator <- sweep(indicator, 1, fractionM[, "cancer"], "*")
out <- cbind(fractionM[, c("stromal", "immune")], indicator)
out[out < 0] <- 0
out
}
#######################################################################################################optimizeprofileM
## Estimate a gene-by-compartment profile matrix (log(x+1) scale) for
## fixed per-sample fractions: one box-constrained L-BFGS-B fit per gene,
## run in parallel. exprM is expected on the log(x+1) scale here.
optimizeprofileM<-function(exprM,fractionM)
{
#registerDoMC()
## Objective: MSE between observed log expression of one gene and the
## log of the fraction-weighted mixture of its compartment profiles.
fn1<-function(profilei,expri,fractionM)
{
mean((log(fractionM%*%(exp(profilei)-1)+1)-expri)^2)
}
## Analytic gradient of fn1 with respect to profilei.
gr1<-function(profilei,expri,fractionM)
{
t<-2*(log(fractionM%*%(exp(profilei)-1)+1)-expri)/(fractionM%*%(exp(profilei)-1)+1)
t(t)%*%fractionM/nrow(fractionM)*exp(profilei)
}
print("start optimize profileM")
#pt<-proc.time()
r<-foreach(k=1:nrow(exprM))%dopar%{
# print(k)
options(warn=-1)
## Start from the gene's mean expression for every compartment.
p<-optimx(par=rep(mean(exprM[k,]),ncol(fractionM)), fn=fn1, gr=gr1, lower=0, upper=Inf, itnmax=NULL, hessian=FALSE, method="L-BFGS-B",expri=exprM[k,],fractionM=fractionM)
####sometimes optimx outputs NA for unclear reason, it might be fixed by changing initial value.
if(any(is.na(p[1:ncol(fractionM)])))p<-optimx(par=rep(mean(exprM[k,])+1,ncol(fractionM)), fn=fn1, gr=gr1, lower=0, upper=Inf, itnmax=NULL, hessian=FALSE, method="L-BFGS-B",expri=exprM[k,],fractionM=fractionM)
p;
}#
# proc.time()-pt
## First ncol(fractionM) columns of the optimx result are the fitted
## parameters; clamp any tiny negatives introduced by the optimizer.
r<-do.call("rbind",r)
curprofileM<-as.matrix(r[,1:ncol(fractionM)])
rownames(curprofileM)<-rownames(exprM);
colnames(curprofileM)<-colnames(fractionM);
curprofileM[curprofileM<0]<-0;
curprofileM;
}
########################################################################################
#############################################
###calculate mse
# Mean squared error (log scale) of the deconvolution reconstruction.
#
# fractionM: samples x cell-type fraction matrix.
# exprM:     genes x samples observed log(expr + 1) matrix.
# profileM:  genes x cell-type log-scale profile matrix.
#
# Returns mean((log((exp(profileM)-1) %*% t(fractionM) + 1) - exprM)^2).
calculateMSE <- function(fractionM, exprM, profileM)
{
  # Cell-type columns must line up between the fraction and profile matrices.
  if (any(colnames(fractionM) != colnames(profileM))) {
    stop("fraction names need to match")
  }
  reconstruction <- log((exp(profileM) - 1) %*% t(fractionM) + 1)
  mean((reconstruction - exprM)^2)
}
################################################################################################from CC
#########################################################################################
#####################################################################################################interatively updating fractionM
# Iterative log-normal deconvolution: alternates subtype discovery
# (deClustFromCClognormal) with re-estimation of per-sample fractions
# (optimizefractionM) until the fractions stabilise.
#
# exprM:          genes x samples expression matrix on the linear scale
#                 (log(exprM + 1) is taken where needed); NAs are rejected.
# fractionM:      samples x 3 matrix with columns "stromal","immune","cancer".
# subtypeN:       number of cancer subtypes to fit.
# maxstep:        cap on outer fraction-update iterations. Fixed: the original
#                 default `maxstep = maxstep` was a self-referential promise
#                 and errored ("promise already under evaluation") whenever the
#                 argument was omitted; it now defaults to 100.
# maxsubtypestep: cap on inner subtype-reassignment iterations, forwarded to
#                 deClustFromCClognormal (same self-reference fix, default 100).
# seed:           RNG seed forwarded to the k-means initialisation.
#
# Returns the result of the last deClustFromCClognormal call (a list with
# subtypefractionM, subtypeprofileM and subtype).
deClustFromCCiterativelognormal<-function(exprM,fractionM,subtypeN,maxstep=100,maxsubtypestep=100,seed=1)
{
if(sum(is.na(exprM))>0)stop("NA is not allowed in exprM")
if(any(colnames(exprM)!=rownames(fractionM)))stop("sample Name doesn't match")
stepi<-1
while(stepi<=maxstep)
{
print(paste("step",stepi))
r<-deClustFromCClognormal(exprM=exprM,fractionM=fractionM,subtypeNum=subtypeN,maxsubtypestep=maxsubtypestep,seed=seed)
# Re-estimate per-sample subtype fractions given the fitted profiles.
newsubtypefractionM<-optimizefractionM(log(exprM+1),log(r$subtypeprofileM+1),r$subtype,r$subtypefractionM)
# Collapse per-subtype cancer fractions back into a single "cancer" column.
newfractionM<-cbind(newsubtypefractionM[,c("stromal","immune")],cancer=apply(newsubtypefractionM[,grep("subtype",colnames(newsubtypefractionM)),drop=FALSE],1,sum))
print(max(abs(newfractionM-fractionM)))
# Converged once no fraction moves by 0.01 or more.
if(all(abs(newfractionM-fractionM)<0.01))break;
fractionM<-newfractionM;
stepi<-stepi+1;
# save(r,stepi,file=paste(outputDir,label,"_",stepi,".rda",sep=""))
}
return(r)
}
##################################################################################################################################################################################
################## deconvolution from fractionM lognormal
####################fractionM has 3 columns: stromal,immune and cancer
# Deconvolution with cancer-subtype discovery under the log-normal model.
#
# exprM:          genes x samples expression matrix on the linear scale
#                 (converted to log(expr + 1) internally).
# fractionM:      samples x 3 fraction matrix ("stromal","immune","cancer").
# subtypeNum:     number of cancer subtypes to fit.
# maxsubtypestep: cap on the subtype-reassignment iterations.
# seed:           RNG seed for the k-means initialisation.
#
# Returns a list with subtypefractionM, subtypeprofileM (returned on the
# linear scale via exp(.) - 1) and the per-sample subtype assignment; for
# subtypeNum == 1 the list additionally contains the model mse.
# NOTE(review): subtypeNum < 1 falls through both branches and returns NULL.
deClustFromCClognormal<-function(exprM,fractionM,subtypeNum,maxsubtypestep=100,seed=1)
{
exprM<-log(exprM+1)
#####initial ressubtype using logresidual without subtypes
profileM<-optimizeprofileM(exprM,fractionM)
# Single subtype: nothing to cluster, just relabel "cancer" as "subtype1".
if(subtypeNum==1){
mse<-calculateMSE(fractionM,exprM,profileM)
subtypefractionM<-fractionM;
colnames(subtypefractionM)[colnames(subtypefractionM)=="cancer"]<-"subtype1"
subtypeprofileM<-profileM;
colnames(subtypeprofileM)[colnames(subtypeprofileM)=="cancer"]<-"subtype1"
subtype<-rep("subtype1",nrow(subtypefractionM))
names(subtype)<-rownames(subtypefractionM)
return(list(mse=mse,subtypefractionM=subtypefractionM,subtypeprofileM=exp(subtypeprofileM)-1,subtype=subtype))
}
if(subtypeNum>1)
{
# Initial assignment: k-means on the residuals of the subtype-free fit.
logresM<-exprM-log((exp(profileM)-1)%*%t(fractionM)+1)
set.seed(seed)
r<-kmeans(t(logresM),subtypeNum,nstart=5)
ressubtype<-r$cluster;
ressubtype<-paste("subtype",ressubtype,sep="")
names(ressubtype)<-names(r$cluster)
ressubtype<-ressubtype[rownames(fractionM)]
##################
oldsubtype<-ressubtype
step<-1
while(step<maxsubtypestep)
{
print(paste("subtypestep",step))
# Refit profiles for the current assignment, ...
subtypefractionM<-getsubtypefractionM(fractionM,oldsubtype)
subtypeprofileM<-optimizeprofileM(exprM,subtypefractionM)
mse<-calculateMSE(subtypefractionM,exprM,subtypeprofileM)
print(mse)
####reassign cluster
# ... then move each sample to the subtype that minimises its mean squared
# log-scale reconstruction error.
subtypeNames<-unique(oldsubtype)
msesubtype<-sapply(subtypeNames,function(subtype)apply((exprM-log((exp(subtypeprofileM[,c("stromal","immune",subtype)])-1)%*%t(fractionM)+1))^2,2,mean))
newsubtype<-apply(msesubtype,1,function(x) subtypeNames[which.min(x)])
# Stop once no sample changes subtype.
if(all(newsubtype==oldsubtype))break;
print(sum(newsubtype!=oldsubtype))
oldsubtype<-newsubtype
step<-step+1;
}
#####
subtype<-oldsubtype
return(list(subtypefractionM=subtypefractionM,subtypeprofileM=exp(subtypeprofileM)-1,subtype=subtype))
}
}
########################################################################################################################################################################
#########################
# One-shot (non-iterative) deconvolution with subtype discovery.
#
# exprM:     genes x samples expression matrix (linear scale).
# fractionM: samples x cell-type fraction matrix; rows match colnames(exprM).
# k:         number of cancer subtypes for k-means.
# seed:      RNG seed for the k-means initialisation.
#
# Returns a list with the subtype assignment, the per-subtype profile matrix
# (floored at 1) and the expanded subtype fraction matrix.
simpleFromCC <- function(exprM, fractionM, k, seed = 1)
{
  # Samples must appear in the same order in both matrices.
  if (any(colnames(exprM) != rownames(fractionM))) stop("sample doesn't match")
  # Regress expression on the bulk fractions; floor fitted values at 1 so the
  # log-ratio below is well defined.
  fit <- lm.fit(x = fractionM, y = t(exprM))
  fitted_expr <- t(fit$coefficients) %*% t(fractionM)
  fitted_expr[fitted_expr < 1] <- 1
  adjexprM <- log(exprM) - log(fitted_expr)
  # Cluster the residual expression into k candidate cancer subtypes.
  set.seed(seed)
  km <- kmeans(t(adjexprM), k, nstart = 5)
  ressubtype <- paste("subtype", km$cluster, sep = "")
  names(ressubtype) <- names(km$cluster)
  ressubtype <- ressubtype[rownames(fractionM)]
  # Refit per-subtype profiles against the expanded fraction matrix.
  subtypefractionM <- getsubtypefractionM(fractionM, ressubtype)
  refit <- lm.fit(x = subtypefractionM, y = t(exprM))
  subtypeprofileM <- t(refit$coefficients)
  subtypeprofileM[subtypeprofileM < 1] <- 1
  list(subtype = ressubtype, subtypeprofileM = subtypeprofileM,
       subtypefractionM = subtypefractionM)
}
########################################################################################
#############################
# Deconvolution with subtype discovery using ordinary least squares on the
# linear scale (contrast with deClustFromCClognormal's log-normal objective).
#
# exprM:          genes x samples expression matrix (linear scale).
# fractionM:      samples x 3 fraction matrix ("stromal","immune","cancer").
# k:              number of cancer subtypes for the k-means initialisation.
# maxsubtypestep: cap on the subtype-reassignment iterations.
# seed:           RNG seed for k-means.
#
# Returns a list with the final subtype assignment, the fitted per-subtype
# profile matrix (floored at 1) and the expanded subtype fraction matrix.
# NOTE(review): if the loop exits via the step limit instead of convergence,
# the returned subtype is the last reassignment, which need not match the
# subtypefractionM from the same iteration; with maxsubtypestep <= 1 the loop
# body never runs and the function errors on undefined variables -- confirm
# intended usage.
deClustFromCC<-function(exprM,fractionM,k,maxsubtypestep=100,seed=1)
{
# Initial fit without subtypes; floor fitted values at 1 so logs are safe.
model<-lm.fit(x=fractionM,y=t(exprM))
reM<-t(model$coefficients)%*%t(fractionM)
reM[reM<1]<-1
adjexprM<-log(exprM)-log(reM)
# Initial subtype assignment: k-means on the log-ratio residuals.
set.seed(seed)
r<-kmeans(t(adjexprM),k,nstart=5)
ressubtype<-r$cluster;
ressubtype<-paste("subtype",ressubtype,sep="")
names(ressubtype)<-names(r$cluster)
ressubtype<-ressubtype[rownames(fractionM)]
oldsubtype<-ressubtype
step<-1
while(step<maxsubtypestep)
{
print(paste("subtypestep",step))
# Refit per-subtype profiles for the current assignment, ...
subtypefractionM<-getsubtypefractionM(fractionM,oldsubtype)
model<-lm.fit(x=subtypefractionM,y=t(exprM))
subtypeprofileM<-t(model$coefficients)
subtypeprofileM[subtypeprofileM<1]<-1;
####reassign cluster
# ... then move each sample to the subtype whose profile reconstructs it
# with the smallest mean squared log error.
subtypeNames<-unique(oldsubtype)
msesubtype<-sapply(subtypeNames,function(subtype)
{
reM<-subtypeprofileM[,c("stromal","immune",subtype)]%*%t(fractionM[,c("stromal","immune","cancer")])
reM[reM<1]<-1;
apply((log(exprM)-log(reM))^2,2,mean)
})
newsubtype<-apply(msesubtype,1,function(x) subtypeNames[which.min(x)])
# Stop once no sample changes subtype.
if(all(newsubtype==oldsubtype))break;
print(sum(newsubtype!=oldsubtype))
oldsubtype<-newsubtype
step<-step+1;
}
#####
subtypeprofileM[subtypeprofileM<1]<-1;
list(subtype=newsubtype,subtypeprofileM=subtypeprofileM,subtypefractionM=subtypefractionM)
}
#####################################################################################
########################################################################################
# Outer loop around deClustFromCC: alternates subtype discovery with
# re-estimation of the per-sample fractions until the fractions stabilise
# (max change < 0.01) or maxstep iterations are reached.
#
# exprM:          genes x samples expression matrix (linear scale).
# fractionM:      samples x 3 starting fractions ("stromal","immune","cancer").
# k:              number of cancer subtypes.
# weight:         if TRUE, weight each gene by the inverse of its mean squared
#                 residual (lm.wfit); otherwise use unweighted lm.fit.
# maxstep:        cap on outer iterations.
# maxsubtypestep: cap on inner subtype-reassignment iterations.
# seed:           RNG seed for k-means.
#
# Returns the result of the last deClustFromCC call.
deClustFromCCiterative<-function(exprM,fractionM,k=2,weight=TRUE,maxstep=100,maxsubtypestep=100,seed=1)
{
stepi<-1
while(stepi<=maxstep)
{
print(paste("step",stepi))
#if(stepi==20)browser()
r<-deClustFromCC(exprM,fractionM,maxsubtypestep=maxsubtypestep,k=k,seed=seed)
subtype<-r$subtype
# Per-gene residual variance, used as inverse regression weights below.
geneV<-apply((r$subtypeprofileM%*%t(r$subtypefractionM)-exprM)^2,1,mean)
subtypeName<-unique(subtype)
# Re-estimate fractions per subtype: regress each subtype's samples on the
# stromal/immune profiles plus that subtype's own cancer profile.
subtypefractionML<-lapply(subtypeName,function(st)
{
if(weight) model<-lm.wfit(x=r$subtypeprofileM[,c("stromal","immune",st)],y=exprM[,which(subtype==st)],w=1/geneV)
if(!weight) model<-lm.fit(x=r$subtypeprofileM[,c("stromal","immune",st)],y=exprM[,which(subtype==st)])
subtypefractionM<-t(model$coefficients)
###when there is only one sample there, the rownames is lost
rownames(subtypefractionM)<-colnames(exprM)[which(subtype==st)]
subtypefractionM[subtypefractionM<0]<-0;
# Pad with zero columns for the other subtypes and restore column order.
subtypefractionM<-cbind(subtypefractionM,matrix(0,nrow(subtypefractionM),length(subtypeName)-1,dimnames=list(rownames(subtypefractionM),setdiff(subtypeName,st))))
subtypefractionM[,colnames(r$subtypeprofileM),drop=FALSE];
})
subtypefractionM<-do.call("rbind",subtypefractionML)
subtypefractionM<-subtypefractionM[colnames(exprM),]
# Collapse per-subtype cancer fractions into a single "cancer" column.
newfractionM<-cbind(subtypefractionM[,c("stromal","immune")],cancer=apply(subtypefractionM[,grep("subtype",colnames(subtypefractionM)),drop=FALSE],1,sum))
# Converged once no fraction moves by 0.01 or more.
if(all(abs(newfractionM-fractionM)<0.01))break;
print(max(abs(newfractionM-fractionM)))
fractionM<-newfractionM;
stepi<-stepi+1;
}
r;
}
|
\name{scoreWtd}
\alias{scoreWtd}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Score items using regression or correlation based weights}
\description{Item weights from \code{\link{bestScales}} or \code{\link{setCor}} are used to find weighted scale scores. In contrast to the unit weights used in \code{\link{scoreItems}}, \code{\link{scoreWtd}} will multiply the data by a set of weights to find scale scores. These weights may come from a regression (e.g., \code{\link{lm}} or \code{\link{setCor}}) or may be the zero order correlation weights from \code{\link{bestScales}}.
}
\usage{
scoreWtd(weights, items, std = TRUE, sums = FALSE, impute = "none")
}
\arguments{
\item{weights}{This is just a matrix of weights to use for each item for each scale.}
\item{items}{ Matrix or dataframe of raw item scores}
\item{std}{if TRUE, then find weighted standard scores else just use raw data}
\item{sums}{By default, find the average item score. If sums = TRUE, then find the sum scores. This is useful for regression with an intercept term}
\item{impute}{impute="median" replaces missing values with the item medians, impute = "mean" replaces values with the mean response. impute="none" the subject's scores are based upon the average of the keyed, but non missing scores. impute = "none" is probably more appropriate for a large number of missing cases (e.g., SAPA data). }
}
\details{Although meant for finding correlation weighted scores using the weights from \code{\link{bestScales}}, it is also possible to use alternative weight matrices, such as those returned by the coefficients in \code{\link{lm}}.
}
\value{
A data frame of scores.}
\author{William Revelle}
\seealso{
\code{\link{bestScales}} and \code{\link{setCor}}
}
\examples{
#find the weights from a regression model and then apply them to a new set
#derivation of weights from the first 20 cases
model.lm <- lm(rating ~ complaints + privileges + learning,data=attitude[1:20,])
#or use setCor to find the coefficients
model <- setCor(rating ~ complaints + privileges +learning,data=attitude[1:20,],std=FALSE)
#Apply these to a different set of data (the last 10 cases)
#note that the regression coefficients need to be a matrix
scores.lm <- scoreWtd(as.matrix(model.lm$coefficients),attitude[21:30,],sums=TRUE,std=FALSE)
scores <- scoreWtd(model$coefficients,attitude[21:30,],sums=TRUE,std=FALSE)
describe(scores)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ multivariate }
\keyword{models} | /man/scoreWtd.Rd | no_license | cran/psych | R | false | false | 2,560 | rd | \name{scoreWtd}
\alias{scoreWtd}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Score items using regression or correlation based weights}
\description{Item weights from \code{\link{bestScales}} or \code{\link{setCor}} are used to find weighted scale scores. In contrast to the unit weights used in \code{\link{scoreItems}}, \code{\link{scoreWtd}} will multiply the data by a set of weights to find scale scores. These weights may come from a regression (e.g., \code{\link{lm}} or \code{\link{setCor}}) or may be the zero order correlation weights from \code{\link{bestScales}}.
}
\usage{
scoreWtd(weights, items, std = TRUE, sums = FALSE, impute = "none")
}
\arguments{
\item{weights}{This is just a matrix of weights to use for each item for each scale.}
\item{items}{ Matrix or dataframe of raw item scores}
\item{std}{if TRUE, then find weighted standard scores else just use raw data}
\item{sums}{By default, find the average item score. If sums = TRUE, then find the sum scores. This is useful for regression with an intercept term}
\item{impute}{impute="median" replaces missing values with the item medians, impute = "mean" replaces values with the mean response. impute="none" the subject's scores are based upon the average of the keyed, but non missing scores. impute = "none" is probably more appropriate for a large number of missing cases (e.g., SAPA data). }
}
\details{Although meant for finding correlation weighted scores using the weights from \code{\link{bestScales}}, it is also possible to use alternative weight matrices, such as those returned by the coefficients in \code{\link{lm}}.
}
\value{
A data frame of scores.}
\author{William Revelle}
\seealso{
\code{\link{bestScales}} and \code{\link{setCor}}
}
\examples{
#find the weights from a regression model and then apply them to a new set
#derivation of weights from the first 20 cases
model.lm <- lm(rating ~ complaints + privileges + learning,data=attitude[1:20,])
#or use setCor to find the coefficients
model <- setCor(rating ~ complaints + privileges +learning,data=attitude[1:20,],std=FALSE)
#Apply these to a different set of data (the last 10 cases)
#note that the regression coefficients need to be a matrix
scores.lm <- scoreWtd(as.matrix(model.lm$coefficients),attitude[21:30,],sums=TRUE,std=FALSE)
scores <- scoreWtd(model$coefficients,attitude[21:30,],sums=TRUE,std=FALSE)
describe(scores)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ multivariate }
\keyword{models} |
## Simulation scenario script: generate a Poisson-thinned GTEx muscle count
## dataset and estimate the number of surrogate variables.
## NOTE(review): `Nsamp`, `nullpi` and `ncontrol` are not defined here -- they
## appear to be injected by the calling framework (e.g. a DSC scenario);
## confirm before running this file stand-alone.
source("data_generators/datamaker_only_counts.R")
## these do not change
args_val <- list()
args_val$log2foldsd <- 1
args_val$tissue <- "muscle"
args_val$path <- "../../../data/gtex_tissue_gene_reads/"
args_val$Ngene <- 10000
args_val$log2foldmean <- 0
args_val$skip_gene <- 0
args_val$Nsamp <- Nsamp
args_val$nullpi <- nullpi
## Inject signal via Poisson thinning unless every gene is null (nullpi == 1).
if (nullpi != 1) {
  args_val$poisthin <- TRUE
}
d_out <- datamaker_counts_only(args_val)
which_null <- d_out$meta$null
## Start from all null genes, then randomly drop genes until exactly
## `ncontrol` remain flagged as control genes.
control_genes <- as.logical(which_null)
nnull <- sum(control_genes)
control_genes[control_genes][sample(1:nnull, size = nnull - ncontrol)] <- FALSE
## True log2 fold-changes: zero for null genes, simulated values otherwise.
beta_true <- rep(0, length = args_val$Ngene)
if (nullpi != 1) {
  beta_true[!which_null] <- d_out$meta$true_log2foldchange
}
## Design matrix (intercept + treatment) and log2-transformed counts.
X <- as.matrix(model.matrix(~d_out$input$condition))
colnames(X) <- c("Intercept", "Treatment")
Y <- t(log2(as.matrix(d_out$input$counts + 1)))
## Estimate the number of surrogate variables ("be" = Buja-Eyuboglu method).
num_sv <- sva::num.sv(t(Y), mod = X, method = "be")
| /dsc_sims/code/data_generators/pois_thin.R | no_license | Feigeliudan01/sim_code | R | false | false | 1,112 | r | source("data_generators/datamaker_only_counts.R")
## these do not change
args_val <- list()
args_val$log2foldsd <- 1
args_val$tissue <- "muscle"
args_val$path <- "../../../data/gtex_tissue_gene_reads/"
args_val$Ngene <- 10000
args_val$log2foldmean <- 0
args_val$skip_gene <- 0
args_val$Nsamp <- Nsamp
args_val$nullpi <- nullpi
if (nullpi != 1) {
args_val$poisthin <- TRUE
}
d_out <- datamaker_counts_only(args_val)
which_null <- d_out$meta$null
control_genes <- as.logical(which_null)
nnull <- sum(control_genes)
control_genes[control_genes][sample(1:nnull, size = nnull - ncontrol)] <- FALSE
beta_true <- rep(0, length = args_val$Ngene)
if (nullpi != 1) {
beta_true[!which_null] <- d_out$meta$true_log2foldchange
}
X <- as.matrix(model.matrix(~d_out$input$condition))
colnames(X) <- c("Intercept", "Treatment")
Y <- t(log2(as.matrix(d_out$input$counts + 1)))
num_sv <- sva::num.sv(t(Y), mod = X, method = "be")
|
library(tidyverse)
library(rvest)
library(furrr)
library(tictoc)
library(js)
set.seed(1)
link <- sprintf("https://www.inscricao.marinha.mil.br/marinha/index_concursos.jsp?id_concurso=%d", 1:390L)
scrap <- function(x) {
if(RCurl::url.exists(x) == T) {
y <- read_html(x) %>%
html_nodes(".header0 b") %>%
html_text(trim = T)
} else {
y <- NA
}
return(y)
}
plan(multiprocess)
tic()
nomes_concurso <- furrr::future_map_chr(link, ~scrap(.x), .progress = T)
toc()
tab <- tibble(concurso = nomes_concurso, concurso2 = nomes_concurso) %>%
mutate(concurso = paste0('<a href=', '"', link, '"', '>', concurso, '</a>')) %>%
filter(!is.na(concurso2) == T,
str_detect(concurso2, "null") == F,
str_detect(concurso2, "") == T
) %>%
slice(nrow(.):1) %>%
select(concurso)
writexl::write_xlsx(tab, "C:/Users/Igor/Documents/trabalhos_R/concurso_marinha/base_marinha.xlsx")
| /gera_base_marinha.R | no_license | igorkf/marinha | R | false | false | 933 | r |
library(tidyverse)
library(rvest)
library(furrr)
library(tictoc)
library(js)
set.seed(1)
link <- sprintf("https://www.inscricao.marinha.mil.br/marinha/index_concursos.jsp?id_concurso=%d", 1:390L)
scrap <- function(x) {
if(RCurl::url.exists(x) == T) {
y <- read_html(x) %>%
html_nodes(".header0 b") %>%
html_text(trim = T)
} else {
y <- NA
}
return(y)
}
plan(multiprocess)
tic()
nomes_concurso <- furrr::future_map_chr(link, ~scrap(.x), .progress = T)
toc()
tab <- tibble(concurso = nomes_concurso, concurso2 = nomes_concurso) %>%
mutate(concurso = paste0('<a href=', '"', link, '"', '>', concurso, '</a>')) %>%
filter(!is.na(concurso2) == T,
str_detect(concurso2, "null") == F,
str_detect(concurso2, "") == T
) %>%
slice(nrow(.):1) %>%
select(concurso)
writexl::write_xlsx(tab, "C:/Users/Igor/Documents/trabalhos_R/concurso_marinha/base_marinha.xlsx")
|
#' Compare data and model prediction by computing residuals
#'
#' @param data data.frame with name (factor), time (numeric), value (numeric) and sigma (numeric)
#' @param out output of ode(), optionally augmented with attributes
#' "deriv" (output of ode() for the sensitivity equations) and
#' "parameters" (character vector of parameter names, a subset of those
#' contained in the sensitivity equations). If "deriv" is given, also "parameters"
#' needs to be given.
#' @return data.frame with the original data augmented by columns "prediction" (
#' numeric, the model prediction), "residual" (numeric, difference between
#' prediction and data value), "weighted.residual" (numeric, residual divided
#' by sigma). If "deriv" was given, the returned data.frame has an
#' attribute "deriv" (data.frame with the derivatives of the residuals with
#' respect to the parameters).
#' @export
#' @import cOde2ndSens
res <- function (data, out) {
  # Unique times, names and parameter names
  times <- sort(unique(data$time))
  names <- as.character(unique(data$name))
  pars <- attr(out, "parameters")
  # Match data times/names in unique times/names
  data.time <- match(data$time, times)
  data.name <- match(data$name, names)
  # Match unique times/names in out times/names
  time.out <- match(times, out[,1])
  name.out <- match(names, colnames(out))
  # Match data times/names in out times/names
  timeIndex <- time.out[data.time]
  nameIndex <- name.out[data.name]
  # Model prediction for each data row, read off the matching out[] cell.
  prediction <- sapply(1:nrow(data), function(i) out[timeIndex[i], nameIndex[i]])
  # Propagate derivatives if available
  deriv <- attr(out, "deriv")
  deriv.data <- NULL
  if (!is.null(deriv)) {
    # First-order sensitivity columns are named "<state>.<parameter>".
    sensnames <- as.vector(outer(names, pars, paste, sep="."))
    # Match names to the corresponding sensitivities in sensnames
    names.sensnames <- t(matrix(1:length(sensnames), nrow = length(names), ncol = length(pars)))
    # Get positions of sensnames in colnames of deriv
    sensnames.deriv <- match(sensnames, colnames(deriv))
    # Get the columns in deriv corresponding to data names
    derivnameIndex <- matrix(sensnames.deriv[names.sensnames[, data.name]], ncol = length(data.name))
    # Derivatives of the prediction
    # (submatrix() is a package-internal helper -- not defined in this file.)
    deriv.prediction <- do.call(rbind, lapply(1:nrow(data), function(i) submatrix(deriv, timeIndex[i], derivnameIndex[, i])))
    colnames(deriv.prediction) <- pars
    deriv.data <- data.frame(time = data$time, name = data$name, deriv.prediction)
  }
  # -------------------------------------------
  # Propagate sderivatives (second-order sensitivities) if available
  sderiv <- attr(out, "sderiv")
  sderiv.data <- NULL
  if (!is.null(sderiv)) {
    # Second-order columns are named "<state>.<par_i>.<par_j>".
    ssensnames <- as.vector(outer(outer(names, pars, paste, sep="."), pars, paste, sep="."))
    # Match names to the corresponding ssensitivities in ssensnames
    names.ssensnames <- t(matrix(1:length(ssensnames), nrow = length(names), ncol = length(pars)^2))
    # Get positions of ssensnames in colnames of sderiv
    ssensnames.sderiv <- match(ssensnames, colnames(sderiv))
    # Get the columns in sderiv corresponding to data names
    sderivnameIndex <- matrix(ssensnames.sderiv[names.ssensnames[, data.name]], ncol = length(data.name))
    # Derivatives of the prediction
    sderiv.prediction <- do.call(rbind, lapply(1:nrow(data), function(i) submatrix(sderiv, timeIndex[i], sderivnameIndex[, i])))
    colnames(sderiv.prediction) <- outer(pars, pars, paste, sep =".")
    sderiv.data <- data.frame(time = data$time, name = data$name, sderiv.prediction)
  }
  # ------------------------------------------------------------------------------------------------------------------------------
  # Compute residuals
  residuals <- prediction - data$value
  weighted.residuals <- (prediction - data$value)/data$sigma
  data <- cbind(data, prediction = prediction, residual = residuals,
                weighted.residual = weighted.residuals)
  data <- data[c("time", "name", "value", "prediction", "sigma",
                 "residual", "weighted.residual")]
  #attr(data, "deriv") <- deriv.data
  # objframe() (package-internal) attaches deriv/sderiv as attributes.
  objframe(data, deriv = deriv.data, sderiv = sderiv.data)
}
| /R/WORKdata.R | permissive | dlill/dMod2ndsens | R | false | false | 4,132 | r | #' Compare data and model prediction by computing residuals
#'
#' @param data data.frame with name (factor), time (numeric), value (numeric) and sigma (numeric)
#' @param out output of ode(), optionally augmented with attributes
#' "deriv" (output of ode() for the sensitivity equations) and
#' "parameters" (character vector of parameter names, a subsest of those
#' contained in the sensitivity equations). If "deriv" is given, also "parameters"
#' needs to be given.
#' @return data.frame with the original data augmented by columns "prediction" (
#' numeric, the model prediction), "residual" (numeric, difference between
#' prediction and data value), "weighted.residual" (numeric, residual devided
#' by sigma). If "deriv" was given, the returned data.frame has an
#' attribute "deriv" (data.frame with the derivatives of the residuals with
#' respect to the parameters).
#' @export
#' @import cOde2ndSens
res <- function (data, out) {
# Unique times, names and parameter names
times <- sort(unique(data$time))
names <- as.character(unique(data$name))
pars <- attr(out, "parameters")
# Match data times/names in unique times/names
data.time <- match(data$time, times)
data.name <- match(data$name, names)
# Match unique times/names in out times/names
time.out <- match(times, out[,1])
name.out <- match(names, colnames(out))
# Match data times/names in out times/names
timeIndex <- time.out[data.time]
nameIndex <- name.out[data.name]
prediction <- sapply(1:nrow(data), function(i) out[timeIndex[i], nameIndex[i]])
# Propagate derivatives if available
deriv <- attr(out, "deriv")
deriv.data <- NULL
if (!is.null(deriv)) {
sensnames <- as.vector(outer(names, pars, paste, sep="."))
# Match names to the corresponding sensitivities in sensnames
names.sensnames <- t(matrix(1:length(sensnames), nrow = length(names), ncol = length(pars)))
# Get positions of sensnames in colnames of deriv
sensnames.deriv <- match(sensnames, colnames(deriv))
# Get the columns in deriv corresponding to data names
derivnameIndex <- matrix(sensnames.deriv[names.sensnames[, data.name]], ncol = length(data.name))
# Derivatives of the prediction
deriv.prediction <- do.call(rbind, lapply(1:nrow(data), function(i) submatrix(deriv, timeIndex[i], derivnameIndex[, i])))
colnames(deriv.prediction) <- pars
deriv.data <- data.frame(time = data$time, name = data$name, deriv.prediction)
}
# -------------------------------------------
# Propagate sderivatives if available
sderiv <- attr(out, "sderiv")
sderiv.data <- NULL
if (!is.null(sderiv)) {
ssensnames <- as.vector(outer(outer(names, pars, paste, sep="."), pars, paste, sep="."))
# Match names to the corresponding ssensitivities in ssensnames
names.ssensnames <- t(matrix(1:length(ssensnames), nrow = length(names), ncol = length(pars)^2))
# Get positions of ssensnames in colnames of sderiv
ssensnames.sderiv <- match(ssensnames, colnames(sderiv))
# Get the columns in sderiv corresponding to data names
sderivnameIndex <- matrix(ssensnames.sderiv[names.ssensnames[, data.name]], ncol = length(data.name))
# Derivatives of the prediction
sderiv.prediction <- do.call(rbind, lapply(1:nrow(data), function(i) submatrix(sderiv, timeIndex[i], sderivnameIndex[, i])))
colnames(sderiv.prediction) <- outer(pars, pars, paste, sep =".")
sderiv.data <- data.frame(time = data$time, name = data$name, sderiv.prediction)
}
# ------------------------------------------------------------------------------------------------------------------------------
# Compute residuals
residuals <- prediction - data$value
weighted.residuals <- (prediction - data$value)/data$sigma
data <- cbind(data, prediction = prediction, residual = residuals,
weighted.residual = weighted.residuals)
data <- data[c("time", "name", "value", "prediction", "sigma",
"residual", "weighted.residual")]
#attr(data, "deriv") <- deriv.data
objframe(data, deriv = deriv.data, sderiv = sderiv.data)
}
|
testlist <- list(latLongs = structure(c(1.46243431169009e-321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 4L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) | /MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612727147-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 217 | r | testlist <- list(latLongs = structure(c(1.46243431169009e-321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(6L, 4L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) |
library(tibble)
library(dplyr)
library(readr)
# NOTE(review): context() is deprecated in testthat 3e; the file name alone
# labels these tests nowadays.
context("guess_delim")
# Tiny fixture: the cell values themselves contain commas while the file is
# written with ";" as the delimiter, so both ";" and "," occur in the text.
tiny <- tibble(
  a = c('0,0', '0,1'),
  b = c('1,0', '1,1'))
file_tiny <- tempfile()
write_delim(tiny, file_tiny, ";")
# Expected guess_delim() output for file_tiny: one row per candidate
# delimiter; char_raw is the character's hex code (0x3b = ";", 0x2c = ","),
# and ";" is ranked first.
ans <- tibble(
  char_raw = c("3b", "2c"),
  var = c(0, 0),
  n = c(3L, 2L),
  char = c(";", ","))
# Large fixtures: the `nasa` dataset (ships with dplyr, loaded above)
# written out with four different delimiters.
large <- as_tibble(nasa)
file_large1 <- tempfile()
file_large2 <- tempfile()
file_large3 <- tempfile()
file_large4 <- tempfile()
write_delim(large, file_large1, ",")
write_delim(large, file_large2, ";")
write_delim(large, file_large3, "|")
write_delim(large, file_large4, "\t")
test_that("guess_delim can guess from tiny tables", {
  expect_identical(guess_delim(file_tiny), ans)
  # Verbose mode should announce the most "probable" delimiter.
  expect_message(guess_delim(file_tiny, verbose = TRUE), "probable")
})
test_that("guess_delim can guess from large tables", {
  # The top-ranked character must be the delimiter the file was written with.
  expect_identical(guess_delim(file_large1)$char[1], ",")
  expect_identical(guess_delim(file_large2)$char[1], ";")
  expect_identical(guess_delim(file_large3)$char[1], "|")
  expect_identical(guess_delim(file_large4)$char[1], "\t")
})
| /tests/testthat/test_guess_delim.R | permissive | Athospd/forkliftr | R | false | false | 1,094 | r | library(tibble)
library(dplyr)
library(readr)
context("guess_delim")
# Tiny table
tiny <- tibble(
a = c('0,0', '0,1'),
b = c('1,0', '1,1'))
file_tiny <- tempfile()
write_delim(tiny, file_tiny, ";")
# Output table for file_tiny
ans <- tibble(
char_raw = c("3b", "2c"),
var = c(0, 0),
n = c(3L, 2L),
char = c(";", ","))
# Large tables
large <- as_tibble(nasa)
file_large1 <- tempfile()
file_large2 <- tempfile()
file_large3 <- tempfile()
file_large4 <- tempfile()
write_delim(large, file_large1, ",")
write_delim(large, file_large2, ";")
write_delim(large, file_large3, "|")
write_delim(large, file_large4, "\t")
test_that("guess_delim can guess from tiny tables", {
expect_identical(guess_delim(file_tiny), ans)
expect_message(guess_delim(file_tiny, verbose = TRUE), "probable")
})
test_that("guess_delim can guess from large tables", {
expect_identical(guess_delim(file_large1)$char[1], ",")
expect_identical(guess_delim(file_large2)$char[1], ";")
expect_identical(guess_delim(file_large3)$char[1], "|")
expect_identical(guess_delim(file_large4)$char[1], "\t")
})
|
### Code for Simulation Study 1 ###
library("RCurl")
# Load the equivalence-testing helper functions straight from GitHub.
# base::source() accepts an https:// URL, which avoids the fragile
# getURL() + eval(parse(text = ...)) pattern used previously and does
# not require disabling SSL certificate verification.
source("https://raw.githubusercontent.com/harlanhappydog/EquivTestStandardReg/master/EquivTestStandardReg.R")
# Simulation Study 1: for each scenario in a grid (number of regressors K,
# error variance sigma2, sample size N), estimate the probability that the
# equivalence test on the first standardized regression coefficient rejects
# (p < alpha) as a function of the equivalence margin Delta.
#
# Arguments:
#   randomreg - if TRUE, resample the design-matrix rows on every Monte
#               Carlo replicate ("random regressors"); if FALSE the design
#               is held fixed across replicates.
#   nSim      - number of Monte Carlo replicates per scenario.
#
# Returns a long-format data.frame with one row per (scenario, Delta)
# carrying the estimated rejection probability `pr_less_alpha`.
simstudy1 <- function(randomreg=FALSE, nSim=100){
# Scenario grid: K in {2, 4} x four error variances x four sample sizes.
# sigma2 = 0.50001 is a sentinel: it keeps sigma2 ~= 0.5 but switches the
# true coefficient of X1 to zero (see the betavec assignments below).
resultsmat <- data.frame(
	JJJindex=NA,
	true_std_beta= NA,
	power=NA,
	as.matrix(expand.grid(Nvar=c(2,4),
	sigma2=c(0.05, 0.15, 0.5, 0.50001),
	Nsample=c(180, 540, 1000, 3500))))
resultsmat$true_std_beta <- NA
# Significance level used when converting p-values to rejections.
resultsmat$alpha_sig <- 0.05
resultsmat<-resultsmat[order(resultsmat$Nvar, resultsmat$sigma2),]
pval_list<-list()
problist<-list()
# One iteration per scenario row.
for(jjj in 1:dim(resultsmat)[1]){
print(jjj)
resultsmat[jjj, "JJJindex"] <- paste("jjj",jjj,sep="")
sigma2 <- resultsmat[jjj, "sigma2"]
N <- resultsmat[jjj, "Nsample"]
nVar <- resultsmat[jjj, "Nvar"]
# Full 2^4 factorial binary design, replicated into a very large
# "population"; the first N rows (or a bootstrap sample of rows when
# randomreg = TRUE) serve as the design matrix.
basematrix <- data.frame(expand.grid(
X1=c(0,1),
X2=c(0,1),
X3=c(0,1),
X4=c(0,1)))
X1 <- rep(basematrix$X1,900000)
X2 <- rep(basematrix$X2,900000)
X3 <- rep(basematrix$X3,900000)
X4 <- rep(basematrix$X4,900000)
if(nVar==2){
# Two-regressor model.  The "true" standardized coefficient of X1 is
# computed from this large simulated population via the sd ratio.
epsilon <- rnorm(length(X1), 0, sqrt(sigma2))
X <- as.matrix(cbind(1, X1, X2))
betavec <- c(-0.2, 0.1, 0.2)
if(sigma2==0.50001){betavec <- c(-0.2, 0.0, 0.2)}
Y <- c(X%*%betavec) + epsilon
std_beta_hat <- betavec[-1]*(apply(X[,-1],2,sd)/sd(Y))
true_std_beta <- std_beta_hat[1]
resultsmat[jjj, "true_std_beta"] <- round(true_std_beta,3)
Xmatrix_ <- X[1:N,]
}
if(nVar==4){
# Four-regressor model; same construction as the K = 2 branch.
epsilon <- rnorm(length(X1), 0, sqrt(sigma2))
X <- as.matrix(cbind(1, X1, X2, X3, X4))
betavec <- c(0.2, 0.1, 0.14, -0.1, -0.1)
if(sigma2== 0.50001){betavec <- c(0.2, 0.00, 0.14, -0.1, -0.1)}
Y <- c(X%*%betavec) + epsilon
std_beta_hat <- betavec[-1]*(apply(X[,-1],2,sd)/sd(Y))
true_std_beta <- std_beta_hat[1]
resultsmat[jjj, "true_std_beta"] <- round(true_std_beta, 3)
Xmatrix_ <- X[1:N,]
}
pval_list[[jjj]] <- list()
# Equivalence margins at which the test is evaluated.
Deltavec <- seq(0.01, 0.25, 0.005)
# Monte Carlo replicates for this scenario.
for(iii in 1:nSim){
if(randomreg){Xmatrix_ <- X[sample(1:dim(X)[1], N, replace=TRUE),]}
epsilon <- rnorm(N, 0, sqrt(sigma2))
Y <- Xmatrix_%*%betavec + epsilon
# (Commented-out block retained from development: manual computation of
# the standardized coefficient and its fixed/random-design SEs.)
# lmmod <- summary(lm(Y ~ Xmatrix_[,-1]))
# R2 <- lmmod$r.squared
# N <- length(Y); K <- dim(Xmatrix_[,-1])[2]
# beta_hat <- lmmod$coef[-1,][1,1]; SE_beta_hat <- lmmod$coef[-1,][1,2];
# std_beta_hat <- beta_hat*(apply(Xmatrix_[,-1],2,sd)/sd(Y))[1]
# R2XkXk <- unlist(lapply(c(1:K), function(k) {
# summary(lm(Xmatrix_[,-1][,k]~Xmatrix_[,-1][,-k]))$r.squared}))[1]
# SE_std_beta_FIX <- sqrt((1-R2)/((1-R2XkXk)*(N-K-1)))
# SE_std_beta_RDM <- DEL(X=Xmatrix_[,-1], y=Y)$SEs[1]
pval_list[[jjj]][[iii]]<-vector()
# One equivalence-test p-value per margin Delta.
for(kk in 1:length(Deltavec)){
Delta <- Deltavec[kk]
# Fixed regressors:
if(!randomreg){
pval <- equivstandardBeta(Y=Y, Xmatrix= Xmatrix_[,-1], DELTA= Delta, random=FALSE)$pval[1]
}
# Random regressors:
if(randomreg){
pval <- equivstandardBeta(Y=Y, Xmatrix= Xmatrix_[,-1], DELTA= Delta, random=TRUE)$pval[1]
}
pval_list[[jjj]][[iii]][kk] <- pval
}
}
# Rows = Delta values, columns = replicates; the rejection probability is
# the proportion of p-values below alpha at each Delta.
pvalmat <- as.data.frame(pval_list[[jjj]], col.names=NA, row.names=Deltavec)
problist[[jjj]] <- rowMeans(pvalmat<resultsmat[jjj, "alpha_sig"])
print(resultsmat[jjj,])
}
# Assemble rejection probabilities into long format and merge them back
# onto the scenario grid.  NOTE(review): `jjj1:jjj32` hard-codes the
# 32-row grid (2 Nvar x 4 sigma2 x 4 Nsample); update if the grid changes.
problist_df <- as.data.frame(problist, col.names=c(1:dim(resultsmat)[1]))
colnames(problist_df)<-paste("jjj",c(1:dim(resultsmat)[1]), sep="")
problist_df$Delta<-Deltavec
library(tidyr)
problist_long<-gather(problist_df,JJJindex,pr_less_alpha,jjj1:jjj32, factor_key=TRUE)
resultsmatall<-merge(resultsmat,problist_long, by="JJJindex",all=TRUE)
# Plotting helpers: factor versions of the grouping variables, a combined
# group id `g`, and facet labels "K = 2" / "K = 4" for Nvar.
resultsmatall$true_std_beta <- as.factor(resultsmatall$true_std_beta)
resultsmatall$N <- as.factor(resultsmatall$Nsample)
resultsmatall$g <- as.factor(paste(as.character(resultsmatall$true_std_beta),as.character(resultsmatall$N), sep="_"))
resultsmatall <- transform(resultsmatall,
Nvar = factor(Nvar, levels = sort(unique(resultsmatall$Nvar)), c( (("K = 2")),(("K = 4"))) ))
resultsmatall$KK <- as.numeric(resultsmatall$Nvar)*2
resultsmatall$NN <- as.numeric(as.character(resultsmatall$N))
return(resultsmatall)}
########################################
# Run both simulation variants with 5000 Monte Carlo replicates each.
sim_fix <- simstudy1(FALSE, 5000)
sim_rdm <- simstudy1(TRUE, 5000)

library(ggplot2)

# Rejection-probability curves vs. Delta, faceted by K.
# qplot() is deprecated since ggplot2 3.4.0, so the plots are built with
# ggplot()/aes() directly; the aesthetics map one-to-one from the old
# calls (pch -> shape, lty -> linetype, col -> colour).
plot_power_curves <- function(dat, ylim, ybreaks) {
  ggplot(dat, aes(x = Delta, y = pr_less_alpha, group = g,
                  shape = N, linetype = N, colour = true_std_beta)) +
    geom_point() +
    geom_line() +
    geom_hline(yintercept = unique(dat$alpha_sig)) +
    facet_grid(Nvar ~ .) +
    scale_x_continuous(breaks = seq(0, 0.2, by = 0.02)) +
    scale_y_continuous(breaks = ybreaks) +
    coord_cartesian(ylim = ylim) +
    labs(x = expression(Delta), y = expression("probability of p" < alpha))
}

resultsmatall <- sim_fix
#### plot with full axis:
plot_power_curves(resultsmatall, ylim = c(0, 1), ybreaks = seq(0, 1, by = 0.1))
#### plot with truncated axis:
plot_power_curves(resultsmatall, ylim = c(0, 0.20), ybreaks = seq(-0.01, 1, by = 0.05))

resultsmatall <- sim_rdm
#### plot with full axis:
plot_power_curves(resultsmatall, ylim = c(0, 1), ybreaks = seq(0, 1, by = 0.1))
#### plot with truncated axis:
plot_power_curves(resultsmatall, ylim = c(0, 0.20), ybreaks = seq(-0.01, 1, by = 0.05))
| /simstudy_1_v3.R | no_license | harlanhappydog/EquivTestStandardReg | R | false | false | 6,082 | r | ### Code for Simulation Study 1 ###
library("RCurl")
script <- getURL("https://raw.githubusercontent.com/harlanhappydog/EquivTestStandardReg/master/EquivTestStandardReg.R", ssl.verifypeer = FALSE)
eval(parse(text = script))
########################################
simstudy1 <- function(randomreg=FALSE, nSim=100){
resultsmat <- data.frame(
JJJindex=NA,
true_std_beta= NA,
power=NA,
as.matrix(expand.grid(Nvar=c(2,4),
sigma2=c(0.05, 0.15, 0.5, 0.50001),
Nsample=c(180, 540, 1000, 3500))))
resultsmat$true_std_beta <- NA
resultsmat$alpha_sig <- 0.05
resultsmat<-resultsmat[order(resultsmat$Nvar, resultsmat$sigma2),]
pval_list<-list()
problist<-list()
for(jjj in 1:dim(resultsmat)[1]){
print(jjj)
resultsmat[jjj, "JJJindex"] <- paste("jjj",jjj,sep="")
sigma2 <- resultsmat[jjj, "sigma2"]
N <- resultsmat[jjj, "Nsample"]
nVar <- resultsmat[jjj, "Nvar"]
basematrix <- data.frame(expand.grid(
X1=c(0,1),
X2=c(0,1),
X3=c(0,1),
X4=c(0,1)))
X1 <- rep(basematrix$X1,900000)
X2 <- rep(basematrix$X2,900000)
X3 <- rep(basematrix$X3,900000)
X4 <- rep(basematrix$X4,900000)
if(nVar==2){
epsilon <- rnorm(length(X1), 0, sqrt(sigma2))
X <- as.matrix(cbind(1, X1, X2))
betavec <- c(-0.2, 0.1, 0.2)
if(sigma2==0.50001){betavec <- c(-0.2, 0.0, 0.2)}
Y <- c(X%*%betavec) + epsilon
std_beta_hat <- betavec[-1]*(apply(X[,-1],2,sd)/sd(Y))
true_std_beta <- std_beta_hat[1]
resultsmat[jjj, "true_std_beta"] <- round(true_std_beta,3)
Xmatrix_ <- X[1:N,]
}
if(nVar==4){
epsilon <- rnorm(length(X1), 0, sqrt(sigma2))
X <- as.matrix(cbind(1, X1, X2, X3, X4))
betavec <- c(0.2, 0.1, 0.14, -0.1, -0.1)
if(sigma2== 0.50001){betavec <- c(0.2, 0.00, 0.14, -0.1, -0.1)}
Y <- c(X%*%betavec) + epsilon
std_beta_hat <- betavec[-1]*(apply(X[,-1],2,sd)/sd(Y))
true_std_beta <- std_beta_hat[1]
resultsmat[jjj, "true_std_beta"] <- round(true_std_beta, 3)
Xmatrix_ <- X[1:N,]
}
pval_list[[jjj]] <- list()
Deltavec <- seq(0.01, 0.25, 0.005)
for(iii in 1:nSim){
if(randomreg){Xmatrix_ <- X[sample(1:dim(X)[1], N, replace=TRUE),]}
epsilon <- rnorm(N, 0, sqrt(sigma2))
Y <- Xmatrix_%*%betavec + epsilon
# lmmod <- summary(lm(Y ~ Xmatrix_[,-1]))
# R2 <- lmmod$r.squared
# N <- length(Y); K <- dim(Xmatrix_[,-1])[2]
# beta_hat <- lmmod$coef[-1,][1,1]; SE_beta_hat <- lmmod$coef[-1,][1,2];
# std_beta_hat <- beta_hat*(apply(Xmatrix_[,-1],2,sd)/sd(Y))[1]
# R2XkXk <- unlist(lapply(c(1:K), function(k) {
# summary(lm(Xmatrix_[,-1][,k]~Xmatrix_[,-1][,-k]))$r.squared}))[1]
# SE_std_beta_FIX <- sqrt((1-R2)/((1-R2XkXk)*(N-K-1)))
# SE_std_beta_RDM <- DEL(X=Xmatrix_[,-1], y=Y)$SEs[1]
pval_list[[jjj]][[iii]]<-vector()
for(kk in 1:length(Deltavec)){
Delta <- Deltavec[kk]
# Fixed regressors:
if(!randomreg){
pval <- equivstandardBeta(Y=Y, Xmatrix= Xmatrix_[,-1], DELTA= Delta, random=FALSE)$pval[1]
}
# Random regressors:
if(randomreg){
pval <- equivstandardBeta(Y=Y, Xmatrix= Xmatrix_[,-1], DELTA= Delta, random=TRUE)$pval[1]
}
pval_list[[jjj]][[iii]][kk] <- pval
}
}
pvalmat <- as.data.frame(pval_list[[jjj]], col.names=NA, row.names=Deltavec)
problist[[jjj]] <- rowMeans(pvalmat<resultsmat[jjj, "alpha_sig"])
print(resultsmat[jjj,])
}
problist_df <- as.data.frame(problist, col.names=c(1:dim(resultsmat)[1]))
colnames(problist_df)<-paste("jjj",c(1:dim(resultsmat)[1]), sep="")
problist_df$Delta<-Deltavec
library(tidyr)
problist_long<-gather(problist_df,JJJindex,pr_less_alpha,jjj1:jjj32, factor_key=TRUE)
resultsmatall<-merge(resultsmat,problist_long, by="JJJindex",all=TRUE)
resultsmatall$true_std_beta <- as.factor(resultsmatall$true_std_beta)
resultsmatall$N <- as.factor(resultsmatall$Nsample)
resultsmatall$g <- as.factor(paste(as.character(resultsmatall$true_std_beta),as.character(resultsmatall$N), sep="_"))
resultsmatall <- transform(resultsmatall,
Nvar = factor(Nvar, levels = sort(unique(resultsmatall$Nvar)), c( (("K = 2")),(("K = 4"))) ))
resultsmatall$KK <- as.numeric(resultsmatall$Nvar)*2
resultsmatall$NN <- as.numeric(as.character(resultsmatall$N))
return(resultsmatall)}
########################################
sim_fix <- simstudy1(FALSE, 5000)
sim_rdm <- simstudy1(TRUE, 5000)
resultsmatall<-sim_fix
#### plot with full axis:
library(ggplot2)
qplot(x=Delta, y= pr_less_alpha, group= g, pch=N, lty=N, col= true_std_beta, data= resultsmatall)+geom_line()+ geom_hline(yintercept = unique(resultsmatall$alpha_sig))+ facet_grid(Nvar ~. ) + scale_x_continuous(breaks = seq(0, 0.2, by = 0.02)) + scale_y_continuous(breaks = seq(0, 1, by = 0.1))+ labs(x = expression(Delta),y = expression("probability of p" < alpha))+ coord_cartesian(ylim = c(0, 1))
#### plot with truncated axis:
library(ggplot2)
qplot(x=Delta, y= pr_less_alpha, group= g, pch=N, lty=N, col= true_std_beta, data= resultsmatall)+geom_line()+ geom_hline(yintercept = unique(resultsmatall$alpha_sig))+ facet_grid(Nvar ~ . ) + scale_x_continuous(breaks = seq(0, 0.2, by = 0.02)) + scale_y_continuous(breaks = seq(-0.01, 1, by = 0.05))+ coord_cartesian(ylim = c(0, 0.20))+ labs(x = expression(Delta), y = expression("probability of p" < alpha))
resultsmatall<-sim_rdm
#### plot with full axis:
library(ggplot2)
qplot(x=Delta, y= pr_less_alpha, group= g, pch=N, lty=N, col= true_std_beta, data= resultsmatall)+geom_line()+ geom_hline(yintercept = unique(resultsmatall$alpha_sig))+ facet_grid(Nvar ~. ) + scale_x_continuous(breaks = seq(0, 0.2, by = 0.02)) + scale_y_continuous(breaks = seq(0, 1, by = 0.1))+ labs(x = expression(Delta),y = expression("probability of p" < alpha))+ coord_cartesian(ylim = c(0, 1))
#### plot with truncated axis:
library(ggplot2)
qplot(x=Delta, y= pr_less_alpha, group= g, pch=N, lty=N, col= true_std_beta, data= resultsmatall)+geom_line()+ geom_hline(yintercept = unique(resultsmatall$alpha_sig))+ facet_grid(Nvar ~ . ) + scale_x_continuous(breaks = seq(0, 0.2, by = 0.02)) + scale_y_continuous(breaks = seq(-0.01, 1, by = 0.05))+ coord_cartesian(ylim = c(0, 0.20))+ labs(x = expression(Delta), y = expression("probability of p" < alpha))
|
################## PROJECT: 2) Santander Customer Transaction Prediction Project ##################
# Binary classification: predict whether a customer will make a transaction
# (column `target`) from the anonymised numeric features in train.csv.
#Remove the elements
rm(list = ls())
#Set working directory
# NOTE(review): hard-coded absolute path ties this script to one machine.
setwd("/home/nupur/Downloads/edwisor")
#Check working directory
getwd()
# Plotting / EDA helpers
library(ggplot2)
library(FactoMineR)
library(tidyverse)
library(moments)
library(DataExplorer)
# Modelling, resampling and evaluation
library(caret)
library(Matrix)
library(pdp)
library(mlbench)
library(caTools)
library(randomForest)
library(glmnet)
library(mlr)
library(vita)
library(rBayesianOptimization)
library(lightgbm)
library(pROC)
library(DMwR)
library(ROSE)
library(yardstick)
library(DataCombine)
library(rpart)
library(usdm)
# Load the data sets; blank and whitespace-only fields are read as NA.
train = read.csv("train.csv",header=T,na.strings = c(""," ","NA"))
test = read.csv("test.csv",header=T,na.strings = c(""," ","NA"))
############################## Exploratory data analysis ##########################################
# Column names of both data sets
colnames(train)
colnames(test)
# Number of observations and variables in the data sets
dim(train)
dim(test)
# Structure of data
str(train)
str(test)
# Summary of the data sets
summary(train)
summary(test)
# Look at the top 5 observations
head(train,5)
head(test,5)
# Data type of each variable
sapply(train,class)
# Change the target variable to a factor (classification label).
train$target<-as.factor(train$target)
# Percentage counts of the target classes
table(train$target)/length(train$target)*100
# Bar plot for the count of target classes
plot1<-ggplot(train,aes(target))+theme_bw()+geom_bar(stat='count',fill='lightgreen')
## The data are imbalanced: ~90% of customers will not make a transaction
## (target = 0) and ~10% will (target = 1).
# Check whether a data frame has duplicated column names.
# Bug fix: the original tested length(unique(colnames(x)) == ncol(x)),
# i.e. the length of an element-wise comparison of the names against a
# number, which always equals ncol(x) -- so the check always printed 'No'.
# The corrected expression counts the unique column names.
dup <- function(x){if(length(unique(colnames(x))) == ncol(x)){print('No')}else{print('Yes')}}
# Report the duplicate-column-name check for both data sets.
cat('Is there any duplicate column in train data:', dup(train),
    '\nIs there any duplicate column in test data:', dup(test), sep = ' ')
### No duplicates
# Visualisation
#Distribution of train attributes from 3 to 102
# Class-conditional density of each train predictor (columns 3:102).
# Bug fix: these loops referenced `train_df`/`test_df`, which are never
# defined in this script -- the data frames are loaded as `train`/`test`.
for (var in names(train)[c(3:102)]){
  target <- train$target
  plot <- ggplot(train, aes(x = train[[var]], fill = target)) +
    geom_density(kernel = 'gaussian') + ggtitle(var) + theme_classic()
  print(plot)
}
#Distribution of train attributes from 103 to 202
for (var in names(train)[c(103:202)]){
  target <- train$target
  plot <- ggplot(train, aes(x = train[[var]], fill = target)) +
    geom_density(kernel = 'gaussian') + ggtitle(var) + theme_classic()
  print(plot)
}
#Distribution of test attributes from 2 to 101
plot_density(test[,c(2:101)], ggtheme = theme_classic(), geom_density_args = list(color='blue'))
#Distribution of test attributes from 102 to 201
plot_density(test[,c(102:201)], ggtheme = theme_classic(), geom_density_args = list(color='blue'))
################################## DATA PREPROCESSING ###############################################
######################## Missing Values Analysis #####################################
# Per-column NA percentage for the training data, sorted in descending
# order; columns are (Columns, Missing_percentage) with rownames 1..n.
na_pct <- 100 * colSums(is.na(train)) / nrow(train)
missing_val <- data.frame(
  Columns = names(na_pct),
  Missing_percentage = unname(na_pct))
missing_val <- missing_val[order(-missing_val$Missing_percentage), ]
row.names(missing_val) <- NULL
## No missing values present.
############################## OUTLIER ANALYSIS ##########################################
# Outlier analysis for the train dataset (box plots per numeric variable).
numeric_index = sapply(train,is.numeric) #selecting only numeric
numeric_data = train[,numeric_index]
cnames = colnames(numeric_data)
# Build one box plot per numeric variable; plots are stored in the global
# environment as gn1, gn2, ... via assign() so they can be arranged below.
for (i in 1:length(cnames))
{
assign(paste0("gn",i), ggplot(aes_string(y = (cnames[i]), x = "target"), data = subset(train))+
stat_boxplot(geom = "errorbar", width = 0.5) +
geom_boxplot(outlier.colour="red", fill = "grey" ,outlier.shape=18,
outlier.size=1, notch=FALSE) +
theme(legend.position="bottom")+
labs(y=cnames[i],x="target")+
ggtitle(paste("Box plot of target for",cnames[i])))
}
# Visualise some of the variables
gridExtra::grid.arrange(gn3,gn4,gn5,ncol=3)
gridExtra::grid.arrange(gn6,gn7,gn8,ncol=3)
gridExtra::grid.arrange(gn9,gn10,gn11,ncol=3)
# Loop to remove outlier rows (boxplot.stats fences) from each train variable.
# NOTE(review): rows are dropped iteratively per variable, so the result
# depends on the column order and can remove a large share of the data.
for(i in cnames){
print(i)
val = train[,i][train[,i] %in% boxplot.stats(train[,i])$out]
#print(length(val))
train = train[which(!train[,i] %in% val),]
}
numeric_index = sapply(test,is.numeric) #selecting only numeric
numeric_data = test[,numeric_index]
cnames = colnames(numeric_data)
# Loop to remove outliers from the test dataset variables.
# NOTE(review): dropping rows from the scoring/test set is questionable --
# those customers simply get no prediction; confirm this is intended.
for(i in cnames){
print(i)
val = test[,i][test[,i] %in% boxplot.stats(test[,i])$out]
#print(length(val))
test = test[which(!test[,i] %in% val),]
}
############################## FEATURE SELECTION ##########################################
# Correlations in train data.
# NOTE(review): as.numeric() on a factor yields the level codes (1/2),
# not the original 0/1 labels -- harmless for correlation, but be aware.
train$target<-as.numeric(train$target)
train_correlations<-cor(train[,c(2:202)])
train_correlations
## We observe that the correlations between the train attributes are very small.
# Correlations in test data
test_correlations<-cor(test[,c(2:201)])
test_correlations
## We observe that the correlations between the test attributes are very small.
###################################Model Development#######################################
# Split the data into train/validation sets (80/20 random row sample).
set.seed(689)
#train.index<-createDataPartition(train_df$target,p=0.8,list=FALSE)
train.index<-sample(1:nrow(train),0.8*nrow(train))
#train data
train.data<-train[train.index,]
#validation data
valid.data<-train[-train.index,]
#dimension of train data
dim(train.data)
#dimension of validation data
dim(valid.data)
#target classes in train data
table(train.data$target)
#target classes in validation data
table(valid.data$target)
table(train$target)
##################### Decision tree for classification #####################################
# Develop a boosted C5.0 rule-based model on the training data.
# NOTE(review): C5.0() comes from the C50 package, which is not loaded
# above -- add library(C50) or this call will fail.
C50_model = C5.0(target ~., train, trials = 100, rules = TRUE)
#Summary of DT model
summary(C50_model)
#write rules into disk
write(capture.output(summary(C50_model)), "c50Rules.txt")
# Predict for the test cases (column 2 dropped).
# NOTE(review): `test` was loaded from test.csv, which may not carry a
# `target` column -- if so, the confusion-matrix lines below will fail
# and evaluation should use valid.data instead; please confirm.
C50_Predictions = predict(C50_model, test[,-2], type = "class")
##Evaluate the performance of classification model
ConfMatrix_C50 = table(test$target, C50_Predictions)
confusionMatrix(ConfMatrix_C50)
# NOTE(review): error_metric() is not defined anywhere in this script.
error_metric(ConfMatrix_C50)
#Accuracy: 0.87
# Area under ROC curve
roc.curve(test$target,C50_Predictions)
#Area under the curve : 0.53
########################## Random Forest ####################################################
RF_model = randomForest(target ~ ., train, importance = TRUE, ntree = 500)
# Predict test data using the random forest model.
# NOTE(review): test[,-17] drops column 17 here while the C5.0 block
# dropped column 2 -- the two sections are inconsistent; verify which
# column (if any) should be excluded.
RF_Predictions = predict(RF_model, test[,-17])
##Evaluate the performance of classification model
ConfMatrix_RF = table(test$target, RF_Predictions)
confusionMatrix(ConfMatrix_RF)
error_metric(ConfMatrix_RF)
# accuracy = 0.88
roc.curve(test$target,RF_Predictions)
#Area under the curve for train_:0.54
######################################## Logistic Regression ###############################
# Fit a logistic regression on all predictors.
# NOTE(review): `target ~ .` also includes the ID column if present --
# confirm whether identifiers should be dropped before fitting.
logit_model = glm(target ~ ., data = train, family = "binomial")
#summary of the model
summary(logit_model)
#predict using logistic regression
logit_Predictions = predict(logit_model, newdata = test, type = "response")
# Convert probabilities to class labels at the 0.5 threshold.
logit_Predictions = ifelse(logit_Predictions > 0.5, 1, 0)
##Evaluate the performance of classification model
# Bug fix: the confusion matrix was assigned to ConfMatrix_RF but then
# read back as the undefined ConfMatrix_LR; one consistent name is used.
ConfMatrix_LR = table(test$target, logit_Predictions)
ConfMatrix_LR
error_metric(ConfMatrix_LR)
# accuracy = 0.9
roc.curve(test$target,logit_Predictions>0.5)
#Area under the curve for train: 0.78
############################## lightgbm #################################################
# Convert the data frames to matrices (lightgbm requires matrix input).
# Columns dropped: 1 = ID column, 2 = target (features only in X_*).
set.seed(5432)
X_train<-as.matrix(train.data[,-c(1,2)])
y_train<-as.matrix(train.data$target)
X_valid<-as.matrix(valid.data[,-c(1,2)])
y_valid<-as.matrix(valid.data$target)
test_data<-as.matrix(test[,-c(1)])
# Training data.
# NOTE(review): the variable name `lgb.train` shadows the lightgbm
# function lgb.train(); R still resolves the function in call position,
# but renaming the dataset object would be clearer.
lgb.train <- lgb.Dataset(data=X_train, label=y_train)
#Validation data
lgb.valid <- lgb.Dataset(data=X_valid,label=y_valid)
# Hyperparameters: binary objective with AUC metric; is_unbalance = TRUE
# compensates for the ~90/10 class imbalance.
set.seed(653)
lgb.grid = list(objective = "binary",
                metric = "auc",
                boost='gbdt',
                max_depth=-1,
                boost_from_average='false',
                min_sum_hessian_in_leaf = 12,
                feature_fraction = 0.05,
                bagging_fraction = 0.45,
                bagging_freq = 5,
                learning_rate=0.02,
                tree_learner='serial',
                num_leaves=20,
                num_threads=5,
                min_data_in_bin=150,
                min_gain_to_split = 30,
                min_data_in_leaf = 90,
                verbosity=-1,
                is_unbalance = TRUE)
# Train the lightgbm model, monitoring train and validation AUC with
# early stopping after 5000 non-improving rounds.
set.seed(7663)
lgbm.model <- lgb.train(params = lgb.grid, data = lgb.train, nrounds =10000,eval_freq =1000,
                        valids=list(val1=lgb.train,val2=lgb.valid),early_stopping_rounds = 5000)
# Predicted transaction probabilities for the test data.
set.seed(6532)
lgbm_pred_prob <- predict(lgbm.model,test_data)
print(lgbm_pred_prob)
#Convert to binary output (1 and 0) with threshold 0.5
lgbm_pred<-ifelse(lgbm_pred_prob>0.5,1,0)
print(lgbm_pred)
set.seed(6521)
#feature importance plot
tree_imp <- lgb.importance(lgbm.model, percentage = TRUE)
lgb.plot.importance(tree_imp, top_n = 50, measure = "Frequency", left_margin = 10)
## lightgbm performs well on this imbalanced data compared to the other
## models, based on the ROC-AUC scores.
##Final submission
# Bug fix: `test_df` is never defined in this script -- the test set is
# loaded as `test`.
sub_df<-data.frame(ID_code=test$ID_code,lgb_predict_prob=lgbm_pred_prob,lgb_predict=lgbm_pred)
write.csv(sub_df,'submission.CSV',row.names=FALSE)
head(sub_df)
| /santander.R | no_license | abhi-eng/Santander-customer-transaction-prediction | R | false | false | 10,301 | r | ################## PROJECT: 2) Satander Customer Transaction Prediction Project ################## ################## ##################
#Remove the elements
rm(list = ls())
#Set working directory
setwd("/home/nupur/Downloads/edwisor")
#Check working directory
getwd()
library(ggplot2)
library(FactoMineR)
library(tidyverse)
library(moments)
library(DataExplorer)
library(caret)
library(Matrix)
library(pdp)
library(mlbench)
library(caTools)
library(randomForest)
library(glmnet)
library(mlr)
library(vita)
library(rBayesianOptimization)
library(lightgbm)
library(pROC)
library(DMwR)
library(ROSE)
library(yardstick)
library(DataCombine)
library(rpart)
library(usdm)
# loading datasets
train = read.csv("train.csv",header=T,na.strings = c(""," ","NA"))
test = read.csv("test.csv",header=T,na.strings = c(""," ","NA"))
############################## Exploratory data analysis ##########################################
#Getting the column names of the dataset
colnames(train)
colnames(test)
#Getting the number of variables and obervation in the datasets
dim(train)
dim(test)
# Structure of data
str(train)
str(test)
#Summary of datasets
summary(train)
summary(test)
#look at top 5 observations
head(train,5)
head(test,5)
#data type of variables
sapply(train,class)
#changing datatype of target variable to factor datatype.
train$target<-as.factor(train$target)
#Percenatge counts of target classes
table(train$target)/length(train$target)*100
#Bar plot for count of target classes
plot1<-ggplot(train,aes(target))+theme_bw()+geom_bar(stat='count',fill='lightgreen')
##We have a unbalanced data,where 90% of the data is the data of number of customers those will not make a transaction and 10% of the data is those who will make a transaction.
#checking for duplicates
dup<-function(x){if(length(unique(colnames(x))==ncol(x))){print('No')}else{print('Yes')}}
cat('Is there any duplicate column in train data:', dup(train),
'\nIs there any duplicate column in test data:', dup(test), sep = ' ')
### No duplicates
#Visulisation
#Distribution of train attributes from 3 to 102
for (var in names(train_df)[c(3:102)]){
target<-train_df$target
plot<-ggplot(train_df, aes(x=train_df[[var]],fill=target)) +
geom_density(kernel='gaussian') + ggtitle(var)+theme_classic()
print(plot)
}
#Distribution of train attributes from 103 to 202
for (var in names(train_df)[c(103:202)]){
target<-train_df$target
plot<-ggplot(train_df, aes(x=train_df[[var]], fill=target)) +
geom_density(kernel='gaussian') + ggtitle(var)+theme_classic()
print(plot)
}
#Distribution of test attributes from 2 to 101
plot_density(test_df[,c(2:101)], ggtheme = theme_classic(),geom_density_args = list(color='blue'))
#Distribution of test attributes from 102 to 201
plot_density(test_df[,c(102:201)], ggtheme = theme_classic(),geom_density_args = list(color='blue'))
################################## DATA PREPROCESSING ###############################################
######################## Missing Values Analysis #####################################
#checking for missing values
missing_val = data.frame(apply(train,2,function(x){sum(is.na(x))}))
#creating new column contains name of columns
missing_val$Columns = row.names(missing_val)
#changing name of column containg missing values
names(missing_val)[1] = "Missing_percentage"
#changing missing values to missing percentages
missing_val$Missing_percentage = (missing_val$Missing_percentage/nrow(train)) * 100
#arrange missing percentage in descending order
missing_val = missing_val[order(-missing_val$Missing_percentage),]
#removing row names to index
row.names(missing_val) = NULL
#interchange columns
missing_val = missing_val[,c(2,1)]
## No missing values present.
############################## OUTLIER ANALYSIS ##########################################
#Outlier analysis for test dataset.
numeric_index = sapply(train,is.numeric) #selecting only numeric
numeric_data = train[,numeric_index]
cnames = colnames(numeric_data)
for (i in 1:length(cnames))
{
assign(paste0("gn",i), ggplot(aes_string(y = (cnames[i]), x = "target"), data = subset(train))+
stat_boxplot(geom = "errorbar", width = 0.5) +
geom_boxplot(outlier.colour="red", fill = "grey" ,outlier.shape=18,
outlier.size=1, notch=FALSE) +
theme(legend.position="bottom")+
labs(y=cnames[i],x="target")+
ggtitle(paste("Box plot of target for",cnames[i])))
}
#visualise some variables
gridExtra::grid.arrange(gn3,gn4,gn5,ncol=3)
gridExtra::grid.arrange(gn6,gn7,gn8,ncol=3)
gridExtra::grid.arrange(gn9,gn10,gn11,ncol=3)
# #loop to remove outliers from train dataset variables
for(i in cnames){
print(i)
val = train[,i][train[,i] %in% boxplot.stats(train[,i])$out]
#print(length(val))
train = train[which(!train[,i] %in% val),]
}
numeric_index = sapply(test,is.numeric) #selecting only numeric
numeric_data = test[,numeric_index]
cnames = colnames(numeric_data)
# #loop to remove outliers from test dataset variables
for(i in cnames){
print(i)
val = test[,i][test[,i] %in% boxplot.stats(test[,i])$out]
#print(length(val))
test = test[which(!test[,i] %in% val),]
}
############################## FEATURE SELECTION ##########################################
#Correlations in train data
#convert factor to int.
train$target<-as.numeric(train$target)
train_correlations<-cor(train[,c(2:202)])
train_correlations
##We can observed that the correlation between the train attributes is very small.
#Correlations in test data
test_correlations<-cor(test[,c(2:201)])
test_correlations
##We can observed that the correlation between the test attributes is very small.
###################################Model Development#######################################
#Split the data using CreateDataPartition
set.seed(689)
#train.index<-createDataPartition(train_df$target,p=0.8,list=FALSE)
train.index<-sample(1:nrow(train),0.8*nrow(train))
#train data
train.data<-train[train.index,]
#validation data
valid.data<-train[-train.index,]
#dimension of train data
dim(train.data)
#dimension of validation data
dim(valid.data)
#target classes in train data
table(train.data$target)
#target classes in validation data
table(valid.data$target)
table(train$target)
##################### Decision tree for classification #####################################
#Develop Model on training data
C50_model = C5.0(target ~., train, trials = 100, rules = TRUE)
#Summary of DT model
summary(C50_model)
#write rules into disk
write(capture.output(summary(C50_model)), "c50Rules.txt")
#Lets predict for test cases
C50_Predictions = predict(C50_model, test[,-2], type = "class")
##Evaluate the performance of classification model
ConfMatrix_C50 = table(test$target, C50_Predictions)
confusionMatrix(ConfMatrix_C50)
error_metric(ConfMatrix_C50)
#Accuracy: 0.87
# Area under ROC curve
roc.curve(test$target,C50_Predictions)
#Area under the curve : 0.53
########################## Random Forest ####################################################
RF_model = randomForest(target ~ ., train, importance = TRUE, ntree = 500)
#Predict test data using random forest model
RF_Predictions = predict(RF_model, test[,-17])
##Evaluate the performance of classification model
ConfMatrix_RF = table(test$target, RF_Predictions)
confusionMatrix(ConfMatrix_RF)
error_metric(ConfMatrix_RF)
# accuracy = 0.88
roc.curve(test$target,RF_Predictions)
# Area under the curve for train: 0.54
######################################## Logistic Regression ###############################
# Fit a binomial GLM (logistic regression) using all predictors.
logit_model <- glm(target ~ ., data = train, family = "binomial")
# Summary of the fitted model
summary(logit_model)
# Predicted probabilities on the test set
logit_Predictions <- predict(logit_model, newdata = test, type = "response")
# Convert probabilities to class labels with a 0.5 threshold
logit_Predictions <- ifelse(logit_Predictions > 0.5, 1, 0)
## Evaluate the performance of the classification model
# BUG FIX: the confusion matrix was previously assigned to ConfMatrix_RF,
# while the lines below referenced the undefined ConfMatrix_LR.
ConfMatrix_LR <- table(test$target, logit_Predictions)
ConfMatrix_LR
error_metric(ConfMatrix_LR)
# accuracy = 0.9
roc.curve(test$target, logit_Predictions > 0.5)
# Area under the curve for train: 0.78
############################## lightgbm #################################################
# Convert data frames to matrices (lightgbm requires matrix input).
# Columns 1-2 of train/valid (ID + target) are dropped from the features.
set.seed(5432)
X_train <- as.matrix(train.data[, -c(1, 2)])
y_train <- as.matrix(train.data$target)
X_valid <- as.matrix(valid.data[, -c(1, 2)])
y_valid <- as.matrix(valid.data$target)
test_data <- as.matrix(test[, -c(1)])
# Training data
lgb.train <- lgb.Dataset(data = X_train, label = y_train)
# Validation data
lgb.valid <- lgb.Dataset(data = X_valid, label = y_valid)
# Hyperparameters (hand-tuned; is_unbalance compensates for the skewed target)
set.seed(653)
lgb.grid <- list(objective = "binary",
                 metric = "auc",
                 boost = 'gbdt',
                 max_depth = -1,
                 boost_from_average = 'false',
                 min_sum_hessian_in_leaf = 12,
                 feature_fraction = 0.05,
                 bagging_fraction = 0.45,
                 bagging_freq = 5,
                 learning_rate = 0.02,
                 tree_learner = 'serial',
                 num_leaves = 20,
                 num_threads = 5,
                 min_data_in_bin = 150,
                 min_gain_to_split = 30,
                 min_data_in_leaf = 90,
                 verbosity = -1,
                 is_unbalance = TRUE)
# Train the lightgbm model with early stopping on the validation set
set.seed(7663)
lgbm.model <- lgb.train(params = lgb.grid, data = lgb.train, nrounds = 10000, eval_freq = 1000,
                        valids = list(val1 = lgb.train, val2 = lgb.valid), early_stopping_rounds = 5000)
# lightgbm model performance on test data
set.seed(6532)
lgbm_pred_prob <- predict(lgbm.model, test_data)
print(lgbm_pred_prob)
# Convert to binary output (1 and 0) with threshold 0.5
lgbm_pred <- ifelse(lgbm_pred_prob > 0.5, 1, 0)
print(lgbm_pred)
set.seed(6521)
# Feature importance plot
tree_imp <- lgb.importance(lgbm.model, percentage = TRUE)
lgb.plot.importance(tree_imp, top_n = 50, measure = "Frequency", left_margin = 10)
## lightgbm performs well on this imbalanced data compared to the other models, by ROC AUC.
## Final submission
sub_df <- data.frame(ID_code = test_df$ID_code, lgb_predict_prob = lgbm_pred_prob, lgb_predict = lgbm_pred)
write.csv(sub_df, 'submission.CSV', row.names = FALSE)
head(sub_df)
|
# basefn2.R
# Bootstrap script: loads the shared legacy "platform" R helpers, then
# defines book-specific configuration accessors used by the plotting code.
# NOTE(review): absolute Windows paths tie this script to a single machine;
# consider making the platform directory configurable.
source('C:/Users/brian/PycharmProjects/platform/legacy/R/basefn2.R')
source('C:/Users/brian/PycharmProjects/platform/legacy/R/utils.R')
source('C:/Users/brian/PycharmProjects/platform/legacy/R/platformstartup.R')
# Output resolution (DPI) for generated images.
GetRes <- function() {return(300)}
# Directory where generated figures are written.
GetImgDir <- function() {return ("C:\\Users\\brian\\Documents\\books\\ereport07_MMT_primer_project\\images\\")}
# Footer text appended to figures (empty string = no footer).
GetFootText <- function() {return("")}
| /book_07_mmt_expansion/startup.R | permissive | brianr747/brian_books | R | false | false | 413 | r |
# basefn2.R
# Bootstrap script: loads the shared legacy "platform" R helpers, then
# defines book-specific configuration accessors used by the plotting code.
# NOTE(review): absolute Windows paths tie this script to a single machine;
# consider making the platform directory configurable.
source('C:/Users/brian/PycharmProjects/platform/legacy/R/basefn2.R')
source('C:/Users/brian/PycharmProjects/platform/legacy/R/utils.R')
source('C:/Users/brian/PycharmProjects/platform/legacy/R/platformstartup.R')
# Output resolution (DPI) for generated images.
GetRes <- function() {return(300)}
# Directory where generated figures are written.
GetImgDir <- function() {return ("C:\\Users\\brian\\Documents\\books\\ereport07_MMT_primer_project\\images\\")}
# Footer text appended to figures (empty string = no footer).
GetFootText <- function() {return("")}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repoverlap.R
\name{repOverlap}
\alias{repOverlap}
\title{General function for the repertoire overlap evaluation.}
\usage{
repOverlap(
.data,
.method = c("exact", "hamm", "lev", "jaccard", "morisita", "tversky", "overlap",
"horn"),
.seq = c("nuc", "aa"),
.quant = c("read.count", "umi.count", "read.prop", "umi.prop"),
.vgene = F,
.norm = T,
.a = 0.5,
.b = 0.5,
.do.unique = T,
.verbose = T
)
}
\arguments{
\item{.data}{List of clonesets.}
\item{.method}{Which method to use for the overlap evaluation. See "Details" for methods.}
\item{.seq}{Which clonotype sequences to use for the overlap: "nuc" for "CDR3.nucleotide.sequence", "aa" for
"CDR3.amino.acid.sequence".}
\item{.quant}{Which column to use for the quantity of clonotypes: "read.count" for the "Read.count" column,
"umi.count" for the "Umi.count" column, "read.prop" for the "Read.proportion" column, "umi.prop" for
the "Umi.proportion" column. Used in "morisita" and "horn".}
\item{.vgene}{If TRUE, then use V genes in computing shared or similar clonotypes. Used in all methods.}
\item{.norm}{If TRUE, then compute the normalised number of shared clonotypes. Used in "exact".}
\item{.a, .b}{Alpha and beta parameters for "tversky". Default values give the Jaccard index measure.}
\item{.do.unique}{If TRUE, then remove duplicates from the input data, but add their quantities to their clones.}
\item{.verbose}{If TRUE, then output the data processing progress bar.}
}
\description{
General interface to all cloneset overlap functions.
}
\details{
You can see a more detailed description for each overlap method at \link{intersectClonesets} and \link{similarity}.
Parameter \code{.method} can have one of the following value each corresponding to the specific method:
- "exact" for the shared number of clonotypes (basic function \code{intersectClonesets(..., .type = "..e")}).
- "hamm" for the number of similar clonotypes by the Hamming distance (basic function \code{intersectClonesets(..., .type = "..h")}).
- "lev" for the number of similar clonotypes by the Levenshtein distance (basic function \code{intersectClonesets(..., .type = "..l")}).
- "jaccard" for the Jaccard index (basic function \code{jaccard.index}).
- "morisita" for the Morisita's overlap index (basic function \code{morisita.index}).
- "tversky" for the Tversky index (basic function \code{tversky.index}).
- "overlap" for the overlap coefficient (basic function \code{overlap.coef}).
- "horn" for the Horn's index (basic function \code{horn.index}).
}
\examples{
\dontrun{
data(twb)
repOverlap(twb, "exact", .seq = "nuc", .vgene = F)
repOverlap(twb, "morisita", .seq = "aa", .vgene = T, .quant = "umi.count")
ov <- repOverlap(twb)
ov[is.na(ov)] <- 0
vis.pca(prcomp(ov, scale. = T), list(A = c(1, 2), B = c(3, 4)))
}
}
\seealso{
\link{intersectClonesets}, \link{similarity}, \link{repDiversity}
}
| /fuzzedpackages/tcR/man/repOverlap.Rd | no_license | akhikolla/testpackages | R | false | true | 2,940 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repoverlap.R
\name{repOverlap}
\alias{repOverlap}
\title{General function for the repertoire overlap evaluation.}
\usage{
repOverlap(
.data,
.method = c("exact", "hamm", "lev", "jaccard", "morisita", "tversky", "overlap",
"horn"),
.seq = c("nuc", "aa"),
.quant = c("read.count", "umi.count", "read.prop", "umi.prop"),
.vgene = F,
.norm = T,
.a = 0.5,
.b = 0.5,
.do.unique = T,
.verbose = T
)
}
\arguments{
\item{.data}{List of clonesets.}
\item{.method}{Which method to use for the overlap evaluation. See "Details" for methods.}
\item{.seq}{Which clonotype sequences to use for the overlap: "nuc" for "CDR3.nucleotide.sequence", "aa" for
"CDR3.amino.acid.sequence".}
\item{.quant}{Which column to use for the quantity of clonotypes: "read.count" for the "Read.count" column,
"umi.count" for the "Umi.count" column, "read.prop" for the "Read.proportion" column, "umi.prop" for
the "Umi.proportion" column. Used in "morisita" and "horn".}
\item{.vgene}{If TRUE, then use V genes in computing shared or similar clonotypes. Used in all methods.}
\item{.norm}{If TRUE, then compute the normalised number of shared clonotypes. Used in "exact".}
\item{.a, .b}{Alpha and beta parameters for "tversky". Default values give the Jaccard index measure.}
\item{.do.unique}{If TRUE, then remove duplicates from the input data, but add their quantities to their clones.}
\item{.verbose}{If TRUE, then output the data processing progress bar.}
}
\description{
General interface to all cloneset overlap functions.
}
\details{
You can see a more detailed description for each overlap method at \link{intersectClonesets} and \link{similarity}.
Parameter \code{.method} can have one of the following value each corresponding to the specific method:
- "exact" for the shared number of clonotypes (basic function \code{intersectClonesets(..., .type = "..e")}).
- "hamm" for the number of similar clonotypes by the Hamming distance (basic function \code{intersectClonesets(..., .type = "..h")}).
- "lev" for the number of similar clonotypes by the Levenshtein distance (basic function \code{intersectClonesets(..., .type = "..l")}).
- "jaccard" for the Jaccard index (basic function \code{jaccard.index}).
- "morisita" for the Morisita's overlap index (basic function \code{morisita.index}).
- "tversky" for the Tversky index (basic function \code{tversky.index}).
- "overlap" for the overlap coefficient (basic function \code{overlap.coef}).
- "horn" for the Horn's index (basic function \code{horn.index}).
}
\examples{
\dontrun{
data(twb)
repOverlap(twb, "exact", .seq = "nuc", .vgene = F)
repOverlap(twb, "morisita", .seq = "aa", .vgene = T, .quant = "umi.count")
ov <- repOverlap(twb)
ov[is.na(ov)] <- 0
vis.pca(prcomp(ov, scale. = T), list(A = c(1, 2), B = c(3, 4)))
}
}
\seealso{
\link{intersectClonesets}, \link{similarity}, \link{repDiversity}
}
|
######################################################################################################################
# Function: is.DataModel.
# Argument: an object.
# Description: Return if the object is of class DataModel
is.DataModel = function(arg){
  # inherits() is the idiomatic S3 class test (equivalent to checking
  # whether "DataModel" appears anywhere in class(arg)).
  return(inherits(arg, "DataModel"))
} | /R/is.DataModel.R | no_license | gpaux/Mediana | R | false | false | 298 | r | ######################################################################################################################
# Function: is.DataModel.
# Argument: an object.
# Description: Return if the object is of class DataModel
is.DataModel = function(arg){
  # inherits() is the idiomatic S3 class test (equivalent to checking
  # whether "DataModel" appears anywhere in class(arg)).
  return(inherits(arg, "DataModel"))
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{drop_na_columns}
\alias{drop_na_columns}
\title{Drop NA columns}
\usage{
drop_na_columns(data)
}
\arguments{
\item{data}{a \code{data frame}}
}
\value{
\code{tibble}
}
\description{
This function drops NA
columns from a specified data frame.
}
\examples{
tibble(nameFirm = 'Goldman Sachs', countSuperHeros = NA, countCriminals = 0, countFinedEmployees = 10) \%>\% drop_na_columns()
}
\seealso{
Other utility function: \code{\link{get_class_df}},
\code{\link{tidy_column_formats}},
\code{\link{tidy_column_relations}}
}
\concept{utility function}
| /man/drop_na_columns.Rd | permissive | maduhu/fundManageR | R | false | true | 642 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{drop_na_columns}
\alias{drop_na_columns}
\title{Drop NA columns}
\usage{
drop_na_columns(data)
}
\arguments{
\item{data}{a \code{data frame}}
}
\value{
\code{tibble}
}
\description{
This function drops NA
columns from a specified data frame.
}
\examples{
tibble(nameFirm = 'Goldman Sachs', countSuperHeros = NA, countCriminals = 0, countFinedEmployees = 10) \%>\% drop_na_columns()
}
\seealso{
Other utility function: \code{\link{get_class_df}},
\code{\link{tidy_column_formats}},
\code{\link{tidy_column_relations}}
}
\concept{utility function}
|
#' Retrieves Sotkanet data according to the query arguments and combines the
#' indicator, region, and overall data into a single table
#'
#' Arguments:
#' @param indicators Dataset identifier(s)
#' @param years vector of years c(2010, 2012, ... )
#' @param genders vector of genders ('male' | 'female' | 'total')
#' @param regions pick selected regions only (default: all regions)
#' @param region.category return selected regions category (for options, see:
#' unique(SotkanetRegions(type = "table")$region.category));
#' "ALUEHALLINTOVIRASTO, "ERVA", "EURALUEET", "EUROOPPA", "KUNTA",
#' "MAA", "MAAKUNTA", "NUTS1", "POHJOISMAAT", "SAIRAANHOITOPIIRI",
#' "SEUTUKUNTA", "SUURALUE"
#' @param verbose verbose
#'
#' Returns:
#' @return data.frame
#'
#' @export
#' @references
#' See citation("sotkanet")
#' @author Einari Happonen. Maintainer: Louhos \email{louhos@@googlegroups.com}
#' @examples # dat <- GetDataSotkanet(indicators = 10013)
#' @keywords utilities
GetDataSotkanet <- function (indicators, years = 1991:2015, genders = c("total"), regions = NULL, region.category = NULL, verbose = TRUE) {
  # Look-up table of all Sotkanet indicators (used to annotate the result).
  indicator.info <- SotkanetIndicators(type = "table")
  # Fetch each requested indicator in turn, keyed by its identifier.
  per.indicator <- list()
  for (ind in indicators) {
    if (verbose) {
      message(paste("Retrieving indicator", ind))
    }
    per.indicator[[as.character(ind)]] <-
      GetDataSotkanetSingleIndicator(ind, years = years, genders = genders,
                                     regions = regions,
                                     region.category = region.category)
  }
  # Stack the per-indicator tables into a single data frame.
  combined.data <- do.call("rbind", per.indicator)
  # Attach the organisation title corresponding to each indicator.
  org.col <- "indicator.organization.title.fi"
  combined.data[[org.col]] <- indicator.info[match(combined.data$indicator,
                                                   indicator.info$indicator),
                                             org.col]
  combined.data
}
#' Description:
#' GetDataSotkanetSingleIndicator retrieves Sotkanet data
#' for given indicator according to the query arguments and combines
#' indicator, region, and overall data into one table
#'
#' Arguments:
#' @param indicator Dataset identifier
#' @param years vector of years c(2010, 2012, ... )
#' @param genders vector of genders ('male' | 'female' | 'total')
#' @param regions return selected regions only
#' @param region.category return selected regions category (for options, see:
#' unique(SotkanetRegions(type = "table")$region.category));
#' "ALUEHALLINTOVIRASTO, "ERVA", "EURALUEET", "EUROOPPA", "KUNTA",
#' "MAA", "MAAKUNTA", "NUTS1", "POHJOISMAAT", "SAIRAANHOITOPIIRI",
#' "SEUTUKUNTA", "SUURALUE"
#'
#' Returns:
#' @return sotkanet data table
#'
#' @references
#' See citation("sotkanet")
#' @author Einari Happonen. Maintainer: Louhos/Opasnet \email{louhos@@googlegroups.com}
#' @examples #
#' @keywords utilities
GetDataSotkanetSingleIndicator <- function (indicator, years = 1990:2000, genders = "total", regions = NULL, region.category = NULL) {
  # Helper: Sotkanet encodes decimals with a comma; convert to numeric.
  comma2num <- function(x) as.numeric(gsub("\\,", "\\.", as.character(x)))
  # FIXME: is it possible to specify already in query which regions we select
  res <- SotkanetData(indicator = indicator, years = years, genders = genders)
  # Attach the indicator title.
  res <- merge(SotkanetIndicators(indicator)[, c("indicator", "indicator.title.fi")], res)
  # Attach region metadata.
  res <- merge(SotkanetRegions()[, c("region", "region.title.fi", "region.code", "region.category")], res)
  # Normalise decimal separators in the value columns.
  res$primary.value <- comma2num(res$primary.value)
  res$absolute.value <- comma2num(res$absolute.value)
  # Optionally restrict to the requested region categories / regions.
  if (!is.null(region.category)) {
    res <- res[res$region.category %in% region.category, ]
  }
  if (!is.null(regions)) {
    res <- res[res$region.title.fi %in% regions, ]
  }
  res
}
| /sotkanet/R/GetDataSotkanet.R | no_license | ingted/R-Examples | R | false | false | 4,113 | r | #' Retrieves Sotkanet data according to the query arguments and combines the
#' indicator, region, and overall data into a single table
#'
#' Arguments:
#' @param indicators Dataset identifier(s)
#' @param years vector of years c(2010, 2012, ... )
#' @param genders vector of genders ('male' | 'female' | 'total')
#' @param regions pick selected regions only (default: all regions)
#' @param region.category return selected regions category (for options, see:
#' unique(SotkanetRegions(type = "table")$region.category));
#' "ALUEHALLINTOVIRASTO, "ERVA", "EURALUEET", "EUROOPPA", "KUNTA",
#' "MAA", "MAAKUNTA", "NUTS1", "POHJOISMAAT", "SAIRAANHOITOPIIRI",
#' "SEUTUKUNTA", "SUURALUE"
#' @param verbose verbose
#'
#' Returns:
#' @return data.frame
#'
#' @export
#' @references
#' See citation("sotkanet")
#' @author Einari Happonen. Maintainer: Louhos \email{louhos@@googlegroups.com}
#' @examples # dat <- GetDataSotkanet(indicators = 10013)
#' @keywords utilities
GetDataSotkanet <- function (indicators, years = 1991:2015, genders = c("total"), regions = NULL, region.category = NULL, verbose = TRUE) {
  # Look-up table of all Sotkanet indicators (used to annotate the result).
  indicator.info <- SotkanetIndicators(type = "table")
  # Fetch each requested indicator in turn, keyed by its identifier.
  per.indicator <- list()
  for (ind in indicators) {
    if (verbose) {
      message(paste("Retrieving indicator", ind))
    }
    per.indicator[[as.character(ind)]] <-
      GetDataSotkanetSingleIndicator(ind, years = years, genders = genders,
                                     regions = regions,
                                     region.category = region.category)
  }
  # Stack the per-indicator tables into a single data frame.
  combined.data <- do.call("rbind", per.indicator)
  # Attach the organisation title corresponding to each indicator.
  org.col <- "indicator.organization.title.fi"
  combined.data[[org.col]] <- indicator.info[match(combined.data$indicator,
                                                   indicator.info$indicator),
                                             org.col]
  combined.data
}
#' Description:
#' GetDataSotkanetSingleIndicator retrieves Sotkanet data
#' for given indicator according to the query arguments and combines
#' indicator, region, and overall data into one table
#'
#' Arguments:
#' @param indicator Dataset identifier
#' @param years vector of years c(2010, 2012, ... )
#' @param genders vector of genders ('male' | 'female' | 'total')
#' @param regions return selected regions only
#' @param region.category return selected regions category (for options, see:
#' unique(SotkanetRegions(type = "table")$region.category));
#' "ALUEHALLINTOVIRASTO, "ERVA", "EURALUEET", "EUROOPPA", "KUNTA",
#' "MAA", "MAAKUNTA", "NUTS1", "POHJOISMAAT", "SAIRAANHOITOPIIRI",
#' "SEUTUKUNTA", "SUURALUE"
#'
#' Returns:
#' @return sotkanet data table
#'
#' @references
#' See citation("sotkanet")
#' @author Einari Happonen. Maintainer: Louhos/Opasnet \email{louhos@@googlegroups.com}
#' @examples #
#' @keywords utilities
GetDataSotkanetSingleIndicator <- function (indicator, years = 1990:2000, genders = "total", regions = NULL, region.category = NULL) {
  # Helper: Sotkanet encodes decimals with a comma; convert to numeric.
  comma2num <- function(x) as.numeric(gsub("\\,", "\\.", as.character(x)))
  # FIXME: is it possible to specify already in query which regions we select
  res <- SotkanetData(indicator = indicator, years = years, genders = genders)
  # Attach the indicator title.
  res <- merge(SotkanetIndicators(indicator)[, c("indicator", "indicator.title.fi")], res)
  # Attach region metadata.
  res <- merge(SotkanetRegions()[, c("region", "region.title.fi", "region.code", "region.category")], res)
  # Normalise decimal separators in the value columns.
  res$primary.value <- comma2num(res$primary.value)
  res$absolute.value <- comma2num(res$absolute.value)
  # Optionally restrict to the requested region categories / regions.
  if (!is.null(region.category)) {
    res <- res[res$region.category %in% region.category, ]
  }
  if (!is.null(regions)) {
    res <- res[res$region.title.fi %in% regions, ]
  }
  res
}
|
#' Compute the Horvitz-Thompson Estimator
#'
#' Calculate the Horvitz-Thompson Estimator for a finite population mean/proportion or total based on sample data collected from a complex sampling design.
#'
#' @param y A numeric vector of the sampled response variable.
#' @param pi A numeric vector of inclusion probabilities for each sampled unit in y. If NULL, then simple random sampling without replacement is assumed.
#' @param N A numeric value of the population size. If NULL, it is estimated with the sum of the inverse of the pis.
#' @param var_est A logical indicating whether or not to compute a variance estimator. Default is FALSE.
#' @param var_method The method to use when computing the variance estimator. Options are a Taylor linearized technique: "LinHB"= Hajek-Berger estimator, "LinHH" = Hansen-Hurwitz estimator, "LinHTSRS" = Horvitz-Thompson estimator under simple random sampling without replacement, and "LinHT" = Horvitz-Thompson estimator or a resampling technique: "bootstrapSRS" = bootstrap variance estimator under simple random sampling without replacement. The default is "LinHB".
#' @param pi2 A square matrix of the joint inclusion probabilities. Needed for the "LinHT" variance estimator.
#' @param B The number of bootstrap samples if computing the bootstrap variance estimator. Default is 1000.
#' @param messages A logical indicating whether to output the messages internal to mase. Default is TRUE.
#'
#' @examples
#' library(survey)
#' data(api)
#' horvitzThompson(y = apisrs$api00, pi = apisrs$pw^(-1))
#' horvitzThompson(y = apisrs$api00, pi = apisrs$pw^(-1), var_est = TRUE, var_method = "LinHTSRS")
#'
#'@references{
#'\insertRef{hor52}{mase}
#'}
#' @return List of output containing:
#' \itemize{
#' \item{pop_total:}{Estimate of population total}
#' \item{pop_mean:}{Estimate of population mean}
#' \item{pop_total_var:}{ Estimated variance of population total estimate}
#' \item{pop_mean_var:}{ Estimated variance of population mean estimate}
#' }
#'
#' @export horvitzThompson
#' @import boot
#' @importFrom Rdpack reprompt
#' @include varMase.R
#' @include htt.R
#'
horvitzThompson <- function(y,
                            pi = NULL,
                            N = NULL,
                            pi2 = NULL,
                            var_est = FALSE,
                            var_method = "LinHB",
                            B = 1000,
                            messages = TRUE) {
  ### INPUT VALIDATION ###
  # Make sure the var_method is valid
  if(!is.element(var_method, c("LinHB", "LinHH", "LinHTSRS", "LinHT", "bootstrapSRS"))){
    stop("Variance method input incorrect. It has to be \"LinHB\", \"LinHH\", \"LinHT\", \"LinHTSRS\", or \"bootstrapSRS\".")
  }
  # Check that y is numeric
  if(!(typeof(y) %in% c("numeric", "integer", "double"))){
    stop("Must supply numeric y. For binary variable, convert to 0/1's.")
  }
  if(is.null(pi) && is.null(N)){
    stop("Must supply either ", sQuote("pi")," or ", sQuote("N"))
  }
  if(is.null(pi)){
    # No inclusion probabilities given: assume simple random sampling
    # without replacement, where every unit has probability n/N.
    if (messages) {
      message("Assuming simple random sampling")
    }
    pi <- rep(length(y)/N, length(y))
  }
  # Design weights: inverse first-order inclusion probabilities
  weight <- pi^(-1)
  # Sample size
  n <- length(y)
  ##########################
  # Horvitz-Thompson estimator of the population total
  total <- as.vector(t(y) %*% weight)
  # If N is unknown, estimate it by the sum of the design weights;
  # otherwise use the known N when computing the mean.
  if (is.null(N)) {
    N <- sum(weight)
    mu <- as.vector(total * (1/N))
  } else {
    mu <- as.vector((total/N))
  }
  if(var_est){
    if(var_method != "bootstrapSRS"){
      # Taylor-linearized variance estimators
      varEst <- varMase(y = y, pi = pi, pi2 = pi2, method = var_method, N = N)
    }else{
      # Bootstrap variance of the total estimate
      dat <- cbind(y, pi)
      t_boot <- boot(data = dat, statistic = htt, R = B)
      # Adjust for bias and without-replacement sampling
      varEst <- var(t_boot$t)*n/(n-1)*(N-n)/(N-1)
    }
    # Variance of the mean follows from the variance of the total
    varEstMu <- varEst*N^(-2)
    return(list(pop_total = total, pop_mean = mu, pop_total_var = varEst, pop_mean_var = varEstMu))
  }else{
    # Return point estimates only
    return(list(pop_total = total, pop_mean = mu))
  }
}
| /R/horvitzThompson.R | no_license | mcconvil/mase | R | false | false | 4,340 | r | #' Compute the Horvitz-Thompson Estimator
#'
#' Calculate the Horvitz-Thompson Estimator for a finite population mean/proportion or total based on sample data collected from a complex sampling design.
#'
#' @param y A numeric vector of the sampled response variable.
#' @param pi A numeric vector of inclusion probabilities for each sampled unit in y. If NULL, then simple random sampling without replacement is assumed.
#' @param N A numeric value of the population size. If NULL, it is estimated with the sum of the inverse of the pis.
#' @param var_est A logical indicating whether or not to compute a variance estimator. Default is FALSE.
#' @param var_method The method to use when computing the variance estimator. Options are a Taylor linearized technique: "LinHB"= Hajek-Berger estimator, "LinHH" = Hansen-Hurwitz estimator, "LinHTSRS" = Horvitz-Thompson estimator under simple random sampling without replacement, and "LinHT" = Horvitz-Thompson estimator or a resampling technique: "bootstrapSRS" = bootstrap variance estimator under simple random sampling without replacement. The default is "LinHB".
#' @param pi2 A square matrix of the joint inclusion probabilities. Needed for the "LinHT" variance estimator.
#' @param B The number of bootstrap samples if computing the bootstrap variance estimator. Default is 1000.
#' @param messages A logical indicating whether to output the messages internal to mase. Default is TRUE.
#'
#' @examples
#' library(survey)
#' data(api)
#' horvitzThompson(y = apisrs$api00, pi = apisrs$pw^(-1))
#' horvitzThompson(y = apisrs$api00, pi = apisrs$pw^(-1), var_est = TRUE, var_method = "LinHTSRS")
#'
#'@references{
#'\insertRef{hor52}{mase}
#'}
#' @return List of output containing:
#' \itemize{
#' \item{pop_total:}{Estimate of population total}
#' \item{pop_mean:}{Estimate of population mean}
#' \item{pop_total_var:}{ Estimated variance of population total estimate}
#' \item{pop_mean_var:}{ Estimated variance of population mean estimate}
#' }
#'
#' @export horvitzThompson
#' @import boot
#' @importFrom Rdpack reprompt
#' @include varMase.R
#' @include htt.R
#'
horvitzThompson <- function(y,
                            pi = NULL,
                            N = NULL,
                            pi2 = NULL,
                            var_est = FALSE,
                            var_method = "LinHB",
                            B = 1000,
                            messages = TRUE) {
  ### INPUT VALIDATION ###
  # Make sure the var_method is valid
  if(!is.element(var_method, c("LinHB", "LinHH", "LinHTSRS", "LinHT", "bootstrapSRS"))){
    stop("Variance method input incorrect. It has to be \"LinHB\", \"LinHH\", \"LinHT\", \"LinHTSRS\", or \"bootstrapSRS\".")
  }
  # Check that y is numeric
  if(!(typeof(y) %in% c("numeric", "integer", "double"))){
    stop("Must supply numeric y. For binary variable, convert to 0/1's.")
  }
  if(is.null(pi) && is.null(N)){
    stop("Must supply either ", sQuote("pi")," or ", sQuote("N"))
  }
  if(is.null(pi)){
    # No inclusion probabilities given: assume simple random sampling
    # without replacement, where every unit has probability n/N.
    if (messages) {
      message("Assuming simple random sampling")
    }
    pi <- rep(length(y)/N, length(y))
  }
  # Design weights: inverse first-order inclusion probabilities
  weight <- pi^(-1)
  # Sample size
  n <- length(y)
  ##########################
  # Horvitz-Thompson estimator of the population total
  total <- as.vector(t(y) %*% weight)
  # If N is unknown, estimate it by the sum of the design weights;
  # otherwise use the known N when computing the mean.
  if (is.null(N)) {
    N <- sum(weight)
    mu <- as.vector(total * (1/N))
  } else {
    mu <- as.vector((total/N))
  }
  if(var_est){
    if(var_method != "bootstrapSRS"){
      # Taylor-linearized variance estimators
      varEst <- varMase(y = y, pi = pi, pi2 = pi2, method = var_method, N = N)
    }else{
      # Bootstrap variance of the total estimate
      dat <- cbind(y, pi)
      t_boot <- boot(data = dat, statistic = htt, R = B)
      # Adjust for bias and without-replacement sampling
      varEst <- var(t_boot$t)*n/(n-1)*(N-n)/(N-1)
    }
    # Variance of the mean follows from the variance of the total
    varEstMu <- varEst*N^(-2)
    return(list(pop_total = total, pop_mean = mu, pop_total_var = varEst, pop_mean_var = varEstMu))
  }else{
    # Return point estimates only
    return(list(pop_total = total, pop_mean = mu))
  }
}
|
\name{print.split}
\alias{print.split}
\title{Print split.}
\usage{
\method{print}{split} (x, ...)
}
\arguments{
\item{x}{object to print}
\item{...}{unused}
}
\description{
Don't print labels, so it appears like a regular list
}
\keyword{internal}
| /man/print.split.Rd | no_license | talgalili/plyr | R | false | false | 259 | rd | \name{print.split}
\alias{print.split}
\title{Print split.}
\usage{
\method{print}{split} (x, ...)
}
\arguments{
\item{x}{object to print}
\item{...}{unused}
}
\description{
Don't print labels, so it appears like a regular list
}
\keyword{internal}
|
# %%%%%%%%%%%%%%%%% #
####### LD #######
# %%%%%%%%%%%%%%%%% #
# * Nalls et al. 2018: Imputation Panel Notes
# + _"One of the limitations of this study is the use of multiple imputation panels, due to logistic constraints.
# Adding datasets from non-European populations would be helpful to further improve our granularity in association
# testing and ability to fine-map loci through integration of more variable LD signatures."_
# + _"Post-Chang 23andMe samples were imputed using a combination of Finch for phasing (an in-house developed fork of Beagle)
# and miniMac2 for imputation with all-ethnicity samples from the __September 2013 release of 1000 Genomes Phase1__
# as reference haplotypes."_
# + _"The Nalls et al . 2014 and Chang et al . 2017 samples were imputed with Minimac2 using
# __1000 Genomes phase 1 haplotypes__.
# All additional sample series except for the post-Chang et al . 2017 samples from 23andMe were imputed using the
# __Haplotype Reference Consortium (HRC)__ on the University of Michigan imputation server under default settings
# with Eagle v2.3 phasing based on reference panel HRC r1.1 2016"_
#' Procure an LD matrix for fine-mapping
#'
#' Calculate and/or query linkage disequilibrium (LD) from reference panels (UK Biobank, 1000 Genomes),
#' or user-supplied datasets.
#'
#' Options:
#' \itemize{
#' \item Download pre-computed LD matrix from UK Biobank.
#' \item Download raw vcf file from 1KG and compute LD on the fly.
#' \item Compute LD on the fly from a user-supplied vcf file.
#' \item Use a user-supplied pre-computed LD-matrix.
#' }
#'
#' @param subset_DT The locus subset of the full summary stats file.
#' @inheritParams finemap_pipeline
#' @return A symmetric LD matrix of pairwise \emph{r} values.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("BST1"); data("locus_dir");
#' locus_dir <- file.path("~/Desktop",locus_dir)
#' # BST1 <- limit_SNPs(500, BST1)
#'
#' # UK Biobank LD
#' LD_matrix <- LD.load_or_create(locus_dir=locus_dir, subset_DT=BST1, LD_reference="UKB")
#'
#' # 1000 Genomes
#' LD_matrix <- LD.load_or_create(locus_dir=locus_dir, subset_DT=BST1, LD_reference="1KGphase1", force_new_LD=T)
#'
#' # Local vcf file
#' LD_reference="~/Desktop/results/GWAS/Nalls23andMe_2019/BST1/LD/BST1.1KGphase1.vcf.gz"
#' LD_matrix <- LD.load_or_create(locus_dir=locus_dir, subset_DT=BST1, LD_reference=LD_reference, force_new_LD=T)
#' }
LD.load_or_create <- function(locus_dir,
                              subset_DT,
                              force_new_LD=FALSE,
                              LD_reference="1KGphase1",
                              LD_genome_build="hg19",
                              superpopulation="EUR",
                              remote_LD=TRUE,
                              download_method="direct",
                              vcf_folder=NULL,
                              # min_r2=0,
                              LD_block=FALSE,
                              LD_block_size=.7,
                              # min_Dprime=F,
                              remove_correlates=FALSE,
                              fillNA=0,
                              verbose=TRUE,
                              server=FALSE,
                              remove_tmps=TRUE,
                              conda_env="echoR",
                              nThread=4){
  # Path where a pre-computed LD matrix for this locus would be cached
  RDS_path <- LD.get_rds_path(locus_dir=locus_dir,
                              LD_reference=LD_reference)
  # Use && (scalar short-circuit) rather than the elementwise &
  if(file.exists(RDS_path) && !force_new_LD){
    #### Import existing LD ####
    printer("+ LD:: Previously computed LD_matrix detected. Importing...", RDS_path, v=verbose)
    LD_matrix <- readSparse(LD_path = RDS_path,
                            convert_to_df = FALSE)
    LD_list <- list(DT=subset_DT,
                    LD=LD_matrix,
                    RDS_path=RDS_path)
  } else if(LD_reference=="UKB"){
    #### UK Biobank ####
    LD_list <- LD.UKBiobank(subset_DT = subset_DT,
                            locus_dir = locus_dir,
                            force_new_LD = force_new_LD,
                            chimera = server,
                            download_method = download_method,
                            fillNA = fillNA,
                            nThread = nThread,
                            return_matrix = TRUE,
                            conda_env = conda_env,
                            remove_tmps = remove_tmps)
  } else if (LD_reference == "1KGphase1" |
             LD_reference == "1KGphase3") {
    #### 1000 Genomes ####
    LD_list <- LD.1KG(locus_dir = locus_dir,
                      subset_DT = subset_DT,
                      vcf_folder = vcf_folder,
                      LD_reference = LD_reference,
                      superpopulation = superpopulation,
                      remote_LD = remote_LD,
                      LD_block = LD_block,
                      LD_block_size = LD_block_size,
                      # min_Dprime = min_Dprime,
                      remove_correlates = remove_correlates,
                      fillNA = fillNA,
                      nThread = nThread,
                      conda_env = conda_env,
                      download_method = download_method)
  } else if (endsWith(tolower(LD_reference),".vcf") |
             endsWith(tolower(LD_reference),".vcf.gz")){
    #### Custom vcf ####
    LD_list <- LD.custom_panel(LD_reference=LD_reference,
                               LD_genome_build=LD_genome_build,
                               subset_DT=subset_DT,
                               locus_dir=locus_dir,
                               force_new_LD=force_new_LD,
                               # min_r2=min_r2,
                               # min_Dprime=min_Dprime,
                               # remove_correlates=remove_correlates,
                               fillNA=fillNA,
                               LD_block=LD_block,
                               LD_block_size=LD_block_size,
                               remove_tmps=remove_tmps,
                               nThread=nThread,
                               conda_env=conda_env,
                               verbose=verbose)
  } else {
    stop("LD:: LD_reference input not recognized. Please supply: '1KGphase1', '1KGphase3', 'UKB', or the path to a .vcf[.gz] file.")
  }
  return(LD_list)
}
LD.get_rds_path <- function(locus_dir,
                            LD_reference){
  # Cache file layout: <locus_dir>/LD/<locus>.<reference>_LD.RDS
  fname <- paste0(basename(locus_dir), ".", basename(LD_reference), "_LD.RDS")
  file.path(locus_dir, "LD", fname)
}
#' Filter LD
#'
#' @family LD
#' @keywords internal
#' @examples
#' data("BST1"); data("LD_matrix");
#' LD_list <- list(LD=LD_matrix, DT=BST1)
#' LD_list <- LD.filter_LD(LD_list, min_r2=.2)
LD.filter_LD <- function(LD_list,
                         remove_correlates=FALSE,
                         min_r2=0,
                         verbose=FALSE){
  printer("+ FILTER:: Filtering by LD features.", v=verbose)
  subset_DT <- LD_list$DT
  LD_matrix <- LD_list$LD
  # Optionally remove all SNPs correlated with specific SNPs.
  # `remove_correlates` is a named numeric vector of r2 thresholds,
  # e.g. c("rs76904798"=.2, "rs10000737"=.8)
  if(any(remove_correlates != FALSE)){
    for(snp in names(remove_correlates)){
      thresh <- remove_correlates[[snp]]
      printer("+ FILTER:: Removing correlates of",snp,"at r2 ≥",thresh,v=verbose)
      if(snp %in% row.names(LD_matrix)){
        # LD_matrix stores r, so compare against sqrt of the r2 threshold
        correlates <- colnames(LD_matrix[snp,])[LD_matrix[snp,] >= sqrt(thresh)]
        LD_matrix <- LD_matrix[(!row.names(LD_matrix) %in% correlates),
                               (!colnames(LD_matrix) %in% correlates)]
      }
    }
  }
  # Optionally keep only SNPs correlated with the lead SNP at r2 >= min_r2.
  # Scalar condition: use && (note FALSE == 0 in R, so the second test is
  # kept only for backward compatibility with min_r2=FALSE inputs).
  if(min_r2 != 0 && min_r2 != FALSE){
    printer("+ FILTER:: Removing SNPs that don't correlate with lead SNP at r2 ≤",min_r2,v=verbose)
    LD_list <- subset_common_snps(LD_matrix = LD_matrix,
                                  finemap_dat = subset_DT,
                                  verbose = FALSE)
    subset_DT <- LD_list$DT
    LD_matrix <- LD_list$LD
    lead.snp <- subset(subset_DT, leadSNP)$SNP[1]
    correlates <- colnames(LD_matrix[lead.snp,])[LD_matrix[lead.snp,] >= sqrt(min_r2)]
    LD_matrix <- LD_matrix[(row.names(LD_matrix) %in% correlates),
                           (colnames(LD_matrix) %in% correlates)]
  }
  # Re-align the LD matrix and the summary stats to their shared SNPs
  LD_list <- subset_common_snps(LD_matrix = LD_matrix,
                                finemap_dat = subset_DT,
                                verbose = FALSE)
  return(LD_list)
}
#' Compute LD from user-supplied vcf file
#'
#' Builds a pairwise LD (r) matrix for a locus from a local/custom reference
#' vcf: resolves genome-build mismatches, indexes and queries the vcf,
#' converts the subset to plink bed/bim/fam, computes LD and MAF via
#' \pkg{snpStats}, optionally restricts to the lead SNP's LD block, and saves
#' the matrix to disk.
#'
#' @param LD_reference Path to a .vcf[.gz] file to use as the LD panel.
#' @param fullSS_genome_build Genome build of the full summary stats.
#' @param LD_genome_build Genome build of the LD reference panel.
#' @return A list with the LD matrix, the (MAF-filled) SNP data, and the
#' path of the saved .RDS file (from \code{LD.save_LD_matrix}).
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' if(!"gaston" %in% row.names(installed.packages())){install.packages("gaston")}
#' data("BST1"); data("locus_dir")
#' LD_reference="~/Desktop/results/Reference/custom_panel_chr4.vcf"
#' LD_matrix <- LD.custom_panel(LD_reference=LD_reference, subset_DT=BST1, locus_dir=locus_dir)
#'
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/QTL/Microglia_all_regions/BIN1"
#' subset_DT <- data.table::fread(file.path(locus_dir,"Multi-finemap/BIN1.Microglia_all_regions.1KGphase3_LD.Multi-finemap.tsv.gz"))
#' LD_reference = "/sc/hydra/projects/pd-omics/glia_omics/eQTL/post_imputation_filtering/eur/filtered_variants/AllChr.hg38.sort.filt.dbsnp.snpeff.vcf.gz"
#' LD_matrix <- LD.custom_panel(LD_reference=LD_reference, subset_DT=BST1, locus_dir=locus_dir, LD_genome_build="hg38")
#' }
LD.custom_panel <- function(LD_reference,
                            fullSS_genome_build="hg19",
                            LD_genome_build="hg19",
                            subset_DT,
                            locus_dir,
                            force_new_LD=F,
                            min_r2=F,
                            # min_Dprime=F,
                            remove_correlates=F,
                            fillNA=0,
                            LD_block=F,
                            LD_block_size=.7,
                            remove_tmps=T,
                            nThread=4,
                            conda_env="echoR",
                            verbose=T){
  printer("LD:: Computing LD from local vcf file:",LD_reference)
  # Ensure the query coordinates match the panel's genome build.
  if(!LD_genome_build %in% c("hg19","GRCh37","grch37")){
    printer("LD:: LD panel in hg38. Handling accordingly.",v=verbose)
    if(!fullSS_genome_build %in% c("hg19","GRCh37","grch37")){
      ## If the query was originally in hg38,
      # it has already been lifted over to hg19,
      # so the stored POS.hg38 column can be reused directly.
      subset_DT <- subset_DT %>%
        dplyr::rename(POS.hg19=POS) %>%
        dplyr::rename(POS=POS.hg38)
    } else {
      ## If the query was originally in hg19,
      # no liftover has been done yet; do it now.
      subset_DT <- LIFTOVER(dat = subset_DT,
                            build.conversion = "hg19.to.hg38",
                            return_as_granges = F,
                            verbose = verbose)
    }
  }
  vcf_file <- LD.index_vcf(vcf_file=LD_reference,
                           force_new_index=F,
                           conda_env=conda_env,
                           verbose=verbose)
  # Make sure your query's chr format is the same as the vcf's chr format
  has_chr <- LD.determine_chrom_type_vcf(vcf_file = vcf_file)
  subset_DT <- dplyr::mutate(subset_DT,
                             CHR=if(has_chr) paste0("chr",gsub("chr","",CHR)) else gsub("chr","",CHR))
  vcf_subset <- LD.query_vcf(subset_DT=subset_DT,
                             locus_dir=locus_dir,
                             LD_reference=LD_reference,
                             vcf_URL=LD_reference,
                             whole_vcf=F,
                             remove_original_vcf=F,
                             force_new_vcf=force_new_LD,
                             query_by_regions=F,
                             nThread=nThread,
                             conda_env=conda_env,
                             verbose=verbose)
  bed_bim_fam <- LD.vcf_to_bed(vcf.gz.subset = vcf_subset,
                               locus_dir = locus_dir,
                               plink_prefix = "plink",
                               verbose = verbose)
  # Calculate LD
  LD_matrix <- LD.snpstats_get_LD(LD_folder=file.path(locus_dir,"LD"),
                                  plink_prefix="plink",
                                  select.snps=unique(subset_DT$SNP),
                                  stats=c("R"),
                                  symmetric=T,
                                  depth="max",
                                  verbose=verbose)
  # Get MAF (if needed)
  subset_DT <- LD.snpstats_get_MAF(subset_DT=subset_DT,
                                   LD_folder=file.path(locus_dir,"LD"),
                                   plink_prefix="plink",
                                   force_new_MAF=F,
                                   verbose=verbose)
  # Filter out SNPs not in the same LD block as the lead SNP
  # Get lead SNP rsid
  leadSNP = subset(subset_DT, leadSNP==T)$SNP
  if(LD_block){
    # BUG FIX: the LD folder was previously hard-coded to "./plink_tmp"
    # (cwd-dependent); use the locus's own LD folder, consistent with LD.1KG.
    block_snps <- LD.leadSNP_block(leadSNP = leadSNP,
                                   LD_folder = file.path(locus_dir,"LD","plink_tmp"),
                                   LD_block_size = LD_block_size)
    # Guard against block SNPs missing from the matrix before reordering
    # (a plain LD_matrix[block_snps, block_snps] would error on absent names).
    block_snps <- intersect(block_snps, rownames(LD_matrix))
    LD_matrix <- LD_matrix[block_snps, block_snps]
  }
  # IMPORTANT! Remove large data.ld file after you're done with it
  if(remove_tmps){
    suppressWarnings(file.remove(vcf_subset))
  }
  # Save LD matrix
  LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix,
                               subset_DT=subset_DT,
                               locus_dir=locus_dir,
                               fillNA = fillNA,
                               LD_reference=gsub(".vcf|.gz","",LD_reference),
                               sparse = T,
                               verbose=verbose)
  return(LD_list)
}
#' Detect chromosome naming style of a vcf
#'
#' Reports whether the vcf records chromosomes with a "chr" prefix
#' (e.g. "chr4") or without (e.g. "4"), by inspecting its first record.
#'
#' @param vcf_file Path to a vcf file readable by \code{gaston::read.vcf}.
#' @return \code{TRUE} if the first record's chromosome contains "chr".
#' @family LD
#' @keywords internal
LD.determine_chrom_type_vcf <- function(vcf_file,
                                        conda_env="echoR",
                                        verbose=T){
  # Peek at a single record; that is enough to learn the CHR naming scheme.
  first_record <- gaston::read.vcf(vcf_file, max.snps = 1, convert.chr = F)
  grepl("chr", first_record@snps$chr[1])
}
#' Save LD_matrix
#'
#' Optionally align an LD matrix with its SNP-level data, then write the
#' matrix to \code{<locus_dir>/LD/<locus>.<LD_reference>_LD.RDS}.
#'
#' @param fillNA Value used to replace missing pairwise correlations when
#' subsetting to common SNPs.
#' @param subset_common If \code{T}, subset the matrix and data to their
#' shared SNPs before saving.
#' @param sparse If \code{T}, save as a sparse matrix (via \code{saveSparse});
#' otherwise save the dense matrix with \code{saveRDS}.
#' @return A list with the (possibly subset) \code{LD} matrix, the \code{DT}
#' data, and the \code{RDS_path} the matrix was written to.
#' @family LD
#' @keywords internal
#' @examples
#' data("BST1"); data("LD_matrix"); data("locus_dir");
#' LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix, subset_DT=BST1, locus_dir=file.path("~/Desktop",locus_dir), LD_reference="UKB")
#' LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix, subset_DT=BST1, locus_dir=file.path("~/Desktop",locus_dir), LD_reference="custom_vcf")
LD.save_LD_matrix <- function(LD_matrix,
                              subset_DT,
                              locus_dir,
                              fillNA=0,
                              LD_reference,
                              subset_common=T,
                              sparse=T,
                              verbose=T){
  RDS_path <- LD.get_rds_path(locus_dir = locus_dir,
                              LD_reference = basename(LD_reference))
  printer("+ LD:: Saving LD matrix ==>",RDS_path,v=verbose)
  if(subset_common){
    # Keep only SNPs present in both the matrix and the SNP-level data.
    sub.out <- subset_common_snps(LD_matrix = LD_matrix,
                                  fillNA = fillNA,
                                  finemap_dat = subset_DT,
                                  verbose = F)
    LD_matrix <- sub.out$LD
    subset_DT <- sub.out$DT
  }
  printer(dim(LD_matrix)[1],"x",dim(LD_matrix)[2],"LD_matrix",if(sparse) "(sparse)"else NULL, v=verbose)
  dir.create(dirname(RDS_path), showWarnings = F, recursive = T)
  if(sparse){
    saveSparse(LD_matrix = LD_matrix,
               LD_path = RDS_path,
               verbose = F)
  } else {
    saveRDS(LD_matrix, file = RDS_path)
  }
  return(list(LD=LD_matrix,
              DT=subset_DT,
              RDS_path=RDS_path))
}
#' Translate superpopulation acronyms
#'
#' Maps synonymous superpopulation codes onto a single shared ontology
#' (e.g. "CAU" and "EUR" both become "EUR"; codes already in the target
#' ontology map to themselves).
#' @family LD
#' @keywords internal
#' @param superpopulation Three-letter superpopulation name.
LD.translate_population <- function(superpopulation){
  lookup <- list("AFA"="AFR", "CAU"="EUR", "HIS"="AMR",
                 "AFR"="AFR", "EUR"="EUR", "AMR"="AMR")
  as.character(lookup[superpopulation])
}
#' Plot a subset of the LD matrix
#'
#' Uses \code{gaston} (or base \code{heatmap}/\code{image}) to plot a
#' SNP-annotated window of the LD matrix centered on the lead SNP.
#' @inheritParams finemap_pipeline
#' @family LD
#' @keywords internal
#' @param span This is very computationally intensive,
#' so you need to limit the number of SNPs with span.
#' If \code{span=10}, only 10 SNPs upstream and 10 SNPs downstream of the lead SNP will be plotted.
#' @param method Which plotting backend to use ("gaston", "heatmap" or "image").
#' @examples
#' \dontrun{
#' data("BST1");
#' LD_matrix <- readRDS("/Volumes/Steelix/fine_mapping_files/GWAS/Nalls23andMe_2019/BST1/plink/UKB_LD.RDS")
#' LD.plot_LD(LD_matrix=LD_matrix, subset_DT=BST1)
#' }
LD.plot_LD <- function(LD_matrix,
                       subset_DT,
                       span=10,
                       method=c("gaston","heatmap","image")){
  leadSNP = subset(subset_DT, leadSNP==T)$SNP
  lead_index = match(leadSNP, row.names(LD_matrix))
  # BUG FIX: clamp the window to valid matrix indices. The previous
  # arithmetic could produce indices <1 or >nrow when the lead SNP sat near
  # a matrix edge (or span exceeded the matrix size), causing subscript errors.
  start_pos = max(1, lead_index - span)
  end_pos = min(nrow(LD_matrix), lead_index + span)
  sub_DT <- subset(subset_DT, SNP %in% rownames(LD_matrix))
  if(method[1]=="gaston"){
    gaston::LD.plot(LD = LD_matrix[start_pos:end_pos,
                                   start_pos:end_pos],
                    snp.positions = sub_DT$POS[start_pos:end_pos] )
  }
  if(method[1]=="heatmap"){
    # BUG FIX: previously referenced undefined `LD_sparse`.
    heatmap(as.matrix(LD_matrix)[start_pos:end_pos,
                                 start_pos:end_pos])
  }
  if(method[1]=="image"){
    # BUG FIX: previously referenced undefined `LD_sparse`.
    image(as.matrix(LD_matrix)[start_pos:end_pos,
                               start_pos:end_pos])
  }
}
#' Download vcf subset from 1000 Genomes
#'
#' Resolves the correct 1KG vcf URL (or local path) for the locus's
#' chromosome and phase, then downloads/queries the locus-sized subset.
#'
#' @family LD
#' @keywords internal
#' @param query_by_regions You can make queries with \code{tabix} in two different ways:
#' \describe{
#' \item{\code{query_by_regions=F} \emph{(default)}}{Return a vcf with all positions between the min/max in \code{subset_DT} Takes up more storage but is MUCH faster}
#' \item{\code{query_by_regions=T}}{Return a vcf with only the exact positions present in \code{subset_DT}. Takes up less storage but is MUCH slower}
#' }
#' @param remote_LD If \code{T}, query the remote 1KG FTP server; otherwise
#' query local vcf files under \code{vcf_folder}.
#' @return A list with \code{vcf_subset} (path to the locus vcf subset) and
#' \code{popDat} (sample-to-superpopulation metadata for the chosen phase).
#' @inheritParams finemap_pipeline
#' @examples
#' \dontrun{
#' data("BST1");
#' subset_DT <- BST1
#' vcf_subset.popDat <- LD.1KG_download_vcf(subset_DT=BST1, LD_reference="1KGphase1", locus_dir=file.path("~/Desktop",locus_dir))
#' }
LD.1KG_download_vcf <- function(subset_DT,
                                LD_reference="1KGphase1",
                                remote_LD=T,
                                vcf_folder=NULL,
                                locus_dir,
                                locus=NULL,
                                whole_vcf=F,
                                download_method="wget",
                                force_new_vcf=F,
                                query_by_regions=F,
                                nThread=4,
                                conda_env="echoR",
                                verbose=T){
  # throw error if anything but phase 1 or phase 3 are specified
  if( ! LD_reference %in% c("1KGphase1", "1KGphase3" )){
    stop("LD_reference must be one of \"1KGphase1\" or \"1KGphase3\" ")
  }
  # Old FTP (deprecated?)
  ## http://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20130502/
  # New FTP
  ## ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20110521/
  # Download portion of vcf from 1KG website
  # vcf_folder <- LD.get_locus_vcf_folder(locus_dir=locus_dir)
  # Don't use the 'chr' prefix for 1KG queries:
  ## https://www.internationalgenome.org/faq/how-do-i-get-sub-section-vcf-file/
  subset_DT$CHR <- gsub("chr","",subset_DT$CHR)
  chrom <- unique(subset_DT$CHR)
  # PHASE 3 DATA
  if(LD_reference=="1KGphase3"){
    FTP <- "ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20130502/"
    # FTP <- "/sc/arion/projects/ad-omics/data/references/1KGPp3v5/"
    popDat <- echolocatoR::popDat_1KGphase3
    printer("LD Reference Panel = 1KGphase3", v=verbose)
    if(remote_LD){## With internet
      printer("+ LD:: Querying 1KG remote server.",v=verbose)
      vcf_URL <- paste0(FTP,"/ALL.chr",chrom,
                        ".phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz")
    }else{## WithOUT internet
      # vcf_folder <- "/sc/arion/projects/ad-omics/data/references/1KGPp3v5/"
      printer("+ LD:: Querying 1KG local vcf files.",v=verbose)
      vcf_URL <- paste(vcf_folder, "/ALL.chr",chrom,
                       ".phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz",sep="")
    }
    # PHASE 1 DATA
  } else if (LD_reference=="1KGphase1") {
    FTP <- "ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20110521/"
    popDat <- echolocatoR::popDat_1KGphase1
    printer("LD Reference Panel = 1KGphase1", v=verbose)
    if(remote_LD){## With internet
      printer("+ LD:: Querying 1KG remote server.",v=verbose)
      vcf_URL <- paste(FTP,"/ALL.chr",chrom,
                       ".phase1_release_v3.20101123.snps_indels_svs.genotypes.vcf.gz", sep="")
    }else{## WithOUT internet
      printer("+ LD:: Querying 1KG local vcf files.",v=verbose)
      vcf_URL <- paste(vcf_folder,"/ALL.chr",chrom,
                       ".phase1_release_v3.20101123.snps_indels_svs.genotypes.vcf.gz", sep="")
    }
  }
  # NOTE(review): `phase` and `use_header` are computed here but never used
  # in the remainder of this function -- possibly leftovers from an older
  # implementation; confirm before removing.
  phase <- gsub("1KG","",LD_reference)
  # phase 1 has no header whereas phase 3 does
  if( LD_reference == "1KGphase1" ){ use_header <- FALSE }
  if( LD_reference == "1KGphase3" ){ use_header <- TRUE }
  # Download and subset vcf if the subset doesn't exist already
  vcf_subset <- LD.query_vcf(subset_DT=subset_DT,
                             vcf_URL=vcf_URL,
                             locus_dir=locus_dir,
                             LD_reference=LD_reference,
                             whole_vcf=whole_vcf,
                             force_new_vcf=force_new_vcf,
                             download_method=download_method,
                             query_by_regions=query_by_regions,
                             nThread=nThread,
                             conda_env=conda_env,
                             verbose=verbose)
  return(list(vcf_subset = vcf_subset,
              popDat = popDat))
}
#' Get VCF storage folder
#'
#' Returns (and creates, if necessary) the "LD" subfolder of a locus
#' results directory.
#' @family LD
#' @keywords internal
#' @examples
#' data("locus_dir")
#' vcf_folder <- LD.get_locus_vcf_folder(locus_dir=locus_dir)
LD.get_locus_vcf_folder <- function(locus_dir=NULL){
  ld_dir <- file.path(locus_dir, "LD")
  # Side effect: make sure the folder exists before handing back its path.
  dir.create(ld_dir, showWarnings = F, recursive = T)
  ld_dir
}
#' Construct the path to vcf subset
#'
#' Builds (and ensures the parent folder of) the file name used for a
#' locus's vcf subset: \code{<locus_dir>/LD/<locus>.<panel>.vcf} for a locus
#' subset, or \code{<locus_dir>/LD/<panel>.chr<N>} for a whole-chromosome vcf.
#' A ".vcf" extension is appended unless the name already ends in
#' ".vcf"/".vcf.gz".
#' @family LD
#' @keywords internal
#' @examples
#' data("locus_dir"); data("BST1");
#' vcf_subset <- LD.construct_subset_vcf_name(subset_DT=BST1, locus_dir=locus_dir, LD_reference="1KGlocal")
LD.construct_subset_vcf_name <- function(subset_DT,
                                         LD_reference=NULL,
                                         locus_dir,
                                         whole_vcf=F){
  # Inlined LD.get_locus_vcf_folder(): path + dir.create side effect.
  vcf_folder <- file.path(locus_dir, "LD")
  dir.create(vcf_folder, showWarnings = F, recursive = T)
  # Don't use the chr prefix: https://www.internationalgenome.org/faq/how-do-i-get-sub-section-vcf-file/
  subset_DT$CHR <- gsub("chr", "", subset_DT$CHR)
  chrom <- unique(subset_DT$CHR)
  if(whole_vcf){
    base_name <- paste(basename(LD_reference), paste0("chr", chrom), sep=".")
  } else {
    base_name <- paste(basename(locus_dir), basename(LD_reference), sep=".")
  }
  vcf_subset <- file.path(vcf_folder, base_name)
  dir.create(path = dirname(vcf_subset), recursive = T, showWarnings = F)
  already_vcf <- endsWith(vcf_subset, ".vcf.gz") | endsWith(vcf_subset, ".vcf")
  if(!already_vcf){
    vcf_subset <- paste0(vcf_subset, ".vcf")
  }
  return(vcf_subset)
}
#' Index vcf file if it hasn't been already
#'
#' bgzip-compresses the vcf if it is not already gzipped, then creates a
#' tabix index (.tbi) unless one already exists.
#'
#' @param vcf_file Path to a .vcf or .vcf.gz file.
#' @param force_new_index If \code{T}, re-index even when a .tbi exists.
#' @param conda_env Conda environment searched for the bgzip/tabix executables.
#' @return Path to the (bgzipped) .vcf.gz file.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' LD_reference <- "~/Desktop/results/Reference/custom_panel_chr4.vcf.gz"
#' vcf_file <- LD.index_vcf(vcf_file=LD_reference)
#' }
LD.index_vcf <- function(vcf_file,
                         force_new_index=F,
                         conda_env="echoR",
                         verbose=T){
  if(!endsWith(vcf_file,".gz")){
    printer("+ LD:: Compressing vcf with bgzip",v=verbose)
    bgzip <- CONDA.find_package(package="bgzip",
                                conda_env=conda_env,
                                verbose = verbose)
    # bgzip compresses in place, producing <vcf_file>.gz.
    cmd1 <- paste(bgzip,
                  vcf_file)
    printer("++ LD::",cmd1,v=verbose)
    system(cmd1)
    vcf_file <- paste0(vcf_file,".gz")
  }else { printer("+ LD::",vcf_file,"already compressed",v=verbose)}
  if(!file.exists(paste0(vcf_file,".tbi")) | force_new_index){
    printer("+ LD:: Indexing",vcf_file,v=verbose)
    tabix <- CONDA.find_package(package="tabix",
                                conda_env=conda_env,
                                verbose = verbose)
    # -f: overwrite any existing index; -p vcf: input format preset.
    cmd <- paste(tabix,
                 "-fp vcf",
                 vcf_file)
    printer("++ LD::",cmd,v=verbose)
    system(cmd)
  } else { printer("+ LD::",vcf_file,"already indexed.",v=verbose) }
  return(vcf_file)
}
#' Query vcf file
#'
#' Extract a locus-sized subset from a (remote or local) tabix-indexed vcf.
#' If a non-empty subset already exists on disk, it is reused unless
#' \code{force_new_vcf=T}. With \code{whole_vcf=T} the full chromosome vcf
#' is downloaded instead of a tabix query.
#'
#' @param query_by_regions If \code{T}, query each SNP position via a tabix
#' regions file (-R flag; smaller output but much slower). If \code{F}
#' (default), query the locus's min-max coordinate range (much faster).
#' @return Path to the vcf subset file.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("locus_dir"); data("BST1");
#' # Custom
#' LD_reference <- "~/Desktop/results/Reference/custom_panel_chr4.vcf"
#' vcf_file <- LD.index_vcf(vcf_file=LD_reference)
#' vcf_subset <- LD.query_vcf(subset_DT=BST1, locus_dir=locus_dir, vcf_URL=vcf_file, LD_reference=LD_reference, force_new_vcf=T)
#' }
LD.query_vcf <- function(subset_DT,
                         vcf_URL,
                         locus_dir,
                         LD_reference,
                         whole_vcf=F,
                         force_new_vcf=F,
                         remove_original_vcf=F,
                         download_method="wget",
                         query_by_regions=F,
                         nThread=4,
                         conda_env="echoR",
                         verbose=T){
  # vcf_subset <- "/pd-omics/brian/Fine_Mapping/Data/GWAS/Nalls23andMe_2019/BRIP1/LD/BRIP1.1KGphase3.vcf.gz"
  vcf_subset <- LD.construct_subset_vcf_name(subset_DT=subset_DT,
                                             locus_dir=locus_dir,
                                             LD_reference=LD_reference,
                                             whole_vcf=whole_vcf)
  # CHECK FOR EMPTY VCF FILES!
  ## These can be created if you stop the query early, or if the url fails.
  if(file.exists(vcf_subset)){
    if(file.size(vcf_subset) < 100){ # Less than 100 bytes
      printer("+ LD:: Removing empty vcf file and its index", v=verbose)
      # BUG FIX: file.remove() does not expand wildcards ("*"), so the old
      # paste0(vcf_subset,"*") call never deleted anything. Remove the vcf
      # and its tabix index explicitly instead.
      suppressWarnings(file.remove(c(vcf_subset, paste0(vcf_subset,".tbi"))))
    }
  }
  tabix <- CONDA.find_package(package = "tabix",
                              conda_env = conda_env,
                              verbose = verbose)
  if((!file.exists(vcf_subset)) | force_new_vcf){
    printer("LD:: Querying VCF subset", v=verbose)
    if(whole_vcf){
      region <- ""
      locus <- ""
      out.file <- downloader(input_url = vcf_URL,
                             output_path = dirname(vcf_subset),
                             download_method = download_method,
                             nThread = nThread,
                             conda_env=conda_env)
    } else {
      # Download tabix subset
      if(query_by_regions){
        ### Using region file (-R flag)
        regions.bed <- file.path(locus_dir,"LD","regions.tsv")
        data.table::fwrite(list(paste0(subset_DT$CHR), sort(subset_DT$POS)),
                           file=regions.bed, sep="\t")
        regions <- paste("-R",regions.bed)
        tabix_cmd <- paste(tabix,
                           "-fh",
                           "-p vcf",
                           vcf_URL,
                           gsub("\\./","",regions),
                           ">",
                           gsub("\\./","",vcf_subset) )
        printer(tabix_cmd)
        system(tabix_cmd)
      } else {
        ### Using coordinates range (MUCH faster!)
        coord_range <- paste0(unique(subset_DT$CHR)[1],":",
                              min(subset_DT$POS),"-",max(subset_DT$POS))
        tabix_cmd <- paste(tabix,
                           "-fh",
                           "-p vcf",
                           vcf_URL,
                           coord_range,
                           ">",
                           gsub("\\./","",vcf_subset) )
        printer(tabix_cmd)
        system(tabix_cmd)
      }
    }
    if(remove_original_vcf){
      vcf_name <- paste0(basename(vcf_URL), ".tbi")
      out <- suppressWarnings(file.remove(vcf_name))
    }
  } else {printer("+ Identified existing VCF subset file. Importing...", vcf_subset, v=verbose)}
  return(vcf_subset)
}
#' Subset a vcf to one superpopulation's samples
#'
#' Compresses and re-indexes the locus vcf, then uses \emph{bcftools} to keep
#' only the individuals belonging to the requested superpopulation.
#' (The original title "filter by min/max coordinates" did not match the
#' body, which filters samples, not coordinates.)
#' @param vcf_subset Path to the locus subset vcf.
#' @param popDat The metadata file listing the superpopulation to which each sample belongs.
#' @param superpopulation Superpopulation code used to select samples.
#' @param remove_tmp If \code{T}, delete the intermediate uncompressed vcf.
#' @return Path to the sample-subset .vcf.gz file.
#' @inheritParams finemap_pipeline
#' @family LD
#' @keywords internal
LD.filter_vcf <- function(vcf_subset,
                          popDat,
                          superpopulation,
                          remove_tmp=T,
                          verbose=T){
  vcf.gz <- paste0(vcf_subset,".gz")
  vcf.gz.subset <- gsub("_subset","_samples_subset",vcf.gz)
  # NOTE(review): bgzip/tabix/bcftools are invoked from PATH here directly
  # (no CONDA.find_package lookup, unlike other LD helpers) -- confirm intended.
  # Compress vcf
  if(!file.exists(vcf.gz)){
    printer("LD:BCFTOOLS:: Compressing vcf file...", v=verbose)
    system(paste("bgzip -f",vcf_subset))
  }
  # Re-index vcf
  printer("LD:TABIX:: Re-indexing vcf.gz...", v=verbose)
  system(paste("tabix -f -p vcf",vcf.gz))
  # Subset samples
  selectedInds <- subset(popDat, superpop == superpopulation)$sample %>% unique()
  printer("LD:BCFTOOLS:: Subsetting vcf to only include",superpopulation,"individuals (",length(selectedInds), "/",length(popDat$sample%>%unique()),").", v=verbose)
  cmd <- paste("bcftools view -s",paste(selectedInds, collapse=","), vcf.gz, "| bgzip > tmp && mv tmp",vcf.gz.subset)
  system(cmd)
  # Remove old vcf
  if(remove_tmp){out <- suppressWarnings(file.remove(vcf_subset))}
  return(vcf.gz.subset)
}
#' Subset a vcf by superpopulation
#'
#' Reads a vcf with \pkg{gaston}, keeps only SNPs present in \code{subset_DT}
#' (dropping missing rsIDs recorded as "."), writes a plink bed fileset for
#' the locus, then keeps only individuals in the requested superpopulation.
#'
#' @inheritParams LD.filter_vcf
#' @return A \pkg{gaston} bed.matrix subset to the selected SNPs/individuals.
#' @family LD
#' @keywords internal
LD.filter_vcf_gaston <- function(vcf_subset,
                                 subset_DT,
                                 locus_dir,
                                 superpopulation,
                                 popDat,
                                 verbose=T){
  # Import w/ gaston and further subset
  printer("+ Importing VCF as bed file...", v=verbose)
  bed.file <- gaston::read.vcf(vcf_subset, verbose = F)
  ## Subset rsIDs ("." marks a missing rsID)
  bed <- gaston::select.snps(bed.file, id %in% subset_DT$SNP & id !=".")
  # Create plink sub-dir
  dir.create(file.path(locus_dir, "LD"), recursive = T, showWarnings = F)
  # NOTE(review): the bed fileset is written to disk BEFORE individuals are
  # subset, so the plink files contain all samples -- confirm intended.
  gaston::write.bed.matrix(bed, file.path(locus_dir, "LD/plink"), rds = NULL)
  # Subset Individuals
  selectedInds <- subset(popDat, superpop == superpopulation)
  bed <- gaston::select.inds(bed, id %in% selectedInds$sample)
  # Cleanup extra files
  remove(bed.file)
  # file.remove("vcf_subset")
  return(bed)
}
#' Convert vcf file to BED file
#'
#' Uses plink to convert a gzipped vcf subset into a bed/bim/fam fileset
#' inside the locus's "LD" folder.
#' @param vcf.gz.subset Path to the gzipped locus subset vcf.
#' @param locus_dir Locus-specific results directory.
#' @param plink_prefix File name prefix for the plink outputs.
#' @return Named list with the \code{bed}, \code{bim} and \code{fam} paths.
#' @family LD
#' @keywords internal
LD.vcf_to_bed <- function(vcf.gz.subset,
                          locus_dir,
                          plink_prefix="plink",
                          verbose=T){
  plink <- LD.plink_file()
  printer("LD:PLINK:: Converting vcf.gz to .bed/.bim/.fam", v=verbose)
  LD_dir <- file.path(locus_dir,"LD")
  dir.create(LD_dir, recursive = T, showWarnings = F)
  out_prefix <- file.path(LD_dir, plink_prefix)
  system(paste(plink, "--vcf", vcf.gz.subset, "--out", out_prefix))
  # Hand back the paths of the three plink output files.
  lapply(c(bed="bed", bim="bim", fam="fam"),
         function(ext) paste0(out_prefix, ".", ext))
}
#' Calculate LD
#'
#' Calculate a pairwise LD matrix from a vcf file using \emph{plink}.
#' @param locus_dir Locus-specific results directory.
#' @param ld_window Set --r/--r2 max variant ct pairwise distance (usu. 10).
#' @param ld_format Whether to produce an LD matrix with
#' r (\code{ld_format="r"}) or D' (\code{ld_format="D"}) as the pairwise SNP correlation metric.
#' @return Path to the plink output: a square binary matrix (.ld.bin) for
#' \code{ld_format="r"}, or a long-format table (.ld) otherwise.
#' @family LD
#' @keywords internal
LD.calculate_LD <- function(locus_dir,
                            ld_window=1000, # 10000000
                            ld_format="r",
                            plink_prefix="plink",
                            verbose=T){
  plink <- LD.plink_file()
  printer("LD:PLINK:: Calculating LD ( r & D'-signed; LD-window =",ld_window,")", v=verbose)
  plink_path_prefix <- file.path(locus_dir,"LD",plink_prefix)
  dir.create(file.path(locus_dir,"LD"), recursive = T, showWarnings = F)
  out_prefix <- paste0(plink_path_prefix,".r_dprimeSigned")
  if(ld_format=="r"){
    # Square binary matrix of r values (--r square bin).
    cmd <- paste(plink,
                 "--bfile",plink_path_prefix,
                 "--r square bin",
                 "--out",out_prefix)
    ld.path <- paste0(out_prefix,".ld.bin")
  } else {
    # Long-format table with signed D'; ld_window caps both the variant
    # count and the kb distance between reported pairs.
    cmd <- paste(plink,
                 "--bfile",plink_path_prefix,
                 "--r dprime-signed",
                 "--ld-window",ld_window,
                 "--ld-window-kb",ld_window,
                 "--out",out_prefix)
    ld.path <- paste0(out_prefix,".ld")
  }
  system(cmd)
  return(ld.path)
}
#' Create LD matrix from plink output
#'
#' Depending on which parameters you give \emph{plink} when calculating LD,
#' you get different file outputs. When it produces .bin and .bim files
#' (e.g. with the \code{--r dprime-signed} flag), use this function to
#' reconstruct a proper SNP x SNP LD matrix.
#' @param LD_dir Directory that contains the plink .bin/.bim files.
#' @return A square numeric matrix with SNP rsIDs as dimnames.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/QTL/Microglia_all_regions/BIN1"
#' ld.matrix <- LD.read_bin(LD_dir=file.path(locus_dir, "LD"))
#' }
LD.read_bin <- function(LD_dir){
  snp_info <- data.table::fread(file.path(LD_dir, "plink.bim"),
                                col.names = c("CHR","SNP","V3","POS","A1","A2"))
  n_snps <- length(snp_info$SNP)
  # plink's .ld.bin holds an n x n matrix of doubles, read back flat.
  flat_vals <- readBin(file.path(LD_dir, "plink.ld.bin"),
                       what = "numeric", n = n_snps^2)
  matrix(flat_vals, nrow = n_snps,
         dimnames = list(snp_info$SNP, snp_info$SNP))
}
#' Create LD matrix from plink output.
#'
#' Depending on which parameters you give \emph{plink} when calculating LD, you get different file outputs.
#' When it produces an LD table, use this function to create a proper LD matrix.
#' @param ld.path Path to a long-format plink .ld table (columns SNP_A, SNP_B, R).
#' @param snp.subset Either \code{F}, or a vector of RSIDs to restrict the table to.
#' @param fillNA Value used to replace missing correlations (via \code{LD.fill_NA}).
#' @return A numeric matrix of pairwise r values.
#' @family LD
#' @keywords internal
LD.read_ld_table <- function(ld.path,
                             snp.subset=F,
                             fillNA=0,
                             verbose=T){
  # subset_DT <- data.table::fread(file.path(locus_dir,"Multi-finemap/Multi-finemap_results.txt")); snp.subset <- subset_DT$SNP
  ld.table <- data.table::fread(ld.path, nThread = 4)
  if(any(snp.subset!=F)){
    printer("LD:PLINK:: Subsetting LD data...", v=verbose)
    ld.table <- subset(ld.table, SNP_A %in% snp.subset | SNP_B %in% snp.subset)
  }
  printer("LD:PLINK:: Casting data.matrix...", v=verbose)
  # Long -> wide: one column per SNP_A. Duplicate SNP pairs are averaged
  # (fun.agg) and pairs absent from the table are filled with 0.
  ld.cast <- data.table::dcast.data.table(ld.table,
                                          formula = SNP_B ~ SNP_A,
                                          value.var="R",
                                          fill=0,
                                          drop=T,
                                          fun.agg = function(x){mean(x,na.rm = T)})
  # Drop the row/column corresponding to missing rsIDs (".").
  ld.cast <- subset(ld.cast, SNP_B !=".", select = -`.`)
  ld.mat <- data.frame(ld.cast, row.names = ld.cast$SNP_B) %>% data.table() %>% as.matrix()
  # ld.mat[1:10,1:10]
  ld.mat <- LD.fill_NA(LD_matrix = ld.mat,
                       fillNA = fillNA,
                       verbose = verbose)
  return(ld.mat)
}
#' Find correct plink file
#'
#' Resolves the plink executable to use: from a conda environment if
#' \code{conda_env} is supplied, otherwise falling back to the OS-specific
#' plink 1.9 binary bundled with \pkg{echolocatoR}.
#' @family LD
#' @keywords internal
#' @examples
#' plink <- LD.plink_file()
LD.plink_file <- function(plink="plink",
                          conda_env=NULL){
  if(!is.null(conda_env)){
    plink <- CONDA.find_package("plink", conda_env = conda_env)
  }
  if(plink=="plink"){
    tool_dir <- system.file("tools/plink", package = "echolocatoR")
    # Pick the binary bundled for the current operating system
    # (Windows is the fallback for anything that is not osx/linux).
    binary_name <- switch(get_os(),
                          "osx" = "plink1.9_mac",
                          "linux" = "plink1.9_linux",
                          "plink1.9_windows.exe")
    plink <- file.path(tool_dir, binary_name)
  }
  return(plink)
}
#' Compute LD from 1000 Genomes
#'
#' Downloads a subset vcf of the 1KG database that matches your locus coordinates.
#' Then uses \emph{plink} to calculate LD on the fly.
#'
#' This approach is taken, because other API query tools have limitations with the window size being queried.
#' This approach does not have this limitations, allowing you to fine-map loci more completely.
#'
#' @param fillNA When pairwise LD (r) between two SNPs is \code{NA}, replace with 0.
#' @param superpopulation Superpopulation whose samples are used for LD.
#' @return A list with the LD matrix, the (MAF-filled) SNP data, and the path
#' of the saved .RDS file (from \code{LD.save_LD_matrix}).
#' @inheritParams finemap_pipeline
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("BST1"); data("locus_dir");
#' BST1 <- limit_SNPs(max_snps = 500, subset_DT = BST1)
#' LD_matrix <- LD.1KG(locus_dir=file.path("~/Desktop",locus_dir), subset_DT=BST1, LD_reference="1KGphase1")
#'
#' ## Kunkle et al 2019
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ACE"
#'
#' }
LD.1KG <- function(locus_dir,
                   subset_DT,
                   LD_reference="1KGphase1",
                   superpopulation="EUR",
                   vcf_folder=NULL,
                   remote_LD=T,
                   # min_r2=F,
                   LD_block=F,
                   LD_block_size=.7,
                   # min_Dprime=F,
                   remove_correlates=F,
                   remove_tmps=T,
                   fillNA=0,
                   download_method="wget",
                   nThread=4,
                   conda_env="echoR",
                   verbose=T){
  # data("BST1"); data("locus_dir"); subset_DT=BST1; LD_reference="1KGphase1"; vcf_folder=NULL; superpopulation="EUR";
  # min_r2=F; LD_block=F; LD_block_size=.7; min_Dprime=F; remove_correlates=F; remote_LD=T; verbose=T; nThread=4; conda_env="echoR";
  printer("LD:: Using 1000Genomes as LD reference panel.", v=verbose)
  locus <- basename(locus_dir)
  # 1. Download/query the locus-sized vcf subset + superpopulation metadata.
  vcf_info <- LD.1KG_download_vcf(subset_DT=subset_DT,
                                  locus_dir=locus_dir,
                                  LD_reference=LD_reference,
                                  vcf_folder=vcf_folder,
                                  locus=locus,
                                  remote_LD=remote_LD,
                                  download_method=download_method,
                                  nThread=nThread,
                                  conda_env=conda_env,
                                  verbose=verbose)
  vcf_subset <- vcf_info$vcf_subset
  popDat <- vcf_info$popDat
  # 2. Keep only samples from the requested superpopulation.
  # NOTE(review): remove_tmp is hard-coded to T here rather than forwarding
  # the remove_tmps argument -- confirm intended.
  vcf.gz.path <- LD.filter_vcf(vcf_subset = vcf_subset,
                               popDat = popDat,
                               superpopulation = superpopulation,
                               remove_tmp = T,
                               verbose = verbose)
  # 3. Convert to plink bed/bim/fam.
  bed_bim_fam <- LD.vcf_to_bed(vcf.gz.subset = vcf.gz.path,
                               locus_dir = locus_dir,
                               verbose = verbose)
  # Calculate LD
  LD_matrix <- LD.snpstats_get_LD(LD_folder=file.path(locus_dir,"LD"),
                                  plink_prefix="plink",
                                  select.snps=unique(subset_DT$SNP),
                                  stats=c("R"),
                                  symmetric=T,
                                  depth="max",
                                  verbose=verbose)
  # Get MAF (if needed)
  subset_DT <- LD.snpstats_get_MAF(subset_DT=subset_DT,
                                   LD_folder=file.path(locus_dir,"LD"),
                                   plink_prefix="plink",
                                   force_new_MAF=T,
                                   nThread=nThread,
                                   verbose=verbose)
  # Get lead SNP rsid
  leadSNP = subset(subset_DT, leadSNP==T)$SNP
  # Filter out SNPs not in the same LD block as the lead SNP
  if(LD_block){
    block_snps <- LD.leadSNP_block(leadSNP = leadSNP,
                                   LD_folder = file.path(locus_dir,"LD","plink_tmp"),
                                   LD_block_size = LD_block_size)
    LD_matrix <- LD_matrix[row.names(LD_matrix) %in% block_snps, colnames(LD_matrix) %in% block_snps]
    LD_matrix <- LD_matrix[block_snps, block_snps]
  }
  # IMPORTANT! Remove large data.ld file after you're done with it
  # (vcf_subset may already have been deleted by LD.filter_vcf above,
  # hence the suppressWarnings).
  if(remove_tmps){
    suppressWarnings(file.remove(vcf_subset))
  }
  # Save LD matrix
  LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix,
                               subset_DT=subset_DT,
                               locus_dir=locus_dir,
                               subset_common = T,
                               sparse = T,
                               fillNA=fillNA,
                               LD_reference=LD_reference,
                               verbose=verbose)
  return(LD_list)
}
#' Calculate LD (D')
#'
#' Computes a long-format table of pairwise signed D' and r values (instead
#' of an r/r2 matrix) from a plink bed/bim/fam fileset.
#' See \code{\link{LD.run_plink_LD}} for a faster (but less flexible) alternative to computing LD.
#' @param SNP_list RSIDs to compute pairwise LD for (passed to --ld-snps).
#' @param LD_folder Folder containing the "plink" bed/bim/fam fileset.
#' @param conda_env Conda environment to search for the plink executable.
#' @return A data.table with columns SNP_A, SNP_B, DP (D') and R,
#' with incomplete rows removed.
#' @family LD
#' @keywords internal
LD.dprime_table <- function(SNP_list,
                            LD_folder,
                            conda_env){
  # BUG FIX: conda_env was previously passed positionally, where it was
  # interpreted as the `plink` executable path (first parameter) instead of
  # the conda environment name.
  plink <- LD.plink_file(conda_env = conda_env)
  printer("+ Creating DPrime table")
  system( paste(plink, "--bfile",file.path(LD_folder,"plink"),
                "--ld-snps", paste(SNP_list, collapse=" "),
                "--r dprime-signed",
                "--ld-window 10000000", # max out window size
                "--ld-window-kb 10000000",
                "--out",file.path(LD_folder,"plink")) )
  #--ld-window-r2 0
  plink.ld <- data.table::fread(file.path(LD_folder, "plink.ld"),
                                select = c("SNP_A", "SNP_B","DP","R"))
  # Drop rows with any missing values.
  plink.ld <- plink.ld[complete.cases(plink.ld) ]
  return(plink.ld)
}
#' Get LD using \pkg{snpStats} package
#'
#' Reads a plink bed/bim/fam fileset and computes pairwise LD statistics
#' between all selected SNPs with \code{snpStats::ld}.
#'
#' @param LD_folder Locus-specific LD output folder.
#' @param select.snps RSIDs to restrict the computation to (deduplicated and
#' intersected with the bim file first).
#' @param depth Either "max" (all pairwise comparisons) or an integer passed
#' through to \code{snpStats::ld}.
#' @return With a single statistic requested, the matrix returned by
#' \code{snpStats::ld}; with multiple statistics, only the \code{$R} element.
#' @inheritParams snpStats::ld
#' @family LD
#' @keywords internal
#' @source
#' \href{https://www.bioconductor.org/packages/release/bioc/html/snpStats.html}{snpStats Bioconductor page}
#' \href{https://www.bioconductor.org/packages/release/bioc/vignettes/snpStats/inst/doc/ld-vignette.pdf}{LD tutorial}
#' @examples
#' subset_DT <- data.table::fread("/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ABCA7/Multi-finemap/ABCA7.Kunkle_2019.1KGphase3_LD.Multi-finemap.tsv.gz")
#' LD_folder <- "/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ABCA7/LD"
#' LD_matrix <- LD.snpstats_get_LD(LD_folder=LD_folder, select.snps=subset_DT$SNP)
LD.snpstats_get_LD <- function(LD_folder,
                               plink_prefix="plink",
                               select.snps=NULL,
                               stats=c("R"),
                               symmetric=T,
                               depth="max",
                               nThread=4,
                               verbose=T){
  printer("LD:snpStats:: Computing LD",paste0("(stats = ",paste(stats,collapse=', '),")"),v=verbose)
  # select.snps= arg needed bc otherwise read.plink() sometimes complains of
  ## duplicate RSID rownames. Also need to check whether these SNPs exist in the plink files.
  ## (snpStats doesn't have very good error handling for these cases).
  select.snps <- LD.snpstats_ensure_nonduplicates(select.snps=select.snps,
                                                  LD_folder=LD_folder,
                                                  plink_prefix=plink_prefix,
                                                  nThread=nThread,
                                                  verbose=verbose)
  # Only need to give bed path (infers bin/fam paths)
  ss <- snpStats::read.plink(bed = file.path(LD_folder,plink_prefix),
                             select.snps = select.snps)
  # Compute LD from snpMatrix
  ld_list <- snpStats::ld(x = ss$genotypes,
                          y = ss$genotypes,
                          depth = if(depth=="max") ncol(ss$genotypes) else depth,
                          stats = stats,
                          symmetric = symmetric)
  # With one statistic, snpStats::ld returns the result directly;
  # with several, it returns a list and we keep only the R component.
  if(length(stats)==1) return(ld_list) else return(ld_list$R)
}
#' Sanitize a SNP selection against a plink bim file
#'
#' Removes duplicate RSIDs and any query SNPs absent from the bim file, so
#' that \code{snpStats::read.plink} does not fail on duplicated or missing
#' rownames. Returns \code{NULL} when nothing survives (or nothing was given).
#' @family LD
#' @keywords internal
LD.snpstats_ensure_nonduplicates <- function(select.snps=NULL,
                                             LD_folder,
                                             plink_prefix="plink",
                                             nThread=4,
                                             verbose=T){
  if(is.null(select.snps)) return(select.snps)
  bim_path <- file.path(LD_folder, paste0(plink_prefix, ".bim"))
  bim <- data.table::fread(bim_path,
                           col.names = c("CHR","SNP","V3","POS","A1","A2"),
                           stringsAsFactors = F,
                           nThread = nThread)
  printer("+ LD:snpStats::", nrow(bim), "rows in bim file.", v=verbose)
  # Deduplicate the bim's RSIDs, then keep only query SNPs present there.
  bim <- bim[!duplicated(bim$SNP),]
  select.snps <- select.snps[select.snps %in% unique(bim$SNP)]
  printer("+ LD:snpStats::", length(select.snps), "SNPs in select.snps.", v=verbose)
  if(length(select.snps)==0) NULL else unique(select.snps)
}
#' Get MAF using \pkg{snpStats} package
#'
#' Fills (or refreshes) the \code{MAF} column of \code{subset_DT} with minor
#' allele frequencies computed from the LD panel's plink files.
#'
#' @param LD_folder Locus-specific LD output folder.
#' @param force_new_MAF If \code{T}, recompute MAF even when the column
#' already exists (the old column is dropped before merging).
#' @return \code{subset_DT} merged (by SNP) with the panel-derived MAF; note
#' the merge keeps only SNPs present in both, so rows may be dropped.
#' @inheritParams snpStats::ld
#' @family LD
#' @keywords internal
#' @source
#' \href{https://www.bioconductor.org/packages/release/bioc/html/snpStats.html}{snpStats Bioconductor page}
LD.snpstats_get_MAF <- function(subset_DT,
                                LD_folder,
                                plink_prefix="plink",
                                force_new_MAF=F,
                                nThread=4,
                                verbose=T){
  if(!"MAF" %in% colnames(subset_DT) | force_new_MAF){
    printer("LD::snpStats:: Filling `MAF` column with MAF from LD panel.",v=verbose)
    # Deduplicate and validate SNP IDs against the bim before read.plink.
    select.snps <- LD.snpstats_ensure_nonduplicates(select.snps=subset_DT$SNP,
                                                    LD_folder=LD_folder,
                                                    plink_prefix=plink_prefix,
                                                    nThread=nThread,
                                                    verbose=F)
    ss <- snpStats::read.plink(bed = file.path(LD_folder,plink_prefix),
                               select.snps = select.snps)
    MAF_df <- data.frame(SNP=row.names(snpStats::col.summary(ss$genotypes)),
                         MAF=snpStats::col.summary(ss$genotypes)$MAF)
    # Drop any pre-existing MAF column so the merge doesn't duplicate it.
    if("MAF" %in% colnames(subset_DT)) subset_DT <- subset(subset_DT,select=-MAF)
    subset_merge <- data.table::merge.data.table(data.table::data.table(subset_DT),
                                                 data.table::data.table(MAF_df),
                                                 by="SNP")
    return(subset_merge)
  } else {
    printer("LD::snpStats:: `MAF` column already present.",v=verbose);
    return(subset_DT)
  }
}
#' Calculate LD (r or r2)
#'
#' This appriach computes and LD matrix of r or r2 (instead of D') from a vcf.
#' See \code{\link{LD.dprime_table}} for a slower (but more flexible) alternative to computing LD.
#' @param bim A bim file produced by \emph{plink}
#' @param LD_folder Locus-specific LD output folder.
#' @param r_format Whether to fill the matrix with \code{r} or \code{r2}.
#' @param extract_file Optional path to a file of RSIDs passed to plink's
#' --extract flag; \code{NULL} disables the flag entirely.
#' @return A square numeric matrix of pairwise correlations with the bim's
#' SNP rsIDs as dimnames.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("LRRK2")
#' LD_folder <- "/Users/schilder/Desktop/Fine_Mapping/Data/GWAS/Nalls23andMe_2019/LRRK2/plink/saved"
#' bim_path <- file.path(LD_folder, "plink.bim");
#' bim <- data.table::fread(bim_path, col.names = c("CHR","SNP","V3","POS","A1","A2"), stringsAsFactors = F)
#' bim <- subset(bim, SNP %in% LRRK2$SNP)
#' ld.bin <- file.path(LD_folder, paste0("plink",".ld.bin"))
#' SNPs <- data.table::fread(file.path(LD_folder,"SNPs.txt"), col.names = 'RSID')
#' bin.vector <- readBin(ld.bin, what = "numeric", n=length(SNPs$RSID)^2)
#' }
LD.run_plink_LD <- function(bim,
                            LD_folder,
                            plink_prefix="plink",
                            r_format="r",
                            extract_file=NULL){
  plink <- LD.plink_file()
  # METHOD 2 (faster, but less control over parameters. Most importantly, can't get Dprime)
  # When extract_file is NULL, both the flag and the path below are
  # zero-length and paste() drops them, so no --extract flag is emitted.
  system( paste(plink,
                "--bfile",file.path(LD_folder,plink_prefix),
                if(is.null(extract_file)) NULL else"--extract",extract_file,
                paste0("--",r_format," square bin"),
                "--out", file.path(LD_folder,plink_prefix)) )
  # Read the flat binary matrix back and restore its square shape/dimnames.
  ld.bin <- file.path(LD_folder, paste0(plink_prefix,".ld.bin"))
  bin.vector <- readBin(ld.bin, what = "numeric", n=length(bim$SNP)^2)
  ld.matrix <- matrix(bin.vector, nrow = length(bim$SNP), dimnames = list(bim$SNP, bim$SNP))
  return(ld.matrix)
}
#' Calculate LD
#'
#' Use \emph{plink} to calculate LD from a vcf.
#' @param leadSNP Lead SNP RSID. If \code{NULL}, inferred from the
#' \code{leadSNP} column of \code{subset_DT}.
#' @param subset_DT The locus subset of the full summary stats file.
#' @param bim_path Path to a plink \emph{.bim} file.
#' Defaults to \code{<LD_folder>/plink.bim}.
#' @param remove_excess_snps Drop bim SNPs that do not appear in \code{subset_DT}.
#' @param merge_by_RSID Merge bim and summary stats by RSID instead of CHR/POS.
#' @param LD_folder Locus-specific LD output folder.
#' @param min_r2 Minimum r2 with the lead SNP (\code{FALSE} to skip this filter).
#' @param min_Dprime Minimum D' with the lead SNP (\code{FALSE} to skip this filter).
#' @param remove_correlates SNP(s) whose correlates should be removed
#' (\code{FALSE} to skip this filter).
#' @param fillNA Value used to replace NAs in the LD matrix.
#' @param plink_prefix Prefix of the plink files within \code{LD_folder}.
#' @param verbose Print messages.
#' @param conda_env Conda environment to search for executables in.
#' @return A square LD matrix of r values (SNP x SNP).
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ACE"
#' LD_folder <- file.path(locus_dir,"LD")
#' ld.matrix <- LD.plink_LD(subset_DT=BST1, LD_folder=LD_folder)
#' }
LD.plink_LD <-function(leadSNP=NULL,
                       subset_DT,
                       bim_path=NULL,
                       remove_excess_snps=TRUE,
                       # IMPORTANT! keep this F
                       merge_by_RSID=FALSE,
                       LD_folder,
                       min_r2=FALSE,
                       min_Dprime=FALSE,
                       remove_correlates=FALSE,
                       fillNA=0,
                       plink_prefix="plink",
                       verbose=TRUE,
                       conda_env=NULL){
  # Dprime ranges from -1 to 1
  start <- Sys.time()
  if(is.null(leadSNP)) leadSNP <- subset(subset_DT, leadSNP)$SNP[1]
  # Read plink's variant annotation (.bim) file
  printer("++ Reading in BIM file...", v=verbose)
  if(is.null(bim_path)) bim_path <- file.path(LD_folder, "plink.bim");
  bim <- data.table::fread(bim_path,
                           col.names = c("CHR","SNP","V3","POS","A1","A2"),
                           stringsAsFactors = FALSE)
  if(remove_excess_snps){
    orig_n <- nrow(bim)
    if(merge_by_RSID){
      # NOTE(review): when merging by "SNP" the merged table has no "SNP.x"
      # column, so the subset below relies on merge_by_RSID staying FALSE
      # (as the signature comment instructs) — verify before enabling.
      bim.merged <- data.table::merge.data.table(bim,
                                                 subset_DT,
                                                 by=c("SNP"))
    } else {
      # Standardize format and merge by genomic coordinates
      bim.merged <- data.table::merge.data.table(dplyr::mutate(bim,
                                                               CHR=as.integer(gsub("chr","",CHR)),
                                                               POS=as.integer(POS)),
                                                 dplyr::mutate(subset_DT,
                                                               CHR=as.integer(gsub("chr","",CHR)),
                                                               POS=as.integer(POS)),
                                                 by=c("CHR","POS"))
    }
    bim <- subset(bim, SNP %in% bim.merged$SNP.x)
    printer("LD:PLINK:: Removing RSIDs that don't appear in locus subset:",orig_n,"==>",nrow(bim),"SNPs",v=verbose)
  }
  # Write the SNP list plink should restrict itself to
  extract_file <- file.path(LD_folder,"SNPs.txt")
  data.table::fwrite(subset(bim, select="SNP"),
                     extract_file, col.names = FALSE)
  printer("++ Calculating LD", v=verbose)
  ld.matrix <- LD.run_plink_LD(bim = bim,
                               LD_folder = LD_folder,
                               plink_prefix = plink_prefix,
                               extract_file = extract_file)
  # any() keeps these conditions scalar even when remove_correlates is a vector
  if(any(min_Dprime != FALSE) | any(min_r2 != FALSE) | any(remove_correlates != FALSE)){
    plink.ld <- LD.dprime_table(SNP_list = row.names(ld.matrix),
                                LD_folder,
                                conda_env=conda_env)
    # DPrime filter
    if(min_Dprime != FALSE){
      printer("+++ Filtering LD Matrix (min_Dprime): Removing SNPs with D' <=",min_Dprime,"for",leadSNP,"(lead SNP).", v=verbose)
      plink.ld <- subset(plink.ld, (SNP_A==leadSNP & DP>=min_Dprime) | (SNP_B==leadSNP & DP>=min_Dprime))
    } else{printer("+ min_Dprime == FALSE", v=verbose)}
    # R2 filter
    if(min_r2 != FALSE ){
      printer("+++ Filtering LD Matrix (min_r2): Removing SNPs with r <=",min_r2,"for",leadSNP,"(lead SNP).", v=verbose)
      r <- sqrt(min_r2) # PROBLEM: this doesn't give you r, which you need for SUSIE
      plink.ld <- subset(plink.ld, (SNP_A==leadSNP & R>=r) | (SNP_B==leadSNP & R>=r))
    } else{printer("+ min_r2 == FALSE", v=verbose)}
    # Correlates filter
    if(any(remove_correlates != FALSE)){
      # NOTE(review): remove_correlates is used both as the r2 threshold and
      # as the SNP list below; sqrt() will fail if a character vector of
      # RSIDs is supplied — confirm intended input type.
      r2_threshold <- remove_correlates# 0.2
      r <- sqrt(r2_threshold)
      printer("+++ Filtering LD Matrix (remove_correlates): Removing SNPs with R2 >=",r2_threshold,"for",paste(remove_correlates,collapse=", "),".", v=verbose)
      # BUG FIX: the negation previously applied only to the SNP_A clause,
      # which *kept* (rather than removed) SNP_B correlates.
      plink.ld <- subset(plink.ld, !((SNP_A %in% remove_correlates & R>=r) | (SNP_B %in% remove_correlates & R>=r)))
    } else{printer("+ remove_correlates == FALSE", v=verbose)}
    # Apply filters: keep only SNPs that survived at least one pairing
    A_list <- unique(plink.ld$SNP_A)
    B_list <- unique(plink.ld$SNP_B)
    snp_list <- unique(c(A_list, B_list))
    ld.matrix <- ld.matrix[row.names(ld.matrix) %in% snp_list, colnames(ld.matrix) %in% snp_list]
    ## Manually remove rare variant
    # ld.matrix <- ld.matrix[rownames(ld.matrix)!="rs34637584", colnames(ld.matrix)!="rs34637584"]
  }
  # !IMPORTANT!: Fill NAs (otherwise susieR will break)
  ld.matrix <- LD.fill_NA(LD_matrix = ld.matrix,
                          fillNA = fillNA,
                          verbose = verbose)
  end <- Sys.time()
  printer("+ LD matrix calculated in",round(as.numeric(end-start),2),"seconds.", v=verbose)
  return(ld.matrix)
}
#' Fill NAs in an LD matrix
#'
#' Trickier than it looks. Drops unnamed (".") rows/columns, replaces
#' missing values with \code{fillNA}, and removes duplicated SNP rows/columns.
#' @param LD_matrix A square LD matrix (matrix or data.frame) with SNP
#' RSIDs as row/column names.
#' @param fillNA Value used to replace \code{NA} entries.
#' Set to \code{NULL} to leave NAs untouched.
#' @param verbose Print messages.
#' @return A \code{data.frame} version of the filtered LD matrix.
#' @examples
#' \dontrun{
#' data("LD_matrix");
#' LD_matrix <- LD.fill_NA(LD_matrix)
#' }
LD.fill_NA <- function(LD_matrix,
                       fillNA=0,
                       verbose=F){
  printer("+ LD:: Removing unnamed rows/cols", v=verbose)
  # First, filter any rows/cols without names
  LD_matrix <- data.frame(LD_matrix)
  LD_matrix <- LD_matrix[rownames(LD_matrix)!=".", colnames(LD_matrix)!="."]
  if(!is.null(fillNA)){
    printer("+ LD:: Replacing NAs with",fillNA, v=verbose)
    if(sum(is.na(LD_matrix))>0){
      # BUG FIX: previously hard-coded to 0, ignoring the fillNA argument
      LD_matrix[is.na(LD_matrix)] <- fillNA
    }
  }
  # Check for duplicate SNPs (name-indexing keeps the first occurrence only)
  LD_matrix <- LD_matrix[row.names(LD_matrix)[!duplicated(row.names(LD_matrix))],
                         colnames(LD_matrix)[!duplicated(colnames(LD_matrix))]]
  return(LD_matrix)
}
#' Calculate LD blocks.
#'
#' Uses \emph{plink} to group highly correlated SNPs into LD blocks.
#'
#' @param LD_folder Locus-specific LD folder containing the plink bfiles
#' (prefix \code{plink}); the \emph{.blocks.det} output is written there too.
#' @param LD_block_size Passed to plink's \code{--blocks-inform-frac}.
#' @return A data.table of LD blocks (plink's \emph{.blocks.det} output).
#' @family LD
#' @keywords internal
LD.LD_blocks <- function(LD_folder,
                         LD_block_size=.7){
  printer("++ Calculating LD blocks...")
  # PLINK 1.07 LD: http://zzz.bwh.harvard.edu/plink/ld.shtml
  # PLINK 1.9 LD: https://www.cog-genomics.org/plink/1.9/ld
  # Defaults: --blocks-strong-lowci = 0.70, --blocks-strong-highci .90
  # Reducing "--blocks-inform-frac" is the only parameter that seems to make the block sizes larger
  plink <- LD.plink_file()
  system( paste(plink, "--bfile",file.path(LD_folder,"plink"),
                "--blocks no-pheno-req no-small-max-span --blocks-max-kb 100000",
                # "--blocks-strong-lowci .52 --blocks-strong-highci 1",
                "--blocks-inform-frac",LD_block_size," --blocks-min-maf 0 --out",file.path(LD_folder,"plink")) )
  # system( paste("plink", "--bfile plink --ld-snp-list snp_list.txt --r") )
  # BUG FIX: read the .blocks.det file from LD_folder (where --out wrote it),
  # not from the hard-coded "./plink_tmp" working directory.
  blocks <- data.table::fread(file.path(LD_folder,"plink.blocks.det"))
  return(blocks)
}
#' Identify the LD block in which the lead SNP resides
#'
#' Computes LD blocks via \code{\link{LD.LD_blocks}} and returns the
#' RSIDs belonging to the block that contains \code{leadSNP}.
#' @param leadSNP Lead SNP RSID.
#' @param LD_folder Locus-specific LD folder containing the plink bfiles.
#' @param LD_block_size Passed to plink's \code{--blocks-inform-frac}.
#' @return Character vector of RSIDs in the lead SNP's block.
#' @family LD
#' @keywords internal
LD.leadSNP_block <- function(leadSNP, LD_folder, LD_block_size=.7){
  printer("Returning lead SNP's block...")
  blocks <- LD.LD_blocks(LD_folder, LD_block_size)
  # Each block's SNPs are stored as a single "|"-delimited string
  snp_lists <- strsplit(blocks$SNPS, split = "[|]")
  # Keep only the block(s) whose SNP list contains the lead SNP
  block_snps <- unlist(lapply(snp_lists,
                              function(snps){
                                if(leadSNP %in% snps) snps
                              }))
  printer("Number of SNPs in LD block =", length(block_snps))
  return(block_snps)
}
# LD_clumping <- function(vcf_subset, subset_SS){
# # PLINK clumping: http://zzz.bwh.harvard.edu/plink/clump.shtml
# # Convert vcf to .map (beagle)
# ## https://www.cog-genomics.org/plink/1.9/data
# system(paste("plink", "--vcf",vcf_subset,"--recode beagle --out ./plink_tmp/plink"))
# # Clumping
# system("plink", "--file ./plink_tmp/plink.chr-8 --clump",subset_SS,"--out ./plink_tmp")
# }
# LD_with_leadSNP <- function(LD_matrix,
# LD_SNP){
# printer("LD Matrix dimensions:", paste(dim(LD_matrix), collapse=" x "))
# printer("Extracting LD subset for lead SNP:",LD_SNP)
# LD_sub <- subset(LD_matrix, select=LD_SNP) %>%
# data.table::as.data.table(keep.rownames = T) %>%
# `colnames<-`(c("SNP","r")) %>%
# dplyr::mutate(r2 = r^2)
# return(LD_sub)
# }
#
#
#' Find correlates of the lead GWAS/QTL SNP
#'
#' Annotates each SNP in \code{finemap_dat} with its correlation (\code{r})
#' and squared correlation (\code{r2}) with the lead SNP.
#' @param finemap_dat Fine-mapping summary stats containing \code{SNP}
#' and \code{leadSNP} columns.
#' @param LD_matrix LD matrix of r values (square matrix), or a long-format
#' data.frame with one column per SNP.
#' @param fillNA Value used to replace missing r/r2 after merging
#' (\code{FALSE} to skip).
#' @param LD_format One of \code{"matrix"}, \code{"df"}, or \code{"guess"}.
#' @param verbose Print messages.
#' @return \code{finemap_dat} with \code{r} and \code{r2} columns added.
LD.get_lead_r2 <- function(finemap_dat,
                           LD_matrix=NULL,
                           fillNA=0,
                           LD_format="matrix",
                           verbose=T){
  # Drop any pre-existing r/r2 columns so the merge below doesn't duplicate them.
  # any_of() tolerates the case where only one of the two columns is present.
  if(any(c("r","r2") %in% colnames(finemap_dat)) ){
    finemap_dat <- dplyr::select(finemap_dat, -dplyr::any_of(c("r","r2")))
  }
  LD_SNP <- unique(subset(finemap_dat, leadSNP==TRUE)$SNP)
  if(length(LD_SNP)>1){
    LD_SNP <- LD_SNP[1]
    # BUG FIX: warning() has no `v=` argument; it was being pasted into the message
    warning("More than one lead SNP found. Using only the first one: ", LD_SNP)
  }
  # Infer LD data format: square (or sparse symmetric) ==> matrix; otherwise long-format df
  if(LD_format=="guess"){
    LD_format <- if(is.null(LD_matrix) || nrow(LD_matrix)==ncol(LD_matrix) || inherits(LD_matrix, "dsCMatrix")) "matrix" else "df"
  }
  if(LD_format=="matrix"){
    if(is.null(LD_matrix)){
      printer("+ LD:: No LD_matrix detected. Setting r2=NA",v=verbose);
      dat <- finemap_dat
      dat$r2 <- NA
    } else {
      printer("+ LD:: LD_matrix detected. Coloring SNPs by LD with lead SNP.", v=verbose)
      # Take the lead SNP's column of r values and reshape to SNP/r/r2
      LD_sub <- LD_matrix[,LD_SNP] %>%
        data.table::as.data.table(keep.rownames = T) %>%
        `colnames<-`(c("SNP","r")) %>%
        dplyr::mutate(r2 = r^2) %>%
        data.table::as.data.table()
      dat <- data.table::merge.data.table(finemap_dat, LD_sub,
                                          by = "SNP",
                                          all.x = T)
    }
  }
  if(LD_format=="df"){
    LD_sub <- subset(LD_matrix, select=c("SNP",LD_SNP)) %>%
      `colnames<-`(c("SNP","r")) %>%
      dplyr::mutate(r2 = r^2) %>%
      data.table::as.data.table()
    dat <- data.table::merge.data.table(finemap_dat, LD_sub,
                                        by = "SNP",
                                        all.x = T)
  }
  if(fillNA!=F){
    dat$r <- tidyr::replace_na(dat$r, fillNA)
    dat$r2 <- tidyr::replace_na(dat$r2, fillNA)
  }
  return(dat)
}
#' Extract LD proxies from 1KGphase3
#'
#' Wrapper for \code{LDlinkR::LDproxy_batch}.
#' Easy to use but doesn't scale up well to many SNPs (takes way too long).
#' @param snp (Named) vector of RSIDs to query proxies for.
#' @param pop 1KG population code.
#' @param r2d Correlation metric to return ("r2" or "d").
#' @param min_corr Minimum correlation (on the \code{r2d} scale) required
#' to retain a proxy (\code{FALSE} to keep all).
#' @param save_dir If non-NULL, move LDlinkR's combined output file into
#' this directory and return the new path.
#' @param verbose Print messages.
#' @param token LDlink API token. Defaults to the \code{LDLINK_TOKEN}
#' environment variable, falling back to the legacy hard-coded token.
#' SECURITY: tokens should not be committed to source control; prefer the
#' environment variable.
#' @family LD
#' @source
#' \href{https://www.rdocumentation.org/packages/LDlinkR/versions/1.0.2}{website}
#' @examples
#' data("merged_DT")
#' lead.snps <- setNames(subset(merged_DT, leadSNP)$Locus, subset(merged_DT, leadSNP)$SNP)
#' proxies <- LDlinkR.LDproxy_batch(snp=lead.snps)
LDlinkR.LDproxy_batch <- function(snp,
                                  pop="CEU",
                                  r2d = "r2",
                                  min_corr=F,
                                  save_dir=NULL,
                                  verbose=T,
                                  token=Sys.getenv("LDLINK_TOKEN", unset="df4298d58dc4")){
  printer("LD:LDlinkR:: Retrieving proxies of",length(snp),"SNPs",v=verbose)
  res <- LDlinkR::LDproxy_batch(snp = snp,
                                pop = pop,
                                r2d = r2d,
                                append = T,
                                token = token)
  printer("+ LD:LDlinkR::",length(unique(res$RS_Number)),"unique proxies returned.",v=verbose)
  if(min_corr!=F){
    # Filter on the requested correlation column ("R2" or "D") directly,
    # instead of via eval(parse()).
    res <- subset(res, res[[toupper(r2d)]]>=min_corr)
    printer("+ LD:LDlinkR::",length(unique(res$RS_Number)),"remaining at",r2d," ≥",min_corr,v=verbose)
  }
  proxy_files <- 'combined_query_snp_list.txt' # Automatically named
  if(!is.null(save_dir)){
    # LDproxy_batch() saves all results as individual .txt files in the cwd by default.
    ## It's pretty dumb that they don't let you control if and where these are saved,
    ## so we have to do this manually afterwards.
    new_path <- file.path(save_dir,basename(proxy_files))
    out <- file.rename(proxy_files, new_path)
    return(new_path)
  }else{return(proxy_files)}
}
| /R/LD.R | permissive | alexMarCar/echolocatoR | R | false | false | 62,738 | r | # %%%%%%%%%%%%%%%%% #
####### LD #######
# %%%%%%%%%%%%%%%%% #
# * Nalls et al. 2018: Imputation Panel Notes
# + _"One of the limitations of this study is the use of multiple imputation panels, due to logistic constraints.
# Adding datasets from non-European populations would be helpful to further improve our granularity in association
# testing and ability to fine-map loci through integration of more variable LD signatures."_
# + _"Post-Chang 23andMe samples were imputed using a combination of Finch for phasing (an in-house developed fork of Beagle)
# and miniMac2 for imputation with all-ethnicity samples from the __September 2013 release of 1000 Genomes Phase1__
# as reference haplotypes."_
# + _"The Nalls et al . 2014 and Chang et al . 2017 samples were imputed with Minimac2 using
# __1000 Genomes phase 1 haplotypes__.
# All additional sample series except for the post-Chang et al . 2017 samples from 23andMe were imputed using the
# __Haplotype Reference Consortium (HRC)__ on the University of Michigan imputation server under default settings
# with Eagle v2.3 phasing based on reference panel HRC r1.1 2016"_
#' Procure an LD matrix for fine-mapping
#'
#' Calculate and/or query linkage disequilibrium (LD) from reference panels (UK Biobank, 1000 Genomes),
#' or user-supplied datasets.
#'
#' Options:
#' \itemize{
#' \item Download pre-computed LD matrix from UK Biobank.
#' \item Download raw vcf file from 1KG and compute LD on the fly.
#' \item Compute LD on the fly from a user-supplied vcf file.
#' \item Use a user-supplied pre-computed LD-matrix.
#' }
#'
#' @param subset_DT The locus subset of the full summary stats file.
#' @inheritParams finemap_pipeline
#' @return A symmetric LD matrix of pairwise \emph{r} values.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("BST1"); data("locus_dir");
#' locus_dir <- file.path("~/Desktop",locus_dir)
#' # BST1 <- limit_SNPs(500, BST1)
#'
#' # UK Biobank LD
#' LD_matrix <- LD.load_or_create(locus_dir=locus_dir, subset_DT=BST1, LD_reference="UKB")
#'
#' # 1000 Genomes
#' LD_matrix <- LD.load_or_create(locus_dir=locus_dir, subset_DT=BST1, LD_reference="1KGphase1", force_new_LD=T)
#'
#' # Local vcf file
#' LD_reference="~/Desktop/results/GWAS/Nalls23andMe_2019/BST1/LD/BST1.1KGphase1.vcf.gz"
#' LD_matrix <- LD.load_or_create(locus_dir=locus_dir, subset_DT=BST1, LD_reference=LD_reference, force_new_LD=T)
#' }
LD.load_or_create <- function(locus_dir,
                              subset_DT,
                              force_new_LD=F,
                              LD_reference="1KGphase1",
                              LD_genome_build="hg19",
                              superpopulation="EUR",
                              remote_LD=T,
                              download_method="direct",
                              vcf_folder=NULL,
                              # min_r2=0,
                              LD_block=F,
                              LD_block_size=.7,
                              # min_Dprime=F,
                              remove_correlates=F,
                              fillNA=0,
                              verbose=T,
                              server=F,
                              remove_tmps=T,
                              conda_env="echoR",
                              nThread=4){
  # Location where the LD matrix for this locus+reference is cached as .RDS
  RDS_path <- LD.get_rds_path(locus_dir=locus_dir,
                              LD_reference=LD_reference)
  # Dispatch on LD_reference: cached RDS > "UKB" > "1KGphase1/3" > vcf path
  if(file.exists(RDS_path) & force_new_LD==F){
    #### Import existing LD ####
    printer("+ LD:: Previously computed LD_matrix detected. Importing...", RDS_path, v=verbose)
    LD_matrix <- readSparse(LD_path = RDS_path,
                            convert_to_df = F)
    # LD_list bundles the summary stats (DT), LD matrix, and cache path
    LD_list <- list(DT=subset_DT,
                    LD=LD_matrix,
                    RDS_path=RDS_path)
  } else if(LD_reference=="UKB"){
    #### UK Biobank ####
    LD_list <- LD.UKBiobank(subset_DT = subset_DT,
                            locus_dir = locus_dir,
                            force_new_LD = force_new_LD,
                            chimera = server,
                            download_method = download_method,
                            fillNA = fillNA,
                            nThread = nThread,
                            return_matrix = T,
                            conda_env = conda_env,
                            remove_tmps = remove_tmps)
  } else if (LD_reference == "1KGphase1" |
             LD_reference == "1KGphase3") {
    #### 1000 Genomes ####
    LD_list <- LD.1KG(locus_dir = locus_dir,
                      subset_DT = subset_DT,
                      vcf_folder = vcf_folder,
                      LD_reference = LD_reference,
                      superpopulation = superpopulation,
                      remote_LD = remote_LD,
                      LD_block = LD_block,
                      LD_block_size = LD_block_size,
                      # min_Dprime = min_Dprime,
                      remove_correlates = remove_correlates,
                      fillNA = fillNA,
                      nThread = nThread,
                      conda_env = conda_env,
                      download_method = download_method)
  } else if (endsWith(tolower(LD_reference),".vcf") |
             endsWith(tolower(LD_reference),".vcf.gz")){
    #### Custom vcf ####
    LD_list <- LD.custom_panel(LD_reference=LD_reference,
                               LD_genome_build=LD_genome_build,
                               subset_DT=subset_DT,
                               locus_dir=locus_dir,
                               force_new_LD=force_new_LD,
                               # min_r2=min_r2,
                               # min_Dprime=min_Dprime,
                               # remove_correlates=remove_correlates,
                               fillNA=fillNA,
                               LD_block=LD_block,
                               LD_block_size=LD_block_size,
                               remove_tmps=remove_tmps,
                               nThread=nThread,
                               conda_env=conda_env,
                               verbose=verbose)
  } else {
    stop("LD:: LD_reference input not recognized. Please supply: '1KGphase1', '1KGphase3', 'UKB', or the path to a .vcf[.gz] file.")
  }
  # NOTE(review): each LD.* helper presumably returns the same
  # list(DT=..., LD=..., ...) structure as the cached branch — confirm.
  return(LD_list)
}
#' Construct the cache path for a locus LD matrix
#'
#' Builds \code{<locus_dir>/LD/<locus>.<reference>_LD.RDS}, where
#' \code{<locus>} is the basename of \code{locus_dir} and
#' \code{<reference>} is the basename of \code{LD_reference}.
#' @param locus_dir Locus-specific results directory.
#' @param LD_reference LD reference panel name (or file path).
#' @return Path to the RDS cache file.
#' @family LD
#' @keywords internal
LD.get_rds_path <- function(locus_dir,
                            LD_reference){
  rds_name <- paste0(basename(locus_dir), ".",
                     basename(LD_reference), "_LD.RDS")
  file.path(locus_dir, "LD", rds_name)
}
#' Filter LD
#'
#' @param LD_list A list with elements \code{DT} (summary stats) and
#' \code{LD} (LD matrix), as returned by \code{\link{LD.load_or_create}}.
#' @param remove_correlates Named vector mapping SNP RSIDs to r2 thresholds;
#' correlates of each SNP at/above its threshold are removed
#' (\code{FALSE} to skip).
#' @param min_r2 Minimum r2 with the lead SNP required to keep a SNP
#' (\code{0}/\code{FALSE} to skip).
#' @param verbose Print messages.
#' @return The filtered \code{LD_list} (common SNPs only).
#' @family LD
#' @keywords internal
#' @examples
#' data("BST1"); data("LD_matrix");
#' LD_list <- list(LD=LD_matrix, DT=BST1)
#' LD_list <- LD.filter_LD(LD_list, min_r2=.2)
LD.filter_LD <- function(LD_list,
                         remove_correlates=F,
                         min_r2=0,
                         verbose=F){
  printer("+ FILTER:: Filtering by LD features.", v=verbose)
  subset_DT <- LD_list$DT
  LD_matrix <- LD_list$LD
  if(any(remove_correlates!=F)){
    # remove_correlates <- c("rs76904798"=.2, "rs10000737"=.8)
    for(snp in names(remove_correlates)){
      thresh <- remove_correlates[[snp]]
      printer("+ FILTER:: Removing correlates of",snp,"at r2 ≥",thresh,v=verbose)
      if(snp %in% row.names(LD_matrix)){
        # r is compared against sqrt(threshold) since thresholds are on the r2 scale.
        # NOTE(review): for a base matrix, LD_matrix[snp,] drops to a named
        # vector, where colnames() would return NULL; this indexing presumably
        # relies on a dimension-preserving class (e.g. data.frame) — verify.
        correlates <- colnames(LD_matrix[snp,])[LD_matrix[snp,]>=sqrt(thresh)]
        LD_matrix <- LD_matrix[(!row.names(LD_matrix) %in% correlates),
                               (!colnames(LD_matrix) %in% correlates)]
      }
    }
  }
  if(min_r2!=0 & min_r2!=F){
    printer("+ FILTER:: Removing SNPs that don't correlate with lead SNP at r2 ≤",min_r2,v=verbose)
    # Align matrix and summary stats before locating the lead SNP
    LD_list <- subset_common_snps(LD_matrix = LD_matrix,
                                  finemap_dat = subset_DT,
                                  verbose = F)
    subset_DT <- LD_list$DT
    LD_matrix <- LD_list$LD
    lead.snp <- subset(subset_DT, leadSNP)$SNP[1]
    # Keep only SNPs whose |r| with the lead SNP meets the r2 threshold
    correlates <- colnames(LD_matrix[lead.snp,])[LD_matrix[lead.snp,]>=sqrt(min_r2)]
    LD_matrix <- LD_matrix[(row.names(LD_matrix) %in% correlates),
                           (colnames(LD_matrix) %in% correlates)]
  }
  # Final re-alignment so DT and LD describe the exact same SNP set
  LD_list <- subset_common_snps(LD_matrix = LD_matrix,
                                finemap_dat = subset_DT,
                                verbose = F)
  return(LD_list)
}
#' Compute LD from user-supplied vcf file
#'
#' @param LD_reference Path to a (bgzipped) vcf file to compute LD from.
#' @param fullSS_genome_build Genome build of the full summary stats.
#' @param LD_genome_build Genome build of the LD reference vcf.
#' @param subset_DT The locus subset of the full summary stats file.
#' @param locus_dir Locus-specific results directory.
#' @param force_new_LD Recompute the vcf subset/LD even if one already exists.
#' @param fillNA Value used to replace NAs in the LD matrix.
#' @param LD_block If TRUE, restrict the matrix to the lead SNP's LD block.
#' @param LD_block_size Passed to plink's \code{--blocks-inform-frac}.
#' @param remove_tmps Remove the intermediate vcf subset when done.
#' @param verbose Print messages.
#' @return A list with the LD matrix, filtered summary stats, and RDS path
#' (from \code{\link{LD.save_LD_matrix}}).
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' if(!"gaston" %in% row.names(installed.packages())){install.packages("gaston")}
#' data("BST1"); data("locus_dir")
#' LD_reference="~/Desktop/results/Reference/custom_panel_chr4.vcf"
#' LD_matrix <- LD.custom_panel(LD_reference=LD_reference, subset_DT=BST1, locus_dir=locus_dir)
#'
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/QTL/Microglia_all_regions/BIN1"
#' subset_DT <- data.table::fread(file.path(locus_dir,"Multi-finemap/BIN1.Microglia_all_regions.1KGphase3_LD.Multi-finemap.tsv.gz"))
#' LD_reference = "/sc/hydra/projects/pd-omics/glia_omics/eQTL/post_imputation_filtering/eur/filtered_variants/AllChr.hg38.sort.filt.dbsnp.snpeff.vcf.gz"
#' LD_matrix <- LD.custom_panel(LD_reference=LD_reference, subset_DT=BST1, locus_dir=locus_dir, LD_genome_build="hg38")
#' }
LD.custom_panel <- function(LD_reference,
                            fullSS_genome_build="hg19",
                            LD_genome_build="hg19",
                            subset_DT,
                            locus_dir,
                            force_new_LD=F,
                            min_r2=F,
                            # min_Dprime=F,
                            remove_correlates=F,
                            fillNA=0,
                            LD_block=F,
                            LD_block_size=.7,
                            remove_tmps=T,
                            nThread=4,
                            conda_env="echoR",
                            verbose=T){
  # NOTE(review): min_r2 / remove_correlates are accepted but never applied
  # anywhere in this function body.
  printer("LD:: Computing LD from local vcf file:",LD_reference)
  # Harmonize query coordinates with the panel's genome build (hg38 panels only)
  if(!LD_genome_build %in% c("hg19","GRCh37","grch37")){
    printer("LD:: LD panel in hg38. Handling accordingly.",v=verbose)
    if(!fullSS_genome_build %in% c("hg19","GRCh37","grch37")){
      ## If the query was originally in hg38,
      # that means it's already been lifted over to hg19.
      # So you can use the old stored POS.hg38 when the
      subset_DT <- subset_DT %>%
        dplyr::rename(POS.hg19=POS) %>%
        dplyr::rename(POS=POS.hg38)
    } else {
      ## If the query was originally in hg19,
      # that means no liftover was done.
      # So you need to lift it over now.
      subset_DT <- LIFTOVER(dat = subset_DT,
                            build.conversion = "hg19.to.hg38",
                            return_as_granges = F,
                            verbose = verbose)
    }
  }
  # bgzip-compress and tabix-index the reference vcf (if not done already)
  vcf_file <- LD.index_vcf(vcf_file=LD_reference,
                           force_new_index=F,
                           conda_env=conda_env,
                           verbose=verbose)
  # Make sure your query's chr format is the same as the vcf's chr format
  has_chr <- LD.determine_chrom_type_vcf(vcf_file = vcf_file)
  subset_DT <- dplyr::mutate(subset_DT,
                             CHR=if(has_chr) paste0("chr",gsub("chr","",CHR)) else gsub("chr","",CHR))
  # Extract just this locus from the vcf with tabix
  vcf_subset <- LD.query_vcf(subset_DT=subset_DT,
                             locus_dir=locus_dir,
                             LD_reference=LD_reference,
                             vcf_URL=LD_reference,
                             whole_vcf=F,
                             remove_original_vcf=F,
                             force_new_vcf=force_new_LD,
                             query_by_regions=F,
                             nThread=nThread,
                             conda_env=conda_env,
                             verbose=verbose)
  # Convert the vcf subset to plink bed/bim/fam for snpStats
  bed_bim_fam <- LD.vcf_to_bed(vcf.gz.subset = vcf_subset,
                               locus_dir = locus_dir,
                               plink_prefix = "plink",
                               verbose = verbose)
  # Calculate LD
  LD_matrix <- LD.snpstats_get_LD(LD_folder=file.path(locus_dir,"LD"),
                                  plink_prefix="plink",
                                  select.snps=unique(subset_DT$SNP),
                                  stats=c("R"),
                                  symmetric=T,
                                  depth="max",
                                  verbose=verbose)
  # Get MAF (if needed)
  subset_DT <- LD.snpstats_get_MAF(subset_DT=subset_DT,
                                   LD_folder=file.path(locus_dir,"LD"),
                                   plink_prefix="plink",
                                   force_new_MAF=F,
                                   verbose=verbose)
  # Filter out SNPs not in the same LD block as the lead SNP
  # Get lead SNP rsid
  leadSNP = subset(subset_DT, leadSNP==T)$SNP
  if(LD_block){
    # NOTE(review): "./plink_tmp" is a hard-coded working-dir path, whereas
    # the plink files above live under file.path(locus_dir,"LD") — verify.
    block_snps <- LD.leadSNP_block(leadSNP = leadSNP,
                                   LD_folder = "./plink_tmp",
                                   LD_block_size = LD_block_size)
    LD_matrix <- LD_matrix[row.names(LD_matrix) %in% block_snps, colnames(LD_matrix) %in% block_snps]
    LD_matrix <- LD_matrix[block_snps, block_snps]
  }
  # IMPORTANT! Remove large data.ld file after you're done with it
  if(remove_tmps){
    suppressWarnings(file.remove(vcf_subset))
  }
  # Save LD matrix
  LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix,
                               subset_DT=subset_DT,
                               locus_dir=locus_dir,
                               fillNA = fillNA,
                               LD_reference=gsub(".vcf|.gz","",LD_reference),
                               sparse = T,
                               verbose=verbose)
  return(LD_list)
}
#' Detect whether a vcf uses "chr"-prefixed chromosome names
#'
#' Reads a single variant from \code{vcf_file} (via \pkg{gaston}) and checks
#' whether its chromosome field contains a "chr" prefix.
#' @param vcf_file Path to a vcf file.
#' @param conda_env Unused; kept for call-site compatibility.
#' @param verbose Unused; kept for call-site compatibility.
#' @return Logical; \code{TRUE} if chromosomes are "chr"-prefixed.
#' @family LD
#' @keywords internal
LD.determine_chrom_type_vcf <- function(vcf_file,
                                        conda_env="echoR",
                                        verbose=T){
  # Only the first record is needed to determine the naming convention
  first_variant <- gaston::read.vcf(vcf_file, max.snps = 1, convert.chr = F)
  grepl("chr", first_variant@snps$chr[1])
}
#' Save LD_matrix
#'
#' Writes the LD matrix to its locus-specific \code{.RDS} cache path,
#' optionally restricting both the matrix and the summary stats to their
#' shared SNPs first.
#' @param LD_matrix LD matrix to save.
#' @param subset_DT The locus subset of the full summary stats file.
#' @param locus_dir Locus-specific results directory.
#' @param fillNA Value used to replace NAs (passed to \code{subset_common_snps}).
#' @param LD_reference LD reference panel name (basename used in the file name).
#' @param subset_common Restrict matrix and summary stats to shared SNPs first.
#' @param sparse Save as a sparse matrix (via \code{saveSparse}) instead of plain RDS.
#' @param verbose Print messages.
#' @return A list with the (possibly subsetted) \code{LD} matrix,
#' \code{DT} summary stats, and \code{RDS_path}.
#' @family LD
#' @keywords internal
#' @examples
#' data("BST1"); data("LD_matrix"); data("locus_dir");
#' LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix, subset_DT=BST1, locus_dir=file.path("~/Desktop",locus_dir), LD_reference="UKB")
#' LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix, subset_DT=BST1, locus_dir=file.path("~/Desktop",locus_dir), LD_reference="custom_vcf")
LD.save_LD_matrix <- function(LD_matrix,
                              subset_DT,
                              locus_dir,
                              fillNA=0,
                              LD_reference,
                              subset_common=T,
                              sparse=T,
                              verbose=T){
  RDS_path <- LD.get_rds_path(locus_dir = locus_dir,
                              LD_reference = basename(LD_reference))
  printer("+ LD:: Saving LD matrix ==>",RDS_path,v=verbose)
  if(subset_common){
    # Keep only SNPs present in BOTH the matrix and the summary stats
    common <- subset_common_snps(LD_matrix = LD_matrix,
                                 fillNA = fillNA,
                                 finemap_dat = subset_DT,
                                 verbose = F)
    LD_matrix <- common$LD
    subset_DT <- common$DT
  }
  mat_dims <- dim(LD_matrix)
  printer(mat_dims[1],"x",mat_dims[2],"LD_matrix",if(sparse) "(sparse)"else NULL, v=verbose)
  dir.create(dirname(RDS_path), showWarnings = F, recursive = T)
  if(sparse){
    saveSparse(LD_matrix = LD_matrix,
               LD_path = RDS_path,
               verbose = F)
  } else {
    saveRDS(LD_matrix, file = RDS_path)
  }
  list(LD=LD_matrix,
       DT=subset_DT,
       RDS_path=RDS_path)
}
#' Translate superpopulation acronyms
#'
#' Ensures a common ontology for synonymous superpopulation names
#' (e.g. "CAU" and "EUR" both map to "EUR").
#' @param superpopulation Three-letter superpopulation name(s).
#' @return Character vector of translated superpopulation codes.
#' @family LD
#' @keywords internal
LD.translate_population <- function(superpopulation){
  synonym_map <- list("AFA" = "AFR",
                      "CAU" = "EUR",
                      "HIS" = "AMR",
                      "AFR" = "AFR",
                      "EUR" = "EUR",
                      "AMR" = "AMR")
  # List-subsetting returns one element per requested code; flatten to character
  as.character(synonym_map[superpopulation])
}
#' Plot a subset of the LD matrix
#'
#' Uses \code{gaston} to plot a SNP-annotated LD matrix.
#' @inheritParams finemap_pipeline
#' @family LD
#' @keywords internal
#' @param LD_matrix LD matrix with SNP RSIDs as row/column names.
#' @param subset_DT The locus subset of the full summary stats file
#' (must contain \code{SNP}, \code{POS}, and \code{leadSNP} columns).
#' @param span This is very computationally intensive,
#' so you need to limit the number of SNPs with span.
#' If \code{span=10}, only 10 SNPs upstream and 10 SNPs downstream of the lead SNP will be plotted.
#' @param method Plotting backend: "gaston", "heatmap", or "image".
#' @examples
#' \dontrun{
#' data("BST1");
#' LD_matrix <- readRDS("/Volumes/Steelix/fine_mapping_files/GWAS/Nalls23andMe_2019/BST1/plink/UKB_LD.RDS")
#' LD.plot_LD(LD_matrix=LD_matrix, subset_DT=BST1)
#' }
LD.plot_LD <- function(LD_matrix,
                       subset_DT,
                       span=10,
                       method=c("gaston","heatmap","image")){
  leadSNP <- subset(subset_DT, leadSNP==TRUE)$SNP
  lead_index <- match(leadSNP, row.names(LD_matrix))
  # Window of `span` SNPs on either side of the lead SNP
  # NOTE(review): start_pos can fall below 1 (or end_pos beyond nrow) when
  # the lead SNP sits near the matrix edge — confirm callers guard this.
  start_pos <- lead_index - min(span, dim(LD_matrix)[1], na.rm = TRUE)
  end_pos <- lead_index + min(span, dim(LD_matrix)[1], na.rm = TRUE)
  sub_DT <- subset(subset_DT, SNP %in% rownames(LD_matrix))
  if(method[1]=="gaston"){
    gaston::LD.plot(LD = LD_matrix[start_pos:end_pos,
                                   start_pos:end_pos],
                    snp.positions = sub_DT$POS[start_pos:end_pos] )
  }
  # BUG FIX: the two branches below previously referenced an undefined
  # object `LD_sparse`; they now plot the supplied LD_matrix.
  if(method[1]=="heatmap"){
    heatmap(as.matrix(LD_matrix)[start_pos:end_pos,
                                 start_pos:end_pos])
  }
  if(method[1]=="image"){
    image(as.matrix(LD_matrix)[start_pos:end_pos,
                               start_pos:end_pos])
  }
}
#' Download vcf subset from 1000 Genomes
#'
#' @family LD
#' @keywords internal
#' @param query_by_regions You can make queries with \code{tabix} in two different ways:
#' \describe{
#' \item{\code{query_by_regions=F} \emph{(default)}}{Return a vcf with all positions between the min/max in \code{subset_DT} Takes up more storage but is MUCH faster}
#' \item{\code{query_by_regions=T}}{Return a vcf with only the exact positions present in \code{subset_DT}. Takes up less storage but is MUCH slower}
#' }
#' @param LD_reference Which 1000 Genomes release to use
#' ("1KGphase1" or "1KGphase3").
#' @param remote_LD Query the remote 1KG FTP server (TRUE) or local
#' copies of the vcfs under \code{vcf_folder} (FALSE).
#' @return A list with \code{vcf_subset} (path to the downloaded locus vcf)
#' and \code{popDat} (sample-to-superpopulation metadata).
#' @inheritParams finemap_pipeline
#' @examples
#' \dontrun{
#' data("BST1");
#' subset_DT <- BST1
#' vcf_subset.popDat <- LD.1KG_download_vcf(subset_DT=BST1, LD_reference="1KGphase1", locus_dir=file.path("~/Desktop",locus_dir))
#' }
LD.1KG_download_vcf <- function(subset_DT,
                                LD_reference="1KGphase1",
                                remote_LD=T,
                                vcf_folder=NULL,
                                locus_dir,
                                locus=NULL,
                                whole_vcf=F,
                                download_method="wget",
                                force_new_vcf=F,
                                query_by_regions=F,
                                nThread=4,
                                conda_env="echoR",
                                verbose=T){
  # throw error if anything but phase 1 or phase 3 are specified
  if( ! LD_reference %in% c("1KGphase1", "1KGphase3" )){
    stop("LD_reference must be one of \"1KGphase1\" or \"1KGphase3\" ")
  }
  # Don't use the 'chr' prefix for 1KG queries:
  ## https://www.internationalgenome.org/faq/how-do-i-get-sub-section-vcf-file/
  subset_DT$CHR <- gsub("chr","",subset_DT$CHR)
  chrom <- unique(subset_DT$CHR)
  # PHASE 3 DATA
  if(LD_reference=="1KGphase3"){
    FTP <- "ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20130502/"
    popDat <- echolocatoR::popDat_1KGphase3
    printer("LD Reference Panel = 1KGphase3", v=verbose)
    if(remote_LD){## With internet
      printer("+ LD:: Querying 1KG remote server.",v=verbose)
      vcf_URL <- paste0(FTP,"/ALL.chr",chrom,
                        ".phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz")
    }else{## WithOUT internet
      printer("+ LD:: Querying 1KG local vcf files.",v=verbose)
      vcf_URL <- paste(vcf_folder, "/ALL.chr",chrom,
                       ".phase3_shapeit2_mvncall_integrated_v5a.20130502.genotypes.vcf.gz",sep="")
    }
  # PHASE 1 DATA
  } else if (LD_reference=="1KGphase1") {
    FTP <- "ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20110521/"
    popDat <- echolocatoR::popDat_1KGphase1
    printer("LD Reference Panel = 1KGphase1", v=verbose)
    if(remote_LD){## With internet
      printer("+ LD:: Querying 1KG remote server.",v=verbose)
      vcf_URL <- paste(FTP,"/ALL.chr",chrom,
                       ".phase1_release_v3.20101123.snps_indels_svs.genotypes.vcf.gz", sep="")
    }else{## WithOUT internet
      printer("+ LD:: Querying 1KG local vcf files.",v=verbose)
      vcf_URL <- paste(vcf_folder,"/ALL.chr",chrom,
                       ".phase1_release_v3.20101123.snps_indels_svs.genotypes.vcf.gz", sep="")
    }
  }
  # (Removed unused locals `phase` and `use_header`, which were computed
  #  here but never referenced.)
  # Download and subset vcf if the subset doesn't exist already
  vcf_subset <- LD.query_vcf(subset_DT=subset_DT,
                             vcf_URL=vcf_URL,
                             locus_dir=locus_dir,
                             LD_reference=LD_reference,
                             whole_vcf=whole_vcf,
                             force_new_vcf=force_new_vcf,
                             download_method=download_method,
                             query_by_regions=query_by_regions,
                             nThread=nThread,
                             conda_env=conda_env,
                             verbose=verbose)
  return(list(vcf_subset = vcf_subset,
              popDat = popDat))
}
#' Get VCF storage folder
#'
#' Returns (and creates, if necessary) the \code{LD} subfolder of a
#' locus directory, where vcf subsets are stored.
#' @param locus_dir Locus-specific results directory.
#' @return Path to the \code{LD} folder.
#' @family LD
#' @keywords internal
#' @examples
#' data("locus_dir")
#' vcf_folder <- LD.get_locus_vcf_folder(locus_dir=locus_dir)
LD.get_locus_vcf_folder <- function(locus_dir=NULL){
  ld_folder <- file.path(locus_dir, "LD")
  dir.create(ld_folder, showWarnings = F, recursive = T)
  ld_folder
}
#' Construct the path to vcf subset
#'
#' Builds \code{<locus_dir>/LD/<locus>.<reference>[.vcf]} (or a
#' chromosome-level name when \code{whole_vcf=TRUE}), creating the
#' destination folder as a side effect.
#' @param subset_DT The locus subset of the full summary stats file.
#' @param LD_reference LD reference panel name or file path.
#' @param locus_dir Locus-specific results directory.
#' @param whole_vcf Name the file after the whole chromosome instead of the locus.
#' @return Path to the vcf subset file.
#' @family LD
#' @keywords internal
#' @examples
#' data("locus_dir"); data("BST1");
#' vcf_subset <- LD.construct_subset_vcf_name(subset_DT=BST1, locus_dir=locus_dir, LD_reference="1KGlocal")
LD.construct_subset_vcf_name <- function(subset_DT,
                                         LD_reference=NULL,
                                         locus_dir,
                                         whole_vcf=F){
  vcf_folder <- LD.get_locus_vcf_folder(locus_dir=locus_dir)
  # Don't use the chr prefix: https://www.internationalgenome.org/faq/how-do-i-get-sub-section-vcf-file/
  subset_DT$CHR <- gsub("chr","",subset_DT$CHR)
  chrom <- unique(subset_DT$CHR)
  if(whole_vcf){
    file_name <- paste(basename(LD_reference), paste0("chr",chrom), sep=".")
  } else {
    file_name <- paste(basename(locus_dir), basename(LD_reference), sep=".")
  }
  vcf_subset <- file.path(vcf_folder, file_name)
  dir.create(path = dirname(vcf_subset), recursive = T, showWarnings = F)
  # Guarantee a .vcf extension when one isn't already present
  if(!(endsWith(vcf_subset,".vcf.gz")|
       endsWith(vcf_subset,".vcf"))){
    vcf_subset <- paste0(vcf_subset,".vcf")
  }
  return(vcf_subset)
}
#' Index vcf file if it hasn't been already
#'
#' bgzip-compresses \code{vcf_file} (when uncompressed) and then
#' tabix-indexes it (when no \code{.tbi} exists, or when forced).
#' @param vcf_file Path to a vcf (or vcf.gz) file.
#' @param force_new_index Rebuild the tabix index even if one exists.
#' @param conda_env Conda environment to search for bgzip/tabix in.
#' @param verbose Print messages.
#' @return Path to the compressed (.gz) vcf file.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' LD_reference <- "~/Desktop/results/Reference/custom_panel_chr4.vcf.gz"
#' vcf_file <- LD.index_vcf(vcf_file=LD_reference)
#' }
LD.index_vcf <- function(vcf_file,
                         force_new_index=F,
                         conda_env="echoR",
                         verbose=T){
  # Step 1: bgzip-compress (tabix requires bgzip, not plain gzip)
  if(endsWith(vcf_file,".gz")){
    printer("+ LD::",vcf_file,"already compressed",v=verbose)
  } else {
    printer("+ LD:: Compressing vcf with bgzip",v=verbose)
    bgzip_ex <- CONDA.find_package(package="bgzip",
                                   conda_env=conda_env,
                                   verbose = verbose)
    bgzip_cmd <- paste(bgzip_ex, vcf_file)
    printer("++ LD::",bgzip_cmd,v=verbose)
    system(bgzip_cmd)
    vcf_file <- paste0(vcf_file,".gz")
  }
  # Step 2: tabix-index the compressed vcf
  index_exists <- file.exists(paste0(vcf_file,".tbi"))
  if(index_exists && !force_new_index){
    printer("+ LD::",vcf_file,"already indexed.",v=verbose)
  } else {
    printer("+ LD:: Indexing",vcf_file,v=verbose)
    tabix_ex <- CONDA.find_package(package="tabix",
                                   conda_env=conda_env,
                                   verbose = verbose)
    tabix_cmd <- paste(tabix_ex, "-fp vcf", vcf_file)
    printer("++ LD::",tabix_cmd,v=verbose)
    system(tabix_cmd)
  }
  return(vcf_file)
}
#' Query vcf file
#'
#' Extracts the locus region from \code{vcf_URL} with tabix (or downloads
#' the whole vcf), writing the result to the locus's LD folder.
#' Existing, non-empty subsets are reused unless \code{force_new_vcf=TRUE}.
#' @param subset_DT The locus subset of the full summary stats file.
#' @param vcf_URL URL or local path of the source vcf.
#' @param locus_dir Locus-specific results directory.
#' @param LD_reference LD reference panel name (used in the output file name).
#' @param whole_vcf Download the entire vcf instead of a tabix subset.
#' @param force_new_vcf Re-query even if a subset file already exists.
#' @param remove_original_vcf Delete the downloaded index of the source vcf.
#' @param query_by_regions Query each position individually (slower) instead
#' of the min/max coordinate range (much faster).
#' @param verbose Print messages.
#' @return Path to the vcf subset file.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("locus_dir"); data("BST1");
#' # Custom
#' LD_reference <- "~/Desktop/results/Reference/custom_panel_chr4.vcf"
#' vcf_file <- LD.index_vcf(vcf_file=LD_reference)
#' vcf_subset <- LD.query_vcf(subset_DT=BST1, locus_dir=locus_dir, vcf_URL=vcf_file, LD_reference=LD_reference, force_new_vcf=T)
#' }
LD.query_vcf <- function(subset_DT,
                         vcf_URL,
                         locus_dir,
                         LD_reference,
                         whole_vcf=F,
                         force_new_vcf=F,
                         remove_original_vcf=F,
                         download_method="wget",
                         query_by_regions=F,
                         nThread=4,
                         conda_env="echoR",
                         verbose=T){
  vcf_subset <- LD.construct_subset_vcf_name(subset_DT=subset_DT,
                                             locus_dir=locus_dir,
                                             LD_reference=LD_reference,
                                             whole_vcf=whole_vcf)
  # CHECK FOR EMPTY VCF FILES!
  ## These can be created if you stop the query early, or if the url fails.
  if(file.exists(vcf_subset)){
    if(file.size(vcf_subset) < 100){ # Less than 100 bytes
      printer("+ LD:: Removing empty vcf file and its index", v=verbose)
      # BUG FIX: file.remove() does not expand wildcards; Sys.glob() is
      # needed so the vcf AND its index (e.g. .tbi) are actually removed.
      file.remove(Sys.glob(paste0(vcf_subset,"*")))
    }
  }
  tabix <- CONDA.find_package(package = "tabix",
                              conda_env = conda_env,
                              verbose = verbose)
  if((!file.exists(vcf_subset)) | force_new_vcf){
    printer("LD:: Querying VCF subset", v=verbose)
    if(whole_vcf){
      # Download the full vcf rather than a tabix subset
      out.file <- downloader(input_url = vcf_URL,
                             output_path = dirname(vcf_subset),
                             download_method = download_method,
                             nThread = nThread,
                             conda_env=conda_env)
    } else {
      # Download tabix subset
      if(query_by_regions){
        ### Using region file (-R flag)
        regions.bed <- file.path(locus_dir,"LD","regions.tsv")
        data.table::fwrite(list(paste0(subset_DT$CHR), sort(subset_DT$POS)),
                           file=regions.bed, sep="\t")
        regions <- paste("-R",regions.bed)
        tabix_cmd <- paste(tabix,
                           "-fh",
                           "-p vcf",
                           vcf_URL,
                           gsub("\\./","",regions),
                           ">",
                           gsub("\\./","",vcf_subset) )
        printer(tabix_cmd)
        system(tabix_cmd)
      } else {
        ### Using coordinates range (MUCH faster!)
        coord_range <- paste0(unique(subset_DT$CHR)[1],":",
                              min(subset_DT$POS),"-",max(subset_DT$POS))
        tabix_cmd <- paste(tabix,
                           "-fh",
                           "-p vcf",
                           vcf_URL,
                           coord_range,
                           ">",
                           gsub("\\./","",vcf_subset) )
        printer(tabix_cmd)
        system(tabix_cmd)
      }
    }
    if(remove_original_vcf){
      # Clean up the .tbi index tabix fetched for the remote vcf
      vcf_name <- paste0(basename(vcf_URL), ".tbi")
      out <- suppressWarnings(file.remove(vcf_name))
    }
  } else {printer("+ Identified existing VCF subset file. Importing...", vcf_subset, v=verbose)}
  return(vcf_subset)
}
#' Filter a vcf by min/max coordinates
#'
#' Uses \emph{bcftools} to filter a vcf by min/max genomic coordinates (in basepairs).
#' @param vcf_subset Path to the locus subset vcf.
#' @param popDat The metadata file listing the superpopulation to which each sample belongs.
#' @param superpopulation Superpopulation whose samples should be retained.
#' @param remove_tmp Remove the uncompressed input vcf when done.
#' @param verbose Print messages.
#' @return Path to the sample-subsetted, bgzipped vcf.
#' @inheritParams finemap_pipeline
#' @family LD
#' @keywords internal
LD.filter_vcf <- function(vcf_subset,
                          popDat,
                          superpopulation,
                          remove_tmp=T,
                          verbose=T){
  # NOTE(review): bgzip/tabix/bcftools are invoked as bare commands here
  # (assumed on PATH), unlike other functions that resolve them via
  # CONDA.find_package() — verify this works in the target environment.
  vcf.gz <- paste0(vcf_subset,".gz")
  vcf.gz.subset <- gsub("_subset","_samples_subset",vcf.gz)
  # Compress vcf
  if(!file.exists(vcf.gz)){
    printer("LD:BCFTOOLS:: Compressing vcf file...", v=verbose)
    system(paste("bgzip -f",vcf_subset))
  }
  # Re-index vcf
  printer("LD:TABIX:: Re-indexing vcf.gz...", v=verbose)
  system(paste("tabix -f -p vcf",vcf.gz))
  # Subset samples: keep only individuals from the requested superpopulation
  selectedInds <- subset(popDat, superpop == superpopulation)$sample %>% unique()
  printer("LD:BCFTOOLS:: Subsetting vcf to only include",superpopulation,"individuals (",length(selectedInds), "/",length(popDat$sample%>%unique()),").", v=verbose)
  # bcftools writes to a temporary "tmp" file which is then moved into place
  cmd <- paste("bcftools view -s",paste(selectedInds, collapse=","), vcf.gz, "| bgzip > tmp && mv tmp",vcf.gz.subset)
  system(cmd)
  # Remove old vcf
  if(remove_tmp){out <- suppressWarnings(file.remove(vcf_subset))}
  return(vcf.gz.subset)
}
#' Subset a vcf by superpopulation
#'
#' Imports a vcf with \pkg{gaston}, keeps only the locus SNPs, writes
#' plink-format files, then restricts to the requested superpopulation's samples.
#' @inheritParams LD.filter_vcf
#' @family LD
#' @keywords internal
LD.filter_vcf_gaston <- function(vcf_subset,
                                 subset_DT,
                                 locus_dir,
                                 superpopulation,
                                 popDat,
                                 verbose=T){
  printer("+ Importing VCF as bed file...", v=verbose)
  vcf_bed <- gaston::read.vcf(vcf_subset, verbose = F)
  # Keep only SNPs present in the locus subset; drop unnamed (".") variants
  snp_bed <- gaston::select.snps(vcf_bed, id %in% subset_DT$SNP & id != ".")
  # Write plink files (all individuals) into the locus LD sub-directory
  dir.create(file.path(locus_dir, "LD"), recursive = T, showWarnings = F)
  gaston::write.bed.matrix(snp_bed, file.path(locus_dir, "LD/plink"), rds = NULL)
  # Restrict to individuals belonging to the requested superpopulation
  pop_samples <- subset(popDat, superpop == superpopulation)
  snp_bed <- gaston::select.inds(snp_bed, id %in% pop_samples$sample)
  # Free the full, unfiltered bed matrix
  remove(vcf_bed)
  return(snp_bed)
}
#' Convert vcf file to BED file
#'
#' Uses plink to convert a gzipped vcf into plink's .bed/.bim/.fam trio.
#' @param vcf.gz.subset Path to the gzipped locus subset vcf.
#' @param locus_dir Locus-specific results directory.
#' @param plink_prefix File name prefix for the plink output trio.
#' @param verbose Print status messages.
#' @return Named list with the \code{bed}, \code{bim} and \code{fam} file paths.
#' @family LD
#' @keywords internal
LD.vcf_to_bed <- function(vcf.gz.subset,
                          locus_dir,
                          plink_prefix="plink",
                          verbose=T){
  plink <- LD.plink_file()
  printer("LD:PLINK:: Converting vcf.gz to .bed/.bim/.fam", v=verbose)
  out_dir <- file.path(locus_dir, "LD")
  dir.create(out_dir, recursive = T, showWarnings = F)
  out_prefix <- file.path(out_dir, plink_prefix)
  system(paste(plink, "--vcf", vcf.gz.subset, "--out", out_prefix))
  # Return paths to the three plink files that --out produces
  paths <- lapply(c(bed = ".bed", bim = ".bim", fam = ".fam"),
                  function(ext) paste0(out_prefix, ext))
  return(paths)
}
#' Calculate LD
#'
#' Calculate a pairwise LD matrix from plink bed/bim/fam files using \emph{plink}.
#' @param locus_dir Locus-specific results directory.
#' @param ld_window Set --r/--r2 max variant ct pairwise distance (usu. 10).
#' @param ld_format Whether to produce an LD matrix with
#' r (\code{ld_format="r"}) or D' (\code{ld_format="D"}) as the pairwise SNP correlation metric.
#' @param plink_prefix File name prefix of the plink bed/bim/fam trio.
#' @param verbose Print status messages.
#' @return Path to the plink LD output file (binary for "r", table otherwise).
#' @family LD
#' @keywords internal
LD.calculate_LD <- function(locus_dir,
                            ld_window=1000, # 10000000
                            ld_format="r",
                            plink_prefix="plink",
                            verbose=T){
  plink <- LD.plink_file()
  printer("LD:PLINK:: Calculating LD ( r & D'-signed; LD-window =",ld_window,")", v=verbose)
  bfile <- file.path(locus_dir, "LD", plink_prefix)
  dir.create(file.path(locus_dir, "LD"), recursive = T, showWarnings = F)
  out_prefix <- paste0(bfile, ".r_dprimeSigned")
  if(ld_format == "r"){
    # Square binary matrix of r values
    plink_cmd <- paste(plink,
                       "--bfile", bfile,
                       "--r square bin",
                       "--out", out_prefix)
    ld.path <- paste0(out_prefix, ".ld.bin")
  } else {
    # Long-format table with signed D'
    plink_cmd <- paste(plink,
                       "--bfile", bfile,
                       "--r dprime-signed",
                       "--ld-window", ld_window,
                       "--ld-window-kb", ld_window,
                       "--out", out_prefix)
    ld.path <- paste0(out_prefix, ".ld")
  }
  system(plink_cmd)
  return(ld.path)
}
#' Create LD matrix from plink output
#'
#' Depending on which parameters you give \emph{plink} when calculating LD, you get different file outputs.
#' When it produces bin and bim files, use this function to create a proper LD matrix.
#' For example, this happens when you try to calculate D' with the \code{--r dprime-signed} flag (instead of just r).
#' @param LD_dir Directory that contains the bin/bim files.
#' @return Square numeric matrix of pairwise LD with SNP row/column names.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/QTL/Microglia_all_regions/BIN1"
#' ld.matrix <- LD.read_bin(LD_dir=file.path(locus_dir, "LD"))
#' }
LD.read_bin <- function(LD_dir){
  # The bim file supplies the SNP order used for rows/columns of the matrix
  snp_info <- data.table::fread(file.path(LD_dir, "plink.bim"),
                                col.names = c("CHR","SNP","V3","POS","A1","A2"))
  n_snps <- length(snp_info$SNP)
  # The .ld.bin file is a flat dump of the n x n double-precision matrix
  ld_values <- readBin(file.path(LD_dir, "plink.ld.bin"),
                       what = "numeric", n = n_snps^2)
  matrix(ld_values, nrow = n_snps, dimnames = list(snp_info$SNP, snp_info$SNP))
}
#' Create LD matrix from plink output.
#'
#' Depending on which parameters you give \emph{plink} when calculating LD, you get different file outputs.
#' When it produces an LD table, use this function to create a proper LD matrix.
#' @param ld.path Path to the long-format plink .ld table (must contain SNP_A, SNP_B, R columns).
#' @param snp.subset Optional character vector of RSIDs; when supplied, pairs are kept
#' if either SNP is in the subset. \code{FALSE} (default) keeps all pairs.
#' @param fillNA Value used to replace \code{NA} pairwise correlations (passed to \code{LD.fill_NA}).
#' @param verbose Print status messages.
#' @family LD
#' @keywords internal
LD.read_ld_table <- function(ld.path,
                             snp.subset=F,
                             fillNA=0,
                             verbose=T){
  # subset_DT <- data.table::fread(file.path(locus_dir,"Multi-finemap/Multi-finemap_results.txt")); snp.subset <- subset_DT$SNP
  ld.table <- data.table::fread(ld.path, nThread = 4)
  # any(snp.subset!=F): a character vector compares element-wise against FALSE, so any
  # non-"FALSE" string enables the subsetting branch
  if(any(snp.subset!=F)){
    printer("LD:PLINK:: Subsetting LD data...", v=verbose)
    ld.table <- subset(ld.table, SNP_A %in% snp.subset | SNP_B %in% snp.subset)
  }
  printer("LD:PLINK:: Casting data.matrix...", v=verbose)
  # Pivot long pair table -> wide SNP_B x SNP_A matrix of r values;
  # duplicate pairs are averaged, missing pairs filled with 0
  ld.cast <- data.table::dcast.data.table(ld.table,
                                          formula = SNP_B ~ SNP_A,
                                          value.var="R",
                                          fill=0,
                                          drop=T,
                                          fun.agg = function(x){mean(x,na.rm = T)})
  # Drop rows/column for unnamed SNPs (".").
  # NOTE(review): `select = -`.`` errors if no "." column exists in ld.cast — confirm
  # plink output always contains unnamed variants, or guard this.
  ld.cast <- subset(ld.cast, SNP_B !=".", select = -`.`)
  # NOTE(review): data.table() strips the row.names set by data.frame(), and the
  # retained character SNP_B column would coerce as.matrix() to a character matrix —
  # verify the resulting ld.mat has the expected rownames and numeric type.
  ld.mat <- data.frame(ld.cast, row.names = ld.cast$SNP_B) %>% data.table() %>% as.matrix()
  # ld.mat[1:10,1:10]
  # Replace NA correlations so downstream tools (e.g. susieR) don't break
  ld.mat <- LD.fill_NA(LD_matrix = ld.mat,
                       fillNA = fillNA,
                       verbose = verbose)
  return(ld.mat)
}
#' Find correct plink file
#'
#' Locates the plink executable: from a conda environment when one is given,
#' otherwise the OS-matched binary bundled with \pkg{echolocatoR}.
#' @param plink Existing plink path; the default "plink" triggers the bundled-binary lookup.
#' @param conda_env Optional conda environment to search for plink.
#' @return Path to the plink executable.
#' @family LD
#' @keywords internal
#' @examples
#' plink <- LD.plink_file()
LD.plink_file <- function(plink="plink",
                          conda_env=NULL){
  # Prefer a conda-installed plink when an environment is specified
  if(!is.null(conda_env)){
    plink <- CONDA.find_package("plink", conda_env = conda_env)
  }
  # Fall back to the OS-appropriate binary shipped with the package
  if(plink == "plink"){
    tool_dir <- system.file("tools/plink", package = "echolocatoR")
    binary <- switch(get_os(),
                     osx = "plink1.9_mac",
                     linux = "plink1.9_linux",
                     "plink1.9_windows.exe")  # default: Windows executable
    plink <- file.path(tool_dir, binary)
  }
  return(plink)
}
#' Compute LD from 1000 Genomes
#'
#' Downloads a subset vcf of the 1KG database that matches your locus coordinates.
#' Then uses \emph{plink} to calculate LD on the fly.
#'
#' This approach is taken, because other API query tools have limitations with the window size being queried.
#' This approach does not have this limitations, allowing you to fine-map loci more completely.
#'
#' @param locus_dir Locus-specific results directory (its basename is used as the locus name).
#' @param subset_DT Locus summary stats table (must contain an SNP column and a leadSNP flag).
#' @param LD_reference Which 1KG release to use (e.g. "1KGphase1").
#' @param superpopulation 1KG superpopulation whose samples are used to compute LD (e.g. "EUR").
#' @param vcf_folder Optional folder of locally stored vcf files.
#' @param remote_LD Query the vcf remotely rather than from a local copy.
#' @param LD_block If TRUE, restrict the LD matrix to the lead SNP's LD block.
#' @param LD_block_size Passed to plink's --blocks-inform-frac when computing LD blocks.
#' @param remove_correlates NOTE(review): accepted but never used in this function's body —
#' confirm whether it should be forwarded to a filtering step.
#' @param remove_tmps Delete the intermediate vcf subset when done.
#' @param fillNA When pairwise LD (r) between two SNPs is \code{NA}, replace with 0.
#' @inheritParams finemap_pipeline
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("BST1"); data("locus_dir");
#' BST1 <- limit_SNPs(max_snps = 500, subset_DT = BST1)
#' LD_matrix <- LD.1KG(locus_dir=file.path("~/Desktop",locus_dir), subset_DT=BST1, LD_reference="1KGphase1")
#'
#' ## Kunkle et al 2019
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ACE"
#'
#' }
LD.1KG <- function(locus_dir,
                   subset_DT,
                   LD_reference="1KGphase1",
                   superpopulation="EUR",
                   vcf_folder=NULL,
                   remote_LD=T,
                   # min_r2=F,
                   LD_block=F,
                   LD_block_size=.7,
                   # min_Dprime=F,
                   remove_correlates=F,
                   remove_tmps=T,
                   fillNA=0,
                   download_method="wget",
                   nThread=4,
                   conda_env="echoR",
                   verbose=T){
  # data("BST1"); data("locus_dir"); subset_DT=BST1; LD_reference="1KGphase1"; vcf_folder=NULL; superpopulation="EUR";
  # min_r2=F; LD_block=F; LD_block_size=.7; min_Dprime=F; remove_correlates=F; remote_LD=T; verbose=T; nThread=4; conda_env="echoR";
  printer("LD:: Using 1000Genomes as LD reference panel.", v=verbose)
  locus <- basename(locus_dir)
  # Step 1: download (or locate) the locus-coordinate vcf subset plus sample metadata
  vcf_info <- LD.1KG_download_vcf(subset_DT=subset_DT,
                                  locus_dir=locus_dir,
                                  LD_reference=LD_reference,
                                  vcf_folder=vcf_folder,
                                  locus=locus,
                                  remote_LD=remote_LD,
                                  download_method=download_method,
                                  nThread=nThread,
                                  conda_env=conda_env,
                                  verbose=verbose)
  vcf_subset <- vcf_info$vcf_subset
  popDat <- vcf_info$popDat
  # Step 2: keep only samples from the requested superpopulation
  vcf.gz.path <- LD.filter_vcf(vcf_subset = vcf_subset,
                               popDat = popDat,
                               superpopulation = superpopulation,
                               remove_tmp = T,
                               verbose = verbose)
  # Step 3: convert the filtered vcf to plink bed/bim/fam
  bed_bim_fam <- LD.vcf_to_bed(vcf.gz.subset = vcf.gz.path,
                               locus_dir = locus_dir,
                               verbose = verbose)
  # Calculate LD (pairwise r) from the plink files via snpStats
  LD_matrix <- LD.snpstats_get_LD(LD_folder=file.path(locus_dir,"LD"),
                                  plink_prefix="plink",
                                  select.snps=unique(subset_DT$SNP),
                                  stats=c("R"),
                                  symmetric=T,
                                  depth="max",
                                  verbose=verbose)
  # Get MAF (if needed); force_new_MAF=T replaces any existing MAF column
  subset_DT <- LD.snpstats_get_MAF(subset_DT=subset_DT,
                                   LD_folder=file.path(locus_dir,"LD"),
                                   plink_prefix="plink",
                                   force_new_MAF=T,
                                   nThread=nThread,
                                   verbose=verbose)
  # Get lead SNP rsid
  leadSNP = subset(subset_DT, leadSNP==T)$SNP
  # Filter out SNPs not in the same LD block as the lead SNP
  if(LD_block){
    # NOTE(review): the plink files were written to file.path(locus_dir,"LD") above,
    # but the block search is pointed at "LD/plink_tmp" — confirm these directories agree.
    block_snps <- LD.leadSNP_block(leadSNP = leadSNP,
                                   LD_folder = file.path(locus_dir,"LD","plink_tmp"),
                                   LD_block_size = LD_block_size)
    LD_matrix <- LD_matrix[row.names(LD_matrix) %in% block_snps, colnames(LD_matrix) %in% block_snps]
    LD_matrix <- LD_matrix[block_snps, block_snps]
  }
  # IMPORTANT! Remove large data.ld file after you're done with it
  if(remove_tmps){
    suppressWarnings(file.remove(vcf_subset))
  }
  # Save LD matrix (subset to SNPs shared with subset_DT; stored sparse)
  LD_list <- LD.save_LD_matrix(LD_matrix=LD_matrix,
                               subset_DT=subset_DT,
                               locus_dir=locus_dir,
                               subset_common = T,
                               sparse = T,
                               fillNA=fillNA,
                               LD_reference=LD_reference,
                               verbose=verbose)
  return(LD_list)
}
#' Calculate LD (D')
#'
#' Computes a long-format table of signed D' and r for the given SNPs using
#' \emph{plink}, then reads it back in with incomplete pairs removed.
#' See \code{\link{LD.run_plink_LD}} for a faster (but less flexible) alternative to computing LD.
#' @param SNP_list Character vector of RSIDs to compute pairwise LD for.
#' @param LD_folder Folder containing the plink bed/bim/fam files; output is written here too.
#' @param conda_env Conda environment in which to search for the plink executable.
#' @return A \code{data.table} with columns SNP_A, SNP_B, DP (D') and R.
#' @family LD
#' @keywords internal
LD.dprime_table <- function(SNP_list,
                            LD_folder,
                            conda_env){
  # BUG FIX: conda_env must be passed by name; positionally it was consumed as the
  # `plink` argument (the executable path) of LD.plink_file(), never as an env name.
  plink <- LD.plink_file(conda_env = conda_env)
  printer("+ Creating DPrime table")
  system( paste(plink, "--bfile",file.path(LD_folder,"plink"),
                "--ld-snps", paste(SNP_list, collapse=" "),
                "--r dprime-signed",
                "--ld-window 10000000", # max out window size
                "--ld-window-kb 10000000",
                "--out",file.path(LD_folder,"plink")) )
  #--ld-window-r2 0
  # Read back only the columns needed downstream and drop incomplete pairs
  plink.ld <- data.table::fread(file.path(LD_folder, "plink.ld"),
                                select = c("SNP_A", "SNP_B","DP","R"))
  plink.ld <- plink.ld[complete.cases(plink.ld) ]
  return(plink.ld)
}
#' Get LD using \pkg{snpStats} package
#'
#' Reads plink bed/bim/fam files and computes pairwise LD statistics with
#' \code{snpStats::ld}.
#' @param LD_folder Locus-specific LD output folder.
#' @param plink_prefix File name prefix of the plink trio.
#' @param select.snps RSIDs to restrict the computation to (validated against the bim file).
#' @param stats LD statistics to compute (e.g. "R").
#' @inheritParams snpStats::ld
#' @family LD
#' @keywords internal
#' @source
#' \href{https://www.bioconductor.org/packages/release/bioc/html/snpStats.html}{snpStats Bioconductor page}
#' \href{https://www.bioconductor.org/packages/release/bioc/vignettes/snpStats/inst/doc/ld-vignette.pdf}{LD tutorial}
#' @examples
#' subset_DT <- data.table::fread("/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ABCA7/Multi-finemap/ABCA7.Kunkle_2019.1KGphase3_LD.Multi-finemap.tsv.gz")
#' LD_folder <- "/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ABCA7/LD"
#' LD_matrix <- LD.snpstats_get_LD(LD_folder=LD_folder, select.snps=subset_DT$SNP)
LD.snpstats_get_LD <- function(LD_folder,
                               plink_prefix="plink",
                               select.snps=NULL,
                               stats=c("R"),
                               symmetric=T,
                               depth="max",
                               nThread=4,
                               verbose=T){
  printer("LD:snpStats:: Computing LD",paste0("(stats = ",paste(stats,collapse=', '),")"),v=verbose)
  # Deduplicate and validate the requested SNPs against the bim file first;
  # read.plink() fails ungracefully on duplicate RSID rownames or missing SNPs.
  select.snps <- LD.snpstats_ensure_nonduplicates(select.snps=select.snps,
                                                  LD_folder=LD_folder,
                                                  plink_prefix=plink_prefix,
                                                  nThread=nThread,
                                                  verbose=verbose)
  # Only the bed path is required; bim/fam paths are inferred from it
  plink_data <- snpStats::read.plink(bed = file.path(LD_folder, plink_prefix),
                                     select.snps = select.snps)
  genotypes <- plink_data$genotypes
  # "max" depth means every pairwise comparison
  max_depth <- if(depth == "max") ncol(genotypes) else depth
  ld_res <- snpStats::ld(x = genotypes,
                         y = genotypes,
                         depth = max_depth,
                         stats = stats,
                         symmetric = symmetric)
  # A single statistic returns the matrix directly; multiple stats return a list,
  # from which only R is extracted
  if(length(stats) == 1) ld_res else ld_res$R
}
#' Validate SNP selections against a plink bim file
#'
#' Drops duplicated RSIDs and any SNP absent from the reference panel's bim file,
#' returning \code{NULL} when nothing survives (or when nothing was requested).
#' @keywords internal
LD.snpstats_ensure_nonduplicates <- function(select.snps=NULL,
                                             LD_folder,
                                             plink_prefix="plink",
                                             nThread=4,
                                             verbose=T){
  # NULL means "no restriction": pass it straight through
  if(is.null(select.snps)) return(select.snps)
  bim_path <- file.path(LD_folder, paste0(plink_prefix, ".bim"))
  bim <- data.table::fread(bim_path,
                           col.names = c("CHR","SNP","V3","POS","A1","A2"),
                           stringsAsFactors = F,
                           nThread = nThread)
  printer("+ LD:snpStats::", nrow(bim), "rows in bim file.", v=verbose)
  # Remove duplicated RSIDs from the panel, then intersect with the request
  bim <- bim[!duplicated(bim$SNP),]
  select.snps <- select.snps[select.snps %in% unique(bim$SNP)]
  printer("+ LD:snpStats::", length(select.snps), "SNPs in select.snps.", v=verbose)
  # An empty selection is normalized to NULL for downstream read.plink()
  if(length(select.snps) == 0) NULL else unique(select.snps)
}
#' Get MAF using \pkg{snpStats} package
#'
#' Fills (or replaces) the MAF column of \code{subset_DT} with minor allele
#' frequencies computed from the LD reference panel's plink files.
#' @param subset_DT Locus summary stats table with an SNP column.
#' @param LD_folder Locus-specific LD output folder.
#' @param plink_prefix File name prefix of the plink bed/bim/fam trio.
#' @param force_new_MAF Recompute MAF even when a MAF column already exists.
#' @inheritParams snpStats::ld
#' @family LD
#' @keywords internal
#' @source
#' \href{https://www.bioconductor.org/packages/release/bioc/html/snpStats.html}{snpStats Bioconductor page}
LD.snpstats_get_MAF <- function(subset_DT,
                                LD_folder,
                                plink_prefix="plink",
                                force_new_MAF=F,
                                nThread=4,
                                verbose=T){
  if(!"MAF" %in% colnames(subset_DT) | force_new_MAF){
    printer("LD::snpStats:: Filling `MAF` column with MAF from LD panel.",v=verbose)
    # Validate SNPs against the bim file (dedupe + drop panel-absent RSIDs)
    select.snps <- LD.snpstats_ensure_nonduplicates(select.snps=subset_DT$SNP,
                                                    LD_folder=LD_folder,
                                                    plink_prefix=plink_prefix,
                                                    nThread=nThread,
                                                    verbose=F)
    ss <- snpStats::read.plink(bed = file.path(LD_folder,plink_prefix),
                               select.snps = select.snps)
    # NOTE(review): col.summary() is computed twice here; caching the result
    # in a local would halve the work.
    MAF_df <- data.frame(SNP=row.names(snpStats::col.summary(ss$genotypes)),
                         MAF=snpStats::col.summary(ss$genotypes)$MAF)
    # Drop any stale MAF column before merging the fresh one in
    if("MAF" %in% colnames(subset_DT)) subset_DT <- subset(subset_DT,select=-MAF)
    # Inner merge: SNPs absent from the reference panel are dropped from the result
    subset_merge <- data.table::merge.data.table(data.table::data.table(subset_DT),
                                                 data.table::data.table(MAF_df),
                                                 by="SNP")
    return(subset_merge)
  } else {
    printer("LD::snpStats:: `MAF` column already present.",v=verbose);
    return(subset_DT)
  }
}
#' Calculate LD (r or r2)
#'
#' This appriach computes and LD matrix of r or r2 (instead of D') from a vcf.
#' See \code{\link{LD.dprime_table}} for a slower (but more flexible) alternative to computing LD.
#' @param bim A bim file produced by \emph{plink}; its SNP column supplies the
#' matrix dimensions and row/column names.
#' @param LD_folder Locus-specific LD output folder.
#' @param plink_prefix File name prefix of the plink bed/bim/fam trio.
#' @param r_format Whether to fill the matrix with \code{r} or \code{r2}.
#' @param extract_file Optional path to a file of RSIDs passed to plink's --extract.
#' @family LD
#' @keywords internal
#' @examples
#' \dontrun{
#' data("LRRK2")
#' LD_folder <- "/Users/schilder/Desktop/Fine_Mapping/Data/GWAS/Nalls23andMe_2019/LRRK2/plink/saved"
#' bim_path <- file.path(LD_folder, "plink.bim");
#' bim <- data.table::fread(bim_path, col.names = c("CHR","SNP","V3","POS","A1","A2"), stringsAsFactors = F)
#' bim <- subset(bim, SNP %in% LRRK2$SNP)
#' ld.bin <- file.path(LD_folder, paste0("plink",".ld.bin"))
#' SNPs <- data.table::fread(file.path(LD_folder,"SNPs.txt"), col.names = 'RSID')
#' bin.vector <- readBin(ld.bin, what = "numeric", n=length(SNPs$RSID)^2)
#' }
LD.run_plink_LD <- function(bim,
                            LD_folder,
                            plink_prefix="plink",
                            r_format="r",
                            extract_file=NULL){
  plink <- LD.plink_file()
  # METHOD 2 (faster, but less control over parameters. Most importantly, can't get Dprime)
  # NOTE(review): when extract_file is NULL, both the flag expression and extract_file
  # itself are NULL/zero-length — confirm paste() drops them cleanly and no stray
  # whitespace reaches the shell command.
  system( paste(plink,
                "--bfile",file.path(LD_folder,plink_prefix),
                if(is.null(extract_file)) NULL else"--extract",extract_file,
                paste0("--",r_format," square bin"),
                "--out", file.path(LD_folder,plink_prefix)) )
  # plink writes a flat binary dump of the square LD matrix
  ld.bin <- file.path(LD_folder, paste0(plink_prefix,".ld.bin"))
  bin.vector <- readBin(ld.bin, what = "numeric", n=length(bim$SNP)^2)
  # Reshape into an n x n matrix, labelled by the bim file's SNP order
  ld.matrix <- matrix(bin.vector, nrow = length(bim$SNP), dimnames = list(bim$SNP, bim$SNP))
  return(ld.matrix)
}
#' Calculate LD
#'
#' Use \emph{plink} to calculate an LD (r) matrix from plink bed/bim/fam files,
#' with optional D'/r2-based filtering relative to the lead SNP.
#' @param leadSNP RSID of the lead SNP; inferred from \code{subset_DT$leadSNP} when NULL.
#' @param subset_DT Locus summary stats table (SNP, CHR, POS, leadSNP columns).
#' @param bim_path Path to the plink bim file; defaults to \code{LD_folder/plink.bim}.
#' @param remove_excess_snps Drop bim SNPs that are not present in \code{subset_DT}.
#' @param merge_by_RSID Match bim rows to subset_DT by RSID instead of CHR/POS.
#' @param LD_folder Locus-specific LD output folder.
#' @param min_r2 Minimum r2 with the lead SNP; FALSE disables the filter.
#' @param min_Dprime Minimum D' with the lead SNP; FALSE disables the filter.
#' @param remove_correlates Remove SNPs correlated with the given SNPs; FALSE disables.
#' @param fillNA Value used to replace NA pairwise correlations.
#' @family LD
#' @keywords internal
#' @examples
#' locus_dir <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/GWAS/Kunkle_2019/ACE"
#' LD_folder <- file.path(locus_dir,"LD")
#' ld.matrix <- LD.plink_LD(subset_DT=BST1, LD_folder=LD_folder)
LD.plink_LD <-function(leadSNP=NULL,
                       subset_DT,
                       bim_path=NULL,
                       remove_excess_snps=T,
                       # IMPORTANT! keep this F
                       merge_by_RSID=F,
                       LD_folder,
                       min_r2=F,
                       min_Dprime=F,
                       remove_correlates=F,
                       fillNA=0,
                       plink_prefix="plink",
                       verbose=T,
                       conda_env=NULL){
  # Dprime ranges from -1 to 1
  start <- Sys.time()
  # Default to the first SNP flagged as the lead in subset_DT
  if(is.null(leadSNP))leadSNP <- subset(subset_DT, leadSNP)$SNP[1]
  # Calculate LD
  printer("++ Reading in BIM file...", v=verbose)
  if(is.null(bim_path)) bim_path <- file.path(LD_folder, "plink.bim");
  bim <- data.table::fread(bim_path,
                           col.names = c("CHR","SNP","V3","POS","A1","A2"),
                           stringsAsFactors = F)
  if(remove_excess_snps){
    orig_n <- nrow(bim)
    if(merge_by_RSID){
      # NOTE(review): merging by "SNP" keeps the column named "SNP", not "SNP.x" —
      # the subset below on bim.merged$SNP.x would then match nothing. This is
      # presumably why the comment above says to keep merge_by_RSID FALSE; verify.
      bim.merged <- data.table::merge.data.table(bim,
                                                 subset_DT,
                                                 by=c("SNP"))
    } else {
      # Standardize CHR/POS formats (strip "chr" prefix, coerce to integer) and merge
      bim.merged <- data.table::merge.data.table(dplyr::mutate(bim,
                                                               CHR=as.integer(gsub("chr","",CHR)),
                                                               POS=as.integer(POS)),
                                                 dplyr::mutate(subset_DT,
                                                               CHR=as.integer(gsub("chr","",CHR)),
                                                               POS=as.integer(POS)),
                                                 by=c("CHR","POS"))
    }
    # SNP.x is the bim-side RSID produced by the CHR/POS merge above
    bim <- subset(bim, SNP %in% bim.merged$SNP.x)
    printer("LD:PLINK:: Removing RSIDs that don't appear in locus subset:",orig_n,"==>",nrow(bim),"SNPs",v=verbose)
  }
  # Write the surviving RSIDs for plink's --extract flag
  extract_file <- file.path(LD_folder,"SNPs.txt")
  data.table::fwrite(subset(bim, select="SNP"),
                     extract_file, col.names = F)
  printer("++ Calculating LD", v=verbose)
  ld.matrix <- LD.run_plink_LD(bim = bim,
                               LD_folder = LD_folder,
                               plink_prefix = plink_prefix,
                               extract_file = file.path(LD_folder,"SNPs.txt"))
  # Only compute the (slow) D' table when at least one filter is requested
  if((min_Dprime != F) | (min_r2 != F) | (remove_correlates != F)){
    plink.ld <- LD.dprime_table(SNP_list = row.names(ld.matrix),
                                LD_folder,
                                conda_env=conda_env)
    # DPrime filter: keep pairs involving the lead SNP with D' above threshold
    if(min_Dprime != F){
      printer("+++ Filtering LD Matrix (min_Dprime): Removing SNPs with D' <=",min_Dprime,"for",leadSNP,"(lead SNP).", v=verbose)
      plink.ld <- subset(plink.ld, (SNP_A==leadSNP & DP>=min_Dprime) | (SNP_B==leadSNP & DP>=min_Dprime))
    } else{printer("+ min_Dprime == FALSE", v=verbose)}
    # R2 filter
    if(min_r2 != F ){
      printer("+++ Filtering LD Matrix (min_r2): Removing SNPs with r <=",min_r2,"for",leadSNP,"(lead SNP).", v=verbose)
      r = sqrt(min_r2) # PROBLEM: this doesn't give you r, which you need for SUSIE
      plink.ld <- subset(plink.ld, (SNP_A==leadSNP & R>=r) | (SNP_B==leadSNP & R>=r))
    } else{printer("+ min_r2 == FALSE", v=verbose)}
    # Correlates filter
    # NOTE(review): remove_correlates is used both as the r2 threshold and as the
    # SNP list in %in% — these look like two different intents. Also, the second
    # clause is not inside the !(...) negation; confirm the intended predicate is
    # !((SNP_A %in% ... & R>=r) | (SNP_B %in% ... & R>=r)).
    if(remove_correlates != F){
      r2_threshold <- remove_correlates# 0.2
      r <- sqrt(r2_threshold)
      printer("+++ Filtering LD Matrix (remove_correlates): Removing SNPs with R2 >=",r2_threshold,"for",paste(remove_correlates,collapse=", "),".", v=verbose)
      plink.ld <- subset(plink.ld, !(SNP_A %in% remove_correlates & R>=r) | (SNP_B %in% remove_correlates & R>=r))
    } else{printer("+ remove_correlates == FALSE", v=verbose)}
    # Apply filters: restrict the matrix to SNPs that survived in either column
    A_list <- unique(plink.ld$SNP_A)
    B_list <- unique(plink.ld$SNP_B)
    snp_list <- unique(c(A_list, B_list))
    ld.matrix <- ld.matrix[row.names(ld.matrix) %in% snp_list, colnames(ld.matrix) %in% snp_list]
    ## Manually remove rare variant
    # ld.matrix <- ld.matrix[rownames(ld.matrix)!="rs34637584", colnames(ld.matrix)!="rs34637584"]
  }
  # !IMPORTANT!: Fill NAs (otherwise susieR will break)
  ld.matrix <- LD.fill_NA(LD_matrix = ld.matrix,
                          fillNA = fillNA,
                          verbose = verbose)
  end <- Sys.time()
  printer("+ LD matrix calculated in",round(as.numeric(end-start),2),"seconds.", v=verbose)
  return(ld.matrix)
}
#' Fill NAs in an LD matrix
#'
#' Replaces missing pairwise correlations with a constant and drops unnamed
#' (".") and duplicated rows/columns. Trickier than it looks.
#' @param LD_matrix A square LD matrix with SNP row/column names.
#' @param fillNA Value used to replace \code{NA} entries; \code{NULL} leaves NAs untouched.
#' @param verbose Print status messages.
#' @return A \code{data.frame} version of the LD matrix with NAs filled.
#' @examples
#' \dontrun{
#' data("LD_matrix");
#' LD_matrix <- LD.fill_NA(LD_matrix)
#' }
LD.fill_NA <- function(LD_matrix,
                       fillNA=0,
                       verbose=F){
  printer("+ LD:: Removing unnamed rows/cols", v=verbose)
  # First, filter any rows/cols without names (plink marks these as ".")
  LD_matrix <- data.frame(LD_matrix)
  LD_matrix <- LD_matrix[rownames(LD_matrix)!=".", colnames(LD_matrix)!="."]
  if(!is.null(fillNA)){
    printer("+ LD:: Replacing NAs with",fillNA, v=verbose)
    if(sum(is.na(LD_matrix))>0){
      # BUG FIX: NAs were previously hard-coded to 0, silently ignoring the
      # fillNA argument (and contradicting the message printed above).
      LD_matrix[is.na(LD_matrix)] <- fillNA
    }
  }
  # Check for duplicate SNPs: keep the first occurrence of each row/column name
  LD_matrix <- LD_matrix[row.names(LD_matrix)[!duplicated(row.names(LD_matrix))],
                         colnames(LD_matrix)[!duplicated(colnames(LD_matrix))]]
  return(LD_matrix)
}
#' Calculate LD blocks.
#'
#' Uses \emph{plink} to group highly correlated SNPs into LD blocks.
#' @param LD_folder Folder containing the plink bed/bim/fam files; plink writes
#' its block output (including \code{plink.blocks.det}) here too.
#' @param LD_block_size Passed to plink's \code{--blocks-inform-frac}; reducing
#' it is the main way to make blocks larger.
#' @return A \code{data.table} of the block definitions from \code{plink.blocks.det}.
#' @family LD
#' @keywords internal
LD.LD_blocks <- function(LD_folder,
                         LD_block_size=.7){
  printer("++ Calculating LD blocks...")
  # PLINK 1.07 LD: http://zzz.bwh.harvard.edu/plink/ld.shtml
  # PLINK 1.9 LD: https://www.cog-genomics.org/plink/1.9/ld
  # Estimate LD blocks
  # Defaults: --blocks-strong-lowci = 0.70, --blocks-strong-highci .90
  # Reducing "--blocks-inform-frac" is the only parameter that seems to make the block sizes larger
  plink <- LD.plink_file()
  system( paste(plink, "--bfile",file.path(LD_folder,"plink"),
                "--blocks no-pheno-req no-small-max-span --blocks-max-kb 100000",
                # "--blocks-strong-lowci .52 --blocks-strong-highci 1",
                "--blocks-inform-frac",LD_block_size," --blocks-min-maf 0 --out",file.path(LD_folder,"plink")) )
  # BUG FIX: read the block definitions from where plink actually wrote them
  # (the --out prefix above), instead of a hard-coded "./plink_tmp/" path that
  # only worked from a specific working directory.
  blocks <- data.table::fread(file.path(LD_folder, "plink.blocks.det"))
  return(blocks)
}
#' Identify the LD block in which the lead SNP resides
#' @family LD
#' @keywords internal
LD.leadSNP_block <- function(leadSNP, LD_folder, LD_block_size=.7){
  printer("Returning lead SNP's block...")
  # Compute all LD blocks, then keep only the block(s) containing the lead SNP
  blocks <- LD.LD_blocks(LD_folder, LD_block_size)
  # Each block's SNPS field is a "|"-delimited RSID list
  snp_lists <- strsplit(blocks$SNPS, split = "[|]")
  matching_blocks <- Filter(function(snps) leadSNP %in% snps, snp_lists)
  block_snps <- unlist(matching_blocks)
  printer("Number of SNPs in LD block =", length(block_snps))
  return(block_snps)
}
# LD_clumping <- function(vcf_subset, subset_SS){
# # PLINK clumping: http://zzz.bwh.harvard.edu/plink/clump.shtml
# # Convert vcf to .map (beagle)
# ## https://www.cog-genomics.org/plink/1.9/data
# system(paste("plink", "--vcf",vcf_subset,"--recode beagle --out ./plink_tmp/plink"))
# # Clumping
# system("plink", "--file ./plink_tmp/plink.chr-8 --clump",subset_SS,"--out ./plink_tmp")
# }
# LD_with_leadSNP <- function(LD_matrix,
# LD_SNP){
# printer("LD Matrix dimensions:", paste(dim(LD_matrix), collapse=" x "))
# printer("Extracting LD subset for lead SNP:",LD_SNP)
# LD_sub <- subset(LD_matrix, select=LD_SNP) %>%
# data.table::as.data.table(keep.rownames = T) %>%
# `colnames<-`(c("SNP","r")) %>%
# dplyr::mutate(r2 = r^2)
# return(LD_sub)
# }
#
#
#' Find correlates of the lead GWAS/QTL SNP
#'
#' Adds \code{r} and \code{r2} columns to \code{finemap_dat} giving each SNP's
#' LD with the (first) lead SNP. Any pre-existing r/r2 columns are replaced.
#' @param finemap_dat Fine-mapping results with SNP and leadSNP columns.
#' @param LD_matrix SNP x SNP LD matrix, or a data.frame with an SNP column when
#' \code{LD_format="df"}; NULL yields r2 = NA.
#' @param fillNA Value used to replace missing r/r2; FALSE leaves NAs in place.
#' @param LD_format One of "matrix", "df", or "guess".
#' @param verbose Print status messages.
#' @family LD
#' @keywords internal
LD.get_lead_r2 <- function(finemap_dat,
                           LD_matrix=NULL,
                           fillNA=0,
                           LD_format="matrix",
                           verbose=T){
  # Drop any pre-existing LD columns so they can be recomputed.
  # any_of() tolerates the case where only one of r/r2 is present
  # (select(-c(r, r2)) errors on a missing column).
  if(any(c("r","r2") %in% colnames(finemap_dat)) ){
    finemap_dat <- dplyr::select(finemap_dat, -dplyr::any_of(c("r","r2")))
  }
  LD_SNP <- unique(subset(finemap_dat, leadSNP==T)$SNP)
  if(length(LD_SNP)>1){
    LD_SNP <- LD_SNP[1]
    # BUG FIX: warning() has no `v` argument; the old call's v=verbose was
    # silently coerced into the message text (appending "TRUE").
    warning("More than one lead SNP found. Using only the first one: ", LD_SNP)
  }
  # Infer LD data format: square (or sparse symmetric) => matrix, otherwise long df
  if(LD_format=="guess"){
    LD_format <- if(nrow(LD_matrix)==ncol(LD_matrix) | class(LD_matrix)[1]=="dsCMatrix") "matrix" else "df"
  }
  if(LD_format=="matrix"){
    if(is.null(LD_matrix)){
      # NOTE(review): only r2 is set here (not r); the fillNA branch below then
      # references dat$r, which does not exist in this path — verify intent.
      printer("+ LD:: No LD_matrix detected. Setting r2=NA",v=verbose);
      dat <- finemap_dat
      dat$r2 <- NA
    } else {
      printer("+ LD:: LD_matrix detected. Coloring SNPs by LD with lead SNP.", v=verbose)
      # Extract the lead SNP's column of correlations and merge onto the results
      LD_sub <- LD_matrix[,LD_SNP] %>%
        data.table::as.data.table(keep.rownames = T) %>%
        `colnames<-`(c("SNP","r")) %>%
        dplyr::mutate(r2 = r^2) %>%
        data.table::as.data.table()
      dat <- data.table::merge.data.table(finemap_dat, LD_sub,
                                          by = "SNP",
                                          all.x = T)
    }
  }
  if(LD_format=="df"){
    # Long format: pick the SNP column plus the lead SNP's correlation column
    LD_sub <- subset(LD_matrix, select=c("SNP",LD_SNP)) %>%
      `colnames<-`(c("SNP","r")) %>%
      dplyr::mutate(r2 = r^2) %>%
      data.table::as.data.table()
    dat <- data.table::merge.data.table(finemap_dat, LD_sub,
                                        by = "SNP",
                                        all.x = T)
  }
  # Optionally replace NAs introduced by the left-join
  if(fillNA!=F){
    dat$r <- tidyr::replace_na(dat$r, fillNA)
    dat$r2 <- tidyr::replace_na(dat$r2, fillNA)
  }
  return(dat)
}
#' Extract LD proxies from 1KGphase3
#'
#' Wrapper for \code{LDlinkR::LDproxy_batch}.
#' Eeasy to use but doesn't scale up well to many SNPs (takes way too long).
#' @family LD
#' @source
#' \href{https://www.rdocumentation.org/packages/LDlinkR/versions/1.0.2}{website}
#' @examples
#' data("merged_DT")
#' lead.snps <- setNames(subset(merged_DT, leadSNP)$Locus, subset(merged_DT, leadSNP)$SNP)
#' proxies <- LDlinkR.LDproxy_batch(snp=lead.snps)
LDlinkR.LDproxy_batch <- function(snp,
pop="CEU",
r2d = "r2",
min_corr=F,
save_dir=NULL,
verbose=T){
printer("LD:LDlinkR:: Retrieving proxies of",length(snp),"SNPs",v=verbose)
res <- LDlinkR::LDproxy_batch(snp = snp,
pop = pop,
r2d = r2d,
append = T,
token = "df4298d58dc4")
printer("+ LD:LDlinkR::",length(unique(res$RS_Number)),"unique proxies returned.",v=verbose)
if(min_corr!=F){
res <- subset(res, eval(parse(text = toupper(r2d)))>=min_corr)
printer("+ LD:LDlinkR::",length(unique(res$RS_Number)),"remaining at",r2d," ≥",min_corr,v=verbose)
}
proxy_files <- 'combined_query_snp_list.txt' # Automatically named
if(!is.null(save_dir)){
# LDproxy_batch() saves all results as individual .txt files in the cwd by default.
## It's pretty dumb that they don't let you control if and where these are saved,
## so we have to do this manually afterwards.
# local_dir <- "~/Desktop"
# proxy_files <- list.files(path= "./",
# pattern = "^rs.*\\.txt$", full.names = T)
new_path <- file.path(save_dir,basename(proxy_files))
out <- file.rename(proxy_files, new_path)
return(new_path)
}else{return(proxy_files)}
}
|
# Functions file to be sourced within app later
options(stringsAsFactors = FALSE)
# Symbols offered by the app's selectize input.
# colClasses drops the first CSV column and keeps the second (Symbol) as read.
symbol_list_initial <- read.csv(file="CSVs/master.csv", colClasses=c("NULL",NA))
# Keep only non-missing, non-empty symbols; single-column [i, ] subsetting
# intentionally drops the data.frame down to a character vector.
valid_rows <- which(!is.na(symbol_list_initial$Symbol) & symbol_list_initial$Symbol != "")
symbol_list <- symbol_list_initial[valid_rows, ]
# Tickers treated as cryptocurrencies (fetched from coinmetrics.io instead of yahoo)
crypto_list <- c("btc","bch","ltc","eth","eos","ada","neo","etc",
                 "xem","dcr","zec","dash","doge","pivx","xmr","vtc","xvg",
                 "xrp","xlm","lsk","gas","dgb","btg","trx","icx","ppt","omg",
                 "bnb","snt","wtc","rep","zrx","veri","bat","knc","gnt","fun",
                 "gno","salt","ethos","icn","pay","mtl","cvc","ven","rhoc","ae",
                 "ant","btm","lrc","zil")
# Function for fetching data and constructing main portfolio table
#
# Builds a daily table comparing two hypothetical portfolios: `initial_investment`
# put into asset_1 vs asset_2 on the first shared date within the requested range.
# Crypto tickers (in crypto_list) are fetched from coinmetrics.io CSVs; anything
# else is fetched from the Yahoo Finance API via quantmod::getSymbols.
# Returns a data.frame with columns: date, <asset_1>, <asset_2>,
# <asset_1>_port_val, <asset_2>_port_val.
get_pair_data <- function(asset_1 = "eth",
                          asset_2 = "AMZN",
                          port_start_date = Sys.Date()-183,
                          port_end_date = Sys.Date()-3,
                          initial_investment=1000){
  # Getting the data for asset 1
  # If it's a crypto asset then get it from coinmetrics.io; else get it from yahoo API
  if(asset_1 %in% crypto_list == T){
    # create string to be used in URL call
    crypto_url_1 <- paste0("https://coinmetrics.io/data/",asset_1,".csv")
    # pull in data from web-hosted CSV
    asset_1_data <- fread(crypto_url_1)
    # drop all the data we don't need, keeping only date and price (columns 1 and 5)
    asset_1_data <- asset_1_data[,c(1,5)]
    # renaming columns for coherence
    names(asset_1_data) <- c("date",asset_1)
  } else {
    # loads data from yahoo api; auto.assign=FALSE keeps it from loading into global env
    asset_1_data <- as.data.frame(getSymbols(asset_1, src = "yahoo", auto.assign=FALSE))
    # rips out the datetime index and makes it a column named "rn"
    setDT(asset_1_data, keep.rownames = TRUE)[]
    # keeps only data index and closing price
    # NOTE(review): after keep.rownames, column 7 is the *adjusted* close in
    # quantmod's OHLCVA layout, not the raw close — confirm which is intended.
    asset_1_data <- asset_1_data[,c(1,7)]
    # changes column names to standardize naming convention
    names(asset_1_data) <- c("date",asset_1)
    # fills in weekend NULL prices with friday price (tidyr::fill, forward fill)
    asset_1_data <- asset_1_data %>% fill(paste(asset_1))
  }
  # Getting the data for asset 2
  # If it's a crypto asset then get it from coinmetrics.io; else get it from yahoo API
  if(asset_2 %in% crypto_list == T){
    # create string to be used in URL call
    crypto_url_2 <- paste0("https://coinmetrics.io/data/",asset_2,".csv")
    # pull in data from web-hosted CSV
    asset_2_data <- fread(crypto_url_2)
    # drop all the data we don't need, keeping only date and price
    asset_2_data <- asset_2_data[,c(1,5)]
    # renaming columns for coherence
    names(asset_2_data) <- c("date",asset_2)
  } else {
    # loads data from yahoo api; auto.assign=FALSE keeps it from loading into global env
    asset_2_data <- as.data.frame(getSymbols(asset_2, src = "yahoo", auto.assign=FALSE))
    # rips out the datetime index and makes it a column named "rn"
    setDT(asset_2_data, keep.rownames = TRUE)[]
    # keeps only data index and closing price
    asset_2_data <- asset_2_data[,c(1,7)]
    # changes column names to standardize naming convention
    names(asset_2_data) <- c("date",asset_2)
    # fills in weekend NULL prices with friday price
    asset_2_data <- asset_2_data %>% fill(paste(asset_2))
  }
  # first we need to get the most recent date for which we have data from BOTH assets
  # creates vector of both first dates for which we have data for both assets
  first_dates <- as.list(c(asset_1_data$date[1],asset_2_data$date[1]))
  # finds the max start date out of both
  # NOTE(review): dates here may still be character strings; pmax then compares
  # lexicographically, which is only correct for ISO "YYYY-MM-DD" format — confirm.
  start_date <- do.call(pmax, first_dates)
  # does a full join of both asset dataframes
  both_assets_data <- merge(x = asset_1_data, y = asset_2_data, by = "date", all = TRUE)
  # filters this by the most recent start date so that we have only complete data
  both_assets_data <- both_assets_data %>% filter(date >= start_date)
  # does a second FILL as a final check in case merge creates more nulls for some reason
  both_assets_data <- both_assets_data %>% fill(paste(asset_1))
  both_assets_data <- both_assets_data %>% fill(paste(asset_2))
  # implement the date filter
  both_assets_data <- both_assets_data %>%
    filter(between(as.Date(date), as.Date(port_start_date), as.Date(port_end_date)))
  # Now we get the portfolio values
  # First we need the market price for both assets at time of purchase (first row in range)
  asset_1_mp_at_purchase <- both_assets_data %>%
    select(noquote(asset_1)) %>%
    filter(row_number()==1)
  asset_2_mp_at_purchase <- both_assets_data %>%
    select(noquote(asset_2)) %>%
    filter(row_number()==1)
  # now we built the actual portfolio value over time columns:
  # value = investment * (price today / price at purchase)
  portfolio_data <- both_assets_data %>%
    mutate(
      asset_1_port_val = (initial_investment*(both_assets_data[,2])/asset_1_mp_at_purchase[1,1]),
      asset_2_port_val = (initial_investment*(both_assets_data[,3])/asset_2_mp_at_purchase[1,1])
    )
  # creating the strings with which to rename portoflio value columns
  asset_1_port_val_name = paste0(asset_1,"_port_val")
  asset_2_port_val_name = paste0(asset_2,"_port_val")
  # renaming portfolio values to make them readable
  names(portfolio_data)[4:5] <- c(asset_1_port_val_name, asset_2_port_val_name)
  return(portfolio_data)
}
# Build a two-row summary table (one row per asset) of portfolio performance.
# Expects the layout produced by get_pair_data(): columns 2-3 are asset prices,
# columns 4-5 are the corresponding portfolio values over time.
build_summary_table <- function(portfolio_data){
  # Per-portfolio statistics derived from one value-over-time column
  summarise_portfolio <- function(col){
    first_val <- as.numeric(head(col, 1))
    last_val <- as.numeric(tail(col, 1))
    list(max_worth = max(col),
         latest_worth = last_val,
         abs_profit = last_val - first_val,
         rate_of_return = ((last_val - first_val) / first_val) * 100)
  }
  stats_a <- summarise_portfolio(portfolio_data[4])
  stats_b <- summarise_portfolio(portfolio_data[5])
  # Assemble one column per metric, labelled with the asset (price column) names
  asset_summary_table <- data.frame(
    c(names(portfolio_data[2]), names(portfolio_data[3])),
    c(stats_a$max_worth, stats_b$max_worth),
    c(stats_a$latest_worth, stats_b$latest_worth),
    c(stats_a$abs_profit, stats_b$abs_profit),
    c(stats_a$rate_of_return, stats_b$rate_of_return)
  )
  colnames(asset_summary_table) <- c("Asset Names",
                                     "Asset Portfolio Max Worth",
                                     "Asset Portfolio Latest Worth",
                                     "Asset Portfolio Absolute Profit",
                                     "Asset Portfolio Rate of Return")
  return(asset_summary_table)
}
build_portfolio_perf_chart <- function(data, port_loess_param = 0.33){
  # Plotly chart of both assets' portfolio value over time: raw markers per
  # asset plus a LOESS-smoothed trend line in the same colour.
  #
  # Args:
  #   data: data.frame from get_pair_data(); column 1 = date, columns 4:5 =
  #     the two portfolio-value series (named "<asset>_port_val").
  #   port_loess_param: LOESS span; larger values give a smoother trend.
  # Returns: a plotly htmlwidget.
  port_tbl <- data[,c(1,4:5)]
  # grabbing the 2 asset names (strip the "_port_val" suffix)
  asset_name1 <- sub('_.*', '', names(port_tbl)[2])
  asset_name2 <- sub('_.*', '', names(port_tbl)[3])
  # transforms dates into correct type so smoothing can be done
  # (loess needs a numeric predictor, hence the numeric copy of the dates)
  port_tbl[,1] <- as.Date(port_tbl[,1])
  date_in_numeric_form <- as.numeric((port_tbl[,1]))
  # assigning loess smoothing parameter
  loess_span_parameter <- port_loess_param
  # now building the plotly itself
  # each asset contributes two traces: markers (hidden from the legend)
  # and the fitted loess line (shown in the legend)
  port_perf_plot <- plot_ly(data = port_tbl, x = ~port_tbl[,1]) %>%
    # asset 1 data plotted
    add_markers(y =~port_tbl[,2],
                marker = list(color = '#FC9C01'),
                name = asset_name1,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(port_tbl[,2] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#FC9C01'),
              name = asset_name1,
              showlegend = TRUE) %>%
    # asset 2 data plotted
    add_markers(y =~port_tbl[,3],
                marker = list(color = '#3498DB'),
                name = asset_name2,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(port_tbl[,3] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#3498DB'),
              name = asset_name2,
              showlegend = TRUE) %>%
    layout(
      title = FALSE,
      xaxis = list(type = "date",
                   title = "Date"),
      yaxis = list(title = "Portfolio Value ($)"),
      legend = list(orientation = 'h',
                    x = 0,
                    y = 1.15)) %>%
    # NOTE(review): empty paper-anchored annotation -- appears to be a
    # deliberate layout placeholder; confirm before removing.
    add_annotations(
      x= 1,
      y= 1.133,
      xref = "paper",
      yref = "paper",
      text = "",
      showarrow = F
    )
  return(port_perf_plot)
}
##################
get_portfolio_returns <- function(portfolio_data, period = "weekly"){
  # Compute per-period returns for both raw asset price series.
  #
  # Args:
  #   portfolio_data: data.frame from get_pair_data(); `date` column plus
  #     the two assets' price columns in positions 2:3.
  #   period: aggregation period passed to periodReturn (e.g. "daily",
  #     "weekly", "monthly").
  # Returns: a list of two single-column xts objects, one per asset, each
  #   column named "<asset>_returns".
  # grab the string names of the assets for labelling later
  asset_1_name_str <- names(portfolio_data[2])
  asset_2_name_str <- names(portfolio_data[3])
  # create the xts objects necessary to run the periodReturn function
  asset_1_xts <- xts(x=portfolio_data[,2], order.by=as.Date(portfolio_data$date))
  asset_2_xts <- xts(x=portfolio_data[,3], order.by=as.Date(portfolio_data$date))
  # get the returns of the assets over the chosen period
  asset_1_returns <- periodReturn(asset_1_xts, period = period)
  asset_2_returns <- periodReturn(asset_2_xts, period = period)
  # make the naming of the columns more intuitive
  names(asset_1_returns) <- paste0(asset_1_name_str,"_returns")
  names(asset_2_returns) <- paste0(asset_2_name_str,"_returns")
  # return a list of xts return series that can be referenced later
  asset_returns_list <- list(asset_1_returns, asset_2_returns)
  return(asset_returns_list)
}
get_sharpe_ratio_plot <- function(asset_returns_list, Rf = 0, p=0.95){
  # Grouped horizontal bar chart comparing the three Sharpe-ratio variants
  # (StdDev / VaR / ES rows produced by SharpeRatio -- presumably
  # PerformanceAnalytics; confirm) for the two assets.
  #
  # Args:
  #   asset_returns_list: list of two xts return series from
  #     get_portfolio_returns().
  #   Rf: risk-free rate forwarded to SharpeRatio.
  #   p: confidence level for the VaR/ES-based ratios.
  # Returns: a plotly htmlwidget.
  # calculating the sharpe ratios for each asset (rounded to 4th decimal)
  asset_1_sharp_ratios <- round(SharpeRatio(asset_returns_list[[1]], Rf = Rf, p=p), 4)
  asset_2_sharp_ratios <- round(SharpeRatio(asset_returns_list[[2]], Rf = Rf, p=p), 4)
  # adding intuitive names to tables
  # extra spaces injected into column names to facilitate plotly labelling later
  asset_1_sharp_ratio_df <- data.frame(metric = c("StdDev Sharpe ","VaR Sharpe ","ES Sharpe "),
                                       coredata(asset_1_sharp_ratios))
  asset_2_sharp_ratio_df <- data.frame(metric = c("StdDev Sharpe ","VaR Sharpe ","ES Sharpe "),
                                       coredata(asset_2_sharp_ratios))
  # explicitly clears pesky rownames
  rownames(asset_1_sharp_ratio_df) <- c()
  rownames(asset_2_sharp_ratio_df) <- c()
  # creating final sharpe ratio table (one row per metric, one column per asset)
  final_sharpe_ratio_table <- merge(asset_1_sharp_ratio_df, asset_2_sharp_ratio_df, by="metric")
  # drops now-unrelated label from names (drops everything after underscore)
  names(final_sharpe_ratio_table) <- sub("_.*", "", names(final_sharpe_ratio_table))
  # making the main sharpe ratio viz
  sharpe_ratio_plot <- plot_ly(data = final_sharpe_ratio_table,
                               x = ~final_sharpe_ratio_table[,2],
                               y = ~metric,
                               type = 'bar',
                               orientation = 'h',
                               name = names(final_sharpe_ratio_table[2]),
                               marker = list(color = '#FC9C01')) %>%
    add_trace(x = ~final_sharpe_ratio_table[,3],
              name = names(final_sharpe_ratio_table[3]),
              marker = list(color = '#3498DB')) %>%
    layout(
      title = FALSE,
      xaxis = list(title = "Sharpe Ratio"),
      yaxis = list(title = NA),
      margin = list(l = 125),
      legend = list(orientation = 'h',
                    x = 0,
                    y = 1.2)) %>%
    # NOTE(review): empty paper-anchored annotation -- appears to be a
    # deliberate layout placeholder; confirm before removing.
    add_annotations(
      x= 1,
      y= 1.16,
      xref = "paper",
      yref = "paper",
      text = "",
      showarrow = F
    )
  return(sharpe_ratio_plot)
}
build_asset_returns_plot <- function(asset_returns_list, asset_loess_param = 0.75){
  # Plotly chart of per-period returns for both assets: raw markers plus a
  # LOESS-smoothed trend line per asset; the y axis is formatted as percent.
  #
  # Args:
  #   asset_returns_list: list of two xts return series from
  #     get_portfolio_returns() (columns named "<asset>_returns").
  #   asset_loess_param: LOESS span; larger values give a smoother trend.
  # Returns: a plotly htmlwidget.
  # recover asset names by stripping the "_returns" suffix
  asset_1_name_str <- sub("_.*", "", names(asset_returns_list[[1]]))
  asset_2_name_str <- sub("_.*", "", names(asset_returns_list[[2]]))
  # convert each xts series to a plain (date, value) data frame
  asset_1_returns_df <- data.frame(date=index(asset_returns_list[[1]]), coredata(asset_returns_list[[1]]))
  asset_2_returns_df <- data.frame(date=index(asset_returns_list[[2]]), coredata(asset_returns_list[[2]]))
  total <- merge(asset_1_returns_df, asset_2_returns_df, by="date")
  # building the viz
  # preparing the data for smoothing (loess needs a numeric predictor)
  total[,1] <- as.Date(total[,1])
  date_in_numeric_form <- as.numeric((total[,1]))
  # picking smoothing parameter
  loess_span_parameter <- asset_loess_param
  asset_return_plot <- plot_ly(data = total, x = ~date) %>%
    # asset 1 data plotted
    add_markers(y =~total[,2],
                marker = list(color = '#FC9C01'),
                name = asset_1_name_str,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(total[,2] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#FC9C01'),
              name = asset_1_name_str,
              showlegend = TRUE) %>%
    # asset 2 data plotted
    add_markers(y =~total[,3],
                marker = list(color = '#3498DB'),
                name = asset_2_name_str,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(total[,3] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#3498DB'),
              name = asset_2_name_str,
              showlegend = TRUE) %>%
    layout(
      title = FALSE,
      xaxis = list(type = "date",
                   title = "Date"),
      yaxis = list(title = "Return on Investment (%)",
                   tickformat = "%"),
      legend = list(orientation = 'h',
                    x = 0,
                    y = 1.15)) %>%
    # NOTE(review): empty paper-anchored annotation -- appears to be a
    # deliberate layout placeholder; confirm before removing.
    add_annotations(
      x= 1,
      y= 1.133,
      xref = "paper",
      yref = "paper",
      text = "",
      showarrow = F
    )
  return(asset_return_plot)
}
| /Functions.R | permissive | DrRoad/financial-asset-comparison-tool | R | false | false | 14,109 | r | # Functions file to be sourced within app later
# Global option: keep strings as character, not factors, throughout the app.
options(stringsAsFactors = FALSE)
# used to trim the available options via the in-app selectize statement
# (colClasses drops the first column of master.csv; the Symbol column is
# read and filtered below)
symbol_list_initial <- read.csv(file="CSVs/master.csv", colClasses=c("NULL",NA))
# drop rows whose Symbol is missing or empty
symbol_list <- symbol_list_initial[!(is.na(symbol_list_initial$Symbol) | symbol_list_initial$Symbol==""), ]
# tickers routed to the coinmetrics.io fetcher inside get_pair_data();
# anything not in this list is treated as a Yahoo-finance symbol
crypto_list <- c("btc","bch","ltc","eth","eos","ada","neo","etc",
                 "xem","dcr","zec","dash","doge","pivx","xmr","vtc","xvg",
                 "xrp","xlm","lsk","gas","dgb","btg","trx","icx","ppt","omg",
                 "bnb","snt","wtc","rep","zrx","veri","bat","knc","gnt","fun",
                 "gno","salt","ethos","icn","pay","mtl","cvc","ven","rhoc","ae",
                 "ant","btm","lrc","zil")
# Function for fetching data and constructing main portfolio table
get_pair_data <- function(asset_1 = "eth",
                          asset_2 = "AMZN",
                          port_start_date = Sys.Date()-183,
                          port_end_date = Sys.Date()-3,
                          initial_investment=1000){
  # Fetch daily prices for two assets and build the main portfolio table.
  #
  # Args:
  #   asset_1, asset_2: tickers; anything in the module-level `crypto_list`
  #     is fetched from coinmetrics.io, everything else from the Yahoo API.
  #   port_start_date, port_end_date: inclusive date window of the portfolio.
  #   initial_investment: dollars hypothetically invested in EACH asset on
  #     the first day of the window.
  # Returns: data.frame with columns date, <asset_1>, <asset_2>,
  #   <asset_1>_port_val, <asset_2>_port_val.
  # Fetch one asset's daily close prices as a two-column (date, price)
  # table.  Factored out of the original copy-pasted per-asset branches.
  fetch_asset_data <- function(asset){
    if(asset %in% crypto_list){
      # crypto assets: pull the web-hosted CSV from coinmetrics.io
      crypto_url <- paste0("https://coinmetrics.io/data/",asset,".csv")
      asset_data <- fread(crypto_url)
      # drop all the data we don't need, keeping only date and price
      asset_data <- asset_data[,c(1,5)]
      names(asset_data) <- c("date",asset)
    } else {
      # equities: Yahoo API; auto.assign=FALSE keeps it out of the global env
      asset_data <- as.data.frame(getSymbols(asset, src = "yahoo", auto.assign=FALSE))
      # rip the datetime index out into a regular column
      setDT(asset_data, keep.rownames = TRUE)[]
      # keep only the date index and the closing price
      asset_data <- asset_data[,c(1,7)]
      names(asset_data) <- c("date",asset)
      # fill weekend/holiday gaps with the last traded price
      asset_data <- asset_data %>% fill(paste(asset))
    }
    asset_data
  }
  asset_1_data <- fetch_asset_data(asset_1)
  asset_2_data <- fetch_asset_data(asset_2)
  # align the two series: start at the most recent of the two first dates so
  # that we only keep rows where data exists for BOTH assets
  first_dates <- as.list(c(asset_1_data$date[1],asset_2_data$date[1]))
  start_date <- do.call(pmax, first_dates)
  # full join of both asset tables, then restrict to the common range
  both_assets_data <- merge(x = asset_1_data, y = asset_2_data, by = "date", all = TRUE)
  both_assets_data <- both_assets_data %>% filter(date >= start_date)
  # second FILL as a final guard against NAs introduced by the merge
  both_assets_data <- both_assets_data %>% fill(paste(asset_1))
  both_assets_data <- both_assets_data %>% fill(paste(asset_2))
  # implement the date filter
  both_assets_data <- both_assets_data %>%
    filter(between(as.Date(date), as.Date(port_start_date), as.Date(port_end_date)))
  # market price of each asset at (hypothetical) purchase time, i.e. the
  # first row inside the window
  asset_1_mp_at_purchase <- both_assets_data %>%
    select(noquote(asset_1)) %>%
    filter(row_number()==1)
  asset_2_mp_at_purchase <- both_assets_data %>%
    select(noquote(asset_2)) %>%
    filter(row_number()==1)
  # portfolio value over time = (units bought at purchase) * current price
  portfolio_data <- both_assets_data %>%
    mutate(
      asset_1_port_val = (initial_investment*(both_assets_data[,2])/asset_1_mp_at_purchase[1,1]),
      asset_2_port_val = (initial_investment*(both_assets_data[,3])/asset_2_mp_at_purchase[1,1])
    )
  # rename the portfolio-value columns to "<asset>_port_val" for readability
  asset_1_port_val_name <- paste0(asset_1,"_port_val")
  asset_2_port_val_name <- paste0(asset_2,"_port_val")
  names(portfolio_data)[4:5] <- c(asset_1_port_val_name, asset_2_port_val_name)
  return(portfolio_data)
}
build_summary_table <- function(portfolio_data){
  # Build the per-asset summary table shown in the app.
  #
  # Args:
  #   portfolio_data: data.frame from get_pair_data(); columns 2:3 are the
  #     raw asset price series (their column names are the asset names),
  #     columns 4:5 are the corresponding portfolio-value series.
  # Returns: a data.frame with one row per asset: max worth, latest worth,
  #   absolute profit, and rate of return (percent of the initial worth).
  # Reduce one portfolio-value column to the four summary statistics:
  # c(max, latest, absolute profit, percent rate of return).
  summarise_port_val <- function(port_val_column){
    values <- as.numeric(port_val_column)
    initial <- values[1]
    latest <- values[length(values)]
    c(max(values), latest, latest - initial, ((latest - initial)/initial)*100)
  }
  asset_1_stats <- summarise_port_val(portfolio_data[[4]])
  asset_2_stats <- summarise_port_val(portfolio_data[[5]])
  # one column per statistic, one row per asset
  asset_summary_table <- data.frame(
    c(names(portfolio_data)[2], names(portfolio_data)[3]),
    c(asset_1_stats[1], asset_2_stats[1]),
    c(asset_1_stats[2], asset_2_stats[2]),
    c(asset_1_stats[3], asset_2_stats[3]),
    c(asset_1_stats[4], asset_2_stats[4])
  )
  colnames(asset_summary_table) <- c("Asset Names",
                                     "Asset Portfolio Max Worth",
                                     "Asset Portfolio Latest Worth",
                                     "Asset Portfolio Absolute Profit",
                                     "Asset Portfolio Rate of Return")
  asset_summary_table
}
build_portfolio_perf_chart <- function(data, port_loess_param = 0.33){
  # Plotly chart of both assets' portfolio value over time: raw markers per
  # asset plus a LOESS-smoothed trend line in the same colour.
  #
  # Args:
  #   data: data.frame from get_pair_data(); column 1 = date, columns 4:5 =
  #     the two portfolio-value series (named "<asset>_port_val").
  #   port_loess_param: LOESS span; larger values give a smoother trend.
  # Returns: a plotly htmlwidget.
  port_tbl <- data[,c(1,4:5)]
  # grabbing the 2 asset names (strip the "_port_val" suffix)
  asset_name1 <- sub('_.*', '', names(port_tbl)[2])
  asset_name2 <- sub('_.*', '', names(port_tbl)[3])
  # transforms dates into correct type so smoothing can be done
  # (loess needs a numeric predictor, hence the numeric copy of the dates)
  port_tbl[,1] <- as.Date(port_tbl[,1])
  date_in_numeric_form <- as.numeric((port_tbl[,1]))
  # assigning loess smoothing parameter
  loess_span_parameter <- port_loess_param
  # now building the plotly itself
  # each asset contributes two traces: markers (hidden from the legend)
  # and the fitted loess line (shown in the legend)
  port_perf_plot <- plot_ly(data = port_tbl, x = ~port_tbl[,1]) %>%
    # asset 1 data plotted
    add_markers(y =~port_tbl[,2],
                marker = list(color = '#FC9C01'),
                name = asset_name1,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(port_tbl[,2] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#FC9C01'),
              name = asset_name1,
              showlegend = TRUE) %>%
    # asset 2 data plotted
    add_markers(y =~port_tbl[,3],
                marker = list(color = '#3498DB'),
                name = asset_name2,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(port_tbl[,3] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#3498DB'),
              name = asset_name2,
              showlegend = TRUE) %>%
    layout(
      title = FALSE,
      xaxis = list(type = "date",
                   title = "Date"),
      yaxis = list(title = "Portfolio Value ($)"),
      legend = list(orientation = 'h',
                    x = 0,
                    y = 1.15)) %>%
    # NOTE(review): empty paper-anchored annotation -- appears to be a
    # deliberate layout placeholder; confirm before removing.
    add_annotations(
      x= 1,
      y= 1.133,
      xref = "paper",
      yref = "paper",
      text = "",
      showarrow = F
    )
  return(port_perf_plot)
}
##################
get_portfolio_returns <- function(portfolio_data, period = "weekly"){
  # Compute per-period returns for both raw asset price series.
  #
  # Args:
  #   portfolio_data: data.frame from get_pair_data(); `date` column plus
  #     the two assets' price columns in positions 2:3.
  #   period: aggregation period passed to periodReturn (e.g. "daily",
  #     "weekly", "monthly").
  # Returns: a list of two single-column xts objects, one per asset, each
  #   column named "<asset>_returns".
  # grab the string names of the assets for labelling later
  asset_1_name_str <- names(portfolio_data[2])
  asset_2_name_str <- names(portfolio_data[3])
  # create the xts objects necessary to run the periodReturn function
  asset_1_xts <- xts(x=portfolio_data[,2], order.by=as.Date(portfolio_data$date))
  asset_2_xts <- xts(x=portfolio_data[,3], order.by=as.Date(portfolio_data$date))
  # get the returns of the assets over the chosen period
  asset_1_returns <- periodReturn(asset_1_xts, period = period)
  asset_2_returns <- periodReturn(asset_2_xts, period = period)
  # make the naming of the columns more intuitive
  names(asset_1_returns) <- paste0(asset_1_name_str,"_returns")
  names(asset_2_returns) <- paste0(asset_2_name_str,"_returns")
  # return a list of xts return series that can be referenced later
  asset_returns_list <- list(asset_1_returns, asset_2_returns)
  return(asset_returns_list)
}
get_sharpe_ratio_plot <- function(asset_returns_list, Rf = 0, p=0.95){
  # Grouped horizontal bar chart comparing the three Sharpe-ratio variants
  # (StdDev / VaR / ES rows produced by SharpeRatio -- presumably
  # PerformanceAnalytics; confirm) for the two assets.
  #
  # Args:
  #   asset_returns_list: list of two xts return series from
  #     get_portfolio_returns().
  #   Rf: risk-free rate forwarded to SharpeRatio.
  #   p: confidence level for the VaR/ES-based ratios.
  # Returns: a plotly htmlwidget.
  # calculating the sharpe ratios for each asset (rounded to 4th decimal)
  asset_1_sharp_ratios <- round(SharpeRatio(asset_returns_list[[1]], Rf = Rf, p=p), 4)
  asset_2_sharp_ratios <- round(SharpeRatio(asset_returns_list[[2]], Rf = Rf, p=p), 4)
  # adding intuitive names to tables
  # extra spaces injected into column names to facilitate plotly labelling later
  asset_1_sharp_ratio_df <- data.frame(metric = c("StdDev Sharpe ","VaR Sharpe ","ES Sharpe "),
                                       coredata(asset_1_sharp_ratios))
  asset_2_sharp_ratio_df <- data.frame(metric = c("StdDev Sharpe ","VaR Sharpe ","ES Sharpe "),
                                       coredata(asset_2_sharp_ratios))
  # explicitly clears pesky rownames
  rownames(asset_1_sharp_ratio_df) <- c()
  rownames(asset_2_sharp_ratio_df) <- c()
  # creating final sharpe ratio table (one row per metric, one column per asset)
  final_sharpe_ratio_table <- merge(asset_1_sharp_ratio_df, asset_2_sharp_ratio_df, by="metric")
  # drops now-unrelated label from names (drops everything after underscore)
  names(final_sharpe_ratio_table) <- sub("_.*", "", names(final_sharpe_ratio_table))
  # making the main sharpe ratio viz
  sharpe_ratio_plot <- plot_ly(data = final_sharpe_ratio_table,
                               x = ~final_sharpe_ratio_table[,2],
                               y = ~metric,
                               type = 'bar',
                               orientation = 'h',
                               name = names(final_sharpe_ratio_table[2]),
                               marker = list(color = '#FC9C01')) %>%
    add_trace(x = ~final_sharpe_ratio_table[,3],
              name = names(final_sharpe_ratio_table[3]),
              marker = list(color = '#3498DB')) %>%
    layout(
      title = FALSE,
      xaxis = list(title = "Sharpe Ratio"),
      yaxis = list(title = NA),
      margin = list(l = 125),
      legend = list(orientation = 'h',
                    x = 0,
                    y = 1.2)) %>%
    # NOTE(review): empty paper-anchored annotation -- appears to be a
    # deliberate layout placeholder; confirm before removing.
    add_annotations(
      x= 1,
      y= 1.16,
      xref = "paper",
      yref = "paper",
      text = "",
      showarrow = F
    )
  return(sharpe_ratio_plot)
}
build_asset_returns_plot <- function(asset_returns_list, asset_loess_param = 0.75){
  # Plotly chart of per-period returns for both assets: raw markers plus a
  # LOESS-smoothed trend line per asset; the y axis is formatted as percent.
  #
  # Args:
  #   asset_returns_list: list of two xts return series from
  #     get_portfolio_returns() (columns named "<asset>_returns").
  #   asset_loess_param: LOESS span; larger values give a smoother trend.
  # Returns: a plotly htmlwidget.
  # recover asset names by stripping the "_returns" suffix
  asset_1_name_str <- sub("_.*", "", names(asset_returns_list[[1]]))
  asset_2_name_str <- sub("_.*", "", names(asset_returns_list[[2]]))
  # convert each xts series to a plain (date, value) data frame
  asset_1_returns_df <- data.frame(date=index(asset_returns_list[[1]]), coredata(asset_returns_list[[1]]))
  asset_2_returns_df <- data.frame(date=index(asset_returns_list[[2]]), coredata(asset_returns_list[[2]]))
  total <- merge(asset_1_returns_df, asset_2_returns_df, by="date")
  # building the viz
  # preparing the data for smoothing (loess needs a numeric predictor)
  total[,1] <- as.Date(total[,1])
  date_in_numeric_form <- as.numeric((total[,1]))
  # picking smoothing parameter
  loess_span_parameter <- asset_loess_param
  asset_return_plot <- plot_ly(data = total, x = ~date) %>%
    # asset 1 data plotted
    add_markers(y =~total[,2],
                marker = list(color = '#FC9C01'),
                name = asset_1_name_str,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(total[,2] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#FC9C01'),
              name = asset_1_name_str,
              showlegend = TRUE) %>%
    # asset 2 data plotted
    add_markers(y =~total[,3],
                marker = list(color = '#3498DB'),
                name = asset_2_name_str,
                showlegend = FALSE) %>%
    add_lines(y = ~fitted(loess(total[,3] ~ date_in_numeric_form, span = loess_span_parameter)),
              line = list(color = '#3498DB'),
              name = asset_2_name_str,
              showlegend = TRUE) %>%
    layout(
      title = FALSE,
      xaxis = list(type = "date",
                   title = "Date"),
      yaxis = list(title = "Return on Investment (%)",
                   tickformat = "%"),
      legend = list(orientation = 'h',
                    x = 0,
                    y = 1.15)) %>%
    # NOTE(review): empty paper-anchored annotation -- appears to be a
    # deliberate layout placeholder; confirm before removing.
    add_annotations(
      x= 1,
      y= 1.133,
      xref = "paper",
      yref = "paper",
      text = "",
      showarrow = F
    )
  return(asset_return_plot)
}
|
# Scoping & environment exercises ("Hands-On Programming with R", ch. 6).
# NOTE(review): this is a sequential tutorial script -- roll/show_env/deal/
# shuffle/setup are deliberately redefined several times, and several calls
# mutate the global `deck` / `DECK` (a 52-row card data frame assumed to be
# defined earlier in the session -- confirm upstream).
roll <- function(){
  die <- 1:6
  dice <- sample(die, size = 2, replace = TRUE)
  sum(dice)
}
# show_env v1: report the runtime environment, its parent, and its locals.
show_env <- function(){
  list(ran.in = environment(),
       parent = parent.env(environment()),
       objects = ls.str(environment())
  )
}
show_env()
environment(show_env)
library(pryr)
environment(parenvs)
# show_env v2: same report, after creating three locals.
show_env <- function(){
  a <- 1
  b <- 2
  c <- 3
  list(ran.in = environment(),
       parent = parent.env(environment()),
       objects = ls.str(environment())
  )
}
foo <- "take me to your runtime"
# show_env v3: default argument is evaluated in the runtime environment.
show_env <- function(x = foo){
  list(ran.in = environment(),
       parent = parent.env(environment()),
       objects = ls.str(environment())
  )
}
# deal v1: read-only peek at the top card of the global deck.
deal <- function(){
  deck[1, ]
}
DECK <- deck
deck <- deck[-1, ]
head(deck, 3)
# deal v2: the local assignment does NOT update the global deck.
deal <- function(){
  card <- deck[1, ]
  deck <- deck[-1, ]
  card
}
# deal v3: remove the dealt card from the global deck via assign().
deal <- function(){
  card <- deck[1, ]
  assign("deck", deck[-1, ], envir = globalenv())
  card
}
deal()
# shuffle v1: returns a shuffled copy; the input is untouched.
shuffle <- function(cards){
  random <- sample(1:52, size = 52)
  cards[random, ]
}
head(deck, 3)
a <- shuffle(deck)
head(deck, 3) ## the deck itself was not shuffled
head(a, 3)
# shuffle v2: reset the global deck to a shuffled copy of the backup DECK.
shuffle <- function(){
  random <- sample(1:52, size = 52)
  assign("deck", DECK[random, ], envir = globalenv())
}
shuffle()
deal()
# setup v1: closures are created but never returned, so they are unusable.
setup <- function(deck){
  DECK <- deck
  DEAL <- function(){
    card <- deck[1, ]
    assign("deck", deck[-1, ], envir = globalenv())
    card
  }
  SHUFFLE <- function(){
    random <- sample(1:52, size = 52)
    assign("deck", DECK[random, ], envir = globalenv())
  }
}
##############################
# setup v2: return both closures, but they still write to the global env.
setup <- function(deck){
  DECK <- deck
  DEAL <- function(){
    card <- deck[1, ]
    assign("deck", deck[-1, ], envir = globalenv())
    card
  }
  SHUFFLE <- function(){
    random <- sample(1:52, size = 52)
    assign("deck", DECK[random, ], envir = globalenv())
  }
  list(deal = DEAL, shuffle = SHUFFLE)
}
cards <- setup(deck)
deal <- cards$deal
shuffle <- cards$shuffle
deal
shuffle
environment(deal)
environment(shuffle)
###################################
# setup v3: state lives in the closures' shared enclosing environment
# instead of the global environment.
setup <- function(deck){
  DECK <- deck
  DEAL <- function(){
    card <- deck[1, ]
    assign("deck", deck[-1, ], envir = parent.env(environment()))
    card
  }
  SHUFFLE <- function(){
    random <- sample(1:52, size = 52)
    assign("deck", DECK[random, ], envir = parent.env(environment()))
  }
  list(deal = DEAL, shuffle = SHUFFLE)
}
cards <- setup(deck)
deal <- cards$deal
shuffle <- cards$shuffle
shuffle()
deal()
) | /cap62.R | no_license | pedroceciliocn/scripts-hands-on-prog-r | R | false | false | 2,536 | r | roll <- function(){
die <- 1:6
dice <- sample(die, size = 2, replace = TRUE)
sum(dice)
}
# NOTE(review): sequential tutorial script ("Hands-On Programming with R",
# ch. 6) -- functions are deliberately redefined and the global `deck` /
# `DECK` is mutated by several calls.
# show_env v1: report the runtime environment, its parent, and its locals.
show_env <- function(){
  list(ran.in = environment(),
       parent = parent.env(environment()),
       objects = ls.str(environment())
  )
}
show_env()
environment(show_env)
library(pryr)
environment(parenvs)
# show_env v2: same report, after creating three locals.
show_env <- function(){
  a <- 1
  b <- 2
  c <- 3
  list(ran.in = environment(),
       parent = parent.env(environment()),
       objects = ls.str(environment())
  )
}
foo <- "take me to your runtime"
# show_env v3: default argument is evaluated in the runtime environment.
show_env <- function(x = foo){
  list(ran.in = environment(),
       parent = parent.env(environment()),
       objects = ls.str(environment())
  )
}
# deal v1: read-only peek at the top card of the global deck.
deal <- function(){
  deck[1, ]
}
DECK <- deck
deck <- deck[-1, ]
head(deck, 3)
# deal v2: the local assignment does NOT update the global deck.
deal <- function(){
  card <- deck[1, ]
  deck <- deck[-1, ]
  card
}
# deal v3: remove the dealt card from the global deck via assign().
deal <- function(){
  card <- deck[1, ]
  assign("deck", deck[-1, ], envir = globalenv())
  card
}
deal()
# shuffle v1: returns a shuffled copy; the input is untouched.
shuffle <- function(cards){
  random <- sample(1:52, size = 52)
  cards[random, ]
}
head(deck, 3)
a <- shuffle(deck)
head(deck, 3) ## the deck itself was not shuffled
head(a, 3)
# shuffle v2: reset the global deck to a shuffled copy of the backup DECK.
shuffle <- function(){
  random <- sample(1:52, size = 52)
  assign("deck", DECK[random, ], envir = globalenv())
}
shuffle()
deal()
# setup v1: closures are created but never returned, so they are unusable.
setup <- function(deck){
  DECK <- deck
  DEAL <- function(){
    card <- deck[1, ]
    assign("deck", deck[-1, ], envir = globalenv())
    card
  }
  SHUFFLE <- function(){
    random <- sample(1:52, size = 52)
    assign("deck", DECK[random, ], envir = globalenv())
  }
}
##############################
# setup v2: return both closures, but they still write to the global env.
setup <- function(deck){
  DECK <- deck
  DEAL <- function(){
    card <- deck[1, ]
    assign("deck", deck[-1, ], envir = globalenv())
    card
  }
  SHUFFLE <- function(){
    random <- sample(1:52, size = 52)
    assign("deck", DECK[random, ], envir = globalenv())
  }
  list(deal = DEAL, shuffle = SHUFFLE)
}
cards <- setup(deck)
deal <- cards$deal
shuffle <- cards$shuffle
deal
shuffle
environment(deal)
environment(shuffle)
###################################
# setup v3: state lives in the closures' shared enclosing environment
# instead of the global environment.
setup <- function(deck){
  DECK <- deck
  DEAL <- function(){
    card <- deck[1, ]
    assign("deck", deck[-1, ], envir = parent.env(environment()))
    card
  }
  SHUFFLE <- function(){
    random <- sample(1:52, size = 52)
    assign("deck", DECK[random, ], envir = parent.env(environment()))
  }
  list(deal = DEAL, shuffle = SHUFFLE)
}
cards <- setup(deck)
deal <- cards$deal
shuffle <- cards$shuffle
shuffle()
deal()
) |
# data
data <- rnorm(100,0,1)
# plot
layout(matrix(seq(2)),heights=c(0.7,0.3))
# c(bottom, left, top, right)
par(mar=c(0,4.1,4.1,2.1))
hist(data,breaks=50,freq=F,main="")
par(mar=c(0,4.1,0,2.1), mgp=c(3,0.5,0.0))
boxplot(data, horizontal=TRUE, axes=FALSE)
| /hist-and-box.R | no_license | xjyx/R-Scripts | R | false | false | 257 | r | # data
data <- rnorm(100,0,1)
# plot
layout(matrix(seq(2)),heights=c(0.7,0.3))
# c(bottom, left, top, right)
par(mar=c(0,4.1,4.1,2.1))
hist(data,breaks=50,freq=F,main="")
par(mar=c(0,4.1,0,2.1), mgp=c(3,0.5,0.0))
boxplot(data, horizontal=TRUE, axes=FALSE)
|
## A pair of functions:
## makeCacheMatrix - creates an object that represents a square invertible
## matrix, including a slot for a cached value of its
## inversion;
##
## cacheSolve - operates on an object returned by makeCacheMatrix, and
## inverts the matrix it represents (using the object's
## cached solution if it has one; calculating the
## solution and storing it in the cache if not).
# Create an abstract object representing a matrix.
#
# Argument:
# internalMatrix - optional. An R matrix to use as internal matrix. Empty
# matrix if not specified.
# Return value: an abstract object containing the following "methods":
# $set(matrix) - sets the internal R matrix
# $get - returns the internal R matrix
# $setCache(matrix) - sets the cached solution value to matrix
# $getCache - returns the cached solution value (NULL if it
# hasn't been set).
#
# All operations on the object assume that the internal matrix is square and
# invertible.
makeCacheMatrix <- function(internalMatrix = matrix()) {
    # Wrap a matrix together with a cache slot for its inverse.
    #
    # The returned object is a plain list of four closures sharing this
    # call's environment:
    #   $set(matrix)      - replace the wrapped matrix (invalidates cache)
    #   $get()            - return the wrapped matrix
    #   $setCache(matrix) - store a computed inverse
    #   $getCache()       - retrieve the stored inverse (NULL if unset)
    #
    # All operations assume the wrapped matrix is square and invertible.
    cachedSolution <- NULL  # NULL signals "inverse not computed yet"
    list(
        set = function(matrix) {
            internalMatrix <<- matrix
            cachedSolution <<- NULL  # any previously cached inverse is stale
        },
        get = function() internalMatrix,
        setCache = function(matrix) cachedSolution <<- matrix,
        getCache = function() cachedSolution
    )
}
# Solve an object of the kind returned by makeCacheMatrix, by taking its
# inverse.
#
# Argument:
# matrixObject - an object of the kind returned by makeCacheMatrix,
# representing a matrix (assumed to be square, invertible) and
# having the "methods" $set(matrix), $get(),
# $setCache(matrix), and $getcache().
# Return value: the inverse of the matrix represented by matrixObject.
cacheSolve <- function(matrixObject) {
    # get cached solution, if there is one yet
    # (NULL is the sentinel makeCacheMatrix uses for "not computed")
    solution <- matrixObject$getCache()
    # if there is a solution, notify user that output is cached, not computed.
    if(!is.null(solution)) {
        message("getting cached data")
    } else {
        # otherwise, calculate it
        # start by getting internal R matrix
        internalMatrix <- matrixObject$get()
        # calculate inverse
        solution <- solve(internalMatrix)
        # cache it in matrixObject for later use
        matrixObject$setCache(solution)
    }
    # return solution, whether it was cached or calculated here
    # (both branches above leave `solution` bound to the inverse)
    solution
} | /cachematrix.R | no_license | echristopherson/ProgrammingAssignment2 | R | false | false | 3,136 | r | ## A pair of functions:
## makeCacheMatrix - creates an object that represents a square invertible
## matrix, including a slot for a cached value of its
## inversion;
##
## cacheSolve - operates on an object returned by makeCacheMatrix, and
## inverts the matrix it represents (using the object's
## cached solution if it has one; calculating the
## solution and storing it in the cache if not).
# Create an abstract object representing a matrix.
#
# Argument:
# internalMatrix - optional. An R matrix to use as internal matrix. Empty
# matrix if not specified.
# Return value: an abstract object containing the following "methods":
# $set(matrix) - sets the internal R matrix
# $get - returns the internal R matrix
# $setCache(matrix) - sets the cached solution value to matrix
# $getCache - returns the cached solution value (NULL if it
# hasn't been set).
#
# All operations on the object assume that the internal matrix is square and
# invertible.
makeCacheMatrix <- function(internalMatrix = matrix()) {
    # Wrap a matrix together with a cache slot for its inverse.
    #
    # The returned object is a plain list of four closures sharing this
    # call's environment:
    #   $set(matrix)      - replace the wrapped matrix (invalidates cache)
    #   $get()            - return the wrapped matrix
    #   $setCache(matrix) - store a computed inverse
    #   $getCache()       - retrieve the stored inverse (NULL if unset)
    #
    # All operations assume the wrapped matrix is square and invertible.
    cachedSolution <- NULL  # NULL signals "inverse not computed yet"
    list(
        set = function(matrix) {
            internalMatrix <<- matrix
            cachedSolution <<- NULL  # any previously cached inverse is stale
        },
        get = function() internalMatrix,
        setCache = function(matrix) cachedSolution <<- matrix,
        getCache = function() cachedSolution
    )
}
# Solve an object of the kind returned by makeCacheMatrix, by taking its
# inverse.
#
# Argument:
# matrixObject - an object of the kind returned by makeCacheMatrix,
# representing a matrix (assumed to be square, invertible) and
# having the "methods" $set(matrix), $get(),
# $setCache(matrix), and $getcache().
# Return value: the inverse of the matrix represented by matrixObject.
cacheSolve <- function(matrixObject) {
    # Invert the matrix held by `matrixObject`, reusing its cached inverse
    # when one exists.
    #
    # Argument:
    #   matrixObject - object of the kind returned by makeCacheMatrix
    #                  (assumed to wrap a square, invertible matrix).
    # Return value: the inverse of the wrapped matrix.
    cached <- matrixObject$getCache()
    if (!is.null(cached)) {
        # cache hit: announce it, exactly as before, and return early
        message("getting cached data")
        return(cached)
    }
    # cache miss: compute the inverse and remember it for next time
    inverse <- solve(matrixObject$get())
    matrixObject$setCache(inverse)
    inverse
}
library(readr)
data_load <- function(filename){
  # Load the semicolon-delimited names dataset with every expected column
  # type pinned explicitly.
  #
  # Args:
  #   filename: path to a ";"-separated file with one column per name.
  # Returns: a tibble as produced by read_delim.
  # Stops with an informative error when the file cannot be read or parsed
  # (this implements the old "#try catch" TODO).
  tryCatch(
    read_delim(filename, delim = ";",
               col_types = cols(
                 Zuzanna = col_logical(),
                 Julia = col_logical(),
                 Lena = col_double(),
                 Maja = col_double(),
                 Hanna = col_double(),
                 Zofia = col_double(),
                 Amelia = col_double(),
                 Alicja = col_double(),
                 Aleksandra = col_logical(),
                 Natalia = col_logical(),
                 Oliwia = col_logical(),
                 Maria = col_logical(),
                 Wiktoria = col_double(),
                 Emilia = col_double(),
                 Antonina = col_double(),
                 Laura = col_double(),
                 Anna = col_double(),
                 Nadia = col_logical(),
                 Pola = col_logical(),
                 Liliana = col_logical(),
                 Nikola = col_logical(),
                 Gabriela = col_logical()
               )),
    error = function(e) {
      stop("data_load: failed to read '", filename, "': ",
           conditionMessage(e), call. = FALSE)
    }
  )
}
| /project/phase2/Saputa_Werner/data_load.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 1,151 | r | library(readr)
data_load <- function(filename){
  # Load the semicolon-delimited names dataset with every expected column
  # type pinned explicitly.
  #
  # Args:
  #   filename: path to a ";"-separated file with one column per name.
  # Returns: a tibble as produced by read_delim.
  # Stops with an informative error when the file cannot be read or parsed
  # (this implements the old "#try catch" TODO).
  tryCatch(
    read_delim(filename, delim = ";",
               col_types = cols(
                 Zuzanna = col_logical(),
                 Julia = col_logical(),
                 Lena = col_double(),
                 Maja = col_double(),
                 Hanna = col_double(),
                 Zofia = col_double(),
                 Amelia = col_double(),
                 Alicja = col_double(),
                 Aleksandra = col_logical(),
                 Natalia = col_logical(),
                 Oliwia = col_logical(),
                 Maria = col_logical(),
                 Wiktoria = col_double(),
                 Emilia = col_double(),
                 Antonina = col_double(),
                 Laura = col_double(),
                 Anna = col_double(),
                 Nadia = col_logical(),
                 Pola = col_logical(),
                 Liliana = col_logical(),
                 Nikola = col_logical(),
                 Gabriela = col_logical()
               )),
    error = function(e) {
      stop("data_load: failed to read '", filename, "': ",
           conditionMessage(e), call. = FALSE)
    }
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscalingplans_operations.R
\name{autoscalingplans_describe_scaling_plans}
\alias{autoscalingplans_describe_scaling_plans}
\title{Describes one or more of your scaling plans}
\usage{
autoscalingplans_describe_scaling_plans(
ScalingPlanNames = NULL,
ScalingPlanVersion = NULL,
ApplicationSources = NULL,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{ScalingPlanNames}{The names of the scaling plans (up to 10). If you specify application
sources, you cannot specify scaling plan names.}
\item{ScalingPlanVersion}{The version number of the scaling plan. Currently, the only valid value
is \code{1}.
If you specify a scaling plan version, you must also specify a scaling
plan name.}
\item{ApplicationSources}{The sources for the applications (up to 10). If you specify scaling plan
names, you cannot specify application sources.}
\item{MaxResults}{The maximum number of scalable resources to return. This value can be
between 1 and 50. The default value is 50.}
\item{NextToken}{The token for the next set of results.}
}
\description{
Describes one or more of your scaling plans.
See \url{https://www.paws-r-sdk.com/docs/autoscalingplans_describe_scaling_plans/} for full documentation.
}
\keyword{internal}
| /cran/paws.management/man/autoscalingplans_describe_scaling_plans.Rd | permissive | paws-r/paws | R | false | true | 1,313 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscalingplans_operations.R
\name{autoscalingplans_describe_scaling_plans}
\alias{autoscalingplans_describe_scaling_plans}
\title{Describes one or more of your scaling plans}
\usage{
autoscalingplans_describe_scaling_plans(
ScalingPlanNames = NULL,
ScalingPlanVersion = NULL,
ApplicationSources = NULL,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{ScalingPlanNames}{The names of the scaling plans (up to 10). If you specify application
sources, you cannot specify scaling plan names.}
\item{ScalingPlanVersion}{The version number of the scaling plan. Currently, the only valid value
is \code{1}.
If you specify a scaling plan version, you must also specify a scaling
plan name.}
\item{ApplicationSources}{The sources for the applications (up to 10). If you specify scaling plan
names, you cannot specify application sources.}
\item{MaxResults}{The maximum number of scalable resources to return. This value can be
between 1 and 50. The default value is 50.}
\item{NextToken}{The token for the next set of results.}
}
\description{
Describes one or more of your scaling plans.
See \url{https://www.paws-r-sdk.com/docs/autoscalingplans_describe_scaling_plans/} for full documentation.
}
\keyword{internal}
|
# ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
# ANALYSIS OF FEH DATA: INDEX-VALUE #
# ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
# Data loading
# NOTE(review): `cd` (catchment descriptors) and `am` (annual-maximum
# series) come from the FEH1000 dataset -- confirm which package provides it.
data(FEH1000)
# Sites used in model development (pag.102 FEH Vol.3):
# area>0.5 km2
# digital catchment data available
# urbext<0.025\n
urbext <- cd[,"urbext1990"]
area <- cd[,"dtm_area"]
# subset of catchments passing the three criteria above
cd732 <- cd[(!is.nan(cd[,"dtm_area"]))&(urbext<0.025)&(area>0.5),] # vs 687 - 728 of FEH
# keep only annual-maximum records belonging to the retained catchments
fac <- factor(am[,"number"],levels=cd732[,"number"])
am732 <- am[!is.na(fac),]
nlevels(as.factor(am732[,"number"]))
# Index-flood = median
# QMED: median of column 4 (the AM value -- confirm) grouped by site number
QMED <- tapply(am732[,4],am732[,1],median)
lnQMED <- log(QMED)
# Catchment descriptors (fig. 13.1 pag. 104), all log-transformed
lnAREA <- log(cd732[,"dtm_area"])
lnDPLBAR <- log(cd732[,"dplbar"])
lnSPRHOST <- log(cd732[,"sprhost"])
lnBFIHOST <- log(cd732[,"bfihost"])
lnSAAR <- log(cd732[,"saar"])
lnRMED1 <- log(cd732[,"rmed_1d"])
#lnNWET <- log(cd732[,""])
lnALTBAR <- log(cd732[,"altbar"])
lnFARL <- log(cd732[,"farl"])
# regression table: response (lnQMED) plus the log-descriptors
M <- data.frame(cbind(lnQMED,lnAREA,lnDPLBAR,lnSPRHOST,lnBFIHOST,lnSAAR,lnRMED1,lnALTBAR,lnFARL))
print(cor(M))
plot(M,pch=".",cex=2)
par(ask = interactive())
# Additional variables (pag. 105):
RESHOST <- cd732[,"bfihost"] + 1.30*(cd732[,"sprhost"]/100)-0.987
lnAREAsq <- lnAREA^2
lnSAARsq <- lnSAAR^2
M <- data.frame(cbind(M,RESHOST,lnAREAsq,lnSAARsq))
# Ordinary Least Square models:
# create a function using the 'leaps' function of package 'subselect'
# to perform all-possible-regressions:
# bestregressions <- function(dip,ind) {
# Y <- as.numeric(dip)
# X <- ind
# Sy <- var(Y)
# Sx <- var(X)
# Sxy <- var(X,Y)
# Dm.mat <- Sx
# Dm.H <- Sxy %*% t(Sxy)/Sy
# require(subselect)
# Dm.leaps <- leaps(Dm.mat, kmin=1, kmax=6, H=Dm.H, r=1, nsol=3)
# Dm.leaps
# for(i in 6:1) {for(j in 1:3) {print(colnames(X)[Dm.leaps$subsets[j,c(1:6),i]])}}
# }
# bestregressions(M[,1],M[,-1])
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL" "RESHOST" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnBFIHOST" "lnSAAR" "lnFARL" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnRMED1" "lnFARL" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL" "RESHOST"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "RESHOST" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "RESHOST"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR"
# [1] "lnAREA" "lnSPRHOST" "lnSAARsq"
# [1] "lnAREA" "lnBFIHOST" "lnSAAR"
# [1] "lnAREA" "lnSAAR"
# [1] "lnAREA" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST"
# [1] "lnAREAsq"
# [1] "lnAREA"
# [1] "lnDPLBAR"
# Ordinary Least Square models (graphics and statistics):
# Diagnostic panels and goodness-of-fit statistics for a lnQMED regression.
#
# Draws a 2x2 layout:
#   1. residuals vs fitted values,
#   2. normal probability plot of the residuals (nsRFA normplot),
#   3. fitted vs observed lnQMED with prediction intervals,
#   4. the same comparison back-transformed to QMED units,
# then titles the page with the predictors of the fitted model.
#
# Args:
#   regr: an 'lm' object fitted with lnQMED as the response.
# Side effects: draws on the current graphics device. Returns NULL invisibly.
graphics.lm <- function (regr) {
  # Take the response from the fitted model itself rather than from the
  # global 'lnQMED' object, so the plots always match the model passed in.
  y <- model.response(model.frame(regr))
  op <- par(mfrow=c(2,2), cex=.7)
  on.exit(par(op), add=TRUE)  # restore graphics settings even on error
  # Write R2 / RMSE / RMSEP(%) in the margins of the current panel.
  annotate_stats <- function(Rsq, rmse, rmsep) {
    mtext(paste("R2 = ",Rsq),3,-1.5,adj=0.05,cex=.7)
    mtext(paste("RMSE = ",rmse),1,-2.5,adj=0.95,cex=.7)
    mtext(paste("RMSEP = ",rmsep,"%"),1,-1.5,adj=0.95,cex=.7)
  }
  # Panel 1: residuals vs fitted values.
  plot(regr$fitted.values,regr$residuals,pch=".",cex=3,xlab="lnQMED Fitted",ylab="lnQMED Residuals")
  abline(0,0,lty=3)
  # Panel 2: normal probability plot of the residuals.
  normplot(regr$residuals,pch=".",cex=3,xlab="lnQMED Residuals")
  # Panel 3: fitted (x) vs observed (y) in log space, with prediction
  # intervals. Axis labels corrected: x carries the fitted values, y the
  # original observations (they were swapped in the old version).
  plot(regr$fitted.values,y,pch=".",cex=3,xlab="lnQMED Fitted",ylab="lnQMED Originals")
  abline(0,1,lty=3)
  intervals <- predinterval.lm(regr)
  intervals <- intervals[order(intervals[,1]),]  # sort so lines() draws monotone curves
  lines(intervals[,c(1,2)],lty=2)
  lines(intervals[,c(1,3)],lty=2)
  annotate_stats(signif(R2.lm(regr),3),
                 signif(RMSE.lm(regr),3),
                 signif(RMSEP(y,regr$fitted.values)*100,3))
  # Panel 4: the same comparison back-transformed to QMED units.
  plot(exp(regr$fitted.values),exp(y),pch=".",cex=3,xlab="QMED Fitted",ylab="QMED Originals")
  abline(0,1,lty=3)
  lines(exp(intervals[,c(1,2)]),lty=2)
  lines(exp(intervals[,c(1,3)]),lty=2)
  annotate_stats(signif(R2(exp(y),exp(regr$fitted.values)),3),
                 signif(RMSE(exp(y),exp(regr$fitted.values)),3),
                 signif(RMSEP(exp(y),exp(regr$fitted.values))*100,3))
  # Restore a single full-width plotting region for the overall title.
  par(mfrow=c(1,1),cex=1)
  title(main=paste(names(regr$coefficients)[-1], collapse=", "),cex.main=.7,font.main=1)
  invisible(NULL)
}
# Fit each candidate OLS regression for lnQMED (ordered roughly by
# increasing complexity) and draw its diagnostic panels via graphics.lm().
graphics.lm(lm(lnQMED ~ lnDPLBAR))
graphics.lm(lm(lnQMED ~ lnAREA))
graphics.lm(lm(lnQMED ~ lnAREAsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST))
graphics.lm(lm(lnQMED ~ lnAREA + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSAAR))
graphics.lm(lm(lnQMED ~ lnAREA + lnBFIHOST + lnSAAR))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + RESHOST))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + RESHOST + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL + RESHOST))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnRMED1 + lnFARL + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnBFIHOST + lnSAAR + lnFARL + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL + RESHOST + lnSAARsq))
# ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
# THE END #
# ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
| /demo/feh_floods01.R | no_license | cran/nsRFA | R | false | false | 6,084 | r | # ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
# ANALYSIS OF FEH DATA: INDEX-VALUE #
# ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
# Data loading
# data(FEH1000) supplies 'cd' (catchment descriptors) and 'am' (annual
# maximum flow series), used throughout this demo.
data(FEH1000)
# Sites used in model development (pag.102 FEH Vol.3):
# area>0.5 km2
# digital catchment data available
# urbext<0.025
urbext <- cd[,"urbext1990"]
area <- cd[,"dtm_area"]
# Keep only the sites meeting all three selection criteria above.
cd732 <- cd[(!is.nan(cd[,"dtm_area"]))&(urbext<0.025)&(area>0.5),] # vs 687 - 728 of FEH
# Restrict the annual-maxima table to the retained sites.
fac <- factor(am[,"number"],levels=cd732[,"number"])
am732 <- am[!is.na(fac),]
nlevels(as.factor(am732[,"number"]))  # number of sites actually retained
# Index-flood = median annual maximum per site (QMED), modelled in logs.
QMED <- tapply(am732[,4],am732[,1],median)
lnQMED <- log(QMED)
# Catchment descriptors (fig. 13.1 pag. 104), all log-transformed.
lnAREA <- log(cd732[,"dtm_area"])
lnDPLBAR <- log(cd732[,"dplbar"])
lnSPRHOST <- log(cd732[,"sprhost"])
lnBFIHOST <- log(cd732[,"bfihost"])
lnSAAR <- log(cd732[,"saar"])
lnRMED1 <- log(cd732[,"rmed_1d"])
#lnNWET <- log(cd732[,""])
lnALTBAR <- log(cd732[,"altbar"])
lnFARL <- log(cd732[,"farl"])
# Candidate regression data set: response (lnQMED) plus the descriptors.
M <- data.frame(cbind(lnQMED,lnAREA,lnDPLBAR,lnSPRHOST,lnBFIHOST,lnSAAR,lnRMED1,lnALTBAR,lnFARL))
print(cor(M))
plot(M,pch=".",cex=2)
par(ask = interactive())  # pause between plots in interactive sessions
# Additional variables (pag. 105): derived descriptors used by FEH models.
RESHOST <- cd732[,"bfihost"] + 1.30*(cd732[,"sprhost"]/100)-0.987
lnAREAsq <- lnAREA^2
lnSAARsq <- lnSAAR^2
M <- data.frame(cbind(M,RESHOST,lnAREAsq,lnSAARsq))
# Ordinary Least Square models:
# create a function using the 'leaps' function of package 'subselect'
# to perform all-possible-regressions:
# bestregressions <- function(dip,ind) {
# Y <- as.numeric(dip)
# X <- ind
# Sy <- var(Y)
# Sx <- var(X)
# Sxy <- var(X,Y)
# Dm.mat <- Sx
# Dm.H <- Sxy %*% t(Sxy)/Sy
# require(subselect)
# Dm.leaps <- leaps(Dm.mat, kmin=1, kmax=6, H=Dm.H, r=1, nsol=3)
# Dm.leaps
# for(i in 6:1) {for(j in 1:3) {print(colnames(X)[Dm.leaps$subsets[j,c(1:6),i]])}}
# }
# bestregressions(M[,1],M[,-1])
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL" "RESHOST" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnBFIHOST" "lnSAAR" "lnFARL" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnRMED1" "lnFARL" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL" "RESHOST"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "RESHOST" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "lnFARL"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR" "RESHOST"
# [1] "lnAREA" "lnSPRHOST" "lnSAAR"
# [1] "lnAREA" "lnSPRHOST" "lnSAARsq"
# [1] "lnAREA" "lnBFIHOST" "lnSAAR"
# [1] "lnAREA" "lnSAAR"
# [1] "lnAREA" "lnSAARsq"
# [1] "lnAREA" "lnSPRHOST"
# [1] "lnAREAsq"
# [1] "lnAREA"
# [1] "lnDPLBAR"
# Ordinary Least Square models (graphics and statistics):
# Diagnostic panels and goodness-of-fit statistics for a lnQMED regression.
#
# Draws a 2x2 layout:
#   1. residuals vs fitted values,
#   2. normal probability plot of the residuals (nsRFA normplot),
#   3. fitted vs observed lnQMED with prediction intervals,
#   4. the same comparison back-transformed to QMED units,
# then titles the page with the predictors of the fitted model.
#
# Args:
#   regr: an 'lm' object fitted with lnQMED as the response.
# Side effects: draws on the current graphics device. Returns NULL invisibly.
graphics.lm <- function (regr) {
  # Take the response from the fitted model itself rather than from the
  # global 'lnQMED' object, so the plots always match the model passed in.
  y <- model.response(model.frame(regr))
  op <- par(mfrow=c(2,2), cex=.7)
  on.exit(par(op), add=TRUE)  # restore graphics settings even on error
  # Write R2 / RMSE / RMSEP(%) in the margins of the current panel.
  annotate_stats <- function(Rsq, rmse, rmsep) {
    mtext(paste("R2 = ",Rsq),3,-1.5,adj=0.05,cex=.7)
    mtext(paste("RMSE = ",rmse),1,-2.5,adj=0.95,cex=.7)
    mtext(paste("RMSEP = ",rmsep,"%"),1,-1.5,adj=0.95,cex=.7)
  }
  # Panel 1: residuals vs fitted values.
  plot(regr$fitted.values,regr$residuals,pch=".",cex=3,xlab="lnQMED Fitted",ylab="lnQMED Residuals")
  abline(0,0,lty=3)
  # Panel 2: normal probability plot of the residuals.
  normplot(regr$residuals,pch=".",cex=3,xlab="lnQMED Residuals")
  # Panel 3: fitted (x) vs observed (y) in log space, with prediction
  # intervals. Axis labels corrected: x carries the fitted values, y the
  # original observations (they were swapped in the old version).
  plot(regr$fitted.values,y,pch=".",cex=3,xlab="lnQMED Fitted",ylab="lnQMED Originals")
  abline(0,1,lty=3)
  intervals <- predinterval.lm(regr)
  intervals <- intervals[order(intervals[,1]),]  # sort so lines() draws monotone curves
  lines(intervals[,c(1,2)],lty=2)
  lines(intervals[,c(1,3)],lty=2)
  annotate_stats(signif(R2.lm(regr),3),
                 signif(RMSE.lm(regr),3),
                 signif(RMSEP(y,regr$fitted.values)*100,3))
  # Panel 4: the same comparison back-transformed to QMED units.
  plot(exp(regr$fitted.values),exp(y),pch=".",cex=3,xlab="QMED Fitted",ylab="QMED Originals")
  abline(0,1,lty=3)
  lines(exp(intervals[,c(1,2)]),lty=2)
  lines(exp(intervals[,c(1,3)]),lty=2)
  annotate_stats(signif(R2(exp(y),exp(regr$fitted.values)),3),
                 signif(RMSE(exp(y),exp(regr$fitted.values)),3),
                 signif(RMSEP(exp(y),exp(regr$fitted.values))*100,3))
  # Restore a single full-width plotting region for the overall title.
  par(mfrow=c(1,1),cex=1)
  title(main=paste(names(regr$coefficients)[-1], collapse=", "),cex.main=.7,font.main=1)
  invisible(NULL)
}
# Fit each candidate OLS regression for lnQMED (ordered roughly by
# increasing complexity) and draw its diagnostic panels via graphics.lm().
graphics.lm(lm(lnQMED ~ lnDPLBAR))
graphics.lm(lm(lnQMED ~ lnAREA))
graphics.lm(lm(lnQMED ~ lnAREAsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST))
graphics.lm(lm(lnQMED ~ lnAREA + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSAAR))
graphics.lm(lm(lnQMED ~ lnAREA + lnBFIHOST + lnSAAR))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + RESHOST))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + RESHOST + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL + RESHOST))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnRMED1 + lnFARL + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnBFIHOST + lnSAAR + lnFARL + lnSAARsq))
graphics.lm(lm(lnQMED ~ lnAREA + lnSPRHOST + lnSAAR + lnFARL + RESHOST + lnSAARsq))
# ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
# THE END #
# ------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------- #
|
library(argparser)
library(TopmedPipeline)
library(Biobase)
library(SNPRelate)
library(readr)
library(dplyr)
library(RColorBrewer)
library(ggplot2)
sessionInfo()
# Pedigree check: merge observed genetic kinship estimates with the expected
# relationships from the pedigree, save the combined table, and plot sample
# pairs split into expected vs unexpected relationships.
# Command line: one positional argument, the path to the config file.
argp <- arg_parser("Pedigree check")
argp <- add_argument(argp, "config", help="path to config file")
argv <- parse_args(argp)
config <- readConfig(argv$config)
# Required config parameters, and optional ones with their defaults.
required <- c("phenotype_file",
              "exp_rel_file",
              "kinship_file")
optional <- c("subjectID"="submitted_subject_id",
              "kinship_method"="king_ibdseg",
              "out_file"="kinship_obsrel.RData",
              "out_plot"="kinship.pdf",
              "sample_include_file"=NA)
config <- setConfigDefaults(config, required, optional)
print(config)
# Sample annotation: keep sample.id plus the subject ID column (renamed
# 'Individ'); the source column name comes from the 'subjectID' config value.
annot <- getobj(config["phenotype_file"])
if (is(annot, "AnnotatedDataFrame")) {
    annot <- pData(annot)
}
annot <- annot %>%
    select(sample.id, Individ=!!unname(config["subjectID"]))
# Optionally restrict the analysis to an explicit set of sample IDs.
if (!is.na(config["sample_include_file"])) {
    ids <- getobj(config["sample_include_file"])
    annot <- filter(annot, sample.id %in% ids)
} else {
    ids <- annot$sample.id
}
## read ibd file
# Load pairwise kinship estimates; file format and columns depend on the
# estimation method. Every branch yields a data frame with ID1, ID2, an
# x-axis variable (k0 or IBS0), kinship, and an observed relationship
# category 'obs.rel'.
kin.type <- tolower(config["kinship_method"])
if (kin.type == "pcrelate") {
    # PCRelate output: use the kinBtwn table (k0 / kin estimates).
    pcr <- getobj(config["kinship_file"])
    ibd <- pcr$kinBtwn %>%
        filter(ID1 %in% ids & ID2 %in% ids) %>%
        rename(kinship=kin) %>%
        select(ID1, ID2, k0, kinship) %>%
        mutate(obs.rel=GWASTools::ibdAssignRelatednessKing(k0, kinship, cut.ibs0.err=0.1))
    xvar <- "k0"
    rm(pcr)
} else if (kin.type == "king" ) {
    # KING output stored as an SNPRelate-style object with IBS0 estimates.
    king <- getobj(config["kinship_file"])
    ibd <- snpgdsIBDSelection(king, samp.sel=which(king$sample.id %in% ids)) %>%
        mutate(obs.rel=GWASTools::ibdAssignRelatednessKing(IBS0, kinship))
    xvar <- "IBS0"
    rm(king)
} else if (kin.type == "king_ibdseg") {
    # KING --ibdseg text output: approximate IBS0 from the IBD segment
    # fractions, halve PropIBD to get kinship, and map KING's inferred
    # relationship labels onto this script's categories (4th degree and
    # unrelated both collapse to "U").
    ibd <- read_tsv(config["kinship_file"], col_types="-c-c--nnnc") %>%
        filter(ID1 %in% ids & ID2 %in% ids) %>%
        mutate(IBS0=(1 - IBD1Seg - IBD2Seg), kinship=0.5*PropIBD) %>%
        mutate(obs.rel=recode(InfType, "Dup/MZ" = "Dup", "2nd" = "Deg2", "3rd" = "Deg3", "4th" = "U", "UN" = "U")) %>%
        select(ID1, ID2, IBS0, kinship, obs.rel)
    xvar <- "IBS0"
}
nrow(ibd)
## expected relatives
# Expected relationships from the pedigree; build an order-independent
# pair key so pairs match regardless of which subject is listed first.
rel <- getobj(config["exp_rel_file"])
rel <- rel %>%
    mutate(pair=GWASTools::pasteSorted(Individ1, Individ2)) %>%
    select(one_of("pair", "Individ1", "Individ2", "family", "relation", "exp.rel", "MZtwinID"))
# Attach subject IDs to both members of each observed pair, then merge in
# the expected relationship by pair key.
ibd <- select(ibd, "ID1", "ID2", !!xvar, "kinship", "obs.rel") %>%
    left_join(annot, by=c(ID1="sample.id")) %>%
    rename(Individ1=Individ) %>%
    left_join(annot, by=c(ID2="sample.id")) %>%
    rename(Individ2=Individ) %>%
    mutate(pair=GWASTools::pasteSorted(Individ1, Individ2)) %>%
    left_join(select(rel, -starts_with("Individ")), by="pair")
# Expected relative pairs with no observed kinship estimate: append them
# with observed relationship "U" so they surface as unexpected.
unobs <- rel %>%
    inner_join(annot, by=c(Individ1="Individ")) %>%
    rename(ID1=sample.id) %>%
    inner_join(annot, by=c(Individ2="Individ")) %>%
    rename(ID2=sample.id) %>%
    filter(!(pair %in% ibd$pair)) %>%
    mutate(obs.rel="U")
# Combine observed and unobserved pairs. Pairs of samples from the same
# subject are expected duplicates; pairs unrelated in both the pedigree and
# the kinship estimates carry no information and are dropped.
ibd <- bind_rows(ibd, unobs) %>%
    select(-pair) %>%
    mutate(exp.rel=ifelse(is.na(exp.rel), "U", exp.rel),
           exp.rel=ifelse(Individ1 == Individ2, "Dup", exp.rel)) %>%
    filter(!(exp.rel == "U" & obs.rel == "U"))
nrow(ibd)
save(ibd, file=config["out_file"])
table(ibd$exp.rel, ibd$obs.rel)  # expected vs observed cross-tabulation
## plot unexpected relatives
# Facet the scatter plot into pairs whose observed category matches the
# expected one vs those that disagree; color points by expected relationship.
ibd <- mutate(ibd, unexp=ifelse(exp.rel == obs.rel, "expected", "unexpected"))
rels <- c("Dup", "PO", "FS", "Deg1", "Deg2", "Deg3", "Q", "U")
cols <- c(brewer.pal(length(rels)-1, "Dark2")[c(1, 2, 3, 6, 5, 4, 7)], "black")
cmap <- setNames(cols, rels)
theme_set(theme_bw() + theme(legend.position=c(1, 1), legend.justification=c(1,1), legend.background = element_rect(colour = "black")))
# Dashed guides at kinship 2^(-3/2), 2^(-5/2), 2^(-7/2), 2^(-9/2): the
# conventional cutoffs between relationship degrees.
p <- ggplot(ibd, aes_string(xvar, "kinship", color="exp.rel")) + facet_wrap(~unexp) +
    geom_hline(yintercept=2^(-seq(3,9,2)/2), linetype='dashed', color="grey") +
    geom_point(alpha=0.7) +
    scale_color_manual(values=cmap, breaks=names(cmap), na.value="grey30") +
    guides(colour=guide_legend(override.aes=list(alpha=1))) +
    ylab("kinship estimate")
ggsave(config["out_plot"], plot=p, width=12, height=6)
| /R/pedigree_check.R | no_license | UW-GAC/analysis_pipeline | R | false | false | 4,167 | r | library(argparser)
library(TopmedPipeline)
library(Biobase)
library(SNPRelate)
library(readr)
library(dplyr)
library(RColorBrewer)
library(ggplot2)
sessionInfo()
# Pedigree check: merge observed genetic kinship estimates with the expected
# relationships from the pedigree, save the combined table, and plot sample
# pairs split into expected vs unexpected relationships.
# Command line: one positional argument, the path to the config file.
argp <- arg_parser("Pedigree check")
argp <- add_argument(argp, "config", help="path to config file")
argv <- parse_args(argp)
config <- readConfig(argv$config)
# Required config parameters, and optional ones with their defaults.
required <- c("phenotype_file",
              "exp_rel_file",
              "kinship_file")
optional <- c("subjectID"="submitted_subject_id",
              "kinship_method"="king_ibdseg",
              "out_file"="kinship_obsrel.RData",
              "out_plot"="kinship.pdf",
              "sample_include_file"=NA)
config <- setConfigDefaults(config, required, optional)
print(config)
# Sample annotation: keep sample.id plus the subject ID column (renamed
# 'Individ'); the source column name comes from the 'subjectID' config value.
annot <- getobj(config["phenotype_file"])
if (is(annot, "AnnotatedDataFrame")) {
    annot <- pData(annot)
}
annot <- annot %>%
    select(sample.id, Individ=!!unname(config["subjectID"]))
# Optionally restrict the analysis to an explicit set of sample IDs.
if (!is.na(config["sample_include_file"])) {
    ids <- getobj(config["sample_include_file"])
    annot <- filter(annot, sample.id %in% ids)
} else {
    ids <- annot$sample.id
}
## read ibd file
# Load pairwise kinship estimates; file format and columns depend on the
# estimation method. Every branch yields a data frame with ID1, ID2, an
# x-axis variable (k0 or IBS0), kinship, and an observed relationship
# category 'obs.rel'.
kin.type <- tolower(config["kinship_method"])
if (kin.type == "pcrelate") {
    # PCRelate output: use the kinBtwn table (k0 / kin estimates).
    pcr <- getobj(config["kinship_file"])
    ibd <- pcr$kinBtwn %>%
        filter(ID1 %in% ids & ID2 %in% ids) %>%
        rename(kinship=kin) %>%
        select(ID1, ID2, k0, kinship) %>%
        mutate(obs.rel=GWASTools::ibdAssignRelatednessKing(k0, kinship, cut.ibs0.err=0.1))
    xvar <- "k0"
    rm(pcr)
} else if (kin.type == "king" ) {
    # KING output stored as an SNPRelate-style object with IBS0 estimates.
    king <- getobj(config["kinship_file"])
    ibd <- snpgdsIBDSelection(king, samp.sel=which(king$sample.id %in% ids)) %>%
        mutate(obs.rel=GWASTools::ibdAssignRelatednessKing(IBS0, kinship))
    xvar <- "IBS0"
    rm(king)
} else if (kin.type == "king_ibdseg") {
    # KING --ibdseg text output: approximate IBS0 from the IBD segment
    # fractions, halve PropIBD to get kinship, and map KING's inferred
    # relationship labels onto this script's categories (4th degree and
    # unrelated both collapse to "U").
    ibd <- read_tsv(config["kinship_file"], col_types="-c-c--nnnc") %>%
        filter(ID1 %in% ids & ID2 %in% ids) %>%
        mutate(IBS0=(1 - IBD1Seg - IBD2Seg), kinship=0.5*PropIBD) %>%
        mutate(obs.rel=recode(InfType, "Dup/MZ" = "Dup", "2nd" = "Deg2", "3rd" = "Deg3", "4th" = "U", "UN" = "U")) %>%
        select(ID1, ID2, IBS0, kinship, obs.rel)
    xvar <- "IBS0"
}
nrow(ibd)
## expected relatives
# Expected relationships from the pedigree; build an order-independent
# pair key so pairs match regardless of which subject is listed first.
rel <- getobj(config["exp_rel_file"])
rel <- rel %>%
    mutate(pair=GWASTools::pasteSorted(Individ1, Individ2)) %>%
    select(one_of("pair", "Individ1", "Individ2", "family", "relation", "exp.rel", "MZtwinID"))
# Attach subject IDs to both members of each observed pair, then merge in
# the expected relationship by pair key.
ibd <- select(ibd, "ID1", "ID2", !!xvar, "kinship", "obs.rel") %>%
    left_join(annot, by=c(ID1="sample.id")) %>%
    rename(Individ1=Individ) %>%
    left_join(annot, by=c(ID2="sample.id")) %>%
    rename(Individ2=Individ) %>%
    mutate(pair=GWASTools::pasteSorted(Individ1, Individ2)) %>%
    left_join(select(rel, -starts_with("Individ")), by="pair")
# Expected relative pairs with no observed kinship estimate: append them
# with observed relationship "U" so they surface as unexpected.
unobs <- rel %>%
    inner_join(annot, by=c(Individ1="Individ")) %>%
    rename(ID1=sample.id) %>%
    inner_join(annot, by=c(Individ2="Individ")) %>%
    rename(ID2=sample.id) %>%
    filter(!(pair %in% ibd$pair)) %>%
    mutate(obs.rel="U")
# Combine observed and unobserved pairs. Pairs of samples from the same
# subject are expected duplicates; pairs unrelated in both the pedigree and
# the kinship estimates carry no information and are dropped.
ibd <- bind_rows(ibd, unobs) %>%
    select(-pair) %>%
    mutate(exp.rel=ifelse(is.na(exp.rel), "U", exp.rel),
           exp.rel=ifelse(Individ1 == Individ2, "Dup", exp.rel)) %>%
    filter(!(exp.rel == "U" & obs.rel == "U"))
nrow(ibd)
save(ibd, file=config["out_file"])
table(ibd$exp.rel, ibd$obs.rel)  # expected vs observed cross-tabulation
## plot unexpected relatives
# Facet the scatter plot into pairs whose observed category matches the
# expected one vs those that disagree; color points by expected relationship.
ibd <- mutate(ibd, unexp=ifelse(exp.rel == obs.rel, "expected", "unexpected"))
rels <- c("Dup", "PO", "FS", "Deg1", "Deg2", "Deg3", "Q", "U")
cols <- c(brewer.pal(length(rels)-1, "Dark2")[c(1, 2, 3, 6, 5, 4, 7)], "black")
cmap <- setNames(cols, rels)
theme_set(theme_bw() + theme(legend.position=c(1, 1), legend.justification=c(1,1), legend.background = element_rect(colour = "black")))
# Dashed guides at kinship 2^(-3/2), 2^(-5/2), 2^(-7/2), 2^(-9/2): the
# conventional cutoffs between relationship degrees.
p <- ggplot(ibd, aes_string(xvar, "kinship", color="exp.rel")) + facet_wrap(~unexp) +
    geom_hline(yintercept=2^(-seq(3,9,2)/2), linetype='dashed', color="grey") +
    geom_point(alpha=0.7) +
    scale_color_manual(values=cmap, breaks=names(cmap), na.value="grey30") +
    guides(colour=guide_legend(override.aes=list(alpha=1))) +
    ylab("kinship estimate")
ggsave(config["out_plot"], plot=p, width=12, height=6)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manual R codes.r
\name{Vis_OSR_A.fra}
\alias{Vis_OSR_A.fra}
\title{Graph of OSRs and A-arm Selection Fraction Based on homogeneous E.st and T.st.}
\usage{
Vis_OSR_A.fra(d, title)
}
\arguments{
\item{d}{a matrix with two columns, which separately store the values based on the E.st and the T.st}
\item{title}{the name of the plot.}
}
\value{
the 2-D plot
}
\description{
Visualize the OSRs and the A-arm selection fraction of homogeneous E.st and homogeneous T.st when N patients are treated, in a single plot.
}
\examples{
library(combinat)
N <- 20
out<-serialTables(N)
ABSF<-sapply(out$Z, unlist)
A.fraction<-apply(ABSF,1,A.frac)
A.fraction[1] <- 0.5
Success.fraction <-success.fraction<-apply(ABSF,1,success.frac)
Success.fraction[1] <- 0.5
E.st_prob.A<-apply(ABSF,1,E.st_utinity)
#Probability to select the A-arm based on utinity function of E.st: E.st_utinity()
a<-0.8
#true success rate of A-arm treatment
b<-0.6
#true success rate of B-arm treatment
E.table.probability<-table.prob(relist(E.st_prob.A,out$sk),p=c(a,b))
#exact probability per 2X2 table based on utinity function of E.st: E.st_utinity()
w=0.5
#homogeneous optimistic attitude with w=0.5
T.st_prob.A<-apply(ABSF,1,T.st_utinity,w=0.5)
#Probability to select the A-arm based on utinity function of T.st: T.st_utinity()
a<-0.8
#true success rate of A-arm treatment
b<-0.6
#true success rate of B-arm treatment
T.table.probability<-table.prob(relist(T.st_prob.A,out$sk),p=c(a,b))
#exact probability per 2X2 table based on utinity function of T.st: T.st_utinity()
#Comparison of A-arm Fraction and OSRs of homogeneous E.st and T.st
E.table.prob.v<- unlist(E.table.probability)
E.mean.selectionA.per.n<-O_A_frac(fv=A.fraction,pv=E.table.prob.v,sk=out$sk)
E.mean.success.per.n<-OSR(xv=success.fraction,pv=E.table.prob.v,sk=out$sk)
T.table.prob.v<- unlist(T.table.probability)
T.mean.selectionA.per.n<-O_A_frac(fv=A.fraction,pv=T.table.prob.v,sk=out$sk)
T.mean.success.per.n<-OSR(xv=success.fraction,pv=T.table.prob.v,sk=out$sk)
par(mar=c(4,4,3,1),mfrow=c(1,2))
Vis_OSR_A.fra(d=cbind(E.mean.selectionA.per.n,T.mean.selectionA.per.n),title="A-arm Fraction of E.st vs T.st")
Vis_OSR_A.fra(d=cbind(E.mean.success.per.n,T.mean.success.per.n),title="OSRs of E.st vs T.st")
}
\keyword{NA}
| /man/Vis_OSR_A.fra.Rd | no_license | ryamada22/SelfDecABP | R | false | true | 2,316 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manual R codes.r
\name{Vis_OSR_A.fra}
\alias{Vis_OSR_A.fra}
\title{Graph of OSRs and A-arm Selection Fraction Based on homogeneous E.st and T.st.}
\usage{
Vis_OSR_A.fra(d, title)
}
\arguments{
\item{d}{a matrix with two columns, which separately store the values based on the E.st and the T.st}
\item{title}{the name of the plot.}
}
\value{
the 2-D plot
}
\description{
Visualize the OSRs and the A-arm selection fraction of homogeneous E.st and homogeneous T.st when N patients are treated, in a single plot.
}
\examples{
library(combinat)
N <- 20
out<-serialTables(N)
ABSF<-sapply(out$Z, unlist)
A.fraction<-apply(ABSF,1,A.frac)
A.fraction[1] <- 0.5
Success.fraction <-success.fraction<-apply(ABSF,1,success.frac)
Success.fraction[1] <- 0.5
E.st_prob.A<-apply(ABSF,1,E.st_utinity)
#Probability to select the A-arm based on utinity function of E.st: E.st_utinity()
a<-0.8
#true success rate of A-arm treatment
b<-0.6
#true success rate of B-arm treatment
E.table.probability<-table.prob(relist(E.st_prob.A,out$sk),p=c(a,b))
#exact probability per 2X2 table based on utinity function of E.st: E.st_utinity()
w=0.5
#homogeneous optimistic attitude with w=0.5
T.st_prob.A<-apply(ABSF,1,T.st_utinity,w=0.5)
#Probability to select the A-arm based on utinity function of T.st: T.st_utinity()
a<-0.8
#true success rate of A-arm treatment
b<-0.6
#true success rate of B-arm treatment
T.table.probability<-table.prob(relist(T.st_prob.A,out$sk),p=c(a,b))
#exact probability per 2X2 table based on utinity function of T.st: T.st_utinity()
#Comparison of A-arm Fraction and OSRs of homogeneous E.st and T.st
E.table.prob.v<- unlist(E.table.probability)
E.mean.selectionA.per.n<-O_A_frac(fv=A.fraction,pv=E.table.prob.v,sk=out$sk)
E.mean.success.per.n<-OSR(xv=success.fraction,pv=E.table.prob.v,sk=out$sk)
T.table.prob.v<- unlist(T.table.probability)
T.mean.selectionA.per.n<-O_A_frac(fv=A.fraction,pv=T.table.prob.v,sk=out$sk)
T.mean.success.per.n<-OSR(xv=success.fraction,pv=T.table.prob.v,sk=out$sk)
par(mar=c(4,4,3,1),mfrow=c(1,2))
Vis_OSR_A.fra(d=cbind(E.mean.selectionA.per.n,T.mean.selectionA.per.n),title="A-arm Fraction of E.st vs T.st")
Vis_OSR_A.fra(d=cbind(E.mean.success.per.n,T.mean.success.per.n),title="OSRs of E.st vs T.st")
}
\keyword{NA}
|
# Run the full testthat suite of the 'kmer' package.
library(testthat)
library(kmer)
test_check("kmer")
| /kmer/tests/testthat.R | no_license | akhikolla/InformationHouse | R | false | false | 51 | r | library(testthat)
library(kmer)
test_check("kmer")
|
# Fetch protein sequences from NCBI and append them to a FASTA file.
#
# Args:
#   data: character vector of protein accession identifiers (e.g. XP ids).
#   nameFile: output file name given without extension; records are appended
#             to "<nameFile>.fasta", so re-running the function adds
#             duplicate entries to an existing file.
#
# Requires the 'rentrez' package (entrez_fetch) and network access.
# Returns NULL invisibly; its purpose is the file side effect.
xp_to_fasta_prot <- function(data, nameFile){
  out_file <- paste0(nameFile, ".fasta")  # build the path once, not per record
  for(i in seq_along(data)){
    xp <- data[i]
    # One request per accession keeps each failure isolated and avoids
    # oversized NCBI queries.
    imp_aa_seq <- entrez_fetch(db="protein", id=xp, rettype='fasta')
    write(imp_aa_seq, out_file, append=TRUE)
  }
  invisible(NULL)
}
# nameFile: output file name given as a character string, e.g. "nameFile"
# data: a character vector whose elements are the accession numbers of the proteins
| /XP_to_fasta_prot.R | no_license | RocioCarmonaMolero/ScriptProteinas | R | false | false | 353 | r | xp_to_fasta_prot <- function(data, nameFile){
for(i in seq_along(data)){
xp <- data[i]
imp_aa_seq <- entrez_fetch(db="protein", id=xp, rettype='fasta')
write(imp_aa_seq, paste0(nameFile, ".fasta"), append =TRUE)
}
}
# nameFile: output file name given as a character string, e.g. "nameFile"
# data: a character vector whose elements are the accession numbers of the proteins
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/emapplot_cluster.R
\name{emapplot_cluster}
\alias{emapplot_cluster}
\alias{emapplot_cluster,enrichResult-method}
\alias{emapplot_cluster,gseaResult-method}
\alias{emapplot_cluster,compareClusterResult-method}
\alias{emapplot_cluster.enrichResult}
\alias{emapplot_cluster.compareClusterResult}
\title{emapplot_cluster}
\usage{
emapplot_cluster(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
\S4method{emapplot_cluster}{enrichResult}(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
\S4method{emapplot_cluster}{gseaResult}(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
\S4method{emapplot_cluster}{compareClusterResult}(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
emapplot_cluster.enrichResult(
x,
showCategory = 30,
color = "p.adjust",
cex_line = 1,
with_edge = TRUE,
nWords = 4,
nCluster = NULL,
split = NULL,
min_edge = 0.2,
cex_label_group = 1,
label_style = "shadowtext",
group_legend = FALSE,
cex_category = 1,
label_format = 30,
repel = FALSE,
shadowtext = TRUE,
...
)
emapplot_cluster.compareClusterResult(
x,
showCategory = 30,
color = "p.adjust",
cex_line = 1,
with_edge = TRUE,
nWords = 4,
nCluster = NULL,
split = NULL,
min_edge = 0.2,
cex_label_group = 1,
pie = "equal",
legend_n = 5,
cex_category = 1,
label_style = "shadowtext",
group_legend = FALSE,
label_format = 30,
repel = FALSE,
shadowtext = TRUE,
...
)
}
\arguments{
\item{x}{Enrichment result.}
\item{showCategory}{A number or a vector of terms. If it is a number,
the first n terms will be displayed. If it is a vector of terms,
the selected terms will be displayed.}
\item{color}{Variable that used to color enriched terms, e.g. 'pvalue',
'p.adjust' or 'qvalue'.}
\item{label_format}{a numeric value that sets the wrap length; alternatively, a
custom function to format axis labels.}
\item{...}{Additional parameters used to set the position of the group label.
When the parameter repel is set to TRUE, additional parameters will take effect.
additional parameters can refer the following parameters.
\itemize{
\item \code{force} Force of repulsion between overlapping text labels. Defaults to 1.
\item \code{nudge_x, nudge_y} Horizontal and vertical adjustments to nudge
the starting position of each text label.
\item \code{direction} "both", "x", or "y" – direction in which to adjust position of labels.
}}
\item{cex_line}{Scale of line width}
\item{with_edge}{Logical, if TRUE (the default), draw the edges of the network diagram.}
\item{nWords}{Numeric, the number of words in the cluster tags, the default value is 4.}
\item{nCluster}{Numeric, the number of clusters,
the default value is square root of the number of nodes.}
\item{split}{Separate result by 'category' variable}
\item{min_edge}{The minimum similarity threshold for whether
two nodes are connected, should between 0 and 1, default value is 0.2.}
\item{cex_label_group}{Numeric, scale of group labels size, the default value is 1.}
\item{label_style}{style of group label, one of "shadowtext" and "ggforce".}
\item{group_legend}{Logical, if TRUE, the grouping legend will be displayed.
The default is FALSE}
\item{cex_category}{Numeric, indicating the size by which plotting category
nodes should be scaled relative to the default.}
\item{repel}{whether to correct the position of the label. Defaults to FALSE.}
\item{shadowtext}{a logical value, whether to use shadow font. Defaults to TRUE.}
\item{pie}{Proportion of clusters in the pie chart, one of
'equal' (default) and 'Count'.}
\item{legend_n}{Number of circle in legend, the default value is 5.}
}
\value{
ggplot object
}
\description{
Functional grouping network diagram for enrichment result of
over-representation test or gene set enrichment analysis
}
\details{
This function visualizes gene sets as a grouped network (i.e. enrichment map).
Gene sets with high similarity tend to cluster together, making it easier
for interpretation. It adds clustering on the basis of emapplot's network graph.
For more details, please refer to the documentation of \link{emapplot}.
}
\examples{
\dontrun{
library(clusterProfiler)
library(org.Hs.eg.db)
library(enrichplot)
library(GOSemSim)
library(DOSE)
data(geneList)
gene <- names(geneList)[abs(geneList) > 2]
ego <- enrichGO(gene = gene,
universe = names(geneList),
OrgDb = org.Hs.eg.db,
ont = "BP",
pAdjustMethod = "BH",
pvalueCutoff = 0.01,
qvalueCutoff = 0.05,
readable = TRUE)
d <- godata('org.Hs.eg.db', ont="BP")
ego2 <- pairwise_termsim(ego, method = "Wang", semData = d)
emapplot_cluster(ego2, showCategory = 80)
}
}
| /man/emapplot_cluster.Rd | no_license | EricChLee/enrichplot | R | false | true | 4,958 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/emapplot_cluster.R
\name{emapplot_cluster}
\alias{emapplot_cluster}
\alias{emapplot_cluster,enrichResult-method}
\alias{emapplot_cluster,gseaResult-method}
\alias{emapplot_cluster,compareClusterResult-method}
\alias{emapplot_cluster.enrichResult}
\alias{emapplot_cluster.compareClusterResult}
\title{emapplot_cluster}
\usage{
emapplot_cluster(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
\S4method{emapplot_cluster}{enrichResult}(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
\S4method{emapplot_cluster}{gseaResult}(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
\S4method{emapplot_cluster}{compareClusterResult}(
x,
showCategory = 30,
color = "p.adjust",
label_format = 30,
...
)
emapplot_cluster.enrichResult(
x,
showCategory = 30,
color = "p.adjust",
cex_line = 1,
with_edge = TRUE,
nWords = 4,
nCluster = NULL,
split = NULL,
min_edge = 0.2,
cex_label_group = 1,
label_style = "shadowtext",
group_legend = FALSE,
cex_category = 1,
label_format = 30,
repel = FALSE,
shadowtext = TRUE,
...
)
emapplot_cluster.compareClusterResult(
x,
showCategory = 30,
color = "p.adjust",
cex_line = 1,
with_edge = TRUE,
nWords = 4,
nCluster = NULL,
split = NULL,
min_edge = 0.2,
cex_label_group = 1,
pie = "equal",
legend_n = 5,
cex_category = 1,
label_style = "shadowtext",
group_legend = FALSE,
label_format = 30,
repel = FALSE,
shadowtext = TRUE,
...
)
}
\arguments{
\item{x}{Enrichment result.}
\item{showCategory}{A number or a vector of terms. If it is a number,
the first n terms will be displayed. If it is a vector of terms,
the selected terms will be displayed.}
\item{color}{Variable that used to color enriched terms, e.g. 'pvalue',
'p.adjust' or 'qvalue'.}
\item{label_format}{a numeric value that sets the wrap length; alternatively, a
custom function to format axis labels.}
\item{...}{Additional parameters used to set the position of the group label.
When the parameter repel is set to TRUE, additional parameters will take effect.
additional parameters can refer the following parameters.
\itemize{
\item \code{force} Force of repulsion between overlapping text labels. Defaults to 1.
\item \code{nudge_x, nudge_y} Horizontal and vertical adjustments to nudge
the starting position of each text label.
\item \code{direction} "both", "x", or "y" – direction in which to adjust position of labels.
}}
\item{cex_line}{Scale of line width}
\item{with_edge}{Logical, if TRUE (the default), draw the edges of the network diagram.}
\item{nWords}{Numeric, the number of words in the cluster tags, the default value is 4.}
\item{nCluster}{Numeric, the number of clusters,
the default value is square root of the number of nodes.}
\item{split}{Separate result by 'category' variable}
\item{min_edge}{The minimum similarity threshold for whether
two nodes are connected, should between 0 and 1, default value is 0.2.}
\item{cex_label_group}{Numeric, scale of group labels size, the default value is 1.}
\item{label_style}{style of group label, one of "shadowtext" and "ggforce".}
\item{group_legend}{Logical, if TRUE, the grouping legend will be displayed.
The default is FALSE}
\item{cex_category}{Numeric, indicating the size by which plotting category
nodes should be scaled relative to the default.}
\item{repel}{whether to correct the position of the label. Defaults to FALSE.}
\item{shadowtext}{a logical value, whether to use shadow font. Defaults to TRUE.}
\item{pie}{Proportion of clusters in the pie chart, one of
'equal' (default) and 'Count'.}
\item{legend_n}{Number of circle in legend, the default value is 5.}
}
\value{
ggplot object
}
\description{
Functional grouping network diagram for enrichment result of
over-representation test or gene set enrichment analysis
}
\details{
This function visualizes gene sets as a grouped network (i.e. enrichment map).
Gene sets with high similarity tend to cluster together, making it easier
for interpretation. It adds clustering on the basis of emapplot's network graph.
For more details, please refer to the documentation of \link{emapplot}.
}
\examples{
\dontrun{
library(clusterProfiler)
library(org.Hs.eg.db)
library(enrichplot)
library(GOSemSim)
library(DOSE)
data(geneList)
gene <- names(geneList)[abs(geneList) > 2]
ego <- enrichGO(gene = gene,
universe = names(geneList),
OrgDb = org.Hs.eg.db,
ont = "BP",
pAdjustMethod = "BH",
pvalueCutoff = 0.01,
qvalueCutoff = 0.05,
readable = TRUE)
d <- godata('org.Hs.eg.db', ont="BP")
ego2 <- pairwise_termsim(ego, method = "Wang", semData = d)
emapplot_cluster(ego2, showCategory = 80)
}
}
|
library(rbokeh)
suppressMessages(library(dplyr))
bike <- read.csv("~/Deere/data/Bike_Share_Stations.csv")
bike_small <- bike %>% select(LATITUDE, LONGITUDE, NUMBER_OF_DOCKS)
bike_small %>% head
gmap(lat = 39.768597, lng = -86.162682, zoom = 13,
width = 600, height = 800, map_type = "road_map") %>%
ly_points(LONGITUDE, LATITUDE, data = bike_small,
fill_alpha = 0.5, size = 1.25*NUMBER_OF_DOCKS, color = "black",
hover = c(NUMBER_OF_DOCKS)) | /2_htmlwidgets_Bike_Stations.R | no_license | philbowsher/Foundation-of-the-R-Workflow-workshop-2017-10-08 | R | false | false | 472 | r | library(rbokeh)
suppressMessages(library(dplyr))
bike <- read.csv("~/Deere/data/Bike_Share_Stations.csv")
bike_small <- bike %>% select(LATITUDE, LONGITUDE, NUMBER_OF_DOCKS)
bike_small %>% head
gmap(lat = 39.768597, lng = -86.162682, zoom = 13,
width = 600, height = 800, map_type = "road_map") %>%
ly_points(LONGITUDE, LATITUDE, data = bike_small,
fill_alpha = 0.5, size = 1.25*NUMBER_OF_DOCKS, color = "black",
hover = c(NUMBER_OF_DOCKS)) |
\name{bowlerWktsRunsPlot}
\alias{bowlerWktsRunsPlot}
\title{
Compute and plot the runs conceded versus the wickets taken
}
\description{
This function creates boxplots on the runs conceded for wickets taken for the bowler
}
\usage{
bowlerWktsRunsPlot(file, name = "A Googly")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{
This is the <bowler>.csv file obtained with an initial getPlayerData()
}
\item{name}{
Name of the bowler
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
None
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{bowlerWktsFreqPercent}}
\code{\link{relativeBowlingER}}
\code{\link{relativeBowlingPerf}}
\code{\link{bowlerHistWickets}}
}
\examples{
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# Retrieve the file path of a data file installed with cricketr
pathToFile <- system.file("data", "kumble.csv", package = "cricketr")
bowlerWktsRunsPlot(pathToFile,"Anil Kumble")
# Note: This example uses the file kumble.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
# The general format is pkg-function(pathToFile,par1,...)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/bowlerWktsRunsPlot.Rd | no_license | kmaheshkulkarni/cricketr | R | false | false | 1,747 | rd | \name{bowlerWktsRunsPlot}
\alias{bowlerWktsRunsPlot}
\title{
Compute and plot the runs conceded versus the wickets taken
}
\description{
This function creates boxplots on the runs conceded for wickets taken for the bowler
}
\usage{
bowlerWktsRunsPlot(file, name = "A Googly")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{
This is the <bowler>.csv file obtained with an initial getPlayerData()
}
\item{name}{
Name of the bowler
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
None
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{bowlerWktsFreqPercent}}
\code{\link{relativeBowlingER}}
\code{\link{relativeBowlingPerf}}
\code{\link{bowlerHistWickets}}
}
\examples{
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# Retrieve the file path of a data file installed with cricketr
pathToFile <- system.file("data", "kumble.csv", package = "cricketr")
bowlerWktsRunsPlot(pathToFile,"Anil Kumble")
# Note: This example uses the file kumble.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
# The general format is pkg-function(pathToFile,par1,...)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Batch examples
# Note: In some cases, you'll need to wait for a step to complete in your AWS
# account before you can successfully run the next step.
# Requires the paws.* AWS SDK packages plus jsonlite, and assumes AWS
# credentials are already configured - TODO confirm credential setup.
# To set up the Batch compute environment, get security group and subnet info
# for the default VPC.
default_vpc <- paws.ec2::describe_vpcs(
  Filters = "isDefault=true"
)$Vpcs[[1]]
security_group <- paws.ec2::describe_security_groups(
  Filters = sprintf("vpc-id=%s", default_vpc$VpcId),
  GroupNames = "default"
)$SecurityGroups[[1]]
subnets <- paws.ec2::describe_subnets(
  Filters = sprintf("vpc-id=%s", default_vpc$VpcId)
)$Subnets
#-------------------------------------------------------------------------------
# Set up an IAM role for Batch.
role_name <- "TestBatchServiceRole"
policy_arn <- "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
# Trust policy allowing the AWS Batch service (batch.amazonaws.com) to
# assume this role; serialised to JSON with jsonlite below.
trust_policy <- list(
  Version = "2012-10-17",
  Statement = list(
    list(
      Effect = "Allow",
      Principal = list(
        Service = "batch.amazonaws.com"
      ),
      Action = "sts:AssumeRole"
    )
  )
)
iam <- paws.iam::create_role(
  RoleName = role_name,
  AssumeRolePolicyDocument = jsonlite::toJSON(trust_policy, auto_unbox = TRUE)
)
paws.iam::attach_role_policy(
  RoleName = role_name,
  PolicyArn = policy_arn
)
#-------------------------------------------------------------------------------
# Set up a compute environment: the resources on which Batch jobs will run.
# Uses the default VPC's security group and subnets collected above.
paws.batch::create_compute_environment(
  type = "MANAGED",
  computeEnvironmentName = "TestComputeEnvironment",
  computeResources = list(
    type = "EC2",
    desiredvCpus = 1L,
    ec2KeyPair = "default",
    instanceRole = "ecsInstanceRole",
    instanceTypes = "optimal",
    maxvCpus = 128L,
    minvCpus = 0L,
    securityGroupIds = security_group$GroupId,
    subnets = sapply(subnets, function(x) x$SubnetId)
  ),
  serviceRole = iam$Role$Arn,
  state = "ENABLED"
)
# Set up a job queue for the compute environment.
paws.batch::create_job_queue(
  computeEnvironmentOrder = list(
    list(
      computeEnvironment = "TestComputeEnvironment",
      order = 1L
    )
  ),
  jobQueueName = "TestJobQueue",
  priority = 1L,
  state = "ENABLED"
)
# Add an example job definition -- sleep 10 seconds.
job_def <- paws.batch::register_job_definition(
  type = "container",
  containerProperties = list(
    command = list(
      "sleep",
      "10"
    ),
    image = "busybox",
    memory = 128L,
    vcpus = 1L
  ),
  jobDefinitionName = "sleep10"
)
# Submit a job.
paws.batch::submit_job(
  jobDefinition = "sleep10",
  jobName = "Example",
  jobQueue = "TestJobQueue"
)
# List the submitted job(s).
paws.batch::list_jobs(
  jobQueue = "TestJobQueue",
  jobStatus = "SUBMITTED"
)
# Clean up. You may have to wait for some steps to complete.
# Note the ordering: queues and compute environments must be DISABLED
# before they can be deleted.
paws.batch::deregister_job_definition(jobDefinition = job_def$jobDefinitionArn)
paws.batch::update_job_queue("TestJobQueue", state = "DISABLED")
paws.batch::delete_job_queue("TestJobQueue")
paws.batch::update_compute_environment("TestComputeEnvironment", state = "DISABLED")
paws.batch::delete_compute_environment(computeEnvironment = "TestComputeEnvironment")
paws.iam::detach_role_policy(role_name, policy_arn)
paws.iam::delete_role(role_name)
| /examples/batch.R | permissive | CR-Mercado/paws | R | false | false | 3,214 | r | # Batch examples
# Note: In some cases, you'll need to wait for a step to complete in your AWS
# account before you can successfully run the next step.
# To set up the Batch compute environment, get security group and subnet info
# for the default VPC.
default_vpc <- paws.ec2::describe_vpcs(
Filters = "isDefault=true"
)$Vpcs[[1]]
security_group <- paws.ec2::describe_security_groups(
Filters = sprintf("vpc-id=%s", default_vpc$VpcId),
GroupNames = "default"
)$SecurityGroups[[1]]
subnets <- paws.ec2::describe_subnets(
Filters = sprintf("vpc-id=%s", default_vpc$VpcId)
)$Subnets
#-------------------------------------------------------------------------------
# Set up an IAM role for Batch.
role_name <- "TestBatchServiceRole"
policy_arn <- "arn:aws:iam::aws:policy/service-role/AWSBatchServiceRole"
trust_policy <- list(
Version = "2012-10-17",
Statement = list(
list(
Effect = "Allow",
Principal = list(
Service = "batch.amazonaws.com"
),
Action = "sts:AssumeRole"
)
)
)
iam <- paws.iam::create_role(
RoleName = role_name,
AssumeRolePolicyDocument = jsonlite::toJSON(trust_policy, auto_unbox = TRUE)
)
paws.iam::attach_role_policy(
RoleName = role_name,
PolicyArn = policy_arn
)
#-------------------------------------------------------------------------------
# Set up a compute environment: the resources on which Batch jobs will run.
paws.batch::create_compute_environment(
type = "MANAGED",
computeEnvironmentName = "TestComputeEnvironment",
computeResources = list(
type = "EC2",
desiredvCpus = 1L,
ec2KeyPair = "default",
instanceRole = "ecsInstanceRole",
instanceTypes = "optimal",
maxvCpus = 128L,
minvCpus = 0L,
securityGroupIds = security_group$GroupId,
subnets = sapply(subnets, function(x) x$SubnetId)
),
serviceRole = iam$Role$Arn,
state = "ENABLED"
)
# Set up a job queue for the compute environment.
paws.batch::create_job_queue(
computeEnvironmentOrder = list(
list(
computeEnvironment = "TestComputeEnvironment",
order = 1L
)
),
jobQueueName = "TestJobQueue",
priority = 1L,
state = "ENABLED"
)
# Add an example job definition -- sleep 10 seconds.
job_def <- paws.batch::register_job_definition(
type = "container",
containerProperties = list(
command = list(
"sleep",
"10"
),
image = "busybox",
memory = 128L,
vcpus = 1L
),
jobDefinitionName = "sleep10"
)
# Submit a job.
paws.batch::submit_job(
jobDefinition = "sleep10",
jobName = "Example",
jobQueue = "TestJobQueue"
)
# List the submitted job(s).
paws.batch::list_jobs(
jobQueue = "TestJobQueue",
jobStatus = "SUBMITTED"
)
# Clean up. You may have to wait for some steps to complete.
paws.batch::deregister_job_definition(jobDefinition = job_def$jobDefinitionArn)
paws.batch::update_job_queue("TestJobQueue", state = "DISABLED")
paws.batch::delete_job_queue("TestJobQueue")
paws.batch::update_compute_environment("TestComputeEnvironment", state = "DISABLED")
paws.batch::delete_compute_environment(computeEnvironment = "TestComputeEnvironment")
paws.iam::detach_role_policy(role_name, policy_arn)
paws.iam::delete_role(role_name)
|
#' Unfortunately, the data we are interested in use several calendars.
#'
#' The 'standard' or 'gregorian' calendar
#' The 'proleptic gregorian' calendar
#' A '360_day' calendar with 12 months each of 30 days
#' A '365_day' or 'noleap' calendar with 12 months of
#' c(31,28,31,30,31,30,31,31,30,31,30,31) days.
#'
#' So we need custom date tools - we will store a date as an
#' ISO-8601 format string down to minutes: 'YYYY-MM-DD:HH:MM'
#' combined with a string describing the calendar in use.
#' Fortunately, we don't need to bother with time zones.
#' Negative years are not currently supported.
#'
#' @export
#' @param string Vector of date strings in format 'YYYY-MM-DD:HH:MM'
#' @param calendar One of 'gregorian', '360_day' and '365_day'
#'   (aliases 'standard', 'proleptic_gregorian' and 'noleap' accepted)
#' @return A list with two components: 'date' (the validated,
#'   re-normalised date strings) and 'calendar' (the normalised
#'   calendar name)
GSDF.time<-function(string,calendar) {
  result<-list()
  result$date<-string
  # Normalise the calendar name first, then validate every date
  # string against that calendar (this also re-normalises the
  # string formatting, e.g. zero-padding).
  result$calendar<-GSDF.time.check.calendar(calendar)
  result<-GSDF.time.check.time(result)
  return(result)
}
#' Normalise and validate a calendar name
#'
#' Accepts the supported calendars ('gregorian', '360_day',
#' '365_day') plus their common aliases: 'standard' and
#' 'proleptic_gregorian' map to 'gregorian', and 'noleap' maps to
#' '365_day'. Matching is case-insensitive.
#'
#' @param calendar A calendar name string.
#' @return One of 'gregorian', '360_day' or '365_day'.
GSDF.time.check.calendar<-function(calendar) {
  calendar<-tolower(calendar)
  # Map aliases onto the canonical names.
  if(calendar %in% c('standard','proleptic_gregorian')) {
    calendar<-'gregorian'
  }
  if(calendar=='noleap') {
    calendar<-'365_day'
  }
  # Anything else is unsupported.
  if(!(calendar %in% c('gregorian','360_day','365_day'))) {
    stop(sprintf("Unsupported calendar %s",calendar))
  }
  return(calendar)
}
#' Check that dates are meaningful
#'
#' Check that each string date is valid given the calendar, and
#' re-normalise each string to the canonical, zero-padded
#' 'YYYY-MM-DD:HH:MM' layout.
#'
#' Stops with an informative error if any date fails validation.
#'
#' @param date A GSDF time object (list with 'date' and 'calendar')
#' @return The GSDF time object, with its date strings re-normalised.
GSDF.time.check.time<-function(date) {
  # First pass: every string must at least look like YYYY-MM-DD:HH:MM.
  # The pattern is unanchored, so extra trailing characters (e.g. a
  # ':SS' seconds field) are tolerated and stripped by the rebuild below.
  w<-which(!grepl("\\d\\d\\d\\d-\\d\\d-\\d\\d:\\d\\d:\\d\\d",
                  date$date))
  if(length(w)==1) {
    stop(sprintf("%s is not in format YYYY-MM-DD:HH:MM",
                 date$date[w]))
  }
  if(length(w)>1) {
    stop(sprintf("%s and %d others are not in format YYYY-MM-DD:HH:MM",
                 date$date[w[1]],length(w)-1))
  }
  # Split each string into its numeric components (columns 2-6 of the
  # match matrix are the five capture groups).
  m<-stringr::str_match(date$date,
          "(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d):(\\d\\d):(\\d\\d)")
  years   <- as.integer(m[,2])
  months  <- as.integer(m[,3])
  days    <- as.integer(m[,4])
  hours   <- as.integer(m[,5])
  minutes <- as.integer(m[,6])
  # Rebuild the strings in canonical, zero-padded form.
  date$date<-sprintf("%04d-%02d-%02d:%02d:%02d",
                      years,months,days,hours,minutes)
  if(date$calendar=='gregorian') {
    # Delegate validity checking to lubridate: invalid calendar dates
    # (e.g. February 30th) parse to NA.
    l<-lubridate::ymd_hms(sprintf("%s:00",date$date),quiet=TRUE)
    w<-which(is.na(l))
    if(length(w)==1) {
      stop(sprintf("%s is not a valid gregorian date",
                   date$date[w]))
    }
    if(length(w)>1) {
      stop(sprintf("%s and %d others are not valid gregorian dates",
                   date$date[w[1]],length(w)-1))
    }
  }
  if(date$calendar=='360_day') {
    # Every month has exactly 30 days in this calendar.
    w<-which(months<1 | months>12 |
             days<1 | days>30 |
             hours>23 |
             minutes>59 )
    if(length(w)==1) {
      stop(sprintf("%s is not a valid 360_day date",
                   date$date[w]))
    }
    if(length(w)>1) {
      stop(sprintf("%s and %d others are not valid 360_day dates",
                   date$date[w[1]],length(w)-1))
    }
  }
  if(date$calendar=='365_day') {
    # Fixed month lengths - February always has 28 days (no leap years).
    month.length=c(31,28,31,30,31,30,31,31,30,31,30,31)
    w<-which(months<1 | months>12 |
             days<1 | days>month.length[months] |
             hours>23 |
             minutes>59 )
    if(length(w)==1) {
      stop(sprintf("%s is not a valid 365_day date",
                   date$date[w]))
    }
    if(length(w)>1) {
      stop(sprintf("%s and %d others are not valid 365_day dates",
                   date$date[w[1]],length(w)-1))
    }
  }
  return(date)
}
#' Add a period to a GSDF.time
#'
#' Increment the time by a number of years, months, days,
#' hours or minutes.
#'
#' Increments can be negative. Fractional increments are supported
#' for days, hours and minutes (fractions of a minute are
#' discarded); years and months must be whole numbers.
#'
#' @export
#' @param date - GSDF.time
#' @param offset numeric vector of time offsets.
#' @param units Units of the offset - one of 'years', 'months',
#'   'days', 'hours' or 'minutes'.
#' @return A GSDF.time with the offsets applied.
GSDF.time.increment<-function(date,offset,units) {
  if(units != 'years' && units != 'months' && units != 'days' &&
     units != 'hours' && units != 'minutes') {
    stop(sprintf("%s is not a supported time offset unit %s",
                 units,
                 "(expected 'years', 'months','days', 'hours, or 'minutes')"))
  }
  # Cope with different length of date and offset
  #if(length(date$date)!=length(offset)) {
  #  if(length(date$date)!=1 && length(offset)!=1) {
  #    stop(sprintf("Length mismatch: %d dates and %d offsets",
  #                 length(date$date),length(offset)))
  #  }
  #  if(length(date$date)==1) {
  #    date$date<-rep(date$date,length(offset))
  #  }
  #  if(length(offset==1)) {
  #    offset<-rep(offset,length(date$date))
  #  }
  #}
  # Split the string times into components
  m<-stringr::str_match(date$date,
       "(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d):(\\d\\d):(\\d\\d)")
  year  <- as.integer(m[,2])
  month <- as.integer(m[,3])
  day   <- as.integer(m[,4])
  hour  <- as.integer(m[,5])
  minute<- as.integer(m[,6])
  if(date$calendar=='gregorian') {
    # Delegate gregorian date arithmetic to lubridate.
    l.base<-lubridate::ymd_hms(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                               year,month,day,hour,minute))
    if(units=='years') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of years must be integers")
      }
      l.base<-l.base+lubridate::years(as.integer(offset))
    }
    if(units=='months') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of months must be integers")
      }
      # Months have variable length - choose here to just
      # change the month, leave day, hour etc. unchanged (%m+% rolls
      # back to the last valid day of the target month if needed).
      # NOTE(review): months(offset) relies on lubridate's numeric
      # method being registered (the namespace is loaded by the
      # ymd_hms call above) - confirm this holds when lubridate is
      # Imported but not attached.
      l.base<-lubridate::"%m+%"(l.base,months(offset))
    }
    if(units=='days') {
      # Whole days via lubridate; push any fractional part down
      # to the next smaller unit.
      l.base<-l.base+lubridate::days(as.integer(offset))
      offset<-(offset-as.integer(offset))*24
      units<-'hours'
    }
    if(units=='hours') {
      l.base<-l.base+lubridate::hours(as.integer(offset))
      offset<-(offset-as.integer(offset))*60
      units<-'minutes'
    }
    if(units=='minutes') {
      # Fractions of a minute are discarded.
      l.base<-l.base+lubridate::minutes(as.integer(offset))
    }
    string.times<-sprintf("%04d-%02d-%02d:%02d:%02d",
                 lubridate::year(l.base),
                 lubridate::month(l.base),
                 lubridate::day(l.base),
                 lubridate::hour(l.base),
                 lubridate::minute(l.base))
    return(GSDF.time(string.times,date$calendar))
  }
  if(date$calendar=='360_day') {
    if(units=='years') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of years must be integers")
      }
      year<-year+offset
    }
    if(units=='months') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of months must be integers")
      }
      month<-month+offset
      # Use floor() - not as.integer(), which truncates towards zero -
      # so that negative month offsets borrow correctly from the year
      # (e.g. January - 1 month must land in December of the PREVIOUS
      # year). This matches the floor() carries used elsewhere in this
      # function.
      year<-year+floor((month-1)/12)
      month<-month%%12
      w<-which(month==0)
      if(length(w)>0) month[w]<-12
    }
    if(units=='days') {
      # Reduce days (and hours) to minutes and do a single carry pass.
      offset<-offset*24*60
      units<-'minutes'
    }
    if(units=='hours') {
      offset<-offset*60
      units<-'minutes'
    }
    if(units=='minutes') {
      # Carry minutes -> hours -> days -> months -> years, with
      # every month exactly 30 days long.
      minute<-minute+as.integer(offset)
      hour<-hour+floor(minute/60)
      minute<-minute%%60
      day<-day+floor(hour/24)
      hour<-hour%%24
      month<-month+floor((day-1)/30)
      day<-day%%30
      w<-which(day==0)
      if(length(w)>0) day[w]<-30
      year<-year+floor((month-1)/12)
      month<-month%%12
      w<-which(month==0)
      if(length(w)>0) month[w]<-12
    }
    string.times<-sprintf("%04d-%02d-%02d:%02d:%02d",
                          as.integer(year),
                          as.integer(month),
                          as.integer(day),
                          as.integer(hour),
                          as.integer(minute))
    return(GSDF.time(string.times,date$calendar))
  }
  if(date$calendar=='365_day') {
    if(units=='years') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of years must be integers")
      }
      year<-year+offset
    }
    if(units=='months') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of months must be integers")
      }
      month<-month+offset
      year<-year+floor((month-1)/12)
      month<-month%%12
      w<-which(month==0)
      if(length(w)>0) month[w]<-12
    }
    if(units=='days') {
      offset<-offset*24*60
      units<-'minutes'
    }
    if(units=='hours') {
      offset<-offset*60
      units<-'minutes'
    }
    month.lengths<-c(31,28,31,30,31,30,31,31,30,31,30,31)
    # Cumulative days at the start of each month; the 13th entry (365)
    # is the upper bound used when scanning December.
    month.tostart<-c(0,31,59,90,120,151,181,212,243,273,304,334,365)
    if(units=='minutes') {
      # Work in minutes-since-the-start-of-the-year, apply the offset,
      # then decompose back into calendar components.
      julian<-month.tostart[month]*24*60+(day-1)*24*60+
              hour*60+minute+as.integer(offset)
      year<-year+floor(julian/(365*24*60))
      julian<-julian%%(365*24*60)
      for(m in seq(1,12)) {
        w<-which(julian>=month.tostart[m]*24*60 &
                 julian<month.tostart[m+1]*24*60)
        if(length(w)==0) next
        month[w]<-m
        julian[w]<-julian[w]-month.tostart[m]*24*60
        day[w]<-as.integer(julian[w]/(24*60))+1
        julian[w]<-julian[w]%%(24*60)
        hour[w]<-as.integer(julian[w]/60)
        julian[w]<-julian[w]%%(60)
        minute[w]<-as.integer(julian[w])
      }
    }
    string.times<-sprintf("%04d-%02d-%02d:%02d:%02d",
                          as.integer(year),
                          as.integer(month),
                          as.integer(day),
                          as.integer(hour),
                          as.integer(minute))
    return(GSDF.time(string.times,date$calendar))
  }
}
#' Convert base + offset into GSDF.time format
#'
#' NetCDF files store date:time as a base time and numeric
#' offsets from that time given a calendar. The offsets
#' can be in units of days, hours, or minutes.
#'
#' Convert these into GSDF.time format
#' (Fractions of a minute are discarded).
#'
#' @export
#' @param offset numeric vector of time offsets.
#' @param base String time in format 'YYYY-MM-DD:HH:MM' -
#'        smaller components (minutes, hours) may be missing.
#' @param units Units of the offset - string 'hours', 'days', ...
#' @param calendar One of 'gregorian', '360_day' and '365_day'
#' @return A GSDF.time for base + offset
GSDF.time.from.base.and.offset<-function(offset,base,units,calendar) {
  calendar<-GSDF.time.check.calendar(calendar)
  # Assign base time components from the string
  m<-stringr::str_match(base,"(\\d+)-(\\d+)-(\\d+)")
  year  <- as.integer(m[,2])
  month <- as.integer(m[,3])
  day   <- as.integer(m[,4])
  if(is.na(year) | is.na(month) | is.na(day)) {
    stop(sprintf("%s is not a valid base time (expected YYYY-MM-DD + optional:HH:MM)",
                 base))
  }
  # Re-match with an extra field: column 5 of this match matrix is the
  # 4th capture group (the hour). Missing components default to 0.
  m<-stringr::str_match(base,"(\\d+)\\D(\\d+)\\D(\\d+)\\D(\\d+)")
  hour  <- as.integer(m[,5])
  if(is.na(hour)) hour<-0
  # Column 6 is the 5th capture group (the minute).
  m<-stringr::str_match(base,"(\\d+)\\D(\\d+)\\D(\\d+)\\D(\\d+)\\D(\\d+)")
  minute  <- as.integer(m[,6])
  if(is.na(minute)) minute<-0
  # The trailing ':00' seconds field is tolerated by
  # GSDF.time.check.time (unanchored pattern) and stripped when the
  # string is re-normalised.
  base<-GSDF.time(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                          year,month,day,hour,minute),calendar)
  result<-GSDF.time.increment(base,offset,units)
  return(result)
}
#' Calculate difference (in minutes) between two dates
#'
#' Need this for interpolation in time.
#'
#' The result is second - first: positive when 'second' is
#' later than 'first'.
#'
#' @export
#' @param first GSDF date
#' @param second GSDF date (same no. of dates as first, and the
#'        same calendar)
#' @return Vector of numeric differences (in minutes)
GSDF.time.difference<-function(first,second) {
  if(first$calendar!=second$calendar) {
    stop("Calendars do not match")
  }
  # Parse the date strings
  first.c<-stringr::str_match(first$date,
     "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
  second.c<-stringr::str_match(second$date,
     "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
  if(first$calendar=='gregorian') {
    # Convert both to POSIXct and let difftime handle leap years etc.
    first.l<-lubridate::ymd_hms(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                                as.integer(first.c[,2]),
                                as.integer(first.c[,3]),
                                as.integer(first.c[,4]),
                                as.integer(first.c[,5]),
                                as.integer(first.c[,6])))
    second.l<-lubridate::ymd_hms(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                                as.integer(second.c[,2]),
                                as.integer(second.c[,3]),
                                as.integer(second.c[,4]),
                                as.integer(second.c[,5]),
                                as.integer(second.c[,6])))
    result<-as.numeric(difftime(second.l,first.l,units='mins'))
    return(result)
  }
  if(first$calendar=='360_day') {
    # Every month is 30 days, so the difference is pure arithmetic.
    result<-(as.integer(second.c[,2])-as.integer(first.c[,2]))*60*24*30*12 +
            (as.integer(second.c[,3])-as.integer(first.c[,3]))*60*24*30 +
            (as.integer(second.c[,4])-as.integer(first.c[,4]))*60*24 +
            (as.integer(second.c[,5])-as.integer(first.c[,5]))*60 +
            (as.integer(second.c[,6])-as.integer(first.c[,6]))
    return(result)
  }
  if(first$calendar=='365_day') {
    # Cumulative days at the start of each month (no leap years).
    month.tostart<-c(0,31,59,90,120,151,181,212,243,273,304,334)
    result<-(as.integer(second.c[,2])-as.integer(first.c[,2]))*60*24*365 +
            (month.tostart[as.integer(second.c[,3])]-
             month.tostart[as.integer(first.c[,3])])*60*24 +
            (as.integer(second.c[,4])-as.integer(first.c[,4]))*60*24 +
            (as.integer(second.c[,5])-as.integer(first.c[,5]))*60 +
            (as.integer(second.c[,6])-as.integer(first.c[,6]))
    return(result)
  }
  stop(sprintf("Unsupported calendar %s",first$calendar))
}
#' Adjust time values for change in calendar
#'
#' Sometimes we want to specify a time range, but we don't know the calendar
#' e.g. a year is -01-01:00:00 to -12-31:23:59 in Gregorian and
#' -01-01:00:00 to -12:30:23:59 in 360_day.
#' So it's useful to give the spec in gregorian and auto-shrink it if the
#' data turns out to have a 360_day calendar when we open the file.
#'
#' Assumes that you are going for the full month or year - use
#' with caution.
#'
#' Only conversions TO '360_day' or '365_day' are supported;
#' converting 360_day to 365_day, or anything to gregorian, stops
#' with an error.
#'
#' @export
#' @param date GSDF date
#' @param new.calendar Calendar to convert to ('360_day' or '365_day',
#'        aliases accepted)
#' @return The GSDF date expressed in the new calendar
GSDF.time.recalendar<-function(date,new.calendar) {
  new.calendar<-GSDF.time.check.calendar(new.calendar)
  if(date$calendar==new.calendar) return(date)
  if(new.calendar=='gregorian') {
    stop("Can't recalendar gregorian")
  }
  if(new.calendar=='365_day' && date$calendar=='360_day') {
    stop("Can't recalendar 360_day to 365_day")
  }
  # Move dates at the end of the month in the source calendar,
  # to the last day of the month in the new calendar.
  if(new.calendar=='360_day') {
    m<-stringr::str_match(date$date,
       "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
    days<-as.integer(m[,4])
    months<-as.integer(m[,3])
    # Day 31, and Feb 28/29 (the gregorian month ends), all map to
    # day 30 - the last day of every 360_day month.
    w<-which(days>30 |
             (months==2 & days==29) |
             (months==2 & days==28))
    if(length(w)>0) days[w]<-30
    date$date<-sprintf("%s-%s-%02d:%s:%s",m[,2],m[,3],days,m[,5],m[,6])
    date$calendar<-'360_day'
    return(date)
  }
  if(new.calendar=='365_day') {
    m<-stringr::str_match(date$date,
       "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
    days<-as.integer(m[,4])
    months<-as.integer(m[,3])
    # Feb 29 does not exist in the 365_day calendar; clamp to Feb 28.
    w<-which(months==2 & days==29)
    if(length(w)>0) days[w]<-28
    date$date<-sprintf("%s-%s-%02d:%s:%s",m[,2],m[,3],days,m[,5],m[,6])
    date$calendar<-'365_day'
    return(date)
  }
  stop(sprintf("SNH error - unsupported calendar %s",new.calendar))
}
| /GSDF/R/GSDF.time.R | permissive | oldweather/GSDF | R | false | false | 16,244 | r | #' Unfortunately, the data we are interested in use several calendars.
#'
#' The 'standard' or 'gregorian' calendar
#' The 'proleptic gregorian' calendar
#' A '360_day' calendar with 12 months each of 30 days
#' A '365_day' or 'noleap' calendar with 12 months of
#' c(31,28,31,30,31,30,31,31,30,31,30,31) days.
#'
#' So we need custom date tools - we will store a date as an
#' ISO-8601 format string down to minutes: 'YYYY-MM-DD:HH:MM'
#' combined with a string describing the calendar in use.
#' Fortunately, we don't need to bother with time zones.
#' Negative years are not currently supported.
#'
#' @export
#' @param string Vector of date strings in format 'YYYY-MM-DD:HH:SS'
#' @param calendar One of 'gregorian', '360_day' and '365_day'
#' @Result A list containing two components
GSDF.time<-function(string,calendar) {
result<-list()
result$date<-string
result$calendar<-GSDF.time.check.calendar(calendar)
result<-GSDF.time.check.time(result)
return(result)
}
#' Check that the calendar is supported
#'
#' Can do gregorian, 360_day and 365_day calendars.
#'
#' 365_day may be called 'noleap'
#' gregorian may be called 'standard' or
#' 'proleptic_gregorian'.
#'
#' @param date A celendar string
#' @result One of 'gregorian', '360_day' and '365_day'
GSDF.time.check.calendar<-function(calendar) {
calendar<-tolower(calendar)
if(calendar=='standard' ||
calendar=='proleptic_gregorian') {
calendar<-'gregorian'
}
if(calendar=='noleap') {
calendar<-'365_day'
}
if(calendar != 'gregorian' &&
calendar != '360_day' &&
calendar != '365_day') {
stop(sprintf("Unsupported calendar %s",calendar))
}
return(calendar)
}
#' Check that dates are meaningful
#'
#' Check that each string date is valid
#' given the calendar.
#'
#'
#' @param date A GSDF time object
#' @result TRUE if all the contents are valid.
GSDF.time.check.time<-function(date) {
w<-which(!grepl("\\d\\d\\d\\d-\\d\\d-\\d\\d:\\d\\d:\\d\\d",
date$date))
if(length(w)==1) {
stop(sprintf("%s is not in format YYYY-MM-DD:HH:MM",
date$date[w]))
}
if(length(w)>1) {
stop(sprintf("%s and %d others are not in format YYYY-MM-DD:HH:MM",
date$date[w[1]],length(w)-1))
}
m<-stringr::str_match(date$date,
"(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d):(\\d\\d):(\\d\\d)")
years <- as.integer(m[,2])
months <- as.integer(m[,3])
days <- as.integer(m[,4])
hours <- as.integer(m[,5])
minutes <- as.integer(m[,6])
date$date<-sprintf("%04d-%02d-%02d:%02d:%02d",
years,months,days,hours,minutes)
if(date$calendar=='gregorian') {
l<-lubridate::ymd_hms(sprintf("%s:00",date$date),quiet=TRUE)
w<-which(is.na(l))
if(length(w)==1) {
stop(sprintf("%s is not a valid gregorian date",
date$date[w]))
}
if(length(w)>1) {
stop(sprintf("%s and %d others are not valid gregorian dates",
date$date[w[1]],length(w)-1))
}
}
if(date$calendar=='360_day') {
w<-which(months<1 | months>12 |
days<1 | days>30 |
hours>23 |
minutes>59 )
if(length(w)==1) {
stop(sprintf("%s is not a valid 360_day date",
date$date[w]))
}
if(length(w)>1) {
stop(sprintf("%s and %d others are not valid 360_day dates",
date$date[w[1]],length(w)-1))
}
}
if(date$calendar=='365_day') {
month.length=c(31,28,31,30,31,30,31,31,30,31,30,31)
w<-which(months<1 | months>12 |
days<1 | days>month.length[months] |
hours>23 |
minutes>59 )
if(length(w)==1) {
stop(sprintf("%s is not a valid 365_day date",
date$date[w]))
}
if(length(w)>1) {
stop(sprintf("%s and %d others are not valid 365_day dates",
date$date[w[1]],length(w)-1))
}
}
return(date)
}
#' Add a period to a GSDF.time
#'
#' Increment the time by a number of years, month, days,
#' hours or minutes.
#'
#' Increments can be negative, and can be fractional.
#'
#' @export
#' @param date - GSDF.time
#' @param units Units of the offset - string 'hours', 'days', ...
#' @param offset numeric vector of time offsets.
#' @Result A list containing two components
GSDF.time.increment<-function(date,offset,units) {
  # Validate the offset unit before doing any arithmetic.
  if(units != 'years' && units != 'months' && units != 'days' &&
     units != 'hours' && units != 'minutes') {
     stop(sprintf("%s is not a supported time offset unit %s",
                  units,
                  "(expected 'years', 'months','days', 'hours, or 'minutes')"))
  }
  # Cope with different length of date and offset
  #if(length(date$date)!=length(offset)) {
  #  if(length(date$date)!=1 && length(offset)!=1) {
  #     stop(sprintf("Length mismatch: %d dates and %d offsets",
  #                   length(date$date),length(offset)))
  #  }
  #  if(length(date$date)==1) {
  #    date$date<-rep(date$date,length(offset))
  #  }
  #  if(length(offset==1)) {
  #    offset<-rep(offset,length(date$date))
  #  }
  #}
  # Split the string times into components
  # (columns of m: full match, then year, month, day, hour, minute)
  m<-stringr::str_match(date$date,
        "(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d):(\\d\\d):(\\d\\d)")
  year  <- as.integer(m[,2])
  month <- as.integer(m[,3])
  day   <- as.integer(m[,4])
  hour  <- as.integer(m[,5])
  minute<- as.integer(m[,6])
  if(date$calendar=='gregorian') {
    # Real calendar: delegate the arithmetic to lubridate POSIX times.
    l.base<-lubridate::ymd_hms(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                                       year,month,day,hour,minute))
    if(units=='years') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of years must be integers")
      }
      l.base<-l.base+lubridate::years(as.integer(offset))
    }
    if(units=='months') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of months must be integers")
      }
      # Months have variable length - choose here to just
      # change the month, leave day, hour etc. unchanged.
      l.base<-lubridate::"%m+%"(l.base,months(offset))
    }
    # Fractional days/hours cascade down: the integer part is added in the
    # current unit and the remainder is re-expressed in the next smaller one.
    if(units=='days') {
      l.base<-l.base+lubridate::days(as.integer(offset))
      offset<-(offset-as.integer(offset))*24
      units<-'hours'
    }
    if(units=='hours') {
      l.base<-l.base+lubridate::hours(as.integer(offset))
      offset<-(offset-as.integer(offset))*60
      units<-'minutes'
    }
    if(units=='minutes') {
      l.base<-l.base+lubridate::minutes(as.integer(offset))
    }
    # Re-serialise in the canonical GSDF string format.
    string.times<-sprintf("%04d-%02d-%02d:%02d:%02d",
                          lubridate::year(l.base),
                          lubridate::month(l.base),
                          lubridate::day(l.base),
                          lubridate::hour(l.base),
                          lubridate::minute(l.base))
    return(GSDF.time(string.times,date$calendar))
  }
  if(date$calendar=='360_day') {
    # Every month has exactly 30 days, so all carries are modular arithmetic.
    if(units=='years') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of years must be integers")
      }
      year<-year+offset
    }
    if(units=='months') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of months must be integers")
      }
      month<-month+offset
      # NOTE(review): as.integer() truncates towards zero while the 365_day
      # branch below uses floor() for the same carry - confirm behaviour for
      # negative month offsets that cross a year boundary.
      year<-year+as.integer((month-1)/12)
      month<-month%%12
      w<-which(month==0)
      if(length(w)>0) month[w]<-12   # month 0 after %% means December
    }
    # Days and hours are uniform in this calendar: fold them into minutes.
    if(units=='days') {
      offset<-offset*24*60
      units<-'minutes'
    }
    if(units=='hours') {
      offset<-offset*60
      units<-'minutes'
    }
    if(units=='minutes') {
      # Carry minutes -> hours -> days -> months -> years; components are
      # 1-based, hence the "x %% k then fix up zero" pattern for day/month.
      minute<-minute+as.integer(offset)
      hour<-hour+floor(minute/60)
      minute<-minute%%60
      day<-day+floor(hour/24)
      hour<-hour%%24
      month<-month+floor((day-1)/30)
      day<-day%%30
      w<-which(day==0)
      if(length(w)>0) day[w]<-30
      year<-year+floor((month-1)/12)
      month<-month%%12
      w<-which(month==0)
      if(length(w)>0) month[w]<-12
    }
    string.times<-sprintf("%04d-%02d-%02d:%02d:%02d",
                          as.integer(year),
                          as.integer(month),
                          as.integer(day),
                          as.integer(hour),
                          as.integer(minute))
    return(GSDF.time(string.times,date$calendar))
  }
  if(date$calendar=='365_day') {
    if(units=='years') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of years must be integers")
      }
      year<-year+offset
    }
    if(units=='months') {
      if(any(as.integer(offset)!=offset)) {
        stop("Increments in units of months must be integers")
      }
      month<-month+offset
      year<-year+floor((month-1)/12)
      month<-month%%12
      w<-which(month==0)
      if(length(w)>0) month[w]<-12
    }
    if(units=='days') {
      offset<-offset*24*60
      units<-'minutes'
    }
    if(units=='hours') {
      offset<-offset*60
      units<-'minutes'
    }
    # Cumulative day counts to the start of each month (no leap years); the
    # 13th entry (365) serves as the upper bound for December.
    # (month.lengths is defined but not used below.)
    month.lengths<-c(31,28,31,30,31,30,31,31,30,31,30,31)
    month.tostart<-c(0,31,59,90,120,151,181,212,243,273,304,334,365)
    if(units=='minutes') {
      # Work in minutes-since-start-of-year ('julian'): add the offset,
      # carry whole years, then map the remainder back to month/day/h/min.
      julian<-month.tostart[month]*24*60+(day-1)*24*60+
              hour*60+minute+as.integer(offset)
      year<-year+floor(julian/(365*24*60))
      julian<-julian%%(365*24*60)
      for(m in seq(1,12)) {
        # Select the dates that fall inside month m and decompose them.
        w<-which(julian>=month.tostart[m]*24*60 &
                 julian<month.tostart[m+1]*24*60)
        if(length(w)==0) next
        month[w]<-m
        julian[w]<-julian[w]-month.tostart[m]*24*60
        day[w]<-as.integer(julian[w]/(24*60))+1
        julian[w]<-julian[w]%%(24*60)
        hour[w]<-as.integer(julian[w]/60)
        julian[w]<-julian[w]%%(60)
        minute[w]<-as.integer(julian[w])
      }
    }
    string.times<-sprintf("%04d-%02d-%02d:%02d:%02d",
                          as.integer(year),
                          as.integer(month),
                          as.integer(day),
                          as.integer(hour),
                          as.integer(minute))
    return(GSDF.time(string.times,date$calendar))
  }
}
#' Convert base + offset into GSDF.time format
#'
#' NetCDF files store date:time as a base time and numeric
#' offsets from that time given a calendar. The offsets
#' can be in units of days, hours, or minutes.
#'
#' Convert these into GSDF.time format
#' (Fractions of a minute are discarded).
#'
#' @export
#' @param base String time in format 'YYYY-MM-DD:HH:MM'
#'  smaller components (hours, minutes) may be missing.
#' @param calendar One of 'gregorian', '360_day' and '365_day'
#' @param units Units of the offset - string 'hours', 'days', ...
#' @param offset numeric vector of time offsets.
#' @return A GSDF.time built from the base time plus the offsets
GSDF.time.from.base.and.offset<-function(offset,base,units,calendar) {
  calendar<-GSDF.time.check.calendar(calendar)
  # Assign base time components from the string.
  # The date part (YYYY-MM-DD) is mandatory; bail out if it cannot be parsed.
  m<-stringr::str_match(base,"(\\d+)-(\\d+)-(\\d+)")
  year  <- as.integer(m[,2])
  month <- as.integer(m[,3])
  day   <- as.integer(m[,4])
  if(is.na(year) | is.na(month) | is.na(day)) {
    stop(sprintf("%s is not a valid base time (expected YYYY-MM-DD + optional:HH:MM)",
                 base))
  }
  # Hour and minute are optional: re-match with progressively longer patterns
  # (any non-digit as separator) and default the missing component to zero.
  m<-stringr::str_match(base,"(\\d+)\\D(\\d+)\\D(\\d+)\\D(\\d+)")
  hour <- as.integer(m[,5])
  if(is.na(hour)) hour<-0
  m<-stringr::str_match(base,"(\\d+)\\D(\\d+)\\D(\\d+)\\D(\\d+)\\D(\\d+)")
  minute <- as.integer(m[,6])
  if(is.na(minute)) minute<-0
  # Canonicalise the base to a GSDF.time and delegate the arithmetic.
  # (The trailing ':00' seconds field is tolerated because GSDF.time's
  # patterns are unanchored - TODO confirm.)
  base<-GSDF.time(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                          year,month,day,hour,minute),calendar)
  result<-GSDF.time.increment(base,offset,units)
  return(result)
}
#' Calculate difference (in minutes) between two dates
#'
#' Need this for interpolation in time
#'
#' @export
#' @param first GSDF date
#' @param second GSDF date (same no. of dates as first)
#' @return Vector of numeric differences in minutes (second minus first)
GSDF.time.difference<-function(first,second) {
  # Differences across calendars are meaningless - refuse them.
  if(first$calendar!=second$calendar) {
    stop("Calendars do not match")
  }
  # Parse the date strings
  # (columns: full match, year, month, day, hour, minute)
  first.c<-stringr::str_match(first$date,
        "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
  second.c<-stringr::str_match(second$date,
        "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
  if(first$calendar=='gregorian') {
    # Real calendar: build POSIX times and let difftime handle leap years etc.
    first.l<-lubridate::ymd_hms(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                                        as.integer(first.c[,2]),
                                        as.integer(first.c[,3]),
                                        as.integer(first.c[,4]),
                                        as.integer(first.c[,5]),
                                        as.integer(first.c[,6])))
    second.l<-lubridate::ymd_hms(sprintf("%04d-%02d-%02d:%02d:%02d:00",
                                         as.integer(second.c[,2]),
                                         as.integer(second.c[,3]),
                                         as.integer(second.c[,4]),
                                         as.integer(second.c[,5]),
                                         as.integer(second.c[,6])))
    result<-as.numeric(difftime(second.l,first.l,units='mins'))
    return(result)
  }
  if(first$calendar=='360_day') {
    # Fixed 30-day months: component-wise difference scaled to minutes.
    result<-(as.integer(second.c[,2])-as.integer(first.c[,2]))*60*24*30*12 +
            (as.integer(second.c[,3])-as.integer(first.c[,3]))*60*24*30 +
            (as.integer(second.c[,4])-as.integer(first.c[,4]))*60*24 +
            (as.integer(second.c[,5])-as.integer(first.c[,5]))*60 +
            (as.integer(second.c[,6])-as.integer(first.c[,6]))
    return(result)
  }
  if(first$calendar=='365_day') {
    # Variable month lengths, no leap years: convert the month components via
    # cumulative days-to-month-start, everything else component-wise.
    month.tostart<-c(0,31,59,90,120,151,181,212,243,273,304,334)
    result<-(as.integer(second.c[,2])-as.integer(first.c[,2]))*60*24*365 +
            (month.tostart[as.integer(second.c[,3])]-
             month.tostart[as.integer(first.c[,3])])*60*24 +
            (as.integer(second.c[,4])-as.integer(first.c[,4]))*60*24 +
            (as.integer(second.c[,5])-as.integer(first.c[,5]))*60 +
            (as.integer(second.c[,6])-as.integer(first.c[,6]))
    return(result)
  }
  stop(sprintf("Unsupported calendar %s",first$calendar))
}
#' Adjust time values for change in calendar
#'
#' Sometimes we want to specify a time range, but we don't know the calendar
#' e.g. a year is -01-01:00:00 to -12-31:23:59 in Gregorian and
#' -01-01:00:00 to -12-30:23:59 in 360_day.
#' So it's useful to give the spec in gregorian and auto-shrink it if the
#' data turns out to have a 360_day calendar when we open the file.
#'
#' Assumes that you are going for the full month or year - use
#' with caution.
#'
#' @export
#' @param date GSDF.time whose dates should be adjusted
#' @param new.calendar Target calendar: '360_day' or '365_day'
#' @return The input GSDF.time expressed in the new calendar
GSDF.time.recalendar<-function(date,new.calendar) {
  new.calendar<-GSDF.time.check.calendar(new.calendar)
  # Nothing to do when source and target calendars already agree.
  if(date$calendar==new.calendar) {
    return(date)
  }
  if(new.calendar=='gregorian') {
    stop("Can't recalendar gregorian")
  }
  if(new.calendar=='365_day' && date$calendar=='360_day') {
    stop("Can't recalendar 360_day to 365_day")
  }
  # Pull the date strings apart once; columns of `fields` are the full match,
  # then year, month, day, hour, minute.
  fields<-stringr::str_match(date$date,
        "(\\d\\d\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)\\D(\\d\\d)")
  mm<-as.integer(fields[,3])
  dd<-as.integer(fields[,4])
  if(new.calendar=='360_day') {
    # Every month has 30 days: clamp day 31, and move the Gregorian
    # end-of-February (28th or 29th) to the 30th.
    clamp<-which(dd>30 | (mm==2 & dd %in% c(28,29)))
    dd[clamp]<-30
  } else if(new.calendar=='365_day') {
    # Only leap days need adjusting: Feb 29 becomes Feb 28.
    clamp<-which(mm==2 & dd==29)
    dd[clamp]<-28
  } else {
    stop(sprintf("SNH error - unsupported calendar %s",new.calendar))
  }
  # Re-assemble the strings with the (possibly) adjusted day field.
  date$date<-sprintf("%s-%s-%02d:%s:%s",
                     fields[,2],fields[,3],dd,fields[,5],fields[,6])
  date$calendar<-new.calendar
  return(date)
}
|
#### R-script Simulation Study : Measuring Forecast Accuracy for Factor Models, AR and VAR(1) benchmark ####
rm(list=ls())

#### Check packages installed ####
# Install a package on first use, then attach it.
checkpackage <- function(U) {
  if (!(U %in% rownames(installed.packages()))) {
    install.packages(U)
  }
  library(U, character.only = TRUE)
}
packagelist <- list("lattice", "Rcpp", "MASS", "methods", "zoo", "stats", "utils", "grDevices",
                    "graphics", "RcppArmadillo", "RcppEigen", "R.matlab", "vars", "bigtime")
lapply(packagelist, checkpackage)

#### Source Functions ####
source("factorfunctions.R") # factor functions: SFM(), DFM(), FAVAR()
oldw <- getOption("warn")
# NOTE(review): oldw is saved but never restored; consider options(warn = oldw)
# at the end of the script.
options(warn = -1)

#### Shared evaluation loop ####
# One-step-ahead forecast comparison for a single simulation scenario.
# The four scenarios below previously repeated this loop verbatim.
#
# dat         : stacked simulated data; replication r occupies rows
#               ((r-1)*n+1):(r*n), and the last row of each replication is the
#               forecast target.
# n           : time series length per replication.
# p           : maximal lag order for the factor models and the AR fits.
# Nsim        : number of simulation replications stored in `dat`.
# include_var : also fit an unrestricted VAR(1) benchmark (scenario 4 only).
#
# Returns an Nsim x (3 or 4) matrix of mean squared forecast errors with
# columns "DFM", "FAVAR", "AR" (and "VAR" when include_var = TRUE).
run_scenario <- function(dat, n, p, Nsim, include_var = FALSE) {
  models <- c("DFM", "FAVAR", "AR")
  if (include_var) models <- c(models, "VAR")
  msfe <- matrix(NA, nrow = Nsim, ncol = length(models),
                 dimnames = list(NULL, models))
  for (r in seq_len(Nsim)) {
    # r-th replication; the last row is held out as the forecast target.
    Y <- dat[(n * (r - 1) + 1):(r * n), ]
    Ytrain <- as.matrix(Y[-nrow(Y), ])
    # DFM: static factor model first, then its dynamic refinement.
    # NOTE(review): Y is still a data.frame at this point (as in the original
    # code); mean() over the squared 1-row difference relies on the
    # factorfunctions forecasts being conformable - confirm.
    SFMfit <- SFM(Y = Ytrain, horizon = 1)
    DFMfit <- DFM(Y = Ytrain, f = SFMfit$f, rank = SFMfit$rank, horizon = 1,
                  lag.max = p, Yhat_static = SFMfit$Yhat_static,
                  decomp = SFMfit$decomp)
    msfe[r, "DFM"] <- mean((Y[nrow(Y), ] - DFMfit$Yhat_dynamic_AIC)^2)
    # FAVAR
    FAVARfit <- FAVAR(Y = Ytrain, horizon = 1, lag.max = p)
    msfe[r, "FAVAR"] <- mean((Y[nrow(Y), ] - FAVARfit$YhatsAIC)^2)
    # Univariate AR, fitted series by series via bigtime::sparseVAR.
    Y <- matrix(as.matrix(Y), ncol = ncol(Y), nrow = nrow(Y))
    errs_ar <- rep(NA_real_, ncol(Y))
    for (i in seq_len(ncol(Y))) {
      fit_ar <- sparseVAR(Y[-nrow(Y), i], p = p)
      errs_ar[i] <- (Y[nrow(Y), i] -
                       directforecast(fit = fit_ar, model = "VAR", h = 1))^2
    }
    msfe[r, "AR"] <- mean(errs_ar)
    # Unrestricted VAR(1) benchmark (vars::VAR), scenario 4 only.
    if (include_var) {
      VARfit <- VAR(y = Y[-nrow(Y), ], type = "none")
      VARpredict <- predict(VARfit, n.ahead = 1)
      # fcst is a list of per-series matrices (forecast, lower, upper, CI);
      # keep only the point forecast in column 1.
      fcst <- matrix(unlist(VARpredict$fcst), nrow = ncol(Y), ncol = 4, byrow = TRUE)
      msfe[r, "VAR"] <- mean((Y[nrow(Y), ] - fcst[, 1])^2)
    }
  }
  msfe
}

##############################################################
#### Simulation Scenario 1 : Componentwise HLag Structure ####
##############################################################
p <- 5    # maximal lag order
k <- 45   # number of series
n <- 100  # time series length (simulations were also run with n = 200)
Nsim <- 500
datasim1 <- read.table("sim1dat.txt")
Sim1MSFE <- run_scenario(datasim1, n = n, p = p, Nsim = Nsim)
apply(Sim1MSFE, 2, mean)

##########################################################
#### Simulation Scenario 2 : Own-Other HLag Structure ####
##########################################################
k <- 45; p <- 2; n <- 100
Nsim <- 500
datasim2 <- read.table("sim2dat.txt")
Sim2MSFE <- run_scenario(datasim2, n = n, p = p, Nsim = Nsim)
apply(Sim2MSFE, 2, mean)

############################################################
#### Simulation Scenario 3 : Elementwise HLag Structure ####
############################################################
k <- 45; p <- 4; n <- 100
Nsim <- 500
datasim3 <- read.table("sim3dat.txt")
Sim3MSFE <- run_scenario(datasim3, n = n, p = p, Nsim = Nsim)
apply(Sim3MSFE, 2, mean)

######################################################
#### Simulation Scenario 4 : Data-based Structure ####
######################################################
k <- 40; p <- 4; n <- 195
Nsim <- 500
datasim4 <- read.table("sim4dat.txt")
Sim4MSFE <- run_scenario(datasim4, n = n, p = p, Nsim = Nsim, include_var = TRUE)
apply(Sim4MSFE, 2, mean)
| /code/Simulation/Forecast/forecast_other_benchmarks.R | no_license | zhangjia-swufe/HLAG | R | false | false | 7,055 | r | #### R-script Simulation Study : Measuring Forecast Accuracy for Factor Models, AR and VAR(1) benchmark ####
rm(list=ls())

#### Check packages installed ####
# Install a package on first use, then attach it.
checkpackage <- function(U) {
  if (!(U %in% rownames(installed.packages()))) {
    install.packages(U)
  }
  library(U, character.only = TRUE)
}
packagelist <- list("lattice", "Rcpp", "MASS", "methods", "zoo", "stats", "utils", "grDevices",
                    "graphics", "RcppArmadillo", "RcppEigen", "R.matlab", "vars", "bigtime")
lapply(packagelist, checkpackage)

#### Source Functions ####
source("factorfunctions.R") # factor functions: SFM(), DFM(), FAVAR()
oldw <- getOption("warn")
# NOTE(review): oldw is saved but never restored; consider options(warn = oldw)
# at the end of the script.
options(warn = -1)

#### Shared evaluation loop ####
# One-step-ahead forecast comparison for a single simulation scenario;
# factors out the loop the four scenarios below previously repeated verbatim.
#
# dat         : stacked simulated data; replication r occupies rows
#               ((r-1)*n+1):(r*n), last row of each replication is the target.
# n           : time series length per replication.
# p           : maximal lag order for the factor models and the AR fits.
# Nsim        : number of simulation replications stored in `dat`.
# include_var : also fit an unrestricted VAR(1) benchmark (scenario 4 only).
#
# Returns an Nsim x (3 or 4) matrix of mean squared forecast errors with
# columns "DFM", "FAVAR", "AR" (and "VAR" when include_var = TRUE).
run_scenario <- function(dat, n, p, Nsim, include_var = FALSE) {
  models <- c("DFM", "FAVAR", "AR")
  if (include_var) models <- c(models, "VAR")
  msfe <- matrix(NA, nrow = Nsim, ncol = length(models),
                 dimnames = list(NULL, models))
  for (r in seq_len(Nsim)) {
    # r-th replication; the last row is held out as the forecast target.
    Y <- dat[(n * (r - 1) + 1):(r * n), ]
    Ytrain <- as.matrix(Y[-nrow(Y), ])
    # DFM: static factor model first, then its dynamic refinement.
    SFMfit <- SFM(Y = Ytrain, horizon = 1)
    DFMfit <- DFM(Y = Ytrain, f = SFMfit$f, rank = SFMfit$rank, horizon = 1,
                  lag.max = p, Yhat_static = SFMfit$Yhat_static,
                  decomp = SFMfit$decomp)
    msfe[r, "DFM"] <- mean((Y[nrow(Y), ] - DFMfit$Yhat_dynamic_AIC)^2)
    # FAVAR
    FAVARfit <- FAVAR(Y = Ytrain, horizon = 1, lag.max = p)
    msfe[r, "FAVAR"] <- mean((Y[nrow(Y), ] - FAVARfit$YhatsAIC)^2)
    # Univariate AR, fitted series by series via bigtime::sparseVAR.
    Y <- matrix(as.matrix(Y), ncol = ncol(Y), nrow = nrow(Y))
    errs_ar <- rep(NA_real_, ncol(Y))
    for (i in seq_len(ncol(Y))) {
      fit_ar <- sparseVAR(Y[-nrow(Y), i], p = p)
      errs_ar[i] <- (Y[nrow(Y), i] -
                       directforecast(fit = fit_ar, model = "VAR", h = 1))^2
    }
    msfe[r, "AR"] <- mean(errs_ar)
    # Unrestricted VAR(1) benchmark (vars::VAR), scenario 4 only.
    if (include_var) {
      VARfit <- VAR(y = Y[-nrow(Y), ], type = "none")
      VARpredict <- predict(VARfit, n.ahead = 1)
      # fcst holds per-series (forecast, lower, upper, CI); column 1 is the
      # point forecast.
      fcst <- matrix(unlist(VARpredict$fcst), nrow = ncol(Y), ncol = 4, byrow = TRUE)
      msfe[r, "VAR"] <- mean((Y[nrow(Y), ] - fcst[, 1])^2)
    }
  }
  msfe
}

##############################################################
#### Simulation Scenario 1 : Componentwise HLag Structure ####
##############################################################
p <- 5    # maximal lag order
k <- 45   # number of series
n <- 100  # time series length (simulations were also run with n = 200)
Nsim <- 500
datasim1 <- read.table("sim1dat.txt")
Sim1MSFE <- run_scenario(datasim1, n = n, p = p, Nsim = Nsim)
apply(Sim1MSFE, 2, mean)

##########################################################
#### Simulation Scenario 2 : Own-Other HLag Structure ####
##########################################################
k <- 45; p <- 2; n <- 100
Nsim <- 500
datasim2 <- read.table("sim2dat.txt")
Sim2MSFE <- run_scenario(datasim2, n = n, p = p, Nsim = Nsim)
apply(Sim2MSFE, 2, mean)

############################################################
#### Simulation Scenario 3 : Elementwise HLag Structure ####
############################################################
k <- 45; p <- 4; n <- 100
Nsim <- 500
datasim3 <- read.table("sim3dat.txt")
Sim3MSFE <- run_scenario(datasim3, n = n, p = p, Nsim = Nsim)
apply(Sim3MSFE, 2, mean)

######################################################
#### Simulation Scenario 4 : Data-based Structure ####
######################################################
k <- 40; p <- 4; n <- 195
Nsim <- 500
datasim4 <- read.table("sim4dat.txt")
Sim4MSFE <- run_scenario(datasim4, n = n, p = p, Nsim = Nsim, include_var = TRUE)
apply(Sim4MSFE, 2, mean)
|
# Summarise land-cover mismatch points for one CDL year, by county.
#
# year    : CDL year to summarise.
# in_data : data frame of mismatch points; must contain CDLYear, ncells_tile,
#           FIPS, x, y, NVC_Class, CDL_Class and State columns.
#
# Returns a grouped data frame with one row per
# NVC_Class/CDL_Class/CDLYear/State/FIPS combination, holding the number of
# mismatch points and their total tile fraction.
# NOTE(review): `%>%` assumes magrittr (or dplyr) is attached; the qualified
# dplyr:: calls suggest it may not be - confirm.
summarize_techval <- function(year, in_data) {
  oneyear <- dplyr::filter(in_data, CDLYear == year)
  cleaned <- dplyr::mutate(oneyear, PctTile = 1/ncells_tile) %>%
    # remove mis-match points that do not have FIPS code (overlap water or other non-county polygon)
    dplyr::filter(!is.na(FIPS)) %>%
    dplyr::mutate(coord_year = (paste0(x, y, "_", CDLYear))) %>%
    # remove points that are duplicated within a given CDL year; duplication can
    # happen because mis-match is calculated from state tiles rather than actual
    # state polygons, whose borders don't match exactly
    dplyr::distinct(coord_year, .keep_all = TRUE)
  logger::log_info('Finished cleaning and remove duplicate points, starting summarize by county.')
  freq_bycounty <- cleaned %>%
    dplyr::group_by(NVC_Class, CDL_Class, CDLYear, State, FIPS) %>%
    dplyr::summarise(Mismatch_NCells = dplyr::n(), Mismatch_PctTile = sum(PctTile))
  return(freq_bycounty)
}
# Count land-cover mismatch points per county for a single CDL year:
# filter to the year, weight each point by 1/ncells_tile, drop points without
# a FIPS code, de-duplicate coordinates within the year, then tally by
# class/state/county.
summarize_techval <- function(year, in_data) {
  cleaned <- in_data %>%
    dplyr::filter(CDLYear == year) %>%
    dplyr::mutate(PctTile = 1/ncells_tile) %>%
    dplyr::filter(!is.na(FIPS)) %>% # points over water / non-county polygons
    dplyr::mutate(coord_year = paste0(x, y, "_", CDLYear)) %>%
    dplyr::distinct(coord_year, .keep_all = TRUE) # state-tile borders overlap slightly
  logger::log_info('Finished cleaning and remove duplicate points, starting summarize by county.')
  cleaned %>%
    dplyr::group_by(NVC_Class, CDL_Class, CDLYear, State, FIPS) %>%
    dplyr::summarise(Mismatch_NCells = n(), Mismatch_PctTile = sum(PctTile))
}
## Polynomial regression surface used by the 2-d visreg examples below.
fit <- lm(Ozone ~ Solar.R + Wind + Temp + I(Wind^2) + I(Temp^2) +
     I(Wind*Temp)+I(Wind*Temp^2) + I(Temp*Wind^2) + I(Temp^2*Wind^2),
     data=airquality)
visreg2d(fit, x="Wind", y="Temp", plot.type="image")
visreg2d(fit, x="Wind", y="Temp", plot.type="image",
     color=c("purple", "green", "red"))
visreg2d(fit, x="Wind", y="Temp", plot.type="persp")
## Requires the rgl package
# }
# NOT RUN {
visreg2d(fit,x="Wind",y="Temp",plot.type="rgl")
# }
# NOT RUN {
## Requires the ggplot2 package
# }
# NOT RUN {
visreg2d(fit, x="Wind", y="Temp", plot.type="gg")
## Basic
fit <- lm(Ozone ~ Solar.R + Wind + Temp, data=airquality)
visreg(fit)
visreg(fit, "Wind", type="contrast")
visreg(fit, "Wind", type="conditional")
## Factors
airquality$Heat <- cut(airquality$Temp, 3, labels=c("Cool","Mild","Hot"))
fit.heat <- lm(Ozone ~ Solar.R + Wind + Heat, data=airquality)
visreg(fit.heat, "Heat", type="contrast")
visreg(fit.heat, "Heat", type="conditional")
## Transformations
fit1 <- lm(Ozone ~ Solar.R + Wind + Temp + I(Wind^2), data=airquality)
fit2 <- lm(log(Ozone) ~ Solar.R + Wind + Temp, data=airquality)
fit3 <- lm(log(Ozone) ~ Solar.R + Wind + Temp + I(Wind^2), data=airquality)
visreg(fit1, "Wind")
visreg(fit2, "Wind", trans=exp, ylab="Ozone")
visreg(fit3, "Wind", trans=exp, ylab="Ozone")
## Conditioning
visreg(fit, "Wind", cond=list(Temp=50))
visreg(fit, "Wind", print.cond=TRUE)
visreg(fit, "Wind", cond=list(Temp=100))
## Interactions
fit.in1 <- lm(Ozone~ Solar.R + Wind*Heat, data=airquality)
visreg(fit.in1, "Wind", by="Heat")
visreg(fit.in1, "Heat", by="Wind")
visreg(fit.in1, "Wind", by="Heat", type="contrast")
visreg(fit.in1, "Heat", by="Wind", breaks=6)
visreg(fit.in1, "Heat", by="Wind", breaks=c(0,10,20))
## Overlay
visreg(fit.in1, "Wind", by="Heat", overlay=TRUE)
######################
## Nonlinear models ##
######################
## Logistic regression
data("birthwt", package="MASS")
birthwt$race <- factor(birthwt$race, labels=c("White","Black","Other"))
birthwt$smoke <- factor(birthwt$smoke, labels=c("Nonsmoker","Smoker"))
fit <- glm(low~age+race+smoke+lwt, data=birthwt, family="binomial")
visreg(fit, "lwt",
   xlab="Mother's Weight", ylab="Log odds (low birthweight)")
visreg(fit, "lwt", scale="response", partial=FALSE,
   xlab="Mother's Weight", ylab="P(low birthweight)")
visreg(fit, "lwt", scale="response", partial=FALSE,
   xlab="Mother's Weight", ylab="P(low birthweight)", rug=2)
## Proportional hazards
# library() errors loudly if the package is missing; require() only warned.
library(survival)
data(ovarian)
ovarian$rx <- factor(ovarian$rx)
fit <- coxph(Surv(futime, fustat) ~ age + rx, data=ovarian)
visreg(fit, "age", ylab="log(Hazard ratio)")
## Robust regression
library(MASS)
fit <- rlm(Ozone ~ Solar.R + Wind*Heat, data=airquality)
visreg(fit, "Wind", cond=list(Heat="Mild"))
## And more...; anything with a 'predict' method should work
## Return raw components of plot
v <- visreg(fit, "Wind", cond=list(Heat="Mild"))
# } | /scripts/2020_Using_Visreg.R | no_license | MRoy120/Fiddler_SEM | R | false | false | 2,942 | r | fit <- lm(Ozone ~ Solar.R + Wind + Temp + I(Wind^2) + I(Temp^2) +
I(Wind*Temp)+I(Wind*Temp^2) + I(Temp*Wind^2) + I(Temp^2*Wind^2),
data=airquality)
## 2-d regression surfaces (uses `fit` from the polynomial lm above).
visreg2d(fit, x="Wind", y="Temp", plot.type="image")
visreg2d(fit, x="Wind", y="Temp", plot.type="image",
     color=c("purple", "green", "red"))
visreg2d(fit, x="Wind", y="Temp", plot.type="persp")
## Requires the rgl package
# }
# NOT RUN {
visreg2d(fit,x="Wind",y="Temp",plot.type="rgl")
# }
# NOT RUN {
## Requires the ggplot2 package
# }
# NOT RUN {
visreg2d(fit, x="Wind", y="Temp", plot.type="gg")
## Basic
fit <- lm(Ozone ~ Solar.R + Wind + Temp, data=airquality)
visreg(fit)
visreg(fit, "Wind", type="contrast")
visreg(fit, "Wind", type="conditional")
## Factors
airquality$Heat <- cut(airquality$Temp, 3, labels=c("Cool","Mild","Hot"))
fit.heat <- lm(Ozone ~ Solar.R + Wind + Heat, data=airquality)
visreg(fit.heat, "Heat", type="contrast")
visreg(fit.heat, "Heat", type="conditional")
## Transformations
fit1 <- lm(Ozone ~ Solar.R + Wind + Temp + I(Wind^2), data=airquality)
fit2 <- lm(log(Ozone) ~ Solar.R + Wind + Temp, data=airquality)
fit3 <- lm(log(Ozone) ~ Solar.R + Wind + Temp + I(Wind^2), data=airquality)
visreg(fit1, "Wind")
visreg(fit2, "Wind", trans=exp, ylab="Ozone")
visreg(fit3, "Wind", trans=exp, ylab="Ozone")
## Conditioning
visreg(fit, "Wind", cond=list(Temp=50))
visreg(fit, "Wind", print.cond=TRUE)
visreg(fit, "Wind", cond=list(Temp=100))
## Interactions
fit.in1 <- lm(Ozone~ Solar.R + Wind*Heat, data=airquality)
visreg(fit.in1, "Wind", by="Heat")
visreg(fit.in1, "Heat", by="Wind")
visreg(fit.in1, "Wind", by="Heat", type="contrast")
visreg(fit.in1, "Heat", by="Wind", breaks=6)
visreg(fit.in1, "Heat", by="Wind", breaks=c(0,10,20))
## Overlay
visreg(fit.in1, "Wind", by="Heat", overlay=TRUE)
######################
## Nonlinear models ##
######################
## Logistic regression
data("birthwt", package="MASS")
birthwt$race <- factor(birthwt$race, labels=c("White","Black","Other"))
birthwt$smoke <- factor(birthwt$smoke, labels=c("Nonsmoker","Smoker"))
fit <- glm(low~age+race+smoke+lwt, data=birthwt, family="binomial")
visreg(fit, "lwt",
   xlab="Mother's Weight", ylab="Log odds (low birthweight)")
visreg(fit, "lwt", scale="response", partial=FALSE,
   xlab="Mother's Weight", ylab="P(low birthweight)")
visreg(fit, "lwt", scale="response", partial=FALSE,
   xlab="Mother's Weight", ylab="P(low birthweight)", rug=2)
## Proportional hazards
# library() errors loudly if the package is missing; require() only warned.
library(survival)
data(ovarian)
ovarian$rx <- factor(ovarian$rx)
fit <- coxph(Surv(futime, fustat) ~ age + rx, data=ovarian)
visreg(fit, "age", ylab="log(Hazard ratio)")
## Robust regression
library(MASS)
fit <- rlm(Ozone ~ Solar.R + Wind*Heat, data=airquality)
visreg(fit, "Wind", cond=list(Heat="Mild"))
## And more...; anything with a 'predict' method should work
## Return raw components of plot
v <- visreg(fit, "Wind", cond=list(Heat="Mild"))
# } |
# compare output and timing for Kendall tau computation: cor and taucor
library(CopulaModel)
# compare taucor (fast Kendall tau) with cor( method="kendall")
# Wrappers around stats::cor with the correlation method fixed.
ktcor <- function(x, y) cor(x, y, method = "kendall")
spcor <- function(x, y) cor(x, y, method = "spearman")
# Simulate bivariate normal samples and compute Kendall's tau per replication.
#   n      = sample size
#   nsim   = number of simulation replications
#   seed   = seed for set.seed()
#   fnname = tau-computing function, e.g. ktcor or taucor
#   rho    = correlation for bivariate normal, used for simulated data
# Prints the replication index and tau for every replication, and invisibly
# returns the vector of all nsim tau values (the original returned only the
# last one, which made the earlier replications unrecoverable).
ktsimul=function(n,nsim,seed,fnname,rho=0.5)
{ set.seed(seed)
  r1=sqrt(1-rho^2)                # residual sd so that cor(x, y) == rho
  taus=rep(NA_real_,nsim)
  for(isim in seq_len(nsim))
  { x = rnorm(n,0,1)
    y = rho*x+ r1*rnorm(n,0,1)
    taus[isim]=fnname(x,y)
    cat(isim, taus[isim],"\n")
  }
  invisible(taus)
}
# Spot-check: on random data the two implementations must agree exactly.
set.seed(123)
for(isim in 1:10)
{ n = sample(1:1000,1)   # random sample size for each check
print(n)
x = rnorm(n,0,1)
y = rnorm(n,0,1)
tau1=ktcor(x,y); tau2=taucor(x,y)
print(c(tau1,tau2))      # the two printed values should be identical
}
# Timing comparison settings.
n=1000; nsim=5;   # NOTE(review): immediately overwritten by the next line
n=10000; nsim=5;
rho=0.5           # overwritten by the loop variable below
seed=123
# Time cor()'s O(n^2) Kendall tau against Knight's O(n log n) taucor()
# across a grid of true correlations.
for(rho in seq(-.9,.9,.2))
{ cat("\ntrue rho=", rho,"\n")
cat("method=kendall O(n^2)\n")
time0=proc.time()
ktsimul(n,nsim,seed,ktcor,rho)
time1 = proc.time() - time0
time0=proc.time()
cat("Knight's O(n*log(n)) implementation\n")
ktsimul(n,nsim,seed,taucor,rho)
time2 = proc.time() - time0
# user system elapsed
cat(time1,time2,"\n")
}
| /depmeasuretests/ktau-test.r | no_license | hoanguc3m/CopulaModel | R | false | false | 1,348 | r | # compare output and timing for Kendall tau computation: cor and taucor
library(CopulaModel)
# compare taucor (fast Kendall tau) with cor( method="kendall")
# Fix the method argument of stats::cor for Kendall / Spearman correlation.
ktcor <- function(x, y) {
  cor(x, y, method = "kendall")
}
spcor <- function(x, y) {
  cor(x, y, method = "spearman")
}
# Draw nsim bivariate-normal samples of size n with correlation rho and
# print Kendall's tau (computed by `fnname`) for each draw.
#   n      = sample size
#   nsim   = number of replications
#   seed   = RNG seed
#   fnname = tau function to benchmark, e.g. ktcor or taucor
#   rho    = true normal correlation for the simulated data
# Returns the tau of the final replication, as before.
ktsimul <- function(n, nsim, seed, fnname, rho = 0.5) {
  set.seed(seed)
  noise.scale <- sqrt(1 - rho^2)
  for (isim in 1:nsim) {
    x <- rnorm(n, 0, 1)
    y <- rho * x + noise.scale * rnorm(n, 0, 1)
    tau <- fnname(x, y)
    cat(isim, tau, "\n")
  }
  tau
}
# Spot-check: on random data the two implementations must agree exactly.
set.seed(123)
for(isim in 1:10)
{ n = sample(1:1000,1)   # random sample size for each check
print(n)
x = rnorm(n,0,1)
y = rnorm(n,0,1)
tau1=ktcor(x,y); tau2=taucor(x,y)
print(c(tau1,tau2))      # the two printed values should be identical
}
# Timing comparison settings.
n=1000; nsim=5;   # NOTE(review): immediately overwritten by the next line
n=10000; nsim=5;
rho=0.5           # overwritten by the loop variable below
seed=123
# Time cor()'s O(n^2) Kendall tau against Knight's O(n log n) taucor()
# across a grid of true correlations.
for(rho in seq(-.9,.9,.2))
{ cat("\ntrue rho=", rho,"\n")
cat("method=kendall O(n^2)\n")
time0=proc.time()
ktsimul(n,nsim,seed,ktcor,rho)
time1 = proc.time() - time0
time0=proc.time()
cat("Knight's O(n*log(n)) implementation\n")
ktsimul(n,nsim,seed,taucor,rho)
time2 = proc.time() - time0
# user system elapsed
cat(time1,time2,"\n")
}
|
# plot 4: 2x2 panel of household power-consumption measures for
# 2007-02-01 .. 2007-02-02, written to plot4.png.
library(lubridate)  # was require(); library() fails loudly if missing

# Read the raw data; "?" encodes missing values in this data set.
# NOTE(review): all columns are read as character and never converted
# to numeric before plotting -- confirm this renders as intended.
elec <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   na.strings = "?", colClasses = "character")
# Combine the Date and Time columns into a single datetime column.
elec[, 1] <- paste(elec[, 1], elec[, 2], sep = " ")
elec[, 1] <- dmy_hms(elec[, 1])
# Keep only the two target days of February 2007.
mar <- with(elec, elec[year(Date) == 2007 &
                         month(Date) == 2 & day(Date) >= 1 & day(Date) <= 2, ])
rm(elec)  # release the full table

png(file = "plot4.png", bg = "NA", width = 480, height = 480)
par(mfrow = c(2, 2))
with(mar, {
  # Top-left: global active power over time.
  plot(mar[, 1], mar[, 3], type = "n", xlab = "",
       ylab = "Global Active Power (kilowatts)")
  lines(mar[, 1], mar[, 3])
  # Top-right: voltage.
  plot(mar[, 1], mar[, 5], xlab = "datetime", ylab = "Voltage", type = "l")
  # Bottom-left: the three sub-metering series with a legend.
  plot(mar[, 1], mar[, 7], type = "n", xlab = "", ylab = "Energy Sub Metering")
  lines(mar[, 1], mar[, 7], col = "black", type = "l")
  lines(mar[, 1], mar[, 8], col = "red", type = "l")
  lines(mar[, 1], mar[, 9], col = "blue", type = "l")
  legend("topright", col = c("black", "red", "blue"), lty = "solid",
         legend = names(mar[7:9]), bty = "n")
  # Bottom-right: global reactive power.
  plot(mar[, 1], mar[, 4], xlab = "datetime", ylab = names(mar[4]), type = "l")
})
dev.off()
elec <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings="?", colClasses="character")
elec[, 1] <- paste(elec[, 1], elec[, 2], sep= " ")
require(lubridate)
elec[, 1] <- dmy_hms(elec[, 1])
mar <- with(elec, elec[year(Date) == 2007 &
month(Date) == 2 & day(Date) >= 1 & day(Date) <= 2, ])
rm(elec)
png(file = "plot4.png", bg = "NA", width=480, height=480)
par(mfrow=c(2,2))
with(mar, {
plot(mar[, 1], mar[, 3], type="n", xlab="", ylab="Global Active Power (kilowatts)")
lines(mar[, 1], mar[, 3])
plot(mar[, 1], mar[, 5], xlab="datetime", ylab="Voltage", type="l")
plot(mar[,1], mar[,7], type="n", xlab="", ylab="Energy Sub Metering")
lines(mar[,1], mar[,7], col="black", type="l")
lines(mar[,1], mar[,8], col="red", type="l")
lines(mar[,1], mar[,9], col="blue", type="l")
legend("topright", col=c("black", "red", "blue"), lty="solid", legend=names(mar[7:9]), bty="n")
plot(mar[, 1], mar[, 4], xlab="datetime", ylab=names(mar[4]), type="l")
})
dev.off() |
# Compute allele-frequency endemism summaries from a SNAPE/PoolSNP
# private-SNP table and save the binned SNP counts to an .Rdata file.
# The commented block below records the HPC job request and modules
# used when this script was run interactively.
#ijob -A jcbnunez -c 10 --mem=60G --partition=standard
#module load gcc/7.1.0
#module load openmpi/3.1.4
#module load gdal
#module load proj
#module load htslib/1.9
#module load bcftools/1.9
#module load intel/18.0
#module load intelmpi/18.0
#module load R/4.0.0
#R
# Clean the environment.
rm(list = ls())
##### libraries
# One-time installs, kept for provenance:
#install.packages(c("data.table",
# "sp",
# "foreach",
# "tidyverse",
# "magrittr",
# "patchwork"))
library(data.table)
library(sp)
library(foreach)
library(tidyverse)
library(magrittr)
library(patchwork)
# User-defined input: which caller / sample-filter / MAF combination
# this particular run represents.
priv.dt <- fread("SNAPE.AllSamps.0.001.delim") #file
ind.filter <- "AllSamps" #good.samps, all.samps
caller <- "SNAPE" #PoolSNPs, SNAPE
maf_thresh <- 0.001 #0.001, 0.05
# Run matrix, checked off as each combination is produced:
# "PoolSNPs good.samps 0.001" [Done]
# "PoolSNPs all.samps 0.001" [Done]
# "PoolSNPs good.samps 0.05" [Done]
# "PoolSNPs all.samps 0.05" [Done]
# "SNAPE good.samps 0.001"
# "SNAPE all.samps 0.001"
# "SNAPE good.samps 0.05"
# "SNAPE all.samps 0.05"
##########################
#data <- list(
# callers= c( "PoolSNPs", "SNAPE" ),
# filters= c( "good.samps", "all.samps"),
# maf_thresholds= c( 0.001, 0.05 )
#)
#
#data %>%
# cross() %>%
# map(lift(paste)) %>%
# unlist() %>%
# unique()
##########################
# NOTE(review): caller is compared to "PoolSNP" here but the option
# documented above is "PoolSNPs"; with caller = "SNAPE" this branch is
# inert, but confirm the intended string before a PoolSNP run.
if(caller=="PoolSNP") {
setnames(priv.dt, names(priv.dt), paste("V", 2:9, sep=""))
}
# Collapse columns V6-V9 into V8 as "V9;V6V7V8" (data.table by-reference).
priv.dt[,V8:=paste(V9, paste(V6, V7, V8, sep=""), sep=";")]
## Add names to the priv object.
names(priv.dt) = c(
"AF_bin_aob",
"chr",
"pos",
"nPop",
"missingPop",
"ref",
"alt",
"afs_mut_pop",
"afs_zero"
)
## Restrict any further analyses to the 4 main chromosomes.
priv.dt = priv.dt[which(priv.dt$chr %in% c("2L","2R","3L","3R")),]
# Parse the allele frequency data: split the "+"-delimited afs_zero
# strings, drop the leading token, and average the numeric remainder.
unlist_vect = lapply( priv.dt[,9] , unlist )
split_vect = lapply( unlist_vect , strsplit, split="\\+" )
remove_1stobj = lapply( split_vect$afs_zero, function(x) x[-1])
num_vect = lapply( remove_1stobj , as.numeric )
mean_AF = lapply( num_vect , mean )
AF_vector = as.data.frame(do.call("rbind", mean_AF))
# Add the allele frequency bin vector to the priv object, keep sites
# inside the MAF window, and build a chr_nPop_AF key for tabulation.
priv.dt %<>%
mutate(AF = round(AF_vector$V1,2) ) %>%
.[which(.$AF >= maf_thresh & .$AF <= (1-maf_thresh) ),] %>%
mutate(Raster_data = paste(chr,nPop,AF, sep = "_"))
# Count SNPs per chr/nPop/AF combination, then coerce back to numeric.
as.data.frame(table(priv.dt$Raster_data , dnn = list("Raster_data")), responseName = "Freq") %>%
separate(Raster_data, into = c("chr","nPop","AF"), sep = "_") -> o
o[,c("nPop","AF","Freq")] = sapply(o[,c("nPop","AF","Freq")], as.numeric)
# Tag run metadata and save, e.g. "SNAPE.0.001.AllSamps.Rdata".
o %<>% mutate(caller = caller,
MAF = maf_thresh,
ind.filter = ind.filter)
save(o, file= paste(caller,
maf_thresh,
ind.filter,
"Rdata",
sep = "."))
| /Analyses/GeographicEndemism/Extended_endemism/2.1.AlleleFreq.SNAPE.Allsamps.0.001.R | no_license | alanbergland/DEST | R | false | false | 2,923 | r | # This script correspond to the filter
#ijob -A jcbnunez -c 10 --mem=60G --partition=standard
#module load gcc/7.1.0
#module load openmpi/3.1.4
#module load gdal
#module load proj
#module load htslib/1.9
#module load bcftools/1.9
#module load intel/18.0
#module load intelmpi/18.0
#module load R/4.0.0
#R
#Clean the environment
rm(list = ls())
##### libraries
#install.packages(c("data.table",
# "sp",
# "foreach",
# "tidyverse",
# "magrittr",
# "patchwork"))
library(data.table)
library(sp)
library(foreach)
library(tidyverse)
library(magrittr)
library(patchwork)
#User defined imput
priv.dt <- fread("SNAPE.AllSamps.0.001.delim") #file
ind.filter <- "AllSamps" #good.samps, all.samps
caller <- "SNAPE" #PoolSNPs, SNAPE
maf_thresh <- 0.001 #0.001, 0.05
# "PoolSNPs good.samps 0.001" [Done]
# "PoolSNPs all.samps 0.001" [Done]
# "PoolSNPs good.samps 0.05" [Done]
# "PoolSNPs all.samps 0.05" [Done]
# "SNAPE good.samps 0.001"
# "SNAPE all.samps 0.001"
# "SNAPE good.samps 0.05"
# "SNAPE all.samps 0.05"
##########################
#data <- list(
# callers= c( "PoolSNPs", "SNAPE" ),
# filters= c( "good.samps", "all.samps"),
# maf_thresholds= c( 0.001, 0.05 )
#)
#
#data %>%
# cross() %>%
# map(lift(paste)) %>%
# unlist() %>%
# unique()
##########################
if(caller=="PoolSNP") {
setnames(priv.dt, names(priv.dt), paste("V", 2:9, sep=""))
}
priv.dt[,V8:=paste(V9, paste(V6, V7, V8, sep=""), sep=";")]
## Add names to the prov object
names(priv.dt) = c(
"AF_bin_aob",
"chr",
"pos",
"nPop",
"missingPop",
"ref",
"alt",
"afs_mut_pop",
"afs_zero"
)
## Restrict any further analyses to the 4 main chromosomes
priv.dt = priv.dt[which(priv.dt$chr %in% c("2L","2R","3L","3R")),]
#Parse the allele frequency data
unlist_vect = lapply( priv.dt[,9] , unlist )
split_vect = lapply( unlist_vect , strsplit, split="\\+" )
remove_1stobj = lapply( split_vect$afs_zero, function(x) x[-1])
num_vect = lapply( remove_1stobj , as.numeric )
mean_AF = lapply( num_vect , mean )
AF_vector = as.data.frame(do.call("rbind", mean_AF))
# Add the allele frequency bin vector to the priv object
priv.dt %<>%
mutate(AF = round(AF_vector$V1,2) ) %>%
.[which(.$AF >= maf_thresh & .$AF <= (1-maf_thresh) ),] %>%
mutate(Raster_data = paste(chr,nPop,AF, sep = "_"))
as.data.frame(table(priv.dt$Raster_data , dnn = list("Raster_data")), responseName = "Freq") %>%
separate(Raster_data, into = c("chr","nPop","AF"), sep = "_") -> o
o[,c("nPop","AF","Freq")] = sapply(o[,c("nPop","AF","Freq")], as.numeric)
o %<>% mutate(caller = caller,
MAF = maf_thresh,
ind.filter = ind.filter)
save(o, file= paste(caller,
maf_thresh,
ind.filter,
"Rdata",
sep = "."))
|
\name{MLSTyper-package}
\alias{MLSTyper-package}
\alias{MLSTyper}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
| /man/MLSTyper-package.Rd | no_license | rdcrawford/MLSTyper | R | false | false | 824 | rd | \name{MLSTyper-package}
\alias{MLSTyper-package}
\alias{MLSTyper}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Your Name, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
|
## @knitr phenoPlots
## Objective: analysis loop over each phenotype, restricted to
## phenotypes where enough strains have at least 8 flies per age.
# Prepare the loop: phenotype names plus accumulators filled inside
# the loop below.
phenotypes <- as.character( sort( unique( DATA_analysis$phenotype_name)))
RemainingStrain <- c()  # % of strains kept per phenotype
nameOutliers1 <- c()    # strains flagged as extreme at age 1
nameOutliers4 <- c()    # strains flagged as extreme at age 4
# Main loop: for each phenotype, filter strains, emit plots and tests
# (Wilcoxon on ages, Spearman on per-strain median/MAD) into the knitr
# report. French cat() strings are report text and left untouched.
for (namePheno in phenotypes){
# Subset to the current phenotype.
DATA_pheno <- DATA_analysis[ DATA_analysis$phenotype_name == namePheno,]
tabDATA_pheno <- data.frame( table( DATA_pheno$strain_number, DATA_pheno$age))
# Drop strains with fewer than 8 flies for at least one age group.
nameToRemove <- unique( as.character( tabDATA_pheno[ tabDATA_pheno$Freq <8, "Var1"]))
idxToRemove <- which( DATA_pheno$strain_number %in% nameToRemove)
if ( length( idxToRemove) != 0) {
DATA_pheno_rm <- DATA_pheno[ -idxToRemove, ]
} else {
DATA_pheno_rm <- DATA_pheno
}
# Export the filtered per-phenotype data frame.
write.table( DATA_pheno_rm, file=file.path( dirOutput, paste("2_Phenotypes/Output/Phenotypes/", namePheno, ".txt")))
# Skip the phenotype when too few strains survive the filter.
RemainingStrain_nb <- ( length( unique( DATA_pheno_rm$strain_number)) *100) / length(
unique( DATA_analysis$strain_number))
RemainingStrain <- c( RemainingStrain, RemainingStrain_nb)
if ( RemainingStrain_nb > 80){
# Section title in the report.
cat("<H4>", namePheno, "</H4><br>")
# Barplot of the strains remaining after the second filter.
cat("<br>Ci-dessous est affiché le barplot des lignées restantes après le second filtre. Il reste",
RemainingStrain_nb, "% des lignées de DATA_Analysis.<br>")
tabDATA_pheno_rm <- data.frame( table( DATA_pheno_rm$strain_number, DATA_pheno_rm$age))
colnames(tabDATA_pheno_rm) <- c( "strain_number", "age", "freq")
plotStrainpheno <- ggplot( tabDATA_pheno_rm, aes( x= strain_number, y= freq, fill = as.factor(age))) +
geom_bar( position="dodge", stat="identity") +
coord_cartesian( ylim = c( 0,25)) +
xlab( "Strains") +
ylab( "Frequency") +
ggtitle( paste( namePheno,"\nNumber of drosophila by strain and age")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text( angle = 90, hjust = 1, size = 7),
legend.position = "bottom") +
geom_hline( aes( yintercept = 8), lty = 2)
print( plotStrainpheno)
# Boxplots of phenotype values per strain, ordered by median.
cat("<br>Les boxplots suivants sont les valeurs du phénotype étudiés pour chaque lignée ordonnées selon leur médiane.<br>")
lenStrain <- length( unique( DATA_pheno_rm$strain_number))
plotHRMpheno <- ggplot( DATA_pheno_rm) +
geom_boxplot( aes( x = reorder(x =strain_number, X = value, FUN = median), y = value),
fill = rainbow(lenStrain)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 7)) +
xlab( "Strains") +
ylab( "Phenotype values") +
ggtitle( paste( namePheno, "\nphenotype values between strains organized by median")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( plotHRMpheno)
# Overall difference between age 1 and age 4, pooling strains.
cat("<br>Les valeurs phénotypiques sont maintenant organisées en fonction des âges, sans un regard à la
lignée, afin de voir si on a une tendance générale.<br>")
plotHRM14 <- ggplot( DATA_pheno_rm) +
geom_violin( aes( x = as.factor( age), y = value, fill = as.factor(age))) +
scale_fill_manual( values = c("1"= "lemonchiffon3", "4"= "gray60" )) +
geom_boxplot( aes( x = as.factor( age), y = value), width = 0.1) +
xlab( "Age") +
ylab( "Phenotype values") +
ggtitle( paste( namePheno, "\nphenotype values between ages")) +
theme( plot.title = element_text( hjust = 0.5))
print( plotHRM14)
# Wilcoxon test on age (unpaired, raw values).
cat("<br>Un premier test de Wilcoxon est appliquée à ces données. Si la p-valeur est inférieur à 0.05,
cela signifie que les deux populations (age1, age4) ont une moyenne significativement différente
et ne suivent donc pas une même distribution.")
wilcoxon <- wilcox.test(value ~ age, data = DATA_pheno_rm)
print( wilcoxon)
## MEDIAN AND MEAN
# Per-strain medians for ages 1 and 4 (sapply returns characters,
# hence the as.numeric(as.character(...)) conversions below).
dfmedian <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
median1 <- median(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1),]$value)
median4 <- median(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4),]$value)
return( c( "strain_number" = as.character(x), "median1" = median1, "median4" = median4))
})
dfmedian <- data.frame( t( dfmedian))
dfmedian$median1 <- as.numeric( as.character( dfmedian$median1))
dfmedian$median4 <- as.numeric( as.character( dfmedian$median4))
dfmedian$diff <- dfmedian$median4 - dfmedian$median1
dfmedian[["sign"]] <- ifelse(dfmedian[["diff"]] >= 0, "positive", "negative")
strainorder <- dfmedian[order(dfmedian$diff),]$strain_number
dfmedian$strain_number = factor( dfmedian$strain_number, levels = strainorder)
# Long format: one row per strain and age, for the violin plot below.
data1 <- dfmedian[, c( "strain_number", "median1")]
data1[["age"]] = 1
colnames( data1) <- c( "strain_number", "median", "age")
data4 <- dfmedian[, c( "strain_number", "median4")]
data4[["age"]] = 4
colnames( data4) <- c( "strain_number", "median", "age")
dfmedian_bis <- rbind.data.frame( data1, data4)
# Helper flagging outliers by the 1.5 * IQR rule.
is_outlier <- function(x) {
return(x < quantile(x, 0.25) - 1.5 * IQR(x) | x > quantile(x, 0.75) + 1.5 * IQR(x))
}
outliers1 <- dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==1,]$median) & dfmedian_bis$age ==1, "strain_number"]
nameOutliers1 <- c(nameOutliers1, as.character(outliers1))
outliers4 <- dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==4,]$median) & dfmedian_bis$age ==4, "strain_number"]
nameOutliers4 <- c(nameOutliers4, as.character( outliers4))
cat("\nLes médianes des lignées pour les deux âges ont été calculées et mises sous forme d'un boxplot
comparable avec le précédent, il nous permet de voir la tendance générale ainsi que les lignées ayant
un comportement extrême, qui ont été labellisées.<br>")
plotHRM14bis <- ggplot( dfmedian_bis) +
geom_violin( aes( x = as.factor( age), y = median, fill = as.factor(age))) +
scale_fill_manual( values = c( "1"= "lemonchiffon3", "4"= "gray60" )) +
geom_boxplot( aes( x = as.factor( age), y = median), width = 0.1) +
geom_text_repel(
data = dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==1, ]$median) & dfmedian_bis$age ==1,],
aes(x = as.factor(age), y=median, label = strain_number), nudge_x = 0.2) +
geom_text_repel(
data = dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==4,]$median) & dfmedian_bis$age ==4,],
aes(x = as.factor(age), y=median, label = strain_number), nudge_x = 0.2) +
xlab( "Age") +
ylab( "median of phenotype values of strains") +
ggtitle( paste( namePheno, "\nmedian strains's values for each age")) +
theme( plot.title = element_text( hjust = 0.5))
print( plotHRM14bis)
# Paired Wilcoxon test on the per-strain medians.
cat("<br>Le test de Wilcoxon a maintenant été appliqué aux médianes des lignées pour l'âge 1
et l'âge. On a pu faire dans ce cas-ci un test paired.")
wilcoxon2 <- wilcox.test(median ~ age, data = dfmedian_bis, paired = TRUE)
print( wilcoxon2)
# Scatter of median4 - median1 per strain, ordered by the difference.
cat("\nLe graphique suivant est issu de la différence de la médiane âge4 moins celle de l'âge1, les lignées
ont été ordonnées suivant cette différence.<br>")
qplotmedian <- qplot(data = dfmedian, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "median4 - median1") +
ggtitle( paste( namePheno, "\ndifference mediane age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotmedian)
# Correlation median4 vs median1 (identity line in red).
qplotmedian14 <- ggplot(dfmedian) +
geom_point( aes(x= median1, y = median4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation median age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
# Same with means, but keeping the strain order from the medians so
# the two plots are comparable.
dfmean <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
mean1 <- mean(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1), "value"])
mean4 <- mean(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4), "value"])
return( c( "strain_number" = as.character(x), "mean1" = mean1, "mean4" = mean4))
})
dfmean <- data.frame( t( dfmean))
dfmean$mean1 <- as.numeric( as.character( dfmean$mean1))
dfmean$mean4 <- as.numeric( as.character( dfmean$mean4))
dfmean$diff <- dfmean$mean4 - dfmean$mean1
dfmean[["sign"]] <- ifelse(dfmean[["diff"]] >= 0, "positive", "negative")
dfmean$strain_number = factor( dfmean$strain_number, levels = strainorder)
# Scatter of mean4 - mean1 per strain, median-based strain order.
cat("<br>Le graphique suivant est issu de la différence de la moyenne âge4 moins celle de l'âge1, les lignées
ont été ordonnées avec le même classement que les médianes.<br>")
qplotmean <- qplot(data = dfmean, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "mean4 - mean1") +
ggtitle( paste( namePheno, "\ndifference mean age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotmean)
# Correlation mean4 vs mean1.
qplotmean14 <- ggplot(dfmean) +
geom_point( aes( x= mean1, y = mean4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation mean age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
# Median and mean correlation plots side by side.
cat("<br>Ci dessous sont les correlations entre l'age 1 et l'âge 4 pour la médiane et la moyenne.
On voit en effet que la moyenne est plus influencée par les valeurs extrèmes.<br>")
grid.arrange( qplotmedian14, qplotmean14, ncol = 2 , nrow =1 )
# Spearman correlation on the medians.
cat("<br>")
spearman <- cor.test( x=dfmedian$median1, y = dfmedian$median4, method = "spearman")
print(spearman)
## MAD AND SD
# Per-strain MAD for ages 1 and 4 (robust spread measure).
dfmad <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
mad1 <- mad(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1),]$value)
mad4 <- mad(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4),]$value)
return( c( "strain_number" = as.character(x), "mad1" = mad1, "mad4" = mad4))
})
dfmad <- data.frame( t( dfmad))
dfmad$mad1 <- as.numeric( as.character( dfmad$mad1))
dfmad$mad4 <- as.numeric( as.character( dfmad$mad4))
dfmad$diff <- dfmad$mad4 - dfmad$mad1
dfmad[["sign"]] <- ifelse(dfmad[["diff"]] >= 0, "positive", "negative")
strainordermad <- dfmad[order(dfmad$diff),]$strain_number
dfmad$strain_number = factor( dfmad$strain_number, levels = strainordermad)
# Scatter of mad4 - mad1 per strain, ordered by the difference.
cat("<br>Le graphique suivant est issu de la différence de la mad âge4 moins celle de l'âge1, les lignées
ont été ordonnées suivant cette différence.<br>")
qplotmad <- qplot(data = dfmad, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "mad4 - mad1") +
ggtitle( paste( namePheno, "\ndifference mad age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotmad)
# Correlation mad4 vs mad1.
qplotmad14 <- ggplot( dfmad) +
geom_point( aes( x= mad1, y = mad4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation mad age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
# Same with sd, but keeping the strain order from the MADs to compare.
dfsd <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
sd1 <- sd(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1),]$value)
sd4 <- sd(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4),]$value)
return( c( "strain_number" = as.character(x), "sd1" = sd1, "sd4" = sd4))
})
dfsd <- data.frame( t( dfsd))
dfsd$sd1 <- as.numeric( as.character( dfsd$sd1))
dfsd$sd4 <- as.numeric( as.character( dfsd$sd4))
dfsd$diff <- dfsd$sd4 - dfsd$sd1
dfsd[["sign"]] <- ifelse(dfsd[["diff"]] >= 0, "positive", "negative")
dfsd$strain_number = factor( dfsd$strain_number, levels = strainordermad)
# Scatter of sd4 - sd1 per strain, MAD-based strain order.
cat("<br>Le graphique suivant est issu de la différence de la sd âge4 moins celle de l'âge1, les lignées
ont été ordonnées avec le même classement que les mad.<br>")
qplotsd <- qplot(data = dfsd, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "sd4 - sd1") +
ggtitle( paste( namePheno, "\ndifference sd age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotsd)
# Correlation sd4 vs sd1.
qplotsd14 <- ggplot(dfsd) +
geom_point( aes( x= sd1, y = sd4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation sd age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
# MAD and sd correlation plots side by side.
cat("<br>Ci dessous sont les correlations entre l'age 1 et l'âge 4 pour la mad et le sd.
On voit en effet que le sd est plus influencé par les valeurs extrèmes.<br>")
grid.arrange( qplotmad14, qplotsd14, ncol = 2 , nrow =1 )
# Spearman correlation on the MADs.
cat("<br>")
spearman2 <- cor.test( x=dfmad$mad1, y = dfmad$mad4, method = "spearman")
print(spearman2)
cat('<br><br><br><br><hr style=" height:3px; background-color:black"><br><br><br><br>')
}
}
cat("<H4> Summary table </H4>")
# Summary of the percentage of remaining strains for each phenotype,
# i.e. whether each phenotype was analyzed (threshold: 80%).
kable( cbind.data.frame( "phenotypes" = phenotypes, "valid_strain" = formatC( RemainingStrain, digits =2, format = "f" )),
caption = "**Percentage of remaining strains** : Under 80%, phenotypes have not been studied")
# The two tables below list strains flagged as extreme at least twice.
cat("<br>Les deux tables qui suivent montrent les lignées qui ont été au minimum deux fois extrêmes.<br>")
# NOTE(review): full_width = F uses the reassignable alias F; prefer FALSE.
kable( table( nameOutliers1)[ table( nameOutliers1) > 1], caption = "Strains most occured as extrem, age 1",
format = "html", align = NULL) %>%
kable_styling(full_width = F)
kable( table( nameOutliers4)[ table( nameOutliers4) > 1], caption = "Strains most occured as extrem, age4",
format = "html", align = NULL) %>%
kable_styling(full_width = F)
# Cleaning: remove values no longer needed (left disabled).
#rm( c( DATA_test, DATA_test_rm))
| /2_Phenotypes/Input/pheno_analysis_01.R | no_license | sladebese/SNPnet | R | false | false | 17,392 | r | ## @knitr phenoPlots
## Objective : loop of analysis for each phenotype with a high number
##of strains with more than 8 drosophiles for each age
#Preparing the loop
phenotypes <- as.character( sort( unique( DATA_analysis$phenotype_name)))
RemainingStrain <- c()
nameOutliers1 <- c()
nameOutliers4 <- c()
for (namePheno in phenotypes){
#Subsetting
DATA_pheno <- DATA_analysis[ DATA_analysis$phenotype_name == namePheno,]
tabDATA_pheno <- data.frame( table( DATA_pheno$strain_number, DATA_pheno$age))
nameToRemove <- unique( as.character( tabDATA_pheno[ tabDATA_pheno$Freq <8, "Var1"]))
idxToRemove <- which( DATA_pheno$strain_number %in% nameToRemove)
if ( length( idxToRemove) != 0) {
DATA_pheno_rm <- DATA_pheno[ -idxToRemove, ]
} else {
DATA_pheno_rm <- DATA_pheno
}
#Extracting dataframe used for each phenotype
write.table( DATA_pheno_rm, file=file.path( dirOutput, paste("2_Phenotypes/Output/Phenotypes/", namePheno, ".txt")))
#Not traited if not enought data
RemainingStrain_nb <- ( length( unique( DATA_pheno_rm$strain_number)) *100) / length(
unique( DATA_analysis$strain_number))
RemainingStrain <- c( RemainingStrain, RemainingStrain_nb)
if ( RemainingStrain_nb > 80){
#Title
cat("<H4>", namePheno, "</H4><br>")
#Plot of remaining strains
cat("<br>Ci-dessous est affiché le barplot des lignées restantes après le second filtre. Il reste",
RemainingStrain_nb, "% des lignées de DATA_Analysis.<br>")
tabDATA_pheno_rm <- data.frame( table( DATA_pheno_rm$strain_number, DATA_pheno_rm$age))
colnames(tabDATA_pheno_rm) <- c( "strain_number", "age", "freq")
plotStrainpheno <- ggplot( tabDATA_pheno_rm, aes( x= strain_number, y= freq, fill = as.factor(age))) +
geom_bar( position="dodge", stat="identity") +
coord_cartesian( ylim = c( 0,25)) +
xlab( "Strains") +
ylab( "Frequency") +
ggtitle( paste( namePheno,"\nNumber of drosophila by strain and age")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text( angle = 90, hjust = 1, size = 7),
legend.position = "bottom") +
geom_hline( aes( yintercept = 8), lty = 2)
print( plotStrainpheno)
#Graphique boxplot des valeursen fonction de chacune des lignées ordonnées selon la médiane
cat("<br>Les boxplots suivants sont les valeurs du phénotype étudiés pour chaque lignée ordonnées selon leur médiane.<br>")
lenStrain <- length( unique( DATA_pheno_rm$strain_number))
plotHRMpheno <- ggplot( DATA_pheno_rm) +
geom_boxplot( aes( x = reorder(x =strain_number, X = value, FUN = median), y = value),
fill = rainbow(lenStrain)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 7)) +
xlab( "Strains") +
ylab( "Phenotype values") +
ggtitle( paste( namePheno, "\nphenotype values between strains organized by median")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( plotHRMpheno)
#Plot of overall difference between Age1 et Age4
cat("<br>Les valeurs phénotypiques sont maintenant organisées en fonction des âges, sans un regard à la
lignée, afin de voir si on a une tendance générale.<br>")
plotHRM14 <- ggplot( DATA_pheno_rm) +
geom_violin( aes( x = as.factor( age), y = value, fill = as.factor(age))) +
scale_fill_manual( values = c("1"= "lemonchiffon3", "4"= "gray60" )) +
geom_boxplot( aes( x = as.factor( age), y = value), width = 0.1) +
xlab( "Age") +
ylab( "Phenotype values") +
ggtitle( paste( namePheno, "\nphenotype values between ages")) +
theme( plot.title = element_text( hjust = 0.5))
print( plotHRM14)
# Wilcoxon test on age
cat("<br>Un premier test de Wilcoxon est appliquée à ces données. Si la p-valeur est inférieur à 0.05,
cela signifie que les deux populations (age1, age4) ont une moyenne significativement différente
et ne suivent donc pas une même distribution.")
wilcoxon <- wilcox.test(value ~ age, data = DATA_pheno_rm)
print( wilcoxon)
## MEDIANE AND MEAN
#Plot of difference median 1 - median 4
dfmedian <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
median1 <- median(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1),]$value)
median4 <- median(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4),]$value)
return( c( "strain_number" = as.character(x), "median1" = median1, "median4" = median4))
})
dfmedian <- data.frame( t( dfmedian))
dfmedian$median1 <- as.numeric( as.character( dfmedian$median1))
dfmedian$median4 <- as.numeric( as.character( dfmedian$median4))
dfmedian$diff <- dfmedian$median4 - dfmedian$median1
dfmedian[["sign"]] <- ifelse(dfmedian[["diff"]] >= 0, "positive", "negative")
strainorder <- dfmedian[order(dfmedian$diff),]$strain_number
dfmedian$strain_number = factor( dfmedian$strain_number, levels = strainorder)
#Plot of difference between Age1 et Age4 with median of each age
data1 <- dfmedian[, c( "strain_number", "median1")]
data1[["age"]] = 1
colnames( data1) <- c( "strain_number", "median", "age")
data4 <- dfmedian[, c( "strain_number", "median4")]
data4[["age"]] = 4
colnames( data4) <- c( "strain_number", "median", "age")
dfmedian_bis <- rbind.data.frame( data1, data4)
#Fonction pour déterminer les outliers
is_outlier <- function(x) {
return(x < quantile(x, 0.25) - 1.5 * IQR(x) | x > quantile(x, 0.75) + 1.5 * IQR(x))
}
outliers1 <- dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==1,]$median) & dfmedian_bis$age ==1, "strain_number"]
nameOutliers1 <- c(nameOutliers1, as.character(outliers1))
outliers4 <- dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==4,]$median) & dfmedian_bis$age ==4, "strain_number"]
nameOutliers4 <- c(nameOutliers4, as.character( outliers4))
cat("\nLes médianes des lignées pour les deux âges ont été calculées et mises sous forme d'un boxplot
comparable avec le précédent, il nous permet de voir la tendance générale ainsi que les lignées ayant
un comportement extrême, qui ont été labellisées.<br>")
plotHRM14bis <- ggplot( dfmedian_bis) +
geom_violin( aes( x = as.factor( age), y = median, fill = as.factor(age))) +
scale_fill_manual( values = c( "1"= "lemonchiffon3", "4"= "gray60" )) +
geom_boxplot( aes( x = as.factor( age), y = median), width = 0.1) +
geom_text_repel(
data = dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==1, ]$median) & dfmedian_bis$age ==1,],
aes(x = as.factor(age), y=median, label = strain_number), nudge_x = 0.2) +
geom_text_repel(
data = dfmedian_bis[is_outlier( dfmedian_bis[dfmedian_bis$age ==4,]$median) & dfmedian_bis$age ==4,],
aes(x = as.factor(age), y=median, label = strain_number), nudge_x = 0.2) +
xlab( "Age") +
ylab( "median of phenotype values of strains") +
ggtitle( paste( namePheno, "\nmedian strains's values for each age")) +
theme( plot.title = element_text( hjust = 0.5))
print( plotHRM14bis)
# Wilcoxon test on age
cat("<br>Le test de Wilcoxon a maintenant été appliqué aux médianes des lignées pour l'âge 1
et l'âge. On a pu faire dans ce cas-ci un test paired.")
wilcoxon2 <- wilcox.test(median ~ age, data = dfmedian_bis, paired = TRUE)
print( wilcoxon2)
#Qplot
cat("\nLe graphique suivant est issu de la différence de la médiane âge4 moins celle de l'âge1, les lignées
ont été ordonnées suivant cette différence.<br>")
qplotmedian <- qplot(data = dfmedian, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "median4 - median1") +
ggtitle( paste( namePheno, "\ndifference mediane age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotmedian)
#Correlation mediane4/ mediane1
qplotmedian14 <- ggplot(dfmedian) +
geom_point( aes(x= median1, y = median4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation median age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
#Same with mean but keeping the order of strains by median to compare
dfmean <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
mean1 <- mean(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1), "value"])
mean4 <- mean(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4), "value"])
return( c( "strain_number" = as.character(x), "mean1" = mean1, "mean4" = mean4))
})
dfmean <- data.frame( t( dfmean))
dfmean$mean1 <- as.numeric( as.character( dfmean$mean1))
dfmean$mean4 <- as.numeric( as.character( dfmean$mean4))
dfmean$diff <- dfmean$mean4 - dfmean$mean1
dfmean[["sign"]] <- ifelse(dfmean[["diff"]] >= 0, "positive", "negative")
dfmean$strain_number = factor( dfmean$strain_number, levels = strainorder)
#Plot of diff mean4 -1 in order of diff median
cat("<br>Le graphique suivant est issu de la différence de la moyenne âge4 moins celle de l'âge1, les lignées
ont été ordonnées avec le même classement que les médianes.<br>")
qplotmean <- qplot(data = dfmean, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "mean4 - mean1") +
ggtitle( paste( namePheno, "\ndifference mean age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotmean)
#Correlation mean4/ mean1
qplotmean14 <- ggplot(dfmean) +
geom_point( aes( x= mean1, y = mean4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation mean age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
#Plot correlations side by side
cat("<br>Ci dessous sont les correlations entre l'age 1 et l'âge 4 pour la médiane et la moyenne.
On voit en effet que la moyenne est plus influencée par les valeurs extrèmes.<br>")
grid.arrange( qplotmedian14, qplotmean14, ncol = 2 , nrow =1 )
#Spearman correlation on median
cat("<br>")
spearman <- cor.test( x=dfmedian$median1, y = dfmedian$median4, method = "spearman")
print(spearman)
## MAD AND SD
#Calcul of difference mad 4 - mad 1
dfmad <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
mad1 <- mad(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1),]$value)
mad4 <- mad(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4),]$value)
return( c( "strain_number" = as.character(x), "mad1" = mad1, "mad4" = mad4))
})
dfmad <- data.frame( t( dfmad))
dfmad$mad1 <- as.numeric( as.character( dfmad$mad1))
dfmad$mad4 <- as.numeric( as.character( dfmad$mad4))
dfmad$diff <- dfmad$mad4 - dfmad$mad1
dfmad[["sign"]] <- ifelse(dfmad[["diff"]] >= 0, "positive", "negative")
strainordermad <- dfmad[order(dfmad$diff),]$strain_number
dfmad$strain_number = factor( dfmad$strain_number, levels = strainordermad)
#Plot of diff mad4 -1 in order of diff sd
cat("<br>Le graphique suivant est issu de la différence de la mad âge4 moins celle de l'âge1, les lignées
ont été ordonnées suivant cette différence.<br>")
qplotmad <- qplot(data = dfmad, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "mad4 - mad1") +
ggtitle( paste( namePheno, "\ndifference mad age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotmad)
#Correlation mad4/ mad1
qplotmad14 <- ggplot( dfmad) +
geom_point( aes( x= mad1, y = mad4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation mad age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
#Same with sd but keeping the order of strains by mad to compare
dfsd <- sapply( unique( DATA_pheno_rm$strain_number), function(x){
sd1 <- sd(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 1),]$value)
sd4 <- sd(DATA_pheno_rm[ (DATA_pheno_rm$strain_number == x) & (DATA_pheno_rm$age == 4),]$value)
return( c( "strain_number" = as.character(x), "sd1" = sd1, "sd4" = sd4))
})
dfsd <- data.frame( t( dfsd))
dfsd$sd1 <- as.numeric( as.character( dfsd$sd1))
dfsd$sd4 <- as.numeric( as.character( dfsd$sd4))
dfsd$diff <- dfsd$sd4 - dfsd$sd1
dfsd[["sign"]] <- ifelse(dfsd[["diff"]] >= 0, "positive", "negative")
dfsd$strain_number = factor( dfsd$strain_number, levels = strainordermad)
#Qplot
cat("<br>Le graphique suivant est issu de la différence de la sd âge4 moins celle de l'âge1, les lignées
ont été ordonnées avec le même classement que les mad.<br>")
qplotsd <- qplot(data = dfsd, x= strain_number, y = diff, col = sign) +
scale_fill_manual(values = c("positive" = "deepskyblue", "negative" = "red2")) +
geom_hline( aes( yintercept = 0), lty = 2)+
xlab( "Strains") +
ylab( "sd4 - sd1") +
ggtitle( paste( namePheno, "\ndifference sd age 4 - 1 for each strain")) +
theme( plot.title = element_text( hjust = 0.5),
axis.text.x = element_text(angle = 90, hjust = 1, size = 7))
print( qplotsd)
#Correlation sd4/ sd1
qplotsd14 <- ggplot(dfsd) +
geom_point( aes( x= sd1, y = sd4)) +
geom_abline( slope = 1, intercept = 0, col = "red", lwd = 1) +
ggtitle( paste( namePheno, "\ncorrelation sd age 4 - 1 for each strain")) +
theme( legend.position = "none", plot.title = element_text( hjust = 0.5))
#Plot correlations side by side
cat("<br>Ci dessous sont les correlations entre l'age 1 et l'âge 4 pour la mad et le sd.
On voit en effet que le sd est plus influencé par les valeurs extrèmes.<br>")
grid.arrange( qplotmad14, qplotsd14, ncol = 2 , nrow =1 )
#Spearman on mad
cat("<br>")
spearman2 <- cor.test( x=dfmad$mad1, y = dfmad$mad4, method = "spearman")
print(spearman2)
cat('<br><br><br><br><hr style=" height:3px; background-color:black"><br><br><br><br>')
}
}
cat("<H4> Summary table </H4>")
#Summary of percent remaining strains for each phenotype, so if there are analyzed or not
kable( cbind.data.frame( "phenotypes" = phenotypes, "valid_strain" = formatC( RemainingStrain, digits =2, format = "f" )),
caption = "**Percentage of remaining strains** : Under 80%, phenotypes have not been studied")
cat("<br>Les deux tables qui suivent montrent les lignées qui ont été au minimum deux fois extrêmes.<br>")
kable( table( nameOutliers1)[ table( nameOutliers1) > 1], caption = "Strains most occured as extrem, age 1",
format = "html", align = NULL) %>%
kable_styling(full_width = F)
kable( table( nameOutliers4)[ table( nameOutliers4) > 1], caption = "Strains most occured as extrem, age4",
format = "html", align = NULL) %>%
kable_styling(full_width = F)
#Cleaning
#rm( c( DATA_test, DATA_test_rm)) ENLEVER LES VALEURS NON UTILES
|
library(daewr)
### Name: gear
### Title: Unreplicated split-plot fractional-factorial experiment on
### geometric distortion of drive gears
### Aliases: gear
### Keywords: datasets
### ** Examples
data(gear)
| /data/genthat_extracted_code/daewr/examples/gear.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 217 | r | library(daewr)
### Name: gear
### Title: Unreplicated split-plot fractional-factorial experiment on
### geometric distortion of drive gears
### Aliases: gear
### Keywords: datasets
### ** Examples
data(gear)
|
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 8.59352185133537e+228, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) | /dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609869321-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,199 | r | testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 8.59352185133537e+228, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) |
## File Name: vcov.srm.R
## File Version: 0.01
vcov.srm <- function(object, ...)
{
return(object$vcov)
}
| /R/vcov.srm.R | no_license | alexanderrobitzsch/srm | R | false | false | 111 | r | ## File Name: vcov.srm.R
## File Version: 0.01
vcov.srm <- function(object, ...)
{
return(object$vcov)
}
|
library(shiny)
library(dplyr)
library(lubridate)
# Load libraries and functions needed to create SQLite databases.
library(RSQLite)
# Used to be provided by dplyr, we'll hack together on our own now.
hflights_sqlite <- function(path = NULL) {
path <- dplyr:::db_location(path, "hflights.sqlite")
if (!file.exists(path)) {
message("Caching hflights db at ", path)
src <- src_sqlite(path, create = TRUE)
copy_to(src, getExportedValue("hflights", "hflights"), "hflights",
temporary = FALSE,
indexes = list("Dest", c("Year", "Month", "DayofMonth"), "UniqueCarrier")
)
} else {
src <- src_sqlite(path)
}
src
}
saveSQLite <- function(data, name){
path <- dplyr:::db_location(filename=paste0(name, ".sqlite"))
if (!file.exists(path)) {
message("Caching db at ", path)
src <- src_sqlite(path, create = TRUE)
copy_to(src, data, name, temporary = FALSE)
} else {
src <- src_sqlite(path)
}
return (src)
}
# Load/create some data and put it in SQLite. In practice, the data you want
# likely already exists in the databse, so you would just be reading the data
# in from the database, not uploading it from R.
# Load and upload flights data
library(hflights)
hflights_db <- tbl(hflights_sqlite(), "hflights")
# Create a user membership data.frame that maps user names to an airline
# company.
membership <- data.frame(
user = c("kim", "sam", "john", "kelly", "ben", "joe"),
company = c("", "DL", "AA", "UA", "US", "DL"),
role = c("manager", rep("user", 5)))
membership_db <- tbl(saveSQLite(membership, "membership"), "membership")
airlines <- data.frame(
abbrev = c("AA", "DL", "UA", "US"),
name = c("American Airlines", "Delta Air Lines",
"United Airlines", "US Airways")
)
airline_db <- tbl(saveSQLite(airlines, "airline"), "airline")
#' Get the full name of an airline given its abbreviation.
airlineName <- function(abbr){
as.data.frame(select(filter(airline_db, abbrev == abbr), name))[1,1]
}
shinyServer(function(input, output, session) {
#' Get the current user's username
user <- reactive({
curUser <- session$user
# Not logged in. Shiny Server Pro should be configured to prevent this.
if (is.null(curUser)){
return(NULL)
}
# Look up the user in the database to load all the associated data.
user <- as.data.frame(
filter(membership_db, user==curUser)
)
# No user in the database
if (nrow(user) < 1){
return(NULL)
}
user[1,]
})
#' Determine whether or not the current user is a manager.
isManager <- reactive({
if (is.null(user())){
return(FALSE)
}
role <- user()$role
return(role == "manager")
})
#' Get the company of which the current user is a member
userCompany <- reactive({
if (is.null(user())){
return(NULL)
}
if (isManager()){
# If the user is a manager, then they're allowed to select any company
# they want and view its data.
if (is.null(input$company)){
return(as.data.frame(airline_db)$abbrev[1])
}
return(input$company)
}
# Otherwise this is just a regular, logged-in user. Look up what company
# they're associated with and return that.
user()$company
})
#' Get the data the current user has permissions to see
#' @return a dplyr tbl
companyData <- reactive({
# Trim down to only relevant variables
delays <- select(hflights_db, Month, DayofMonth, DepDelay, UniqueCarrier)
# Trim down to only values that we have permissions to see
comp <- userCompany()
delays <- filter(delays, UniqueCarrier == comp)
delays
})
#' Of the data a user is allowed to see, further refine it to only include the
#' date range selected by the user.
filteredData <- reactive({
# Get current month and day
curMonth <- month(now())
curDay <- day(now())
# Get the previous month and day based on the slider input
prevMonth <- month(now()-days(input$days))
prevDay <- day(now()-days(input$days))
# Filter to only include the flights in between the selected dates.
data <- filter(companyData(),
(Month > prevMonth | (Month == prevMonth & DayofMonth >= prevDay)) &
(Month < curMonth | (Month == curMonth & DayofMonth <= curDay)))
as.data.frame(data)
})
output$title <- renderText({
if(is.null(user())){
return("ERROR: This application is designed to be run in Shiny Server Pro and to require authentication.")
}
paste0("Airline Delays for ", airlineName(userCompany()))
})
output$userPanel <- renderUI({
if (isManager()){
# The management UI should have a drop-down that allows you to select a
# company.
tagList(
HTML(paste0("Logged in as <code>", user()$user,
"</code> who is a <code>", user()$role ,"</code>.")),
hr(),
p("As a manager, you may select any company's data you wish to view."),
selectInput("company", "", as.data.frame(airline_db)$abbrev)
)
} else{
# It's just a regular user. Just tell them who they are.
HTML(paste0("Logged in as <code>", user()$user, "</code> with <code>",
airlineName(userCompany()),"</code>."))
}
})
#' Print a boxplot of the selected data.
output$box <- renderPlot({
boxplot(
lapply(
split(filteredData(), as.factor(
paste0(filteredData()$Month, "/", filteredData()$DayofMonth))),
function(dayData){
dayData$DepDelay
}
), ylab = "Delay (minutes)"
)
})
})
| /029-authentication-and-database/server.R | no_license | trestletech/shiny-examples | R | false | false | 5,704 | r |
library(shiny)
library(dplyr)
library(lubridate)
# Load libraries and functions needed to create SQLite databases.
library(RSQLite)
# Used to be provided by dplyr, we'll hack together on our own now.
hflights_sqlite <- function(path = NULL) {
path <- dplyr:::db_location(path, "hflights.sqlite")
if (!file.exists(path)) {
message("Caching hflights db at ", path)
src <- src_sqlite(path, create = TRUE)
copy_to(src, getExportedValue("hflights", "hflights"), "hflights",
temporary = FALSE,
indexes = list("Dest", c("Year", "Month", "DayofMonth"), "UniqueCarrier")
)
} else {
src <- src_sqlite(path)
}
src
}
saveSQLite <- function(data, name){
path <- dplyr:::db_location(filename=paste0(name, ".sqlite"))
if (!file.exists(path)) {
message("Caching db at ", path)
src <- src_sqlite(path, create = TRUE)
copy_to(src, data, name, temporary = FALSE)
} else {
src <- src_sqlite(path)
}
return (src)
}
# Load/create some data and put it in SQLite. In practice, the data you want
# likely already exists in the databse, so you would just be reading the data
# in from the database, not uploading it from R.
# Load and upload flights data
library(hflights)
hflights_db <- tbl(hflights_sqlite(), "hflights")
# Create a user membership data.frame that maps user names to an airline
# company.
membership <- data.frame(
user = c("kim", "sam", "john", "kelly", "ben", "joe"),
company = c("", "DL", "AA", "UA", "US", "DL"),
role = c("manager", rep("user", 5)))
membership_db <- tbl(saveSQLite(membership, "membership"), "membership")
airlines <- data.frame(
abbrev = c("AA", "DL", "UA", "US"),
name = c("American Airlines", "Delta Air Lines",
"United Airlines", "US Airways")
)
airline_db <- tbl(saveSQLite(airlines, "airline"), "airline")
#' Get the full name of an airline given its abbreviation.
airlineName <- function(abbr){
as.data.frame(select(filter(airline_db, abbrev == abbr), name))[1,1]
}
shinyServer(function(input, output, session) {
#' Get the current user's username
user <- reactive({
curUser <- session$user
# Not logged in. Shiny Server Pro should be configured to prevent this.
if (is.null(curUser)){
return(NULL)
}
# Look up the user in the database to load all the associated data.
user <- as.data.frame(
filter(membership_db, user==curUser)
)
# No user in the database
if (nrow(user) < 1){
return(NULL)
}
user[1,]
})
#' Determine whether or not the current user is a manager.
isManager <- reactive({
if (is.null(user())){
return(FALSE)
}
role <- user()$role
return(role == "manager")
})
#' Get the company of which the current user is a member
userCompany <- reactive({
if (is.null(user())){
return(NULL)
}
if (isManager()){
# If the user is a manager, then they're allowed to select any company
# they want and view its data.
if (is.null(input$company)){
return(as.data.frame(airline_db)$abbrev[1])
}
return(input$company)
}
# Otherwise this is just a regular, logged-in user. Look up what company
# they're associated with and return that.
user()$company
})
#' Get the data the current user has permissions to see
#' @return a dplyr tbl
companyData <- reactive({
# Trim down to only relevant variables
delays <- select(hflights_db, Month, DayofMonth, DepDelay, UniqueCarrier)
# Trim down to only values that we have permissions to see
comp <- userCompany()
delays <- filter(delays, UniqueCarrier == comp)
delays
})
#' Of the data a user is allowed to see, further refine it to only include the
#' date range selected by the user.
filteredData <- reactive({
# Get current month and day
curMonth <- month(now())
curDay <- day(now())
# Get the previous month and day based on the slider input
prevMonth <- month(now()-days(input$days))
prevDay <- day(now()-days(input$days))
# Filter to only include the flights in between the selected dates.
data <- filter(companyData(),
(Month > prevMonth | (Month == prevMonth & DayofMonth >= prevDay)) &
(Month < curMonth | (Month == curMonth & DayofMonth <= curDay)))
as.data.frame(data)
})
output$title <- renderText({
if(is.null(user())){
return("ERROR: This application is designed to be run in Shiny Server Pro and to require authentication.")
}
paste0("Airline Delays for ", airlineName(userCompany()))
})
output$userPanel <- renderUI({
if (isManager()){
# The management UI should have a drop-down that allows you to select a
# company.
tagList(
HTML(paste0("Logged in as <code>", user()$user,
"</code> who is a <code>", user()$role ,"</code>.")),
hr(),
p("As a manager, you may select any company's data you wish to view."),
selectInput("company", "", as.data.frame(airline_db)$abbrev)
)
} else{
# It's just a regular user. Just tell them who they are.
HTML(paste0("Logged in as <code>", user()$user, "</code> with <code>",
airlineName(userCompany()),"</code>."))
}
})
#' Print a boxplot of the selected data.
output$box <- renderPlot({
boxplot(
lapply(
split(filteredData(), as.factor(
paste0(filteredData()$Month, "/", filteredData()$DayofMonth))),
function(dayData){
dayData$DepDelay
}
), ylab = "Delay (minutes)"
)
})
})
|
\name{transform}
\alias{transform_incidences}
\title{Transform incidences}
\description{
Carry out transformations between incidence matrices from
endorelations and other codings.
}
\usage{
transform_incidences(x, from = c("PO", "SO", "01", "-1+1"),
to = c("PO", "SO", "01", "-1+1"))
}
\arguments{
\item{x}{An incidence matrix from an endorelation.}
\item{from, to}{The coding scheme (see \bold{Details}).}
}
\details{
In the following, we consider an incidence matrix \eqn{X} with cells
\eqn{x_{jk}} of a relation \eqn{R} with tuples \eqn{(a_j, b_k)}.
For the \code{"PO"} (\dQuote{Preference Order}) coding,
\eqn{X} is a 0/1 matrix, and
\eqn{a_j R b_k} iff \eqn{x_{jk} = 1}. It follows in particular
that if both \eqn{x_{jk}} and \eqn{x_{kj}} are 0, the corresponding pair
\eqn{(a_j, b_k)} is not contained in R, i.e., \eqn{a_j} and \eqn{b_k}
are unrelated.
For the \code{"SO"} (\dQuote{"Strict Order"}) coding,
\eqn{X} is a 0/1 matrix with possible
\code{NA} values. As for \code{"PO"}, \eqn{a_j R b_k} iff
\eqn{x_{jk} = 1}, but at most one of \eqn{x_{jk}} and \eqn{x_{kj}} can
be 1. If both are missing (\code{NA}), \eqn{a_j} and \eqn{b_k}
are unrelated.
For the \code{"01"} coding, \eqn{X} is a matrix with values 0, 1, or
0.5. The coding is similar to \code{"SO"}, except that \code{NA} is
represented by 0.5.
For the \code{"-1+1"} coding, \eqn{X} is a matrix with values -1, 0, or 1.
The coding is similar to \code{"SO"}, except that \code{NA} is
represented by 0, and \eqn{x_{jk} = -1} if \emph{not} \eqn{a_j R b_k}.
}
\seealso{
\code{\link{relation_incidence}()}.
}
\examples{
require("sets") # set(), pair() etc.
x <- relation(domain = c(1,2,3,4),
graph = set(pair(1,2), pair(4,2), pair(1,3), pair(1,4),
pair(3,2), pair(2,1)))
inc <- relation_incidence(x)
print(inc)
transform_incidences(inc, to = "SO")
transform_incidences(inc, to = "01")
transform_incidences(inc, to = "-1+1")
## transformations should be loss-free:
inc2 <- transform_incidences(inc, from = "PO", to = "-1+1")
inc2 <- transform_incidences(inc2, from = "-1+1", to = "SO")
inc2 <- transform_incidences(inc2, from = "SO", to = "01")
inc2 <- transform_incidences(inc2, from = "01", to = "PO")
stopifnot(identical(inc, inc2))
}
\keyword{math}
| /man/transform.Rd | no_license | cran/relations | R | false | false | 2,302 | rd | \name{transform}
\alias{transform_incidences}
\title{Transform incidences}
\description{
Carry out transformations between incidence matrices from
endorelations and other codings.
}
\usage{
transform_incidences(x, from = c("PO", "SO", "01", "-1+1"),
to = c("PO", "SO", "01", "-1+1"))
}
\arguments{
\item{x}{An incidence matrix from an endorelation.}
\item{from, to}{The coding scheme (see \bold{Details}).}
}
\details{
In the following, we consider an incidence matrix \eqn{X} with cells
\eqn{x_{jk}} of a relation \eqn{R} with tuples \eqn{(a_j, b_k)}.
For the \code{"PO"} (\dQuote{Preference Order}) coding,
\eqn{X} is a 0/1 matrix, and
\eqn{a_j R b_k} iff \eqn{x_{jk} = 1}. It follows in particular
that if both \eqn{x_{jk}} and \eqn{x_{kj}} are 0, the corresponding pair
\eqn{(a_j, b_k)} is not contained in R, i.e., \eqn{a_j} and \eqn{b_k}
are unrelated.
For the \code{"SO"} (\dQuote{"Strict Order"}) coding,
\eqn{X} is a 0/1 matrix with possible
\code{NA} values. As for \code{"PO"}, \eqn{a_j R b_k} iff
\eqn{x_{jk} = 1}, but at most one of \eqn{x_{jk}} and \eqn{x_{kj}} can
be 1. If both are missing (\code{NA}), \eqn{a_j} and \eqn{b_k}
are unrelated.
For the \code{"01"} coding, \eqn{X} is a matrix with values 0, 1, or
0.5. The coding is similar to \code{"SO"}, except that \code{NA} is
represented by 0.5.
For the \code{"-1+1"} coding, \eqn{X} is a matrix with values -1, 0, or 1.
The coding is similar to \code{"SO"}, except that \code{NA} is
represented by 0, and \eqn{x_{jk} = -1} if \emph{not} \eqn{a_j R b_k}.
}
\seealso{
\code{\link{relation_incidence}()}.
}
\examples{
require("sets") # set(), pair() etc.
x <- relation(domain = c(1,2,3,4),
graph = set(pair(1,2), pair(4,2), pair(1,3), pair(1,4),
pair(3,2), pair(2,1)))
inc <- relation_incidence(x)
print(inc)
transform_incidences(inc, to = "SO")
transform_incidences(inc, to = "01")
transform_incidences(inc, to = "-1+1")
## transformations should be loss-free:
inc2 <- transform_incidences(inc, from = "PO", to = "-1+1")
inc2 <- transform_incidences(inc2, from = "-1+1", to = "SO")
inc2 <- transform_incidences(inc2, from = "SO", to = "01")
inc2 <- transform_incidences(inc2, from = "01", to = "PO")
stopifnot(identical(inc, inc2))
}
\keyword{math}
|
# RAKIP Generic model
#
# TODO
#
# OpenAPI spec version: 1.0.4
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' DataModelModelMath Class
#'
#' @field parameter
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
DataModelModelMath <- R6::R6Class(
'DataModelModelMath',
public = list(
`parameter` = NULL,
initialize = function(`parameter`){
if (!missing(`parameter`)) {
stopifnot(is.list(`parameter`), length(`parameter`) != 0)
lapply(`parameter`, function(x) stopifnot(R6::is.R6(x)))
self$`parameter` <- `parameter`
}
},
toJSON = function() {
DataModelModelMathObject <- list()
if (!is.null(self$`parameter`)) {
DataModelModelMathObject[['parameter']] <- lapply(self$`parameter`, function(x) x$toJSON())
}
DataModelModelMathObject
},
fromJSON = function(DataModelModelMathJson) {
DataModelModelMathObject <- jsonlite::fromJSON(DataModelModelMathJson)
if (!is.null(DataModelModelMathObject$`parameter`)) {
self$`parameter` <- lapply(DataModelModelMathObject$`parameter`, function(x) {
parameterObject <- Parameter$new()
parameterObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE))
parameterObject
})
}
},
toJSONString = function() {
sprintf(
'{
"parameter": [%s]
}',
lapply(self$`parameter`, function(x) paste(x$toJSON(), sep=","))
)
},
fromJSONString = function(DataModelModelMathJson) {
DataModelModelMathObject <- jsonlite::fromJSON(DataModelModelMathJson)
self$`parameter` <- lapply(DataModelModelMathObject$`parameter`, function(x) Parameter$new()$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE)))
}
)
)
| /R/DataModelModelMath.r | no_license | miguelalba/fsk_metadata | R | false | false | 1,810 | r | # RAKIP Generic model
#
# TODO
#
# OpenAPI spec version: 1.0.4
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' DataModelModelMath Class
#'
#' @field parameter
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
DataModelModelMath <- R6::R6Class(
'DataModelModelMath',
public = list(
`parameter` = NULL,
initialize = function(`parameter`){
if (!missing(`parameter`)) {
stopifnot(is.list(`parameter`), length(`parameter`) != 0)
lapply(`parameter`, function(x) stopifnot(R6::is.R6(x)))
self$`parameter` <- `parameter`
}
},
toJSON = function() {
DataModelModelMathObject <- list()
if (!is.null(self$`parameter`)) {
DataModelModelMathObject[['parameter']] <- lapply(self$`parameter`, function(x) x$toJSON())
}
DataModelModelMathObject
},
fromJSON = function(DataModelModelMathJson) {
DataModelModelMathObject <- jsonlite::fromJSON(DataModelModelMathJson)
if (!is.null(DataModelModelMathObject$`parameter`)) {
self$`parameter` <- lapply(DataModelModelMathObject$`parameter`, function(x) {
parameterObject <- Parameter$new()
parameterObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE))
parameterObject
})
}
},
toJSONString = function() {
sprintf(
'{
"parameter": [%s]
}',
lapply(self$`parameter`, function(x) paste(x$toJSON(), sep=","))
)
},
fromJSONString = function(DataModelModelMathJson) {
DataModelModelMathObject <- jsonlite::fromJSON(DataModelModelMathJson)
self$`parameter` <- lapply(DataModelModelMathObject$`parameter`, function(x) Parameter$new()$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE)))
}
)
)
|
\name{pers.rate.obj}
\alias{pers.rate.obj}
\title{Calculating the global disease persistence rate in a metapopulation.}
\description{Calculating the global disease persistence rate in a metapopulation, by using the Kaplan–Meier survival curve of the metapopulation.}
\usage{pers.rate.obj<-function(object)}
\arguments{
\item{object}{a seir object with the condition, this object has to have the value of the parameter 'persistence'. Because, based on this parameter, we can estimated the survival rate.}
}
\details{For this function, we have a set of the survival time of all subpopulation in the metapopulation, due to the value of the parameter 'persistence'. Based on this value, we use the function 'survreg' of the package 'survival' to estimate the global persistence rate for a metapopulation. The function 'survival' is used for the Parametric Survival Model. Therefore, we get the estimated global persistence rate.
}
\value{
A matrix (or vector) with columns giving lower and upper confidence limits for the estimated value. These will be labelled as (1-level)/2 and 1 - (1-level)/2 in % (by default 2.5% and 97.5%).
}
\author{TRAN Thi Cam Giang}
\seealso{
'pers.rate.obj' function in the 'dizzys' package.
}
\examples{
#STO, STO
sto<-seir(N=10e5,type="stoch",duration=30*365,beta1=0.1,nbVilles=10)
objper<- persistence(sto)
pers.obj<-pers.rate.obj(objper)
pause()
}
\keyword{ seir model }
\keyword{ R package }% __ONLY ONE__ keyword per line
| /CODE_dizzys/refreshDIZZYS_2015_10_23/lan1/dizzysNEWYANN/man/pers.rate.obj.Rd | no_license | ttcgiang/THESE_GitHub | R | false | false | 1,478 | rd | \name{pers.rate.obj}
\alias{pers.rate.obj}
\title{Calculating the global disease persistence rate in a metapopulation.}
\description{Calculating the global disease persistence rate in a metapopulation, by using the Kaplan–Meier survival curve of the metapopulation.}
\usage{pers.rate.obj<-function(object)}
\arguments{
\item{object}{a seir object with the condition, this object has to have the value of the parameter 'persistence'. Because, based on this parameter, we can estimated the survival rate.}
}
\details{For this function, we have a set of the survival time of all subpopulation in the metapopulation, due to the value of the parameter 'persistence'. Based on this value, we use the function 'survreg' of the package 'survival' to estimate the global persistence rate for a metapopulation. The function 'survival' is used for the Parametric Survival Model. Therefore, we get the estimated global persistence rate.
}
\value{
A matrix (or vector) with columns giving lower and upper confidence limits for the estimated value. These will be labelled as (1-level)/2 and 1 - (1-level)/2 in % (by default 2.5% and 97.5%).
}
\author{TRAN Thi Cam Giang}
\seealso{
'pers.rate.obj' function in the 'dizzys' package.
}
\examples{
#STO, STO
sto<-seir(N=10e5,type="stoch",duration=30*365,beta1=0.1,nbVilles=10)
objper<- persistence(sto)
pers.obj<-pers.rate.obj(objper)
pause()
}
\keyword{ seir model }
\keyword{ R package }% __ONLY ONE__ keyword per line
|
# Combine three seasons of cleaned Premier League match data into one dataset.
modelData_18_19 <- read.csv("premLeague_18_19_clean.csv")
modelData_17_18 <- read.csv("premLeague_17_18_clean.csv")
modelData_16_17 <- read.csv("premLeague_16_17_clean.csv")
# Stack the seasons row-wise (all three files share the same columns).
final_Data <- rbind(modelData_16_17, modelData_17_18, modelData_18_19)
# BUG FIX: the original called read.csv(final_Data, "final_Data.csv"), which is
# invalid — read.csv expects a file path, not a data frame. The intent is to
# save the combined dataset, so write it out instead.
write.csv(final_Data, "final_Data.csv", row.names = FALSE)
| /Final_Data_code.R | no_license | hosavagyan/CS252-Data-Science-Premier-League-Analysis | R | false | false | 286 | r | modelData_18_19 <- read.csv("premLeague_18_19_clean.csv")
modelData_17_18 <- read.csv("premLeague_17_18_clean.csv")
modelData_16_17 <- read.csv("premLeague_16_17_clean.csv")
final_Data <-rbind(modelData_16_17,modelData_17_18,modelData_18_19)
read.csv(final_Data,"final_Data.csv")
|
#---------------------------------------------------------
# Spatialise a Random Forest classification model:
# stack predictor rasters, attach raster values to training points,
# fit the classifier, inspect it, and write a predicted raster to disk.
library(sp)
library(rgdal)
library(raster)
library(randomForest)
# CREATE LIST OF RASTERS (all .img files in the working directory)
rlist <- list.files(getwd(), pattern = "img$", full.names = TRUE)
# CREATE RASTER STACK of predictor layers
xvars <- stack(rlist)
# READ POINT SHAPEFILE TRAINING DATA
# SYNTAX FIX: the original was missing the comma between dsn and layer,
# which made the script unparseable.
sdata <- readOGR(dsn = getwd(), layer = "inshape")
# ASSIGN RASTER VALUES TO TRAINING DATA: extract predictor values at each
# training point and bind them onto the shapefile's attribute table.
v <- as.data.frame(extract(xvars, sdata))
sdata@data = data.frame(sdata@data, v[match(rownames(sdata@data), rownames(v)),])
# RUN RF MODEL: predictors start at column 3; response is the "train" field.
rf.mdl <- randomForest(x=sdata@data[,3:ncol(sdata@data)], y=as.factor(sdata@data[,"train"]),
                       ntree=501, importance=TRUE)
# CHECK ERROR CONVERGENCE (OOB error rate vs number of trees)
plot(rf.mdl)
# PLOT mean decrease in accuracy VARIABLE IMPORTANCE
varImpPlot(rf.mdl, type=1)
# PREDICT MODEL over the full raster extent and write the classified image.
predict(xvars, rf.mdl, filename="RfClassPred.img", type="response",
        index=1, na.rm=TRUE, progress="window", overwrite=TRUE)
| /SK418_RandomForestSpatialize.R | no_license | cboisvenue/RCodeSK | R | false | false | 1,026 | r | #---------------------------------------------------------
# bits of code that will help when I am ready to spatialise the Random Forest model
require(sp)
require(rgdal)
require(raster)
require(randomForest)
# CREATE LIST OF RASTERS
rlist=list.files(getwd(), pattern="img$", full.names=TRUE)
# CREATE RASTER STACK
xvars <- stack(rlist)
# READ POINT SHAPEFILE TRAINING DATA
sdata <- readOGR(dsn=getwd() layer="inshape")
# ASSIGN RASTER VALUES TO TRAINING DATA
v <- as.data.frame(extract(xvars, sdata))
sdata@data = data.frame(sdata@data, v[match(rownames(sdata@data), rownames(v)),])
# RUN RF MODEL
rf.mdl <- randomForest(x=sdata@data[,3:ncol(sdata@data)], y=as.factor(sdata@data[,"train"]),
ntree=501, importance=TRUE)
# CHECK ERROR CONVERGENCE
plot(rf.mdl)
# PLOT mean decrease in accuracy VARIABLE IMPORTANCE
varImpPlot(rf.mdl, type=1)
# PREDICT MODEL
predict(xvars, rf.mdl, filename="RfClassPred.img", type="response",
index=1, na.rm=TRUE, progress="window", overwrite=TRUE)
|
# For data manipulation and tidying
library(dplyr)
# For data visualizations
library(ggplot2)
# For cluster plotting (plotcluster)
library(fpc)
# For modeling and predictions
library(caret)
# Regularised regression backend for caret's "glmnet" method
library(glmnet)
# Random forest backend for caret's "ranger" method
library(ranger)
# Misc statistics / SVM (caret dependency)
library(e1071)
# For cluster validation (Dunn index)
library(clValid)

# Read the train/test sets and tag each row with its origin so the combined
# frame can be split back apart after feature engineering.
d_train <- read.csv('data/monster_train.csv', header = TRUE, stringsAsFactors = FALSE)
d_train$Dataset <- "train"
d_test <- read.csv('data/monster_test.csv', header = TRUE, stringsAsFactors = FALSE)
d_test$Dataset <- "test"
full <- bind_rows(d_train, d_test)

str(full)
summary(full)

# SYNTAX FIX: the next line was bare prose ("We have 8 variables currently:")
# left over from a notebook/markdown conversion, which made the script
# unparseable. It is retained as a comment with the glossary that follows.
# We have 8 variables currently:
# * **ID** : Appears to be the identification number of the monster in question
# * **Bone Length** : Average length of the bones in the creature, normalized to 0 - 1
# * **Rotting Flesh** : Percentage of flesh on the creature that is rotting
# * **Hair Length** : Average length of the hair on the creature, normalized from 0 - 1
# * **Has Soul** : The percentage of a soul present in the creature
# * **Color** : The color of the creature
# * **Type** : The category of the creature (i.e. ghoul, goblin or ghost)
# * **Dataset** : The column I added when importing data indicating whether the observation was part of the original training or test set

# It seems like a few of these variables would serve better as factors, rather than character strings, so I'll take care of that.
factor_variables <- c('id', 'color', 'type', 'Dataset')
full[factor_variables] <- lapply(full[factor_variables], function(x) as.factor(x))
# Data Exploration
# Keep only labelled (training) rows for the exploratory plots below.
train_2 <- full[full$Dataset == 'train', ]
# Distribution of Continuous Variables by Creature Type{.tabset}
# Each boxplot compares one normalised feature across the three creature
# types, using a colour-blind-friendly fill palette.
# Bone Length
ggplot(train_2,
       aes(x = type,
           y = bone_length,
           fill = type)) +
  geom_boxplot() +
  guides(fill = FALSE) +
  xlab("Creature") +
  ylab("Bone Length") +
  scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
#### Rotting Flesh
ggplot(train_2,
       aes(x = type,
           y = rotting_flesh,
           fill = type)) +
  geom_boxplot() +
  guides(fill = FALSE) +
  xlab("Creature") +
  ylab("Percentage of Rotting Flesh") +
  scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
#### Hair Length
ggplot(train_2,
       aes(x = type,
           y = hair_length,
           fill = type)) +
  geom_boxplot() +
  guides(fill = FALSE) +
  xlab("Creature") +
  ylab("Hair Length") +
  scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
#### Soul
ggplot(train_2,
       aes(x = type,
           y = has_soul,
           fill = type)) +
  geom_boxplot() +
  guides(fill = FALSE) +
  xlab("Creature") +
  ylab("Percentage of Soul Present") +
  scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
### Distribution of Color by Creature Type{.tabset}
# Bar chart of colour counts for each creature type.
# NOTE(review): the filters below match capitalised levels ('Ghost', 'Ghoul',
# 'Goblin'). If the `type` column stores lowercase labels instead, these
# filters return zero rows — confirm the level spelling against the data.
#### Ghost
ghost_color <- train_2 %>%
  filter(type == 'Ghost') %>%
  group_by(color) %>%
  summarise(count = n())
ggplot(ghost_color,
       aes(x = color,
           y = count,
           fill = color)) +
  geom_bar(stat = "identity") +
  xlab("Color") +
  ylab("Number of Observations") +
  scale_fill_manual(values = c("Black", "#D55E00", "#0072B2", "#F0E442", "#009E73", "#999999")) +
  theme(panel.grid.minor = element_blank()) +
  ylim(0, 50) +
  guides(fill = FALSE)
#### Ghoul
ghoul_color <- train_2 %>%
  filter(type == 'Ghoul') %>%
  group_by(color) %>%
  summarise(count = n())
ggplot(ghoul_color,
       aes(x = color,
           y = count,
           fill = color)) +
  geom_bar(stat = "identity") +
  xlab("Color") +
  ylab("Number of Observations") +
  scale_fill_manual(values = c("Black", "#D55E00", "#0072B2", "#F0E442", "#009E73", "#999999")) +
  theme(panel.grid.minor = element_blank()) +
  ylim(0, 50) +
  guides(fill = FALSE)
#### Goblin
goblin_color <- train_2 %>%
  filter(type == 'Goblin') %>%
  group_by(color) %>%
  summarise(count = n())
ggplot(goblin_color,
       aes(x = color,
           y = count,
           fill = color)) +
  geom_bar(stat = "identity") +
  xlab("Color") +
  ylab("Number of Observations") +
  scale_fill_manual(values = c("Black", "#D55E00", "#0072B2", "#F0E442", "#009E73", "#999999")) +
  theme(panel.grid.minor = element_blank()) +
  ylim(0, 50) +
  guides(fill = FALSE)
# Pairwise scatterplots of the four numeric features (columns 2-5), coloured
# by creature type, to eyeball which feature pairs separate the classes.
pairs(full[,2:5],
      col = full$type,
      labels = c("Bone Length", "Rotting Flesh", "Hair Length", "Soul"))
# Feature engineering: interaction between hair length and soul fraction.
full <- full %>%
  mutate(hair_soul = hair_length * has_soul)
# Labelled rows only (test rows have NA type) for plotting the new feature.
full_1 <- full %>%
  filter(!is.na(type))
ggplot(full_1,
       aes(x = type,
           y = hair_soul,
           fill = type)) +
  geom_boxplot() +
  guides(fill = FALSE) +
  xlab("Creature") +
  ylab("Combination of Hair/Soul") +
  scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
# Remaining pairwise interaction features between the four numeric columns.
full <- full %>%
  mutate(bone_flesh = bone_length * rotting_flesh,
         bone_hair = bone_length * hair_length,
         bone_soul = bone_length * has_soul,
         flesh_hair = rotting_flesh * hair_length,
         flesh_soul = rotting_flesh * has_soul)
summary(full)
### Cluster Without Categorical Variables
# Set the seed
set.seed(100)
# Extract creature labels and remove column from dataset
creature_labels <- full$type
full2 <- full
full2$type <- NULL
# Remove categorical variables (id, color, and dataset) from dataset
full2$id <- NULL
full2$color <- NULL
full2$Dataset <- NULL
# Perform k-means clustering with 3 clusters, repeat 30 times
creature_km_1 <- kmeans(full2, 3, nstart = 30)
# Project onto discriminant coordinates to visualise the cluster assignment.
plotcluster(full2, creature_km_1$cluster)
# Dunn index: higher values indicate tighter, better-separated clusters.
dunn_ckm_1 <- dunn(clusters = creature_km_1$cluster, Data = full2)
dunn_ckm_1
# Cross-tabulate cluster assignments against the known labels
# (test rows contribute NA labels and drop out of the table).
table(creature_km_1$cluster, creature_labels)
# Split the combined frame back into the original train/test partitions.
train_complete <- full[full$Dataset == 'train', ]
test_complete <- full[full$Dataset == 'test', ]
# Because I plan on using the `caret` package for all of my modeling, I'm going to generate a standard `trainControl` so that those tuning parameters remain consistent throughout the various models.
### Creating trainControl
# I will create a system that will perform 20 repeats of a 10-Fold cross-validation of the data.
# BUG FIX: caret only honours `repeats` when method = "repeatedcv"; the
# original method = "cv" silently ran a single 10-fold CV despite the stated
# intent of 20 repeats.
myControl <- trainControl(
  method = "repeatedcv",
  number = 10,
  repeats = 20,
  verboseIter = TRUE)
### Random Forest Modeling
# Fixed seed so the resampling folds are reproducible across runs.
set.seed(10)
# Random forest (ranger) on all base features, colour, and the engineered
# interaction terms; tuneLength = 3 tries three mtry values.
rf_model <- train(
  type ~ bone_length + rotting_flesh + hair_length + has_soul + color + hair_soul + bone_flesh + bone_hair +
    bone_soul + flesh_hair + flesh_soul,
  tuneLength = 3,
  data = train_complete,
  method = "ranger",
  trControl = myControl,
  importance = 'impurity'
)
# Creating a Variable Importance variable
vimp <- varImp(rf_model)
# Plotting "vimp" (all predictors, ranked by impurity importance)
ggplot(vimp,
       top = dim(vimp$importance)[1])
# Huh. Our "hair_soul" variable seems to be the most important to this model and our other interactions rank pretty highly. I suppose we can hold on to them for now. Color, on the other hand, hardly plays into this. Let's try removing it from a second random forest model.
set.seed(10)
# Second random forest: identical to rf_model but with `color` dropped.
rf_model_2 <- train(
  type ~ bone_length + rotting_flesh + hair_length + has_soul + hair_soul + bone_flesh + bone_hair +
    bone_soul + flesh_hair + flesh_soul,
  tuneLength = 3,
  data = train_complete,
  method = "ranger",
  trControl = myControl,
  importance = 'impurity'
)
### GLMnet Modeling
# I'm going to follow the random forest model up with a glmnet model, also from the `caret` package.
set.seed(10)
# Elastic-net over a grid of alpha (0 = ridge, 1 = lasso) and 20 lambdas.
glm_model <- train(
  type ~ bone_length + rotting_flesh + hair_length + has_soul + color + hair_soul + bone_flesh + bone_hair +
    bone_soul + flesh_hair + flesh_soul,
  method = "glmnet",
  tuneGrid = expand.grid(alpha = 0:1,
                         lambda = seq(0.0001, 1, length = 20)),
  data = train_complete,
  trControl = myControl
)
set.seed(10)
# Second glmnet: same grid, with `color` dropped (mirrors rf_model_2).
glm_model_2 <- train(
  type ~ bone_length + rotting_flesh + hair_length + has_soul + hair_soul + bone_flesh + bone_hair +
    bone_soul + flesh_hair + flesh_soul,
  method = "glmnet",
  tuneGrid = expand.grid(alpha = 0:1,
                         lambda = seq(0.0001, 1, length = 20)),
  data = train_complete,
  trControl = myControl
)
### Comparing model fit
# Create a list of models
models <- list(rf = rf_model, rf2 = rf_model_2, glmnet = glm_model, glmnet2 = glm_model_2)
# Resample the models (compares them over the shared CV folds)
resampled <- resamples(models)
# Generate a summary
summary(resampled)
# Plot the differences between model fits
dotplot(resampled, metric = "Accuracy")
## Predicting Creature Identity
# Sort the test set by id so predictions line up with submission order.
test_complete <- test_complete %>%
  arrange(id)
my_prediction <- predict(glm_model_2, test_complete)
# my_solution_GGG_03 <- data.frame(id = test_complete$id, Type = my_prediction)
# write.csv(my_solution_GGG_03, file = "my_solution_GGG_03.csv", row.names = FALSE) | /06_monster_wo_xgboost.R | no_license | trisgelar/belajar-xgboost | R | false | false | 9,134 | r | # For data manipulation and tidying
# For data manipulation and tidying
library(dplyr)
# For data visualizations
library(ggplot2)
# For cluster plotting (plotcluster)
library(fpc)
# For modeling and predictions
library(caret)
# Regularised regression backend for caret's "glmnet" method
library(glmnet)
# Random forest backend for caret's "ranger" method
library(ranger)
# Misc statistics / SVM (caret dependency)
library(e1071)
# For cluster validation (Dunn index)
library(clValid)

# Read the train/test sets and tag each row with its origin so the combined
# frame can be split back apart after feature engineering.
d_train <- read.csv('data/monster_train.csv', header = TRUE, stringsAsFactors = FALSE)
d_train$Dataset <- "train"
d_test <- read.csv('data/monster_test.csv', header = TRUE, stringsAsFactors = FALSE)
d_test$Dataset <- "test"
full <- bind_rows(d_train, d_test)

str(full)
summary(full)

# SYNTAX FIX: the next line was bare prose ("We have 8 variables currently:")
# left over from a notebook/markdown conversion, which made the script
# unparseable. It is retained as a comment with the glossary that follows.
# We have 8 variables currently:
# * **ID** : Appears to be the identification number of the monster in question
# * **Bone Length** : Average length of the bones in the creature, normalized to 0 - 1
# * **Rotting Flesh** : Percentage of flesh on the creature that is rotting
# * **Hair Length** : Average length of the hair on the creature, normalized from 0 - 1
# * **Has Soul** : The percentage of a soul present in the creature
# * **Color** : The color of the creature
# * **Type** : The category of the creature (i.e. ghoul, goblin or ghost)
# * **Dataset** : The column I added when importing data indicating whether the observation was part of the original training or test set

# It seems like a few of these variables would serve better as factors, rather than character strings, so I'll take care of that.
factor_variables <- c('id', 'color', 'type', 'Dataset')
full[factor_variables] <- lapply(full[factor_variables], function(x) as.factor(x))
# Data Exploration
train_2 <- full[full$Dataset == 'train', ]
# Distribution of Continuous Variables by Creature Type{.tabset}
# Bone Length
ggplot(train_2,
aes(x = type,
y = bone_length,
fill = type)) +
geom_boxplot() +
guides(fill = FALSE) +
xlab("Creature") +
ylab("Bone Length") +
scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
#### Rotting Flesh
ggplot(train_2,
aes(x = type,
y = rotting_flesh,
fill = type)) +
geom_boxplot() +
guides(fill = FALSE) +
xlab("Creature") +
ylab("Percentage of Rotting Flesh") +
scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
#### Hair Length
ggplot(train_2,
aes(x = type,
y = hair_length,
fill = type)) +
geom_boxplot() +
guides(fill = FALSE) +
xlab("Creature") +
ylab("Hair Length") +
scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
#### Soul
ggplot(train_2,
aes(x = type,
y = has_soul,
fill = type)) +
geom_boxplot() +
guides(fill = FALSE) +
xlab("Creature") +
ylab("Percentage of Soul Present") +
scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
### Distribution of Color by Creature Type{.tabset}
#### Ghost
ghost_color <- train_2 %>%
filter(type == 'Ghost') %>%
group_by(color) %>%
summarise(count = n())
ggplot(ghost_color,
aes(x = color,
y = count,
fill = color)) +
geom_bar(stat = "identity") +
xlab("Color") +
ylab("Number of Observations") +
scale_fill_manual(values = c("Black", "#D55E00", "#0072B2", "#F0E442", "#009E73", "#999999")) +
theme(panel.grid.minor = element_blank()) +
ylim(0, 50) +
guides(fill = FALSE)
#### Ghoul
ghoul_color <- train_2 %>%
filter(type == 'Ghoul') %>%
group_by(color) %>%
summarise(count = n())
ggplot(ghoul_color,
aes(x = color,
y = count,
fill = color)) +
geom_bar(stat = "identity") +
xlab("Color") +
ylab("Number of Observations") +
scale_fill_manual(values = c("Black", "#D55E00", "#0072B2", "#F0E442", "#009E73", "#999999")) +
theme(panel.grid.minor = element_blank()) +
ylim(0, 50) +
guides(fill = FALSE)
#### Goblin
goblin_color <- train_2 %>%
filter(type == 'Goblin') %>%
group_by(color) %>%
summarise(count = n())
ggplot(goblin_color,
aes(x = color,
y = count,
fill = color)) +
geom_bar(stat = "identity") +
xlab("Color") +
ylab("Number of Observations") +
scale_fill_manual(values = c("Black", "#D55E00", "#0072B2", "#F0E442", "#009E73", "#999999")) +
theme(panel.grid.minor = element_blank()) +
ylim(0, 50) +
guides(fill = FALSE)
pairs(full[,2:5],
col = full$type,
labels = c("Bone Length", "Rotting Flesh", "Hair Length", "Soul"))
full <- full %>%
mutate(hair_soul = hair_length * has_soul)
full_1 <- full %>%
filter(!is.na(type))
ggplot(full_1,
aes(x = type,
y = hair_soul,
fill = type)) +
geom_boxplot() +
guides(fill = FALSE) +
xlab("Creature") +
ylab("Combination of Hair/Soul") +
scale_fill_manual(values = c("#D55E00", "#0072B2", "#009E73"))
full <- full %>%
mutate(bone_flesh = bone_length * rotting_flesh,
bone_hair = bone_length * hair_length,
bone_soul = bone_length * has_soul,
flesh_hair = rotting_flesh * hair_length,
flesh_soul = rotting_flesh * has_soul)
summary(full)
### Cluster Without Categorical Variables
# Set the seed
set.seed(100)
# Extract creature labels and remove column from dataset
creature_labels <- full$type
full2 <- full
full2$type <- NULL
# Remove categorical variables (id, color, and dataset) from dataset
full2$id <- NULL
full2$color <- NULL
full2$Dataset <- NULL
# Perform k-means clustering with 3 clusters, repeat 30 times
creature_km_1 <- kmeans(full2, 3, nstart = 30)
plotcluster(full2, creature_km_1$cluster)
dunn_ckm_1 <- dunn(clusters = creature_km_1$cluster, Data = full2)
dunn_ckm_1
table(creature_km_1$cluster, creature_labels)
train_complete <- full[full$Dataset == 'train', ]
test_complete <- full[full$Dataset == 'test', ]
# Because I plan on using the `caret` package for all of my modeling, I'm going to generate a standard `trainControl` so that those tuning parameters remain consistent throughout the various models.
### Creating trainControl
# I will create a system that will perform 20 repeats of a 10-Fold cross-validation of the data.
# BUG FIX: caret only honours `repeats` when method = "repeatedcv"; the
# original method = "cv" silently ran a single 10-fold CV despite the stated
# intent of 20 repeats.
myControl <- trainControl(
  method = "repeatedcv",
  number = 10,
  repeats = 20,
  verboseIter = TRUE)
### Random Forest Modeling
set.seed(10)
rf_model <- train(
type ~ bone_length + rotting_flesh + hair_length + has_soul + color + hair_soul + bone_flesh + bone_hair +
bone_soul + flesh_hair + flesh_soul,
tuneLength = 3,
data = train_complete,
method = "ranger",
trControl = myControl,
importance = 'impurity'
)
# Creating a Variable Importance variable
vimp <- varImp(rf_model)
# Plotting "vimp"
ggplot(vimp,
top = dim(vimp$importance)[1])
# Huh. Our "hair_soul" variable seems to be the most important to this model and our other interactions rank pretty highly. I suppose we can hold on to them for now. Color, on the other hand, hardly plays into this. Let's try removing it from a second random forest model.
set.seed(10)
rf_model_2 <- train(
type ~ bone_length + rotting_flesh + hair_length + has_soul + hair_soul + bone_flesh + bone_hair +
bone_soul + flesh_hair + flesh_soul,
tuneLength = 3,
data = train_complete,
method = "ranger",
trControl = myControl,
importance = 'impurity'
)
### GLMnet Modeling
# I'm going to follow the random forest model up with a glmnet model, also from the `caret` package.
set.seed(10)
glm_model <- train(
type ~ bone_length + rotting_flesh + hair_length + has_soul + color + hair_soul + bone_flesh + bone_hair +
bone_soul + flesh_hair + flesh_soul,
method = "glmnet",
tuneGrid = expand.grid(alpha = 0:1,
lambda = seq(0.0001, 1, length = 20)),
data = train_complete,
trControl = myControl
)
set.seed(10)
glm_model_2 <- train(
type ~ bone_length + rotting_flesh + hair_length + has_soul + hair_soul + bone_flesh + bone_hair +
bone_soul + flesh_hair + flesh_soul,
method = "glmnet",
tuneGrid = expand.grid(alpha = 0:1,
lambda = seq(0.0001, 1, length = 20)),
data = train_complete,
trControl = myControl
)
### Comparing model fit
# Create a list of models
models <- list(rf = rf_model, rf2 = rf_model_2, glmnet = glm_model, glmnet2 = glm_model_2)
# Resample the models
resampled <- resamples(models)
# Generate a summary
summary(resampled)
# Plot the differences between model fits
dotplot(resampled, metric = "Accuracy")
## Predicting Creature Identity
test_complete <- test_complete %>%
arrange(id)
my_prediction <- predict(glm_model_2, test_complete)
# my_solution_GGG_03 <- data.frame(id = test_complete$id, Type = my_prediction)
# write.csv(my_solution_GGG_03, file = "my_solution_GGG_03.csv", row.names = FALSE) |
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include fms_service.R
NULL
.fms$associate_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$associate_admin_account_output <- function(...) {
list()
}
.fms$associate_third_party_firewall_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$associate_third_party_firewall_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewallStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_associate_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), Items = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_associate_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), FailedItems = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), Reason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_disassociate_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), Items = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_disassociate_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), FailedItems = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), Reason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_apps_list_output <- function(...) {
list()
}
.fms$delete_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_notification_channel_output <- function(...) {
list()
}
.fms$delete_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), DeleteAllPolicyResources = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_policy_output <- function(...) {
list()
}
.fms$delete_protocols_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_protocols_list_output <- function(...) {
list()
}
.fms$delete_resource_set_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Identifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_resource_set_output <- function(...) {
list()
}
.fms$disassociate_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$disassociate_admin_account_output <- function(...) {
list()
}
.fms$disassociate_third_party_firewall_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$disassociate_third_party_firewall_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewallStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_admin_account_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string")), RoleStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_admin_scope_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_admin_scope_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminScope = structure(list(AccountScope = structure(list(Accounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllAccountsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedAccounts = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), OrganizationalUnitScope = structure(list(OrganizationalUnits = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllOrganizationalUnitsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedOrganizationalUnits = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), RegionScope = structure(list(Regions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllRegionsEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PolicyTypeScope = structure(list(PolicyTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllPolicyTypesEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string")), DefaultList = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_apps_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), AppsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_compliance_detail_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_compliance_detail_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyComplianceDetail = structure(list(PolicyOwner = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), Violators = structure(list(structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), ViolationReason = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), Metadata = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), EvaluationLimitExceeded = structure(logical(0), tags = list(type = "boolean")), ExpiredAt = structure(logical(0), tags = list(type = "timestamp")), IssueInfoMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_notification_channel_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(SnsTopicArn = structure(logical(0), tags = list(type = "string")), SnsRoleName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string")), PolicyOption = structure(list(NetworkFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ResourceSetIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PolicyDescription = structure(logical(0), tags = list(type = "string")), 
PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), PolicyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Serialization template for the FMS GetProtectionStatus request.
# Caller-supplied fields arrive via `...` and are merged into the
# shape by populate() (paws.common helper defined elsewhere).
.fms$get_protection_status_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      PolicyId = structure(logical(0), tags = list(type = "string")),
      MemberAccountId = structure(logical(0), tags = list(type = "string")),
      StartTime = structure(logical(0), tags = list(type = "timestamp")),
      EndTime = structure(logical(0), tags = list(type = "timestamp")),
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Deserialization template for the FMS GetProtectionStatus response.
# Fields supplied through `...` are folded into the shape via populate().
.fms$get_protection_status_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      AdminAccountId = structure(logical(0), tags = list(type = "string")),
      ServiceType = structure(logical(0), tags = list(type = "string")),
      Data = structure(logical(0), tags = list(type = "string")),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Serialization template for the FMS GetProtocolsList request.
# `ListId` identifies the list; `DefaultList` selects the AWS-managed default.
.fms$get_protocols_list_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      ListId = structure(logical(0), tags = list(type = "string")),
      DefaultList = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Deserialization template for the FMS GetProtocolsList response.
# The nested ProtocolsList shape carries the list metadata, the current
# protocol entries, and a map of previous versions keyed by update token;
# populate() (defined elsewhere in the package) fills it from `...`.
.fms$get_protocols_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), ProtocolsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Serialization template for the FMS GetResourceSet request: a single
# `Identifier` string naming the resource set to fetch.
.fms$get_resource_set_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(Identifier = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Deserialization template for the FMS GetResourceSet response: the
# ResourceSet record (id, name, description, type list, status, update
# metadata) plus its ARN.
.fms$get_resource_set_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      ResourceSet = structure(
        list(
          Id = structure(logical(0), tags = list(type = "string")),
          Name = structure(logical(0), tags = list(type = "string")),
          Description = structure(logical(0), tags = list(type = "string")),
          UpdateToken = structure(logical(0), tags = list(type = "string")),
          ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")),
          LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")),
          ResourceSetStatus = structure(logical(0), tags = list(type = "string"))
        ),
        tags = list(type = "structure")
      ),
      ResourceSetArn = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Serialization template for the FMS GetThirdPartyFirewallAssociationStatus
# request: names the third-party firewall vendor being queried.
.fms$get_third_party_firewall_association_status_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Deserialization template for the FMS GetThirdPartyFirewallAssociationStatus
# response: association status plus marketplace onboarding status.
.fms$get_third_party_firewall_association_status_output <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      ThirdPartyFirewallStatus = structure(logical(0), tags = list(type = "string")),
      MarketplaceOnboardingStatus = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Serialization template for the FMS GetViolationDetails request:
# identifies the policy, member account, and resource under inspection.
.fms$get_violation_details_input <- function(...) {
  supplied <- c(as.list(environment()), list(...))
  template <- structure(
    list(
      PolicyId = structure(logical(0), tags = list(type = "string")),
      MemberAccount = structure(logical(0), tags = list(type = "string")),
      ResourceId = structure(logical(0), tags = list(type = "string")),
      ResourceType = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(supplied, template)
}
# Deserialization template for the FMS GetViolationDetails response.
# The ViolationDetail shape enumerates every resource-violation variant the
# service can return (security-group, EC2 network-interface/instance,
# Network Firewall routing/policy, DNS rule-group, and third-party firewall
# violations) together with possible remediation actions. The literal below
# is machine-generated; populate() (defined elsewhere in the package) merges
# the caller-supplied fields from `...` into it. Do not hand-edit the shape.
.fms$get_violation_details_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ViolationDetail = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceViolations = structure(list(structure(list(AwsVPCSecurityGroupViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string")), PartialMatches = structure(list(structure(list(Reference = structure(logical(0), tags = list(type = "string")), TargetViolationReasons = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), PossibleSecurityGroupRemediationActions = structure(list(structure(list(RemediationActionType = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), RemediationResult = structure(list(IPV4Range = structure(logical(0), tags = list(type = "string")), IPV6Range = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), FromPort = structure(logical(0), tags = list(type = "long")), ToPort = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), IsDefaultAction = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), AwsEc2NetworkInterfaceViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolatingSecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), 
AwsEc2InstanceViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), AwsEc2NetworkInterfaceViolations = structure(list(structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolatingSecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), NetworkFirewallMissingFirewallViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingSubnetViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingExpectedRTViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), CurrentRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedRouteTable = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallPolicyModifiedViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), CurrentPolicyDescription = structure(list(StatelessRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = 
"string")), Priority = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), StatelessDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessFragmentDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessCustomActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), Priority = structure(logical(0), tags = list(type = "integer")), Override = structure(list(Action = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), StatefulDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulEngineOptions = structure(list(RuleOrder = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), ExpectedPolicyDescription = structure(list(StatelessRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), Priority = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), StatelessDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessFragmentDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessCustomActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), 
StatefulRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), Priority = structure(logical(0), tags = list(type = "integer")), Override = structure(list(Action = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), StatefulDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulEngineOptions = structure(list(RuleOrder = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), NetworkFirewallInternetTrafficNotInspectedViolation = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), IsRouteTableUsedInDifferentAZ = structure(logical(0), tags = list(type = "boolean")), CurrentFirewallSubnetRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedFirewallEndpoint = structure(logical(0), tags = list(type = "string")), FirewallSubnetId = structure(logical(0), tags = list(type = "string")), ExpectedFirewallSubnetRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = 
"string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualFirewallSubnetRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), InternetGatewayId = structure(logical(0), tags = list(type = "string")), CurrentInternetGatewayRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedInternetGatewayRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualInternetGatewayRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), VpcId = structure(logical(0), tags = list(type = 
"string"))), tags = list(type = "structure")), NetworkFirewallInvalidRouteConfigurationViolation = structure(list(AffectedSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string")), IsRouteTableUsedInDifferentAZ = structure(logical(0), tags = list(type = "boolean")), ViolatingRoute = structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CurrentFirewallSubnetRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedFirewallEndpoint = structure(logical(0), tags = list(type = "string")), ActualFirewallEndpoint = structure(logical(0), tags = list(type = "string")), ExpectedFirewallSubnetId = structure(logical(0), tags = list(type = "string")), ActualFirewallSubnetId = structure(logical(0), tags = list(type = "string")), ExpectedFirewallSubnetRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualFirewallSubnetRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = 
"string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), InternetGatewayId = structure(logical(0), tags = list(type = "string")), CurrentInternetGatewayRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedInternetGatewayRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualInternetGatewayRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallBlackHoleRouteDetectedViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), NetworkFirewallUnexpectedFirewallRoutesViolation = structure(list(FirewallSubnetId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string")), FirewallEndpoint = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallUnexpectedGatewayRoutesViolation = structure(list(GatewayId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingExpectedRoutesViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ExpectedRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DnsRuleGroupPriorityConflictViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string")), ConflictingPriority = structure(logical(0), tags = list(type = "integer")), ConflictingPolicyId = structure(logical(0), tags = list(type = "string")), UnavailablePriorities = structure(list(structure(logical(0), tags = list(type = "integer"))), tags = list(type = "list"))), tags = list(type = "structure")), DnsDuplicateRuleGroupViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DnsRuleGroupLimitExceededViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string")), NumberOfRuleGroupsAlreadyAssociated = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PossibleRemediationActions = structure(list(Description = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(Description = structure(logical(0), tags = list(type = "string")), OrderedRemediationActions = structure(list(structure(list(RemediationAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), EC2CreateRouteAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), DestinationCidrBlock = structure(logical(0), tags = list(type = 
"string")), DestinationPrefixListId = structure(logical(0), tags = list(type = "string")), DestinationIpv6CidrBlock = structure(logical(0), tags = list(type = "string")), VpcEndpointId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), GatewayId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2ReplaceRouteAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), DestinationCidrBlock = structure(logical(0), tags = list(type = "string")), DestinationPrefixListId = structure(logical(0), tags = list(type = "string")), DestinationIpv6CidrBlock = structure(logical(0), tags = list(type = "string")), GatewayId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2DeleteRouteAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), DestinationCidrBlock = structure(logical(0), tags = list(type = "string")), DestinationPrefixListId = structure(logical(0), tags = list(type = "string")), DestinationIpv6CidrBlock = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = 
"string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2CopyRouteTableAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), VpcId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2ReplaceRouteTableAssociationAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), AssociationId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2AssociateRouteTableAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SubnetId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), GatewayId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2CreateRouteTableAction = 
structure(list(Description = structure(logical(0), tags = list(type = "string")), VpcId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), FMSPolicyUpdateFirewallCreationConfigAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), FirewallCreationConfig = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), Order = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), IsDefaultAction = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), FirewallSubnetIsOutOfScopeViolation = structure(list(FirewallSubnetId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZoneId = structure(logical(0), tags = list(type = "string")), VpcEndpointId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteHasOutOfScopeEndpointViolation = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), SubnetAvailabilityZone = 
structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZoneId = structure(logical(0), tags = list(type = "string")), CurrentFirewallSubnetRouteTable = structure(logical(0), tags = list(type = "string")), FirewallSubnetId = structure(logical(0), tags = list(type = "string")), FirewallSubnetRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), InternetGatewayId = structure(logical(0), tags = list(type = "string")), CurrentInternetGatewayRouteTable = structure(logical(0), tags = list(type = "string")), InternetGatewayRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), ThirdPartyFirewallMissingFirewallViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallMissingSubnetViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), ThirdPartyFirewallMissingExpectedRouteTableViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), CurrentRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedRouteTable = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), FirewallSubnetMissingVPCEndpointViolation = structure(list(FirewallSubnetId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZoneId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ResourceDescription = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_admin_accounts_for_organization_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_admin_accounts_for_organization_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccounts = structure(list(structure(list(AdminAccount = structure(logical(0), tags = list(type = "string")), DefaultAdmin = structure(logical(0), tags = list(type = "boolean")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_admins_managing_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_admins_managing_account_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_apps_lists_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(DefaultLists = structure(logical(0), tags = list(type = "boolean")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_apps_lists_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsLists = structure(list(structure(list(ListArn = structure(logical(0), tags = list(type = "string")), ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_compliance_status_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_compliance_status_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyComplianceStatusList = structure(list(structure(list(PolicyOwner = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), EvaluationResults = structure(list(structure(list(ComplianceStatus = structure(logical(0), tags = list(type = "string")), ViolatorCount = structure(logical(0), tags = list(type = "long")), EvaluationLimitExceeded = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), IssueInfoMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_discovered_resources_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(MemberAccountIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceType = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_discovered_resources_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Items = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), AccountId = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_member_accounts_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_member_accounts_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(MemberAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_policies_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_policies_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyList = structure(list(structure(list(PolicyArn = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), SecurityServiceType = structure(logical(0), tags = list(type = "string")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_protocols_lists_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(DefaultLists = structure(logical(0), tags = list(type = "boolean")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_protocols_lists_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsLists = structure(list(structure(list(ListArn = structure(logical(0), tags = list(type = "string")), ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_resource_set_resources_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Identifier = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_resource_set_resources_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Items = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), AccountId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_resource_sets_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_resource_sets_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSets = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceSetStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_tags_for_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_tags_for_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_third_party_firewall_firewall_policies_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_third_party_firewall_firewall_policies_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewallFirewallPolicies = structure(list(structure(list(FirewallPolicyId = structure(logical(0), tags = list(type = "string")), FirewallPolicyName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string")), AdminScope = structure(list(AccountScope = structure(list(Accounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllAccountsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedAccounts = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), OrganizationalUnitScope = structure(list(OrganizationalUnits = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllOrganizationalUnitsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedOrganizationalUnits = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), RegionScope = structure(list(Regions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllRegionsEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PolicyTypeScope = structure(list(PolicyTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllPolicyTypesEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_admin_account_output <- function(...) {
list()
}
.fms$put_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_apps_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), AppsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(SnsTopicArn = structure(logical(0), tags = list(type = "string")), SnsRoleName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_notification_channel_output <- function(...) {
list()
}
.fms$put_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string")), PolicyOption = structure(list(NetworkFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ResourceSetIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PolicyDescription = structure(logical(0), tags = list(type = "string")), 
PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string")), PolicyOption = structure(list(NetworkFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ResourceSetIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PolicyDescription = structure(logical(0), tags = list(type = "string")), 
PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), PolicyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_protocols_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_protocols_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), ProtocolsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_resource_set_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSet = structure(list(Id = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), UpdateToken = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceSetStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_resource_set_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSet = structure(list(Id = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), UpdateToken = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceSetStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ResourceSetArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$tag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$tag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$untag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$untag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
| /cran/paws.security.identity/R/fms_interfaces.R | permissive | paws-r/paws | R | false | false | 72,384 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common populate
#' @include fms_service.R
NULL
.fms$associate_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$associate_admin_account_output <- function(...) {
list()
}
.fms$associate_third_party_firewall_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$associate_third_party_firewall_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewallStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_associate_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), Items = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_associate_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), FailedItems = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), Reason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_disassociate_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), Items = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$batch_disassociate_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSetIdentifier = structure(logical(0), tags = list(type = "string")), FailedItems = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), Reason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_apps_list_output <- function(...) {
list()
}
.fms$delete_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_notification_channel_output <- function(...) {
list()
}
.fms$delete_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), DeleteAllPolicyResources = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_policy_output <- function(...) {
list()
}
.fms$delete_protocols_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ListId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_protocols_list_output <- function(...) {
list()
}
.fms$delete_resource_set_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Identifier = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$delete_resource_set_output <- function(...) {
list()
}
.fms$disassociate_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$disassociate_admin_account_output <- function(...) {
list()
}
.fms$disassociate_third_party_firewall_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$disassociate_third_party_firewall_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewallStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$get_admin_account_output <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Response template: the admin account id and its role status, both strings.
  resp_shape <- structure(
    list(
      AdminAccount = structure(logical(0), tags = list(type = "string")),
      RoleStatus = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
.fms$get_admin_scope_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: the administrator account id as a string.
  req_shape <- structure(
    list(AdminAccount = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_admin_scope_output <- function(...) {
  # Gather all supplied arguments; environment() is listed before any local
  # assignment, so it contributes nothing beyond the dots here.
  args <- c(as.list(environment()), list(...))
  # Response template for GetAdminScope: an AdminScope structure containing
  # account / organizational-unit / region / policy-type scopes (each with a
  # member list plus all-enabled / exclude flags), and a Status string.
  # logical(0) placeholders mark empty slots to be filled by populate().
  shape <- structure(list(AdminScope = structure(list(AccountScope = structure(list(Accounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllAccountsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedAccounts = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), OrganizationalUnitScope = structure(list(OrganizationalUnits = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllOrganizationalUnitsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedOrganizationalUnits = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), RegionScope = structure(list(Regions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllRegionsEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PolicyTypeScope = structure(list(PolicyTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllPolicyTypesEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "structure")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # populate() (defined elsewhere in this file) presumably merges args into
  # the template.
  return(populate(args, shape))
}
.fms$get_apps_list_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: the list id (string) and a default-list flag (boolean).
  req_shape <- structure(
    list(
      ListId = structure(logical(0), tags = list(type = "string")),
      DefaultList = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_apps_list_output <- function(...) {
  # Gather all supplied arguments; environment() is listed before any local
  # assignment, so it contributes nothing beyond the dots here.
  args <- c(as.list(environment()), list(...))
  # Response template for GetAppsList: an AppsList structure (ids, names,
  # update token, create/update timestamps, the current app entries and a map
  # of previous versions) plus the list's ARN. logical(0) placeholders mark
  # empty slots to be filled by populate().
  shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), AppsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # populate() (defined elsewhere in this file) presumably merges args into
  # the template.
  return(populate(args, shape))
}
.fms$get_compliance_detail_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: the policy id and member account, both strings.
  req_shape <- structure(
    list(
      PolicyId = structure(logical(0), tags = list(type = "string")),
      MemberAccount = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_compliance_detail_output <- function(...) {
  # Gather all supplied arguments; environment() is listed before any local
  # assignment, so it contributes nothing beyond the dots here.
  args <- c(as.list(environment()), list(...))
  # Response template for GetComplianceDetail: a PolicyComplianceDetail
  # structure (policy owner/id, member account, a list of violators with
  # resource ids / reasons / metadata maps, an evaluation-limit flag, expiry
  # timestamp and issue-info map). logical(0) placeholders mark empty slots
  # to be filled by populate().
  shape <- structure(list(PolicyComplianceDetail = structure(list(PolicyOwner = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), Violators = structure(list(structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), ViolationReason = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), Metadata = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), EvaluationLimitExceeded = structure(logical(0), tags = list(type = "boolean")), ExpiredAt = structure(logical(0), tags = list(type = "timestamp")), IssueInfoMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "structure"))
  # populate() (defined elsewhere in this file) presumably merges args into
  # the template.
  return(populate(args, shape))
}
.fms$get_notification_channel_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # GetNotificationChannel takes no request members: an empty structure.
  req_shape <- structure(list(), tags = list(type = "structure"))
  populate(call_args, req_shape)
}
.fms$get_notification_channel_output <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Response template: the SNS topic ARN and SNS role name, both strings.
  resp_shape <- structure(
    list(
      SnsTopicArn = structure(logical(0), tags = list(type = "string")),
      SnsRoleName = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
.fms$get_policy_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: a single string member, the policy id.
  req_shape <- structure(
    list(PolicyId = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_policy_output <- function(...) {
  # Gather all supplied arguments; environment() is listed before any local
  # assignment, so it contributes nothing beyond the dots here.
  args <- c(as.list(environment()), list(...))
  # Response template for GetPolicy: a Policy structure (id, name, update
  # token, security-service policy data with optional network/third-party
  # firewall policy options, resource types/tags, include/exclude maps,
  # resource-set ids, description and status) plus the policy ARN.
  # logical(0) placeholders mark empty slots to be filled by populate().
  shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string")), PolicyOption = structure(list(NetworkFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ResourceSetIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PolicyDescription = structure(logical(0), tags = list(type = "string")), 
  PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), PolicyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # populate() (defined elsewhere in this file) presumably merges args into
  # the template.
  return(populate(args, shape))
}
.fms$get_protection_status_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: policy id, member account, a start/end time window,
  # and pagination members (NextToken, MaxResults).
  req_shape <- structure(
    list(
      PolicyId = structure(logical(0), tags = list(type = "string")),
      MemberAccountId = structure(logical(0), tags = list(type = "string")),
      StartTime = structure(logical(0), tags = list(type = "timestamp")),
      EndTime = structure(logical(0), tags = list(type = "timestamp")),
      NextToken = structure(logical(0), tags = list(type = "string")),
      MaxResults = structure(logical(0), tags = list(type = "integer"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_protection_status_output <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Response template: admin account id, service type, a data blob and a
  # pagination token, all strings.
  resp_shape <- structure(
    list(
      AdminAccountId = structure(logical(0), tags = list(type = "string")),
      ServiceType = structure(logical(0), tags = list(type = "string")),
      Data = structure(logical(0), tags = list(type = "string")),
      NextToken = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
.fms$get_protocols_list_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: the list id (string) and a default-list flag (boolean).
  req_shape <- structure(
    list(
      ListId = structure(logical(0), tags = list(type = "string")),
      DefaultList = structure(logical(0), tags = list(type = "boolean"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_protocols_list_output <- function(...) {
  # Gather all supplied arguments; environment() is listed before any local
  # assignment, so it contributes nothing beyond the dots here.
  args <- c(as.list(environment()), list(...))
  # Response template for GetProtocolsList: a ProtocolsList structure (ids,
  # name, update token, create/update timestamps, the current protocol list
  # and a map of previous versions) plus the list's ARN. logical(0)
  # placeholders mark empty slots to be filled by populate().
  shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), ProtocolsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # populate() (defined elsewhere in this file) presumably merges args into
  # the template.
  return(populate(args, shape))
}
.fms$get_resource_set_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: one string member identifying the resource set.
  req_shape <- structure(
    list(Identifier = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_resource_set_output <- function(...) {
  # Gather all supplied arguments; environment() is listed before any local
  # assignment, so it contributes nothing beyond the dots here.
  args <- c(as.list(environment()), list(...))
  # Response template for GetResourceSet: a ResourceSet structure (id, name,
  # description, update token, resource-type list, last-update timestamp and
  # status) plus the set's ARN. logical(0) placeholders mark empty slots to
  # be filled by populate().
  shape <- structure(list(ResourceSet = structure(list(Id = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), UpdateToken = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceSetStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ResourceSetArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
  # populate() (defined elsewhere in this file) presumably merges args into
  # the template.
  return(populate(args, shape))
}
.fms$get_third_party_firewall_association_status_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: the third-party firewall name as a string.
  req_shape <- structure(
    list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string"))),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_third_party_firewall_association_status_output <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Response template: the firewall status and marketplace onboarding
  # status, both strings.
  resp_shape <- structure(
    list(
      ThirdPartyFirewallStatus = structure(logical(0), tags = list(type = "string")),
      MarketplaceOnboardingStatus = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, resp_shape)
}
.fms$get_violation_details_input <- function(...) {
  # Capture arguments before creating locals.
  call_args <- c(as.list(environment()), list(...))
  # Request template: policy id, member account, resource id and resource
  # type, all strings.
  req_shape <- structure(
    list(
      PolicyId = structure(logical(0), tags = list(type = "string")),
      MemberAccount = structure(logical(0), tags = list(type = "string")),
      ResourceId = structure(logical(0), tags = list(type = "string")),
      ResourceType = structure(logical(0), tags = list(type = "string"))
    ),
    tags = list(type = "structure")
  )
  populate(call_args, req_shape)
}
.fms$get_violation_details_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ViolationDetail = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceViolations = structure(list(structure(list(AwsVPCSecurityGroupViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string")), PartialMatches = structure(list(structure(list(Reference = structure(logical(0), tags = list(type = "string")), TargetViolationReasons = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), PossibleSecurityGroupRemediationActions = structure(list(structure(list(RemediationActionType = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), RemediationResult = structure(list(IPV4Range = structure(logical(0), tags = list(type = "string")), IPV6Range = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), FromPort = structure(logical(0), tags = list(type = "long")), ToPort = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure")), IsDefaultAction = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), AwsEc2NetworkInterfaceViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolatingSecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure")), 
AwsEc2InstanceViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), AwsEc2NetworkInterfaceViolations = structure(list(structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolatingSecurityGroups = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), NetworkFirewallMissingFirewallViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingSubnetViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingExpectedRTViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), CurrentRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedRouteTable = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallPolicyModifiedViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), CurrentPolicyDescription = structure(list(StatelessRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = 
"string")), Priority = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), StatelessDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessFragmentDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessCustomActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), Priority = structure(logical(0), tags = list(type = "integer")), Override = structure(list(Action = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), StatefulDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulEngineOptions = structure(list(RuleOrder = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), ExpectedPolicyDescription = structure(list(StatelessRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), Priority = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), StatelessDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessFragmentDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatelessCustomActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), 
StatefulRuleGroups = structure(list(structure(list(RuleGroupName = structure(logical(0), tags = list(type = "string")), ResourceId = structure(logical(0), tags = list(type = "string")), Priority = structure(logical(0), tags = list(type = "integer")), Override = structure(list(Action = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), StatefulDefaultActions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), StatefulEngineOptions = structure(list(RuleOrder = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), NetworkFirewallInternetTrafficNotInspectedViolation = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), IsRouteTableUsedInDifferentAZ = structure(logical(0), tags = list(type = "boolean")), CurrentFirewallSubnetRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedFirewallEndpoint = structure(logical(0), tags = list(type = "string")), FirewallSubnetId = structure(logical(0), tags = list(type = "string")), ExpectedFirewallSubnetRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = 
"string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualFirewallSubnetRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), InternetGatewayId = structure(logical(0), tags = list(type = "string")), CurrentInternetGatewayRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedInternetGatewayRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualInternetGatewayRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), VpcId = structure(logical(0), tags = list(type = 
"string"))), tags = list(type = "structure")), NetworkFirewallInvalidRouteConfigurationViolation = structure(list(AffectedSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string")), IsRouteTableUsedInDifferentAZ = structure(logical(0), tags = list(type = "boolean")), ViolatingRoute = structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), CurrentFirewallSubnetRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedFirewallEndpoint = structure(logical(0), tags = list(type = "string")), ActualFirewallEndpoint = structure(logical(0), tags = list(type = "string")), ExpectedFirewallSubnetId = structure(logical(0), tags = list(type = "string")), ActualFirewallSubnetId = structure(logical(0), tags = list(type = "string")), ExpectedFirewallSubnetRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualFirewallSubnetRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = 
"string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), InternetGatewayId = structure(logical(0), tags = list(type = "string")), CurrentInternetGatewayRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedInternetGatewayRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ActualInternetGatewayRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallBlackHoleRouteDetectedViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), NetworkFirewallUnexpectedFirewallRoutesViolation = structure(list(FirewallSubnetId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string")), FirewallEndpoint = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallUnexpectedGatewayRoutesViolation = structure(list(GatewayId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), NetworkFirewallMissingExpectedRoutesViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ExpectedRoutes = structure(list(structure(list(IpV4Cidr = structure(logical(0), tags = list(type = "string")), PrefixListId = structure(logical(0), tags = list(type = "string")), IpV6Cidr = structure(logical(0), tags = list(type = "string")), ContributingSubnets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = 
list(type = "list")), AllowedTargets = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), RouteTableId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), VpcId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DnsRuleGroupPriorityConflictViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string")), ConflictingPriority = structure(logical(0), tags = list(type = "integer")), ConflictingPolicyId = structure(logical(0), tags = list(type = "string")), UnavailablePriorities = structure(list(structure(logical(0), tags = list(type = "integer"))), tags = list(type = "list"))), tags = list(type = "structure")), DnsDuplicateRuleGroupViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), DnsRuleGroupLimitExceededViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), ViolationTargetDescription = structure(logical(0), tags = list(type = "string")), NumberOfRuleGroupsAlreadyAssociated = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure")), PossibleRemediationActions = structure(list(Description = structure(logical(0), tags = list(type = "string")), Actions = structure(list(structure(list(Description = structure(logical(0), tags = list(type = "string")), OrderedRemediationActions = structure(list(structure(list(RemediationAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), EC2CreateRouteAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), DestinationCidrBlock = structure(logical(0), tags = list(type = 
"string")), DestinationPrefixListId = structure(logical(0), tags = list(type = "string")), DestinationIpv6CidrBlock = structure(logical(0), tags = list(type = "string")), VpcEndpointId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), GatewayId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2ReplaceRouteAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), DestinationCidrBlock = structure(logical(0), tags = list(type = "string")), DestinationPrefixListId = structure(logical(0), tags = list(type = "string")), DestinationIpv6CidrBlock = structure(logical(0), tags = list(type = "string")), GatewayId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2DeleteRouteAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), DestinationCidrBlock = structure(logical(0), tags = list(type = "string")), DestinationPrefixListId = structure(logical(0), tags = list(type = "string")), DestinationIpv6CidrBlock = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = 
"string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2CopyRouteTableAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), VpcId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2ReplaceRouteTableAssociationAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), AssociationId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2AssociateRouteTableAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), SubnetId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), GatewayId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), EC2CreateRouteTableAction = 
structure(list(Description = structure(logical(0), tags = list(type = "string")), VpcId = structure(list(ResourceId = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), FMSPolicyUpdateFirewallCreationConfigAction = structure(list(Description = structure(logical(0), tags = list(type = "string")), FirewallCreationConfig = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure")), Order = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))), tags = list(type = "list")), IsDefaultAction = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), FirewallSubnetIsOutOfScopeViolation = structure(list(FirewallSubnetId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZoneId = structure(logical(0), tags = list(type = "string")), VpcEndpointId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), RouteHasOutOfScopeEndpointViolation = structure(list(SubnetId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), RouteTableId = structure(logical(0), tags = list(type = "string")), ViolatingRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), SubnetAvailabilityZone = 
structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZoneId = structure(logical(0), tags = list(type = "string")), CurrentFirewallSubnetRouteTable = structure(logical(0), tags = list(type = "string")), FirewallSubnetId = structure(logical(0), tags = list(type = "string")), FirewallSubnetRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), InternetGatewayId = structure(logical(0), tags = list(type = "string")), CurrentInternetGatewayRouteTable = structure(logical(0), tags = list(type = "string")), InternetGatewayRoutes = structure(list(structure(list(DestinationType = structure(logical(0), tags = list(type = "string")), TargetType = structure(logical(0), tags = list(type = "string")), Destination = structure(logical(0), tags = list(type = "string")), Target = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure")), ThirdPartyFirewallMissingFirewallViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallMissingSubnetViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), TargetViolationReason = structure(logical(0), tags = list(type = "string"))), tags = list(type = 
"structure")), ThirdPartyFirewallMissingExpectedRouteTableViolation = structure(list(ViolationTarget = structure(logical(0), tags = list(type = "string")), VPC = structure(logical(0), tags = list(type = "string")), AvailabilityZone = structure(logical(0), tags = list(type = "string")), CurrentRouteTable = structure(logical(0), tags = list(type = "string")), ExpectedRouteTable = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), FirewallSubnetMissingVPCEndpointViolation = structure(list(FirewallSubnetId = structure(logical(0), tags = list(type = "string")), VpcId = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZone = structure(logical(0), tags = list(type = "string")), SubnetAvailabilityZoneId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ResourceDescription = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# ------------------------------------------------------------------
# Machine-generated paws request/response shape constructors for the
# AWS Firewall Manager (fms) List* operations.  Each helper collects
# the caller's arguments (via ...) and fills them into the operation's
# deparsed API shape with populate(); do not hand-edit these bodies.
# ------------------------------------------------------------------

# fms:ListAdminAccountsForOrganization request/response shapes.
.fms$list_admin_accounts_for_organization_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_admin_accounts_for_organization_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccounts = structure(list(structure(list(AdminAccount = structure(logical(0), tags = list(type = "string")), DefaultAdmin = structure(logical(0), tags = list(type = "boolean")), Status = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListAdminsManagingAccount request/response shapes.
.fms$list_admins_managing_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_admins_managing_account_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListAppsLists request/response shapes.
.fms$list_apps_lists_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(DefaultLists = structure(logical(0), tags = list(type = "boolean")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_apps_lists_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsLists = structure(list(structure(list(ListArn = structure(logical(0), tags = list(type = "string")), ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListComplianceStatus request/response shapes.
.fms$list_compliance_status_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_compliance_status_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyComplianceStatusList = structure(list(structure(list(PolicyOwner = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), MemberAccount = structure(logical(0), tags = list(type = "string")), EvaluationResults = structure(list(structure(list(ComplianceStatus = structure(logical(0), tags = list(type = "string")), ViolatorCount = structure(logical(0), tags = list(type = "long")), EvaluationLimitExceeded = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "list")), LastUpdated = structure(logical(0), tags = list(type = "timestamp")), IssueInfoMap = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "map"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListDiscoveredResources request/response shapes.
.fms$list_discovered_resources_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(MemberAccountIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceType = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_discovered_resources_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Items = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), AccountId = structure(logical(0), tags = list(type = "string")), Type = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Machine-generated paws shape constructors (continued): fms List*
# operations.  Same pattern throughout: gather ... into args and
# populate() the operation's deparsed API shape.

# fms:ListMemberAccounts request/response shapes.
.fms$list_member_accounts_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_member_accounts_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(MemberAccounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListPolicies request/response shapes.
.fms$list_policies_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_policies_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(PolicyList = structure(list(structure(list(PolicyArn = structure(logical(0), tags = list(type = "string")), PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), ResourceType = structure(logical(0), tags = list(type = "string")), SecurityServiceType = structure(logical(0), tags = list(type = "string")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListProtocolsLists request/response shapes.
.fms$list_protocols_lists_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(DefaultLists = structure(logical(0), tags = list(type = "boolean")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_protocols_lists_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsLists = structure(list(structure(list(ListArn = structure(logical(0), tags = list(type = "string")), ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListResourceSetResources request/response shapes.
.fms$list_resource_set_resources_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Identifier = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_resource_set_resources_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Items = structure(list(structure(list(URI = structure(logical(0), tags = list(type = "string")), AccountId = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListResourceSets request/response shapes.
.fms$list_resource_sets_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_resource_sets_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSets = structure(list(structure(list(Id = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceSetStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# Machine-generated paws shape constructors (continued): fms tag
# listing, third-party firewall listing, and the first Put* operations.

# fms:ListTagsForResource request/response shapes.
.fms$list_tags_for_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_tags_for_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:ListThirdPartyFirewallFirewallPolicies request/response shapes.
.fms$list_third_party_firewall_firewall_policies_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewall = structure(logical(0), tags = list(type = "string")), NextToken = structure(logical(0), tags = list(type = "string")), MaxResults = structure(logical(0), tags = list(type = "integer"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$list_third_party_firewall_firewall_policies_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ThirdPartyFirewallFirewallPolicies = structure(list(structure(list(FirewallPolicyId = structure(logical(0), tags = list(type = "string")), FirewallPolicyName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), NextToken = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:PutAdminAccount request shape; the response carries no payload,
# hence the empty list() output helper.
.fms$put_admin_account_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AdminAccount = structure(logical(0), tags = list(type = "string")), AdminScope = structure(list(AccountScope = structure(list(Accounts = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllAccountsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedAccounts = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), OrganizationalUnitScope = structure(list(OrganizationalUnits = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllOrganizationalUnitsEnabled = structure(logical(0), tags = list(type = "boolean")), ExcludeSpecifiedOrganizationalUnits = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), RegionScope = structure(list(Regions = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllRegionsEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure")), PolicyTypeScope = structure(list(PolicyTypes = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), AllPolicyTypesEnabled = structure(logical(0), tags = list(type = "boolean"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_admin_account_output <- function(...) {
list()
}
# fms:PutAppsList request/response shapes.
.fms$put_apps_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_apps_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(AppsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), AppsList = structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list")), PreviousAppsList = structure(list(structure(list(structure(list(AppName = structure(logical(0), tags = list(type = "string")), Protocol = structure(logical(0), tags = list(type = "string")), Port = structure(logical(0), tags = list(type = "long"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), AppsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:PutNotificationChannel request shape; no response payload.
.fms$put_notification_channel_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(SnsTopicArn = structure(logical(0), tags = list(type = "string")), SnsRoleName = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_notification_channel_output <- function(...) {
list()
}
# Machine-generated paws shape constructors (continued): fms Put* and
# tagging operations.

# fms:PutPolicy request/response shapes.
.fms$put_policy_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string")), PolicyOption = structure(list(NetworkFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ResourceSetIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PolicyDescription = structure(logical(0), tags = list(type = "string")), 
PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_policy_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(Policy = structure(list(PolicyId = structure(logical(0), tags = list(type = "string")), PolicyName = structure(logical(0), tags = list(type = "string")), PolicyUpdateToken = structure(logical(0), tags = list(type = "string")), SecurityServicePolicyData = structure(list(Type = structure(logical(0), tags = list(type = "string")), ManagedServiceData = structure(logical(0), tags = list(type = "string")), PolicyOption = structure(list(NetworkFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ThirdPartyFirewallPolicy = structure(list(FirewallDeploymentModel = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "structure"))), tags = list(type = "structure")), ResourceType = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), ResourceTags = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list")), ExcludeResourceTags = structure(logical(0), tags = list(type = "boolean")), RemediationEnabled = structure(logical(0), tags = list(type = "boolean")), DeleteUnusedFMManagedResources = structure(logical(0), tags = list(type = "boolean")), IncludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ExcludeMap = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map")), ResourceSetIds = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PolicyDescription = structure(logical(0), tags = list(type = "string")), 
PolicyStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), PolicyArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:PutProtocolsList request/response shapes.
.fms$put_protocols_list_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_protocols_list_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ProtocolsList = structure(list(ListId = structure(logical(0), tags = list(type = "string")), ListName = structure(logical(0), tags = list(type = "string")), ListUpdateToken = structure(logical(0), tags = list(type = "string")), CreateTime = structure(logical(0), tags = list(type = "timestamp")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ProtocolsList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), PreviousProtocolsList = structure(list(structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "map"))), tags = list(type = "structure")), ProtocolsListArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:PutResourceSet request/response shapes.
.fms$put_resource_set_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSet = structure(list(Id = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), UpdateToken = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceSetStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$put_resource_set_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceSet = structure(list(Id = structure(logical(0), tags = list(type = "string")), Name = structure(logical(0), tags = list(type = "string")), Description = structure(logical(0), tags = list(type = "string")), UpdateToken = structure(logical(0), tags = list(type = "string")), ResourceTypeList = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list")), LastUpdateTime = structure(logical(0), tags = list(type = "timestamp")), ResourceSetStatus = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure")), ResourceSetArn = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:TagResource request/response shapes (response is an empty structure).
.fms$tag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string")), TagList = structure(list(structure(list(Key = structure(logical(0), tags = list(type = "string")), Value = structure(logical(0), tags = list(type = "string"))), tags = list(type = "structure"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$tag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
# fms:UntagResource request/response shapes (response is an empty structure).
.fms$untag_resource_input <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(ResourceArn = structure(logical(0), tags = list(type = "string")), TagKeys = structure(list(structure(logical(0), tags = list(type = "string"))), tags = list(type = "list"))), tags = list(type = "structure"))
return(populate(args, shape))
}
.fms$untag_resource_output <- function(...) {
args <- c(as.list(environment()), list(...))
shape <- structure(list(), tags = list(type = "structure"))
return(populate(args, shape))
}
|
#' Build a DropboxCredentials object from already-acquired OAuth tokens.
#'
#' No need for the request/authorize URIs as those are only used when
#' acquiring the tokens. We assume you already have them; otherwise use
#' dropbox_oauth.
#'
#' NOTE(review): the signMethod argument is accepted but never assigned
#' to a slot of .obj in this constructor -- confirm whether
#' .obj@signMethod should be set here or is defaulted by the class.
#'
#' @export Dropbox
Dropbox <-
function(consumerKey, consumerSecret,
oauthKey, oauthSecret,
signMethod = "HMAC",
.obj = new("DropboxCredentials"))
{
# Copy the two OAuth key pairs onto the credentials object.
.obj@consumerKey = consumerKey
.obj@consumerSecret = consumerSecret
.obj@oauthKey = oauthKey
.obj@oauthSecret = oauthSecret
# Return invisibly so interactive calls do not print the secrets.
invisible(.obj)
}
#' Generic constructor for a DropboxFolder: a Dropbox path bound to a
#' set of credentials. Methods dispatch on (path, consumerKey).
#' @examples {
#' dummy = DropboxFolder("Dummy", drop)
#' }
setGeneric("DropboxFolder",
function(path, consumerKey, consumerSecret,
oauthKey, oauthSecret,
signMethod = "HMAC", ...,
.obj = "DropboxFolder")
{
standardGeneric("DropboxFolder")
})
# DropboxFolder for a character path plus an existing credentials object.
# Note the dispatch signature: the DropboxCredentials object arrives via
# the consumerKey formal (second argument of the generic); the remaining
# token arguments are unused on this path.
setMethod("DropboxFolder", c("character", "DropboxCredentials"),
function(path, consumerKey, consumerSecret,
oauthKey, oauthSecret,
signMethod = "HMAC", ...,
.obj = "DropboxFolder")
{
# .obj may be supplied as a class name; instantiate it from the
# credentials (held in consumerKey, see dispatch note above).
if(is.character(.obj))
.obj = new(.obj, consumerKey)
.obj@path = path
.obj
})
# Disabled experiment: S4 methods to make ls() list a Dropbox directory
# when handed a DropboxCredentials object. Kept under if(FALSE) because
# the approach does not work -- see the XXX note below.
if(FALSE) {
if(!isGeneric("ls"))
setGeneric("ls",
function (name, pos = -1, envir = as.environment(pos), all.names = FALSE, pattern, ...)
standardGeneric("ls"))
#XXX This doesn't work. ls() is probably using non-standard evaluation.
setMethod("ls", "ANY",
function (name, pos = -1, envir = as.environment(pos), all.names = FALSE, pattern, ...) {
browser()
base::ls(name)
})
setMethod("ls", "DropboxCredentials",
function (name, pos = -1, envir = as.environment(pos), all.names = FALSE, pattern, ...) {
dropbox_dir(name)
})
}
if(!isGeneric("dir"))
setGeneric("dir")
#XXX This doesn't work. ls() is probably using non-standard evaluation.
setMethod("dir", "DropboxCredentials",
function (path = ".", pattern = NULL, all.files = FALSE, full.names = FALSE,
recursive = FALSE, ignore.case = FALSE, include.dirs = FALSE) {
dropbox_dir(path)
})
setMethod("$", "DropboxCredentials",
function(x, name) {
id = sprintf("dropbox_%s", name)
# currently we look in the rDrop package.
# However, we could look along the search path
# and allow for people to extend the set of methods
# However, they can do this by overriding this $ method for their
# own class.
#
ns = getNamespace("rDrop")
if(!exists(id, ns, mode = "function"))
raiseError(c("no method named ", id, " for ", class(x)), "NoMethod")
# We could get the function, but we will leave
# it for now to make the resulting more readable and indicative.
# fun = get(id, ns, mode = "function")
f = function(...) fun(x, ...)
b = body(f)
b[[1]] = as.name(id) # could inser the actual fun above
body(f) = b
return(f)
# could create the functions manually if we want special .
switch(name,
delete = function(...) dropbox_delete(x, ...),
get = function(...) dropbox_get(x, ...),
function(...) fun(x, ...)
)
})
setMethod("[[", c("DropboxCredentials", "character", "missing"),
function(x, i, j, ...) {
dropbox_get(x, i, ...)
})
# Disabled (never defined): folder-relative [[ that would prepend the
# folder's path; the prefixing is handled elsewhere instead.
if(FALSE) # path added in method
setMethod("[[", c("DropboxFolder", "character", "missing"),
          function(x, i, j, ...) {
            dropbox_get(x, sprintf("%s/%s", x@path, i), ...)
          })
setMethod("[[<-", c("DropboxCredentials", "character", "missing"),
function(x, i, j, ...) {
dropbox_put(x, i, ...)
})
# Disabled (never defined): folder-relative [[<- counterpart; see above.
if(FALSE) # path added in method
setMethod("[[<-", c("DropboxFolder", "character", "missing"),
          function(x, i, j, ...) {
            dropbox_put(x, sprintf("%s/%s", x@path, i), ...)
          })
setGeneric("getPath",
function(path, url = character(), cred, ...)
standardGeneric("getPath"))
# A class that indicates that the path has already been expanded with the
# folder information.
setClass("FolderExpandedPath", contains = "character")
setMethod("getPath", c("FolderExpandedPath"),
function(path, url = character(), cred, ...) {
path
})
setMethod("getPath", c("character"),
function(path, url = character(), cred, ...) {
tmp = if (length(path) > 0) {
paste(c(url, path), collapse = "/") # prepend / if url is character() ???
} else {
if(length(url))
url
else
"/" # or leave as character() ?
}
new("FolderExpandedPath", tmp) # gsub("//", "/", tmp))
})
setMethod("getPath", c("character", cred = "DropboxFolder"),
function(path, url = character(), cred, ...)
getPath(c(cred@path, path), url))
| /R/dropboxMethods.r | no_license | karthik/rDrop | R | false | false | 5,320 | r | #' @export Dropbox
# Build a DropboxCredentials object from already-acquired OAuth tokens.
# No authorization URIs are needed here (use dropbox_oauth otherwise).
# NOTE: signMethod is accepted but never stored on the object.
Dropbox <-
function(consumerKey, consumerSecret,
         oauthKey, oauthSecret,
         signMethod = "HMAC",
         .obj = new("DropboxCredentials"))
{
   # Fill each credential slot on the (possibly caller-supplied) S4 object.
   .obj@consumerKey <- consumerKey
   .obj@consumerSecret <- consumerSecret
   .obj@oauthKey <- oauthKey
   .obj@oauthSecret <- oauthSecret
   # Return invisibly; constructors like this are normally assigned.
   invisible(.obj)
}
#' @examples {
#' dummy = DropboxFolder("Dummy", drop)
#' }
setGeneric("DropboxFolder",
function(path, consumerKey, consumerSecret,
oauthKey, oauthSecret,
signMethod = "HMAC", ...,
.obj = "DropboxFolder")
{
standardGeneric("DropboxFolder")
})
setMethod("DropboxFolder", c("character", "DropboxCredentials"),
function(path, consumerKey, consumerSecret,
oauthKey, oauthSecret,
signMethod = "HMAC", ...,
.obj = "DropboxFolder")
{
if(is.character(.obj))
.obj = new(.obj, consumerKey)
.obj@path = path
.obj
})
if(FALSE) {
if(!isGeneric("ls"))
setGeneric("ls",
function (name, pos = -1, envir = as.environment(pos), all.names = FALSE, pattern, ...)
standardGeneric("ls"))
#XXX This doesn't work. ls() is probably using non-standard evaluation.
setMethod("ls", "ANY",
function (name, pos = -1, envir = as.environment(pos), all.names = FALSE, pattern, ...) {
browser()
base::ls(name)
})
setMethod("ls", "DropboxCredentials",
function (name, pos = -1, envir = as.environment(pos), all.names = FALSE, pattern, ...) {
dropbox_dir(name)
})
}
if(!isGeneric("dir"))
setGeneric("dir")
#XXX This doesn't work. ls() is probably using non-standard evaluation.
setMethod("dir", "DropboxCredentials",
function (path = ".", pattern = NULL, all.files = FALSE, full.names = FALSE,
recursive = FALSE, ignore.case = FALSE, include.dirs = FALSE) {
dropbox_dir(path)
})
setMethod("$", "DropboxCredentials",
function(x, name) {
id = sprintf("dropbox_%s", name)
# currently we look in the rDrop package.
# However, we could look along the search path
# and allow for people to extend the set of methods
# However, they can do this by overriding this $ method for their
# own class.
#
ns = getNamespace("rDrop")
if(!exists(id, ns, mode = "function"))
raiseError(c("no method named ", id, " for ", class(x)), "NoMethod")
# We could get the function, but we will leave
# it for now to make the resulting more readable and indicative.
# fun = get(id, ns, mode = "function")
f = function(...) fun(x, ...)
b = body(f)
b[[1]] = as.name(id) # could inser the actual fun above
body(f) = b
return(f)
# could create the functions manually if we want special .
switch(name,
delete = function(...) dropbox_delete(x, ...),
get = function(...) dropbox_get(x, ...),
function(...) fun(x, ...)
)
})
setMethod("[[", c("DropboxCredentials", "character", "missing"),
function(x, i, j, ...) {
dropbox_get(x, i, ...)
})
if(FALSE) # path added in method
setMethod("[[", c("DropboxFolder", "character", "missing"),
function(x, i, j, ...) {
dropbox_get(x, sprintf("%s/%s", x@path, i), ...)
})
setMethod("[[<-", c("DropboxCredentials", "character", "missing"),
function(x, i, j, ...) {
dropbox_put(x, i, ...)
})
if(FALSE) # path added in method
setMethod("[[<-", c("DropboxFolder", "character", "missing"),
function(x, i, j, ...) {
dropbox_put(x, sprintf("%s/%s", x@path, i), ...)
})
setGeneric("getPath",
function(path, url = character(), cred, ...)
standardGeneric("getPath"))
# A class that indicates that the path has already been expanded with the
# folder information.
setClass("FolderExpandedPath", contains = "character")
setMethod("getPath", c("FolderExpandedPath"),
function(path, url = character(), cred, ...) {
path
})
setMethod("getPath", c("character"),
function(path, url = character(), cred, ...) {
tmp = if (length(path) > 0) {
paste(c(url, path), collapse = "/") # prepend / if url is character() ???
} else {
if(length(url))
url
else
"/" # or leave as character() ?
}
new("FolderExpandedPath", tmp) # gsub("//", "/", tmp))
})
setMethod("getPath", c("character", cred = "DropboxFolder"),
function(path, url = character(), cred, ...)
getPath(c(cred@path, path), url))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method_process_definition.R
\name{is_ti_method}
\alias{is_ti_method}
\title{Tests whether an object is a TI method description}
\usage{
is_ti_method(object)
}
\arguments{
\item{object}{The object to be tested}
}
\description{
Tests whether an object is a TI method description
}
| /man/is_ti_method.Rd | no_license | dweemx/dynwrap | R | false | true | 357 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/method_process_definition.R
\name{is_ti_method}
\alias{is_ti_method}
\title{Tests whether an object is a TI method description}
\usage{
is_ti_method(object)
}
\arguments{
\item{object}{The object to be tested}
}
\description{
Tests whether an object is a TI method description
}
|
# -----------------------------------------------------------------------------
# Load the Spark-processed k-means results for use by the plotting scripts.
# -----------------------------------------------------------------------------
# Usage: in an R session, run
# source("~/workspace_github/hadoop-ws/r-ws/draw-graphys-ggplot2/read-data-of-kmeans-v1.R")
# which creates the following data objects:
# 1. dataset GoodM1
# (1) fileData_GoodM1_metrics_unsorted
# (2) fileData_GoodM1_metrics_sorted
# (3) fileData__GoodM1_clusterCenters  (currently not usable)
# (4) fileData_GoodM1_cluster
# (5) fileData_GoodM1_clusterSpecial
# 2. dataset GoodM2
# 3. dataset BadF2ExcludeF3
# 4. dataset BadF3
#
# Important:
# loadMetrics : loads metrics files
#               (works for _metrics_unsorted and _metrics_sorted)
# loadCluster : loads cluster files
#               (works for _cluster and _clusterSpecial, but NOT _clusterCenters)
# !!!!! there is currently no loader function for _clusterCenters files
# -----------------------------------------------------------------------------
# *****************************************************************************
# Alternatively, pick the file interactively on each run:
# mydata = read.table(file.choose(), header=FALSE, sep=",")
# *****************************************************************************
# FilePath
# NOTE(review): this script expects `dataSetID` to already be defined by the
# caller before sourcing; the commented line shows an example value.
# dataSetID <- "s98_L2k20_clusterCenters" # s01
# Linux variant
rootFilePathOfIn <- stringr::str_c("~/workspace_github/hadoop-ws/r-ws/result-data/",dataSetID, "/")
# Windows variant
#rootFilePathOfIn <- stringr::str_c("J:/home/hadoop/workspace_github/hadoop-ws/r-ws/result-data/",dataSetID, "/")
# rows: dataset + single/double month
# NOTE(review): this first dimRows value is dead code -- it is overwritten by
# the 12-element dimRows below before it is ever used.
dimRows <- c("S98_GoodM1", "S98_GoodM2", "S98_BadF2ExcludeF3", "S98_BadF3")
dimCols <- c("unsorted", "sorted", "clustercenters", "cluster", "cluster0fSpecial")
# file names: one row of 5 entries per dataset/series; only column 5
# (cluster0fSpecial) is populated for this L2/k20 run
filesVector_s98_standalone_L2 <- c(
"", "", "", "", "s98_L2k20_GoodM1_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM1_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM1_NotTsNotLadder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM2_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM2_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM2_NotTsNotLadder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF2ExcludeF3_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF2ExcludeF3_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF2ExcludeF3_NotTsNotLadder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF3_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF3_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF3_NotTsNotLadder_clusterCenters.csv"
)
filesVector <- filesVector_s98_standalone_L2 #filesVector_s98_standalone # filesVector_s01
# build the file-name matrix (rows = dataset/series, cols = file kinds)
dimRows <- c("S98_GoodM1_Ladder", "S98_GoodM1_Ts", "S98_GoodM1_NotTsNotLadder",
"S98_GoodM2_Ladder", "S98_GoodM2_Ts", "S98_GoodM2_NotTsNotLadder",
"S98_BadF2ExcludeF3_Ladder", "S98_BadF2ExcludeF3_Ts", "S98_BadF2ExcludeF3_NotTsNotLadder",
"S98_BadF3_Ladder", "S98_BadF3_Ts", "S98_BadF3_NotTsNotLadder"
)
filesMatrix <- matrix(filesVector, byrow=TRUE, nrow=length(dimRows), ncol=length(dimCols),dimnames=list(dimRows, dimCols))
#str(filesMatrix)
# *****************************************************************************
# metrics
# *****************************************************************************
# files to read on this run (column 5 = cluster0fSpecial)
# GoodM1
file_GoodM1_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[1,5])
file_GoodM1_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[2,5])
file_GoodM1_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[3,5])
# GoodM2
file_GoodM2_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[4,5])
file_GoodM2_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[5,5])
file_GoodM2_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[6,5])
# BadF2ExcludeF3
file_BadF2ExcludeF3_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[7,5])
file_BadF2ExcludeF3_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[8,5])
file_BadF2ExcludeF3_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[9,5])
# BadF3
file_BadF3_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[10,5])
file_BadF3_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[11,5])
file_BadF3_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[12,5])
# *****************************************************************************
# Function definitions
# *****************************************************************************
# -----------------------------------------------------------------------------
# Load a k-means metrics file (works for both _metrics_unsorted and
# _metrics_sorted).  The file is comma-separated with no header row and six
# columns: k, maxIterations, WSSSE, start time, end time, model description.
# Returns a data.frame with labelled columns; k and maxIterations become
# factors since they are run settings, not measurements.
loadMetrics <- function(filename) {
  # Read the headerless CSV.
  metrics <- read.table(filename, header = FALSE, sep = ",")
  # Attach the column labels.
  colnames(metrics) <- c("k", "maxIterations", "WSSSE",
                         "聚类开始时间", "聚类结束时间", "KMeansModel")
  # Treat the tuning parameters as categorical.
  metrics$k <- factor(metrics$k)
  metrics$maxIterations <- factor(metrics$maxIterations)
  metrics
}
# -----------------------------------------------------------------------------
# Load a cluster file (works for _cluster and _clusterSpecial).
# NOTE: cannot load _clusterCenters files.
# Returns list(vpm, vpm.v, vpm.t):
#   vpm   -- wide table: clusterID, counter, then 12 monthly columns 201301-12
#   vpm.v -- long/melted form: clusterID, counter, ym, value
#   vpm.t -- transposed form of vpm with the row names copied into column ym
loadCluster <- function(filename) {
  # Read the headerless CSV.
  myclustercenters = read.table(filename, header=FALSE, sep=",") # no header row (header=FALSE)
  #str(myclustercenters)
  # -----------------------------------------------------------------------------
  # vpm vpm201301,vpm201302,vpm201303,vpm201304,vpm201305,vpm201306,vpm201307,vpm201308,vpm201309,vpm201310,vpm201311,vpm201312,vpm201401,vpm201402,vpm201403,vpm201404,vpm201405,vpm201406
  # Keep the first 2+12 columns: id/counter plus the 12 monthly usage columns
  # for 201301-201312 (the 2014 columns are dropped).
  vpm <- myclustercenters[, c(1:(2+12))]
  # Label the columns of vpm.
  newcolnames <- c("clusterID", "counter", "201301", "201302", "201303", "201304", "201305", "201306", "201307", "201308", "201309", "201310", "201311", "201312")
  names(vpm) <- c(newcolnames) # names(vpm.clusterID) <- c(newcolnames,"clusterID")
  rm(newcolnames)
  # -----------------------------------------------------------------------------
  # Build vpm.v.  (An older format needed a clusterID column synthesised from
  # the row numbers; the new format already carries clusterID, so the code
  # below stays commented out.)
  #newcolumn <- factor(as.numeric(rownames(vpm)))
  #vpm.clusterID <- data.frame(vpm, newcolumn) # why do the names become x201301 after this?
  #newcolnames <- c("201301", "201302", "201303", "201304", "201305", "201306", "201307", "201308", "201309", "201310", "201311", "201312")
  #names(vpm.clusterID) <- c(newcolnames,"clusterID")
  #rm(newcolnames)
  # -----------------------------------------------------------------------------
  # Wide-to-long reshape.
  library(reshape2)
  # 'counter' is not strictly needed, but keeping it preserves it as a column.
  #vpm.v <- melt(vpm, id = c("clusterID", "counter"), variable_name = "ym") # colnames/ym -> value
  # reshape2 ignores variable_name, so rename the columns afterwards instead.
  vpm.v <- melt(vpm, id = c("clusterID", "counter"))
  # Label the columns of vpm.v.
  newcolnames <- c("clusterID", "counter", "ym", "value")
  names(vpm.v) <- c(newcolnames)
  rm(newcolnames)
  # column used for plotting
  #vpm.v$ym <- as.factor(vpm.v$colnames)
  # -----------------------------------------------------------------------------
  # Transpose vpm into vpm.t.
  #vpm.t <- t(vpm) # this way the values become a character vector
  vpm.t <- as.data.frame(t(vpm))
  # Copy the row names into a new column 'ym'.
  # (data.frame(vpm.t, rownames(vpm.t)) would name the column rownames.vpm.t.,
  # hence the two explicit statements below.)
  #vpm.t <- data.frame(vpm.t, factor(rownames(vpm))) # factor is unnecessary here
  #vpm.t <- data.frame(vpm.t, rownames(vpm.t))
  ym <- rownames(vpm.t)
  vpm.t <- data.frame(vpm.t, ym)
  # Return all three shapes.
  mylist <- list(vpm, vpm.v, vpm.t)
  return (mylist)
}
# *****************************************************************************
# Load the data into variables
# *****************************************************************************
# GoodM1
fileData_GoodM1_clusterSpecial_Ladder <- loadCluster(file_GoodM1_clusterSpecial_Ladder)
fileData_GoodM1_clusterSpecial_Ts <- loadCluster(file_GoodM1_clusterSpecial_Ts)
fileData_GoodM1_clusterSpecial_NotTsNotLadder <- loadCluster(file_GoodM1_clusterSpecial_NotTsNotLadder)
# GoodM2
fileData_GoodM2_clusterSpecial_Ladder <- loadCluster(file_GoodM2_clusterSpecial_Ladder)
fileData_GoodM2_clusterSpecial_Ts <- loadCluster(file_GoodM2_clusterSpecial_Ts)
fileData_GoodM2_clusterSpecial_NotTsNotLadder <- loadCluster(file_GoodM2_clusterSpecial_NotTsNotLadder)
# BadF2ExcludeF3
fileData_BadF2ExcludeF3_clusterSpecial_Ladder <- loadCluster(file_BadF2ExcludeF3_clusterSpecial_Ladder)
fileData_BadF2ExcludeF3_clusterSpecial_Ts <- loadCluster(file_BadF2ExcludeF3_clusterSpecial_Ts)
fileData_BadF2ExcludeF3_clusterSpecial_NotTsNotLadder <- loadCluster(file_BadF2ExcludeF3_clusterSpecial_NotTsNotLadder)
# BadF3
fileData_BadF3_clusterSpecial_Ladder <- loadCluster(file_BadF3_clusterSpecial_Ladder)
fileData_BadF3_clusterSpecial_Ts <- loadCluster(file_BadF3_clusterSpecial_Ts)
fileData_BadF3_clusterSpecial_NotTsNotLadder <- loadCluster(file_BadF3_clusterSpecial_NotTsNotLadder)
# *****************************************************************************
# Archived snippets (kept for reference; all commented out)
# *****************************************************************************
# --------------------------------------------------------------
# Strip the leading "[" from the first column and convert it to numeric:
#tmpX <- myclustercenters$V1 # note: tmpX is a vector here
#tmpX.substr <- substr(tmpX, 2, nchar(as.character(tmpX))) # take from the 2nd character on
#myclustercenters$V1 <- as.numeric(tmpX.substr)
#rm(tmpX)
# Strip the trailing "]" from the last column and convert it to numeric:
#tmpLen <- length(names(myclustercenters))
#tmpY <- myclustercenters[tmpLen] # note: tmpY is a one-column data.frame
#names(tmpY) <- c("tmpColID") # rename the column
#tmpZ <- tmpY$tmpColID # fetch via the new name: tmpZ is a vector
#tmpZ.substr <- substr(tmpZ, 1, nchar(as.character(tmpZ)) -1 ) # drop the last character
#myclustercenters[tmpLen] <- as.numeric(tmpZ.substr)
#rm(tmpY)
#rm(tmpZ)
| /r-ws/draw-graphys-ggplot2/read-data-of-kmeans-v1-for-standalone-L2.R | no_license | un-knower/hadoop-ws | R | false | false | 10,800 | r | # -----------------------------------------------------------------------------
# 读取spark分析处理后的数据,供画图程序使用
# -----------------------------------------------------------------------------
# 运行方法: 在R环境中,使用下面语句
# source("~/workspace_github/hadoop-ws/r-ws/draw-graphys-ggplot2/read-data-of-kmeans-v1.R")
# 得到如下数据对象
# 1. 数据集 GoodM1
# (1) fileData_GoodM1_metrics_unsorted
# (2) fileData_GoodM1_metrics_sorted
# (3) fileData__GoodM1_clusterCenters 目前无效
# (4) fileData_GoodM1_cluster
# (5) fileData_GoodM1_clusterSpecial
# 2. 数据集 GoodM2
# 3. 数据集 BadF2ExcludeF3
# 4. 数据集 BadF3
#
# 特别注意:
# loadMetrics : 加载 metrics 的函数
# 适用于 _metrics_unsorted 和 _metrics_sorted
# loadCluster : 加载 Cluster 的函数
# 适用于 _cluster 和 _clusterSpecial, 但不能加载 _clusterCenters
# !!!!! 当前没有加载 _clusterCenters 的函数
# -----------------------------------------------------------------------------
# *****************************************************************************
# 也可以调整为每次手动选择文件
# mydata = read.table(file.choose(), header=FALSE, sep=",")
# *****************************************************************************
# FilePath
# dataSetID <- "s98_L2k20_clusterCenters" # s01
# linux版本
rootFilePathOfIn <- stringr::str_c("~/workspace_github/hadoop-ws/r-ws/result-data/",dataSetID, "/")
# windows版本
#rootFilePathOfIn <- stringr::str_c("J:/home/hadoop/workspace_github/hadoop-ws/r-ws/result-data/",dataSetID, "/")
# 行: 数据集+单月/双月
dimRows <- c("S98_GoodM1", "S98_GoodM2", "S98_BadF2ExcludeF3", "S98_BadF3")
dimCols <- c("unsorted", "sorted", "clustercenters", "cluster", "cluster0fSpecial")
# 文件名
filesVector_s98_standalone_L2 <- c(
"", "", "", "", "s98_L2k20_GoodM1_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM1_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM1_NotTsNotLadder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM2_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM2_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_GoodM2_NotTsNotLadder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF2ExcludeF3_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF2ExcludeF3_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF2ExcludeF3_NotTsNotLadder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF3_Ladder_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF3_Ts_clusterCenters.csv",
"", "", "", "", "s98_L2k20_BadF3_NotTsNotLadder_clusterCenters.csv"
)
filesVector <- filesVector_s98_standalone_L2 #filesVector_s98_standalone # filesVector_s01
# 构建矩阵
dimRows <- c("S98_GoodM1_Ladder", "S98_GoodM1_Ts", "S98_GoodM1_NotTsNotLadder",
"S98_GoodM2_Ladder", "S98_GoodM2_Ts", "S98_GoodM2_NotTsNotLadder",
"S98_BadF2ExcludeF3_Ladder", "S98_BadF2ExcludeF3_Ts", "S98_BadF2ExcludeF3_NotTsNotLadder",
"S98_BadF3_Ladder", "S98_BadF3_Ts", "S98_BadF3_NotTsNotLadder"
)
filesMatrix <- matrix(filesVector, byrow=TRUE, nrow=length(dimRows), ncol=length(dimCols),dimnames=list(dimRows, dimCols))
#str(filesMatrix)
# *****************************************************************************
# metrics
# *****************************************************************************
# 本次要读取的文件名
# GoodM1
file_GoodM1_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[1,5])
file_GoodM1_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[2,5])
file_GoodM1_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[3,5])
# GoodM2
file_GoodM2_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[4,5])
file_GoodM2_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[5,5])
file_GoodM2_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[6,5])
# BadF2ExcludeF3
file_BadF2ExcludeF3_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[7,5])
file_BadF2ExcludeF3_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[8,5])
file_BadF2ExcludeF3_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[9,5])
# BadF3
file_BadF3_clusterSpecial_Ladder <- stringr::str_c(rootFilePathOfIn, filesMatrix[10,5])
file_BadF3_clusterSpecial_Ts <- stringr::str_c(rootFilePathOfIn, filesMatrix[11,5])
file_BadF3_clusterSpecial_NotTsNotLadder <- stringr::str_c(rootFilePathOfIn, filesMatrix[12,5])
# *****************************************************************************
# 函数定义
# *****************************************************************************
# -----------------------------------------------------------------------------
# 加载 metrics 的函数
# 适用于 _metrics_unsorted 和 _metrics_sorted
# Load a k-means metrics file (works for both _metrics_unsorted and
# _metrics_sorted): headerless CSV with six columns.  Returns a data.frame
# with labelled columns; k and maxIterations become factors.
loadMetrics <- function(filename) {
  # Read the headerless CSV.
  metrics <- read.table(filename, header = FALSE, sep = ",")
  # Attach the column labels.
  colnames(metrics) <- c("k", "maxIterations", "WSSSE",
                         "聚类开始时间", "聚类结束时间", "KMeansModel")
  # Treat the tuning parameters as categorical.
  metrics$k <- factor(metrics$k)
  metrics$maxIterations <- factor(metrics$maxIterations)
  metrics
}
# -----------------------------------------------------------------------------
# 加载 Cluster 的函数
# 适用于 _cluster 和 _clusterSpecial
# 特别注意: 不能加载 _clusterCenters
loadCluster <- function(filename) {
# 读取文件
myclustercenters = read.table(filename, header=FALSE, sep=",") # read table file ,首行无列名(header=FALSE)
#str(myclustercenters)
# -----------------------------------------------------------------------------
# vpm vpm201301,vpm201302,vpm201303,vpm201304,vpm201305,vpm201306,vpm201307,vpm201308,vpm201309,vpm201310,vpm201311,vpm201312,vpm201401,vpm201402,vpm201403,vpm201404,vpm201405,vpm201406
# 取前面 2+12 列,即从 201301~201312这12个月的月用点量
vpm <- myclustercenters[, c(1:(2+12))]
# 为vpm设置变量标签
newcolnames <- c("clusterID", "counter", "201301", "201302", "201303", "201304", "201305", "201306", "201307", "201308", "201309", "201310", "201311", "201312")
names(vpm) <- c(newcolnames) # names(vpm.clusterID) <- c(newcolnames,"clusterID")
rm(newcolnames)
# -----------------------------------------------------------------------------
# 生成 vpm.v (增加了clusterID) 新格式已经有clusterID
# 将行号变为一列
#newcolumn <- factor(as.numeric(rownames(vpm)))
#vpm.clusterID <- data.frame(vpm, newcolumn) # 为什么这一句后,列明变为了 x201301?
# 将新列的列名改为clusterID
# 重新设置列名
#newcolnames <- c("201301", "201302", "201303", "201304", "201305", "201306", "201307", "201308", "201309", "201310", "201311", "201312")
#names(vpm.clusterID) <- c(newcolnames,"clusterID")
#rm(newcolnames)
# 将 "clusterID" 变为 factor?
# -----------------------------------------------------------------------------
# 横表变纵表
library(reshape2)
# 'pointsNum'是没有必要的,但为了保留它作为单独的一列
#vpm.v <- melt(vpm, id = c("clusterID", "counter"), variable_name = "ym") # colnames/ym -> value
# reshape2 中 variable_name 无效
vpm.v <- melt(vpm, id = c("clusterID", "counter"))
# 为vpm.v设置变量标签
newcolnames <- c("clusterID", "counter", "ym", "value")
names(vpm.v) <- c(newcolnames)
rm(newcolnames)
# 画图的列
#vpm.v$ym <- as.factor(vpm.v$colnames)
# -----------------------------------------------------------------------------
# 行列转换
# vpm.t 从vpm进行转换
#vpm.t <- t(vpm) # 这种方法,值变为了 character vector
vpm.t <- as.data.frame(t(vpm))
# 将 行名 成为新列 ym
#vpm.t <- data.frame(vpm.t, factor(rownames(vpm))) # 不需要因子化,所以使用下面的语句即可
#vpm.t <- data.frame(vpm.t, rownames(vpm.t)) # 最后附加的列明是rownames.vpm.t. 所以改为下面两句
ym <- rownames(vpm.t)
vpm.t <- data.frame(vpm.t, ym)
# 返回值
mylist <- list(vpm, vpm.v, vpm.t)
return (mylist)
}
# *****************************************************************************
# 加载数据到变量中
# *****************************************************************************
# GoodM1
fileData_GoodM1_clusterSpecial_Ladder <- loadCluster(file_GoodM1_clusterSpecial_Ladder)
fileData_GoodM1_clusterSpecial_Ts <- loadCluster(file_GoodM1_clusterSpecial_Ts)
fileData_GoodM1_clusterSpecial_NotTsNotLadder <- loadCluster(file_GoodM1_clusterSpecial_NotTsNotLadder)
# GoodM2
fileData_GoodM2_clusterSpecial_Ladder <- loadCluster(file_GoodM2_clusterSpecial_Ladder)
fileData_GoodM2_clusterSpecial_Ts <- loadCluster(file_GoodM2_clusterSpecial_Ts)
fileData_GoodM2_clusterSpecial_NotTsNotLadder <- loadCluster(file_GoodM2_clusterSpecial_NotTsNotLadder)
# BadF2ExcludeF3
fileData_BadF2ExcludeF3_clusterSpecial_Ladder <- loadCluster(file_BadF2ExcludeF3_clusterSpecial_Ladder)
fileData_BadF2ExcludeF3_clusterSpecial_Ts <- loadCluster(file_BadF2ExcludeF3_clusterSpecial_Ts)
fileData_BadF2ExcludeF3_clusterSpecial_NotTsNotLadder <- loadCluster(file_BadF2ExcludeF3_clusterSpecial_NotTsNotLadder)
# BadF3
fileData_BadF3_clusterSpecial_Ladder <- loadCluster(file_BadF3_clusterSpecial_Ladder)
fileData_BadF3_clusterSpecial_Ts <- loadCluster(file_BadF3_clusterSpecial_Ts)
fileData_BadF3_clusterSpecial_NotTsNotLadder <- loadCluster(file_BadF3_clusterSpecial_NotTsNotLadder)
# *****************************************************************************
# 其他存档
# *****************************************************************************
# --------------------------------------------------------------
# 将第一列前面的"["去掉,并转换为 numeric
#tmpX <- myclustercenters$V1 # 注意:此时的tmpX是向量
#tmpX.substr <- substr(tmpX, 2, nchar(as.character(tmpX))) # 从第二个字符截取
#myclustercenters$V1 <- as.numeric(tmpX.substr)
#rm(tmpX)
# 将最后一列后面的"["去掉,并转换为 numeric
#tmpLen <- length(names(myclustercenters))
#tmpY <- myclustercenters[tmpLen] # 注意: 此时的tmpY是data.frame(只有一列)
#names(tmpY) <- c("tmpColID") # 重命名列名
#tmpZ <- tmpY$tmpColID # 应用新列名获取数据: 此时的tmpZ是向量
#tmpZ.substr <- substr(tmpZ, 1, nchar(as.character(tmpZ)) -1 ) # 截取到倒数第二个字符
#myclustercenters[tmpLen] <- as.numeric(tmpZ.substr)
#rm(tmpY)
#rm(tmpZ)
|
# Niche-tracking analysis for one species using a PCA of climate data and
# kernel density estimates of seasonal occurrence niches (via ecospat).
# improvements: should limit background to a 5000km(?) buffer around the full
# range for each species.
#
# Relies on objects from the calling environment: `gbif` (occurrence table),
# `ranges` / `simple.ranges` (range polygons), `bg.sum.r` / `bg.wnt.r`
# (seasonal background rasters) and `bg.sum.df` / `bg.wnt.df` (seasonal
# background climate data frames).
#
# @param i  species name, matched against gbif$species.
# @return a one-row data.frame of niche statistics, or NULL (invisibly) when
#         fewer than 6 summer or winter records fall within the range.
nicheTracker_pcaOcc <- function(i) {
  # library() rather than require() so a missing package fails loudly here
  # instead of via an obscure downstream error.
  library(data.table); library(dismo); library(ecospat)
  library(pbapply); library(rgeos); library(ggplot2)
  # subset this species' records by season (JJA = summer, DJF = winter)
  loc <- subset(gbif, species == i)
  loc.sum <- subset(loc, month %in% c(6, 7, 8))
  loc.wnt <- subset(loc, month %in% c(12, 1, 2))
  loc.sum <- SpatialPoints(data.frame(loc.sum[, .(decimallongitude, decimallatitude)]),
                           proj4string = CRS(proj4string(ranges[[i]])))
  loc.wnt <- SpatialPoints(data.frame(loc.wnt[, .(decimallongitude, decimallatitude)]),
                           proj4string = CRS(proj4string(ranges[[i]])))
  # clip points to the seasonal range polygons, if a range map is present and
  # the species is not an austral migrant (breeding centroid not south of the
  # wintering centroid)
  if (!is.null(simple.ranges[[i]])) {
    if (ymin(gCentroid(simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1, 2), ])) >=
        ymin(gCentroid(simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1, 3), ]))) {
      loc.sum <- gIntersection(loc.sum, simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1, 2), ])
      loc.wnt <- gIntersection(loc.wnt, simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1, 3), ])
    }
  }
  # continue only with > 5 reports per season within range; gIntersection can
  # return NULL, and length(NULL) == 0 also fails this guard
  if (length(loc.wnt) > 5 && length(loc.sum) > 5) {
    # centroids of seasonal occurrences and the distance between them
    centroid.sum <- gCentroid(loc.sum)
    centroid.wnt <- gCentroid(loc.wnt)
    centroid.distance <- pointDistance(centroid.sum, centroid.wnt, lonlat = T)
    # climate at occurrence points: observed seasons plus the hypothetical
    # "resident" scenarios (staying on one territory year-round)
    sp.sum   <- na.omit(extract(bg.sum.r, loc.sum))
    sp.wnt   <- na.omit(extract(bg.wnt.r, loc.wnt))
    sp.res.s <- na.omit(extract(bg.wnt.r, loc.sum)) # resident on summer territory
    sp.res.w <- na.omit(extract(bg.sum.r, loc.wnt)) # resident on winter territory
    # combined dataset for the PCA; weight only the observed seasonal points
    # so the axes are trained on them (all other rows get weight 0)
    df <- rbind(sp.sum, sp.wnt, sp.res.s, sp.res.w, bg.sum.df, bg.wnt.df)
    weights <- c(rep(1, nrow(sp.sum)), rep(1, nrow(sp.wnt)),
                 rep(0, nrow(sp.res.s)), rep(0, nrow(sp.res.w)),
                 rep(0, nrow(bg.sum.df)), rep(0, nrow(bg.wnt.df)))
    pca <- dudi.pca(df, row.w = weights, nf = 2, scannf = F, center = T, scale = T)
    # split the PC scores back into their source groups.
    # BUG FIX: the previous hand-written index arithmetic ended the res.s,
    # res.w and bg.sum slices at 1+sum(n) instead of sum(n), so each slice
    # absorbed the first row of the next group; cumulative start/end indices
    # make the partition exact.
    ends   <- cumsum(c(nrow(sp.sum), nrow(sp.wnt), nrow(sp.res.s),
                       nrow(sp.res.w), nrow(bg.sum.df), nrow(bg.wnt.df)))
    starts <- c(1, head(ends, -1) + 1)
    pc.sum    <- pca$li[starts[1]:ends[1], ]
    pc.wnt    <- pca$li[starts[2]:ends[2], ]
    pc.res.s  <- pca$li[starts[3]:ends[3], ]
    pc.res.w  <- pca$li[starts[4]:ends[4], ]
    pc.bg.sum <- pca$li[starts[5]:ends[5], ]
    pc.bg.wnt <- pca$li[starts[6]:ends[6], ]
    pc.bg.all <- pca$li[starts[5]:nrow(pca$li), ] # both seasonal backgrounds
    rm(pca)
    # occurrence-density grids with a kernel density estimator
    # (see Broenniman et al. 2012, J. Biogeography)
    grid.sum   <- ecospat.grid.clim.dyn(glob = pc.bg.all, glob1 = pc.bg.sum, sp = pc.sum, R = 100)
    grid.wnt   <- ecospat.grid.clim.dyn(glob = pc.bg.all, glob1 = pc.bg.wnt, sp = pc.wnt, R = 100)
    grid.res.s <- ecospat.grid.clim.dyn(glob = pc.bg.all, glob1 = pc.bg.wnt, sp = pc.res.s, R = 100)
    grid.res.w <- ecospat.grid.clim.dyn(glob = pc.bg.all, glob1 = pc.bg.sum, sp = pc.res.w, R = 100)
    # niche similarity between seasons, and overlap vs the resident scenarios
    sim.s.w <- ecospat.niche.similarity.test.noplot(z1 = grid.sum, z2 = grid.wnt, rep = 1000, one.sided = F)
    overlap.res.s <- ecospat.niche.overlap(grid.sum, grid.res.s, cor = T)
    overlap.res.w <- ecospat.niche.overlap(grid.wnt, grid.res.w, cor = T)
    data.frame(species = paste(i),
               centroid.distance = centroid.distance,
               I.obs = sim.s.w$obs$I,
               I.res.s = overlap.res.s$I,
               I.res.w = overlap.res.w$I,
               p.similar = sim.s.w$p.I,
               min.n.locs = min(length(loc.sum), length(loc.wnt)))
  }
}
| /scripts/nicheTracker_pcaOcc.R | no_license | cjbattey/nicheTrackR | R | false | false | 4,102 | r | #niche tracking analysis function using PCA & kernel density estimator via ecospat
#improvements: should limit background to a 5000km(?) buffer around the full range for each species.
nicheTracker_pcaOcc <- function(i) {
require(data.table);require(dismo);require(ecospat);require(pbapply);require(rgeos);require(ggplot2)
#subset species/seasons
loc <- subset(gbif,species == i)
loc.sum <- subset(loc,month%in%c(6,7,8)==T)
loc.wnt <- subset(loc,month%in%c(12,1,2)==T)
loc.sum <- SpatialPoints(data.frame(loc.sum[,.(decimallongitude,decimallatitude)]), proj4string=CRS(proj4string(ranges[[i]])))
loc.wnt <- SpatialPoints(data.frame(loc.wnt[,.(decimallongitude,decimallatitude)]), proj4string=CRS(proj4string(ranges[[i]])))
#crop to range if range map is present and species is not an austral migrant
if( !is.null(simple.ranges[[i]])){
if(ymin(gCentroid(simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1,2),])) >= ymin(gCentroid(simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1,3),])) ){
loc.sum <- gIntersection(loc.sum,simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1,2),])
loc.wnt <- gIntersection(loc.wnt,simple.ranges[[i]][simple.ranges[[i]]@data$layer %in% c(1,3),])
}
}
#continue if > n reports within range
if(length(loc.wnt) > 5 & length(loc.sum) > 5){
#find centroid of seasonal occurrences
centroid.sum <- gCentroid(loc.sum)
centroid.wnt <- gCentroid(loc.wnt)
centroid.distance <- pointDistance(centroid.sum,centroid.wnt,lonlat=T)
#extract occurrence pt background data
sp.sum <- na.omit(extract(bg.sum.r,loc.sum))
sp.wnt <- na.omit(extract(bg.wnt.r,loc.wnt))
sp.res.s <- na.omit(extract(bg.wnt.r,loc.sum)) #resident on summer territory
sp.res.w <- na.omit(extract(bg.sum.r,loc.wnt)) #resident on winter territory
#combo dataset for pca
df <- rbind(sp.sum,sp.wnt,sp.res.s,sp.res.w,bg.sum.df,bg.wnt.df)
#set weighting to train PCA on full background data
weights <- c(rep(1,nrow(sp.sum)),rep(1,nrow(sp.wnt)),rep(0,nrow(sp.res.s)),
rep(0,nrow(sp.res.w)),rep(0,nrow(bg.sum.df)),rep(0,nrow(bg.wnt.df)))
#run pca
pca <- dudi.pca(df,row.w=weights,nf=2,scannf=F,center=T,scale=T)
#pull rows for species & background pc coords
pc.sum <- pca$li[1 : nrow(sp.sum),]
pc.wnt <- pca$li[(1+nrow(sp.sum)):(nrow(sp.sum)+nrow(sp.wnt)),]
pc.res.s <- pca$li[(1+nrow(sp.sum)+nrow(sp.wnt)) : (1+nrow(sp.sum)+nrow(sp.wnt)+nrow(sp.res.s)),]
pc.res.w <- pca$li[(1+nrow(sp.sum)+nrow(sp.wnt)+nrow(sp.res.s)) : (1+nrow(sp.sum)+nrow(sp.wnt)+nrow(sp.res.s)+nrow(sp.res.w)),]
pc.bg.sum <- pca$li[(1+nrow(sp.sum)+nrow(sp.wnt)+nrow(sp.res.s)+nrow(sp.res.w)) : (1+nrow(sp.sum)+nrow(sp.wnt)+nrow(sp.res.s)+nrow(sp.res.w)+nrow(bg.sum.df)),]
pc.bg.wnt <- pca$li[(1+nrow(sp.sum)+nrow(sp.wnt)+nrow(sp.res.s)+nrow(sp.res.w)+nrow(bg.sum.df)) : nrow(pca$li),]
pc.bg.all <- pca$li[(1+nrow(sp.sum)+nrow(sp.wnt)+nrow(sp.res.s)+nrow(sp.res.w)) : nrow(pca$li),]
rm(list=c("pca"))
#grid with kernal density estimator (see Broenniman et al. 2012, J. Biogeography)
grid.sum <- ecospat.grid.clim.dyn(glob=pc.bg.all,glob1=pc.bg.sum,sp=pc.sum,R=100)
grid.wnt <- ecospat.grid.clim.dyn(glob=pc.bg.all,glob1=pc.bg.wnt,sp=pc.wnt,R=100)
grid.res.s <- ecospat.grid.clim.dyn(glob=pc.bg.all,glob1=pc.bg.wnt,sp=pc.res.s,R=100)
grid.res.w <- ecospat.grid.clim.dyn(glob=pc.bg.all,glob1=pc.bg.sum,sp=pc.res.w,R=100)
#run similarity test
sim.s.w <- ecospat.niche.similarity.test.noplot(z1=grid.sum,z2=grid.wnt,rep=1000,one.sided=F)
overlap.res.s <- ecospat.niche.overlap(grid.sum,grid.res.s,cor=T)
overlap.res.w <- ecospat.niche.overlap(grid.wnt,grid.res.w,cor=T)
data.frame(species=paste(i),
centroid.distance=centroid.distance,
I.obs=sim.s.w$obs$I,
I.res.s=overlap.res.s$I,
I.res.w=overlap.res.w$I,
p.similar=sim.s.w$p.I,
min.n.locs=min(length(loc.sum),length(loc.wnt)) )
}
}
|
# XvalidatePredictions.R
#
# Purpose: From input Training Data, select a subset for
# Training and prediction, run
# similarity computations,
# predict synergy scores, and evaluate
# success.
#
# Version: 0.3
#
# Date: Nov 9 2015
# Author: Boris and DREAM team UofT
#
# V 0.3 Add scoring according to AZ Challenge organizers -
# with partial correlations to remove drug
# combination and cell-line medians.
# Reorganize results
# V 0.2.2 Maintenance
# V 0.2.1 Add analysis and plots for multiple xVal runs.
# Handle edge cases (no data, combinations not
# unique ... )
# Add progress messages.
# V 0.2 Maintenance and refactoring.
# Loop over Xval runs.
# Calculate random correlations
# V 0.1 First code
# ==========================================================
setwd(DREAMDIR)
source("DREAMutilities.R")
source("ch1scoring_functions.R")
# == CONSTANTS =============================================
#
METHOD <- "QuickPredict"
RUN_PATH <- "../validate/" # Directory for validation run data
RUN_PREFIX <- "09.test" # Filename prefix
FRAC_HOLD <- 0.33 # Fraction of holdout data
N_RUNS <- 40 # Number of cross-validation runs
EXCLUDE_POOR_QA <- TRUE # if TRUE, ignore all experiments
# with QA != 1
VERBOSE <- TRUE # Print progress
# Probably no need to change below here ====================
# Master file that contains monotherapy data
MASTER_DATA_FILE <- "../Challenge Data/Drug Synergy Data/ch1_train_combination_and_monoTherapy.csv"
# == PACKAGES ==============================================
# == FUNCTIONS =============================================
#
xValColours <- function(x,
nBreaks = 10,
method = "highlightMaximum") {
# create various types of coloring
if (method == "highlightMaximum") {
# highlight large values with red,
fCol <- colorRampPalette(c("#333333", "#4d4d4d",
"#666666", "#808080",
"#999999", "#b3b3b3",
"#cccccc", "#fc9973",
"#ff0d35"))
}
colVec <- fCol(nBreaks)
x <- colVec[cut(x, breaks = nBreaks, labels=FALSE)]
return(x)
}
# == MAIN ==================================================
#
# master file from which to select
master <- read.csv(MASTER_DATA_FILE, stringsAsFactors=FALSE)
if (EXCLUDE_POOR_QA) {
master <- master[master[,"QA"] == 1, ]
}
# == Datastructures to store results
combVec = c(meanR = numeric(),
steR = numeric(),
meanMSE = numeric(),
steMSE = numeric(),
N = numeric())
scores <- list(RUN = numeric(),
COR_PLAIN = numeric(),
GLOBAL_SCORE = numeric(),
COMB_SCORE_ALL = combVec,
COMB_SCORE_30 = combVec,
COMB_SCORE_20 = combVec,
COMB_SCORE_10 = combVec)
allResults <- data.frame(RUN = numeric(),
CELLS = character(),
ID = character(),
TRUE_SYN = numeric(),
PRED_SYN = numeric(),
SYN_CONF = numeric(),
stringsAsFactors = FALSE)
# == LOOP OVER CROSS-VALIDATION RUNS =======
for (iRun in 1:N_RUNS) {
# == MAKE RUN-SPECIFIC FILENAMES =======
RUN_ID <- sprintf("%s.%02d", RUN_PREFIX, iRun)
TRAINING_SET_FILE <- sprintf("%s%s_TrainingSet.csv", RUN_PATH, RUN_ID)
TEST_SET_FILE <- sprintf("%s%s_HoldoutSet.csv", RUN_PATH, RUN_ID)
PREDICTED_FILE <- sprintf("%s%s_prediction.csv", RUN_PATH, RUN_ID)
PRIORITY_FILE <- sprintf("%s%s_combination_priority.csv", RUN_PATH, RUN_ID)
# == MAKE TRAINING AND HOLDOUT SETS =======
nHold <- round(nrow(master) * FRAC_HOLD) # number of holdouts
iHold <- sample(1:nrow(master), nHold) # random row index of holdouts
trainSet <- master[-(iHold), ]
holdSet <- master[iHold, ]
holdSet[, "SYNERGY_SCORE"] <- NA # remove synergy values from
# holdout data
write.csv(trainSet, TRAINING_SET_FILE, row.names=FALSE)
write.csv(holdSet, TEST_SET_FILE, row.names=FALSE)
# == RUN PREDICTION =======
# predictSynergy.R is written to take input training and test
# filenames from commandline. If it it source()'ed instead
# redefine the commandArgs() function as below.
commandArgs <- function(trailingOnly) {
return(c(TRAINING_SET_FILE,
TEST_SET_FILE,
PREDICTED_FILE,
PRIORITY_FILE,
METHOD))
}
if (VERBOSE) {cat(paste("xVal run: ", iRun, "/", N_RUNS, " ", Sys.time(), "\n"))}
source("predictSynergy.R")
# predictSynergy.R writes its result into
# PREDICTED_FILE and PRIORITY_FILE
# == READ AND COMBINE RESULTS =======
pred <- read.csv(PREDICTED_FILE, stringsAsFactors=FALSE)
conf <- read.csv(PRIORITY_FILE, stringsAsFactors=FALSE)
# head(pred)
# head(conf)
results <- data.frame(RUN = rep(iRun, nrow(pred)),
CELLS = pred[ , "CELL_LINE"],
ID = pred[ , "COMBINATION_ID"],
TRUE_SYN = NA,
PRED_SYN = pred[ , "PREDICTION"],
SYN_CONF = rep(0, nrow(pred)),
stringsAsFactors = FALSE)
for (i in 1:nrow(results)) {
# add confidence values to results
results[i, "SYN_CONF"] <- conf[ (conf[,"COMBINATION_ID"] == results[i, "ID"]) ,
"CONFIDENCE" ]
# add true synergy score to results
# use the mean, since we actually have cases where
# there are more than one experiments for the same
# combination ID and cell line.
results[i, "TRUE_SYN"] <- master[master[ , "COMBINATION_ID"] == pred[i, "COMBINATION_ID"] &
master[ , "CELL_LINE"] == pred[i, "CELL_LINE"],
"SYNERGY_SCORE"] %>% mean
}
allResults <- rbind(allResults, results)
# head(results)
# == SCORE THE RESULTS =======
scores$RUN <- c(scores$RUN, iRun)
scores$COR_PLAIN <- c(scores$COR_PLAIN,
cor(results[,"TRUE_SYN"],
results[,"PRED_SYN"],
use="complete.obs"))
scores$GLOBAL_SCORE <- c(scores$GLOBAL_SCORE,
getGlobalScore_ch1(results))
scores$COMB_SCORE_ALL = rbind(scores$COMB_SCORE_ALL,
getDrugCombiScore_ch1(results, conf, topX=100))
scores$COMB_SCORE_30 = rbind(scores$COMB_SCORE_30,
getDrugCombiScore_ch1(results, conf, topX=30))
scores$COMB_SCORE_20 = rbind(scores$COMB_SCORE_20,
getDrugCombiScore_ch1(results, conf, topX=20))
scores$COMB_SCORE_10 = rbind(scores$COMB_SCORE_10,
getDrugCombiScore_ch1(results, conf, topX=10))
} # end for (iRun in 1:N_RUNS)
if (VERBOSE) {cat(paste("xVal completed: ", Sys.time(), "\n\n"))}
# == ANALYSE RUNS =======
# == HISTOGRAM RANDOM VS. PREDICTED
# create distribution of random scores
nRandRuns <- 1000 # per xVal run
scoreRand <- numeric(nRandRuns * N_RUNS)
iC <- 1
for (i in 1:N_RUNS) {
# subset of predictions for this run
tp <- allResults[allResults[, "RUN"] == i, c("CELLS", "ID", "TRUE_SYN", "PRED_SYN")]
for (j in 1:nRandRuns) {
# sample from subset
tp[ , "PRED_SYN"] <- sample(tp[ , "PRED_SYN"], nrow(tp))
scoreRand[iC] <- getGlobalScore_ch1(tp)
iC <- iC + 1
}
}
# Plot histogram,
# superimpose histogram of predicted Global Scores
colRand <- "#E6E0FF"
colObs <- "#00DDAA44"
hist(scoreRand,
breaks=20,
xlim = c(min(min(scoreRand), min(scores$GLOBAL_SCORE)) * 1.2,
max(max(scoreRand), max(scores$GLOBAL_SCORE)) * 1.2),
freq = FALSE,
col = colRand,
main = "Random and observed Global Scores",
cex.main = 0.8,
xlab = "Global Score"
)
if (length(scores$GLOBAL_SCORE) == 1) {
abline(v = scores$GLOBAL_SCORE[rep(1,4)], col = colObs)
} else {
par(new = TRUE)
hVals <- hist(scores$GLOBAL_SCORE, plot = FALSE)
hist(scores$GLOBAL_SCORE,
xlim = c(min(min(scoreRand), min(scores$GLOBAL_SCORE)) * 1.2,
max(max(scoreRand), max(scores$GLOBAL_SCORE)) * 1.2),
ylim = c(0, 5 * max(hVals$density)),
breaks = round(N_RUNS)/2,
freq = FALSE,
col= colObs,
border= "#666677",
axes = FALSE,
main = "",
sub = (sprintf("mean Global Scores for predictions: %1.3f ± %1.3f",
mean(scores$GLOBAL_SCORE), sd(scores$GLOBAL_SCORE))),
cex.sub = 0.8,
xlab = "",
ylab = ""
)
}
legend("topright",
cex = 0.7,
legend = c(sprintf("%d,%03d random",
floor(length(scoreRand)/1000),
length(scoreRand) %% 1000),
sprintf("%d predicted", N_RUNS)),
fill = c(colRand, colObs),
bty = "n",
title = "")
# == Scatterplot true vs. pred
true <- allResults[,"TRUE_SYN"]
pred <- allResults[,"PRED_SYN"]
plot(true, pred,
cex = 0.2,
pch = 16,
col = xValColours(allResults[,"SYN_CONF"]),
xlim = c(min(c(true, pred)), max(c(true, pred))),
ylim = c(min(c(true, pred)), max(c(true, pred))),
xlab = "True synergy scores",
ylab = "Predicted synergy scores",
main = sprintf("%s crossvalidation with %d%% holdouts, %d runs\n(new correlations)",
METHOD,
FRAC_HOLD * 100,
N_RUNS),
cex.main = 0.8
)
legend("topleft",
cex = 0.6,
legend = paste(seq(0.9, 0.0, by = -0.1), "-", seq(1.0, 0.1, by = -0.1)),
fill = xValColours(seq(0.95, 0.05, by = -0.1)),
bty = "n",
title = "Priority")
abline(h = 0, lwd = 0.5, col = "#CCCCCC")
abline(v = 0, lwd = 0.5, col = "#CCCCCC")
# add regression line
linMod <- lm(true ~ pred)
abline(linMod, col="#BB0000")
summary(linMod)
# [END]
| /XvalidatePredictions.R | no_license | DREAM-Toronto/Drug-Combination-Prediction-2015 | R | false | false | 10,280 | r | # XvalidatePredictions.R
#
# Purpose: From input Training Data, select a subset for
# Training and prediction, run
# similarity computations,
# predict synergy scores, and evaluate
# success.
#
# Version: 0.3
#
# Date: Nov 9 2015
# Author: Boris and DREAM team UofT
#
# V 0.3 Add scoring according to AZ Challenge organizers -
# with partial correlations to remove drug
# combination and cell-line medians.
# Reorganize results
# V 0.2.2 Maintenance
# V 0.2.1 Add analysis and plots for multiple xVal runs.
# Handle edge cases (no data, combinations not
# unique ... )
# Add progress messages.
# V 0.2 Maintenance and refactoring.
# Loop over Xval runs.
# Calculate random correlations
# V 0.1 First code
# ==========================================================
setwd(DREAMDIR)
source("DREAMutilities.R")
source("ch1scoring_functions.R")
# == CONSTANTS =============================================
#
METHOD <- "QuickPredict"
RUN_PATH <- "../validate/" # Directory for validation run data
RUN_PREFIX <- "09.test" # Filename prefix
FRAC_HOLD <- 0.33 # Fraction of holdout data
N_RUNS <- 40 # Number of cross-validation runs
EXCLUDE_POOR_QA <- TRUE # if TRUE, ignore all experiments
# with QA != 1
VERBOSE <- TRUE # Print progress
# Probably no need to change below here ====================
# Master file that contains monotherapy data
MASTER_DATA_FILE <- "../Challenge Data/Drug Synergy Data/ch1_train_combination_and_monoTherapy.csv"
# == PACKAGES ==============================================
# == FUNCTIONS =============================================
#
xValColours <- function(x,
nBreaks = 10,
method = "highlightMaximum") {
# create various types of coloring
if (method == "highlightMaximum") {
# highlight large values with red,
fCol <- colorRampPalette(c("#333333", "#4d4d4d",
"#666666", "#808080",
"#999999", "#b3b3b3",
"#cccccc", "#fc9973",
"#ff0d35"))
}
colVec <- fCol(nBreaks)
x <- colVec[cut(x, breaks = nBreaks, labels=FALSE)]
return(x)
}
# == MAIN ==================================================
#
# master file from which to select
master <- read.csv(MASTER_DATA_FILE, stringsAsFactors=FALSE)
if (EXCLUDE_POOR_QA) {
master <- master[master[,"QA"] == 1, ]
}
# == Datastructures to store results
combVec = c(meanR = numeric(),
steR = numeric(),
meanMSE = numeric(),
steMSE = numeric(),
N = numeric())
scores <- list(RUN = numeric(),
COR_PLAIN = numeric(),
GLOBAL_SCORE = numeric(),
COMB_SCORE_ALL = combVec,
COMB_SCORE_30 = combVec,
COMB_SCORE_20 = combVec,
COMB_SCORE_10 = combVec)
allResults <- data.frame(RUN = numeric(),
CELLS = character(),
ID = character(),
TRUE_SYN = numeric(),
PRED_SYN = numeric(),
SYN_CONF = numeric(),
stringsAsFactors = FALSE)
# == LOOP OVER CROSS-VALIDATION RUNS =======
for (iRun in 1:N_RUNS) {
# == MAKE RUN-SPECIFIC FILENAMES =======
RUN_ID <- sprintf("%s.%02d", RUN_PREFIX, iRun)
TRAINING_SET_FILE <- sprintf("%s%s_TrainingSet.csv", RUN_PATH, RUN_ID)
TEST_SET_FILE <- sprintf("%s%s_HoldoutSet.csv", RUN_PATH, RUN_ID)
PREDICTED_FILE <- sprintf("%s%s_prediction.csv", RUN_PATH, RUN_ID)
PRIORITY_FILE <- sprintf("%s%s_combination_priority.csv", RUN_PATH, RUN_ID)
# == MAKE TRAINING AND HOLDOUT SETS =======
nHold <- round(nrow(master) * FRAC_HOLD) # number of holdouts
iHold <- sample(1:nrow(master), nHold) # random row index of holdouts
trainSet <- master[-(iHold), ]
holdSet <- master[iHold, ]
holdSet[, "SYNERGY_SCORE"] <- NA # remove synergy values from
# holdout data
write.csv(trainSet, TRAINING_SET_FILE, row.names=FALSE)
write.csv(holdSet, TEST_SET_FILE, row.names=FALSE)
# == RUN PREDICTION =======
# predictSynergy.R is written to take input training and test
# filenames from commandline. If it it source()'ed instead
# redefine the commandArgs() function as below.
commandArgs <- function(trailingOnly) {
return(c(TRAINING_SET_FILE,
TEST_SET_FILE,
PREDICTED_FILE,
PRIORITY_FILE,
METHOD))
}
if (VERBOSE) {cat(paste("xVal run: ", iRun, "/", N_RUNS, " ", Sys.time(), "\n"))}
source("predictSynergy.R")
# predictSynergy.R writes its result into
# PREDICTED_FILE and PRIORITY_FILE
# == READ AND COMBINE RESULTS =======
pred <- read.csv(PREDICTED_FILE, stringsAsFactors=FALSE)
conf <- read.csv(PRIORITY_FILE, stringsAsFactors=FALSE)
# head(pred)
# head(conf)
results <- data.frame(RUN = rep(iRun, nrow(pred)),
CELLS = pred[ , "CELL_LINE"],
ID = pred[ , "COMBINATION_ID"],
TRUE_SYN = NA,
PRED_SYN = pred[ , "PREDICTION"],
SYN_CONF = rep(0, nrow(pred)),
stringsAsFactors = FALSE)
for (i in 1:nrow(results)) {
# add confidence values to results
results[i, "SYN_CONF"] <- conf[ (conf[,"COMBINATION_ID"] == results[i, "ID"]) ,
"CONFIDENCE" ]
# add true synergy score to results
# use the mean, since we actually have cases where
# there are more than one experiments for the same
# combination ID and cell line.
results[i, "TRUE_SYN"] <- master[master[ , "COMBINATION_ID"] == pred[i, "COMBINATION_ID"] &
master[ , "CELL_LINE"] == pred[i, "CELL_LINE"],
"SYNERGY_SCORE"] %>% mean
}
allResults <- rbind(allResults, results)
# head(results)
# == SCORE THE RESULTS =======
scores$RUN <- c(scores$RUN, iRun)
scores$COR_PLAIN <- c(scores$COR_PLAIN,
cor(results[,"TRUE_SYN"],
results[,"PRED_SYN"],
use="complete.obs"))
scores$GLOBAL_SCORE <- c(scores$GLOBAL_SCORE,
getGlobalScore_ch1(results))
scores$COMB_SCORE_ALL = rbind(scores$COMB_SCORE_ALL,
getDrugCombiScore_ch1(results, conf, topX=100))
scores$COMB_SCORE_30 = rbind(scores$COMB_SCORE_30,
getDrugCombiScore_ch1(results, conf, topX=30))
scores$COMB_SCORE_20 = rbind(scores$COMB_SCORE_20,
getDrugCombiScore_ch1(results, conf, topX=20))
scores$COMB_SCORE_10 = rbind(scores$COMB_SCORE_10,
getDrugCombiScore_ch1(results, conf, topX=10))
} # end for (iRun in 1:N_RUNS)
if (VERBOSE) {cat(paste("xVal completed: ", Sys.time(), "\n\n"))}
# == ANALYSE RUNS =======
# == HISTOGRAM RANDOM VS. PREDICTED
# create distribution of random scores
nRandRuns <- 1000 # per xVal run
scoreRand <- numeric(nRandRuns * N_RUNS)
iC <- 1
for (i in 1:N_RUNS) {
# subset of predictions for this run
tp <- allResults[allResults[, "RUN"] == i, c("CELLS", "ID", "TRUE_SYN", "PRED_SYN")]
for (j in 1:nRandRuns) {
# sample from subset
tp[ , "PRED_SYN"] <- sample(tp[ , "PRED_SYN"], nrow(tp))
scoreRand[iC] <- getGlobalScore_ch1(tp)
iC <- iC + 1
}
}
# Plot histogram,
# superimpose histogram of predicted Global Scores
colRand <- "#E6E0FF"
colObs <- "#00DDAA44"
hist(scoreRand,
breaks=20,
xlim = c(min(min(scoreRand), min(scores$GLOBAL_SCORE)) * 1.2,
max(max(scoreRand), max(scores$GLOBAL_SCORE)) * 1.2),
freq = FALSE,
col = colRand,
main = "Random and observed Global Scores",
cex.main = 0.8,
xlab = "Global Score"
)
if (length(scores$GLOBAL_SCORE) == 1) {
abline(v = scores$GLOBAL_SCORE[rep(1,4)], col = colObs)
} else {
par(new = TRUE)
hVals <- hist(scores$GLOBAL_SCORE, plot = FALSE)
hist(scores$GLOBAL_SCORE,
xlim = c(min(min(scoreRand), min(scores$GLOBAL_SCORE)) * 1.2,
max(max(scoreRand), max(scores$GLOBAL_SCORE)) * 1.2),
ylim = c(0, 5 * max(hVals$density)),
breaks = round(N_RUNS)/2,
freq = FALSE,
col= colObs,
border= "#666677",
axes = FALSE,
main = "",
sub = (sprintf("mean Global Scores for predictions: %1.3f ± %1.3f",
mean(scores$GLOBAL_SCORE), sd(scores$GLOBAL_SCORE))),
cex.sub = 0.8,
xlab = "",
ylab = ""
)
}
legend("topright",
cex = 0.7,
legend = c(sprintf("%d,%03d random",
floor(length(scoreRand)/1000),
length(scoreRand) %% 1000),
sprintf("%d predicted", N_RUNS)),
fill = c(colRand, colObs),
bty = "n",
title = "")
# == Scatterplot true vs. pred
true <- allResults[,"TRUE_SYN"]
pred <- allResults[,"PRED_SYN"]
plot(true, pred,
cex = 0.2,
pch = 16,
col = xValColours(allResults[,"SYN_CONF"]),
xlim = c(min(c(true, pred)), max(c(true, pred))),
ylim = c(min(c(true, pred)), max(c(true, pred))),
xlab = "True synergy scores",
ylab = "Predicted synergy scores",
main = sprintf("%s crossvalidation with %d%% holdouts, %d runs\n(new correlations)",
METHOD,
FRAC_HOLD * 100,
N_RUNS),
cex.main = 0.8
)
legend("topleft",
cex = 0.6,
legend = paste(seq(0.9, 0.0, by = -0.1), "-", seq(1.0, 0.1, by = -0.1)),
fill = xValColours(seq(0.95, 0.05, by = -0.1)),
bty = "n",
title = "Priority")
abline(h = 0, lwd = 0.5, col = "#CCCCCC")
abline(v = 0, lwd = 0.5, col = "#CCCCCC")
# add regression line
linMod <- lm(true ~ pred)
abline(linMod, col="#BB0000")
summary(linMod)
# [END]
|
#' A Function to clean a single input string by removing punctuation and numbers and tokenizing it.
#'
#' @param string A single input string such as "This is a cool function!"
#' @return A vector containing all valid tokens in the original input string
#' @export
Clean_String <- function(string){
# Lowercase
temp <- tolower(string)
# Remove everything that is not a number letter ? or !
temp <- stringr::str_replace_all(temp,"[^a-zA-Z\\s]", " ")
# Shrink down to just one white space
temp <- stringr::str_replace_all(temp,"[\\s]+", " ")
# Split it
temp <- stringr::str_split(temp, " ")[[1]]
# Get rid of trailing "" if necessary
indexes <- which(temp == "")
if(length(indexes) > 0){
temp <- temp[-indexes]
}
return(temp)
}
| /R/Clean_String.R | no_license | kathyfoley77/ipedsbindR | R | false | false | 756 | r | #' A Function to clean a single input string by removing punctuation and numbers and tokenizing it.
#'
#' @param string A single input string such as "This is a cool function!"
#' @return A vector containing all valid tokens in the original input string
#' @export
Clean_String <- function(string){
# Lowercase
temp <- tolower(string)
# Remove everything that is not a number letter ? or !
temp <- stringr::str_replace_all(temp,"[^a-zA-Z\\s]", " ")
# Shrink down to just one white space
temp <- stringr::str_replace_all(temp,"[\\s]+", " ")
# Split it
temp <- stringr::str_split(temp, " ")[[1]]
# Get rid of trailing "" if necessary
indexes <- which(temp == "")
if(length(indexes) > 0){
temp <- temp[-indexes]
}
return(temp)
}
|
## ---- eval=T-------------------------------------------------------------------------------------------
# download packages, if necessary
install.packages(c("vegan", "MASS", "cluster", "tree",
"BiodiversityR", "gclus", "ecodist", "FD", "psych", "pheatmap"))
## ---- warning=F, message=F-----------------------------------------------------------------------------
#load packages
library(vegan)
library(MASS)
library(cluster)
library(tree)
library(gclus)
#library(ecodist)
library(FD)
library(psych) #for pairs-plot
library(pheatmap) #for heatmaps
## ---- eval=T-------------------------------------------------------------------------------------------
# Take a look at vegan's vignettes
browseVignettes("vegan")
## ---- eval=T-------------------------------------------------------------------------------------------
data(varespec)
data(varechem)
?varespec
## ---- eval=T-------------------------------------------------------------------------------------------
data(dune)
data(dune.env)
?dune
## ---- eval=T-------------------------------------------------------------------------------------------
# tips
?str
?class
?summary
?head
?tail
?dim
?nrows
?ncols
?rownames
?colnames
?range
?apply #e.g. apply(varespec, MARGIN=1, "max")
## ---- eval=T-------------------------------------------------------------------------------------------
#Hint # Get help on the decostand() function
?decostand
## ---- eval=T, echo=T-----------------------------------------------------------------------------------
# Transformation and standardization of the species data
## Simple transformations
# Partial view of the raw data (abundance codes)
varespec[1:5, 2:4]
## 1) Transform abundances to presence-absence (1-0)
varespec.pa <- decostand(varespec, method = "pa")
varespec.pa[1:5, 2:4]
## 2) Standardization by columns (species)
# Scale abundances by dividing them by the maximum value of each
# species
# Note: MARGIN = 2 (column, default value) for argument "max"
varespec.scal <- decostand(varespec, "max")
varespec.scal[1:5, 2:4]
# Display the maximum in each transformed column
apply(varespec.scal, 2, max)
## 3) Standardization by rows (sites)
# Scale abundances by dividing them by the site totals
# (profiles of relative abundance by site)
varespec.rel <- decostand(varespec, "total") # default MARGIN = 1
varespec.rel[1:5, 2:4]
# Display the sum of row vectors to determine if the scaling worked
# properly
rowSums(varespec.rel) # equivalent to: apply(varespec.rel, 1, sum)
## 4) Standardization to zero mean and unit s.d.
varechem.st <- decostand(varechem, "standardize")
# verify it worked
colMeans(varechem.st)
apply(varechem.st, MARGIN=2, sd)
## ---- eval=T-------------------------------------------------------------------------------------------
#Hint # Get help on the following functions in vegan
?diversity
?specaccum
?rarefy
?rarecurve
## ---- echo=T, eval=T-----------------------------------------------------------------------------------
# Species richness
specnumber(BCI) # returns the species richness per plot
summary(specnumber(BCI)) # report descriptive statistics
hist(specnumber(BCI)) # have a look to the frequencies
# Diversity indices
diversity(BCI, index = "shannon") # Computes shannon diversity index per plot
hist(diversity(BCI, index = "shannon")) # see the histogram
diversity(BCI, index = "simpson") # Computes shannon diversity index per plot
hist(diversity(BCI, index = "simpson")) # see the histogram
# rarefaction (individual-based rarefaction)
rarefy(BCI, 20) # gives you the species per 20 individuals
rarecurve(BCI) # sample size reflects individuals
# rarefaction (sample-based)
spa <- specaccum(BCI)
plot(spa) # Plot the rarefaction curve
plot(spa, ci.type="poly", col="blue", lwd=2, ci.lty=0, ci.col="lightblue") # just nicer
## ---- eval=T-------------------------------------------------------------------------------------------
#Hints #
?vegdist
?as.matrix
?vegan::mantel #!! the FD package masks the mantel function from vegan!
## ---- echo=T, eval=T-----------------------------------------------------------------------------------
## 1)
distance.spe.q1 <- vegdist(varespec)
distance.spe.q2 <- vegdist(varespec, method ="euclidean") #does not account for double 0s!
distance.spe.q3 <- vegdist(varespec, binary=FALSE)
# binary = FALSE looks at the abundance;
distance.spe.q4 <- vegdist(varespec, binary=TRUE)
# TRUE looks at presence-absence (Sorenson's index)
# equivalent to distancce.spe.q1
## 2)
dim(distance.spe.q1)
length(distance.spe.q1) #How do you obtain this number?
distances <- as.matrix(distance.spe.q1)
dim(distances)
## 3.1) Q mode analysis - dissimilarity between sites based on species data
distance.spe.q1 <- vegdist(varespec) # "bray" is the default, not necessary to type
## 3.2) Q mode analysis - dissimilarity between sites based on environmental variables
distance.env.q1 <- vegdist(varechem, "euclidean")
# even better would be to standardize the variables
varechem.st <- decostand(varechem, method = "standardize", MARGIN=2) #by column!
distance.env.q1 <- vegdist(varechem.st, "euclidean")
## 3.3) R mode analysis - dissimilarity between species,
## based on their presence absence in a site
## Need to Transpose - in R variables are conventionally in columns!
varespec.t <- t(varespec)
#transpose matrix of species abundancces
distance.spe.r1 <- dist(decostand(varespec, "chi.square"))
# dist = euclidean distance in {base}
## 3.4) R mode analysis - between environmental predictors
distance.env.r1 <- vegdist(t(varechem.st), method ="euclidean")
# To compare env. variables it does not really make sense to calculate dissimilarities
# (even if technically possible). Tather one should calculate correlations
psych::pairs.panels(varechem[,1:5],
method = "pearson", # correlation method
hist.col = "#00AFBB",
density = TRUE, # show density plots
ellipses = TRUE # show correlation ellipses
)
## 4) Compare different dissimilarity matrices graphically
# dissimilarities based on species (Q-mode), with different metrics
pheatmap::pheatmap(distance.spe.q1, cluster_rows = F, cluster_cols = F)
pheatmap::pheatmap(distance.spe.q2, cluster_rows = F, cluster_cols = F)
pheatmap::pheatmap(distance.spe.q4, cluster_rows = F, cluster_cols = F)
# The Mantel test ####
# Mantel test calculates correlations between dissimilarities
# The higher is the result, the more similar the groups compared
vegan::mantel(distance.spe.q1, distance.spe.q2)
## 5) find the most similar plots
#transform to matrix, and replace diagonal with ones
#[to avoid considering distances of plots to themselves]
totest <- as.matrix(distance.spe.q1) + diag(nrow=nrow(varespec))
minn <- which(totest==min(totest), arr.ind=T)
totest[minn]
## ---- eval=T, echo=T-----------------------------------------------------------------------------------
# my solution
myjac <- function(x,y){
x.pa <- rbind(x,y)>0
return(1 - sum(colSums(x.pa)==2) / (sum(colSums(x.pa)==2) + sum(colSums(x.pa)==1)))
}
| /multivariate_analysis_Part1.R | no_license | fmsabatini/TeachingStats | R | false | false | 7,088 | r | ## ---- eval=T-------------------------------------------------------------------------------------------
# download packages, if necessary
install.packages(c("vegan", "MASS", "cluster", "tree",
"BiodiversityR", "gclus", "ecodist", "FD", "psych", "pheatmap"))
## ---- warning=F, message=F-----------------------------------------------------------------------------
# Attach every package used below in one call; lapply() preserves the
# attach order of the explicit library() calls it replaces.
# NOTE: FD comes after vegan, so FD's mantel() masks vegan::mantel() --
# later code therefore calls vegan::mantel() with an explicit namespace.
# ecodist is intentionally left unattached to avoid further masking.
invisible(lapply(
  c("vegan", "MASS", "cluster", "tree", "gclus",
    "FD",
    "psych",     # for pairs-plot
    "pheatmap"), # for heatmaps
  library, character.only = TRUE
))
## ---- eval=T-------------------------------------------------------------------------------------------
# Take a look at vegan's vignettes
# (opens a browser window listing the tutorials shipped with the package)
browseVignettes("vegan")
## ---- eval=T-------------------------------------------------------------------------------------------
# Load vegan's example data: species abundances (varespec) and the
# matching environmental/chemistry table (varechem) for the same sites
data(varespec)
data(varechem)
?varespec
## ---- eval=T-------------------------------------------------------------------------------------------
# Load the dune meadow vegetation data and its environmental variables
data(dune)
data(dune.env)
?dune
## ---- eval=T-------------------------------------------------------------------------------------------
# tips: handy functions for exploring a data set (each line opens a help page)
?str
?class
?summary
?head
?tail
?dim
?nrow   # fixed: the functions are nrow()/ncol(); "?nrows" and "?ncols" fail
?ncol   # ("No documentation for ...") because no such functions exist
?rownames
?colnames
?range
?apply #e.g. apply(varespec, MARGIN=1, "max")
## ---- eval=T-------------------------------------------------------------------------------------------
#Hint # Get help on the decostand() function
# (vegan's workhorse for transforming/standardizing community data)
?decostand
## ---- eval=T, echo=T-----------------------------------------------------------------------------------
# Transformation and standardization of the species data
## Simple transformations
# Peek at a corner of the raw abundance codes first
varespec[1:5, 2:4]
## 1) Transform abundances to presence-absence (1-0)
varespec.pa <- decostand(varespec, method = "pa")
varespec.pa[1:5, 2:4]
## 2) Standardization by columns (species):
#    scale each species by its own maximum value
#    (MARGIN = 2, i.e. by column, is the default for method = "max")
varespec.scal <- decostand(varespec, method = "max")
varespec.scal[1:5, 2:4]
# Every transformed column should now peak at exactly 1
apply(varespec.scal, MARGIN = 2, FUN = max)
## 3) Standardization by rows (sites):
#    divide abundances by the site totals, giving per-site profiles of
#    relative abundance (MARGIN = 1 is the default for method = "total")
varespec.rel <- decostand(varespec, method = "total")
varespec.rel[1:5, 2:4]
# Each row should now sum to 1 if the scaling worked properly
rowSums(varespec.rel) # same result as apply(varespec.rel, 1, sum)
## 4) Standardization to zero mean and unit s.d. (z-scores, per column)
varechem.st <- decostand(varechem, method = "standardize")
# Verify: column means ~ 0 and standard deviations = 1
colMeans(varechem.st)
apply(varechem.st, MARGIN = 2, FUN = sd)
## ---- eval=T-------------------------------------------------------------------------------------------
#Hint # Get help on the following functions in vegan
?diversity   # diversity indices (Shannon, Simpson, ...)
?specaccum   # species accumulation (sample-based rarefaction)
?rarefy      # expected richness in rarefied subsamples
?rarecurve   # individual-based rarefaction curves
## ---- echo=T, eval=T-----------------------------------------------------------------------------------
# Load the Barro Colorado Island tree counts shipped with vegan; without
# this call the BCI object used below does not exist in a fresh session.
data(BCI)
# Species richness
specnumber(BCI) # returns the species richness per plot
summary(specnumber(BCI)) # report descriptive statistics
hist(specnumber(BCI)) # have a look at the frequencies
# Diversity indices
diversity(BCI, index = "shannon") # Computes Shannon diversity index per plot
hist(diversity(BCI, index = "shannon")) # see the histogram
diversity(BCI, index = "simpson") # Computes Simpson diversity index per plot (comment fixed: said "shannon")
hist(diversity(BCI, index = "simpson")) # see the histogram
# rarefaction (individual-based rarefaction)
rarefy(BCI, 20) # expected number of species per 20 individuals
rarecurve(BCI) # sample size reflects individuals
# rarefaction (sample-based)
spa <- specaccum(BCI)
plot(spa) # Plot the rarefaction curve
plot(spa, ci.type="poly", col="blue", lwd=2, ci.lty=0, ci.col="lightblue") # just nicer
## ---- eval=T-------------------------------------------------------------------------------------------
#Hints #
?vegdist     # dissimilarity indices for community ecologists
?as.matrix   # expand a "dist" object into a full square matrix
?vegan::mantel #!! the FD package masks the mantel function from vegan!
## ---- echo=T, eval=T-----------------------------------------------------------------------------------
## 1)
distance.spe.q1 <- vegdist(varespec)
distance.spe.q2 <- vegdist(varespec, method ="euclidean") #does not account for double 0s!
distance.spe.q3 <- vegdist(varespec, binary=FALSE)
# binary = FALSE looks at the abundance;
distance.spe.q4 <- vegdist(varespec, binary=TRUE)
# TRUE looks at presence-absence (Sorenson's index)
# equivalent to distancce.spe.q1
## 2)
dim(distance.spe.q1)
length(distance.spe.q1) #How do you obtain this number?
distances <- as.matrix(distance.spe.q1)
dim(distances)
## 3.1) Q mode analysis - dissimilarity between sites based on species data
distance.spe.q1 <- vegdist(varespec) # "bray" is the default, not necessary to type
## 3.2) Q mode analysis - dissimilarity between sites based on environmental variables
distance.env.q1 <- vegdist(varechem, "euclidean")
# even better would be to standardize the variables
varechem.st <- decostand(varechem, method = "standardize", MARGIN=2) #by column!
distance.env.q1 <- vegdist(varechem.st, "euclidean")
## 3.3) R mode analysis - dissimilarity between species,
## based on their presence absence in a site
## Need to Transpose - in R variables are conventionally in columns!
varespec.t <- t(varespec)
#transpose matrix of species abundancces
distance.spe.r1 <- dist(decostand(varespec, "chi.square"))
# dist = euclidean distance in {base}
## 3.4) R mode analysis - between environmental predictors
distance.env.r1 <- vegdist(t(varechem.st), method ="euclidean")
# To compare env. variables it does not really make sense to calculate dissimilarities
# (even if technically possible). Tather one should calculate correlations
psych::pairs.panels(varechem[,1:5],
method = "pearson", # correlation method
hist.col = "#00AFBB",
density = TRUE, # show density plots
ellipses = TRUE # show correlation ellipses
)
## 4) Compare different dissimilarity matrices graphically
# dissimilarities based on species (Q-mode), with different metrics
pheatmap::pheatmap(distance.spe.q1, cluster_rows = F, cluster_cols = F)
pheatmap::pheatmap(distance.spe.q2, cluster_rows = F, cluster_cols = F)
pheatmap::pheatmap(distance.spe.q4, cluster_rows = F, cluster_cols = F)
# The Mantel test ####
# Mantel test calculates correlations between dissimilarities
# The higher is the result, the more similar the groups compared
vegan::mantel(distance.spe.q1, distance.spe.q2)
## 5) find the most similar plots
#transform to matrix, and replace diagonal with ones
#[to avoid considering distances of plots to themselves]
totest <- as.matrix(distance.spe.q1) + diag(nrow=nrow(varespec))
minn <- which(totest==min(totest), arr.ind=T)
totest[minn]
## ---- eval=T, echo=T-----------------------------------------------------------------------------------
# my solution
# Jaccard dissimilarity between two abundance vectors, computed on
# presence/absence: 1 - a / (a + b + c), where a = number of species shared
# by both samples and b + c = species present in exactly one of them.
#
# Args:
#   x, y: numeric abundance vectors of equal length (same species order).
# Returns:
#   a single numeric in [0, 1]; NaN when both samples contain no species.
myjac <- function(x, y){
  pa <- rbind(x, y) > 0         # 2 x n presence/absence matrix
  counts <- colSums(pa)         # number of samples (0, 1 or 2) containing each species
  shared <- sum(counts == 2)    # a: present in both samples
  exclusive <- sum(counts == 1) # b + c: present in exactly one sample
  # (the original recomputed colSums(x.pa) three times; hoisted into `counts`)
  1 - shared / (shared + exclusive)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_roc.R
\name{get_roc}
\alias{get_roc}
\title{Build receiver operating characteristic curve}
\usage{
get_roc(scores, labels)
}
\arguments{
\item{scores}{numeric array}
\item{labels}{binary array}
}
\value{
FPR,TPR numeric arrays
}
\description{
The function calculates the FPR and TPR for the receiver
operating characteristic (ROC)
}
\examples{
labels <- c(rep(0,10))
labels[c(1,3,5)] <- 1
scores <- 10:1
roc <- get_roc(scores, labels)
}
\keyword{FPR}
\keyword{ROC}
\keyword{TPR}
\keyword{characteristic}
\keyword{metric}
\keyword{operating}
\keyword{receiver}
| /EGAD/man/get_roc.Rd | no_license | kantale/EGAD | R | false | true | 650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_roc.R
\name{get_roc}
\alias{get_roc}
\title{Build receiver operating characteristic curve}
\usage{
get_roc(scores, labels)
}
\arguments{
\item{scores}{numeric array}
\item{labels}{binary array}
}
\value{
FPR,TPR numeric arrays
}
\description{
The function calculates the FPR and TPR for the receiver
operating characteristic (ROC)
}
\examples{
labels <- c(rep(0,10))
labels[c(1,3,5)] <- 1
scores <- 10:1
roc <- get_roc(scores, labels)
}
\keyword{FPR}
\keyword{ROC}
\keyword{TPR}
\keyword{characteristic}
\keyword{metric}
\keyword{operating}
\keyword{receiver}
|
#' Convert an InterProScan result table into a drawProteins drawing table
#'
#' Keeps only the requested genes and the hits from the chosen domain and
#' motif databases, then renames/derives the columns that drawProteins
#' expects (type, description, begin, end, length, entryName, order).
#'
#' @param interpro data frame of parsed InterProScan output; assumed to have
#'   columns feature_id, start, end, length, sigunature_desc and DB
#'   (the "sigunature" spelling follows the upstream parser -- TODO confirm
#'   against the actual input data)
#' @param gene_list character vector of feature IDs to keep
#' @param gene_length NOTE(review): not used inside this function -- confirm
#'   with callers whether it can be dropped
#' @param domain_db database whose hits are drawn as domains (default "Pfam")
#' @param motif_db database whose hits are drawn as motifs (default "TMHMM")
#'
#' @return a data frame in drawProteins format (all rows get order = 1)
#' @export
#'
#' @import dplyr
#'
#' @examples
interpro2draw <- function(interpro, gene_list,
                          gene_length, domain_db = "Pfam",
                          motif_db = "TMHMM"){
  # only hits from these two databases are retained
  db_list <- list(domain_db, motif_db)
  drawobj <- interpro %>%
    filter(feature_id %in% gene_list) %>%
    # drawProteins wants `begin` and `description` column names
    mutate(begin = start, description = sigunature_desc) %>%
    filter(DB %in% db_list) %>%
    # domain_db hits become DOMAIN rows, the remaining (motif_db) hits MOTIF
    mutate(type = ifelse(DB == domain_db, "DOMAIN", "MOTIF")) %>%
    mutate(entryName = feature_id) %>%
    mutate(order = 1) %>%
    select(type, description, begin, end, length, entryName, order)
  return(drawobj)
}
#' Build CHAIN rows (full-length protein backbones) for drawProteins
#'
#' Restricts the protein-length table to the requested IDs and emits one
#' CHAIN row per protein, spanning positions 1..length.
#'
#' @param gene_length data frame with columns `protein_id` and `length`
#' @param gene_list character vector of protein IDs to draw
#'
#' @return a tibble with columns type, description, begin, end, length
#'   and entryName (one row per selected protein)
#' @export
#'
#' @import tibble
#' @import dplyr
#'
#' @examples
lenlist2chain <- function(gene_length, gene_list){
  # keep only the proteins that were requested
  hits <- filter(gene_length, protein_id %in% gene_list)
  n_hits <- nrow(hits)
  # one full-length CHAIN row per protein; no description is needed
  chain_data <- tibble(
    type        = rep("CHAIN", n_hits),
    description = rep(NA, n_hits),
    begin       = rep(1, n_hits),
    end         = hits$length,
    length      = hits$length,
    entryName   = hits$protein_id
  )
  return(chain_data)
}
#' Stack domain, chain and optional splice rows into one drawing table
#'
#' Appends the CHAIN rows (and splice-variant rows, when supplied) to the
#' DOMAIN/MOTIF rows, strips the ".m1" transcript-model suffix from entry
#' names so isoforms collapse onto a single entry, and numbers the distinct
#' entries for plotting order.
#'
#' @param domain_data tibble of DOMAIN/MOTIF rows (e.g. from interpro2draw)
#' @param chain_data tibble of CHAIN rows (e.g. from lenlist2chain)
#' @param splice_data optional tibble of splice-variant rows; leave as the
#'   default NA to skip
#'
#' @return a tibble with entryName_psuedo and order columns added
#' @export
#'
#' @import tibble
#' @import dplyr
#' @import stringr
#'
#' @examples
bind_draws <- function(domain_data, chain_data, splice_data = NA){
  all_draw <- domain_data %>%
    add_row(chain_data)
  # Bug fix: `is.na()` on a data frame returns a logical matrix, so the old
  # `if (!is.na(splice_data))` errored whenever splice data was actually
  # supplied. Test explicitly for a data frame instead (the NA default
  # still skips this branch).
  if (is.data.frame(splice_data)) {
    all_draw <- all_draw %>%
      add_row(splice_data)
  }
  all_draw <- all_draw %>%
    # NOTE(review): the pattern ".m1$" has an unescaped dot, so any
    # character followed by "m1" at the end of the name matches -- confirm
    # this is intended before escaping it
    mutate(entryName_psuedo = str_replace(entryName, pattern=".m1$", replacement="")) %>%
    # number the distinct (suffix-stripped) entry names for plot ordering
    mutate(.,order = group_indices(.,entryName_psuedo)) %>%
    mutate(entryName = entryName_psuedo)
  return(all_draw)
}
#' Render a drawProteins figure from a prepared drawing table
#'
#' Builds the canvas, then layers chains, domains, motifs and
#' phosphorylation sites on top of it.
#'
#' @param draw_data tibble in drawProteins format (CHAIN/DOMAIN/MOTIF rows)
#'
#' @return ggplot object of drawProteins
#' @export
#'
#' @import drawProteins
#'
#' @examples
draw_all <- function(draw_data){
  p_chavas <- draw_canvas(draw_data)
  # chain and domain labels are suppressed to keep the figure readable
  p_chavas <- draw_chains(p_chavas, draw_data, label_chains = FALSE)
  p_chavas <- draw_domains(p_chavas, draw_data, label_domains = FALSE)
  p_chavas <- draw_motif(p_chavas, draw_data)
  p_chavas <- draw_phospho(p_chavas, draw_data, show.legend = FALSE)
  # (removed a redundant `p_chavas <- p_chavas` self-assignment)
  return(p_chavas)
}
| /R/prepare_draw.R | permissive | xvtyzn/interpro2genevis | R | false | false | 2,721 | r | #' Title
#'
#' @param interpro
#' @param gene_list
#' @param gene_length
#' @param domain_db
#' @param motif_db
#'
#' @return
#' @export
#'
#' @import dplyr
#'
#' @examples
interpro2draw <- function(interpro, gene_list,
gene_length, domain_db = "Pfam",
motif_db = "TMHMM"){
db_list <- list(domain_db, motif_db)
drawobj <- interpro %>%
filter(feature_id %in% gene_list) %>%
mutate(begin = start, description = sigunature_desc) %>%
filter(DB %in% db_list) %>%
mutate(type = ifelse(DB == domain_db, "DOMAIN", "MOTIF")) %>%
mutate(entryName = feature_id) %>%
mutate(order = 1) %>%
select(type, description, begin, end, length, entryName, order)
return(drawobj)
}
#' Title
#'
#' @param gene_length
#' @param gene_list
#'
#' @return
#' @export
#'
#' @import tibble
#' @import dplyr
#'
#' @examples
lenlist2chain <- function(gene_length, gene_list){
genelen_hit <- gene_length %>%
filter(protein_id %in% gene_list)
genenum <- nrow(genelen_hit)
chain_data <- tibble(type = rep("CHAIN", genenum),
description = rep(NA, genenum),
begin = rep(1, genenum),
end = genelen_hit$length,
length = genelen_hit $length,
entryName = genelen_hit$protein_id)
return(chain_data)
}
#' Title
#'
#' @param domain_data
#' @param chain_data
#' @param splice_data
#'
#' @return
#' @export
#'
#' @import tibble
#' @import dplyr
#' @import stringr
#'
#' @examples
bind_draws <- function(domain_data, chain_data, splice_data = NA){
all_draw <- domain_data %>%
add_row(chain_data)
if(!is.na(splice_data)){
all_draw <- all_draw %>%
add_row(splice_data) %>%
mutate(entryName_psuedo = str_replace(entryName, pattern=".m1$", replacement="")) %>%
mutate(.,order = group_indices(.,entryName_psuedo)) %>%
mutate(entryName = entryName_psuedo)
} else {
all_draw <- all_draw %>%
mutate(entryName_psuedo = str_replace(entryName, pattern=".m1$", replacement="")) %>%
mutate(.,order = group_indices(.,entryName_psuedo)) %>%
mutate(entryName = entryName_psuedo)
}
return(all_draw)
}
#' Title
#'
#' @param draw_data
#'
#' @return ggplot object of drawProteins
#' @export
#'
#' @import drawProteins
#'
#' @examples
draw_all <- function(draw_data){
p_chavas <- draw_canvas(draw_data)
p_chavas <- draw_chains(p_chavas, draw_data, label_chains = FALSE)
p_chavas <- draw_domains(p_chavas, draw_data, label_domains = FALSE)
p_chavas <- draw_motif(p_chavas, draw_data)
p_chavas <- draw_phospho(p_chavas, draw_data, show.legend = FALSE)
p_chavas <- p_chavas
return(p_chavas)
}
|
# Exploratory plot: compare motor-vehicle (ON-ROAD) PM2.5 emissions over
# time in Baltimore City vs Los Angeles County, using the NEI data.
# Reads the needed packages
library(dplyr)
library(ggplot2)
# Reads the RDS datasets
# NOTE(review): SCC is loaded but never used below -- confirm it is needed
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Get the total emissions per year
# fips "24510" = Baltimore City, MD; fips "06037" = Los Angeles County, CA
baltimoreEmissions <- summarise(group_by(filter(NEI, fips == "24510" & type == "ON-ROAD"), year),
                                Emissions = sum(Emissions))
LAemissions <- summarise(group_by(filter(NEI, fips == "06037" & type == "ON-ROAD"), year),
                         Emissions = sum(Emissions))
# label each subset so the combined data can be faceted by location
baltimoreEmissions$Country <- "Baltimore City, MD"
LAemissions$Country <- "Los Angeles Country, CA"
combinedEmissionsofcities <- rbind(baltimoreEmissions, LAemissions)
# Create a plot and saving it as png file
png(filename = "plot6.png")
ggplot(combinedEmissionsofcities, aes(x = factor(year), y = Emissions, fill = Country,
                                      label = round(Emissions, 2))) +
  geom_bar(stat = "identity") +
  # one panel per location, each panel with its own y scale
  facet_grid(scales = "free", space = "free", . ~ Country) +
  xlab("Years") +
  ylab(expression("Emissions")) +
  ggtitle(expression("Total PM" [2.5]*" Emissions from Motor Vehicles in Baltimore and Los Angeles")) +
  theme(legend.position = "none", plot.title = element_text(hjust = 0.5))
# Close the PNG device
dev.off()
library(dplyr)
library(ggplot2)
# Reads the RDS datasets
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Get the total emissions per year
baltimoreEmissions <- summarise(group_by(filter(NEI, fips == "24510" & type == "ON-ROAD"), year),
Emissions = sum(Emissions))
LAemissions <- summarise(group_by(filter(NEI, fips == "06037" & type == "ON-ROAD"), year),
Emissions = sum(Emissions))
baltimoreEmissions$Country <- "Baltimore City, MD"
LAemissions$Country <- "Los Angeles Country, CA"
combinedEmissionsofcities <- rbind(baltimoreEmissions, LAemissions)
# Create a plot and saving it as png file
png(filename = "plot6.png")
ggplot(combinedEmissionsofcities, aes(x = factor(year), y = Emissions, fill = Country,
label = round(Emissions, 2))) +
geom_bar(stat = "identity") +
facet_grid(scales = "free", space = "free", . ~ Country) +
xlab("Years") +
ylab(expression("Emissions")) +
ggtitle(expression("Total PM" [2.5]*" Emissions from Motor Vehicles in Baltimore and Los Angeles")) +
theme(legend.position = "none", plot.title = element_text(hjust = 0.5))
# Close the PNG device
dev.off() |
# Hajek-type variance estimator for the Narain-Horvitz-Thompson total.
#
# Args:
#   VecY.s:  vector of observed study-variable values for the sample.
#   VecPk.s: vector of first-order inclusion probabilities, in (0, 1].
# Returns:
#   a single numeric variance estimate computed by the package's compiled
#   C routine; a warning is issued if it is negative.
VE.Hajek.Total.NHT <- function(VecY.s, VecPk.s)
{
  # Validate inputs up front. The order of the checks is kept identical to
  # the original so the same message fires first for a given bad input.
  if (!is.vector(VecY.s)) {
    stop("VecY.s must be a vector.")
  }
  if (!is.vector(VecPk.s)) {
    stop("VecPk.s must be a vector.")
  }
  sample_size <- length(VecY.s)
  if (sample_size != length(VecPk.s)) {
    stop("The lengths of VecY.s and VecPk.s are different.")
  }
  if (any(is.na(VecPk.s))) {
    stop("There are missing values in VecPk.s.")
  }
  if (any(VecPk.s <= 0 | VecPk.s > 1)) {
    stop("There are invalid values in VecPk.s.")
  }
  if (any(is.na(VecY.s))) {
    stop("There are missing values in VecY.s.")
  }
  # Delegate the actual computation to the compiled routine shipped with
  # the samplingVarEst package (sample size is passed both as integer and
  # as double, as the C interface expects).
  var_est <- .C("VE_Hajek_Total_NHT",
                as.double(VecY.s),
                as.double(VecPk.s),
                as.integer(sample_size),
                as.double(sample_size),
                VarEst = double(1),
                PACKAGE = "samplingVarEst")$VarEst
  if (var_est < 0) {
    warning("The variance estimate contains negative values.")
  }
  var_est
}
| /samplingVarEst/R/VE_Hajek_Total_NHT.r | no_license | ingted/R-Examples | R | false | false | 1,334 | r | VE.Hajek.Total.NHT <- function(VecY.s, VecPk.s)
{
if(! is.vector(VecY.s) ){stop("VecY.s must be a vector.") }
if(! is.vector(VecPk.s) ){stop("VecPk.s must be a vector.") }
n <- length(VecY.s)
if(n != length(VecPk.s) ){stop("The lengths of VecY.s and VecPk.s are different.") }
if(any(is.na(VecPk.s)) ){stop("There are missing values in VecPk.s.") }
if(any(VecPk.s<=0|VecPk.s>1) ){stop("There are invalid values in VecPk.s.") }
if(any(is.na(VecY.s)) ){stop("There are missing values in VecY.s.") }
Doublen <- as.double(n)
OUTPUT <- .C("VE_Hajek_Total_NHT",
as.double(VecY.s),
as.double(VecPk.s),
as.integer(n),
as.double(Doublen),
VarEst = double(1),
PACKAGE = "samplingVarEst")$VarEst
if(OUTPUT<0 ){warning("The variance estimate contains negative values.")}
OUTPUT
}
|
# Tests for googleway's Google-polyline encode/decode helpers.
context("decode pl")
test_that("polyline decoded correctly", {
  # a valid encoded polyline of 8 coordinate pairs decodes to 8 rows
  pl <- "nnseFmpzsZgalNytrXetrG}krKsaif@kivIccvzAvvqfClp~uBlymzA~ocQ}_}iCthxo@srst@"
  expect_equal(nrow(decode_pl(pl)), 8)
})
test_that("error message for invalid encoded type", {
  # decode_pl() only accepts a single character string
  pl <- c("a","b")
  expect_error(decode_pl(pl),
               "encoded must be a string of length 1")
  # ...and rejects non-character input with the same message
  pl <- 123
  expect_error(decode_pl(pl),
               "encoded must be a string of length 1")
})
test_that("incorrect encoded string throws error",{
  # a syntactically invalid polyline produces a diagnostic message rather
  # than an error
  pl <- "abc"
  expect_message(decode_pl(pl),
                 "The encoded string could not be decoded. \nYou can manually check the encoded line at https://developers.google.com/maps/documentation/utilities/polylineutility \nIf the line can successfully be manually decoded, please file an issue: https://github.com/SymbolixAU/googleway/issues ")
})
test_that("polyline encoded correctly", {
  # reference example from Google's Encoded Polyline Algorithm docs
  expect_equal(
    encode_pl(lat = c(38.5, 40.7, 43.252), lon = c(-120.2, -120.95, -126.453)),
    "_p~iF~ps|U_ulLnnqC_mqNvxq`@"
  )
})
test_that("polyline encoding error message", {
  # mismatched coordinate vector lengths are rejected
  expect_error(
    encode_pl(lat = c(-91, -90, -89), lon = c(0, 1)),
    "lat and lon must be the same length"
  )
  # non-numeric coordinates cannot be encoded
  expect_message(
    encode_pl(lat = "a", lon = "b"),
    "The coordinates could not be encoded"
  )
})
| /tests/testthat/test-decode_pl.R | permissive | SymbolixAU/googleway | R | false | false | 1,316 | r | context("decode pl")
test_that("polyline decoded correctly", {
pl <- "nnseFmpzsZgalNytrXetrG}krKsaif@kivIccvzAvvqfClp~uBlymzA~ocQ}_}iCthxo@srst@"
expect_equal(nrow(decode_pl(pl)), 8)
})
test_that("error message for invalid encoded type", {
pl <- c("a","b")
expect_error(decode_pl(pl),
"encoded must be a string of length 1")
pl <- 123
expect_error(decode_pl(pl),
"encoded must be a string of length 1")
})
test_that("incorrect encoded string throws error",{
pl <- "abc"
expect_message(decode_pl(pl),
"The encoded string could not be decoded. \nYou can manually check the encoded line at https://developers.google.com/maps/documentation/utilities/polylineutility \nIf the line can successfully be manually decoded, please file an issue: https://github.com/SymbolixAU/googleway/issues ")
})
test_that("polyline encoded correctly", {
expect_equal(
encode_pl(lat = c(38.5, 40.7, 43.252), lon = c(-120.2, -120.95, -126.453)),
"_p~iF~ps|U_ulLnnqC_mqNvxq`@"
)
})
test_that("polyline encoding error message", {
expect_error(
encode_pl(lat = c(-91, -90, -89), lon = c(0, 1)),
"lat and lon must be the same length"
)
expect_message(
encode_pl(lat = "a", lon = "b"),
"The coordinates could not be encoded"
)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_toggl_clients.R
\name{get_toggl_clients}
\alias{get_toggl_clients}
\title{Get Toggl Clients}
\usage{
get_toggl_clients(api_token, workspace_id, user_agent = "api_test")
}
\value{
a data frame of Toggl clients
}
\description{
Get Toggl Clients
}
| /man/get_toggl_clients.Rd | no_license | justinjm/togglR | R | false | true | 327 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_toggl_clients.R
\name{get_toggl_clients}
\alias{get_toggl_clients}
\title{Get Toggl Clients}
\usage{
get_toggl_clients(api_token, workspace_id, user_agent = "api_test")
}
\value{
a data frame of Toggl clients
}
\description{
Get Toggl Clients
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abortion.R
\docType{data}
\name{abortion}
\alias{abortion}
\title{Haberman's Abortion, Education, and Religion Data}
\format{
A 3x3x3 (contingency) table
}
\usage{
data(abortion)
}
\description{
A multi-way contingency table with the results of a survey concerning
attitudes concerning abortion.
}
\references{
Haberman, S. (1978). \emph{Analysis of Qualitative Data} 1, 2.
Academic Press, Orlando, FL.
}
\author{
1972 National Opinion Research Center
}
\keyword{datasets}
| /man/abortion.Rd | no_license | dkahle/algstat | R | false | true | 551 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/abortion.R
\docType{data}
\name{abortion}
\alias{abortion}
\title{Haberman's Abortion, Education, and Religion Data}
\format{
A 3x3x3 (contingency) table
}
\usage{
data(abortion)
}
\description{
A multi-way contingency table with the results of a survey concerning
attitudes concerning abortion.
}
\references{
Haberman, S. (1978). \emph{Analysis of Qualitative Data} 1, 2.
Academic Press, Orlando, FL.
}
\author{
1972 National Opinion Research Center
}
\keyword{datasets}
|
##########################################################################################
# Create DoE Effect figure (Set focus and set uninteresting)
# Set switch 'nointeractioneffects' to TRUE to show only single effects
# (applies to focus set only).
#
# Project: Pub_SIRIOS
# Last update: 23/08/2016
# Author: Sascha Holzhauer
# Instructions: Run machine-specific SIMP file first (see craftyr documentation)
##########################################################################################
# Usually also in simp.R, but required here to find simp.R
simp$sim$folder <- "_setA/_RegGlobMax"
simp$sim$task <- "DoE-720" # Name of surrounding folder, usually a description of task
# setsimp identifies the cached, aggregated data set spanning all runs
setsimp <- simp
setsimp$sim$id <- "set720-735"
preserve <- list()
preserve$task <- simp$sim$task
# TRUE restricts the effect plots to single (main) effects only
nointeractioneffects <- TRUE #FALSE
# simp$dirs$simp is set by machine-specific file:
setwd(paste(simp$dirs$simp, simp$sim$folder, "cluster/common", sep="/"))
# usually, the setting/scenario specific simp.R is two levels above:
source("../../simp.R")
library(plyr)
# design points (runs) and random seeds (repetitions) to aggregate over
runs = 720:735
rseeds = 0:19
simp$fig$height <- 1300
simp$fig$width <- 1300
simp$fig$outputformat <- "png"
simp$sim$scenario <- "A1"
# column 1: figure/set number the metric belongs to; column 2: metric name.
# Commented rows are metrics currently excluded from the figures.
metrics = matrix(c(
  #1, "ConsConnectivity",
  #1, "ConsPatches_C",
  #1, "ConsPatches_NC",
  #1, "ConsProp_C",
  #2, "DivLuPerRegSimpson",
  #2, "DivLuShannon",
  #2, "DivSupplyAcrossRegSimpson",
  #2, "DivSupplyPerRegSimpson",
  #2, "EffSupply",
  1, "MaxOverSupply",
  1, "OverSupply_Total",
  1,"UnderSupply_Total",
  1, "RegUnderSupply_Cereal",
  #2, "MaxUnderSupply",
  1, "NumActions",
  1, "NumActionsNC",
  1, "ConsProp_NC",
  #3, "OverSupply_Cereal",
  #3, "OverSupply_Meat",
  #3, "OverSupply_Timber",
  #1,"RegUnderSupply_Meat",
  #1,"RegUnderSupply_Timber",
  #4,"UnderSupply_Cereal",
  #41,"UnderSupply_Meat",
  #4,"UnderSupply_Timber",
  1, "VarChangesLu"
  #4, "VarChangesCells")
  ), ncol=2, byrow = T
)
set1metrics <- metrics[metrics[,1]=="1",2]
metriccolnamesAll = c("VarChangesLu", "ConsPatches_NC", "ConsConnectivity","MaxUnderSupply", "MaxOverSupply",
                      "RegUnderSupply_Cereal", "RegUnderSupply_Meat", "RegUnderSupply_Timber",
                      "VarChangesCells", "DivLuShannon", "ConsPatches_C", "ConsProp_C", "ConsProp_NC","NumActions",
                      #"UnderSupply_Meat", "UnderSupply_Cereal", "UnderSupply_Timber", "NumActionsNC",
                      #"OverSupply_Total", "OverSupply_Meat", "OverSupply_Cereal", "OverSupply_Timber",
                      "DivSupplyPerRegSimpson", "DivLuPerRegSimpson", "DivSupplyAcrossRegSimpson", "EffSupply")
metricColnamesFocus = c("VarChangesLu", "VarChangesCells",
                        "MaxUnderSupply", "MaxOverSupply",
                        "RegUnderSupply_Timber",
                        "ConsPatches_NC", "ConsProp_NC", "ConsConnectivity", "NumActions", "DivSupplyPerRegSimpson"
                        #"DivLuPerRegSimpson", "DivSupplyPerRegSimpson")
                        # "UnderSupply_Cereal", "UnderSupply_Timber","UnderSupply_Meat", "NumActionsNC",
)
# DoE factors whose effects are estimated and plotted
paramcolnames = c("TriggeringThreshold", "SubsidyRate", "ActionLifetime", "Precedence")
# map parameter-file letter codes onto numeric factor levels
substitServices <- c("I" = 1, "J" = 2)
substitPresedence <- c("B" = 1, "C" = 2)
############### END of Parameter Section ######################
# Load the aggregated metrics from cache if available; otherwise compute
# them from the per-run/per-seed outputs and cache the result.
data <- shbasic::sh_tools_loadorsave(SIP = setsimp, OBJECTNAME = "data_metrics",
    PRODUCTIONFUN = function() {
      data <- data.frame()
      for (run in runs) {
        for (rseed in rseeds) {
          # run = runs[1]; rseed = rseeds[1]
          simp$sim$runids <- c(paste(run, rseed, sep="-")) # run to deal with
          simp$sim$id <- c(paste(run, rseed, sep="-"))
          # loads the per-run long-format table `data_metrics` (Metric/Value)
          input_tools_load(simp, "data_metrics")
          runparams <- craftyr::input_csv_param_runs(simp, paramid = TRUE)
          # one row of aggregated metrics + parameter values per run/seed
          agg_metrics <- data.frame(
              VarChangesLu = sum(data_metrics[data_metrics$Metric == "VarChangesLu", "Value"]),
              ConsPatches_NC = mean(data_metrics[data_metrics$Metric == "ConsPatches_NC_Cereal-NC_Livestock", "Value"]),
              MaxUnderSupply = max(0, abs(data_metrics[data_metrics$Metric == "MaxUnderSupply_Cereal-Meat-Timber", "Value"])),
              MaxOverSupply = max(0, data_metrics[data_metrics$Metric == "MaxOverSupply_Cereal-Meat-Timber", "Value"]),
              VarChangesCells = sum(data_metrics[data_metrics$Metric == "VarChangesCells", "Value"]),
              DivLuShannon = mean(data_metrics[data_metrics$Metric == "DivLuShannon", "Value"]),
              ConsPatches_C = mean(data_metrics[data_metrics$Metric == "ConsPatches_C_Cereal-C_Livestock", "Value"]),
              ConsProp_C = mean(data_metrics[data_metrics$Metric == "ConsProp_C", "Value"]),
              ConsProp_NC = mean(data_metrics[data_metrics$Metric == "ConsProp_NC", "Value"]),
              ConsConnectivity= mean(data_metrics[data_metrics$Metric == "ConsConnectivity_NC_Cereal-NC_Livestock", "Value"]),
              # correct under/oversupply data: the raw values are percent of
              # demand supplied, so subtracting 100 gives the deviation from
              # perfect supply (abs() makes it a magnitude)
              UnderSupply_Total = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Total", "Value"])-100),
              UnderSupply_Meat = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Meat", "Value"])-100),
              UnderSupply_Cereal = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Cereal", "Value"])-100),
              UnderSupply_Timber = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Timber", "Value"])-100),
              OverSupply_Total = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Total", "Value"])-100),
              OverSupply_Meat = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Meat", "Value"])-100),
              OverSupply_Cereal = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Cereal", "Value"])-100),
              OverSupply_Timber = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Timber", "Value"])-100),
              RegUnderSupply_Cereal = abs(mean(data_metrics[data_metrics$Metric == "RegionalUnderSupplyPercent_Cereal", "Value"])-100),
              RegUnderSupply_Meat = abs(mean(data_metrics[data_metrics$Metric == "RegionalUnderSupplyPercent_Meat", "Value"])-100),
              RegUnderSupply_Timber = abs(mean(data_metrics[data_metrics$Metric == "RegionalUnderSupplyPercent_Timber", "Value"])-100),
              DivSupplyPerRegSimpson = mean(data_metrics[data_metrics$Metric == "DivSupplyPerRegSimpson", "Value"]),
              DivLuPerRegSimpson = mean(data_metrics[data_metrics$Metric == "DivLuPerRegSimpson", "Value"]),
              DivSupplyAcrossRegSimpson = mean(data_metrics[data_metrics$Metric == "DivSupplyAcrossRegSimpson", "Value"]),
              EffSupply = mean(data_metrics[data_metrics$Metric == "EffSupply", "Value"]),
              NumActions = metric_agg_actions_number(simp),
              NumActionsNC = metric_agg_actions_number(simp, pattern="NC"),
              ID = simp$sim$id,
              # DoE parameter values of this run, read from the run table
              TriggerThreshold= runparams[,"ThresholdCerealGlobal"],
              ActionLifetime = runparams[,"GlobalInstActionRT"],
              SubsidyRate = runparams[,"FactorTimberGlobal"],
              MonitorDelay = runparams[,"MonitorDelay"],
              ConsiderPACglob = runparams[,"PACconsiderGlobal"],
              ConsiderPACreg = runparams[,"PACconsiderRegional"],
              Noise = runparams[,"GlobalInstNoise"],
              # scenario letters are encoded in XML file names; extract the
              # part between the fixed prefix and ".xml" (PCRE look-around)
              Services = regmatches(runparams[,"BT_xml"],
                  regexpr("(?<=Institutions_BehaviouralTypes)(.*)(?=\\.xml)", runparams[,"BT_xml"], perl=TRUE)),
              Precedence = regmatches(runparams[,"GlobalInstitutions_xml"],
                  regexpr("(?<=GlobalInstitutions)(.*)(?=\\.xml)", runparams[,"GlobalInstitutions_xml"], perl=TRUE))
          )
          data <- rbind(data, agg_metrics)
        }
      }
      return(data)
    })
# harmonise the column name with paramcolnames used below
colnames(data)[match("TriggerThreshold", colnames(data))] <- "TriggeringThreshold"
# Substitute letters by numbers:
data$Services <- substitServices[data$Services]
data$Precedence <- substitPresedence[data$Precedence]
dexp <- shdoe::shdoe_param_getDefaultDexp()
# human-readable metric labels (key -> label) for the figure legends
metriclabels <- read.csv(file="../../reports/KeyTranslations2.csv", header=FALSE, stringsAsFactors = FALSE)
metriclabels <- setNames(metriclabels[,2], metriclabels[,1])
############### Set of focus metrics
# One effect figure per metric set defined in `metrics`
for (set in unique(metrics[,1])) {
  #set = unique(metrics[,1])[1]
  # normalize data (division by max/(mean)):
  # NOTE(review): this overwrites the metricColnamesFocus defined in the
  # parameter section above -- presumably intended
  metricColnamesFocus <- metrics[metrics[, 1] == set, 2]
  # scale every metric by its maximum absolute value so the effect sizes
  # are comparable across metrics
  d <- apply(data[, metricColnamesFocus], MARGIN=2, FUN = function(x) max(abs(x), na.rm = T))
  normd <- t(apply(data[, metricColnamesFocus], MARGIN=1, FUN= function(x) x/d))
  colnames(normd) <- metricColnamesFocus
  fxlist <- list()
  for (i in metricColnamesFocus) {
    print(i)
    # effect sizes (with errors and p-values) of the DoE factors on metric i
    fx <- shdoe::shdoe_analyse_effectSizes(normd[,i], dexp,
        data[,paramcolnames], id = "Testdata", confidence= 0.9)
    fx$response <- i
    fxlist <- append(fxlist, list(fx))
  }
  fx <- do.call(rbind, fxlist)
  # extract single effects from first response var (whose row names are not numbered)
  substitutions <- shdoe::shdoe_get_paramname_substitutions(simp, varnames = unique(rownames(fx))[!grepl(":",unique(rownames(fx))) &
      !grepl("[0-9]", unique(rownames(fx)))], preventPlotmathParsing=TRUE)
  if (nointeractioneffects) {
    # keep main effects only (interaction rows contain ":")
    indices <- !grepl(":", rownames(fx))
    effectdata <- setNames(fx$effects[indices], paste(rownames(fx)[indices], ":Effect", sep=""))
    substitutions <- c(substitutions, "Effect" = "Effect")
    simp$fig$height <- 920
    simp$fig$width <- 920
    filename <- paste("crafty_netsens_analysis_doe_effects_720-735_selectedMetrics_nointeraction_R2", set, sep="_")
    numcol <- 2
  } else {
    # keep all rows (main + interaction effects)
    indices <- rep(TRUE, length(rownames(fx)))
    effectdata <- setNames(fx$effects, rownames(fx))
    filename <- paste("crafty_netsens_analysis_doe_effects_720-735_selectedMetrics_R2", set, sep="_")
    numcol <- 3
  }
  # write the effect figure (format/size taken from simp$fig)
  shdoe::shdoe_visualise_interaction_effects(dexp = simp,
      effects = effectdata,
      errors = fx$errors[indices],
      pvalues = fx$pvalues[indices],
      response = fx$response[indices],
      substitutions = substitutions,
      filename = filename,
      ggplotaddons = list(ggplot2::theme(legend.position="bottom"),
          ggplot2::scale_colour_discrete(guide=ggplot2::guide_legend(ncol=numcol, title.position="top",
                  title.hjust=0),
              name = "Metrics", labels = metriclabels)
      )
  )
}
| /config/R/_setA/_RegGlobMax/reports/720-735/sirios_doDoE_effects_selectedMetrics.R | no_license | CRAFTY-ABM/Pub_SIRIOS | R | false | false | 10,128 | r | ##########################################################################################
# Create DoE Effect figure (Set focus and set uninteresting)
# Set switch 'nointeractioneffects' to TRUE to show only single effects
# (applies to focus set only).
#
# Project: Pub_SIRIOS
# Last update: 23/08/2016
# Author: Sascha Holzhauer
# Instructions: Run maschine-specific SIMP file first (see craftyr documentation)
##########################################################################################
# Usually also in simp.R, but required here to find simp.R
simp$sim$folder <- "_setA/_RegGlobMax"
simp$sim$task <- "DoE-720" # Name of surounding folder, usually a description of task
setsimp <- simp
setsimp$sim$id <- "set720-735"
preserve <- list()
preserve$task <- simp$sim$task
nointeractioneffects <- TRUE #FALSE
# simp$dirs$simp is set by maschine-specific file:
setwd(paste(simp$dirs$simp, simp$sim$folder, "cluster/common", sep="/"))
# usually, the setting/scenario specific simp.R is two levels above:
source("../../simp.R")
library(plyr)
runs = 720:735
rseeds = 0:19
simp$fig$height <- 1300
simp$fig$width <- 1300
simp$fig$outputformat <- "png"
simp$sim$scenario <- "A1"
metrics = matrix(c(
#1, "ConsConnectivity",
#1, "ConsPatches_C",
#1, "ConsPatches_NC",
#1, "ConsProp_C",
#2, "DivLuPerRegSimpson",
#2, "DivLuShannon",
#2, "DivSupplyAcrossRegSimpson",
#2, "DivSupplyPerRegSimpson",
#2, "EffSupply",
1, "MaxOverSupply",
1, "OverSupply_Total",
1,"UnderSupply_Total",
1, "RegUnderSupply_Cereal",
#2, "MaxUnderSupply",
1, "NumActions",
1, "NumActionsNC",
1, "ConsProp_NC",
#3, "OverSupply_Cereal",
#3, "OverSupply_Meat",
#3, "OverSupply_Timber",
#1,"RegUnderSupply_Meat",
#1,"RegUnderSupply_Timber",
#4,"UnderSupply_Cereal",
#41,"UnderSupply_Meat",
#4,"UnderSupply_Timber",
1, "VarChangesLu"
#4, "VarChangesCells")
), ncol=2, byrow = T
)
set1metrics <- metrics[metrics[,1]=="1",2]
metriccolnamesAll = c("VarChangesLu", "ConsPatches_NC", "ConsConnectivity","MaxUnderSupply", "MaxOverSupply",
"RegUnderSupply_Cereal", "RegUnderSupply_Meat", "RegUnderSupply_Timber",
"VarChangesCells", "DivLuShannon", "ConsPatches_C", "ConsProp_C", "ConsProp_NC","NumActions",
#"UnderSupply_Meat", "UnderSupply_Cereal", "UnderSupply_Timber", "NumActionsNC",
#"OverSupply_Total", "OverSupply_Meat", "OverSupply_Cereal", "OverSupply_Timber",
"DivSupplyPerRegSimpson", "DivLuPerRegSimpson", "DivSupplyAcrossRegSimpson", "EffSupply")
metricColnamesFocus = c("VarChangesLu", "VarChangesCells",
"MaxUnderSupply", "MaxOverSupply",
"RegUnderSupply_Timber",
"ConsPatches_NC", "ConsProp_NC", "ConsConnectivity", "NumActions", "DivSupplyPerRegSimpson"
#"DivLuPerRegSimpson", "DivSupplyPerRegSimpson")
# "UnderSupply_Cereal", "UnderSupply_Timber","UnderSupply_Meat", "NumActionsNC",
)
paramcolnames = c("TriggeringThreshold", "SubsidyRate", "ActionLifetime", "Precedence")
substitServices <- c("I" = 1, "J" = 2)
substitPresedence <- c("B" = 1, "C" = 2)
############### END of Parameter Section ######################
data <- shbasic::sh_tools_loadorsave(SIP = setsimp, OBJECTNAME = "data_metrics",
PRODUCTIONFUN = function() {
data <- data.frame()
for (run in runs) {
for (rseed in rseeds) {
# run = runs[1]; rseed = rseeds[1]
simp$sim$runids <- c(paste(run, rseed, sep="-")) # run to deal with
simp$sim$id <- c(paste(run, rseed, sep="-"))
input_tools_load(simp, "data_metrics")
runparams <- craftyr::input_csv_param_runs(simp, paramid = TRUE)
agg_metrics <- data.frame(
VarChangesLu = sum(data_metrics[data_metrics$Metric == "VarChangesLu", "Value"]),
ConsPatches_NC = mean(data_metrics[data_metrics$Metric == "ConsPatches_NC_Cereal-NC_Livestock", "Value"]),
MaxUnderSupply = max(0, abs(data_metrics[data_metrics$Metric == "MaxUnderSupply_Cereal-Meat-Timber", "Value"])),
MaxOverSupply = max(0, data_metrics[data_metrics$Metric == "MaxOverSupply_Cereal-Meat-Timber", "Value"]),
VarChangesCells = sum(data_metrics[data_metrics$Metric == "VarChangesCells", "Value"]),
DivLuShannon = mean(data_metrics[data_metrics$Metric == "DivLuShannon", "Value"]),
ConsPatches_C = mean(data_metrics[data_metrics$Metric == "ConsPatches_C_Cereal-C_Livestock", "Value"]),
ConsProp_C = mean(data_metrics[data_metrics$Metric == "ConsProp_C", "Value"]),
ConsProp_NC = mean(data_metrics[data_metrics$Metric == "ConsProp_NC", "Value"]),
ConsConnectivity= mean(data_metrics[data_metrics$Metric == "ConsConnectivity_NC_Cereal-NC_Livestock", "Value"]),
# correct under/oversupply data:
UnderSupply_Total = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Total", "Value"])-100),
UnderSupply_Meat = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Meat", "Value"])-100),
UnderSupply_Cereal = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Cereal", "Value"])-100),
UnderSupply_Timber = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentUnder_Timber", "Value"])-100),
OverSupply_Total = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Total", "Value"])-100),
OverSupply_Meat = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Meat", "Value"])-100),
OverSupply_Cereal = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Cereal", "Value"])-100),
OverSupply_Timber = abs(mean(data_metrics[data_metrics$Metric == "SupplyPercentOver_Timber", "Value"])-100),
RegUnderSupply_Cereal = abs(mean(data_metrics[data_metrics$Metric == "RegionalUnderSupplyPercent_Cereal", "Value"])-100),
RegUnderSupply_Meat = abs(mean(data_metrics[data_metrics$Metric == "RegionalUnderSupplyPercent_Meat", "Value"])-100),
RegUnderSupply_Timber = abs(mean(data_metrics[data_metrics$Metric == "RegionalUnderSupplyPercent_Timber", "Value"])-100),
DivSupplyPerRegSimpson = mean(data_metrics[data_metrics$Metric == "DivSupplyPerRegSimpson", "Value"]),
DivLuPerRegSimpson = mean(data_metrics[data_metrics$Metric == "DivLuPerRegSimpson", "Value"]),
DivSupplyAcrossRegSimpson = mean(data_metrics[data_metrics$Metric == "DivSupplyAcrossRegSimpson", "Value"]),
EffSupply = mean(data_metrics[data_metrics$Metric == "EffSupply", "Value"]),
NumActions = metric_agg_actions_number(simp),
NumActionsNC = metric_agg_actions_number(simp, pattern="NC"),
ID = simp$sim$id,
TriggerThreshold= runparams[,"ThresholdCerealGlobal"],
ActionLifetime = runparams[,"GlobalInstActionRT"],
SubsidyRate = runparams[,"FactorTimberGlobal"],
MonitorDelay = runparams[,"MonitorDelay"],
ConsiderPACglob = runparams[,"PACconsiderGlobal"],
ConsiderPACreg = runparams[,"PACconsiderRegional"],
Noise = runparams[,"GlobalInstNoise"],
Services = regmatches(runparams[,"BT_xml"],
regexpr("(?<=Institutions_BehaviouralTypes)(.*)(?=\\.xml)", runparams[,"BT_xml"], perl=TRUE)),
Precedence = regmatches(runparams[,"GlobalInstitutions_xml"],
regexpr("(?<=GlobalInstitutions)(.*)(?=\\.xml)", runparams[,"GlobalInstitutions_xml"], perl=TRUE))
)
data <- rbind(data, agg_metrics)
}
}
return(data)
})
# Rename the parameter column to its final report label.
colnames(data)[match("TriggerThreshold", colnames(data))] <- "TriggeringThreshold"
# Substitute letters by numbers (map categorical run labels to numeric codes):
data$Services <- substitServices[data$Services]
data$Precedence <- substitPresedence[data$Precedence]
# Default design-of-experiments specification used by the effect-size fits.
dexp <- shdoe::shdoe_param_getDefaultDexp()
# Human-readable metric labels keyed by metric column name (col 1 -> col 2).
metriclabels <- read.csv(file="../../reports/KeyTranslations2.csv", header=FALSE, stringsAsFactors = FALSE)
metriclabels <- setNames(metriclabels[,2], metriclabels[,1])
############### Set of focus metrics
# For each named set of focus metrics: normalise the metric columns, fit a
# DoE effect-size model per metric, and plot the (optionally main-effects-only)
# effect sizes with error bars and p-values.
for (set in unique(metrics[,1])) {
        #set = unique(metrics[,1])[1]
        # normalize each metric column by its max absolute value so effect
        # sizes are comparable across metrics (division by max):
        metricColnamesFocus <- metrics[metrics[, 1] == set, 2]
        d <- apply(data[, metricColnamesFocus], MARGIN=2, FUN = function(x) max(abs(x), na.rm = T))
        normd <- t(apply(data[, metricColnamesFocus], MARGIN=1, FUN= function(x) x/d))
        colnames(normd) <- metricColnamesFocus
        # Fit one effect-size model per focus metric and collect the results.
        fxlist <- list()
        for (i in metricColnamesFocus) {
                print(i)
                fx <- shdoe::shdoe_analyse_effectSizes(normd[,i], dexp,
                        data[,paramcolnames], id = "Testdata", confidence= 0.9)
                fx$response <- i
                fxlist <- append(fxlist, list(fx))
        }
        fx <- do.call(rbind, fxlist)
        # extract single effects from first response var (whose row names are not numbered)
        substitutions <- shdoe::shdoe_get_paramname_substitutions(simp, varnames = unique(rownames(fx))[!grepl(":",unique(rownames(fx))) &
                !grepl("[0-9]", unique(rownames(fx)))], preventPlotmathParsing=TRUE)
        if (nointeractioneffects) {
                # Main effects only: keep rows whose names contain no ':'.
                indices <- !grepl(":", rownames(fx))
                effectdata <- setNames(fx$effects[indices], paste(rownames(fx)[indices], ":Effect", sep=""))
                substitutions <- c(substitutions, "Effect" = "Effect")
                simp$fig$height <- 920
                simp$fig$width <- 920
                filename <- paste("crafty_netsens_analysis_doe_effects_720-735_selectedMetrics_nointeraction_R2", set, sep="_")
                numcol <- 2
        } else {
                # Keep every effect, interactions included.
                indices <- rep(TRUE, length(rownames(fx)))
                effectdata <- setNames(fx$effects, rownames(fx))
                filename <- paste("crafty_netsens_analysis_doe_effects_720-735_selectedMetrics_R2", set, sep="_")
                numcol <- 3
        }
        # Visualise the selected effects; legend is placed below the panel.
        # NOTE(review): the argument is `dexp = simp` although a `dexp` object
        # exists in scope -- confirm the plotting function really expects the
        # simp settings object here, not the design specification.
        shdoe::shdoe_visualise_interaction_effects(dexp = simp,
                effects = effectdata,
                errors = fx$errors[indices],
                pvalues = fx$pvalues[indices],
                response = fx$response[indices],
                substitutions = substitutions,
                filename = filename,
                ggplotaddons = list(ggplot2::theme(legend.position="bottom"),
                        ggplot2::scale_colour_discrete(guide=ggplot2::guide_legend(ncol=numcol, title.position="top",
                                        title.hjust=0),
                                name = "Metrics", labels = metriclabels)
                )
        )
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/lgcpMethods.R
\name{gridfun.lgcpPredict}
\alias{gridfun.lgcpPredict}
\title{gridfun.lgcpPredict function}
\usage{
\method{gridfun}{lgcpPredict}(obj, ...)
}
\arguments{
\item{obj}{an object of class lgcpPredict}
\item{...}{additional arguments}
}
\value{
returns the output from the gridfunction option of the setoutput argument of lgcpPredict
}
\description{
Accessor function for \code{lgcpPredict} objects: returns the \code{gridfunction} argument
set in the \code{output.control} argument of the function \code{lgcpPredict}.
}
\seealso{
\link{setoutput}, \link{lgcpgrid}
}
| /man/gridfun.lgcpPredict.Rd | no_license | bentaylor1/lgcp | R | false | false | 664 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/lgcpMethods.R
\name{gridfun.lgcpPredict}
\alias{gridfun.lgcpPredict}
\title{gridfun.lgcpPredict function}
\usage{
\method{gridfun}{lgcpPredict}(obj, ...)
}
\arguments{
\item{obj}{an object of class lgcpPredict}
\item{...}{additional arguments}
}
\value{
returns the output from the gridfunction option of the setoutput argument of lgcpPredict
}
\description{
Accessor function for \code{lgcpPredict} objects: returns the \code{gridfunction} argument
set in the \code{output.control} argument of the function \code{lgcpPredict}.
}
\seealso{
\link{setoutput}, \link{lgcpgrid}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_dummmies.R
\name{create_dummies}
\alias{create_dummies}
\title{create dummies}
\usage{
create_dummies(
.data,
...,
append_col_name = TRUE,
max_levels = 10L,
remove_first_dummy = FALSE,
remove_most_frequent_dummy = FALSE,
ignore_na = FALSE,
split = NULL,
remove_selected_columns = TRUE
)
}
\arguments{
\item{.data}{data frame}
\item{...}{tidyselect columns. default selection is all character or factor variables}
\item{append_col_name}{logical, default TRUE. Appends original column name to dummy col name}
\item{max_levels}{uses \code{\link[forcats]{fct_lump_n}} to limit the number of categories. Only the top n levels are preserved, and the rest being lumped into "other". Default is set to 10 levels, to prevent accidental overload. Set value to \code{Inf} to use all levels}
\item{remove_first_dummy}{logical, default FALSE.}
\item{remove_most_frequent_dummy}{logical, default FALSE}
\item{ignore_na}{logical, default FALSE}
\item{split}{NULL}
\item{remove_selected_columns}{logical, default TRUE}
}
\value{
data frame
}
\description{
adapted from the \code{\link[fastDummies]{dummy_cols}} function. Added the option to truncate the dummy column
names, and to specify dummy cols using tidyselect.
}
\details{
reference the \href{https://jacobkap.github.io/fastDummies/index.html}{fastDummies} package for documentation on the original function.
}
\examples{
iris \%>\%
create_dummies(Species, append_col_name = FALSE) \%>\%
tibble::as_tibble()
}
| /man/create_dummies.Rd | no_license | cran/framecleaner | R | false | true | 1,566 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_dummmies.R
\name{create_dummies}
\alias{create_dummies}
\title{create dummies}
\usage{
create_dummies(
.data,
...,
append_col_name = TRUE,
max_levels = 10L,
remove_first_dummy = FALSE,
remove_most_frequent_dummy = FALSE,
ignore_na = FALSE,
split = NULL,
remove_selected_columns = TRUE
)
}
\arguments{
\item{.data}{data frame}
\item{...}{tidyselect columns. default selection is all character or factor variables}
\item{append_col_name}{logical, default TRUE. Appends original column name to dummy col name}
\item{max_levels}{uses \code{\link[forcats]{fct_lump_n}} to limit the number of categories. Only the top n levels are preserved, and the rest being lumped into "other". Default is set to 10 levels, to prevent accidental overload. Set value to \code{Inf} to use all levels}
\item{remove_first_dummy}{logical, default FALSE.}
\item{remove_most_frequent_dummy}{logical, default FALSE}
\item{ignore_na}{logical, default FALSE}
\item{split}{NULL}
\item{remove_selected_columns}{logical, default TRUE}
}
\value{
data frame
}
\description{
adapted from the \code{\link[fastDummies]{dummy_cols}} function. Added the option to truncate the dummy column
names, and to specify dummy cols using tidyselect.
}
\details{
reference the \href{https://jacobkap.github.io/fastDummies/index.html}{fastDummies} package for documentation on the original function.
}
\examples{
iris \%>\%
create_dummies(Species, append_col_name = FALSE) \%>\%
tibble::as_tibble()
}
|
var.rdf <- function(x, var.rb, limit)
{
  # Flag where the bootstrap-based correlations (var.rb) differ from the
  # sample correlations of `x` by more than `limit` percentage points.
  # Returns a data.frame of characters: '*' above the limit, '' at or
  # below it, and '-' on the diagonal.
  pct_diff <- 100 * abs(var.rb - cor(x))
  exceeds  <- pct_diff > limit
  within   <- pct_diff <= limit
  # Assigning characters coerces the whole matrix to character mode;
  # the logical masks were computed beforehand, so this is safe.
  flagged <- pct_diff
  flagged[exceeds] <- '*'
  flagged[within]  <- ''
  diag(flagged) <- '-'
  dimnames(flagged) <- list(colnames(x), colnames(x))
  as.data.frame(flagged)
}
| /R/var.rdf.R | no_license | cran/bpca | R | false | false | 376 | r | var.rdf <- function(x,
var.rb,
limit)
{
dif <- 100 * abs(var.rb - cor(x))
big <- dif > limit
leq <- dif <= limit
dif[big] <- '*'
dif[leq] <- ''
diag(dif) <- '-'
var.rd <- dif
dimnames(var.rd) <- list(dimnames(x)[[2]],
dimnames(x)[[2]])
return(as.data.frame(var.rd))
}
|
#'---
#' title: "PASS1A Rat Tissue: -- Modeling of RNASeq Data Expression"
#' author: "Alec Steep & Jiayu Zhang (Code copied and adapted from Jun Z. Li)"
#' date: "20200505"
#' output:
#' html_document:
#' code_folding: hide
#' toc: true
#' highlight: zenburn
#'
#'---
#+ setup, include=FALSE
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_chunk$set(warning = FALSE)
knitr::opts_chunk$set(message = FALSE)
knitr::opts_chunk$set(cache = FALSE)
#' ## Goals of Analysis:
#' * Model Circadian Rhythms (5 major time points)
#' * SIN/COS Linear Model
#' * y = B_0 + B_1SIN(TOD) + B_2COS(TOD)
#' * Model Exercise Effects (7 time points)
#' * Comparison of linear models:
#' * Cubic: y = ax^3 + bx^2 + cx + d
#' * y = ax^4 + bx^3 + cx^2 + dx + e
#'
#' ## Setup the Environment
#+ Setup Environment, message=FALSE, results='hide', warning = FALSE
################################################################################
##### Resources and Dependencies ###############################################
################################################################################
# Set the working directory
WD <- '/Volumes/Frishman_4TB/motrpac/20200309_rna-seq_steep'
#setwd(WD)
# Load the dependencies
#source("https://bioconductor.org/biocLite.R")
#BiocManager::install("gapminder")
#install.packages("tidyverse")
# Load dependencies
pacs...man <- c("tidyverse","GenomicRanges", "DESeq2","devtools","rafalib","GO.db","vsn","hexbin","ggplot2", "GenomicFeatures","Biostrings","BSgenome","AnnotationHub","plyr","dplyr", "org.Rn.eg.db","pheatmap","sva","formula.tools","pathview","biomaRt", "PROPER","SeqGSEA",'purrr','BioInstaller','RColorBrewer','lubridate', "hms","ggpubr", "ggrepel","genefilter","qvalue","ggfortify","som", "vsn","org.Mm.eg.db","VennDiagram","EBImage","reshape2","xtable","kohonen","som","caret","enrichR","gplots","tiff","splines","gam")
lapply(pacs...man, FUN = function(X) {
do.call("library", list(X)) })
################################################################################
######################### Functions ############################################
################################################################################
# Set select
select <- dplyr::select
counts <- DESeq2::counts
map <- purrr::map
# Source the functions
source(paste0(WD,'/functions/not_in.R'))
source(paste0(WD,'/functions/rat_mouse_ortho.R'))
source(paste0(WD,'/functions/mouse2rat_ortho.R'))
source(paste0(WD,'/functions/lmp.R'))
source(paste0(WD,'/functions/sin.R'))
source(paste0(WD,'/functions/cos.R'))
#' ## Declare Variables
#+ Declare Variables
################################################################################
##### Declare Variables ################################################
################################################################################
# Declare Tissue
# TISSUE: Hypothalamus, Liver, Kidney, Aorta, Adrenal, Brown Adipose, Cortex, Gastrocnemius, Heart, Hippocampus,Lung,Ovaries,PaxGene,Spleen,Testes, White Adipose
# SCN: Suprachiasmatic nucleus -- Hypothalamus
# LIV: Liver
# KID: Kidney
# AOR: Aorta
# SKM: Gastrocnemius
# HAT: Heart
# ADG: Adrenal gland
# BAT: Brown adipose tissue
# WAT: White adipose tissue
# COR: Cortex
# HIP: Hippocampus
# LUNG: Lung
# OVR: Ovaries
# SPL: Spleen
# TES: Testes
# Load the decision table
table_file <- paste0(WD,'/data/20200603_rnaseq-tissue-data-assambly-table_steep.txt')
df_tbl <- read.table(file = table_file,sep = '\t', header = T, check.names = F)
# Accumulator for per-tissue model summaries (populated downstream).
models_df <- data.frame()
#TISSUE <- "Hypothalamus"
# Pull the per-tissue analysis settings from the decision table df_tbl.
# NOTE(review): the loop body only assigns scalar settings and overwrites
# them on every pass, so after the loop only the last tissue ('Kidney')
# remains in scope -- presumably the loop was meant to wrap the downstream
# analysis as well; confirm the intended scope.
for (TISSUE in c('Lung', 'Hypothalamus', 'Aorta', 'Liver', 'Adrenal',
                 'Brown Adipose', 'Cortex', 'Gastrocnemius', 'Heart',
                 'Hippocampus', 'Ovaries', 'Spleen', 'Testes',
                 'White Adipose', 'Kidney')) {
        print(TISSUE)
        # Sample keys flagged as outliers for this tissue.
        OUTLIERS <- df_tbl %>%
                filter(Tissue == TISSUE) %>%
                select(Outliers) %>% unique() %>%
                unlist() %>% as.character()
        # Covariate(s) whose variance should be removed (e.g. batch);
        # the original computed this twice identically -- duplicate removed.
        ADJ_VAR <- df_tbl %>%
                filter(Tissue == TISSUE) %>%
                select(Adjusted_Variance) %>% unique() %>%
                unlist() %>% as.character()
        # Short tissue symbol used in output file names.
        TIS <- df_tbl %>%
                filter(Tissue == TISSUE) %>%
                select(Tis) %>% unique() %>%
                unlist() %>% as.character()
        # Design formula for the DESeq2 model of this tissue.
        FORMULA <- df_tbl %>%
                filter(Tissue == TISSUE) %>%
                select(Formula) %>% unique() %>%
                unlist() %>% as.character() %>% as.formula()
}
#' ## Load & Clean Data
#' ##### Data files to load:
#' * Count Matrix and Metadata Table from:
#' * RNA-Seq from Mt. Sinai
#' * 3 sequencing batches & metadata
#' * RNA-Seq from Stanford
#' * 2 sequencing batches & metadata
#+ Load the Data
################################################################################
##### Load & Clean Data ###############################################
################################################################################
# Files last saved in: 20200309_exploration-rna-seq-phase1_steep.R
if(F) {
# Count matrix
in_file <- paste0(WD,'/data/20200309_rnaseq-countmatrix-pass1a-stanford-sinai_steep.csv')
count_data <- read.table(in_file,sep = ',', header = TRUE,row.names = 1,check.names = FALSE)
# Meatdata table
in_file <- paste0(WD,'/data/20200309_rnaseq-meta-pass1a-stanford-sinai_steep.txt')
col_data <- read.table(in_file, header = TRUE, check.names = FALSE, sep = '\t')
row.names(col_data) <- col_data$sample_key
# Adjust column objects
########################
# To factors
factor_cols <- c('labelid',
'vial_label',
'animal.registration.sex',
'animal.key.exlt4',
'X2D_barcode',
'BID',
'Seq_flowcell_lane',
'Seq_flowcell_run',
'Seq_end_type',
'Lib_UMI_cycle_num',
'pid',
'acute.test.staffid',
'acute.test.siteid',
'acute.test.versionnbr',
'acute.test.contactshock',
'animal.familiarization.staffid',
'animal.familiarization.siteid',
'animal.familiarization.versionnbr',
'animal.familiarization.compliant',
'animal.key.protocol',
'animal.key.agegroup',
'animal.key.batch',
'animal.key.intervention',
'animal.key.sitename',
'animal.registration.staffid',
'animal.registration.siteid',
'animal.registration.versionnbr',
'animal.registration.ratid',
'animal.registration.batchnumber',
'specimen.collection.bloodcomplete',
'specimen.collection.bloodtechid',
'specimen.collection.uterustype',
'specimen.collection.uterustechid',
'specimen.collection.deathtype',
'specimen.processing.versionnbr',
'specimen.processing.siteid',
'bid',
'specimen.processing.samplenumber',
'specimen.processing.techid',
'barcode',
'shiptositeid',
'receivedcas',
'receivestatuscas')
for(fc in factor_cols){
col_data[[fc]] <- as.factor(col_data[[fc]])
}
# To Dates: 03JUL2018
date_cols <- c('acute.test.d_visit',
'acute.test.d_start',
'animal.familiarization.d_visit',
'animal.familiarization.d_treadmillbegin',
'animal.familiarization.d_treadmillcomplete',
'animal.registration.d_visit',
'animal.registration.d_arrive',
'animal.registration.d_reverselight',
'specimen.collection.d_visit',
'animal.registration.d_birth',
'Seq_date')
for(dc in date_cols){
col_data[[dc]] <- ymd(col_data[[dc]])
}
# From Dates: 2/14/2019
date_cols <- c('RNA_extr_date',
'Lib_prep_date')
for(dc in date_cols){
col_data[[dc]] <- mdy(col_data[[dc]])
}
# To Times: 10:30:00
time_cols <- c('acute.test.t_complete',
'specimen.collection.t_anesthesia',
'specimen.collection.t_bloodstart',
'specimen.collection.t_bloodstop',
'specimen.collection.t_edtafill',
'specimen.collection.uteruscomplete',
'specimen.collection.t_uterusstart',
'specimen.collection.t_uterusstop',
'specimen.collection.t_death',
'specimen.processing.t_collection',
'specimen.processing.t_edtaspin',
'specimen.processing.t_freeze',
'acute.test.howlongshock',
'acute.test.t_start')
for(tc in time_cols){
col_data[[tc]] <- col_data[[tc]] %>% as.character() %>% parse_time() %>% as.numeric()
}
# Releveling factors
col_data$animal.key.anirandgroup <- as.character(col_data$animal.key.anirandgroup)
col_data$animal.key.anirandgroup <- factor(col_data$animal.key.anirandgroup,
levels = ec_levels)
# Create a variable for time post exercise
col_data <- col_data %>%
mutate(specimen.collection.t_exercise_hour = case_when(
animal.key.anirandgroup == 'Control - IPE' ~ -1,
animal.key.anirandgroup == 'Control - 7 hr' ~ 7,
animal.key.anirandgroup == 'Exercise - IPE' ~ 0,
animal.key.anirandgroup == 'Exercise - 0.5 hr' ~ 0.5,
animal.key.anirandgroup == 'Exercise - 1 hr' ~ 1,
animal.key.anirandgroup == 'Exercise - 4 hr' ~ 4,
animal.key.anirandgroup == 'Exercise - 7 hr' ~ 7,
animal.key.anirandgroup == 'Exercise - 24 hr' ~ 24,
animal.key.anirandgroup == 'Exercise - 48 hr' ~ 48))
# Take the absolute value of the square root of seconds post exercise (consider negative numbers)
# Make sure to Subtract 1 hour (3600s) from 'Control - IPE' groups to account for exercise effect
col_data <- col_data %>%
mutate(calculated.variables.deathtime_after_acute =
ifelse(animal.key.anirandgroup == 'Control - IPE',
calculated.variables.deathtime_after_acute - 3600,
calculated.variables.deathtime_after_acute))
col_data <- col_data %>%
mutate(specimen.collection.t_exercise_hour_sqrt = ifelse(
calculated.variables.deathtime_after_acute < 0,
(sqrt(abs(calculated.variables.deathtime_after_acute))/60/60)*(-1),
(sqrt(abs(calculated.variables.deathtime_after_acute))/60/60)))
row.names(col_data) <- col_data$sample_key
# Examine histograms
col_data %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=calculated.variables.deathtime_after_acute)) +
geom_histogram(bins = 68)
col_data %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=specimen.collection.t_exercise_hour_sqrt)) +
geom_histogram(bins = 68)
# Generate a time-last-fed variable
col_data <- col_data %>%
mutate(animal_time_last_fed = case_when(
animal.key.anirandgroup %!in% c('Control - 7 hr', 'Exercise - 7 hr') ~ parse_time('8:00'),
(animal.key.anirandgroup %in% c('Control - 7 hr') &
animal.registration.sex == 'Male') ~ parse_time('11:50'),
(animal.key.anirandgroup %in% c('Control - 7 hr') &
animal.registration.sex == 'Female') ~ parse_time('12:30'),
(animal.key.anirandgroup %in% c('Exercise - 7 hr') &
animal.registration.sex == 'Male') ~ parse_time('12:30'),
(animal.key.anirandgroup %in% c('Exercise - 7 hr') &
animal.registration.sex == 'Female') ~ parse_time('12:50')) %>% as.numeric())
# Generate a time-fasted variable
col_data$calculated.variables.deathtime_after_fed <- (col_data$specimen.collection.t_death - col_data$animal_time_last_fed) %>% as.numeric()
# Save data as an R objects
# ################################################################################
# To determine object size
sl <- object.size(count_data)
print(sl, units = 'auto')
# Meta Data
meta_file <- paste0(WD,'/data/20200603_rnaseq-meta-pass1a-stanford-sinai-proc_steep.rds')
saveRDS(col_data, file = meta_file)
# Count Data
count_file <- paste0(WD, '/data/20200603_rnaseq-counts-pass1a-stanford-sinai-processed_steep.rds')
saveRDS(count_data, file = count_file)
}
meta_file <- paste0(WD,'/data/20200603_rnaseq-meta-pass1a-stanford-sinai-proc_steep.rds')
#' #### Polished metadata saved as:
#' `r meta_file`
#'
count_file <- paste0(WD, '/data/20200603_rnaseq-counts-pass1a-stanford-sinai-processed_steep.rds')
#' #### Polished read counts saved as:
#' `r count_file`
# Set a vector for Exercise/Control Levels and Colors
ec_levels <- c('Exercise - IPE',
'Exercise - 0.5 hr',
'Exercise - 1 hr',
'Exercise - 4 hr',
'Exercise - 7 hr',
'Exercise - 24 hr',
'Exercise - 48 hr',
'Control - IPE',
'Control - 7 hr')
ec_colors <- c('gold',
'darkgoldenrod1',
'orange',
'darkorange',
'darkorange2',
'darkorange3',
'darkorange4',
'steelblue1',
'steelblue4')
# Load Metadata and count data as R objects
################################################################################
# Restore the metadata object
meta_file <- paste0(WD,'/data/20200603_rnaseq-meta-pass1a-stanford-sinai-proc_steep.rds')
col_data <- readRDS(file = meta_file)
# Restore the count object
count_file <- paste0(WD, '/data/20200603_rnaseq-counts-pass1a-stanford-sinai-processed_steep.rds')
count_data <- readRDS(file = count_file)
#' #### Retrieve Circadian Genes Associated with Tissue
#' Data from Supplementary Table 2 from:
#' Yan, J., Wang, H., Liu, Y. & Shao, C. Analysis of gene regulatory networks in the mammalian circadian rhythm. PLoS Comput. Biol. 4, (2008).
#' Downloaded 20200326 by Alec Steep
#'
#' ## Place Genes in Genomic Ranges
#'
#' #### Reference Genome and Annotation:
#' Rnor_6.0 (GCA_000001895.4) assembly from Ensembl database (Release 96)
#' Found at: http://uswest.ensembl.org/Rattus_norvegicus/Info/Index.
#'
#' FASTA: Rattus_norvegicus.Rnor_6.0.dna.toplevel.fa.gz
#' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/Rattus_norvegicus.Rnor_6.0.dna.toplevel.fa.gz
#'
#' GTF: Rattus_norvegicus.Rnor_6.0.96.gtf.gz
#' ftp://ftp.ensembl.org/pub/release-96/gtf/rattus_norvegicus/Rattus_norvegicus.Rnor_6.0.96.gtf.gz
#'
#' ## Annotate Genes by Chromosome
#+ Annotate Genes by Chromosome
################################################################################
##### Annotate Genes by Chromosome ###################################
################################################################################
### Determine which control samples are male and female
# Get the list of genes on the W chromosome
# Construct your own personal galgal5 reference genome annotation
# Construct from gtf file from Ensembl (same file used in mapping)
#ens_gtf <- paste0(WD,'/data/Rattus_norvegicus.Rnor_6.0.96.gtf')
#Rn_TxDb <- makeTxDbFromGFF(ens_gtf,
# format=c('gtf'),
# dataSource='Ensembl_Rattus6_gtf',
# organism='Rattus norvegicus',
# taxonomyId=NA,
# circ_seqs=DEFAULT_CIRC_SEQS,
# chrominfo=NULL,
# miRBaseBuild=NA,
# metadata=NULL)
# Save the Rat Genomic Ranges Object
#gf_file <- paste0(WD,'/data/20200603_Rnor-6.0.96-GRanges_steep.sqlite')
#saveDb(Rn_TxDb, file=gf_file)
# To load the annotation
gf_file <- paste0(WD,'/data/20200603_Rnor-6.0.96-GRanges_steep.sqlite')
Rn_TxDb <- loadDb(gf_file)
# Define Female specific sex genes (X chromosome)
# To examine chromosome names
#seqlevels(Rn_TxDb)[1:23]
# Extract genes as GRanges object, then names
X_genes_gr <- genes(Rn_TxDb, columns = 'TXCHROM', filter = list(tx_chrom=c('X')))
# Collect ensembl gene ids for female specific genes
X_ens_id <- names(X_genes_gr)
# Examine the gene symbols
X_sym <- mapIds(org.Rn.eg.db, names(X_genes_gr), 'SYMBOL', 'ENSEMBL')
# Extract genes as GRanges object, then names
Y_genes_gr <- genes(Rn_TxDb, columns = 'TXCHROM', filter = list(tx_chrom=c('Y')))
# Collect ensembl gene ids for female specific genes
Y_ens_id <- names(Y_genes_gr)
sex_ens_id <- c(X_ens_id,Y_ens_id)
# Examine the gene symbols
Y_sym <- mapIds(org.Rn.eg.db, names(Y_genes_gr), 'SYMBOL', 'ENSEMBL')
#' ## Collect Samples of Interest and Normalize
#+ Collect Samples of Interest and Normalize
################################################################################
##### Collect Samples of Interest and Normalize #######################
################################################################################
# Filter Samples (meta)
if(TISSUE == c('Gastrocnemius_MSSM_1')){
tod_cols <- col_data %>%
filter(Tissue == 'Gastrocnemius') %>%
filter(Seq_batch == 'MSSM_1') %>%
filter(!is.na(animal.registration.sex))
}else if(TISSUE == c('Gastrocnemius_Stanford_1')){
tod_cols <- col_data %>%
filter(Tissue == 'Gastrocnemius') %>%
filter(Seq_batch == 'Stanford_1') %>%
filter(!is.na(animal.registration.sex))
}else{
# Filter Samples (meta)
tod_cols <- col_data %>%
filter(Tissue == TISSUE) %>%
filter(!is.na(animal.registration.sex))
}
rownames(tod_cols) <- tod_cols$sample_key
# Time post exercise
tod_cols <- tod_cols %>%
mutate(specimen.collection.t_exercise_hour = case_when(
animal.key.anirandgroup == 'Control - IPE' ~ -1,
animal.key.anirandgroup == 'Control - 7 hr' ~ 7,
animal.key.anirandgroup == 'Exercise - IPE' ~ 0,
animal.key.anirandgroup == 'Exercise - 0.5 hr' ~ 0.5,
animal.key.anirandgroup == 'Exercise - 1 hr' ~ 1,
animal.key.anirandgroup == 'Exercise - 4 hr' ~ 4,
animal.key.anirandgroup == 'Exercise - 7 hr' ~ 7,
animal.key.anirandgroup == 'Exercise - 24 hr' ~ 24,
animal.key.anirandgroup == 'Exercise - 48 hr' ~ 48))
# Convert to seconds and take the square root (consider negative numbers)
tod_cols$specimen.collection.t_exercise_seconds <-
tod_cols$specimen.collection.t_exercise_hour * 60 * 60
tod_cols <- tod_cols %>%
mutate(specimen.collection.t_exercise_hour_sqrt = ifelse(
specimen.collection.t_exercise_seconds < 0,
(sqrt(abs(specimen.collection.t_exercise_seconds))/60/60)*(-1),
(sqrt(abs(specimen.collection.t_exercise_seconds))/60/60)))
tod_cols$specimen.collection.t_exercise_hour_sqrt_jit <-
jitter(tod_cols$specimen.collection.t_exercise_hour_sqrt,
factor = 0.1)
row.names(tod_cols) <- tod_cols$sample_key
# Examine histograms
tod_cols %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=specimen.collection.t_exercise_hour)) +
geom_histogram(bins = 68)
tod_cols %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=specimen.collection.t_exercise_seconds)) +
geom_histogram(bins = 68)
tod_cols %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=specimen.collection.t_exercise_hour_sqrt)) +
geom_histogram(bins = 68)
tod_cols %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=specimen.collection.t_exercise_hour_sqrt_jit)) +
geom_histogram(bins = 68)
# Collect samples without NA values in TOD, dropping flagged outliers.
# BUGFIX: `sample_key != OUTLIERS` recycles element-wise and gives wrong
# results whenever OUTLIERS holds more than one sample key; use %in%.
nona_sams <- tod_cols %>%
        filter(!is.na(specimen.collection.t_death_hour)) %>%
        filter(!(sample_key %in% OUTLIERS)) %>%
        filter(!is.na(animal.registration.sex)) %>%
        select(sample_key) %>% unlist() %>% as.character()
# Collect tissue specific counts (columns = retained samples).
tod_counts <- count_data[, nona_sams]
#' #### Sanity Check: Ensure that the metadata rownames are identical to count matrix column names
all(rownames(tod_cols) == colnames(tod_counts))
# Create a design formula and load counts and supporting annotation into an S4 object (DESeq infrastructure)
#design = ~1 # Primary variable needs to be last.
design <- FORMULA
(title = paste0('Design: ',as.character(design)) )
dds1 <- DESeqDataSetFromMatrix(countData = tod_counts,
colData = tod_cols,
design = design)
# Reasoning from:
#citation("PROPER")
#dds
#' #### We remove genes with an average sequencing depth of 10 or less
#' Before Filtering
# dds1
zero_n <- dds1[(rowSums(counts(dds1))/ncol(dds1) < 1), ] %>%
nrow() %>% as.character()
reads_n <- 1
keep <- rowSums(counts(dds1))/ncol(dds1) >= reads_n
dds2 <- dds1[keep,]
#' #### Summary of counts and annotation data in a DESeqDataSet after filtering out genes with low sequencing depth
#' TODO: Critic from Jun: Here we are removing features that have a low average expression. This may be removing important features that might have zero counts in some samples and higher counts in specific groups. Consider developing an algorithm that will account for features with expression in n or more samples.
dds2
filter_n <- nrow(dds1) - nrow(dds2) - as.numeric(zero_n)
filter_p <- filter_n/(nrow(dds1) - as.numeric(zero_n))
total_n <- nrow(dds1) - nrow(dds2)
#' ##### Note: Number of genes with average counts between zero and 1 is `r zero_n` but removing reads with less than or equal to `r reads_n` removes an additional `r filter_n` features or removes `r filter_p*100`% of the non-zero reads (total of `r total_n` features removed).
dds <- dds2
#' To see the reads per million for each sample
sort(colSums(assay(dds)))/1e6
# estimateSizeFactors gives us a robust estimate in sequencing depth
dds <- estimateSizeFactors(dds)
#' Size facotrs are generally around 1 (scaled) and calculated using the median and are robust to genes with large read counts
summary(sizeFactors(dds))
rld <- DESeq2::vst(dds, blind = F)
#' Regularized Log (rlog) Transform
for(n in 1){
start_time <- Sys.time()
#rld <- DESeq2::rlog(dds, blind = F)
end_time <- Sys.time()
print(end_time - start_time)
}
# This command is redundent, but included for safety
rs <- rowSums(counts(dds))
#' #### We see just how well duplicate samples correlate regardless of sequencing batch
mypar()
pcaData <- DESeq2::plotPCA(rld,
intgroup=c("animal.key.anirandgroup",
"animal.registration.sex",
"sample_key"),
returnData=TRUE, ntop = 500)
percentVar <- round(100 * attr(pcaData, "percentVar"))
#pdf(paste0(WD,"/plots/20200505_rnaseq-",TIS,"-PCA-naive-modeling_steep.pdf"),
# width = 6, height = 4)
ggplot(pcaData, aes(PC1, PC2, color=animal.key.anirandgroup,shape=animal.registration.sex)) +
geom_point(size=3) +
#geom_label_repel(aes(label=sample_key),hjust=0, vjust=0) +
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance")) +
#coord_fixed() +
ggtitle(paste0("PCA of ",TISSUE," Gene Expression:\nNaive Model (~ 1)")) +
guides(color=guide_legend(title="animal.key.anirandgroup")) +
scale_color_manual(values=ec_colors) +
theme(legend.title=element_blank())
#dev.off()
#' ### Adjust Variance
#+ Adjust Variance
################################################################################
########### Adjust Variance #######################################
################################################################################
if(ADJ_VAR != 'None'){
for(adj_var in ADJ_VAR){
# Duplicate the rld object
rld_final <- rld
# The batch effect can only be removed with limma
# https://support.bioconductor.org/p/76099/ (See Michael Love's Comment)
assay(rld_final) <- limma::removeBatchEffect(assay(rld), rld[[adj_var]])
# Examine the primary variable of interest to see if we've solved our issue
# Before:
p <- DESeq2::plotPCA(rld, intgroup =adj_var) +
guides(color=guide_legend(title=adj_var))
plot(p)
# After
p <- DESeq2::plotPCA(rld_final, intgroup = adj_var, ntop = 500) +
guides(color=guide_legend(title=adj_var))
plot(p)
rld <- rld_final
}
}
#### We see just how well duplicate samples correlate regardless of sequencing batch
mypar()
pcaData <- DESeq2::plotPCA(rld,
intgroup=c("animal.key.anirandgroup",
"animal.registration.sex",
"sample_key"),
returnData=TRUE, ntop = 20000)
percentVar <- round(100 * attr(pcaData, "percentVar"))
#pdf(paste0(WD,"/plots/20200426_rnaseq-",TIS,"-PCA-sexmod-modeling_steep.pdf"),
# width = 6, height = 4)
ggplot(pcaData, aes(PC1, PC2, color=animal.key.anirandgroup,shape=animal.registration.sex)) +
geom_point(size=3) +
#geom_label_repel(aes(label=sample_key),hjust=0, vjust=0) +
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance")) +
#coord_fixed() +
ggtitle(paste0("PCA of ",TISSUE)) +
guides(color=guide_legend(title="animal.key.anirandgroup")) +
scale_color_manual(values=ec_colors) +
theme(legend.title=element_blank())
#dev.off()
#' #### Annotate Data for Modeling By Cluster
#+ Annotate Data for Modeling By Cluster
################################################################################
################ Annotate Data for Modeling By Cluster #########################
################################################################################
time_cols <- tod_cols %>%
filter(sample_key %in% nona_sams)
time_cols %>%
ggplot(aes(x = specimen.collection.t_death_hour, fill = animal.key.anirandgroup)) +
geom_bar(breaks = seq(0, 24), width = 2, colour = "grey") +
coord_polar(start = 0) +
theme_minimal() +
ggtitle("Death of Experimental Groups by Time of Day") +
scale_x_continuous("", limits = c(0, 24),
breaks = seq(0, 24),
labels = seq(0, 24)) +
theme(legend.title = element_blank()) +
scale_fill_manual(values=ec_colors[1:9]) +
theme(axis.title.y =element_blank(),
axis.text.y =element_blank(),
axis.ticks.y=element_blank())
# Select the normailzed counts
tod_counts <- assay(rld)
t_counts <- setNames(melt(tod_counts),
c('ENSEMBL_RAT', 'sample_key', 'count'))
# Join the dataframes and nest
by_gene_df <- tod_cols %>%
left_join(t_counts, by = "sample_key") %>%
filter(animal.key.anirandgroup %!in% c('Control - 7 hr')) %>%
group_by(ENSEMBL_RAT) %>%
arrange(sample_key) %>%
nest()
# # Add Cluster and Circ Status
# by_gene_df <- by_gene_df %>%
# mutate(CIRC = ifelse(ENSEMBL_RAT %in% circ_df$ENSEMBL_RAT, 'CIRC', 'NON-CIRC'))
# Add the gene symbol
by_gene_df$SYMBOL_RAT = mapIds(org.Rn.eg.db, as.character(by_gene_df$ENSEMBL_RAT), "SYMBOL", "ENSEMBL")
# Join the dataframes and nest
by_gene_df7 <- tod_cols %>%
left_join(t_counts, by = "sample_key") %>%
filter(animal.key.anirandgroup %in% c('Control - 7 hr')) %>%
group_by(ENSEMBL_RAT) %>%
arrange(sample_key) %>%
nest()
# Add Cluster and Circ Status
# by_gene_df7 <- by_gene_df7 %>%
# left_join(mosaic_df, by = "ENSEMBL_RAT") %>%
# filter(!is.na(CIRC)) # This filter removes genes that did not pass variance filter (MANOVA)
# Add the gene symbol
by_gene_df7$SYMBOL_RAT = mapIds(org.Rn.eg.db, as.character(by_gene_df7$ENSEMBL_RAT), "SYMBOL", "ENSEMBL")
# Cleanup
#rm(count_data, col_data, counts_centered, dds ,dds1,dds2,F_centered,F_counts,
# M_centered,M_counts,pcaData,t_counts)
# TODO: The entire dataframe cannot be unnested. It may be a memory issue or it may be a corrupted row -- come back and troubleshoot. For now, randomly select 2K rows to continue analysis.
#by_gene_df2 <- by_gene_df
#by_gene_df72 <- by_gene_df7
#by_gene_df <- by_gene_df2
#by_gene_df7 <- by_gene_df72
# Load in the DE genes between C0 and C7 for the appropriate tissue
# (table produced by the earlier differential-expression script; TIS is the
# short tissue symbol set above)
in_file <- paste0(WD, '/data/20200426_rnaseq-',TIS,'-DEgenes-sexmod-C0vsC7_steep.txt')
# NOTE(review): prefer header = TRUE over header = T (T is reassignable)
de_df <- read.table(in_file, sep = '\t', header = T)
# Take all the differentially expressed genes (BH-adjusted p <= 0.05)
de_all <- de_df %>%
arrange(padj) %>%
filter(padj <= 0.05) %>%
select(ENSEMBL_RAT) %>%
unlist() %>% as.character()
# Take the top 50 differentially expressed genes by adjusted p-value
de_top <- de_df %>%
arrange(padj) %>%
select(ENSEMBL_RAT) %>%
dplyr::slice(1:50) %>%
unlist() %>% as.character()
# Top 20, used for the labeled scatter plots further down
de_top20 <- de_df %>%
arrange(padj) %>%
select(ENSEMBL_RAT) %>%
dplyr::slice(1:20) %>%
unlist() %>% as.character()
#randomRows = sample(1:nrow(by_gene_df[,1]), 500, replace=F)
#by_gene_df <- by_gene_df[randomRows, ]
#by_gene_df7 <- by_gene_df7[randomRows, ]
# Restrict both frames to the DE genes (padj <= 0.05 between C0 and C7)
by_gene_df <- by_gene_df %>% filter(ENSEMBL_RAT %in% de_all)
by_gene_df7 <- by_gene_df7 %>% filter(ENSEMBL_RAT %in% de_all)
# The exercise and 7-hr-control frames must align gene-for-gene; fail fast
# instead of printing TRUE/FALSE that can scroll by unnoticed.
stopifnot(all(by_gene_df$ENSEMBL_RAT == by_gene_df7$ENSEMBL_RAT))
# Generate model functions for the dataframes
################################################################################
gam_mod <- function(df) {
  # Natural-cubic-spline fit of expression (`count`) against the
  # sqrt-transformed, jittered hours-post-exercise variable.
  # Note: despite the name this is an lm() with splines::ns(), not a mgcv GAM.
  spline_form <- count ~ ns(specimen.collection.t_exercise_hour_sqrt_jit, df = 4)
  lm(spline_form, data = df)
}
################################################################################
# Generate a model function for the dataframes
################################################################################
sin_mod <- function(df) {
# Harmonic (cosinor-style) regression of expression on time of death.
# SIN()/COS() are project helpers sourced from functions/sin.R and
# functions/cos.R above -- presumably period-24 wrappers around sin()/cos();
# TODO confirm against those files.
lm(count ~ SIN(specimen.collection.t_death_hour) +
COS(specimen.collection.t_death_hour),
data = df)
}
################################################################################
# Generalized Additive Model (Exercise)
################################################################################
# Fit the natural-spline (exercise-time) model per gene and store the model,
# its ANOVA table, residuals, glance() metrics, and summary as list-columns.
# (Consolidated into one mutate; column order matches the original chain.)
by_gene_df <- by_gene_df %>%
mutate(gam_model = map(data, gam_mod),
gam_ANOVA = map(gam_model, anova),
gam_resid = map2(data, gam_model, modelr::add_residuals),
gam_metrics = map(gam_model, broom::glance),
gam_summary = map(gam_model, summary))
# Flat per-gene metrics table (r.squared, p.value, ...). Reuse the stored
# list-column instead of re-running broom::glance() on every model, as the
# original did.
gam_metrics <- by_gene_df %>%
unnest(gam_metrics)
# SIN/COS Model
################################################################################
# Fit the SIN/COS (circadian) model per gene; store model, ANOVA, residuals,
# glance() metrics, and summary as list-columns in one pass.
by_gene_df <- by_gene_df %>%
mutate(sin_model = map(data, sin_mod),
sin_ANOVA = map(sin_model, anova),
sin_resid = map2(data, sin_model, modelr::add_residuals),
sin_metrics = map(sin_model, broom::glance),
sin_summary = map(sin_model, summary))
# Flat per-gene metrics table; reuse the stored list-column rather than
# calling broom::glance() a second time (the original computed it twice).
sin_metrics <- by_gene_df %>%
unnest(sin_metrics)
# Order genes by descending adjusted R^2 of the circadian model
row_order <- by_gene_df %>%
unnest(sin_metrics) %>%
arrange(desc(adj.r.squared)) %>%
select(ENSEMBL_RAT) %>%
unlist() %>% as.character()
by_gene_df <- by_gene_df %>%
ungroup() %>%
arrange(factor(ENSEMBL_RAT, levels = row_order))
# Add per-gene predictions from the natural-spline (exercise) model.
# For each gene, build an evenly spaced grid spanning the observed
# (transformed) post-exercise times, predict on it, and keep the observed
# times/counts alongside for plotting.
genes <- by_gene_df$ENSEMBL_RAT %>% as.character() %>% unique()
# Rows must already be one-per-gene and in `genes` order; assert rather than
# print (the original printed the check result without acting on it).
stopifnot(all(genes == by_gene_df$ENSEMBL_RAT))
gam_pred <- vector("list", length(genes))  # preallocate; no growing in the loop
for (i in seq_along(genes)) {
g <- genes[i]
# Per-gene observed data (unnested)
sub_data <- (by_gene_df %>%
filter(ENSEMBL_RAT == g) %>%
ungroup() %>%
select(data))[[1]] %>% as.data.frame()
# Grid with as many points as observations, over the observed range
grid <- data.frame(specimen.collection.t_exercise_hour_sqrt_jit =
modelr::seq_range(
sub_data$specimen.collection.t_exercise_hour_sqrt_jit, n = length(sub_data$specimen.collection.t_exercise_hour_sqrt_jit)))
# The fitted lm for this gene (dead `summary(mod)` call removed)
mod <- (by_gene_df %>%
filter(ENSEMBL_RAT == g) %>% ungroup() %>%
select(gam_model))[[1]][[1]]
grid <- modelr::add_predictions(grid, mod, "pred") %>% as_tibble()
names(grid)[1] <- "grid_t_exercise_hour_sqrt_jit"
# Carry the observed values next to the grid predictions
grid$specimen.collection.t_exercise_hour_sqrt_jit <-
sub_data$specimen.collection.t_exercise_hour_sqrt_jit
grid$count <- sub_data$count
gam_pred[[i]] <- grid
}
by_gene_df$gam_pred <- gam_pred
# Add per-gene predictions from the SIN/COS (circadian) model, on a grid over
# the observed time-of-death range (same grid length as the observed data).
genes <- by_gene_df$ENSEMBL_RAT %>% as.character() %>% unique()
# Assert gene order rather than printing the check (original printed only)
stopifnot(all(genes == by_gene_df$ENSEMBL_RAT))
sin_pred <- vector("list", length(genes))  # preallocate; no growing in the loop
for (i in seq_along(genes)) {
g <- genes[i]
# Per-gene observed data (unnested)
sub_data <- (by_gene_df %>%
filter(ENSEMBL_RAT == g) %>%
ungroup() %>%
select(data))[[1]] %>% as.data.frame()
# Grid over observed death hours; n taken from the exercise-hour column as in
# the original (both columns have one entry per sample)
grid <- data.frame(specimen.collection.t_death_hour =
modelr::seq_range(
sub_data$specimen.collection.t_death_hour,
n = length(sub_data$specimen.collection.t_exercise_hour_sqrt_jit)))
# The fitted lm for this gene (dead `summary(mod)` call removed)
mod <- (by_gene_df %>%
filter(ENSEMBL_RAT == g) %>% ungroup() %>%
select(sin_model))[[1]][[1]]
grid <- modelr::add_predictions(grid, mod, "pred") %>% as_tibble()
names(grid)[1] <- "grid_t_death_hour"
# Round so grid hours can later be matched against the sacrifice hours
grid$grid_t_death_hour <- round(grid$grid_t_death_hour, digits = 1)
grid$specimen.collection.t_death_hour <-
sub_data$specimen.collection.t_death_hour
grid$count <- sub_data$count
sin_pred[[i]] <- grid
}
by_gene_df$sin_pred <- sin_pred
# Row index (after the R^2 sort) of the gene to visualize below
gene_n <- 1
# Per-gene prediction frame for the natural-spline model.
# any_of("CIRC") tolerates the CIRC annotation being absent (its creation is
# commented out above); the original bare `CIRC` select would error.
gam_pred_df <- by_gene_df %>%
select(ENSEMBL_RAT, SYMBOL_RAT, data, any_of("CIRC"), gam_model, gam_pred) %>%
ungroup() %>%
filter(row_number() == gene_n) %>%
unnest(gam_pred)
gam_gene <- unique(gam_pred_df$ENSEMBL_RAT) %>% as.character()
# Per-gene prediction frame for the SIN/COS model
sin_pred_df <- by_gene_df %>%
select(ENSEMBL_RAT, SYMBOL_RAT, data, any_of("CIRC"), sin_model, sin_pred) %>%
ungroup() %>%
filter(row_number() == gene_n) %>%
unnest(sin_pred)
sin_gene <- unique(sin_pred_df$ENSEMBL_RAT) %>% as.character()
# Both frames must describe the same gene; assert instead of printing
stopifnot(gam_gene == sin_gene)
# Collect the Control - 7 hr data points for the chosen gene (plot overlay)
gam_hr7_df <- by_gene_df7 %>%
filter(ENSEMBL_RAT == gam_gene) %>%
select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
ungroup() %>%
unnest(data) %>%
select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_exercise_hour_sqrt_jit, count)
# Collect the Control - IPE data points (header says "1 hr"; the filter below
# actually selects "Control - IPE")
gam_hr1_df <- by_gene_df %>%
filter(ENSEMBL_RAT == gam_gene) %>%
select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
ungroup() %>%
unnest(data) %>%
filter(animal.key.anirandgroup == "Control - IPE") %>%
select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_exercise_hour_sqrt_jit, count)
# Hours of death (rounded to 0.1 h) at which any animal was sacrificed
sac_hrs <- sort(unique(round(tod_cols$specimen.collection.t_death_hour, digits = 1)))
#names(tod_cols)
#e_df <- tod_cols %>%
# select(specimen.collection.t_death_hour, specimen.collection.t_exercise_hour_sqrt) %>%
# arrange(specimen.collection.t_death_hour)
#e_df$specimen.collection.t_death_hour <- round(e_df$specimen.collection.t_death_hour, digits = 1)
# Perform a second prediction (for hour post exercise -- predictions from SIN Model)
# Maps each rounded death hour to its (sqrt-transformed) hours-post-exercise
# coordinate so the circadian prediction can be overlaid on exercise-time plots.
# NOTE(review): this mapping is hard-coded for this cohort's sacrifice
# schedule and must be regenerated if the schedule changes. The == comparisons
# rely on both sides being round()-ed to one decimal -- confirm; exact float
# equality is otherwise fragile.
sin_pred_hour <- sin_pred_df %>%
filter(round(grid_t_death_hour, digits = 1) %in% sac_hrs) %>%
mutate(grid_t_exercise_hour = case_when(grid_t_death_hour == 9.9 ~ -0.01666667,
grid_t_death_hour == 10.0 ~ -0.01666667,
grid_t_death_hour == 10.2 ~ -0.01666667,
grid_t_death_hour == 10.3 ~ -0.01666667,
grid_t_death_hour == 10.6 ~ 0.08164966,
grid_t_death_hour == 10.9 ~ 0.08164966,
grid_t_death_hour == 11.2 ~ 0.11547005,
grid_t_death_hour == 11.6 ~ 0.11547005,
grid_t_death_hour == 11.9 ~ 0.01178511,
grid_t_death_hour == 12.2 ~ 0.01178511,
grid_t_death_hour == 12.3 ~ 0.01178511,
grid_t_death_hour == 13.2 ~ 0.00000000,
grid_t_death_hour == 13.3 ~ 0.00000000,
grid_t_death_hour == 13.6 ~ 0.00000000,
grid_t_death_hour == 13.9 ~ 0.01666667,
grid_t_death_hour == 14.2 ~ 0.01666667,
grid_t_death_hour == 14.3 ~ 0.01666667,
grid_t_death_hour == 14.6 ~ 0.03333333,
grid_t_death_hour == 14.9 ~ 0.03333333,
grid_t_death_hour == 16.9 ~ 0.04409586,
grid_t_death_hour == 17.2 ~ 0.04409586,
grid_t_death_hour == 17.3 ~ 0.04409586,
grid_t_death_hour == 17.6 ~ 0.04409586,
grid_t_death_hour == 17.9 ~ 0.04409586))
# Visualize the raw counts, model predictions, and control 7 counts (x=Hours Post Exercise)
# Four progressively richer panels of the same gene follow.
# Panel 1: raw counts only; dashed vertical line marks end of exercise (t = 0)
gam_pred_df %>%
ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = ENSEMBL_RAT) +
geom_point() +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(gam_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Hours Post Acute Exercise (Transformed)") +
geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Panel 2: raw counts plus the spline-model fit (blue line)
gam_pred_df %>%
ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = ENSEMBL_RAT) +
geom_point() +
geom_line(data = gam_pred_df,
aes(grid_t_exercise_hour_sqrt_jit, pred),
size = 1, alpha = 0.8, color = "blue") +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(gam_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Hours Post Acute Exercise (Transformed)") +
geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Panel 3: add the Control - 7 hr points (red)
gam_pred_df %>%
ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = ENSEMBL_RAT) +
geom_point() +
geom_line(data = gam_pred_df,
aes(grid_t_exercise_hour_sqrt_jit, pred),
size = 1, alpha = 0.8, color = "blue") +
geom_point(data = gam_hr7_df,
mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = "red") +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(gam_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Hours Post Acute Exercise (Transformed)") +
geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Panel 4: exercise points in red, both control groups in blue
gam_pred_df %>%
ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = ENSEMBL_RAT) +
geom_point(color = "red", alpha = 1) +
geom_line(data = gam_pred_df,
aes(grid_t_exercise_hour_sqrt_jit, pred),
size = 1, alpha = 0.8, color = "blue") +
geom_point(data = gam_hr7_df,
mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = "blue") +
geom_point(data = gam_hr1_df,
mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = "blue") +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(gam_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Hours Post Acute Exercise (Transformed)") +
geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Panel 5: overlay the circadian-model predictions at sacrifice hours (orange)
gam_pred_df %>%
ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = ENSEMBL_RAT) +
geom_point() +
geom_line(data = gam_pred_df,
aes(grid_t_exercise_hour_sqrt_jit, pred),
size = 1, alpha = 0.8, color = "blue") +
geom_point(data = gam_hr7_df,
mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
color = "red") +
geom_point(data = sin_pred_hour,
mapping = aes(x = grid_t_exercise_hour, y = pred),
size = 3, alpha = 1, color = "orange") +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(gam_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Hours Post Acute Exercise (Transformed)") +
geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Visualize the raw counts, model predictions, and control 7 counts (x=Hours Post Exercise)
# Collect the Control - 7 hr data points on the time-of-death axis
sin_hr7_df <- by_gene_df7 %>%
filter(ENSEMBL_RAT == sin_gene) %>%
select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
ungroup() %>%
unnest(data) %>%
select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_death_hour, count)
# Collect the Control - IPE data points (header says "1 hr"; filter selects IPE)
sin_hr1_df <- by_gene_df %>%
filter(ENSEMBL_RAT == sin_gene) %>%
select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
ungroup() %>%
unnest(data) %>%
filter(animal.key.anirandgroup == "Control - IPE") %>%
select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_death_hour, count)
# Visualize the raw counts, model predictions, and control 7 counts
# (x = Time of Death). Four progressively richer panels follow.
# Panel 1: raw counts only
sin_pred_df %>%
ggplot(aes(specimen.collection.t_death_hour, count),
color = ENSEMBL_RAT) +
geom_point() +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(sin_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Time of Death (Hour)")
# Panel 2: raw counts plus the SIN/COS fit (orange line)
sin_pred_df %>%
ggplot(aes(specimen.collection.t_death_hour, count),
color = ENSEMBL_RAT) +
geom_point() +
geom_line(data = sin_pred_df,
aes(grid_t_death_hour, pred),
size = 1, alpha = 0.8, color = "orange") +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(sin_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Time of Death (Hour)")
# Panel 3: add the Control - 7 hr points (red)
sin_pred_df %>%
ggplot(aes(specimen.collection.t_death_hour, count),
color = ENSEMBL_RAT) +
geom_point() +
geom_line(data = sin_pred_df,
aes(grid_t_death_hour, pred),
size = 1, alpha = 0.8, color = "orange") +
geom_point(data = sin_hr7_df,
mapping = aes(specimen.collection.t_death_hour, count),
color = "red") +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(sin_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Time of Death (Hour)")
# Panel 4: exercise points in red, both control groups in blue
sin_pred_df %>%
ggplot(aes(specimen.collection.t_death_hour, count),
color = ENSEMBL_RAT) +
geom_point(color = "red") +
geom_line(data = sin_pred_df,
aes(grid_t_death_hour, pred),
size = 1, alpha = 0.8, color = "orange") +
geom_point(data = sin_hr7_df,
mapping = aes(specimen.collection.t_death_hour, count),
color = "blue") +
geom_point(data = sin_hr1_df,
mapping = aes(specimen.collection.t_death_hour, count),
color = "blue") +
theme(legend.position = "none") +
ggtitle(
paste0("Expression of ",
unique(sin_pred_df$SYMBOL_RAT),
":\nExercise Groups & Control IPE (",TISSUE,")")) +
ylab("Expression (Transformed/Normalized)") +
xlab("Time of Death (Hour)")
# Inspect the fitted SIN/COS model for the chosen gene.
# (The original comment said "gam" but the code pulls sin_model, and selected
# it twice; this pulls the model once.)
(by_gene_df %>%
dplyr::slice(gene_n) %>%
ungroup() %>%
pull(sin_model))[[1]] %>%
anova()
# One-row glance() metrics for each model of the chosen gene
by_gene_df[gene_n,"gam_metrics"][[1]][[1]]
by_gene_df[gene_n,"sin_metrics"][[1]][[1]]
# Visualize the model ANOVA summary
#by_gene_df$gam_ANOVA[[1]]
#by_gene_df$sin_ANOVA[[1]]
################################################################################
# Compare R^2 and p-values between the circadian (SIN/COS) and exercise
# (natural-spline) models, gene by gene.
sin_metrics$r.squared.sin <- sin_metrics$r.squared
sin_metrics$p.value.sin <- sin_metrics$p.value
gam_metrics$r.squared.gam <- gam_metrics$r.squared
gam_metrics$p.value.gam <- gam_metrics$p.value
# any_of("CIRC") tolerates the CIRC annotation being absent (its creation is
# commented out earlier); the original bare `CIRC` select would error.
model_metrics <- sin_metrics %>%
select(ENSEMBL_RAT, r.squared.sin, p.value.sin) %>%
left_join(gam_metrics, by = "ENSEMBL_RAT") %>%
select(ENSEMBL_RAT, SYMBOL_RAT, any_of("CIRC"), r.squared.sin, p.value.sin,
r.squared.gam, p.value.gam)
# Join the log fold change for sake of graph annotation
de_join <- de_df %>%
select(ENSEMBL_RAT, log2FoldChange)
# Annotate direction of change and membership in the top-50 DE list
model_metrics <- model_metrics %>%
ungroup() %>%
left_join(de_join, by = "ENSEMBL_RAT") %>%
mutate(Expression = factor(ifelse(log2FoldChange > 0, "UP", "DOWN"),
levels = c("UP", "DOWN"))) %>%
mutate(top_de = ifelse(ENSEMBL_RAT %in% de_top, "TOP GENE", "NOT TOP GENE"))
# Subsets used by the labeled scatter plots below
top_metrics <- model_metrics %>%
filter(ENSEMBL_RAT %in% de_top)
top20_metrics <- model_metrics %>%
filter(ENSEMBL_RAT %in% de_top20)
gene_metrics <- model_metrics %>%
filter(SYMBOL_RAT %in% c("Arntl","Pdk4"))
# Compare per-gene R^2 of the two models; points above the identity line are
# better explained by the circadian model, below by the exercise model.
# Plot 1: all DE genes
ggplot(model_metrics, aes(r.squared.gam, r.squared.sin)) +
geom_point(alpha = 0.4) +
xlim(0,1) + ylim(0,1) +
geom_abline(intercept = 0, slope = 1) +
xlab("R^2 Natural Spline Model (Exercise)") +
ylab("R^2 SIN/COS Model (Circadian)") +
ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
coord_equal()
# Plot 2: highlight and label genes of interest (Arntl = core clock, Pdk4)
d <- model_metrics %>%
mutate(gene_label = ifelse(SYMBOL_RAT %in% c("Arntl","Pdk4"), SYMBOL_RAT, ''))
ggplot(d, aes(r.squared.gam, r.squared.sin)) +
geom_point(alpha = 0.2) +
xlim(0,1) + ylim(0,1) +
geom_abline(intercept = 0, slope = 1) +
xlab("R^2 Natural Spline Model (Exercise)") +
ylab("R^2 SIN/COS Model (Circadian)") +
ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
geom_point(data = gene_metrics,
mapping = aes(r.squared.gam, r.squared.sin),
alpha = 1) +
geom_label_repel(data = d,
mapping = aes(label=gene_label), alpha = 0.8,
hjust=0, vjust=0) +
coord_equal()
# Plot 3: color the top-50 DE genes by direction of change
ggplot(model_metrics, aes(r.squared.gam, r.squared.sin)) +
geom_point(alpha = 0.1) +
xlim(0,1) + ylim(0,1) +
geom_abline(intercept = 0, slope = 1) +
xlab("R^2 Natural Spline Model (Exercise)") +
ylab("R^2 SIN/COS Model (Circadian)") +
ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
geom_point(data = top_metrics,
mapping = aes(r.squared.gam, r.squared.sin, color = Expression),
alpha = 1)
# Plot 4: label the top-20 DE genes
ggplot(model_metrics, aes(r.squared.gam, r.squared.sin)) +
geom_point(alpha = 0.1) +
xlim(0,1) + ylim(0,1) +
geom_abline(intercept = 0, slope = 1) +
xlab("R^2 Natural Spline Model (Exercise)") +
ylab("R^2 SIN/COS Model (Circadian)") +
ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
geom_point(data = top20_metrics,
mapping = aes(r.squared.gam, r.squared.sin, color = Expression),
alpha = 1)+
geom_label_repel(data = top20_metrics,
mapping = aes(label=SYMBOL_RAT), alpha = 0.8,
hjust=0, vjust=0)
#'
#' #' ### Adjust for Between Sex Variance
#'
#' #+ Adjust for Between Sex Variance
#' ################################################################################
#' ########### Adjust for Between Sex Variance ###################################
#' ################################################################################
#'
#' # "To adjust for batch effects, we median- centered the expression levels of each transcript within each batch and confirmed, using the correlation matrices, that the batch effects were removed after the adjustment."
#' #~ Li, J. Z. et al. Circadian patterns of gene expression in the human brain and disruption in major depressive disorder. Proc. Natl. Acad. Sci. U. S. A. 110, 9950–9955 (2013).
#'
#' # Here we have 2 Groups: Control - IPE and Control 7 hr; we'll median center these groups to combine the sexes.
#'
#' M_samples <- col_data %>%
#' filter(Tissue == TISSUE) %>%
#' filter(!is.na(animal.registration.sex)) %>%
#' filter(animal.registration.sex == 'Male') %>%
#' filter(sample_key != OUTLIERS) %>%
#' #filter(animal.key.anirandgroup %!in% c('Control - 7 hr')) %>%
#' select(sample_key) %>% unlist() %>% as.character()
#' F_samples <- col_data %>%
#' filter(Tissue == TISSUE) %>%
#' filter(!is.na(animal.registration.sex)) %>%
#' filter(sample_key != OUTLIERS) %>%
#' filter(animal.registration.sex == 'Female') %>%
#' #filter(animal.key.anirandgroup %!in% c('Control - 7 hr')) %>%
#' select(sample_key) %>% unlist() %>% as.character()
#' # Select the counts
#' M_counts <- assay(rld[, M_samples])
#' F_counts <- assay(rld[, F_samples])
#'
#' # Median Center data
#' # Collects median of each row, then subtracts by row medians
#' M_medians <- apply(M_counts,1,median)
#' M_centered <- M_counts - M_medians
#' F_medians <- apply(F_counts,1,median)
#' F_centered <- F_counts - F_medians
#' counts_centered <- cbind(M_centered, F_centered)
#' counts_centered <- counts_centered[, colnames(assay(rld))]
#' assay(rld) <- counts_centered
#'
#' #' #### We see just how well duplicate samples correlate regardless of sequencing batch
#' mypar()
#' pcaData <- DESeq2::plotPCA(rld,
#' intgroup=c("animal.key.anirandgroup",
#' "animal.registration.sex",
#' "sample_key"),
#' returnData=TRUE, ntop = 500)
#' percentVar <- round(100 * attr(pcaData, "percentVar"))
#' pdf(paste0(WD,"/plots/20200426_rnaseq-",TIS,"-PCA-sexmod-modeling_steep.pdf"),
#' width = 6, height = 4)
#' ggplot(pcaData, aes(PC1, PC2, color=animal.key.anirandgroup,shape=animal.registration.sex)) +
#' geom_point(size=3) +
#' #geom_label_repel(aes(label=sample_key),hjust=0, vjust=0) +
#' xlab(paste0("PC1: ",percentVar[1],"% variance")) +
#' ylab(paste0("PC2: ",percentVar[2],"% variance")) +
#' #coord_fixed() +
#' ggtitle(paste0("PCA of ",TISSUE," Gene Expression:\ny ~ sex + cohort")) +
#' guides(color=guide_legend(title="animal.key.anirandgroup")) +
#' scale_color_manual(values=ec_colors) +
#' theme(legend.title=element_blank())
#' dev.off()
| /scripts/20200505_rnaseq-tissue-model-circadian-exercise_steep.R | no_license | steepale/20200309_rna-seq_steep | R | false | false | 55,921 | r | #'---
#' title: "PASS1A Rat Tissue: -- Modeling of RNASeq Data Expression"
#' author: "Alec Steep & Jiayu Zhang (Code copied and adapted from Jun Z. Li)"
#' date: "20200505"
#' output:
#' html_document:
#' code_folding: hide
#' toc: true
#' highlight: zenburn
#'
#'---
#+ setup, include=FALSE
# knitr/spin chunk defaults: show code, suppress warnings/messages, no caching
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_chunk$set(warning = FALSE)
knitr::opts_chunk$set(message = FALSE)
knitr::opts_chunk$set(cache = FALSE)
#' ## Goals of Analysis:
#' * Model Circadian Rhythms (5 major time points)
#' * SIN/COS Linear Model
#' * y = B_0 + B_1SIN(TOD) + B_2COS(TOD)
#' * Model Exercise Effects (7 time points)
#' * Comparison of linear models:
#' * Cubic: y = ax^3 + bx^2 + cx + d
#' * y = ax^4 + bx^3 + cx^2 + dx + e
#'
#' ## Setup the Environment
#+ Setup Environment, message=FALSE, results='hide', warning = FALSE
################################################################################
##### Resources and Dependencies ###############################################
################################################################################
# Set the working directory
# NOTE(review): absolute, machine-specific path -- parameterize for portability
WD <- '/Volumes/Frishman_4TB/motrpac/20200309_rna-seq_steep'
#setwd(WD)
# Load the dependencies
#source("https://bioconductor.org/biocLite.R")
#BiocManager::install("gapminder")
#install.packages("tidyverse")
# Load dependencies
# NOTE(review): "vsn" and "som" appear twice in this vector (harmless, but tidy)
pacs...man <- c("tidyverse","GenomicRanges", "DESeq2","devtools","rafalib","GO.db","vsn","hexbin","ggplot2", "GenomicFeatures","Biostrings","BSgenome","AnnotationHub","plyr","dplyr", "org.Rn.eg.db","pheatmap","sva","formula.tools","pathview","biomaRt", "PROPER","SeqGSEA",'purrr','BioInstaller','RColorBrewer','lubridate', "hms","ggpubr", "ggrepel","genefilter","qvalue","ggfortify","som", "vsn","org.Mm.eg.db","VennDiagram","EBImage","reshape2","xtable","kohonen","som","caret","enrichR","gplots","tiff","splines","gam")
lapply(pacs...man, FUN = function(X) {
do.call("library", list(X)) })
################################################################################
######################### Functions ############################################
################################################################################
# Resolve masked verbs explicitly to the intended packages
select <- dplyr::select
counts <- DESeq2::counts
map <- purrr::map
# Source the project helper functions (incl. SIN()/COS() used by sin_mod and
# %!in% used throughout)
source(paste0(WD,'/functions/not_in.R'))
source(paste0(WD,'/functions/rat_mouse_ortho.R'))
source(paste0(WD,'/functions/mouse2rat_ortho.R'))
source(paste0(WD,'/functions/lmp.R'))
source(paste0(WD,'/functions/sin.R'))
source(paste0(WD,'/functions/cos.R'))
#' ## Declare Variables
#+ Declare Variables
################################################################################
##### Declare Variables ################################################
################################################################################
# Declare Tissue
# TISSUE: Hypothalamus, Liver, Kidney, Aorta, Adrenal, Brown Adipose, Cortex, Gastrocnemius, Heart, Hippocampus,Lung,Ovaries,PaxGene,Spleen,Testes, White Adipose
# SCN: Suprachiasmatic nucleus -- Hypothalamus
# LIV: Liver
# KID: Kidney
# AOR: Aorta
# SKM: Gastrocnemius
# HAT: Heart
# ADG: Adrenal gland
# BAT: Brown adipose tissue
# WAT: White adipose tissue
# COR: Cortex
# HIP: Hippocampus
# LUNG: Lung
# OVR: Ovaries
# SPL: Spleen
# TES: Testes
# Load the decision table
# Load the per-tissue decision table (formula, outliers, covariates, symbol)
table_file <- paste0(WD,'/data/20200603_rnaseq-tissue-data-assambly-table_steep.txt')
df_tbl <- read.table(file = table_file,sep = '\t', header = T, check.names = F)
models_df <- data.frame()
#TISSUE <- "Hypothalamus"
# NOTE(review): this loop only (re)assigns per-tissue parameters, so after it
# finishes only the LAST tissue's settings persist -- presumably the rest of
# the analysis was meant to live inside the loop; confirm before relying on
# TISSUE/TIS/OUTLIERS downstream.
for(TISSUE in c('Lung','Hypothalamus','Aorta','Liver', 'Adrenal', 'Brown Adipose', 'Cortex','Gastrocnemius', 'Heart', 'Hippocampus','Ovaries','Spleen','Testes', 'White Adipose','Kidney')){
print(TISSUE)
# # Collect the formula
# design <- df_tbl %>%
# filter(Tissue == TISSUE) %>%
# select(Formula) %>% unique() %>%
# unlist() %>% as.character() %>% as.formula()
# Collect the outlier sample keys for this tissue
OUTLIERS <- df_tbl %>%
filter(Tissue == TISSUE) %>%
select(Outliers) %>% unique() %>%
unlist() %>% as.character()
# Collect the adjusted-variance covariates (the original assigned ADJ_VAR
# twice with identical code; the duplicate has been removed)
ADJ_VAR <- df_tbl %>%
filter(Tissue == TISSUE) %>%
select(Adjusted_Variance) %>% unique() %>%
unlist() %>% as.character()
# Collect the short tissue symbol (e.g. SKM, LIV)
TIS <- df_tbl %>%
filter(Tissue == TISSUE) %>%
select(Tis) %>% unique() %>%
unlist() %>% as.character()
# Collect the model formula for this tissue
FORMULA <- df_tbl %>%
filter(Tissue == TISSUE) %>%
select(Formula) %>% unique() %>%
unlist() %>% as.character() %>% as.formula()
}
#' ## Load & Clean Data
#' ##### Data files to load:
#' * Count Matrix and Metadata Table from:
#' * RNA-Seq from Mt. Sinai
#' * 3 sequencing batches & metadata
#' * RNA-Seq from Stanford
#' * 2 sequencing batches & metadata
#+ Load the Data
################################################################################
##### Load & Clean Data ###############################################
################################################################################
# Files last saved in: 20200309_exploration-rna-seq-phase1_steep.R
# One-time preprocessing (disabled): reads raw count/metadata CSVs, coerces
# column types, derives exercise/feeding time variables, and caches both as
# .rds. Flip to TRUE to regenerate the cached files read below.
# (Changed `if(F)` to `if(FALSE)` -- F is a reassignable alias, FALSE is not.)
# NOTE(review): this block references ec_levels, which is defined AFTER the
# block in this file; running it as-is would error -- move ec_levels up first.
if(FALSE) {
# Count matrix
in_file <- paste0(WD,'/data/20200309_rnaseq-countmatrix-pass1a-stanford-sinai_steep.csv')
count_data <- read.table(in_file,sep = ',', header = TRUE,row.names = 1,check.names = FALSE)
# Metadata table
in_file <- paste0(WD,'/data/20200309_rnaseq-meta-pass1a-stanford-sinai_steep.txt')
col_data <- read.table(in_file, header = TRUE, check.names = FALSE, sep = '\t')
row.names(col_data) <- col_data$sample_key
# Adjust column objects
########################
# To factors
factor_cols <- c('labelid',
'vial_label',
'animal.registration.sex',
'animal.key.exlt4',
'X2D_barcode',
'BID',
'Seq_flowcell_lane',
'Seq_flowcell_run',
'Seq_end_type',
'Lib_UMI_cycle_num',
'pid',
'acute.test.staffid',
'acute.test.siteid',
'acute.test.versionnbr',
'acute.test.contactshock',
'animal.familiarization.staffid',
'animal.familiarization.siteid',
'animal.familiarization.versionnbr',
'animal.familiarization.compliant',
'animal.key.protocol',
'animal.key.agegroup',
'animal.key.batch',
'animal.key.intervention',
'animal.key.sitename',
'animal.registration.staffid',
'animal.registration.siteid',
'animal.registration.versionnbr',
'animal.registration.ratid',
'animal.registration.batchnumber',
'specimen.collection.bloodcomplete',
'specimen.collection.bloodtechid',
'specimen.collection.uterustype',
'specimen.collection.uterustechid',
'specimen.collection.deathtype',
'specimen.processing.versionnbr',
'specimen.processing.siteid',
'bid',
'specimen.processing.samplenumber',
'specimen.processing.techid',
'barcode',
'shiptositeid',
'receivedcas',
'receivestatuscas')
for(fc in factor_cols){
col_data[[fc]] <- as.factor(col_data[[fc]])
}
# To Dates: 03JUL2018
date_cols <- c('acute.test.d_visit',
'acute.test.d_start',
'animal.familiarization.d_visit',
'animal.familiarization.d_treadmillbegin',
'animal.familiarization.d_treadmillcomplete',
'animal.registration.d_visit',
'animal.registration.d_arrive',
'animal.registration.d_reverselight',
'specimen.collection.d_visit',
'animal.registration.d_birth',
'Seq_date')
for(dc in date_cols){
col_data[[dc]] <- ymd(col_data[[dc]])
}
# From Dates: 2/14/2019
date_cols <- c('RNA_extr_date',
'Lib_prep_date')
for(dc in date_cols){
col_data[[dc]] <- mdy(col_data[[dc]])
}
# To Times: 10:30:00 (stored as numeric seconds since midnight)
time_cols <- c('acute.test.t_complete',
'specimen.collection.t_anesthesia',
'specimen.collection.t_bloodstart',
'specimen.collection.t_bloodstop',
'specimen.collection.t_edtafill',
'specimen.collection.uteruscomplete',
'specimen.collection.t_uterusstart',
'specimen.collection.t_uterusstop',
'specimen.collection.t_death',
'specimen.processing.t_collection',
'specimen.processing.t_edtaspin',
'specimen.processing.t_freeze',
'acute.test.howlongshock',
'acute.test.t_start')
for(tc in time_cols){
col_data[[tc]] <- col_data[[tc]] %>% as.character() %>% parse_time() %>% as.numeric()
}
# Releveling factors (ec_levels must be in scope -- see note above)
col_data$animal.key.anirandgroup <- as.character(col_data$animal.key.anirandgroup)
col_data$animal.key.anirandgroup <- factor(col_data$animal.key.anirandgroup,
levels = ec_levels)
# Create a variable for time post exercise (hours; controls coded -1 / 7)
col_data <- col_data %>%
mutate(specimen.collection.t_exercise_hour = case_when(
animal.key.anirandgroup == 'Control - IPE' ~ -1,
animal.key.anirandgroup == 'Control - 7 hr' ~ 7,
animal.key.anirandgroup == 'Exercise - IPE' ~ 0,
animal.key.anirandgroup == 'Exercise - 0.5 hr' ~ 0.5,
animal.key.anirandgroup == 'Exercise - 1 hr' ~ 1,
animal.key.anirandgroup == 'Exercise - 4 hr' ~ 4,
animal.key.anirandgroup == 'Exercise - 7 hr' ~ 7,
animal.key.anirandgroup == 'Exercise - 24 hr' ~ 24,
animal.key.anirandgroup == 'Exercise - 48 hr' ~ 48))
# Take the absolute value of the square root of seconds post exercise (consider negative numbers)
# Make sure to Subtract 1 hour (3600s) from 'Control - IPE' groups to account for exercise effect
col_data <- col_data %>%
mutate(calculated.variables.deathtime_after_acute =
ifelse(animal.key.anirandgroup == 'Control - IPE',
calculated.variables.deathtime_after_acute - 3600,
calculated.variables.deathtime_after_acute))
# Signed sqrt transform of hours post exercise
col_data <- col_data %>%
mutate(specimen.collection.t_exercise_hour_sqrt = ifelse(
calculated.variables.deathtime_after_acute < 0,
(sqrt(abs(calculated.variables.deathtime_after_acute))/60/60)*(-1),
(sqrt(abs(calculated.variables.deathtime_after_acute))/60/60)))
row.names(col_data) <- col_data$sample_key
# Examine histograms of the raw and transformed post-exercise times
col_data %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=calculated.variables.deathtime_after_acute)) +
geom_histogram(bins = 68)
col_data %>%
filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
ggplot(aes(x=specimen.collection.t_exercise_hour_sqrt)) +
geom_histogram(bins = 68)
# Generate a time-last-fed variable (group/sex-specific feeding times)
col_data <- col_data %>%
mutate(animal_time_last_fed = case_when(
animal.key.anirandgroup %!in% c('Control - 7 hr', 'Exercise - 7 hr') ~ parse_time('8:00'),
(animal.key.anirandgroup %in% c('Control - 7 hr') &
animal.registration.sex == 'Male') ~ parse_time('11:50'),
(animal.key.anirandgroup %in% c('Control - 7 hr') &
animal.registration.sex == 'Female') ~ parse_time('12:30'),
(animal.key.anirandgroup %in% c('Exercise - 7 hr') &
animal.registration.sex == 'Male') ~ parse_time('12:30'),
(animal.key.anirandgroup %in% c('Exercise - 7 hr') &
animal.registration.sex == 'Female') ~ parse_time('12:50')) %>% as.numeric())
# Generate a time-fasted variable (seconds between last feeding and death)
col_data$calculated.variables.deathtime_after_fed <- (col_data$specimen.collection.t_death - col_data$animal_time_last_fed) %>% as.numeric()
# Save data as an R objects
# ################################################################################
# To determine object size
sl <- object.size(count_data)
print(sl, units = 'auto')
# Meta Data
meta_file <- paste0(WD,'/data/20200603_rnaseq-meta-pass1a-stanford-sinai-proc_steep.rds')
saveRDS(col_data, file = meta_file)
# Count Data
count_file <- paste0(WD, '/data/20200603_rnaseq-counts-pass1a-stanford-sinai-processed_steep.rds')
saveRDS(count_data, file = count_file)
}
# Paths to the cached .rds files written by the (disabled) preprocessing block
meta_file <- paste0(WD,'/data/20200603_rnaseq-meta-pass1a-stanford-sinai-proc_steep.rds')
#' #### Polished metadata saved as:
#' `r meta_file`
#'
count_file <- paste0(WD, '/data/20200603_rnaseq-counts-pass1a-stanford-sinai-processed_steep.rds')
#' #### Polished read counts saved as:
#' `r count_file`
# Set a vector for Exercise/Control Levels and Colors
# (levels and colors are positionally paired; keep the two vectors in sync)
ec_levels <- c('Exercise - IPE',
'Exercise - 0.5 hr',
'Exercise - 1 hr',
'Exercise - 4 hr',
'Exercise - 7 hr',
'Exercise - 24 hr',
'Exercise - 48 hr',
'Control - IPE',
'Control - 7 hr')
ec_colors <- c('gold',
'darkgoldenrod1',
'orange',
'darkorange',
'darkorange2',
'darkorange3',
'darkorange4',
'steelblue1',
'steelblue4')
# Load Metadata and count data as R objects
################################################################################
# Restore the metadata object
meta_file <- paste0(WD,'/data/20200603_rnaseq-meta-pass1a-stanford-sinai-proc_steep.rds')
col_data <- readRDS(file = meta_file)
# Restore the count object
count_file <- paste0(WD, '/data/20200603_rnaseq-counts-pass1a-stanford-sinai-processed_steep.rds')
count_data <- readRDS(file = count_file)
#' #### Retrieve Circadian Genes Associated with Tissue
#' Data from Supplementary Table 2 from:
#' Yan, J., Wang, H., Liu, Y. & Shao, C. Analysis of gene regulatory networks in the mammalian circadian rhythm. PLoS Comput. Biol. 4, (2008).
#' Downloaded 20200326 by Alec Steep
#'
#' ## Place Genes in Genomic Ranges
#'
#' #### Reference Genome and Annotation:
#' Rnor_6.0 (GCA_000001895.4) assembly from Ensembl database (Release 96)
#' Found at: http://uswest.ensembl.org/Rattus_norvegicus/Info/Index.
#'
#' FASTA: Rattus_norvegicus.Rnor_6.0.dna.toplevel.fa.gz
#' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/Rattus_norvegicus.Rnor_6.0.dna.toplevel.fa.gz
#'
#' GTF: Rattus_norvegicus.Rnor_6.0.96.gtf.gz
#' ftp://ftp.ensembl.org/pub/release-96/gtf/rattus_norvegicus/Rattus_norvegicus.Rnor_6.0.96.gtf.gz
#'
#' ## Annotate Genes by Chromosome
#+ Annotate Genes by Chromosome
################################################################################
##### Annotate Genes by Chromosome ###################################
################################################################################
### Determine which control samples are male and female
# Collect the genes on the rat X and Y chromosomes (used to sex-check samples)
# Build a TxDb annotation from the Ensembl Rnor_6.0 release-96 GTF
# (the same GTF used in mapping); the one-time build below is commented out
# and the serialized TxDb is loaded from disk instead.
#ens_gtf <- paste0(WD,'/data/Rattus_norvegicus.Rnor_6.0.96.gtf')
#Rn_TxDb <- makeTxDbFromGFF(ens_gtf,
#                format=c('gtf'),
#                dataSource='Ensembl_Rattus6_gtf',
#                organism='Rattus norvegicus',
#                taxonomyId=NA,
#                circ_seqs=DEFAULT_CIRC_SEQS,
#                chrominfo=NULL,
#                miRBaseBuild=NA,
#                metadata=NULL)
# Save the Rat Genomic Ranges Object
#gf_file <- paste0(WD,'/data/20200603_Rnor-6.0.96-GRanges_steep.sqlite')
#saveDb(Rn_TxDb, file=gf_file)
# Load the serialized TxDb annotation (AnnotationDbi::loadDb)
gf_file <- paste0(WD,'/data/20200603_Rnor-6.0.96-GRanges_steep.sqlite')
Rn_TxDb <- loadDb(gf_file)
# Define female-specific sex genes (X chromosome)
# To examine chromosome names
#seqlevels(Rn_TxDb)[1:23]
# Extract X-chromosome genes as a GRanges object, then take their names
X_genes_gr <- genes(Rn_TxDb, columns = 'TXCHROM', filter = list(tx_chrom=c('X')))
# Ensembl gene ids for X-chromosome genes
X_ens_id <- names(X_genes_gr)
# Map Ensembl ids to gene symbols (for inspection)
X_sym <- mapIds(org.Rn.eg.db, names(X_genes_gr), 'SYMBOL', 'ENSEMBL')
# Extract Y-chromosome (male-specific) genes as a GRanges object
Y_genes_gr <- genes(Rn_TxDb, columns = 'TXCHROM', filter = list(tx_chrom=c('Y')))
# Ensembl gene ids for Y-chromosome genes
Y_ens_id <- names(Y_genes_gr)
# Combined set of sex-chromosome gene ids
sex_ens_id <- c(X_ens_id,Y_ens_id)
# Map Ensembl ids to gene symbols (for inspection)
Y_sym <- mapIds(org.Rn.eg.db, names(Y_genes_gr), 'SYMBOL', 'ENSEMBL')
#' ## Collect Samples of Interest and Normalize
#+ Collect Samples of Interest and Normalize
################################################################################
##### Collect Samples of Interest and Normalize #######################
################################################################################
# Filter metadata down to the tissue/batch under analysis.
# NOTE(review): `TISSUE == c('...')` works only because the RHS is length 1;
# a bare string literal would be clearer.
if(TISSUE == c('Gastrocnemius_MSSM_1')){
    tod_cols <- col_data %>%
        filter(Tissue == 'Gastrocnemius') %>%
        filter(Seq_batch == 'MSSM_1') %>%
        filter(!is.na(animal.registration.sex))
}else if(TISSUE == c('Gastrocnemius_Stanford_1')){
    tod_cols <- col_data %>%
        filter(Tissue == 'Gastrocnemius') %>%
        filter(Seq_batch == 'Stanford_1') %>%
        filter(!is.na(animal.registration.sex))
}else{
    # Filter Samples (meta)
    tod_cols <- col_data %>%
        filter(Tissue == TISSUE) %>%
        filter(!is.na(animal.registration.sex))
}
rownames(tod_cols) <- tod_cols$sample_key
# Time post exercise: map each randomization group to hours after acute
# exercise (Control - IPE is coded as -1, Control - 7 hr as 7).
tod_cols <- tod_cols %>%
    mutate(specimen.collection.t_exercise_hour = case_when(
        animal.key.anirandgroup == 'Control - IPE' ~ -1,
        animal.key.anirandgroup == 'Control - 7 hr' ~ 7,
        animal.key.anirandgroup == 'Exercise - IPE' ~ 0,
        animal.key.anirandgroup == 'Exercise - 0.5 hr' ~ 0.5,
        animal.key.anirandgroup == 'Exercise - 1 hr' ~ 1,
        animal.key.anirandgroup == 'Exercise - 4 hr' ~ 4,
        animal.key.anirandgroup == 'Exercise - 7 hr' ~ 7,
        animal.key.anirandgroup == 'Exercise - 24 hr' ~ 24,
        animal.key.anirandgroup == 'Exercise - 48 hr' ~ 48))
# Convert to seconds and take the square root; the sign is preserved for the
# negative (pre-exercise) control by negating the transformed magnitude.
tod_cols$specimen.collection.t_exercise_seconds <-
    tod_cols$specimen.collection.t_exercise_hour * 60 * 60
tod_cols <- tod_cols %>%
    mutate(specimen.collection.t_exercise_hour_sqrt = ifelse(
        specimen.collection.t_exercise_seconds < 0,
        (sqrt(abs(specimen.collection.t_exercise_seconds))/60/60)*(-1),
        (sqrt(abs(specimen.collection.t_exercise_seconds))/60/60)))
# Jitter the transformed hour slightly so model fits do not see exact ties.
# NOTE(review): jitter() is random; set a seed upstream for reproducibility.
tod_cols$specimen.collection.t_exercise_hour_sqrt_jit <-
    jitter(tod_cols$specimen.collection.t_exercise_hour_sqrt,
           factor = 0.1)
# NOTE(review): duplicate of the rownames assignment above.
row.names(tod_cols) <- tod_cols$sample_key
# Examine histograms of the raw, seconds, square-root and jittered
# post-exercise time variables; Control - 7 hr is excluded because its
# post-exercise time is not comparable to the exercise groups.
tod_cols %>%
    filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
    ggplot(aes(x=specimen.collection.t_exercise_hour)) +
    geom_histogram(bins = 68)
tod_cols %>%
    filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
    ggplot(aes(x=specimen.collection.t_exercise_seconds)) +
    geom_histogram(bins = 68)
tod_cols %>%
    filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
    ggplot(aes(x=specimen.collection.t_exercise_hour_sqrt)) +
    geom_histogram(bins = 68)
tod_cols %>%
    filter(animal.key.anirandgroup != 'Control - 7 hr') %>%
    ggplot(aes(x=specimen.collection.t_exercise_hour_sqrt_jit)) +
    geom_histogram(bins = 68)
# Collect sample keys with a non-missing time of death, excluding outliers.
# FIX: use %in% rather than elementwise != so OUTLIERS may hold more than
# one sample key (x != vec silently recycles and mis-filters when
# length(OUTLIERS) > 1; behavior is unchanged for a single outlier).
nona_sams <- tod_cols %>%
    filter(!is.na(specimen.collection.t_death_hour)) %>%
    filter(!sample_key %in% OUTLIERS) %>%
    filter(!is.na(animal.registration.sex)) %>%
    select(sample_key) %>% unlist() %>% as.character()
# Subset the count matrix columns to the retained samples
tod_counts <- count_data[,nona_sams]
#' #### Sanity Check: Ensure that the metadata rownames are identical to count matrix column names
# NOTE(review): tod_cols still holds every sample while tod_counts has only
# the filtered columns; if any sample was dropped these vectors differ in
# length and == recycles. Consider comparing colnames(tod_counts) against
# nona_sams instead.
all(rownames(tod_cols) == colnames(tod_counts))
# Create a design formula and load counts and supporting annotation into an S4 object (DESeq infrastructure)
#design = ~1 # Primary variable needs to be last.
# FORMULA is supplied earlier in the script (analysis parameter).
design <- FORMULA
# Wrapping in () prints the title while assigning it.
(title = paste0('Design: ',as.character(design)) )
dds1 <- DESeqDataSetFromMatrix(countData = tod_counts,
                               colData = tod_cols,
                               design = design)
# Reasoning from:
#citation("PROPER")
#dds
#' #### We remove genes with an average sequencing depth of 10 or less
#' Before Filtering
# dds1
# Count of genes whose mean count across samples is below 1 (reported below).
zero_n <- dds1[(rowSums(counts(dds1))/ncol(dds1) < 1), ] %>%
    nrow() %>% as.character()
reads_n <- 1
# Keep genes whose mean count across samples is >= reads_n.
keep <- rowSums(counts(dds1))/ncol(dds1) >= reads_n
dds2 <- dds1[keep,]
#' #### Summary of counts and annotation data in a DESeqDataSet after filtering out genes with low sequencing depth
#' TODO: Critic from Jun: Here we are removing features that have a low average expression. This may be removing important features that might have zero counts in some samples and higher counts in specific groups. Consider developing an algorithm that will account for features with expression in n or more samples.
dds2
# Bookkeeping for the inline report: genes removed beyond the near-zero set.
filter_n <- nrow(dds1) - nrow(dds2) - as.numeric(zero_n)
filter_p <- filter_n/(nrow(dds1) - as.numeric(zero_n))
total_n <- nrow(dds1) - nrow(dds2)
#' ##### Note: Number of genes with average counts between zero and 1 is `r zero_n` but removing reads with less than or equal to `r reads_n` removes an additional `r filter_n` features or removes `r filter_p*100`% of the non-zero reads (total of `r total_n` features removed).
dds <- dds2
#' To see the reads per million for each sample
sort(colSums(assay(dds)))/1e6
# estimateSizeFactors gives us a robust estimate in sequencing depth
dds <- estimateSizeFactors(dds)
#' Size factors are generally around 1 (scaled) and calculated using the median and are robust to genes with large read counts
summary(sizeFactors(dds))
# Variance-stabilizing transform of the filtered counts (blind = FALSE so
# the design is used when estimating the dispersion trend).
rld <- DESeq2::vst(dds, blind = F)
#' Regularized Log (rlog) Transform
# NOTE(review): the rlog call inside this timing loop is commented out, so
# the loop currently only times an empty body; rld above comes from vst().
for(n in 1){
    start_time <- Sys.time()
    #rld <- DESeq2::rlog(dds, blind = F)
    end_time <- Sys.time()
    print(end_time - start_time)
}
# This command is redundant, but included for safety
rs <- rowSums(counts(dds))
#' #### We see just how well duplicate samples correlate regardless of sequencing batch
mypar()
# PCA on the top 500 most-variable genes of the transformed counts.
pcaData <- DESeq2::plotPCA(rld,
                           intgroup=c("animal.key.anirandgroup",
                                      "animal.registration.sex",
                                      "sample_key"),
                           returnData=TRUE, ntop = 500)
percentVar <- round(100 * attr(pcaData, "percentVar"))
#pdf(paste0(WD,"/plots/20200505_rnaseq-",TIS,"-PCA-naive-modeling_steep.pdf"),
#    width = 6, height = 4)
# PC1 vs PC2 coloured by group, shaped by sex.
ggplot(pcaData, aes(PC1, PC2, color=animal.key.anirandgroup,shape=animal.registration.sex)) +
    geom_point(size=3) +
    #geom_label_repel(aes(label=sample_key),hjust=0, vjust=0) +
    xlab(paste0("PC1: ",percentVar[1],"% variance")) +
    ylab(paste0("PC2: ",percentVar[2],"% variance")) +
    #coord_fixed() +
    ggtitle(paste0("PCA of ",TISSUE," Gene Expression:\nNaive Model (~ 1)")) +
    guides(color=guide_legend(title="animal.key.anirandgroup")) +
    scale_color_manual(values=ec_colors) +
    theme(legend.title=element_blank())
#dev.off()
#' ### Adjust Variance
#+ Adjust Variance
################################################################################
###########         Adjust Variance        #######################################
################################################################################
# Remove unwanted variance components (e.g. batch) from the transformed
# counts. ADJ_VAR is an analysis parameter set earlier in the script.
# NOTE(review): `ADJ_VAR != 'None'` assumes ADJ_VAR is length 1; with a
# vector this condition errors on modern R -- confirm upstream.
if(ADJ_VAR != 'None'){
    for(adj_var in ADJ_VAR){
        # Duplicate the rld object
        rld_final <- rld
        # The batch effect can only be removed with limma
        # https://support.bioconductor.org/p/76099/ (See Michael Love's Comment)
        assay(rld_final) <- limma::removeBatchEffect(assay(rld), rld[[adj_var]])
        # Examine the primary variable of interest to see if we've solved our issue
        # Before:
        p <- DESeq2::plotPCA(rld, intgroup =adj_var) +
            guides(color=guide_legend(title=adj_var))
        plot(p)
        # After
        p <- DESeq2::plotPCA(rld_final, intgroup = adj_var, ntop = 500) +
            guides(color=guide_legend(title=adj_var))
        plot(p)
        # Carry the adjusted matrix forward for downstream analysis.
        rld <- rld_final
    }
}
#### We see just how well duplicate samples correlate regardless of sequencing batch
mypar()
# Second PCA, after variance adjustment, on up to 20000 variable genes.
pcaData <- DESeq2::plotPCA(rld,
                           intgroup=c("animal.key.anirandgroup",
                                      "animal.registration.sex",
                                      "sample_key"),
                           returnData=TRUE, ntop = 20000)
percentVar <- round(100 * attr(pcaData, "percentVar"))
#pdf(paste0(WD,"/plots/20200426_rnaseq-",TIS,"-PCA-sexmod-modeling_steep.pdf"),
#    width = 6, height = 4)
ggplot(pcaData, aes(PC1, PC2, color=animal.key.anirandgroup,shape=animal.registration.sex)) +
    geom_point(size=3) +
    #geom_label_repel(aes(label=sample_key),hjust=0, vjust=0) +
    xlab(paste0("PC1: ",percentVar[1],"% variance")) +
    ylab(paste0("PC2: ",percentVar[2],"% variance")) +
    #coord_fixed() +
    ggtitle(paste0("PCA of ",TISSUE)) +
    guides(color=guide_legend(title="animal.key.anirandgroup")) +
    scale_color_manual(values=ec_colors) +
    theme(legend.title=element_blank())
#dev.off()
#' #### Annotate Data for Modeling By Cluster
#+ Annotate Data for Modeling By Cluster
################################################################################
################ Annotate Data for Modeling By Cluster #########################
################################################################################
# Metadata restricted to the samples kept in the count matrix.
time_cols <- tod_cols %>%
    filter(sample_key %in% nona_sams)
# Polar (clock-face) plot of sacrifice times per experimental group.
# NOTE(review): geom_bar() has no `breaks` parameter (geom_histogram does);
# ggplot2 likely warns and ignores it -- confirm intended geometry.
time_cols %>%
    ggplot(aes(x = specimen.collection.t_death_hour, fill = animal.key.anirandgroup)) +
    geom_bar(breaks = seq(0, 24), width = 2, colour = "grey") +
    coord_polar(start = 0) +
    theme_minimal() +
    ggtitle("Death of Experimental Groups by Time of Day") +
    scale_x_continuous("", limits = c(0, 24),
                       breaks = seq(0, 24),
                       labels = seq(0, 24)) +
    theme(legend.title = element_blank()) +
    scale_fill_manual(values=ec_colors[1:9]) +
    theme(axis.title.y =element_blank(),
          axis.text.y =element_blank(),
          axis.ticks.y=element_blank())
# Select the normalized counts (overwrites the raw tod_counts matrix with
# the variance-stabilized values from rld).
tod_counts <- assay(rld)
# Long format: one row per gene x sample with its normalized count.
t_counts <- setNames(melt(tod_counts),
                     c('ENSEMBL_RAT', 'sample_key', 'count'))
# Join metadata with counts and nest per gene (exercise + Control IPE only;
# %!in% is a project-defined negated %in% operator).
by_gene_df <- tod_cols %>%
    left_join(t_counts, by = "sample_key") %>%
    filter(animal.key.anirandgroup %!in% c('Control - 7 hr')) %>%
    group_by(ENSEMBL_RAT) %>%
    arrange(sample_key) %>%
    nest()
# # Add Cluster and Circ Status
# by_gene_df <- by_gene_df %>%
#         mutate(CIRC = ifelse(ENSEMBL_RAT %in% circ_df$ENSEMBL_RAT, 'CIRC', 'NON-CIRC'))
# Add the gene symbol
by_gene_df$SYMBOL_RAT = mapIds(org.Rn.eg.db, as.character(by_gene_df$ENSEMBL_RAT), "SYMBOL", "ENSEMBL")
# Same join/nest for the Control - 7 hr samples only.
by_gene_df7 <- tod_cols %>%
    left_join(t_counts, by = "sample_key") %>%
    filter(animal.key.anirandgroup %in% c('Control - 7 hr')) %>%
    group_by(ENSEMBL_RAT) %>%
    arrange(sample_key) %>%
    nest()
# Add Cluster and Circ Status
# by_gene_df7 <- by_gene_df7 %>%
#         left_join(mosaic_df, by = "ENSEMBL_RAT") %>%
#         filter(!is.na(CIRC)) # This filter removes genes that did not pass variance filter (MANOVA)
# Add the gene symbol
by_gene_df7$SYMBOL_RAT = mapIds(org.Rn.eg.db, as.character(by_gene_df7$ENSEMBL_RAT), "SYMBOL", "ENSEMBL")
# Cleanup
#rm(count_data, col_data, counts_centered, dds ,dds1,dds2,F_centered,F_counts,
#   M_centered,M_counts,pcaData,t_counts)
# TODO: The entire dataframe cannot be unnested. It may be a memory issue or it may be a corrupted row -- come back and troubleshoot. For now, randomly select 2K rows to continue analysis.
#by_gene_df2 <- by_gene_df
#by_gene_df72 <- by_gene_df7
#by_gene_df <- by_gene_df2
#by_gene_df7 <- by_gene_df72
# Load in the DE genes between C0 and C7 for appropriate tissues
in_file <- paste0(WD, '/data/20200426_rnaseq-',TIS,'-DEgenes-sexmod-C0vsC7_steep.txt')
de_df <- read.table(in_file, sep = '\t', header = T)
# Take all the differentially expressed genes (adjusted p <= 0.05)
de_all <- de_df %>%
    arrange(padj) %>%
    filter(padj <= 0.05) %>%
    select(ENSEMBL_RAT) %>%
    unlist() %>% as.character()
# Take the top 50 differentially expressed genes (by adjusted p-value)
de_top <- de_df %>%
    arrange(padj) %>%
    select(ENSEMBL_RAT) %>%
    dplyr::slice(1:50) %>%
    unlist() %>% as.character()
# Top 20 differentially expressed genes (for plot labelling)
de_top20 <- de_df %>%
    arrange(padj) %>%
    select(ENSEMBL_RAT) %>%
    dplyr::slice(1:20) %>%
    unlist() %>% as.character()
#randomRows = sample(1:nrow(by_gene_df[,1]), 500, replace=F)
#by_gene_df <- by_gene_df[randomRows, ]
#by_gene_df7 <- by_gene_df7[randomRows, ]
# Restrict both nested frames to the DE gene set, in matching order.
by_gene_df <- by_gene_df %>% filter(ENSEMBL_RAT %in% de_all)
by_gene_df7 <- by_gene_df7 %>% filter(ENSEMBL_RAT %in% de_all)
# Must be true
all(by_gene_df$ENSEMBL_RAT == by_gene_df7$ENSEMBL_RAT)
# Generate model functions for the dataframes
################################################################################
# Fit a per-gene natural-cubic-spline model (4 degrees of freedom) of
# normalized expression against the jittered square-root-transformed hours
# post exercise. `ns()` comes from the splines package; keeping it inside
# the formula lets predict()/add_predictions() rebuild the basis correctly
# on new data. Returns the fitted lm object.
gam_mod <- function(df) {
    spline_formula <-
        count ~ ns(specimen.collection.t_exercise_hour_sqrt_jit, df = 4)
    lm(formula = spline_formula, data = df)
}
################################################################################
# Generate a model function for the dataframes
################################################################################
# Circadian harmonic regression: model normalized expression as a linear
# combination of SIN() and COS() transforms of the time of death.
# NOTE(review): SIN/COS are not base R (base uses lowercase sin/cos);
# assumed to be period-24 wrappers defined earlier in this script -- confirm.
# Returns the fitted lm object.
sin_mod <- function(df) {
    circadian_formula <- count ~
        SIN(specimen.collection.t_death_hour) +
        COS(specimen.collection.t_death_hour)
    lm(formula = circadian_formula, data = df)
}
################################################################################
# Generalized Additive Model (Exercise)
################################################################################
# Fit the natural-spline (exercise-time) model per gene and attach the fit
# and its derived summaries as list-columns in a single mutate(); columns
# are created in the same order as the original stepwise pipeline.
by_gene_df <- by_gene_df %>%
    mutate(
        gam_model   = map(data, gam_mod),
        gam_ANOVA   = map(gam_model, anova),
        gam_resid   = map2(data, gam_model, modelr::add_residuals),
        gam_metrics = map(gam_model, broom::glance),
        gam_summary = map(gam_model, summary)
    )
# Flatten the per-gene glance() rows into a plain metrics data frame.
gam_metrics <- by_gene_df %>%
    unnest(gam_metrics)
# SIN/COS Model
################################################################################
# Fit the circadian (SIN/COS) model per gene and save as a list-column
by_gene_df <- by_gene_df %>%
    mutate(sin_model = map(data, sin_mod))
# Examine the ANOVA report on models
by_gene_df <- by_gene_df %>%
    mutate(sin_ANOVA = map(sin_model, anova))
# Add the residuals
by_gene_df <- by_gene_df %>%
    mutate(sin_resid = map2(data, sin_model, modelr::add_residuals))
# Flat data frame of per-gene model metrics (broom::glance)
sin_metrics <- by_gene_df %>%
    mutate(sin_metrics = map(sin_model, broom::glance)) %>%
    unnest(sin_metrics)
# Keep the metrics as a list-column on the nested frame as well
by_gene_df <- by_gene_df %>%
    mutate(sin_metrics = map(sin_model, broom::glance))
# Examine some model summaries
by_gene_df <- by_gene_df %>%
    mutate(sin_summary = map(sin_model, summary))
# Arrange the nested frame by descending adjusted R^2 of the SIN/COS model
row_order <- by_gene_df %>%
    unnest(sin_metrics) %>%
    arrange(desc(adj.r.squared)) %>%
    select(ENSEMBL_RAT) %>%
    unlist() %>% as.character()
by_gene_df <- by_gene_df %>%
    ungroup() %>%
    arrange(factor(ENSEMBL_RAT, levels = row_order))
# Add the predictions (GAM model): for each gene, predict over an evenly
# spaced grid of the transformed exercise-hour axis and store the grid,
# predictions, and observed counts as a list-column.
genes <- by_gene_df$ENSEMBL_RAT %>% as.character() %>% unique()
# Must be true (one row per gene, in the same order)
all(genes == by_gene_df$ENSEMBL_RAT)
gam_pred <- list()
i <- 1
for( g in genes){
    # Subset the nested data for this gene; [[1]] pulls the data list-column
    # (the ungroup() before select() is required so only `data` is selected)
    sub_data <- (by_gene_df %>%
                     filter(ENSEMBL_RAT == g) %>%
                     ungroup() %>%
                     select(data))[[1]] %>% as.data.frame()
    # Evenly spaced grid over the observed predictor range, same length as
    # the number of observations
    grid <- data.frame(specimen.collection.t_exercise_hour_sqrt_jit =
                           modelr::seq_range(
                               sub_data$specimen.collection.t_exercise_hour_sqrt_jit, n = length(sub_data$specimen.collection.t_exercise_hour_sqrt_jit)))
    #grid$ENSEMBL_RAT <- g
    # Extract this gene's fitted spline model from the list-column
    mod <- (by_gene_df %>%
                filter(ENSEMBL_RAT == g) %>% ungroup() %>%
                select(gam_model))[[1]][[1]]
    summary(mod)
    # Predict on the grid, then attach the observed predictor and counts
    grid <- modelr::add_predictions(grid, mod, "pred") %>% as_tibble()
    names(grid)[1] <- "grid_t_exercise_hour_sqrt_jit"
    grid$specimen.collection.t_exercise_hour_sqrt_jit <-
        sub_data$specimen.collection.t_exercise_hour_sqrt_jit
    grid$count <- sub_data$count
    gam_pred[[i]] <- grid
    i <- i + 1
}
by_gene_df$gam_pred <- gam_pred
# Add the predictions (SIN Model): same grid-prediction pattern as above,
# but over the time-of-death axis using the circadian model.
genes <- by_gene_df$ENSEMBL_RAT %>% as.character() %>% unique()
# Must be true (one row per gene, in the same order)
all(genes == by_gene_df$ENSEMBL_RAT)
sin_pred <- list()
i <- 1
for( g in genes){
    # Subset the nested data for this gene
    sub_data <- (by_gene_df %>%
                     filter(ENSEMBL_RAT == g) %>%
                     ungroup() %>%
                     select(data))[[1]] %>% as.data.frame()
    # Evenly spaced grid over the observed death-hour range
    grid <- data.frame(specimen.collection.t_death_hour =
                           modelr::seq_range(
                               sub_data$specimen.collection.t_death_hour,
                               n = length(sub_data$specimen.collection.t_exercise_hour_sqrt_jit)))
    #grid$ENSEMBL_RAT <- g
    # Extract this gene's fitted SIN/COS model from the list-column
    mod <- (by_gene_df %>%
                filter(ENSEMBL_RAT == g) %>% ungroup() %>%
                select(sin_model))[[1]][[1]]
    summary(mod)
    grid <- modelr::add_predictions(grid, mod, "pred") %>% as_tibble()
    names(grid)[1] <- "grid_t_death_hour"
    # Round the grid hours so they can later be matched against sac_hrs
    grid$grid_t_death_hour <- round(grid$grid_t_death_hour, digits = 1)
    grid$specimen.collection.t_death_hour <-
        sub_data$specimen.collection.t_death_hour
    grid$count <- sub_data$count
    sin_pred[[i]] <- grid
    i <- i + 1
}
by_gene_df$sin_pred <- sin_pred
# Index of the gene (row of by_gene_df, ordered by SIN-model R^2) to plot.
gene_n <- 1
# Visualize the GAM model by gene.
# FIX: the CIRC annotation step above is commented out, so the CIRC column
# may be absent from by_gene_df; any_of() selects it only when present
# instead of erroring like a bare CIRC selection would.
gam_pred_df <- by_gene_df %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, data, any_of("CIRC"), gam_model, gam_pred) %>%
    ungroup() %>%
    filter(row_number() == gene_n) %>%
    unnest(gam_pred)
gam_gene <- unique(gam_pred_df$ENSEMBL_RAT) %>% as.character()
# Visualize the SIN model by gene (same any_of() guard as above)
sin_pred_df <- by_gene_df %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, data, any_of("CIRC"), sin_model, sin_pred) %>%
    ungroup() %>%
    filter(row_number() == gene_n) %>%
    unnest(sin_pred)
sin_gene <- unique(sin_pred_df$ENSEMBL_RAT) %>% as.character()
# Must be true (both frames refer to the same gene)
gam_gene == sin_gene
# Collect the Control 7 hr data points for this gene
gam_hr7_df <- by_gene_df7 %>%
    filter(ENSEMBL_RAT == gam_gene) %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
    ungroup() %>%
    unnest(data) %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_exercise_hour_sqrt_jit, count)
# Collect the Control - IPE data points for this gene
gam_hr1_df <- by_gene_df %>%
    filter(ENSEMBL_RAT == gam_gene) %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
    ungroup() %>%
    unnest(data) %>%
    filter(animal.key.anirandgroup == "Control - IPE") %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_exercise_hour_sqrt_jit, count)
# Hours of death observed across groups, rounded to 0.1 h
sac_hrs <- sort(unique(round(tod_cols$specimen.collection.t_death_hour, digits = 1)))
#names(tod_cols)
#e_df <- tod_cols %>%
#    select(specimen.collection.t_death_hour, specimen.collection.t_exercise_hour_sqrt) %>%
#    arrange(specimen.collection.t_death_hour)
#e_df$specimen.collection.t_death_hour <- round(e_df$specimen.collection.t_death_hour, digits = 1)
# Perform a second prediction (for hour post exercise -- predictions from SIN Model)
# Map each rounded death hour in the prediction grid back to the
# (transformed) hour-post-exercise value observed at that sacrifice time.
# NOTE(review): this lookup table is hard-coded for one cohort's sacrifice
# schedule (values presumably derived from tod_cols, cf. the commented e_df
# code above) -- confirm before reusing on another tissue/batch.
sin_pred_hour <- sin_pred_df %>%
    filter(round(grid_t_death_hour, digits = 1) %in% sac_hrs) %>%
    mutate(grid_t_exercise_hour = case_when(grid_t_death_hour == 9.9 ~ -0.01666667,
                                            grid_t_death_hour == 10.0 ~ -0.01666667,
                                            grid_t_death_hour == 10.2 ~ -0.01666667,
                                            grid_t_death_hour == 10.3 ~ -0.01666667,
                                            grid_t_death_hour == 10.6 ~ 0.08164966,
                                            grid_t_death_hour == 10.9 ~ 0.08164966,
                                            grid_t_death_hour == 11.2 ~ 0.11547005,
                                            grid_t_death_hour == 11.6 ~ 0.11547005,
                                            grid_t_death_hour == 11.9 ~ 0.01178511,
                                            grid_t_death_hour == 12.2 ~ 0.01178511,
                                            grid_t_death_hour == 12.3 ~ 0.01178511,
                                            grid_t_death_hour == 13.2 ~ 0.00000000,
                                            grid_t_death_hour == 13.3 ~ 0.00000000,
                                            grid_t_death_hour == 13.6 ~ 0.00000000,
                                            grid_t_death_hour == 13.9 ~ 0.01666667,
                                            grid_t_death_hour == 14.2 ~ 0.01666667,
                                            grid_t_death_hour == 14.3 ~ 0.01666667,
                                            grid_t_death_hour == 14.6 ~ 0.03333333,
                                            grid_t_death_hour == 14.9 ~ 0.03333333,
                                            grid_t_death_hour == 16.9 ~ 0.04409586,
                                            grid_t_death_hour == 17.2 ~ 0.04409586,
                                            grid_t_death_hour == 17.3 ~ 0.04409586,
                                            grid_t_death_hour == 17.6 ~ 0.04409586,
                                            grid_t_death_hour == 17.9 ~ 0.04409586))
# Visualize the raw counts, model predictions, and control 7 counts (x=Hours Post Exercise)
# Plot 1: raw counts only
gam_pred_df %>%
    ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
           color = ENSEMBL_RAT) +
    geom_point() +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(gam_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Hours Post Acute Exercise (Transformed)") +
    geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Plot 2: raw counts with the spline fit overlaid
gam_pred_df %>%
    ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
           color = ENSEMBL_RAT) +
    geom_point() +
    geom_line(data = gam_pred_df,
              aes(grid_t_exercise_hour_sqrt_jit, pred),
              size = 1, alpha = 0.8, color = "blue") +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(gam_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Hours Post Acute Exercise (Transformed)") +
    geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Plot 3: counts + fit + Control 7 hr points (red)
gam_pred_df %>%
    ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
           color = ENSEMBL_RAT) +
    geom_point() +
    geom_line(data = gam_pred_df,
              aes(grid_t_exercise_hour_sqrt_jit, pred),
              size = 1, alpha = 0.8, color = "blue") +
    geom_point(data = gam_hr7_df,
               mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
               color = "red") +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(gam_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Hours Post Acute Exercise (Transformed)") +
    geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Plot 4: counts (red) + fit + Control 7 hr and Control IPE points (blue)
gam_pred_df %>%
    ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
           color = ENSEMBL_RAT) +
    geom_point(color = "red", alpha = 1) +
    geom_line(data = gam_pred_df,
              aes(grid_t_exercise_hour_sqrt_jit, pred),
              size = 1, alpha = 0.8, color = "blue") +
    geom_point(data = gam_hr7_df,
               mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
               color = "blue") +
    geom_point(data = gam_hr1_df,
               mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
               color = "blue") +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(gam_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Hours Post Acute Exercise (Transformed)") +
    geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Plot 5: counts + fit + Control 7 hr + circadian-model predictions mapped
# onto the post-exercise axis (orange points from sin_pred_hour)
gam_pred_df %>%
    ggplot(aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
           color = ENSEMBL_RAT) +
    geom_point() +
    geom_line(data = gam_pred_df,
              aes(grid_t_exercise_hour_sqrt_jit, pred),
              size = 1, alpha = 0.8, color = "blue") +
    geom_point(data = gam_hr7_df,
               mapping = aes(specimen.collection.t_exercise_hour_sqrt_jit, count),
               color = "red") +
    geom_point(data = sin_pred_hour,
               mapping = aes(x = grid_t_exercise_hour, y = pred),
               size = 3, alpha = 1, color = "orange") +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(gam_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Hours Post Acute Exercise (Transformed)") +
    geom_vline(xintercept = 0, linetype = "dashed", alpha = 0.5)
# Visualize the raw counts, model predictions, and control 7 counts (x=Hours Post Exercise)
# Collect the Control 7 hr data points for this gene (time-of-death axis)
sin_hr7_df <- by_gene_df7 %>%
    filter(ENSEMBL_RAT == sin_gene) %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
    ungroup() %>%
    unnest(data) %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_death_hour, count)
# Collect the Control - IPE data points for this gene (time-of-death axis)
sin_hr1_df <- by_gene_df %>%
    filter(ENSEMBL_RAT == sin_gene) %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, data) %>%
    ungroup() %>%
    unnest(data) %>%
    filter(animal.key.anirandgroup == "Control - IPE") %>%
    select(ENSEMBL_RAT, SYMBOL_RAT, specimen.collection.t_death_hour, count)
# Visualize the raw counts, model predictions, and control 7 counts (x=Time of Death)
# Plot 1: raw counts only
sin_pred_df %>%
    ggplot(aes(specimen.collection.t_death_hour, count),
           color = ENSEMBL_RAT) +
    geom_point() +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(sin_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Time of Death (Hour)")
# Plot 2: raw counts with the circadian fit overlaid (orange)
sin_pred_df %>%
    ggplot(aes(specimen.collection.t_death_hour, count),
           color = ENSEMBL_RAT) +
    geom_point() +
    geom_line(data = sin_pred_df,
              aes(grid_t_death_hour, pred),
              size = 1, alpha = 0.8, color = "orange") +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(sin_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Time of Death (Hour)")
# Plot 3: counts + fit + Control 7 hr points (red)
sin_pred_df %>%
    ggplot(aes(specimen.collection.t_death_hour, count),
           color = ENSEMBL_RAT) +
    geom_point() +
    geom_line(data = sin_pred_df,
              aes(grid_t_death_hour, pred),
              size = 1, alpha = 0.8, color = "orange") +
    geom_point(data = sin_hr7_df,
               mapping = aes(specimen.collection.t_death_hour, count),
               color = "red") +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(sin_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Time of Death (Hour)")
# Plot 4: counts (red) + fit + Control 7 hr and Control IPE points (blue)
sin_pred_df %>%
    ggplot(aes(specimen.collection.t_death_hour, count),
           color = ENSEMBL_RAT) +
    geom_point(color = "red") +
    geom_line(data = sin_pred_df,
              aes(grid_t_death_hour, pred),
              size = 1, alpha = 0.8, color = "orange") +
    geom_point(data = sin_hr7_df,
               mapping = aes(specimen.collection.t_death_hour, count),
               color = "blue") +
    geom_point(data = sin_hr1_df,
               mapping = aes(specimen.collection.t_death_hour, count),
               color = "blue") +
    theme(legend.position = "none") +
    ggtitle(
        paste0("Expression of ",
               unique(sin_pred_df$SYMBOL_RAT),
               ":\nExercise Groups & Control IPE (",TISSUE,")")) +
    ylab("Expression (Transformed/Normalized)") +
    xlab("Time of Death (Hour)")
# Visualize the gam model metrics
# NOTE(review): sin_model is selected twice in this chain; one select()
# would suffice. [[1]][[1]] unwraps the single lm from the list-column.
(by_gene_df %>%
     dplyr::slice(gene_n) %>%
     select(sin_model) %>%
     ungroup %>%
     select(sin_model))[[1]][[1]] %>%
    anova()
# Glance metrics for the selected gene under each model
by_gene_df[gene_n,"gam_metrics"][[1]][[1]]
by_gene_df[gene_n,"sin_metrics"][[1]][[1]]
# Visualize the model ANOVA summary
#by_gene_df$gam_ANOVA[[1]]
#by_gene_df$sin_ANOVA[[1]]
################################################################################
# Compare the R^2 and p-values of the two per-gene models
# (sinusoidal "circadian" fit vs natural-spline "exercise" fit).
# Copy the metric columns under model-specific names so both survive the join.
sin_metrics$r.squared.sin <- sin_metrics$r.squared
sin_metrics$p.value.sin <- sin_metrics$p.value
gam_metrics$r.squared.gam <- gam_metrics$r.squared
gam_metrics$p.value.gam <- gam_metrics$p.value
# One row per gene with both models' metrics side by side.
# (SYMBOL_RAT and CIRC come from gam_metrics via the join.)
model_metrics <- sin_metrics %>%
  select(ENSEMBL_RAT, r.squared.sin, p.value.sin) %>%
  left_join(gam_metrics, by = "ENSEMBL_RAT") %>%
  select(ENSEMBL_RAT, SYMBOL_RAT, CIRC, r.squared.sin, p.value.sin,
         r.squared.gam, p.value.gam)
# Join the log fold change from the DE results for graph annotation.
de_join <- de_df %>%
  select(ENSEMBL_RAT, log2FoldChange)
# Flag direction of regulation and membership in the top DE gene set.
model_metrics <- model_metrics %>%
  ungroup() %>%
  left_join(de_join, by = "ENSEMBL_RAT") %>%
  mutate(Expression = factor(ifelse(log2FoldChange > 0, "UP", "DOWN"),
                             levels = c("UP", "DOWN"))) %>%
  mutate(top_de = ifelse(ENSEMBL_RAT %in% de_top, "TOP GENE", "NOT TOP GENE"))
# Convenience subsets used by the plots below.
top_metrics <- model_metrics %>%
  filter(ENSEMBL_RAT %in% de_top)
top20_metrics <- model_metrics %>%
  filter(ENSEMBL_RAT %in% de_top20)
gene_metrics <- model_metrics %>%
  filter(SYMBOL_RAT %in% c("Arntl","Pdk4"))
# Compare the R^2 of the two models per gene: natural-spline "exercise" fit
# on x vs sinusoidal "circadian" fit on y. Points above the identity line
# are better explained by the circadian model.
ggplot(model_metrics, aes(r.squared.gam, r.squared.sin)) +
  geom_point(alpha = 0.4) +
  xlim(0,1) + ylim(0,1) +
  geom_abline(intercept = 0, slope = 1) +
  xlab("R^2 Natural Spline Model (Exercise)") +
  ylab("R^2 SIN/COS Model (Circadian)") +
  ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
  coord_equal()
# Same scatter with two genes of interest (Arntl, Pdk4) highlighted/labelled.
# NOTE(review): geom_label_repel is given all of `d` (most rows have an
# empty-string label) -- presumably ggrepel suppresses empty labels; confirm,
# otherwise pass only the labelled subset.
d <- model_metrics %>%
  mutate(gene_label = ifelse(SYMBOL_RAT %in% c("Arntl","Pdk4"), SYMBOL_RAT, ''))
ggplot(d, aes(r.squared.gam, r.squared.sin)) +
  geom_point(alpha = 0.2) +
  xlim(0,1) + ylim(0,1) +
  geom_abline(intercept = 0, slope = 1) +
  xlab("R^2 Natural Spline Model (Exercise)") +
  ylab("R^2 SIN/COS Model (Circadian)") +
  ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
  geom_point(data = gene_metrics,
             mapping = aes(r.squared.gam, r.squared.sin),
             alpha = 1) +
  geom_label_repel(data = d,
                   mapping = aes(label=gene_label), alpha = 0.8,
                   hjust=0, vjust=0) +
  coord_equal()
# Overlay the top DE genes, coloured by direction of regulation.
ggplot(model_metrics, aes(r.squared.gam, r.squared.sin)) +
  geom_point(alpha = 0.1) +
  xlim(0,1) + ylim(0,1) +
  geom_abline(intercept = 0, slope = 1) +
  xlab("R^2 Natural Spline Model (Exercise)") +
  ylab("R^2 SIN/COS Model (Circadian)") +
  ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
  geom_point(data = top_metrics,
             mapping = aes(r.squared.gam, r.squared.sin, color = Expression),
             alpha = 1)
# As above, restricted to the top-20 DE genes, with gene-symbol labels.
ggplot(model_metrics, aes(r.squared.gam, r.squared.sin)) +
  geom_point(alpha = 0.1) +
  xlim(0,1) + ylim(0,1) +
  geom_abline(intercept = 0, slope = 1) +
  xlab("R^2 Natural Spline Model (Exercise)") +
  ylab("R^2 SIN/COS Model (Circadian)") +
  ggtitle("R2 Comparisons Between Models:\nDifferentially Expressed Genes (C0 -> C7)") +
  geom_point(data = top20_metrics,
             mapping = aes(r.squared.gam, r.squared.sin, color = Expression),
             alpha = 1)+
  geom_label_repel(data = top20_metrics,
                   mapping = aes(label=SYMBOL_RAT), alpha = 0.8,
                   hjust=0, vjust=0)
#'
#' #' ### Adjust for Between Sex Variance
#'
#' #+ Adjust for Between Sex Variance
#' ################################################################################
#' ########### Adjust for Between Sex Variance ###################################
#' ################################################################################
#'
#' # "To adjust for batch effects, we median- centered the expression levels of each transcript within each batch and confirmed, using the correlation matrices, that the batch effects were removed after the adjustment."
#' #~ Li, J. Z. et al. Circadian patterns of gene expression in the human brain and disruption in major depressive disorder. Proc. Natl. Acad. Sci. U. S. A. 110, 9950–9955 (2013).
#'
#' # Here we have 2 Groups: Control - IPE and Control 7 hr; we'll median center these groups to combine the sexes.
#'
#' M_samples <- col_data %>%
#' filter(Tissue == TISSUE) %>%
#' filter(!is.na(animal.registration.sex)) %>%
#' filter(animal.registration.sex == 'Male') %>%
#' filter(sample_key != OUTLIERS) %>%
#' #filter(animal.key.anirandgroup %!in% c('Control - 7 hr')) %>%
#' select(sample_key) %>% unlist() %>% as.character()
#' F_samples <- col_data %>%
#' filter(Tissue == TISSUE) %>%
#' filter(!is.na(animal.registration.sex)) %>%
#' filter(sample_key != OUTLIERS) %>%
#' filter(animal.registration.sex == 'Female') %>%
#' #filter(animal.key.anirandgroup %!in% c('Control - 7 hr')) %>%
#' select(sample_key) %>% unlist() %>% as.character()
#' # Select the counts
#' M_counts <- assay(rld[, M_samples])
#' F_counts <- assay(rld[, F_samples])
#'
#' # Median Center data
#' # Collects median of each row, then subtracts by row medians
#' M_medians <- apply(M_counts,1,median)
#' M_centered <- M_counts - M_medians
#' F_medians <- apply(F_counts,1,median)
#' F_centered <- F_counts - F_medians
#' counts_centered <- cbind(M_centered, F_centered)
#' counts_centered <- counts_centered[, colnames(assay(rld))]
#' assay(rld) <- counts_centered
#'
#' #' #### We see just how well duplicate samples correlate regardless of sequencing batch
#' mypar()
#' pcaData <- DESeq2::plotPCA(rld,
#' intgroup=c("animal.key.anirandgroup",
#' "animal.registration.sex",
#' "sample_key"),
#' returnData=TRUE, ntop = 500)
#' percentVar <- round(100 * attr(pcaData, "percentVar"))
#' pdf(paste0(WD,"/plots/20200426_rnaseq-",TIS,"-PCA-sexmod-modeling_steep.pdf"),
#' width = 6, height = 4)
#' ggplot(pcaData, aes(PC1, PC2, color=animal.key.anirandgroup,shape=animal.registration.sex)) +
#' geom_point(size=3) +
#' #geom_label_repel(aes(label=sample_key),hjust=0, vjust=0) +
#' xlab(paste0("PC1: ",percentVar[1],"% variance")) +
#' ylab(paste0("PC2: ",percentVar[2],"% variance")) +
#' #coord_fixed() +
#' ggtitle(paste0("PCA of ",TISSUE," Gene Expression:\ny ~ sex + cohort")) +
#' guides(color=guide_legend(title="animal.key.anirandgroup")) +
#' scale_color_manual(values=ec_colors) +
#' theme(legend.title=element_blank())
#' dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_s4.R
\name{generic_s4_code}
\alias{generic_s4_code}
\title{Generates the S4 generic definition}
\usage{
generic_s4_code(methods)
}
\arguments{
\item{methods}{the methods list. something like
`tensor_methods() %>% declaration_with_name("abs")`}
}
\description{
Generates the S4 generic definition
}
| /tools/torchgen/man/generic_s4_code.Rd | permissive | dfalbel/torch | R | false | true | 384 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_s4.R
\name{generic_s4_code}
\alias{generic_s4_code}
\title{Generates the S4 generic definition}
\usage{
generic_s4_code(methods)
}
\arguments{
\item{methods}{the methods list. something like
`tensor_methods() %>% declaration_with_name("abs")`}
}
\description{
Generates the S4 generic definition
}
|
#' eq_map
#'
#' Build an interactive leaflet map of earthquake epicentres: one circle
#' marker per event, with the marker radius scaled by magnitude and a popup
#' taken from the requested annotation column.
#'
#' @importFrom leaflet addCircleMarkers
#' @importFrom leaflet addTiles
#' @importFrom leaflet leaflet
#' @param data A data frame of NOAA earthquake data with LATITUDE,
#'   LONGITUDE and EQ_PRIMARY columns.
#' @param annot_col Name of the column holding the (HTML) popup text.
#' @return A leaflet map with earthquakes and annotations.
#' @examples
#' \dontrun{eq_map(eq,"name")}
#' @export
eq_map <- function(data, annot_col) {
  # Start from an empty widget with the default OpenStreetMap tile layer,
  # then draw the markers on top of it.
  base_map <- leaflet::addTiles(leaflet::leaflet())
  leaflet::addCircleMarkers(
    base_map,
    lat = data$LATITUDE,
    lng = data$LONGITUDE,
    radius = data$EQ_PRIMARY,
    weight = 1,
    popup = data[[annot_col]])
}
#' eq_create_label
#'
#' Build an HTML popup label for each earthquake from its location name,
#' magnitude and total deaths. Fields that are NA are omitted from the
#' label, so an all-NA row yields an empty string.
#'
#' @param data A data frame containing cleaned NOAA earthquake data with
#'   LOCATION_NAME, EQ_PRIMARY and TOTAL_DEATHS columns.
#' @return A character vector of HTML label strings, one per row of `data`.
#' @examples
#' \dontrun{eq_create_label(data)}
#' @export
eq_create_label <- function(data) {
  # Each ifelse() branch is vectorized over rows; NA fields contribute "".
  # NOTE(review): fixed malformed HTML -- "Magnitude" previously opened a
  # second <b> instead of closing the first, and "Total deaths" never
  # closed its <b> tag at all.
  paste0(
    ifelse(
      is.na(data$LOCATION_NAME),
      "",
      paste0(
        "<b>Location: </b>",
        data$LOCATION_NAME,
        "<br>")),
    ifelse(
      is.na(data$EQ_PRIMARY),
      "",
      paste0(
        "<b>Magnitude: </b>",
        data$EQ_PRIMARY,
        "<br>")),
    ifelse(
      is.na(data$TOTAL_DEATHS),
      "",
      paste0(
        "<b>Total deaths: </b>",
        data$TOTAL_DEATHS,
        "<br>"))
  )
}
| /R/eq_map.R | permissive | A0791298/CourseraCapstone | R | false | false | 1,196 | r | #' eq_map
#' @importFrom leaflet addCircleMarkers
#' @importFrom leaflet addTiles
#' @importFrom leaflet leaflet
#' @param data data
#' @param annot_col name of the HTML column
#' @return A leaflet map with earthquakes and annotations.
#' @examples
#' \dontrun{eq_map(eq,"name")}
#' @export
eq_map <- function(data, annot_col) {
leaflet::addCircleMarkers(
leaflet::addTiles(
leaflet::leaflet()),
lat = data$LATITUDE,
lng = data$LONGITUDE,
radius = data$EQ_PRIMARY,
weight = 1,
popup = data[[annot_col]])
}
#' eq_create_label
#' @param data A data frame containing cleaned NOAA earthquake data
#' @return HTML label string
#' @examples
#' \dontrun{eq_create_label(data)}
#' @export
eq_create_label <- function(data) {
paste0(
ifelse(
is.na(data$LOCATION_NAME),
"",
paste0(
"<b>Location: </b>",
data$LOCATION_NAME,
"<br>")),
ifelse(
is.na(data$EQ_PRIMARY),
"",
paste0(
"<b>Magnitude: <b>",
data$EQ_PRIMARY,
"<br>")),
ifelse(
is.na(data$TOTAL_DEATHS),
"",
paste0(
"<b>Total deaths: ",
data$TOTAL_DEATHS,
"<br>"))
)
}
|
## Packrat bootstrap (auto-generated by packrat 0.4.8-31; vendored file).
## Sourced from the project's .Rprofile at R startup: loads packrat from the
## project's private library, or -- when it is missing -- bootstraps an
## installation from a bundled source tarball, a user-library packrat,
## devtools::install_github, or CRAN, then enters packrat mode.
## NOTE(review): generated file; packrat may overwrite local edits.
local({

  ## Helper function to get the path to the library directory for a
  ## given packrat project.
  getPackratLibDir <- function(projDir = NULL) {
    path <- file.path("packrat", "lib", R.version$platform, getRversion())

    if (!is.null(projDir)) {

      ## Strip trailing slashes if necessary
      projDir <- sub("/+$", "", projDir)

      ## Only prepend path if different from current working dir
      if (!identical(normalizePath(projDir), normalizePath(getwd())))
        path <- file.path(projDir, path)
    }

    path
  }

  ## Ensure that we set the packrat library directory relative to the
  ## project directory. Normally, this should be the working directory,
  ## but we also use '.rs.getProjectDirectory()' if necessary (e.g. we're
  ## rebuilding a project while within a separate directory)
  libDir <- if (exists(".rs.getProjectDirectory"))
    getPackratLibDir(.rs.getProjectDirectory())
  else
    getPackratLibDir()

  ## Unload packrat in case it's loaded -- this ensures packrat _must_ be
  ## loaded from the private library. Note that `requireNamespace` will
  ## succeed if the package is already loaded, regardless of lib.loc!
  if ("packrat" %in% loadedNamespaces())
    try(unloadNamespace("packrat"), silent = TRUE)

  if (suppressWarnings(requireNamespace("packrat", quietly = TRUE, lib.loc = libDir))) {

    # Check 'print.banner.on.startup' -- when NA and RStudio, don't print
    print.banner <- packrat::get_opts("print.banner.on.startup")
    if (print.banner == "auto" && is.na(Sys.getenv("RSTUDIO", unset = NA))) {
      print.banner <- TRUE
    } else {
      print.banner <- FALSE
    }
    return(packrat::on(print.banner = print.banner))
  }

  ## Escape hatch to allow RStudio to handle bootstrapping. This
  ## enables RStudio to provide print output when automagically
  ## restoring a project from a bundle on load.
  if (!is.na(Sys.getenv("RSTUDIO", unset = NA)) &&
      is.na(Sys.getenv("RSTUDIO_PACKRAT_BOOTSTRAP", unset = NA))) {
    Sys.setenv("RSTUDIO_PACKRAT_BOOTSTRAP" = "1")
    setHook("rstudio.sessionInit", function(...) {
      # Ensure that, on sourcing 'packrat/init.R', we are
      # within the project root directory
      if (exists(".rs.getProjectDirectory")) {
        owd <- getwd()
        setwd(.rs.getProjectDirectory())
        on.exit(setwd(owd), add = TRUE)
      }
      source("packrat/init.R")
    })
    return(invisible(NULL))
  }

  ## Bootstrapping -- only performed in interactive contexts,
  ## or when explicitly asked for on the command line
  if (interactive() || "--bootstrap-packrat" %in% commandArgs(TRUE)) {

    needsRestore <- "--bootstrap-packrat" %in% commandArgs(TRUE)

    message("Packrat is not installed in the local library -- ",
            "attempting to bootstrap an installation...")

    ## We need utils for the following to succeed -- there are calls to functions
    ## in 'restore' that are contained within utils. utils gets loaded at the
    ## end of start-up anyhow, so this should be fine
    library("utils", character.only = TRUE)

    ## Install packrat into local project library
    packratSrcPath <- list.files(full.names = TRUE,
                                 file.path("packrat", "src", "packrat")
    )

    ## No packrat tarballs available locally -- try some other means of installation
    if (!length(packratSrcPath)) {

      message("> No source tarball of packrat available locally")

      ## There are no packrat sources available -- try using a version of
      ## packrat installed in the user library to bootstrap
      if (requireNamespace("packrat", quietly = TRUE) && packageVersion("packrat") >= "0.2.0.99") {
        message("> Using user-library packrat (",
                packageVersion("packrat"),
                ") to bootstrap this project")
      }

      ## Couldn't find a user-local packrat -- try finding and using devtools
      ## to install
      else if (requireNamespace("devtools", quietly = TRUE)) {
        message("> Attempting to use devtools::install_github to install ",
                "a temporary version of packrat")
        library(stats) ## for setNames
        devtools::install_github("rstudio/packrat")
      }

      ## Try downloading packrat from CRAN if available
      else if ("packrat" %in% rownames(available.packages())) {
        message("> Installing packrat from CRAN")
        install.packages("packrat")
      }

      ## Fail -- couldn't find an appropriate means of installing packrat
      else {
        stop("Could not automatically bootstrap packrat -- try running ",
             "\"'install.packages('devtools'); devtools::install_github('rstudio/packrat')\"",
             "and restarting R to bootstrap packrat.")
      }

      # Restore the project, unload the temporary packrat, and load the private packrat
      if (needsRestore)
        packrat::restore(prompt = FALSE, restart = TRUE)

      ## This code path only reached if we didn't restart earlier
      unloadNamespace("packrat")
      requireNamespace("packrat", lib.loc = libDir, quietly = TRUE)
      return(packrat::on())
    }

    ## Multiple packrat tarballs available locally -- try to choose one
    ## TODO: read lock file and infer most appropriate from there; low priority because
    ## after bootstrapping packrat a restore should do the right thing
    if (length(packratSrcPath) > 1) {
      warning("Multiple versions of packrat available in the source directory;",
              "using packrat source:\n- ", shQuote(packratSrcPath))
      packratSrcPath <- packratSrcPath[[1]]
    }


    lib <- file.path("packrat", "lib", R.version$platform, getRversion())
    if (!file.exists(lib)) {
      dir.create(lib, recursive = TRUE)
    }
    lib <- normalizePath(lib, winslash = "/")

    message("> Installing packrat into project private library:")
    message("- ", shQuote(lib))

    surround <- function(x, with) {
      if (!length(x)) return(character())
      paste0(with, x, with)
    }

    ## The following is performed because a regular install.packages call can fail
    peq <- function(x, y) paste(x, y, sep = " = ")
    installArgs <- c(
      peq("pkgs", surround(packratSrcPath, with = "'")),
      peq("lib", surround(lib, with = "'")),
      peq("repos", "NULL"),
      peq("type", surround("source", with = "'"))
    )
    installCmd <- paste(sep = "",
                        "utils::install.packages(",
                        paste(installArgs, collapse = ", "),
                        ")")

    fullCmd <- paste(
      surround(file.path(R.home("bin"), "R"), with = "\""),
      "--vanilla",
      "--slave",
      "-e",
      surround(installCmd, with = "\"")
    )
    system(fullCmd)

    ## Tag the installed packrat so we know it's managed by packrat
    ## TODO: should this be taking information from the lockfile? this is a bit awkward
    ## because we're taking an un-annotated packrat source tarball and simply assuming it's now
    ## an 'installed from source' version

    ## -- InstallAgent -- ##
    installAgent <- 'InstallAgent: packrat 0.4.8-31'

    ## -- InstallSource -- ##
    installSource <- 'InstallSource: source'

    packratDescPath <- file.path(lib, "packrat", "DESCRIPTION")
    DESCRIPTION <- readLines(packratDescPath)
    DESCRIPTION <- c(DESCRIPTION, installAgent, installSource)
    cat(DESCRIPTION, file = packratDescPath, sep = "\n")

    # Otherwise, continue on as normal
    message("> Attaching packrat")
    library("packrat", character.only = TRUE, lib.loc = lib)

    message("> Restoring library")
    if (needsRestore)
      packrat::restore(prompt = FALSE, restart = FALSE)

    # If the environment allows us to restart, do so with a call to restore
    restart <- getOption("restart")
    if (!is.null(restart)) {
      message("> Packrat bootstrap successfully completed. ",
              "Restarting R and entering packrat mode...")
      return(restart())
    }

    # Callers (source-erers) can define this hidden variable to make sure we don't enter packrat mode
    # Primarily useful for testing
    if (!exists(".__DONT_ENTER_PACKRAT_MODE__.") && interactive()) {
      message("> Packrat bootstrap successfully completed. Entering packrat mode...")
      packrat::on()
    }

    Sys.unsetenv("RSTUDIO_PACKRAT_BOOTSTRAP")

  }

})
| /packrat/init.R | no_license | allanbeto/Exploratory_Data_Analysis_in_R_Case_Study | R | false | false | 8,306 | r | local({
## Helper function to get the path to the library directory for a
## given packrat project.
getPackratLibDir <- function(projDir = NULL) {
path <- file.path("packrat", "lib", R.version$platform, getRversion())
if (!is.null(projDir)) {
## Strip trailing slashes if necessary
projDir <- sub("/+$", "", projDir)
## Only prepend path if different from current working dir
if (!identical(normalizePath(projDir), normalizePath(getwd())))
path <- file.path(projDir, path)
}
path
}
## Ensure that we set the packrat library directory relative to the
## project directory. Normally, this should be the working directory,
## but we also use '.rs.getProjectDirectory()' if necessary (e.g. we're
## rebuilding a project while within a separate directory)
libDir <- if (exists(".rs.getProjectDirectory"))
getPackratLibDir(.rs.getProjectDirectory())
else
getPackratLibDir()
## Unload packrat in case it's loaded -- this ensures packrat _must_ be
## loaded from the private library. Note that `requireNamespace` will
## succeed if the package is already loaded, regardless of lib.loc!
if ("packrat" %in% loadedNamespaces())
try(unloadNamespace("packrat"), silent = TRUE)
if (suppressWarnings(requireNamespace("packrat", quietly = TRUE, lib.loc = libDir))) {
# Check 'print.banner.on.startup' -- when NA and RStudio, don't print
print.banner <- packrat::get_opts("print.banner.on.startup")
if (print.banner == "auto" && is.na(Sys.getenv("RSTUDIO", unset = NA))) {
print.banner <- TRUE
} else {
print.banner <- FALSE
}
return(packrat::on(print.banner = print.banner))
}
## Escape hatch to allow RStudio to handle bootstrapping. This
## enables RStudio to provide print output when automagically
## restoring a project from a bundle on load.
if (!is.na(Sys.getenv("RSTUDIO", unset = NA)) &&
is.na(Sys.getenv("RSTUDIO_PACKRAT_BOOTSTRAP", unset = NA))) {
Sys.setenv("RSTUDIO_PACKRAT_BOOTSTRAP" = "1")
setHook("rstudio.sessionInit", function(...) {
# Ensure that, on sourcing 'packrat/init.R', we are
# within the project root directory
if (exists(".rs.getProjectDirectory")) {
owd <- getwd()
setwd(.rs.getProjectDirectory())
on.exit(setwd(owd), add = TRUE)
}
source("packrat/init.R")
})
return(invisible(NULL))
}
## Bootstrapping -- only performed in interactive contexts,
## or when explicitly asked for on the command line
if (interactive() || "--bootstrap-packrat" %in% commandArgs(TRUE)) {
needsRestore <- "--bootstrap-packrat" %in% commandArgs(TRUE)
message("Packrat is not installed in the local library -- ",
"attempting to bootstrap an installation...")
## We need utils for the following to succeed -- there are calls to functions
## in 'restore' that are contained within utils. utils gets loaded at the
## end of start-up anyhow, so this should be fine
library("utils", character.only = TRUE)
## Install packrat into local project library
packratSrcPath <- list.files(full.names = TRUE,
file.path("packrat", "src", "packrat")
)
## No packrat tarballs available locally -- try some other means of installation
if (!length(packratSrcPath)) {
message("> No source tarball of packrat available locally")
## There are no packrat sources available -- try using a version of
## packrat installed in the user library to bootstrap
if (requireNamespace("packrat", quietly = TRUE) && packageVersion("packrat") >= "0.2.0.99") {
message("> Using user-library packrat (",
packageVersion("packrat"),
") to bootstrap this project")
}
## Couldn't find a user-local packrat -- try finding and using devtools
## to install
else if (requireNamespace("devtools", quietly = TRUE)) {
message("> Attempting to use devtools::install_github to install ",
"a temporary version of packrat")
library(stats) ## for setNames
devtools::install_github("rstudio/packrat")
}
## Try downloading packrat from CRAN if available
else if ("packrat" %in% rownames(available.packages())) {
message("> Installing packrat from CRAN")
install.packages("packrat")
}
## Fail -- couldn't find an appropriate means of installing packrat
else {
stop("Could not automatically bootstrap packrat -- try running ",
"\"'install.packages('devtools'); devtools::install_github('rstudio/packrat')\"",
"and restarting R to bootstrap packrat.")
}
# Restore the project, unload the temporary packrat, and load the private packrat
if (needsRestore)
packrat::restore(prompt = FALSE, restart = TRUE)
## This code path only reached if we didn't restart earlier
unloadNamespace("packrat")
requireNamespace("packrat", lib.loc = libDir, quietly = TRUE)
return(packrat::on())
}
## Multiple packrat tarballs available locally -- try to choose one
## TODO: read lock file and infer most appropriate from there; low priority because
## after bootstrapping packrat a restore should do the right thing
if (length(packratSrcPath) > 1) {
warning("Multiple versions of packrat available in the source directory;",
"using packrat source:\n- ", shQuote(packratSrcPath))
packratSrcPath <- packratSrcPath[[1]]
}
lib <- file.path("packrat", "lib", R.version$platform, getRversion())
if (!file.exists(lib)) {
dir.create(lib, recursive = TRUE)
}
lib <- normalizePath(lib, winslash = "/")
message("> Installing packrat into project private library:")
message("- ", shQuote(lib))
surround <- function(x, with) {
if (!length(x)) return(character())
paste0(with, x, with)
}
## The following is performed because a regular install.packages call can fail
peq <- function(x, y) paste(x, y, sep = " = ")
installArgs <- c(
peq("pkgs", surround(packratSrcPath, with = "'")),
peq("lib", surround(lib, with = "'")),
peq("repos", "NULL"),
peq("type", surround("source", with = "'"))
)
installCmd <- paste(sep = "",
"utils::install.packages(",
paste(installArgs, collapse = ", "),
")")
fullCmd <- paste(
surround(file.path(R.home("bin"), "R"), with = "\""),
"--vanilla",
"--slave",
"-e",
surround(installCmd, with = "\"")
)
system(fullCmd)
## Tag the installed packrat so we know it's managed by packrat
## TODO: should this be taking information from the lockfile? this is a bit awkward
## because we're taking an un-annotated packrat source tarball and simply assuming it's now
## an 'installed from source' version
## -- InstallAgent -- ##
installAgent <- 'InstallAgent: packrat 0.4.8-31'
## -- InstallSource -- ##
installSource <- 'InstallSource: source'
packratDescPath <- file.path(lib, "packrat", "DESCRIPTION")
DESCRIPTION <- readLines(packratDescPath)
DESCRIPTION <- c(DESCRIPTION, installAgent, installSource)
cat(DESCRIPTION, file = packratDescPath, sep = "\n")
# Otherwise, continue on as normal
message("> Attaching packrat")
library("packrat", character.only = TRUE, lib.loc = lib)
message("> Restoring library")
if (needsRestore)
packrat::restore(prompt = FALSE, restart = FALSE)
# If the environment allows us to restart, do so with a call to restore
restart <- getOption("restart")
if (!is.null(restart)) {
message("> Packrat bootstrap successfully completed. ",
"Restarting R and entering packrat mode...")
return(restart())
}
# Callers (source-erers) can define this hidden variable to make sure we don't enter packrat mode
# Primarily useful for testing
if (!exists(".__DONT_ENTER_PACKRAT_MODE__.") && interactive()) {
message("> Packrat bootstrap successfully completed. Entering packrat mode...")
packrat::on()
}
Sys.unsetenv("RSTUDIO_PACKRAT_BOOTSTRAP")
}
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_functions.R
\name{files.update}
\alias{files.update}
\title{Updates file metadata and/or content.}
\usage{
files.update(File, fileId, addParents = NULL, convert = NULL,
modifiedDateBehavior = NULL, newRevision = NULL, ocr = NULL,
ocrLanguage = NULL, pinned = NULL, removeParents = NULL,
setModifiedDate = NULL, timedTextLanguage = NULL,
timedTextTrackName = NULL, updateViewedDate = NULL,
useContentAsIndexableText = NULL)
}
\arguments{
\item{File}{The \link{File} object to pass to this method}
\item{fileId}{The ID of the file to update}
\item{addParents}{Comma-separated list of parent IDs to add}
\item{convert}{This parameter is deprecated and has no function}
\item{modifiedDateBehavior}{Determines the behavior in which modifiedDate is updated}
\item{newRevision}{Whether a blob upload should create a new revision}
\item{ocr}{Whether to attempt OCR on .jpg, .png, .gif, or .pdf uploads}
\item{ocrLanguage}{If ocr is true, hints at the language to use}
\item{pinned}{Whether to pin the new revision}
\item{removeParents}{Comma-separated list of parent IDs to remove}
\item{setModifiedDate}{Whether to set the modified date with the supplied modified date}
\item{timedTextLanguage}{The language of the timed text}
\item{timedTextTrackName}{The timed text track name}
\item{updateViewedDate}{Whether to update the view date after successfully updating the file}
\item{useContentAsIndexableText}{Whether to use the content as indexable text}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/drive
\item https://www.googleapis.com/auth/drive.appdata
\item https://www.googleapis.com/auth/drive.apps.readonly
\item https://www.googleapis.com/auth/drive.file
\item https://www.googleapis.com/auth/drive.metadata
\item https://www.googleapis.com/auth/drive.scripts
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/drive, https://www.googleapis.com/auth/drive.appdata, https://www.googleapis.com/auth/drive.apps.readonly, https://www.googleapis.com/auth/drive.file, https://www.googleapis.com/auth/drive.metadata, https://www.googleapis.com/auth/drive.scripts)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/drive/}{Google Documentation}
Other File functions: \code{\link{File.exportLinks}},
\code{\link{File.imageMediaMetadata.location}},
\code{\link{File.imageMediaMetadata}},
\code{\link{File.indexableText}},
\code{\link{File.labels}},
\code{\link{File.openWithLinks}},
\code{\link{File.thumbnail}},
\code{\link{File.videoMediaMetadata}},
\code{\link{File}}, \code{\link{files.copy}},
\code{\link{files.insert}}, \code{\link{files.patch}}
}
| /googledrivev2.auto/man/files.update.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 2,933 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_functions.R
\name{files.update}
\alias{files.update}
\title{Updates file metadata and/or content.}
\usage{
files.update(File, fileId, addParents = NULL, convert = NULL,
modifiedDateBehavior = NULL, newRevision = NULL, ocr = NULL,
ocrLanguage = NULL, pinned = NULL, removeParents = NULL,
setModifiedDate = NULL, timedTextLanguage = NULL,
timedTextTrackName = NULL, updateViewedDate = NULL,
useContentAsIndexableText = NULL)
}
\arguments{
\item{File}{The \link{File} object to pass to this method}
\item{fileId}{The ID of the file to update}
\item{addParents}{Comma-separated list of parent IDs to add}
\item{convert}{This parameter is deprecated and has no function}
\item{modifiedDateBehavior}{Determines the behavior in which modifiedDate is updated}
\item{newRevision}{Whether a blob upload should create a new revision}
\item{ocr}{Whether to attempt OCR on}
\item{ocrLanguage}{If ocr is true, hints at the language to use}
\item{pinned}{Whether to pin the new revision}
\item{removeParents}{Comma-separated list of parent IDs to remove}
\item{setModifiedDate}{Whether to set the modified date with the supplied modified date}
\item{timedTextLanguage}{The language of the timed text}
\item{timedTextTrackName}{The timed text track name}
\item{updateViewedDate}{Whether to update the view date after successfully updating the file}
\item{useContentAsIndexableText}{Whether to use the content as indexable text}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/drive
\item https://www.googleapis.com/auth/drive.appdata
\item https://www.googleapis.com/auth/drive.apps.readonly
\item https://www.googleapis.com/auth/drive.file
\item https://www.googleapis.com/auth/drive.metadata
\item https://www.googleapis.com/auth/drive.scripts
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/drive, https://www.googleapis.com/auth/drive.appdata, https://www.googleapis.com/auth/drive.apps.readonly, https://www.googleapis.com/auth/drive.file, https://www.googleapis.com/auth/drive.metadata, https://www.googleapis.com/auth/drive.scripts)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/drive/}{Google Documentation}
Other File functions: \code{\link{File.exportLinks}},
\code{\link{File.imageMediaMetadata.location}},
\code{\link{File.imageMediaMetadata}},
\code{\link{File.indexableText}},
\code{\link{File.labels}},
\code{\link{File.openWithLinks}},
\code{\link{File.thumbnail}},
\code{\link{File.videoMediaMetadata}},
\code{\link{File}}, \code{\link{files.copy}},
\code{\link{files.insert}}, \code{\link{files.patch}}
}
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.2914713777335e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609867675-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 830 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.85353502606261e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.2914713777335e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
## Reconcile the `man_fsetid` column of `prbInfo` (the probeset annotation
## read from the probeset CSV, destined for the featureSet table) with the
## man_fsetid/fsetid pairs observed in the probe table parsed from the
## PGF/CLF files (destined for the pmfeature table).
##
## Two repairs are applied:
##   1. rows of `prbInfo` whose `man_fsetid` actually holds an internal
##      `fsetid` are translated back to the manufacturer probeset id;
##   2. probesets present in the probe table but absent from `prbInfo`
##      are appended as all-NA annotation rows.
## Stops if the PGF still describes fewer probesets than the CSV.
fixManFsetid <- function(prbInfo, prb.table){
    idmap <- prb.table[, c("man_fsetid", "fsetid")]
    idmap <- idmap[!duplicated(idmap$man_fsetid), ]
    ## Repair ids that were recorded as fsetid rather than man_fsetid
    if (!all(prbInfo$man_fsetid %in% idmap$man_fsetid)) {
        asFsetid <- prbInfo$man_fsetid %in% idmap$fsetid
        hits <- match(prbInfo$man_fsetid[asFsetid], idmap$fsetid)
        prbInfo$man_fsetid[asFsetid] <- idmap[hits, "man_fsetid"]
    }
    ## Append all-NA annotation rows for probesets missing from the CSV
    if (nrow(idmap) > nrow(prbInfo)) {
        missed <- idmap[!idmap$man_fsetid %in% prbInfo$man_fsetid, ]
        filler <- data.frame(missed$man_fsetid,
                             matrix(NA, nrow(missed), ncol(prbInfo) - 1))
        names(filler) <- names(prbInfo)
        prbInfo <- rbind(prbInfo, filler)
    }
    if (nrow(idmap) < nrow(prbInfo))
        stop("The pgf file has fewer probesets than the probeset csv file.",
             call. = FALSE)
    prbInfo
}
## Table schemas for the annotation SQLite database.
## Each schema is a list with:
##   col2type -- column name -> SQLite type
##   col2key  -- column name -> key/constraint clause
## Schema for the HTA meta-probeset (mps) tables.
htaMpsSchema <- list(
  col2type = c(
    meta_fsetid = "TEXT",
    transcript_cluster_id = "TEXT",
    fsetid = "INTEGER"
  ),
  col2key = c(
    fsetid = "REFERENCES featureSet(fsetid)"
  )
)
## Schema for the HTA featureSet table: one row per probeset, keyed by the
## internal integer fsetid; chrom/level/type are integer codes referencing
## their respective dictionary tables.
htaFeatureSetSchema <- list(
  col2type = c(
    fsetid = "INTEGER",
    man_fsetid = "TEXT",
    strand = "INTEGER",
    start = "INTEGER",
    stop = "INTEGER",
    transcript_cluster_id = "INTEGER",
    exon_id = "INTEGER",
    crosshyb_type = "INTEGER",
    level = "INTEGER",
    junction_start_edge = "INTEGER",
    junction_stop_edge = "INTEGER",
    junction_sequence = "TEXT",
    has_cds = "INTEGER",
    chrom = "INTEGER",
    type = "INTEGER"
  ),
  col2key = c(
    fsetid = "PRIMARY KEY",
    chrom = "REFERENCES chrom_dict(chrom_id)",
    level = "REFERENCES level_dict(level_id)",
    type = "REFERENCES type_dict(type_id)"
  )
)
###
## Parse the HTA probeset annotation CSV into the data.frame that will
## become the featureSet table, together with the level/chromosome/type
## dictionary tables used to encode its integer-coded columns.
parseHtaProbesetCSV <- function(probeFile, verbose=TRUE){
    ## Strand codes stored in the featureSet table
    FWD <- 0L   # sense strand
    REV <- 1L   # antisense ('-') strand
    ###################################################
    ## TABLES TO ADD
    ###################################################
    if (verbose) simpleMessage("Creating dictionaries... ")
    ## level dictionary is fixed; chromosome and type dictionaries are
    ## built from the CSV contents after parsing
    levelDict <- getLevelSchema()
    if (verbose) msgOK()
    ## the annotation data.frame is to become the featureSet table
    if (verbose) msgParsingFile(probeFile)
    annot <- read.csv(probeFile, comment.char="#",
                      stringsAsFactors=FALSE, na.strings="---")
    if (verbose) msgOK()
    ## columns kept for the featureSet table (includes the junction_*
    ## and has_cds fields added for junction arrays)
    keep <- c("probeset_id", "seqname", "strand", "start", "stop",
              "transcript_cluster_id", "exon_id",
              "crosshyb_type", "level", "probeset_type",
              "junction_start_edge", "junction_stop_edge",
              "junction_sequence", "has_cds")
    annot <- annot[, keep]
    ## rename probeset_id -> man_fsetid
    names(annot) <- replace(keep, 1, "man_fsetid")
    typeDict <- getTypeSchema(annot)
    chromDict <- createChrDict(annot[["seqname"]])
    ## recode descriptive columns as integer keys into the dictionaries
    annot[["chrom"]] <- match(annot[["seqname"]], chromDict[["chrom_id"]])
    annot[["seqname"]] <- NULL
    annot[["strand"]] <- ifelse(annot[["strand"]] == "-", REV, FWD)
    annot[["level"]] <- match(tolower(annot[["level"]]), levelDict[["level_id"]])
    annot[["type"]] <- match(annot[["probeset_type"]], typeDict[["type_id"]])
    annot[["probeset_type"]] <- NULL
    list(probesets=annot, level=levelDict, chromosome=chromDict, type=typeDict)
}
## Parse and combine the PGF/CLF probe files, the probeset annotation
## CSV and the core MPS file into the set of data.frames that become
## the tables of an HTA-style pd.* annotation database. Returns a list
## whose names are the table names (featureSet, pmFeatures, mmFeatures,
## pmSequence, mmSequence, chrom_dict, level_dict, type_dict, core)
## plus the array geometry string.
combinePgfClfProbesetsMpsHTA <- function(pgfFile, clfFile, probeFile,
                                         coreMps, verbose=TRUE){
    tmp <- parsePgfClf(pgfFile=pgfFile, clfFile=clfFile, verbose=verbose)
    probes.table <- tmp[["probes.table"]]
    geom <- tmp[["geometry"]]
    rm(tmp)
    probesetInfo <- parseHtaProbesetCSV(probeFile, verbose=verbose)
    ## levels table
    ## id
    ## desc
    level_dict <- probesetInfo[["level"]]
    ## chromosome table
    ## id
    ## chrom_id
    chrom_dict <- probesetInfo[["chromosome"]]
    ## types table
    ## id
    ## type_id
    type_dict <- probesetInfo[["type"]]
    ## featureSet table - Fields
    ## probeset_id
    ## strand
    ## start
    ## stop
    ## transcript_cluster_id
    ## exon_id
    ## crosshyb_type
    ## level
    ## junction_start_edge
    ## junction_stop_edge
    ## junction_sequence
    ## chrom
    ## type
    ## Starting with the MTA 2.0 array, there are (811) PSR and JUC probesets
    ## that are part of the design, but because they are targeting regions that
    ## are too repetitive to synthesize a probe, or too short for a probe (25-mer) to fit
    ## they don't actually have these probesets on the array. But they kept them in the
    ## design, so they show up in the csv files. This causes problems when we create the featureSet
    ## data.frame, because we are doing a merge with all = TRUE, so we get NA values for the
    ## fsetids that don't exist, but for which we do have man_fsetids. These NA values
    ## are problematic when we insert into the database, as the fsetids have to be unique.
    ## So we remove them here.
    ## As of the na35 build of probeset csv files, Affy is mixing fsetid and man_fsetid
    ## IDs in the probeset csv file (at least for the HTA arrays), so we have to account for that
    probesetInfo$probesets <- fixManFsetid(probesetInfo$probesets, probes.table)
    ## attach the integer fsetid key from the probe table to the annotation
    featureSet <- merge(probesetInfo[["probesets"]],
                        unique(probes.table[, c('man_fsetid', 'fsetid')]),
                        by='man_fsetid', all=TRUE)
    ## pmfeature table - Fields
    ## fid
    ## fsetid
    ## chr (NA)
    ## location (NA)
    ## x
    ## y
    ## IMPORTANT:
    ## ignoring strand
    ## keeping atom to match with MM's
    pmFeatures <- subset(probes.table,
                         substr(probes.table[["ptype"]], 1, 2) == "pm",
                         select=c("fid", "fsetid", "atom", "x", "y", "sequence"))
    ## pmSequence is kept separately (DataFrame + DNAStringSet), sorted by fid
    pmSequence <- pmFeatures[, c("fid", "sequence")]
    pmFeatures[["sequence"]] <- NULL
    pmSequence <- pmSequence[order(pmSequence[["fid"]]),]
    pmSequence <- DataFrame(fid=pmSequence[["fid"]],
                            sequence=DNAStringSet(pmSequence[["sequence"]]))
    ## mmfeature table - Fields
    ## fid
    ## fid of matching pm
    ## x
    ## y
    ## IMPORTANT:
    ## ignoring strand
    ## keeping atom to match with MM's
    ## ADD sequence for MM
    mmFeatures <- subset(probes.table, substr(probes.table$ptype, 1, 2) =="mm",
                         select=c("fid", "fsetid", "atom", "x", "y", "sequence"))
    if (nrow(mmFeatures) > 0){
        mmSequence <- mmFeatures[, c("fid", "sequence")]
        mmFeatures[["sequence"]] <- NULL
        mmSequence <- mmSequence[order(mmSequence[["fid"]]),]
        mmSequence <- DataFrame(fid=mmSequence[["fid"]],
                                sequence=DNAStringSet(mmSequence[["sequence"]]))
    }else{
        ## HTA arrays typically have no MM probes; callers check nrow()
        mmFeatures <- data.frame()
        mmSequence <- data.frame()
    }
    ## IMPORTANT: for the moment, bgfeature will contain everything (that is PM) but 'main'
    ## bgfeature table - Fields
    ## fid
    ## x
    ## y
    ## fs_type: featureSet type: genomic/antigenomic
    ## f_type: pm/mm at/st
    ## old code:
    ## subset using cols
    ## cols <- c("fid", "fsetid", "pstype", "ptype", "x", "y", "sequence")
    rm(probes.table)
    ## core MPS file: meta-probeset (transcript cluster) membership
    core <- mpsParser(coreMps, verbose=verbose)
    ## Here we should have the following tables available:
    ## featureSet: fsetid, type
    ## pmfeature: fid, fsetid, atom, x, y
    ## bgfeature: fid, fsetid, fs_type, f_type, x, y - NOT ANYMORE
    ## pmSequence: fid, sequence
    ## bgSequence: fid, sequence - NOT ANYMORE
    ## core, extended, full: meta_fsetid, trancript_cluster_id, fsetid
    ## mmfeatures/mmSequence
    out <- list(featureSet=featureSet, pmFeatures=pmFeatures,
                mmFeatures=mmFeatures, geometry=geom,
                pmSequence=pmSequence, mmSequence=mmSequence,
                chrom_dict=chrom_dict, level_dict=level_dict,
                type_dict=type_dict, core=core)
    return(out)
}
#######################################################################
## SECTION D - Package Maker
## This shouldn't be extremely hard.
## The idea is to: i) get array info (name, pkgname, dbname)
## ii) parse data; iii) create pkg from template;
## iv) dump the database
#######################################################################
## makePdInfoPackage method for Affymetrix HTA array seeds: parses the
## PGF/CLF/probeset-CSV/MPS inputs, instantiates the package template,
## builds the SQLite annotation database, and saves the sequence
## DataFrames plus the NetAffx annotation objects into the new package.
## NOTE(review): batch_size and unlink belong to the generic's
## signature but are not used in this method body -- confirm intended.
setMethod("makePdInfoPackage", "AffyHTAPDInfoPkgSeed",
          function(object, destDir=".", batch_size=10000, quiet=FALSE, unlink=FALSE) {
              msgBar()
              message("Building annotation package for Affymetrix HTA Array")
              message("PGF.........: ", basename(object@pgfFile))
              message("CLF.........: ", basename(object@clfFile))
              message("Probeset....: ", basename(object@probeFile))
              message("Transcript..: ", basename(object@transFile))
              message("Core MPS....: ", basename(object@coreMps))
              msgBar()
              #######################################################################
              ## Part i) get array info (chipName, pkgName, dbname)
              #######################################################################
              chip <- chipName(object)
              pkgName <- cleanPlatformName(chip)
              extdataDir <- file.path(destDir, pkgName, "inst", "extdata")
              dbFileName <- paste(pkgName, "sqlite", sep=".")
              dbFilePath <- file.path(extdataDir, dbFileName)
              #######################################################################
              ## Part ii) parse data. This should return a list of data.frames.
              ##          The names of the elements in the list are table names.
              #######################################################################
              parsedData <- combinePgfClfProbesetsMpsHTA(object@pgfFile,
                                                         object@clfFile,
                                                         object@probeFile,
                                                         object@coreMps,
                                                         verbose=!quiet)
              #######################################################################
              ## Part iii) Create package from template
              #######################################################################
              pdInfoClass <- "AffyHTAPDInfo"
              syms <- list(MANUF=object@manufacturer,
                           VERSION=object@version,
                           GENOMEBUILD=object@genomebuild,
                           AUTHOR=object@author,
                           AUTHOREMAIL=object@email,
                           LIC=object@license,
                           DBFILE=dbFileName,
                           CHIPNAME=chip,
                           PKGNAME=pkgName,
                           PDINFONAME=pkgName,
                           PDINFOCLASS=pdInfoClass,
                           GEOMETRY=parsedData[["geometry"]])
              templateDir <- system.file("pd.PKG.template",
                                         package="pdInfoBuilder")
              createPackage(pkgname=pkgName, destinationDir=destDir,
                            originDir=templateDir, symbolValues=syms,
                            quiet=quiet)
              dir.create(extdataDir, recursive=TRUE)
              #######################################################################
              ## Part iv) Create SQLite database
              ## FIX ME: Fix ordering of the tables
              #######################################################################
              conn <- dbConnect(dbDriver("SQLite"), dbname=dbFilePath)
              increaseDbPerformance(conn)
              ## Adding new tables
              ## dictionaries first (they are referenced by featureSet's keys)
              dbCreateTable(conn,
                            "chrom_dict",
                            chromDictTable[["col2type"]],
                            chromDictTable[["col2key"]])
              dbCreateTable(conn,
                            "level_dict",
                            levelDictTable[["col2type"]],
                            levelDictTable[["col2key"]])
              dbCreateTable(conn,
                            "type_dict",
                            typeDictTable[["col2type"]],
                            typeDictTable[["col2key"]])
              dbCreateTable(conn,
                            "core_mps",
                            htaMpsSchema[["col2type"]],
                            htaMpsSchema[["col2key"]])
              ## end adding
              dbCreateTable(conn,
                            "featureSet",
                            htaFeatureSetSchema[["col2type"]],
                            htaFeatureSetSchema[["col2key"]])
              dbCreateTable(conn, "pmfeature",
                            genePmFeatureSchema[["col2type"]],
                            genePmFeatureSchema[["col2key"]])
              ## mmfeature table only when the array actually has MM probes
              containsMm <- nrow(parsedData[["mmFeatures"]]) > 0
              if (containsMm)
                  dbCreateTable(conn,
                                "mmfeature",
                                exonTranscriptionMmFeatureSchema[["col2type"]],
                                exonTranscriptionMmFeatureSchema[["col2key"]])
              ## Inserting data in new tables
              dbInsertDataFrame(conn, "chrom_dict", parsedData[["chrom_dict"]],
                                chromDictTable[["col2type"]], !quiet)
              dbInsertDataFrame(conn, "level_dict", parsedData[["level_dict"]],
                                levelDictTable[["col2type"]], !quiet)
              dbInsertDataFrame(conn, "type_dict", parsedData[["type_dict"]],
                                typeDictTable[["col2type"]], !quiet)
              dbInsertDataFrame(conn, "core_mps", parsedData[["core"]],
                                mpsSchema[["col2type"]], !quiet)
              ## end inserting
              dbInsertDataFrame(conn, "featureSet", parsedData[["featureSet"]],
                                htaFeatureSetSchema[["col2type"]], !quiet)
              dbInsertDataFrame(conn, "pmfeature", parsedData[["pmFeatures"]],
                                genePmFeatureSchema[["col2type"]], !quiet)
              if (containsMm)
                  dbInsertDataFrame(conn, "mmfeature", parsedData[["mmFeatures"]],
                                    exonTranscriptionMmFeatureSchema[["col2type"]], !quiet)
              dbCreateTableInfo(conn, !quiet)
              ## Create indices
              dbCreateIndex(conn, "idx_pmfsetid", "pmfeature", "fsetid", FALSE, verbose=!quiet)
              dbCreateIndex(conn, "idx_pmfid", "pmfeature", "fid", FALSE, verbose=!quiet)
              dbCreateIndicesFs(conn, !quiet)
              dbCreateIndex(conn, "idx_core_meta_fsetid", "core_mps", "meta_fsetid", FALSE, verbose=!quiet)
              dbCreateIndex(conn, "idx_core_fsetid", "core_mps", "fsetid", FALSE, verbose=!quiet)
              if (containsMm){
                  dbCreateIndex(conn, "idx_mmfsetid", "mmfeature", "fsetid", FALSE, verbose=!quiet)
                  dbCreateIndex(conn, "idx_mmfid", "mmfeature", "fid", FALSE, verbose=!quiet)
              }
              ## reclaim free pages before shipping the db inside the package
              dbGetQuery(conn, "VACUUM")
              dbDisconnect(conn)
              #######################################################################
              ## Part v) Save sequence DataFrames
              ## FIX ME: Fix ordering of the tables to match xxFeature tables
              #######################################################################
              datadir <- file.path(destDir, pkgName, "data")
              dir.create(datadir)
              pmSequence <- parsedData[["pmSequence"]]
              pmSeqFile <- file.path(datadir, "pmSequence.rda")
              if (!quiet) message("Saving DataFrame object for PM.")
              save(pmSequence, file=pmSeqFile, compress='xz')
              if (containsMm){
                  mmSequence <- parsedData[["mmSequence"]]
                  mmSeqFile <- file.path(datadir, "mmSequence.rda")
                  if (!quiet) message("Saving DataFrame object for MM.")
                  save(mmSequence, file=mmSeqFile, compress='xz')
              }
              #######################################################################
              ## Part vi) Save NetAffx Annotation to extdata
              #######################################################################
              if (!quiet) message("Saving NetAffx Annotation... ", appendLF=FALSE)
              netaffxProbeset <- annot2fdata(object@probeFile)
              save(netaffxProbeset, file=file.path(extdataDir,
                                                   'netaffxProbeset.rda'), compress='xz')
              netaffxTranscript <- annot2fdata(object@transFile)
              save(netaffxTranscript, file=file.path(extdataDir,
                                                     'netaffxTranscript.rda'), compress='xz')
              if (!quiet) msgOK()
              if (!quiet) message("Done.")
          })
| /R/pdBuilderV2HTA2.R | no_license | benilton/pdInfoBuilder | R | false | false | 17,989 | r | ## code to fix mis-matches between man_fsetid columns in what will become featureSet and pmfeature tables
fixManFsetid <- function(prbInfo, prb.table){
tmp <- prb.table[,c("man_fsetid","fsetid")]
tmp <- tmp[!duplicated(tmp$man_fsetid),]
if(!all(prbInfo$man_fsetid %in% tmp$man_fsetid)){
ind <- prbInfo$man_fsetid %in% tmp$fsetid
matcher <- match(prbInfo$man_fsetid[ind], tmp$fsetid)
prbInfo$man_fsetid[ind] <- tmp[matcher,"man_fsetid"]
}
if(nrow(tmp) > nrow(prbInfo)){
stillMiss <- tmp[!tmp$man_fsetid %in% prbInfo$man_fsetid,]
toadd <- data.frame(stillMiss$man_fsetid, matrix(NA, nrow(stillMiss), ncol(prbInfo)-1))
names(toadd) <- names(prbInfo)
prbInfo <- rbind(prbInfo, toadd)
}
if(nrow(tmp) < nrow(prbInfo)) stop("The pgf file has fewer probesets than the probeset csv file.", call. = FALSE)
prbInfo
}
## tables
htaMpsSchema <- list(col2type=c(
meta_fsetid="TEXT",
transcript_cluster_id="TEXT",
fsetid="INTEGER"
),
col2key=c(
fsetid="REFERENCES featureSet(fsetid)"
))
htaFeatureSetSchema <- list(col2type=c(
fsetid="INTEGER",
man_fsetid="TEXT",
strand="INTEGER",
start="INTEGER",
stop="INTEGER",
transcript_cluster_id="INTEGER",
exon_id="INTEGER",
crosshyb_type="INTEGER",
level="INTEGER",
junction_start_edge="INTEGER",
junction_stop_edge="INTEGER",
junction_sequence="TEXT",
has_cds="INTEGER",
chrom="INTEGER",
type="INTEGER"),
col2key=c(
fsetid="PRIMARY KEY",
chrom="REFERENCES chrom_dict(chrom_id)",
level="REFERENCES level_dict(level_id)",
type ="REFERENCES type_dict(type_id)"
))
###
parseHtaProbesetCSV <- function(probeFile, verbose=TRUE){
## Variables
SENSE <- as.integer(0)
ANTISENSE <- as.integer(1)
###################################################
## TABLES TO ADD
###################################################
if (verbose) simpleMessage("Creating dictionaries... ")
## chromosome dictionary moved to after reading
## the CSV file
## level_schema
level_schema <- getLevelSchema()
## type_schema
## type_schema <- getTypeSchema()
## do this on the fly, below
if (verbose) msgOK()
## the "probesets" df is to be the featureSet table
if (verbose) msgParsingFile(probeFile)
probesets <- read.csv(probeFile, comment.char="#",
stringsAsFactors=FALSE, na.strings="---")
if (verbose) msgOK()
## added: "junction_start_edge", "junction_stop_edge",
## "junction_sequence", "has_cds"
cols <- c("probeset_id", "seqname", "strand", "start", "stop",
"transcript_cluster_id", "exon_id",
"crosshyb_type", "level", "probeset_type",
"junction_start_edge", "junction_stop_edge",
"junction_sequence", "has_cds")
probesets <- probesets[, cols]
cols[1] <- "man_fsetid"
names(probesets) <- cols
rm(cols)
## probesets$fsetid <- as.integer(factor(probesets$man_fsetid))
## type_schema
type_schema <- getTypeSchema(probesets)
chromosome_schema <- createChrDict(probesets[["seqname"]])
probesets[["chrom"]] <- match(probesets[["seqname"]], chromosome_schema[["chrom_id"]])
probesets[["seqname"]] <- NULL
probesets[["strand"]] <- ifelse(probesets[["strand"]] == "-", ANTISENSE, SENSE)
probesets[["level"]] <- match(tolower(probesets[["level"]]), level_schema[["level_id"]])
probesets[["type"]] <- match(probesets[["probeset_type"]], type_schema[["type_id"]])
probesets[["probeset_type"]] <- NULL
list(probesets=probesets, level=level_schema, chromosome=chromosome_schema, type=type_schema)
}
combinePgfClfProbesetsMpsHTA <- function(pgfFile, clfFile, probeFile,
coreMps, verbose=TRUE){
tmp <- parsePgfClf(pgfFile=pgfFile, clfFile=clfFile, verbose=verbose)
probes.table <- tmp[["probes.table"]]
geom <- tmp[["geometry"]]
rm(tmp)
probesetInfo <- parseHtaProbesetCSV(probeFile, verbose=verbose)
## levels table
## id
## desc
level_dict <- probesetInfo[["level"]]
## chromosome table
## id
## chrom_id
chrom_dict <- probesetInfo[["chromosome"]]
## types table
## id
## type_id
type_dict <- probesetInfo[["type"]]
## featureSet table - Fields
## probeset_id
## strand
## start
## stop
## transcript_cluster_id
## exon_id
## crosshyb_type
## level
## junction_start_edge
## junction_stop_edge
## junction_sequence
## chrom
## type
## Starting with the MTA 2.0 array, there are (811) PSR and JUC probesets
## that are part of the design, but because they are targeting regions that
## are too repetitive to synthesize a probe, or too short for a probe (25-mer) to fit
## they don't actually have these probesets on the array. But they kept them in the
## design, so they show up in the csv files. This causes problems when we create the featureSet
## data.frame, because we are doing a merge with all = TRUE, so we get NA values for the
## fsetids that don't exist, but for which we do have man_fsetids. These NA values
## are problematic when we insert into the database, as the fsetids have to be unique.
## So we remove them here.
## As of the na35 build of probeset csv files, Affy is mixing fsetid and man_fsetid
## IDs in the probeset csv file (at least for the HTA arrays), so we have to account for that
probesetInfo$probesets <- fixManFsetid(probesetInfo$probesets, probes.table)
featureSet <- merge(probesetInfo[["probesets"]],
unique(probes.table[, c('man_fsetid', 'fsetid')]),
by='man_fsetid', all=TRUE)
## pmfeature table - Fields
## fid
## fsetid
## chr (NA)
## location (NA)
## x
## y
## IMPORTANT:
## ignoring strand
## keeping atom to match with MM's
pmFeatures <- subset(probes.table,
substr(probes.table[["ptype"]], 1, 2) == "pm",
select=c("fid", "fsetid", "atom", "x", "y", "sequence"))
pmSequence <- pmFeatures[, c("fid", "sequence")]
pmFeatures[["sequence"]] <- NULL
pmSequence <- pmSequence[order(pmSequence[["fid"]]),]
pmSequence <- DataFrame(fid=pmSequence[["fid"]],
sequence=DNAStringSet(pmSequence[["sequence"]]))
## mmfeature table - Fields
## fid
## fid of matching pm
## x
## y
## IMPORTANT:
## ignoring strand
## keeping atom to match with MM's
## ADD sequence for MM
mmFeatures <- subset(probes.table, substr(probes.table$ptype, 1, 2) =="mm",
select=c("fid", "fsetid", "atom", "x", "y", "sequence"))
if (nrow(mmFeatures) > 0){
mmSequence <- mmFeatures[, c("fid", "sequence")]
mmFeatures[["sequence"]] <- NULL
mmSequence <- mmSequence[order(mmSequence[["fid"]]),]
mmSequence <- DataFrame(fid=mmSequence[["fid"]],
sequence=DNAStringSet(mmSequence[["sequence"]]))
}else{
mmFeatures <- data.frame()
mmSequence <- data.frame()
}
## IMPORTANT: for the moment, bgfeature will contain everything (that is PM) but 'main'
## bgfeature table - Fields
## fid
## x
## y
## fs_type: featureSet type: genomic/antigenomic
## f_type: pm/mm at/st
## old code:
## subset using cols
## cols <- c("fid", "fsetid", "pstype", "ptype", "x", "y", "sequence")
rm(probes.table)
core <- mpsParser(coreMps, verbose=verbose)
## Here we should have the following tables available:
## featureSet: fsetid, type
## pmfeature: fid, fsetid, atom, x, y
## bgfeature: fid, fsetid, fs_type, f_type, x, y - NOT ANYMORE
## pmSequence: fid, sequence
## bgSequence: fid, sequence - NOT ANYMORE
## core, extended, full: meta_fsetid, trancript_cluster_id, fsetid
## mmfeatures/mmSequence
out <- list(featureSet=featureSet, pmFeatures=pmFeatures,
mmFeatures=mmFeatures, geometry=geom,
pmSequence=pmSequence, mmSequence=mmSequence,
chrom_dict=chrom_dict, level_dict=level_dict,
type_dict=type_dict, core=core)
return(out)
}
#######################################################################
## SECTION D - Package Maker
## This shouldn't be extremely hard.
## The idea is to: i) get array info (name, pkgname, dbname)
## ii) parse data; iii) create pkg from template;
## iv) dump the database
#######################################################################
setMethod("makePdInfoPackage", "AffyHTAPDInfoPkgSeed",
function(object, destDir=".", batch_size=10000, quiet=FALSE, unlink=FALSE) {
msgBar()
message("Building annotation package for Affymetrix HTA Array")
message("PGF.........: ", basename(object@pgfFile))
message("CLF.........: ", basename(object@clfFile))
message("Probeset....: ", basename(object@probeFile))
message("Transcript..: ", basename(object@transFile))
message("Core MPS....: ", basename(object@coreMps))
msgBar()
#######################################################################
## Part i) get array info (chipName, pkgName, dbname)
#######################################################################
chip <- chipName(object)
pkgName <- cleanPlatformName(chip)
extdataDir <- file.path(destDir, pkgName, "inst", "extdata")
dbFileName <- paste(pkgName, "sqlite", sep=".")
dbFilePath <- file.path(extdataDir, dbFileName)
#######################################################################
## Part ii) parse data. This should return a list of data.frames.
## The names of the elements in the list are table names.
#######################################################################
parsedData <- combinePgfClfProbesetsMpsHTA(object@pgfFile,
object@clfFile,
object@probeFile,
object@coreMps,
verbose=!quiet)
#######################################################################
## Part iii) Create package from template
#######################################################################
pdInfoClass <- "AffyHTAPDInfo"
syms <- list(MANUF=object@manufacturer,
VERSION=object@version,
GENOMEBUILD=object@genomebuild,
AUTHOR=object@author,
AUTHOREMAIL=object@email,
LIC=object@license,
DBFILE=dbFileName,
CHIPNAME=chip,
PKGNAME=pkgName,
PDINFONAME=pkgName,
PDINFOCLASS=pdInfoClass,
GEOMETRY=parsedData[["geometry"]])
templateDir <- system.file("pd.PKG.template",
package="pdInfoBuilder")
createPackage(pkgname=pkgName, destinationDir=destDir,
originDir=templateDir, symbolValues=syms,
quiet=quiet)
dir.create(extdataDir, recursive=TRUE)
#######################################################################
## Part iv) Create SQLite database
## FIX ME: Fix ordering of the tables
#######################################################################
conn <- dbConnect(dbDriver("SQLite"), dbname=dbFilePath)
increaseDbPerformance(conn)
## Adding new tables
dbCreateTable(conn,
"chrom_dict",
chromDictTable[["col2type"]],
chromDictTable[["col2key"]])
dbCreateTable(conn,
"level_dict",
levelDictTable[["col2type"]],
levelDictTable[["col2key"]])
dbCreateTable(conn,
"type_dict",
typeDictTable[["col2type"]],
typeDictTable[["col2key"]])
dbCreateTable(conn,
"core_mps",
htaMpsSchema[["col2type"]],
htaMpsSchema[["col2key"]])
## end adding
dbCreateTable(conn,
"featureSet",
htaFeatureSetSchema[["col2type"]],
htaFeatureSetSchema[["col2key"]])
dbCreateTable(conn, "pmfeature",
genePmFeatureSchema[["col2type"]],
genePmFeatureSchema[["col2key"]])
containsMm <- nrow(parsedData[["mmFeatures"]]) > 0
if (containsMm)
dbCreateTable(conn,
"mmfeature",
exonTranscriptionMmFeatureSchema[["col2type"]],
exonTranscriptionMmFeatureSchema[["col2key"]])
## Inserting data in new tables
dbInsertDataFrame(conn, "chrom_dict", parsedData[["chrom_dict"]],
chromDictTable[["col2type"]], !quiet)
dbInsertDataFrame(conn, "level_dict", parsedData[["level_dict"]],
levelDictTable[["col2type"]], !quiet)
dbInsertDataFrame(conn, "type_dict", parsedData[["type_dict"]],
typeDictTable[["col2type"]], !quiet)
dbInsertDataFrame(conn, "core_mps", parsedData[["core"]],
mpsSchema[["col2type"]], !quiet)
## end inserting
dbInsertDataFrame(conn, "featureSet", parsedData[["featureSet"]],
htaFeatureSetSchema[["col2type"]], !quiet)
dbInsertDataFrame(conn, "pmfeature", parsedData[["pmFeatures"]],
genePmFeatureSchema[["col2type"]], !quiet)
if (containsMm)
dbInsertDataFrame(conn, "mmfeature", parsedData[["mmFeatures"]],
exonTranscriptionMmFeatureSchema[["col2type"]], !quiet)
dbCreateTableInfo(conn, !quiet)
## Create indices
dbCreateIndex(conn, "idx_pmfsetid", "pmfeature", "fsetid", FALSE, verbose=!quiet)
dbCreateIndex(conn, "idx_pmfid", "pmfeature", "fid", FALSE, verbose=!quiet)
dbCreateIndicesFs(conn, !quiet)
dbCreateIndex(conn, "idx_core_meta_fsetid", "core_mps", "meta_fsetid", FALSE, verbose=!quiet)
dbCreateIndex(conn, "idx_core_fsetid", "core_mps", "fsetid", FALSE, verbose=!quiet)
if (containsMm){
dbCreateIndex(conn, "idx_mmfsetid", "mmfeature", "fsetid", FALSE, verbose=!quiet)
dbCreateIndex(conn, "idx_mmfid", "mmfeature", "fid", FALSE, verbose=!quiet)
}
dbGetQuery(conn, "VACUUM")
dbDisconnect(conn)
#######################################################################
## Part v) Save sequence DataFrames
## FIX ME: Fix ordering of the tables to match xxFeature tables
#######################################################################
datadir <- file.path(destDir, pkgName, "data")
dir.create(datadir)
pmSequence <- parsedData[["pmSequence"]]
pmSeqFile <- file.path(datadir, "pmSequence.rda")
if (!quiet) message("Saving DataFrame object for PM.")
save(pmSequence, file=pmSeqFile, compress='xz')
if (containsMm){
mmSequence <- parsedData[["mmSequence"]]
mmSeqFile <- file.path(datadir, "mmSequence.rda")
if (!quiet) message("Saving DataFrame object for MM.")
save(mmSequence, file=mmSeqFile, compress='xz')
}
#######################################################################
## Part vi) Save NetAffx Annotation to extdata
#######################################################################
if (!quiet) message("Saving NetAffx Annotation... ", appendLF=FALSE)
netaffxProbeset <- annot2fdata(object@probeFile)
save(netaffxProbeset, file=file.path(extdataDir,
'netaffxProbeset.rda'), compress='xz')
netaffxTranscript <- annot2fdata(object@transFile)
save(netaffxTranscript, file=file.path(extdataDir,
'netaffxTranscript.rda'), compress='xz')
if (!quiet) msgOK()
if (!quiet) message("Done.")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleDefaults.R
\name{setNodeCustomRadialGradient}
\alias{setNodeCustomRadialGradient}
\title{Set Node Custom Radial Gradient}
\usage{
setNodeCustomRadialGradient(colors = c("#DDDDDD", "#888888"),
anchors = c(0, 1), xCenter = 0.5, yCenter = 0.5, slot = 1,
style.name = "default", base.url = .defaultBaseUrl)
}
\arguments{
\item{colors}{(optional) List of colors to define gradient}
\item{anchors}{(optional) Position of colors from 0.0 to 1.0.}
\item{xCenter}{(optional) X position for center of radial effect from 0.0
to 1.0. Default is 0.5.}
\item{yCenter}{(optional) Y position for center of radial effect from 0.0
to 1.0. Default is 0.5.}
\item{slot}{(optional) Which custom graphics slot to modify. Slots 1-9 are
available for independent charts, gradients and images. Default is 1.}
\item{style.name}{(optional) Name of style; default is "default" style}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Makes a gradient fill per node by setting a default custom
graphic style.
}
\examples{
\donttest{
setNodeCustomRadialGradient()
}
}
| /man/setNodeCustomRadialGradient.Rd | permissive | shraddhapai/RCy3 | R | false | true | 1,350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StyleDefaults.R
\name{setNodeCustomRadialGradient}
\alias{setNodeCustomRadialGradient}
\title{Set Node Custom Radial Gradient}
\usage{
setNodeCustomRadialGradient(colors = c("#DDDDDD", "#888888"),
anchors = c(0, 1), xCenter = 0.5, yCenter = 0.5, slot = 1,
style.name = "default", base.url = .defaultBaseUrl)
}
\arguments{
\item{colors}{(optional) List of colors to define gradient}
\item{anchors}{(optional) Position of colors from 0.0 to 1.0.}
\item{xCenter}{(optional) X position for center of radial effect from 0.0
to 1.0. Default is 0.5.}
\item{yCenter}{(optional) Y position for center of radial effect from 0.0
to 1.0. Default is 0.5.}
\item{slot}{(optional) Which custom graphics slot to modify. Slots 1-9 are
available for independent charts, gradients and images. Default is 1.}
\item{style.name}{(optional) Name of style; default is "default" style}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
None
}
\description{
Makes a gradient fill per node by setting a default custom
graphic style.
}
\examples{
\donttest{
setNodeCustomRadialGradient()
}
}
|
## Fit a Markov chain model with covariates affecting both the initial
## and the transition probabilities through multinomial logit
## parametrizations.
##
## Arguments:
##   S      matrix (ns x TT) of observed state sequences, states coded 0..b
##   X1     covariates on the initial probabilities (ns x nc1)
##   X2     covariates on the transition probabilities (ns x (TT-1) x nc2)
##   yv     frequencies of the response configurations (default: all 1)
##   start  0 = deterministic starting values, 1 = random starting values
##   tol, maxit  retained for interface compatibility; estimation here is
##          non-iterative since the chain is fully observed -- TODO confirm
##   out_se compute standard errors of the regression parameters
##   output also return the estimated probability arrays Piv and PI
##   fort   use Fortran routines in the helper functions where available
## Returns an object of class "MCcov": list with lk, Be, Ga, np, aic,
## bic, n, TT (plus seBe/seGa and Piv/PI when requested).
mccov <- function(S,X1=NULL,X2=NULL,
                  yv=rep(1,nrow(S)),
                  start=0,tol=10^-8,maxit=1000,
                  out_se=FALSE,output=FALSE,fort=TRUE){
  # Preliminaries
  n = sum(yv)           # total (weighted) sample size
  sS = dim(S)
  ns = sS[1]
  TT = sS[2]
  b = max(S)            # states are 0,...,b -> b+1 categories
  # Covariate structure and related matrices: initial probabilities
  # GBe maps the b free logit parameters onto the b+1 categories
  if((b+1) == 2) GBe = as.matrix(c(0,1)) else{
    GBe = diag(b+1); GBe = GBe[,-1]
  }
  if(is.null(X1)){
    nc1=0
    Xlab = rep(1,ns)
    nameBe = NULL
  }else{
    if(is.vector(X1)) X1 = matrix(X1,ns,1)
    nc1 = dim(X1)[2] # number of covariates on the initial probabilities
    if(ns!= dim(X1)[1]) stop("dimension mismatch between S and X1")
    nameBe = colnames(X1)
    # collapse to distinct covariate configurations to save computation
    out = aggr_data(X1,fort=fort)
    Xdis = out$data_dis
    if(nc1==1) Xdis = matrix(Xdis,length(Xdis),1)
    Xlab = out$label
  }
  Xndis = max(Xlab)
  # design matrices of the multinomial logit for the initial probabilities
  XXdis = array(0,c(b+1,b*(nc1+1),Xndis))
  for(i in 1:Xndis){
    if(nc1==0) xdis = 1 else xdis = c(1,Xdis[i,])
    XXdis[,,i] = GBe%*%(diag(b)%x%t(xdis))
  }
  # for the transition probabilities
  if(is.null(X2)){
    nc2 = 0
    Zlab = rep(1,ns*(TT-1))
    nameGa = NULL
    Zndis = max(Zlab)
  }else{
    if(TT==2) X2 = array(X2,c(ns,1,dim(X2)[2]))
    if(is.matrix(X2)) X2 = array(X2,c(ns,TT-1,1))
    nc2 = dim(X2)[3] # number of covariates on the transition probabilities
    if(ns!= dim(X2)[1]) stop("dimension mismatch between S and X2")
    nameGa = colnames(aperm(X2,c(1,3,2)))
    Z = NULL
    for(t in 1:(TT-1)) Z = rbind(Z,X2[,t,])
    if(nc2==1) Z = as.vector(X2)
    out = aggr_data(Z,fort=fort); Zdis = out$data_dis; Zlab = out$label; Zndis = max(Zlab)
    if(nc2==1) Zdis=matrix(Zdis,length(Zdis),1)
  }
  # design matrices of the logits for each row h of the transition matrix;
  # the reference category of row h is the current state h itself
  ZZdis = array(0,c(b+1,(b)*(nc2+1),Zndis,b+1))
  for(h in 1:(b+1)){
    if((b+1)==2){
      if(h == 1) GGa = as.matrix(c(0,1)) else GGa = as.matrix(c(1,0))
    }else{
      GGa = diag(b+1); GGa = GGa[,-h]
    }
    for(i in 1:Zndis){
      if(nc2==0) zdis = 1 else zdis = c(1,Zdis[i,])
      ZZdis[,,i,h] = GGa%*%(diag(b)%x%t(zdis))
    }
  }
  # parameters on initial probabilities
  if(start==0) be = array(0,(nc1+1)*b)
  else if(start==1){
    be = c(rnorm(1),rep(0,nc1))
    if((b+1)>2) for(h in 2:b) be = c(be,rnorm(1),rep(0,nc1))
  }
  out = prob_multilogit(XXdis,be,Xlab,fort)
  Piv = out$P; Pivdis = out$Pdis
  # parameters on transition probabilities
  Ga = matrix(0,(nc2+1)*b,b+1)
  if(start==0) Ga[1+(0:(b-1))*(nc2+1),] = -log(10)
  else if(start==1) Ga[1+(0:(b-1))*(nc2+1),] = -abs(rnorm(b))
  PIdis = array(0,c(Zndis,b+1,b+1)); PI = array(0,c(b+1,b+1,ns,TT))
  for(h in 1:(b+1)){
    tmp = ZZdis[,,,h]
    if(nc2==0) tmp = array(tmp,c(b+1,b,Zndis))
    out = prob_multilogit(tmp,Ga[,h],Zlab,fort)
    PIdis[,,h] = out$Pdis; PI[h,,,2:TT] = array(as.vector(t(out$P)),c(1,b+1,ns,TT-1))
  }
  #updating be
  # V[i,j] = weighted indicator that subject i starts in state j-1
  V = matrix(0,ns,b+1)
  for(i in 1:ns) V[i,S[i,1]+1]=yv[i]
  out = est_multilogit(V,XXdis,Xlab,be,Pivdis,fort=fort)
  # fixed: was `out$Pdi`, which only worked through partial matching of `$`
  be = out$be; Pivdis = out$Pdis; Piv = out$P
  if(out_se){
    iFi = ginv(out$Fi)   # inverse Fisher information -> asymptotic variances
    sebe = sqrt(diag(iFi))
  }
  #Updating Ga
  # U[h,k,i,t] = weighted indicator of transition (h-1)->(k-1) of subject i at time t
  U = array(0,c(b+1,b+1,ns,TT))
  for(i in 1:ns) for(t in 2:TT){
    U[S[i,t-1]+1,S[i,t]+1,i,t] = yv[i]
  }
  if(out_se) sega = matrix(0,(nc2+1)*b,b+1)
  for(h in 1:(b+1)){
    UU = NULL
    for(t in 2:TT) UU = rbind(UU,t(U[h,,,t]))
    tmp = ZZdis[,,,h]
    if(nc2==0) tmp = array(tmp,c(b+1,b,Zndis))
    tmp2 = PIdis[,,h]
    if(Zndis==1) tmp2 = matrix(tmp2,1,b+1)
    out = est_multilogit(UU,tmp,Zlab,Ga[,h],tmp2,fort=fort)
    PIdis[,,h] = out$Pdis; PI[h,,,2:TT] = array(as.vector(t(out$P)),c(1,b+1,ns,TT-1)); Ga[,h] = out$be
    if(out_se){
      iFi = ginv(out$Fi)
      sega[,h] = sqrt(diag(iFi))
    }
  }
  # Compute log-likelihood
  lk = sum(V*log(Piv))+sum(U[,,,2:TT]*log(PI[,,,2:TT]))
  # Compute number of parameters
  np = b*(nc1+1)
  np = np+b*(nc2+1)*(b+1)
  aic = -2*lk+np*2
  bic = -2*lk+np*log(n)
  # arrange the regression parameters with readable dimnames
  # NOTE(review): "Intercept" (capitalized) when no covariates vs
  # "intercept" otherwise reproduces the historical output labels
  Be = matrix(be,nc1+1,b)
  if (is.null(nameBe)){
    if(nc1==0) nameBe = c("Intercept") else nameBe = c("intercept",paste("X1",1:nc1,sep=""))
  }else{
    nameBe = c("intercept",nameBe)
  }
  dimnames(Be) = list(nameBe,logit=2:(b+1))
  if(out_se) {seBe = matrix(sebe,nc1+1,b); dimnames(seBe) = list(nameBe,logit=2:(b+1))}
  if(is.null(nameGa)){
    if(nc2==0) nameGa = c("Intercept") else nameGa = c("intercept", paste("X2",1:nc2,sep=""))
  }else{
    nameGa = c("intercept",nameGa)
  }
  if((b+1)>2) {
    Ga = array(as.vector(Ga),c(nc2+1,b,b+1))
    dimnames(Ga) = list(nameGa,logit=2:(b+1),logit=1:(b+1))
  }else if((b+1)==2){
    dimnames(Ga) = list(nameGa,logit=1:(b+1))
  }
  if(out_se){
    if((b+1)==2){
      seGa = matrix(sega,nc2+1,2)
      dimnames(seGa) = list(nameGa,logit=1:(b+1))
    }else if((b+1)>2){
      seGa = array(as.vector(sega),c(nc2+1,b,b+1))
      dimnames(seGa) = list(nameGa,logit=2:(b+1),logit=1:(b+1))
    }
  }
  # adjust output
  lk = as.vector(lk)
  if(output){
    dimnames(Piv)=list(subject=1:ns,category=0:b)
    dimnames(PI)=list(category=0:b,category=0:b,subject=1:ns,time=1:TT)
  }
  out = list(lk=lk,Be=Be,Ga=Ga,np=np,aic=aic,bic=bic, n = n, TT = TT)
  if(out_se){
    out$seBe = seBe
    out$seGa = seGa
  }
  # final output
  if(output){
    out$PI = PI
    out$Piv = Piv
  }
  class(out)="MCcov"
  return(out)
}
| /R/mccov.R | no_license | cran/LMest | R | false | false | 5,282 | r | mccov <- function(S,X1=NULL,X2=NULL,
yv=rep(1,nrow(S)),
start=0,tol=10^-8,maxit=1000,
out_se=FALSE,output=FALSE,fort=TRUE){
# Preliminaries
check_der = FALSE # to check derivatives
n = sum(yv)
sS = dim(S)
ns = sS[1]
TT = sS[2]
b = max(S)
# Covariate structure and related matrices: initial probabilities
if((b+1) == 2) GBe = as.matrix(c(0,1)) else{
GBe = diag(b+1); GBe = GBe[,-1]
}
if(is.null(X1)){
nc1=0
Xlab = rep(1,ns)
nameBe = NULL
}else{
if(is.vector(X1)) X1 = matrix(X1,ns,1)
nc1 = dim(X1)[2] # number of covariates on the initial probabilities
if(ns!= dim(X1)[1]) stop("dimension mismatch between S and X1")
nameBe = colnames(X1)
out = aggr_data(X1,fort=fort)
Xdis = out$data_dis
if(nc1==1) Xdis = matrix(Xdis,length(Xdis),1)
Xlab = out$label
}
Xndis = max(Xlab)
XXdis = array(0,c(b+1,b*(nc1+1),Xndis))
for(i in 1:Xndis){
if(nc1==0) xdis = 1 else xdis = c(1,Xdis[i,])
XXdis[,,i] = GBe%*%(diag(b)%x%t(xdis))
}
# for the transition probabilities
if(is.null(X2)){
nc2 = 0
Zlab = rep(1,ns*(TT-1))
nameGa = NULL
Zndis = max(Zlab)
}else{
if(TT==2) X2 = array(X2,c(ns,1,dim(X2)[2]))
if(is.matrix(X2)) X2 = array(X2,c(ns,TT-1,1))
nc2 = dim(X2)[3] # number of covariates on the transition probabilities
if(ns!= dim(X2)[1]) stop("dimension mismatch between S and X2")
nameGa = colnames(aperm(X2,c(1,3,2)))
Z = NULL
for(t in 1:(TT-1)) Z = rbind(Z,X2[,t,])
if(nc2==1) Z = as.vector(X2)
out = aggr_data(Z,fort=fort); Zdis = out$data_dis; Zlab = out$label; Zndis = max(Zlab)
if(nc2==1) Zdis=matrix(Zdis,length(Zdis),1)
}
ZZdis = array(0,c(b+1,(b)*(nc2+1),Zndis,b+1))
for(h in 1:(b+1)){
if((b+1)==2){
if(h == 1) GGa = as.matrix(c(0,1)) else GGa = as.matrix(c(1,0))
}else{
GGa = diag(b+1); GGa = GGa[,-h]
}
for(i in 1:Zndis){
if(nc2==0) zdis = 1 else zdis = c(1,Zdis[i,])
ZZdis[,,i,h] = GGa%*%(diag(b)%x%t(zdis))
}
}
# parameters on initial probabilities
if(start==0) be = array(0,(nc1+1)*b)
else if(start==1){
be = c(rnorm(1),rep(0,nc1))
if((b+1)>2) for(h in 2:b) be = c(be,rnorm(1),rep(0,nc1))
}
out = prob_multilogit(XXdis,be,Xlab,fort)
Piv = out$P; Pivdis = out$Pdis
# parameters on transition probabilities
Ga = matrix(0,(nc2+1)*b,b+1)
if(start==0) Ga[1+(0:(b-1))*(nc2+1),] = -log(10)
else if(start==1) Ga[1+(0:(b-1))*(nc2+1),] = -abs(rnorm(b))
PIdis = array(0,c(Zndis,b+1,b+1)); PI = array(0,c(b+1,b+1,ns,TT))
for(h in 1:(b+1)){
tmp = ZZdis[,,,h]
if(nc2==0) tmp = array(tmp,c(b+1,b,Zndis))
out = prob_multilogit(tmp,Ga[,h],Zlab,fort)
PIdis[,,h] = out$Pdis; PI[h,,,2:TT] = array(as.vector(t(out$P)),c(1,b+1,ns,TT-1))
}
#updating be
V = matrix(0,ns,b+1)
for(i in 1:ns) V[i,S[i,1]+1]=yv[i]
out = est_multilogit(V,XXdis,Xlab,be,Pivdis,fort=fort)
be = out$be; Pivdis = out$Pdi; Piv = out$P
if(out_se){
iFi = ginv(out$Fi)
sebe = sqrt(diag(iFi))
}
#Updating Ga
U = array(0,c(b+1,b+1,ns,TT))
for(i in 1:ns) for(t in 2:TT){
U[S[i,t-1]+1,S[i,t]+1,i,t] = yv[i]
}
if(out_se) sega = matrix(0,(nc2+1)*b,b+1)
for(h in 1:(b+1)){
UU = NULL
for(t in 2:TT) UU = rbind(UU,t(U[h,,,t]))
tmp = ZZdis[,,,h]
if(nc2==0) tmp = array(tmp,c(b+1,b,Zndis))
tmp2 = PIdis[,,h]
if(Zndis==1) tmp2 = matrix(tmp2,1,b+1)
out = est_multilogit(UU,tmp,Zlab,Ga[,h],tmp2,fort=fort)
PIdis[,,h] = out$Pdis; PI[h,,,2:TT] = array(as.vector(t(out$P)),c(1,b+1,ns,TT-1)); Ga[,h] = out$be
if(out_se){
iFi = ginv(out$Fi)
sega[,h] = sqrt(diag(iFi))
}
}
# Compute log-likelihood
lk = sum(V*log(Piv))+sum(U[,,,2:TT]*log(PI[,,,2:TT]))
# Compute number of parameters
np = b*(nc1+1)
np = np+b*(nc2+1)*(b+1)
aic = -2*lk+np*2
bic = -2*lk+np*log(n)
Be = matrix(be,nc1+1,b)
if (is.null(nameBe)){
if(nc1==0) nameBe = c("Intercept") else nameBe = c("intercept",paste("X1",1:nc1,sep=""))
}else{
nameBe = c("intercept",nameBe)
}
dimnames(Be) = list(nameBe,logit=2:(b+1))
if(out_se) {seBe = matrix(sebe,nc1+1,b); dimnames(seBe) = list(nameBe,logit=2:(b+1))}
if(is.null(nameGa)){
if(nc2==0) nameGa = c("Intercept") else nameGa = c("intercept", paste("X2",1:nc2,sep=""))
}else{
nameGa = c("intercept",nameGa)
}
if((b+1)>2) {
Ga = array(as.vector(Ga),c(nc2+1,b,b+1))
dimnames(Ga) = list(nameGa,logit=2:(b+1),logit=1:(b+1))
}else if((b+1)==2){
dimnames(Ga) = list(nameGa,logit=1:(b+1))
}
if(out_se){
if((b+1)==2){
seGa = matrix(sega,nc2+1,2)
dimnames(seGa) = list(nameGa,logit=1:(b+1))
}else if((b+1)>2){
seGa = array(as.vector(sega),c(nc2+1,b,b+1))
dimnames(seGa) = list(nameGa,logit=2:(b+1),logit=1:(b+1))
}
}
# adjust output
lk = as.vector(lk)
if(output){
dimnames(Piv)=list(subject=1:ns,category=0:b)
dimnames(PI)=list(category=0:b,category=0:b,subject=1:ns,time=1:TT)
}
out = list(lk=lk,Be=Be,Ga=Ga,np=np,aic=aic,bic=bic, n = n, TT = TT)
if(out_se){
out$seBe = seBe
out$seGa = seGa
}
# final output
if(output){
out$PI = PI
out$Piv = Piv
}
class(out)="MCcov"
return(out)
}
|
## code to prepare `pvdiv_metadata` dataset goes here
##
pvdiv_metadata <- readRDS("~/Github/pvdiv-phenotypes/data/metadata.rds")
usethis::use_data(pvdiv_metadata, overwrite = TRUE)
| /data-raw/pvdiv_metadata.R | no_license | Alice-MacQueen/switchgrassGWAS | R | false | false | 183 | r | ## code to prepare `pvdiv_metadata` dataset goes here
##
pvdiv_metadata <- readRDS("~/Github/pvdiv-phenotypes/data/metadata.rds")
usethis::use_data(pvdiv_metadata, overwrite = TRUE)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.