content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/entity_accessors.R
\name{getParameters}
\alias{getParameters}
\title{Get reaction parameters}
\usage{
getParameters(key = NULL, model = getCurrentModel())
}
\arguments{
\item{key}{Optionally, a character vector specifying which reaction parameters to get.}
\item{model}{A model object.}
}
\value{
Reaction parameters and associated information, as data frame.
}
\description{
\code{getParameters} returns reaction parameters as a data frame.
}
\details{
The \href{https://jpahle.github.io/CoRC/articles/entity_management.html}{online article on managing model entities} provides some further context.
}
\seealso{
\code{\link{getParameterReferences}} \code{\link{setParameters}}
Other reaction functions:
\code{\link{clearCustomKineticFunctions}()},
\code{\link{deleteKineticFunction}()},
\code{\link{deleteReaction}()},
\code{\link{entity_finders}},
\code{\link{getParameterReferences}()},
\code{\link{getReactionMappings}()},
\code{\link{getReactionReferences}()},
\code{\link{getReactions}()},
\code{\link{getValidReactionFunctions}()},
\code{\link{newKineticFunction}()},
\code{\link{newReaction}()},
\code{\link{setParameters}()},
\code{\link{setReactionFunction}()},
\code{\link{setReactionMappings}()},
\code{\link{setReactions}()}
}
\concept{reaction functions}
| /man/getParameters.Rd | permissive | jpahle/CoRC | R | false | true | 1,350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/entity_accessors.R
\name{getParameters}
\alias{getParameters}
\title{Get reaction parameters}
\usage{
getParameters(key = NULL, model = getCurrentModel())
}
\arguments{
\item{key}{Optionally, a character vector specifying which reaction parameters to get.}
\item{model}{A model object.}
}
\value{
Reaction parameters and associated information, as data frame.
}
\description{
\code{getParameters} returns reaction parameters as a data frame.
}
\details{
The \href{https://jpahle.github.io/CoRC/articles/entity_management.html}{online article on managing model entities} provides some further context.
}
\seealso{
\code{\link{getParameterReferences}} \code{\link{setParameters}}
Other reaction functions:
\code{\link{clearCustomKineticFunctions}()},
\code{\link{deleteKineticFunction}()},
\code{\link{deleteReaction}()},
\code{\link{entity_finders}},
\code{\link{getParameterReferences}()},
\code{\link{getReactionMappings}()},
\code{\link{getReactionReferences}()},
\code{\link{getReactions}()},
\code{\link{getValidReactionFunctions}()},
\code{\link{newKineticFunction}()},
\code{\link{newReaction}()},
\code{\link{setParameters}()},
\code{\link{setReactionFunction}()},
\code{\link{setReactionMappings}()},
\code{\link{setReactions}()}
}
\concept{reaction functions}
|
# Normalise a flights data frame by lower-casing its column names
# (via the project helper helpers.lowerfy) and return it.
load.cleanFlights <- function(d) {
  lowered_names <- helpers.lowerfy(names(d))
  names(d) <- lowered_names
  d
}
# Load the 2014 flights CSV and normalise its column names.
load.flights2014 <- function() {
  raw_flights <- read.csv('data/flights_2014_output.csv',
                          header = TRUE, sep = ',', stringsAsFactors = FALSE)
  load.cleanFlights(raw_flights)
}
# Load the combined 2012/2013 flights CSV and normalise its column names.
load.flights1213 <- function() {
  raw_flights <- read.csv('data/flights_2012_2013output.csv',
                          header = TRUE, sep = ',', stringsAsFactors = FALSE)
  load.cleanFlights(raw_flights)
}
| /load/flights.R | no_license | vladiim/politicians | R | false | false | 393 | r | load.cleanFlights <- function(d) {
names(d) <- helpers.lowerfy(names(d))
d
}
load.flights2014 <- function() {
load.cleanFlights(read.csv('data/flights_2014_output.csv',
header = TRUE, sep = ',', stringsAsFactors = FALSE))
}
load.flights1213 <- function() {
load.cleanFlights(read.csv('data/flights_2012_2013output.csv',
header = TRUE, sep = ',', stringsAsFactors = FALSE))
}
|
#' Read AmiraMesh data in binary or ascii format
#'
#' @details reading byte data as raw arrays requires 1/4 memory but complicates
#'   arithmetic.
#' @param file Name of file (or connection) to read
#' @param sections character vector containing names of sections
#' @param header Whether to include the full unprocessed text header as an
#'   attribute of the returned list.
#' @param simplify If there is only one datablock in file do not return wrapped
#'   in a list (default TRUE).
#' @param endian Whether multibyte data types should be treated as big or little
#'   endian. Default of NULL checks file or uses \code{.Platform$endian}
#' @param ReadByteAsRaw Logical specifying whether to read 8 bit data as an R
#'   \code{raw} vector rather than \code{integer} vector (default: FALSE).
#' @param Verbose Print status messages
#' @return list of named data chunks (or a single chunk when
#'   \code{simplify=TRUE} and only one section exists), with parsed header
#'   information attached as attributes. NULL (with a warning) when the file
#'   is not AmiraMesh.
#' @importFrom nat.utils is.gzip
#' @rdname amiramesh-io
#' @export
#' @seealso \code{\link{readBin}, \link{.Platform}}
#' @family amira
read.amiramesh<-function(file,sections=NULL,header=FALSE,simplify=TRUE,
  endian=NULL,ReadByteAsRaw=FALSE,Verbose=FALSE){
  # The first line must identify the file as AmiraMesh (or old-style HyperMesh)
  firstLine=readLines(file,n=1)
  if(!any(grep("#\\s+(amira|hyper)mesh",firstLine,ignore.case=TRUE))){
    warning(paste(file,"does not appear to be an AmiraMesh file"))
    return(NULL)
  }
  # The first line also declares whether the data blocks are ascii or binary
  binaryfile="binary"==tolower(sub(".*(ascii|binary).*","\\1",firstLine,ignore.case=TRUE))
  # Check if file is gzipped
  con=if(is.gzip(file)) gzfile(file) else file(file)
  # binary data must be read through an "rb" connection, text through "rt"
  open(con, open=ifelse(binaryfile, 'rb', 'rt'))
  on.exit(try(close(con),silent=TRUE))
  # parse the header, leaving con positioned at the first data section
  h=read.amiramesh.header(con,Verbose=Verbose)
  parsedHeader=h[["dataDef"]]
  # endianness precedence: explicit argument > value parsed from file header
  if(is.null(endian) && is.character(parsedHeader$endian)) {
    endian=parsedHeader$endian[1]
  }
  if(ReadByteAsRaw){
    # read 8 bit data as raw vectors (1/4 the memory of integer vectors)
    parsedHeader$RType[parsedHeader$SimpleType=='byte']='raw'
  }
  # NULL sections means read everything; otherwise keep only known sections
  if(is.null(sections)) sections=parsedHeader$DataName
  else sections=intersect(parsedHeader$DataName,sections)
  if(length(sections)){
    if(binaryfile){
      filedata=.read.amiramesh.bin(con,parsedHeader,sections,Verbose=Verbose,endian=endian)
      close(con)
    } else {
      # the ascii reader (re)opens the file itself, so release our connection
      close(con)
      filedata=read.amiramesh.ascii(file,parsedHeader,sections,Verbose=Verbose)
    }
  } else {
    # we don't have any data to read - just make a dummy return object to which
    # we can add attributes
    filedata<-switch(parsedHeader$RType[1],
      integer=integer(0), raw=raw(), numeric(0))
  }
  # drop the raw text header unless the caller asked for it
  if(!header) h=h[setdiff(names(h),c("header"))]
  # attach all remaining header elements as attributes of the result
  for (n in names(h))
    attr(filedata,n)=h[[n]]
  # unlist?
  if(simplify && is.list(filedata) && length(filedata)==1){
    # single data block: return it directly, preserving attributes and dims
    filedata2=filedata[[1]]
    attributes(filedata2)=attributes(filedata)
    dim(filedata2)=dim(filedata[[1]])
    filedata=filedata2
  }
  return(filedata)
}
# Read the binary data sections of an AmiraMesh file.
#
# @param con open binary-mode connection positioned at the first data section
# @param df data.frame of data definitions (the dataDef element returned by
#   read.amiramesh.header), in file order
# @param sections character vector of section names to read; other sections
#   are skipped by seeking past their bytes
# @param endian "big" or "little". NOTE(review): the default endian=endian is
#   self-referential and would error if ever evaluated - callers must always
#   pass endian explicitly (as read.amiramesh does).
# @param Verbose print progress messages
# @return named list of the requested data sections
.read.amiramesh.bin<-function(con, df, sections, endian=endian, Verbose=FALSE){
  l=list()
  for(i in seq(len=nrow(df))){
    if(Verbose) cat("Current offset is",seek(con),";",df$nBytes[i],"to read\n")
    if(all(sections!=df$DataName[i])){
      # Just skip this section
      if(Verbose) cat("Skipping data section",df$DataName[i],"\n")
      seek(con,df$nBytes[i],origin="current")
    } else {
      if(Verbose) cat("Reading data section",df$DataName[i],"\n")
      if(df$HxType[i]=="HxByteRLE"){
        # run-length encoded bytes: read HxLength compressed bytes, expand,
        # then convert to integer
        d=readBin(con,what=raw(0),n=as.integer(df$HxLength[i]),size=1)
        d=decode.rle(d,df$SimpleDataLength[i])
        x=as.integer(d)
      } else {
        if(df$HxType[i]=="HxZip"){
          # zlib-compressed: decompress to a raw vector, then readBin from that
          uncompressed=read.zlib(con, compressedLength=as.integer(df$HxLength[i]))
        } else {
          # plain binary: read directly from the connection
          uncompressed=con
        }
        whatval=switch(df$RType[i], integer=integer(0), raw=raw(0), numeric(0))
        x=readBin(uncompressed,df$SimpleDataLength[i],size=df$Size[i],
          what=whatval,signed=df$Signed[i],endian=endian)
      }
      # note that first dim is moving fastest
      dims=unlist(df$Dims[i])
      # if the individual elements have subelements
      # then put those as innermost (fastest) dim
      if(df$SubLength[i]>1) dims=c(df$SubLength[i],dims)
      ndims=length(dims)
      if(ndims>1) dim(x)=dims
      if(ndims==2) x=t(x) # this feels like a hack, but ...
      l[[df$DataName[i]]]=x
    }
    if(df$SimpleDataLength[i]){
      # Skip return at end of section iff we had some data to read
      readLines(con,n=1)
      nextSectionHeader=readLines(con,n=1)
      if(Verbose) cat("nextSectionHeader = ",nextSectionHeader,"\n")
    }
  }
  l
}
# Read ASCII AmiraMesh data
#
# Reads the data sections of an ascii-format AmiraMesh file, returning the
# requested sections as a named list.
# @details Does not assume anything about line spacing between sections
# @param file path to the file, or an already-open text-mode connection
# @param df dataframe containing details of data in file
#   (the dataDef element returned by read.amiramesh.header)
# @param sections character vector of section names to keep
# @param Verbose print progress messages
# @return named list of data sections; sections with SubLength>1 are
#   returned as matrices with one row per data element
read.amiramesh.ascii<-function(file, df, sections, Verbose=FALSE){
  l=list()
  # df=subset(df,DataName%in%sections)
  # process sections in the order they appear in the file
  df=df[order(df$DataPos),]
  if(inherits(file,'connection'))
    con=file
  else {
    # rt is essential to ensure that readLines behaves with gzipped files
    con=file(file,open='rt')
    on.exit(close(con))
  }
  # skip over the header so that we are positioned at the first data section
  readLines(con, df$LineOffsets[1]-1)
  for(i in seq(len=nrow(df))){
    if(df$DataLength[i]>0){
      # read some lines until we get to a data section (marked by "@<n>")
      nskip=0
      while( substring(readLines(con,1),1,1)!="@"){nskip=nskip+1}
      if(Verbose) cat("Skipped",nskip,"lines to reach next data section")
      if(Verbose) cat("Reading ",df$DataLength[i],"lines in file",file,"\n")
      if(df$RType[i]=="integer") whatval=integer(0) else whatval=numeric(0)
      # SimpleDataLength = DataLength rows x SubLength values per row
      datachunk=scan(con,what=whatval,n=df$SimpleDataLength[i],quiet=!Verbose,
        na.strings = c("ERR","NA","NaN"))
      # store data if required
      if(df$DataName[i]%in%sections){
        # convert to matrix if required
        if(df$SubLength[i]>1){
          datachunk=matrix(datachunk,ncol=df$SubLength[i],byrow=TRUE)
        }
        l[[df$DataName[i]]]=datachunk
      }
    } else {
      if(Verbose) cat("Skipping empty data section",df$DataName[i],"\n")
    }
  }
  return(l)
}
#' Read the header of an AmiraMesh file
#'
#' @param Parse Logical indicating whether to parse header (default: TRUE)
#' @return When \code{Parse=FALSE}, a character vector of raw header lines.
#'   Otherwise a list containing \code{header} (raw lines),
#'   \code{Parameters}, \code{Materials} and \code{BoundingBox} (when
#'   present), and \code{dataDef}, a data.frame describing each data section.
#' @export
#' @rdname amiramesh-io
#' @details \code{read.amiramesh.header} will open a connection if file is a
#'   character vector and close it when finished reading.
read.amiramesh.header<-function(file, Parse=TRUE, Verbose=FALSE){
  if(inherits(file,"connection")) {
    con=file
  } else {
    con<-file(file, open='rt')
    on.exit(close(con))
  }
  # accumulate header lines until the first data section marker "@1"
  # (NB assigns into t inside the condition, locally masking base::t)
  headerLines=NULL
  while( substring(t<-readLines(con,1),1,2)!="@1"){
    headerLines=c(headerLines,t)
  }
  if(!Parse) return(headerLines)
  returnList<-list(header=headerLines)
  # the first line declares ascii/binary and (for binary) the endianness
  binaryfile="binary"==tolower(sub(".*(ascii|binary).*","\\1",headerLines[1],ignore.case=TRUE))
  endian=NA
  if(binaryfile){
    if(length(grep("little",headerLines[1],ignore.case=TRUE))>0) endian='little'
    else endian='big'
  }
  nHeaderLines=length(headerLines)
  # trim comments and blanks & convert all white space to single spaces
  headerLines=trimws(sub("(.*)#.*","\\1",headerLines,perl=TRUE))
  headerLines=headerLines[headerLines!=""]
  headerLines=gsub("[[:space:]]+"," ",headerLines,perl=TRUE)
  #print(headerLines)
  # parse location definitions, e.g. "define Lattice 10 10 10" or "nNodes 42"
  LocationLines=grep("^(n|define )(\\w+) ([0-9 ]+)$",headerLines,perl=TRUE)
  # NOTE(review): the second expression on this line is evaluated and
  # discarded (no assignment) - possibly headerLines was meant to be filtered
  # here; left as-is since assigning it would change downstream parsing.
  Locations=headerLines[LocationLines];headerLines[-LocationLines]
  LocationList=strsplit(gsub("^(n|define )(\\w+) ([0-9 ]+)$","\\2 \\3",Locations,perl=TRUE)," ")
  # first token is the location name, remaining tokens are its dimensions
  LocationNames=sapply(LocationList,"[",1)
  Locations=lapply(LocationList,function(x) as.numeric(unlist(x[-1])))
  names(Locations)=LocationNames
  # parse parameters
  ParameterStartLine=grep("^\\s*Parameters",headerLines,perl=TRUE)
  if(length(ParameterStartLine)>0){
    ParameterLines=headerLines[ParameterStartLine[1]:length(headerLines)]
    returnList[["Parameters"]]<-.ParseAmirameshParameters(ParameterLines)$Parameters
    if(!is.null(returnList[["Parameters"]]$Materials)){
      # try and parse materials into a data.frame (id, colour, level)
      te<-try(silent=TRUE,{
        Ids=sapply(returnList[["Parameters"]]$Materials,'[[','Id')
        # Replace any NULLs with NAs
        Ids=sapply(Ids,function(x) ifelse(is.null(x),NA,x))
        # Note we have to unquote and split any quoted colours
        Colors=sapply(returnList[["Parameters"]]$Materials,
          function(x) {if(is.null(x$Color)) return ('black')
            if(is.character(x$Color)) x$Color=unlist(strsplit(x$Color," "))
            return(rgb(x$Color[1],x$Color[2],x$Color[3]))})
        Materials=data.frame(id=Ids,col=I(Colors),level=seq(from=0,length=length(Ids)))
        rownames(Materials)<-names(returnList[["Parameters"]]$Materials)
      })
      if(inherits(te,'try-error')) warning("Unable to parse Amiramesh materials table")
      else returnList[["Materials"]]=Materials
    }
    if(!is.null(returnList[["Parameters"]]$BoundingBox)){
      returnList[["BoundingBox"]]=returnList[["Parameters"]]$BoundingBox
    }
  }
  # parse data definitions, e.g. "Lattice { float ScalarField } @1"
  DataDefLines=grep("^(\\w+).*@(\\d+)(\\(Hx[^)]+\\)){0,1}$",headerLines,perl=TRUE)
  # NOTE(review): as above, the trailing subscript expression is discarded
  DataDefs=headerLines[DataDefLines];headerLines[-DataDefLines]
  # HxType records any special encoding (HxByteRLE/HxZip); "raw" otherwise
  HxTypes=rep("raw",length(DataDefs))
  HxLengths=rep(NA,length(DataDefs))
  LinesWithHXType=grep("(HxByteRLE|HxZip)",DataDefs)
  HxTypes[LinesWithHXType]=sub(".*(HxByteRLE|HxZip).*","\\1",DataDefs[LinesWithHXType])
  # for encoded sections, the on-disk (compressed) length in bytes
  HxLengths[LinesWithHXType]=sub(".*(HxByteRLE|HxZip),([0-9]+).*","\\2",DataDefs[LinesWithHXType])
  # remove all extraneous chars altogether
  DataDefs=gsub("(=|@|\\}|\\{|[[:space:]])+"," ",DataDefs)
  if(Verbose) cat("DataDefs=",DataDefs,"\n")
  # make a df with DataDef info: location name, type, data name, section number
  DataDefMatrix=matrix(unlist(strsplit(DataDefs," ")),ncol=4,byrow=T)
  # remove HxLength definitions from 4th column if required
  DataDefMatrix[HxTypes!="raw",4]=sub("^([0-9]+).*","\\1",DataDefMatrix[HxTypes!="raw",4])
  DataDefDF=data.frame(DataName=I(DataDefMatrix[,3]),DataPos=as.numeric(DataDefMatrix[,4]))
  # EdgeData is an alias for the Edges location
  DataDefMatrix[,1]=sub("^EdgeData$","Edges",DataDefMatrix[,1])
  # Dims will store a list of dimensions that can be used later
  DataDefDF$Dims=Locations[DataDefMatrix[,1]]
  DataDefDF$DataLength=sapply(DataDefMatrix[,1],function(x) prod(Locations[[x]])) # notice prod in case we have multi dim
  DataDefDF$Type=I(DataDefMatrix[,2])
  # e.g. "float[3]" -> SimpleType "float", SubLength 3 (SubLength 1 if absent)
  DataDefDF$SimpleType=sub("(\\w+)\\s*\\[\\d+\\]","\\1",DataDefDF$Type,perl=TRUE)
  DataDefDF$SubLength=as.numeric(sub("\\w+\\s*(\\[(\\d+)\\])?","\\2",DataDefDF$Type,perl=TRUE))
  DataDefDF$SubLength[is.na(DataDefDF$SubLength)]=1
  # Find size of binary data (if required?)
  TypeInfo=data.frame(SimpleType=I(c("float","byte", "ushort","short", "int", "double", "complex")),Size=c(4,1,2,2,4,8,8),
    RType=I(c("numeric",rep("integer",4),rep("numeric",2))), Signed=c(TRUE,FALSE,FALSE,rep(TRUE,4)) )
  DataDefDF=merge(DataDefDF,TypeInfo,all.x=T)
  # Sort (just in case)
  DataDefDF= DataDefDF[order(DataDefDF$DataPos),]
  # total number of simple values = rows x values per row
  DataDefDF$SimpleDataLength=DataDefDF$DataLength*DataDefDF$SubLength
  DataDefDF$nBytes=DataDefDF$SubLength*DataDefDF$Size*DataDefDF$DataLength
  DataDefDF$HxType=HxTypes
  DataDefDF$HxLength=HxLengths
  DataDefDF$endian=endian
  # FIXME Note that this assumes exactly one blank line in between each data section
  # I'm not sure if this is a required property of the Amira file format
  # Fixing this would of course require reading/skipping each data section
  nDataSections=nrow(DataDefDF)
  # NB 0 length data sections are not written
  DataSectionsLineLengths=ifelse(DataDefDF$DataLength==0,0,2+DataDefDF$DataLength)
  DataDefDF$LineOffsets=nHeaderLines+1+c(0,cumsum(DataSectionsLineLengths[-nDataSections]))
  returnList[["dataDef"]]=DataDefDF
  return(returnList)
}
# Ensure that `label` is unique among the names of list `l`.
# A duplicate is deduplicated with make.unique() (e.g. "a" -> "a.1") and a
# warning is issued; otherwise the label is returned unchanged.
.checkLabel=function(l, label) {
  existing <- names(l)
  if (label %in% existing) {
    renamed <- make.unique(c(existing, label))[length(l) + 1L]
    warning(paste("Duplicate item",label,"renamed",renamed))
    return(renamed)
  }
  label
}
# Recursively parse the Parameters section of an AmiraMesh header into a
# nested list mirroring the brace-delimited structure.
#
# @param textArray character vector of header lines OR an open connection
#   (the recursive calls pass the connection so parsing continues from the
#   same stream position)
# @param CheckLabel whether to rename duplicate labels via .checkLabel
# @param ParametersOnly if TRUE, stop once the top level "Parameters"
#   subsection has been parsed
# @return nested named list; numeric-looking values are converted to numeric,
#   quoted strings are dequoted and marked with attr "quoted"
.ParseAmirameshParameters<-function(textArray, CheckLabel=TRUE,ParametersOnly=FALSE){
  # First check what kind of input we have
  if(is.character(textArray)) con=textConnection(textArray,open='r')
  else {
    con=textArray
  }
  # empty list to store results
  l=list()
  # Should this check to see if the connection still exists?
  # in case we want to bail out sooner
  while ( {t<-try(isOpen(con),silent=TRUE);isTRUE(t) || !inherits(t,"try-error")} ){
    thisLine<-readLines(con,1)
    # no lines returned - ie end of file
    if(length(thisLine)==0) break
    # trim and split it up by white space
    thisLine=trimws(thisLine)
    # skip if this is a blank line
    if(nchar(thisLine)==0) next
    # skip if this is a comment
    if(substr(thisLine,1,1)=="#") next
    items=strsplit(thisLine," ",fixed=TRUE)[[1]]
    if(length(items)==0) next
    # get the label and items
    label=items[1]; items=items[-1]
    #cat("\nlabel=",label)
    #cat("; items=",items)
    # return list if this is the end of a section
    if(label=="}") {
      #cat("end of section - leaving this recursion\n")
      return (l)
    }
    if(isTRUE(items[1]=="{")){
      # parse new subsection
      #cat("new subsection -> recursion\n")
      # set the list element!
      if(CheckLabel) label=.checkLabel(l, label)
      # recurse, continuing to read from the same connection
      l[[length(l)+1]]=.ParseAmirameshParameters(con,CheckLabel=CheckLabel)
      names(l)[length(l)]<-label
      if(ParametersOnly && label=="Parameters")
        break # we're done
      else next
    }
    # a closing brace at end of line terminates this section after the
    # current item has been stored
    if(isTRUE(items[length(items)]=="}")) {
      returnAfterParsing=TRUE
      items=items[-length(items)]
    }
    else returnAfterParsing=FALSE
    # ordinary item
    # Check first item (if there are any items)
    if(length(items)>0){
      firstItemFirstChar=substr(items[1],1,1)
      if(any(firstItemFirstChar==c("-",as.character(0:9)) )){
        # Get rid of any commas
        items=chartr(","," ",items)
        # convert to numeric if not a string
        items=as.numeric(items)
      } else if (firstItemFirstChar=="\""){
        # NOTE(review): with fixed=TRUE this pattern matches the literal two
        # characters backslash+brace, not "}" - verify trailing braces are
        # actually stripped from quoted-string lines here
        if(returnAfterParsing) thisLine=sub("\\}","",thisLine,fixed=TRUE)
        # dequote quoted string using scan
        items=scan(text=thisLine,what="",quiet=TRUE)[-1]
        # remove any commas
        items=items[items!=","]
        attr(items,"quoted")=TRUE
      }
    }
    # set the list element!
    if(CheckLabel)
      label=.checkLabel(l, label)
    l[[length(l)+1]]=items
    names(l)[length(l)]<-label
    if(returnAfterParsing) return(l)
  }
  # we should only get here once if we parse a valid hierarchy
  try(close(con),silent=TRUE)
  return(l)
}
# Decode Amira HxByteRLE run-length encoded bytes into a raw vector of the
# specified uncompressed length.
#
# Encoding as implemented below:
# * a control byte n in 1..127 means: repeat the following byte n times
# * a control byte n > 127 means: the next (n - 128) bytes are literal
#   (this is how runs of >= 2 consecutive differing bytes are stored)
# * a control byte of 0 is treated as an error
# @param d raw vector of RLE-compressed bytes
# @param uncompressedLength number of bytes in the decoded output
# @return raw vector of length uncompressedLength
decode.rle<-function(d,uncompressedLength){
  out <- raw(uncompressedLength)
  n_written <- 0
  pos <- 1
  while (n_written < uncompressedLength) {
    # fetch the next control byte
    control <- d[pos]
    pos <- pos + 1
    if (control == 0L)
      stop(paste("byte at offset ",pos," is 0!"))
    if (control > 0x7f) {
      # literal run: copy the next (control - 128) bytes unchanged
      run_len <- as.integer(control) - 128
      chunk <- d[pos:(pos + run_len - 1)]
      pos <- pos + run_len
    } else {
      # repeat run: the next byte occurs `control` times
      chunk <- rep.int(d[pos], as.integer(control))
      pos <- pos + 1
    }
    out[(n_written + 1):(n_written + length(chunk))] <- chunk
    n_written <- n_written + length(chunk)
  }
  out
}
# Uncompress zlib/gzip compressed data (from file, connection or memory)
# to a raw vector in memory.
#
# @details zlib compressed data uses the same algorithm but a smaller header
#   than gzip data.
# @details For connections, compressedLength must be supplied, but offset is
#   ignored (i.e. you must seek beforehand). For files, if compressedLength
#   is not supplied, everything from offset to the end of file is read.
# @param compressed Path to compressed file, connection or raw vector.
# @param offset Byte offset in file on disk
# @param compressedLength Bytes of compressed data to read
# @param type The compression type. See ?memDecompress for details.
# @param ... Additional parameters passed on to \code{memDecompress}
# @return raw vector of decompressed data
# @seealso memDecompress
# @export
read.zlib<-function(compressed, offset=NA, compressedLength=NA, type='gzip', ...){
  # raw vectors can be handed straight to memDecompress
  if (is.raw(compressed))
    return(memDecompress(compressed, type = type, ...))
  if (inherits(compressed, 'connection')) {
    # caller is responsible for seeking; we only need to know how much to read
    if (is.na(compressedLength))
      stop("Must supply compressedLength when reading from a connection")
    con <- compressed
  } else {
    # a file path: open our own binary connection and close it on exit
    con <- file(compressed, open = 'rb')
    on.exit(close(con))
    if (is.na(offset)) {
      offset <- 0
    } else {
      seek(con, offset)
    }
    # default: everything from offset to the end of the file
    if (is.na(compressedLength))
      compressedLength <- file.info(compressed)$size - offset
  }
  zbytes <- readBin(con, what = raw(), n = compressedLength)
  memDecompress(zbytes, type = type, ...)
}
# Compress raw data with zlib (gzip container), returning a raw vector or
# writing to a connection/file.
#
# @details The default value of \code{con=raw()} means that this function will
#   return a raw vector of compressed data if con is not specified.
# @param uncompressed \code{raw} vector of data
# @param con Raw vector, open connection, or path to output file
# @return A raw vector (if \code{con} is a raw vector); otherwise the data
#   are written to \code{con} via writeBin.
# @seealso Depends on \code{\link{memCompress}}
# @export
write.zlib<-function(uncompressed, con=raw()){
  if(!inherits(con, "connection") && !is.raw(con)){
    # con is a file path: open a binary write connection ourselves.
    # NB must use file() here - open() is a side-effect generic that returns
    # NULL invisibly (and has no method for character paths), so the previous
    # con=open(con, open='wb') could never have produced a usable connection.
    con <- file(con, open='wb')
    # make sure the connection is released even on error
    on.exit(close(con))
  }
  d=memCompress(uncompressed, type='gzip')
  if(is.raw(con)) return(d)
  writeBin(object=d, con=con)
}
#' Check if file is AmiraMesh format
#'
#' @details Tries to be as fast as possible by reading only the first 11
#'   bytes and checking if they equal "# AmiraMesh" or (deprecated)
#'   "# HyperMesh".
#' @param f Path to one or more files to be tested \strong{or} an array of raw
#'   bytes, for one file only.
#' @param bytes optional raw vector of at least 11 bytes from the start of a
#'   single file (used in preference to reading file \code{f}).
#' @return logical
#' @export
#' @family amira
is.amiramesh<-function(f=NULL, bytes=NULL) {
  # bytes can only stand in for a single file
  multiple_files <- is.character(f) && length(f) > 1
  if (!is.null(bytes) && multiple_files)
    stop("Can only check bytes for a single file")
  # prefer the caller-supplied bytes over (re)reading from disk
  probe <- if (is.null(bytes)) f else bytes
  generic_magic_check(probe, c("# HyperMesh", "# AmiraMesh"))
}
#' Return the type of an AmiraMesh file on disk or a parsed header
#'
#' @details Note that when checking a file we first test if it is an AmiraMesh
#'   file (fast, especially when \code{bytes!=NULL}) before reading the header
#'   and determining content type (slow).
#' @param x Path to files on disk or a single pre-parsed parameter list
#' @param bytes A raw vector containing at least 11 bytes from the start of the
#'   file.
#' @return character vector (NA_character_ when file invalid)
#' @export
#' @family amira
amiratype<-function(x, bytes=NULL){
  # a list input is assumed to be an already-parsed/-read header
  if(is.list(x)) h<-x
  else {
    # we have a file, optionally with some raw data
    if(!is.null(bytes) && length(x)>1)
      stop("Can only accept bytes argument for single file")
    # multiple files: recurse over each one
    if(length(x)>1) return(sapply(x,amiratype))
    # need at least 14 bytes so we can also check for "# HyperSurface"
    if(is.null(bytes) || length(bytes)<14) {
      f=gzfile(x, open='rb')
      on.exit(close(f))
      bytes=readBin(f, what=raw(), n=14L)
    }
    if(!isTRUE(is.amiramesh(bytes))) {
      # not AmiraMesh, but could still be a HyperSurface file
      if(generic_magic_check(bytes, "# HyperSurface")) {
        return("HxSurface")
      } else return(NA_character_)
    }
    # read the raw, unparsed header lines (Parse=F)
    h=try(read.amiramesh.header(x, Verbose=FALSE, Parse = F), silent=TRUE)
    if(inherits(h,'try-error')) return(NA_character_)
  }
  # look for an explicit ContentType declaration in the header lines
  ct=grep("ContentType", h, value = T, fixed=T)
  if(length(ct)){
    ct=sub(".*ContentType","",ct[1])
    # strip everything except letters and spaces
    # NOTE(review): the range [^A-z ] also excludes the ASCII punctuation
    # between Z and a (e.g. _ and brackets) from replacement - verify intended
    ct=gsub("[^A-z ]+"," ",ct)
    ct=scan(text=ct, what = "", quiet = T)
    if(length(ct)==0) stop('unable to parse ContentType')
    return(ct[1])
  }
  # otherwise fall back to CoordType, which implies a spatial field
  ct=grep("CoordType", h, value = T, fixed=T)
  if(length(ct)){
    ct=sub(".*CoordType","",ct[1])
    ct=gsub("[^A-z ]+"," ",ct)
    ct=scan(text=ct, what = "", quiet = T)
    if(length(ct)==0) stop('unable to parse CoordType')
    return(paste0(ct[1], ".field"))
  }
  # neither ContentType nor CoordType found
  NA_character_
}
# Factory returning a predicate that tests whether file(s) are of a given
# Amira content type (as reported by amiratype()).
is.amiratype<-function(type) {
  function(f, bytes=NULL){
    detected <- amiratype(f, bytes = bytes)
    # NA results (invalid files) collapse to FALSE via isTRUE()
    sapply(detected, function(this_type) isTRUE(this_type == type))
  }
}
#' Write a 3D data object to an AmiraMesh format file
#' @inheritParams write.im3d
#' @param enc Encoding of the data. NB "raw" and "binary" are synonyms.
#' @param dtype Data type to write to disk
#' @param endian Endianness of data block. Defaults to current value of
#'   \code{.Platform$endian}.
#' @param WriteNrrdHeader Whether to write a separate detached nrrd header next
#'   to the AmiraMesh file allowing it to be opened by a NRRD reader. See
#'   details.
#' @details Note that only \code{'raw'} or \code{'text'} format data can
#'   accommodate a detached NRRD format header since Amira's HxZip format is
#'   subtly different from NRRD's gzip encoding. There is a full description
#'   of the detached NRRD format in the help for \code{\link{write.nrrd}}.
#' @export
#' @seealso \code{\link{.Platform}, \link{read.amiramesh}, \link{write.nrrd}}
#' @examples
#' d=array(rnorm(1000), c(10, 10, 10))
#' tf=tempfile(fileext='.am')
#' write.amiramesh(im3d(d, voxdims=c(0.5,0.5,1)), file=tf, WriteNrrdHeader=TRUE)
#' d2=read.nrrd(paste(tf, sep='', '.nhdr'))
#' all.equal(d, d2, tol=1e-6)
write.amiramesh<-function(x, file, enc=c("binary","raw","text","hxzip"),
  dtype=c("float","byte", "short", "ushort", "int", "double"),
  endian=.Platform$endian, WriteNrrdHeader=FALSE){
  enc=match.arg(enc)
  endian=match.arg(endian, c('big','little'))
  # magic first line identifies format version and (for binary) endianness;
  # this cat() also truncates/creates the output file
  if(enc=='text') cat("# AmiraMesh ASCII 1.0\n\n",file=file)
  else if(endian=='little') cat("# AmiraMesh BINARY-LITTLE-ENDIAN 2.1\n\n",file=file)
  else cat("# AmiraMesh 3D BINARY 2.0\n\n",file=file)
  fc=file(file,open="at") # ie append, text mode
  cat("# Created by write.amiramesh\n\n",file=fc)
  # a list input is assumed to carry its data in an $estimate element
  # (presumably e.g. a density estimate object - TODO confirm intended class)
  if(!is.list(x)) d=x else d=x$estimate
  # Find data type and size for Amira
  dtype=match.arg(dtype)
  dtypesize<-c(4,1,2,2,4,8)[which(dtype==c("float","byte", "short","ushort", "int", "double"))]
  # Set the data mode which will be used in the as.vector call at the
  # moment that the binary data is written out.
  if(dtype%in%c("byte","short","ushort","int")) dmode="integer"
  if(dtype%in%c("float","double")) dmode="numeric"
  lattice=dim(d)
  cat("define Lattice",lattice,"\n",file=fc)
  cat("Parameters { CoordType \"uniform\",\n",file=fc)
  # note Amira's definition for the bounding box:
  # the range of the voxel centres.
  # So eval.points should correspond to the CENTRE of the
  # voxels at which the density is evaluated
  cat("\t# BoundingBox is xmin xmax ymin ymax zmin zmax\n",file=fc)
  BoundingBox=NULL
  if(!is.null(attr(x,"BoundingBox"))){
    BoundingBox=attr(x,"BoundingBox")
  } else if(is.list(d) && !is.null(d$eval.points)){
    BoundingBox=as.vector(apply(d$eval.points,2,range))
  }
  if(!is.null(BoundingBox)) cat("\t BoundingBox",BoundingBox,"\n",file=fc)
  cat("}\n\n",file=fc)
  if(enc=="hxzip"){
    # HxZip: zlib-compress the binary data now so the compressed length can
    # be recorded in the data definition line
    raw_data=writeBin(as.vector(d,mode=dmode),raw(),size=dtypesize,endian=endian)
    zlibdata=write.zlib(raw_data)
    cat("Lattice { ",dtype," ScalarField } = @1(HxZip,",length(zlibdata),")\n\n",sep="",file=fc)
  } else cat("Lattice {",dtype,"ScalarField } = @1\n\n",file=fc)
  cat("@1\n",file=fc)
  close(fc)
  # Write a Nrrd header to accompany the Amira file if desired
  # see http://teem.sourceforge.net/nrrd/
  if(WriteNrrdHeader) {
    if(enc=="hxzip") stop("Nrrd cannot handle Amira's HxZip encoding (which is subtly different from gzip)")
    nrrdfile=paste(file,sep=".","nhdr")
    cat("NRRD0004\n",file=nrrdfile)
    fc=file(nrrdfile,open="at") # ie append, text mode
    nrrdType=ifelse(dtype=="byte","uint8",dtype)
    cat("encoding:", ifelse(enc=="text","text","raw"),"\n",file=fc)
    cat("type: ",nrrdType,"\n",sep="",file=fc)
    cat("endian: ",endian,"\n",sep="",file=fc)
    # Important - this sets the offset in the AmiraMesh file from which
    # to start reading data (header written so far = everything before @1 data)
    cat("byte skip:",file.info(file)$size,"\n",file=fc)
    cat("dimension: ",length(lattice),"\n",sep="",file=fc)
    cat("sizes:",lattice,"\n",file=fc)
    voxdims=voxdims(x)
    if(!is.null(voxdims)) cat("spacings:",voxdims,"\n",file=fc)
    if(!is.null(BoundingBox)){
      cat("axis mins:",matrix(BoundingBox,nrow=2)[1,],"\n",file=fc)
      cat("axis maxs:",matrix(BoundingBox,nrow=2)[2,],"\n",file=fc)
    }
    cat("data file: ",basename(file),"\n",sep="",file=fc)
    cat("\n",file=fc)
    close(fc)
  }
  # finally append the data block itself (after the nrrd header was sized)
  if(enc=='text'){
    write(as.vector(d, mode=dmode), ncolumns=1, file=file, append=TRUE)
  } else {
    fc=file(file,open="ab") # ie append, bin mode
    if(enc=="hxzip")
      writeBin(zlibdata, fc, size=1, endian=endian)
    else
      writeBin(as.vector(d, mode=dmode), fc, size=dtypesize, endian=endian)
    close(fc)
  }
}
| /R/amiramesh-io.R | no_license | natverse/nat | R | false | false | 25,683 | r | #' Read AmiraMesh data in binary or ascii format
#'
#' @details reading byte data as raw arrays requires 1/4 memory but complicates
#' arithmetic.
#' @param file Name of file (or connection) to read
#' @param sections character vector containing names of sections
#' @param header Whether to include the full unprocessed text header as an
#' attribute of the returned list.
#' @param simplify If there is only one datablock in file do not return wrapped
#' in a list (default TRUE).
#' @param endian Whether multibyte data types should be treated as big or little
#' endian. Default of NULL checks file or uses \code{.Platform$endian}
#' @param ReadByteAsRaw Logical specifying whether to read 8 bit data as an R
#' \code{raw} vector rather than \code{integer} vector (default: FALSE).
#' @param Verbose Print status messages
#' @return list of named data chunks
#' @importFrom nat.utils is.gzip
#' @rdname amiramesh-io
#' @export
#' @seealso \code{\link{readBin}, \link{.Platform}}
#' @family amira
read.amiramesh<-function(file,sections=NULL,header=FALSE,simplify=TRUE,
endian=NULL,ReadByteAsRaw=FALSE,Verbose=FALSE){
firstLine=readLines(file,n=1)
if(!any(grep("#\\s+(amira|hyper)mesh",firstLine,ignore.case=TRUE))){
warning(paste(file,"does not appear to be an AmiraMesh file"))
return(NULL)
}
binaryfile="binary"==tolower(sub(".*(ascii|binary).*","\\1",firstLine,ignore.case=TRUE))
# Check if file is gzipped
con=if(is.gzip(file)) gzfile(file) else file(file)
open(con, open=ifelse(binaryfile, 'rb', 'rt'))
on.exit(try(close(con),silent=TRUE))
h=read.amiramesh.header(con,Verbose=Verbose)
parsedHeader=h[["dataDef"]]
if(is.null(endian) && is.character(parsedHeader$endian)) {
endian=parsedHeader$endian[1]
}
if(ReadByteAsRaw){
parsedHeader$RType[parsedHeader$SimpleType=='byte']='raw'
}
if(is.null(sections)) sections=parsedHeader$DataName
else sections=intersect(parsedHeader$DataName,sections)
if(length(sections)){
if(binaryfile){
filedata=.read.amiramesh.bin(con,parsedHeader,sections,Verbose=Verbose,endian=endian)
close(con)
} else {
close(con)
filedata=read.amiramesh.ascii(file,parsedHeader,sections,Verbose=Verbose)
}
} else {
# we don't have any data to read - just make a dummy return object to which
# we can add attributes
filedata<-switch(parsedHeader$RType[1],
integer=integer(0), raw=raw(), numeric(0))
}
if(!header) h=h[setdiff(names(h),c("header"))]
for (n in names(h))
attr(filedata,n)=h[[n]]
# unlist?
if(simplify && is.list(filedata) && length(filedata)==1){
filedata2=filedata[[1]]
attributes(filedata2)=attributes(filedata)
dim(filedata2)=dim(filedata[[1]])
filedata=filedata2
}
return(filedata)
}
.read.amiramesh.bin<-function(con, df, sections, endian=endian, Verbose=FALSE){
l=list()
for(i in seq(len=nrow(df))){
if(Verbose) cat("Current offset is",seek(con),";",df$nBytes[i],"to read\n")
if(all(sections!=df$DataName[i])){
# Just skip this section
if(Verbose) cat("Skipping data section",df$DataName[i],"\n")
seek(con,df$nBytes[i],origin="current")
} else {
if(Verbose) cat("Reading data section",df$DataName[i],"\n")
if(df$HxType[i]=="HxByteRLE"){
d=readBin(con,what=raw(0),n=as.integer(df$HxLength[i]),size=1)
d=decode.rle(d,df$SimpleDataLength[i])
x=as.integer(d)
} else {
if(df$HxType[i]=="HxZip"){
uncompressed=read.zlib(con, compressedLength=as.integer(df$HxLength[i]))
} else {
uncompressed=con
}
whatval=switch(df$RType[i], integer=integer(0), raw=raw(0), numeric(0))
x=readBin(uncompressed,df$SimpleDataLength[i],size=df$Size[i],
what=whatval,signed=df$Signed[i],endian=endian)
}
# note that first dim is moving fastest
dims=unlist(df$Dims[i])
# if the individual elements have subelements
# then put those as innermost (fastest) dim
if(df$SubLength[i]>1) dims=c(df$SubLength[i],dims)
ndims=length(dims)
if(ndims>1) dim(x)=dims
if(ndims==2) x=t(x) # this feels like a hack, but ...
l[[df$DataName[i]]]=x
}
if(df$SimpleDataLength[i]){
# Skip return at end of section iff we had some data to read
readLines(con,n=1)
nextSectionHeader=readLines(con,n=1)
if(Verbose) cat("nextSectionHeader = ",nextSectionHeader,"\n")
}
}
l
}
# Read ASCII AmiraMesh data
# @details Does not assume anything about line spacing between sections
# @param df dataframe containing details of data in file
read.amiramesh.ascii<-function(file, df, sections, Verbose=FALSE){
l=list()
# df=subset(df,DataName%in%sections)
df=df[order(df$DataPos),]
if(inherits(file,'connection'))
con=file
else {
# rt is essential to ensure that readLines behaves with gzipped files
con=file(file,open='rt')
on.exit(close(con))
}
readLines(con, df$LineOffsets[1]-1)
for(i in seq(len=nrow(df))){
if(df$DataLength[i]>0){
# read some lines until we get to a data section
nskip=0
while( substring(readLines(con,1),1,1)!="@"){nskip=nskip+1}
if(Verbose) cat("Skipped",nskip,"lines to reach next data section")
if(Verbose) cat("Reading ",df$DataLength[i],"lines in file",file,"\n")
if(df$RType[i]=="integer") whatval=integer(0) else whatval=numeric(0)
datachunk=scan(con,what=whatval,n=df$SimpleDataLength[i],quiet=!Verbose,
na.strings = c("ERR","NA","NaN"))
# store data if required
if(df$DataName[i]%in%sections){
# convert to matrix if required
if(df$SubLength[i]>1){
datachunk=matrix(datachunk,ncol=df$SubLength[i],byrow=TRUE)
}
l[[df$DataName[i]]]=datachunk
}
} else {
if(Verbose) cat("Skipping empty data section",df$DataName[i],"\n")
}
}
return(l)
}
#' Read the header of an AmiraMesh file
#'
#' @param file Path to an AmiraMesh file \strong{or} an open connection. A
#'   connection is read in place and is \emph{not} closed here.
#' @param Parse Logical indicating whether to parse header (default: TRUE)
#' @param Verbose Logical indicating whether to print progress information
#'   while parsing (default: FALSE)
#' @return When \code{Parse=FALSE}, a character vector of raw header lines.
#'   Otherwise a list with elements \code{header} (raw header lines),
#'   \code{Parameters}, \code{Materials} and \code{BoundingBox} (each only
#'   when present in the file) and \code{dataDef}, a data.frame describing
#'   every data section (name, position, dimensions, type, encoding, byte
#'   sizes and line offsets).
#' @export
#' @rdname amiramesh-io
#' @details \code{read.amiramesh.header} will open a connection if file is a
#'   character vector and close it when finished reading.
read.amiramesh.header<-function(file, Parse=TRUE, Verbose=FALSE){
  if(inherits(file,"connection")) {
    con=file
  } else {
    con<-file(file, open='rt')
    on.exit(close(con))
  }
  # Accumulate raw lines until the first data section marker "@1" is reached;
  # everything before that marker is header.
  headerLines=NULL
  while( substring(t<-readLines(con,1),1,2)!="@1"){
    headerLines=c(headerLines,t)
  }
  if(!Parse) return(headerLines)
  returnList<-list(header=headerLines)
  # The first line declares the encoding, e.g. "# AmiraMesh BINARY-LITTLE-ENDIAN 2.1"
  binaryfile="binary"==tolower(sub(".*(ascii|binary).*","\\1",headerLines[1],ignore.case=TRUE))
  endian=NA
  if(binaryfile){
    if(length(grep("little",headerLines[1],ignore.case=TRUE))>0) endian='little'
    else endian='big'
  }
  nHeaderLines=length(headerLines)
  # trim comments and blanks & convert all white space to single spaces
  headerLines=trimws(sub("(.*)#.*","\\1",headerLines,perl=TRUE))
  headerLines=headerLines[headerLines!=""]
  headerLines=gsub("[[:space:]]+"," ",headerLines,perl=TRUE)
  #print(headerLines)
  # parse location definitions, e.g. "define Lattice 512 512 100" or "nVertices 42"
  LocationLines=grep("^(n|define )(\\w+) ([0-9 ]+)$",headerLines,perl=TRUE)
  # NOTE(review): the second expression below discards its result (it is not
  # assigned), so headerLines deliberately(?) still contains the location
  # lines afterwards — confirm before "fixing".
  Locations=headerLines[LocationLines];headerLines[-LocationLines]
  LocationList=strsplit(gsub("^(n|define )(\\w+) ([0-9 ]+)$","\\2 \\3",Locations,perl=TRUE)," ")
  LocationNames=sapply(LocationList,"[",1)
  Locations=lapply(LocationList,function(x) as.numeric(unlist(x[-1])))
  names(Locations)=LocationNames
  # parse parameters: everything from the "Parameters" block to the end of header
  ParameterStartLine=grep("^\\s*Parameters",headerLines,perl=TRUE)
  if(length(ParameterStartLine)>0){
    ParameterLines=headerLines[ParameterStartLine[1]:length(headerLines)]
    returnList[["Parameters"]]<-.ParseAmirameshParameters(ParameterLines)$Parameters
    if(!is.null(returnList[["Parameters"]]$Materials)){
      # try and parse materials into a data.frame (id, colour, level);
      # wrapped in try() since material entries are free-form
      te<-try(silent=TRUE,{
        Ids=sapply(returnList[["Parameters"]]$Materials,'[[','Id')
        # Replace any NULLs with NAs
        Ids=sapply(Ids,function(x) ifelse(is.null(x),NA,x))
        # Note we have to unquote and split any quoted colours
        Colors=sapply(returnList[["Parameters"]]$Materials,
          function(x) {if(is.null(x$Color)) return ('black')
            if(is.character(x$Color)) x$Color=unlist(strsplit(x$Color," "))
            return(rgb(x$Color[1],x$Color[2],x$Color[3]))})
        Materials=data.frame(id=Ids,col=I(Colors),level=seq(from=0,length=length(Ids)))
        rownames(Materials)<-names(returnList[["Parameters"]]$Materials)
      })
      if(inherits(te,'try-error')) warning("Unable to parse Amiramesh materials table")
      else returnList[["Materials"]]=Materials
    }
    if(!is.null(returnList[["Parameters"]]$BoundingBox)){
      returnList[["BoundingBox"]]=returnList[["Parameters"]]$BoundingBox
    }
  }
  # parse data definitions, e.g. 'Lattice { float ScalarField } @1(HxZip,1234)'
  DataDefLines=grep("^(\\w+).*@(\\d+)(\\(Hx[^)]+\\)){0,1}$",headerLines,perl=TRUE)
  # NOTE(review): as above, the second expression discards its result.
  DataDefs=headerLines[DataDefLines];headerLines[-DataDefLines]
  # HxType records the per-section encoding: "raw", "HxByteRLE" or "HxZip";
  # HxLength is the encoded byte count given after the comma, when present
  HxTypes=rep("raw",length(DataDefs))
  HxLengths=rep(NA,length(DataDefs))
  LinesWithHXType=grep("(HxByteRLE|HxZip)",DataDefs)
  HxTypes[LinesWithHXType]=sub(".*(HxByteRLE|HxZip).*","\\1",DataDefs[LinesWithHXType])
  HxLengths[LinesWithHXType]=sub(".*(HxByteRLE|HxZip),([0-9]+).*","\\2",DataDefs[LinesWithHXType])
  # remove all extraneous chars altogether
  DataDefs=gsub("(=|@|\\}|\\{|[[:space:]])+"," ",DataDefs)
  if(Verbose) cat("DataDefs=",DataDefs,"\n")
  # make a df with DataDef info: columns are location name, type, data name, position
  DataDefMatrix=matrix(unlist(strsplit(DataDefs," ")),ncol=4,byrow=T)
  # remove HxLength definitions from 4th column if required
  DataDefMatrix[HxTypes!="raw",4]=sub("^([0-9]+).*","\\1",DataDefMatrix[HxTypes!="raw",4])
  DataDefDF=data.frame(DataName=I(DataDefMatrix[,3]),DataPos=as.numeric(DataDefMatrix[,4]))
  DataDefMatrix[,1]=sub("^EdgeData$","Edges",DataDefMatrix[,1])
  # Dims will store a list of dimensions that can be used later
  DataDefDF$Dims=Locations[DataDefMatrix[,1]]
  DataDefDF$DataLength=sapply(DataDefMatrix[,1],function(x) prod(Locations[[x]])) # notice prod in case we have multi dim
  DataDefDF$Type=I(DataDefMatrix[,2])
  # e.g. "float[3]" -> SimpleType "float" with SubLength 3 (vector-valued data)
  DataDefDF$SimpleType=sub("(\\w+)\\s*\\[\\d+\\]","\\1",DataDefDF$Type,perl=TRUE)
  DataDefDF$SubLength=as.numeric(sub("\\w+\\s*(\\[(\\d+)\\])?","\\2",DataDefDF$Type,perl=TRUE))
  DataDefDF$SubLength[is.na(DataDefDF$SubLength)]=1
  # Find size of binary data (if required?)
  TypeInfo=data.frame(SimpleType=I(c("float","byte", "ushort","short", "int", "double", "complex")),Size=c(4,1,2,2,4,8,8),
    RType=I(c("numeric",rep("integer",4),rep("numeric",2))), Signed=c(TRUE,FALSE,FALSE,rep(TRUE,4)) )
  DataDefDF=merge(DataDefDF,TypeInfo,all.x=T)
  # Sort (just in case) — merge() reorders rows, so restore data-section order
  DataDefDF= DataDefDF[order(DataDefDF$DataPos),]
  DataDefDF$SimpleDataLength=DataDefDF$DataLength*DataDefDF$SubLength
  DataDefDF$nBytes=DataDefDF$SubLength*DataDefDF$Size*DataDefDF$DataLength
  DataDefDF$HxType=HxTypes
  DataDefDF$HxLength=HxLengths
  DataDefDF$endian=endian
  # FIXME Note that this assumes exactly one blank line in between each data section
  # I'm not sure if this is a required property of the Amira file format
  # Fixing this would of course require reading/skipping each data section
  nDataSections=nrow(DataDefDF)
  # NB 0 length data sections are not written
  DataSectionsLineLengths=ifelse(DataDefDF$DataLength==0,0,2+DataDefDF$DataLength)
  DataDefDF$LineOffsets=nHeaderLines+1+c(0,cumsum(DataSectionsLineLengths[-nDataSections]))
  returnList[["dataDef"]]=DataDefDF
  return(returnList)
}
# Ensure `label` does not clash with the names already used in list `l`.
# On a clash, derive a unique replacement via make.unique() and warn;
# otherwise return the label unchanged.
.checkLabel=function(l, label) {
  existing <- names(l)
  if (any(existing == label)) {
    unique_label <- make.unique(c(existing, label))[length(l) + 1]
    warning(paste("Duplicate item", label, "renamed", unique_label))
    return(unique_label)
  }
  label
}
# Recursively parse the Parameters section of an AmiraMesh header into a
# nested R list.
#
# @param textArray Either a character vector of header lines (wrapped in a
#   textConnection) or an already-open connection (used directly — this is
#   what happens on recursive calls for brace-delimited subsections).
# @param CheckLabel Logical; if TRUE duplicate labels are made unique via
#   .checkLabel() (with a warning).
# @param ParametersOnly Logical; if TRUE stop reading as soon as the
#   top-level "Parameters" subsection has been parsed.
# @return A named list mirroring the brace hierarchy. Numeric items are
#   converted with as.numeric(); quoted strings are dequoted and tagged with
#   attr(, "quoted") = TRUE.
.ParseAmirameshParameters<-function(textArray, CheckLabel=TRUE,ParametersOnly=FALSE){
  # First check what kind of input we have
  if(is.character(textArray)) con=textConnection(textArray,open='r')
  else {
    con=textArray
  }
  # empty list to store results
  l=list()
  # Should this check to see if the connection still exists?
  # in case we want to bail out sooner
  # (isOpen() throws for destroyed connections, hence the try())
  while ( {t<-try(isOpen(con),silent=TRUE);isTRUE(t) || !inherits(t,"try-error")} ){
    thisLine<-readLines(con,1)
    # no lines returned - ie end of file
    if(length(thisLine)==0) break
    # trim and split it up by white space
    thisLine=trimws(thisLine)
    # skip if this is a blank line
    if(nchar(thisLine)==0) next
    # skip if this is a comment
    if(substr(thisLine,1,1)=="#") next
    items=strsplit(thisLine," ",fixed=TRUE)[[1]]
    if(length(items)==0) next
    # get the label and items
    label=items[1]; items=items[-1]
    #cat("\nlabel=",label)
    #cat("; items=",items)
    # return list if this is the end of a section
    if(label=="}") {
      #cat("end of section - leaving this recursion\n")
      return (l)
    }
    if(isTRUE(items[1]=="{")){
      # parse new subsection
      #cat("new subsection -> recursion\n")
      # set the list element!
      if(CheckLabel) label=.checkLabel(l, label)
      # recurse on the same connection: the recursive call consumes lines up
      # to the matching closing brace
      l[[length(l)+1]]=.ParseAmirameshParameters(con,CheckLabel=CheckLabel)
      names(l)[length(l)]<-label
      if(ParametersOnly && label=="Parameters")
        break # we're done
      else next
    }
    # a trailing "}" on the same line ends the current section after this item
    if(isTRUE(items[length(items)]=="}")) {
      returnAfterParsing=TRUE
      items=items[-length(items)]
    }
    else returnAfterParsing=FALSE
    # ordinary item
    # Check first item (if there are any items)
    if(length(items)>0){
      firstItemFirstChar=substr(items[1],1,1)
      if(any(firstItemFirstChar==c("-",as.character(0:9)) )){
        # Get rid of any commas
        items=chartr(","," ",items)
        # convert to numeric if not a string
        items=as.numeric(items)
      } else if (firstItemFirstChar=="\""){
        # NOTE(review): with fixed=TRUE the pattern "\\}" is the literal two
        # characters backslash-brace, which never occurs here — presumably a
        # bare "}" was intended; confirm before changing.
        if(returnAfterParsing) thisLine=sub("\\}","",thisLine,fixed=TRUE)
        # dequote quoted string using scan
        items=scan(text=thisLine,what="",quiet=TRUE)[-1]
        # remove any commas
        items=items[items!=","]
        attr(items,"quoted")=TRUE
      }
    }
    # set the list element!
    if(CheckLabel)
      label=.checkLabel(l, label)
    l[[length(l)+1]]=items
    names(l)[length(l)]<-label
    if(returnAfterParsing) return(l)
  }
  # we should only get here once if we parse a valid hierarchy
  # (close() errors on an already-closed textConnection, hence try())
  try(close(con),silent=TRUE)
  return(l)
}
# Decode Amira HxByteRLE run-length encoded bytes into a raw vector of the
# specified length.
#
# Encoding (as implemented below): each run starts with a control byte.
#   * control in 1..127  -> the single following data byte is repeated
#                           `control` times
#   * control >= 128     -> the next (control - 128) bytes are a literal run,
#                           copied verbatim
#   * control == 0       -> invalid; raises an error
#
# @param d raw vector of compressed bytes
# @param uncompressedLength expected length of the decoded output
# @return raw vector of exactly uncompressedLength bytes
decode.rle<-function(d,uncompressedLength){
  out <- raw(uncompressedLength)
  written <- 0L
  pos <- 1L
  while (written < uncompressedLength) {
    control <- d[pos]
    pos <- pos + 1L
    if (control == 0L)
      stop(paste("byte at offset ", pos, " is 0!"))
    if (control > 0x7f) {
      # literal run: high bit set, remaining bits give the byte count
      run_len <- as.integer(control) - 128L
      chunk <- d[pos:(pos + run_len - 1L)]
      pos <- pos + run_len
    } else {
      # repeat run: replicate the next data byte `control` times
      chunk <- rep.int(d[pos], as.integer(control))
      pos <- pos + 1L
    }
    out[(written + 1L):(written + length(chunk))] <- chunk
    written <- written + length(chunk)
  }
  out
}
# Uncompress zlib compressed data (from file or memory) to memory
#
# @details zlib compressed data uses the same algorithm but a smaller header
# than gzip data.
# @details For connections, compressedLength must be supplied, but offset is
# ignored (i.e. you must seek beforehand)
# @details For files, if compressedLength is not supplied then \code{read.zlib}
# will attempt to read until the end of the file.
# @param compressed Path to compressed file, connection or raw vector.
# @param offset Byte offset in file on disk
# @param compressedLength Bytes of compressed data to read
# @param type The compression type. See ?memDecompress for details.
# @param ... Additional parameters passed to \code{\link{readBin}}
# @return raw vector of decompressed data
# sealso memDecompress
# @export
read.zlib<-function(compressed, offset=NA, compressedLength=NA, type='gzip', ...){
  # A raw vector is decompressed directly; anything else is first read into
  # a raw vector from a connection or a file on disk.
  if (!is.raw(compressed)) {
    if (inherits(compressed, 'connection')) {
      # caller must have seek()ed already and must say how much to read
      if (is.na(compressedLength))
        stop("Must supply compressedLength when reading from a connection")
      src <- compressed
    } else {
      # treat as a file path: open in binary mode and close on exit
      src <- file(compressed, open = 'rb')
      on.exit(close(src))
      if (is.na(offset)) {
        offset <- 0
      } else {
        seek(src, offset)
      }
      # default: read from offset to the end of the file
      if (is.na(compressedLength))
        compressedLength <- file.info(compressed)$size - offset
    }
    compressed <- readBin(src, what = raw(), n = compressedLength)
  }
  memDecompress(compressed, type = type, ...)
}
# Compress raw data, returning raw vector or writing to file
#
# @details The default value of \code{con=raw()} means that this function will
#   return a raw vector of compressed data if con is not specified.
# @param uncompressed \code{raw} vector of data
# @param con Raw vector, open (binary) connection, or path to output file
# @return A raw vector (if \code{con} is a raw vector) or invisibly NULL.
# @seealso Depends on \code{\link{memCompress}}
# @export
write.zlib<-function(uncompressed, con=raw()){
  if(!inherits(con, "connection") && !is.raw(con)){
    # con is a file path: open it for binary writing and close on exit.
    # (BUGFIX: this previously called open(con, open='wb'), but open() is a
    # generic that only operates on existing connection objects, so writing
    # to a path always errored. file() actually creates the connection.)
    con <- file(con, open='wb')
    on.exit(close(con))
  }
  d <- memCompress(uncompressed, type='gzip')
  if(is.raw(con)) return(d)
  writeBin(object=d, con=con)
}
#' Check if file is AmiraMesh format
#'
#' @details Tries to be as fast as possible by reading only first 11 bytes and
#' checking if they equal to "# AmiraMesh" or (deprecated) "# HyperMesh".
#' @param f Path to one or more files to be tested \strong{or} an array of raw
#' bytes, for one file only.
#' @param bytes optional raw vector of at least 11 bytes from the start of a
#' single file (used in preference to reading file \code{f}).
#' @return logical
#' @export
#' @family amira
is.amiramesh<-function(f=NULL, bytes=NULL) {
  # Raw bytes can only stand in for a single file
  if (!is.null(bytes) && is.character(f) && length(f) > 1) {
    stop("Can only check bytes for a single file")
  }
  # Prefer the supplied bytes over (re)reading from the file path(s)
  target <- if (is.null(bytes)) f else bytes
  generic_magic_check(target, c("# HyperMesh", "# AmiraMesh"))
}
#' Return the type of an AmiraMesh file on disk or a parsed header
#'
#' @details Note that when checking a file we first test if it is an AmiraMesh
#' file (fast, especially when \code{bytes!=NULL}) before reading the header
#' and determining content type (slow).
#' @param x Path to files on disk or a single pre-parsed parameter list
#' @param bytes A raw vector containing at least 11 bytes from the start of the
#' file.
#' @return character vector (NA_character_ when file invalid)
#' @export
#' @family amira
amiratype<-function(x, bytes=NULL){
  # A pre-parsed header (list) can be passed directly; otherwise x is one or
  # more file paths.
  if(is.list(x)) h<-x
  else {
    # we have a file, optionally with some raw data
    if(!is.null(bytes) && length(x)>1)
      stop("Can only accept bytes argument for single file")
    if(length(x)>1) return(sapply(x, amiratype))
    if(is.null(bytes) || length(bytes)<14) {
      # read just enough bytes for the magic check; gzfile() transparently
      # handles gzip-compressed files as well as plain ones
      f=gzfile(x, open='rb')
      on.exit(close(f))
      bytes=readBin(f, what=raw(), n=14L)
    }
    if(!isTRUE(is.amiramesh(bytes))) {
      if(generic_magic_check(bytes, "# HyperSurface")) {
        return("HxSurface")
      } else return(NA_character_)
    }
    h=try(read.amiramesh.header(x, Verbose=FALSE, Parse=FALSE), silent=TRUE)
    if(inherits(h, 'try-error')) return(NA_character_)
  }
  # A ContentType parameter (if present) identifies the data type directly
  ct=grep("ContentType", h, value=TRUE, fixed=TRUE)
  if(length(ct)){
    ct=sub(".*ContentType", "", ct[1])
    # Strip everything except letters and spaces (quotes, braces, commas).
    # BUGFIX: was the accidental byte range [A-z], which also matched the
    # punctuation characters [ \ ] ^ _ ` between Z and a in ASCII.
    ct=gsub("[^A-Za-z ]+", " ", ct)
    ct=scan(text=ct, what="", quiet=TRUE)
    if(length(ct)==0) stop('unable to parse ContentType')
    return(ct[1])
  }
  # Otherwise fall back to CoordType, which marks a field on a lattice
  ct=grep("CoordType", h, value=TRUE, fixed=TRUE)
  if(length(ct)){
    ct=sub(".*CoordType", "", ct[1])
    ct=gsub("[^A-Za-z ]+", " ", ct)
    ct=scan(text=ct, what="", quiet=TRUE)
    if(length(ct)==0) stop('unable to parse CoordType')
    return(paste0(ct[1], ".field"))
  }
  NA_character_
}
# Factory: build a predicate function that tests whether file(s) are of a
# given Amira content type (NA results from amiratype map to FALSE).
is.amiratype<-function(type) {
  function(f, bytes=NULL){
    detected <- amiratype(f, bytes=bytes)
    # isTRUE() turns NA (unreadable/unknown file) into FALSE
    sapply(detected, function(x) isTRUE(x == type))
  }
}
#' Write a 3D data object to an AmiraMesh format file
#' @inheritParams write.im3d
#' @param enc Encoding of the data. NB "raw" and "binary" are synonyms.
#' @param dtype Data type to write to disk
#' @param endian Endianness of data block. Defaults to current value of
#' \code{.Platform$endian}.
#' @param WriteNrrdHeader Whether to write a separate detached nrrd header next
#' to the AmiraMesh file allowing it to be opened by a NRRD reader. See
#' details.
#' @details Note that only \code{'raw'} or \code{'text'} format data can
#' accommodate a detached NRRD format header since Amira's HxZip format is
#' subtly different from NRRD's gzip encoding. There is a full description
#' of the detached NRRD format in the help for \code{\link{write.nrrd}}.
#' @export
#' @seealso \code{\link{.Platform}, \link{read.amiramesh}, \link{write.nrrd}}
#' @examples
#' d=array(rnorm(1000), c(10, 10, 10))
#' tf=tempfile(fileext='.am')
#' write.amiramesh(im3d(d, voxdims=c(0.5,0.5,1)), file=tf, WriteNrrdHeader=TRUE)
#' d2=read.nrrd(paste(tf, sep='', '.nhdr'))
#' all.equal(d, d2, tol=1e-6)
write.amiramesh<-function(x, file, enc=c("binary","raw","text","hxzip"),
    dtype=c("float","byte", "short", "ushort", "int", "double"),
    endian=.Platform$endian, WriteNrrdHeader=FALSE){
  enc=match.arg(enc)
  endian=match.arg(endian, c('big','little'))
  # Magic line: the version string encodes encoding and (for binary) byte order.
  # Note this cat() to a filename (not a connection) truncates any existing file.
  if(enc=='text') cat("# AmiraMesh ASCII 1.0\n\n",file=file)
  else if(endian=='little') cat("# AmiraMesh BINARY-LITTLE-ENDIAN 2.1\n\n",file=file)
  else cat("# AmiraMesh 3D BINARY 2.0\n\n",file=file)
  fc=file(file,open="at") # ie append, text mode
  cat("# Created by write.amiramesh\n\n",file=fc)
  # a list input is assumed to carry the data in its $estimate element
  if(!is.list(x)) d=x else d=x$estimate
  # Find data type and size for Amira
  dtype=match.arg(dtype)
  dtypesize<-c(4,1,2,2,4,8)[which(dtype==c("float","byte", "short","ushort", "int", "double"))]
  # Set the data mode which will be used in the as.vector call at the
  # moment that the binary data is written out.
  if(dtype%in%c("byte","short","ushort","int")) dmode="integer"
  if(dtype%in%c("float","double")) dmode="numeric"
  lattice=dim(d)
  cat("define Lattice",lattice,"\n",file=fc)
  cat("Parameters { CoordType \"uniform\",\n",file=fc)
  # note Amira's definition for the bounding box:
  # the range of the voxel centres.
  # So eval.points should correspond to the CENTRE of the
  # voxels at which the density is evaluated
  cat("\t# BoundingBox is xmin xmax ymin ymax zmin zmax\n",file=fc)
  BoundingBox=NULL
  if(!is.null(attr(x,"BoundingBox"))){
    BoundingBox=attr(x,"BoundingBox")
  } else if(is.list(d) && !is.null(d$eval.points)){
    BoundingBox=as.vector(apply(d$eval.points,2,range))
  }
  if(!is.null(BoundingBox)) cat("\t BoundingBox",BoundingBox,"\n",file=fc)
  cat("}\n\n",file=fc)
  if(enc=="hxzip"){
    # HxZip: zlib-compress the endian-converted bytes now so that the
    # compressed byte count can be recorded in the data definition line
    raw_data=writeBin(as.vector(d,mode=dmode),raw(),size=dtypesize,endian=endian)
    zlibdata=write.zlib(raw_data)
    cat("Lattice { ",dtype," ScalarField } = @1(HxZip,",length(zlibdata),")\n\n",sep="",file=fc)
  } else cat("Lattice {",dtype,"ScalarField } = @1\n\n",file=fc)
  cat("@1\n",file=fc)
  close(fc)
  # Write a Nrrd header to accompany the Amira file if desired
  # see http://teem.sourceforge.net/nrrd/
  if(WriteNrrdHeader) {
    if(enc=="hxzip") stop("Nrrd cannot handle Amira's HxZip encoding (which is subtly different from gzip)")
    nrrdfile=paste(file,sep=".","nhdr")
    cat("NRRD0004\n",file=nrrdfile)
    fc=file(nrrdfile,open="at") # ie append, text mode
    nrrdType=ifelse(dtype=="byte","uint8",dtype)
    cat("encoding:", ifelse(enc=="text","text","raw"),"\n",file=fc)
    cat("type: ",nrrdType,"\n",sep="",file=fc)
    cat("endian: ",endian,"\n",sep="",file=fc)
    # Important - this sets the offset in the AmiraMesh file from which
    # to start reading data
    # (i.e. the size of everything written so far = the text header)
    cat("byte skip:",file.info(file)$size,"\n",file=fc)
    cat("dimension: ",length(lattice),"\n",sep="",file=fc)
    cat("sizes:",lattice,"\n",file=fc)
    voxdims=voxdims(x)
    if(!is.null(voxdims)) cat("spacings:",voxdims,"\n",file=fc)
    if(!is.null(BoundingBox)){
      cat("axis mins:",matrix(BoundingBox,nrow=2)[1,],"\n",file=fc)
      cat("axis maxs:",matrix(BoundingBox,nrow=2)[2,],"\n",file=fc)
    }
    cat("data file: ",basename(file),"\n",sep="",file=fc)
    cat("\n",file=fc)
    close(fc)
  }
  # Finally append the data block itself in the requested encoding
  if(enc=='text'){
    write(as.vector(d, mode=dmode), ncolumns=1, file=file, append=TRUE)
  } else {
    fc=file(file,open="ab") # ie append, bin mode
    if(enc=="hxzip")
      writeBin(zlibdata, fc, size=1, endian=endian)
    else
      writeBin(as.vector(d, mode=dmode), fc, size=dtypesize, endian=endian)
    close(fc)
  }
}
|
r=359.77
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7gp4v/media/images/d7gp4v-006/svc:tesseract/full/full/359.77/default.jpg Accept:application/hocr+xml
| /ark_87287/d7gp4v/d7gp4v-006/rotated.r | permissive | ucd-library/wine-price-extraction | R | false | false | 199 | r | r=359.77
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7gp4v/media/images/d7gp4v-006/svc:tesseract/full/full/359.77/default.jpg Accept:application/hocr+xml
|
\name{AUDC}
\alias{AUDC}
\title{Augmented Uniform Design Construction}
\usage{
AUDC(X0,n,s,q,init,initX,crit,maxiter,hits_ratio,vis)
}
\description{
This function takes n, s, q, an unchanged initial design and other arguments, and outputs a list (described below).
}
\arguments{
\item{X0}{an integer matrix R object}
\item{n}{an integer R object}
\item{crit}{a character R object. Type of criterion to use.
"maximin" -- maximin Discrepancy ;
"CL2" --Centered L2 Discrepancy ;
"MD2" --Mixture L2 Discrepancy ;}
\item{maxiter}{a positive integer R object}
\item{hits_ratio}{a float R object. Default value is 0.1, which is the ratio of accepted design changes in the inner for loop. Details can be found in (Zhang, A. and Li, H. (2017). UniDOE: an R package for uniform design construction via stochastic optimization.)}
\item{vis}{a boolean R object}
}
\value{
A list that contains Initial design matrix(initial_design),optimal design matrix(final_design), initial criterion value(initial_criterion), final criterion value(criterion_value) and criterion list(criterion_lists) in update process.
}
\examples{
#e.g.1.
#Set a fixed initial matrix:
n=12#(must be multiples of q)
mat0 = matrix(c(1,1,1,2,2,2,3,3,3),ncol=3,byrow=TRUE)# nb. of columns=s
crit = "MD2"#(Mixture L2 criteria)
list1=AUDC(X0=mat0,n,crit=crit)
#e.g.2.
#Set a fixed initial matrix with visualization:
n=8#(must be multiples of q)
mat0 = matrix(c(1,1,1,2,2,2,3,3,3),ncol=3,byrow=TRUE)# nb. of columns=s
crit = "MD2"#(Mixture L2 criteria)
vis= TRUE
list1=AUDC(X0=mat0,n,crit=crit,vis=vis)
}
| /man/AUDC.Rd | permissive | HAOYU-LI/UniDOE | R | false | false | 1,650 | rd | \name{AUDC}
\alias{AUDC}
\title{Augmented Uniform Design Construction}
\usage{
AUDC(X0,n,s,q,init,initX,crit,maxiter,hits_ratio,vis)
}
\description{
This function takes n,s,q; a unchanged initial design and other arguments to output a list(described below).
}
\arguments{
\item{X0}{an integer matrix R object}
\item{n}{an integer R object}
\item{crit}{an character R object. Type of criterion to use.
"maximin" -- maximin Discrepancy ;
"CL2" --Centered L2 Discrepancy ;
"MD2" --Mixture L2 Discrepancy ;}
\item{maxiter}{a positive integer R object}
\item{hits_ratio}{an float R object. Default value is 0.1, which is the ratio to accept changes of design in inner for loop. Details can be checked in (Zhang, A. and Li, H. (2017). UniDOE: an R package for uniform design construction via stochastic optimization.)}
\item{vis}{an boolean R object}
}
\value{
A list that contains Initial design matrix(initial_design),optimal design matrix(final_design), initial criterion value(initial_criterion), final criterion value(criterion_value) and criterion list(criterion_lists) in update process.
}
\examples{
#e.g.1.
#Set a fixed initial matrix:
n=12#(must be multiples of q)
mat0 = matrix(c(1,1,1,2,2,2,3,3,3),ncol=3,byrow=TRUE)# nb. of columns=s
crit = "MD2"#(Mixture L2 criteria)
list1=AUDC(X0=mat0,n,crit=crit)
#e.g.2.
#Set a fixed initial matrix with visualization:
n=8#(must be multiples of q)
mat0 = matrix(c(1,1,1,2,2,2,3,3,3),ncol=3,byrow=TRUE)# nb. of columns=s
crit = "MD2"#(Mixture L2 criteria)
vis= TRUE
list1=AUDC(X0=mat0,n,crit=crit,vis=vis)
}
|
# Simulation script: empirical type-I error under H0 for a non-inferiority
# comparison of two proportions, scenario 18, with 20% dropout handled by
# multiple imputation (mice). Requires the project package 'nibinom'
# (sim_cont, wn_ci, mpars, miss_gen_an) plus tidyverse packages.
library(dplyr)
# Precomputed per-scenario design settings (p_C, M2, n.arm) — built elsewhere
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025       # one-sided significance level for the CI
method <- 'wn'       # CI method code ('wn'), passed through to the analysis
scenario <- 18       # scenario id selected from ss.bounds
param <- 1
anal_type <- "mice"  # analysis type used to select missingness parameters
# Design row for this method/scenario
ss <- ss.bounds%>%
  dplyr::filter(method == "wn", scenario.id == scenario)
do_val <- 0.2        # target dropout rate
# 10000 Monte-Carlo replicates, parallelised across local cores
x1 <- parallel::mclapply(X = 1:10000,
                         mc.cores = parallel::detectCores() - 1,
                         FUN= function(x) {
                           library(tidyr, warn.conflicts = F, quietly = T)
                           library(dplyr, warn.conflicts = F, quietly = T)
                           library(purrr, warn.conflicts = F, quietly = T)
                           library(reshape2, warn.conflicts = F, quietly = T)
                           library(MASS, warn.conflicts = F, quietly = T)
                           library(nibinom)
                           # reproducible per-replicate seed
                           set.seed(10000*scenario + x)
                           #generate full data with desired correlation structure
                           # p_T = p_C - M2 places the truth exactly on the NI margin (H0)
                           dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
                                           mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                           # CI from the complete (pre-dropout) data, kept for reference
                           ci.full <- dt0%>%wn_ci(ss$M2,'y', alpha)
                           #define missingness parameters and do rates
                           m_param <- mpars(do = do_val, atype = anal_type)
                           #impose missing values and perform analysis
                           # first MNAR parameterisation (row 1 of m_param; mu_T = 0.68)
                           ci.miss.mnar1 <- m_param%>%
                             slice(1)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wn_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 0.68, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           # second MNAR parameterisation (row 2 of m_param; mu_T = 1.65)
                           ci.miss.mnar2 <- m_param%>%
                             slice(2)%>%
                             dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                 miss_gen_an, dt = dt0, do = do_val,
                                                                 ci_method = wn_ci,
                                                                 sing_anal = F,
                                                                 mice_anal = T,
                                                                 m2 = ss$M2, seed = 10000*scenario + x,
                                                                 seed_mice = 10000*scenario + x,
                                                                 method = method,
                                                                 alpha = alpha,
                                                                 n_mi = 2,
                                                                 m_mi = 100,
                                                                 mu_T = 1.65, sd_T = 0.05))%>%
                             dplyr::select(missing, results)
                           # combine both missingness settings and tag with scenario metadata
                           ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
                             dplyr::mutate(scenario.id = ss$scenario.id,
                                           p_C = ss$p_C,
                                           M2 = ss$M2,
                                           type = 't.H0',
                                           do = do_val,
                                           sim.id = x)
                           ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                           return(ci.all)
                         })
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'wn')
| /sim_pgms/wn/do20/2xcontH0_sc18_do20_mice.R | no_license | yuliasidi/nibinom_apply | R | false | false | 3,318 | r | library(dplyr)
ss.bounds <- readRDS("ss.bounds.rds")
alpha <- 0.025
method <- 'wn'
scenario <- 18
param <- 1
anal_type <- "mice"
ss <- ss.bounds%>%
dplyr::filter(method == "wn", scenario.id == scenario)
do_val <- 0.2
# Monte-Carlo loop: 10,000 replicates run in parallel.  Each replicate
# simulates one complete trial, computes the full-data CI, then imposes
# missing values under two MNAR mechanisms and re-analyses with MICE.
x1 <- parallel::mclapply(X = 1:10000,
                        mc.cores = parallel::detectCores() - 1,
                        FUN= function(x) {
                          # packages must be attached inside each worker process
                          library(tidyr, warn.conflicts = F, quietly = T)
                          library(dplyr, warn.conflicts = F, quietly = T)
                          library(purrr, warn.conflicts = F, quietly = T)
                          library(reshape2, warn.conflicts = F, quietly = T)
                          library(MASS, warn.conflicts = F, quietly = T)
                          library(nibinom)
                          # replicate-specific, reproducible seed
                          set.seed(10000*scenario + x)
                          #generate full data with desired correlation structure
                          dt0 <- sim_cont(p_C = ss$p_C, p_T = ss$p_C - ss$M2, n_arm = ss$n.arm,
                                          mu1 = 4, mu2 = 100, sigma1 = 1, sigma2 = 20, r12 = -0.3, b1 = 0.1, b2 = -0.01)
                          # CI on the complete (pre-missingness) data
                          ci.full <- dt0%>%wn_ci(ss$M2,'y', alpha)
                          #define missingness parameters and do rates
                          m_param <- mpars(do = do_val, atype = anal_type)
                          #impose missing values and perform analysis
                          # MNAR mechanism 1: row 1 of m_param, mu_T = 0.68
                          ci.miss.mnar1 <- m_param%>%
                            slice(1)%>%
                            dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                miss_gen_an, dt = dt0, do = do_val,
                                                                ci_method = wn_ci,
                                                                sing_anal = F,
                                                                mice_anal = T,
                                                                m2 = ss$M2, seed = 10000*scenario + x,
                                                                seed_mice = 10000*scenario + x,
                                                                method = method,
                                                                alpha = alpha,
                                                                n_mi = 2,
                                                                m_mi = 100,
                                                                mu_T = 0.68, sd_T = 0.05))%>%
                            dplyr::select(missing, results)
                          # MNAR mechanism 2: row 2 of m_param, mu_T = 1.65
                          ci.miss.mnar2 <- m_param%>%
                            slice(2)%>%
                            dplyr::mutate(results = purrr::pmap(list(b_trt=bt, b_y=by, b_x1=bx1, b_x2=bx2, b_ty = b.ty),
                                                                miss_gen_an, dt = dt0, do = do_val,
                                                                ci_method = wn_ci,
                                                                sing_anal = F,
                                                                mice_anal = T,
                                                                m2 = ss$M2, seed = 10000*scenario + x,
                                                                seed_mice = 10000*scenario + x,
                                                                method = method,
                                                                alpha = alpha,
                                                                n_mi = 2,
                                                                m_mi = 100,
                                                                mu_T = 1.65, sd_T = 0.05))%>%
                            dplyr::select(missing, results)
                          # stack both mechanisms and tag with replicate metadata
                          ci.miss <- bind_rows(ci.miss.mnar1, ci.miss.mnar2)%>%
                            dplyr::mutate(scenario.id = ss$scenario.id,
                                          p_C = ss$p_C,
                                          M2 = ss$M2,
                                          type = 't.H0',
                                          do = do_val,
                                          sim.id = x)
                          # return both the full-data and missing-data results
                          ci.all <- list(ci.full, ci.miss)%>%purrr::set_names(c("ci.full","ci.miss"))
                          return(ci.all)
                        })
#to summarize type-I error and mean relative bias from the simulated data
source('funs/h0.mice.sum.R')
h0.mice.sum(x1, method = 'wn')
|
"ozonemonthly" <-
structure(as.integer(c(313, 311, 370, 359, 334, 296, 288, 274,
NA, NA, NA, 301, 284, 320, 394, 347, 332, 301, 280, 256, NA,
NA, NA, NA, NA, 305, 349, 378, 341, 328, 297, NA, NA, NA, NA,
NA, NA, 302, 303, 340, 322, 298, 295, NA, NA, NA, NA, NA, 287,
292, 345, 375, 318, 303, 304, NA, NA, NA, NA, NA, 267, 307, 332,
343, 310, 297, 329, NA, NA, NA, NA, NA, NA, 322, 380, 376, 319,
302, 305, 287, NA, NA, NA, 313, NA, 300, 347, 350, 316, 300,
325, 303, NA, NA, NA, 271, NA, 308, 401, 356, 314, 294, 296,
NA, NA, NA, NA, NA, NA, 273, 298, 335, 299, 289, 283, 286, NA,
NA, NA, NA, 288, 307, 338, 344, 299, 284, 268, NA, NA, NA, NA,
NA, NA, 313, 357, 333, 318, 285, 289, 279, NA, NA, NA, 284, 279,
292, 385, 348, 311, 290, 280, 244, NA, NA, NA, NA, 285, 274,
297, 314, 305, 285, 268, 258, NA, NA, NA, 307, NA, 274, 355,
344, 317, 312, 274, 277, NA, NA, NA, NA, 278, 290, 373, 344,
315, 300, 300, 312, NA, NA, NA, 304, 265, 295, 375, 349, 306,
293, 286, 277, NA, NA, NA, 272, 263, 271, 326, 334, 307, 275,
262, 242, NA, NA, NA, NA, 244, 272, 337, 351, 320, 275, 279,
NA, NA, NA, NA, NA, 267, 303, 309, 338, 314, 272, 257, NA, NA,
NA, NA, NA, 265, 283, 326, 335, 318, 280, 275, 253, NA, NA, NA,
290, 239, 251, 332, 360, 310, 305, 282, 253, NA, NA, NA, NA,
264, 284, 345, 337, 295, 283, 278, 283, NA, NA, NA, NA, 232,
263, 323, 352, 324, 292, 290, NA, NA, NA, NA, 328, 236, 226,
293, 340, 299, 280, 253, NA, NA, NA, NA, NA, 241, 237, 285, 326,
290, 278, 260, NA, NA, NA, NA, NA, 210, 218, 268, 322, 308, 292,
278, 266, NA, NA, NA, NA, 228, 195, 289, 325, 301, 272, 273,
267, NA, NA, NA, NA, 215, 194, 248, 322, 301, 269, 263, 245,
NA, NA, NA, NA, 217, 185, 215, 304, 286, 273, 247, 227, NA, NA,
NA, 253, 212, 233, 282, 309, 301, 278, 274, 274, NA, NA, NA,
254, 182, 150, 188, 287, 286, 264, 271, 265, NA, NA, NA, 242,
207, 216, 312, 323, 284, 281, 260, 274, NA, NA, NA, 270, 186,
150, 255, 295, 290, 266, 254, 254, NA, NA, NA, 259, 173, 173,
207, 246, 281, 257, 263, 233, NA, NA, NA, 204, 163, 137, 232,
296, 271, 283, 281, 257, NA, NA, NA, 185, 152, 147, 206, 270,
284, 275, 277, 256, NA, NA, NA, 209, 167, 122, 179, 285, 278,
264, 255, 284, NA, NA, NA, 197, 152, 126, 217, 316, 278, 269,
256, 254, NA, NA, NA, 218, 160, 130, 164, 252, 261, 249, 246,
226, NA, NA, NA, 173, 155, 148, 181, 260, 278, 265, 247, 243,
NA, NA, NA, 218, 171, 141, 210, 286, 267, 262, 264, 255, NA,
NA, NA, 221, 162, 140, 183, 255, 272, 259, 254, 267, NA, NA,
NA, 205, 172, 143, 172, 254, 281, 258, 250, 256, NA, NA, NA,
179, 151, 137, 267, 299, 286, 261, 251, 245, NA, NA, NA, 224,
148, 138, 209, 265, 283, 263, 246, 225, NA, NA, NA, 228, 213,
224, 329, 306, 282, 280, 268, 246, NA, NA, NA, 205, 155, 158,
229, 292, 277, 271, 262, 242, NA, NA, NA, 242, 173, 191, 222,
282, 275, 262, 253, 242, NA, NA, NA, 202, 157, 156)), .Names = c("V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6"), .Tsp = c(1956.66666666667,
2005.75, 12), class = "ts")
| /data/ozonemonthly.R | no_license | jverzani/UsingR | R | false | false | 6,665 | r | "ozonemonthly" <-
structure(as.integer(c(313, 311, 370, 359, 334, 296, 288, 274,
NA, NA, NA, 301, 284, 320, 394, 347, 332, 301, 280, 256, NA,
NA, NA, NA, NA, 305, 349, 378, 341, 328, 297, NA, NA, NA, NA,
NA, NA, 302, 303, 340, 322, 298, 295, NA, NA, NA, NA, NA, 287,
292, 345, 375, 318, 303, 304, NA, NA, NA, NA, NA, 267, 307, 332,
343, 310, 297, 329, NA, NA, NA, NA, NA, NA, 322, 380, 376, 319,
302, 305, 287, NA, NA, NA, 313, NA, 300, 347, 350, 316, 300,
325, 303, NA, NA, NA, 271, NA, 308, 401, 356, 314, 294, 296,
NA, NA, NA, NA, NA, NA, 273, 298, 335, 299, 289, 283, 286, NA,
NA, NA, NA, 288, 307, 338, 344, 299, 284, 268, NA, NA, NA, NA,
NA, NA, 313, 357, 333, 318, 285, 289, 279, NA, NA, NA, 284, 279,
292, 385, 348, 311, 290, 280, 244, NA, NA, NA, NA, 285, 274,
297, 314, 305, 285, 268, 258, NA, NA, NA, 307, NA, 274, 355,
344, 317, 312, 274, 277, NA, NA, NA, NA, 278, 290, 373, 344,
315, 300, 300, 312, NA, NA, NA, 304, 265, 295, 375, 349, 306,
293, 286, 277, NA, NA, NA, 272, 263, 271, 326, 334, 307, 275,
262, 242, NA, NA, NA, NA, 244, 272, 337, 351, 320, 275, 279,
NA, NA, NA, NA, NA, 267, 303, 309, 338, 314, 272, 257, NA, NA,
NA, NA, NA, 265, 283, 326, 335, 318, 280, 275, 253, NA, NA, NA,
290, 239, 251, 332, 360, 310, 305, 282, 253, NA, NA, NA, NA,
264, 284, 345, 337, 295, 283, 278, 283, NA, NA, NA, NA, 232,
263, 323, 352, 324, 292, 290, NA, NA, NA, NA, 328, 236, 226,
293, 340, 299, 280, 253, NA, NA, NA, NA, NA, 241, 237, 285, 326,
290, 278, 260, NA, NA, NA, NA, NA, 210, 218, 268, 322, 308, 292,
278, 266, NA, NA, NA, NA, 228, 195, 289, 325, 301, 272, 273,
267, NA, NA, NA, NA, 215, 194, 248, 322, 301, 269, 263, 245,
NA, NA, NA, NA, 217, 185, 215, 304, 286, 273, 247, 227, NA, NA,
NA, 253, 212, 233, 282, 309, 301, 278, 274, 274, NA, NA, NA,
254, 182, 150, 188, 287, 286, 264, 271, 265, NA, NA, NA, 242,
207, 216, 312, 323, 284, 281, 260, 274, NA, NA, NA, 270, 186,
150, 255, 295, 290, 266, 254, 254, NA, NA, NA, 259, 173, 173,
207, 246, 281, 257, 263, 233, NA, NA, NA, 204, 163, 137, 232,
296, 271, 283, 281, 257, NA, NA, NA, 185, 152, 147, 206, 270,
284, 275, 277, 256, NA, NA, NA, 209, 167, 122, 179, 285, 278,
264, 255, 284, NA, NA, NA, 197, 152, 126, 217, 316, 278, 269,
256, 254, NA, NA, NA, 218, 160, 130, 164, 252, 261, 249, 246,
226, NA, NA, NA, 173, 155, 148, 181, 260, 278, 265, 247, 243,
NA, NA, NA, 218, 171, 141, 210, 286, 267, 262, 264, 255, NA,
NA, NA, 221, 162, 140, 183, 255, 272, 259, 254, 267, NA, NA,
NA, 205, 172, 143, 172, 254, 281, 258, 250, 256, NA, NA, NA,
179, 151, 137, 267, 299, 286, 261, 251, 245, NA, NA, NA, 224,
148, 138, 209, 265, 283, 263, 246, 225, NA, NA, NA, 228, 213,
224, 329, 306, 282, 280, 268, 246, NA, NA, NA, 205, 155, 158,
229, 292, 277, 271, 262, 242, NA, NA, NA, 242, 173, 191, 222,
282, 275, 262, 253, 242, NA, NA, NA, 202, 157, 156)), .Names = c("V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7",
"V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5",
"V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1", "V2", "V3",
"V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V1",
"V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11",
"V12", "V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9",
"V10", "V11", "V12", "V1", "V2", "V3", "V4", "V5", "V6"), .Tsp = c(1956.66666666667,
2005.75, 12), class = "ts")
|
#!/usr/bin/env Rscript
# Extract gene coordinate tables from a GENCODE v19 GTF:
#   * coding.genes.txt.gz - one row per protein-coding transcript
#   * all.genes.txt.gz    - one row per gene, spanning min(start)..max(end)
#     over all of its transcripts
options(stringsAsFactors = FALSE)
library(dplyr)
library(tidyr)
library(readr)
library(rtracklayer)
## select coding genes in GTF
gtf.file <- 'gencode.v19.genes.patched_contigs.gtf.gz'
out.file <- 'coding.genes.txt.gz'
out.file.2 <- 'all.genes.txt.gz'
gtf.tab <- readGFF(gtf.file, tags = c('gene_id', 'gene_name', 'transcript_name', 'gene_type'))
# Protein-coding transcripts, with the Ensembl id stripped of its ".version"
# suffix (e.g. ENSG00000000003.10 -> ENSG00000000003) via separate().
coding.genes <- gtf.tab %>% mutate(chr = seqid, ensg = gene_id) %>%
    filter(gene_type == 'protein_coding', type == 'transcript') %>%
    separate(ensg, into = c('ensg', 'remove'), sep = '[.]') %>%
    select(chr, start, end, strand, ensg, gene_name) %>%
    arrange(chr, start)
# All genes (any biotype), collapsing transcripts to one interval per gene.
all.genes <- gtf.tab %>% mutate(chr = seqid, ensg = gene_id) %>%
    filter(type == 'transcript') %>%
    separate(ensg, into = c('ensg', 'remove'), sep = '[.]') %>%
    group_by(chr, strand, ensg, gene_name) %>%
    summarize(start = min(start), end = max(end)) %>%
    arrange(chr, start)
# The `path =` argument of write_tsv() is deprecated since readr 1.4; the
# positional second argument works across all readr versions.
write_tsv(coding.genes, out.file)
write_tsv(all.genes, out.file.2)
| /make.coding.genes.R | no_license | YPARK/cammel-gwas | R | false | false | 1,118 | r | #!/usr/bin/env Rscript
options(stringsAsFactors = FALSE)
library(dplyr)
library(tidyr)
library(readr)
library(rtracklayer)
## select coding genes in GTF
gtf.file <- 'gencode.v19.genes.patched_contigs.gtf.gz'
out.file <- 'coding.genes.txt.gz'
out.file.2 <- 'all.genes.txt.gz'
gtf.tab <- readGFF(gtf.file, tags = c('gene_id', 'gene_name', 'transcript_name', 'gene_type'))
coding.genes <- gtf.tab %>% mutate(chr = seqid, ensg = gene_id) %>%
filter(gene_type == 'protein_coding', type == 'transcript') %>%
separate(ensg, into = c('ensg', 'remove'), sep = '[.]') %>%
select(chr, start, end, strand, ensg, gene_name) %>%
arrange(chr, start)
all.genes <- gtf.tab %>% mutate(chr = seqid, ensg = gene_id) %>%
filter(type == 'transcript') %>%
separate(ensg, into = c('ensg', 'remove'), sep = '[.]') %>%
group_by(chr, strand, ensg, gene_name) %>%
summarize(start = min(start), end = max(end)) %>%
arrange(chr, start)
write_tsv(coding.genes, path = out.file)
write_tsv(all.genes, path = out.file.2)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{extractDrugStandardComplete}
\alias{extractDrugStandardComplete}
\title{Calculate the Standard Molecular Fingerprints (in Complete Format)}
\usage{
extractDrugStandardComplete(molecules, depth = 6, size = 1024,
silent = TRUE)
}
\arguments{
\item{molecules}{Parsed molecule object.}
\item{depth}{The search depth. Default is \code{6}.}
\item{size}{The length of the fingerprint bit string. Default is \code{1024}.}
\item{silent}{Logical. Whether the calculating process
should be shown or not, default is \code{TRUE}.}
}
\value{
An integer vector or a matrix. Each row represents one molecule,
the columns represent the fingerprints.
}
\description{
Calculate the Standard Molecular Fingerprints (in Complete Format)
}
\details{
Calculate the standard molecular fingerprints.
Considers paths of a given length.
These are hashed fingerprints, with a default length of 1024.
}
\examples{
\donttest{
smi = system.file('vignettedata/FDAMDD.smi', package = 'Rcpi')
mol = readMolFromSmi(smi, type = 'mol')
fp = extractDrugStandardComplete(mol)
dim(fp)}
}
\author{
Nan Xiao <\url{http://r2s.name}>
}
\seealso{
\link{extractDrugStandard}
}
\keyword{extractDrugStandardComplete}
| /man/extractDrugStandardComplete.Rd | no_license | MaythaNaif/Rcpi | R | false | false | 1,233 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{extractDrugStandardComplete}
\alias{extractDrugStandardComplete}
\title{Calculate the Standard Molecular Fingerprints (in Complete Format)}
\usage{
extractDrugStandardComplete(molecules, depth = 6, size = 1024,
silent = TRUE)
}
\arguments{
\item{molecules}{Parsed molucule object.}
\item{depth}{The search depth. Default is \code{6}.}
\item{size}{The length of the fingerprint bit string. Default is \code{1024}.}
\item{silent}{Logical. Whether the calculating process
should be shown or not, default is \code{TRUE}.}
}
\value{
An integer vector or a matrix. Each row represents one molecule,
the columns represent the fingerprints.
}
\description{
Calculate the Standard Molecular Fingerprints (in Complete Format)
}
\details{
Calculate the standard molecular fingerprints.
Considers paths of a given length.
This is hashed fingerprints, with a default length of 1024.
}
\examples{
\donttest{
smi = system.file('vignettedata/FDAMDD.smi', package = 'Rcpi')
mol = readMolFromSmi(smi, type = 'mol')
fp = extractDrugStandardComplete(mol)
dim(fp)}
}
\author{
Nan Xiao <\url{http://r2s.name}>
}
\seealso{
\link{extractDrugStandard}
}
\keyword{extractDrugStandardComplete}
|
# Load libraries and the case/control CNV call tables --------------------
library(tidyverse)
path <- '/home/san/halinejad/Desktop/Dashti/somatic2'
pcnt <- paste0(path, '/AGRE_cnv_control.csv')
pcs <- paste0(path, '/AGRE_cnv_case.csv')
input_case <- read.csv(pcs, header = TRUE)
input_control <- read.csv(pcnt, header = TRUE)
# Number of distinct patients carrying at least one duplication ("Dup") in
# each cohort; these are the Fisher-test denominators used further below.
count_dup_patients <- function(cnv) {
  length(unique(cnv$Patient.Id[cnv$CNV.Type == 'Dup']))
}
no_case_Dup <- count_dup_patients(input_case)
no_control_Dup <- count_dup_patients(input_control)
# Restrict both tables to chromosome 21 and compute the base-pair window
# [st, en] covered by any CNV in either cohort (ln = window length).
input_case <- subset(input_case, input_case$chr.1 == 21)
input_control <- subset(input_control, input_control$chr.1 == 21)
st <- min(input_case$start, input_control$start)
en <- max(input_case$end, input_control$end)
ln <- en - st + 1
#==========Choose Duplications================
case_Dup <- subset(input_case, input_case$CNV.Type == 'Dup')
control_Dup <- subset(input_control, input_control$CNV.Type == 'Dup')
rm(input_case, input_control)
gc()
#==========Create and fill coverage matrix====
# Column layout: 1 = genomic position, 2 = case coverage, 3 = control
# coverage, 4 = p(greater), 5 = p(less), 6 = p(two-sided); p-value columns
# are filled by the Fisher-test loop later in the script.
chr21_dup_cnv <- matrix(0, nrow = ln, ncol = 6)
chr21_dup_cnv[, 1] <- seq_len(ln) + st - 1  # vectorised; replaces scalar loop
# Increment `counts` by 1 at every base covered by each CNV row.  Columns 3
# and 4 of the CNV tables hold the start/end coordinates (as in the original
# indexing); seq_len() makes the loop a no-op when the table has 0 rows.
add_coverage <- function(counts, cnvs) {
  for (i in seq_len(nrow(cnvs))) {
    k1 <- as.integer(cnvs[i, 3]) - st + 1
    k2 <- as.integer(cnvs[i, 4]) - st + 1
    counts[k1:k2] <- counts[k1:k2] + 1
  }
  counts
}
case_Dup <- as.matrix(case_Dup)
control_Dup <- as.matrix(control_Dup)
chr21_dup_cnv[, 2] <- add_coverage(chr21_dup_cnv[, 2], case_Dup)
chr21_dup_cnv[, 3] <- add_coverage(chr21_dup_cnv[, 3], control_Dup)
# Keep positions covered by >= 5 case duplications, then compare case vs
# control coverage at each remaining position with Fisher's exact test on a
# 2x2 table: (covered, not covered) x (case, control).
chr21_dup_cnv <- subset(chr21_dup_cnv, chr21_dup_cnv[, 2] >= 5)
# seq_len() yields an empty sequence for 0 rows, so no explicit guard needed.
for (i in seq_len(nrow(chr21_dup_cnv))) {
  m <- matrix(c(chr21_dup_cnv[i, 2], chr21_dup_cnv[i, 3],
                no_case_Dup - chr21_dup_cnv[i, 2],
                no_control_Dup - chr21_dup_cnv[i, 3]), nrow = 2)
  # conf.level only affects the reported interval, not the p-value.
  chr21_dup_cnv[i, 6] <- fisher.test(m, alternative = "two.sided", conf.level = 0.9)$p.value
  chr21_dup_cnv[i, 4] <- fisher.test(m, alternative = "greater", conf.level = 0.9)$p.value
  chr21_dup_cnv[i, 5] <- fisher.test(m, alternative = "less", conf.level = 0.9)$p.value
}
path <- '/home/san/halinejad/Desktop/Dashti/somatic2'
path <- paste0(path, '/Result/chr21_dup.csv')
write.csv(chr21_dup_cnv, path)
#======================Significant regions=======================
# Positions whose one-sided ("greater") p-value passes the threshold are
# merged into runs of consecutive bases and summarised per region.
significant_pval <- 0.0001609981
significant_file_Dup <- as.data.frame(subset(chr21_dup_cnv, chr21_dup_cnv[, 4] <= significant_pval))
# (the redundant as.data.frame() step inside the pipe was dropped; the input
# is already a data frame)
significant_regions_chr21_dup_cnv <- significant_file_Dup %>%
  mutate(
    # jump == 1 for consecutive positions; anything else starts a new region
    jump = V1 - c(-1, V1[-length(V1)]),
    region = cumsum(jump != 1)
  ) %>%
  group_by(region) %>%
  summarize(
    start = min(V1),
    end = max(V1),
    min_pval = min(V4),
    max_pval = max(V4),
    mean_pval = mean(V4),
    min_case = min(V2),
    max_case = max(V2),
    mean_case = mean(V2),
    min_control = min(V3),
    max_control = max(V3),
    mean_control = mean(V3)   # trailing comma removed: only tolerated by
  )                           # rlang's dots, an error in base R calls
path <- '/home/san/halinejad/Desktop/Dashti/somatic2'
path <- paste0(path, '/Result/regions_chr21_dup.csv')
write.csv(significant_regions_chr21_dup_cnv, path)
library(tidyverse)
}
#=======Read files========
path<-'/home/san/halinejad/Desktop/Dashti/somatic2'
pcnt<-paste0(path,'/AGRE_cnv_control.csv')
pcs<-paste0(path,'/AGRE_cnv_case.csv')
input_case<- read.csv(pcs,header = T)
input_control<- read.csv(pcnt,header = T)
t1<-subset(input_case,input_case$CNV.Type=='Dup')
t1<-as.data.frame(t1$Patient.Id)
t1<-unique(t1)
no_case_Dup<- nrow(t1)
t1<-subset(input_control,input_control$CNV.Type=='Dup')
t1<-as.data.frame(t1$Patient.Id)
t1<-unique(t1)
no_control_Dup<- nrow(t1)
input_case<- subset(input_case,input_case$chr.1==21)
input_control<-subset(input_control,input_control$chr.1==21)
st_case<-min(input_case$start)
en_case<-max(input_case$end)
st_control<-min(input_control$start)
en_control<-max(input_control$end)
st<-min(st_case,st_control)
en<-max(en_control,en_case)
ln<-en-st+1
rm(en_case,st_case,en_control,st_control,t1)
#==========Choose Duplications================
case_Dup<-subset(input_case,input_case$CNV.Type=='Dup')
control_Dup<-subset(input_control,input_control$CNV.Type=='Dup')
rm(input_case,input_control)
gc()
#==========Create Matrix=======================
chr21_dup_cnv<-matrix(0, nrow = ln, ncol = 6)
#==========Fill matrix=========================
for (i in 1:ln) {
chr21_dup_cnv[i,1]<-i+st-1
}
case_Dup <- as.matrix(case_Dup)
if (nrow(case_Dup) != 0) {
for (i in 1:nrow(case_Dup)) {
k1 <- as.integer(case_Dup[i, 3])
k2 <- as.integer(case_Dup[i, 4])
k1 <- k1 - st + 1
k2 <- k2 - st + 1
chr21_dup_cnv[k1:k2, 2] <- chr21_dup_cnv[k1:k2, 2] + 1
}
}
control_Dup<-as.matrix(control_Dup)
if(nrow(control_Dup)!=0){
for (i in 1:nrow(control_Dup)){
k1<-as.integer(control_Dup[i,3])
k2<-as.integer(control_Dup[i,4])
k1<-k1-st+1
k2<-k2-st+1
chr21_dup_cnv[k1:k2,3]<-chr21_dup_cnv[k1:k2,3]+1
}
}
chr21_dup_cnv<-subset(chr21_dup_cnv,chr21_dup_cnv[,2]>=5)
if(nrow(chr21_dup_cnv)!=0){
for (i in 1:nrow(chr21_dup_cnv)){
m<-matrix(c(chr21_dup_cnv[i,2],chr21_dup_cnv[i,3],no_case_Dup-chr21_dup_cnv[i,2],no_control_Dup -chr21_dup_cnv[i,3]),nrow = 2)
chr21_dup_cnv[i,6] <-fisher.test(m,alternative = "two.sided",conf.level =0.9)$p.value
chr21_dup_cnv[i,4] <-fisher.test(m,alternative = "greater",conf.level =0.9)$p.value
chr21_dup_cnv[i,5] <-fisher.test(m,alternative = "less",conf.level =0.9)$p.value
}
}
path<-'/home/san/halinejad/Desktop/Dashti/somatic2'
path<-paste0(path,'/Result/chr21_dup.csv')
write.csv(chr21_dup_cnv,path)
#======================Significant regions=======================
significant_pval<- 0.0001609981
significant_file_Dup<- as.data.frame(subset(chr21_dup_cnv,chr21_dup_cnv[,4]<=significant_pval))
significant_regions_chr21_dup_cnv<- significant_file_Dup%>%
as.data.frame() %>%
mutate(
jump = V1 - c(-1, V1[-length(V1)]),
region = cumsum(jump != 1)
) %>%
group_by(region) %>%
summarize(
start = min(V1),
end = max(V1),
min_pval = min(V4),
max_pval = max(V4),
mean_pval = mean(V4),
min_case = min(V2),
max_case = max(V2),
mean_case = mean(V2),
min_control = min(V3),
max_control = max(V3),
mean_control = mean(V3),
)
path<-'/home/san/halinejad/Desktop/Dashti/somatic2'
path<-paste0(path,'/Result/regions_chr21_dup.csv')
write.csv(significant_regions_chr21_dup_cnv,path) |
#### Build the survey DAG arc by arc.  Node labels (per the level
#### definitions later in the script): A = age, S = sex, E = education,
#### O = occupation, R = residence, T = travel preference.
source("packages.R")
dag <- empty.graph(nodes = c("A", "S", "E", "O", "R", "T"))
dag
# Arcs encode direct dependencies: A,S -> E; E -> O,R; O,R -> T.
dag <- set.arc(dag, from = "A", to = "E")
dag <- set.arc(dag, from = "S", to = "E")
dag <- set.arc(dag, from = "E", to = "R")
dag <- set.arc(dag, from = "E", to = "O")
dag <- set.arc(dag, from = "O", to = "T")
dag <- set.arc(dag, from = "R", to = "T")
dag
# identical:
dag2 <- empty.graph(nodes = c("A", "S", "E", "O", "R", "T"))
arc.set <- matrix(c("A", "E",
"S", "E",
"E", "O",
"E", "R",
"O", "T",
"R", "T"),
byrow = TRUE, ncol = 2,
dimnames = list(NULL, c("from", "to")))
arcs(dag2) <- arc.set
modelstring(dag2)
nodes(dag2)
arcs(dag2)
#DAG's are acyclical!!!
A.lv <- c("young", "adult", "old")
S.lv <- c("M", "F")
E.lv <- c("high", "uni")
O.lv <- c("emp", "self")
R.lv <- c("small", "big")
T.lv <- c("car", "train", "other")
A.prob <- array(c(0.30, 0.50, 0.20), dim = 3,
dimnames = list(A = A.lv))
A.prob
S.prob <- array(c(0.60, 0.40), dim = 2,
dimnames = list(S = S.lv))
S.prob
O.prob <- array(c(0.96, 0.04, 0.92, 0.08), dim = c(2, 2),
dimnames = list(O = O.lv, E = E.lv))
O.prob
R.prob <- array(c(0.25, 0.75, 0.20, 0.80), dim = c(2, 2),
dimnames = list(R = R.lv, E = E.lv))
R.prob
E.prob <- array(c(0.75, 0.25, 0.72, 0.28, 0.88, 0.12, 0.64,
0.36, 0.70, 0.30, 0.90, 0.10), dim = c(2, 3, 2),
dimnames = list(E = E.lv, A = A.lv, S = S.lv))
T.prob <- array(c(0.48, 0.42, 0.10, 0.56, 0.36, 0.08, 0.58,
0.24, 0.18, 0.70, 0.21, 0.09), dim = c(3, 2, 2),
dimnames = list(T = T.lv, O = O.lv, R = R.lv))
#creating the BN
dag3 <- model2network("[A][S][E|A:S][O|E][R|E][T|O:R]")
all.equal(dag2, dag3)
cpt <- list(A = A.prob, S = S.prob, E = E.prob, O = O.prob,
R = R.prob, T = T.prob)
bn <- custom.fit(dag2, cpt)
nparams(bn)
arcs(bn)
bn$T
R.cpt <- coef(bn$R)
bn
# now with real data
survey <- read.table("survey.txt", header = TRUE)
head(survey)
# parameters to estimate: conditional probabilities in the local distributions
#fit the parameters for the local distributions:
bn.mle <- bn.fit(dag, data = survey, method = "mle")
bn.mle$O
bn.bayes <- bn.fit(dag, data = survey, method = "bayes",
iss = 10)
bn.bayes$O
#conditional independence tests: focus on presence of different arcs
# since each arc encodes probabilistic dependence the test can be used to assess whether
# that dependence is supported by the data
# if test rejects the Null, arc can be included in the DAG
# number of degrees of freedom for education -> travel:
(nlevels(survey[, "T"]) - 1) * (nlevels(survey[, "E"]) - 1) *
(nlevels(survey[, "O"]) * nlevels(survey[, "R"]))
# mutual information test from information theory:
ci.test("T", "E", c("O", "R"), test = "mi", data = survey)
# Pearsons X^2 test:
ci.test("T", "E", c("O", "R"), test = "x2", data = survey)
# that way we can remove arcs that are not supported by the data:
ci.test("T", "O", "R", test = "x2", data = survey)
#to do this for all:
arc.strength(dag, data = survey, criterion = "x2")
# network scores focus on the DAG as a whole: GOF statistics that measure how well
# the DAG mirrors the dependence structure of the data (e.g. BIC)
#Bayesian Dirichlet equivalent uniform (BDeu) posterior probability of
#the DAG associated with a uniform prior over both the space of the DAGs and
#of the parameters
#the higher BIC/BD the better the fit of the DAG to the data
score(dag, data = survey, type = "bic")
score(dag, data = survey, type = "bde", iss = 10)
# fot the BDe we have to specify imaginary sample size for computation of the posterior estimates
# corresponds to the weight assigned to the flat prior distribution
score(dag, data = survey, type = "bde", iss = 1)
# the lower the iss the closer BDe is to BIC
# evaluate a DAG that also includes Education -> Transport:
dag4 <- set.arc(dag, from = "E", to = "T")
nparams(dag4, survey)
score(dag4, data = survey, type = "bic")
# not beneficial
# also useful to compare completely different DAG's e.g. by randomly selecting one:
rnd <- random.graph(nodes = c("A", "S", "E", "O", "R", "T"))
modelstring(rnd)
score(rnd, data = survey, type = "bic")
#yet there are learning algorithms: searching for the DAG that maximises a given network score
# e.g. hill climbing
learned <- hc(survey)
modelstring(learned)
score(learned, data = survey, type = "bic")
learned2 <- hc(survey, score = "bde")
arc.strength(learned, data = survey, criterion = "bic")
# from the learned score, removing any will result in a decrease of BIC
# this is not true when using the DAG that we specified:
arc.strength(dag, data = survey, criterion = "bic")
# removing O-->T would increase BIC
#testing conditional independence via d-separation
dsep(dag, x = "S", y = "R")
dsep(dag, x = "O", y = "R")
path(dag, from = "S", to = "R")
dsep(dag, x = "S", y = "R", z = "E")
dsep(dag, x = "O", y = "R", z = "E")
dsep(dag, x = "A", y = "S")
dsep(dag, x = "A", y = "S", z = "E")
#----------------------------------------------------#
# Exact Inference #
#----------------------------------------------------#
# transform BN into a tree
junction <- compile(as.grain(bn))
#attitudes of women towards car
#and train use compared to the whole survey sample
querygrain(junction, nodes = "T")$T
jsex <- setEvidence(junction, nodes = "S", states = "F")
querygrain(jsex, nodes = "T")$T
# women show about the same preferences towards car and train use as the interviewees as a whole
#living in a small city affects car and train use?
jres <- setEvidence(junction, nodes = "R", states = "small")
querygrain(jres, nodes = "T")$T
jedu <- setEvidence(junction, nodes = "E", states = "high")
SxT.cpt <- querygrain(jedu, nodes = c("S", "T"),
type = "joint")
SxT.cpt
querygrain(jedu, nodes = c("S", "T"), type = "marginal")
querygrain(jedu, nodes = c("T", "S"), type = "conditional")
dsep(bn, x = "S", y = "T", z = "E")
# Scale the joint probabilities to expected counts and test S-T association;
# `<-` replaces the non-idiomatic `=` top-level assignment.
SxT.ct <- SxT.cpt * nrow(survey)
chisq.test(SxT.ct)
#----------------------------------------------------#
# Approximate Inference #
#----------------------------------------------------#
#using monte carlo simulations to randomly generate observations from the BN
# 5000 * nparam(BN)
cpquery(bn, event = (S == "M") & (T == "car"),
evidence = (E == "high"))
#10^6 * nparam(BN)
cpquery(bn, event = (S == "M") & (T == "car"),
evidence = (E == "high"), n = 10^6)
#probability of a man travelling by car given that his Age is young and his
#Education is uni or that he is an adult, regardless of his Education.
cpquery(bn, event = (S == "M") & (T == "car"),
evidence = ((A == "young") & (E == "uni")) | (A == "adult"))
SxT <- cpdist(bn, nodes = c("S", "T"),
evidence = (E == "high"))
head(SxT)
prop.table(table(SxT))
# Graphical implementation: plot the DAG with Rgraphviz through bnlearn.
graphviz.plot(dag)
# alternative Graphviz layout engines
graphviz.plot(dag, layout = "fdp")
graphviz.plot(dag, layout = "circo")
# grey out everything, then re-highlight the S -> E -> R path below
hlight <- list(nodes = nodes(dag), arcs = arcs(dag),
               col = "grey", textCol = "grey")
pp <- graphviz.plot(dag, highlight = hlight)
# customise individual edges/nodes on the returned graph object, then render
graph::edgeRenderInfo(pp) <- list(col = c("S~E" = "black", "E~R" = "black"),
                                  lwd = c("S~E" = 3, "E~R" = 3))
graph::nodeRenderInfo(pp) <- list(col = c("S" = "black", "E" = "black", "R" = "black"),
                                  textCol = c("S" = "black", "E" = "black", "R" = "black"),
                                  fill = c("E" = "grey"))
Rgraphviz::renderGraph(pp)
# Plotting conditional probability distributions of the fitted Travel node.
bn.fit.barchart(bn.mle$T, main = "Travel",
                xlab = "Pr(T | R,O)", ylab = "")
bn.fit.dotplot(bn.mle$T, main = "Travel",
               xlab = "Pr(T | R,O)", ylab = "")
# Hand-built data frame of the three query results computed earlier
# (unconditional, S = "F", R = "small") for a lattice barchart.
Evidence <- factor(c(rep("Unconditional",3), rep("Female", 3),
                     rep("Small City",3)),
                   levels = c("Unconditional", "Female", "Small City"))
Travel <- factor(rep(c("car", "train", "other"), 3),
                 levels = c("other", "train", "car"))
distr <- data.frame(Evidence = Evidence, Travel = Travel,
                    Prob = c(0.5618, 0.2808, 0.15730, 0.5620, 0.2806,
                             0.1573, 0.4838, 0.4170, 0.0990))
head(distr)
# One panel per evidence scenario; strip labels use plotmath expressions.
barchart(Travel ~ Prob | Evidence, data = distr,
         layout = c(3, 1), xlab = "probability",
         scales = list(alternating = 1, tck = c(1, 0)),
         strip = strip.custom(factor.levels =
                                c(expression(Pr(T)),
                                  expression(Pr({T} * " | " * {S == F})),
                                  expression(Pr({T} * " | " * {R == small})))),
         panel = function(...) {
           panel.barchart(...)
           panel.grid(h = 0, v = -1)
         })
#------------------------------------------------------------------#
#####           Continuous Case: Gaussian BN                    ####
#------------------------------------------------------------------#
# Model continuous data under a multivariate normal assumption.
# Crop DAG: G and E drive V, which drives N and W, which jointly drive C.
dag.bnlearn <- model2network("[G][E][V|G:E][N|V][W|V][C|N:W]")
dag.bnlearn
nano <- nodes(dag.bnlearn)
# Print every marginally independent (d-separated) pair of nodes.
for (n1 in nano) {
  for (n2 in nano) {
    if (dsep(dag.bnlearn, n1, n2))
      cat(n1, "and", n2, "are independent.\n")
  }#FOR
}#FOR
# Print every pair of nodes that becomes independent conditional on V.
for (n1 in nano[nano != "V"]) {
  for (n2 in nano[nano != "V"]) {
    if (n1 < n2) {
      if (dsep(dag.bnlearn, n1, n2, "V"))
        cat(n1, "and", n2, "are independent given V.\n")
    }#THEN
  }#FOR
}#FOR
# Probabilistic representation: each local distribution is a Gaussian
# linear model (intercept + parents as regressors, node-specific sd).
disE <- list(coef = c("(Intercept)" = 50), sd = 10)
disG <- list(coef = c("(Intercept)" = 50), sd = 10)
disV <- list(coef = c("(Intercept)" = -10.35534,
                      E = 0.70711, G = 0.5), sd = 5)
disN <- list(coef = c("(Intercept)" = 45, V = 0.1),
             sd = 9.949874)
disW <- list(coef = c("(Intercept)" = 15, V = 0.7),
             sd = 7.141428)
disC <- list(coef = c("(Intercept)" = 0, N = 0.3, W = 0.7),
             sd = 6.25)
dis.list <- list(E = disE, G = disG, V = disV, N = disN,  # fixed: `<-`, not `=`
                 W = disW, C = disC)
gbn.bnlearn <- custom.fit(dag.bnlearn, dist = dis.list)
print(gbn.bnlearn)
# we have created a linear Gaussian Bayesian network with the following
# assumptions:
# 1. each node follows a normal distribution
# 2. root nodes are solely described by the marginal distribution
# 3. each node has a variance that is specific to that node and does not
#    depend on the values of the parents
# 4. the local distribution of each node can be equivalently expressed as a
#    Gaussian linear model which includes an intercept and the node's
#    parents as explanatory variables
# Convert to rbmn's three equivalent representations of the GBN.
gbn.rbmn <- bnfit2nbn(gbn.bnlearn)
gema.rbmn <- nbn2gema(gbn.rbmn)
mn.rbmn <- gema2mn(gema.rbmn)
print8mn(mn.rbmn)
str(mn.rbmn)
# Estimating the parameters: correlation coefficients.
# import() reads the crop data set (presumably rio::import -- TODO confirm,
# the loading code is in packages.R which is outside this file).
cropdata1 <- import("cropdata1.txt")
dim(cropdata1)
round(head(cropdata1), 2)
# bn.fit automatically adapts to the data type (continuous -> Gaussian)
est.para <- bn.fit(dag.bnlearn, data = cropdata1)
# assign the return value of a fit directly to the corresponding node
est.para$C <- lm(C ~ N + W, data = cropdata1)
# alternative: ridge-penalized regression (penalized package)
est.para$C <- penalized(C ~ N + W, lambda1 = 0, lambda2 = 1.5,
                        data = cropdata1)
est.para$E
est.para$C
# intercept: true = 0, estimated 2.4069
# fit null intercept:
est.para$C <- lm(C ~ N + W - 1, data = cropdata1)
est.para$C
# plain OLS fit for comparison, with confidence intervals
lmC <- lm(C ~ N + W, data = cropdata1[, c("N", "W", "C")])
coef(lmC)
confint(lmC)
# Tests and scores: partial correlations from the correlation matrix
# (cor2pcor from the corpcor package, loaded elsewhere -- TODO confirm).
cormat <- cor(cropdata1[, c("C", "W", "N")])
invcor <- cor2pcor(cormat)
dimnames(invcor) <- dimnames(cormat)
invcor
# partial correlation of C and W given N
invcor["C", "W"]
# similarly, as a formal conditional independence test:
ci.test("C", "W", "N", test = "cor", data = cropdata1)
# use a constraint-based learning algorithm
stru1 <- iamb(cropdata1, test = "cor")
# differs from dag.bnlearn slightly --> the V-N arc is missing, thus
# we can make this arc mandatory by putting it on a whitelist:
wl <- matrix(c("V", "N"), ncol = 2)
wl
stru2 <- iamb(cropdata1, test = "cor", whitelist = wl)
all.equal(dag.bnlearn, stru2)
# more data learns the DAG correctly
cropdata2 <- import("cropdata2.txt")
stru3 <- iamb(cropdata2, test = "cor")
all.equal(dag.bnlearn, stru3)
#### Network Scores of GBNs ####
# Gaussian BIC and Bayesian Gaussian equivalent (BGe) scores.
score(dag.bnlearn, data = cropdata2, type = "bic-g")
score(dag.bnlearn, data = cropdata2, type = "bge")
##### Inference with GBN #####
# again we are interested in the probability of an event or in
# the distribution of some random variables
# nbn is defined via the GBN's local distributions:
print8nbn(gbn.rbmn)
#str(gbn.rbmn)
# gema describes the GBN by two generating matrices:
# 1. a vector of expectations and 2. a matrix to be multiplied by N(0, 1)
# white noise
print8gema(gema.rbmn)
# read as: V = 50 + 7.071E1 + 5E2 + 5E3, where E1,...,E6 are i.i.d. N(0,1)
# variables.
# use condi4joint() for conditional joint distributions of one or more nodes
print8mn(condi4joint(mn.rbmn, par = "C", pour = "V", x2 = 80))
print8mn(condi4joint(mn.rbmn, par = "V", pour = "C", x2 = 80))
# symmetric distribution: C given V as a function of V (x2 = NULL)
unlist(condi4joint(mn.rbmn, par = "C", pour = "V", x2 = NULL))
#### Approximate Inference ####
# Simulate from the GBN by hand, sampling each node from its local
# distribution given its (already simulated) parents.
nbs <- 4
VG <- rnorm(nbs, mean = 50, sd = 10)
VE <- rnorm(nbs, mean = 50, sd = 10)
VV <- rnorm(nbs, mean = -10.355 + 0.5 * VG + 0.707 * VE,
            sd = 5)
VN <- rnorm(nbs, mean = 45 + 0.1 * VV, sd = 9.95)
cbind(VV, VN)
# or quicker, with bnlearn's built-in simulation:
sim <- rbn(gbn.bnlearn, n = 4)
sim[, c("V", "N")]
# make probability assertions about intervals:
head(cpdist(gbn.bnlearn, nodes = c("C", "N", "W"),
            evidence = (C > 80)))
# likelihood weighting, because single values have probability zero in the
# continuous case (point evidence cannot be matched by rejection sampling)
head(cpdist(gbn.bnlearn, nodes = c("V"),
            evidence = list(G = 10, E = 90), method = "lw"))
cpquery(gbn.bnlearn, event = (V > 70),
        evidence = list(G = 10, E = 90), method = "lw")
# Plotting GBNs with igraph.
igraph.options(print.full = TRUE)
# build the same DAG from a formula (-+ means a directed arc)
dag0.igraph <- graph.formula(G-+V, E-+V, V-+N, V-+W,
                             N-+C, W-+C)
dag0.igraph
# or convert the bnlearn DAG through its graphNEL representation
dag.igraph <- igraph.from.graphNEL(as.graphNEL(dag.bnlearn))
V(dag.igraph)
E(dag.igraph)
# four progressively customised plots side by side
par(mfrow = c(2, 2), mar = rep(3, 4), cex.main = 2)
plot(dag.igraph, main = "\n1: defaults")
dag2 <- dag.igraph
V(dag2)$label <- V(dag2)$name
plot(dag2, main = "\n2: with labels")
# explicit (x, y) node coordinates, one row per node
ly <- matrix(c(2, 3, 1, 1, 2, 3,
               1, 4, 4, 2, 3, 2), 6)
plot(dag2, layout = ly, main = "\n3: positioning")
colo <- c("black", "darkgrey", "darkgrey", rep(NA, 3))
lcolo <- c(rep("white", 3), rep(NA, 3))
par(mar = rep(0, 4), lwd = 1.5)
plot(dag2, layout = ly, frame = TRUE,
     main = "\n4: final",
     vertex.color = colo, vertex.label.color = lcolo,
     vertex.label.cex = 3, vertex.size = 50,
     edge.arrow.size = 0.8, edge.color = "black")
# display conditional probabilities: QQ-plots of the fitted residuals
gbn.fit <- bn.fit(dag.bnlearn, cropdata2)
bn.fit.qqplot(gbn.fit)
bn.fit.qqplot(gbn.fit$V)
# fails on purpose: gbn.bnlearn was built from parameters, not data,
# so it carries no residuals to plot
try(bn.fit.qqplot(gbn.bnlearn))
# conditional distribution of C given E and V, as a function of (E, V)
C.EV <- condi4joint(mn.rbmn, par = "C", pour = c("E", "V"),
                    x2 = NULL)
C.EV$rho
dsep(gbn.bnlearn, "E", "C", "V")
# scatter plot of simulated (V, C) with point size encoding E
set.seed(5678)
cropdata3 <- cpdist(gbn.bnlearn, nodes = c("E", "V", "C"),
                    evidence = TRUE, n = 1000)
plot(cropdata3$V, cropdata3$C, type = "n",
     main = "C | V, E; E is the point size")
cexlim <- c(0.1, 2.4)
# rescale E linearly into the [0.1, 2.4] point-size range
cexE <- cexlim[1] + diff(cexlim) / diff(range(cropdata3$E)) *
  (cropdata3$E - min(cropdata3$E))
points(cropdata3$V, cropdata3$C, cex = cexE)
# horizontal guides at the deciles of C
cqa <- quantile(cropdata3$C, seq(0, 1, 0.1))
abline(h = cqa, lty = 3)
#--------------------------------------------------------------------#
####              Hybrid Bayesian Networks                        ####
#--------------------------------------------------------------------#
# Hybrid BNs mix discrete and continuous variables and may use any kind
# of distribution; inference is done by MCMC through JAGS.
library(rjags)
# Two suppliers with prior probabilities sp; their parts have diameters
# N(mu[i], sigma). Observed diameter: 6.20.
sp <- c(0.5, 0.5)
mu <- c(6.1, 6.25)
sigma <- 0.05
jags.data <- list(sp = sp, mu = mu, sigma = sigma,
                  cdiam = 6.20)
# model file "inclu.sc.jam" must exist in the working directory
model1 <- jags.model(file = "inclu.sc.jam", data = jags.data)
update(model1, n.iter = 10000)  # burn-in
simu1 <- coda.samples(model = model1, variable.names = "csup",
                      n.iter = 20000, thin = 20)
sim1 <- simu1[[1]]
# posterior probability that the part came from supplier 1
sum(sim1 == 1) / length(sim1)
# quite close to the theoretical value:
d.s1 <- dnorm(6.2, mean = mu[1], sd = sigma)
d.s2 <- dnorm(6.2, mean = mu[2], sd = sigma)
d.s1 / (d.s1 + d.s2)
# discretizing continuous variables: cut the diameter into three classes
limits <- c(6.16, 6.19)
dsd <- matrix(c(diff(c(0, pnorm(limits, mu[1], sigma), 1)),
                diff(c(0, pnorm(limits, mu[2], sigma), 1))),
              3, 2)
dimnames(dsd) <- list(D = c("thin", "average", "thick"),
                      S = c("s1", "s2"))
dsd
# joint distribution by multiplying dsd by the probability of each s
# (law of total probability)
jointd <- dsd * sp
# conditional probability of S given D:
dds <- t(jointd / rowSums(jointd))
dds
###### Using different distributions than multinomial/multinormal #####
dat0 <- list(p.PR = c(0.7, 0.2, 0.1),
             a.CL = 3, b.CL = 1,
             g.G1 = c(1, 3, 10),
             k.G2 = 10,
             m.TR = 5, s.TR = 2.5,
             r.LO = 1/3, d.LO = 1)
# explore the posterior expected loss (LO) for each preparation level PR
exp.loss <- rep(NA, 3)
names(exp.loss) <- paste0("PR=", 1:3)  # fixed: paste0() over paste(sep = "")
qua.loss <- exp.loss
for (PR in 1:3) {
  dat1 <- dat0
  dat1$PR <- PR
  mopest <- jags.model(file = "inclu.pest.jam", data = dat1,
                       quiet = TRUE)
  update(mopest, 3000)  # burn-in
  sipest <-
    coda.samples(model = mopest, variable.names = "LO",
                 n.iter = 50000)
  summa <- summary(sipest)
  exp.loss[PR] <- summa$statistics["Mean"]
  qua.loss[PR] <- summa$quantiles["75%"]
}#FOR
# NOTE: mean3 uses the chain of the last loop iteration (PR = 3) only
mean3 <- mean(sipest[[1]][, "LO"])
round(c(exp.loss, MEAN = mean(exp.loss)), 1)
###### Theoretic Motivation #####
# Ten-node example DAG to illustrate skeletons, v-structures, CPDAGs,
# Markov blankets and moral graphs.
X <- paste0("[X1][X3][X5][X6|X8][X2|X1][X7|X5][X4|X1:X2]",
            "[X8|X3:X7][X9|X2:X7][X10|X1:X9]")  # fixed: paste0() over paste(sep = "")
dag <- model2network(X)
skel <- skeleton(dag)  # undirected skeleton
vstructs(dag)          # v-structures (colliders)
cp1 <- cpdag(dag)      # equivalence class (CPDAG)
dsep(dag, x = "X9", y = "X5", z = c("X2", "X7", "X10"))
# identify Markov blanket nodes
mb(dag, node = "X9")
mb(dag, node = "X7")
# Markov blanket by hand: parents, children and the children's other parents.
par.X9 <- bnlearn::parents(dag, node = "X9")
ch.X9 <- bnlearn::children(dag, node = "X9")
sp.X9 <- sapply(ch.X9, bnlearn::parents, x = dag)
sp.X9 <- sp.X9[sp.X9 != "X9"]
unique(c(par.X9, ch.X9, sp.X9))
# every node outside the blanket is d-separated from X9 given the blanket
V <- setdiff(nodes(dag), "X9")
S <- mb(dag, "X9")
sapply(setdiff(V, S), dsep, bn = dag, y = "X9", z = S)
V <- setdiff(nodes(dag), "X7")
S <- mb(dag, "X7")
sapply(setdiff(V, S), dsep, bn = dag, y = "X7", z = S)
# Markov blankets are symmetric: X7 belongs to the blanket of each member
# of its own blanket.
belongs <- logical(0)
for (node in S)
  belongs[node] <- "X7" %in% mb(dag, node)
belongs
#### Moral Graphs ####
# Just another graphical representation derived from the DAG.
mg1 <- moral(dag)
# adding X7 -> X3 does not change the moral graph
all.equal(moral(dag),
          moral(set.arc(dag, from = "X7", to = "X3")))
# moralization by hand: marry the parents of each v-structure, then drop
# all arc directions
mg2 <- dag
vs <- vstructs(dag)
for (i in seq_len(nrow(vs)))  # fixed: seq_len() is safe when nrow(vs) == 0
  mg2 <- set.edge(mg2, from = vs[i, "X"], to = vs[i, "Y"],
                  check.cycles = FALSE)
mg2 <- skeleton(mg2)
all.equal(mg1, mg2)
# Moralization transforms a BN into a Markov network.
###################################
#                                 #
#### Bayesian Network Learning ####
#                                 #
###################################
# Grow-Shrink (constraint-based) structure learning algorithm.
bn.cor <- gs(cropdata1, test = "cor", alpha = 0.05)
modelstring(bn.cor)
# missing the V-N arc; the small sample size seems to reduce the power of
# the test
# use Fisher's Z test
bn.zf <- gs(cropdata1, test = "zf", alpha = 0.05)
# or a Monte Carlo permutation test
bn.mc <- gs(cropdata1, test = "mc-cor", B = 1000)
all.equal(bn.zf, bn.mc)
all.equal(bn.cor, bn.mc)
# still not the real structure
bn.iamb <- iamb(cropdata1, test = "cor", alpha = 0.05)
all.equal(bn.cor, bn.iamb)
gs(cropdata1, test = "cor", alpha = 0.05, debug = TRUE)
# include the missing arc by hand. Fixed: the whitelist is a two-column
# (from, to) matrix, the format documented by bnlearn and used for iamb()
# earlier in this file, instead of a bare character vector.
bn.cor <- gs(cropdata1, test = "cor", alpha = 0.05,
             whitelist = matrix(c("V", "N"), ncol = 2,
                                dimnames = list(NULL, c("from", "to"))))
all.equal(bn.cor, dag.bnlearn)
# Score-based algorithms: hill climbing over DAG space.
learned <- hc(survey, score = "bic")
modelstring(learned)
score(learned, data = survey, type = "bic")
# trace the search steps; fixed: spell out TRUE, never the reassignable T
learned <- hc(survey, score = "bic", debug = TRUE)
# start the search at a random graph instead of the empty one
hc(survey, score = "bic", start = random.graph(names(survey)))
# Hybrid algorithms:
# MMHC is implemented in bnlearn in the mmhc function
mmhc(survey)
rsmax2(survey, restrict = "mmpc", maximize = "hc")
#rsmax2(survey, restrict = "si.hiton.pc", test = "x2",
#       maximize = "tabu", score = "bde", maximize.args = list(iss = 5))
#
#-----------------------------------------------------------------------------#
######                      Parameter Learning                           ######
#-----------------------------------------------------------------------------#
# probability of finding a man driving a car,
# given he has high school education
cpquery(bn, event = (S == "M") & (T == "car"),
        evidence = (E == "high"), n = 10^6)
# Rejection sampling by hand: simulate particles, keep those matching the
# evidence, and estimate the conditional probability as a ratio of counts.
particles <- rbn(bn, 10^6)
head(particles, n = 5)
partE <- particles[(particles[, "E"] == "high"), ]
nE <- nrow(partE)
partEq <- partE[(partE[, "S"] == "M") & (partE[, "T"] == "car"), ]  # fixed spacing after `<-`
nEq <- nrow(partEq)
nEq / nE
###### Mutilated Networks and likelihood sampling ####
# mutilation fixes the evidence node E to "high" (degenerate distribution)
mutbn <- mutilated(bn, list(E = "high"))
mutbn$E
# likelihood weighting by hand: weight each particle by the likelihood of
# the evidence instead of discarding mismatching particles
particles <- rbn(bn, 10^6)
partQ <- particles[(particles[, "S"] == "M") &
                     (particles[, "T"] == "car"), ]
nQ <- nrow(partQ)
nQ / 10^6
w <- logLik(bn, particles, nodes = "E", by.sample = TRUE)
wEq <- sum(exp(w[(particles[, "S"] == "M") &
                   (particles[, "T"] == "car")]))
wE <- sum(exp(w))
wEq / wE
# or alternatively:
cpquery(bn, event = (S == "M") & (T == "car"),
        evidence = list(E = "high"), method = "lw")
###### Causal BNs #####
# The marks data set: hc() learns different structures on different
# subgroups, illustrating the effect of a latent grouping variable.
data(marks)
head(marks)
latent <- factor(c(rep("A", 44), "B",
                   rep("A", 7), rep("B", 36)))
modelstring(hc(marks[latent == "A", ]))
modelstring(hc(marks[latent == "B", ]))
modelstring(hc(marks))
# discretizing the data to make the BN multinomial
dmarks <- discretize(marks, breaks = 2, method = "interval")
modelstring(hc(cbind(dmarks, LAT = latent)))
# example for missing data imputation
# (fixed throughout: `<-` for assignment instead of `=`)
with.missing.data <- gaussian.test
with.missing.data[sample(nrow(with.missing.data), 500), "F"] <- NA
fitted <- bn.fit(model2network("[A][B][E][G][C|A:B][D|B][F|A:D:E:G]"),
                 gaussian.test)
imputed <- impute(fitted, with.missing.data)
# predicting a variable in the test set.
training <- bn.fit(model2network("[A][B][E][G][C|A:B][D|B][F|A:D:E:G]"),
                   gaussian.test[1:2000, ])
test <- gaussian.test[2001:nrow(gaussian.test), ]
predicted <- predict(training, node = "F", data = test)
# obtain the conditional probabilities for the values of a single variable
# given a subset of the rest; they are computed to determine the predicted
# values.
fitted <- bn.fit(model2network("[A][C][F][B|A][D|A:C][E|B:F]"), learning.test)
evidence <- data.frame(A = factor("a", levels = levels(learning.test$A)),
                       F = factor("b", levels = levels(learning.test$F)))
predicted <- predict(fitted, "C", evidence,
                     method = "bayes-lw", prob = TRUE)
attr(predicted, "prob")
####
# Load all required packages (bnlearn, gRain, lattice, ... -- see packages.R).
source("packages.R")
# Build the travel-survey DAG node by node: Age, Sex, Education, Occupation,
# Residence, Transport.
dag <- empty.graph(nodes = c("A", "S", "E", "O", "R", "T"))
dag
dag <- set.arc(dag, from = "A", to = "E")
dag <- set.arc(dag, from = "S", to = "E")
dag <- set.arc(dag, from = "E", to = "R")
dag <- set.arc(dag, from = "E", to = "O")
dag <- set.arc(dag, from = "O", to = "T")
dag <- set.arc(dag, from = "R", to = "T")
dag
# identical: set all arcs at once from a (from, to) matrix
dag2 <- empty.graph(nodes = c("A", "S", "E", "O", "R", "T"))
arc.set <- matrix(c("A", "E",
                    "S", "E",
                    "E", "O",
                    "E", "R",
                    "O", "T",
                    "R", "T"),
                  byrow = TRUE, ncol = 2,
                  dimnames = list(NULL, c("from", "to")))
arcs(dag2) <- arc.set
modelstring(dag2)
nodes(dag2)
arcs(dag2)
# DAGs are acyclical!!!
# Factor levels for each node.
A.lv <- c("young", "adult", "old")
S.lv <- c("M", "F")
E.lv <- c("high", "uni")
O.lv <- c("emp", "self")
R.lv <- c("small", "big")
T.lv <- c("car", "train", "other")
# Conditional probability tables, one per node; dimnames tie probabilities
# to the factor levels of the node and its parents.
A.prob <- array(c(0.30, 0.50, 0.20), dim = 3,
                dimnames = list(A = A.lv))
A.prob
S.prob <- array(c(0.60, 0.40), dim = 2,
                dimnames = list(S = S.lv))
S.prob
O.prob <- array(c(0.96, 0.04, 0.92, 0.08), dim = c(2, 2),
                dimnames = list(O = O.lv, E = E.lv))
O.prob
R.prob <- array(c(0.25, 0.75, 0.20, 0.80), dim = c(2, 2),
                dimnames = list(R = R.lv, E = E.lv))
R.prob
E.prob <- array(c(0.75, 0.25, 0.72, 0.28, 0.88, 0.12, 0.64,
                  0.36, 0.70, 0.30, 0.90, 0.10), dim = c(2, 3, 2),
                dimnames = list(E = E.lv, A = A.lv, S = S.lv))
T.prob <- array(c(0.48, 0.42, 0.10, 0.56, 0.36, 0.08, 0.58,
                  0.24, 0.18, 0.70, 0.21, 0.09), dim = c(3, 2, 2),
                dimnames = list(T = T.lv, O = O.lv, R = R.lv))
# creating the BN: same DAG from its model string, then attach the CPTs
dag3 <- model2network("[A][S][E|A:S][O|E][R|E][T|O:R]")
all.equal(dag2, dag3)
cpt <- list(A = A.prob, S = S.prob, E = E.prob, O = O.prob,
            R = R.prob, T = T.prob)
bn <- custom.fit(dag2, cpt)
nparams(bn)
arcs(bn)
bn$T
R.cpt <- coef(bn$R)
bn
# now with real data
survey <- read.table("survey.txt", header = TRUE)
head(survey)
# parameters to estimate: conditional probabilities in the local distributions
# fit the parameters for the local distributions:
bn.mle <- bn.fit(dag, data = survey, method = "mle")
bn.mle$O
# Bayesian posterior estimates; iss is the imaginary sample size given to
# the flat prior.
bn.bayes <- bn.fit(dag, data = survey, method = "bayes",
                   iss = 10)
bn.bayes$O
# conditional independence tests: focus on presence of individual arcs.
# Since each arc encodes a probabilistic dependence, the test can assess
# whether that dependence is supported by the data; if the test rejects
# the null, the arc can be included in the DAG.
# number of degrees of freedom for the Education -> Transport test:
(nlevels(survey[, "T"]) - 1) * (nlevels(survey[, "E"]) - 1) *
  (nlevels(survey[, "O"]) * nlevels(survey[, "R"]))
# mutual information test from information theory:
ci.test("T", "E", c("O", "R"), test = "mi", data = survey)
# Pearson's X^2 test:
ci.test("T", "E", c("O", "R"), test = "x2", data = survey)
# that way we can remove arcs that are not supported by the data:
ci.test("T", "O", "R", test = "x2", data = survey)
# to do this for all arcs at once:
arc.strength(dag, data = survey, criterion = "x2")
# network scores look at the DAG as a whole: GOF statistics measuring how
# well the DAG mirrors the dependence structure of the data (e.g. BIC).
# BDeu: Bayesian Dirichlet equivalent uniform posterior probability of the
# DAG under a uniform prior over both DAG space and parameters.
# The higher BIC/BDe, the better the fit of the DAG to the data.
score(dag, data = survey, type = "bic")
score(dag, data = survey, type = "bde", iss = 10)
# for BDe we must specify the imaginary sample size used to compute the
# posterior estimates; it is the weight assigned to the flat prior
score(dag, data = survey, type = "bde", iss = 1)
# the lower the iss, the closer BDe is to BIC
# evaluate a DAG that also includes Education -> Transport:
dag4 <- set.arc(dag, from = "E", to = "T")
nparams(dag4, survey)
score(dag4, data = survey, type = "bic")
# not beneficial
# also useful to compare completely different DAGs, e.g. a random one:
rnd <- random.graph(nodes = c("A", "S", "E", "O", "R", "T"))
modelstring(rnd)
score(rnd, data = survey, type = "bic")
# learning algorithms search for the DAG maximising a given network score,
# e.g. hill climbing:
learned <- hc(survey)
modelstring(learned)
score(learned, data = survey, type = "bic")
learned2 <- hc(survey, score = "bde")
arc.strength(learned, data = survey, criterion = "bic")
# in the learned DAG, removing any arc would decrease BIC;
# this is not true for the hand-specified DAG:
arc.strength(dag, data = survey, criterion = "bic")
# removing O -> T would increase BIC
# testing conditional independence via d-separation
dsep(dag, x = "S", y = "R")
dsep(dag, x = "O", y = "R")
path(dag, from = "S", to = "R")
dsep(dag, x = "S", y = "R", z = "E")
dsep(dag, x = "O", y = "R", z = "E")
dsep(dag, x = "A", y = "S")
dsep(dag, x = "A", y = "S", z = "E")
#----------------------------------------------------#
#                  Exact Inference                   #
#----------------------------------------------------#
# transform the BN into a junction tree (gRain) for exact queries
junction <- compile(as.grain(bn))
# attitudes of women towards car
# and train use compared to the whole survey sample
querygrain(junction, nodes = "T")$T
jsex <- setEvidence(junction, nodes = "S", states = "F")
querygrain(jsex, nodes = "T")$T
# women show about the same preferences towards car and train use as the interviewees as a whole
# living in a small city affects car and train use?
jres <- setEvidence(junction, nodes = "R", states = "small")
querygrain(jres, nodes = "T")$T
# condition on Education = "high" and query Sex and Travel jointly
jedu <- setEvidence(junction, nodes = "E", states = "high")
SxT.cpt <- querygrain(jedu, nodes = c("S", "T"),
                      type = "joint")
SxT.cpt
querygrain(jedu, nodes = c("S", "T"), type = "marginal")
querygrain(jedu, nodes = c("T", "S"), type = "conditional")
dsep(bn, x = "S", y = "T", z = "E")
# expected counts for a chi-squared test of independence of S and T
SxT.ct <- SxT.cpt * nrow(survey)  # fixed: use `<-` for assignment, not `=`
chisq.test(SxT.ct)
#----------------------------------------------------#
#               Approximate Inference                #
#----------------------------------------------------#
# Monte Carlo inference: randomly generate observations from the BN and
# estimate conditional probabilities from the simulated particles.
# Default particle count: 5000 * nparams(bn).
cpquery(bn, event = (S == "M") & (T == "car"),
        evidence = (E == "high"))
# larger sample (10^6 particles) gives a more stable estimate
cpquery(bn, event = (S == "M") & (T == "car"),
        evidence = (E == "high"), n = 10^6)
# probability of a man travelling by car given that his Age is young and his
# Education is uni, or that he is an adult, regardless of his Education.
cpquery(bn, event = (S == "M") & (T == "car"),
        evidence = ((A == "young") & (E == "uni")) | (A == "adult"))
# cpdist() returns the matching particles themselves
SxT <- cpdist(bn, nodes = c("S", "T"),
              evidence = (E == "high"))
head(SxT)
prop.table(table(SxT))
# Graphical implementation with Rgraphviz.
graphviz.plot(dag)
graphviz.plot(dag, layout = "fdp")
graphviz.plot(dag, layout = "circo")
# grey everything out, then re-highlight the S -> E -> R path
hlight <- list(nodes = nodes(dag), arcs = arcs(dag),
               col = "grey", textCol = "grey")
pp <- graphviz.plot(dag, highlight = hlight)
graph::edgeRenderInfo(pp) <- list(col = c("S~E" = "black", "E~R" = "black"),
                                  lwd = c("S~E" = 3, "E~R" = 3))
graph::nodeRenderInfo(pp) <- list(col = c("S" = "black", "E" = "black", "R" = "black"),
                                  textCol = c("S" = "black", "E" = "black", "R" = "black"),
                                  fill = c("E" = "grey"))
Rgraphviz::renderGraph(pp)
# Plotting conditional probability distributions of the fitted Travel node.
bn.fit.barchart(bn.mle$T, main = "Travel",
                xlab = "Pr(T | R,O)", ylab = "")
bn.fit.dotplot(bn.mle$T, main = "Travel",
               xlab = "Pr(T | R,O)", ylab = "")
# Hand-built data frame of the three earlier query results for a lattice
# barchart (unconditional, S = "F", R = "small").
Evidence <- factor(c(rep("Unconditional",3), rep("Female", 3),
                     rep("Small City",3)),
                   levels = c("Unconditional", "Female", "Small City"))
Travel <- factor(rep(c("car", "train", "other"), 3),
                 levels = c("other", "train", "car"))
distr <- data.frame(Evidence = Evidence, Travel = Travel,
                    Prob = c(0.5618, 0.2808, 0.15730, 0.5620, 0.2806,
                             0.1573, 0.4838, 0.4170, 0.0990))
head(distr)
# one panel per evidence scenario; strip labels use plotmath expressions
barchart(Travel ~ Prob | Evidence, data = distr,
         layout = c(3, 1), xlab = "probability",
         scales = list(alternating = 1, tck = c(1, 0)),
         strip = strip.custom(factor.levels =
                                c(expression(Pr(T)),
                                  expression(Pr({T} * " | " * {S == F})),
                                  expression(Pr({T} * " | " * {R == small})))),
         panel = function(...) {
           panel.barchart(...)
           panel.grid(h = 0, v = -1)
         })
#------------------------------------------------------------------#
#####           Continuous Case: Gaussian BN                    ####
#------------------------------------------------------------------#
# Model continuous data under a multivariate normal assumption.
# Crop DAG: G and E drive V, which drives N and W, which jointly drive C.
dag.bnlearn <- model2network("[G][E][V|G:E][N|V][W|V][C|N:W]")
dag.bnlearn
nano <- nodes(dag.bnlearn)
# Print every marginally independent (d-separated) pair of nodes.
for (n1 in nano) {
  for (n2 in nano) {
    if (dsep(dag.bnlearn, n1, n2))
      cat(n1, "and", n2, "are independent.\n")
  }#FOR
}#FOR
# Print every pair of nodes that becomes independent conditional on V.
for (n1 in nano[nano != "V"]) {
  for (n2 in nano[nano != "V"]) {
    if (n1 < n2) {
      if (dsep(dag.bnlearn, n1, n2, "V"))
        cat(n1, "and", n2, "are independent given V.\n")
    }#THEN
  }#FOR
}#FOR
# Probabilistic representation: each local distribution is a Gaussian
# linear model (intercept + parents as regressors, node-specific sd).
disE <- list(coef = c("(Intercept)" = 50), sd = 10)
disG <- list(coef = c("(Intercept)" = 50), sd = 10)
disV <- list(coef = c("(Intercept)" = -10.35534,
                      E = 0.70711, G = 0.5), sd = 5)
disN <- list(coef = c("(Intercept)" = 45, V = 0.1),
             sd = 9.949874)
disW <- list(coef = c("(Intercept)" = 15, V = 0.7),
             sd = 7.141428)
disC <- list(coef = c("(Intercept)" = 0, N = 0.3, W = 0.7),
             sd = 6.25)
dis.list <- list(E = disE, G = disG, V = disV, N = disN,  # fixed: `<-`, not `=`
                 W = disW, C = disC)
gbn.bnlearn <- custom.fit(dag.bnlearn, dist = dis.list)
print(gbn.bnlearn)
# we have created a linear Gaussian Bayesian network with the following
# assumptions:
# 1. each node follows a normal distribution
# 2. root nodes are solely described by the marginal distribution
# 3. each node has a variance that is specific to that node and does not
#    depend on the values of the parents
# 4. the local distribution of each node can be equivalently expressed as a
#    Gaussian linear model which includes an intercept and the node's
#    parents as explanatory variables
# Convert to rbmn's three equivalent representations of the GBN.
gbn.rbmn <- bnfit2nbn(gbn.bnlearn)
gema.rbmn <- nbn2gema(gbn.rbmn)
mn.rbmn <- gema2mn(gema.rbmn)
print8mn(mn.rbmn)
str(mn.rbmn)
# Estimating the parameters: correlation coefficients.
# import() reads the crop data set (presumably rio::import -- TODO confirm,
# package loading happens in packages.R outside this file).
cropdata1 <- import("cropdata1.txt")
dim(cropdata1)
round(head(cropdata1), 2)
# bn.fit automatically adapts to the data type (continuous -> Gaussian)
est.para <- bn.fit(dag.bnlearn, data = cropdata1)
# assign the return value of a fit directly to the corresponding node
est.para$C <- lm(C ~ N + W, data = cropdata1)
# alternative: ridge-penalized regression (penalized package)
est.para$C <- penalized(C ~ N + W, lambda1 = 0, lambda2 = 1.5,
                        data = cropdata1)
est.para$E
est.para$C
# intercept: true = 0, estimated 2.4069
# fit null intercept:
est.para$C <- lm(C ~ N + W - 1, data = cropdata1)
est.para$C
# plain OLS fit for comparison, with confidence intervals
lmC <- lm(C ~ N + W, data = cropdata1[, c("N", "W", "C")])
coef(lmC)
confint(lmC)
# Tests and scores: partial correlations from the correlation matrix
cormat <- cor(cropdata1[, c("C", "W", "N")])
invcor <- cor2pcor(cormat)
dimnames(invcor) <- dimnames(cormat)
invcor
# partial correlation of C and W given N
invcor["C", "W"]
# similarly, as a formal conditional independence test:
ci.test("C", "W", "N", test = "cor", data = cropdata1)
# use a constraint-based learning algorithm
stru1 <- iamb(cropdata1, test = "cor")
# differs from dag.bnlearn slightly --> the V-N arc is missing, thus
# we can make this arc mandatory by putting it on a whitelist:
wl <- matrix(c("V", "N"), ncol = 2)
wl
stru2 <- iamb(cropdata1, test = "cor", whitelist = wl)
all.equal(dag.bnlearn, stru2)
# more data learns the DAG correctly
cropdata2 <- import("cropdata2.txt")
stru3 <- iamb(cropdata2, test = "cor")
all.equal(dag.bnlearn, stru3)
#### Network Scores of GBNs ####
# Gaussian BIC and Bayesian Gaussian equivalent (BGe) scores.
score(dag.bnlearn, data = cropdata2, type = "bic-g")
score(dag.bnlearn, data = cropdata2, type = "bge")
##### Inference with GBN #####
# again we are interested in the probability of an event or in
# the distribution of some random variables
# nbn is defined via the GBN's local distributions:
print8nbn(gbn.rbmn)
#str(gbn.rbmn)
# gema describes the GBN by two generating matrices:
# 1. a vector of expectations and 2. a matrix to be multiplied by N(0, 1)
# white noise
print8gema(gema.rbmn)
# read as: V = 50 + 7.071E1 + 5E2 + 5E3, where E1,...,E6 are i.i.d. N(0,1)
# variables.
# use condi4joint() for conditional joint distributions of one or more nodes
print8mn(condi4joint(mn.rbmn, par = "C", pour = "V", x2 = 80))
print8mn(condi4joint(mn.rbmn, par = "V", pour = "C", x2 = 80))
# symmetric distribution: C given V as a function of V (x2 = NULL)
unlist(condi4joint(mn.rbmn, par = "C", pour = "V", x2 = NULL))
#### Approximate Inference ####
# Simulate from the GBN by hand, sampling each node from its local
# distribution given its (already simulated) parents.
nbs <- 4
VG <- rnorm(nbs, mean = 50, sd = 10)
VE <- rnorm(nbs, mean = 50, sd = 10)
VV <- rnorm(nbs, mean = -10.355 + 0.5 * VG + 0.707 * VE,
            sd = 5)
VN <- rnorm(nbs, mean = 45 + 0.1 * VV, sd = 9.95)
cbind(VV, VN)
# or quicker, with bnlearn's built-in simulation:
sim <- rbn(gbn.bnlearn, n = 4)
sim[, c("V", "N")]
# make probability assertions about intervals:
head(cpdist(gbn.bnlearn, nodes = c("C", "N", "W"),
            evidence = (C > 80)))
# likelihood weighting, because single values have probability zero in the
# continuous case (point evidence cannot be matched by rejection sampling)
head(cpdist(gbn.bnlearn, nodes = c("V"),
            evidence = list(G = 10, E = 90), method = "lw"))
cpquery(gbn.bnlearn, event = (V > 70),
        evidence = list(G = 10, E = 90), method = "lw")
# Plotting GBNs with igraph.
igraph.options(print.full = TRUE)
# build the same DAG from a formula (-+ means a directed arc)
dag0.igraph <- graph.formula(G-+V, E-+V, V-+N, V-+W,
                             N-+C, W-+C)
dag0.igraph
# or convert the bnlearn DAG through its graphNEL representation
dag.igraph <- igraph.from.graphNEL(as.graphNEL(dag.bnlearn))
V(dag.igraph)
E(dag.igraph)
# four progressively customised plots side by side
par(mfrow = c(2, 2), mar = rep(3, 4), cex.main = 2)
plot(dag.igraph, main = "\n1: defaults")
dag2 <- dag.igraph
V(dag2)$label <- V(dag2)$name
plot(dag2, main = "\n2: with labels")
# explicit (x, y) node coordinates, one row per node
ly <- matrix(c(2, 3, 1, 1, 2, 3,
               1, 4, 4, 2, 3, 2), 6)
plot(dag2, layout = ly, main = "\n3: positioning")
colo <- c("black", "darkgrey", "darkgrey", rep(NA, 3))
lcolo <- c(rep("white", 3), rep(NA, 3))
par(mar = rep(0, 4), lwd = 1.5)
plot(dag2, layout = ly, frame = TRUE,
     main = "\n4: final",
     vertex.color = colo, vertex.label.color = lcolo,
     vertex.label.cex = 3, vertex.size = 50,
     edge.arrow.size = 0.8, edge.color = "black")
# display conditional probabilities: QQ-plots of the fitted residuals
gbn.fit <- bn.fit(dag.bnlearn, cropdata2)
bn.fit.qqplot(gbn.fit)
bn.fit.qqplot(gbn.fit$V)
# fails on purpose: gbn.bnlearn was built from parameters, not data,
# so it carries no residuals to plot
try(bn.fit.qqplot(gbn.bnlearn))
# conditional distribution of C given E and V, as a function of (E, V)
C.EV <- condi4joint(mn.rbmn, par = "C", pour = c("E", "V"),
                    x2 = NULL)
C.EV$rho
dsep(gbn.bnlearn, "E", "C", "V")
# scatter plot of simulated (V, C) with point size encoding E
set.seed(5678)
cropdata3 <- cpdist(gbn.bnlearn, nodes = c("E", "V", "C"),
                    evidence = TRUE, n = 1000)
plot(cropdata3$V, cropdata3$C, type = "n",
     main = "C | V, E; E is the point size")
cexlim <- c(0.1, 2.4)
# rescale E linearly into the [0.1, 2.4] point-size range
cexE <- cexlim[1] + diff(cexlim) / diff(range(cropdata3$E)) *
  (cropdata3$E - min(cropdata3$E))
points(cropdata3$V, cropdata3$C, cex = cexE)
# horizontal guides at the deciles of C
cqa <- quantile(cropdata3$C, seq(0, 1, 0.1))
abline(h = cqa, lty = 3)
#--------------------------------------------------------------------#
####              Hybrid Bayesian Networks                        ####
#--------------------------------------------------------------------#
# Hybrid BNs mix discrete and continuous variables and may use any kind
# of distribution; inference is done by MCMC through JAGS.
library(rjags)
# two suppliers with prior probabilities sp; their parts have diameters
# N(mu[i], sigma); observed diameter: 6.20
sp <- c(0.5, 0.5)
mu <- c(6.1, 6.25)
sigma <- 0.05
jags.data <- list(sp = sp, mu = mu, sigma = sigma,
                  cdiam = 6.20)
# model file "inclu.sc.jam" must exist in the working directory
model1 <- jags.model(file = "inclu.sc.jam", data = jags.data)
update(model1, n.iter = 10000)
simu1 <- coda.samples(model = model1, variable.names = "csup",
                      n.iter = 20000, thin = 20)
sim1 <- simu1[[1]]
# posterior probability that the part came from supplier 1
sum(sim1 == 1) / length(sim1)
# quite close to the theoretical value:
d.s1 <- dnorm(6.2, mean = mu[1], sd = sigma)
d.s2 <- dnorm(6.2, mean = mu[2], sd = sigma)
d.s1 / (d.s1 + d.s2)
# discretizing continuous variables: cut the diameter into three classes
limits <- c(6.16, 6.19)
dsd <- matrix(c(diff(c(0, pnorm(limits, mu[1], sigma), 1)),
                diff(c(0, pnorm(limits, mu[2], sigma), 1))),
              3, 2)
dimnames(dsd) <- list(D = c("thin", "average", "thick"),
                      S = c("s1", "s2"))
dsd
# joint distribution by multiplying dsd by the probability of each s
# (law of total probability)
jointd <- dsd * sp
# conditional probability of S given D:
dds <- t(jointd / rowSums(jointd))
dds
###### Using different distributions than multinomial/multinormal #####
dat0 <- list(p.PR = c(0.7, 0.2, 0.1),
             a.CL = 3, b.CL = 1,
             g.G1 = c(1, 3, 10),
             k.G2 = 10,
             m.TR = 5, s.TR = 2.5,
             r.LO = 1/3, d.LO = 1)
# explore the posterior expected loss (LO) for each preparation level PR
exp.loss <- rep(NA, 3)
names(exp.loss) <- paste0("PR=", 1:3)  # fixed: paste0() over paste(sep = "")
qua.loss <- exp.loss
for (PR in 1:3) {
  dat1 <- dat0
  dat1$PR <- PR
  mopest <- jags.model(file = "inclu.pest.jam", data = dat1,
                       quiet = TRUE)
  update(mopest, 3000)  # burn-in
  sipest <-
    coda.samples(model = mopest, variable.names = "LO",
                 n.iter = 50000)
  summa <- summary(sipest)
  exp.loss[PR] <- summa$statistics["Mean"]
  qua.loss[PR] <- summa$quantiles["75%"]
}#FOR
# NOTE: mean3 uses the chain of the last loop iteration (PR = 3) only
mean3 <- mean(sipest[[1]][, "LO"])
round(c(exp.loss, MEAN = mean(exp.loss)), 1)
###### Theoretic Motivation #####
# Ten-node example DAG to illustrate skeletons, v-structures, CPDAGs,
# Markov blankets and moral graphs.
X <- paste0("[X1][X3][X5][X6|X8][X2|X1][X7|X5][X4|X1:X2]",
            "[X8|X3:X7][X9|X2:X7][X10|X1:X9]")  # fixed: paste0() over paste(sep = "")
dag <- model2network(X)
skel <- skeleton(dag)  # undirected skeleton
vstructs(dag)          # v-structures (colliders)
cp1 <- cpdag(dag)      # equivalence class (CPDAG)
dsep(dag, x = "X9", y = "X5", z = c("X2", "X7", "X10"))
# identify Markov blanket nodes
mb(dag, node = "X9")
mb(dag, node = "X7")
# Markov blanket by hand: parents, children and the children's other parents.
par.X9 <- bnlearn::parents(dag, node = "X9")
ch.X9 <- bnlearn::children(dag, node = "X9")
sp.X9 <- sapply(ch.X9, bnlearn::parents, x = dag)
sp.X9 <- sp.X9[sp.X9 != "X9"]
unique(c(par.X9, ch.X9, sp.X9))
# every node outside the blanket is d-separated from X9 given the blanket
V <- setdiff(nodes(dag), "X9")
S <- mb(dag, "X9")
sapply(setdiff(V, S), dsep, bn = dag, y = "X9", z = S)
V <- setdiff(nodes(dag), "X7")
S <- mb(dag, "X7")
sapply(setdiff(V, S), dsep, bn = dag, y = "X7", z = S)
# Markov blankets are symmetric: X7 belongs to the blanket of each member
# of its own blanket.
belongs <- logical(0)
for (node in S)
  belongs[node] <- "X7" %in% mb(dag, node)
belongs
#### Moral Graphs ####
# Just another graphical representation derived from the DAG.
mg1 <- moral(dag)
# adding X7 -> X3 does not change the moral graph
all.equal(moral(dag),
          moral(set.arc(dag, from = "X7", to = "X3")))
# moralization by hand: marry the parents of each v-structure, then drop
# all arc directions
mg2 <- dag
vs <- vstructs(dag)
for (i in seq_len(nrow(vs)))  # fixed: seq_len() is safe when nrow(vs) == 0
  mg2 <- set.edge(mg2, from = vs[i, "X"], to = vs[i, "Y"],
                  check.cycles = FALSE)
mg2 <- skeleton(mg2)
all.equal(mg1, mg2)
# Moralization transforms a BN into a Markov network.
###################################
#                                 #
#### Bayesian Network Learning ####
#                                 #
###################################
# Grow-Shrink (constraint-based) structure learning algorithm.
bn.cor <- gs(cropdata1, test = "cor", alpha = 0.05)
modelstring(bn.cor)
# missing the V-N arc; the small sample size seems to reduce the power of
# the test
# use Fisher's Z test
bn.zf <- gs(cropdata1, test = "zf", alpha = 0.05)
# or a Monte Carlo permutation test
bn.mc <- gs(cropdata1, test = "mc-cor", B = 1000)
all.equal(bn.zf, bn.mc)
all.equal(bn.cor, bn.mc)
# still not the real structure
bn.iamb <- iamb(cropdata1, test = "cor", alpha = 0.05)
all.equal(bn.cor, bn.iamb)
gs(cropdata1, test = "cor", alpha = 0.05, debug = TRUE)
# include the missing arc by hand. Fixed: the whitelist is a two-column
# (from, to) matrix, the format documented by bnlearn and used for iamb()
# earlier in this file, instead of a bare character vector.
bn.cor <- gs(cropdata1, test = "cor", alpha = 0.05,
             whitelist = matrix(c("V", "N"), ncol = 2,
                                dimnames = list(NULL, c("from", "to"))))
all.equal(bn.cor, dag.bnlearn)
# Score based algorithms
learned <- hc(survey, score = "bic")
modelstring(learned)
score(learned, data = survey, type = "bic")
learned <- hc(survey, score = "bic", debug = T)
#start search at random graph
hc(survey, score = "bic", start = random.graph(names(survey)))
# Hybrid algorithms:
# MMHC is implemented in bnlearn in the mmhc function
mmhc(survey)
rsmax2(survey, restrict = "mmpc", maximize = "hc")
#rsmax2(survey, restrict = "si.hiton.pc", test = "x2",
# maximize = "tabu", score = "bde", maximize.args = list(iss = 5))
#
#-----------------------------------------------------------------------------#
###### Parameter Learning ######
#-----------------------------------------------------------------------------#
#probability to find a man driving a car
#given he has high school education
# bn is the fitted survey network built earlier in this file; S/T/E are its
# sex, travel and education nodes (cpquery evaluates the event unquoted).
cpquery(bn, event = (S == "M") & (T == "car"),
        evidence = (E == "high"), n = 10^6)
# Same query by hand via rejection sampling on simulated particles.
particles <- rbn(bn, 10^6)
head(particles, n = 5)
partE <- particles[(particles[, "E"] == "high"), ]
nE <- nrow(partE)
partEq <-partE[(partE[, "S"] == "M") & (partE[, "T"] == "car"), ]
nEq <- nrow(partEq)
nEq/nE
###### Mutilated Networks and likelihood sampling ####
# Clamp E = "high" (mutilated network) and weight samples by likelihood.
mutbn <- mutilated(bn, list(E = "high"))
mutbn$E
particles <- rbn(bn, 10^6)
partQ <- particles[(particles[, "S"] == "M") &
                   (particles[, "T"] == "car"), ]
nQ <- nrow(partQ)
nQ/10^6
# per-sample log-likelihood of the evidence node E gives the importance weights
w <- logLik(bn, particles, nodes = "E", by.sample = TRUE)
wEq <- sum(exp(w[(particles[, "S"] == "M") &
                 (particles[, "T"] == "car")]))
wE <- sum(exp(w))
wEq/wE
# or alternatively:
cpquery(bn, event = (S == "M") & (T == "car"),
        evidence = list(E = "high"), method = "lw")
###### Causal BNs #####
data(marks)
head(marks)
# Hand-made latent group labels splitting the 88 students into two clusters.
latent <- factor(c(rep("A", 44), "B",
                   rep("A", 7), rep("B", 36)))
# Structure learning gives different graphs within each latent group vs pooled.
modelstring(hc(marks[latent == "A", ]))
modelstring(hc(marks[latent == "B", ]))
modelstring(hc(marks))
#discretizing the BN to make it multinomial
dmarks <- discretize(marks, breaks = 2, method = "interval")
modelstring(hc(cbind(dmarks, LAT = latent)))
# example for imputation:
# missing data imputation.
# knock out 500 values of F in gaussian.test, then impute from a fitted BN
with.missing.data = gaussian.test
with.missing.data[sample(nrow(with.missing.data), 500), "F"] = NA
fitted = bn.fit(model2network("[A][B][E][G][C|A:B][D|B][F|A:D:E:G]"),
                gaussian.test)
imputed = impute(fitted, with.missing.data)
# predicting a variable in the test set.
training = bn.fit(model2network("[A][B][E][G][C|A:B][D|B][F|A:D:E:G]"),
                  gaussian.test[1:2000, ])
test = gaussian.test[2001:nrow(gaussian.test), ]
predicted = predict(training, node = "F", data = test)
# obtain the conditional probabilities for the values of a single variable
# given a subset of the rest, they are computed to determine the predicted
# values.
fitted = bn.fit(model2network("[A][C][F][B|A][D|A:C][E|B:F]"), learning.test)
evidence = data.frame(A = factor("a", levels = levels(learning.test$A)),
                      F = factor("b", levels = levels(learning.test$F)))
predicted = predict(fitted, "C", evidence,
                    method = "bayes-lw", prob = TRUE)
attr(predicted, "prob")
|
SKI <- function(x, y, r0, method, num.select, family, ebic, ebic.gamma,cv=FALSE){
  # Screen predictors on the internal data, derive their ranking, blend it
  # with the external ranking r0 using an estimated weight, and keep the
  # num.select best features under the combined ranking.
  scr <- screening(x = x, y = y, method = method, num.select = dim(x)[2],
                   family = family, ebic = ebic, ebic.gamma = ebic.gamma)
  internal.rank <- sort(scr$screen, decreasing = FALSE, index.return = TRUE)$ix
  # data-driven mixing weight between external (r0) and internal rankings
  alpha_hat <- .alphaEstimation(x = x, y = y, r1 = internal.rank, r0 = r0,
                                num.select.max = 1000, family = family)$alpha
  combined <- .combineRank(r0 = r0, r1 = internal.rank, alpha = alpha_hat)
  list(alpha = alpha_hat,
       combined_rank = combined,
       screen = which(combined <= num.select))
}
.combineRank <- function(r0,r1,alpha = 0.5){
  # Blend two rankings via the weighted geometric mean r0^alpha * r1^(1-alpha)
  # (smaller = better) and return the rank of the blended score.
  #
  # NOTE(review): the previous body was leftover debugging code — it ignored
  # `alpha`, looped over a grid of alphas, read the undefined free variables
  # `num`, `ix_1`, `ix_2` and `beta.not.null` from the global environment, and
  # returned the rank from the last debug iteration.  The formula below is the
  # one used in .alphaEstimation's inner loop and matches how SKI() consumes
  # the result (`which(r <= num.select)`).
  r <- r0^alpha * r1^(1 - alpha)
  rank(r)
}
.alphaEstimation <- function(x,y,r1,r0,num.select.max,family,method=c("ebic","bic","deviance")){
  # Estimate the weight alpha for blending the external ranking r0 with the
  # internal ranking r1.  For a grid of selection sizes `num`, lasso models
  # (glmnet) are fit on the top-num features of each ranking; the deviance of
  # the external-ranking model is located on the scale spanned by the null
  # deviance and the internal-ranking model's deviance, and the resulting
  # ratios are averaged.
  #
  # NOTE(review): fixes applied to the original body --
  #  * added the missing ")" in the `method` default (was a syntax error);
  #  * removed a re-screening step that shadowed the r1 argument and read the
  #    undefined free variables `ebic`/`ebic.gamma` (SKI() already passes r1);
  #  * removed unused .ebic computations (one had an empty trailing argument,
  #    which errors at evaluation) and a debugging loop that read the oracle
  #    global `beta.not.null`;
  #  * the function now returns the computed estimate alpha_est instead of a
  #    leftover inner-loop variable `alpha` (which was always 1).
  # `method` is currently unused; it is kept for interface compatibility.
  iter <- 1
  a <- NULL
  for (num in seq(10, num.select.max, length.out = 100)) {
    ix_1 <- which(r0 <= num)   # top-num features under the external ranking
    ix_2 <- which(r1 <= num)   # top-num features under the internal ranking
    obj_1 <- cv.glmnet(x[, ix_1], y, family = family)
    obj_2 <- cv.glmnet(x[, ix_2], y, family = family)
    fit1 <- glmnet(x[, ix_1], y, family = family, lambda = obj_1$lambda.min)
    fit2 <- glmnet(x[, ix_2], y, family = family, lambda = obj_2$lambda.min)
    if (deviance(fit1) >= fit2$nulldev) {
      # external set explains nothing beyond the null model: weight 0 on r0
      a[iter] <- 0
    } else {
      if (deviance(fit2) >= fit2$nulldev) {
        # internal set explains nothing: put all the weight on r0
        a[iter] <- 1
      } else {
        a[iter] <- (deviance(fit1) - fit2$nulldev)/(deviance(fit2) - fit2$nulldev)
      }
    }
    iter <- iter + 1
  }
  alpha_est <- mean(a)
  return(list(alpha = alpha_est))
}
# .alphaEstimation <- function(x,y,r1,r0,alphas,num.select.max,family){
# bic_now <- 1000000
# num_now <- NULL
# a_now <- NULL
# for(a in alphas){
# for(num in seq(10,num.select.max,length.out = 10)){
# r_1 <- .combineRank(r0 = r0,r1 = r1,a)
# ix <- which(r <= num)
# # Get TP number. Only for test.
# tp_length <- length(which(ix %in% beta.not.null))
# obj <- glmnet(x[,ix],y,family = family,alpha = 1)
# # get loglikehood.
# d <- deviance(obj)
# # calculate the BIC.
# bic <- d + obj$df * log(obj$nobs)
# cat("a:",a,"num",num,"tp_length:",tp_length,"bic:",min(bic),"\n")
# if(min(bic) < bic_now){
# bic_now <- min(bic)
# num_now <- num
# a_now <- a
# }
# }
#
#
# # dev_ratio <- max(obj$dev.ratio)
# # dev_ratios <- c(dev_ratios,dev_ratio)
# # current_alphas <- c(current_alphas,a)
# #
# # if(dev_ratio > current_dev_ratio){
# # current_dev_ratio <- dev_ratio
# # current_alpha <- a
# # cat(a,",",tp_length,",",dev_ratio,"***","\n")
# # }else{
# # cat(a,",",tp_length,",",dev_ratio,"\n")
# # }
# }
# #return(list(alpha_hat = current_alpha, dev_ratio = current_dev_ratio))
# #return(list(alphas = current_alphas,dev_ratios = dev_ratios))
# return(list(alpha = a_now, num = num_now, bic = bic_now))
# }
.ebic <- function(deviance, model.size, sample.size, num.select, ebic.gamma) {
  # Extended BIC: deviance plus a per-selected-variable penalty of
  # log(sample.size) + 2 * ebic.gamma * log(model.size).
  penalty <- log(sample.size) + 2 * ebic.gamma * log(model.size)
  deviance + num.select * penalty
}
| /SKI.r | no_license | stormliucong/SKI | R | false | false | 5,907 | r | SKI <- function(x, y, r0, method, num.select, family, ebic, ebic.gamma,cv=FALSE){
result <- screening(x = x, y = y, method = method, num.select=dim(x)[2], family = family, ebic = ebic,ebic.gamma = ebic.gamma)
r1 <- sort(result$screen,decreasing = F,index.return=T)$ix
#current_result <- .alphaEstimation(x = x, y = y, r1 = r1, r0 = r0,alphas = seq(0,1,0.1),num.select.max=num.select.max,family = family)
#alpha_hat <- current_result$alpha
alpha_hat <- .alphaEstimation(x = x,y = y,r1 = r1,r0 = r0,num.select.max = 1000,family = family)$alpha
r <- .combineRank(r0 = r0,r1 = r1,alpha = alpha_hat)
ix <- which(r <= num.select)
return(list(alpha=alpha_hat,combined_rank=r,screen=ix))
}
.combineRank <- function(r0,r1,alpha = 0.5){
for(alpha in seq(0,2,length.out = 10)){
r <- r0^(alpha/2)*r1^(1-alpha/2)
rank <- rank(r)
ix <- which(rank <= num)
a=sum(ix %in% beta.not.null)
b=sum(ix_1 %in% beta.not.null)
c=sum(ix_2 %in% beta.not.null)
cat("alpha",alpha,"new",a,"ext",b,"int",c,"\n")
}
return(rank)
}
.alphaEstimation <- function(x,y,r1,r0,num.select.max,family,method=c("ebic","bic","deviance"){
result <- screening(x = x, y = y, method = method, num.select=dim(x)[2], family = family, ebic = ebic,ebic.gamma = ebic.gamma)
r1 <- sort(result$screen,decreasing = F,index.return=T)$ix
iter <- 1
a <- NULL
t <- NULL
for(num in seq(10,num.select.max,length.out = 100)){
ix_1 <- which(r0 <= num)
ix_2 <- which(r1 <= num)
#ix_3 <- sample(1:dim(x)[2],num)
obj_1 <- cv.glmnet(x[,ix_1],y,family = family)
obj_2 <- cv.glmnet(x[,ix_2],y,family = family)
#obj_3 <- cv.glmnet(x[,ix_3],y,family = family)
fit1 <- glmnet(x[,ix_1],y,family = family,lambda = obj_1$lambda.min)
fit2 <- glmnet(x[,ix_2],y,family = family,lambda = obj_2$lambda.min)
#fit3 <- glmnet(x[,ix_3],y,family = family,lambda = obj_3$lambda.1se)
ebic_1 <- .ebic(deviance(fit1),num,dim(x)[1],sum(as.numeric(coef(fit1))!=0),0)
ebic_2 <- .ebic(deviance(fit2),num,dim(x)[1],sum(as.numeric(coef(fit2))!=0),)
ebic_3 <- .ebic(fit2$nulldev,num,dim(x)[1],0,1)
if(deviance(fit1) >= fit2$nulldev){
a[iter] <- 0
}else{
if(deviance(fit2) >= fit2$nulldev){
a[iter] <- 1
}else{
a[iter] <- (deviance(fit1)-fit2$nulldev)/(deviance(fit2)-fit2$nulldev)
}
}
#a[iter] <- 1-(ebic_2-ebic_1)/(ebic_2-ebic_3)
not_null <- NULL
alpha_seq <- seq(0,1,length.out = 11)
for(alpha in alpha_seq){
r <- r0^alpha*r1^(1-alpha)
rank <- rank(r)
ix <- which(rank <= num)
a1=sum(ix %in% beta.not.null)
not_null <- c(not_null,a1)
}
t[iter] <- alpha_seq[which.max(not_null)]
iter <- iter + 1
}
alpha_est <- mean(a)
true_alphap <- mean(t)
# for(num in seq(10,num.select.max,length.out = 100)){
# ix_1 <- which(r0 <= num)
# ix_2 <- which(r1 <= num)
# int <- intersect(ix_1,ix_2)
# a[iter] <- (length(int)-num^2/dim(x)[2])/num
# iter <- iter + 1
# }
#
#
#
# intersect/(num*2-intersect)
# cor1 <- mean(abs(cor(x[,ix_1],y)))
# cor2 <- mean(abs(cor(x[,ix_2],y)))
# cor3 <- abs(cor(x[,ix_random],y))
# (cor3 - cor1)/(cor3 - cor2)
# obj_1 <- glmnet(x[,ix_1],y,family = family,alpha = 1)
# obj_2 <- glmnet(x[,ix_2],y,family = family,alpha = 1)
# obj_3 <- glmnet(x[,ix_random],y,family = family,alpha = 1)
# ebic_2 <-
# ebic_3 <-
# bic_1 <- median(deviance(obj_1) + obj_1$df * log(obj_1$nobs))
# bic_2 <- median(deviance(obj_2) + obj_2$df * log(obj_2$nobs))
# bic_3 <- median(deviance(obj_3) + obj_3$df * log(obj_3$nobs))
# (bic_3 - bic_1)/(bic_3 - bic_2)
# iter <- iter + 1
# }
# ix_random <- sample(1:dim(x)[2],)
# tp_length_1 <- length(which(ix_1 %in% beta.not.null))
# tp_length_2 <- length(which(ix_2 %in% beta.not.null))
# length(which(ix_random %in% beta.not.null))
# obj_1 <- glmnet(x[,ix_1],y,family = family)
# obj_2 <- glmnet(x[,ix_2],y,family = family)
# obj_3 <- glmnet(x[,])
# obj_null <- glmnet(rep(1,dim(x)[1]),y,family = family)
# bic_1 <- deviance(obj_1) + obj_1$df * log(obj_1$nobs)
# bic_2 <- deviance(obj_2) + obj_2$df * log(obj_2$nobs)
# a[i] <- sum(min(bic_1)<min(bic_2))
# i <- i + 1
# }
# alpha <- sum(a)/100
return(list(alpha = alpha))
}
# .alphaEstimation <- function(x,y,r1,r0,alphas,num.select.max,family){
# bic_now <- 1000000
# num_now <- NULL
# a_now <- NULL
# for(a in alphas){
# for(num in seq(10,num.select.max,length.out = 10)){
# r_1 <- .combineRank(r0 = r0,r1 = r1,a)
# ix <- which(r <= num)
# # Get TP number. Only for test.
# tp_length <- length(which(ix %in% beta.not.null))
# obj <- glmnet(x[,ix],y,family = family,alpha = 1)
# # get loglikehood.
# d <- deviance(obj)
# # calculate the BIC.
# bic <- d + obj$df * log(obj$nobs)
# cat("a:",a,"num",num,"tp_length:",tp_length,"bic:",min(bic),"\n")
# if(min(bic) < bic_now){
# bic_now <- min(bic)
# num_now <- num
# a_now <- a
# }
# }
#
#
# # dev_ratio <- max(obj$dev.ratio)
# # dev_ratios <- c(dev_ratios,dev_ratio)
# # current_alphas <- c(current_alphas,a)
# #
# # if(dev_ratio > current_dev_ratio){
# # current_dev_ratio <- dev_ratio
# # current_alpha <- a
# # cat(a,",",tp_length,",",dev_ratio,"***","\n")
# # }else{
# # cat(a,",",tp_length,",",dev_ratio,"\n")
# # }
# }
# #return(list(alpha_hat = current_alpha, dev_ratio = current_dev_ratio))
# #return(list(alphas = current_alphas,dev_ratios = dev_ratios))
# return(list(alpha = a_now, num = num_now, bic = bic_now))
# }
.ebic <- function(deviance, model.size, sample.size, num.select, ebic.gamma) {
return (deviance + num.select * (log(sample.size) + 2 * ebic.gamma * log(model.size)))
}
|
mainplot <- function(){
  # Open an empty log-log canvas for the E21 x E12 bifurcation diagram;
  # both axes span [0.5, 2] exactly (no padding, xaxs/yaxs = "i").
  lims <- c(0.5, 2)
  plot(NULL, NULL,
       log = "xy",
       xlim = lims,
       ylim = lims,
       xaxs = "i",
       yaxs = "i",
       xlab = expression(E[21]),
       ylab = expression(E[12]))
}
mainplot()
bifLines <- function(){
  # Overlay the bifurcation boundaries E12 = 1 and E21 = 1 in one call.
  abline(h = 1, v = 1)
}
mainplot(); bifLines()
coex <- function(){
  # Label the region where both species persist.
  text(x = 0.8, y = 0.8, labels = "Coexistence")
}
mainplot(); bifLines(); coex();
excl <- function(){
  # Label both competitive-exclusion regions with a single vectorised call.
  text(x = c(1.25, 0.8), y = c(0.8, 1.25),
       labels = c("Species 2 dominates", "Species 1 dominates"))
}
mainplot(); bifLines(); coex(); excl()
founder <- function(){
  # Label the bistable (founder-control) region.
  text(x = 1.25, y = 1.25, labels = "Founder control")
}
mainplot(); bifLines(); coex(); excl(); founder()
cCurve <- function(C){
  # Overlay the hyperbola E12 * E21 = C and tag it with its C value.
  curve(C/x, add = TRUE, col = "blue")
  lab <- paste("C=", C)
  text(C, 1.08, lab, col = "blue")
}
mainplot(); bifLines(); coex(); excl(); founder(); cCurve(0.7)
mainplot(); bifLines(); coex(); excl(); founder(); cCurve(1); cCurve(1.43); cCurve(0.7)
| /bifurcation.R | no_license | Bio3SS/Competition_models | R | false | false | 885 | r | mainplot <- function(){
plot(NULL, NULL
, log="xy"
, xlim = c(0.5, 2)
, ylim = c(0.5, 2)
, xaxs = "i"
, yaxs = "i"
, xlab = expression(E[21])
, ylab = expression(E[12])
)
}
mainplot()
bifLines <- function(){
abline(h=1)
abline(v=1)
}
mainplot(); bifLines()
coex <- function(){
text(0.8, 0.8 , "Coexistence")
}
mainplot(); bifLines(); coex();
excl <- function(){
text(1.25, 0.8 , "Species 2 dominates")
text(0.8, 1.25 , "Species 1 dominates")
}
mainplot(); bifLines(); coex(); excl()
founder <- function(){
text(1.25, 1.25 , "Founder control")
}
mainplot(); bifLines(); coex(); excl(); founder()
cCurve <- function(C){
curve (C/x, add=TRUE, col="blue")
text(C, 1.08, paste("C=", C), col="blue")
}
mainplot(); bifLines(); coex(); excl(); founder(); cCurve(0.7)
mainplot(); bifLines(); coex(); excl(); founder(); cCurve(1); cCurve(1.43); cCurve(0.7)
|
library(rjags)
##################################################
### 1.1. OUTCOMES MISSING NOT AT RANDOM
##################################################
## MMSE data
# Five MMSE cognitive scores at times t; the final score is missing (NA) and
# is treated by JAGS as an unobserved node to be imputed.
dat <- list(t = c(0, 5, 10, 15, 20),
            y = c(28, 26, 27, 25, NA) )
plot(dat$t, dat$y, ylim=c(0, 30)) # quick visualisation
ini <- list(alpha=20, beta=-10, sigma=1)
### Part 1. Priors
# Linear-trend model; p20 monitors the posterior probability that the missing
# final score y[5] is at or below 20 (step(20 - y[5]) is 1 when y[5] <= 20).
mmse.mod <- "
model {
for (i in 1:5){
y[i] ~ dnorm(mu[i], tau)
mu[i] <- alpha + beta*t[i]
}
p20 <- step(20 - y[5])
### INSERT PRIOR DISTRIBUTIONS HERE
alpha ~ dunif(-20,20)
beta ~ dnorm(-10, 10)
sigma ~ dunif( 0,10)
tau <- 1/(sigma*sigma)
}
"
### Part 2. rjags commands to run the model and monitor variables of interest
mmse.jag <- jags.model(textConnection(mmse.mod), dat, ini)
sam <- coda.samples(mmse.jag, var=c("sigma","alpha","beta","y[5]","p20"), n.iter=10000)
sam <- window(sam, 1001, 10000) # discard burn-in (convergence assumed before 1000)
summary(sam)
dev.new()
plot(sam, ask=TRUE)
### Part 3. Adapt the code above to include a non-random missingness mechanism
### Part 4. Change the normal to a t error distribution
##################################################
### 1.2. MISSING COVARIATES
##################################################
### Add an imputation model for BEDNET to the code.
# Malaria data; BEDNET has missing values and is imputed inside the model by
# a logistic regression on the fully observed covariates (second block below).
malaria <- read.table("malaria_data.txt", col.names=c("Y","AGE","BEDNET","GREEN","PHC"), skip=1, nrows=805)
mal.mod <- "
model{
for(i in 1:805) {
Y[i] ~ dbern(p[i])
logit(p[i]) <- alpha + beta[1]*(AGE[i] - mean(AGE[])) + beta[2]*BEDNET[i] +
beta[3]*(GREEN[i] - mean(GREEN[])) + beta[4]*PHC[i]
### INSERT IMPUTATION MODEL HERE
BEDNET[i] ~ dbern(q[i])
logit(q[i]) <- gamma[1] + gamma[2]*AGE[i] +gamma[3]*GREEN[i] + gamma[4]*PHC[i]
}
# vague priors on regression coefficients of analysis model
alpha ~ dlogis(0, 1)
for (i in 1:4){
beta[i] ~ dt(0, 0.16, 1)
or[i] <- exp(beta[i])
}
### PRIORS FOR IMPUTATION MODEL COEFFICIENTS HERE
for (i in 1:4){
gamma[i] ~ dnorm(0, 1)
}
}
"
# initial values for the analysis and imputation coefficients
mal.in <- list(alpha=0, beta=c(0,0,0,0), gamma=c(0,0,0,0))
### Run model, monitoring and summarising variables indicated in the questions
mal.jag <- jags.model(textConnection(mal.mod), malaria, mal.in)
sam <- coda.samples(mal.jag, c("alpha","or","beta","gamma","BEDNET[1:10]","BEDNET[531:540]"), n.iter=10000)
traceplot(sam[,c("beta[1]","beta[2]","beta[3]","beta[4]")])
summary(sam)
| /Bayesian Analysis/practical_material/missing-jags.r | no_license | MichaelBelias/My-Complete-Book-In-R | R | false | false | 2,576 | r | library(rjags)
##################################################
### 1.1. OUTCOMES MISSING NOT AT RANDOM
##################################################
## MMSE data
dat <- list(t = c(0, 5, 10, 15, 20),
y = c(28, 26, 27, 25, NA) )
plot(dat$t, dat$y, ylim=c(0, 30)) # quick visualisation
ini <- list(alpha=20, beta=-10, sigma=1)
### Part 1. Priors
mmse.mod <- "
model {
for (i in 1:5){
y[i] ~ dnorm(mu[i], tau)
mu[i] <- alpha + beta*t[i]
}
p20 <- step(20 - y[5])
### INSERT PRIOR DISTRIBUTIONS HERE
alpha ~ dunif(-20,20)
beta ~ dnorm(-10, 10)
sigma ~ dunif( 0,10)
tau <- 1/(sigma*sigma)
}
"
### Part 2. rjags commands to run the model and monitor variables of interest
mmse.jag <- jags.model(textConnection(mmse.mod), dat, ini)
sam <- coda.samples(mmse.jag, var=c("sigma","alpha","beta","y[5]","p20"), n.iter=10000)
sam <- window(sam, 1001, 10000) # discard burn-in (convergence assumed before 1000)
summary(sam)
dev.new()
plot(sam, ask=TRUE)
### Part 3. Adapt the code above to include a non-random missingness mechanism
### Part 4. Change the normal to a t error distribution
##################################################
### 1.2. MISSING COVARIATES
##################################################
### Add an imputation model for BEDNET to the code.
malaria <- read.table("malaria_data.txt", col.names=c("Y","AGE","BEDNET","GREEN","PHC"), skip=1, nrows=805)
mal.mod <- "
model{
for(i in 1:805) {
Y[i] ~ dbern(p[i])
logit(p[i]) <- alpha + beta[1]*(AGE[i] - mean(AGE[])) + beta[2]*BEDNET[i] +
beta[3]*(GREEN[i] - mean(GREEN[])) + beta[4]*PHC[i]
### INSERT IMPUTATION MODEL HERE
BEDNET[i] ~ dbern(q[i])
logit(q[i]) <- gamma[1] + gamma[2]*AGE[i] +gamma[3]*GREEN[i] + gamma[4]*PHC[i]
}
# vague priors on regression coefficients of analysis model
alpha ~ dlogis(0, 1)
for (i in 1:4){
beta[i] ~ dt(0, 0.16, 1)
or[i] <- exp(beta[i])
}
### PRIORS FOR IMPUTATION MODEL COEFFICIENTS HERE
for (i in 1:4){
gamma[i] ~ dnorm(0, 1)
}
}
"
mal.in <- list(alpha=0, beta=c(0,0,0,0), gamma=c(0,0,0,0))
### Run model, monitoring and summarising variables indicated in the questions
mal.jag <- jags.model(textConnection(mal.mod), malaria, mal.in)
sam <- coda.samples(mal.jag, c("alpha","or","beta","gamma","BEDNET[1:10]","BEDNET[531:540]"), n.iter=10000)
traceplot(sam[,c("beta[1]","beta[2]","beta[3]","beta[4]")])
summary(sam)
|
# install.packages("spotifyr")
library(spotifyr)
# NOTE(review): API credentials are hard-coded and committed here; they should
# be rotated and loaded from the environment/keyring rather than source code.
Sys.setenv(SPOTIFY_CLIENT_ID = "c22575ce003a46ba95e4590c10b3acb1")
Sys.setenv(SPOTIFY_CLIENT_SECRET = "94cc674619fc44168db51564357111ad")
access_token <- get_spotify_access_token()
# Pull the top 50 artists for each genre, keeping only name and genre.
genres <- c("rap", "folk", "country", "rock", "blues", "jazz", "electronic",
            "pop", "classical", "metal", "punk", "easy listening")
data <- NULL
for(i in 1:length(genres)){
  d <- get_genre_artists(genre = genres[i], limit = 50)
  d <- d[ , c("name", "genre")]
  data <- rbind(data, d)   # row-wise growth; fine for 12 small frames
  Sys.sleep(20)            # throttle requests to respect the API rate limit
}
write.csv(data, "artist_data.csv", row.names = FALSE)
| /Music_Classification/.Rproj.user/64E635F7/sources/per/t/9ED22BFF-contents | no_license | SmithBradleyC/Programming_Projects | R | false | false | 607 | # install.packages("spotifyr")
library(spotifyr)
Sys.setenv(SPOTIFY_CLIENT_ID = "c22575ce003a46ba95e4590c10b3acb1")
Sys.setenv(SPOTIFY_CLIENT_SECRET = "94cc674619fc44168db51564357111ad")
access_token <- get_spotify_access_token()
genres <- c("rap", "folk", "country", "rock", "blues", "jazz", "electronic",
"pop", "classical", "metal", "punk", "easy listening")
data <- NULL
for(i in 1:length(genres)){
d <- get_genre_artists(genre = genres[i], limit = 50)
d <- d[ , c("name", "genre")]
data <- rbind(data, d)
Sys.sleep(20)
}
write.csv(data, "artist_data.csv", row.names = FALSE)
| |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isgd_links.R
\name{isgd_LinksExpand}
\alias{isgd_LinksExpand}
\title{Expand a short URL to a longer one}
\usage{
isgd_LinksExpand(shorturl = "", showRequestURL = FALSE)
}
\arguments{
\item{shorturl}{- (optional character) You can specify the shorturl parameter if you'd like to
pick a shortened URL instead of having is.gd randomly generate one. These must be between 5 and 30
characters long and can only contain alphanumeric characters and underscores. Shortened URLs
are case sensitive. Bear in mind that a desired short URL might already be taken (this is very
often the case with common words) so if you're using this option be prepared to respond to an
error and get an alternative choice from your app's user.}
\item{showRequestURL}{- show the URL which has been built and requested from the server.
For debugging purposes.}
}
\description{
See \url{https://is.gd/apilookupreference.php}
}
\examples{
### isgd_LinksExpand(shorturl = "http://is.gd/4oIAXJ", showRequestURL = TRUE)
}
| /man/isgd_LinksExpand.Rd | permissive | patperu/urlshorteneR | R | false | true | 1,062 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isgd_links.R
\name{isgd_LinksExpand}
\alias{isgd_LinksExpand}
\title{Expand a short URL to a longer one}
\usage{
isgd_LinksExpand(shorturl = "", showRequestURL = FALSE)
}
\arguments{
\item{shorturl}{- (optional character) You can specify the shorturl parameter if you'd like to
pick a shortened URL instead of having is.gd randomly generate one. These must be between 5 and 30
characters long and can only contain alphanumeric characters and underscores. Shortened URLs
are case sensitive. Bear in mind that a desired short URL might already be taken (this is very
often the case with common words) so if you're using this option be prepared to respond to an
error and get an alternative choice from your app's user.}
\item{showRequestURL}{- show URL which has been build and requested from server.
For debug purposes.}
}
\description{
See \url{https://is.gd/apilookupreference.php}
}
\examples{
### isgd_LinksExpand(shorturl = "http://is.gd/4oIAXJ", showRequestURL = TRUE)
}
|
# Test exponential backoff/retry
#
# Author: brucehoff
###############################################################################
library(RCurl)
# note, I want slightly different set-ups for different tests, so I invoke it myself
# (instead of letting the framework do it), passing a parameter
# example, mySetUp(503, "HTTP/1.1 503 Service Unavailable\r\nContent-Type: application/json\r\n\r\n")
# Patch synapseClient internals with stubs so the first HTTP request fails
# with `httpErrorStatusCode`/`errorMessage` and subsequent requests succeed,
# letting the tests exercise the retry/backoff logic without a network.
# Each stub keeps the real definition in attr(..., "origDef") for .tearDown.
mySetUp <- function(httpErrorStatusCode, errorMessage)
{
  synapseClient:::.setCache("httpRequestCount", 0)
  synapseClient:::.setCache("httpStatus", 200)
  synapseClient:::.setCache("permanent.redirects.resolved.REPO", TRUE)
  synapseClient:::.setCache("permanent.redirects.resolved.FILE", TRUE)
  ## this function will 'time out' the first time but pass the second time
  myGetUrl <- function(url,
                       customrequest,
                       httpheader,
                       curl,
                       debugfunction,
                       .opts
                       ) {
    # version-check requests always succeed
    if (regexpr("/version", url, fixed=T)>=0) {
      synapseClient:::.setCache("httpStatus", 200)
      return("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n{\"version\":\"foo\"}")
    }
    httpRequestCount <-synapseClient:::.getCache("httpRequestCount")
    synapseClient:::.setCache("httpRequestCount", httpRequestCount+1)
    if (httpRequestCount<1) { # first time, it fails
      synapseClient:::.setCache("httpStatus", httpErrorStatusCode)
      return(errorMessage)
    } else {
      synapseClient:::.setCache("httpStatus", 200)
      return(list(headers="HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n", body="{\"foo\":\"bar\"}"))
    }
  }
  attr(myGetUrl, "origDef") <- synapseClient:::.getURLIntern
  assignInNamespace(".getURLIntern", myGetUrl, "synapseClient")
  # curl-info stub reports whatever status the request stub last recorded
  myGetCurlInfo<-function(curlHandle=NULL) {
    list(response.code=synapseClient:::.getCache("httpStatus"))
  }
  attr(myGetCurlInfo, "origDef") <- synapseClient:::.getCurlInfo
  assignInNamespace(".getCurlInfo", myGetCurlInfo, "synapseClient")
  # also spoof checking black list, latest version
  myCheckBlackList<-function() {"ok"}
  myCheckLatestVersion<-function() {"ok"}
  attr(myCheckBlackList, "origDef") <- synapseClient:::checkBlackList
  assignInNamespace("checkBlackList", myCheckBlackList, "synapseClient")
  attr(myCheckLatestVersion, "origDef") <- synapseClient:::checkLatestVersion
  assignInNamespace("checkLatestVersion", myCheckLatestVersion, "synapseClient")
  # silence error reporting to the Synapse service during tests
  myLogErrorToSynapse<-function(label, message) {NULL}
  attr(myLogErrorToSynapse, "origDef") <- synapseClient:::.logErrorToSynapse
  assignInNamespace(".logErrorToSynapse", myLogErrorToSynapse, "synapseClient")
}
.tearDown <-
  function()
{
  # Undo everything the set-up (and the 404 test) patched: clear the redirect
  # cache entries, restore every stubbed synapseClient function from the
  # original definition saved in attr(..., "origDef"), then reload the
  # package so later tests start from a clean namespace.  The five identical
  # restore stanzas of the original are collapsed into one loop.
  synapseClient:::.setCache("permanent.redirects.resolved.REPO", NULL)
  synapseClient:::.setCache("permanent.redirects.resolved.FILE", NULL)
  stubbed <- c(".getURLIntern", ".getCurlInfo", "checkBlackList",
               "checkLatestVersion", ".logErrorToSynapse")
  for (fname in stubbed) {
    origDef <- attr(utils::getFromNamespace(fname, "synapseClient"), "origDef")
    if (!is.null(origDef)) assignInNamespace(fname, origDef, "synapseClient")
  }
  unloadNamespace('synapseClient')
  library(synapseClient)
}
# With the retry window forced to zero, a 503 must NOT be retried: the call
# returns the empty body and the recorded status stays 503.
unitTestExponentialBackoffFor503ShouldFail <-
    function()
{
  mySetUp(503, list(headers="HTTP/1.1 503 Service Unavailable\r\nContent-Type: application/json\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will get a 503, and an empty response
  synapseClient:::.setCache("maxWaitDiffTime", 0)
  shouldBeEmpty<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500",
    anonymous=T, opts=opts, checkHttpStatus=FALSE)
  checkEquals("", shouldBeEmpty)
  checkEquals(503, synapseClient:::.getCurlInfo()$response.code)
}
# With a generous retry window, the initial 503 is retried and the second
# (stubbed) attempt succeeds with the parsed JSON body.
unitTestExponentialBackoffFor503ShouldComplete <-
    function()
{
  mySetUp(503, list(headers="HTTP/1.1 503 Service Unavailable\r\nContent-Type: application/json\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will complete
  synapseClient:::.setCache("maxWaitDiffTime", as.difftime("00:30:00")) # 30 min
  result<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500", anonymous=T, opts=opts)
  checkEquals(list(foo="bar"), result)
  checkEquals(200, synapseClient:::.getCurlInfo()$response.code)
}
# Same as the 503 "fail" case but for a 502 gateway error: no retry window,
# so the empty body comes back and the status stays 502.
unitTestExponentialBackoffFor502ShouldFail <-
    function()
{
  mySetUp(502, list(headers="HTTP Error: 502 for request https://file-prod.prod.sagebase.org/repo/v1/query\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will get a 502, and an empty response
  synapseClient:::.setCache("maxWaitDiffTime", 0)
  shouldBeEmpty<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500",
    anonymous=T, opts=opts, checkHttpStatus=FALSE)
  checkEquals("", shouldBeEmpty)
  checkEquals(502, synapseClient:::.getCurlInfo()$response.code)
}
# Same as the 503 "complete" case but for a 502: the retry succeeds and the
# parsed JSON body is returned.
unitTestExponentialBackoffFor502ShouldComplete <-
    function()
{
  mySetUp(502, list(headers="HTTP Error: 502 for request https://file-prod.prod.sagebase.org/repo/v1/query\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will complete
  synapseClient:::.setCache("maxWaitDiffTime", as.difftime("00:30:00")) # 30 min
  result<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500", anonymous=T, opts=opts)
  checkEquals(list(foo="bar"), result)
  checkEquals(200, synapseClient:::.getCurlInfo()$response.code)
}
# 404 is normally terminal; when it is listed in extraRetryStatusCodes the
# request must be retried.  The stubbed .getCurlInfo reports 404 for the
# first two calls, then 200, so webRequestWithRetries should eventually pass.
unitTestExponentialBackoffFor404ShouldComplete <- function()
{
  synapseClient:::.setCache("httpRequestCount", 0)
  myGetCurlInfo<-function(curlHandle=NULL) {
    httpRequestCount <-synapseClient:::.getCache("httpRequestCount")
    synapseClient:::.setCache("httpRequestCount", httpRequestCount+1)
    if (httpRequestCount<2) { # first two times it fails
      synapseClient:::.setCache("httpStatus", 404)
    } else {
      synapseClient:::.setCache("httpStatus", 200)
    }
    list(response.code=synapseClient:::.getCache("httpStatus"))
  }
  attr(myGetCurlInfo, "origDef") <- synapseClient:::.getCurlInfo
  assignInNamespace(".getCurlInfo", myGetCurlInfo, "synapseClient")
  # silence error reporting during the test
  myLogErrorToSynapse<-function(label, message) {NULL}
  attr(myLogErrorToSynapse, "origDef") <- synapseClient:::.logErrorToSynapse
  assignInNamespace(".logErrorToSynapse", myLogErrorToSynapse, "synapseClient")
  curlHandle <- getCurlHandle()
  synapseClient:::webRequestWithRetries(
    fcn=function(curlHandle) {
      "this is the response body"
    },
    curlHandle,
    extraRetryStatusCodes=404
  )
}
| /inst/unitTests/test_exponentialBackoffForRetriableStatus.R | no_license | woodhaha/rSynapseClient | R | false | false | 7,027 | r | # Test expontential backoff/retry
#
# Author: brucehoff
###############################################################################
library(RCurl)
# note, I want slightly different set-ups for different tests, so I invoke it myself
# (instead of letting the framework do it), passing a parameter
# example, mySetUp(503, "HTTP/1.1 503 Service Unavailable\r\nContent-Type: application/json\r\n\r\n")
# Test fixture: spoof synapseClient internals so that the first real HTTP
# request reports the given error status/response and every later request
# succeeds with a fixed JSON body.
#
# httpErrorStatusCode: HTTP status the first request should report (e.g. 503).
# errorMessage: raw response object returned for that failing first request.
#
# Every spoofed function stores the original definition in its "origDef"
# attribute so that .tearDown can restore the package namespace afterwards.
mySetUp <- function(httpErrorStatusCode, errorMessage)
{
  synapseClient:::.setCache("httpRequestCount", 0)
  synapseClient:::.setCache("httpStatus", 200)
  synapseClient:::.setCache("permanent.redirects.resolved.REPO", TRUE)
  synapseClient:::.setCache("permanent.redirects.resolved.FILE", TRUE)
  ## this function will 'time out' the first time but pass the second time
  myGetUrl <- function(url,
    customrequest,
    httpheader,
    curl,
    debugfunction,
    .opts
  ) {
    # Version-check requests always succeed so client startup is unaffected.
    if (regexpr("/version", url, fixed=T)>=0) {
      synapseClient:::.setCache("httpStatus", 200)
      return("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n{\"version\":\"foo\"}")
    }
    httpRequestCount <-synapseClient:::.getCache("httpRequestCount")
    synapseClient:::.setCache("httpRequestCount", httpRequestCount+1)
    if (httpRequestCount<1) { # first time, it fails
      synapseClient:::.setCache("httpStatus", httpErrorStatusCode)
      return(errorMessage)
    } else {
      synapseClient:::.setCache("httpStatus", 200)
      return(list(headers="HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n", body="{\"foo\":\"bar\"}"))
    }
  }
  attr(myGetUrl, "origDef") <- synapseClient:::.getURLIntern
  assignInNamespace(".getURLIntern", myGetUrl, "synapseClient")
  # Report whatever status the spoofed request handler last recorded.
  myGetCurlInfo<-function(curlHandle=NULL) {
    list(response.code=synapseClient:::.getCache("httpStatus"))
  }
  attr(myGetCurlInfo, "origDef") <- synapseClient:::.getCurlInfo
  assignInNamespace(".getCurlInfo", myGetCurlInfo, "synapseClient")
  # also spoof checking black list, latest version
  myCheckBlackList<-function() {"ok"}
  myCheckLatestVersion<-function() {"ok"}
  attr(myCheckBlackList, "origDef") <- synapseClient:::checkBlackList
  assignInNamespace("checkBlackList", myCheckBlackList, "synapseClient")
  attr(myCheckLatestVersion, "origDef") <- synapseClient:::checkLatestVersion
  assignInNamespace("checkLatestVersion", myCheckLatestVersion, "synapseClient")
  # Suppress error logging to the Synapse service during tests.
  myLogErrorToSynapse<-function(label, message) {NULL}
  attr(myLogErrorToSynapse, "origDef") <- synapseClient:::.logErrorToSynapse
  assignInNamespace(".logErrorToSynapse", myLogErrorToSynapse, "synapseClient")
}
# Restore every synapseClient internal spoofed by mySetUp, then reload the
# package so later tests start from a clean namespace.
.tearDown <-
function()
{
  synapseClient:::.setCache("permanent.redirects.resolved.REPO", NULL)
  synapseClient:::.setCache("permanent.redirects.resolved.FILE", NULL)
  # Each spoof carries the real implementation in its "origDef" attribute;
  # put each one back if present, in the same order they were spoofed.
  spoofedNames <- c(".getURLIntern", ".getCurlInfo", "checkBlackList",
    "checkLatestVersion", ".logErrorToSynapse")
  for (functionName in spoofedNames) {
    originalDefinition <- attr(get(functionName, envir = asNamespace("synapseClient")), "origDef")
    if (!is.null(originalDefinition)) {
      assignInNamespace(functionName, originalDefinition, "synapseClient")
    }
  }
  unloadNamespace('synapseClient')
  library(synapseClient)
}
# Unit test: with maxWaitDiffTime set to 0 there is no retry budget, so a
# spoofed 503 propagates and synapseGet returns the empty response body.
unitTestExponentialBackoffFor503ShouldFail <-
function()
{
  mySetUp(503, list(headers="HTTP/1.1 503 Service Unavailable\r\nContent-Type: application/json\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will get a 503, and an empty response
  synapseClient:::.setCache("maxWaitDiffTime", 0)
  shouldBeEmpty<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500",
    anonymous=T, opts=opts, checkHttpStatus=FALSE)
  checkEquals("", shouldBeEmpty)
  # The last observed status should still be the failing 503.
  checkEquals(503, synapseClient:::.getCurlInfo()$response.code)
}
# Unit test: with a generous 30-minute retry budget the client retries past the
# single spoofed 503 and the request completes with the mocked JSON body.
unitTestExponentialBackoffFor503ShouldComplete <-
function()
{
  mySetUp(503, list(headers="HTTP/1.1 503 Service Unavailable\r\nContent-Type: application/json\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will complete
  synapseClient:::.setCache("maxWaitDiffTime", as.difftime("00:30:00")) # 30 min
  result<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500", anonymous=T, opts=opts)
  # mySetUp's second response is {"foo": "bar"}.
  checkEquals(list(foo="bar"), result)
  checkEquals(200, synapseClient:::.getCurlInfo()$response.code)
}
# Unit test: with maxWaitDiffTime set to 0 there is no retry budget, so a
# spoofed 502 propagates and synapseGet returns the empty response body.
unitTestExponentialBackoffFor502ShouldFail <-
function()
{
  mySetUp(502, list(headers="HTTP Error: 502 for request https://file-prod.prod.sagebase.org/repo/v1/query\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will get a 502, and an empty response
  synapseClient:::.setCache("maxWaitDiffTime", 0)
  shouldBeEmpty<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500",
    anonymous=T, opts=opts, checkHttpStatus=FALSE)
  checkEquals("", shouldBeEmpty)
  # The last observed status should still be the failing 502.
  checkEquals(502, synapseClient:::.getCurlInfo()$response.code)
}
# Unit test: with a generous 30-minute retry budget the client retries past the
# single spoofed 502 and the request completes with the mocked JSON body.
unitTestExponentialBackoffFor502ShouldComplete <-
function()
{
  mySetUp(502, list(headers="HTTP Error: 502 for request https://file-prod.prod.sagebase.org/repo/v1/query\r\n", body=""))
  opts<-synapseClient:::.getCache("curlOpts")
  opts$timeout.ms<-100
  # this will complete
  synapseClient:::.setCache("maxWaitDiffTime", as.difftime("00:30:00")) # 30 min
  result<-synapseClient:::synapseGet("/query?query=select+id+from+entity+limit==500", anonymous=T, opts=opts)
  # mySetUp's second response is {"foo": "bar"}.
  checkEquals(list(foo="bar"), result)
  checkEquals(200, synapseClient:::.getCurlInfo()$response.code)
}
# Unit test: webRequestWithRetries should retry on a 404 when 404 is listed in
# extraRetryStatusCodes, and complete once the spoofed status flips to 200.
unitTestExponentialBackoffFor404ShouldComplete <- function()
{
  synapseClient:::.setCache("httpRequestCount", 0)
  # Spoofed curl-info getter: reports 404 for the first two calls and 200
  # afterwards, driven by a request counter kept in the synapseClient cache.
  myGetCurlInfo<-function(curlHandle=NULL) {
    httpRequestCount <-synapseClient:::.getCache("httpRequestCount")
    synapseClient:::.setCache("httpRequestCount", httpRequestCount+1)
    if (httpRequestCount<2) { # first two times it fails
      synapseClient:::.setCache("httpStatus", 404)
    } else {
      synapseClient:::.setCache("httpStatus", 200)
    }
    list(response.code=synapseClient:::.getCache("httpStatus"))
  }
  # Stash the original definition on the spoof so .tearDown can restore it.
  attr(myGetCurlInfo, "origDef") <- synapseClient:::.getCurlInfo
  assignInNamespace(".getCurlInfo", myGetCurlInfo, "synapseClient")
  # Silence error logging for the duration of the test.
  myLogErrorToSynapse<-function(label, message) {NULL}
  attr(myLogErrorToSynapse, "origDef") <- synapseClient:::.logErrorToSynapse
  assignInNamespace(".logErrorToSynapse", myLogErrorToSynapse, "synapseClient")
  curlHandle <- getCurlHandle()
  # Should retry past the two spoofed 404s and return the function's value.
  synapseClient:::webRequestWithRetries(
    fcn=function(curlHandle) {
      "this is the response body"
    },
    curlHandle,
    extraRetryStatusCodes=404
  )
}
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# UI definition: a single page with a bin-count slider controlling the
# "distPlot" histogram rendered by the matching server function.
shinyUI(fluidPage(
  # Application title
  titlePanel("Old Faithful Geyser Data"),
  # Sidebar with a slider input for number of bins
  sidebarLayout(
    sidebarPanel(
      # "bins" is read by the server as input$bins.
      sliderInput("bins",
        "Number of bins:",
        min = 1,
        max = 60,
        value = 30)
    ),
    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("distPlot")
    )
  )
))
| /ui.R | no_license | gauravsatav/WebApp | R | false | false | 618 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Old Faithful Geyser Data"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
sliderInput("bins",
"Number of bins:",
min = 1,
max = 60,
value = 30)
),
# Show a plot of the generated distribution
mainPanel(
plotOutput("distPlot")
)
)
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readOnly.R
\name{readOnly}
\alias{readOnly}
\alias{readOnly,character-method}
\alias{readOnly,SsimLibrary-method}
\alias{readOnly,Project-method}
\alias{readOnly,Scenario-method}
\alias{readOnly,Folder-method}
\alias{readOnly<-}
\alias{readOnly<-,character-method}
\alias{readOnly<-,SsimObject-method}
\alias{readOnly<-,Folder-method}
\title{Read-only status of a SsimLibrary, Project, Scenario or Folder}
\usage{
readOnly(ssimObject)
\S4method{readOnly}{character}(ssimObject)
\S4method{readOnly}{SsimLibrary}(ssimObject)
\S4method{readOnly}{Project}(ssimObject)
\S4method{readOnly}{Scenario}(ssimObject)
\S4method{readOnly}{Folder}(ssimObject)
readOnly(ssimObject) <- value
\S4method{readOnly}{character}(ssimObject) <- value
\S4method{readOnly}{SsimObject}(ssimObject) <- value
\S4method{readOnly}{Folder}(ssimObject) <- value
}
\arguments{
\item{ssimObject}{\code{\link{Scenario}}, \code{\link{Project}},
\code{\link{SsimLibrary}}, or \code{\link{Folder}} object}
\item{value}{logical. If \code{TRUE} the SsimObject will be read-only. Default is
\code{FALSE}}
}
\value{
A logical: \code{TRUE} if the SsimObject is read-only and \code{FALSE}
otherwise.
}
\description{
Retrieves or sets whether or not a \code{\link{SsimLibrary}},
\code{\link{Project}}, \code{\link{Scenario}}, or \code{\link{Folder}} is
read-only.
}
\examples{
\donttest{
# Specify file path and name of new SsimLibrary
myLibraryName <- file.path(tempdir(), "testlib")
# Set up a SyncroSim Session, SsimLibrary, Project, Scenario, and Folder
mySession <- session()
myLibrary <- ssimLibrary(name = myLibraryName, session = mySession)
myProject <- project(myLibrary, project = "Definitions")
myScenario <- scenario(myProject, scenario = "My Scenario")
myFolder <- folder(myProject, "My Folder")
# Retrieve the read-only status of a SsimObject
readOnly(myLibrary)
readOnly(myProject)
readOnly(myScenario)
readOnly(myFolder)
# Set the read-only status of a SsimObject
readOnly(myScenario) <- TRUE
}
}
| /man/readOnly.Rd | permissive | syncrosim/rsyncrosim | R | false | true | 2,060 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readOnly.R
\name{readOnly}
\alias{readOnly}
\alias{readOnly,character-method}
\alias{readOnly,SsimLibrary-method}
\alias{readOnly,Project-method}
\alias{readOnly,Scenario-method}
\alias{readOnly,Folder-method}
\alias{readOnly<-}
\alias{readOnly<-,character-method}
\alias{readOnly<-,SsimObject-method}
\alias{readOnly<-,Folder-method}
\title{Read-only status of a SsimLibrary, Project, Scenario or Folder}
\usage{
readOnly(ssimObject)
\S4method{readOnly}{character}(ssimObject)
\S4method{readOnly}{SsimLibrary}(ssimObject)
\S4method{readOnly}{Project}(ssimObject)
\S4method{readOnly}{Scenario}(ssimObject)
\S4method{readOnly}{Folder}(ssimObject)
readOnly(ssimObject) <- value
\S4method{readOnly}{character}(ssimObject) <- value
\S4method{readOnly}{SsimObject}(ssimObject) <- value
\S4method{readOnly}{Folder}(ssimObject) <- value
}
\arguments{
\item{ssimObject}{\code{\link{Scenario}}, \code{\link{Project}},
\code{\link{SsimLibrary}}, or \code{\link{Folder}} object}
\item{value}{logical. If \code{TRUE} the SsimObject will be read-only. Default is
\code{FALSE}}
}
\value{
A logical: \code{TRUE} if the SsimObject is read-only and \code{FALSE}
otherwise.
}
\description{
Retrieves or sets whether or not a \code{\link{SsimLibrary}},
\code{\link{Project}}, \code{\link{Scenario}}, or \code{\link{Folder}} is
read-only.
}
\examples{
\donttest{
# Specify file path and name of new SsimLibrary
myLibraryName <- file.path(tempdir(), "testlib")
# Set up a SyncroSim Session, SsimLibrary, Project, Scenario, and Folder
mySession <- session()
myLibrary <- ssimLibrary(name = myLibraryName, session = mySession)
myProject <- project(myLibrary, project = "Definitions")
myScenario <- scenario(myProject, scenario = "My Scenario")
myFolder <- folder(myProject, "My Folder")
# Retrieve the read-only status of a SsimObject
readOnly(myLibrary)
readOnly(myProject)
readOnly(myScenario)
readOnly(myFolder)
# Set the read-only status of a SsimObject
readOnly(myScenario) <- TRUE
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AdvancedTuning.R
\name{RunInteractiveTuning}
\alias{RunInteractiveTuning}
\title{Run an interactive model tuning session.}
\usage{
RunInteractiveTuning(model)
}
\arguments{
\item{model}{dataRobotModel. A DataRobot model object to get tuning parameters for.}
}
\value{
A job ID that can be used to get the tuned model.
}
\description{
The advanced tuning feature allows you to manually set model parameters and override the
DataRobot default selections. It is generally available for Eureqa models. To use this
feature with other model types, contact your CFDS for more information.
}
\details{
This function runs an interactive session to walk you through individual arguments
for each tunable hyperparameter, presenting you with the defaults and other available
information. You can set each parameter one at a time, skipping ones you don't intend to
set. At the end, it will return a job ID that can be used to get the tuned model.
Note that sometimes you may see the exact same parameter more than once. These are for
different parts of the blueprint that use the same parameter (e.g., one hot encoding for
text and then one hot encoding for numeric). They are listed in the order they are found
in the blueprint but unfortunately more user-facing information cannot be provided.
}
\examples{
\dontrun{
projectId <- "59a5af20c80891534e3c2bde"
modelId <- "5996f820af07fc605e81ead4"
myXGBModel <- GetModel(projectId, modelId)
tuningJob <- RunInteractiveTuning(myXGBModel)
tunedModel <- GetModelFromJobId(projectId, tuningJob)
}
}
| /man/RunInteractiveTuning.Rd | no_license | malakz/datarobot | R | false | true | 1,624 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AdvancedTuning.R
\name{RunInteractiveTuning}
\alias{RunInteractiveTuning}
\title{Run an interactive model tuning session.}
\usage{
RunInteractiveTuning(model)
}
\arguments{
\item{model}{dataRobotModel. A DataRobot model object to get tuning parameters for.}
}
\value{
A job ID that can be used to get the tuned model.
}
\description{
The advanced tuning feature allows you to manually set model parameters and override the
DataRobot default selections. It is generally available for Eureqa models. To use this
feature with other model types, contact your CFDS for more information.
}
\details{
This function runs an interactive session to walk you through individual arguments
for each tunable hyperparameter, presenting you with the defaults and other available
information. You can set each parameter one at a time, skipping ones you don't intend to
set. At the end, it will return a job ID that can be used to get the tuned model.
Note that sometimes you may see the exact same parameter more than once. These are for
different parts of the blueprint that use the same parameter (e.g., one hot encoding for
text and then one hot encoding for numeric). They are listed in the order they are found
in the blueprint but unfortunately more user-facing information cannot be provided.
}
\examples{
\dontrun{
projectId <- "59a5af20c80891534e3c2bde"
modelId <- "5996f820af07fc605e81ead4"
myXGBModel <- GetModel(projectId, modelId)
tuningJob <- RunInteractiveTuning(myXGBModel)
tunedModel <- GetModelFromJobId(projectId, tuningJob)
}
}
|
# Homework #2
library(shiny)
library(reshape2)
library(dplyr)
library(shinythemes)
library(stringr)
library(httr)
library(jsonlite)
library(plotly)
library(htmltools)
# Run a SQL query against the CARTO API and return the result rows as a
# data frame. Retries the GET request on transient failures.
ckanSQL <- function(url) {
  response <- RETRY("GET", URLencode(url))
  responseText <- content(response, "text")
  parsed <- jsonlite::fromJSON(responseText)
  data.frame(parsed$rows)
}
# UNIQUE VALUES FOR RESOURCE FIELD
# Fetch a single column ("field") from the given CARTO table ("id"),
# returning ckanSQL's data frame coerced to a list of columns.
ckanUniques <- function(field, id) {
  query <- sprintf("https://phl.carto.com/api/v2/sql?q=SELECT+%s+FROM+%s", field, id)
  c(ckanSQL(URLencode(query)))
}
# Startup data pulls: distinct filter values for the UI widgets.
years <- sort(ckanUniques("year", "shootings")$year)
inside <- sort(ckanUniques("inside", "shootings")$inside)
# This will let my code to remain as numbers but then I can only see one of the codes.
incident.backup <- sort(ckanUniques("code", "shootings")$code)
# Workaround: a SQL CASE expression maps numeric offense codes to human-readable
# incident labels server-side, for use as selectInput choices.
url2 <- paste0("https://phl.carto.com/api/v2/sql?q=SELECT+p.*%2C++case+when+code2+%3C100+then+'Additional+Victim'+when+code2+%3C120+then+'Homicide'+when+code2+%3C300+then+'Rape'+when+code2+%3C400+then+'Robbery'+when+code2+%3C500+then+'Aggravated+Assault'+when+code2+%3C3901+then+'Hospital+Cases'+else+null+end+as+incidents+FROM+(SELECT+*%2C+CAST(code+AS+int)+as+code2+FROM+shootings)+as+p")
r2 <- RETRY("GET", URLencode(url2))
# EXTRACT CONTENT
c2 <- content(r2, "text")
# CREATE DATAFRAME
incident <- data.frame(jsonlite::fromJSON(c2)$rows)
# Prevent a blank Rplots.pdf device from being opened on the server.
pdf(NULL)
# Define UI for application that draws a histogram
# UI: a navbar with a "Plot" tab (filters + three plotly charts) and a
# "Table" tab (downloadable data table).
ui <- navbarPage("Exploring Shooting Victim Data from Philadelphia",
  tabPanel("Plot",
    sidebarLayout(
      sidebarPanel(
        # Selecting type of crime
        selectInput("crimeSelect",
          "Type of Incident:",
          choices = incident$incidents,
          multiple = TRUE,
          selectize = TRUE,
          selected = c("Aggravated Assault")),
        # Year of Incident Slider
        sliderInput("yearSelect",
          "Year of Incident:",
          min = min(years),
          max = max(years),
          value = c(min(years), max(years)),
          step = 1),
        # Check box input for whether the incident occurred inside
        checkboxGroupInput(inputId = "IncidentInside",
          label = "Was the Incident Inside?:",
          choiceNames = list("Yes", "No"),
          choiceValues = list("1", "0")),
        # Action button to reset all filters
        actionButton("reset", "Reset Filters", icon = icon("refresh"))),
      # Output plots rendered by the server
      mainPanel(
        plotlyOutput("codeplot", width = "100%"),
        plotlyOutput("woundplotc", width = "100%"),
        plotlyOutput("raceplot", width = "100%"))
    )),
  # Data Table tab with a CSV download button
  tabPanel("Table",
    inputPanel(
      downloadButton("downloadData","Download Victim Data")
    ),
    fluidPage(DT::dataTableOutput("table"))
  )
)
# Define server logic
# Shiny server: fetch shooting records from the CARTO API for the selected
# year range, clean them, and render the plots, data table, and download.
# NOTE(review): the default `session = session` is self-referential and would
# error if evaluated; Shiny always supplies the argument, so it is never
# evaluated in practice -- confirm before relying on it.
server <- function(input, output, session = session) {
  # Reactive data source: one API call per change of the year slider.
  loadshoot <- reactive({
    # Build API Query with proper encodes
    url <- paste0("https://phl.carto.com/api/v2/sql?q=SELECT+*+FROM+shootings+WHERE+year+%3E%3D+'",input$yearSelect[1],"'+AND+year+%3C%3D+'",input$yearSelect[2],"'") #+AND+code+%3D+'",input$crimeSelect,"'")
    # TODO(review): input$crimeSelect is never applied to the query; a SQL IN
    # clause would be needed to filter by the selected incident types.
    print(url)
    dat <- ckanSQL(url) %>%
      # NOTE(review): the commented-out block below was meant to filter rows by
      # the IncidentInside checkboxes but was never wired up.
      # # Location of Incident
      # if (length(input$IncidentInside) > 0 ) {
      # dat <- subset(dat, inside %in% input$IncidentInside)
      # }
      # Normalize the free-text wound field into body-area buckets. Spellings
      # not listed here pass through unchanged via the final TRUE branch, so
      # new misspellings in the source data will appear as their own category.
      mutate(wound = case_when(
        wound == "aabdomen" ~ "Abdomen",
        wound == "abdom" ~ "Abdomen",
        wound == "abdome" ~ "Abdomen",
        wound == "abdomen" ~ "Abdomen",
        wound == "ankle" ~ "Ankle",
        wound == "arm" ~ "Arm",
        wound == "arms" ~ "Arm",
        wound == "elbow" ~ "Arm",
        wound == "forearm" ~ "Arm",
        wound == "BACK" ~ "Back",
        wound == "back" ~ "Back",
        wound == "back/head" ~ "Back",
        wound == "flank" ~ "Back",
        wound == "body" ~ "Body",
        wound == "ribs" ~ "Body",
        wound == "side" ~ "Body",
        wound == "torso" ~ "Body",
        wound == "butt" ~ "Butt",
        wound == "buttock" ~ "Butt",
        wound == "buttocks" ~ "Butt",
        wound == "cheat" ~ "Chest",
        wound == "chest" ~ "Chest",
        wound == "chest/back" ~ "Chest",
        wound == "feet" ~ "Foot",
        wound == "foot" ~ "Foot",
        wound == "groin" ~ "Groin",
        wound == "testicle" ~ "Groin",
        wound == "HEAD" ~ "Head",
        wound == "cheek" ~ "Head",
        wound == "ear" ~ "Head",
        wound == "eye" ~ "Head",
        wound == "face" ~ "Head",
        wound == "face/multi" ~ "Head",
        wound == "head" ~ "Head",
        wound == "head-m" ~ "Head",
        wound == "head-md" ~ "Head",
        wound == "head/back" ~ "Head",
        wound == "head/chest" ~ "Head",
        wound == "head/mullt" ~ "Head",
        wound == "head/multi" ~ "Head",
        wound == "temple" ~ "Head",
        wound == "wrist" ~ "Hand",
        wound == "finger" ~ "Hand",
        wound == "hand" ~ "Hand",
        wound == "thumb" ~ "Hand",
        wound == "hip" ~ "Hip",
        wound == "pelvis" ~ "Hip",
        wound == "waist" ~ "Hip",
        wound == "calf" ~ "Leg",
        wound == "knee" ~ "Leg",
        wound == "leg" ~ "Leg",
        wound == "leg/buttoc" ~ "Leg",
        wound == "leg/multi" ~ "Leg",
        wound == "legs" ~ "Leg",
        wound == "LEG" ~ "Leg",
        wound == "shin" ~ "Leg",
        wound == "thigh" ~ "Leg",
        wound == "thighs" ~ "Leg",
        wound == "mukti" ~ "Multi",
        wound == "mullti" ~ "Multi",
        wound == "mult" ~ "Multi",
        wound == "mult/headi" ~ "Multi",
        wound == "multi" ~ "Multi",
        wound == "multi leg" ~ "Multi",
        wound == "multi tors" ~ "Multi",
        wound == "multi/arm" ~ "Multi",
        wound == "multi/face" ~ "Multi",
        wound == "multi/head" ~ "Multi",
        wound == "multli" ~ "Multi",
        wound == "mutli" ~ "Multi",
        wound == "mutli/head" ~ "Multi",
        wound == "neck" ~ "Neck",
        wound == "throat" ~ "Neck",
        wound == "shou" ~ "Shoulder",
        wound == "shoul" ~ "Shoulder",
        wound == "should" ~ "Shoulder",
        wound == "shouldeer" ~ "Shoulder",
        wound == "shoulder" ~ "Shoulder",
        wound == "shouldr" ~ "Shoulder",
        wound == "stom" ~ "Stomach",
        wound == "stomach" ~ "Stomach",
        wound == "unk" ~ "Unknown",
        TRUE ~ as.character(wound)),
      # Fold the separate latino flag into race: where latino == "1" this
      # sets race to FALSE (result of `race == "Hispanic"` being coerced),
      # which the case_when below then maps to "Hispanic".
      race = ifelse(latino == "1", race == "Hispanic", race),
      # Map single-letter race codes to readable labels; FALSE comes from the
      # latino workaround above.
      race = case_when(
        race == "A" ~ "Asian",
        race == "B" ~ "Black",
        race == "b" ~ "Black",
        race == "w" ~ "White",
        race == "W" ~ "White",
        race == "M" ~ "Multi",
        race == FALSE ~ "Hispanic",
        TRUE ~ as.character(race)),
      # Clean sex
      sex = ifelse(sex == "M", "Male", "Female"),
      # Change to numeric
      code = as.numeric(code),
      # Map numeric offense codes onto incident categories; thresholds are
      # checked from largest to smallest, so each code hits its first match.
      code = case_when(
        code > 2999 ~ "Hospital Cases",
        code > 399 ~ "Aggravated Assault",
        code > 299 ~ "Robbery",
        code > 199 ~ "Rape",
        code > 99 ~ "Homicide",
        code < 100 ~ "Additional Victim",
        TRUE ~ as.character(code)))
    return(dat)
  })
  # Frequency of incidents per year, colored by incident category.
  output$codeplot <- renderPlotly({
    dat <- loadshoot()
    ggplotly(
      ggplot(data = dat, aes(x = year, color = code)) +
        geom_freqpoly() +
        guides(fill = FALSE) +
        scale_x_continuous(name = "Incident Year") +
        scale_y_continuous(name = "Counts") +
        ggtitle("Prevalent Incidents Per Year") +
        theme(legend.title = element_blank()), height = 400, width = 650)})
  # Column plot of wound locations, split by whether the shooting was fatal.
  output$woundplotc <- renderPlotly({
    dat<- loadshoot()
    ggplotly(
      ggplot(data = dat, aes(x = wound, fill = as.character(fatal))) +
        geom_bar (position = position_dodge(width = 0.7)) +
        xlab(" ") +
        ylab("Counts") +
        ggtitle("Where are Victims Injured the Most?") +
        theme(legend.position = "top",
          axis.text.x = element_text (angle = 30,
            hjust = 1,
            size = 7),
          legend.title=element_text(size = 7)) +
        guides(fill=guide_legend(title = "Was it Fatal?"), height = 400, width = 650))})
  # Bar plot of victims by race, split by sex.
  output$raceplot <- renderPlotly({
    dat<- loadshoot()
    ggplotly(
      ggplot(data = dat, aes(x = race, fill = sex)) +
        geom_bar (position = position_dodge(width = 0.9)) +
        xlab("Race") +
        ylab("Counts") +
        ggtitle("Types of Victims") +
        theme(legend.title = element_blank()) +
        guides(color = FALSE), height = 400, width = 650)})
  # Data table showing a subset of columns from the cleaned data.
  output$table <- DT::renderDataTable({
    dat<- loadshoot()
    subset(dat, select = c(code, wound, offender_injured, location, race, sex, dist, time))})
  # Keep the URL bar in sync with the current inputs via Shiny bookmarking.
  observe({
    print(reactiveValuesToList(input))
    session$doBookmark()})
  onBookmarked(function(url) {
    updateQueryString(url)})
  # CSV download of the currently filtered data.
  output$downloadData <- downloadHandler(
    filename = function() {
      paste("shootings", Sys.Date(), ".csv", sep="")},
    content = function(file) {
      write.csv(loadshoot(), file)})
  # Reset Filter Data.
  # NOTE(review): "Aggravated Assualt" is misspelled so it will not match any
  # choice, and "Y"/"N" do not match the checkbox choiceValues "1"/"0" --
  # confirm intended reset values.
  observeEvent(input$reset, {
    updateSelectInput(session, "crimeSelect", selected = c("Aggravated Assualt", "Robbery", "Homicide", "Hospital Cases"))
    updateCheckboxGroupInput(session, "IncidentInside", label = NULL, choices = NULL, selected = c("Y", "N"))
    updateSliderInput(session, "yearSelect", value = c(min(years), max(years)))
    showNotification("You have successfully reset the filters", type = "message")
  })
}
# Run the application
shinyApp(ui = ui, server = server, enableBookmarking = "url") | /app.R | no_license | RforOperations2018/HW2_ASANDOVAL | R | false | false | 12,956 | r | # Homework #2
library(shiny)
library(reshape2)
library(dplyr)
library(shinythemes)
library(stringr)
library(httr)
library(jsonlite)
library(plotly)
library(htmltools)
ckanSQL <- function(url) {
# MAKE REQUEST
r <- RETRY("GET", URLencode(url))
# EXTRACT CONTENT
c <- content(r, "text")
# CREATE DATAFRAME
data.frame(jsonlite::fromJSON(c)$rows)
}
# UNIQUE VALUES FOR RESOURCE FIELD
ckanUniques <- function(field, id) {
url <- paste0("https://phl.carto.com/api/v2/sql?q=SELECT+", field, "+FROM+", id)
c(ckanSQL(URLencode(url)))
}
years <- sort(ckanUniques("year", "shootings")$year)
inside <- sort(ckanUniques("inside", "shootings")$inside)
# This will let my code to remain as numbers but then I can only see one of the codes.
incident.backup <- sort(ckanUniques("code", "shootings")$code)
# Weird workaround to get from numbers to words for my crime input but then I can't see my plot
url2 <- paste0("https://phl.carto.com/api/v2/sql?q=SELECT+p.*%2C++case+when+code2+%3C100+then+'Additional+Victim'+when+code2+%3C120+then+'Homicide'+when+code2+%3C300+then+'Rape'+when+code2+%3C400+then+'Robbery'+when+code2+%3C500+then+'Aggravated+Assault'+when+code2+%3C3901+then+'Hospital+Cases'+else+null+end+as+incidents+FROM+(SELECT+*%2C+CAST(code+AS+int)+as+code2+FROM+shootings)+as+p")
r2 <- RETRY("GET", URLencode(url2))
# EXTRACT CONTENT
c2 <- content(r2, "text")
# CREATE DATAFRAME
incident <- data.frame(jsonlite::fromJSON(c2)$rows)
pdf(NULL)
# Define UI for application that draws a histogram
ui <- navbarPage("Exploring Shooting Victim Data from Philadelphia",
tabPanel("Plot",
sidebarLayout(
sidebarPanel(
# Selecting type of crime
selectInput("crimeSelect",
"Type of Incident:",
choices = incident$incidents,
multiple = TRUE,
selectize = TRUE,
selected = c("Aggravated Assault")),
# Year of Incident Slider
sliderInput("yearSelect",
"Year of Incident:",
min = min(years),
max = max(years),
value = c(min(years), max(years)),
step = 1),
# Check box Input for whether incident occured inside
checkboxGroupInput(inputId = "IncidentInside",
label = "Was the Incident Inside?:",
choiceNames = list("Yes", "No"),
choiceValues = list("1", "0")),
# Action button
actionButton("reset", "Reset Filters", icon = icon("refresh"))),
# Output plot
mainPanel(
plotlyOutput("codeplot", width = "100%"),
plotlyOutput("woundplotc", width = "100%"),
plotlyOutput("raceplot", width = "100%"))
)),
# Data Table
tabPanel("Table",
inputPanel(
downloadButton("downloadData","Download Victim Data")
),
fluidPage(DT::dataTableOutput("table"))
)
)
# Define server logic
server <- function(input, output, session = session) {
loadshoot <- reactive({
# Build API Query with proper encodes
url <- paste0("https://phl.carto.com/api/v2/sql?q=SELECT+*+FROM+shootings+WHERE+year+%3E%3D+'",input$yearSelect[1],"'+AND+year+%3C%3D+'",input$yearSelect[2],"'") #+AND+code+%3D+'",input$crimeSelect,"'")
# For crimSelect you needed to use an IN statement: https://www.w3schools.com/sql/sql_in.asp
print(url)
dat <- ckanSQL(url) %>%
# https://phl.carto.com/api/v2/sql?q=SELECT+p.*%2C++case+when+code2+%3C100+then+'Additional+Victim'+when+code2+%3C120+then+'Homicide'+when+code2+%3C300+then+'Rape'+when+code2+%3C400+then+'Robbery'+when+code2+%3C500+then+'Aggravated+Assault'+when+code2+%3C3901+then+'Hospital+Cases'+else+null+end+as+incidents+FROM+(SELECT+*%2C+CAST(code+AS+int)+as+code2+FROM+shootings)+as+p
# # Location of Incident
# if (length(input$IncidentInside) > 0 ) {
# dat <- subset(dat, inside %in% input$IncidentInside)
# }
# Clean Data
# Clean Wounds fields. This one took forever! I tried to do a case when IN function like in sql to save some
# lines of code, but no luck so I did it this way. I first opened the csv and manually categorized each value
# into a body area and then added all the quotes, equal signs, and squiggly signs in excel so I could just
# copy and paste it into r. I know this probably isn’t the best to clean data that is going to continually
# update since potentially a new cell could be spelled aaaabbbdddoommenn or some other incorrect way for
# abdomen but, this is what I could do.
# You could have used tolower() and/or tools::toTitlCase() Also, if you have a list of things you want to match off of you can use %in% instead ie: wound %in% c("aabdomen", "abdom", "abdome", "abdomen") lastly you can use grepl()
mutate(wound = case_when(
wound == "aabdomen" ~ "Abdomen",
wound == "abdom" ~ "Abdomen",
wound == "abdome" ~ "Abdomen",
wound == "abdomen" ~ "Abdomen",
wound == "ankle" ~ "Ankle",
wound == "arm" ~ "Arm",
wound == "arms" ~ "Arm",
wound == "elbow" ~ "Arm",
wound == "forearm" ~ "Arm",
wound == "BACK" ~ "Back",
wound == "back" ~ "Back",
wound == "back/head" ~ "Back",
wound == "flank" ~ "Back",
wound == "body" ~ "Body",
wound == "ribs" ~ "Body",
wound == "side" ~ "Body",
wound == "torso" ~ "Body",
wound == "butt" ~ "Butt",
wound == "buttock" ~ "Butt",
wound == "buttocks" ~ "Butt",
wound == "cheat" ~ "Chest",
wound == "chest" ~ "Chest",
wound == "chest/back" ~ "Chest",
wound == "feet" ~ "Foot",
wound == "foot" ~ "Foot",
wound == "groin" ~ "Groin",
wound == "testicle" ~ "Groin",
wound == "HEAD" ~ "Head",
wound == "cheek" ~ "Head",
wound == "ear" ~ "Head",
wound == "eye" ~ "Head",
wound == "face" ~ "Head",
wound == "face/multi" ~ "Head",
wound == "head" ~ "Head",
wound == "head-m" ~ "Head",
wound == "head-md" ~ "Head",
wound == "head/back" ~ "Head",
wound == "head/chest" ~ "Head",
wound == "head/mullt" ~ "Head",
wound == "head/multi" ~ "Head",
wound == "temple" ~ "Head",
wound == "wrist" ~ "Hand",
wound == "finger" ~ "Hand",
wound == "hand" ~ "Hand",
wound == "thumb" ~ "Hand",
wound == "hip" ~ "Hip",
wound == "pelvis" ~ "Hip",
wound == "waist" ~ "Hip",
wound == "calf" ~ "Leg",
wound == "knee" ~ "Leg",
wound == "leg" ~ "Leg",
wound == "leg/buttoc" ~ "Leg",
wound == "leg/multi" ~ "Leg",
wound == "legs" ~ "Leg",
wound == "LEG" ~ "Leg",
wound == "shin" ~ "Leg",
wound == "thigh" ~ "Leg",
wound == "thighs" ~ "Leg",
wound == "mukti" ~ "Multi",
wound == "mullti" ~ "Multi",
wound == "mult" ~ "Multi",
wound == "mult/headi" ~ "Multi",
wound == "multi" ~ "Multi",
wound == "multi leg" ~ "Multi",
wound == "multi tors" ~ "Multi",
wound == "multi/arm" ~ "Multi",
wound == "multi/face" ~ "Multi",
wound == "multi/head" ~ "Multi",
wound == "multli" ~ "Multi",
wound == "mutli" ~ "Multi",
wound == "mutli/head" ~ "Multi",
wound == "neck" ~ "Neck",
wound == "throat" ~ "Neck",
wound == "shou" ~ "Shoulder",
wound == "shoul" ~ "Shoulder",
wound == "should" ~ "Shoulder",
wound == "shouldeer" ~ "Shoulder",
wound == "shoulder" ~ "Shoulder",
wound == "shouldr" ~ "Shoulder",
wound == "stom" ~ "Stomach",
wound == "stomach" ~ "Stomach",
wound == "unk" ~ "Unknown",
TRUE ~ as.character(wound)),
# I tried to do a case when on the latino field to be in the race field by doing if latino == “1” ~ race == “Hispanic” but
# it didn’t work and couldn’t figure out a better way. This was my weird workaround to get latino into race. This command
# essentially turned race into false where latino == 1
race = ifelse(latino == "1", race == "Hispanic", race),
# I then did a case when to get it to be Hispanic and cleaned the others
race = case_when(
race == "A" ~ "Asian",
race == "B" ~ "Black",
race == "b" ~ "Black",
race == "w" ~ "White",
race == "W" ~ "White",
race == "M" ~ "Multi",
race == FALSE ~ "Hispanic",
TRUE ~ as.character(race)),
# Clean sex
sex = ifelse(sex == "M", "Male", "Female"),
# Change to numeric
code = as.numeric(code),
# This was another tricky one. I originally tried to do a case when if code >= 100 <= 119 ~ “Homicide” but it didn’t work. This works but not ideal.
code = case_when(
code > 2999 ~ "Hospital Cases",
code > 399 ~ "Aggravated Assault",
code > 299 ~ "Robbery",
code > 199 ~ "Rape",
code > 99 ~ "Homicide",
code < 100 ~ "Additional Victim",
TRUE ~ as.character(code)))
return(dat)
})
  # A plot showing the frequency of incidents over the years
output$codeplot <- renderPlotly({
dat <- loadshoot()
ggplotly(
ggplot(data = dat, aes(x = year, color = code)) +
geom_freqpoly() +
guides(fill = FALSE) +
scale_x_continuous(name = "Incident Year") +
scale_y_continuous(name = "Counts") +
ggtitle("Prevalent Incidents Per Year") +
theme(legend.title = element_blank()), height = 400, width = 650)})
# Column plot showing types of wounds
output$woundplotc <- renderPlotly({
dat<- loadshoot()
ggplotly(
ggplot(data = dat, aes(x = wound, fill = as.character(fatal))) +
geom_bar (position = position_dodge(width = 0.7)) +
xlab(" ") +
ylab("Counts") +
ggtitle("Where are Victims Injured the Most?") +
theme(legend.position = "top",
axis.text.x = element_text (angle = 30,
hjust = 1,
size = 7),
legend.title=element_text(size = 7)) +
guides(fill=guide_legend(title = "Was it Fatal?"), height = 400, width = 650))})
# Race bar plot
output$raceplot <- renderPlotly({
dat<- loadshoot()
ggplotly(
ggplot(data = dat, aes(x = race, fill = sex)) +
geom_bar (position = position_dodge(width = 0.9)) +
xlab("Race") +
ylab("Counts") +
ggtitle("Types of Victims") +
theme(legend.title = element_blank()) +
guides(color = FALSE), height = 400, width = 650)})
# Data Table
output$table <- DT::renderDataTable({
dat<- loadshoot()
subset(dat, select = c(code, wound, offender_injured, location, race, sex, dist, time))})
# Updating the URL Bar
observe({
print(reactiveValuesToList(input))
session$doBookmark()})
onBookmarked(function(url) {
updateQueryString(url)})
# Download data in the datatable
output$downloadData <- downloadHandler(
filename = function() {
paste("shootings", Sys.Date(), ".csv", sep="")},
content = function(file) {
write.csv(loadshoot(), file)})
# Reset Filter Data
# I didn't even get to touch this part :(
observeEvent(input$reset, {
updateSelectInput(session, "crimeSelect", selected = c("Aggravated Assualt", "Robbery", "Homicide", "Hospital Cases"))
updateCheckboxGroupInput(session, "IncidentInside", label = NULL, choices = NULL, selected = c("Y", "N"))
updateSliderInput(session, "yearSelect", value = c(min(years), max(years)))
showNotification("You have successfully reset the filters", type = "message")
})
}
# Run the application
shinyApp(ui = ui, server = server, enableBookmarking = "url") |
# Dependencies: neural-net fitting (neuralnet), MySQL access (RMySQL),
# disk-backed matrices (ff), interactive charts (googleVis), error metrics
# (Metrics, ftsa), normalisation helpers (RSNNS), plus ppls.
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(neuralnet)
require(RMySQL)
require(ff)
require(googleVis)
require(Metrics)
require(ppls)
require(RSNNS)
require(ftsa)
# Experiment configuration.
sites.count <- 10            # number of site tables to analyse
history.length <- 50         # days of history used to position the window
data.len <- 52560            # observations per site (365 * 144)
data.len.day <<- 144         # observations per day (10-minute resolution)
mat.size <<- 365             # days per year
window.size <- 10            # sliding-window width in days
train.data.percent <- 0.7    # fraction of each day used for training
file.name <- "neuralnet_shortterm_simple.csv"
file.path <- "/home/freak/Programming/Thesis/results/results/neuralnet_shortterm_simple/"
table.ip.type <- "specific"#"random"
# Disk-backed data matrices, one column per site; filled by loaddata().
powdata <<- ff(NA, dim=c(data.len, sites.count), vmode="double")
powdata.normalized <<- ff(NA, dim=c(data.len, sites.count), vmode="double")
# Result accumulators, grown one column per site by predict.pow().
train.data <<- c()
test.data <<- c()
output <<- c()
# NOTE(review): database credentials are hard-coded in source; consider
# moving them to environment variables or a config file.
drv = dbDriver("MySQL")
con = dbConnect(drv,host="localhost",dbname="eastwind",user="sachin",pass="password")
# Choose which onshore site tables to analyse.  "random" pulls the first
# sites.count matching table names from the MySQL information schema;
# anything else uses this fixed list of ten sites.
if (table.ip.type == "random") {
  tablelist_statement = paste("SELECT TABLE_NAME FROM information_schema.TABLES ",
                              "WHERE TABLE_SCHEMA = 'eastwind' AND",
                              "TABLE_NAME LIKE 'onshore_SITE_%' ", " LIMIT ", sites.count, ";")
  tables <<- dbGetQuery(con, statement = tablelist_statement)
  tables <<- data.frame(tables)
} else {
  # NOTE(review): `t` masks base::t() here; the name is kept so the
  # resulting data.frame column name stays the same.
  t <- c("onshore_SITE_00538",
         "onshore_SITE_00366",
         "onshore_SITE_00623",
         "onshore_SITE_00418",
         "onshore_SITE_00627",
         "onshore_SITE_00532",
         "onshore_SITE_00499",
         "onshore_SITE_00571",
         "onshore_SITE_03247",
         "onshore_SITE_00622")
  tables <<- data.frame(cbind(numeric(0), t))
}
loaddata <- function() {
  # Pull one year (2006) of power readings for every selected site and fill
  # the global ff matrices: raw values into powdata, and [0,1]-scaled values
  # (RSNNS::normalizeData) into powdata.normalized.
  for (site.idx in seq_len(sites.count)) {
    table.name <- tables[site.idx, 1]
    print(paste("Loading from table :: ", table.name))
    sql <- paste(" select pow from ", table.name, " WHERE (mesdt >= 20060101 && mesdt < 20070101) LIMIT ", data.len, ";")
    year.data <- data.frame(dbGetQuery(con, statement = sql), check.names = FALSE)
    pow.column <- year.data[, 1]
    powdata[, site.idx] <<- as.double(pow.column)
    powdata.normalized[, site.idx] <<- normalizeData(as.vector(pow.column), type = "0_1")
  }
}
predict.pow <- function(siteno, indx) {
  # Slide a window over one site's normalized power series, fit a
  # feed-forward neural net on each window's training rows, and append the
  # training targets, test targets and predictions as one new column each of
  # the globals train.data, test.data and output.
  #
  # siteno: column index into powdata.normalized (1..sites.count).
  # indx:   1-based starting observation of the first window.
  #
  # NOTE(review): reads the globals powdata.normalized, data.len,
  # data.len.day, window.size and train.data.percent, and writes several
  # globals with <<- (kept for compatibility with the rest of the script).
  if (indx < 1 || indx >= data.len) {
    print("Enter indx Greater than 0 and less than the data size")
    # BUG FIX: the original used a bare `return` (which evaluates to the
    # return function object and does NOT exit); execution fell through to
    # the loop with an invalid index.
    return(invisible(NULL))
  }
  data.set <<- as.vector(powdata.normalized[, siteno])
  indx.start <<- indx
  indx.end <<- indx + (window.size * data.len.day) - 1
  data.train <<- c()
  data.test <<- c()
  data.out <<- c()
  count <- 0
  while (indx.end <= data.len) {
    print(paste("Site no: ", siteno, "Slide Count: ", count + 1))
    # One column per day of the window; rows are the intra-day observations.
    data.mat <- matrix(data.set[indx.start:indx.end], nrow = data.len.day,
                       ncol = window.size, byrow = FALSE)
    colnames(data.mat) <- paste("d", c(1:window.size), sep = "")
    train.dataset.indx <- floor(data.len.day * train.data.percent)
    test.dataset.indx <- train.dataset.indx + 1
    window.slide <- data.len.day - train.dataset.indx
    data.mat.train <- data.mat[1:train.dataset.indx, ]
    data.mat.test <- data.mat[test.dataset.indx:data.len.day, ]
    formula.set <- colnames(data.mat)
    # Predict the last day of the window from the preceding days.
    # BUG FIX: `1:window.size-1` parses as (1:window.size)-1, i.e. 0:(n-1);
    # it only worked because R silently drops a 0 index.
    y <- formula.set[window.size]
    x <- formula.set[seq_len(window.size - 1)]
    f <- as.formula(paste(y, " ~ ", paste(x, collapse = "+")))
    out <<- neuralnet(f,
                      data.mat.train,
                      hidden = window.size,  # alt: c(round(window.size/2), window.size, 1)
                      rep = 2,
                      stepmax = 2000,
                      threshold = 0.2,
                      learningrate = 1,
                      algorithm = "rprop+",  # also: 'rprop-', 'sag', 'slr'
                      startweights = NULL,
                      lifesign = "none",
                      err.fct = "sse",
                      act.fct = "logistic",
                      exclude = NULL,
                      constant.weights = NULL,
                      # If TRUE, act.fct is not applied to the output neuron,
                      # so it is only the integration function.
                      linear.output = TRUE)
    data.train <<- c(data.train, data.mat.train[, window.size])
    data.test <<- c(data.test, data.mat.test[, window.size])
    pred <- compute(out, data.mat.test[, seq_len(window.size - 1)])$net.result
    data.out <<- c(data.out, pred)
    indx.start <<- indx.start + window.slide
    # BUG FIX: the loop update dropped the "- 1" used at initialisation,
    # making each subsequent window one element too long (matrix() truncated
    # the surplus with a warning).
    indx.end <<- indx.start + (window.size * data.len.day) - 1
    count <- count + 1
    if (count == 10) {  # cap at 10 window slides per site
      break
    }
  }
  train.data <<- cbind(train.data, data.train)
  test.data <<- cbind(test.data, data.test)
  output <<- cbind(output, data.out)
}
predict.power <- function() {
  # Load all site data, then run the sliding-window forecast from the last
  # history.length days of the series.
  # NOTE(review): the `break` means only the first site is ever processed;
  # kept because it matches the original (debug-style) behaviour.
  slide.indx <- data.len - (history.length * data.len.day) + 1
  loaddata()
  for (site in seq_len(sites.count)) {  # idiom: seq_len(n) over seq(1:n)
    predict.pow(site, slide.indx)
    break
  }
}
prediction.error <- function(){
  # Compute per-site forecast error metrics (rmse, mape, sse, mse) with
  # ftsa::error() and write them to file.name inside file.path.
  # NOTE(review): assigning the character site name into the matrix coerces
  # the whole err.data matrix to character, so the numeric errors end up as
  # strings in the CSV.
  # NOTE(review): the `break` stops after the first site, leaving the other
  # rows NA -- presumably debug code; confirm before relying on the output.
  parm.count <- 5
  err.data <<- matrix(,nrow=sites.count, ncol=parm.count, byrow=TRUE)
  colnames(err.data) <<- c("site.id", "rmse", "mape", "sse", "mse")
  # NOTE(review): setwd() changes global state and is never restored.
  setwd(file.path)
  for(site in seq(1:sites.count)){
    site.name <- as.character(tables[site,1])
    test <- test.data[,site]
    pred <- output[,site]
    err.rmse <- error(forecast=pred, true=test,method="rmse")
    err.mape <- error(forecast=pred, true=test,method="mape")
    err.sse <- error(forecast=pred, true=test,method="sse")
    err.mse <- error(forecast=pred, true=test,method="mse")
    print(site.name)
    err.data[site,] <<- c(site.name, err.rmse, err.mape, err.sse, err.mse)
    break
  }
  write.csv(err.data, file=file.name)
}
# Run the full pipeline: load data, forecast, then score.
predict.power()
prediction.error()
# Show the error matrix (only row 1 is filled; see the break in
# prediction.error()).
err.data
#plotting
# First site's training targets, test targets and predictions.
x1 = train.data[,1]
x2 = test.data[,1]
y = output[,1]
length(y)
plot(y, type='l')
plot(x1, type='l')
plot(x2, type='l')
# Observed vs predicted test series for site 1 as an interactive line chart.
dataToPlot = data.frame(seq(1,440),x2,y)
Line <- gvisLineChart(dataToPlot)
plot(Line)
| /simple/shortterm/neuralnet/neuralnet_days_slidingwindow_forecast.R | no_license | sachinlv/tsaggr | R | false | false | 5,524 | r | require(neuralnet)
require(RMySQL)
require(ff)
require(googleVis)
require(Metrics)
require(ppls)
require(RSNNS)
require(ftsa)
sites.count <- 10
history.length <- 50
data.len <- 52560
data.len.day <<- 144
mat.size <<- 365
window.size <- 10
train.data.percent <- 0.7
file.name <- "neuralnet_shortterm_simple.csv"
file.path <- "/home/freak/Programming/Thesis/results/results/neuralnet_shortterm_simple/"
table.ip.type <- "specific"#"random"
powdata <<- ff(NA, dim=c(data.len, sites.count), vmode="double")
powdata.normalized <<- ff(NA, dim=c(data.len, sites.count), vmode="double")
train.data <<- c()
test.data <<- c()
output <<- c()
drv = dbDriver("MySQL")
con = dbConnect(drv,host="localhost",dbname="eastwind",user="sachin",pass="password")
if(table.ip.type == "random"){
tablelist_statement = paste("SELECT TABLE_NAME FROM information_schema.TABLES ",
"WHERE TABLE_SCHEMA = 'eastwind' AND",
"TABLE_NAME LIKE 'onshore_SITE_%' "," LIMIT ",sites.count, ";")
tables <<- dbGetQuery(con, statement=tablelist_statement)
tables <<- data.frame(tables)
}else{
t <- c("onshore_SITE_00538",
"onshore_SITE_00366",
"onshore_SITE_00623",
"onshore_SITE_00418",
"onshore_SITE_00627",
"onshore_SITE_00532",
"onshore_SITE_00499",
"onshore_SITE_00571",
"onshore_SITE_03247",
"onshore_SITE_00622")
tables <<- data.frame(cbind(numeric(0),t))
}
loaddata <- function() {
  # Pull one year (2006) of power readings for every selected site and fill
  # the global ff matrices: raw values into powdata, and [0,1]-scaled values
  # (RSNNS::normalizeData) into powdata.normalized.
  for (site.idx in seq_len(sites.count)) {
    table.name <- tables[site.idx, 1]
    print(paste("Loading from table :: ", table.name))
    sql <- paste(" select pow from ", table.name, " WHERE (mesdt >= 20060101 && mesdt < 20070101) LIMIT ", data.len, ";")
    year.data <- data.frame(dbGetQuery(con, statement = sql), check.names = FALSE)
    pow.column <- year.data[, 1]
    powdata[, site.idx] <<- as.double(pow.column)
    powdata.normalized[, site.idx] <<- normalizeData(as.vector(pow.column), type = "0_1")
  }
}
predict.pow <- function(siteno, indx) {
  # Slide a window over one site's normalized power series, fit a
  # feed-forward neural net on each window's training rows, and append the
  # training targets, test targets and predictions as one new column each of
  # the globals train.data, test.data and output.
  #
  # siteno: column index into powdata.normalized (1..sites.count).
  # indx:   1-based starting observation of the first window.
  #
  # NOTE(review): reads the globals powdata.normalized, data.len,
  # data.len.day, window.size and train.data.percent, and writes several
  # globals with <<- (kept for compatibility with the rest of the script).
  if (indx < 1 || indx >= data.len) {
    print("Enter indx Greater than 0 and less than the data size")
    # BUG FIX: the original used a bare `return` (which evaluates to the
    # return function object and does NOT exit); execution fell through to
    # the loop with an invalid index.
    return(invisible(NULL))
  }
  data.set <<- as.vector(powdata.normalized[, siteno])
  indx.start <<- indx
  indx.end <<- indx + (window.size * data.len.day) - 1
  data.train <<- c()
  data.test <<- c()
  data.out <<- c()
  count <- 0
  while (indx.end <= data.len) {
    print(paste("Site no: ", siteno, "Slide Count: ", count + 1))
    # One column per day of the window; rows are the intra-day observations.
    data.mat <- matrix(data.set[indx.start:indx.end], nrow = data.len.day,
                       ncol = window.size, byrow = FALSE)
    colnames(data.mat) <- paste("d", c(1:window.size), sep = "")
    train.dataset.indx <- floor(data.len.day * train.data.percent)
    test.dataset.indx <- train.dataset.indx + 1
    window.slide <- data.len.day - train.dataset.indx
    data.mat.train <- data.mat[1:train.dataset.indx, ]
    data.mat.test <- data.mat[test.dataset.indx:data.len.day, ]
    formula.set <- colnames(data.mat)
    # Predict the last day of the window from the preceding days.
    # BUG FIX: `1:window.size-1` parses as (1:window.size)-1, i.e. 0:(n-1);
    # it only worked because R silently drops a 0 index.
    y <- formula.set[window.size]
    x <- formula.set[seq_len(window.size - 1)]
    f <- as.formula(paste(y, " ~ ", paste(x, collapse = "+")))
    out <<- neuralnet(f,
                      data.mat.train,
                      hidden = window.size,  # alt: c(round(window.size/2), window.size, 1)
                      rep = 2,
                      stepmax = 2000,
                      threshold = 0.2,
                      learningrate = 1,
                      algorithm = "rprop+",  # also: 'rprop-', 'sag', 'slr'
                      startweights = NULL,
                      lifesign = "none",
                      err.fct = "sse",
                      act.fct = "logistic",
                      exclude = NULL,
                      constant.weights = NULL,
                      # If TRUE, act.fct is not applied to the output neuron,
                      # so it is only the integration function.
                      linear.output = TRUE)
    data.train <<- c(data.train, data.mat.train[, window.size])
    data.test <<- c(data.test, data.mat.test[, window.size])
    pred <- compute(out, data.mat.test[, seq_len(window.size - 1)])$net.result
    data.out <<- c(data.out, pred)
    indx.start <<- indx.start + window.slide
    # BUG FIX: the loop update dropped the "- 1" used at initialisation,
    # making each subsequent window one element too long (matrix() truncated
    # the surplus with a warning).
    indx.end <<- indx.start + (window.size * data.len.day) - 1
    count <- count + 1
    if (count == 10) {  # cap at 10 window slides per site
      break
    }
  }
  train.data <<- cbind(train.data, data.train)
  test.data <<- cbind(test.data, data.test)
  output <<- cbind(output, data.out)
}
predict.power <- function() {
  # Load all site data, then run the sliding-window forecast from the last
  # history.length days of the series.
  # NOTE(review): the `break` means only the first site is ever processed;
  # kept because it matches the original (debug-style) behaviour.
  slide.indx <- data.len - (history.length * data.len.day) + 1
  loaddata()
  for (site in seq_len(sites.count)) {  # idiom: seq_len(n) over seq(1:n)
    predict.pow(site, slide.indx)
    break
  }
}
prediction.error <- function(){
  # Compute per-site forecast error metrics (rmse, mape, sse, mse) with
  # ftsa::error() and write them to file.name inside file.path.
  # NOTE(review): assigning the character site name into the matrix coerces
  # the whole err.data matrix to character, so the numeric errors end up as
  # strings in the CSV.
  # NOTE(review): the `break` stops after the first site, leaving the other
  # rows NA -- presumably debug code; confirm before relying on the output.
  parm.count <- 5
  err.data <<- matrix(,nrow=sites.count, ncol=parm.count, byrow=TRUE)
  colnames(err.data) <<- c("site.id", "rmse", "mape", "sse", "mse")
  # NOTE(review): setwd() changes global state and is never restored.
  setwd(file.path)
  for(site in seq(1:sites.count)){
    site.name <- as.character(tables[site,1])
    test <- test.data[,site]
    pred <- output[,site]
    err.rmse <- error(forecast=pred, true=test,method="rmse")
    err.mape <- error(forecast=pred, true=test,method="mape")
    err.sse <- error(forecast=pred, true=test,method="sse")
    err.mse <- error(forecast=pred, true=test,method="mse")
    print(site.name)
    err.data[site,] <<- c(site.name, err.rmse, err.mape, err.sse, err.mse)
    break
  }
  write.csv(err.data, file=file.name)
}
predict.power()
prediction.error()
err.data
#plotting
x1 = train.data[,1]
x2 = test.data[,1]
y = output[,1]
length(y)
plot(y, type='l')
plot(x1, type='l')
plot(x2, type='l')
dataToPlot = data.frame(seq(1,440),x2,y)
Line <- gvisLineChart(dataToPlot)
plot(Line)
|
expect_error_free <- function(...) {
  # Assert that the expression runs without error: expect_error() with
  # regexp = NA means "expect no error to be signalled at all".
  expect_error(..., regexp = NA)
}
## set wd to session temp dir, execute testing code, restore previous wd
temporarily <- function(env = parent.frame()) {
  # withr::local_dir() switches to the session temp dir (path_temp()) and
  # registers a deferred handler that restores the previous working
  # directory when `env` -- by default the caller's frame, i.e. the calling
  # test -- exits.
  withr::local_dir(path_temp(), .local_envir = env)
}
## useful during interactive test development to toggle the
## rlang_interactive escape hatch in reprex:::interactive()
interactive_mode <- function() {
  # Toggle the `rlang_interactive` option (the escape hatch consulted by
  # reprex:::interactive() during test development) and report the change.
  # Returns invisibly.
  before <- getOption("rlang_interactive", default = TRUE)
  after <- !before  # idiom: simpler than `if (before) FALSE else TRUE`
  options(rlang_interactive = after)
  cat("rlang_interactive:", before, "-->", after, "\n")
  invisible()
}
| /tests/testthat/helper.R | permissive | romainfrancois/reprex | R | false | false | 609 | r | expect_error_free <- function(...) {
expect_error(..., regexp = NA)
}
## set wd to session temp dir, execute testing code, restore previous wd
temporarily <- function(env = parent.frame()) {
  # withr::local_dir() switches to the session temp dir (path_temp()) and
  # registers a deferred handler that restores the previous working
  # directory when `env` -- by default the caller's frame, i.e. the calling
  # test -- exits.
  withr::local_dir(path_temp(), .local_envir = env)
}
## useful during interactive test development to toggle the
## rlang_interactive escape hatch in reprex:::interactive()
interactive_mode <- function() {
  # Toggle the `rlang_interactive` option (the escape hatch consulted by
  # reprex:::interactive() during test development) and report the change.
  # Returns invisibly.
  before <- getOption("rlang_interactive", default = TRUE)
  after <- !before  # idiom: simpler than `if (before) FALSE else TRUE`
  options(rlang_interactive = after)
  cat("rlang_interactive:", before, "-->", after, "\n")
  invisible()
}
|
# --------------------------------------------------------------------
# Gait data
# --------------------------------------------------------------------
# --------------------------------------------------------------------
#
# Overview of the analyses
#
# The gait data were chosen for these sample analyses because they are
# bivariate: consisting of both hip and knee angles observed over a
# gait cycle for 39 children. The bivariate nature of the data implies
# certain displays and analyses that are not usually considered, and
# especially the use of canonical correlation analysis.
#
# As with the daily weather data, the harmonic acceleration roughness
# penalty is used throughout since the data are periodic with a strong
# sinusoidal component of variation.
#
# After setting up the data, smoothing the data using GCV (generalized
# cross-validation) to select a smoothing parameter, and displaying
# various descriptive results, the data are subjected to a principal
# components analysis, followed by a canonical correlation analysis of
# the joint variation of hip and knee angle, and finally a registration
# of the curves. The registration is included here especially because
# the registering of periodic data requires the estimation of a phase
# shift constant for each curve in addition to possible nonlinear
# transformations of time.
#
# --------------------------------------------------------------------
# Last modified 10 November 2010 by Jim Ramsay
# attach the FDA functions
library(fda)
# Set up the argument values: equally spaced over circle of
# circumference 20. Earlier analyses of the gait data used time
# values over [0,1], but led to singularity problems in the use of
# function fRegress. In general, it is better use a time interval
# that assigns roughly one time unit to each inter-knot interval.
gaittime <- as.matrix((0:19)+0.5)
gaitrange <- c(0,20)
# display ranges of gait for each variable
apply(gait, 3, range)
# ----------- set up the harmonic acceleration operator ----------
harmaccelLfd <- vec2Lfd(c(0, (2*pi/20)^2, 0), rangeval=gaitrange)
# Set up basis for representing gait data. The basis is saturated
# since there are 20 data points per curve, and this set up defines
# 21 basis functions. Recall that a fourier basis has an odd number
# of basis functions.
gaitbasis <- create.fourier.basis(gaitrange, nbasis=21)
# -------------------------------------------------------------------
# Choose level of smoothing using
# the generalized cross-validation criterion
# -------------------------------------------------------------------
# set up range of smoothing parameters in log_10 units
gaitLoglam <- seq(-4,0,0.25)
nglam <- length(gaitLoglam)
gaitSmoothStats <- array(NA, dim=c(nglam, 3),
dimnames=list(gaitLoglam, c("log10.lambda", "df", "gcv") ) )
gaitSmoothStats[, 1] <- gaitLoglam
# loop through smoothing parameters
for (ilam in 1:nglam) {
gaitSmooth <- smooth.basisPar(gaittime, gait, gaitbasis,
Lfdobj=harmaccelLfd, lambda=10^gaitLoglam[ilam])
gaitSmoothStats[ilam, "df"] <- gaitSmooth$df
gaitSmoothStats[ilam, "gcv"] <- sum(gaitSmooth$gcv)
# note: gcv is a matrix in this case
}
# display and plot GCV criterion and degrees of freedom
gaitSmoothStats
plot(gaitSmoothStats[, 1], gaitSmoothStats[, 3])
# set up plotting arrangements for one and two panel displays
# allowing for larger fonts
op <- par(mfrow=c(2,1))
plot(gaitLoglam, gaitSmoothStats[, "gcv"], type="b",
xlab="Log_10 lambda", ylab="GCV Criterion",
main="Gait Smoothing", log="y")
plot(gaitLoglam, gaitSmoothStats[, "df"], type="b",
xlab="Log_10 lambda", ylab="Degrees of freedom",
main="Gait Smoothing")
par(op)
# With gaittime <- (1:20)/21,
# GCV is minimized with lambda = 10^(-2).
gaitfd <- smooth.basisPar(gaittime, gait,
gaitbasis, Lfdobj=harmaccelLfd, lambda=1e-2)$fd
names(gaitfd$fdnames) <- c("Normalized time", "Child", "Angle")
gaitfd$fdnames[[3]] <- c("Hip", "Knee")
str(gaitfd)
# -------- plot curves and their first derivatives ----------------
#par(mfrow=c(1,2), mar=c(3,4,2,1), pty="s")
op <- par(mfrow=c(2,1))
plot(gaitfd, cex=1.2)
par(op)
# plot each pair of curves interactively
plotfit.fd(gait, gaittime, gaitfd, cex=1.2, ask=FALSE)
# plot the residuals, sorting cases by residual sum of squares
# this produces 39 plots for each of knee and hip angle
plotfit.fd(gait, gaittime, gaitfd, residual=TRUE, sort=TRUE, cex=1.2)
# plot first derivative of all curves
op <- par(mfrow=c(2,1))
plot(gaitfd, Lfdobj=1)
par(op)
# -----------------------------------------------------------------
# Display the mean, variance and covariance functions
# -----------------------------------------------------------------
# ------------ compute the mean functions --------------------
gaitmeanfd <- mean.fd(gaitfd)
# plot these functions and their first two derivatives
op <- par(mfcol=2:3)
plot(gaitmeanfd)
plot(gaitmeanfd, Lfdobj=1)
plot(gaitmeanfd, Lfdobj=2)
par(op)
# -------------- Compute the variance functions -------------
gaitvarbifd <- var.fd(gaitfd)
str(gaitvarbifd)
gaitvararray <- eval.bifd(gaittime, gaittime, gaitvarbifd)
# plot variance and covariance functions as contours
filled.contour(gaittime, gaittime, gaitvararray[,,1,1], cex=1.2)
title("Knee - Knee")
filled.contour(gaittime, gaittime, gaitvararray[,,1,2], cex=1.2)
title("Knee - Hip")
filled.contour(gaittime, gaittime, gaitvararray[,,1,3], cex=1.2)
title("Hip - Hip")
# plot variance and covariance functions as surfaces
persp(gaittime, gaittime, gaitvararray[,,1,1], cex=1.2)
title("Knee - Knee")
persp(gaittime, gaittime, gaitvararray[,,1,2], cex=1.2)
title("Knee - Hip")
persp(gaittime, gaittime, gaitvararray[,,1,3], cex=1.2)
title("Hip - Hip")
# plot correlation functions as contours
gaitCorArray <- cor.fd(gaittime, gaitfd)
quantile(gaitCorArray)
contour(gaittime, gaittime, gaitCorArray[,,1,1], cex=1.2)
title("Knee - Knee")
contour(gaittime, gaittime, gaitCorArray[,,1,2], cex=1.2)
title("Knee - Hip")
contour(gaittime, gaittime, gaitCorArray[,,1,3], cex=1.2)
title("Hip - Hip")
# --------------------------------------------------------------
# Principal components analysis
# --------------------------------------------------------------
# do the PCA with varimax rotation
# Smooth with lambda as determined above
gaitfdPar <- fdPar(gaitbasis, harmaccelLfd, lambda=1e-2)
gaitpca.fd <- pca.fd(gaitfd, nharm=4, gaitfdPar)
gaitpca.fd <- varmx.pca.fd(gaitpca.fd)
# plot harmonics using cycle plots
op <- par(mfrow=c(2,2))
plot.pca.fd(gaitpca.fd, cycle=TRUE)
par(op)
# compute proportions of variance associated with each angle
gaitharmmat = eval.fd(gaittime, gaitpca.fd$harmonics)
hipharmmat = gaitharmmat[,,1]
kneeharmmat = gaitharmmat[,,2]
# then we want to find the total size of each
hipharmL2 = apply(hipharmmat^2,2,mean)
kneeharmL2 = apply(kneeharmmat^2,2,mean)
hippropvar2 = hipharmL2/(hipharmL2+kneeharmL2)
kneepropvar2 = 1-hippropvar2
print("Percentages of fits for the PCA:")
print(round(100*cbind(hippropvar2, kneepropvar2),1))
# --------------------------------------------------------------
# Canonical correlation analysis
# --------------------------------------------------------------
hipfd <- gaitfd[,1]
kneefd <- gaitfd[,2]
hipfdPar <- fdPar(hipfd, harmaccelLfd, 1e2)
kneefdPar <- fdPar(kneefd, harmaccelLfd, 1e2)
ccafd <- cca.fd(hipfd, kneefd, ncan=3, hipfdPar, kneefdPar)
# plot the canonical weight functions
op <- par(mfrow=c(2,1), mar=c(3,4,2,1), pty="m")
plot.cca.fd(ccafd, cex=1.2)
par(op)
# display the canonical correlations
round(ccafd$ccacorr[1:6],3)
plot(1:6, ccafd$ccacorr[1:6], type="b")
# --------------------------------------------------------------
# Register the angular acceleration of the gait data
# --------------------------------------------------------------
# compute the acceleration and mean acceleration
D2gaitfd <- deriv.fd(gaitfd,2)
names(D2gaitfd$fdnames)[[3]] <- "Angular acceleration"
D2gaitfd$fdnames[[3]] <- c("Hip", "Knee")
D2gaitmeanfd <- mean.fd(D2gaitfd)
names(D2gaitmeanfd$fdnames)[[3]] <- "Mean angular acceleration"
D2gaitmeanfd$fdnames[[3]] <- c("Hip", "Knee")
# set up basis for warping function
nwbasis <- 7
wbasis <- create.bspline.basis(gaitrange,nwbasis,3)
Warpfd <- fd(matrix(0,nwbasis,5),wbasis)
WarpfdPar <- fdPar(Warpfd)
# register the functions
gaitreglist <- register.fd(D2gaitmeanfd, D2gaitfd[1:5,], WarpfdPar, periodic=TRUE)
plotreg.fd(gaitreglist)
# display horizontal shift values
print(round(gaitreglist$shift,1))
# histogram of horizontal shift values
par(mfrow=c(1,1))
hist(gaitreglist$shift,xlab="Normalized time")
# --------------------------------------------------------------
# Predict knee angle from hip angle
# for angle and angular acceleration
# --------------------------------------------------------------
# set up the data
hipfd <- gaitfd[,1]
kneefd <- gaitfd[,2]
ncurve <- dim(kneefd$coefs)[2]
kneemeanfd <- mean(kneefd)
# define the functional parameter object for regression functions
betafdPar <- fdPar(gaitbasis, harmaccelLfd)
betalist <- list(betafdPar,betafdPar)
# ---------- predict knee angle from hip angle --------
conbasis <- create.constant.basis(c(0,20))
constfd <- fd(matrix(1,1,ncurve), conbasis)
# set up the list of covariate objects
xfdlist <- list(constfd, hipfd)
# fit the current functional linear model
fRegressout <- fRegress(kneefd, xfdlist, betalist)
# set up and plot the fit functions and the regression functions
kneehatfd <- fRegressout$yhatfd
betaestlist <- fRegressout$betaestlist
alphafd <- betaestlist[[1]]$fd
hipbetafd <- betaestlist[[2]]$fd
op <- par(mfrow=c(2,1), ask=FALSE)
plot(alphafd, ylab="Intercept")
plot(hipbetafd, ylab="Hip coefficient")
par(op)
# compute and plot squared multiple correlation function
gaitfine <- seq(0,20,len=101)
kneemat <- eval.fd(gaitfine, kneefd)
kneehatmat <- predict(kneehatfd, gaitfine)
kneemeanvec <- as.vector(eval.fd(gaitfine, kneemeanfd))
SSE0 <- apply((kneemat - outer(kneemeanvec, rep(1,ncurve)))^2, 1, sum)
SSE1 <- apply((kneemat - kneehatmat)^2, 1, sum)
Rsqr <- (SSE0-SSE1)/SSE0
op <- par(mfrow=c(1,1),ask=FALSE)
plot(gaitfine, Rsqr, type="l", ylim=c(0,0.4))
# for each case plot the function being fit, the fit,
# and the mean function
op <- par(mfrow=c(1,1),ask=TRUE)
for (i in 1:ncurve) {
plot( gaitfine, kneemat[,i], type="l", lty=1, col=4, ylim=c(0,80))
lines(gaitfine, kneemeanvec, lty=2, col=2)
lines(gaitfine, kneehatmat[,i], lty=3, col=4)
title(paste("Case",i))
}
par(op)
# ---------- predict knee acceleration from hip acceleration --------
D2kneefd <- deriv(kneefd, 2)
D2hipfd <- deriv(hipfd, 2)
D2kneemeanfd <- mean(D2kneefd)
# set up the list of covariate objects
D2xfdlist <- list(constfd,D2hipfd)
# fit the current functional linear model
D2fRegressout <- fRegress(D2kneefd, D2xfdlist, betalist)
# set up and plot the fit functions and the regression functions
D2kneehatfd <- D2fRegressout$yhatfd
D2betaestlist <- D2fRegressout$betaestlist
D2alphafd <- D2betaestlist[[1]]$fd
D2hipbetafd <- D2betaestlist[[2]]$fd
op <- par(mfrow=c(2,1), ask=FALSE)
plot(D2alphafd, ylab="D2Intercept")
plot(D2hipbetafd, ylab="D2Hip coefficient")
par(op)
# compute and plot squared multiple correlation function
D2kneemat <- eval.fd(gaitfine, D2kneefd)
D2kneehatmat <- predict(D2kneehatfd, gaitfine)
D2kneemeanvec <- as.vector(eval.fd(gaitfine, D2kneemeanfd))
D2SSE0 <- apply((D2kneemat - outer(D2kneemeanvec, rep(1,ncurve)))^2, 1, sum)
D2SSE1 <- apply((D2kneemat - D2kneehatmat)^2, 1, sum)
D2Rsqr <- (D2SSE0-D2SSE1)/D2SSE0
par(mfrow=c(1,1),ask=FALSE)
plot(gaitfine, D2Rsqr, type="l", ylim=c(0,0.5))
# for each case plot the function being fit, the fit, and the mean function
op <- par(mfrow=c(1,1),ask=TRUE)
for (i in 1:ncurve) {
plot( gaitfine, D2kneemat[,i], type="l", lty=1, col=4, ylim=c(-20,20))
lines(gaitfine, D2kneemeanvec, lty=2, col=2)
lines(gaitfine, D2kneehatmat[,i], lty=3, col=4)
lines(c(0,20), c(0,0), lty=2, col=2)
title(paste("Case",i))
}
par(op)
| /demo/gait.R | no_license | cran/fda | R | false | false | 12,427 | r | # --------------------------------------------------------------------
# Gait data
# --------------------------------------------------------------------
# --------------------------------------------------------------------
#
# Overview of the analyses
#
# The gait data were chosen for these sample analyses because they are
# bivariate: consisting of both hip and knee angles observed over a
# gait cycle for 39 children. The bivariate nature of the data implies
# certain displays and analyses that are not usually considered, and
# especially the use of canonical correlation analysis.
#
# As with the daily weather data, the harmonic acceleration roughness
# penalty is used throughout since the data are periodic with a strong
# sinusoidal component of variation.
#
# After setting up the data, smoothing the data using GCV (generalized
# cross-validation) to select a smoothing parameter, and displaying
# various descriptive results, the data are subjected to a principal
# components analysis, followed by a canonical correlation analysis of
# the joint variation of hip and knee angle, and finally a registration
# of the curves. The registration is included here especially because
# the registering of periodic data requires the estimation of a phase
# shift constant for each curve in addition to possible nonlinear
# transformations of time.
#
# --------------------------------------------------------------------
# Last modified 10 November 2010 by Jim Ramsay
# attach the FDA functions
library(fda)
# Set up the argument values: equally spaced over circle of
# circumference 20. Earlier analyses of the gait data used time
# values over [0,1], but led to singularity problems in the use of
# function fRegress. In general, it is better use a time interval
# that assigns roughly one time unit to each inter-knot interval.
gaittime <- as.matrix((0:19)+0.5)
gaitrange <- c(0,20)
# display ranges of gait for each variable
apply(gait, 3, range)
# ----------- set up the harmonic acceleration operator ----------
# NOTE(review): gaitrange, gaittime and gait are assumed to be defined
# earlier in the file (fda gait data) -- confirm against the full script.
# vec2Lfd(c(0, (2*pi/20)^2, 0)) defines L = (2*pi/20)^2 D + D^3, whose null
# space contains constants and sinusoids with period 20 (one gait cycle).
harmaccelLfd <- vec2Lfd(c(0, (2*pi/20)^2, 0), rangeval=gaitrange)
# Set up basis for representing gait data. The basis is saturated
# since there are 20 data points per curve, and this set up defines
# 21 basis functions. Recall that a fourier basis has an odd number
# of basis functions.
gaitbasis <- create.fourier.basis(gaitrange, nbasis=21)
# -------------------------------------------------------------------
# Choose level of smoothing using
# the generalized cross-validation criterion
# -------------------------------------------------------------------
# set up range of smoothing parameters in log_10 units
gaitLoglam <- seq(-4,0,0.25)
nglam <- length(gaitLoglam)
# one row per candidate lambda: log10(lambda), equivalent df, and GCV
gaitSmoothStats <- array(NA, dim=c(nglam, 3),
      dimnames=list(gaitLoglam, c("log10.lambda", "df", "gcv") ) )
gaitSmoothStats[, 1] <- gaitLoglam
# loop through smoothing parameters
for (ilam in 1:nglam) {
  gaitSmooth <- smooth.basisPar(gaittime, gait, gaitbasis,
                   Lfdobj=harmaccelLfd, lambda=10^gaitLoglam[ilam])
  gaitSmoothStats[ilam, "df"] <- gaitSmooth$df
  gaitSmoothStats[ilam, "gcv"] <- sum(gaitSmooth$gcv)
  # note: gcv is a matrix in this case, so sum over all curves/variables
}
# display and plot GCV criterion and degrees of freedom
gaitSmoothStats
plot(gaitSmoothStats[, 1], gaitSmoothStats[, 3])
# set up plotting arrangements for one and two panel displays
# allowing for larger fonts
op <- par(mfrow=c(2,1))
plot(gaitLoglam, gaitSmoothStats[, "gcv"], type="b",
     xlab="Log_10 lambda", ylab="GCV Criterion",
     main="Gait Smoothing", log="y")
plot(gaitLoglam, gaitSmoothStats[, "df"], type="b",
     xlab="Log_10 lambda", ylab="Degrees of freedom",
     main="Gait Smoothing")
par(op)
# With gaittime <- (1:20)/21,
# GCV is minimized with lambda = 10^(-2), used for the final smooth below.
gaitfd <- smooth.basisPar(gaittime, gait,
       gaitbasis, Lfdobj=harmaccelLfd, lambda=1e-2)$fd
names(gaitfd$fdnames) <- c("Normalized time", "Child", "Angle")
gaitfd$fdnames[[3]] <- c("Hip", "Knee")
str(gaitfd)
# -------- plot curves and their first derivatives ----------------
#par(mfrow=c(1,2), mar=c(3,4,2,1), pty="s")
op <- par(mfrow=c(2,1))
plot(gaitfd, cex=1.2)
par(op)
# plot each pair of curves interactively
plotfit.fd(gait, gaittime, gaitfd, cex=1.2, ask=FALSE)
# plot the residuals, sorting cases by residual sum of squares
# this produces 39 plots for each of knee and hip angle
plotfit.fd(gait, gaittime, gaitfd, residual=TRUE, sort=TRUE, cex=1.2)
# plot first derivative of all curves
op <- par(mfrow=c(2,1))
plot(gaitfd, Lfdobj=1)
par(op)
# -----------------------------------------------------------------
# Display the mean, variance and covariance functions
# -----------------------------------------------------------------
# ------------ compute the mean functions --------------------
gaitmeanfd <- mean.fd(gaitfd)
# plot these functions and their first two derivatives
op <- par(mfcol=2:3)
plot(gaitmeanfd)
plot(gaitmeanfd, Lfdobj=1)
plot(gaitmeanfd, Lfdobj=2)
par(op)
# -------------- Compute the variance functions -------------
gaitvarbifd <- var.fd(gaitfd)
str(gaitvarbifd)
# evaluate the bivariate variance function on the time grid;
# the 4th dimension indexes the variance/covariance surfaces
gaitvararray <- eval.bifd(gaittime, gaittime, gaitvarbifd)
# plot variance and covariance functions as contours
filled.contour(gaittime, gaittime, gaitvararray[,,1,1], cex=1.2)
title("Knee - Knee")
filled.contour(gaittime, gaittime, gaitvararray[,,1,2], cex=1.2)
title("Knee - Hip")
filled.contour(gaittime, gaittime, gaitvararray[,,1,3], cex=1.2)
title("Hip - Hip")
# plot variance and covariance functions as surfaces
persp(gaittime, gaittime, gaitvararray[,,1,1], cex=1.2)
title("Knee - Knee")
persp(gaittime, gaittime, gaitvararray[,,1,2], cex=1.2)
title("Knee - Hip")
persp(gaittime, gaittime, gaitvararray[,,1,3], cex=1.2)
title("Hip - Hip")
# plot correlation functions as contours
gaitCorArray <- cor.fd(gaittime, gaitfd)
quantile(gaitCorArray)
contour(gaittime, gaittime, gaitCorArray[,,1,1], cex=1.2)
title("Knee - Knee")
contour(gaittime, gaittime, gaitCorArray[,,1,2], cex=1.2)
title("Knee - Hip")
contour(gaittime, gaittime, gaitCorArray[,,1,3], cex=1.2)
title("Hip - Hip")
# --------------------------------------------------------------
# Principal components analysis
# --------------------------------------------------------------
# do the PCA with varimax rotation
# Smooth with lambda as determined above
gaitfdPar <- fdPar(gaitbasis, harmaccelLfd, lambda=1e-2)
gaitpca.fd <- pca.fd(gaitfd, nharm=4, gaitfdPar)
gaitpca.fd <- varmx.pca.fd(gaitpca.fd)
# plot harmonics using cycle plots
op <- par(mfrow=c(2,2))
plot.pca.fd(gaitpca.fd, cycle=TRUE)
par(op)
# compute proportions of variance associated with each angle
# (variable 1 = hip, variable 2 = knee, per the fdnames set above)
gaitharmmat = eval.fd(gaittime, gaitpca.fd$harmonics)
hipharmmat = gaitharmmat[,,1]
kneeharmmat = gaitharmmat[,,2]
# then we want to find the total size of each
# (mean squared value over time = L2 norm of each harmonic, per variable)
hipharmL2 = apply(hipharmmat^2,2,mean)
kneeharmL2 = apply(kneeharmmat^2,2,mean)
hippropvar2 = hipharmL2/(hipharmL2+kneeharmL2)
kneepropvar2 = 1-hippropvar2
print("Percentages of fits for the PCA:")
print(round(100*cbind(hippropvar2, kneepropvar2),1))
# --------------------------------------------------------------
# Canonical correlation analysis
# --------------------------------------------------------------
# split the bivariate fd object into its hip and knee components
hipfd <- gaitfd[,1]
kneefd <- gaitfd[,2]
hipfdPar <- fdPar(hipfd, harmaccelLfd, 1e2)
kneefdPar <- fdPar(kneefd, harmaccelLfd, 1e2)
ccafd <- cca.fd(hipfd, kneefd, ncan=3, hipfdPar, kneefdPar)
# plot the canonical weight functions
op <- par(mfrow=c(2,1), mar=c(3,4,2,1), pty="m")
plot.cca.fd(ccafd, cex=1.2)
par(op)
# display the canonical correlations
round(ccafd$ccacorr[1:6],3)
plot(1:6, ccafd$ccacorr[1:6], type="b")
# --------------------------------------------------------------
# Register the angular acceleration of the gait data
# --------------------------------------------------------------
# compute the acceleration and mean acceleration
D2gaitfd <- deriv.fd(gaitfd,2)
names(D2gaitfd$fdnames)[[3]] <- "Angular acceleration"
D2gaitfd$fdnames[[3]] <- c("Hip", "Knee")
D2gaitmeanfd <- mean.fd(D2gaitfd)
names(D2gaitmeanfd$fdnames)[[3]] <- "Mean angular acceleration"
D2gaitmeanfd$fdnames[[3]] <- c("Hip", "Knee")
# set up basis for warping function (order-3 B-splines, 7 basis fns)
nwbasis <- 7
wbasis <- create.bspline.basis(gaitrange,nwbasis,3)
Warpfd <- fd(matrix(0,nwbasis,5),wbasis)
WarpfdPar <- fdPar(Warpfd)
# register the first 5 curves to the mean acceleration
gaitreglist <- register.fd(D2gaitmeanfd, D2gaitfd[1:5,], WarpfdPar, periodic=TRUE)
plotreg.fd(gaitreglist)
# display horizontal shift values
print(round(gaitreglist$shift,1))
# histogram of horizontal shift values
par(mfrow=c(1,1))
hist(gaitreglist$shift,xlab="Normalized time")
# --------------------------------------------------------------
# Predict knee angle from hip angle
# for angle and angular acceleration
# --------------------------------------------------------------
# set up the data
hipfd <- gaitfd[,1]
kneefd <- gaitfd[,2]
ncurve <- dim(kneefd$coefs)[2]
kneemeanfd <- mean(kneefd)
# define the functional parameter object for regression functions
betafdPar <- fdPar(gaitbasis, harmaccelLfd)
betalist <- list(betafdPar,betafdPar)
# ---------- predict knee angle from hip angle --------
# constant-basis covariate serves as the functional intercept term
conbasis <- create.constant.basis(c(0,20))
constfd <- fd(matrix(1,1,ncurve), conbasis)
# set up the list of covariate objects
xfdlist <- list(constfd, hipfd)
# fit the current functional linear model
fRegressout <- fRegress(kneefd, xfdlist, betalist)
# set up and plot the fit functions and the regression functions
kneehatfd <- fRegressout$yhatfd
betaestlist <- fRegressout$betaestlist
alphafd <- betaestlist[[1]]$fd
hipbetafd <- betaestlist[[2]]$fd
op <- par(mfrow=c(2,1), ask=FALSE)
plot(alphafd, ylab="Intercept")
plot(hipbetafd, ylab="Hip coefficient")
par(op)
# compute and plot squared multiple correlation function
# R^2(t) = (SSE0 - SSE1)/SSE0, pointwise over a fine time grid
gaitfine <- seq(0,20,len=101)
kneemat <- eval.fd(gaitfine, kneefd)
kneehatmat <- predict(kneehatfd, gaitfine)
kneemeanvec <- as.vector(eval.fd(gaitfine, kneemeanfd))
SSE0 <- apply((kneemat - outer(kneemeanvec, rep(1,ncurve)))^2, 1, sum)
SSE1 <- apply((kneemat - kneehatmat)^2, 1, sum)
Rsqr <- (SSE0-SSE1)/SSE0
op <- par(mfrow=c(1,1),ask=FALSE)
plot(gaitfine, Rsqr, type="l", ylim=c(0,0.4))
# for each case plot the function being fit, the fit,
# and the mean function
op <- par(mfrow=c(1,1),ask=TRUE)
for (i in 1:ncurve) {
  plot( gaitfine, kneemat[,i], type="l", lty=1, col=4, ylim=c(0,80))
  lines(gaitfine, kneemeanvec, lty=2, col=2)
  lines(gaitfine, kneehatmat[,i], lty=3, col=4)
  title(paste("Case",i))
}
par(op)
# ---------- predict knee acceleration from hip acceleration --------
# same functional linear model as above, applied to second derivatives
D2kneefd <- deriv(kneefd, 2)
D2hipfd <- deriv(hipfd, 2)
D2kneemeanfd <- mean(D2kneefd)
# set up the list of covariate objects (reuses constfd and betalist)
D2xfdlist <- list(constfd,D2hipfd)
# fit the current functional linear model
D2fRegressout <- fRegress(D2kneefd, D2xfdlist, betalist)
# set up and plot the fit functions and the regression functions
D2kneehatfd <- D2fRegressout$yhatfd
D2betaestlist <- D2fRegressout$betaestlist
D2alphafd <- D2betaestlist[[1]]$fd
D2hipbetafd <- D2betaestlist[[2]]$fd
op <- par(mfrow=c(2,1), ask=FALSE)
plot(D2alphafd, ylab="D2Intercept")
plot(D2hipbetafd, ylab="D2Hip coefficient")
par(op)
# compute and plot squared multiple correlation function
D2kneemat <- eval.fd(gaitfine, D2kneefd)
D2kneehatmat <- predict(D2kneehatfd, gaitfine)
D2kneemeanvec <- as.vector(eval.fd(gaitfine, D2kneemeanfd))
D2SSE0 <- apply((D2kneemat - outer(D2kneemeanvec, rep(1,ncurve)))^2, 1, sum)
D2SSE1 <- apply((D2kneemat - D2kneehatmat)^2, 1, sum)
D2Rsqr <- (D2SSE0-D2SSE1)/D2SSE0
par(mfrow=c(1,1),ask=FALSE)
plot(gaitfine, D2Rsqr, type="l", ylim=c(0,0.5))
# for each case plot the function being fit, the fit, and the mean function
op <- par(mfrow=c(1,1),ask=TRUE)
for (i in 1:ncurve) {
  plot( gaitfine, D2kneemat[,i], type="l", lty=1, col=4, ylim=c(-20,20))
  lines(gaitfine, D2kneemeanvec, lty=2, col=2)
  lines(gaitfine, D2kneehatmat[,i], lty=3, col=4)
  lines(c(0,20), c(0,0), lty=2, col=2)
  title(paste("Case",i))
}
par(op)
|
#' Returns the base url of the nbp api.
#'
#' @return A length-one character vector with the API root URL.
nbp_api_base_url <- function() {
  base_url <- "https://api.nbp.pl/api/"
  base_url
}
| /R/endpoint_common.R | no_license | cran/rnbp | R | false | false | 105 | r | #' Returns the base url of the nbp api.
# Returns the base URL of the NBP ("api.nbp.pl") web API as a
# length-one character vector.
nbp_api_base_url <- function() {
  "https://api.nbp.pl/api/"
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_nrst_haversine.R
\name{match_nrst_haversine}
\alias{match_nrst_haversine}
\title{Match coordinates to nearest coordinates}
\usage{
match_nrst_haversine(
lat,
lon,
addresses_lat,
addresses_lon,
Index = seq_along(addresses_lat),
cartesian_R = NULL,
close_enough = 10,
excl_self = FALSE,
as.data.table = TRUE,
.verify_box = TRUE
)
}
\arguments{
\item{lat, lon}{Coordinates to be geocoded. Numeric vectors of equal length.}
\item{addresses_lat, addresses_lon}{Coordinates of known locations. Numeric vectors of equal length
(likely to be a different length than the length of \code{lat}, except when \code{excl_self = TRUE}).}
\item{Index}{A vector the same length as \code{lat} to encode the match between \code{lat,lon}
and \code{addresses_lat,addresses_lon}. The default is to use the integer position
of the nearest match to
\code{addresses_lat,addresses_lon}.}
\item{cartesian_R}{The maximum radius of any address from the points to be geocoded. Used
to accelerate the detection of minimum distances. Note, as the argument name suggests,
the distance is in cartesian coordinates, so a small number is likely.}
\item{close_enough}{The distance, in metres, below which a match will be considered to have occurred.
(The distance that is considered "close enough" to be a match.)
For example, \code{close_enough = 10} means the first location within ten metres will be matched,
even if a closer match occurs later.
May be provided as a string to emphasize the units, e.g. \code{close_enough = "0.25km"}.
Only \code{km} and \code{m} are permitted.}
\item{excl_self}{(bool, default: \code{FALSE}) For each \eqn{x_i} of the first coordinates,
exclude the \eqn{y_i}-th point when determining closest match. Useful to determine the
nearest neighbour within a set of coordinates, \emph{viz.}
\code{match_nrst_haversine(x, y, x, y, excl_self = TRUE)}.}
\item{as.data.table}{Return result as a \code{data.table}?
If \code{FALSE}, a list is returned. \code{TRUE} by default to
avoid dumping a huge list to the console.}
\item{.verify_box}{Check the initial guess against other points within the
box of radius \eqn{\ell^\infty}.}
}
\value{
A list (or \code{data.table} if \code{as.data.table = TRUE}) with two elements,
both the same length as \code{lat}, giving for point \code{lat,lon}:
\describe{
\item{\code{pos}}{the position (or corresponding value in \code{Index})
in \code{addresses_lat,addresses_lon} nearest to \code{lat, lon}.}
\item{\code{dist}}{the distance, in kilometres, between the two points.}
}
}
\description{
When geocoding coordinates to known addresses, an efficient way to
match the given coordinates with the known is necessary. This function provides this
efficiency by using \code{C++} and allowing approximate matching.
}
\examples{
lat2 <- runif(5, -38, -37.8)
lon2 <- rep(145, 5)
lat1 <- c(-37.875, -37.91)
lon1 <- c(144.96, 144.978)
match_nrst_haversine(lat1, lon1, lat2, lon2, 0L)
match_nrst_haversine(lat1, lon1, lat1, lon1, 11:12, excl_self = TRUE)
}
| /hutilscpp/man/match_nrst_haversine.Rd | no_license | akhikolla/InformationHouse | R | false | true | 3,168 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_nrst_haversine.R
\name{match_nrst_haversine}
\alias{match_nrst_haversine}
\title{Match coordinates to nearest coordinates}
\usage{
match_nrst_haversine(
lat,
lon,
addresses_lat,
addresses_lon,
Index = seq_along(addresses_lat),
cartesian_R = NULL,
close_enough = 10,
excl_self = FALSE,
as.data.table = TRUE,
.verify_box = TRUE
)
}
\arguments{
\item{lat, lon}{Coordinates to be geocoded. Numeric vectors of equal length.}
\item{addresses_lat, addresses_lon}{Coordinates of known locations. Numeric vectors of equal length
(likely to be a different length than the length of \code{lat}, except when \code{excl_self = TRUE}).}
\item{Index}{A vector the same length as \code{lat} to encode the match between \code{lat,lon}
and \code{addresses_lat,addresses_lon}. The default is to use the integer position
of the nearest match to
\code{addresses_lat,addresses_lon}.}
\item{cartesian_R}{The maximum radius of any address from the points to be geocoded. Used
to accelerate the detection of minimum distances. Note, as the argument name suggests,
the distance is in cartesian coordinates, so a small number is likely.}
\item{close_enough}{The distance, in metres, below which a match will be considered to have occurred.
(The distance that is considered "close enough" to be a match.)
For example, \code{close_enough = 10} means the first location within ten metres will be matched,
even if a closer match occurs later.
May be provided as a string to emphasize the units, e.g. \code{close_enough = "0.25km"}.
Only \code{km} and \code{m} are permitted.}
\item{excl_self}{(bool, default: \code{FALSE}) For each \eqn{x_i} of the first coordinates,
exclude the \eqn{y_i}-th point when determining closest match. Useful to determine the
nearest neighbour within a set of coordinates, \emph{viz.}
\code{match_nrst_haversine(x, y, x, y, excl_self = TRUE)}.}
\item{as.data.table}{Return result as a \code{data.table}?
If \code{FALSE}, a list is returned. \code{TRUE} by default to
avoid dumping a huge list to the console.}
\item{.verify_box}{Check the initial guess against other points within the
box of radius \eqn{\ell^\infty}.}
}
\value{
A list (or \code{data.table} if \code{as.data.table = TRUE}) with two elements,
both the same length as \code{lat}, giving for point \code{lat,lon}:
\describe{
\item{\code{pos}}{the position (or corresponding value in \code{Index})
in \code{addresses_lat,addresses_lon} nearest to \code{lat, lon}.}
\item{\code{dist}}{the distance, in kilometres, between the two points.}
}
}
\description{
When geocoding coordinates to known addresses, an efficient way to
match the given coordinates with the known is necessary. This function provides this
efficiency by using \code{C++} and allowing approximate matching.
}
\examples{
lat2 <- runif(5, -38, -37.8)
lon2 <- rep(145, 5)
lat1 <- c(-37.875, -37.91)
lon1 <- c(144.96, 144.978)
match_nrst_haversine(lat1, lon1, lat2, lon2, 0L)
match_nrst_haversine(lat1, lon1, lat1, lon1, 11:12, excl_self = TRUE)
}
|
# stationary() finds the stationary distribution for the given markov
# chain (defined through a probability transition matrix or function)
# Input variables:
#   pijdef:    The transition probabilities, either in matrix form or a function of (i, j)
#   type:      Type of markov chain, either 'discrete' or 'continuous'
#   tol:       A positive scalar for error tolerance for infinite markov chain approximation
#   qidef:     Holding rates at each state for continuous markov chain (passed via ...)
#   transrate: Instead of inputting the individual holding rate and transition probabilities,
#              user can input the transition rate instead for continuous time markov chain,
#              as a function or a matrix (passed via ...)
# Returns a list of class 'mc' containing the inputs and the stationary distribution.
stationary = function(pijdef=NULL, type, tol = 1e-6, ...){
  args = list(...)
  if(type == 'discrete'){ #Discrete case
    # NOTE: use is.matrix() rather than class(x) == 'matrix'; under R >= 4.0
    # class() on a matrix returns c("matrix", "array"), which breaks if().
    if(is.matrix(pijdef)){ #Finite number of states
      mkc = new('markovchain', transitionMatrix = pijdef)
      absorb = absorbingStates(mkc)
      if(length(absorb) > 0) stop('At least 1 absorbing state(s). Use absorb.mc() instead.')
      pis = findpil.fin(pijdef)
      mc = list(pijdef = pijdef, stationary.distribution = pis)
      class(mc) = 'mc'
      return(mc)
    }else{ #Infinite number of states: pijdef is a function of (i, j)
      pis = findpil.inf(pijdef, tol)
      mc = list(pijdef = pijdef, stationary.distribution = pis)
      class(mc) = 'mc'
      return(mc)
    }
  }else if(type == 'continuous'){ #Continuous case.
    if(!hasArg('qidef') && !hasArg(transrate)) stop('Missing holding/transition rates')
    if(hasArg(transrate)){ #User input transition rate
      if(is.matrix(args$transrate)){ #Finite states
        pis = findpicont.fin(transrate = args$transrate)
        mc = list(transrate = args$transrate, stationary.distribution = pis)
        class(mc) = 'mc'
        return(mc)
      }else{ #Infinite states
        pis = findpicont.inf(transrate = args$transrate)
        mc = list(transrate = args$transrate, stationary.distribution = pis)
        class(mc) = 'mc'
        return(mc)
      }
    }else{ #User input probability matrix/function and holding rates
      if(is.null(pijdef)) stop('Missing probability matrix')
      if(is.matrix(pijdef)){ #Finite number of states
        if(length(args$qidef) != nrow(pijdef)) stop('Dimension of probability matrix and holding rates mismatch')
        pis = findpicont.fin(pijdef = pijdef, qidef = args$qidef)
        mc = list(pijdef = pijdef, stationary.distribution = pis, holding.rates = args$qidef)
        class(mc) = 'mc'
        return(mc)
      }else{
        pis = findpicont.inf(pijdef = pijdef, qidef = args$qidef)
        mc = list(pijdef = pijdef, stationary.distribution = pis, holding.rates = args$qidef)
        class(mc) = 'mc'
        return(mc)
      }
    }
  }
}
#Find the stationary vector of a finite transition probability matrix by
#solving the balance equations (I - P')pi = 0 together with sum(pi) = 1.
findpil.fin = function(pijdef){
  n.states <- nrow(pijdef)
  coef <- diag(n.states) - t(pijdef)
  # Replace the (redundant) last balance equation with the normalisation row.
  coef[n.states, ] <- rep(1, n.states)
  solve(coef, c(rep(0, n.states - 1), 1))
}
#Find the stationary probability given a function for infinite state transition probability
#pijdef: function of (i, j) returning the one-step transition probability
#tol:    squared-error tolerance between successive truncation levels
findpil.inf = function(pijdef, tol = 1e-06){
  # Truncate the infinite chain to its first k states; the probability mass
  # beyond state k is folded into the last diagonal entry so row k sums to 1.
  # (Extracted as a local helper: the original repeated this block three times.)
  truncate.pij = function(k){
    pij = sapply(1:k, function(i){
      sapply(1:k, function(j){
        pijdef(i,j)
      })
    })
    pij = t(pij)
    pij[k,k] = 1-sum(pij[k,1:(k-1)])
    pij
  }
  k = 10
  stationary.pi = findpil.fin(truncate.pij(k))
  # Heuristic sanity checks on the first truncation: the tail mass should
  # decay, and the distribution should not be (numerically) constant.
  if(stationary.pi[length(stationary.pi)] > stationary.pi[length(stationary.pi)-1]) stop("stationary distribution doesn't converge")
  if(all(abs(diff(stationary.pi)) < 1e-10)) stop('no stationary distribution')
  # Grow the truncation by 10 states until successive estimates agree to tol.
  k = 20
  stationary.pi.new = findpil.fin(truncate.pij(k))
  error = sum((c(stationary.pi, rep(0, 10)) - stationary.pi.new)^2)
  while(error > tol){
    stationary.pi = stationary.pi.new
    k = k + 10
    stationary.pi.new = findpil.fin(truncate.pij(k))
    error = sum((c(stationary.pi, rep(0, 10)) - stationary.pi.new)^2)
  }
  return(stationary.pi.new)
}
#Find the stationary distribution for continuous markov chain, given either
#the jump probabilities plus holding rates (pijdef, qidef) or a transition
#rate matrix (transrate).  NOTE(review): the transrate matrix is used as-is,
#so it is expected in transposed form (column i = rates out of state i), as
#produced by findpicont.inf's truncate() -- confirm against callers.
findpicont.fin = function(...){
  dots = list(...)
  if(hasArg(pijdef)){ #User input probability matrix and holding rate
    # Build the (transposed) generator: column i collects q_i * p_{i,j}.
    n.states = length(dots$qidef)
    gen = diag(-dots$qidef)
    for(state in seq_len(n.states)){
      gen[-state, state] = dots$qidef[state] * dots$pijdef[state, -state]
    }
    # Swap the last balance equation for the normalisation sum(pi) = 1.
    gen[n.states, ] = rep(1, n.states)
    solve(gen, c(rep(0, n.states - 1), 1))
  }else{ #User input transition rate matrix
    gen = dots$transrate
    n.states = nrow(gen)
    gen[n.states, ] = rep(1, n.states)
    # rhs is a 1-column matrix here, so the result is returned as a matrix
    # (preserving the original function's behaviour).
    solve(gen, as.matrix(c(rep(0, n.states - 1), 1)))
  }
}
#Find the stationary distribution for infinite state continuous markov chain,
#approximated by solving successively larger finite truncations until two
#consecutive stationary vectors agree to within tol (squared error).
findpicont.inf = function(tol = 1e-6,...){
  args = list(...)
  # Truncate a transition-rate function to its leading k x k block, transposed
  # so column i holds the rates out of state i, as findpicont.fin expects.
  truncate = function(k, transrate){
    Q = sapply(1:k, function(i){
      sapply(1:k, function(j){
        transrate(i,j)
      })
    })
    Q = t(Q)
    return(Q)
  }
  if(hasArg(transrate)){ #User input transition rates
    Q = truncate(10, args$transrate)
    stationary.old = findpicont.fin(transrate = Q)
    Q = truncate(20, args$transrate)
    stationary.new = findpicont.fin(transrate = Q)
    error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    k = 20
    if(stationary.new[length(stationary.new)] > stationary.new[length(stationary.new)-1]) stop("stationary distribution doesn't converge")
    # BUG FIX: this check previously referenced the undefined variable
    # 'stationary.pi' (copied from findpil.inf) and errored when evaluated.
    if(all(abs(diff(stationary.new)) < 1e-10)) stop('no stationary distribution')
    while(error > tol){
      stationary.old = stationary.new
      k = k + 10
      Q = truncate(k, args$transrate)
      stationary.new = findpicont.fin(transrate = Q)
      error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    }
    return(stationary.new)
  }else{ #User input functions for probability matrix and holding rates
    # Truncate the holding-rate function to its first k states.
    rate.truncate = function(k, rates){
      qi = sapply(1:k, function(i){
        rates(i)
      })
      return(qi)
    }
    pij = truncate(10, args$pijdef)
    qi = rate.truncate(10, args$qidef)
    stationary.old = findpicont.fin(pijdef = pij, qidef = qi)
    pij = truncate(20, args$pijdef)
    qi = rate.truncate(20, args$qidef)
    stationary.new = findpicont.fin(pijdef = pij, qidef = qi)
    error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    k = 20
    while(error > tol){
      stationary.old = stationary.new
      k = k + 10
      pij = truncate(k, args$pijdef)
      qi = rate.truncate(k, args$qidef)
      stationary.new = findpicont.fin(pijdef = pij, qidef = qi)
      error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    }
    return(stationary.new)
  }
} | /R/stationary.R | no_license | SonmezOzan/mc_1.0.3 | R | false | false | 6,994 | r | # stationary() finds the stationary
# chain (defined through a probability transition matrix or function)
# Input variables:
# pijdef: The transition probabilities, either in matrix form or a function
# type: Type of markov chain, either 'discrete' or 'continuous'
# tol: A positive scalar for error tolerance for infinite markov chain approximation
# qidef: Holding rates at each state for continuous markov chain
# transrate: Instead of inputting the individual holding rate and transition probabilities,
# user can input the transition rate instead for continuous time markov chain,
# as a function or a matrix
# Finds the stationary distribution of the given markov chain; see the
# header comments above.  Returns a list of class 'mc'.
stationary = function(pijdef=NULL, type, tol = 1e-6, ...){
  args = list(...)
  if(type == 'discrete'){ #Discrete case
    # NOTE: use is.matrix() rather than class(x) == 'matrix'; under R >= 4.0
    # class() on a matrix returns c("matrix", "array"), which breaks if().
    if(is.matrix(pijdef)){ #Finite number of states
      mkc = new('markovchain', transitionMatrix = pijdef)
      absorb = absorbingStates(mkc)
      if(length(absorb) > 0) stop('At least 1 absorbing state(s). Use absorb.mc() instead.')
      pis = findpil.fin(pijdef)
      mc = list(pijdef = pijdef, stationary.distribution = pis)
      class(mc) = 'mc'
      return(mc)
    }else{ #Infinite number of states: pijdef is a function of (i, j)
      pis = findpil.inf(pijdef, tol)
      mc = list(pijdef = pijdef, stationary.distribution = pis)
      class(mc) = 'mc'
      return(mc)
    }
  }else if(type == 'continuous'){ #Continuous case.
    if(!hasArg('qidef') && !hasArg(transrate)) stop('Missing holding/transition rates')
    if(hasArg(transrate)){ #User input transition rate
      if(is.matrix(args$transrate)){ #Finite states
        pis = findpicont.fin(transrate = args$transrate)
        mc = list(transrate = args$transrate, stationary.distribution = pis)
        class(mc) = 'mc'
        return(mc)
      }else{ #Infinite states
        pis = findpicont.inf(transrate = args$transrate)
        mc = list(transrate = args$transrate, stationary.distribution = pis)
        class(mc) = 'mc'
        return(mc)
      }
    }else{ #User input probability matrix/function and holding rates
      if(is.null(pijdef)) stop('Missing probability matrix')
      if(is.matrix(pijdef)){ #Finite number of states
        if(length(args$qidef) != nrow(pijdef)) stop('Dimension of probability matrix and holding rates mismatch')
        pis = findpicont.fin(pijdef = pijdef, qidef = args$qidef)
        mc = list(pijdef = pijdef, stationary.distribution = pis, holding.rates = args$qidef)
        class(mc) = 'mc'
        return(mc)
      }else{
        pis = findpicont.inf(pijdef = pijdef, qidef = args$qidef)
        mc = list(pijdef = pijdef, stationary.distribution = pis, holding.rates = args$qidef)
        class(mc) = 'mc'
        return(mc)
      }
    }
  }
}
#Find the stationary vector of a finite transition probability matrix by
#solving the balance equations (I - P')pi = 0 together with sum(pi) = 1.
findpil.fin = function(pijdef){
  n.states <- nrow(pijdef)
  coef <- diag(n.states) - t(pijdef)
  # Replace the (redundant) last balance equation with the normalisation row.
  coef[n.states, ] <- rep(1, n.states)
  solve(coef, c(rep(0, n.states - 1), 1))
}
#Find the stationary probability given a function for infinite state transition probability
#pijdef: function of (i, j) returning the one-step transition probability
#tol:    squared-error tolerance between successive truncation levels
findpil.inf = function(pijdef, tol = 1e-06){
  # Truncate the infinite chain to its first k states; the probability mass
  # beyond state k is folded into the last diagonal entry so row k sums to 1.
  # (Extracted as a local helper: the original repeated this block three times.)
  truncate.pij = function(k){
    pij = sapply(1:k, function(i){
      sapply(1:k, function(j){
        pijdef(i,j)
      })
    })
    pij = t(pij)
    pij[k,k] = 1-sum(pij[k,1:(k-1)])
    pij
  }
  k = 10
  stationary.pi = findpil.fin(truncate.pij(k))
  # Heuristic sanity checks on the first truncation: the tail mass should
  # decay, and the distribution should not be (numerically) constant.
  if(stationary.pi[length(stationary.pi)] > stationary.pi[length(stationary.pi)-1]) stop("stationary distribution doesn't converge")
  if(all(abs(diff(stationary.pi)) < 1e-10)) stop('no stationary distribution')
  # Grow the truncation by 10 states until successive estimates agree to tol.
  k = 20
  stationary.pi.new = findpil.fin(truncate.pij(k))
  error = sum((c(stationary.pi, rep(0, 10)) - stationary.pi.new)^2)
  while(error > tol){
    stationary.pi = stationary.pi.new
    k = k + 10
    stationary.pi.new = findpil.fin(truncate.pij(k))
    error = sum((c(stationary.pi, rep(0, 10)) - stationary.pi.new)^2)
  }
  return(stationary.pi.new)
}
#Find the stationary distribution for continuous markov chain, given either
#the jump probabilities plus holding rates (pijdef, qidef) or a transition
#rate matrix (transrate).  NOTE(review): the transrate matrix is used as-is,
#so it is expected in transposed form (column i = rates out of state i), as
#produced by findpicont.inf's truncate() -- confirm against callers.
findpicont.fin = function(...){
  dots = list(...)
  if(hasArg(pijdef)){ #User input probability matrix and holding rate
    # Build the (transposed) generator: column i collects q_i * p_{i,j}.
    n.states = length(dots$qidef)
    gen = diag(-dots$qidef)
    for(state in seq_len(n.states)){
      gen[-state, state] = dots$qidef[state] * dots$pijdef[state, -state]
    }
    # Swap the last balance equation for the normalisation sum(pi) = 1.
    gen[n.states, ] = rep(1, n.states)
    solve(gen, c(rep(0, n.states - 1), 1))
  }else{ #User input transition rate matrix
    gen = dots$transrate
    n.states = nrow(gen)
    gen[n.states, ] = rep(1, n.states)
    # rhs is a 1-column matrix here, so the result is returned as a matrix
    # (preserving the original function's behaviour).
    solve(gen, as.matrix(c(rep(0, n.states - 1), 1)))
  }
}
#Find the stationary distribution for infinite state continuous markov chain, given the transition probability and holding rates
findpicont.inf = function(tol = 1e-6,...){
  # Stationary distribution of an infinite-state continuous-time Markov
  # chain, approximated by truncating the state space to k states and
  # growing k in steps of 10 until successive solutions differ (squared)
  # by less than `tol`. Supply either `transrate(i, j)` or both
  # `pijdef(i, j)` and `qidef(i)` through `...`.
  args = list(...)
  # Truncate a two-argument rate/probability function to a k x k matrix.
  truncate = function(k, transrate){
    Q = sapply(1:k, function(i){
      sapply(1:k, function(j){
        transrate(i,j)
      })
    })
    Q = t(Q)
    return(Q)
  }
  if(hasArg(transrate)){ #User input transition rates
    solve.at = function(k) findpicont.fin(transrate = truncate(k, args$transrate))
    stationary.old = solve.at(10)
    k = 20
    stationary.new = solve.at(k)
    # Pad the shorter solution with zeros: k always grows by exactly 10.
    error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    if(stationary.new[length(stationary.new)] > stationary.new[length(stationary.new)-1]) stop("stationary distribution doesn't converge")
    # BUG FIX: this check previously read the undefined object
    # `stationary.pi`, which raised an error whenever it was reached.
    if(all(abs(diff(stationary.new)) < 1e-10)) stop('no stationary distribution')
    while(error > tol){
      stationary.old = stationary.new
      k = k + 10
      stationary.new = solve.at(k)
      error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    }
    return(stationary.new)
  }else{ #User input functions for probability matrix and holding rates
    # Truncate the holding-rate function to its first k states.
    rate.truncate = function(k, rates){
      qi = sapply(1:k, function(i){
        rates(i)
      })
      return(qi)
    }
    solve.at = function(k) findpicont.fin(pijdef = truncate(k, args$pijdef),
                                          qidef = rate.truncate(k, args$qidef))
    stationary.old = solve.at(10)
    k = 20
    stationary.new = solve.at(k)
    error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    while(error > tol){
      stationary.old = stationary.new
      k = k + 10
      stationary.new = solve.at(k)
      error = sum((c(stationary.old, rep(0, 10)) - stationary.new)^2)
    }
    return(stationary.new)
  }
} |
#' @title \code{qkay} The K distribution quantile function
#'
#' @description Quantile function for the K distribution on \code{df} degrees of freedom having non-centrality parameter \code{ncp}.
#'
#' A K distribution is the square root of a chi-square divided by its degrees of freedom. That is, if x is chi-squared on m degrees of freedom, then y = sqrt(x/m) is K on m degrees of freedom.
#' Under standard normal theory, K is the distribution of the pivotal quantity s/sigma where s is the sample standard deviation and sigma is the standard deviation parameter of the normal density. K is the natural distribution for tests and confidence intervals about sigma.
#' K densities are more nearly symmetric than are chi-squared and concentrate near 1. As the degrees of freedom increase, they become more symmetric, more concentrated, and more nearly normally distributed.
#'
#'
#' @export qkay
#'
#' @param p A vector of probabilities at which to calculate the quantiles.
#' @param df Degrees of freedom (non-negative, but can be non-integer).
#' @param ncp Non-centrality parameter (non-negative).
#' @param upper.tail logical; if \code{TRUE}, instead of returning F(x) (the default), the upper tail probabilities 1-F(x) = Pr(X>x) are returned.
#' @param log.p logical; if \code{TRUE}, probabilities are given as log(p).
#'
#' @return \code{qkay} returns the quantiles at probabilities \code{p} for a K on \code{df} degrees of freedom and non-centrality parameter \code{ncp}.
#'
#' Invalid arguments will result in return value NaN, with a warning.
#'
#' The length of the result is the maximum of the lengths of the numerical arguments.
#'
#' The numerical arguments are recycled to the length of the result. Only the first elements of the logical arguments are used.
#'
#'
#' @note All calls depend on analogous calls to chi-squared functions. See \code{qchisq} for details on non-centrality parameter calculations.
#'
#'
#' @examples
#'
#' p <- ppoints(30)
#' # Get the quantiles for these points
#' q5 <- qkay(p, 5)
#' plot(p, q5, main="Quantile plot of K(5)", ylim=c(0,max(q5)))
#' # Add quantiles from another K
#' points(p, qkay(p, 20), pch=19)
#'
#' #
#' # Do these EXACT quantiles from a K(5) look like they might
#' # have been generated from K(20)?
#' qqtest(q5, dist="kay",df=20)
#'
#' # How about compared to normal?
#' qqnorm(q5)
#' qqtest(q5)
#' # for this many degrees of freedom it looks a lot like
#' # a gaussian (normal) distribution
#'
#' # And should look really good compared to the true distribution
#' qqtest(q5, dist="kay", df=5)
#' #
#' #
#' # But not so much like it came from a K on 1 degree of freedom
#' qqtest(q5, dist="kay",df=1)
#'
qkay <- function(p, df, ncp=0, upper.tail = FALSE, log.p = FALSE) {
  # Chi-squared non-centrality implied by the K non-centrality: df * ncp^2.
  chincp <- df * ncp^2
  # K quantile = sqrt(chi-squared quantile / df); `!upper.tail` is passed
  # positionally as qchisq's `lower.tail` argument.
  sqrt(qchisq(p, df, chincp, !upper.tail, log.p) /df)
} | /qqtest/R/qkay.R | no_license | ingted/R-Examples | R | false | false | 2,865 | r | #' @title \code{qkay} The K distribution quantile function
#'
#' @description Quantile function for the K distribution on \code{df} degrees of freedom having non-centrality parameter \code{ncp}.
#'
#' A K distribution is the square root of a chi-square divided by its degrees of freedom. That is, if x is chi-squared on m degrees of freedom, then y = sqrt(x/m) is K on m degrees of freedom.
#' Under standard normal theory, K is the distribution of the pivotal quantity s/sigma where s is the sample standard deviation and sigma is the standard deviation parameter of the normal density. K is the natural distribution for tests and confidence intervals about sigma.
#' K densities are more nearly symmetric than are chi-squared and concentrate near 1. As the degrees of freedom increase, they become more symmetric, more concentrated, and more nearly normally distributed.
#'
#'
#' @export qkay
#'
#' @param p A vector of probabilities at which to calculate the quantiles.
#' @param df Degrees of freedom (non-negative, but can be non-integer).
#' @param ncp Non-centrality parameter (non-negative).
#' @param upper.tail logical; if \code{TRUE}, instead of returning F(x) (the default), the upper tail probabilities 1-F(x) = Pr(X>x) are returned.
#' @param log.p logical; if \code{TRUE}, probabilities are given as log(p).
#'
#' @return \code{qkay} returns the quantiles at probabilities \code{p} for a K on \code{df} degrees of freedom and non-centrality parameter \code{ncp}.
#'
#' Invalid arguments will result in return value NaN, with a warning.
#'
#' The length of the result is the maximum of the lengths of the numerical arguments.
#'
#' The numerical arguments are recycled to the length of the result. Only the first elements of the logical arguments are used.
#'
#'
#' @note All calls depend on analogous calls to chi-squared functions. See \code{qchisq} for details on non-centrality parameter calculations.
#'
#'
#' @examples
#'
#' p <- ppoints(30)
#' # Get the quantiles for these points
#' q5 <- qkay(p, 5)
#' plot(p, q5, main="Quantile plot of K(20)", ylim=c(0,max(q5)))
#' # Add quantiles from another K
#' points(p, qkay(p, 20), pch=19)
#'
#' #
#' # Do these EXACT quantiles from a K(5) look like they might
#' # have been generated from K(20)?
#' qqtest(q5, dist="kay",df=20)
#'
#' # How about compared to normal?
#' qqnorm(q5)
#' qqtest(q5)
#' # for this many degrees of freedom it looks a lot like
#' # a gaussian (normal) distribution
#'
#' # And should look really good compared to the true distribution
#' qqtest(q5, dist="kay", df=5)
#' #
#' #
#' # But not so much like it came from a K on 1 degree of freedom
#' qqtest(q5, dist="kay",df=1)
#'
qkay <- function(p, df, ncp=0, upper.tail = FALSE, log.p = FALSE) {
chincp <- df * ncp^2
sqrt(qchisq(p, df, chincp, !upper.tail, log.p) /df)
} |
\name{WeatherMap.set.option}
\alias{WeatherMap.set.option}
\title{WeatherMap.option}
\usage{
WeatherMap.set.option(Options = NULL, option = NULL,
value = NULL)
}
\arguments{
\item{Options}{list of options - if NULL, use defaults}
\item{option}{name of option to set}
\item{value}{value to set selected option to}
}
\value{
new list of options
}
\description{
Set or query the options controlling the plot.
}
\details{
The rendering of a map is controlled by a large number of
options contained in a list. Option: Default: Effect:
cores 1 Not currently used pole.lat 90 pole.lon 180 Pole
location for map lon.min -180 lon.max 180 lat.min -90
lat.max 90 Map range (around centre) show.wind=TRUE,
show.precipitation=TRUE, show.mslp=TRUE,
show.temperature=TRUE, show.ice=FALSE, show.fog=FALSE,
show.obs=FALSE, show.ice.shelves=TRUE,
precip.points=25000, # Bigger -> higher res precip
precip.threshold=0.0025, # Only show where more than this
precip.range=0.03, # Precip rate for max intensity
precip.T.snow=273, # Show as snow where colder (K)
precip.pch=21, # Graphics context for drawing precip
precip.lty=1, precip.lwd=1, precip.scale=1, # Scaling for
precip blob size precip.max.opacity=1,
precip.colour=c(0,0,0), # Colour for intense precip
wind.vector.fade.steps=1, # Increase for gradual fade
in/out wind.vector.iterate=1, # Move streamlets n times
before drawing wind.vector.seed=2, # Smaller -> more wind
vectors wind.vector.arrow=NULL, # See ?arrow
wind.vector.points=3, # Bigger -> smoother curves and
slower wind.vector.scale=0.25, # Bigger -> longer vectors
wind.vector.move.scale=1, # Bigger -> faster moving
vectors wind.vector.decimate=0.2, # Bigger -> less vector
clustering wind.vector.decimate.bandwidth=0.5, #
wind.vector.decimate.gridsize=1000, # wind.vector.lwd=2,
# Line width jitter=TRUE, # Jitter vector seed points?
wind.palette=rev( brewer.pal(11,'RdBu')), # Interpolated
blue red wind.palette.bias=1, # ?colorRamp
wind.palette.opacity=1, # wind.palette.maxgrey=550, #
Smaller -> white lines darker temperature.range=7, # T2m
anomaly for max. colour mslp.base=101325, # Base value
for anomalies mslp.range=10000, # Anomaly for max contour
mslp.step=750, # Smaller -> more contours
mslp.tpscale=2000, # Smaller -> contours less transparent
mslp.lwd=1, background.resolution='low', # 'low' for
fast, 'high' for pretty
sea.colour=rgb(80*1.1,95*1.1,107*1.1,255,
maxColorValue=255), # For background
ice.colour=rgb(150,165,177,255, maxColorValue=255),
merge.colour=rgb(110,110,110,255, maxColorValue=255), #
Soften Wind colours merge.weight=1, # Amount of softening
to apply ice.points=10000, # Bigger - higher res ice
land.colour=rgb(123,121,117,255, maxColorValue=255),
fog.colour=c(0.65,0.65,0.65), # 0-1, bigger -> lighter
fog fog.min.transparency=0.85, # 0-1, bigger -> thicker
fog fog.resolution=1, # Grid resolution in degrees
obs.size=0.5, # In degrees obs.colour=rgb(255,215,0,100,
maxColorValue=255), # For observations label='', # Label
- the date is a good choice label.xp=0.97,label.yp=0.04 #
Location, 'npc, units
}
| /GSDF.WeatherMap/man/WeatherMap.set.option.Rd | permissive | jacobvanetten/GSDF | R | false | false | 3,168 | rd | \name{WeatherMap.set.option}
\alias{WeatherMap.set.option}
\title{WeatherMap.option}
\usage{
WeatherMap.set.option(Options = NULL, option = NULL,
value = NULL)
}
\arguments{
\item{Options}{list of options - if NULL, use defaults}
\item{option}{name of option to set}
\item{value}{value to set selected option to}
}
\value{
new list of options
}
\description{
Set or query the options controling the plot.
}
\details{
The rendering of a map is controlled by a large number of
options contained in a list. Option: Default: Effect:
cores 1 Not currently used pole.lat 90 pole.lon 180 Pole
location for map lon.min -180 lon.max 180 lat.min -90
lat.max 90 Map range (around centre) show.wind=TRUE,
show.precipitation=TRUE, show.mslp=TRUE,
show.temperature=TRUE, show.ice=FALSE, show.fog=FALSE,
show.obs=FALSE, show.ice.shelves=TRUE,
precip.points=25000, # Bigger -> higher res precip
precip.threshold=0.0025, # Only show where more than this
precip.range=0.03, # Precip rate for max intensity
precip.T.snow=273, # Show as snow where colder (K)
precip.pch=21, # Graphics context for drawing precip
precip.lty=1, precip.lwd=1, precip.scale=1, # Scaling for
precip blob size precip.max.opacity=1,
precip.colour=c(0,0,0), # Colour for intense precip
wind.vector.fade.steps=1, # Increase for gradual fade
in/out wind.vector.iterate=1, # Move streamlets n times
before drawing wind.vector.seed=2, # Smaller -> more wind
vectors wind.vector.arrow=NULL, # See ?arrow
wind.vector.points=3, # Bigger -> smoother curves and
slower wind.vector.scale=0.25, # Bigger -> longer vectors
wind.vector.move.scale=1, # Bigger -> faster moving
vectors wind.vector.decimate=0.2, # Bigger -> less vector
clustering wind.vector.decimate.bandwidth=0.5, #
wind.vector.decimate.gridsize=1000, # wind.vector.lwd=2,
# Line width jitter=TRUE, # Jitter vector seed points?
wind.palette=rev( brewer.pal(11,'RdBu')), # Interpolated
blue red wind.palette.bias=1, # ?colorRamp
wind.palette.opacity=1, # wind.palette.maxgrey=550, #
Smaller -> white lines darker temperature.range=7, # T2m
anomaly for max. colour mslp.base=101325, # Base value
for anomalies mslp.range=10000, # Anomaly for max contour
mslp.step=750, # Smaller -> more contours
mslp.tpscale=2000, # Smaller -> contours less transparent
mslp.lwd=1, background.resolution='low', # 'low' for
fast, 'high' for pretty
sea.colour=rgb(80*1.1,95*1.1,107*1.1,255,
maxColorValue=255), # For background
ice.colour=rgb(150,165,177,255, maxColorValue=255),
merge.colour=rgb(110,110,110,255, maxColorValue=255), #
Soften Wind colours merge.weight=1, # Amount of softening
to apply ice.points=10000, # Bigger - higher res ice
land.colour=rgb(123,121,117,255, maxColorValue=255),
fog.colour=c(0.65,0.65,0.65), # 0-1, bigger -> lighter
fog fog.min.transparency=0.85, # 0-1, bigger -> thicker
fog fog.resolution=1, # Grid resolution in degrees
obs.size=0.5, # In degrees obs.colour=rgb(255,215,0,100,
maxColorValue=255), # For observations label='', # Label
- the date is a good choice label.xp=0.97,label.yp=0.04 #
Location, 'npc, units
}
|
### distance_to_TSS.R
### Compute, save and plot the distance between each SNP and its nearest
### transcription start site (TSS), for cis- and trans-eQTLs per tissue.
### Load variables
# NOTE(review): FDRcis, FDRtrans, window, eqtl.dir, figure.dir and the
# *.file path variables used below are presumably defined here -- confirm.
source("code/variables_definition.R")
### Set parameters
# Input/output file names, parameterised by FDR thresholds and cis window.
eqtls.file <- paste0("all_tissues_eqtls_fdr", FDRcis, FDRtrans, "_", window, "MB.Rdata")
disttssfile <- "dist_snp_tss.RData"
pdfeqtlcisfile <- paste0("dist_tss_cis_fdr",FDRcis, FDRtrans, "_", window, "MB.pdf")
pdfeqtltransfile <- paste0("dist_tss_trans_fdr",FDRcis, FDRtrans, "_", window, "MB.pdf")
quantilecistrans <- paste0("dist_tss_summary_cis_trans_fdr",FDRcis, FDRtrans, "_", window, "MB.txt")
### Load data
# NOTE(review): these .RData files appear to provide anno.snps, anno.genes,
# eqtl and Tissues (object names inferred from usage below) -- confirm.
load(anno.snps.file)
load(anno.genes.file)
load(paste0(eqtl.dir, eqtls.file))
load(tissue.file)
###Functions
##Extract list of cis and trans SNPs
extract.cis.or.trans.snp <- function(x){
  # Unique SNP identifiers of `x`, split by their cis/trans classification.
  cis.mask <- x$cis.or.trans == "cis"
  trans.mask <- x$cis.or.trans == "trans"
  list("cis" = unique(x$RSID[cis.mask]),
       "trans" = unique(x$RSID[trans.mask]))
}
## Compute distance between a snp and the nearest TSS.
compute.distance <- function(snp, genes){
  # Signed distance from a SNP (position in snp[2]) to the nearest TSS in
  # `genes`; ties keep the first gene. Returns c(distance, gene id), which
  # c() coerces to character.
  dist.all <- as.numeric(snp[2]) - as.numeric(genes$transcript_start)
  nearest <- which(abs(dist.all) == min(abs(dist.all)))[1]
  c(dist.all[nearest], rownames(genes)[nearest])
}
## Plot distance to TSS of cis- and trans-eQTLs
plot.dist.tss <- function(s, dist.tss, step, main, xlim){
  # Histogram (as relative frequencies) of the distance to the nearest TSS
  # for the SNPs listed in `s`.
  #   s        : character vector of SNP ids, matched against rownames(dist.tss).
  #   dist.tss : data frame with a `nearest.tss` column of signed distances (bases).
  #   step     : histogram bin width, in bases.
  #   main     : plot title.
  #   xlim     : x-axis range in bases; tick labels are rescaled to kb.
  par(mar=c(4,5,4,1)+0.1)
  d <- as.numeric(dist.tss$nearest.tss[ rownames(dist.tss) %in% s])
  h <- hist(d, breaks=seq(min(d)-step, max(d)+step, step), plot=FALSE)
  # Convert counts to relative frequencies before plotting.
  h$density <- h$counts/sum(h$counts)
  plot(h, freq=FALSE, xlab="Distance to TSS (kb)", xaxt='n', ylab="Frequency", main=main, col="dodgerblue", xlim=xlim)
  # Label ticks in kb. Fixed the accidental `min(y=xlim)` call: the stray
  # `y=` was silently absorbed by min()'s `...` and worked only by luck.
  axis(side=1, at=seq(min(xlim), max(xlim), (xlim[2]-xlim[1])/4),
       labels=seq(min(xlim)/1000, max(xlim)/1000, (xlim[2]-xlim[1])/4000))
}
### Extract distance to TSS for each snp
# Use transcript_end as the TSS for genes on the reverse strand.
tss.genes <- anno.genes[,c("chromosome_name", "transcript_start")]
tss.genes$transcript_start[anno.genes$strand == -1] <- anno.genes$transcript_end[anno.genes$strand == -1]
tss.genes$transcript_start <- as.numeric(tss.genes$transcript_start)
# One row per SNP; nearest.tss/nearest.gene filled per chromosome below.
dist.tss <- anno.snps[, c("chromosome_name", "position")]
dist.tss$nearest.tss <- rep(NA, nrow(dist.tss))
dist.tss$nearest.gene <- rep(NA, nrow(dist.tss))
for(chr in unique(dist.tss$chromosome_name)){
    cat("Running chromosome", chr, "\n")
    # compute.distance() returns c(signed distance, gene id) for each SNP row.
    a <- t(apply(dist.tss[dist.tss$chromosome_name==chr,], 1, compute.distance,
                 genes=tss.genes[tss.genes$chromosome_name==chr,]))
    dist.tss[dist.tss$chromosome_name==chr, c("nearest.tss", "nearest.gene")] <- a[,1:2]
}
save(dist.tss, file=paste0(eqtl.dir, disttssfile))
### Extract snps by tissue
# Note: built from `eqtl` BEFORE the FDR filter applied just below.
snp <- lapply(eqtl, extract.cis.or.trans.snp)
### Extract 50% distance
# Keep significant eQTLs only, then report the range over tissues of the
# median |distance to TSS| for cis and trans eQTLs.
eqtl <- lapply(eqtl, function(x){x[x$FDR<=0.05,]})
qtl.cis <- lapply(eqtl, function(x){unique(x$RSID[x$cis.or.trans=="cis"])})
qtl.trans <- lapply(eqtl, function(x){unique(x$RSID[x$cis.or.trans=="trans"])})
d.cis <- lapply(qtl.cis, function(x,d){d[rownames(d) %in% x,3]}, d=dist.tss)
d.trans <- lapply(qtl.trans, function(x,d){d[rownames(d) %in% x,3]}, d=dist.tss)
# NOTE(review): `nb.samples` is not defined in this script -- presumably
# loaded from one of the .RData files above; confirm.
range(unlist(lapply(d.cis, function(x) quantile(abs(as.numeric(x)), 0.5)))[nb.samples>200])
range(unlist(lapply(d.trans, function(x) quantile(abs(as.numeric(x)), 0.5)))[nb.samples>200])
### Get quantile distribution of distance between cis- and trans-eQTLs and nearest TSS
q.cis <-matrix(ncol=9, nrow=0)
q.trans <-matrix(ncol=9, nrow=0)
for(i in 1:length(snp)){
    s <- snp[[i]]$cis
    s2 <- snp[[i]]$trans
    d <- as.numeric(dist.tss$nearest.tss[ rownames(dist.tss) %in% s])
    d2 <- as.numeric(dist.tss$nearest.tss[ rownames(dist.tss) %in% s2])
    q.cis <- rbind(q.cis, quantile(d, c(0, 0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99, 1)) )
    q.trans <- rbind(q.trans, quantile(d2, c(0, 0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99, 1)) )
}
rownames(q.cis) <- rownames(q.trans) <- names(eqtl)
write.table(rbind(q.cis, q.trans), file=paste0( eqtl.dir, quantilecistrans), quote=F, sep="\t")
### Plot distance to TSS of cis-eQTLs
# One histogram per tissue, six panels per PDF page.
pdf(paste( figure.dir, pdfeqtlcisfile, sep=""),width=8, height=11)
par(mfrow=c(3,2))
for(i in 1:length(snp)){
    plot.dist.tss(snp[[i]]$cis, dist.tss, step=2000,
                  main=Tissues[names(snp)[i],1], xlim=c(-100000, 100000))
}
dev.off()
### Plot distance to TSS of trans-eQTLs
pdf(paste(figure.dir, pdfeqtltransfile, sep=""),width=8, height=11)
par(mfrow=c(3,2))
for(i in 1:length(snp)){
    plot.dist.tss(snp[[i]]$trans, dist.tss, 2000,
                  main=Tissues[names(snp)[i],1], xlim=c(-100000, 100000))
}
dev.off()
| /code/distance_to_TSS.R | no_license | maudf/gtex_condor | R | false | false | 4,418 | r | ### distance_to_TSS.R
### Load variables
source("code/variables_definition.R")
### Set parameters
eqtls.file <- paste0("all_tissues_eqtls_fdr", FDRcis, FDRtrans, "_", window, "MB.Rdata")
disttssfile <- "dist_snp_tss.RData"
pdfeqtlcisfile <- paste0("dist_tss_cis_fdr",FDRcis, FDRtrans, "_", window, "MB.pdf")
pdfeqtltransfile <- paste0("dist_tss_trans_fdr",FDRcis, FDRtrans, "_", window, "MB.pdf")
quantilecistrans <- paste0("dist_tss_summary_cis_trans_fdr",FDRcis, FDRtrans, "_", window, "MB.txt")
### Load data
load(anno.snps.file)
load(anno.genes.file)
load(paste0(eqtl.dir, eqtls.file))
load(tissue.file)
###Functions
##Extract list of cis and trans SNPs
extract.cis.or.trans.snp <- function(x){
cis <- unique(x$RSID[x$cis.or.trans=="cis"])
trans <- unique(x$RSID[x$cis.or.trans=="trans"])
return(list("cis"=cis, "trans"=trans))
}
## Compute distance between a snp and the nearest TSS.
compute.distance <- function(snp, genes){
d <- as.numeric(snp[2]) - as.numeric(genes$transcript_start)
j <- which(abs(d)==min(abs(d)))[1]
return(c(d[j], rownames(genes)[j]) )
}
## Plot distance to TSS of cis- and trans-eQTLs
plot.dist.tss <- function(s, dist.tss, step, main, xlim){
par(mar=c(4,5,4,1)+0.1)
d <- as.numeric(dist.tss$nearest.tss[ rownames(dist.tss) %in% s])
h <- hist(d, breaks=seq(min(d)-step, max(d)+step, step), plot=F)
h$density <- h$counts/sum(h$counts)
plot(h, freq=F, xlab="Distance to TSS (kb)", xaxt='n', ylab="Frequency", main=main, col="dodgerblue", xlim=xlim)
axis(side=1, at=seq(min(xlim), max(xlim), (xlim[2]-xlim[1])/4),
labels=seq(min(y=xlim)/1000, max(xlim)/1000, (xlim[2]-xlim[1])/4000))
}
### Extract distance to TSS for each snp
tss.genes <- anno.genes[,c("chromosome_name", "transcript_start")]
tss.genes$transcript_start[anno.genes$strand == -1] <- anno.genes$transcript_end[anno.genes$strand == -1]
tss.genes$transcript_start <- as.numeric(tss.genes$transcript_start)
dist.tss <- anno.snps[, c("chromosome_name", "position")]
dist.tss$nearest.tss <- rep(NA, nrow(dist.tss))
dist.tss$nearest.gene <- rep(NA, nrow(dist.tss))
for(chr in unique(dist.tss$chromosome_name)){
cat("Running chromosome", chr, "\n")
a <- t(apply(dist.tss[dist.tss$chromosome_name==chr,], 1, compute.distance,
genes=tss.genes[tss.genes$chromosome_name==chr,]))
dist.tss[dist.tss$chromosome_name==chr, c("nearest.tss", "nearest.gene")] <- a[,1:2]
}
save(dist.tss, file=paste0(eqtl.dir, disttssfile))
### Extract snps by tissue
snp <- lapply(eqtl, extract.cis.or.trans.snp)
### Extract 50% distance
eqtl <- lapply(eqtl, function(x){x[x$FDR<=0.05,]})
qtl.cis <- lapply(eqtl, function(x){unique(x$RSID[x$cis.or.trans=="cis"])})
qtl.trans <- lapply(eqtl, function(x){unique(x$RSID[x$cis.or.trans=="trans"])})
d.cis <- lapply(qtl.cis, function(x,d){d[rownames(d) %in% x,3]}, d=dist.tss)
d.trans <- lapply(qtl.trans, function(x,d){d[rownames(d) %in% x,3]}, d=dist.tss)
range(unlist(lapply(d.cis, function(x) quantile(abs(as.numeric(x)), 0.5)))[nb.samples>200])
range(unlist(lapply(d.trans, function(x) quantile(abs(as.numeric(x)), 0.5)))[nb.samples>200])
### Get quantile distribition of distance between cis- and trans-eQTLs and nearest TSS
q.cis <-matrix(ncol=9, nrow=0)
q.trans <-matrix(ncol=9, nrow=0)
for(i in 1:length(snp)){
s <- snp[[i]]$cis
s2 <- snp[[i]]$trans
d <- as.numeric(dist.tss$nearest.tss[ rownames(dist.tss) %in% s])
d2 <- as.numeric(dist.tss$nearest.tss[ rownames(dist.tss) %in% s2])
q.cis <- rbind(q.cis, quantile(d, c(0, 0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99, 1)) )
q.trans <- rbind(q.trans, quantile(d2, c(0, 0.01, 0.05, 0.25, 0.5, 0.75, 0.95, 0.99, 1)) )
}
rownames(q.cis) <- rownames(q.trans) <- names(eqtl)
write.table(rbind(q.cis, q.trans), file=paste0( eqtl.dir, quantilecistrans), quote=F, sep="\t")
### Plot distance to TSS of cis-eQTLs
pdf(paste( figure.dir, pdfeqtlcisfile, sep=""),width=8, height=11)
par(mfrow=c(3,2))
for(i in 1:length(snp)){
plot.dist.tss(snp[[i]]$cis, dist.tss, step=2000,
main=Tissues[names(snp)[i],1], xlim=c(-100000, 100000))
}
dev.off()
### Plot distance to TSS of trans-eQTLs
pdf(paste(figure.dir, pdfeqtltransfile, sep=""),width=8, height=11)
par(mfrow=c(3,2))
for(i in 1:length(snp)){
plot.dist.tss(snp[[i]]$trans, dist.tss, 2000,
main=Tissues[names(snp)[i],1], xlim=c(-100000, 100000))
}
dev.off()
|
# Configure JAVA_HOME before nlrx/rJava initialises; the commented line is
# the cluster path, the active one the local Ubuntu workstation path.
#Sys.setenv(JAVA_HOME='/usr/local/software/spack/spack-0.11.2/opt/spack/linux-rhel7-x86_64/gcc-5.4.0/jdk-8u141-b15-p4aaoptkqukgdix6dh5ey236kllhluvr/jre') #Ubuntu cluster
Sys.setenv(JAVA_HOME= "/usr/lib/jvm/java-11-openjdk-amd64")
## Load packages
library(nlrx)
library(tidyverse)
library(rcartocolor)
library(ggthemes)
# Office
# Machine-specific paths to the NetLogo installation and the model directory.
netlogopath <- file.path("/home/hs621/NetLogo 6.1.1")
outpath <- file.path("/home/hs621/Dropbox (Cambridge University)/2019_Cambridge/[Programming]/Netlogo/Dissertation_Chapter4")
## Step1: Create a nl object wrapping the Gangnam pollution-exposure model:
nl <- nl(nlversion = "6.1.1",
         nlpath = netlogopath,
         modelpath = file.path(outpath, "St111261_Gangnam.nlogo"),
         jvmmem = 1024)
## Step2: Add Experiment
# Sample turtle-level spatial metrics every 10 ticks over 8764 ticks.
nl@experiment <- experiment(expname = "nlrx_spatial",
                            outpath = outpath,
                            repetition = 1,
                            tickmetrics = "true",
                            idsetup = "setup",
                            idgo = "go",
                            runtime = 8764,
                            evalticks=seq(1,8764, by = 10),
                            # Share of agents with health below 100.
                            metrics = c("count people with [health < 100] / count people"),
                            metrics.turtles = list("people" = c("who", "xcor", "ycor", "homename", "destinationName", "age", "edu","health")),
                            #variables = list('AC' = list(values=c(100,150,200))),
                            # NetLogo string constants need embedded quotes.
                            constants = list("PM10-parameters" = 100,
                                             "Scenario" = "\"BAU\"",
                                             "scenario-percent" = "\"inc-sce\"",
                                             "AC" = 100)
)
# Evaluate if variables and constants are valid:
eval_variables_constants(nl)
#nl@simdesign <- simdesign_distinct(nl = nl, nseeds = 1)
# Simple design: a single run using the constants above and one RNG seed.
nl@simdesign <- simdesign_simple(nl = nl, nseeds = 1)
# Step4: Run simulations (wall-clock timed):
init <- Sys.time()
results <- run_nl_all(nl = nl)
Sys.time() - init
# Attach results to nl object:
setsim(nl, "simoutput") <- results
# Report spatial data:
results_unnest <- unnest_simoutput(nl)
# Write output to outpath of experiment within nl
#write_simoutput(nl)
# Filter out unneeded variables and objects
# BAU scenario
# Keep turtles inside the study-area bounds over the late ticks and bin
# them into age/education groups.
# NOTE(review): `edu` is used by case_when() below but is not kept by the
# select() call -- this mutate() should fail at runtime; confirm whether
# `edu` ought to be added to the selection.
turtles <- results_unnest %>%
  select(`[step]`, Scenario, who, homename, destinationName, xcor, ycor, age, agent, health) %>%
  filter(agent == "turtles", Scenario == "BAU", ycor < 326 & xcor < 297 & xcor > 0) %>%
  filter(`[step]` %in% seq(5000,8764)) %>%
  mutate(age_group = case_when(age < 15 ~ "young",
                               age >= 15 & age < 65 ~ "active",
                               age >= 65 ~ "old"),
         edu_group = case_when(edu >= 3 ~ "high",
                               edu < 3 ~ "low"))
# NOTE(review): `gn`, `lb` and `ub` are not defined anywhere in this script
# -- presumably created by a sibling analysis script; confirm before running.
# `funs()` is deprecated in dplyr >= 0.8 (use list()/lambda notation).
bau <- bind_rows(gn %>% filter(scenario == "BAU") %>%
                   select(ticks, riskpop, AC, scenario, age_u15,age_btw1564,age_ov65,edu_high,edu_low) %>%
                   mutate(District= "Gangnam")) %>%
  group_by(District, scenario, AC, ticks) %>%
  summarise_all(funs(mean, lo = lb, hi = ub)) %>% as.data.frame()
#patches <- results_unnest %>% select(`[step]`, Scenario, pxcor, pycor, pcolor) %>%
#  filter(Scenario == "BAU", pycor < 324) %>%
#  filter(`[step]` %in% seq(5000,8764))
# Create facet plot: one panel per sampled tick, turtles positioned by
# model coordinates and coloured by age category.
# NOTE(review): aes(color = age) maps the raw `age` column, while the
# manual scale below expects the labels "young"/"active"/"old" -- this
# probably should map `age_group`; confirm.
ggplot() +
  facet_wrap(~`[step]`, ncol= 10) +
  coord_equal() +
  #geom_tile(data=patches, aes(x=pxcor, y=pycor, fill=pcolor), alpha = .2) +
  geom_point(data=turtles, aes(x = xcor, y = ycor, color = age), size=1) +
  # scale_fill_gradient(low = "white", high = "grey20") +
  scale_color_manual(breaks=c("young", "active", "old"),
                     values = c("young" = "#56B4E9", "active" = "#E69F00", "old" = "#999999")) +
  guides(fill=guide_legend(title="PM10")) +
  ggtitle("Unhealthly Population after a long-term exposure") +
  theme_minimal() +
  # Strip axes: panels are spatial snapshots, coordinates are not meaningful.
  theme(axis.line=element_blank(),
        axis.text.x=element_blank(),
        axis.text.y=element_blank(),
        axis.ticks=element_blank()
        #axis.title.x=element_blank(),
        #axis.title.y=element_blank(),legend.position="none",
        #panel.background=element_blank(),panel.border=element_blank(),panel.grid.major=element_blank(),
        #panel.grid.minor=element_blank(),plot.background=element_blank()
  )
## number of turtles
# Tally turtles per sampled tick and age value, then reshape wide
# (one column per age). The print() in the middle of the pipe is a
# side-effect peek at the grouped tally before reshaping.
turtles %>%
  group_by(`[step]`, age) %>%
  tally() %>%
  print(n = length(turtles$age)) %>%
  reshape2::dcast(`[step]` ~ age) -> turtle.stat
# Row totals over the age columns (columns 2:4); NAs treated as zero.
turtle.stat$total <- rowSums(turtle.stat[,c(2:4)], na.rm = T)
## Density plot
# health distribution: density plot!
turtles_density <- results_unnest %>%
  select(`[step]`, Scenario, xcor, ycor, age, agent, health, homename, destinationName) %>%
  filter(agent == "turtles", Scenario == "BAU", ycor < 324 & xcor < 294 & xcor > 0) %>%
  filter(`[step]` %in% seq(1,8764))
# Clamp negative health scores to zero before plotting.
turtles_density$health[turtles_density$health <= 0] <- 0
# NOTE(review): aes(fill = age) fills by raw `age`; if `age` is numeric the
# densities will not be grouped into discrete categories -- confirm intent.
turtles_density %>%
  ggplot(aes(health, fill = age)) +
  geom_density(alpha = 0.4) +
  theme_bw() +
  theme(legend.title = element_text(size=20, face="bold"),
        legend.text = element_text(size=15),
        legend.position = c(0.2, 0.8),
        axis.text=element_text(size=20),
        axis.title=element_text(size=15,face="bold")
)
| /nlrx_seoul_ubuntu.R | no_license | dataandcrowd/PollutionABM | R | false | false | 5,271 | r | #Sys.setenv(JAVA_HOME='/usr/local/software/spack/spack-0.11.2/opt/spack/linux-rhel7-x86_64/gcc-5.4.0/jdk-8u141-b15-p4aaoptkqukgdix6dh5ey236kllhluvr/jre') #Ubuntu cluster
Sys.setenv(JAVA_HOME= "/usr/lib/jvm/java-11-openjdk-amd64")
## Load packages
library(nlrx)
library(tidyverse)
library(rcartocolor)
library(ggthemes)
# Office
netlogopath <- file.path("/home/hs621/NetLogo 6.1.1")
outpath <- file.path("/home/hs621/Dropbox (Cambridge University)/2019_Cambridge/[Programming]/Netlogo/Dissertation_Chapter4")
## Step1: Create a nl obejct:
nl <- nl(nlversion = "6.1.1",
nlpath = netlogopath,
modelpath = file.path(outpath, "St111261_Gangnam.nlogo"),
jvmmem = 1024)
## Step2: Add Experiment
nl@experiment <- experiment(expname = "nlrx_spatial",
outpath = outpath,
repetition = 1,
tickmetrics = "true",
idsetup = "setup",
idgo = "go",
runtime = 8764,
evalticks=seq(1,8764, by = 10),
metrics = c("count people with [health < 100] / count people"),
metrics.turtles = list("people" = c("who", "xcor", "ycor", "homename", "destinationName", "age", "edu","health")),
#variables = list('AC' = list(values=c(100,150,200))),
constants = list("PM10-parameters" = 100,
"Scenario" = "\"BAU\"",
"scenario-percent" = "\"inc-sce\"",
"AC" = 100)
)
# Evaluate if variables and constants are valid:
eval_variables_constants(nl)
#nl@simdesign <- simdesign_distinct(nl = nl, nseeds = 1)
nl@simdesign <- simdesign_simple(nl = nl, nseeds = 1)
# Step4: Run simulations:
init <- Sys.time()
results <- run_nl_all(nl = nl)
Sys.time() - init
# Attach results to nl object:
setsim(nl, "simoutput") <- results
# Report spatial data:
results_unnest <- unnest_simoutput(nl)
# Write output to outpath of experiment within nl
#write_simoutput(nl)
# Filter out unneeded variables and objects
# BAU scenario
turtles <- results_unnest %>%
select(`[step]`, Scenario, who, homename, destinationName, xcor, ycor, age, agent, health) %>%
filter(agent == "turtles", Scenario == "BAU", ycor < 326 & xcor < 297 & xcor > 0) %>%
filter(`[step]` %in% seq(5000,8764)) %>%
mutate(age_group = case_when(age < 15 ~ "young",
age >= 15 & age < 65 ~ "active",
age >= 65 ~ "old"),
edu_group = case_when(edu >= 3 ~ "high",
edu < 3 ~ "low"))
bau <- bind_rows(gn %>% filter(scenario == "BAU") %>%
select(ticks, riskpop, AC, scenario, age_u15,age_btw1564,age_ov65,edu_high,edu_low) %>%
mutate(District= "Gangnam")) %>%
group_by(District, scenario, AC, ticks) %>%
summarise_all(funs(mean, lo = lb, hi = ub)) %>% as.data.frame()
#patches <- results_unnest %>% select(`[step]`, Scenario, pxcor, pycor, pcolor) %>%
# filter(Scenario == "BAU", pycor < 324) %>%
# filter(`[step]` %in% seq(5000,8764))
# Create facet plot:
ggplot() +
facet_wrap(~`[step]`, ncol= 10) +
coord_equal() +
#geom_tile(data=patches, aes(x=pxcor, y=pycor, fill=pcolor), alpha = .2) +
geom_point(data=turtles, aes(x = xcor, y = ycor, color = age), size=1) +
# scale_fill_gradient(low = "white", high = "grey20") +
scale_color_manual(breaks=c("young", "active", "old"),
values = c("young" = "#56B4E9", "active" = "#E69F00", "old" = "#999999")) +
guides(fill=guide_legend(title="PM10")) +
ggtitle("Unhealthly Population after a long-term exposure") +
theme_minimal() +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks=element_blank()
#axis.title.x=element_blank(),
#axis.title.y=element_blank(),legend.position="none",
#panel.background=element_blank(),panel.border=element_blank(),panel.grid.major=element_blank(),
#panel.grid.minor=element_blank(),plot.background=element_blank()
)
## number of turtles
turtles %>%
group_by(`[step]`, age) %>%
tally() %>%
print(n = length(turtles$age)) %>%
reshape2::dcast(`[step]` ~ age) -> turtle.stat
turtle.stat$total <- rowSums(turtle.stat[,c(2:4)], na.rm = T)
## Density plot
# health distribution: density plot!
turtles_density <- results_unnest %>%
select(`[step]`, Scenario, xcor, ycor, age, agent, health, homename, destinationName) %>%
filter(agent == "turtles", Scenario == "BAU", ycor < 324 & xcor < 294 & xcor > 0) %>%
filter(`[step]` %in% seq(1,8764))
turtles_density$health[turtles_density$health <= 0] <- 0
turtles_density %>%
ggplot(aes(health, fill = age)) +
geom_density(alpha = 0.4) +
theme_bw() +
theme(legend.title = element_text(size=20, face="bold"),
legend.text = element_text(size=15),
legend.position = c(0.2, 0.8),
axis.text=element_text(size=20),
axis.title=element_text(size=15,face="bold")
)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
test_that("RandomAccessFile$ReadMetadata() works for LocalFileSystem", {
  # A plain local file carries no attached metadata, so ReadMetadata()
  # is expected to come back as an empty list.
  local_fs <- LocalFileSystem$create()
  path <- tempfile()
  on.exit(unlink(path))
  write("abcdefg", path)
  input_file <- local_fs$OpenInputFile(path)
  expect_identical(input_file$ReadMetadata(), list())
})
test_that("reencoding input stream works for windows-1252", {
  # The same text rendered in two encodings: the reencoding stream should
  # turn the windows-1252 bytes on disk into the equivalent UTF-8 bytes.
  string <- "province_name\nQu\u00e9bec"
  bytes_windows1252 <- iconv(
    string,
    from = Encoding(string),
    to = "windows-1252",
    toRaw = TRUE
  )[[1]]
  bytes_utf8 <- iconv(
    string,
    from = Encoding(string),
    to = "UTF-8",
    toRaw = TRUE
  )[[1]]
  # Write the windows-1252 bytes to a temporary file in binary mode.
  temp_windows1252 <- tempfile()
  con <- file(temp_windows1252, open = "wb")
  writeBin(bytes_windows1252, con)
  close(con)
  fs <- LocalFileSystem$create()
  stream <- fs$OpenInputStream(temp_windows1252)
  stream_utf8 <- MakeReencodeInputStream(stream, "windows-1252")
  # Read past the end (100 > content length) to consume the whole stream.
  expect_identical(as.raw(stream_utf8$Read(100)), bytes_utf8)
  stream$close()
  stream_utf8$close()
  unlink(temp_windows1252)
})
test_that("reencoding input stream works for UTF-16", {
  # Repeat a mixed-width string (1-, 2- and 4-byte UTF-8 characters) so the
  # stream has to handle more than a trivial amount of data.
  string <- paste0(strrep("a\u00e9\U0001f4a9", 30))
  # Reference bytes in the source (UTF-16LE) and target (UTF-8) encodings.
  bytes_utf16 <- iconv(
    string,
    from = Encoding(string),
    to = "UTF-16LE",
    toRaw = TRUE
  )[[1]]
  bytes_utf8 <- iconv(
    string,
    from = Encoding(string),
    to = "UTF-8",
    toRaw = TRUE
  )[[1]]
  # Write UTF-16LE bytes, then read through the reencoding stream and
  # compare with the expected UTF-8 bytes.
  temp_utf16 <- tempfile()
  con <- file(temp_utf16, open = "wb")
  writeBin(bytes_utf16, con)
  close(con)
  fs <- LocalFileSystem$create()
  stream <- fs$OpenInputStream(temp_utf16)
  stream_utf8 <- MakeReencodeInputStream(stream, "UTF-16LE")
  expect_identical(
    as.raw(stream_utf8$Read(length(bytes_utf8))),
    bytes_utf8
  )
  stream_utf8$close()
  stream$close()
  unlink(temp_utf16)
})
test_that("reencoding input stream works with pending characters", {
  # Multi-byte UTF-8 characters mean that odd-sized Read() calls will cut
  # characters in half; the stream must buffer those pending bytes.
  string <- paste0(strrep("a\u00e9\U0001f4a9", 30))
  bytes_utf8 <- iconv(
    string,
    from = Encoding(string),
    to = "UTF-8",
    toRaw = TRUE
  )[[1]]
  temp_utf8 <- tempfile()
  con <- file(temp_utf8, open = "wb")
  writeBin(bytes_utf8, con)
  close(con)
  fs <- LocalFileSystem$create()
  stream <- fs$OpenInputStream(temp_utf8)
  stream_utf8 <- MakeReencodeInputStream(stream, "UTF-8")
  # these calls all leave some pending characters
  expect_identical(as.raw(stream_utf8$Read(4)), bytes_utf8[1:4])
  expect_identical(as.raw(stream_utf8$Read(5)), bytes_utf8[5:9])
  expect_identical(as.raw(stream_utf8$Read(6)), bytes_utf8[10:15])
  expect_identical(as.raw(stream_utf8$Read(7)), bytes_utf8[16:22])
  # finish the stream
  expect_identical(
    as.raw(stream_utf8$Read(length(bytes_utf8))),
    bytes_utf8[23:length(bytes_utf8)]
  )
  stream$close()
  stream_utf8$close()
  unlink(temp_utf8)
})
test_that("reencoding input stream errors for invalid characters", {
  # 0xff is never a valid byte in UTF-8, so decoding must fail.
  bytes_utf8 <- rep(as.raw(0xff), 10)
  temp_utf8 <- tempfile()
  con <- file(temp_utf8, open = "wb")
  writeBin(bytes_utf8, con)
  close(con)
  fs <- LocalFileSystem$create()
  stream <- fs$OpenInputStream(temp_utf8)
  stream_utf8 <- MakeReencodeInputStream(stream, "UTF-8")
  expect_error(stream_utf8$Read(100), "Encountered invalid input bytes")
  # Close both streams before removing the file; every sibling test does
  # this, but they were previously leaked here.
  stream_utf8$close()
  stream$close()
  unlink(temp_utf8)
})
| /r/tests/testthat/test-io.R | permissive | 0x0L/arrow | R | false | false | 3,942 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
test_that("RandomAccessFile$ReadMetadata() works for LocalFileSystem", {
fs <- LocalFileSystem$create()
tf <- tempfile()
on.exit(unlink(tf))
write("abcdefg", tf)
expect_identical(
fs$OpenInputFile(tf)$ReadMetadata(),
list()
)
})
test_that("reencoding input stream works for windows-1252", {
string <- "province_name\nQu\u00e9bec"
bytes_windows1252 <- iconv(
string,
from = Encoding(string),
to = "windows-1252",
toRaw = TRUE
)[[1]]
bytes_utf8 <- iconv(
string,
from = Encoding(string),
to = "UTF-8",
toRaw = TRUE
)[[1]]
temp_windows1252 <- tempfile()
con <- file(temp_windows1252, open = "wb")
writeBin(bytes_windows1252, con)
close(con)
fs <- LocalFileSystem$create()
stream <- fs$OpenInputStream(temp_windows1252)
stream_utf8 <- MakeReencodeInputStream(stream, "windows-1252")
expect_identical(as.raw(stream_utf8$Read(100)), bytes_utf8)
stream$close()
stream_utf8$close()
unlink(temp_windows1252)
})
test_that("reencoding input stream works for UTF-16", {
string <- paste0(strrep("a\u00e9\U0001f4a9", 30))
bytes_utf16 <- iconv(
string,
from = Encoding(string),
to = "UTF-16LE",
toRaw = TRUE
)[[1]]
bytes_utf8 <- iconv(
string,
from = Encoding(string),
to = "UTF-8",
toRaw = TRUE
)[[1]]
temp_utf16 <- tempfile()
con <- file(temp_utf16, open = "wb")
writeBin(bytes_utf16, con)
close(con)
fs <- LocalFileSystem$create()
stream <- fs$OpenInputStream(temp_utf16)
stream_utf8 <- MakeReencodeInputStream(stream, "UTF-16LE")
expect_identical(
as.raw(stream_utf8$Read(length(bytes_utf8))),
bytes_utf8
)
stream_utf8$close()
stream$close()
unlink(temp_utf16)
})
test_that("reencoding input stream works with pending characters", {
string <- paste0(strrep("a\u00e9\U0001f4a9", 30))
bytes_utf8 <- iconv(
string,
from = Encoding(string),
to = "UTF-8",
toRaw = TRUE
)[[1]]
temp_utf8 <- tempfile()
con <- file(temp_utf8, open = "wb")
writeBin(bytes_utf8, con)
close(con)
fs <- LocalFileSystem$create()
stream <- fs$OpenInputStream(temp_utf8)
stream_utf8 <- MakeReencodeInputStream(stream, "UTF-8")
# these calls all leave some pending characters
expect_identical(as.raw(stream_utf8$Read(4)), bytes_utf8[1:4])
expect_identical(as.raw(stream_utf8$Read(5)), bytes_utf8[5:9])
expect_identical(as.raw(stream_utf8$Read(6)), bytes_utf8[10:15])
expect_identical(as.raw(stream_utf8$Read(7)), bytes_utf8[16:22])
# finish the stream
expect_identical(
as.raw(stream_utf8$Read(length(bytes_utf8))),
bytes_utf8[23:length(bytes_utf8)]
)
stream$close()
stream_utf8$close()
unlink(temp_utf8)
})
test_that("reencoding input stream errors for invalid characters", {
  # 0xff is never a valid byte in UTF-8, so decoding must fail.
  bytes_utf8 <- rep(as.raw(0xff), 10)
  temp_utf8 <- tempfile()
  con <- file(temp_utf8, open = "wb")
  writeBin(bytes_utf8, con)
  close(con)
  fs <- LocalFileSystem$create()
  stream <- fs$OpenInputStream(temp_utf8)
  stream_utf8 <- MakeReencodeInputStream(stream, "UTF-8")
  expect_error(stream_utf8$Read(100), "Encountered invalid input bytes")
  # Close both streams before removing the file; every sibling test does
  # this, but they were previously leaked here.
  stream_utf8$close()
  stream$close()
  unlink(temp_utf8)
})
|
library(checkmate)
# Check the invariants of a data backend object: R6 interface, dimensions,
# row/column names, data access, distinct values, column types and missing
# value counts must all be mutually consistent.
expect_backend = function(b) {
  expect_r6(b, cloneable = TRUE, public = c("nrow", "ncol", "colnames", "rownames", "data", "head", "distinct", "missing.values", "types"))
  n = b$nrow
  p = b$ncol
  expect_count(n)
  expect_count(p)
  expect_atomic_vector(b$rownames, any.missing = FALSE, len = n)
  expect_character(b$colnames, any.missing = FALSE, len = p, min.chars = 1L, unique = TRUE)
  expect_data_table(b$data, nrow = n, ncol = p, col.names = "unique")
  # Single-column access: get() must return a one-column data.table whose
  # unwrapped vector matches the backend's distinct values for that column.
  cn = b$colnames[1L]
  x = b$get(cols = cn)
  expect_data_table(x, ncol = 1, nrow = n)
  x = x[[cn]]
  expect_atomic_vector(x, len = n)
  expect_set_equal(b$distinct(cn), x)
  # Column types: named per column and drawn from the supported set.
  types = b$types
  expect_character(types, len = p, names = "unique")
  expect_set_equal(names(types), b$colnames)
  expect_subset(types, mlrng$supported.col.types)
  # Missing value counts: one non-negative count per column, at most n.
  mv = b$missing.values
  expect_integer(mv, names = "unique", any.missing = FALSE, lower = 0, upper = n)
  expect_set_equal(names(mv), b$colnames)
  expect_data_table(b$head(3), nrow = 3, ncol = p)
}
# Check the generic invariants of a Task object: identity, dimensions,
# a valid backend, and that the data accessors return data.tables.
expect_task = function(task) {
  expect_r6(task, "Task", cloneable = TRUE)
  expect_string(task$id, min.chars = 1L)
  expect_count(task$nrow)
  expect_count(task$ncol)
  expect_backend(task$backend)
  expect_data_table(task$data)
  expect_data_table(task$get())
  expect_data_table(task$head(1), nrow = 1L)
  # NA-column checks disabled, presumably until the na.cols accessor exists:
  # task.nas = task$na.cols
  # expect_integer(task.nas, names = "unique", any.missing = FALSE, lower = 0L, upper = task$nrow)
  # expect_set_equal(names(task.nas), task$backend$colnames)
}
# Check supervised-task invariants on top of the generic Task checks:
# the target is a backend column and the formula's left/right hand sides
# agree with target/features.
expect_supervisedtask = function(task) {
  expect_task(task)
  expect_is(task, "TaskSupervised")
  expect_choice(task$target, task$backend$colnames)
  expect_class(task$formula, "formula")
  tf = terms(task$formula)
  expect_set_equal(labels(tf), task$features) # rhs
  expect_set_equal(setdiff(all.vars(tf), labels(tf)), task$target) # lhs
  expect_subset(task$features, colnames(task$backend$head()))
}
# Check classification-task invariants on top of the supervised-task checks:
# a complete character/factor truth vector, at least two classes, and a
# positive class defined exactly when the task is binary.
expect_classiftask = function(task) {
  expect_supervisedtask(task)
  x = task$truth()[[1L]]
  expect_atomic_vector(x, any.missing = FALSE)
  # BUG FIX: was is.factor(r) - `r` is undefined in this scope; the truth
  # vector `x` is what must be character or factor.
  expect_true(is.character(x) || is.factor(x))
  expect_int(task$nclasses, lower = 2L)
  expect_atomic_vector(task$classes)
  expect_subset(task$classes, x)
  # Multiclass tasks must not define a positive class; binary tasks must.
  if (task$nclasses > 2L)
    expect_identical(task$positive, NA_character_)
  else
    expect_choice(task$positive, task$classes)
}
# Check regression-task invariants: a supervised task whose target column
# is numeric with no missing values.
expect_regrtask = function(task) {
  expect_supervisedtask(task)
  target_values = task$get(cols = task$target)[[1L]]
  expect_numeric(target_values, any.missing = FALSE)
}
# Check Learner invariants: identity, package dependencies, supported
# properties, a parameter set, and train/predict functions with the
# expected argument signatures.
expect_learner = function(lrn) {
  expect_is(lrn, "Learner")
  expect_string(lrn$id, min.chars = 1L)
  expect_character(lrn$packages, min.chars = 1L)
  expect_subset(lrn$properties, mlrng$supported.learner.props)
  expect_is(lrn$par.set, "ParamSet")
  expect_list(lrn$par.vals, names = "unique")
  expect_function(lrn$predict, args = c("model", "newdata"), ordered = TRUE)
  expect_function(lrn$train, args = c("task", "subset"), ordered = TRUE)
}
# Check a resampling Split: both partitions must be non-empty atomic vectors.
# NOTE(review): `len` is accepted but currently unused - kept for interface
# compatibility; TODO decide whether it should constrain the partition sizes.
expect_split = function(s, len = NULL) {
  expect_class(s, "Split")
  # use 1L consistently (was a mix of 1 and 1L)
  expect_atomic_vector(s$train.set, min.len = 1L)
  expect_atomic_vector(s$test.set, min.len = 1L)
}
# Check a Resampling object.
# task == FALSE  -> assert that r is NOT instantiated (no checksum/instance)
# task == <Task> -> assert that r IS instantiated on that task
expect_resampling = function(r, task = FALSE) {
  expect_is(r, "Resampling")
  expect_string(r$id, min.chars = 1L)
  expect_list(r$pars, names = "unique")
  expect_count(r$iters)
  if (isFALSE(task)) {
    # uninstantiated: no checksum, no stored train/test sets
    expect_scalar_na(r$checksum)
    expect_null(r$instance)
  }
  if (inherits(task, "Task")) {
    # instantiated: one train and one test set per iteration, both drawn
    # from the task's row names and strictly smaller than the task itself
    expect_string(r$checksum)
    expect_list(r$instance, len = 2)
    expect_list(r$instance$train, len = r$iters, names = "unnamed")
    expect_list(r$instance$test, len = r$iters, names = "unnamed")
    n = task$nrow
    rows = task$backend$rownames
    for (i in seq_len(r$iters)) {
      expect_atomic_vector(r$train.set(i), min.len = 1L, max.len = n - 1L, any.missing = FALSE, names = "unnamed")
      expect_subset(r$train.set(i), rows)
      expect_atomic_vector(r$test.set(i), min.len = 1L, max.len = n - 1L, any.missing = FALSE, names = "unnamed")
      expect_subset(r$test.set(i), rows)
    }
  }
}
# Check a result object of any stage (Train/Predict/Performance/Resample/
# Benchmark): the R6 class chain must follow mlrng$result.states and the
# data slot must contain the columns accumulated up to the object's stage.
expect_result = function(x) {
  classes = head(mlrng$result.states, fastmatch::fmatch(class(x)[1L], mlrng$result.states))
  expect_r6(x, rev(classes), ordered = TRUE, public = "data", cloneable = FALSE)
  # check that classes are in the right order
  # columns each stage is expected to add to x$data
  cols = list(
    TrainResult = c("task", "learner", "rmodel", "train.set", "train.log"),
    PredictResult = c("test.set", "predicted"),
    PerformanceResult = c("measures", "perf.vals"),
    ResampleResult = c("resampling.iter"),
    BenchmarkResult = c("resampling.id")
  )
  # furthest stage reached by x determines which column groups must exist
  i = max(match(class(x), names(cols), nomatch = 0L))
  cols = unlist(head(cols, i), use.names = FALSE)
  if (!is.null(x$print))
    expect_output(print(x))
  expect_data_table(x$data, min.rows = 1L)
  expect_subset(cols, names(x$data))
}
# Check a TrainResult: generic result invariants plus, for single-model
# stages (up to PerformanceResult), the trained model, learner, task,
# train set and train log.
expect_trainresult = function(x) {
  expect_class(x, "TrainResult")
  expect_result(x)
  if (result_state(x) <= result_state("PerformanceResult")) {
    expect_true(hasName(x, "rmodel"))
    expect_is(x$learner, "Learner")
    expect_is(x$task, "Task")
    expect_subset(x$train.set, x$task$backend$rownames)
    expect_is(x$train.log, "TrainLog")
    expect_flag(x$train.success)
  }
}
# Check a PredictResult: TrainResult invariants plus truth/prediction
# tables and a valid test set.
expect_predictresult = function(x) {
  expect_class(x, "PredictResult")
  expect_trainresult(x)
  if (result_state(x) <= result_state("PerformanceResult")) {
    expect_data_table(x$truth, ncol = length(x$task$target))
    expect_data_table(x$pred, min.cols = 3, col.names = "unique")
    expect_set_equal(names(x$pred), c("test.set", "truth", "response"))
    expect_subset(x$test.set, x$task$backend$rownames)
    # classif/regr predictions are plain atomic vectors
    if (x$task$task.type %in% c("classif", "regr"))
      expect_atomic_vector(x$predicted)
  }
}
# Check a PerformanceResult: PredictResult invariants plus finite, named
# performance values matching the measure ids.
expect_performanceresult = function(x) {
  expect_class(x, "PerformanceResult")
  expect_predictresult(x)
  if (result_state(x) <= result_state("PerformanceResult")) {
    pv = x$perf.vals
    expect_numeric(pv, names = "unique", any.missing = FALSE, finite = TRUE)
    expect_set_equal(unlist(lapply(x$data$perf.vals, names)), ids(x$data$measures))
  }
}
# Check a ResampleResult: per-iteration performance values named after the
# measures, and finite aggregated values.
expect_resampleresult = function(x) {
  expect_class(x, "ResampleResult")
  expect_result(x)
  expect_set_equal(unlist(lapply(x$data$perf.vals, names)), ids(x$data$measures))
  expect_numeric(x$aggr, names = "unique", any.missing = FALSE, finite = TRUE)
}
# Check a BenchmarkResult: only the generic result invariants apply here.
expect_benchmarkresult = function(x) {
  expect_class(x, "BenchmarkResult")
  expect_result(x)
}
# Assert that both objects share the same memory address (no copy was made).
expect_same_address = function(x, y) {
  addr_x = address(x)
  addr_y = address(y)
  expect_identical(addr_x, addr_y)
}
# Assert that the two objects do NOT share a memory address (a copy exists).
expect_different_address = function(x, y) {
  addr_x = address(x)
  addr_y = address(y)
  expect_false(identical(addr_x, addr_y))
}
| /tests/testthat/helper_expects.R | no_license | mlr-archive/mlrng | R | false | false | 6,714 | r | library(checkmate)
expect_backend = function(b) {
expect_r6(b, cloneable = TRUE, public = c("nrow", "ncol", "colnames", "rownames", "data", "head", "distinct", "missing.values", "types"))
n = b$nrow
p = b$ncol
expect_count(n)
expect_count(p)
expect_atomic_vector(b$rownames, any.missing = FALSE, len = n)
expect_character(b$colnames, any.missing = FALSE, len = p, min.chars = 1L, unique = TRUE)
expect_data_table(b$data, nrow = n, ncol = p, col.names = "unique")
cn = b$colnames[1L]
x = b$get(cols = cn)
expect_data_table(x, ncol = 1, nrow = n)
x = x[[cn]]
expect_atomic_vector(x, len = n)
expect_set_equal(b$distinct(cn), x)
types = b$types
expect_character(types, len = p, names = "unique")
expect_set_equal(names(types), b$colnames)
expect_subset(types, mlrng$supported.col.types)
mv = b$missing.values
expect_integer(mv, names = "unique", any.missing = FALSE, lower = 0, upper = n)
expect_set_equal(names(mv), b$colnames)
expect_data_table(b$head(3), nrow = 3, ncol = p)
}
expect_task = function(task) {
expect_r6(task, "Task", cloneable = TRUE)
expect_string(task$id, min.chars = 1L)
expect_count(task$nrow)
expect_count(task$ncol)
expect_backend(task$backend)
expect_data_table(task$data)
expect_data_table(task$get())
expect_data_table(task$head(1), nrow = 1L)
# task.nas = task$na.cols
# expect_integer(task.nas, names = "unique", any.missing = FALSE, lower = 0L, upper = task$nrow)
# expect_set_equal(names(task.nas), task$backend$colnames)
}
expect_supervisedtask = function(task) {
expect_task(task)
expect_is(task, "TaskSupervised")
expect_choice(task$target, task$backend$colnames)
expect_class(task$formula, "formula")
tf = terms(task$formula)
expect_set_equal(labels(tf), task$features) # rhs
expect_set_equal(setdiff(all.vars(tf), labels(tf)), task$target) # lhs
expect_subset(task$features, colnames(task$backend$head()))
}
# Check classification-task invariants on top of the supervised-task checks:
# a complete character/factor truth vector, at least two classes, and a
# positive class defined exactly when the task is binary.
expect_classiftask = function(task) {
  expect_supervisedtask(task)
  x = task$truth()[[1L]]
  expect_atomic_vector(x, any.missing = FALSE)
  # BUG FIX: was is.factor(r) - `r` is undefined in this scope; the truth
  # vector `x` is what must be character or factor.
  expect_true(is.character(x) || is.factor(x))
  expect_int(task$nclasses, lower = 2L)
  expect_atomic_vector(task$classes)
  expect_subset(task$classes, x)
  # Multiclass tasks must not define a positive class; binary tasks must.
  if (task$nclasses > 2L)
    expect_identical(task$positive, NA_character_)
  else
    expect_choice(task$positive, task$classes)
}
expect_regrtask = function(task) {
expect_supervisedtask(task)
expect_numeric(task$get(cols = task$target)[[1L]], any.missing = FALSE)
}
expect_learner = function(lrn) {
expect_is(lrn, "Learner")
expect_string(lrn$id, min.chars = 1L)
expect_character(lrn$packages, min.chars = 1L)
expect_subset(lrn$properties, mlrng$supported.learner.props)
expect_is(lrn$par.set, "ParamSet")
expect_list(lrn$par.vals, names = "unique")
expect_function(lrn$predict, args = c("model", "newdata"), ordered = TRUE)
expect_function(lrn$train, args = c("task", "subset"), ordered = TRUE)
}
expect_split = function(s, len = NULL) {
expect_class(s, "Split")
expect_atomic_vector(s$train.set, min.len = 1)
expect_atomic_vector(s$test.set, min.len = 1L)
}
# task == FALSE -> assert that r is not instantiated
# task == [task] -> assert that r is instantiated with task
expect_resampling = function(r, task = FALSE) {
expect_is(r, "Resampling")
expect_string(r$id, min.chars = 1L)
expect_list(r$pars, names = "unique")
expect_count(r$iters)
if (isFALSE(task)) {
expect_scalar_na(r$checksum)
expect_null(r$instance)
}
if (inherits(task, "Task")) {
expect_string(r$checksum)
expect_list(r$instance, len = 2)
expect_list(r$instance$train, len = r$iters, names = "unnamed")
expect_list(r$instance$test, len = r$iters, names = "unnamed")
n = task$nrow
rows = task$backend$rownames
for (i in seq_len(r$iters)) {
expect_atomic_vector(r$train.set(i), min.len = 1L, max.len = n - 1L, any.missing = FALSE, names = "unnamed")
expect_subset(r$train.set(i), rows)
expect_atomic_vector(r$test.set(i), min.len = 1L, max.len = n - 1L, any.missing = FALSE, names = "unnamed")
expect_subset(r$test.set(i), rows)
}
}
}
expect_result = function(x) {
classes = head(mlrng$result.states, fastmatch::fmatch(class(x)[1L], mlrng$result.states))
expect_r6(x, rev(classes), ordered = TRUE, public = "data", cloneable = FALSE)
# check that classes are in the right order
cols = list(
TrainResult = c("task", "learner", "rmodel", "train.set", "train.log"),
PredictResult = c("test.set", "predicted"),
PerformanceResult = c("measures", "perf.vals"),
ResampleResult = c("resampling.iter"),
BenchmarkResult = c("resampling.id")
)
i = max(match(class(x), names(cols), nomatch = 0L))
cols = unlist(head(cols, i), use.names = FALSE)
if (!is.null(x$print))
expect_output(print(x))
expect_data_table(x$data, min.rows = 1L)
expect_subset(cols, names(x$data))
}
expect_trainresult = function(x) {
expect_class(x, "TrainResult")
expect_result(x)
if (result_state(x) <= result_state("PerformanceResult")) {
expect_true(hasName(x, "rmodel"))
expect_is(x$learner, "Learner")
expect_is(x$task, "Task")
expect_subset(x$train.set, x$task$backend$rownames)
expect_is(x$train.log, "TrainLog")
expect_flag(x$train.success)
}
}
expect_predictresult = function(x) {
expect_class(x, "PredictResult")
expect_trainresult(x)
if (result_state(x) <= result_state("PerformanceResult")) {
expect_data_table(x$truth, ncol = length(x$task$target))
expect_data_table(x$pred, min.cols = 3, col.names = "unique")
expect_set_equal(names(x$pred), c("test.set", "truth", "response"))
expect_subset(x$test.set, x$task$backend$rownames)
if (x$task$task.type %in% c("classif", "regr"))
expect_atomic_vector(x$predicted)
}
}
expect_performanceresult = function(x) {
expect_class(x, "PerformanceResult")
expect_predictresult(x)
if (result_state(x) <= result_state("PerformanceResult")) {
pv = x$perf.vals
expect_numeric(pv, names = "unique", any.missing = FALSE, finite = TRUE)
expect_set_equal(unlist(lapply(x$data$perf.vals, names)), ids(x$data$measures))
}
}
expect_resampleresult = function(x) {
expect_class(x, "ResampleResult")
expect_result(x)
expect_set_equal(unlist(lapply(x$data$perf.vals, names)), ids(x$data$measures))
expect_numeric(x$aggr, names = "unique", any.missing = FALSE, finite = TRUE)
}
expect_benchmarkresult = function(x) {
expect_class(x, "BenchmarkResult")
expect_result(x)
}
expect_same_address = function(x, y) {
expect_identical(address(x), address(y))
}
expect_different_address = function(x, y) {
expect_false(identical(address(x), address(y)))
}
|
#####RED - IUCN Redlisting Tools
#####Version 1.5.0 (2020-05-04)
#####By Pedro Cardoso
#####Maintainer: pedro.cardoso@helsinki.fi
#####Reference: Cardoso, P.(2017) An R package to facilitate species red list assessments according to the IUCN criteria. Biodiversity Data Journal 5: e20530 doi: 10.3897/BDJ.5.e20530
#####Changed from v1.4.0:
#####added function rli.predict to interpolate and extrapolate linearly beyond the years assessed
#####added new options in functions rli and rli.multi on how to deal with DD species when bootstrapping
#####required packages
library("BAT")
library("dismo")
library("gdistance")
library("geosphere")
library("graphics")
library("grDevices")
library("jsonlite")
library("maptools")
library("methods")
library("raster")
library("rgdal")
library("rgeos")
library("sp")
library("stats")
library("utils")
#' @import gdistance
#' @import graphics
#' @import jsonlite
#' @import maptools
#' @import rgdal
#' @import rgeos
#' @import sp
#' @import stats
#' @import utils
#' @importFrom BAT contribution
#' @importFrom geosphere areaPolygon
#' @importFrom grDevices chull dev.copy dev.off pdf
#' @importFrom methods slot
#' @importFrom raster area cellStats clump crop extent extract getValues layerStats mask raster rasterize rasterToPoints rasterToPolygons reclassify res sampleRandom scalebar terrain trim writeRaster xmax xmin
raster::rasterOptions(maxmemory = 2e+09)
globalVariables(c("worldborders"))
###############################################################################
##############################AUX FUNCTIONS####################################
###############################################################################
## Project geographic coordinates (longitude, latitude) to UTM.
## The UTM zone is derived from the westernmost longitude, so all points
## are projected into one single zone.
longlat2utm <- function(longlat){
  longlat <- as.matrix(longlat)
  minlong <- min(longlat[, 1])
  zone <- floor((minlong + 180) / 6) + 1
  ## BUG FIX: the ellipsoid token lacked its leading "+" ("ellps=WGS84"),
  ## which PROJ does not recognize; "+ellps=WGS84" is the valid form.
  res <- rgdal::project(longlat, paste("+proj=utm +zone=", zone, " +ellps=WGS84", sep = ""))
  return(res)
}
## Back-transform UTM coordinates to longitude/latitude (WGS84).
## `utm` may be a RasterLayer (returns a reprojected raster) or a matrix of
## point coordinates (returns a data.frame of long/lat).
utm2longlat <- function(utm, zone){
  ## use inherits() for class testing (class(x) == "cls" fails for objects
  ## with multiple classes)
  if(inherits(utm, "RasterLayer")){
    if(!is.null(zone))
      raster::crs(utm) <- paste("+proj=utm +zone=", zone, sep="")
    ## nearest-neighbour resampling preserves categorical cell values
    res <- raster::projectRaster(utm, crs = "+proj=longlat +datum=WGS84", method='ngb')
  } else {
    utm <- SpatialPoints(utm, CRS(paste("+proj=utm +zone=", zone,sep="")))
    res <- as.data.frame(spTransform(utm,CRS(paste("+proj=longlat"))))
  }
  return(res)
}
##warn if maxent.jar is not available
warnMaxent <- function(){
warning("RED could not find maxent.jar.
1. Download the latest version of maxent from:
https://biodiversityinformatics.amnh.org/open_source/maxent/
2. Move the file maxent.jar to the java directory inside dismo package
(there should be a file named dismo.jar already there)
3. Install the latest version of java runtime environment (JRE) with the same architecture (32 or 64 bits) as your version of R:
http://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html")
}
##detect which layers are categorical by checking if all values are integers and if the max is less than 50 (may fail, just an attempt)
find.categorical <- function(layers){
categorical = c()
for(l in 1:(dim(layers)[3])){
lay <- raster::as.matrix(layers[[l]])
lay <- as.vector(lay)
lay <- lay[!is.na(lay)]
if(sum(floor(lay)) == sum(lay) && length(unique(lay)) < 50)
categorical = c(categorical, l)
}
return(categorical)
}
##basic function to calculate the rli of any group of species
rli.calc <- function(spData, tree = NULL, boot = FALSE, dd = FALSE, runs = 1000){
if(all(is.na(spData)))
return(NA)
spData <- rli.convert(spData) ##call function to convert spData to a 0-1 scale
if(is.null(tree)){ ##if not weighted by PD or FD
if(!boot){ ##if no bootstrap to be made
return (mean(spData, na.rm = TRUE))
} else {
run <- rep(NA, runs)
if(!dd){
for(i in 1:runs){
rnd <- sample(spData, replace = TRUE) ##bootstrap with all species
run[i] <- mean(rnd, na.rm = TRUE)
}
} else { ##bootstrap with only DD species
nDD = sum(is.na(spData)) ##number of DD species
rliBase = sum(spData, na.rm = TRUE)
for(i in 1:runs){
rnd <- sample(spData[!is.na(spData)], nDD, replace = TRUE)
run[i] <- (rliBase + sum(rnd)) / length(spData)
}
}
res <- matrix(quantile(run, c(0.025, 0.5, 0.975)), nrow = 1)
colnames(res) <- c("LowCL", "Median", "UpCL")
return(res)
}
} else { ##if weighted by PD or FD, still to work, not available at the moment!!!!!!!!!!!!!!!!!!!!!!!!!!!!
comm <- matrix(1, nrow = 2, ncol = length(spData))
contrib <- BAT::contribution(comm, tree, relative = TRUE)[1,]
contrib <- contrib/sum(contrib[!is.na(spData)]) #needed to standardize the contribution by the total contribution of species living in the community
if(!boot){ ##if no bootstrap to be made
return(sum(spData * contrib, na.rm = TRUE))
} else {
run <- rep(NA, runs)
for(i in 1:runs){
rndSpp <- sample(length(spData), replace = TRUE)
rndComm <- spData[rndSpp]
rndContrib <- contrib[rndSpp]/sum(contrib[rndSpp])
run[i] <- sum(rndComm * rndContrib, na.rm = TRUE)
}
res <- matrix(quantile(run, c(0.025, 0.5, 0.975)), nrow = 1)
colnames(res) <- c("LowCL", "Median", "UpCL")
return(res)
}
}
}
##function to convert strings to numbers in the RLI
## Convert species threat data to the [0, 1] scale used by the RLI:
## - IUCN category strings map EX/EW/RE -> 0 ... LC -> 1, DD -> NA;
## - integers on the 0 (LC) to 5 (EX) scale are rescaled as 1 - x/5;
## - any other numeric input is assumed already on [0, 1] and returned as is.
rli.convert <- function(spData){
  if(!is.numeric(spData)){ ##if letters are given, convert to [0,1]
    spData <- replace(spData, which(spData == "EX" ), 0)
    spData <- replace(spData, which(spData == "EW" ), 0)
    spData <- replace(spData, which(spData == "RE" ), 0)
    spData <- replace(spData, which(spData == "CR" ), 0.2)
    spData <- replace(spData, which(spData == "CR(PE)" ), 0.2)
    spData <- replace(spData, which(spData == "EN" ), 0.4)
    spData <- replace(spData, which(spData == "VU" ), 0.6)
    spData <- replace(spData, which(spData == "NT" ), 0.8)
    spData <- replace(spData, which(spData == "LC" ), 1)
    spData <- replace(spData, which(spData == "DD" ), NA)
    spData <- as.numeric(spData)
  } else if (all(spData == floor(spData), na.rm = TRUE)){ #if all integers, a scale [0,5] is given, convert to [0,1]
    ## BUG FIX: without na.rm = TRUE, numeric data containing NA (DD
    ## species) made all() return NA and the if() condition throw an error.
    spData <- 1 - spData/5
  }
  return(spData)
}
##################################################################################
##################################MAIN FUNCTIONS##################################
##################################################################################
#' Setup GIS directory.
#' @description Setup directory where GIS files are stored.
#' @param gisPath Path to the directory where the gis files are stored.
#' @details Writes a txt file in the red directory allowing the package to always access the world GIS files directory.
#' @export
## Record the directory where the world GIS layers are stored. When no path
## is supplied the user is prompted interactively. The choice is persisted
## in red.txt inside the installed package so later sessions can find it.
red.setDir <- function(gisPath = NULL){
  if (is.null(gisPath)) {
    gisPath <- readline("Input directory for storing world gis layers:")
  }
  gis_dir <- paste(gisPath, "/", sep = "")
  config_file <- paste(find.package("red"), "/red.txt", sep = "")
  dput(gis_dir, config_file)
}
#' Read GIS directory.
#' @description Read directory where GIS files are stored.
#' @details Reads a txt file pointing to where the world GIS files are stored.
#' @export
## Read back the GIS directory recorded by red.setDir(). Returns the stored
## path, or NULL (with a warning) when red.txt does not exist yet.
red.getDir <- function(){
  config_file <- paste(find.package("red"), "/red.txt", sep = "")
  if (!file.exists(config_file)) {
    warning(paste(config_file, "not found, please run red.setDir()"))
    return()
  }
  dget(config_file)
}
#' Download and setup GIS files.
#' @description Setup red to work with species distribution modelling and layers available online.
#' @details Please check that you have at least 50Gb free in your disk (and a fast internet connection) to download all files. In the end of the process "only" 17.4Gb will be left though. This function will:
#' 1. Check if maxent.jar is available in the dismo package directory.
#' 2. Ask user input for GIS directory.
#' 3. Download global bioclim and elevation files (20) from http://biogeo.ucdavis.edu/data/worldclim/v2.0/tif/base/wc2.0_30s_bio.zip.
#' 4. Download landcover files (12) from http://data.earthenv.org/consensus_landcover/without_DISCover/.
#' 5. Unzip all files and delete the originals.
#' 6. Create a new layer (1) with the dominant land cover at each cell.
#' 7. Resample all files (33) to approximately 10x10km (for use with widespread species) grid cells.
#' Sit back and enjoy, this should take a while.
#' @export
## Download and preprocess all world GIS layers used by red: 19 bioclim
## variables, altitude, 12 landcover proportions and a dominant-landcover
## layer, each at ~1km and ~10km resolution. See the roxygen block above
## for the full description. Requires maxent.jar and a configured GIS dir.
red.setup <- function(){
  ##test if maxent.jar is in the right directory; abort with instructions if not
  if(!file.exists(paste(.libPaths()[[1]], "/dismo/java/maxent.jar", sep=""))){
    warnMaxent()
    return()
  }
  oldwd = getwd()
  on.exit(expr = setwd(oldwd))  # always restore the working directory
  gisdir = red.setDir()
  setwd(gisdir)
  ##basic setup
  pb <- txtProgressBar(min = 0, max = 33, style = 3)
  ##download and process bioclim (layers 1-19)
  ## BUG FIX: the archive was downloaded as "bioclim2.zip" but unzipped and
  ## removed as "bioclim.zip"; use one consistent name.
  download.file("http://biogeo.ucdavis.edu/data/worldclim/v2.0/tif/base/wc2.0_30s_bio.zip", "bioclim.zip")
  unzip(zipfile = "bioclim.zip")
  file.remove("bioclim.zip")
  for(i in 1:19){
    setTxtProgressBar(pb, i)
    if(i < 10)
      rast <- raster(paste("wc2.0_bio_30s_0", i, ".tif", sep=""))
    else
      rast <- raster(paste("wc2.0_bio_30s_", i, ".tif", sep=""))
    ## crop to the terrestrial extent used throughout red (no Antarctica)
    rast <- crop(rast, c(-180, 180, -56, 90))
    writeRaster(rast, paste("red_1km_", i, ".tif", sep=""))
    rast <- aggregate(rast, 10)  # ~10km version for widespread species
    writeRaster(rast, paste("red_10km_", i, ".tif", sep=""))
    if(i < 10)
      file.remove(paste("wc2.0_bio_30s_0", i, ".tif", sep=""))
    else
      file.remove(paste("wc2.0_bio_30s_", i, ".tif", sep=""))
    gc()
  }
  ##download and process altitude (layer 20)
  setTxtProgressBar(pb, 20)
  download.file("http://biogeo.ucdavis.edu/data/climate/worldclim/1_4/grid/cur/alt_30s_bil.zip", "alt_30s_bil.zip")
  unzip(zipfile = "alt_30s_bil.zip")
  file.remove("alt_30s_bil.zip")
  rast <- raster("alt.bil")
  rast <- crop(rast, c(-180, 180, -56, 90))
  writeRaster(rast, "red_1km_20.tif")
  rast <- aggregate(rast, 10)
  writeRaster(rast, "red_10km_20.tif")
  file.remove("alt.bil")
  file.remove("alt.hdr")
  gc()
  ##download and process land cover (layers 21-32), masked to land by altitude
  altmask1 = raster("red_1km_20.tif")
  altmask10 = raster("red_10km_20.tif")
  ## BUG FIX: the loop previously ran 5:12, so layers 21-24 were never
  ## created although the stacking loop below reads red_*_21..32; the
  ## roxygen above also documents 12 landcover files.
  for(i in 1:12){
    setTxtProgressBar(pb, (i+20))
    download.file(paste("http://data.earthenv.org/consensus_landcover/without_DISCover/Consensus_reduced_class_", i, ".tif", sep=""), destfile = paste("Consensus_reduced_class_", i, ".tif", sep=""), mode = "wb")
    rast <- raster(paste("Consensus_reduced_class_", i, ".tif", sep=""))
    rast <- mask(rast, altmask1)
    writeRaster(rast, paste("red_1km_", (i+20), ".tif", sep=""))
    rast <- aggregate(rast, 10)
    #maskLayer <- sum(altmask, rast)
    #maskLayer[!is.na(maskLayer)] <- 1
    rast <- mask(rast, altmask10)
    writeRaster(rast, paste("red_10km_", (i+20), ".tif", sep=""))
    file.remove(paste("Consensus_reduced_class_", i, ".tif", sep=""))
    gc()
  }
  remove(rast)
  ##create new rasters with most common landcover at each cell (layer 33)
  setTxtProgressBar(pb, 33)
  max1 <- raster()
  max10 <- raster()
  for(i in 21:32){
    rast <- raster(paste("red_1km_", i, ".tif", sep=""))
    max1 <- raster::stack(max1, rast)
    rast <- raster(paste("red_10km_", i, ".tif", sep=""))
    max10 <- raster::stack(max10, rast)
  }
  max1 <- which.max(max1)
  writeRaster(max1, "red_1km_33.tif")
  max10 <- which.max(max10)
  writeRaster(max10, "red_10km_33.tif")
  remove(max1, max10)
  gc()
  setwd(oldwd)
  ##Now the files should be named as:
  ##red_1km_1.tif
  ##...
  ##red_10km_33.tif
  ##Where 1 to 19 are the corresponding bioclim variables, 20 is altitude, 21 to 32 are landcover proportion and 33 is most common landcover per cell
  #download country borders (not working Feb. 2017)
  #download.file("http://biogeo.ucdavis.edu/data/gadm2.6/countries_gadm26.rds", destfile = paste("worldcountries.rds"), mode = "wb")
}
#' Download taxon records from GBIF.
#' @description Downloads species or higher taxon data from GBIF and outputs non-duplicate records with geographical coordinates.
#' @param taxon Taxon name.
#' @details As always when using data from multiple sources the user should be careful and check if records "make sense". This can be done by either plotting them in a map (e.g. using red::map.draw()) or using red::outliers().
#' @return A data.frame with longitude and latitude, plus species names if taxon is above species.
#' @examples records("Nephila senegalensis")
#' @export
records <- function(taxon){
##Download GBIF records for a species or genus and return non-duplicate
##georeferenced occurrences.
##taxon: taxon name ("Genus" or "Genus species").
##Returns a data.frame with long/lat (plus Species column for genus queries).
taxon = unlist(strsplit(taxon, split = " ")[[1]])
##fixed: for a genus-level name taxon[2] is NA and the original built the
##species filter "NA*"; use a plain wildcard to match any epithet instead
if (length(taxon) == 1)
dat <- dismo::gbif(taxon[1], "*")
else
dat <- dismo::gbif(taxon[1], paste(taxon[2], "*", sep = ""))
dat <- dat[c("species","lon","lat")] #filter columns
dat <- dat[!(is.na(dat$lon) | is.na(dat$lat)),] #filter rows
dat <- unique(dat) #delete duplicate rows
colnames(dat) <- c("Species", "long", "lat")
if (length(taxon) == 1){ #if genus, tag unnamed records as "Genus sp."
dat[which(is.na(dat[,1])),1] <- paste(taxon, "sp.")
} else { #if species, the name column is redundant
dat <- dat[,-1]
}
return(dat)
}
#' Move records to closest non-NA cell.
#' @description Identifies and moves presence records to cells with environmental values.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster.
#' @param buffer Maximum distance in map units that a record will move. If 0 all NA records will be changed.
#' @details Often records are in coastal or other areas for which no environmental data is available. This function moves such records to the closest cells with data so that no information is lost during modelling.
#' @return A matrix with new coordinate values.
#' @examples rast <- raster::raster(matrix(c(rep(NA,100), rep(1,100), rep(NA,100)), ncol = 15))
#' pts <- cbind(runif(100, 0, 0.55), runif(100, 0, 1))
#' raster::plot(rast)
#' points(pts)
#' pts <- move(pts, rast)
#' raster::plot(rast)
#' points(pts)
#' @export
move <- function(longlat, layers, buffer = 0){
##Relocate records that fall on NA cells to the nearest cell with data.
##longlat: record coordinates; layers: Raster* (only the first layer is
##used); buffer: maximum move distance in map units (0 = always move).
layers <- layers[[1]]
##cell value under each record; NA means the record is outside the data
recVals <- extract(layers, longlat)
suppressWarnings(
for(rec in which(is.na(recVals))){
##distance from this record to every cell, restricted to data cells
distRaster = raster::distanceFromPoints(layers, longlat[rec,])
distRaster = mask(distRaster, layers)
minDist = raster::minValue(distRaster)
##move only if no buffer was requested or the target is within it
if(buffer <= 0 || buffer > minDist){
nearest = rasterToPoints(distRaster, function(x) x == minDist)
longlat[rec,] = nearest[1, 1:2]
}
}
)
return(longlat)
}
#' Visual detection of outliers.
#' @description Draws plots of sites in geographical (longlat) and environmental (2-axis PCA) space.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster. It can be any set of environmental layers thought to allow the identification of environmental outliers.
#' @details Erroneous data sources or errors in transcriptions may introduce outliers that can be easily detected by looking at simple graphs of geographical or environmental space.
#' @return A data.frame with coordinate values and distance to centroid in pca is returned. Two plots are drawn for visual inspection. The environmental plot includes row numbers for easy identification of possible outliers.
#' @examples data(red.records)
#' data(red.layers)
#' outliers(red.records, red.layers[[1:3]])
#' @export
outliers <- function(longlat, layers){
##Plot records in geographical and environmental (2-axis PCA) space and
##return them ordered by distance to the environmental centroid, so
##possible outliers appear first.
##reduce the environmental space to two pca axes
if(dim(layers)[3] == 33) #layers straight from raster.read: bioclim only
pca <- raster.reduce(layers[[1:19]], n = 2)
else
pca <- raster.reduce(layers, n = 2)
##pca scores at each record; drop records falling outside the data
pca <- as.data.frame(raster::extract(pca, longlat))
keep <- which(!is.na(pca[,1]))
pca <- pca[keep,]
longlat <- longlat[keep,]
##left panel: geography; right panel: environment with row labels
par(mfrow = c(1,2))
map.draw(longlat, layers[[1]], spName = "Geographical")
raster::plot(pca, main = "Environmental", type = "n")
centroid = colMeans(pca)
text(centroid[1], centroid[2], label = "X")
for(r in 1:nrow(pca))
text(pca[r,1], pca[r,2], label = row.names(longlat)[r])
##order records by decreasing distance to the pca centroid
dist2centroid = apply(pca, 1, function(x) dist(rbind(x, centroid)))
out = as.data.frame(cbind(longlat, dist2centroid))
out = out[order(-dist2centroid),]
return(out)
}
#' Spatial thinning of occurrence records.
#' @description Thinning of records with minimum distances either absolute or relative to the species range.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param distance Distance either in relative terms (proportion of maximum distance between any two records) or in raster units.
#' @param relative If TRUE, represents the proportion of maximum distance between any two records. If FALSE, is in raster units.
#' @param runs Number of runs
#' @details Clumped distribution records due to ease of accessibility of sites, emphasis of sampling on certain areas in the past, etc. may bias species distribution models.
#' The algorithm used here eliminates records closer than a given distance to any other record. The choice of records to eliminate is random, so a number of runs are made and the one keeping more of the original records is chosen.
#' @return A matrix of species occurrence records separated by at least the given distance.
#' @examples records <- matrix(sample(100), ncol = 2)
#' par(mfrow=c(1,2))
#' graphics::plot(records)
#' records <- thin(records, 0.1)
#' graphics::plot(records)
#' @export
thin <- function(longlat, distance = 0.01, relative = TRUE, runs = 100){
##Randomly thin occurrence records so no two kept records are closer than
##'distance' (a proportion of the maximum pairwise distance if relative =
##TRUE, otherwise in raster units). Of 'runs' random attempts, the one
##keeping most records is returned as a matrix.
longlat = longlat[!duplicated(longlat),] #first, remove duplicate rows
nSites = nrow(longlat)
if(nSites < 4)
return(longlat)
##if relative, calculate maxDist between any two points
if(relative){
if(nSites < 40){ #if limited number of sites use all pairwise distances
maxDist = 0
for(x in 1:(nSites-1)){
for(y in (x+1):nSites){
maxDist = max(maxDist,((longlat[x,1]-longlat[y,1])^2+(longlat[x,2]-longlat[y,2])^2)^.5)
}
}
} else { #if many sites use hypotenuse of rectangle encompassing all of them
horiDist = max(longlat[,1]) - min(longlat[,1])
vertDist = max(longlat[,2]) - min(longlat[,2])
maxDist = (horiDist^2 + vertDist^2)^0.5
}
distance = maxDist*distance
}
##best selection so far; single-site fallback kept as a 1-row matrix
listSites = longlat[1, , drop = FALSE]
for (r in 1:runs){
longlat = longlat[sample(nSites),] ##shuffle rows (sites)
##fixed: keep rndSites as a matrix (drop = FALSE); the original used
##longlat[1,], a bare vector, so nrow(rndSites) was NULL and the
##comparison below errored whenever a run kept only the first site
rndSites = longlat[1, , drop = FALSE] ##start with first random site
for(newSite in 2:nSites){
addSite = TRUE
for(oldSite in 1:(newSite-1)){
dist = ((longlat[newSite,1]-longlat[oldSite,1])^2+(longlat[newSite,2]-longlat[oldSite,2])^2)^.5
if(dist < distance){
addSite = FALSE
break
}
}
if(addSite)
rndSites = rbind(rndSites, longlat[newSite, , drop = FALSE])
}
if(nrow(rndSites) > nrow(listSites))
listSites = rndSites
}
return(as.matrix(listSites))
}
#' Read and buffer raster layers.
#' @description Read raster layers of environmental or other variables and crop them to a given extent around the known occurrences.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster.
#' @param ext Either extent of map or buffer around the known records used to crop layers. If buffer, it is relative to the maximum distance between any two records.
#' @details If layers are not given, the function will read either 30 arc-second (approx. 1km) or 5 arc-minutes (approx. 10km) resolution rasters from worldclim (Fick & Hijmans 2017) and landcover (Tuanmu & Jetz 2014) if red.setup() is run previously.
#' @return A RasterStack object (If no layers are given: Variables 1-19 = bioclim, 20 = elevation, 21-32 = proportion landcover, 33 = most common landcover).
#' @references Fick, S.E. & Hijmans, R.J. (2017) Worldclim 2: new 1-km spatial resolution climate surfaces for global land areas. International Journal of Climatology, in press.
#' @references Tuanmu, M.-N. & Jetz, W. (2014) A global 1-km consensus land-cover product for biodiversity and ecosystem modeling. Global Ecology and Biogeography, 23: 1031-1045.
#' @examples data(red.layers)
#' data(red.records)
#' par(mfrow=c(1,2))
#' raster::plot(red.layers[[1]])
#' points(red.records)
#' croppedLayers <- raster.read(red.records, red.layers, 0.1)
#' raster::plot(croppedLayers[[1]])
#' points(red.records)
#' @export
raster.read <- function(longlat, layers = NULL, ext = 1){
##Read (or take) environmental layers and crop them to an extent or buffer
##around the records. When layers is NULL, reads the 1km or 10km rasters
##prepared by red.setup(), picking the resolution from the species EOO.
##bounding box of the records
xmin = min(longlat[,1])
xmax = max(longlat[,1])
xlen = xmax - xmin
ymin = min(longlat[,2])
ymax = max(longlat[,2])
ylen = ymax - ymin
if(is.null(layers)){ ##if no layers are provided read the ones available
gisdir = red.getDir()
##small-ranged species (EOO < 200000) use the 1km rasters, others 10km
if(eoo(longlat) < 200000){
layers <- raster::stack(raster::raster(paste(gisdir, "red_1km_1.tif", sep = "")))
for(i in 2:33)
layers <- raster::stack(layers, raster::raster(paste(gisdir, "red_1km_", i, ".tif", sep = "")))
} else {
layers <- raster::stack(raster::raster(paste(gisdir, "red_10km_1.tif", sep = "")))
for(i in 2:33)
layers <- raster::stack(layers, raster::raster(paste(gisdir, "red_10km_", i, ".tif", sep = "")))
}
##determine longitude limits of species to check if crop and paste are needed around longitude 180 for Pacific species
##fixed: the original applied scalar && to a vector mask (errors in
##R >= 4.3) and tested sum(...) != 0; a dateline-spanning species is
##detected by having no records at mid longitudes (-90, 90)
midRecords = longlat[longlat[,1] < 90 & longlat[,1] > -90, , drop = FALSE]
if(xmin < -90 && xmax > 90 && nrow(midRecords) == 0){
##crop the two halves and swap them so the map centers on the dateline
rightHalf = crop(layers, c(0,180,raster::extent(layers)@ymin,raster::extent(layers)@ymax))
raster::extent(rightHalf) <- c(-180,0,raster::extent(layers)@ymin,raster::extent(layers)@ymax)
leftHalf = crop(layers, c(-180,0,raster::extent(layers)@ymin,raster::extent(layers)@ymax))
raster::extent(leftHalf) <- c(0,180,raster::extent(layers)@ymin,raster::extent(layers)@ymax)
layers <- merge(rightHalf, leftHalf)
##shift record longitudes to the swapped coordinate space
for(i in 1:nrow(longlat)){
if(longlat[i,1] > 0)
longlat[i,1] = longlat[i,1] - 180
else
longlat[i,1] = longlat[i,1] + 180
}
}
}
if(length(ext) == 4) ##if absolute extent is given crop and return, else calculate buffer
return(crop(layers, ext))
if(xlen == 0) ##in case some dimensions are inexistent consider equal to extent
xlen = ext
if(ylen == 0)
ylen = ext
##calculate new extent of layers (buffer proportional to range size) and crop
ext = max(1, ((xlen + ylen) * ext))
xmin <- max(raster::extent(layers)@xmin, xmin-ext)
xmax <- min(raster::extent(layers)@xmax, xmax+ext)
ymin <- max(raster::extent(layers)@ymin, ymin-ext)
ymax <- min(raster::extent(layers)@ymax, ymax+ext)
layers <- crop(layers, c(xmin,xmax,ymin,ymax))
return(layers)
}
#' Uniformize raster layers.
#' @description Crop raster layers to minimum size possible and uniformize NA values across layers.
#' @param layers Raster* object as defined by package raster.
#' @details Excludes all marginal rows and columns with only NA values and change values to NA if they are NA in any of the layers.
#' @return A Raster* object, same class as layers.
#' @examples data(red.layers)
#' raster::plot(raster.clean(red.layers))
#' @export
raster.clean <- function(layers){
##Uniformize NAs across a stack and trim all-NA margins.
##A cell becomes NA in every layer if it is NA in any layer.
##mask that is NA wherever any single layer is NA, 1 elsewhere
naMask <- sum(layers)
naMask[!is.na(naMask)] <- 1
layers <- mask(layers, naMask)
##drop outer rows and columns containing only NAs
layers <- trim(layers)
return(layers)
}
#' Reduce dimensionality of raster layers.
#' @description Reduce the number of layers by either performing a PCA on them or by eliminating highly correlated ones.
#' @param layers Raster* object as defined by package raster.
#' @param method Either Principal Components Analysis ("pca", default) or Pearson's correlation ("cor").
#' @param n Number of layers to reduce to.
#' @param thres Value for pairwise Pearson's correlation above which one of the layers (randomly selected) is eliminated.
#' @details Using a large number of explanatory variables in models with few records may lead to overfitting. This function allows to avoid it as much as possible.
#' If both n and thres are given, n has priority. If method is not recognized and layers come from raster.read function, only landcover is reduced by using only the dominating landuse of each cell.
#' @return A RasterStack object.
#' @export
raster.reduce <- function(layers, method = "pca", n = NULL, thres = NULL){
##Reduce the number of layers either by PCA or by iteratively dropping one
##of the most correlated pairs. If layers come from raster.read (33
##layers), landcover is reduced to the dominant class and only the 19
##bioclim layers enter the reduction.
##method = "pca, cor", if unrecognized method only reduce landcover but not climate
out <- raster::stack()
if(dim(layers)[3] == 33){ ##check if layers are obtained with raster.read
out <- raster::stack(layers[[33]]) #keep only the dominant landcover layer
layers = layers[[1:19]] #reduce the bioclim layers below
}
if(method == "cor"){ ##if correlation
if(is.null(n)){
if(is.null(thres))
thres = 0.7
for(i in 1:dim(layers)[3]){ ##delete layers until none are correlated above threshold
##fixed: use abs() so strong negative correlations also count,
##consistent with the fixed-n branch below
cor = abs(as.matrix(as.dist(layerStats(layers, 'pearson', na.rm = TRUE)[[1]])))
if(max(cor) < thres)
break
##randomly drop one layer of the most correlated pair
corLayer = sample(which(cor == max(cor), arr.ind = TRUE)[,1],1)
layers = layers[[-corLayer]]
}
} else {
while (dim(layers)[3] > n){ ##delete layers until reaching n layers
cor = abs(as.matrix(as.dist(layerStats(layers, 'pearson', na.rm = TRUE)[[1]])))
corLayer = sample(which(cor == max(cor), arr.ind = TRUE)[,1],1)
layers = layers[[-corLayer]]
}
}
} else if(method == "pca"){ ##if pca
if(is.null(n))
n = 3
##fit the pca on a random sample: up to 1000 cells, or half of the
##non-NA cells when the raster is small
if(sum(!is.na(getValues(layers[[1]]))) > 2000)
sr <- sampleRandom(layers, 1000)
else
sr <- sampleRandom(layers, as.integer(sum(!is.na(getValues(layers[[1]])))/2))
pca <- prcomp(sr)
layers <- raster::predict(layers, pca, index = 1:n)
for(i in 1:n)
names(layers[[i]]) <- paste("pca",i)
}
out <- raster::stack(layers, out)
return(out)
}
#' Create distance layer.
#' @description Creates a layer depicting distances to records using the minimum, average, distance to the minimum convex polygon or distance taking into account a cost surface.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster to serve as model to create distance layer. Cost surface in case of param ="cost".
#' @param type text string indicating whether the output should be the "minimum", "average", "mcp" or "cost" distance to all records. "mcp" means the distance to the minimum convex polygon encompassing all records.
#' @details Using distance to records in models may help limiting the extrapolation of the predicted area much beyond known areas.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' alt = red.layers[[3]]
#' data(red.records)
#' par(mfrow=c(3,2))
#' raster::plot(alt)
#' points(red.records)
#' raster::plot(raster.distance(red.records, alt))
#' raster::plot(raster.distance(red.records, alt, type = "average"))
#' raster::plot(raster.distance(red.records, alt, type = "mcp"))
#' raster::plot(raster.distance(red.records, alt, type = "cost"))
#' @export
raster.distance <- function(longlat, layers, type = "minimum"){
##Build a raster of distances to the occurrence records.
##longlat: record coordinates; layers: template raster (cost surface when
##type = "cost"); type: "minimum", "average", "mcp" or "cost".
##only the first layer is used as spatial template
if(dim(layers)[3] > 1)
layers <- layers[[1]]
##set every non-NA cell to 0 so distances can be summed/masked onto it
##NOTE(review): for type = "cost" this also zeroes the cost surface before
##transition() uses it - verify this is the intended behavior
layers[!is.na(layers)] <- 0
if(type == "average"){
##mean distance from every cell to all records
for(d in 1:nrow(longlat)){
layers <- layers + raster::distanceFromPoints(layers, longlat[d,])
}
layers <- layers/nrow(longlat)
names(layers) <- "average distance"
} else if (type == "mcp"){
##distance to the minimum convex polygon enclosing all records
vertices <- chull(longlat)
vertices <- c(vertices, vertices[1])
vertices <- longlat[vertices,]
poly = Polygon(vertices)
poly = Polygons(list(poly),1)
poly = SpatialPolygons(list(poly)) ##minimum convex polygon
##replace the records by every cell covered by the polygon
longlat = rasterToPoints(rasterize(poly, layers))[,1:2]
layers <- mask(raster::distanceFromPoints(layers, longlat), layers)
names(layers) <- "mcp distance"
} else if (type == "cost"){
##least-cost distance over the surface (gdistance workflow)
layers <- transition(layers, function(x) 1/mean(x), 8)
layers <- geoCorrection(layers)
layers <- accCost(layers, as.matrix(longlat))
names(layers) <- "cost distance"
} else {
##default: minimum euclidean distance to the closest record
layers <- mask(raster::distanceFromPoints(layers, longlat), layers)
names(layers) <- "minimum distance"
}
return(layers)
}
#' Create longitude layer.
#' @description Create a layer depicting longitude based on any other.
#' @param layers Raster* object as defined by package raster.
#' @details Using longitude (and latitude) in models may help limiting the extrapolation of the predicted area much beyond known areas.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.long(red.layers))
#' @export
raster.long <- function(layers){
##Build a layer whose cell values are their own longitude, using the
##given raster as template (NA cells stay NA).
if(dim(layers)[3] > 1)
layers <- layers[[3]]
coords <- rasterToPoints(layers)[,1:2]
##rasterize the x coordinate of every data cell, then re-apply the mask
out <- rasterize(coords, layers, coords[,1])
out <- mask(out, layers)
names(out) <- "longitude"
return(out)
}
#' Create latitude layer.
#' @description Create a layer depicting latitude based on any other.
#' @param layers Raster* object as defined by package raster.
#' @details Using latitude (and longitude) in models may help limiting the extrapolation of the predicted area much beyond known areas.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.lat(red.layers[[1]]))
#' @export
raster.lat <- function(layers){
##Build a layer whose cell values are their own latitude, using the
##given raster as template (NA cells stay NA).
if(dim(layers)[3] > 1)
layers <- layers[[3]]
coords <- rasterToPoints(layers)[,1:2]
##rasterize the y coordinate of every data cell, then re-apply the mask
out <- rasterize(coords, layers, coords[,2])
out <- mask(out, layers)
names(out) <- "latitude"
return(out)
}
#' Create eastness layer.
#' @description Create a layer depicting eastness based on an elevation layer.
#' @param dem RasterLayer object of elevation (a digital elevation model - DEM) as defined by package raster.
#' @details Using elevation, aspect can be calculated. Yet, it is a circular variable (0 = 360) and has to be converted to northness and eastness to be useful for modelling.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.east(red.layers[[3]]))
#' @export
raster.east <- function(dem){
##Eastness from a DEM: aspect is circular (0 == 360), so its sine gives a
##linear east-west component usable in models.
return(sin(terrain(dem, opt = "aspect")))
}
#' Create northness layer.
#' @description Create a layer depicting northness based on an elevation layer.
#' @param dem RasterLayer object of elevation (a digital elevation model - DEM) as defined by package raster.
#' @details Using elevation, aspect can be calculated. Yet, it is a circular variable (0 = 360) and has to be converted to northness and eastness to be useful for modelling.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.north(red.layers[[3]]))
#' @export
raster.north <- function(dem){
##Northness from a DEM: aspect is circular (0 == 360), so its cosine gives
##a linear north-south component usable in models.
return(cos(terrain(dem, opt = "aspect")))
}
#' Predict species distribution.
#' @description Prediction of potential species distributions using maximum entropy (maxent).
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layers Predictor variables, a Raster* object as defined by package raster.
#' @param error Vector of spatial error in longlat (one element per row of longlat) in the same unit as longlat. Used to move any point randomly within the error radius.
#' @param year Vector of sampling years in longlat (one element per row of longlat). Used to exclude old records with a given probability proportional to time passed since sampling (never excluded only for current year).
#' @param idconf Vector of identification confidence in longlat (one element per row of longlat). Used to exclude uncertain records with a given probability. Can be on any scale where max values are certain (e.g. from 1 - very uncertain to 10 - holotype).
#' @param categorical Vector of layer indices of categorical (as opposed to quantitative) data. If NULL the package will try to find them automatically based on the data.
#' @param thres Threshold of logistic output used for conversion of probabilistic to binary (presence/absence) maps. If 0 this will be the value that maximizes the sum of sensitivity and specificity.
#' @param testpercentage Percentage of records used for testing only. If 0 all records will be used for both training and testing.
#' @param mcp Used for a precautionary approach. If TRUE, all areas predicted as present but outside the minimum convex hull polygon encompassing all occurrence records are converted to absence. Exceptions are cells connected to other areas inside the polygon.
#' @param points If TRUE, force map to include cells with presence records even if suitable habitat was not identified.
#' @param eval If TRUE, build a matrix with AUC, Kappa, TSS, EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model).
#' @param runs If <= 0 no ensemble modelling is performed. If > 0, ensemble modelling with n runs is made. For each run, a new random sample of occurrence records (if testpercentage > 0), background points and predictive variables (if subset > 0) are chosen. In the ensemble model, each run is weighted as max(0, (runAUC - 0.5)) ^ 2.
#' @param subset Number of predictive variables to be randomly selected from layers for each run if runs > 0. If <= 0 all layers are used on all runs. Using a small number of layers is usually better than using many variables for rare species, with few occurrence records (Lomba et al. 2010, Breiner et al. 2015).
#' @details Builds maxent (maximum entropy) species distribution models (Phillips et al. 2004, 2006; Elith et al. 2011) using function maxent from R package dismo (Hijmans et al. 2017). Dismo requires the MaxEnt species distribution model software, a java program that can be downloaded from http://biodiversityinformatics.amnh.org/open_source/maxent. Copy the file 'maxent.jar' into the 'java' folder of the dismo package. That is the folder returned by system.file("java", package="dismo"). You need MaxEnt version 3.3.3b or higher. Please note that this program (maxent.jar) cannot be redistributed or used for commercial or for-profit purposes.
#' @return List with either one or two raster objects (depending if ensemble modelling is performed, in which case the second is a probabilistic map from all the runs) and, if eval = TRUE, a matrix with AUC, Kappa, TSS, EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model). Aggregate values are taken from maps after transformation of probabilities to incidence, with presence predicted for cells with ensemble values > 0.5.
#' @references Breiner, F.T., Guisan, A., Bergamini, A., Nobis, M.P. (2015) Overcoming limitations of modelling rare species by using ensembles of small models. Methods in Ecology and Evolution, 6: 1210-1218.
#' @references Hijmans, R.J., Phillips, S., Leathwick, J., Elith, J. (2017) dismo: Species Distribution Modeling. R package version 1.1-4. https://CRAN.R-project.org/package=dismo
#' @references Lomba, A., Pellissier, L., Randin, C.F., Vicente, J., Moreira, F., Honrado, J., Guisan, A. (2010) Overcoming the rare species modelling paradox: a novel hierarchical framework applied to an Iberian endemic plant. Biological Conservation, 143: 2647-2657.
#' @references Phillips, S.J., Dudik, M., Schapire, R.E. (2004) A maximum entropy approach to species distribution modeling. Proceedings of the Twenty-First International Conference on Machine Learning. p. 655-662.
#' @references Phillips, S.J., Anderson, R.P., Schapire, R.E. (2006) Maximum entropy modeling of species geographic distributions. Ecological Modelling, 190: 231-259.
#' @references Elith, J., Phillips, S.J., Hastie, T., Dudik, M., Chee, Y.E., Yates, C.J. (2011) A statistical explanation of MaxEnt for ecologists. Diversity and Distributions, 17: 43-57.
#' @export
map.sdm <- function(longlat, layers, error = NULL, year = NULL, idconf = NULL, categorical = NULL, thres = 0, testpercentage = 0, mcp = TRUE, points = FALSE, eval = TRUE, runs = 0, subset = 0){
##Predict a species distribution with maxent (dismo), either as a single
##model (runs <= 0) or as an ensemble of runs weighted by AUC. Returns a
##presence/absence raster and, if eval = TRUE, evaluation statistics
##(AUC, Kappa, TSS, EOO and AOO from raw data and from the model).
raster::rasterOptions(maxmemory = 2e+09)
origLonglat = longlat
##if ensemble is to be done
if(runs > 0){
longlat = origLonglat
#if there is spatial error randomly move points within its radius
if(!is.null(error)){
for(i in 1:nrow(longlat)){
#move up to given error (angular movement converted to x and y)
#fixed: draw the angle in radians; the original drew 1:360 degrees
#and passed them straight to cos/sin, which expect radians
rndAngle = runif(1, 0, 2 * pi)
rndDist = runif(1, 0, error[i])
longlat[i,1] = longlat[i,1] + rndDist * cos(rndAngle)
longlat[i,2] = longlat[i,2] + rndDist * sin(rndAngle)
}
}
##randomly exclude records that are old (year) or uncertain (idconf).
##fixed: the original deleted rows with longlat[-i,] inside loops over a
##pre-computed 1:nrow(longlat), shifting indices so wrong rows could be
##dropped and year/idconf misaligned; a keep mask is applied once instead
keep = rep(TRUE, nrow(longlat))
if(!is.null(year)){
currentYear = as.integer(substr(Sys.Date(), 1, 4))
for(i in 1:nrow(longlat)){
#exclusion probability proportional to time since sampling
if(year[i] < sample(min(year):currentYear, 1))
keep[i] = FALSE
}
}
if(!is.null(idconf)){
for(i in 1:nrow(longlat)){
#exclusion probability inversely proportional to id confidence
if(idconf[i] < sample(1:max(idconf), 1))
keep[i] = FALSE
}
}
longlat = longlat[keep, , drop = FALSE]
if(eval)
runEval = matrix(NA, nrow = 1, ncol = 7)
runMap <- rasterize(longlat, layers[[1]], field = 0, background = 0)
pb <- txtProgressBar(min = 0, max = runs, style = 3)
totalAUC = 0
for(i in 1:runs){
##each run may use a random subset of the predictor layers
if(subset > 0 && subset < dim(layers)[3]){
runLayers <- layers[[sample.int(dim(layers)[3], subset)]]
thisRun <- map.sdm(longlat, runLayers, error = NULL, year = NULL, idconf = NULL, categorical, thres, testpercentage, mcp, points, eval, runs = 0, subset = 0)
} else {
thisRun <- map.sdm(longlat, layers, error = NULL, year = NULL, idconf = NULL, categorical, thres, testpercentage, mcp, points, eval, runs = 0, subset = 0)
}
runAUC = 1
if(eval){
runAUC <- thisRun[[2]][1]
runAUC <- max(0, (runAUC - 0.5)) ^ 2 #weight the map by its AUC above 0.5 to the square
runEval <- rbind(runEval, thisRun[[2]])
thisRun <- thisRun[[1]]
}
totalAUC = totalAUC + runAUC
runMap <- runMap + (thisRun * runAUC)
setTxtProgressBar(pb, i)
}
##AUC-weighted average of all runs, then thresholded into upper
##confidence (2.5%), consensus (50%) and lower confidence (97.5%) maps
runMap <- raster::calc(runMap, function(x) {x/totalAUC})
upMap <- reclassify(runMap, matrix(c(0,0.025,0,0.025,1,1), ncol = 3, byrow = TRUE))
consensusMap <- reclassify(runMap, matrix(c(0,0.499,0,0.499,1,1), ncol = 3, byrow = TRUE))
downMap <- reclassify(runMap, matrix(c(0,0.975,0,0.975,1,1), ncol = 3, byrow = TRUE))
if(mcp && aoo(consensusMap) >= 4)
consensusMap <- map.habitat(longlat, consensusMap, mcp = TRUE, eval = FALSE)
if(eval){
runEval <- runEval[-1,] #drop the NA initialization row
clEval <- matrix(NA, nrow = 3, ncol = 7)
colnames(clEval) <- c("AUC", "Kappa", "TSS", "EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)")
rownames(clEval) <- c("UpCL", "Consensus", "LowCL")
clEval[1,] <- apply(runEval, 2, quantile, probs= 0.975, na.rm = TRUE)
clEval[2,] <- apply(runEval, 2, quantile, probs= 0.5, na.rm = TRUE)
clEval[3,] <- apply(runEval, 2, quantile, probs= 0.025, na.rm = TRUE)
clEval[1:3,4] <- eoo(longlat)
clEval[1:3,6] <- aoo(longlat)
clEval[1,5] <- eoo(upMap)
clEval[1,7] <- aoo(upMap)
clEval[2,5] <- eoo(consensusMap)
clEval[2,7] <- aoo(consensusMap)
clEval[3,5] <- eoo(downMap)
clEval[3,7] <- aoo(downMap)
return(list(consensusMap, runMap, clEval))
} else {
return (consensusMap)
}
}
##single maxent run
longlat <- move(longlat, layers) #move all records falling on NAs
nPoints = min(1000, sum(!is.na(as.vector(layers[[1]])), na.rm = TRUE)/4)
bg <- dismo::randomPoints(layers, nPoints) ##extract background points
##if no categorical variables are given try to figure out which are
if(is.null(categorical))
categorical <- find.categorical(layers)
llTrain <- longlat
llTest <- longlat
if(testpercentage > 0){
testRecords <- sample(1:nrow(longlat), ceiling(nrow(longlat)*testpercentage/100))
llTrain <- longlat[-testRecords,]
llTest <- longlat[testRecords,]
}
mod <- dismo::maxent(layers, llTrain, a = bg, factors = categorical) ##build model
p <- raster::predict(mod, layers) ##do prediction
e <- dismo::evaluate(p = llTrain, a = bg, model = mod, x = layers) ##do evaluation of model
if(thres == 0)
thres <- dismo::threshold(e)$spec_sens ##extract threshold from evaluation
p <- reclassify(p, matrix(c(0,thres,0,thres,1,1), nrow=2, byrow = TRUE)) ##convert to presence/absence
if(mcp && aoo(p) >= 4)
p <- map.habitat(longlat, p, mcp = TRUE, eval = FALSE)
if(points)
p <- max(p, map.points(longlat, p, eval = FALSE))
if(eval){
e <- dismo::evaluate(p = llTest, a = bg, model = mod, x = layers, tr = thres) ##do evaluation of model with threshold
auc <- e@auc
kappa <- e@kappa
sensitivity <- as.numeric(e@TPR/(e@TPR+e@FNR))
specificity <- as.numeric(e@TNR/(e@TNR+e@FPR))
tss <- sensitivity + specificity - 1
eooRaw <- eoo(longlat)
aooRaw <- aoo(longlat)
aooModel <- aoo(p)
if(aooModel > 8)
eooModel <- eoo(p)
else
eooModel = aooModel
txtEval <- matrix(c(auc, kappa, tss, eooRaw, eooModel, aooRaw, aooModel), nrow = 1)
colnames(txtEval) <- c("AUC", "Kappa", "TSS", "EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)")
return(list(p, txtEval))
} else {
return(p)
}
}
#' Map species distribution of habitat specialist.
#' @description Mapping of all habitat patches where the species is known to occur.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layer RasterLayer object representing the presence/absence (1/0) of a single habitat type.
#' @param move If TRUE, identifies and moves presence records to closest cells with suitable habitat. Use when spatial error might put records outside the correct patch.
#' @param mcp If TRUE, all habitat patches inside the minimum convex hull polygon encompassing all occurrence records are converted to presence.
#' @param points If TRUE, force map to include cells with presence records even if suitable habitat was not identified.
#' @param eval If TRUE, build a matrix with EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model).
#' @details In many cases a species has a very restricted habitat and we generally know where it occurs. In such cases using the distribution of the known habitat patches may be enough to map the species.
#' @return One raster object and, if eval = TRUE, a matrix with EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model).
#' @export
map.habitat <- function(longlat, layer, move = TRUE, mcp = FALSE, points = FALSE, eval = TRUE){
##Keep only the habitat patches (clumps of 1s in layer) where the species
##is known to occur; every other patch is set to 0.
##longlat: record coordinates; layer: presence/absence (1/0) habitat raster.
if(points)
##force cells holding records to presence before detecting patches
layer <- max(layer, map.points(longlat, layer, eval = FALSE))
if(move){
##move records sitting on non-habitat (0) cells to the closest habitat
##cell; 0 is temporarily set to NA so move() only targets habitat cells
moveLayer <- layer
moveLayer[moveLayer == 0] <- NA
longlat <- move(longlat, moveLayer)
remove(moveLayer)
}
if(mcp){
##select every patch intersecting the minimum convex polygon of records
vertices <- chull(longlat)
vertices <- c(vertices, vertices[1])
vertices <- longlat[vertices,]
poly = Polygon(vertices)
poly = Polygons(list(poly),1)
poly = SpatialPolygons(list(poly)) ##minimum convex polygon
patches <- raster::clump(layer, gaps=FALSE) ##individual patches, numbered
selPatches <- raster::unique(extract(patches, poly, df = TRUE, weights = TRUE)$clumps) ##which patches are inside polygon
} else {
##select only the patches actually holding records
patches <- raster::clump(layer, gaps=FALSE) ##individual patches, numbered
selPatches <- raster::unique(extract(patches, longlat, df = TRUE, weights = TRUE)$clumps) ##which patches have the species
}
selPatches <- selPatches[!is.na(selPatches)]
##build a patch id -> selected (0/1) lookup table and substitute it into
##the patch raster, then zero out habitat in non-selected patches
allPatches <- raster::unique(patches)
allPatches <- as.data.frame(cbind(allPatches, rep(0, length(allPatches))))
colnames(allPatches) <- c("patches", "selected")
allPatches[selPatches, 2] <- 1
patches <- raster::subs(patches, allPatches)
layer <- mask(layer, patches, maskvalue = 0, updatevalue = 0)
if(eval){
##EOO and AOO before (raw records) and after (patch map) the selection
eooRaw <- eoo(longlat)
eooModel <- eoo(layer)
aooRaw <- aoo(longlat)
aooModel <- aoo(layer)
txtEval <- matrix(c(eooRaw, eooModel, aooRaw, aooModel), nrow = 1)
colnames(txtEval) <- c("EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)")
return(list(layer, txtEval))
} else {
return(layer)
}
}
#' Map recorded distribution of species.
#' @description Mapping of all cells where the species is known to occur.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layers Raster* object as defined by package raster. Any raster with the relevant extent and cell size can be used.
#' @param eval If TRUE, build a matrix with EOO and AOO calculated from occurrence records only.
#' @details To be used if either information on the species is very scarce (and it is not possible to model the species distribution) or, on the contrary, complete (and there is no need to model the distribution).
#' @return One raster object and, if eval = TRUE, a matrix with EOO and AOO.
#' @examples
#' data(red.records)
#' data(red.layers)
#' raster::plot(map.points(red.records, red.layers, eval = FALSE))
#' points(red.records)
#' @export
map.points <- function(longlat, layers, eval = TRUE){
##Map the recorded distribution: 1 in cells holding records, 0 elsewhere,
##NA where any layer lacks data. Optionally returns EOO/AOO of the records.
p <- rasterize(longlat, layers[[1]], field = 1, background = 0)
##restrict the map to cells with data in every layer
maskLayer <- sum(layers)
maskLayer[!is.na(maskLayer)] <- 1
p <- mask(p, maskLayer)
if(!eval)
return(p)
##EOO and AOO computed from the raw records
evalTable <- matrix(c(eoo(longlat), aoo(longlat)), nrow = 1)
colnames(evalTable) <- c("EOO", "AOO")
return(list(p, evalTable))
}
#' Species distributions made easy (multiple species).
#' @description Single step for prediction of multiple species distributions. Output of maps (in pdf format), klms (for Google Earth) and relevant data (in csv format).
#' @param longlat data.frame of taxon names, longitude and latitude or eastness and northness (three columns in this order) of each occurrence record.
#' @param layers If NULL analyses are done with environmental layers read from data files of red.setup(). If a Raster* object as defined by package raster, analyses use these.
#' @param habitat Raster* object as defined by package raster. Habitat extent layer (0/1) used instead of layers if any species is an habitat specialist.
#' @param zone UTM zone if data is in metric units. Used only for correct placement of kmls and countries.
#' @param thin boolean defining if species data should be thinned before modeling (only for SDMs).
#' @param error Vector of spatial error in longlat (one element per row of longlat) in the same unit as longlat. Used to move any point randomly within the error radius.
#' @param move If TRUE, identifies and moves presence records to closest cells with environmental data. Use when spatial error might put records outside such data.
#' @param dem RasterLayer object. It should be a digital elevation model for calculation of elevation limits of the species. If NULL, dem from red.setup() is used if possible, otherwise it will be 0.
#' @param pca Number of pca axes for environmental data reduction. If 0 (default) no pca is made.
#' @param filename Name of output csv file with all results. If NULL it is named "Results_All.csv".
#' @param mapoption Vector of values within options: points, habitat and sdm; each value corresponding to the function to be used for each species (map.points, map.habitat, map.sdm). If a single value, all species will be modelled according to it. If NULL, the function will perform analyses using map.points. Species values must be in same order as latlong.
#' @param testpercentage Percentage of records used for testing only. If 0 all records will be used for both training and testing.
#' @param mintest Minimim number of total occurrence records of any species to set aside a test set. Only used if testpercentage > 0.
#' @param points If TRUE, force map to include cells with presence records even if suitable habitat was not identified.
#' @param runs If <= 0 no ensemble modelling is performed. If > 0, ensemble modelling with n runs is made. For each run, a new random sample of occurrence records (if testpercentage > 0), background points and predictive variables (if subset > 0) are chosen. In the ensemble model, each run is weighted as max(0, (runAUC - 0.5)) ^ 2.
#' @param subset Number of predictive variables to be randomly selected from layers for each run if runs > 0. If <= 0 all layers are used on all runs. Using a small number of layers is usually better than using many variables for rare species, with few occurrence records (Lomba et al. 2010, Breiner et al. 2015).
#' @return Outputs maps in asc, pdf and kml format, plus a file with EOO, AOO and a list of countries where the species is predicted to be present if possible to extract.
#' @references Breiner, F.T., Guisan, A., Bergamini, A., Nobis, M.P. (2015) Overcoming limitations of modelling rare species by using ensembles of small models. Methods in Ecology and Evolution, 6: 1210-1218.
#' @references Lomba, A., Pellissier, L., Randin, C.F., Vicente, J., Moreira, F., Honrado, J., Guisan, A. (2010) Overcoming the rare species modelling paradox: a novel hierarchical framework applied to an Iberian endemic plant. Biological Conservation, 143: 2647-2657.
#' @export
map.easy <- function(longlat, layers = NULL, habitat = NULL, zone = NULL, thin = TRUE, error = NULL, move = TRUE, dem = NULL, pca = 0, filename = NULL, mapoption = NULL, testpercentage = 0, mintest = 20, points = FALSE, runs = 0, subset = 0){
  # Close any graphics device left open from a previous run.
  try(dev.off(), silent = TRUE)
  spNames <- unique(longlat[,1])
  nSp <- length(spNames)
  # Recycle/validate mapoption so there is exactly one option per species.
  if(is.null(mapoption))
    mapoption = rep("points", nSp)
  else if(length(mapoption) == 1)
    mapoption = rep(mapoption, nSp)
  else if(length(mapoption) != nSp)
    return(warning("Number of species different from length of mapoption"))
  # SDMs require maxent.jar to be installed inside the dismo package.
  if("sdm" %in% mapoption){
    if(!file.exists(paste(.libPaths()[[1]], "/dismo/java/maxent.jar", sep=""))){
      warnMaxent()
      return()
    }
  }
  # Width of the results table depends on the mapping methods requested:
  # 5 columns for points only, 11 for ensemble SDMs, 7 otherwise.
  if (all(mapoption == rep("points", nSp))){
    res <- matrix(NA, nrow = nSp, ncol = 5)
    colnames(res) <- c("EOO", "AOO", "Min elevation", "Max elevation", "Countries")
  } else if (("sdm" %in% mapoption) && runs > 0) {
    res <- matrix(NA, nrow = nSp, ncol = 11)
    colnames(res) <- c("EOO (raw)", "EOO (LowCL)", "EOO (Consensus)", "EOO (UpCL)", "AOO (raw)", "AOO (LowCL)", "AOO (Consensus)", "AOO (UpCL)", "Min elevation", "Max elevation", "Countries")
  } else {
    res <- matrix(NA, nrow = nSp, ncol = 7)
    colnames(res) <- c("EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)", "Min elevation", "Max elevation", "Countries")
  }
  rownames(res) <- spNames
  # Remember whether layers/dem were not supplied and so must be
  # (re)derived for every species inside the loop.
  if(is.null(layers))
    newLayers <- TRUE
  else
    newLayers <- FALSE
  if(is.null(dem))
    newDem <- TRUE
  else
    newDem <- FALSE
  rad = 0.1 # default kml circle radius in degrees (~10 km)
  for(s in 1:nSp){
    cat("\nSpecies", s, "of", nSp, "-", toString(spNames[s]),"\n")
    spData <- longlat[longlat[,1] == spNames[s], -1]
    # Per-species spatial error; values > 1 are rescaled
    # (presumably meters to degrees -- TODO confirm the 100000 factor).
    if(!is.null(error)){
      spError <- error[longlat[,1] == spNames[s]]
      if(max(spError) > 1)
        rad <- spError/100000
      else
        rad <- spError
    } else {
      spError <- NULL
    }
    if(newLayers){
      layers <- raster.read(spData)
      if(newDem)
        dem <- layers[[20]] # layer 20 assumed to be elevation -- TODO confirm
      if(pca > 0)
        layers <- raster.reduce(layers, n = pca)
    }
    # Choose the mapping method. SDMs need more than two occupied 2x2 km
    # cells (aoo > 8); note move() is called here regardless of the move flag.
    if(mapoption[s] == "sdm" && aoo(move(spData, layers)) > 8){
      if(move)
        spData <- move(spData, layers)
      if(thin)
        spData <- thin(spData)
      if(testpercentage > 0)
        p <- map.sdm(spData, layers, spError, testpercentage = testpercentage, mcp = TRUE, points = points, runs = runs, subset = subset)
      else
        p <- map.sdm(spData, layers, spError, testpercentage = 0, mcp = TRUE, points = points, runs = runs, subset = subset)
    } else if (mapoption[s] == "habitat"){
      p <- map.habitat(spData, habitat, move, points = points)
    } else {
      mapoption[s] = "points" # fall back to points mapping
      p <- map.points(spData, layers)
    }
    # Save the distribution raster and a pdf map for this species.
    writeRaster(p[[1]], paste(toString(spNames[s]), ".asc", sep=""), overwrite = TRUE)
    map.draw(spData, p[[1]], spNames[s], sites = FALSE, print = TRUE)
    # kml, country list and elevation limits: derived from the model raster
    # when one was built, otherwise from the raw records.
    if(mapoption[s] != "points"){
      kml(p[[1]], zone = zone, paste(toString(spNames[s]), ".kml", sep=""), mapoption = "aoo")
      countryList <- countries(p[[1]], zone = zone)
      if(is.null(dem))
        elev <- c(0, 0)
      else
        elev <- elevation(p[[1]], dem)
    } else {
      kml(spData, zone = zone, paste(toString(spNames[s]), ".kml", sep=""), mapoption = "points", rad = rad)
      countryList <- countries(spData, zone = zone)
      if(is.null(dem))
        elev <- c(0, 0)
      else
        elev <- elevation(spData, dem)
    }
    # For ensemble SDMs also save the probability map.
    if(mapoption[s] == "sdm" && aoo(spData) > 8 && runs > 0){
      writeRaster(p[[2]], paste(toString(spNames[s]), "_prob.asc", sep=""), overwrite = TRUE)
      map.draw(spData, p[[2]], paste(toString(spNames[s]), "_prob", sep = ""), legend = TRUE, print = TRUE)
    }
    ##write output values to csv
    # The last element of p is always the evaluation summary; its length/shape
    # depends on the mapping method and on whether runs > 0.
    spRes = p[[length(p)]]
    if(ncol(res) == 5){ #colnames(res) <- c("EOO", "AOO", "Min elevation", "Max elevation", "Countries")
      res[s,] <- c(spRes, elev, toString(countryList))
    }
    if(ncol(res) == 7){ #colnames(res) <- c("EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)", "Min elevation", "Max elevation", "Countries")
      if(length(spRes) == 7)
        res[s,] <- c(spRes[4:7], elev, toString(countryList))
      else #if length(spRes) < 7
        res[s,] <- c(spRes[c(1,1,2,2)], elev, toString(countryList))
    }
    if(ncol(res) == 11){ #colnames(res) <- c("EOO (raw)", "EOO (LowCL)", "EOO (Consensus)", "EOO (UpCL)", "AOO (raw)", "AOO (LowCL)", "AOO (Consensus)", "AOO (UpCL)", "Min elevation", "Max elevation", "Countries")
      if(length(spRes) == 2)
        res[s,] <- c(spRes[c(1,1,1,1,2,2,2,2)], elev, toString(countryList))
      else if(length(spRes) == 4)
        res[s,] <- c(spRes[c(1,2,2,2,3,4,4,4)], elev, toString(countryList))
      else if(is.null(dim(spRes)))
        res[s,] <- c(spRes[4:7], elev, toString(countryList))
      else #if matrix
        res[s,] <- c(spRes[2,4], spRes[3:1,5], spRes[2,6], spRes[3:1,7], elev, toString(countryList))
    }
    # Per-species csv plus, for SDMs, the detailed model evaluation.
    write.csv(res[s,], paste(toString(spNames[s]), ".csv", sep = ""))
    if(mapoption[s] == "sdm" && aoo(spData) > 8){
      if(runs > 0)
        write.csv(p[[3]], paste(toString(spNames[s]), "_detail.csv", sep = ""))
      else
        write.csv(p[[2]], paste(toString(spNames[s]), "_detail.csv", sep = ""))
    }
  }
  # Summary table across all species.
  if(is.null(filename))
    write.csv(res, "Results_All.csv")
  else
    write.csv(res, toString(filename))
  return(as.data.frame(res))
}
#' Map creation.
#' @description Creates maps ready to print in pdf or other formats.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layer RasterLayer object representing the presence/absence map for the species.
#' @param spName String of species name.
#' @param borders If TRUE country borders are drawn.
#' @param scale If TRUE a distance scale in km is drawn.
#' @param legend If TRUE the legend for the map is drawn.
#' @param sites If TRUE the record locations are drawn.
#' @param mcp If TRUE the minimum convex polygon representing the Extent of Occurrence is drawn.
#' @param print If TRUE a pdf is saved instead of the output to the console.
#' @examples data(red.records)
#' data(red.range)
#' par(mfrow = c(1,2))
#' map.draw(red.records, layer = red.range, mcp = TRUE)
#' @export
map.draw <- function(longlat = NULL, layer, spName, borders = FALSE, scale = TRUE, legend = FALSE, sites = TRUE, mcp = FALSE, print = FALSE){
  ## Country borders dataset shipped with the package.
  worldborders <- NULL
  data(worldborders, envir = environment())
  if (borders){
    ## With borders on, blank the absence cells so only the range shows.
    layer[layer == 0] <- NA
    raster::plot(layer, main = spName, legend = legend, xlab = "longitude", ylab = "latitude", col = "forestgreen")
    lines(worldborders)
  } else {
    raster::plot(layer, main = spName, legend = legend, colNA = "lightblue", xlab = "longitude", ylab = "latitude")
  }
  if (scale){
    ## Scale bar length: a round number derived from the map width.
    extWidth <- (xmax(layer) - xmin(layer))
    barLen <- round(extWidth/10^(nchar(extWidth)-1))*10^(nchar(extWidth)-2)
    scalebar(d = barLen, type="bar", divs = 2)
  }
  if (sites && !is.null(longlat))
    points(longlat, pch = 19)
  if (mcp){
    ## Overlay the minimum convex polygon (EOO) of the presence cells.
    cellPts <- rasterToPoints(layer, fun = function(dat){dat == 1})
    hullIdx <- chull(cellPts[,1], cellPts[,2])
    hullIdx <- c(hullIdx, hullIdx[1]) ## close the ring
    hullCoords <- cellPts[hullIdx, c(1,2)]
    eooPoly <- SpatialPolygons(list(Polygons(list(Polygon(hullCoords)),1)))
    raster::plot(eooPoly, add = TRUE)
  }
  if(print){
    ## Copy the on-screen plot to a pdf file named after the species.
    dev.copy(device = pdf, file = paste0(toString(spName), ".pdf"))
    dev.off()
  }
}
#' Extent of Occurrence (EOO).
#' @description Calculates the Extent of Occurrence of a species based on either records or predicted distribution.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (either 0/1 or probabilistic values).
#' @details EOO is calculated as the minimum convex polygon covering all known or predicted sites for the species.
#' @return A single value in km2 or a vector with lower confidence limit, consensus and upper confidence limit (probabilities 0.975, 0.5 and 0.025 respectively).
#' @examples data(red.records)
#' data(red.range)
#' eoo(red.records)
#' eoo(red.range)
#' @export
eoo <- function(spData){
  # inherits() rather than class(x) == "RasterLayer": class() can return a
  # vector (e.g. c("matrix", "array") for matrix input since R 4.0), which
  # makes the comparison length > 1 and errors inside if() on R >= 4.2.
  if(inherits(spData, "RasterLayer")){
    if(!all(raster::as.matrix(spData) == floor(raster::as.matrix(spData)), na.rm = TRUE)){ #if probabilistic map
      # Threshold at 0.975/0.5/0.025 and recurse, yielding lower confidence
      # limit, consensus and upper confidence limit areas.
      upMap <- reclassify(spData, matrix(c(0,0.025,0,0.025,1,1), ncol = 3, byrow = TRUE))
      consensusMap <- reclassify(spData, matrix(c(0,0.499,0,0.499,1,1), ncol = 3, byrow = TRUE))
      downMap <- reclassify(spData, matrix(c(0,0.975,0,0.975,1,1), ncol = 3, byrow = TRUE))
      area <- c(eoo(downMap), eoo(consensusMap), eoo(upMap))
    } else {
      if (raster::xmax(spData) <= 180) { #if longlat data
        e <- rasterToPoints(spData, fun = function(dat){dat == 1}) ##convert raster to points
        vertices <- chull(e[,1], e[,2])
        if(length(vertices) < 3) return(0) #degenerate hull (fewer than 3 vertices)
        vertices <- c(vertices, vertices[1]) #close the polygon ring
        vertices <- e[vertices,c(1,2)]
        area = geosphere::areaPolygon(vertices)/1000000 #m2 to km2
      } else {
        spData[spData < 1] <- NA
        spData <- rasterToPoints(spData)
        vertices <- chull(spData)
        if(length(vertices) < 3) return(0)
        vertices <- c(vertices, vertices[1])
        vertices <- spData[vertices,]
        # Shoelace formula on metric coordinates: /2 gives the area in m2,
        # /1000000 converts to km2, hence the combined /2000000.
        area = 0
        for(i in 1:(nrow(vertices)-1))
          area = area + (as.numeric(vertices[i,1])*as.numeric(vertices[(i+1),2]) - as.numeric(vertices[i,2])*as.numeric(vertices[(i+1),1]))
        area = abs(area/2000000)
      }
    }
  } else if (ncol(spData) == 2){
    vertices <- chull(spData)
    if(length(vertices) < 3) return(0) #degenerate hull (fewer than 3 vertices)
    vertices <- c(vertices, vertices[1]) #close the polygon ring
    vertices <- spData[vertices,]
    if(max(spData) <= 180) { #if longlat data
      area = geosphere::areaPolygon(vertices)/1000000 #m2 to km2
    } else { #if square data in meters
      # Shoelace formula (see note above): /2000000 = /2 then m2 to km2.
      area = 0
      for(i in 1:(nrow(vertices)-1))
        area = area + (as.numeric(vertices[i,1])*as.numeric(vertices[(i+1),2]) - as.numeric(vertices[i,2])*as.numeric(vertices[(i+1),1]))
      area = abs(area/2000000)
    }
  } else {
    return(warning("Data format not recognized"))
  }
  return(round(area))
}
#' Area of Occupancy (AOO).
#' @description Calculates the Area of Occupancy of a species based on either known records or predicted distribution.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (either 0/1 or probabilistic values).
#' @details AOO is calculated as the area of all known or predicted cells for the species. The resolution will be 2x2km as required by IUCN.
#' @return A single value in km2 or a vector with lower confidence limit, consensus and upper confidence limit (probabilities 0.975, 0.5 and 0.025 respectively).
#' @examples data(red.range)
#' aoo(red.range)
#' @export
aoo <- function(spData){
  # inherits() rather than class(x) == "RasterLayer": class() can return a
  # vector (e.g. c("matrix", "array") for matrix input since R 4.0), which
  # makes the comparison length > 1 and errors inside if() on R >= 4.2.
  if (inherits(spData, "RasterLayer")){ #if rasterlayer
    if(raster::maxValue(spData) == 0){ #if no data (empty raster)
      area = 0
    } else if(!all(raster::as.matrix(spData) == floor(raster::as.matrix(spData)), na.rm = TRUE)){ #if probabilistic map
      # Threshold at 0.975/0.5/0.025 and recurse, yielding lower confidence
      # limit, consensus and upper confidence limit areas.
      upMap <- reclassify(spData, matrix(c(0,0.025,0,0.025,1,1), ncol = 3, byrow = TRUE))
      consensusMap <- reclassify(spData, matrix(c(0,0.499,0,0.499,1,1), ncol = 3, byrow = TRUE))
      downMap <- reclassify(spData, matrix(c(0,0.975,0,0.975,1,1), ncol = 3, byrow = TRUE))
      area <- c(aoo(downMap), aoo(consensusMap), aoo(upMap))
    } else {
      if (raster::xmax(spData) <= 180) { #if longlat data
        if(res(spData)[1] > 0.05){ #if resolution is > 1km use area of cells rounded to nearest 4km
          area = round(cellStats((raster::area(spData) * spData), sum)/4)*4
        } else {
          spData[spData < 1] <- NA
          spData <- rasterToPoints(spData)
          if(nrow(unique(spData)) == 1){
            area = 4 #single occupied cell: one 2x2 km cell
          } else {
            # Project to UTM, snap to a 2 km grid and count occupied cells.
            spData <- longlat2utm(spData[,-3])
            spData = floor(spData/2000)
            ncells = nrow(unique(spData))
            area = ncells * 4
          }
        }
      } else { #if square data in meters
        spData[spData < 1] <- NA
        spData <- rasterToPoints(spData)
        spData = floor(spData/2000) #snap to a 2 km grid
        ncells = nrow(unique(spData))
        area = ncells * 4 #each occupied cell is 2x2 km = 4 km2
      }
    }
  } else if (ncol(spData) == 2){
    if (max(spData) <= 180) { #if longlat data
      spData <- longlat2utm(spData)
      spData = floor(spData/2000)
      ncells = nrow(unique(spData))
      area = ncells * 4
    } else { #if square data in meters
      spData = floor(spData/2000) #snap to a 2 km grid
      ncells = nrow(unique(spData))
      area = ncells * 4 #each occupied cell is 2x2 km = 4 km2
    }
  } else {
    return(warning("Data format not recognized!"))
  }
  return(round(area))
}
#' Elevation limits.
#' @description Calculates the elevation (or depth) limits (range) of a species based on either known records or predicted distribution.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (0/1 values).
#' @param dem RasterLayer object. Should be a digital elevation model (DEM) of the relevant area. If not given the function will try to read it from base data, only works with longlat data.
#' @details Maximum and minimum elevation are calculated based on the DEM.
#' @return A vector with two values (min and max) in meters above (or below) sea level.
#' @examples data(red.records)
#' data(red.range)
#' data(red.layers)
#' dem = red.layers[[3]]
#' elevation(red.records, dem)
#' elevation(red.range, dem)
#' @export
elevation <- function(spData, dem = NULL){
  # inherits() rather than class(x) != "RasterLayer": class() can return a
  # vector (e.g. c("matrix", "array") for matrix input since R 4.0), which
  # makes the comparison length > 1 and errors inside if() on R >= 4.2.
  if(!inherits(spData, "RasterLayer")){ #if no rasterlayer is given but just a matrix of longlat.
    if(is.null(dem) && max(spData) <= 180){
      # Read the base 1 km dem and crop it to the records plus a 0.1 deg margin.
      gisdir = red.getDir()
      dem <- raster::raster(paste(gisdir, "red_1km_20.tif", sep =""))
      dem <- crop(dem, c(min(spData[,1])-0.1, max(spData[,1]+0.1), min(spData[,2])-0.1, max(spData[,2])+0.1))
    }
    spData = rasterize(spData, dem, field = 1, background = NA) #create a layer of presence based on the dem
  } else if (is.null(dem)){
    gisdir = red.getDir()
    dem <- raster::raster(paste(gisdir, "red_1km_20.tif", sep = ""))
    dem <- crop(dem, spData)
  }
  # Keep only presence cells, then multiply by the dem so cell values
  # become elevations; min/max of those give the elevational range.
  spData[spData == 0] <- NA
  spData <- raster::overlay(spData, dem, fun = function(x,y){(x*y)})
  out <- c(raster::minValue(spData), raster::maxValue(spData))
  names(out) <- c("Min", "Max")
  return(round(out))
}
#' Countries of occurrence.
#' @description Extracts the names or ISO codes of countries of occurrence of a species based on either records or predicted distribution.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (0/1 values).
#' @param zone UTM zone if data is in metric units.
#' @param ISO Outputs either country names (FALSE) or ISO codes (TRUE).
#' @details Country boundaries and designations are based on data(worldborders) from package maptools.
#' @return A vector with country names or codes.
#' @examples data(red.records)
#' data(red.range)
#' countries(red.records)
#' countries(red.range, ISO = TRUE)
#' @export
countries <- function(spData, zone = NULL, ISO = FALSE){
  # inherits() rather than class(x) == "RasterLayer": class() can return a
  # vector (e.g. c("matrix", "array") for matrix input since R 4.0), and a
  # length > 1 condition errors with && on R >= 4.3 and in if() on R >= 4.2.
  if ((inherits(spData, "RasterLayer") && raster::xmax(spData) > 180) || (!inherits(spData, "RasterLayer") && max(spData) > 180)) ##if need to project to longlat
    spData <- utm2longlat(spData, zone)
  worldborders <- NULL
  data(worldborders, envir = environment())
  if(inherits(spData, "RasterLayer"))
    spData <- rasterToPoints(spData, fun = function(dat){dat == 1}) ##convert raster to points
  # Point-in-polygon overlay against the country boundaries.
  countryList <- sp::over(sp::SpatialPoints(spData), sp::SpatialPolygons(worldborders@polygons))
  if(ISO)
    countryList <- unique(worldborders@data[countryList,])$ISO2
  else
    countryList <- unique(worldborders@data[countryList,])$NAME
  countryList <- sort(as.vector(countryList[!is.na(countryList)]))
  return(countryList)
}
#' Output kml files.
#' @description Creates kml files for Google Maps as required by IUCN guidelines.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (0/1 values).
#' @param zone UTM zone if data is in metric units.
#' @param filename The name of file to save, should end with .kml.
#' @param mapoption Type of representation, any of "points", "eoo" or "aoo".
#' @param smooth Smooths the kml lines as per IUCN guidelines. Higher values represent smoother polygons.
#' @param rad radius of circles in degrees if mapoption is "points". It can be the same value for all points or a vector with length equal to number of records in spData representing associated error. The default is about 10km (0.1 degrees) as per IUCN guidelines.
#' @return A kml with polygon or circles around records.
#' @export
kml <- function(spData, zone = NULL, filename, mapoption = "aoo", smooth = 0, rad = 0.1){
  # inherits() rather than class(x) == "RasterLayer": class() can return a
  # vector (e.g. c("matrix", "array") for matrix input since R 4.0), and a
  # length > 1 condition errors with && on R >= 4.3 and in if() on R >= 4.2.
  if ((inherits(spData, "RasterLayer") && raster::xmax(spData) > 180) || (!inherits(spData, "RasterLayer") && max(spData) > 180)) ##if need to project to longlat
    spData <- utm2longlat(spData, zone)
  if(mapoption == "aoo" && inherits(spData, "RasterLayer")){
    spData[spData != 1] <- NA
    spData <- rasterToPolygons(spData, dissolve = TRUE)
    #simplify
    if(smooth > 0){
      # Try increasingly small tolerances until gSimplify succeeds.
      trytol <- c(seq(0.001,0.01,0.001),seq(0.02,0.1,0.01),seq(0.2,1,0.1),2:10,seq(20,100,10),seq(200,1000,100),seq(2000,10000,1000),seq(20000,100000,10000),seq(200000,1000000,100000))
      for (i in trytol){
        if(!inherits(try(gSimplify(spData, tol = (1 / i)), silent = TRUE), "try-error")){
          spData <- gSimplify(spData, tol = (smooth / (i*10)))
          break
        }
      }
      #cut to coast
      # Load the worldborders dataset: it was previously used here without
      # being loaded into this function's environment, causing an
      # "object 'worldborders' not found" error.
      worldborders <- NULL
      data(worldborders, envir = environment())
      spData <- gIntersection(worldborders, spData)
      #round
      smooth = smooth * 100
      polys = methods::slot(spData@polygons[[1]], "Polygons")
      # Periodic spline through the polygon vertices to smooth its outline.
      spline.poly <- function(xy, vertices, k=3, ...) {
        # Assert: xy is an n by 2 matrix with n >= k.
        # Wrap k vertices around each end.
        n <- dim(xy)[1]
        if (k >= 1) {
          data <- rbind(xy[(n-k+1):n,], xy, xy[1:k, ])
        } else {
          data <- xy
        }
        # Spline the x and y coordinates.
        data.spline <- spline(1:(n+2*k), data[,1], n=vertices, ...)
        x <- data.spline$x
        x1 <- data.spline$y
        x2 <- spline(1:(n+2*k), data[,2], n=vertices, ...)$y
        # Retain only the middle part.
        cbind(x1, x2)[k < x & x <= n+k, ]
      }
      spData <- SpatialPolygons(
        Srl = lapply(1:length(polys),
                     function(x){
                       p <- polys[[x]]
                       #applying spline.poly function for smoothing polygon edges
                       bz <- spline.poly(methods::slot(polys[[x]], "coords"),smooth, k=3)
                       bz <- rbind(bz, bz[1,]) #close the smoothed ring
                       methods::slot(p, "coords") <- bz
                       # create Polygons object
                       poly <- Polygons(list(p), ID = x)
                     }
        )
      )
      spData <- SpatialPolygonsDataFrame(spData, data=data.frame(ID = 1:length(spData)))
      kmlPolygons(spData, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
    } else {
      kmlPolygon(spData, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
    }
  } else if(mapoption == "points" || (inherits(spData, "RasterLayer") && aoo(spData) <= 8) || nrow(spData) < 3){
    # One circle of radius rad per record (rad may be scalar or per-record).
    poly = list()
    for(i in 1:nrow(spData)){
      pts = seq(0, 2 * pi, length.out = 100)
      if(length(rad) == 1)
        xy = cbind(spData[i, 1] + rad * sin(pts), spData[i, 2] + rad * cos(pts))
      else
        xy = cbind(spData[i, 1] + rad[i] * sin(pts), spData[i, 2] + rad[i] * cos(pts))
      poly[[i]] = Polygon(xy)
    }
    poly = Polygons(poly,1)
    kmlPolygon(poly, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
  } else {
    # EOO: convex hull around presence cells or records.
    if (inherits(spData, "RasterLayer")){
      e <- rasterToPoints(spData, fun = function(dat){dat == 1}) ##convert raster to points
      vertices <- chull(e[,1], e[,2])
      vertices <- c(vertices, vertices[1]) #close the polygon ring
      vertices <- e[vertices,c(1,2)]
    } else {
      vertices <- chull(spData)
      vertices <- c(vertices, vertices[1])
      vertices <- spData[vertices,]
    }
    poly = Polygon(vertices)
    poly = Polygons(list(poly),1)
    kmlPolygon(poly, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
  }
}
#' Red List Index.
#' @description Calculates the Red List Index (RLI) for a group of species.
#' @param spData Either a vector with species assessment categories for a single point in time or a matrix with two points in time in different columns (species x date). Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param tree An hclust or phylo object (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @param boot If TRUE bootstrapping for statistical significance is performed on both values per date and the trend between dates.
#' @param dd bootstrap among all species (FALSE) or Data Deficient species only (TRUE).
#' @param runs Number of runs for bootstrapping
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to 5 (Extinct/Extinct in the Wild).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Each species weight can further be influenced by how much it uniquely contributes to the phylogenetic or functional diversity of the group (Cardoso et al. in prep.).
#' Importantly, the RLI is based on true improvements or deteriorations in the status of species, i.e. genuine changes. It excludes category changes resulting from, e.g., new knowledge (Butchart et al. 2007).
#' The RLI approach helps to develop a better understanding of which taxa, regions or ecosystems are declining or improving.
#' Juslen et al. (2016a, b) suggested the use of bootstrapping to search for statistical significance when comparing taxa or for trends in time of the index and this approach is here implemented.
#' @return Either a vector (if no two dates are given) or a matrix with the RLI values and, if bootstrap is performed, their confidence limits and significance.
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PloS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PloS One, 2: e140.
#' @references Juslen, A., Cardoso, P., Kullberg, J., Saari, S. & Kaila, L. (2016a) Trends of extinction risk for Lepidoptera in Finland: the first national Red List Index of butterflies and moths. Insect Conservation and Diversity, 9: 118-123.
#' @references Juslen, A., Pykala, J., Kuusela, S., Kaila, L., Kullberg, J., Mattila, J., Muona, J., Saari, S. & Cardoso, P. (2016b) Application of the Red List Index as an indicator of habitat change. Biodiversity and Conservation, 25: 569-585.
#' @examples rliData <- matrix(c("LC","LC","EN","EN","EX","EX","LC","CR","DD","DD"), ncol = 2, byrow = TRUE)
#' colnames(rliData) <- c("2000", "2010")
#' rli(rliData[,1])
#' rli(rliData[,1], boot = TRUE)
#' rli(rliData)
#' rli(rliData, boot = TRUE, dd = TRUE)
#' @export
rli <- function (spData, tree = NULL, boot = FALSE, dd = FALSE, runs = 1000){
  ##if only one point in time is given
  if(is.null(dim(spData)))
    return(rli.calc(spData, tree, boot, dd, runs)) ##return either 1 or 3 values
  ##if two points in time are given
  # Raw (non-bootstrapped) index per assessment date.
  ts <- apply(spData, 2, function(x) rli.calc(x, tree, boot = FALSE))
  # Average change per year; column names are assumed to be year numbers.
  sl <- (ts[2] - ts[1]) / (as.numeric(colnames(spData))[2] - as.numeric(colnames(spData))[1])
  if(!boot){
    res <- matrix(c(ts, sl), nrow = 1)
    colnames(res) <- c(colnames(spData), "Change/year")
    rownames(res) <- c("Raw")
    return(res)
  } else {
    # Bootstrapped values per date; rli.calc with boot = TRUE presumably
    # returns c(lowCL, median, upCL) -- TODO confirm (defined elsewhere).
    tr <- apply(spData, 2, function(x) rli.calc(x, tree, boot, dd, runs))
    p = 0
    rndSl = rep(NA, runs)
    for(r in 1:runs){
      # Change between dates for one bootstrap replicate (element [2],
      # presumably the median of a single-run bootstrap -- TODO confirm).
      rndSl[r] <- rli.calc(spData[,2], tree, boot, dd, runs = 1)[2] - rli.calc(spData[,1], tree, boot, dd, runs = 1)[2]
      # Count replicates whose change differs in sign from the raw change
      # (the two-sided comparison below is equivalent to sign(sl) != sign(rndSl[r])).
      if(sign(sl) < sign(rndSl[r]) || sign(sl) > sign(rndSl[r]))
        p = p + 1
    }
    # Proportion of sign-discordant replicates = p-value of the trend.
    p = p / runs
    rndSl = quantile(rndSl, c(0.025, 0.5, 0.975))
    # Rows: raw value, then bootstrap lower CL / median / upper CL.
    res <- matrix(c(ts[1], tr[,1], ts[2], tr[,2], sl, rndSl), nrow = 4, ncol = 3)
    colnames(res) <- c(colnames(spData), "Change")
    rownames(res) <- c("Raw", "LowCL", "Median", "UpCL")
    return(list("Values" = res, "P_change" = p))
  }
}
#' Red List Index for multiple groups.
#' @description Calculates the Red List Index (RLI) for multiple groups of species.
#' @param spData A matrix with group names (first column) and species assessment categories for one or two points in time (remaining columns). Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param tree A list of hclust or phylo objects, each corresponding to a tree per group (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @param boot If TRUE bootstrapping for statistical significance is performed on both values per date and the trend between dates.
#' @param dd bootstrap among all species (FALSE) or Data Deficient species only (TRUE).
#' @param runs Number of runs for bootstrapping
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to 5 (Extinct/Extinct in the Wild).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Each species weight can further be influenced by how much it uniquely contributes to the phylogenetic or functional diversity of the group (Cardoso et al. in prep.).
#' Importantly, the RLI is based on true improvements or deteriorations in the status of species, i.e. genuine changes. It excludes category changes resulting from, e.g., new knowledge (Butchart et al. 2007).
#' The RLI approach helps to develop a better understanding of which taxa, regions or ecosystems are declining or improving.
#' Juslen et al. (2016a, b) suggested the use of bootstrapping to search for statistical significance when comparing taxa or for trends in time of the index and this approach is here implemented.
#' @return A matrix with the RLI values and, if bootstrap is performed, their confidence limits and significance.
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PloS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PloS One, 2: e140.
#' @references Juslen, A., Cardoso, P., Kullberg, J., Saari, S. & Kaila, L. (2016a) Trends of extinction risk for Lepidoptera in Finland: the first national Red List Index of butterflies and moths. Insect Conservation and Diversity, 9: 118-123.
#' @references Juslen, A., Pykala, J., Kuusela, S., Kaila, L., Kullberg, J., Mattila, J., Muona, J., Saari, S. & Cardoso, P. (2016b) Application of the Red List Index as an indicator of habitat change. Biodiversity and Conservation, 25: 569-585.
#' @examples rliData <- matrix(c("LC","LC","EN","EN","EX","EX","LC","CR","CR","EX"), ncol = 2, byrow = TRUE)
#' colnames(rliData) <- c("2000", "2010")
#' rliData <- cbind(c("Arthropods","Arthropods","Birds","Birds","Birds"), rliData)
#' rli.multi(rliData[,1:2])
#' rli.multi(rliData[,1:2], boot = TRUE)
#' rli.multi(rliData)
#' rli.multi(rliData, boot = TRUE)
#' @export
rli.multi <- function (spData, tree = NULL, boot = FALSE, dd = FALSE, runs = 1000){
  ## One RLI computation per group named in the first column of spData.
  groupNames <- unique(spData[,1])
  nGroups <- length(groupNames)
  ## Width of the result matrix depends on the number of dates supplied
  ## and on whether bootstrapping is requested.
  if(ncol(spData) == 2 && !boot){
    res <- matrix(NA, nrow = nGroups, ncol = 1)
  } else if((ncol(spData) == 2 && boot) || (ncol(spData) == 3 && !boot)){
    res <- matrix(NA, nrow = nGroups, ncol = 3)
  } else {
    res <- matrix(NA, nrow = nGroups, ncol = 13)
    colnames(res) <- c(paste(colnames(spData)[2], "(raw)"), paste(colnames(spData)[2], "(lowCL)"), paste(colnames(spData)[2], "(median)"), paste(colnames(spData)[2], "(upCL)"), paste(colnames(spData)[3], "(raw)"), paste(colnames(spData)[3], "(lowCL)"), paste(colnames(spData)[3], "(median)"), paste(colnames(spData)[3], "(upCL)"), "Change (raw)", "Change (lowCL)", "Change (median)", "Change (upCL)", "p (change)")
  }
  row.names(res) <- groupNames
  for(g in 1:nGroups){
    inGroup <- spData[,1] == groupNames[g]
    ## Pass the matching tree (if any) for this group.
    if(is.null(tree))
      groupRes <- rli(spData[inGroup, -1], tree = NULL, boot = boot, dd = dd, runs = runs)
    else
      groupRes <- rli(spData[inGroup, -1], tree[[g]], boot = boot, dd = dd, runs = runs)
    if(ncol(res) < 13){
      ## rli() returned a plain vector/matrix row.
      res[g,] <- groupRes
      colnames(res) <- colnames(groupRes)
    } else {
      ## rli() returned list(Values = 4x3 matrix, P_change = p).
      res[g, 1:4] <- groupRes$Values[,1]
      res[g, 5:8] <- groupRes$Values[,2]
      res[g, 9:12] <- groupRes$Values[,3]
      res[g, 13] <- groupRes$P_change
    }
  }
  return(res)
}
#' Prediction of Red List Index.
#' @description Linearly interpolates and extrapolates RLI values to any years.
#' @param rliValue Should be a vector with RLI values and names as the corresponding year numbers.
#' @param from Starting year of the sequence to predict.
#' @param to Ending year of the sequence to predict.
#' @param rliPlot Plots the result
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' @return A matrix with the RLI values and confidence limits.
#' @examples rliValue <- c(4.5, 4.3, 4.4, 4.2, 4.0)
#' names(rliValue) <- c(2000, 2004, 2008, 2011, 2017)
#' rli.predict(rliValue, 1990, 2020)
#' @export
rli.predict <- function(rliValue, from = NA, to = NA, rliPlot = FALSE){
  ## Assessment years are carried in the names of rliValue.
  yrs <- as.numeric(c(names(rliValue)))
  fitData <- data.frame(rliValue, year = yrs)
  ## Default prediction window is the span of the observed years.
  if(is.na(from))
    from <- min(yrs)
  if(is.na(to))
    to <- max(yrs)
  predYears <- data.frame(year = seq(from = from, to = to, by = 1))
  ## Linear interpolation/extrapolation with 95% confidence limits.
  linFit <- lm(rliValue ~ year, data = fitData)
  ci <- predict(linFit, predYears, interval = "confidence", level = 0.95)
  ## Reorder columns to lower limit, point estimate, upper limit.
  res <- ci[, c(2, 1, 3)]
  colnames(res) <- c("LowCL", "Fitted RLI", "UpCL")
  rownames(res) <- predYears$year
  if(rliPlot){
    plot(yrs, rliValue, xlab="Year", ylab="Fitted RLI", xlim = c(from, to), ylim = c(0,5))
    abline(linFit, col = "red")
    matlines(predYears, ci[,2:3], col = "blue", lty = 2)
  }
  return(res)
}
#' Sampled Red List Index.
#' @description Calculates accumulation curve of confidence limits in sampled RLI.
#' @param spData A vector with species assessment categories for a single point in time. Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param tree An hclust or phylo object (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @param p p-value of confidence limits (in a two-tailed test).
#' @param runs Number of runs for smoothing accumulation curves.
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to Extinct/Extinct in the Wild (5).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Yet, in many groups, it is not possible to assess all species due to huge diversity and/or lack of resources. In such case, the RLI is estimated from a randomly selected sample of species - the Sampled Red List Index (SRLI; Stuart et al. 2010).
#' This function allows to calculate how many species are needed to reach a given maximum error of the SRLI around the true value of the RLI (with all species included) for future assessments of the group.
#' @return A vector with the accumulation of the error of the SRLI around the true value of the RLI (with all species included).
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PLoS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PLoS One, 2: e140.
#' @references Stuart, S.N., Wilson, E.O., McNeely, J.A., Mittermeier, R.A. & Rodriguez, J.P. (2010) The barometer of Life. Science 328, 117.
#' @examples rliData <- c("LC","LC","EN","EN","EX","EX","LC","CR","CR","EX")
#' rli.sampled(rliData)
#' @export
rli.sampled <- function (spData, tree = NULL, p = 0.05, runs = 1000){
  nSpp <- length(spData)
  accum <- rep(NA, nSpp)
  ## The true RLI (all species included) is constant across every run and
  ## sample size, so compute it once instead of inside the double loop
  ## (the original recomputed it nSpp * runs times).
  trueRli <- rli.calc(spData, tree, FALSE, FALSE, runs = 1)
  for(n in 1:nSpp){ #test with n species from the entire set
    diff = rep(NA, runs) #try runs times each sample size
    for(r in 1:runs){ #do r runs for each n species
      rndComm = rep(NA, nSpp)
      rndSpp = sample(nSpp, n)
      rndComm[rndSpp] = spData[rndSpp] #non-sampled species stay NA (treated as DD)
      #absolute difference between true and sampled rli for this run
      diff[r] = abs(trueRli - rli.calc(rndComm, tree, FALSE, FALSE, runs = 1))
    }
    accum[n] = quantile(diff, (1-p)) #upper confidence limit of the error
  }
  return(accum) #returns the accumulation curve of confidence limit of sampled RLI
}
#' Mapping the Red List Index.
#' @description Creates a map for the red list index according to species distribution and threat status.
#' @param spData Either a vector with species assessment categories for a single point in time or a matrix with two points in time in different columns (species x date). Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param layers Species distributions (0/1), a Raster* object as defined by package raster.
#' @param layers2 Species distributions (0/1) on the second point in time, a Raster* object as defined by package raster. If there are two dates but no layers2, the distributions are assumed to be kept constant in time.
#' @param tree An hclust or phylo object (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to Extinct/Extinct in the Wild (5).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Each species weight can further be influenced by how much it uniquely contributes to the phylogenetic or functional diversity of the group (Cardoso et al. in prep.).
#' @return A RasterLayer with point values (if a single date is given) or change per cell (if two dates are given).
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PLoS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PLoS One, 2: e140.
#' @examples sp1 <- raster::raster(matrix(c(1,1,1,0,0,0,0,0,NA), ncol = 3))
#' sp2 <- raster::raster(matrix(c(1,0,0,1,0,0,1,0,NA), ncol = 3))
#' sp3 <- raster::raster(matrix(c(1,0,0,0,0,0,0,0,NA), ncol = 3))
#' sp4 <- raster::raster(matrix(c(0,1,1,1,1,1,1,1,NA), ncol = 3))
#' layers <- raster::stack(sp1, sp2, sp3, sp4)
#' spData <- c("CR","EN","VU","LC")
#' raster::plot(rli.map(spData, layers))
#' @export
rli.map <- function (spData, layers, layers2 = NULL, tree = NULL){
  ## Two assessment dates given: map each date and return the per-cell change.
  if(!is.null(dim(spData))){
    if(is.null(layers2)){
      layers2 <- layers  #distributions assumed constant over time
    }
    firstMap <- rli.map(spData[,1], layers = layers, tree = tree)
    secondMap <- rli.map(spData[,2], layers = layers2, tree = tree)
    return(secondMap - firstMap)
  }
  ## Single date: flatten the species stack into a cells x species matrix,
  ## traversing cells row by row (top-left to bottom-right).
  layers = raster::as.array(layers)
  nSpp <- dim(layers)[3]
  cellMat = matrix(NA, (nrow(layers) * ncol(layers)), nSpp)
  cell = 0
  for (i in 1:nrow(layers)){
    for(j in 1:ncol(layers)){
      cell = cell + 1
      cellMat[cell,] = layers[i,j,]
    }
  }
  ## RLI per cell, using only the species present (value 1) in that cell.
  rliCells = rep(NA, nrow(cellMat))
  for (cell in 1:nrow(cellMat)){
    present <- ifelse(cellMat[cell,] == 1, spData, NA)
    rliCells[cell] = rli.calc(present, tree = tree)
  }
  ## Rebuild a raster in the same row-major order used above.
  rliMap = raster::raster(matrix(rliCells, nrow = nrow(layers), byrow = TRUE))
  return(rliMap)
}
#' Occurrence records for Hogna maderiana (Walckenaer, 1837).
#'
#' Occurrence records for Hogna maderiana (Walckenaer, 1837).
#'
#' @docType data
#' @keywords datasets
#' @name red.records
#' @usage data(red.records)
#' @format Matrix of longitude and latitude (two columns) of occurrence records for Hogna maderiana (Walckenaer, 1837), a spider species from Madeira Island.
NULL
#' Geographic range for Hogna maderiana (Walckenaer, 1837).
#'
#' Geographic range for Hogna maderiana (Walckenaer, 1837).
#'
#' @docType data
#' @keywords datasets
#' @name red.range
#' @usage data(red.range)
#' @format RasterLayer object as defined by package raster of range for Hogna maderiana (Walckenaer, 1837), a spider species from Madeira Island.
NULL
#' Environmental layers for Madeira.
#'
#' Average annual temperature, total annual precipitation, altitude and landcover for Madeira Island (Fick & Hijmans 2017, Tuanmu & Jetz 2014).
#'
#' @docType data
#' @keywords datasets
#' @name red.layers
#' @usage data(red.layers)
#' @format RasterStack object as defined by package raster.
#' @references Fick, S.E. & Hijmans, R.J. (2017) Worldclim 2: new 1-km spatial resolution climate surfaces for global land areas. International Journal of Climatology, in press.
#' @references Tuanmu, M.-N. & Jetz, W. (2014) A global 1-km consensus land-cover product for biodiversity and ecosystem modeling. Global Ecology and Biogeography, 23: 1031-1045.
NULL
#'
#'
#' World country borders.
#'
#' World country borders.
#'
#' @docType data
#' @keywords datasets
#' @name worldborders
#' @usage data(worldborders)
#' @format SpatialPolygonsDataFrame.
NULL
| /R/red.R | no_license | cardosopmb/red | R | false | false | 92,725 | r | #####RED - IUCN Redlisting Tools
#####Version 1.5.0 (2020-05-04)
#####By Pedro Cardoso
#####Maintainer: pedro.cardoso@helsinki.fi
#####Reference: Cardoso, P.(2017) An R package to facilitate species red list assessments according to the IUCN criteria. Biodiversity Data Journal 5: e20530 doi: 10.3897/BDJ.5.e20530
#####Changed from v1.4.0:
#####added function rli.predict to interpolate and extrapolate linearly beyond the years assessed
#####added new options in functions rli and rli.multi on how to deal with DD species when bootstrapping
#####required packages
library("BAT")
library("dismo")
library("gdistance")
library("geosphere")
library("graphics")
library("grDevices")
library("jsonlite")
library("maptools")
library("methods")
library("raster")
library("rgdal")
library("rgeos")
library("sp")
library("stats")
library("utils")
#' @import gdistance
#' @import graphics
#' @import jsonlite
#' @import maptools
#' @import rgdal
#' @import rgeos
#' @import sp
#' @import stats
#' @import utils
#' @importFrom BAT contribution
#' @importFrom geosphere areaPolygon
#' @importFrom grDevices chull dev.copy dev.off pdf
#' @importFrom methods slot
#' @importFrom raster area cellStats clump crop extent extract getValues layerStats mask raster rasterize rasterToPoints rasterToPolygons reclassify res sampleRandom scalebar terrain trim writeRaster xmax xmin
raster::rasterOptions(maxmemory = 2e+09)
globalVariables(c("worldborders"))
###############################################################################
##############################AUX FUNCTIONS####################################
###############################################################################
## Project longitude/latitude coordinates to UTM.
## The UTM zone is derived from the westernmost longitude of the records.
longlat2utm <- function(longlat){
  coords <- as.matrix(longlat)
  utmZone <- floor((min(coords[,1]) + 180) / 6) + 1
  projStr <- paste("+proj=utm +zone=", utmZone, " ellps=WGS84", sep='')
  return(rgdal::project(coords, projStr))
}
## Convert UTM coordinates back to longitude/latitude (WGS84).
## utm: either a RasterLayer or a two-column matrix of eastness/northness.
## zone: UTM zone of the input; may be NULL for rasters already carrying a CRS.
## Returns a reprojected RasterLayer or a data.frame of long/lat points.
utm2longlat <- function(utm, zone){
  ## inherits() is the safe class test: class() can return a vector of names,
  ## which makes `class(x) == "..."` fail inside if() (error in R >= 4.3).
  if(inherits(utm, "RasterLayer")){
    if(!is.null(zone))
      raster::crs(utm) <- paste("+proj=utm +zone=", zone, sep="")
    ## nearest-neighbour resampling keeps the original cell values intact
    res <- raster::projectRaster(utm, crs = "+proj=longlat +datum=WGS84", method='ngb')
  } else {
    utm <- SpatialPoints(utm, CRS(paste("+proj=utm +zone=", zone,sep="")))
    res <- as.data.frame(spTransform(utm,CRS(paste("+proj=longlat"))))
  }
  return(res)
}
##warn if maxent.jar is not available
##Emits a warning with manual installation steps for maxent; called by
##red.setup() (and any modelling code) before attempting to use dismo::maxent.
##NOTE: the multi-line string below is user-facing text and is kept verbatim.
warnMaxent <- function(){
  warning("RED could not find maxent.jar.
1. Download the latest version of maxent from:
https://biodiversityinformatics.amnh.org/open_source/maxent/
2. Move the file maxent.jar to the java directory inside dismo package
(there should be a file named dismo.jar already there)
3. Install the latest version of java runtime environment (JRE) with the same architecture (32 or 64 bits) as your version of R:
http://www.oracle.com/technetwork/java/javase/downloads/jre8-downloads-2133155.html")
}
##detect which layers are categorical by checking if all values are integers
##and there are fewer than 50 unique values (heuristic, may fail)
find.categorical <- function(layers){
  categorical = c()
  for(l in 1:(dim(layers)[3])){
    lay <- raster::as.matrix(layers[[l]])
    lay <- as.vector(lay)
    lay <- lay[!is.na(lay)]
    ##compare each value with its floor: robust to floating-point
    ##accumulation, unlike the sum(floor(lay)) == sum(lay) shortcut
    if(all(lay == floor(lay)) && length(unique(lay)) < 50)
      categorical = c(categorical, l)
  }
  return(categorical)
}
##basic function to calculate the rli of any group of species
##spData: assessment categories as text (EX...LC, DD), integers 0 (LC) to 5 (EX),
##        or values already on the [0,1] RLI scale; NA means data deficient (DD)
##tree: optional hclust/phylo object to weight species by PD/FD contribution
##boot: if TRUE, return bootstrapped confidence limits instead of a point value
##dd: if TRUE, bootstrap resamples only the DD (NA) species, keeping assessed
##    species fixed; if FALSE, all species are resampled
##runs: number of bootstrap samples
##returns NA if all species are DD, a single value when boot = FALSE, or a
##1x3 matrix (LowCL, Median, UpCL; 2.5/50/97.5 percentiles) when boot = TRUE
rli.calc <- function(spData, tree = NULL, boot = FALSE, dd = FALSE, runs = 1000){
  if(all(is.na(spData))) ##nothing to compute if every species is DD
    return(NA)
  spData <- rli.convert(spData) ##call function to convert spData to a 0-1 scale
  if(is.null(tree)){ ##if not weighted by PD or FD
    if(!boot){ ##if no bootstrap to be made
      return (mean(spData, na.rm = TRUE))
    } else {
      run <- rep(NA, runs)
      if(!dd){
        for(i in 1:runs){
          rnd <- sample(spData, replace = TRUE) ##bootstrap with all species
          run[i] <- mean(rnd, na.rm = TRUE)
        }
      } else { ##bootstrap with only DD species
        nDD = sum(is.na(spData)) ##number of DD species
        rliBase = sum(spData, na.rm = TRUE) ##fixed contribution of assessed species
        for(i in 1:runs){
          ##draw plausible values for the DD species from the assessed ones
          rnd <- sample(spData[!is.na(spData)], nDD, replace = TRUE)
          run[i] <- (rliBase + sum(rnd)) / length(spData)
        }
      }
      res <- matrix(quantile(run, c(0.025, 0.5, 0.975)), nrow = 1)
      colnames(res) <- c("LowCL", "Median", "UpCL")
      return(res)
    }
  } else { ##if weighted by PD or FD -- NOTE(author): still to work, not available at the moment!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    comm <- matrix(1, nrow = 2, ncol = length(spData))
    contrib <- BAT::contribution(comm, tree, relative = TRUE)[1,]
    contrib <- contrib/sum(contrib[!is.na(spData)]) #needed to standardize the contribution by the total contribution of species living in the community
    if(!boot){ ##if no bootstrap to be made
      return(sum(spData * contrib, na.rm = TRUE))
    } else {
      run <- rep(NA, runs)
      for(i in 1:runs){
        rndSpp <- sample(length(spData), replace = TRUE)
        rndComm <- spData[rndSpp]
        rndContrib <- contrib[rndSpp]/sum(contrib[rndSpp]) ##renormalize weights of the resample
        run[i] <- sum(rndComm * rndContrib, na.rm = TRUE)
      }
      res <- matrix(quantile(run, c(0.025, 0.5, 0.975)), nrow = 1)
      colnames(res) <- c("LowCL", "Median", "UpCL")
      return(res)
    }
  }
}
##convert assessment categories to the [0,1] RLI scale.
##Accepts category strings (EX/EW/RE, CR, CR(PE), EN, VU, NT, LC, DD),
##integer scores on the 0 (LC) to 5 (EX) scale, or numeric values already in
##[0,1] (returned unchanged). DD and NA become NA.
rli.convert <- function(spData){
  if(!is.numeric(spData)){ ##if letters are given, convert to [0,1]
    spData <- replace(spData, which(spData == "EX" ), 0)
    spData <- replace(spData, which(spData == "EW" ), 0)
    spData <- replace(spData, which(spData == "RE" ), 0)
    spData <- replace(spData, which(spData == "CR" ), 0.2)
    spData <- replace(spData, which(spData == "CR(PE)" ), 0.2)
    spData <- replace(spData, which(spData == "EN" ), 0.4)
    spData <- replace(spData, which(spData == "VU" ), 0.6)
    spData <- replace(spData, which(spData == "NT" ), 0.8)
    spData <- replace(spData, which(spData == "LC" ), 1)
    spData <- replace(spData, which(spData == "DD" ), NA)
    spData <- as.numeric(spData)
  } else if (all(spData == floor(spData), na.rm = TRUE)){
    ##all integers (NAs = DD are allowed): a [0,5] scale is given, convert to [0,1].
    ##na.rm = TRUE is required -- without it all() returns NA when the vector
    ##contains DD species, making if() fail with an error.
    spData <- 1 - spData/5
  }
  return(spData)
}
##################################################################################
##################################MAIN FUNCTIONS##################################
##################################################################################
#' Setup GIS directory.
#' @description Setup directory where GIS files are stored.
#' @param gisPath Path to the directory where the gis files are stored.
#' @details Writes a txt file in the red directory allowing the package to always access the world GIS files directory.
#' @export
red.setDir <- function(gisPath = NULL){
  ## Ask interactively when no path was supplied.
  if(is.null(gisPath))
    gisPath <- readline("Input directory for storing world gis layers:")
  ## Persist the path (with a trailing slash) in a txt file inside the
  ## installed package directory so later sessions can locate the layers.
  dirWithSlash <- paste(gisPath, "/", sep = "")
  configFile <- paste(find.package("red"), "/red.txt", sep = "")
  dput(dirWithSlash, configFile)
}
#' Read GIS directory.
#' @description Read directory where GIS files are stored.
#' @details Reads a txt file pointing to where the world GIS files are stored.
#' @export
red.getDir <- function(){
  ## Path of the config file written by red.setDir().
  configFile <- paste(find.package("red"), "/red.txt", sep = "")
  ## Guard clause: warn and return NULL when the directory was never set.
  if (!file.exists(configFile)){
    warning(paste(configFile, "not found, please run red.setDir()"))
    return()
  }
  return(dget(configFile))
}
#' Download and setup GIS files.
#' @description Setup red to work with species distribution modelling and layers available online.
#' @details Please check that you have at least 50Gb free in your disk (and a fast internet connection) to download all files. In the end of the process "only" 17.4Gb will be left though. This function will:
#' 1. Check if maxent.jar is available in the dismo package directory.
#' 2. Ask user input for GIS directory.
#' 3. Download global bioclim and elevation files (20) from http://biogeo.ucdavis.edu/data/worldclim/v2.0/tif/base/wc2.0_30s_bio.zip.
#' 4. Download landcover files (12) from http://data.earthenv.org/consensus_landcover/without_DISCover/.
#' 5. Unzip all files and delete the originals.
#' 6. Create a new layer (1) with the dominant land cover at each cell.
#' 7. Resample all files (33) to approximately 10x10km (for use with widespread species) grid cells.
#' Sit back and enjoy, this should take a while.
#' @export
red.setup <- function(){
  ##test if maxent.jar is in the right directory
  if(!file.exists(paste(.libPaths()[[1]], "/dismo/java/maxent.jar", sep=""))){
    warnMaxent()
    return()
  }
  oldwd = getwd()
  on.exit(expr = setwd(oldwd)) ##restore working directory even on failure
  gisdir = red.setDir()
  setwd(gisdir)
  ##basic setup
  pb <- txtProgressBar(min = 0, max = 33, style = 3)
  ##download and process bioclim
  ##FIX: download target must match the name used by unzip()/file.remove()
  ##below (was "bioclim2.zip", which left the zip unopened and undeleted)
  download.file("http://biogeo.ucdavis.edu/data/worldclim/v2.0/tif/base/wc2.0_30s_bio.zip", "bioclim.zip")
  unzip(zipfile = "bioclim.zip")
  file.remove("bioclim.zip")
  for(i in 1:19){
    setTxtProgressBar(pb, i)
    if(i < 10)
      rast <- raster(paste("wc2.0_bio_30s_0", i, ".tif", sep=""))
    else
      rast <- raster(paste("wc2.0_bio_30s_", i, ".tif", sep=""))
    rast <- crop(rast, c(-180, 180, -56, 90)) ##drop Antarctica
    writeRaster(rast, paste("red_1km_", i, ".tif", sep=""))
    rast <- aggregate(rast, 10) ##~10km resolution for widespread species
    writeRaster(rast, paste("red_10km_", i, ".tif", sep=""))
    if(i < 10)
      file.remove(paste("wc2.0_bio_30s_0", i, ".tif", sep=""))
    else
      file.remove(paste("wc2.0_bio_30s_", i, ".tif", sep=""))
    gc()
  }
  ##download and process altitude
  setTxtProgressBar(pb, 20)
  download.file("http://biogeo.ucdavis.edu/data/climate/worldclim/1_4/grid/cur/alt_30s_bil.zip", "alt_30s_bil.zip")
  unzip(zipfile = "alt_30s_bil.zip")
  file.remove("alt_30s_bil.zip")
  rast <- raster("alt.bil")
  rast <- crop(rast, c(-180, 180, -56, 90))
  writeRaster(rast, "red_1km_20.tif")
  rast <- aggregate(rast, 10)
  writeRaster(rast, "red_10km_20.tif")
  file.remove("alt.bil")
  file.remove("alt.hdr")
  gc()
  ##download and process land cover
  altmask1 = raster("red_1km_20.tif")
  altmask10 = raster("red_10km_20.tif")
  ##FIX: loop over all 12 landcover classes (was 5:12, which never created
  ##red_*_21.tif to red_*_24.tif that the which.max loop below requires)
  for(i in 1:12){
    setTxtProgressBar(pb, (i+20))
    download.file(paste("http://data.earthenv.org/consensus_landcover/without_DISCover/Consensus_reduced_class_", i, ".tif", sep=""), destfile = paste("Consensus_reduced_class_", i, ".tif", sep=""), mode = "wb")
    rast <- raster(paste("Consensus_reduced_class_", i, ".tif", sep=""))
    rast <- mask(rast, altmask1)
    writeRaster(rast, paste("red_1km_", (i+20), ".tif", sep=""))
    rast <- aggregate(rast, 10)
    #maskLayer <- sum(altmask, rast)
    #maskLayer[!is.na(maskLayer)] <- 1
    rast <- mask(rast, altmask10)
    writeRaster(rast, paste("red_10km_", (i+20), ".tif", sep=""))
    file.remove(paste("Consensus_reduced_class_", i, ".tif", sep=""))
    gc()
  }
  remove(rast)
  ##create new rasters with most common landcover at each cell
  setTxtProgressBar(pb, 33)
  max1 <- raster()
  max10 <- raster()
  for(i in 21:32){
    rast <- raster(paste("red_1km_", i, ".tif", sep=""))
    max1 <- raster::stack(max1, rast)
    rast <- raster(paste("red_10km_", i, ".tif", sep=""))
    max10 <- raster::stack(max10, rast)
  }
  max1 <- which.max(max1)
  writeRaster(max1, "red_1km_33.tif")
  max10 <- which.max(max10)
  writeRaster(max10, "red_10km_33.tif")
  remove(max1, max10)
  gc()
  setwd(oldwd)
  ##Now the files should be named as:
  ##red_1km_1.tif
  ##...
  ##red_10km_33.tif
  ##Where 1 to 19 are the corresponding bioclim variables, 20 is altitude, 21 to 32 are landcover proportion and 33 is most common landcover per cell
  #download country borders (not working Feb. 2017)
  #download.file("http://biogeo.ucdavis.edu/data/gadm2.6/countries_gadm26.rds", destfile = paste("worldcountries.rds"), mode = "wb")
}
#' Download taxon records from GBIF.
#' @description Downloads species or higher taxon data from GBIF and outputs non-duplicate records with geographical coordinates.
#' @param taxon Taxon name.
#' @details As always when using data from multiple sources the user should be careful and check if records "make sense". This can be done by either ploting them in a map (e.g. using red::map.draw()) or using red::outliers().
#' @return A data.frame with longitude and latitude, plus species names if taxon is above species.
#' @examples records("Nephila senegalensis")
#' @export
records <- function(taxon){
  ## Split "Genus species" into its parts; a single word means a genus query.
  nameParts = unlist(strsplit(taxon, split = " ")[[1]])
  ## Query GBIF; the "*" suffix matches any epithet variant.
  dat <- dismo::gbif(nameParts[1], paste(nameParts[2], "*", sep = ""))
  dat <- dat[c("species","lon","lat")]              #keep taxonomy and coordinates only
  dat <- dat[!(is.na(dat$lon) | is.na(dat$lat)),]   #drop records lacking coordinates
  dat <- unique(dat)                                #drop duplicate records
  colnames(dat) <- c("Species", "long", "lat")
  if (length(nameParts) == 1){
    ## Genus-level query: label records missing a species epithet.
    dat[which(is.na(dat[,1])),1] <- paste(nameParts, "sp.")
  } else {
    ## Species-level query: the species column is redundant.
    dat <- dat[,-1]
  }
  return(dat)
}
#' Move records to closest non-NA cell.
#' @description Identifies and moves presence records to cells with environmental values.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster.
#' @param buffer Maximum distance in map units that a record will move. If 0 all NA records will be changed.
#' @details Often records are in coastal or other areas for which no environmental data is available. This function moves such records to the closest cells with data so that no information is lost during modelling.
#' @return A matrix with new coordinate values.
#' @examples rast <- raster::raster(matrix(c(rep(NA,100), rep(1,100), rep(NA,100)), ncol = 15))
#' pts <- cbind(runif(100, 0, 0.55), runif(100, 0, 1))
#' raster::plot(rast)
#' points(pts)
#' pts <- move(pts, rast)
#' raster::plot(rast)
#' points(pts)
#' @export
move <- function(longlat, layers, buffer = 0){
  refLayer <- layers[[1]]                  #a single layer defines the NA mask
  recValues <- extract(refLayer, longlat)  #environmental value at each record
  naRecords <- which(is.na(recValues))
  suppressWarnings(
    for(i in naRecords){ #records falling on NA cells are candidates to move
      ## Distance from this record to every cell, masked to data cells only.
      distRaster = raster::distanceFromPoints(refLayer, longlat[i,])
      distRaster = mask(distRaster, refLayer)
      minDist = raster::minValue(distRaster)
      ## Move only if within buffer (buffer <= 0 means always move).
      if(buffer <= 0 || buffer > minDist){
        nearest = rasterToPoints(distRaster, function(x) x == minDist)
        longlat[i,] = nearest[1,1:2]
      }
    }
  )
  return(longlat)
}
#' Visual detection of outliers.
#' @description Draws plots of sites in geographical (longlat) and environmental (2-axis PCA) space.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster. It can be any set of environmental layers thought to allow the identification of environmental outliers.
#' @details Erroneous data sources or errors in transcriptions may introduce outliers that can be easily detected by looking at simple graphs of geographical or environmental space.
#' @return A data.frame with coordinate values and distance to centroid in pca is returned. Two plots are drawn for visual inspection. The environmental plot includes row numbers for easy identification of possible outliers.
#' @examples data(red.records)
#' data(red.layers)
#' outliers(red.records, red.layers[[1:3]])
#' @export
outliers <- function(longlat, layers){
  ##reduce environmental space to two PCA axes; with the full 33-layer stack
  ##from raster.read only the 19 climatic layers are used
  if(dim(layers)[3] == 33) #if layers come from raster.read
    pca <- raster.reduce(layers[[1:19]], n = 2)
  else
    pca <- raster.reduce(layers, n = 2)
  ##extract pca values from longlat and drop records with no data
  pca <- as.data.frame(raster::extract(pca, longlat))
  goodRows <- which(!is.na(pca[,1]))
  pca <- pca[goodRows,]
  longlat <- longlat[goodRows,]
  ##FIX: restore the user's graphical parameters on exit instead of leaving
  ##par(mfrow) permanently changed
  oldpar <- par(mfrow = c(1,2))
  on.exit(par(oldpar))
  map.draw(longlat, layers[[1]], spName = "Geographical")
  raster::plot(pca, main = "Environmental", type = "n")
  centroid = colMeans(pca)
  text(centroid[1], centroid[2], label = "X")
  ##label each record with its row name to ease identification of outliers
  for(i in 1:nrow(pca)){
    text(pca[i,1], pca[i,2], label = row.names(longlat)[i])
  }
  ##build new matrix ordered by distance to centroid
  dist2centroid = apply(pca, 1, function(x) dist(rbind(x, centroid)))
  out = as.data.frame(cbind(longlat, dist2centroid))
  out = out[order(-dist2centroid),]
  return(out)
}
#' Spatial thinning of occurrence records.
#' @description Thinning of records with minimum distances either absolute or relative to the species range.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param distance Distance either in relative terms (proportion of maximum distance between any two records) or in raster units.
#' @param relative If TRUE, represents the proportion of maximum distance between any two records. If FALSE, is in raster units.
#' @param runs Number of runs
#' @details Clumped distribution records due to ease of accessibility of sites, emphasis of sampling on certain areas in the past, etc. may bias species distribution models.
#' The algorithm used here eliminates records closer than a given distance to any other record. The choice of records to eliminate is random, so a number of runs are made and the one keeping more of the original records is chosen.
#' @return A matrix of species occurrence records separated by at least the given distance.
#' @examples records <- matrix(sample(100), ncol = 2)
#' par(mfrow=c(1,2))
#' graphics::plot(records)
#' records <- thin(records, 0.1)
#' graphics::plot(records)
#' @export
thin <- function(longlat, distance = 0.01, relative = TRUE, runs = 100){
  longlat = longlat[!duplicated(longlat),] #first, remove duplicate rows
  nSites = nrow(longlat)
  if(nSites < 4) #too few sites to thin
    return(longlat)
  ##if relative, calculate maxDist between any two points
  if(relative){
    if(nSites < 40){ #if limited number of sites use all data
      maxDist = 0
      for(x in 1:(nSites-1)){
        for(y in (x+1):nSites){
          maxDist = max(maxDist,((longlat[x,1]-longlat[y,1])^2+(longlat[x,2]-longlat[y,2])^2)^.5)
        }
      }
    } else { #if many sites use hypothenusa of square encompassing all of them
      horiDist = max(longlat[,1]) - min(longlat[,1])
      vertDist = max(longlat[,2]) - min(longlat[,2])
      maxDist = (horiDist^2 + vertDist^2)^0.5
    }
    distance = maxDist*distance
  }
  listSites = matrix(longlat[1,], ncol=2, byrow = TRUE)
  for (r in 1:runs){
    longlat = longlat[sample(nSites),] ##shuffle rows (sites)
    ##FIX: keep rndSites a matrix even while it holds a single site; plain
    ##longlat[1,] drops to a vector, so nrow() below returned NULL and the
    ##final comparison errored whenever no further site could be added
    rndSites = longlat[1, , drop = FALSE] ##start with first random site
    for(newSite in 2:nSites){
      for(oldSite in 1:(newSite-1)){
        addSite = TRUE
        dist = ((longlat[newSite,1]-longlat[oldSite,1])^2+(longlat[newSite,2]-longlat[oldSite,2])^2)^.5
        if(dist < distance){ #too close to an already accepted site
          addSite = FALSE
          break
        }
      }
      if(addSite)
        rndSites = rbind(rndSites, longlat[newSite,])
    }
    if(nrow(rndSites) > nrow(listSites)) #keep the run retaining most sites
      listSites = rndSites
  }
  return(as.matrix(listSites))
}
#' Read and buffer raster layers.
#' @description Read raster layers of environmental or other variables and crop them to a given extent around the known occurrences.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster.
#' @param ext Either extent of map or buffer around the known records used to crop layers. If buffer, it is relative to the maximum distance between any two records.
#' @details If layers are not given, the function will read either 30 arc-second (approx. 1km) or 5 arc-minutes (approx. 10km) resolution rasters from worldclim (Fick & Hijmans 2017) and landcover (Tuanmu & Jetz 2014) if red.setup() is run previously.
#' @return A RasterStack object (If no layers are given: Variables 1-19 = bioclim, 20 = elevation, 21-32 = proportion landcover, 33 = most common landcover).
#' @references Fick, S.E. & Hijmans, R.J. (2017) Worldclim 2: new 1-km spatial resolution climate surfaces for global land areas. International Journal of Climatology, in press.
#' @references Tuanmu, M.-N. & Jetz, W. (2014) A global 1-km consensus land-cover product for biodiversity and ecosystem modeling. Global Ecology and Biogeography, 23: 1031-1045.
#' @examples data(red.layers)
#' data(red.records)
#' par(mfrow=c(1,2))
#' raster::plot(red.layers[[1]])
#' points(red.records)
#' croppedLayers <- raster.read(red.records, red.layers, 0.1)
#' raster::plot(croppedLayers[[1]])
#' points(red.records)
#' @export
raster.read <- function(longlat, layers = NULL, ext = 1){
  xmin = min(longlat[,1])
  xmax = max(longlat[,1])
  xlen = xmax - xmin
  ymin = min(longlat[,2])
  ymax = max(longlat[,2])
  ylen = ymax - ymin
  if(is.null(layers)){ ##if no layers are provided read the ones available
    gisdir = red.getDir()
    ##use 1km layers for restricted species, 10km layers for widespread ones
    if(eoo(longlat) < 200000){
      layers <- raster::stack(raster::raster(paste(gisdir, "red_1km_1.tif", sep = "")))
      for(i in 2:33)
        layers <- raster::stack(layers, raster::raster(paste(gisdir, "red_1km_", i, ".tif", sep = "")))
    } else {
      layers <- raster::stack(raster::raster(paste(gisdir, "red_10km_1.tif", sep = "")))
      for(i in 2:33)
        layers <- raster::stack(layers, raster::raster(paste(gisdir, "red_10km_", i, ".tif", sep = "")))
    }
    ##determine longitude limits of species to check if crop and paste are needed around longitude 180 for Pacific species
    ##FIX: the inner record filter is a vectorized condition, so it must use
    ##the elementwise `&` (scalar `&&` errors on length > 1 in R >= 4.3)
    if(xmin < -90 && xmax > 90 && sum(longlat[longlat[,1] < 90 & longlat[,1] > -90,]) != 0){
      ##crop and merge layers, swapping hemispheres around the dateline
      rightHalf = crop(layers, c(0,180,raster::extent(layers)@ymin,raster::extent(layers)@ymax))
      raster::extent(rightHalf) <- c(-180,0,raster::extent(layers)@ymin,raster::extent(layers)@ymax)
      leftHalf = crop(layers, c(-180,0,raster::extent(layers)@ymin,raster::extent(layers)@ymax))
      raster::extent(leftHalf) <- c(0,180,raster::extent(layers)@ymin,raster::extent(layers)@ymax)
      layers <- merge(rightHalf, leftHalf)
      ##shift record longitudes by 180 degrees to match the recentered layers
      for(i in 1:nrow(longlat))
        if(longlat[i,1] > 0)
          longlat[i,1] = longlat[i,1] - 180
        else
          longlat[i,1] = longlat[i,1] + 180
    }
  }
  if(length(ext) == 4) ##if absolute extent is given crop and return, else calculate buffer
    return(crop(layers, ext))
  if(xlen == 0) ##in case some dimensions are inexistent consider equal to extent
    xlen = ext
  if(ylen == 0)
    ylen = ext
  ##calculate new extent of layers and crop, never beyond the layer limits
  ext = max(1, ((xlen + ylen) * ext))
  xmin <- max(raster::extent(layers)@xmin, xmin-ext)
  xmax <- min(raster::extent(layers)@xmax, xmax+ext)
  ymin <- max(raster::extent(layers)@ymin, ymin-ext)
  ymax <- min(raster::extent(layers)@ymax, ymax+ext)
  layers <- crop(layers, c(xmin,xmax,ymin,ymax))
  return(layers)
}
#' Uniformize raster layers.
#' @description Crop raster layers to minimum size possible and uniformize NA values across layers.
#' @param layers Raster* object as defined by package raster.
#' @details Excludes all marginal rows and columns with only NA values and change values to NA if they are NA in any of the layers.
#' @return A Raster* object, same class as layers.
#' @examples data(red.layers)
#' raster::plot(raster.clean(red.layers))
#' @export
raster.clean <- function(layers){
  ## Summing across layers yields NA wherever any single layer is NA; use
  ## that as a common mask so every layer shares the same NA cells.
  naMask <- sum(layers)
  naMask[!is.na(naMask)] <- 1
  masked <- mask(layers, naMask)
  ## Drop marginal rows and columns containing nothing but NAs.
  return(trim(masked))
}
#' Reduce dimensionality of raster layers.
#' @description Reduce the number of layers by either performing a PCA on them or by eliminating highly correlated ones.
#' @param layers Raster* object as defined by package raster.
#' @param method Either Principal Components Analysis ("pca", default) or Pearson's correlation ("cor").
#' @param n Number of layers to reduce to.
#' @param thres Value for pairwise Pearson's correlation (in absolute value) above which one of the layers (randomly selected) is eliminated.
#' @details Using a large number of explanatory variables in models with few records may lead to overfitting. This function allows to avoid it as much as possible.
#' If both n and thres are given, n has priority. If method is not recognized and layers come from raster.read function, only landcover is reduced by using only the dominating landuse of each cell.
#' @return A RasterStack object.
#' @export
raster.reduce <- function(layers, method = "pca", n = NULL, thres = NULL){
  ##method = "pca, cor", if unrecognized method only reduce landcover but not climate
  out <- raster::stack()
  if(dim(layers)[3] == 33){ ##check if layers are obtained with raster.read
    out <- raster::stack(layers[[33]]) ##keep the dominant landcover layer aside
    layers = layers[[1:19]] ##only the 19 climatic layers are candidates for reduction
  }
  if(method == "cor"){ ##if correlation
    if(is.null(n)){
      if(is.null(thres))
        thres = 0.7
      for(i in seq_len(dim(layers)[3])){ ##delete layers until none are correlated above threshold
        ##use absolute correlations: strong negative collinearity is as redundant
        ##as positive (fix: this branch previously compared signed values while
        ##the n-based branch below already used abs(), an inconsistency)
        cor = abs(as.matrix(as.dist(layerStats(layers, 'pearson', na.rm = TRUE)[[1]])))
        if(max(cor) < thres)
          break
        ##randomly drop one of the two layers forming the strongest pair
        corLayer = sample(which(cor == max(cor), arr.ind = TRUE)[,1],1)
        layers = layers[[-corLayer]]
      }
    } else {
      while (dim(layers)[3] > n){ ##delete layers until reaching n layers
        cor = abs(as.matrix(as.dist(layerStats(layers, 'pearson', na.rm = TRUE)[[1]])))
        corLayer = sample(which(cor == max(cor), arr.ind = TRUE)[,1],1)
        layers = layers[[-corLayer]]
      }
    }
  } else if(method == "pca"){ ##if pca
    if(is.null(n))
      n = 3 ##default number of components to keep
    ##sample at most 1000 cells (or half the non-NA cells) to keep prcomp fast
    if(sum(!is.na(getValues(layers[[1]]))) > 2000)
      sr <- sampleRandom(layers, 1000)
    else
      sr <- sampleRandom(layers, as.integer(sum(!is.na(getValues(layers[[1]])))/2))
    pca <- prcomp(sr)
    ##project every cell onto the first n principal components
    layers <- raster::predict(layers, pca, index = 1:n)
    for(i in 1:n)
      names(layers[[i]]) <- paste("pca",i)
  }
  out <- raster::stack(layers, out)
  return(out)
}
#' Create distance layer.
#' @description Creates a layer depicting distances to records using the minimum, average, distance to the minimum convex polygon or distance taking into account a cost surface.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of species occurrence records.
#' @param layers Raster* object as defined by package raster to serve as model to create distance layer. Cost surface in case of param ="cost".
#' @param type text string indicating whether the output should be the "minimum", "average", "mcp" or "cost" distance to all records. "mcp" means the distance to the minimum convex polygon encompassing all records.
#' @details Using distance to records in models may help limiting the extrapolation of the predicted area much beyond known areas.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' alt = red.layers[[3]]
#' data(red.records)
#' par(mfrow=c(3,2))
#' raster::plot(alt)
#' points(red.records)
#' raster::plot(raster.distance(red.records, alt))
#' raster::plot(raster.distance(red.records, alt, type = "average"))
#' raster::plot(raster.distance(red.records, alt, type = "mcp"))
#' raster::plot(raster.distance(red.records, alt, type = "cost"))
#' @export
raster.distance <- function(longlat, layers, type = "minimum"){
  ## use a single layer as the template grid and zero it out, keeping only
  ## its geometry and NA pattern
  if(dim(layers)[3] > 1)
    layers <- layers[[1]]
  layers[!is.na(layers)] <- 0
  if(type == "average"){
    ## accumulate the distance surface to each record, then divide by the
    ## number of records
    nRec <- nrow(longlat)
    for(r in seq_len(nRec))
      layers <- layers + raster::distanceFromPoints(layers, longlat[r,])
    layers <- layers/nRec
    names(layers) <- "average distance"
  } else if (type == "mcp"){
    ## distance to the minimum convex polygon enclosing all records
    hullIdx <- chull(longlat)
    hullPts <- longlat[c(hullIdx, hullIdx[1]),]
    mcpPoly <- SpatialPolygons(list(Polygons(list(Polygon(hullPts)), 1)))
    ## rasterize the polygon and measure distance to any of its cells
    longlat <- rasterToPoints(rasterize(mcpPoly, layers))[,1:2]
    layers <- mask(raster::distanceFromPoints(layers, longlat), layers)
    names(layers) <- "mcp distance"
  } else if (type == "cost"){
    ## accumulated least-cost distance over the supplied cost surface
    tr <- transition(layers, function(x) 1/mean(x), 8)
    tr <- geoCorrection(tr)
    layers <- accCost(tr, as.matrix(longlat))
    names(layers) <- "cost distance"
  } else {
    ## default: straight-line distance to the nearest record
    layers <- mask(raster::distanceFromPoints(layers, longlat), layers)
    names(layers) <- "minimum distance"
  }
  return(layers)
}
#' Create longitude layer.
#' @description Create a layer depicting longitude based on any other.
#' @param layers Raster* object as defined by package raster.
#' @details Using longitude (and latitude) in models may help limiting the extrapolation of the predicted area much beyond known areas.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.long(red.layers))
#' @export
raster.long <- function(layers){
  if(dim(layers)[3] > 1)
    layers <- layers[[3]]
  ## coordinates of every data cell in the template layer
  cellXY <- rasterToPoints(layers)[,1:2]
  ## write each cell's own x coordinate back onto the grid, masked to data cells
  long <- mask(rasterize(cellXY, layers, cellXY[,1]), layers)
  names(long) <- "longitude"
  return(long)
}
#' Create latitude layer.
#' @description Create a layer depicting latitude based on any other.
#' @param layers Raster* object as defined by package raster.
#' @details Using latitude (and longitude) in models may help limiting the extrapolation of the predicted area much beyond known areas.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.lat(red.layers[[1]]))
#' @export
raster.lat <- function(layers){
  if(dim(layers)[3] > 1)
    layers <- layers[[3]]
  ## coordinates of every data cell in the template layer
  cellXY <- rasterToPoints(layers)[,1:2]
  ## write each cell's own y coordinate back onto the grid, masked to data cells
  lat <- mask(rasterize(cellXY, layers, cellXY[,2]), layers)
  names(lat) <- "latitude"
  return(lat)
}
#' Create eastness layer.
#' @description Create a layer depicting eastness based on an elevation layer.
#' @param dem RasterLayer object of elevation (a digital elevation model - DEM) as defined by package raster.
#' @details Using elevation, aspect can be calculated. Yet, it is a circular variable (0 = 360) and has to be converted to northness and eastness to be useful for modelling.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.east(red.layers[[3]]))
#' @export
raster.east <- function(dem){
  ## aspect is circular; its sine isolates the east-west facing component
  return(sin(terrain(dem, opt = "aspect")))
}
#' Create northness layer.
#' @description Create a layer depicting northness based on an elevation layer.
#' @param dem RasterLayer object of elevation (a digital elevation model - DEM) as defined by package raster.
#' @details Using elevation, aspect can be calculated. Yet, it is a circular variable (0 = 360) and has to be converted to northness and eastness to be useful for modelling.
#' @return A RasterLayer object.
#' @examples data(red.layers)
#' raster::plot(raster.north(red.layers[[3]]))
#' @export
raster.north <- function(dem){
  ## aspect is circular; its cosine isolates the north-south facing component
  return(cos(terrain(dem, opt = "aspect")))
}
#' Predict species distribution.
#' @description Prediction of potential species distributions using maximum entropy (maxent).
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layers Predictor variables, a Raster* object as defined by package raster.
#' @param error Vector of spatial error in longlat (one element per row of longlat) in the same unit as longlat. Used to move any point randomly within the error radius.
#' @param year Vector of sampling years in longlat (one element per row of longlat). Used to exclude old records with a given probability proportional to time passed since sampling (never excluded only for current year).
#' @param idconf Vector of identification confidence in longlat (one element per row of longlat). Used to exclude uncertain records with a given probability. Can be on any scale where max values are certain (e.g. from 1 - very uncertain to 10 - holotype).
#' @param categorical Vector of layer indices of categorical (as opposed to quantitative) data. If NULL the package will try to find them automatically based on the data.
#' @param thres Threshold of logistic output used for conversion of probabilistic to binary (presence/absence) maps. If 0 this will be the value that maximizes the sum of sensitivity and specificity.
#' @param testpercentage Percentage of records used for testing only. If 0 all records will be used for both training and testing.
#' @param mcp Used for a precautionary approach. If TRUE, all areas predicted as present but outside the minimum convex hull polygon encompassing all occurrence records are converted to absence. Exceptions are cells connected to other areas inside the polygon.
#' @param points If TRUE, force map to include cells with presence records even if suitable habitat was not identified.
#' @param eval If TRUE, build a matrix with AUC, Kappa, TSS, EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model).
#' @param runs If <= 0 no ensemble modelling is performed. If > 0, ensemble modelling with n runs is made. For each run, a new random sample of occurrence records (if testpercentage > 0), background points and predictive variables (if subset > 0) are chosen. In the ensemble model, each run is weighted as max(0, (runAUC - 0.5)) ^ 2.
#' @param subset Number of predictive variables to be randomly selected from layers for each run if runs > 0. If <= 0 all layers are used on all runs. Using a small number of layers is usually better than using many variables for rare species, with few occurrence records (Lomba et al. 2010, Breiner et al. 2015).
#' @details Builds maxent (maximum entropy) species distribution models (Phillips et al. 2004, 2006; Elith et al. 2011) using function maxent from R package dismo (Hijmans et al. 2017). Dismo requires the MaxEnt species distribution model software, a java program that can be downloaded from http://biodiversityinformatics.amnh.org/open_source/maxent. Copy the file 'maxent.jar' into the 'java' folder of the dismo package. That is the folder returned by system.file("java", package="dismo"). You need MaxEnt version 3.3.3b or higher. Please note that this program (maxent.jar) cannot be redistributed or used for commercial or for-profit purposes.
#' @return List with either one or two raster objects (depending if ensemble modelling is performed, in which case the second is a probabilistic map from all the runs) and, if eval = TRUE, a matrix with AUC, Kappa, TSS, EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model). Aggregate values are taken from maps after transformation of probabilities to incidence, with presence predicted for cells with ensemble values > 0.5.
#' @references Breiner, F.T., Guisan, A., Bergamini, A., Nobis, M.P. (2015) Overcoming limitations of modelling rare species by using ensembles of small models. Methods in Ecology and Evolution, 6: 1210-1218.
#' @references Hijmans, R.J., Phillips, S., Leathwick, J., Elith, J. (2017) dismo: Species Distribution Modeling. R package version 1.1-4. https://CRAN.R-project.org/package=dismo
#' @references Lomba, A., Pellissier, L., Randin, C.F., Vicente, J., Moreira, F., Honrado, J., Guisan, A. (2010) Overcoming the rare species modelling paradox: a novel hierarchical framework applied to an Iberian endemic plant. Biological Conservation, 143: 2647-2657.
#' @references Phillips, S.J., Dudik, M., Schapire, R.E. (2004) A maximum entropy approach to species distribution modeling. Proceedings of the Twenty-First International Conference on Machine Learning. p. 655-662.
#' @references Phillips, S.J., Anderson, R.P., Schapire, R.E. (2006) Maximum entropy modeling of species geographic distributions. Ecological Modelling, 190: 231-259.
#' @references Elith, J., Phillips, S.J., Hastie, T., Dudik, M., Chee, Y.E., Yates, C.J. (2011) A statistical explanation of MaxEnt for ecologists. Diversity and Distributions, 17: 43-57.
#' @export
map.sdm <- function(longlat, layers, error = NULL, year = NULL, idconf = NULL, categorical = NULL, thres = 0, testpercentage = 0, mcp = TRUE, points = FALSE, eval = TRUE, runs = 0, subset = 0){
##raise raster's memory ceiling so large prediction grids stay in RAM
raster::rasterOptions(maxmemory = 2e+09)
origLonglat = longlat
##if ensemble is to be done
if(runs > 0){
longlat = origLonglat
#if there is spatial error randomly move points within its radius
#NOTE(review): the jitter and the year/idconf filters below run once, before
#the run loop, not once per run - confirm this matches the intent of 'runs'
if(!is.null(error)){
for(i in 1:nrow(longlat)){
#move up to given error (angular movement converted to x and y)
#NOTE(review): rndAngle is drawn in degrees but cos/sin take radians - the
#direction is still pseudo-random, but confirm this is intended
rndAngle = sample(1:360, 1)
rndDist = runif(1, 0, error[i])
longlat[i,1] = longlat[i,1] + rndDist * cos(rndAngle)
longlat[i,2] = longlat[i,2] + rndDist * sin(rndAngle)
}
}
#if there is year: drop old records with probability growing with record age
if(!is.null(year)){
for(i in 1:nrow(longlat)){
#NOTE(review): rows are removed inside a loop over the original 1:nrow -
#after a removal the indices shift, i desynchronizes from year[i] and can
#run past the end of the shrunken matrix; verify
if(year[i] < sample(min(year):as.integer(substr(Sys.Date(), 1, 4)), 1))
longlat = longlat[-i,]
}
}
#if there is idconf: drop uncertain identifications probabilistically
if(!is.null(idconf)){
for(i in 1:nrow(longlat)){
#NOTE(review): same index-shift concern as the year filter above
if(idconf[i] < sample(1:max(idconf), 1))
longlat = longlat[-i,]
}
}
if(eval)
runEval = matrix(NA, nrow = 1, ncol = 7) #placeholder row, dropped after the loop
##weighted-sum accumulator on the grid of the first layer, starting at 0
runMap <- rasterize(longlat, layers[[1]], field = 0, background = 0)
pb <- txtProgressBar(min = 0, max = runs, style = 3)
totalAUC = 0
for(i in 1:runs){
##each run recurses with runs = 0 (a single model), optionally on a random
##subset of the predictor layers
if(subset > 0 && subset < dim(layers)[3]){
runLayers <- layers[[sample.int(dim(layers)[3], subset)]]
thisRun <- map.sdm(longlat, runLayers, error = NULL, year = NULL, idconf = NULL, categorical, thres, testpercentage, mcp, points, eval, runs = 0, subset = 0)
} else {
thisRun <- map.sdm(longlat, layers, error = NULL, year = NULL, idconf = NULL, categorical, thres, testpercentage, mcp, points, eval, runs = 0, subset = 0)
}
runAUC = 1
if(eval){
runAUC <- thisRun[[2]][1]
runAUC <- max(0, (runAUC - 0.5)) ^ 2 #weight the map by its AUC above 0.5 to the square
runEval <- rbind(runEval, thisRun[[2]])
thisRun <- thisRun[[1]]
}
totalAUC = totalAUC + runAUC
runMap <- runMap + (thisRun * runAUC)
setTxtProgressBar(pb, i)
}
##normalize the weighted sum into a 0-1 ensemble agreement surface
runMap <- raster::calc(runMap, function(x) {x/totalAUC})
##binary maps at the 2.5%, 50% and 97.5% ensemble agreement cutoffs
upMap <- reclassify(runMap, matrix(c(0,0.025,0,0.025,1,1), ncol = 3, byrow = TRUE))
consensusMap <- reclassify(runMap, matrix(c(0,0.499,0,0.499,1,1), ncol = 3, byrow = TRUE))
downMap <- reclassify(runMap, matrix(c(0,0.975,0,0.975,1,1), ncol = 3, byrow = TRUE))
##precautionary clip of the consensus map to patches touching the records' MCP
if(mcp && aoo(consensusMap) >= 4)
consensusMap <- map.habitat(longlat, consensusMap, mcp = TRUE, eval = FALSE)
if(eval){
runEval <- runEval[-1,] #drop the NA placeholder row
##per-metric confidence limits across runs (0.975 / 0.5 / 0.025 quantiles)
clEval <- matrix(NA, nrow = 3, ncol = 7)
colnames(clEval) <- c("AUC", "Kappa", "TSS", "EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)")
rownames(clEval) <- c("UpCL", "Consensus", "LowCL")
clEval[1,] <- apply(runEval, 2, quantile, probs= 0.975, na.rm = TRUE)
clEval[2,] <- apply(runEval, 2, quantile, probs= 0.5, na.rm = TRUE)
clEval[3,] <- apply(runEval, 2, quantile, probs= 0.025, na.rm = TRUE)
##raw-data EOO/AOO are constant across runs; model columns come from the maps
clEval[1:3,4] <- eoo(longlat)
clEval[1:3,6] <- aoo(longlat)
clEval[1,5] <- eoo(upMap)
clEval[1,7] <- aoo(upMap)
clEval[2,5] <- eoo(consensusMap)
clEval[2,7] <- aoo(consensusMap)
clEval[3,5] <- eoo(downMap)
clEval[3,7] <- aoo(downMap)
return(list(consensusMap, runMap, clEval))
} else {
return (consensusMap)
}
}
##single-run (non-ensemble) path starts here
longlat <- move(longlat, layers) #move all records falling on NAs
##background sample size: at most 1000 points and at most 1/4 of the data cells
nPoints = min(1000, sum(!is.na(as.vector(layers[[1]])), na.rm = TRUE)/4)
bg <- dismo::randomPoints(layers, nPoints) ##extract background points
##if no categorical variables are given try to figure out which are
if(is.null(categorical))
categorical <- find.categorical(layers)
llTrain <- longlat
llTest <- longlat
##optionally hold out a random test set; otherwise train == test
if(testpercentage > 0){
testRecords <- sample(1:nrow(longlat), ceiling(nrow(longlat)*testpercentage/100))
llTrain <- longlat[-testRecords,]
llTest <- longlat[testRecords,]
}
mod <- dismo::maxent(layers, llTrain, a = bg, factors = categorical) ##build model
p <- raster::predict(mod, layers) ##do prediction
e <- dismo::evaluate(p = llTrain, a = bg, model = mod, x = layers) ##do evaluation of model
if(thres == 0)
thres <- dismo::threshold(e)$spec_sens ##extract threshold from evaluation
p <- reclassify(p, matrix(c(0,thres,0,thres,1,1), nrow=2, byrow = TRUE)) ##convert to presence/absence
##precautionary clip to patches touching the records' MCP
if(mcp && aoo(p) >= 4)
p <- map.habitat(longlat, p, mcp = TRUE, eval = FALSE)
##optionally force record cells to presence
if(points)
p <- max(p, map.points(longlat, p, eval = FALSE))
if(eval){
e <- dismo::evaluate(p = llTest, a = bg, model = mod, x = layers, tr = thres) ##do evaluation of model with threshold
auc <- e@auc
kappa <- e@kappa
sensitivity <- as.numeric(e@TPR/(e@TPR+e@FNR))
specificity <- as.numeric(e@TNR/(e@TNR+e@FPR))
tss <- sensitivity + specificity - 1
eooRaw <- eoo(longlat)
aooRaw <- aoo(longlat)
aooModel <- aoo(p)
##for tiny predicted ranges fall back to AOO as the EOO estimate
if(aooModel > 8)
eooModel <- eoo(p)
else
eooModel = aooModel
txtEval <- matrix(c(auc, kappa, tss, eooRaw, eooModel, aooRaw, aooModel), nrow = 1)
colnames(txtEval) <- c("AUC", "Kappa", "TSS", "EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)")
return(list(p, txtEval))
} else {
return(p)
}
}
#' Map species distribution of habitat specialist.
#' @description Mapping of all habitat patches where the species is known to occur.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layer RasterLayer object representing the presence/absence (1/0) of a single habitat type.
#' @param move If TRUE, identifies and moves presence records to closest cells with suitable habitat. Use when spatial error might put records outside the correct patch.
#' @param mcp If TRUE, all habitat patches inside the minimum convex hull polygon encompassing all occurrence records are converted to presence.
#' @param points If TRUE, force map to include cells with presence records even if suitable habitat was not identified.
#' @param eval If TRUE, build a matrix with EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model).
#' @details In many cases a species has a very restricted habitat and we generally know where it occurs. In such cases using the distribution of the known habitat patches may be enough to map the species.
#' @return One raster object and, if eval = TRUE, a matrix with EOO (from raw data), EOO (from model), AOO (from raw data) and AOO (from model).
#' @export
map.habitat <- function(longlat, layer, move = TRUE, mcp = FALSE, points = FALSE, eval = TRUE){
##optionally force record cells to habitat before patch detection
if(points)
layer <- max(layer, map.points(longlat, layer, eval = FALSE))
if(move){
##records may fall just outside habitat due to spatial error: shift them to
##the nearest habitat cell (habitat raster with 0 recoded to NA) first
moveLayer <- layer
moveLayer[moveLayer == 0] <- NA
longlat <- move(longlat, moveLayer)
remove(moveLayer)
}
if(mcp){
##select every habitat patch intersecting the records' convex hull polygon
vertices <- chull(longlat)
vertices <- c(vertices, vertices[1])
vertices <- longlat[vertices,]
poly = Polygon(vertices)
poly = Polygons(list(poly),1)
poly = SpatialPolygons(list(poly)) ##minimum convex polygon
patches <- raster::clump(layer, gaps=FALSE) ##individual patches, numbered
selPatches <- raster::unique(extract(patches, poly, df = TRUE, weights = TRUE)$clumps) ##which patches are inside polygon
} else {
##select only the patches that actually contain occurrence records
patches <- raster::clump(layer, gaps=FALSE) ##individual patches, numbered
selPatches <- raster::unique(extract(patches, longlat, df = TRUE, weights = TRUE)$clumps) ##which patches have the species
}
selPatches <- selPatches[!is.na(selPatches)]
##build a patch-id -> selected (0/1) lookup table and substitute it into the
##patch raster, then zero out habitat cells of unselected patches
allPatches <- raster::unique(patches)
allPatches <- as.data.frame(cbind(allPatches, rep(0, length(allPatches))))
colnames(allPatches) <- c("patches", "selected")
#NOTE(review): indexing rows directly by patch id assumes clump ids are the
#consecutive integers 1..n in row order - confirm against raster::clump output
allPatches[selPatches, 2] <- 1
patches <- raster::subs(patches, allPatches)
layer <- mask(layer, patches, maskvalue = 0, updatevalue = 0)
if(eval){
eooRaw <- eoo(longlat)
eooModel <- eoo(layer)
aooRaw <- aoo(longlat)
aooModel <- aoo(layer)
txtEval <- matrix(c(eooRaw, eooModel, aooRaw, aooModel), nrow = 1)
colnames(txtEval) <- c("EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)")
return(list(layer, txtEval))
} else {
return(layer)
}
}
#' Map recorded distribution of species.
#' @description Mapping of all cells where the species is known to occur.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layers Raster* object as defined by package raster. Any raster with the relevant extent and cell size can be used.
#' @param eval If TRUE, build a matrix with EOO and AOO calculated from occurrence records only.
#' @details To be used if either information on the species is very scarce (and it is not possible to model the species distribution) or, on the contrary, complete (and there is no need to model the distribution).
#' @return One raster object and, if EVAL = TRUE, a matrix with EOO and AOO.
#' @examples
#' data(red.records)
#' data(red.layers)
#' raster::plot(map.points(red.records, red.layers, eval = FALSE))
#' points(red.records)
#' @export
map.points <- function(longlat, layers, eval = TRUE){
  ## presence = 1 in every cell holding at least one record, 0 elsewhere
  occ <- rasterize(longlat, layers[[1]], field = 1, background = 0)
  ## NA out cells lacking data in any of the environmental layers
  dataMask <- sum(layers)
  dataMask[!is.na(dataMask)] <- 1
  occ <- mask(occ, dataMask)
  if(!eval)
    return(occ)
  ## raw-data range metrics to accompany the map
  evalTab <- matrix(c(eoo(longlat), aoo(longlat)), nrow = 1)
  colnames(evalTab) <- c("EOO", "AOO")
  return(list(occ, evalTab))
}
#' Species distributions made easy (multiple species).
#' @description Single step for prediction of multiple species distributions. Output of maps (in pdf format), klms (for Google Earth) and relevant data (in csv format).
#' @param longlat data.frame of taxon names, longitude and latitude or eastness and northness (three columns in this order) of each occurrence record.
#' @param layers If NULL analyses are done with environmental layers read from data files of red.setup(). If a Raster* object as defined by package raster, analyses use these.
#' @param habitat Raster* object as defined by package raster. Habitat extent layer (0/1) used instead of layers if any species is an habitat specialist.
#' @param zone UTM zone if data is in metric units. Used only for correct placement of kmls and countries.
#' @param thin boolean defining if species data should be thinned before modeling (only for SDMs).
#' @param error Vector of spatial error in longlat (one element per row of longlat) in the same unit as longlat. Used to move any point randomly within the error radius.
#' @param move If TRUE, identifies and moves presence records to closest cells with environmental data. Use when spatial error might put records outside such data.
#' @param dem RasterLayer object. It should be a digital elevation model for calculation of elevation limits of the species. If NULL, dem from red.setup() is used if possible, otherwise it will be 0.
#' @param pca Number of pca axes for environmental data reduction. If 0 (default) no pca is made.
#' @param filename Name of output csv file with all results. If NULL it is named "Results_All.csv".
#' @param mapoption Vector of values within options: points, habitat and sdm; each value corresponding to the function to be used for each species (map.points, map.habitat, map.sdm). If a single value, all species will be modelled according to it. If NULL, the function will perform analyses using map.points. Species values must be in same order as latlong.
#' @param testpercentage Percentage of records used for testing only. If 0 all records will be used for both training and testing.
#' @param mintest Minimim number of total occurrence records of any species to set aside a test set. Only used if testpercentage > 0.
#' @param points If TRUE, force map to include cells with presence records even if suitable habitat was not identified.
#' @param runs If <= 0 no ensemble modelling is performed. If > 0, ensemble modelling with n runs is made. For each run, a new random sample of occurrence records (if testpercentage > 0), background points and predictive variables (if subset > 0) are chosen. In the ensemble model, each run is weighted as max(0, (runAUC - 0.5)) ^ 2.
#' @param subset Number of predictive variables to be randomly selected from layers for each run if runs > 0. If <= 0 all layers are used on all runs. Using a small number of layers is usually better than using many variables for rare species, with few occurrence records (Lomba et al. 2010, Breiner et al. 2015).
#' @return Outputs maps in asc, pdf and kml format, plus a file with EOO, AOO and a list of countries where the species is predicted to be present if possible to extract.
#' @references Breiner, F.T., Guisan, A., Bergamini, A., Nobis, M.P. (2015) Overcoming limitations of modelling rare species by using ensembles of small models. Methods in Ecology and Evolution, 6: 1210-1218.
#' @references Lomba, A., Pellissier, L., Randin, C.F., Vicente, J., Moreira, F., Honrado, J., Guisan, A. (2010) Overcoming the rare species modelling paradox: a novel hierarchical framework applied to an Iberian endemic plant. Biological Conservation, 143: 2647-2657.
#' @export
map.easy <- function(longlat, layers = NULL, habitat = NULL, zone = NULL, thin = TRUE, error = NULL, move = TRUE, dem = NULL, pca = 0, filename = NULL, mapoption = NULL, testpercentage = 0, mintest = 20, points = FALSE, runs = 0, subset = 0){
##close any open graphics device so pdf output starts from a clean state
try(dev.off(), silent = TRUE)
spNames <- unique(longlat[,1])
nSp <- length(spNames)
##recycle a single mapoption to every species; default to presence-cell maps
if(is.null(mapoption))
mapoption = rep("points", nSp)
else if(length(mapoption) == 1)
mapoption = rep(mapoption, nSp)
else if(length(mapoption) != nSp)
return(warning("Number of species different from length of mapoption"))
##maxent models need dismo's bundled maxent.jar; bail out early if missing
if("sdm" %in% mapoption){
if(!file.exists(paste(.libPaths()[[1]], "/dismo/java/maxent.jar", sep=""))){
warnMaxent()
return()
}
}
##the results-table layout depends on which mapping methods will be run
if (all(mapoption == rep("points", nSp))){
res <- matrix(NA, nrow = nSp, ncol = 5)
colnames(res) <- c("EOO", "AOO", "Min elevation", "Max elevation", "Countries")
} else if (("sdm" %in% mapoption) && runs > 0) {
res <- matrix(NA, nrow = nSp, ncol = 11)
colnames(res) <- c("EOO (raw)", "EOO (LowCL)", "EOO (Consensus)", "EOO (UpCL)", "AOO (raw)", "AOO (LowCL)", "AOO (Consensus)", "AOO (UpCL)", "Min elevation", "Max elevation", "Countries")
} else {
res <- matrix(NA, nrow = nSp, ncol = 7)
colnames(res) <- c("EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)", "Min elevation", "Max elevation", "Countries")
}
rownames(res) <- spNames
##remember whether layers/dem were not supplied and must be read per species
if(is.null(layers))
newLayers <- TRUE
else
newLayers <- FALSE
if(is.null(dem))
newDem <- TRUE
else
newDem <- FALSE
rad = 0.1
for(s in 1:nSp){
cat("\nSpecies", s, "of", nSp, "-", toString(spNames[s]),"\n")
##records of this species only, without the name column
spData <- longlat[longlat[,1] == spNames[s], -1]
if(!is.null(error)){
spError <- error[longlat[,1] == spNames[s]]
##kml point radius: convert apparent metric error to approximate degrees
if(max(spError) > 1)
rad <- spError/100000
else
rad <- spError
} else {
spError <- NULL
}
if(newLayers){
layers <- raster.read(spData)
if(newDem)
dem <- layers[[20]]
if(pca > 0)
layers <- raster.reduce(layers, n = pca)
}
##sdm only if enough occupied cells (aoo > 8 km2, i.e. more than two cells)
if(mapoption[s] == "sdm" && aoo(move(spData, layers)) > 8){
if(move)
spData <- move(spData, layers)
if(thin)
spData <- thin(spData)
#NOTE(review): mintest is documented as gating the test set but is not
#checked anywhere in this function - confirm whether it should be used here
if(testpercentage > 0)
p <- map.sdm(spData, layers, spError, testpercentage = testpercentage, mcp = TRUE, points = points, runs = runs, subset = subset)
else
p <- map.sdm(spData, layers, spError, testpercentage = 0, mcp = TRUE, points = points, runs = runs, subset = subset)
} else if (mapoption[s] == "habitat"){
p <- map.habitat(spData, habitat, move, points = points)
} else {
##fall back to a simple presence-cell map
mapoption[s] = "points"
p <- map.points(spData, layers)
}
##export the map as an ascii grid and a pdf for this species
writeRaster(p[[1]], paste(toString(spNames[s]), ".asc", sep=""), overwrite = TRUE)
map.draw(spData, p[[1]], spNames[s], sites = FALSE, print = TRUE)
##kml, country list and elevation limits, from the map or the raw records
if(mapoption[s] != "points"){
kml(p[[1]], zone = zone, paste(toString(spNames[s]), ".kml", sep=""), mapoption = "aoo")
countryList <- countries(p[[1]], zone = zone)
if(is.null(dem))
elev <- c(0, 0)
else
elev <- elevation(p[[1]], dem)
} else {
kml(spData, zone = zone, paste(toString(spNames[s]), ".kml", sep=""), mapoption = "points", rad = rad)
countryList <- countries(spData, zone = zone)
if(is.null(dem))
elev <- c(0, 0)
else
elev <- elevation(spData, dem)
}
##for ensemble sdms also export the probabilistic (agreement) map
if(mapoption[s] == "sdm" && aoo(spData) > 8 && runs > 0){
writeRaster(p[[2]], paste(toString(spNames[s]), "_prob.asc", sep=""), overwrite = TRUE)
map.draw(spData, p[[2]], paste(toString(spNames[s]), "_prob", sep = ""), legend = TRUE, print = TRUE)
}
##write output values to csv; spRes is the metrics element of the result list
spRes = p[[length(p)]]
if(ncol(res) == 5){ #colnames(res) <- c("EOO", "AOO", "Min elevation", "Max elevation", "Countries")
res[s,] <- c(spRes, elev, toString(countryList))
}
if(ncol(res) == 7){ #colnames(res) <- c("EOO (raw)", "EOO (model)", "AOO (raw)", "AOO (model)", "Min elevation", "Max elevation", "Countries")
if(length(spRes) == 7)
res[s,] <- c(spRes[4:7], elev, toString(countryList))
else #if length(spRes) < 7
res[s,] <- c(spRes[c(1,1,2,2)], elev, toString(countryList))
}
if(ncol(res) == 11){ #colnames(res) <- c("EOO (raw)", "EOO (LowCL)", "EOO (Consensus)", "EOO (UpCL)", "AOO (raw)", "AOO (LowCL)", "AOO (Consensus)", "AOO (UpCL)", "Min elevation", "Max elevation", "Countries")
if(length(spRes) == 2)
res[s,] <- c(spRes[c(1,1,1,1,2,2,2,2)], elev, toString(countryList))
else if(length(spRes) == 4)
res[s,] <- c(spRes[c(1,2,2,2,3,4,4,4)], elev, toString(countryList))
else if(is.null(dim(spRes)))
res[s,] <- c(spRes[4:7], elev, toString(countryList))
else #if matrix
res[s,] <- c(spRes[2,4], spRes[3:1,5], spRes[2,6], spRes[3:1,7], elev, toString(countryList))
}
write.csv(res[s,], paste(toString(spNames[s]), ".csv", sep = ""))
##full evaluation details for sdms go to a separate csv
if(mapoption[s] == "sdm" && aoo(spData) > 8){
if(runs > 0)
write.csv(p[[3]], paste(toString(spNames[s]), "_detail.csv", sep = ""))
else
write.csv(p[[2]], paste(toString(spNames[s]), "_detail.csv", sep = ""))
}
}
if(is.null(filename))
write.csv(res, "Results_All.csv")
else
write.csv(res, toString(filename))
return(as.data.frame(res))
}
#' Map creation.
#' @description Creates maps ready to print in pdf or other formats.
#' @param longlat Matrix of longitude and latitude or eastness and northness (two columns in this order) of each occurrence record.
#' @param layer RasterLayer object representing the presence/absence map for the species.
#' @param spName String of species name.
#' @param borders If TRUE country borders are drawn.
#' @param scale If TRUE a distance scale in km is drawn.
#' @param legend If TRUE the legend for the map is drawn.
#' @param sites If TRUE the record locations are drawn.
#' @param mcp If TRUE the minimum convex polygon representing the Extent of Occurrence is drawn.
#' @param print If TRUE a pdf is saved instead of the output to the console.
#' @examples data(red.records)
#' data(red.range)
#' par(mfrow = c(1,2))
#' map.draw(red.records, layer = red.range, mcp = TRUE)
#' @export
map.draw <- function(longlat = NULL, layer, spName, borders = FALSE, scale = TRUE, legend = FALSE, sites = TRUE, mcp = FALSE, print = FALSE){
  ## load country outlines shipped with the package into this call's frame
  worldborders <- NULL
  data(worldborders, envir = environment())
  ## base map: green presence over transparent absence when borders are drawn,
  ## default palette with light blue NA cells otherwise
  if (borders){
    layer[layer == 0] <- NA
    raster::plot(layer, main = spName, legend = legend, xlab = "longitude", ylab = "latitude", col = "forestgreen")
    lines(worldborders)
  } else {
    raster::plot(layer, main = spName, legend = legend, colNA = "lightblue", xlab = "longitude", ylab = "latitude")
  }
  if (scale){
    ## bar length: roughly a tenth of the map width, rounded to one significant digit
    mapWidth = (xmax(layer) - xmin(layer))
    barSize = round(mapWidth/10^(nchar(mapWidth)-1))*10^(nchar(mapWidth)-2)
    scalebar(d = barSize, type="bar", divs = 2)
  }
  if (sites && !is.null(longlat))
    points(longlat, pch = 19)
  if (mcp){
    ## overlay the convex hull of all presence cells (Extent of Occurrence)
    presXY <- rasterToPoints(layer, fun = function(dat){dat == 1})
    hullIdx <- chull(presXY[,1], presXY[,2])
    hullIdx <- c(hullIdx, hullIdx[1])
    hullPoly <- SpatialPolygons(list(Polygons(list(Polygon(presXY[hullIdx,c(1,2)])),1)))
    raster::plot(hullPoly, add = TRUE)
  }
  if(print){
    ## replicate the on-screen plot into a pdf named after the species
    dev.copy(device = pdf, file = paste(toString(spName), ".pdf", sep=""))
    dev.off()
  }
}
#' Extent of Occurrence (EOO).
#' @description Calculates the Extent of Occurrence of a species based on either records or predicted distribution.
#' @param spData spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (either 0/1 or probabilistic values).
#' @details EOO is calculated as the minimum convex polygon covering all known or predicted sites for the species.
#' @return A single value in km2 or a vector with lower confidence limit, consensus and upper confidence limit (probabilities 0.975, 0.5 and 0.025 respectively).
#' @examples data(red.records)
#' data(red.range)
#' eoo(red.records)
#' eoo(red.range)
#' @export
eoo <- function(spData){
  ## planar (shoelace) area of a closed vertex matrix in meters, returned in km2
  ## (uses columns 1 and 2 only); shared by the two metric-coordinate branches
  planarArea <- function(vertices){
    area = 0
    for(i in seq_len(nrow(vertices)-1))
      area = area + (as.numeric(vertices[i,1])*as.numeric(vertices[(i+1),2]) - as.numeric(vertices[i,2])*as.numeric(vertices[(i+1),1]))
    return(abs(area/2000000))
  }
  ## fix: inherits() instead of class(spData) == "RasterLayer" - class() of a
  ## matrix has length 2 in R >= 4.0, which makes the == comparison error
  ## inside if() on R >= 4.2
  if(inherits(spData, "RasterLayer")){
    if(!all(raster::as.matrix(spData) == floor(raster::as.matrix(spData)), na.rm = TRUE)){ #if probabilistic map
      ## derive binary maps at the 2.5%, 50% and 97.5% levels and recurse,
      ## giving lower CL, consensus and upper CL estimates
      upMap <- reclassify(spData, matrix(c(0,0.025,0,0.025,1,1), ncol = 3, byrow = TRUE))
      consensusMap <- reclassify(spData, matrix(c(0,0.499,0,0.499,1,1), ncol = 3, byrow = TRUE))
      downMap <- reclassify(spData, matrix(c(0,0.975,0,0.975,1,1), ncol = 3, byrow = TRUE))
      area <- c(eoo(downMap), eoo(consensusMap), eoo(upMap))
    } else {
      if (raster::xmax(spData) <= 180) { #if longlat data
        e <- rasterToPoints(spData, fun = function(dat){dat == 1}) ##convert raster to points
        vertices <- chull(e[,1], e[,2])
        if(length(vertices) < 3) return(0) ##degenerate hull (point or line)
        vertices <- c(vertices, vertices[1]) ##close the polygon
        vertices <- e[vertices,c(1,2)]
        area = geosphere::areaPolygon(vertices)/1000000 ##geodesic area in km2
      } else { #if metric (projected) data
        spData[spData < 1] <- NA
        spData <- rasterToPoints(spData)
        vertices <- chull(spData)
        if(length(vertices) < 3) return(0) ##degenerate hull (point or line)
        vertices <- c(vertices, vertices[1]) ##close the polygon
        area = planarArea(spData[vertices,])
      }
    }
  } else if (ncol(spData) == 2){
    vertices <- chull(spData)
    if(length(vertices) < 3) return(0) ##degenerate hull (point or line)
    vertices <- c(vertices, vertices[1]) ##close the polygon
    vertices <- spData[vertices,]
    if(max(spData) <= 180) { #if longlat data
      area = geosphere::areaPolygon(vertices)/1000000 ##geodesic area in km2
    } else { #if square data in meters
      area = planarArea(vertices)
    }
  } else {
    return(warning("Data format not recognized"))
  }
  return(round(area))
}
#' Area of Occupancy (AOO).
#' @description Calculates the Area of Occupancy of a species based on either known records or predicted distribution.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (either 0/1 or probabilistic values).
#' @details AOO is calculated as the area of all known or predicted cells for the species. The resolution will be 2x2km as required by IUCN.
#' @return A single value in km2 or a vector with lower confidence limit, consensus and upper confidence limit (probabilities 0.975, 0.5 and 0.025 respectively).
#' @examples data(red.range)
#' aoo(red.range)
#' @export
aoo <- function(spData){
  # Area of Occupancy: number of occupied 2x2 km cells * 4 km2, as required
  # by IUCN guidelines.
  # inherits() is used instead of class(x) == "...", which breaks when
  # class() returns more than one element.
  if (inherits(spData, "RasterLayer")){ #if rasterlayer
    if(raster::maxValue(spData) == 0){ #if no data (empty raster)
      area = 0
    } else if(!all(raster::as.matrix(spData) == floor(raster::as.matrix(spData)), na.rm = TRUE)){ #if probabilistic map
      # Threshold at 0.975/0.5/0.025 and recurse to get lower confidence
      # limit, consensus and upper confidence limit.
      upMap <- reclassify(spData, matrix(c(0,0.025,0,0.025,1,1), ncol = 3, byrow = TRUE))
      consensusMap <- reclassify(spData, matrix(c(0,0.499,0,0.499,1,1), ncol = 3, byrow = TRUE))
      downMap <- reclassify(spData, matrix(c(0,0.975,0,0.975,1,1), ncol = 3, byrow = TRUE))
      area <- c(aoo(downMap), aoo(consensusMap), aoo(upMap))
    } else {
      if (raster::xmax(spData) <= 180) { #if longlat data
        if(res(spData)[1] > 0.05){ #if resolution is > 1km use area of cells rounded to nearest 4km
          area = round(cellStats((raster::area(spData) * spData), sum)/4)*4
        } else {
          spData[spData < 1] <- NA
          spData <- rasterToPoints(spData)
          if(nrow(unique(spData)) == 1){  # single occupied cell
            area = 4
          } else {
            # Project to UTM, snap to a 2 km grid and count unique cells.
            spData <- longlat2utm(spData[,-3])
            spData = floor(spData/2000)
            ncells = nrow(unique(spData))
            area = ncells * 4
          }
        }
      } else { #if square data in meters
        spData[spData < 1] <- NA
        spData <- rasterToPoints(spData)
        spData = floor(spData/2000)
        ncells = nrow(unique(spData))
        area = ncells * 4
      }
    }
  } else if (ncol(spData) == 2){
    if (max(spData) <= 180) { #if longlat data
      spData <- longlat2utm(spData)
      spData = floor(spData/2000)
      ncells = nrow(unique(spData))
      area = ncells * 4
    } else { #if square data in meters
      spData = floor(spData/2000)
      ncells = nrow(unique(spData))
      area = ncells * 4
    }
  } else {
    return(warning("Data format not recognized!"))
  }
  return(round(area))
}
#' Elevation limits.
#' @description Calculates the elevation (or depth) limits (range) of a species based on either known records or predicted distribution.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (0/1 values).
#' @param dem RasterLayer object. Should be a digital elevation model (DEM) of the relevant area. If not given the function will try to read it from base data, only works with longlat data.
#' @details Maximum and minimum elevation are calculated based on the DEM.
#' @return A vector with two values (min and max) in meters above (or below) sea level.
#' @examples data(red.records)
#' data(red.range)
#' data(red.layers)
#' dem = red.layers[[3]]
#' elevation(red.records, dem)
#' elevation(red.range, dem)
#' @export
elevation <- function(spData, dem = NULL){
  # Elevation (or depth) limits of a species from records or a range raster,
  # read off a digital elevation model.
  # inherits() replaces the fragile class(x) != "RasterLayer" comparison.
  if(!inherits(spData, "RasterLayer")){ #if no rasterlayer is given but just a matrix of longlat.
    if(is.null(dem) && max(spData) <= 180){
      # No DEM supplied: read the base 1 km DEM and crop it around the
      # records with a 0.1 degree buffer (longlat data only).
      gisdir = red.getDir()
      dem <- raster::raster(paste(gisdir, "red_1km_20.tif", sep =""))
      dem <- crop(dem, c(min(spData[,1])-0.1, max(spData[,1]+0.1), min(spData[,2])-0.1, max(spData[,2])+0.1))
    }
    spData = rasterize(spData, dem, field = 1, background = NA) #create a layer of presence based on the dem
  } else if (is.null(dem)){
    gisdir = red.getDir()
    dem <- raster::raster(paste(gisdir, "red_1km_20.tif", sep = ""))
    dem <- crop(dem, spData)
  }
  spData[spData == 0] <- NA
  # Mask the DEM by presence (product of presence 1/NA and elevation);
  # the min/max of the result are the elevation limits.
  spData <- raster::overlay(spData, dem, fun = function(x,y){(x*y)})
  out <- c(raster::minValue(spData), raster::maxValue(spData))
  names(out) <- c("Min", "Max")
  return(round(out))
}
#' Countries of occurrence.
#' @description Extracts the names or ISO codes of countries of occurrence of a species based on either records or predicted distribution.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (0/1 values).
#' @param zone UTM zone if data is in metric units.
#' @param ISO Outputs either country names (FALSE) or ISO codes (TRUE).
#' @details Country boundaries and designations are based on data(worldborders) from package maptools.
#' @return A vector with country names or codes.
#' @examples data(red.records)
#' data(red.range)
#' countries(red.records)
#' countries(red.range, ISO = TRUE)
#' @export
countries <- function(spData, zone = NULL, ISO = FALSE){
  # Countries of occurrence: overlay records (or range cells) on world
  # borders and return country names or ISO codes.
  # inherits() replaces the fragile class(x) == "RasterLayer" comparisons.
  if ((inherits(spData, "RasterLayer") && raster::xmax(spData) > 180) || (!inherits(spData, "RasterLayer") && max(spData) > 180)) ##if need to project to longlat
    spData <- utm2longlat(spData, zone)
  worldborders <- NULL  # placeholder overwritten by data() below
  data(worldborders, envir = environment())
  if(inherits(spData, "RasterLayer"))
    spData <- rasterToPoints(spData, fun = function(dat){dat == 1}) ##convert raster to points
  # Point-in-polygon overlay: index of the country polygon for each record.
  countryList <- sp::over(sp::SpatialPoints(spData), sp::SpatialPolygons(worldborders@polygons))
  if(ISO)
    countryList <- unique(worldborders@data[countryList,])$ISO2
  else
    countryList <- unique(worldborders@data[countryList,])$NAME
  # Drop records falling outside any country (NA) and sort alphabetically.
  countryList <- sort(as.vector(countryList[!is.na(countryList)]))
  return(countryList)
}
#' Output kml files.
#' @description Creates kml files for Google Maps as required by IUCN guidelines.
#' @param spData One of three options: 1) matrix of longitude and latitude (two columns) of each occurrence record; 2) matrix of easting and northing (two columns, e.g. UTM) of each occurrence record in meters; 3) RasterLayer object of predicted distribution (0/1 values).
#' @param zone UTM zone if data is in metric units.
#' @param filename The name of file to save, should end with .kml.
#' @param mapoption Type of representation, any of "points", "eoo" or "aoo".
#' @param smooth Smooths the kml lines as per IUCN guidelines. Higher values represent smoother polygons.
#' @param rad radius of circles in degrees if mapoption is "points". It can be the same value for all points or a vector with length equal to number of records in spData representing associated error. The default is about 10km (0.1 degrees) as per IUCN guidelines.
#' @return A kml with polygon or circles around records.
#' @export
kml <- function(spData, zone = NULL, filename, mapoption = "aoo", smooth = 0, rad = 0.1){
  # Write a kml polygon file (Google Maps / IUCN guidelines) representing
  # either the AOO cells, circles around records, or the EOO convex hull.
  # inherits() replaces the fragile class(x) == "..." comparisons throughout.
  if ((inherits(spData, "RasterLayer") && raster::xmax(spData) > 180) || (!inherits(spData, "RasterLayer") && max(spData) > 180)) ##if need to project to longlat
    spData <- utm2longlat(spData, zone)
  if(mapoption == "aoo" && inherits(spData, "RasterLayer")){
    spData[spData != 1] <- NA
    spData <- rasterToPolygons(spData, dissolve = TRUE)
    #simplify
    if(smooth > 0){
      # Probe increasing tolerances until gSimplify stops erroring, then
      # simplify with a tolerance scaled by the requested smoothing.
      trytol <- c(seq(0.001,0.01,0.001),seq(0.02,0.1,0.01),seq(0.2,1,0.1),2:10,seq(20,100,10),seq(200,1000,100),seq(2000,10000,1000),seq(20000,100000,10000),seq(200000,1000000,100000))
      for (i in trytol){
        if(!inherits(try(gSimplify(spData, tol = (1 / i)), silent = TRUE), "try-error")){
          spData <- gSimplify(spData, tol = (smooth / (i*10)))
          break
        }
      }
      #cut to coast
      # Fix: worldborders was referenced here without being loaded first
      # (compare with countries(), which calls data(worldborders, ...)).
      worldborders <- NULL
      data(worldborders, envir = environment())
      spData <- gIntersection(worldborders, spData)
      #round polygon edges with a periodic spline
      smooth = smooth * 100
      polys = methods::slot(spData@polygons[[1]], "Polygons")
      spline.poly <- function(xy, vertices, k=3, ...) {
        # Assert: xy is an n by 2 matrix with n >= k.
        # Wrap k vertices around each end so the closed outline stays smooth.
        n <- dim(xy)[1]
        if (k >= 1) {
          data <- rbind(xy[(n-k+1):n,], xy, xy[1:k, ])
        } else {
          data <- xy
        }
        # Spline the x and y coordinates.
        data.spline <- spline(1:(n+2*k), data[,1], n=vertices, ...)
        x <- data.spline$x
        x1 <- data.spline$y
        x2 <- spline(1:(n+2*k), data[,2], n=vertices, ...)$y
        # Retain only the middle part.
        cbind(x1, x2)[k < x & x <= n+k, ]
      }
      spData <- SpatialPolygons(
        Srl = lapply(1:length(polys),
          function(x){
            p <- polys[[x]]
            #applying spline.poly function for smoothing polygon edges
            px <- methods::slot(polys[[x]], "coords")[,1]
            py <- methods::slot(polys[[x]], "coords")[,2]
            bz <- spline.poly(methods::slot(polys[[x]], "coords"),smooth, k=3)
            bz <- rbind(bz, bz[1,])  # close the smoothed ring
            methods::slot(p, "coords") <- bz
            # create Polygons object
            poly <- Polygons(list(p), ID = x)
          }
        )
      )
      spData <- SpatialPolygonsDataFrame(spData, data=data.frame(ID = 1:length(spData)))
      kmlPolygons(spData, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
    } else {
      kmlPolygon(spData, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
    }
  } else if(mapoption == "points" || (inherits(spData, "RasterLayer") && aoo(spData) <= 8) || nrow(spData) < 3){
    # Draw a circle of radius rad (single value, or one value per record
    # representing associated error) around each occurrence record.
    poly = list()
    for(i in 1:nrow(spData)){
      pts = seq(0, 2 * pi, length.out = 100)
      if(length(rad) == 1)
        xy = cbind(spData[i, 1] + rad * sin(pts), spData[i, 2] + rad * cos(pts))
      else
        xy = cbind(spData[i, 1] + rad[i] * sin(pts), spData[i, 2] + rad[i] * cos(pts))
      poly[[i]] = Polygon(xy)
    }
    poly = Polygons(poly,1)
    kmlPolygon(poly, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
  } else {
    # EOO: minimum convex polygon around all cells or records.
    if (inherits(spData, "RasterLayer")){
      e <- rasterToPoints(spData, fun = function(dat){dat == 1}) ##convert raster to points
      vertices <- chull(e[,1], e[,2])
      vertices <- c(vertices, vertices[1])
      vertices <- e[vertices,c(1,2)]
    } else {
      vertices <- chull(spData)
      vertices <- c(vertices, vertices[1])
      vertices <- spData[vertices,]
    }
    poly = Polygon(vertices)
    poly = Polygons(list(poly),1)
    kmlPolygon(poly, filename, name = filename, col = '#FFFFFFAA', border = "red", lwd = 2)
  }
}
#' Red List Index.
#' @description Calculates the Red List Index (RLI) for a group of species.
#' @param spData Either a vector with species assessment categories for a single point in time or a matrix with two points in time in different columns (species x date). Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param tree An hclust or phylo object (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @param boot If TRUE bootstrapping for statistical significance is performed on both values per date and the trend between dates.
#' @param dd bootstrap among all species (FALSE) or Data Deficient species only (TRUE).
#' @param runs Number of runs for bootstrapping
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to 5 (Extinct/Extinct in the Wild).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Each species weight can further be influenced by how much it uniquely contributes to the phylogenetic or functional diversity of the group (Cardoso et al. in prep.).
#' Importantly, the RLI is based on true improvements or deteriorations in the status of species, i.e. genuine changes. It excludes category changes resulting from, e.g., new knowledge (Butchart et al. 2007).
#' The RLI approach helps to develop a better understanding of which taxa, regions or ecosystems are declining or improving.
#' Juslen et al. (2016a, b) suggested the use of bootstrapping to search for statistical significance when comparing taxa or for trends in time of the index and this approach is here implemented.
#' @return Either a vector (if no two dates are given) or a matrix with the RLI values and, if bootstrap is performed, their confidence limits and significance.
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PloS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PloS One, 2: e140.
#' @references Juslen, A., Cardoso, P., Kullberg, J., Saari, S. & Kaila, L. (2016a) Trends of extinction risk for Lepidoptera in Finland: the first national Red List Index of butterflies and moths. Insect Conservation and Diversity, 9: 118-123.
#' @references Juslen, A., Pykala, J., Kuusela, S., Kaila, L., Kullberg, J., Mattila, J., Muona, J., Saari, S. & Cardoso, P. (2016b) Application of the Red List Index as an indicator of habitat change. Biodiversity and Conservation, 25: 569-585.
#' @examples rliData <- matrix(c("LC","LC","EN","EN","EX","EX","LC","CR","DD","DD"), ncol = 2, byrow = TRUE)
#' colnames(rliData) <- c("2000", "2010")
#' rli(rliData[,1])
#' rli(rliData[,1], boot = TRUE)
#' rli(rliData)
#' rli(rliData, boot = TRUE, dd = TRUE)
#' @export
rli <- function (spData, tree = NULL, boot = FALSE, dd = FALSE, runs = 1000){
  # Single assessment date: delegate directly (1 value, or 3 with bootstrap).
  if(is.null(dim(spData)))
    return(rli.calc(spData, tree, boot, dd, runs))
  # Two assessment dates: raw index per date plus yearly rate of change.
  rawValues <- apply(spData, 2, function(x) rli.calc(x, tree, boot = FALSE))
  years <- as.numeric(colnames(spData))
  slope <- (rawValues[2] - rawValues[1]) / (years[2] - years[1])
  if(!boot){
    out <- matrix(c(rawValues, slope), nrow = 1)
    colnames(out) <- c(colnames(spData), "Change/year")
    rownames(out) <- c("Raw")
    return(out)
  }
  # Bootstrap: confidence limits per date, plus a randomization test on the
  # sign of the trend between the two dates.
  bootValues <- apply(spData, 2, function(x) rli.calc(x, tree, boot, dd, runs))
  pVal <- 0
  randSlopes <- rep(NA, runs)
  for(r in seq_len(runs)){
    randSlopes[r] <- rli.calc(spData[,2], tree, boot, dd, runs = 1)[2] - rli.calc(spData[,1], tree, boot, dd, runs = 1)[2]
    # Count runs whose randomized slope has a different sign from the
    # observed slope (equivalent to the original < / > double test).
    if(sign(slope) != sign(randSlopes[r]))
      pVal <- pVal + 1
  }
  pVal <- pVal / runs
  randSlopes <- quantile(randSlopes, c(0.025, 0.5, 0.975))
  out <- matrix(c(rawValues[1], bootValues[,1], rawValues[2], bootValues[,2], slope, randSlopes), nrow = 4, ncol = 3)
  colnames(out) <- c(colnames(spData), "Change")
  rownames(out) <- c("Raw", "LowCL", "Median", "UpCL")
  return(list("Values" = out, "P_change" = pVal))
}
#' Red List Index for multiple groups.
#' @description Calculates the Red List Index (RLI) for multiple groups of species.
#' @param spData A matrix with group names (first column) and species assessment categories for one or two points in time (remaining columns). Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param tree A list of hclust or phylo objects, each corresponding to a tree per group (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @param boot If TRUE bootstrapping for statistical significance is performed on both values per date and the trend between dates.
#' @param dd bootstrap among all species (FALSE) or Data Deficient species only (TRUE).
#' @param runs Number of runs for bootstrapping
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to 5 (Extinct/Extinct in the Wild).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Each species weight can further be influenced by how much it uniquely contributes to the phylogenetic or functional diversity of the group (Cardoso et al. in prep.).
#' Importantly, the RLI is based on true improvements or deteriorations in the status of species, i.e. genuine changes. It excludes category changes resulting from, e.g., new knowledge (Butchart et al. 2007).
#' The RLI approach helps to develop a better understanding of which taxa, regions or ecosystems are declining or improving.
#' Juslen et al. (2016a, b) suggested the use of bootstrapping to search for statistical significance when comparing taxa or for trends in time of the index and this approach is here implemented.
#' @return A matrix with the RLI values and, if bootstrap is performed, their confidence limits and significance.
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PloS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PloS One, 2: e140.
#' @references Juslen, A., Cardoso, P., Kullberg, J., Saari, S. & Kaila, L. (2016a) Trends of extinction risk for Lepidoptera in Finland: the first national Red List Index of butterflies and moths. Insect Conservation and Diversity, 9: 118-123.
#' @references Juslen, A., Pykala, J., Kuusela, S., Kaila, L., Kullberg, J., Mattila, J., Muona, J., Saari, S. & Cardoso, P. (2016b) Application of the Red List Index as an indicator of habitat change. Biodiversity and Conservation, 25: 569-585.
#' @examples rliData <- matrix(c("LC","LC","EN","EN","EX","EX","LC","CR","CR","EX"), ncol = 2, byrow = TRUE)
#' colnames(rliData) <- c("2000", "2010")
#' rliData <- cbind(c("Arthropods","Arthropods","Birds","Birds","Birds"), rliData)
#' rli.multi(rliData[,1:2])
#' rli.multi(rliData[,1:2], boot = TRUE)
#' rli.multi(rliData)
#' rli.multi(rliData, boot = TRUE)
#' @export
rli.multi <- function (spData, tree = NULL, boot = FALSE, dd = FALSE, runs = 1000){
  # One row of results per group (group names are in the first column).
  groupNames <- unique(spData[,1])
  nGroups <- length(groupNames)
  # Result width depends on the number of dates and on bootstrapping:
  # 1 col (one date, raw), 3 cols (one date + CLs, or two dates raw),
  # 13 cols (two dates with bootstrap).
  if(ncol(spData) == 2 && !boot){
    res <- matrix(NA, nrow = nGroups, ncol = 1)
  } else if((ncol(spData) == 2 && boot) || (ncol(spData) == 3 && !boot)){
    res <- matrix(NA, nrow = nGroups, ncol = 3)
  } else {
    res <- matrix(NA, nrow = nGroups, ncol = 13)
    colnames(res) <- c(paste(colnames(spData)[2], "(raw)"), paste(colnames(spData)[2], "(lowCL)"), paste(colnames(spData)[2], "(median)"), paste(colnames(spData)[2], "(upCL)"), paste(colnames(spData)[3], "(raw)"), paste(colnames(spData)[3], "(lowCL)"), paste(colnames(spData)[3], "(median)"), paste(colnames(spData)[3], "(upCL)"), "Change (raw)", "Change (lowCL)", "Change (median)", "Change (upCL)", "p (change)")
  }
  row.names(res) <- groupNames
  for(i in seq_len(nGroups)){
    # Per-group tree (phylogenetic/functional weighting) when provided.
    groupTree <- if(is.null(tree)) NULL else tree[[i]]
    vals <- rli(spData[spData[,1] == groupNames[i],-1], groupTree, boot = boot, dd = dd, runs = runs)
    if(ncol(res) < 13){
      res[i,] <- vals
      colnames(res) <- colnames(vals)
    } else {
      # Two dates with bootstrap: rli() returns a list with a 4x3 Values
      # matrix and a p-value for the change.
      res[i,1:4] <- vals$Values[,1]
      res[i,5:8] <- vals$Values[,2]
      res[i,9:12] <- vals$Values[,3]
      res[i,13] <- vals$P_change
    }
  }
  return(res)
}
#' Prediction of Red List Index.
#' @description Linearly interpolates and extrapolates RLI values to any years.
#' @param rliValue Should be a vector with RLI values and names as the corresponding year numbers.
#' @param from Starting year of the sequence to predict.
#' @param to Ending year of the sequence to predict.
#' @param rliPlot Plots the result
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' @return A matrix with the RLI values and confidence limits.
#' @examples rliValue <- c(4.5, 4.3, 4.4, 4.2, 4.0)
#' names(rliValue) <- c(2000, 2004, 2008, 2011, 2017)
#' rli.predict(rliValue, 1990, 2020)
#' @export
rli.predict <- function(rliValue, from = NA, to = NA, rliPlot = FALSE){
  # Years are carried as the names of the RLI vector.
  year <- as.numeric(names(rliValue))
  rliTable <- data.frame(rliValue, year)
  # Default prediction window: the span of the observed years.
  if(is.na(from))
    from <- min(year)
  if(is.na(to))
    to <- max(year)
  newYear <- data.frame(year = seq(from = from, to = to, by = 1))
  # Linear fit of RLI on year, predicted yearly with 95% confidence bands.
  fit <- lm(rliValue ~ year, data = rliTable)
  lmOut <- predict(fit, newYear, interval = "confidence", level = 0.95)
  # Reorder predict()'s (fit, lwr, upr) columns to (lower, fit, upper).
  res <- lmOut[, c(2, 1, 3)]
  colnames(res) <- c("LowCL", "Fitted RLI", "UpCL")
  rownames(res) <- newYear$year
  if(rliPlot){
    plot(year, rliValue, xlab = "Year", ylab = "Fitted RLI", xlim = c(from, to), ylim = c(0, 5))
    abline(fit, col = "red")
    matlines(newYear, lmOut[, 2:3], col = "blue", lty = 2)
  }
  return(res)
}
#' Sampled Red List Index.
#' @description Calculates accumulation curve of confidence limits in sampled RLI.
#' @param spData A vector with species assessment categories for a single point in time. Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param tree An hclust or phylo object (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @param p p-value of confidence limits (in a two-tailed test).
#' @param runs Number of runs for smoothing accumulation curves.
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to 5 (Extinct/Extinct in the Wild).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Yet, in many groups, it is not possible to assess all species due to huge diversity and/or lack of resources. In such case, the RLI is estimated from a randomly selected sample of species - the Sampled Red List Index (SRLI; Stuart et al. 2010).
#' This function allows to calculate how many species are needed to reach a given maximum error of the SRLI around the true value of the RLI (with all species included) for future assessments of the group.
#' @return A vector with the accumulation of the error of the SRLI around the true value of the RLI (with all species included).
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PLoS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PLoS One, 2: e140.
#' @references Stuart, S.N., Wilson, E.O., McNeely, J.A., Mittermeier, R.A. & Rodriguez, J.P. (2010) The barometer of Life. Science 328, 117.
#' @examples rliData <- c("LC","LC","EN","EN","EX","EX","LC","CR","CR","EX")
#' rli.sampled(rliData)
#' @export
rli.sampled <- function (spData, tree = NULL, p = 0.05, runs = 1000){
  # Accumulation curve of the SRLI error: for each sample size n, the
  # (1 - p) quantile of the absolute difference between the full RLI and
  # the RLI of a random sample of n species, over `runs` repetitions.
  nSpecies <- length(spData)
  accum <- rep(NA, nSpecies)
  for(n in seq_len(nSpecies)){      # test with n species from the entire set
    sampleError <- rep(NA, runs)
    for(r in seq_len(runs)){        # repeat the random draw `runs` times
      # Build a community where only the sampled species are assessed;
      # the rest stay NA, mimicking an incomplete (sampled) assessment.
      subsample <- rep(NA, nSpecies)
      picked <- sample(nSpecies, n)
      subsample[picked] <- spData[picked]
      sampleError[r] <- abs(rli.calc(spData, tree, FALSE, FALSE, runs = 1) - rli.calc(subsample, tree, FALSE, FALSE, runs = 1))
    }
    accum[n] <- quantile(sampleError, (1 - p))
  }
  return(accum)  # confidence-limit accumulation curve of the sampled RLI
}
#' Mapping the Red List Index.
#' @description Creates a map for the red list index according to species distribution and threat status.
#' @param spData Either a vector with species assessment categories for a single point in time or a matrix with two points in time in different columns (species x date). Values can be text (EX, EW, RE, CR, EN, VU, NT, DD, LC) or numeric (0 for LC, 1 for NT, 2 for VU, 3 for EN, 4 for CR, 5 for RE/EW/EX).
#' @param layers Species distributions (0/1), a Raster* object as defined by package raster.
#' @param layers2 Species distributions (0/1) on the second point in time, a Raster* object as defined by package raster. If there are two dates but no layers2, the distributions are assumed to be kept constant in time.
#' @param tree An hclust or phylo object (used when species are weighted by their unique contribution to phylogenetic or functional diversity).
#' @details The IUCN Red List Index (RLI) (Butchart et al. 2004, 2007) reflects overall changes in IUCN Red List status over time of a group of taxa.
#' The RLI uses weight scores based on the Red List status of each of the assessed species. These scores range from 0 (Least Concern) to Extinct/Extinct in the Wild (5).
#' Summing these scores across all species and relating them to the worst-case scenario, i.e. all species extinct, gives us an indication of how biodiversity is doing.
#' Each species weight can further be influenced by how much it uniquely contributes to the phylogenetic or functional diversity of the group (Cardoso et al. in prep.).
#' @return A RasterLayer with point values (if a single date is given) or change per cell (if two dates are given).
#' @references Butchart, S.H.M., Stattersfield, A.J., Bennun, L.A., Shutes, S.M., Akcakaya, H.R., Baillie, J.E.M., Stuart, S.N., Hilton-Taylor, C. & Mace, G.M. (2004) Measuring global trends in the status of biodiversity: Red List Indices for birds. PloS Biology, 2: 2294-2304.
#' @references Butchart, S.H.M., Akcakaya, H.R., Chanson, J., Baillie, J.E.M., Collen, B., Quader, S., Turner, W.R., Amin, R., Stuart, S.N. & Hilton-Taylor, C. (2007) Improvements to the Red List index. PloS One, 2: e140.
#' @examples sp1 <- raster::raster(matrix(c(1,1,1,0,0,0,0,0,NA), ncol = 3))
#' sp2 <- raster::raster(matrix(c(1,0,0,1,0,0,1,0,NA), ncol = 3))
#' sp3 <- raster::raster(matrix(c(1,0,0,0,0,0,0,0,NA), ncol = 3))
#' sp4 <- raster::raster(matrix(c(0,1,1,1,1,1,1,1,NA), ncol = 3))
#' layers <- raster::stack(sp1, sp2, sp3, sp4)
#' spData <- c("CR","EN","VU","LC")
#' raster::plot(rli.map(spData, layers))
#' @export
rli.map <- function (spData, layers, layers2 = NULL, tree = NULL){
  # Map the RLI per cell (single date) or the per-cell change (two dates).
  if(!is.null(dim(spData))){ #if to calculate change call this same function twice
    if(is.null(layers2)){
      layers2 <- layers  # distributions assumed constant through time
    }
    map1 <- rli.map(spData[,1], layers = layers, tree = tree)
    map2 <- rli.map(spData[,2], layers = layers2, tree = tree)
    return(map2 - map1)
  }
  #convert rasters to array (rows x cols x species)
  layers = raster::as.array(layers)
  #get data for each cell (row by row)
  cells = matrix(NA, (nrow(layers) * ncol(layers)), dim(layers)[3])
  i = 0
  for (r in seq_len(nrow(layers))){
    for(c in seq_len(ncol(layers))){
      i = i + 1
      cells[i,] = layers[r,c,]
    }
  }
  #RLI of each cell
  rliCells = rep(NA, nrow(cells))
  for (i in seq_len(nrow(cells))){
    rliNA <- ifelse(cells[i,] == 1, spData, NA) #only consider species present in each cell
    rliCells[i] = rli.calc(rliNA, tree = tree)
  }
  #create RLI map; cells were filled row by row, hence byrow = TRUE
  # (was byrow = T: never use T/F, they are ordinary reassignable variables)
  rliMap = raster::raster(matrix(rliCells, nrow = nrow(layers), byrow = TRUE))
  return(rliMap)
}
#' Occurrence records for Hogna maderiana (Walckenaer, 1837).
#'
#' Occurrence records for Hogna maderiana (Walckenaer, 1837).
#'
#' @docType data
#' @keywords datasets
#' @name red.records
#' @usage data(red.records)
#' @format Matrix of longitude and latitude (two columns) of occurrence records for Hogna maderiana (Walckenaer, 1837), a spider species from Madeira Island.
NULL
#' Geographic range for Hogna maderiana (Walckenaer, 1837).
#'
#' Geographic range for Hogna maderiana (Walckenaer, 1837).
#'
#' @docType data
#' @keywords datasets
#' @name red.range
#' @usage data(red.range)
#' @format RasterLayer object as defined by package raster of range for Hogna maderiana (Walckenaer, 1837), a spider species from Madeira Island.
NULL
#' Environmental layers for Madeira.
#'
#' Average annual temperature, total annual precipitation, altitude and landcover for Madeira Island (Fick & Hijmans 2017, Tuanmu & Jetz 2014).
#'
#' @docType data
#' @keywords datasets
#' @name red.layers
#' @usage data(red.layers)
#' @format RasterStack object as defined by package raster.
#' @references Fick, S.E. & Hijmans, R.J. (2017) Worldclim 2: new 1-km spatial resolution climate surfaces for global land areas. International Journal of Climatology, in press.
#' @references Tuanmu, M.-N. & Jetz, W. (2014) A global 1-km consensus land-cover product for biodiversity and ecosystem modeling. Global Ecology and Biogeography, 23: 1031-1045.
NULL
#'
#'
#' World country borders.
#'
#' World country borders.
#'
#' @docType data
#' @keywords datasets
#' @name worldborders
#' @usage data(worldborders)
#' @format SpatialPolygonsDataFrame.
NULL
|
#' @title Data - Ambient Temperature for the City of London for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name London2013
#' @docType data
#' @usage data(London2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/EGLL/2013/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for the City of Mumbai, India for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name Mumbai2013
#' @docType data
#' @usage data(Mumbai2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/VABB/2014/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for New York City for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name NewYork2013
#' @docType data
#' @usage data(NewYork2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/KLGA/2013/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Summarized Daily Temperature for the City of San Francisco for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has four columns. The Timestamp (YYYY-MM-DD HH:MM:SS) and three Temperature Columns: Daily Max, Mean and Min (in degrees F)
#' In comparison with the \code{SFO2013} dataset which has 9507 rows, this dataset has exactly
#' 365 rows, one for each day in 2013.
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name SFO2013Summarized
#' @docType data
#' @usage data(SFO2013Summarized)
#' @references
#' \url{http://www.wunderground.com/history/airport/SFO/2013/1/1/CustomHistory.html?dayend=31&monthend=12&yearend=2013&req_city=NA&req_state=NA&req_statename=NA&format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for the City of San Francisco for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name SFO2013
#' @docType data
#' @usage data(SFO2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/KSFO/2013/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for the City of San Francisco for all of 2012
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name SFO2012
#' @docType data
#' @usage data(SFO2012)
#' @references
#' \url{http://www.wunderground.com/history/airport/KSFO/2012/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - US Weather Stations ID's
#'
#' @description This is a data frame of the 1602 stations in Weather Underground's
#' database. The 4-letter "airportCode" is used by functions
#' to check and get the weather data.
#'
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name USAirportWeatherStations
#' @docType data
#' @usage data(USAirportWeatherStations)
#' @references
#' \url{http://www.wunderground.com/about/faq/US_cities.asp}
#' @keywords data
#'
NULL
#' @title Data - International Weather Stations
#' @description This is a data frame of the 1602 stations in Weather Underground's
#' database. The 4-letter "ICAO" is used by the functions in this package
#' to check and get the weather data. Note that not all the stations
#' have weather data.
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name IntlWxStations
#' @docType data
#' @usage data(IntlWxStations)
#' @references This data frame has been created by
#' \url{http://weather.rap.ucar.edu/surface/stations.txt}
#' maintained by Greg Thompson of NCAR.
#' @keywords data
#'
NULL
| /R/data_description.R | no_license | ozagordi/weatherData | R | false | false | 4,697 | r | #' @title Data - Ambient Temperature for the City of London for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name London2013
#' @docType data
#' @usage data(London2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/EGLL/2013/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for the City of Mumbai, India for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name Mumbai2013
#' @docType data
#' @usage data(Mumbai2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/VABB/2014/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for New York City for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name NewYork2013
#' @docType data
#' @usage data(NewYork2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/KLGA/2013/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Summarized Daily Temperature for the City of San Francisco for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has four columns. The Timestamp (YYYY-MM-DD HH:MM:SS) and three Temperature Columns: Daily Max, Mean and Min (in degrees F)
#' In comparison with the \code{SFO2013} dataset which has 9507 rows, this dataset has exactly
#' 365 rows, one for each day in 2013.
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name SFO2013Summarized
#' @docType data
#' @usage data(SFO2013Summarized)
#' @references
#' \url{http://www.wunderground.com/history/airport/SFO/2013/1/1/CustomHistory.html?dayend=31&monthend=12&yearend=2013&req_city=NA&req_state=NA&req_statename=NA&format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for the City of San Francisco for all of 2013
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name SFO2013
#' @docType data
#' @usage data(SFO2013)
#' @references
#' \url{http://www.wunderground.com/history/airport/KSFO/2013/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - Ambient Temperature for the City of San Francisco for all of 2012
#'
#' @description This is a data frame of Ambient temperature data, extracted from Weather Underground.
#' Each row has two entries (columns). The Timestamp (YYYY-MM-DD HH:MM:SS) and the Temperature (in degrees F)
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name SFO2012
#' @docType data
#' @usage data(SFO2012)
#' @references
#' \url{http://www.wunderground.com/history/airport/KSFO/2012/1/1/DailyHistory.html?format=1}
#' @keywords data
#'
NULL
#' @title Data - US Weather Stations ID's
#'
#' @description This is a data frame of the 1602 stations in Weather Underground's
#' database. The 4-letter "airportCode" is used by functions
#' to check and get the weather data.
#'
#'
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name USAirportWeatherStations
#' @docType data
#' @usage data(USAirportWeatherStations)
#' @references
#' \url{http://www.wunderground.com/about/faq/US_cities.asp}
#' @keywords data
#'
NULL
#' @title Data - International Weather Stations
#' @description This is a data frame of the 1602 stations in Weather Underground's
#' database. The 4-letter "ICAO" is used by the functions in this package
#' to check and get the weather data. Note that not all the stations
#' have weather data.
#' @author Ram Narasimhan \email{ramnarasimhan@@gmail.com}
#' @name IntlWxStations
#' @docType data
#' @usage data(IntlWxStations)
#' @references This data frame has been created by
#' \url{http://weather.rap.ucar.edu/surface/stations.txt}
#' maintained by Greg Thompson of NCAR.
#' @keywords data
#'
NULL
|
# Load the internal oncoEnrichR database bundle; the loaded object 'oedb'
# supplies the gene table (genedb) and ligand-receptor lookup tables
# (ligandreceptordb) used by every call below.
load(system.file("internal_db/oedb.rda", package = "oncoEnrichR"))
test_that("Ligand-receptor interactions - testing ", {
# Missing genedb (and ligand-receptor tables) -> must error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF")
)
)
# genedb supplied but ligand-receptor db/xref still missing -> must error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF"),
genedb = oedb$genedb$all
)
)
# ligand_receptor_xref still missing -> must error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF"),
genedb = oedb$genedb$all,
ligand_receptor_db = oedb$ligandreceptordb$cellchatdb$db
)
)
# qgenes must be a character vector of gene symbols, not integers -> error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = as.integer(c(200,300)),
genedb = oedb$genedb$all,
ligand_receptor_db = oedb$ligandreceptordb$cellchatdb$db,
ligand_receptor_xref = oedb$ligandreceptordb$cellchatdb$xref)
)
# Valid call: EGF/EGFR are a known ligand/receptor pair, so the
# secreted_signaling result should contain at least one row.
expect_gte(
NROW(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF"),
genedb = oedb$genedb$all,
ligand_receptor_db = oedb$ligandreceptordb$cellchatdb$db,
ligand_receptor_xref = oedb$ligandreceptordb$cellchatdb$xref)$secreted_signaling
),
1
)
})
| /tests/testthat/test_ligandreceptor.R | permissive | sigven/oncoEnrichR | R | false | false | 1,208 | r |
# Load the internal oncoEnrichR database bundle; the loaded object 'oedb'
# supplies the gene table (genedb) and ligand-receptor lookup tables
# (ligandreceptordb) used by every call below.
load(system.file("internal_db/oedb.rda", package = "oncoEnrichR"))
test_that("Ligand-receptor interactions - testing ", {
# Missing genedb (and ligand-receptor tables) -> must error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF")
)
)
# genedb supplied but ligand-receptor db/xref still missing -> must error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF"),
genedb = oedb$genedb$all
)
)
# ligand_receptor_xref still missing -> must error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF"),
genedb = oedb$genedb$all,
ligand_receptor_db = oedb$ligandreceptordb$cellchatdb$db
)
)
# qgenes must be a character vector of gene symbols, not integers -> error.
expect_error(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = as.integer(c(200,300)),
genedb = oedb$genedb$all,
ligand_receptor_db = oedb$ligandreceptordb$cellchatdb$db,
ligand_receptor_xref = oedb$ligandreceptordb$cellchatdb$xref)
)
# Valid call: EGF/EGFR are a known ligand/receptor pair, so the
# secreted_signaling result should contain at least one row.
expect_gte(
NROW(
oncoEnrichR:::annotate_ligand_receptor_interactions(
qgenes = c("EGFR", "EGF"),
genedb = oedb$genedb$all,
ligand_receptor_db = oedb$ligandreceptordb$cellchatdb$db,
ligand_receptor_xref = oedb$ligandreceptordb$cellchatdb$xref)$secreted_signaling
),
1
)
})
|
#--------------------------------------------------------------------------------------
#
# explore the specificity based on physchem properties
#
#--------------------------------------------------------------------------------------
# Compare physicochemical properties between receptor-specific chemicals and
# non-specific ("None") chemicals, receptor by receptor.
#
# cutoff: minimum specificity score (applied to both the specificity.Z and
#   specificity.T columns of SUPERMATRIX) for a chemical to count as specific.
#
# Side effects: writes a tab-delimited t-test summary to
#   ../output/physchem_specificity_ztfiltered.txt, appends the physchem
#   columns to the global SUPERMATRIX (via <<-) and writes the result to
#   ../output/superMatrix_ATG_NVS_Tox21_physchem.csv.
#
# Globals read: SUPERMATRIX, CODE.LIST. printCurrentFunction() is defined
# elsewhere -- presumably it only echoes the function name; confirm.
dxPhyschemSpecificityZtfiltered <- function(cutoff=0.5) {
printCurrentFunction()
col.name <- "maximum.receptor"
file <- "../output/physchem_specificity_ztfiltered.txt"
filename <- "../input/ToxCast_physchem_QP_Chembl_electrophil_DFT.csv"
physchem <- read.csv(file=filename,stringsAsFactors=F)
# Recode the 'species' column into a numeric "Charged" flag:
# NEUTRAL -> 0, ACID/BASE -> 1, anything else stays NA.
temp <- physchem[,"species"]
temp2 <- temp
temp2[] <- 0
temp2 <- as.numeric(temp2)
temp2[] <- NA
temp2[is.element(temp,"NEUTRAL")] <- 0
temp2[is.element(temp,"ACID")] <- 1
temp2[is.element(temp,"BASE")] <- 1
physchem <- cbind(physchem,temp2)
names(physchem)[dim(physchem)[2]] <- "Charged"
# Write the header row of the output table (overwriting any previous file).
s <- paste("Variable\tReceptor\tN.in\tN.out\tnorm.in\tnorm.out\tp.value\n")
cat(s,file=file,append=F)
# Keep only chemicals passing the cutoff on both Z and T specificity scores.
temp <- SUPERMATRIX[SUPERMATRIX[,"specificity.Z"]>=cutoff,]
supermatrix <- temp[temp[,"specificity.T"]>=cutoff,]
#PHYSCHEM <<- physchem
rownames(physchem) <- physchem[,"CODE"]
#physchem <- physchem[row.names(supermatrix),]
# Physchem parameter columns start at column 10; positions 38-39 are
# hard-coded as character (non-numeric) parameters -- TODO confirm these
# indices still match the current input file layout.
pnames <- names(physchem)[10:dim(physchem)[2]]
nparam <- length(pnames)
pclass <- pnames
pclass[] <- "numeric"
pclass[38] <- "character"
pclass[39] <- "character"
rec.list <- sort(unique(supermatrix[,col.name]))
nrec <- length(rec.list)
# For each receptor: compare every numeric parameter between chemicals
# specific to that receptor ("in") and the non-specific chemicals ("out").
for(i in 1:nrec) {
receptor <- rec.list[i]
rec.mask <- supermatrix[,col.name]
rec.mask[] <- 0
rec.mask[is.element(supermatrix[,col.name],receptor)] <- 1
codes.in <- supermatrix[rec.mask==1,"CODE"]
codes.out <- SUPERMATRIX[is.element(SUPERMATRIX[,col.name],"None"),"CODE"]
for(j in 1:nparam) {
# Only test numeric parameters, and only when the receptor has at least
# 5 specific chemicals.
if(pclass[j]=="numeric" && length(codes.in)>=5) {
param <- pnames[j]
y.in <- physchem[codes.in,param]
y.out <- physchem[codes.out,param]
y.in <- y.in[!is.na(y.in)]
y.out <- y.out[!is.na(y.out)]
n.in <- length(y.in)
n.out <- length(y.out)
mean.in <- mean(y.in)
mean.out <- mean(y.out)
# Default p-value of 1 when the t-test cannot be run.
p.val <- 1
if(!is.na(mean.in)) {
if(!is.na(mean.out)) {
if(n.in>=5 && n.out>=5) {
# Two-sample t-test of in-group vs out-group values.
res <- t.test(y.in,y.out)
cat(param," : ",receptor,"\n")
print(res)
p.val <- res$p.value
}
}
}
# Append one summary row per (parameter, receptor) pair.
if(n.in>=5) {
s <- paste(param,"\t",receptor,"\t",n.in,"\t",n.out,"\t",format(mean.in,digits=2),"\t",format(mean.out,digits=2),"\t",format(p.val,digits=2),"\n",sep="")
cat(s,file=file,append=T)
cat(s)
}
}
}
}
# Attach the physchem parameter columns (row-ordered by the global CODE.LIST)
# to SUPERMATRIX, update the global, and persist it to disk.
physchem <- physchem[CODE.LIST,10:dim(physchem)[2]]
temp <- cbind(SUPERMATRIX,physchem)
SUPERMATRIX <<- temp
outfile <- "../output/superMatrix_ATG_NVS_Tox21_physchem.csv"
write.csv(SUPERMATRIX,file=outfile, row.names=F)
}
| /dxPhyschemSpecificityZtfiltered.R | no_license | rsjudson/armin | R | false | false | 2,955 | r | #--------------------------------------------------------------------------------------
#
# explore the specificity based on physchem properties
#
#--------------------------------------------------------------------------------------
# Compare physicochemical properties between receptor-specific chemicals and
# non-specific ("None") chemicals, receptor by receptor.
#
# cutoff: minimum specificity score (applied to both the specificity.Z and
#   specificity.T columns of SUPERMATRIX) for a chemical to count as specific.
#
# Side effects: writes a tab-delimited t-test summary to
#   ../output/physchem_specificity_ztfiltered.txt, appends the physchem
#   columns to the global SUPERMATRIX (via <<-) and writes the result to
#   ../output/superMatrix_ATG_NVS_Tox21_physchem.csv.
#
# Globals read: SUPERMATRIX, CODE.LIST. printCurrentFunction() is defined
# elsewhere -- presumably it only echoes the function name; confirm.
dxPhyschemSpecificityZtfiltered <- function(cutoff=0.5) {
printCurrentFunction()
col.name <- "maximum.receptor"
file <- "../output/physchem_specificity_ztfiltered.txt"
filename <- "../input/ToxCast_physchem_QP_Chembl_electrophil_DFT.csv"
physchem <- read.csv(file=filename,stringsAsFactors=F)
# Recode the 'species' column into a numeric "Charged" flag:
# NEUTRAL -> 0, ACID/BASE -> 1, anything else stays NA.
temp <- physchem[,"species"]
temp2 <- temp
temp2[] <- 0
temp2 <- as.numeric(temp2)
temp2[] <- NA
temp2[is.element(temp,"NEUTRAL")] <- 0
temp2[is.element(temp,"ACID")] <- 1
temp2[is.element(temp,"BASE")] <- 1
physchem <- cbind(physchem,temp2)
names(physchem)[dim(physchem)[2]] <- "Charged"
# Write the header row of the output table (overwriting any previous file).
s <- paste("Variable\tReceptor\tN.in\tN.out\tnorm.in\tnorm.out\tp.value\n")
cat(s,file=file,append=F)
# Keep only chemicals passing the cutoff on both Z and T specificity scores.
temp <- SUPERMATRIX[SUPERMATRIX[,"specificity.Z"]>=cutoff,]
supermatrix <- temp[temp[,"specificity.T"]>=cutoff,]
#PHYSCHEM <<- physchem
rownames(physchem) <- physchem[,"CODE"]
#physchem <- physchem[row.names(supermatrix),]
# Physchem parameter columns start at column 10; positions 38-39 are
# hard-coded as character (non-numeric) parameters -- TODO confirm these
# indices still match the current input file layout.
pnames <- names(physchem)[10:dim(physchem)[2]]
nparam <- length(pnames)
pclass <- pnames
pclass[] <- "numeric"
pclass[38] <- "character"
pclass[39] <- "character"
rec.list <- sort(unique(supermatrix[,col.name]))
nrec <- length(rec.list)
# For each receptor: compare every numeric parameter between chemicals
# specific to that receptor ("in") and the non-specific chemicals ("out").
for(i in 1:nrec) {
receptor <- rec.list[i]
rec.mask <- supermatrix[,col.name]
rec.mask[] <- 0
rec.mask[is.element(supermatrix[,col.name],receptor)] <- 1
codes.in <- supermatrix[rec.mask==1,"CODE"]
codes.out <- SUPERMATRIX[is.element(SUPERMATRIX[,col.name],"None"),"CODE"]
for(j in 1:nparam) {
# Only test numeric parameters, and only when the receptor has at least
# 5 specific chemicals.
if(pclass[j]=="numeric" && length(codes.in)>=5) {
param <- pnames[j]
y.in <- physchem[codes.in,param]
y.out <- physchem[codes.out,param]
y.in <- y.in[!is.na(y.in)]
y.out <- y.out[!is.na(y.out)]
n.in <- length(y.in)
n.out <- length(y.out)
mean.in <- mean(y.in)
mean.out <- mean(y.out)
# Default p-value of 1 when the t-test cannot be run.
p.val <- 1
if(!is.na(mean.in)) {
if(!is.na(mean.out)) {
if(n.in>=5 && n.out>=5) {
# Two-sample t-test of in-group vs out-group values.
res <- t.test(y.in,y.out)
cat(param," : ",receptor,"\n")
print(res)
p.val <- res$p.value
}
}
}
# Append one summary row per (parameter, receptor) pair.
if(n.in>=5) {
s <- paste(param,"\t",receptor,"\t",n.in,"\t",n.out,"\t",format(mean.in,digits=2),"\t",format(mean.out,digits=2),"\t",format(p.val,digits=2),"\n",sep="")
cat(s,file=file,append=T)
cat(s)
}
}
}
}
# Attach the physchem parameter columns (row-ordered by the global CODE.LIST)
# to SUPERMATRIX, update the global, and persist it to disk.
physchem <- physchem[CODE.LIST,10:dim(physchem)[2]]
temp <- cbind(SUPERMATRIX,physchem)
SUPERMATRIX <<- temp
outfile <- "../output/superMatrix_ATG_NVS_Tox21_physchem.csv"
write.csv(SUPERMATRIX,file=outfile, row.names=F)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sym_var.R
\name{sym.var}
\alias{sym.var}
\title{Symbolic Variable}
\usage{
sym.var(sym.data, number.sym.var)
}
\arguments{
\item{sym.data}{The symbolic data table}
\item{number.sym.var}{The number of the column for the variable (feature) that we want to get.}
}
\value{
Return a symbolic data variable with the following structure: \cr
$N\cr
[1] 7\cr
$var.name\cr
[1] 'F6'\cr
$var.type\cr
[1] '$I'\cr
$obj.names\cr
[1] 'Case1' 'Case2' 'Case3' 'Case4' 'Case5' 'Case6' 'Case7'\cr
$var.data.vector\cr
F6 F6.1\cr
Case1 0.00 90.00\cr
Case2 -90.00 98.00\cr
Case3 65.00 90.00\cr
Case4 45.00 89.00\cr
Case5 20.00 40.00\cr
Case6 5.00 8.00\cr
Case7 3.14 6.76\cr
}
\description{
This function gets a symbolic variable from a symbolic data table.
}
\references{
Billard L. and Diday E. (2006).
Symbolic data analysis: Conceptual statistics and data mining. Wiley, Chichester.
Bock H-H. and Diday E. (eds.) (2000).
Analysis of Symbolic Data. Exploratory methods for extracting statistical information
from complex data. Springer, Germany.
}
\seealso{
sym.obj
}
\author{
Oldemar Rodriguez Rojas
}
\keyword{Symbolic}
\keyword{Variable}
| /man/sym.var.Rd | no_license | cran/RSDA | R | false | true | 1,230 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sym_var.R
\name{sym.var}
\alias{sym.var}
\title{Symbolic Variable}
\usage{
sym.var(sym.data, number.sym.var)
}
\arguments{
\item{sym.data}{The symbolic data table}
\item{number.sym.var}{The number of the column for the variable (feature) that we want to get.}
}
\value{
Return a symbolic data variable with the following structure: \cr
$N\cr
[1] 7\cr
$var.name\cr
[1] 'F6'\cr
$var.type\cr
[1] '$I'\cr
$obj.names\cr
[1] 'Case1' 'Case2' 'Case3' 'Case4' 'Case5' 'Case6' 'Case7'\cr
$var.data.vector\cr
F6 F6.1\cr
Case1 0.00 90.00\cr
Case2 -90.00 98.00\cr
Case3 65.00 90.00\cr
Case4 45.00 89.00\cr
Case5 20.00 40.00\cr
Case6 5.00 8.00\cr
Case7 3.14 6.76\cr
}
\description{
This function gets a symbolic variable from a symbolic data table.
}
\references{
Billard L. and Diday E. (2006).
Symbolic data analysis: Conceptual statistics and data mining. Wiley, Chichester.
Bock H-H. and Diday E. (eds.) (2000).
Analysis of Symbolic Data. Exploratory methods for extracting statistical information
from complex data. Springer, Germany.
}
\seealso{
sym.obj
}
\author{
Oldemar Rodriguez Rojas
}
\keyword{Symbolic}
\keyword{Variable}
|
\name{VIR}
\alias{gapVIR}
\alias{gradxgapVIR}
\alias{gradygapVIR}
\alias{fpVIR}
\title{Nikaido Isoda Reformulation}
\description{
Functions of the Nikaido-Isoda reformulation of the GNEP.
}
\usage{
gapVIR(x, y, dimx, grobj, arggrobj, param=list(), echo=FALSE)
gradxgapVIR(x, y, dimx, grobj, arggrobj, heobj, argheobj, param=list(), echo=FALSE)
gradygapVIR(x, y, dimx, grobj, arggrobj, param=list(), echo=FALSE)
fpVIR(x, dimx, obj, argobj, joint, argjoint,
grobj, arggrobj, jacjoint, argjacjoint, param=list(),
echo=FALSE, control=list(), yinit=NULL, optim.method="default")
}
\arguments{
\item{x,y}{a numeric vector.}
\item{dimx}{a vector of dimension for \code{x}.}
\item{obj}{objective function (to be minimized), see details.}
\item{argobj}{a list of additional arguments.}
\item{grobj}{gradient of the objective function, see details.}
\item{arggrobj}{a list of additional arguments of the objective gradient.}
\item{heobj}{Hessian of the objective function, see details.}
\item{argheobj}{a list of additional arguments of the objective Hessian.}
\item{joint}{joint function, see details.}
\item{argjoint}{a list of additional arguments of the joint function.}
\item{jacjoint}{gradient of the joint function, see details.}
\item{argjacjoint}{a list of additional arguments of the joint Jacobian.}
\item{param}{ a list of parameters.}
\item{control}{a list with control parameters for the fixed point algorithm.}
\item{yinit}{initial point when computing the fixed-point function.}
\item{optim.method}{optimization method when computing the fixed-point function.}
\item{echo}{a logical to show some traces.}
}
\details{
\code{gapVIR} computes the Nikaido Isoda function of the GNEP, while \code{gradxgapVIR}
and \code{gradygapVIR} give its gradient with respect to \eqn{x} and \eqn{y}.
\code{fpVIR} computes the fixed-point function.
}
\value{
A vector for \code{funSSR} or a matrix for \code{jacSSR}.
}
\references{
A. von Heusinger & J. Kanzow (2009),
\emph{Optimization reformulations of the generalized Nash equilibrium problem using Nikaido-Isoda-type functions},
Comput Optim Appl .
F. Facchinei, A. Fischer and V. Piccialli (2009),
\emph{Generalized Nash equilibrium problems and Newton methods},
Math. Program.
}
\seealso{
See also \code{\link{GNE.fpeq}}.
}
\author{
Christophe Dutang
}
\keyword{math}
\keyword{optimize}
| /man/util-VIR.Rd | no_license | cran/GNE | R | false | false | 2,402 | rd | \name{VIR}
\alias{gapVIR}
\alias{gradxgapVIR}
\alias{gradygapVIR}
\alias{fpVIR}
\title{Nikaido Isoda Reformulation}
\description{
Functions of the Nikaido-Isoda reformulation of the GNEP.
}
\usage{
gapVIR(x, y, dimx, grobj, arggrobj, param=list(), echo=FALSE)
gradxgapVIR(x, y, dimx, grobj, arggrobj, heobj, argheobj, param=list(), echo=FALSE)
gradygapVIR(x, y, dimx, grobj, arggrobj, param=list(), echo=FALSE)
fpVIR(x, dimx, obj, argobj, joint, argjoint,
grobj, arggrobj, jacjoint, argjacjoint, param=list(),
echo=FALSE, control=list(), yinit=NULL, optim.method="default")
}
\arguments{
\item{x,y}{a numeric vector.}
\item{dimx}{a vector of dimension for \code{x}.}
\item{obj}{objective function (to be minimized), see details.}
\item{argobj}{a list of additional arguments.}
\item{grobj}{gradient of the objective function, see details.}
\item{arggrobj}{a list of additional arguments of the objective gradient.}
\item{heobj}{Hessian of the objective function, see details.}
\item{argheobj}{a list of additional arguments of the objective Hessian.}
\item{joint}{joint function, see details.}
\item{argjoint}{a list of additional arguments of the joint function.}
\item{jacjoint}{gradient of the joint function, see details.}
\item{argjacjoint}{a list of additional arguments of the joint Jacobian.}
\item{param}{ a list of parameters.}
\item{control}{a list with control parameters for the fixed point algorithm.}
\item{yinit}{initial point when computing the fixed-point function.}
\item{optim.method}{optimization method when computing the fixed-point function.}
\item{echo}{a logical to show some traces.}
}
\details{
\code{gapVIR} computes the Nikaido Isoda function of the GNEP, while \code{gradxgapVIR}
and \code{gradygapVIR} give its gradient with respect to \eqn{x} and \eqn{y}.
\code{fpVIR} computes the fixed-point function.
}
\value{
A vector for \code{funSSR} or a matrix for \code{jacSSR}.
}
\references{
A. von Heusinger & J. Kanzow (2009),
\emph{Optimization reformulations of the generalized Nash equilibrium problem using Nikaido-Isoda-type functions},
Comput Optim Appl .
F. Facchinei, A. Fischer and V. Piccialli (2009),
\emph{Generalized Nash equilibrium problems and Newton methods},
Math. Program.
}
\seealso{
See also \code{\link{GNE.fpeq}}.
}
\author{
Christophe Dutang
}
\keyword{math}
\keyword{optimize}
|
################################################################################
# Company : Stevens
# Course  : Data Mining
# Purpose : Apply naive Bayes to the "breast cancer dataset"
# Author  : Kunj Desai (ID 1044511)
# Date    : 03/12/2020
################################################################################

# Start from a clean workspace.
rm(list = ls())

# install.packages('e1071', dependencies = TRUE)
library(e1071)
library(class)

# Load the Wisconsin breast-cancer data: first column numeric (sample ID),
# the remaining ten columns read as factors.
raw <- read.csv(
  file = "/Users/kunj/Desktop/Stevens/Spring '20/CS 513/Hw-4/breast-cancer-wisconsin.data.csv",
  header = TRUE,
  colClasses = c('numeric', rep(x = 'factor', times = 10))
)

# '?' marks missing entries: convert to NA, then keep only complete rows.
is.na(raw) <- raw == '?'
cancer <- raw[complete.cases(raw), ]
View(cancer)
# Data loading and cleaning complete.

# Fix the RNG seed so the train/test split is reproducible.
set.seed(1)

# Hold out 30% of rows for testing; drop the ID column (column 1) from both.
train_idx <- sample(nrow(cancer), size = floor(.70 * nrow(cancer)), replace = FALSE)
training <- cancer[train_idx, -1]
testing <- cancer[-train_idx, -1]

## Fit a naive Bayes model of Class on the F1:F9 feature columns.
nb_fit <- naiveBayes(Class ~ ., data = training)

## Score the held-out rows.
predicted <- predict(nb_fit, testing)

## Cross-tabulate predictions against the true class labels.
confusion <- ftable(TestData = testing$Class, PredictedData = predicted)
prop.table(confusion)

## Error rate and accuracy (%) on the test set.
n_wrong <- sum(predicted != testing$Class)
error_rate <- n_wrong / length(predicted)
accuracy <- (1 - error_rate) * 100
accuracy
| /CS 513 KDD/Hw-4/Desai_Kunj_10444511_HW4.R | no_license | KunjDesai96/Stevens | R | false | false | 1,622 | r | ################################################################################
# Company : Stevens
# Course  : Data Mining
# Purpose : Apply naive Bayes to the "breast cancer dataset"
# Author  : Kunj Desai (ID 1044511)
# Date    : 03/12/2020
################################################################################

# Start from a clean workspace.
rm(list = ls())

# install.packages('e1071', dependencies = TRUE)
library(e1071)
library(class)

# Load the Wisconsin breast-cancer data: first column numeric (sample ID),
# the remaining ten columns read as factors.
raw <- read.csv(
  file = "/Users/kunj/Desktop/Stevens/Spring '20/CS 513/Hw-4/breast-cancer-wisconsin.data.csv",
  header = TRUE,
  colClasses = c('numeric', rep(x = 'factor', times = 10))
)

# '?' marks missing entries: convert to NA, then keep only complete rows.
is.na(raw) <- raw == '?'
cancer <- raw[complete.cases(raw), ]
View(cancer)
# Data loading and cleaning complete.

# Fix the RNG seed so the train/test split is reproducible.
set.seed(1)

# Hold out 30% of rows for testing; drop the ID column (column 1) from both.
train_idx <- sample(nrow(cancer), size = floor(.70 * nrow(cancer)), replace = FALSE)
training <- cancer[train_idx, -1]
testing <- cancer[-train_idx, -1]

## Fit a naive Bayes model of Class on the F1:F9 feature columns.
nb_fit <- naiveBayes(Class ~ ., data = training)

## Score the held-out rows.
predicted <- predict(nb_fit, testing)

## Cross-tabulate predictions against the true class labels.
confusion <- ftable(TestData = testing$Class, PredictedData = predicted)
prop.table(confusion)

## Error rate and accuracy (%) on the test set.
n_wrong <- sum(predicted != testing$Class)
error_rate <- n_wrong / length(predicted)
accuracy <- (1 - error_rate) * 100
accuracy
|
# Source shared function to retrieve source file and return subset of data.
# (download_subset_data() is defined in download_subset_data.R; presumably it
# loads the household power consumption subset -- confirm against that file.)
source("download_subset_data.R")
# plot1: side-effect-only function; writes a 480x480 PNG histogram of
# Global_active_power to plot1.png in the working directory.
plot1 <- function() {
# Read in data set.
data <- download_subset_data()
# Open PNG device for plot.
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "white")
# Create Global Active Power plot (histogram, kilowatts on the x-axis).
with(data, hist(Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red"))
# Close device so the PNG file is flushed to disk.
dev.off()
} | /plot1.R | no_license | laurenmc/ExData_Plotting1 | R | false | false | 586 | r | # Source shared function to retrieve source file and return subset of data.
# download_subset_data() (defined in the sourced file) returns the data subset
# used for plotting -- confirm its contract against that script.
source("download_subset_data.R")
# plot1: side-effect-only; writes a 480x480 histogram of Global_active_power
# to plot1.png in the working directory.
plot1 <- function() {
# Read in data set.
data <- download_subset_data()
# Open PNG device for plot.
png(filename = "plot1.png", width = 480, height = 480, units = "px", bg = "white")
# Create Global Active Power plot (histogram in kilowatts).
with(data, hist(Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red"))
# Close device so the PNG is written out.
dev.off()
} |
# Assemble Pacific cod / walleye pollock catch (CPUE) data for the western
# Gulf of Alaska (WGOA) from the 2018-2019 and 2020 seine catch files.
library(tidyverse)
library(mgcv)
library(chron)
theme_set(theme_bw())
# Colorblind-friendly palette; kept for downstream plotting.
cb <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# load WGOA data
d2 <- read.csv("data/2018 2019 catch.csv")
# combine with complete 2020 catch data!
d3 <- read.csv("data/all_cpue_2020.csv")
# add a measured column (the 2020 file has no measured-fish counts)
d3$Measured <- NA
unique(d3$species) # looks good, no different names for cod/pollock
# combine with d2
d3 <- d3 %>%
select(Station, species, Measured, CPUE)
# align column names with the 2018-2019 file before stacking
names(d3) <- c("Station", "Species", "Measured", "Total.catch")
# combine
d2 <- rbind(d2, d3)
# check for repeat spp names
unique(d2$Species)[order(unique(d2$Species))]
# pollock and cod are clean - no different names!
cod <- d2 %>%
filter(Species=="Pacific cod")
cod <- data.frame(Station=cod$Station,
species="Pacific cod",
total=cod$Total.catch,
measured=cod$Measured)
pollock <- d2 %>%
filter(Species=="walleye pollock")
pollock <- data.frame(Station=pollock$Station,
species="walleye pollock",
total=pollock$Total.catch,
measured=pollock$Measured)
# one row per station; left_join fills in the cod catch where present
temp1 <- data.frame(Station=unique(d2$Station))
temp1 <- left_join(temp1, cod)
# fill in NA species as cod (stations with no cod recorded)
change <- is.na(temp1$species)
temp1$species[change] <- "Pacific cod"
# now pollock
temp2 <- data.frame(Station=unique(d2$Station))
temp2 <- left_join(temp2, pollock)
# fill in NA species as pollock (stations with no pollock recorded)
change <- is.na(temp2$species)
temp2$species[change] <- "walleye pollock"
# combine
wgoa.dat <- rbind(temp1, temp2)
# change NAs to 0 (absent catch of a species is a true zero)
change <- is.na(wgoa.dat)
wgoa.dat[change] <- 0
# now need to add year, julian day, site, and bay!
d4 <- read.csv("data/2018 2020 site.csv")
head(d4)
# retain only the keeper sets
## NB! changing 115 to cpue==no!
d4$use.for.CPUE[d4$Station==115] <- "no"
d4 <- d4 %>%
filter(use.for.CPUE=="yes")
d4 <- d4 %>%
select(Date, Station, Site, Bay, Temp.C)
# calculate Julian day
# chron::dates() parses the date strings; lubridate::yday() gives day-of-year
# and chron::years() extracts the year component.
d4$Date <- dates(as.character(d4$Date))
d4$julian <- lubridate::yday(d4$Date)
d4$year <- years(d4$Date)
head(d4)
d4 <- d4 %>%
select(-Date)
# harmonize the join key name ("station") across both tables
names(d4) <- names(wgoa.dat)[1] <- "station"
d4 <- left_join(d4, wgoa.dat)
names(d4)[2:4] <- c("site", "bay", "temp.c")
# remove Kujulik
d4 <- d4 %>%
filter(bay != "Kujulik")
hist(d4$julian)
str(d4)
# now load WGOA length data
wgoa.len <- read.csv("data/gadid_len.csv")
# print any length records with a missing station (inspection only)
wgoa.len[is.na(wgoa.len$Station), ]
wgoa.len$Species <- as.character(wgoa.len$Species)
# restrict to age-0
head(wgoa.len)
hist(wgoa.len$Length, breaks=50)
# get count of fish >= 150 mm
# fish >= 150 mm are treated as age-1; the rest of the total is age-0
age.1 <- wgoa.len %>%
filter(Length >= 150) %>%
group_by(Station, Species) %>%
summarise(age.1=n())
names(age.1)[1:2] <- c("station", "species")
d4 <- left_join(d4, age.1)
# replace age.1 NA with 0
change <- is.na(d4$age.1)
d4$age.1[change] <- 0
d4$age.0 <- d4$total-d4$age.1
d4 <- d4 %>%
select(-total, -measured)
# that's our cpue by age!
# exploratory plots!
## Read in data --------------------------------------------
# Work on the cod subset only for the exploratory figures.
data <- d4 %>%
filter(species == "Pacific cod")
# Convert day-of-year to a calendar date. Subtract 1 because yday() is
# 1-based while as.Date(x, origin) adds x days to the origin (Jan 1).
data$date <- as.Date(data$julian - 1,
origin = paste0(data$year, "-01-01"))
## Explore data --------------------------------------------
## Check distributions of the age-1 counts
plot(data$age.1)
hist(data$age.1, breaks = 100) ## lots of zeros
tab <- table(data$age.1)
plot(tab)
# Age-1 catch through the season, one panel per bay, colored by site.
g <- ggplot(data) +
aes(x = date, y = age.1, color = site) +
geom_point() +
facet_wrap( ~ bay) +
theme(legend.position = "none")
print(g)
## all looks swell!
# Write the full (cod + pollock) CPUE-by-age table for downstream analyses.
# row.names spelled out as FALSE: T/F are reassignable aliases.
write.csv(d4, "./output/wgoa.cod.poll.cpue.csv", row.names = FALSE)
| /scripts/cod_pollock_cpue_by_age.R | no_license | mikelitzow/seine-utilities | R | false | false | 3,639 | r | library(tidyverse)
# Assemble Pacific cod / walleye pollock catch (CPUE) data for the western
# Gulf of Alaska (WGOA) from the 2018-2019 and 2020 seine catch files.
library(mgcv)
library(chron)
theme_set(theme_bw())
# Colorblind-friendly palette; kept for downstream plotting.
cb <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# load WGOA data
d2 <- read.csv("data/2018 2019 catch.csv")
# combine with complete 2020 catch data!
d3 <- read.csv("data/all_cpue_2020.csv")
# add a measured column (the 2020 file has no measured-fish counts)
d3$Measured <- NA
unique(d3$species) # looks good, no different names for cod/pollock
# combine with d2
d3 <- d3 %>%
select(Station, species, Measured, CPUE)
# align column names with the 2018-2019 file before stacking
names(d3) <- c("Station", "Species", "Measured", "Total.catch")
# combine
d2 <- rbind(d2, d3)
# check for repeat spp names
unique(d2$Species)[order(unique(d2$Species))]
# pollock and cod are clean - no different names!
cod <- d2 %>%
filter(Species=="Pacific cod")
cod <- data.frame(Station=cod$Station,
species="Pacific cod",
total=cod$Total.catch,
measured=cod$Measured)
pollock <- d2 %>%
filter(Species=="walleye pollock")
pollock <- data.frame(Station=pollock$Station,
species="walleye pollock",
total=pollock$Total.catch,
measured=pollock$Measured)
# one row per station; left_join fills in the cod catch where present
temp1 <- data.frame(Station=unique(d2$Station))
temp1 <- left_join(temp1, cod)
# fill in NA species as cod (stations with no cod recorded)
change <- is.na(temp1$species)
temp1$species[change] <- "Pacific cod"
# now pollock
temp2 <- data.frame(Station=unique(d2$Station))
temp2 <- left_join(temp2, pollock)
# fill in NA species as pollock (stations with no pollock recorded)
change <- is.na(temp2$species)
temp2$species[change] <- "walleye pollock"
# combine
wgoa.dat <- rbind(temp1, temp2)
# change NAs to 0 (absent catch of a species is a true zero)
change <- is.na(wgoa.dat)
wgoa.dat[change] <- 0
# now need to add year, julian day, site, and bay!
d4 <- read.csv("data/2018 2020 site.csv")
head(d4)
# retain only the keeper sets
## NB! changing 115 to cpue==no!
d4$use.for.CPUE[d4$Station==115] <- "no"
d4 <- d4 %>%
filter(use.for.CPUE=="yes")
d4 <- d4 %>%
select(Date, Station, Site, Bay, Temp.C)
# calculate Julian day
# chron::dates() parses the date strings; lubridate::yday() gives day-of-year
# and chron::years() extracts the year component.
d4$Date <- dates(as.character(d4$Date))
d4$julian <- lubridate::yday(d4$Date)
d4$year <- years(d4$Date)
head(d4)
d4 <- d4 %>%
select(-Date)
# harmonize the join key name ("station") across both tables
names(d4) <- names(wgoa.dat)[1] <- "station"
d4 <- left_join(d4, wgoa.dat)
names(d4)[2:4] <- c("site", "bay", "temp.c")
# remove Kujulik
d4 <- d4 %>%
filter(bay != "Kujulik")
hist(d4$julian)
str(d4)
# now load WGOA length data
wgoa.len <- read.csv("data/gadid_len.csv")
# print any length records with a missing station (inspection only)
wgoa.len[is.na(wgoa.len$Station), ]
wgoa.len$Species <- as.character(wgoa.len$Species)
# restrict to age-0
head(wgoa.len)
hist(wgoa.len$Length, breaks=50)
# get count of fish >= 150 mm
# fish >= 150 mm are treated as age-1; the rest of the total is age-0
age.1 <- wgoa.len %>%
filter(Length >= 150) %>%
group_by(Station, Species) %>%
summarise(age.1=n())
names(age.1)[1:2] <- c("station", "species")
d4 <- left_join(d4, age.1)
# replace age.1 NA with 0
change <- is.na(d4$age.1)
d4$age.1[change] <- 0
d4$age.0 <- d4$total-d4$age.1
d4 <- d4 %>%
select(-total, -measured)
# that's our cpue by age!
# exploratory plots!
## Read in data --------------------------------------------
# Work on the cod subset only for the exploratory figures.
data <- d4 %>%
filter(species == "Pacific cod")
# Convert day-of-year to a calendar date. Subtract 1 because yday() is
# 1-based while as.Date(x, origin) adds x days to the origin (Jan 1).
data$date <- as.Date(data$julian - 1,
origin = paste0(data$year, "-01-01"))
## Explore data --------------------------------------------
## Check distributions of the age-1 counts
plot(data$age.1)
hist(data$age.1, breaks = 100) ## lots of zeros
tab <- table(data$age.1)
plot(tab)
# Age-1 catch through the season, one panel per bay, colored by site.
g <- ggplot(data) +
aes(x = date, y = age.1, color = site) +
geom_point() +
facet_wrap( ~ bay) +
theme(legend.position = "none")
print(g)
## all looks swell!
# Write the full (cod + pollock) CPUE-by-age table for downstream analyses.
# row.names spelled out as FALSE: T/F are reassignable aliases.
write.csv(d4, "./output/wgoa.cod.poll.cpue.csv", row.names = FALSE)
|
##This program creates plot3.png showing trends of PM2.5 over 1999-2008 time period
##Reading in the data (NEI = emissions records, SCC = source classification table)
nei<-readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
# scc is loaded for reference but not used below
scc<-readRDS("exdata-data-NEI_data/Source_Classification_Code.rds")
##Subsetting for Baltimore data (fips code "24510" = Baltimore City, MD)
baltimore<-subset(nei, fips == "24510")
##Acquiring total Emissions across all sources, grouped by year and source type
library(dplyr)
baltimorePM<-summarise(group_by(baltimore, year, type), sum(Emissions))
##Renaming column names of baltimorePM (third column holds the yearly sums)
names(baltimorePM)<-c("year", "type", "Emissions")
##Opening graphics device, plotting and closing the device
png("plot3.png", width = 480, height = 480, units = "px")
library(ggplot2)
plot<-qplot(year, Emissions, data = baltimorePM) + geom_line(aes(color = type), size = 1) + labs(title = "Baltimore Type Trend from 1999-2008") + labs(x = "Year")
print(plot)
dev.off() | /plot3.R | no_license | Nagateja/Exploratory-Data-Analysis | R | false | false | 881 | r | ##This program creates plot3.png showing trends of PM2.5 over 1999-2008 time period
##Reading in the data (NEI = emissions records, SCC = source classification table)
nei<-readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
# scc is loaded for reference but not used below
scc<-readRDS("exdata-data-NEI_data/Source_Classification_Code.rds")
##Subsetting for Baltimore data (fips code "24510" = Baltimore City, MD)
baltimore<-subset(nei, fips == "24510")
##Acquiring total Emissions across all sources, grouped by year and source type
library(dplyr)
baltimorePM<-summarise(group_by(baltimore, year, type), sum(Emissions))
##Renaming column names of baltimorePM (third column holds the yearly sums)
names(baltimorePM)<-c("year", "type", "Emissions")
##Opening graphics device, plotting and closing the device
png("plot3.png", width = 480, height = 480, units = "px")
library(ggplot2)
plot<-qplot(year, Emissions, data = baltimorePM) + geom_line(aes(color = type), size = 1) + labs(title = "Baltimore Type Trend from 1999-2008") + labs(x = "Year")
print(plot)
dev.off() |
# Step 4 of the resequencing filter pipeline: for one (individual, chromosome)
# pair -- selected by the SGE array task id -- pipe the filtered all-sites VCF
# through vcfAllSiteParser.py to produce a callability mask (bed.gz) and a VCF
# with homozygous-reference sites removed.
library(data.table)
library(tidyverse)
library(glue)
library(bedr)
## Step 4 Convert this vcf to bed file for the masks
vcftools<-"/u/home/j/jessegar/vcftools-vcftools-2543f81/src/cpp/vcftools"
outputDir<-"/u/flashscratch/j/jessegar/FilteringResequenceHighP"
# SGE_TASK_ID (1-based) picks which row of the command table below to run.
SGETaskID<-parse_integer(Sys.getenv("SGE_TASK_ID"))
vcfAllSiteParser<-"vcfAllSiteParser.py"
# The 22 resequenced individuals (sample IDs).
individuals<-c("BER.1.00F",
"CHE.100X.00F",
"CHI.3b.00F",
"CLO.4.00F",
"CVD.8.00F",
"FHL.5.00F",
"GRV.2.00F",
"GRV.7.00F",
"HV.1.00F",
"JAS.5.00F",
"LAY.5.00F",
"LAY.6.00F",
"LYN.4.00F",
"MAR.B.00F",
"MCK.5.00F",
"MOH.3.00F",
"MTR.3.00F",
"PEN.5.00F",
"ROV.3.00F",
"SUN.5.00F",
"UKI.5.00F",
"WLT.2.00F")
# One row per individual; the chromosome column expands this to 22 x 12 rows.
filters<-tibble(
vcftools=vcftools,
individual=individuals
)
# Give every individual all 12 chromosomes, then flatten to long format.
filters$chromosome<-filters$individual %>% map(~1:12)
filters<-filters %>% unnest(chromosome)
# Build input/output paths and the shell command for each row.
filters<-filters %>%
mutate(inputVCF=glue("{outputDir}/{individual}.2018wgs3.ef.rmIndelRepeatsStar.chr{chromosome}.minDP12.recode.vcf")) %>%
mutate(outputBed=glue("{outputDir}/{individual}.2018wgs3.ef.rmIndelRepeatsStar.chr{chromosome}.minDP12.bed.gz")) %>%
mutate(outputVCF=glue("{outputDir}/{individual}.2018wgs3.ef.rmIndelRepeatsStar.chr{chromosome}.minDP12.recode.nohomoref.vcf")) %>%
mutate(command=glue("cat {inputVCF} | python {vcfAllSiteParser} chr{chromosome} {outputBed} > {outputVCF}" ))
# Log and run only this task's command.
glue("Running: {filters$command[SGETaskID]}
")
system(filters$command[SGETaskID])
| /filtering_resequenced_genomes/ResequenceFilterStep4AllSiteParser.R | no_license | JesseGarcia562/quercus_lobata_demographic_history | R | false | false | 1,402 | r | library(data.table)
# Step 4 of the resequencing filter pipeline: for one (individual, chromosome)
# pair -- selected by the SGE array task id -- pipe the filtered all-sites VCF
# through vcfAllSiteParser.py to produce a callability mask (bed.gz) and a VCF
# with homozygous-reference sites removed.
library(tidyverse)
library(glue)
library(bedr)
## Step 4 Convert this vcf to bed file for the masks
vcftools<-"/u/home/j/jessegar/vcftools-vcftools-2543f81/src/cpp/vcftools"
outputDir<-"/u/flashscratch/j/jessegar/FilteringResequenceHighP"
# SGE_TASK_ID (1-based) picks which row of the command table below to run.
SGETaskID<-parse_integer(Sys.getenv("SGE_TASK_ID"))
vcfAllSiteParser<-"vcfAllSiteParser.py"
# The 22 resequenced individuals (sample IDs).
individuals<-c("BER.1.00F",
"CHE.100X.00F",
"CHI.3b.00F",
"CLO.4.00F",
"CVD.8.00F",
"FHL.5.00F",
"GRV.2.00F",
"GRV.7.00F",
"HV.1.00F",
"JAS.5.00F",
"LAY.5.00F",
"LAY.6.00F",
"LYN.4.00F",
"MAR.B.00F",
"MCK.5.00F",
"MOH.3.00F",
"MTR.3.00F",
"PEN.5.00F",
"ROV.3.00F",
"SUN.5.00F",
"UKI.5.00F",
"WLT.2.00F")
# One row per individual; the chromosome column expands this to 22 x 12 rows.
filters<-tibble(
vcftools=vcftools,
individual=individuals
)
# Give every individual all 12 chromosomes, then flatten to long format.
filters$chromosome<-filters$individual %>% map(~1:12)
filters<-filters %>% unnest(chromosome)
# Build input/output paths and the shell command for each row.
filters<-filters %>%
mutate(inputVCF=glue("{outputDir}/{individual}.2018wgs3.ef.rmIndelRepeatsStar.chr{chromosome}.minDP12.recode.vcf")) %>%
mutate(outputBed=glue("{outputDir}/{individual}.2018wgs3.ef.rmIndelRepeatsStar.chr{chromosome}.minDP12.bed.gz")) %>%
mutate(outputVCF=glue("{outputDir}/{individual}.2018wgs3.ef.rmIndelRepeatsStar.chr{chromosome}.minDP12.recode.nohomoref.vcf")) %>%
mutate(command=glue("cat {inputVCF} | python {vcfAllSiteParser} chr{chromosome} {outputBed} > {outputVCF}" ))
# Log and run only this task's command.
glue("Running: {filters$command[SGETaskID]}
")
system(filters$command[SGETaskID])
|
# Demonstrates typeof() on each of R's basic atomic types.
# Integer: the L suffix forces integer rather than double storage.
x <- 2L
typeof(x)
# Double: plain numeric literals are double-precision floats.
y <- 2.5
typeof(y)
# Complex: the i suffix creates an imaginary component.
z <- 3 + 2i
typeof(z)
# Character
a <- "A"
typeof(a)
# Logical: use TRUE, never the reassignable shorthand T.
b <- TRUE
typeof(b)
| /9-TypesOfVariables.R | no_license | JackMcKechnie/Udemy-Course-R-Programming-A-Z | R | false | false | 164 | r | #Integer
# typeof() demonstration for R's basic atomic types.
x <- 2L  # integer (L suffix forces integer storage)
typeof(x)
#Double
y <- 2.5
typeof(y)
#Complex
z <- 3 + 2i
typeof(z)
#Character
a <- "A"
typeof(a)
# Logical: prefer TRUE over the reassignable alias T.
b <- TRUE
typeof(b)
|
category Network
メールを送信するためのプロトコル SMTP (Simple Mail Transfer Protocol)
を扱うライブラリです。
ヘッダなどメールのデータを扱うことはできません。
SMTP の実装は [[RFC:2821]] に基いています。
=== 使用例
==== とにかくメールを送る
SMTP を使ってメールを送るにはまず SMTP.start でセッションを開きます。
第一引数がサーバのアドレスで第二引数がポート番号です。
ブロックを使うと File.open と同じように終端処理を自動的にやってくれる
のでおすすめです。
require 'net/smtp'
Net::SMTP.start( 'smtp.example.com', 25 ) {|smtp|
# use smtp object only in this block
}
smtp.example.com は適切な SMTP サーバのアドレスに読みかえてください。
通常は LAN の管理者やプロバイダが SMTP サーバを用意してくれているはずです。
セッションが開いたらあとは [[m:Net::SMTP#send_message]]
でメールを流しこむだけです。
require 'net/smtp'
Net::SMTP.start('smtp.example.com', 25) {|smtp|
smtp.send_message(<<-EndOfMail, 'from@example.com', 'to@example.net')
From: Your Name <from@example.com>
To: Dest Address <to@example.net>
Subject: test mail
Date: Sat, 23 Jun 2001 16:26:43 +0900
Message-Id: <unique.message.id.string@yourhost.example.com>
This is a test mail.
EndOfMail
}
==== セッションを終了する
メールを送ったら [[m:Net::SMTP#finish]] を呼んで
セッションを終了しなければいけません。
File のように GC 時に勝手に close されることもありません。
# using SMTP#finish
require 'net/smtp'
smtp = Net::SMTP.start('smtp.example.com', 25)
smtp.send_message mail_string, 'from@example.com', 'to@example.net'
smtp.finish
またブロック付きの [[m:Net::SMTP.start]], [[m:Net::SMTP#start]]
を使うと finish を呼んでくれるので便利です。
可能な限りブロック付きの start を使うのがよいでしょう。
# using block form of SMTP.start
require 'net/smtp'
Net::SMTP.start('smtp.example.com', 25) {|smtp|
smtp.send_message mail_string, 'from@example.com', 'to@example.net'
}
==== 文字列以外からの送信
ひとつ上の例では文字列リテラル (ヒアドキュメント) を使って送信しましたが、
each メソッドを持ったオブジェクトからならなんでも送ることができます。
以下は File オブジェクトから直接送信する例です。
require 'net/smtp'
Net::SMTP.start('your.smtp.server', 25) {|smtp|
File.open('Mail/draft/1') {|f|
smtp.send_message f, 'from@example.com', 'to@example.net'
}
}
=== HELO ドメイン
SMTP ではメールを送る側のホストの名前 (HELO ドメインと呼ぶ) を要求
されます。HELO ドメインは [[m:Net::SMTP.start]], [[m:Net::SMTP#start]]
の第三引数 helo_domain に指定します。
たいていの SMTP サーバはこの HELO ドメイン
による認証はあまり真面目に行わないので (簡単に偽造できるからです)
デフォルト値を用いて問題にならないことが多いのですが、セッションを切られる
こともあります。そういうときはとりあえず HELO ドメインを与えてみて
ください。もちろんそれ以外の時も HELO ドメインはちゃんと渡すのが
よいでしょう。
Net::SMTP.start('smtp.example.com', 25, 'yourdomain.example.com') {|smtp|
よくあるダイヤルアップホストの場合、HELO ドメインには ISP のメール
サーバのドメインを使っておけばたいてい通ります。
=== SMTP認証
[[c:Net::SMTP]] は PLAIN, LOGIN, CRAM MD5 の3つの方法で認証をすることができます。
(認証については [[RFC:2554]], [[RFC:2195]] を参照してください)
認証するためには、[[m:Net::SMTP.start]] もしくは [[m:Net::SMTP#start]]
の引数に追加の引数を渡してください。
# 例
Net::SMTP.start('smtp.example.com', 25, 'yourdomain.example.com',
'your_account', 'your_password', :cram_md5)
=== TLSを用いたSMTP通信
[[c:Net::SMTP]] は [[RFC:3207]] に基づいた STARTTLS を用いる
方法、もしくは SMTPS と呼ばれる非標準的な方法
(ポート465を用い、通信全体をTLSで包む)
によるメール送信の暗号化が可能です。
この2つは排他で、同時に利用できません。
TLSを用いることで、通信相手の認証、および通信経路の暗号化ができます。
ただし、現在のメール送信の仕組みとして、あるサーバから別のサーバへの
中継を行うことがあります。そこでの通信が認証されているか否か、暗号化され
ているか否かはこの仕組みの範囲外であり、なんらかの保証があるわけでは
ないことに注意してください。メールそのものの暗号化や、メールを
送る人、受け取る人を認証する
必要がある場合は別の方法を考える必要があるでしょう。
# STARTTLSの例
smtp = Net::SMTP.new('smtp.example.com', 25)
# SSLのコンテキストを作成してSSLの設定をし、context に代入しておく
# TLSを常に使うようにする
smtp.enable_starttls(context)
smtp.start() do
# send messages ...
end
= class Net::SMTP < Object
alias Net::SMTPSession
SMTP のセッションを表現したクラスです。
== Singleton Methods
--- new(address, port = Net::SMTP.default_port) -> Net::SMTP
新しい SMTP オブジェクトを生成します。
address はSMTPサーバーのFQDNで、
port は接続するポート番号です。
ただし、このメソッドではまだTCPの接続はしません。
[[m:Net::SMTP#start]] で接続します。
オブジェクトの生成と接続を同時にしたい場合には
[[m:Net::SMTP.start]] を代わりに使ってください。
@param address 接続先のSMTPサーバの文字列
@param port 接続ポート番号
@see [[m:Net::SMTP.start]], [[m:Net::SMTP#start]]
#@until 1.9.1
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@else
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@end
新しい SMTP オブジェクトを生成し、サーバに接続し、セッションを開始します。
以下と同じです。
Net::SMTP.new(address, port).start(helo_domain, account, password, authtype)
このメソッドにブロックを与えた場合には、新しく作られた [[c:Net::SMTP]] オブジェクト
を引数としてそのブロックを呼び、ブロック終了時に自動的に接続を閉じます。
ブロックを与えなかった場合には新しく作られた [[c:Net::SMTP]] オブジェクトが
返されます。この場合終了時に [[m:Net::SMTP#finish]] を呼ぶのは利用者の責任と
なります。
account と password の両方が与えられた場合、
SMTP AUTH コマンドによって認証を行います。
authtype は使用する認証のタイプで、
シンボルで :plain, :login, :cram_md5 を指定します。
Example:
require 'net/smtp'
Net::SMTP.start('smtp.example.com') {|smtp|
smtp.send_message mail_string, 'from@example.jp', 'to@example.jp'
}
@param address 接続するサーバをホスト名もしくはIPアドレスで指定します
@param port ポート番号、デフォルトは 25 です
@param helo_domain HELO で名乗るドメイン名です
@param account 認証で使うアカウント名
@param password 認証で使うパスワード
@param authtype 認証の種類(:plain, :login, :cram_md5 のいずれか)
@raise TimeoutError 接続時にタイムアウトした場合に発生します
@raise Net::SMTPUnsupportedCommand TLSをサポートしていないサーバでTLSを使おうとした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
@see [[m:Net::SMTP#start]], [[m:Net::SMTP.new]]
--- default_port -> Integer
SMTPのデフォルトのポート番号(25)を返します。
#@since 1.8.7
--- default_submission_port -> Integer
デフォルトのサブミッションポート番号(587)を返します。
--- default_ssl_context -> OpenSSL::SSL::SSLContext
SSL 通信に使われる SSL のコンテキストのデフォルト値を返します。
--- default_tls_port -> Integer
--- default_ssl_port -> Integer
デフォルトのSMTPSのポート番号(465)を返します。
#@end
== Instance Methods
--- esmtp? -> bool
--- esmtp -> bool
その Net::SMTP オブジェクトが ESMTP を使う場合に真を返します。
デフォルトは真です。
@see [[m:Net::SMTP#esmtp=]]
--- esmtp=(bool)
その Net::SMTP オブジェクトが ESMTP を使うかどうかを指定します。
この指定は [[m:Net::SMTP#start]] を呼ぶ前にする必要があります。
ESMTPモードで [[m:Net::SMTP#start]] を呼び、うまくいかなかった
場合には 普通の SMTP モードに切り替えてやりなおします
(逆はしません)。
@see [[m:Net::SMTP#esmtp?]]
#@since 1.8.7
--- capable_starttls? -> bool
サーバが STARTTLS を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_cram_md5_auth? -> bool
サーバが AUTH CRAM-MD5 を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_login_auth? -> bool
サーバが AUTH LOGIN を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_plain_auth? -> bool
サーバが AUTH PLAIN を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_auth_types -> [String]
接続したサーバで利用可能な認証を配列で返します。
返り値の配列の要素は、 'PLAIN', 'LOGIN', 'CRAM-MD5' です。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- tls? -> bool
--- ssl? -> bool
その Net::SMTP オブジェクトが SMTPS を利用するならば真を返します。
@see [[m:Net::SMTP#enable_tls]], [[m:Net::SMTP#disable_tls]], [[m:Net::SMTP#start]]
--- enable_ssl(context = Net::SMTP.default_ssl_context) -> ()
--- enable_tls(context = Net::SMTP.default_ssl_context) -> ()
その Net::SMTP オブジェクトが SMTPS を利用するよう設定します。
このメソッドは [[m:Net::SMTP#start]] を呼ぶ前に呼ぶ必要があります。
@param context SSL接続で利用する [[c:OpenSSL::SSL::SSLContext]]
@see [[m:Net::SMTP#tls?]], [[m:Net::SMTP#disable_tls]]
--- disable_ssl -> ()
--- disable_tls -> ()
その Net::SMTP オブジェクトが SMTPS を利用しないよう設定します。
@see [[m:Net::SMTP#disable_tls]], [[m:Net::SMTP#tls?]]
--- starttls? -> Symbol/nil
その Net::SMTP オブジェクトが STARTTLSを利用するかどうかを返します。
常に利用する(利用できないときは [[m:Net::SMTP#start]] で例外
[[c:Net::SMTPUnsupportedCommand]] を発生) するときは :always を、
利用可能な場合のみ利用する場合は :auto を、
常に利用しない場合には nil を返します。
@see [[m:Net::SMTP#start]]
--- starttls_always? -> bool
その Net::SMTP オブジェクトが 常にSTARTTLSを利用する
(利用できない場合には例外を発生する)ならば
真を返します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_auto?]], [[m:Net::SMTP#enable_starttls]]
--- starttls_auto? -> bool
その Net::SMTP オブジェクトが利用可能な場合にのみにSTARTTLSを利用するならば
真を返します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_always?]], [[m:Net::SMTP#enable_starttls_auto]]
--- enable_starttls(context = Net::SMTP.default_ssl_context) -> ()
その Net::SMTP オブジェクトが 常にSTARTTLSを利用する
(利用できない場合には例外を発生する)ように設定します。
@param context SSL接続で利用する [[c:OpenSSL::SSL::SSLContext]]
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_always?]], [[m:Net::SMTP#enable_starttls_auto]]
--- enable_starttls_auto(context = Net::SMTP.default_ssl_context) -> ()
その Net::SMTP オブジェクトがSTARTTLSが利用可能な場合
(つまりサーバがSTARTTLSを広告した場合)のみにSTARTTLSを利用する
ように設定します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_auto?]], [[m:Net::SMTP#enable_starttls_auto]]
@param context SSL接続で利用する [[c:OpenSSL::SSL::SSLContext]]
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_auto?]], [[m:Net::SMTP#enable_starttls]]
--- disable_starttls -> ()
その Net::SMTP オブジェクトがSTARTTLSを常に使わないよう設定します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#enable_starttls]], [[m:Net::SMTP#enable_starttls_auto]]
#@end
--- set_debug_output(f) -> ()
#@since 1.8.7
--- debug_output=(f)
#@end
デバッグ出力の出力先を指定します。
このメソッドは深刻なセキュリティホールの原因となりえます。
デバッグ用にのみ利用してください。
@param f デバッグ出力先を [[c:IO]] (もしくは << というメソッドを持つクラス)で指定します
#@until 1.9.1
--- start(helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@else
--- start(helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@end
サーバにコネクションを張り、同時に SMTP セッションを開始します。
もしすでにセッションが開始していたら IOError が発生します。
account と password の両方が与えられた場合、
SMTP AUTH コマンドによって認証を行います。
authtype は使用する認証のタイプで、
シンボルで :plain, :login, :cram_md5 を指定します。
このメソッドにブロックを与えた場合には、そのオブジェクト
を引数としてそのブロックを呼び、ブロック終了時に自動的に接続を閉じます。
ブロックを与えなかった場合には自分自身を返します。
この場合終了時に [[m:Net::SMTP#finish]] を呼ぶのは利用者の責任と
なります。
@param helo_domain HELO で名乗るドメイン名です
@param account 認証で使うアカウント名
@param password 認証で使うパスワード
@param authtype 認証の種類(:plain, :login, :cram_md5 のいずれか)
@raise IOError すでにセッションを開始している場合に発生します
@raise TimeoutError 接続がタイムアウトした場合に発生します
@raise Net::SMTPUnsupportedCommand STARTTLSをサポートしていないサーバでSTARTTLSを利用しようとした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
--- started? -> bool
SMTP セッションが開始されていたら真を返します。
セッションがまだ開始していない、もしくは終了している場合には偽を返します。
@see [[m:Net::SMTP#start]], [[m:Net::SMTP#finish]]
--- inspect -> String
@see [[m:Object#inspect]]
--- address -> String
接続先のアドレスを返します。
--- port -> Integer
接続先のポート番号を返します。
--- open_timeout -> Integer
接続時に待つ最大秒数を返します。
デフォルトは30(秒)です。
この秒数たってもコネクションが
開かなければ例外 TimeoutError を発生します。
@see [[m:Net::SMTP#open_timeout=]]
--- open_timeout=(n)
接続時に待つ最大秒数を設定します。
@see [[m:Net::SMTP#open_timeout]]
--- read_timeout -> Integer
読みこみ ([[man:read(2)]] 一回) でブロックしてよい最大秒数を返します。
デフォルトは60(秒)です。
この秒数たっても読みこめなければ例外 TimeoutError を発生します。
@see [[m:Net::SMTP#read_timeout=]]
--- read_timeout=(n)
読み込みでブロックしてよい最大秒数を設定します。
@see [[m:Net::SMTP#read_timeout]]
--- finish -> ()
SMTP セッションを終了します。
@raise IOError セッション開始前にこのメソッドが呼ばれた場合に発生します
@see [[m:Net::SMTP#start]]
--- send_message(mailsrc, from_addr, *to_addrs) -> ()
--- send_mail(mailsrc, from_addr, *to_addrs) -> ()
--- sendmail(mailsrc, from_addr, *to_addrs) -> ()
メールを送信します。
mailsrc をメールとして送信します。
mailsrc は each イテレータを持つ
オブジェクトならなんでも構いません(たとえば String や File)。
from_domain は送り主のメールアドレス ('...@...'のかたち) 、
to_addrs には送信先メールアドレスを文字列で渡します。
require 'net/smtp'
Net::SMTP.start('smtp.example.com') {|smtp|
smtp.send_message mail_string,
'from@example.com',
'to1@example.net', 'to2@example.net'
}
sendmail は obsolete です。
@param mailsrc メールの内容
@param from_addr 送信元のメールアドレス
@param to_addrs 送信先のメールアドレス(複数可、少なくとも1個)
@raise IOError すでにセッションが終了している場合に発生します
@raise TimeoutError 接続がタイムアウトした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
@raise Net::SMTPUnknownError SMTPエラーコードがプロトコル上不正な場合に発生します
--- open_message_stream(from_addr, *to_addrs) {|f| .... } -> ()
--- ready(from_addr, *to_addrs) {|f| .... } -> ()
メール書き込みの準備をし、書き込み先のストリームオブジェクトを
ブロックに渡します。ブロック終了後、書きこんだ結果が
送られます。
渡されるストリームオブジェクトは以下のメソッドを持っています。
* puts(str = '') strを出力して CR LFを出力
* print(str) strを出力
* printf(fmt, *args) sprintf(fmt,*args) を出力
* write(str):: str を出力して書き込んだバイト数を返す
* <<(str):: str を出力してストリームオブジェクト自身を返す
from_domain は送り主のメールアドレス ('...@...'のかたち) 、
to_addrs には送信先メールアドレスを文字列で渡します。
require 'net/smtp'
Net::SMTP.start('smtp.example.com', 25) {|smtp|
smtp.open_message_stream('from@example.com', 'to@example.net') {|f|
f.puts 'From: from@example.com'
f.puts 'To: to@example.net'
f.puts 'Subject: test mail'
f.puts
f.puts 'This is test mail.'
}
}
ready は obsolete です。
@param from_addr 送信元のメールアドレス
@param to_addrs 送信先のメールアドレス(複数可、少なくとも1個)
@raise IOError すでにセッションが終了している場合に発生します
@raise TimeoutError 接続がタイムアウトした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
@raise Net::SMTPAuthenticationError 送信に必要な認証を行っていなかった場合に発生します
@raise Net::SMTPUnknownError SMTPエラーコードがプロトコル上不正な場合に発生します
@see [[m:Net::SMTP#send_message]]
#@since 1.8.7
--- authenticate(user, secret, authtype) -> ()
認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
@param authtype 認証の種類(:plain, :login, :cram_md5 のいずれか)
@see [[m:Net::SMTP.start]], [[m:Net::SMTP#start]], [[m:Net::SMTP#auth_plain]], [[m:Net::SMTP#auth_login]], [[m:Net::SMTP#auth_cram_md5]]
--- auth_plain(user, secret) -> ()
PLAIN 認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
--- auth_login(user, secret) -> ()
LOGIN 認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
--- auth_cram_md5(user, secret) -> ()
CRAM-MD5 認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
#@since 2.1.0
--- rset -> Net::SMTP::Response
RSET コマンドを送ります。
#@end
--- starttls -> Net::SMTP::Response
STARTTLS コマンドを送ります。
通常は [[m:Net::SMTP#start]] で STARTTLS が送られるため
利用する必要はないはずです。
--- helo(domain) -> Net::SMTP::Response
HELO コマンドを送ります(標準的な SMTP を使います)。
通常は [[m:Net::SMTP.start]], [[m:Net::SMTP#start]] で HELO が
送られるため利用する必要はないはずです。
@param domain HELOで送るドメイン名
--- ehlo(domain) -> Net::SMTP::Response
EHLO コマンドを送ります(ESMTP を使います)。
通常は [[m:Net::SMTP.start]], [[m:Net::SMTP#start]] で EHLO が
送られるため利用する必要はないはずです。
@param domain EHLOで送るドメイン名
--- mailfrom(from_addr) -> Net::SMTP::Response
MAILFROM コマンドを送ります。
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
MAILFROM が送られるため利用する必要はないはずです。
@param from_addr 送信元メールアドレス
#@until 1.9.1
--- rcptto_list(to_addrs) -> ()
#@else
--- rcptto_list(to_addrs){ ... } -> object
#@end
RCPTTO コマンドを to_addrs のすべてのメールアドレスに対して送ります。
#@since 1.9.1
コマンドを送った後、ブロックを呼び出します。
このメソッドの返り値はブロックの返り値になります。
#@end
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
RCPTTO が送られるため利用する必要はないはずです。
@param to_addrs 送信先メールアドレスの配列
--- rcptto(to_addr) -> Net::SMTP::Response
RCPTTO コマンドを送ります。
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
RCPTTO が送られるため利用する必要はないはずです。
@param to_addr 送信先メールアドレス
--- data(message) -> Net::SMTP::Response
--- data {|f| .... } -> Net::SMTP::Response
DATA コマンドを送ります。
文字列を引数に与えた場合はそれを本文として送ります。
ブロックを与えた場合にはそのブロックにストリームオブジェクトが渡されます
([[m:Net::SMTP#open_message_stream]]参考)。
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
DATA が送られるため利用する必要はないはずです。
@param message メールの本文
--- quit -> Net::SMTP::Response
QUIT コマンドを送ります。
通常は [[m:Net::SMTP#finish]] で
QUIT が送られるため利用する必要はないはずです。
#@end
== Constants
#@since 1.8.7
--- DEFAULT_AUTH_TYPE -> Symbol
デフォルトの認証スキーム(:plain)です。
#@end
#@# internal constants for CRAM-MD5 authentication
#@# --- IMASK
#@# --- OMASK
#@# --- CRAM_BUFSIZE
--- Revision -> String
ファイルのリビジョンです。使わないでください。
#@since 1.8.7
= class Net::SMTP::Response < Object
[[c:Net::SMTP]] の内部用クラスです。
#@end
= module Net::SMTPError
SMTP 関連の例外に include されるモジュールです。
= class Net::SMTPAuthenticationError < Net::ProtoAuthError
include Net::SMTPError
SMTP 認証エラー(エラーコード 530)に対応する例外クラスです。
= class Net::SMTPServerBusy < Net::ProtoServerError
include Net::SMTPError
SMTP 一時エラーに対応する例外クラスです。
SMTP エラーコード 420, 450 に対応します。
= class Net::SMTPSyntaxError < Net::ProtoSyntaxError
include Net::SMTPError
SMTP コマンド文法エラー(エラーコード 500) に対応する
例外クラスです。
= class Net::SMTPFatalError < Net::ProtoFatalError
include Net::SMTPError
SMTP 致命的エラー(エラーコード 5xx、 ただし500除く)に対応する
例外クラスです。
= class Net::SMTPUnknownError < Net::ProtoUnknownError
include Net::SMTPError
サーバからの応答コードが予期されていない値であった場合に
対応する例外クラスです。サーバもしくはクライアントに何らかの
バグがあった場合に発生します。
= class Net::SMTPUnsupportedCommand < Net::ProtocolError
include Net::SMTPError
サーバで利用できないコマンドを送ろうとした時に発生する
例外のクラスです。
| /refm/api/src/net/smtp.rd | no_license | snoozer05/doctree | R | false | false | 27,882 | rd | category Network
メールを送信するためのプロトコル SMTP (Simple Mail Transfer Protocol)
を扱うライブラリです。
ヘッダなどメールのデータを扱うことはできません。
SMTP の実装は [[RFC:2821]] に基いています。
=== 使用例
==== とにかくメールを送る
SMTP を使ってメールを送るにはまず SMTP.start でセッションを開きます。
第一引数がサーバのアドレスで第二引数がポート番号です。
ブロックを使うと File.open と同じように終端処理を自動的にやってくれる
のでおすすめです。
require 'net/smtp'
Net::SMTP.start( 'smtp.example.com', 25 ) {|smtp|
# use smtp object only in this block
}
smtp.example.com は適切な SMTP サーバのアドレスに読みかえてください。
通常は LAN の管理者やプロバイダが SMTP サーバを用意してくれているはずです。
セッションが開いたらあとは [[m:Net::SMTP#send_message]]
でメールを流しこむだけです。
require 'net/smtp'
Net::SMTP.start('smtp.example.com', 25) {|smtp|
smtp.send_message(<<-EndOfMail, 'from@example.com', 'to@example.net')
From: Your Name <from@example.com>
To: Dest Address <to@example.net>
Subject: test mail
Date: Sat, 23 Jun 2001 16:26:43 +0900
Message-Id: <unique.message.id.string@yourhost.example.com>
This is a test mail.
EndOfMail
}
==== セッションを終了する
メールを送ったら [[m:Net::SMTP#finish]] を呼んで
セッションを終了しなければいけません。
File のように GC 時に勝手に close されることもありません。
# using SMTP#finish
require 'net/smtp'
smtp = Net::SMTP.start('smtp.example.com', 25)
smtp.send_message mail_string, 'from@example.com', 'to@example.net'
smtp.finish
またブロック付きの [[m:Net::SMTP.start]], [[m:Net::SMTP#start]]
を使うと finish を呼んでくれるので便利です。
可能な限りブロック付きの start を使うのがよいでしょう。
# using block form of SMTP.start
require 'net/smtp'
Net::SMTP.start('smtp.example.com', 25) {|smtp|
smtp.send_message mail_string, 'from@example.com', 'to@example.net'
}
==== 文字列以外からの送信
ひとつ上の例では文字列リテラル (ヒアドキュメント) を使って送信しましたが、
each メソッドを持ったオブジェクトからならなんでも送ることができます。
以下は File オブジェクトから直接送信する例です。
require 'net/smtp'
Net::SMTP.start('your.smtp.server', 25) {|smtp|
File.open('Mail/draft/1') {|f|
smtp.send_message f, 'from@example.com', 'to@example.net'
}
}
=== HELO ドメイン
SMTP ではメールを送る側のホストの名前 (HELO ドメインと呼ぶ) を要求
されます。HELO ドメインは [[m:Net::SMTP.start]], [[m:Net::SMTP#start]]
の第三引数 helo_domain に指定します。
たいていの SMTP サーバはこの HELO ドメイン
による認証はあまり真面目に行わないので (簡単に偽造できるからです)
デフォルト値を用いて問題にならないことが多いのですが、セッションを切られる
こともあります。そういうときはとりあえず HELO ドメインを与えてみて
ください。もちろんそれ以外の時も HELO ドメインはちゃんと渡すのが
よいでしょう。
Net::SMTP.start('smtp.example.com', 25, 'yourdomain.example.com') {|smtp|
よくあるダイヤルアップホストの場合、HELO ドメインには ISP のメール
サーバのドメインを使っておけばたいてい通ります。
=== SMTP認証
[[c:Net::SMTP]] は PLAIN, LOGIN, CRAM MD5 の3つの方法で認証をすることができます。
(認証については [[RFC:2554]], [[RFC:2195]] を参照してください)
認証するためには、[[m:Net::SMTP.start]] もしくは [[m:Net::SMTP#start]]
の引数に追加の引数を渡してください。
# 例
Net::SMTP.start('smtp.example.com', 25, 'yourdomain.example.com',
'your_account', 'your_password', :cram_md5)
=== TLSを用いたSMTP通信
[[c:Net::SMTP]] は [[RFC:3207]] に基づいた STARTTLS を用いる
方法、もしくは SMTPS と呼ばれる非標準的な方法
(ポート465を用い、通信全体をTLSで包む)
によるメール送信の暗号化が可能です。
この2つは排他で、同時に利用できません。
TLSを用いることで、通信相手の認証、および通信経路の暗号化ができます。
ただし、現在のメール送信の仕組みとして、あるサーバから別のサーバへの
中継を行うことがあります。そこでの通信が認証されているか否か、暗号化され
ているか否かはこの仕組みの範囲外であり、なんらかの保証があるわけでは
ないことに注意してください。メールそのものの暗号化や、メールを
送る人、受け取る人を認証する
必要がある場合は別の方法を考える必要があるでしょう。
# STARTTLSの例
smtp = Net::SMTP.new('smtp.example.com', 25)
# SSLのコンテキストを作成してSSLの設定をし、context に代入しておく
# TLSを常に使うようにする
smtp.enable_starttls(context)
smtp.start() do
# send messages ...
end
= class Net::SMTP < Object
alias Net::SMTPSession
SMTP のセッションを表現したクラスです。
== Singleton Methods
--- new(address, port = Net::SMTP.default_port) -> Net::SMTP
新しい SMTP オブジェクトを生成します。
address はSMTPサーバーのFQDNで、
port は接続するポート番号です。
ただし、このメソッドではまだTCPの接続はしません。
[[m:Net::SMTP#start]] で接続します。
オブジェクトの生成と接続を同時にしたい場合には
[[m:Net::SMTP.start]] を代わりに使ってください。
@param address 接続先のSMTPサーバの文字列
@param port 接続ポート番号
@see [[m:Net::SMTP.start]], [[m:Net::SMTP#start]]
#@until 1.9.1
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@else
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(address, port = Net::SMTP.default_port, helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@end
新しい SMTP オブジェクトを生成し、サーバに接続し、セッションを開始します。
以下と同じです。
Net::SMTP.new(address, port).start(helo_domain, account, password, authtype)
このメソッドにブロックを与えた場合には、新しく作られた [[c:Net::SMTP]] オブジェクト
を引数としてそのブロックを呼び、ブロック終了時に自動的に接続を閉じます。
ブロックを与えなかった場合には新しく作られた [[c:Net::SMTP]] オブジェクトが
返されます。この場合終了時に [[m:Net::SMTP#finish]] を呼ぶのは利用者の責任と
なります。
account と password の両方が与えられた場合、
SMTP AUTH コマンドによって認証を行います。
authtype は使用する認証のタイプで、
シンボルで :plain, :login, :cram_md5 を指定します。
Example:
require 'net/smtp'
Net::SMTP.start('smtp.example.com') {|smtp|
smtp.send_message mail_string, 'from@example.jp', 'to@example.jp'
}
@param address 接続するサーバをホスト名もしくはIPアドレスで指定します
@param port ポート番号、デフォルトは 25 です
@param helo_domain HELO で名乗るドメイン名です
@param account 認証で使うアカウント名
@param password 認証で使うパスワード
@param authtype 認証の種類(:plain, :login, :cram_md5 のいずれか)
@raise TimeoutError 接続時にタイムアウトした場合に発生します
@raise Net::SMTPUnsupportedCommand TLSをサポートしていないサーバでTLSを使おうとした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
@see [[m:Net::SMTP#start]], [[m:Net::SMTP.new]]
--- default_port -> Integer
SMTPのデフォルトのポート番号(25)を返します。
#@since 1.8.7
--- default_submission_port -> Integer
デフォルトのサブミッションポート番号(587)を返します。
--- default_ssl_context -> OpenSSL::SSL::SSLContext
SSL 通信に使われる SSL のコンテキストのデフォルト値を返します。
--- default_tls_port -> Integer
--- default_ssl_port -> Integer
デフォルトのSMTPSのポート番号(465)を返します。
#@end
== Instance Methods
--- esmtp? -> bool
--- esmtp -> bool
その Net::SMTP オブジェクトが ESMTP を使う場合に真を返します。
デフォルトは真です。
@see [[m:Net::SMTP#esmtp=]]
--- esmtp=(bool)
その Net::SMTP オブジェクトが ESMTP を使うかどうかを指定します。
この指定は [[m:Net::SMTP#start]] を呼ぶ前にする必要があります。
ESMTPモードで [[m:Net::SMTP#start]] を呼び、うまくいかなかった
場合には 普通の SMTP モードに切り替えてやりなおします
(逆はしません)。
@see [[m:Net::SMTP#esmtp?]]
#@since 1.8.7
--- capable_starttls? -> bool
サーバが STARTTLS を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_cram_md5_auth? -> bool
サーバが AUTH CRAM-MD5 を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_login_auth? -> bool
サーバが AUTH LOGIN を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_plain_auth? -> bool
サーバが AUTH PLAIN を広告してきた場合に真を返します。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- capable_auth_types -> [String]
接続したサーバで利用可能な認証を配列で返します。
返り値の配列の要素は、 'PLAIN', 'LOGIN', 'CRAM-MD5' です。
このメソッドは [[m:Net::SMTP#start]] などでセッションを開始
した以降にしか正しい値を返しません。
--- tls? -> bool
--- ssl? -> bool
その Net::SMTP オブジェクトが SMTPS を利用するならば真を返します。
@see [[m:Net::SMTP#enable_tls]], [[m:Net::SMTP#disable_tls]], [[m:Net::SMTP#start]]
--- enable_ssl(context = Net::SMTP.default_ssl_context) -> ()
--- enable_tls(context = Net::SMTP.default_ssl_context) -> ()
その Net::SMTP オブジェクトが SMTPS を利用するよう設定します。
このメソッドは [[m:Net::SMTP#start]] を呼ぶ前に呼ぶ必要があります。
@param context SSL接続で利用する [[c:OpenSSL::SSL::SSLContext]]
@see [[m:Net::SMTP#tls?]], [[m:Net::SMTP#disable_tls]]
--- disable_ssl -> ()
--- disable_tls -> ()
その Net::SMTP オブジェクトが SMTPS を利用しないよう設定します。
@see [[m:Net::SMTP#disable_tls]], [[m:Net::SMTP#tls?]]
--- starttls? -> Symbol/nil
その Net::SMTP オブジェクトが STARTTLSを利用するかどうかを返します。
常に利用する(利用できないときは [[m:Net::SMTP#start]] で例外
[[c:Net::SMTPUnsupportedCommand]] を発生) するときは :always を、
利用可能な場合のみ利用する場合は :auto を、
常に利用しない場合には nil を返します。
@see [[m:Net::SMTP#start]]
--- starttls_always? -> bool
その Net::SMTP オブジェクトが 常にSTARTTLSを利用する
(利用できない場合には例外を発生する)ならば
真を返します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_auto?]], [[m:Net::SMTP#enable_starttls]]
--- starttls_auto? -> bool
その Net::SMTP オブジェクトが利用可能な場合にのみにSTARTTLSを利用するならば
真を返します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_always?]], [[m:Net::SMTP#enable_starttls_auto]]
--- enable_starttls(context = Net::SMTP.default_ssl_context) -> ()
その Net::SMTP オブジェクトが 常にSTARTTLSを利用する
(利用できない場合には例外を発生する)ように設定します。
@param context SSL接続で利用する [[c:OpenSSL::SSL::SSLContext]]
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_always?]], [[m:Net::SMTP#enable_starttls_auto]]
--- enable_starttls_auto(context = Net::SMTP.default_ssl_context) -> ()
その Net::SMTP オブジェクトがSTARTTLSが利用可能な場合
(つまりサーバがSTARTTLSを広告した場合)のみにSTARTTLSを利用する
ように設定します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_auto?]], [[m:Net::SMTP#enable_starttls_auto]]
@param context SSL接続で利用する [[c:OpenSSL::SSL::SSLContext]]
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#starttls_auto?]], [[m:Net::SMTP#enable_starttls]]
--- disable_starttls -> ()
その Net::SMTP オブジェクトがSTARTTLSを常に使わないよう設定します。
@see [[m:Net::SMTP#starttls?]], [[m:Net::SMTP#enable_starttls]], [[m:Net::SMTP#enable_starttls_auto]]
#@end
--- set_debug_output(f) -> ()
#@since 1.8.7
--- debug_output=(f)
#@end
デバッグ出力の出力先を指定します。
このメソッドは深刻なセキュリティホールの原因となりえます。
デバッグ用にのみ利用してください。
@param f デバッグ出力先を [[c:IO]] (もしくは << というメソッドを持つクラス)で指定します
#@until 1.9.1
--- start(helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(helo_domain = 'localhost.localdomain', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@else
--- start(helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) -> Net::SMTP
--- start(helo_domain = 'localhost', account = nil, password = nil, authtype = DEFAULT_AUTH_TYPE) {|smtp| .... } -> object
#@end
サーバにコネクションを張り、同時に SMTP セッションを開始します。
もしすでにセッションが開始していたら IOError が発生します。
account と password の両方が与えられた場合、
SMTP AUTH コマンドによって認証を行います。
authtype は使用する認証のタイプで、
シンボルで :plain, :login, :cram_md5 を指定します。
このメソッドにブロックを与えた場合には、そのオブジェクト
を引数としてそのブロックを呼び、ブロック終了時に自動的に接続を閉じます。
ブロックを与えなかった場合には自分自身を返します。
この場合終了時に [[m:Net::SMTP#finish]] を呼ぶのは利用者の責任と
なります。
@param helo_domain HELO で名乗るドメイン名です
@param account 認証で使うアカウント名
@param password 認証で使うパスワード
@param authtype 認証の種類(:plain, :login, :cram_md5 のいずれか)
@raise IOError すでにセッションを開始している場合に発生します
@raise TimeoutError 接続がタイムアウトした場合に発生します
@raise Net::SMTPUnsupportedCommand STARTTLSをサポートしていないサーバでSTARTTLSを利用しようとした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
--- started? -> bool
SMTP セッションが開始されていたら真を返します。
セッションがまだ開始していない、もしくは終了している場合には偽を返します。
@see [[m:Net::SMTP#start]], [[m:Net::SMTP#finish]]
--- inspect -> String
@see [[m:Object#inspect]]
--- address -> String
接続先のアドレスを返します。
--- port -> Integer
接続先のポート番号を返します。
--- open_timeout -> Integer
接続時に待つ最大秒数を返します。
デフォルトは30(秒)です。
この秒数たってもコネクションが
開かなければ例外 TimeoutError を発生します。
@see [[m:Net::SMTP#open_timeout=]]
--- open_timeout=(n)
接続時に待つ最大秒数を設定します。
@see [[m:Net::SMTP#open_timeout]]
--- read_timeout -> Integer
読みこみ ([[man:read(2)]] 一回) でブロックしてよい最大秒数を返します。
デフォルトは60(秒)です。
この秒数たっても読みこめなければ例外 TimeoutError を発生します。
@see [[m:Net::SMTP#read_timeout=]]
--- read_timeout=(n)
読み込みでブロックしてよい最大秒数を設定します。
@see [[m:Net::SMTP#read_timeout]]
--- finish -> ()
SMTP セッションを終了します。
@raise IOError セッション開始前にこのメソッドが呼ばれた場合に発生します
@see [[m:Net::SMTP#start]]
--- send_message(mailsrc, from_addr, *to_addrs) -> ()
--- send_mail(mailsrc, from_addr, *to_addrs) -> ()
--- sendmail(mailsrc, from_addr, *to_addrs) -> ()
メールを送信します。
mailsrc をメールとして送信します。
mailsrc は each イテレータを持つ
オブジェクトならなんでも構いません(たとえば String や File)。
from_domain は送り主のメールアドレス ('...@...'のかたち) 、
to_addrs には送信先メールアドレスを文字列で渡します。
require 'net/smtp'
Net::SMTP.start('smtp.example.com') {|smtp|
smtp.send_message mail_string,
'from@example.com',
'to1@example.net', 'to2@example.net'
}
sendmail は obsolete です。
@param mailsrc メールの内容
@param from_addr 送信元のメールアドレス
@param to_addrs 送信先のメールアドレス(複数可、少なくとも1個)
@raise IOError すでにセッションが終了している場合に発生します
@raise TimeoutError 接続がタイムアウトした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
@raise Net::SMTPUnknownError SMTPエラーコードがプロトコル上不正な場合に発生します
--- open_message_stream(from_addr, *to_addrs) {|f| .... } -> ()
--- ready(from_addr, *to_addrs) {|f| .... } -> ()
メール書き込みの準備をし、書き込み先のストリームオブジェクトを
ブロックに渡します。ブロック終了後、書きこんだ結果が
送られます。
渡されるストリームオブジェクトは以下のメソッドを持っています。
* puts(str = '') strを出力して CR LFを出力
* print(str) strを出力
* printf(fmt, *args) sprintf(fmt,*args) を出力
* write(str):: str を出力して書き込んだバイト数を返す
* <<(str):: str を出力してストリームオブジェクト自身を返す
from_domain は送り主のメールアドレス ('...@...'のかたち) 、
to_addrs には送信先メールアドレスを文字列で渡します。
require 'net/smtp'
Net::SMTP.start('smtp.example.com', 25) {|smtp|
smtp.open_message_stream('from@example.com', 'to@example.net') {|f|
f.puts 'From: from@example.com'
f.puts 'To: to@example.net'
f.puts 'Subject: test mail'
f.puts
f.puts 'This is test mail.'
}
}
ready は obsolete です。
@param from_addr 送信元のメールアドレス
@param to_addrs 送信先のメールアドレス(複数可、少なくとも1個)
@raise IOError すでにセッションが終了している場合に発生します
@raise TimeoutError 接続がタイムアウトした場合に発生します
@raise Net::SMTPServerBusy SMTPエラーコード420,450の場合に発生します
@raise Net::SMTPSyntaxError SMTPエラーコード500の場合に発生します
@raise Net::SMTPFatalError SMTPエラーコード5xxの場合に発生します
@raise Net::SMTPAuthenticationError 送信に必要な認証を行っていなかった場合に発生します
@raise Net::SMTPUnknownError SMTPエラーコードがプロトコル上不正な場合に発生します
@see [[m:Net::SMTP#send_message]]
#@since 1.8.7
--- authenticate(user, secret, authtype) -> ()
認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
@param authtype 認証の種類(:plain, :login, :cram_md5 のいずれか)
@see [[m:Net::SMTP.start]], [[m:Net::SMTP#start]], [[m:Net::SMTP#auth_plain]], [[m:Net::SMTP#auth_login]], [[m:Net::SMTP#auth_cram_md5]]
--- auth_plain(user, secret) -> ()
PLAIN 認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
--- auth_login(user, secret) -> ()
LOGIN 認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
--- auth_cram_md5(user, secret) -> ()
CRAM-MD5 認証を行います。
このメソッドはセッション開始([[m:Net::SMTP#start]])後、
メールを送る前に呼びだしてください。
通常は [[m:Net::SMTP.start]] や [[m:Net::SMTP#start]] で認証を
行うためこれを利用する必要はないはずです。
@param user 認証で使うアカウント名
@param secret 認証で使うパスワード
#@since 2.1.0
--- rset -> Net::SMTP::Response
RSET コマンドを送ります。
#@end
--- starttls -> Net::SMTP::Response
STARTTLS コマンドを送ります。
通常は [[m:Net::SMTP#start]] で STARTTLS が送られるため
利用する必要はないはずです。
--- helo(domain) -> Net::SMTP::Response
HELO コマンドを送ります(標準的な SMTP を使います)。
通常は [[m:Net::SMTP.start]], [[m:Net::SMTP#start]] で HELO が
送られるため利用する必要はないはずです。
@param domain HELOで送るドメイン名
--- ehlo(domain) -> Net::SMTP::Response
EHLO コマンドを送ります(ESMTP を使います)。
通常は [[m:Net::SMTP.start]], [[m:Net::SMTP#start]] で EHLO が
送られるため利用する必要はないはずです。
@param domain EHLOで送るドメイン名
--- mailfrom(from_addr) -> Net::SMTP::Response
MAILFROM コマンドを送ります。
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
MAILFROM が送られるため利用する必要はないはずです。
@param from_addr 送信元メールアドレス
#@until 1.9.1
--- rcptto_list(to_addrs) -> ()
#@else
--- rcptto_list(to_addrs){ ... } -> object
#@end
RCPTTO コマンドを to_addrs のすべてのメールアドレスに対して送ります。
#@since 1.9.1
コマンドを送った後、ブロックを呼び出します。
このメソッドの返り値はブロックの返り値になります。
#@end
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
RCPTTO が送られるため利用する必要はないはずです。
@param to_addrs 送信先メールアドレスの配列
--- rcptto(to_addr) -> Net::SMTP::Response
RCPTTO コマンドを送ります。
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
RCPTTO が送られるため利用する必要はないはずです。
@param to_addr 送信先メールアドレス
--- data(message) -> Net::SMTP::Response
--- data {|f| .... } -> Net::SMTP::Response
DATA コマンドを送ります。
文字列を引数に与えた場合はそれを本文として送ります。
ブロックを与えた場合にはそのブロックにストリームオブジェクトが渡されます
([[m:Net::SMTP#open_message_stream]]参考)。
通常は [[m:Net::SMTP#send_message]], [[m:Net::SMTP#open_message_stream]] で
DATA が送られるため利用する必要はないはずです。
@param message メールの本文
--- quit -> Net::SMTP::Response
QUIT コマンドを送ります。
通常は [[m:Net::SMTP#finish]] で
QUIT が送られるため利用する必要はないはずです。
#@end
== Constants
#@since 1.8.7
--- DEFAULT_AUTH_TYPE -> Symbol
デフォルトの認証スキーム(:plain)です。
#@end
#@# internal constants for CRAM-MD5 authentication
#@# --- IMASK
#@# --- OMASK
#@# --- CRAM_BUFSIZE
--- Revision -> String
ファイルのリビジョンです。使わないでください。
#@since 1.8.7
= class Net::SMTP::Response < Object
[[c:Net::SMTP]] の内部用クラスです。
#@end
= module Net::SMTPError
SMTP 関連の例外に include されるモジュールです。
= class Net::SMTPAuthenticationError < Net::ProtoAuthError
include Net::SMTPError
SMTP 認証エラー(エラーコード 530)に対応する例外クラスです。
= class Net::SMTPServerBusy < Net::ProtoServerError
include Net::SMTPError
SMTP 一時エラーに対応する例外クラスです。
SMTP エラーコード 420, 450 に対応します。
= class Net::SMTPSyntaxError < Net::ProtoSyntaxError
include Net::SMTPError
SMTP コマンド文法エラー(エラーコード 500) に対応する
例外クラスです。
= class Net::SMTPFatalError < Net::ProtoFatalError
include Net::SMTPError
SMTP 致命的エラー(エラーコード 5xx、 ただし500除く)に対応する
例外クラスです。
= class Net::SMTPUnknownError < Net::ProtoUnknownError
include Net::SMTPError
サーバからの応答コードが予期されていない値であった場合に
対応する例外クラスです。サーバもしくはクライアントに何らかの
バグがあった場合に発生します。
= class Net::SMTPUnsupportedCommand < Net::ProtocolError
include Net::SMTPError
サーバで利用できないコマンドを送ろうとした時に発生する
例外のクラスです。
|
#' @name dgp_spsur
#' @rdname dgp_spsur
#'
#' @title Generation of a random dataset with a spatial SUR structure.
#'
#' @description
#' The purpose of the function \code{dgp_spsur} is to generate a random
#' dataset with the dimensions and spatial structure decided by the user.
#' This function may be useful in pure simulation experiments or with the
#' aim of showing specific properties and characteristics
#' of a spatial SUR dataset and inferential procedures related to them.
#'
#' The user of \code{dgp_spsur} should think in terms of a Monte Carlo
#' experiment. The arguments of the function specify the dimensions of the
#' dataset to be generated, the spatial mechanism underlying the data, the
#' intensity of the SUR structure among the equations and the values of the
#' parameters to be used to obtain the simulated data, which includes the
#' error terms, the regressors and the explained variables.
#'
#' @usage dgp_spsur(Sigma, Tm = 1, G, N, Betas, Thetas = NULL,
#' rho = NULL, lambda = NULL, p = NULL, listw = NULL,
#' X = NULL, type = "matrix", pdfU = "nvrnorm",
#' pdfX = "nvrnorm")
#'
#' @param G Number of equations.
#' @param N Number of cross-section or spatial units
#' @param Tm Number of time periods. Default = \code{1}
#' @param p Number of regressors by equation, including the intercept.
#' \emph{p} can be a row vector of order \emph{(1xG)}, if the number of
#' regressors is not the same for all the equations, or a scalar, if the
#' \emph{G} equations have the same number of regressors.
#' @param listw A \code{listw} object created for example by
#' \code{\link[spdep]{nb2listw}} from the \pkg{spdep} package. It can
#' also be a spatial weighting matrix of order \emph{(NxN)} instead of
#' a \code{listw} object. Default = \code{NULL}.
#' @param Sigma Covariance matrix between the \emph{G} equations of the
#' SUR model. This matrix should be definite positive and the user must
#' check for that.
#' @param Betas A row vector of order \eqn{(1xP)} showing the values for
#' the \emph{beta} coefficients.
#' The first \eqn{P_{1}} terms correspond to the first equation (where
#' the first element is the intercept), the second \eqn{P_{2}} terms to
#' the coefficients of the second equation and so on.
#' @param Thetas Values for the \eqn{\theta} coefficients in the
#' \emph{G} equations of the model, when the type of spatial SUR model to
#' be simulated is a "slx", "sdm" or "sdem". \emph{Thetas} is a
#' row vector of order \emph{\eqn{1xPTheta}}, where
#' \emph{\eqn{PThetas=p-G}}; let us note that the intercept cannot
#' appear among the spatial lags of the regressors. The first
#' \emph{\eqn{1xKTheta_{1}}} terms correspond to the first equation,
#' the second \emph{\eqn{1xPTheta_{2}}} terms correspond to the
#' second equation, and so on. Default = \code{NULL}.
#' @param rho Values of the coefficients \eqn{\rho_{g}; g=1,2,..., G}
#' related to the spatial lag of the explained variable of the g-th equation.
#' If \eqn{rho} is an scalar and there are \emph{G} equations in the
#' model, the same value will be used for all the equations. If \eqn{rho}
#' is a row vector, of order \emph{(1xG)}, the function \code{dgp_spsur}
#' will use these values, one for each equation. Default = \code{NULL}.
#' @param lambda Values of the coefficients \eqn{\lambda_{g}; g=1,2,..., G}
#' related to the spatial lag of the errors in the \emph{G} equations.
#' If \eqn{lambda} is an scalar and there are \emph{G} equations
#' in the model, the same value will be used for all the equations.
#' If \eqn{lambda} is a row vector, of order \emph{(1xG)}, the function
#' \code{dgp_spsur} will use these values, one for each equation of the
#' spatial errors. Default = \code{NULL}.
#' @param X This argument tells the function \code{dgp_spsur} which \emph{X}
#' matrix should be used to generate the SUR dataset. If \emph{X} is
#' different from \code{NULL}, \code{dgp_spsur} will upload the \emph{X}
#' matrix selected in this argument. Note that the \emph{X} must be consistent
#' with the dimensions of the model. If \emph{X} is \code{NULL},
#' \code{dgp_spsur} will generate the desired matrix of regressors from a
#' multivariate Normal distribution with mean value zero and identity
#' \eqn{(PxP)} covariance matrix. As an alternative, the user may change
#' this probability distribution function to the uniform case, \eqn{U(0,1)},
#' through the argument \emph{pdfX}. Default = \code{NULL}.
#' @param type Selection of the type of output. The alternatives are
#' \code{matrix}, \code{df}, \code{panel}, \code{all}. Default \code{matrix}
#' @param pdfX Multivariate probability distribution function (Mpdf), from
#' which the values of the regressors will be drawn. The regressors are
#' assumed to be independent. \code{dgp_spsur} provides two Mpdf,
#' the multivariate Normal, which is the default, and the uniform in the
#' interval \eqn{U[0,1]}, using the \code{\link[stats]{runif}} function
#' from the \pkg{stats} package. Two alternatives
#' \code{"nvrunif"}, \code{"nvrnorm"}. Default \code{"nvrnorm"}.
#' @param pdfU Multivariate probability distribution function, Mpdf, from
#' which the values of the error terms will be drawn. The covariance matrix
#' is the \eqn{\Sigma} matrix specified by the user in the argument. Two alternatives
#' \code{"lognvrnorm"}, \code{"nvrnorm"}. Default \code{"nvrnorm"}.
#'
#' \emph{Sigma}.
#' The function \code{dgp_spsur} provides two Mpdf, the multivariate Normal,
#' which is the default, and the log-Normal distribution function which
#' means just exponentiating the samples drawn from a \eqn{N(0,\Sigma)}
#' distribution. Default = \code{"nvrnorm"}.
#'
#'
#' @details
#' The purpose of the function \code{dgp_spsur} is to generate random
#' datasets, of a SUR nature, with the spatial structure decided by the user.
#' The function requires certain information to be supplied externally
#' because, in fact, \code{dgp_spsur} constitutes a Data Generation
#' Process, DGP. The following aspects should be addressed:
#' \itemize{
#' \item The user must define the dimensions of the dataset, that is,
#' number of equations, \emph{G}, number of time periods, \emph{Tm}, and number of
#' cross-sectional units, \emph{N}.
#' \item The user must choose the type of spatial structure desired
#' for the model from among the list of candidates of "sim", "slx",
#' "slm", "sem", "sdm", "sdem" or "sarar". The default is the "sim"
#' specification which does not have spatial structure. The decision is
#' made implicitly, just omitting the specification of the spatial
#' parameters which are not involved in the model (i.e., in a "slm"
#' there are no \eqn{\lambda} parameters but appear \eqn{\rho}
#' parameters; in a "sdem" model there are \eqn{\lambda} and \eqn{\theta}
#' parameters but no \eqn{\rho} coefficients).
#' \item If the user needs a model with spatial structure, a \emph{(NxN)} weighting
#' matrix, \emph{W}, should be chosen.
#' \item The next step builds the equations of the SUR model. In this
#' case, the user must specify the number of regressors that intervene
#' in each equation and the coefficients, \eqn{\beta} parameters,
#' associated with each regressor. The \emph{first} question is solved
#' through the argument \emph{p} which, if a scalar, indicates that
#' the same number of regressors should appear in all the equations
#' of the model; if the user seeks for a model with different number
#' of regressors in the \emph{G} equations, the argument \emph{p} must
#' be a \emph{(1xG)} row vector with the required information. It must
#' be remembered that \code{dgp_spsur} assumes that an
#' intercept appears in all equations of the model.
#'
#' The \emph{second} part of the problem posited above is solved through
#' the argument \emph{Betas}, which is a row vector of order \emph{(1xp)}
#' with the information required for this set of coefficients.
#' \item The user must specify, also, the values of the spatial parameters
#' corresponding to the chosen specification; we are referring to the
#' \eqn{\rho_{g}}, \eqn{\lambda_{g}} and \eqn{\theta_{g}},
#' for \eqn{g=1, ..., G and k=1,..., K_{g}} parameters. This is done
#' through the arguments \emph{rho}, \emph{lambda} and \emph{theta}.
#' The first two, \emph{rho} and \emph{lambda}, work as \emph{K}: if
#' they are scalar, the same value will be used in the \emph{G}
#' equations of the SUR model; if they are \emph{(1xG)} row vectors,
#' a different value will be assigned for each equation.
#'
#' Moreover, \emph{Theta} works like the argument \emph{Betas}. The user
#' must define a row vector of order \eqn{1xPTheta} showing these values.
#' It is worth to remember that in no case the intercept will appear
#' among the lagged regressors.
#' \item With the argument \code{type} the user chooses the
#' output format. See Value section.
#' \item Finally, the user must decide which values of the regressors and
#' of the error terms are to be used in the simulation. The regressors
#' can be uploaded from an external matrix generated previously by the
#' user. This is the argument \emph{X}. It is the responsibility of the
#' user to check that the dimensions of the external matrix are consistent
#' with the dataset required for the SUR model. A second possibility
#' implies the regressors to be generated randomly by the function
#' \code{\link{dgp_spsur}}.
#' In this case, the user must select the probability distribution
#' function from which the corresponding data (of the regressors and
#' the error terms) are to be drawn.\cr
#'}
#' \code{dgp_spsur} provides two multivariate distribution functions,
#' namely, the Normal and the log-Normal for the errors (the second
#' should be taken as a clear departure from the standard assumption of
#' normality). In both cases, random matrices of order \emph{(TmNxG)}
#' are obtained from a multivariate normal distribution, with a mean
#' value of zero and the covariance matrix specified in the argument
#' \emph{Sigma}; then, this matrix is exponentiated for the log-Normal
#' case. Roughly, the same procedure applies for drawing the values of
#' the regressor. There are two distribution functions available, the
#' normal and the uniform in the interval \eqn{U[0,1]}; the regressors
#' are always independent.
#'
#'
#' @return
#' The default output ("matrix") is a list with a vector \eqn{Y} of order
#' \emph{(TmNGx1)} with the values
#' generated for the explained variable in the G equations of the SUR and
#' a matrix \eqn{XX} of order (\emph{(TmNGxsum(p))}, with the values
#' generated for the regressors of the SUR, including an intercept for
#' each equation.
#'
#' In case of Tm = 1 or G = 1 several alternative
#' outputs can be selected:
#'\itemize{
#' \item If the user selects \code{type = "df"} the output is a data frame where each
#' column is a variable.
#'
#' \item If the user selects \code{type = "panel"} the output is a data frame in
#' panel format including two factors. The first factor points out the observation
#' of the individual and the second the equation for different Tm or G.
#'
#' \item Finally, if \code{type = "all"} is selected the output is a list including
#' all alternative formats.
#' }
#'
#' @author
#' \tabular{ll}{
#' Fernando López \tab \email{fernando.lopez@@upct.es} \cr
#' Román Mínguez \tab \email{roman.minguez@@uclm.es} \cr
#' Jesús Mur \tab \email{jmur@@unizar.es} \cr
#' }
#' @references
#' \itemize{
#' \item López, F. A., Mínguez, R., Mur, J. (2020). ML versus IV estimates
#' of spatial SUR models: evidence from the case of Airbnb in Madrid urban
#' area. \emph{The Annals of Regional Science}, 64(2), 313-347.
#' <doi:10.1007/s00168-019-00914-1>
#'
#' }
#' @seealso
#' \code{\link{spsurml}}, \code{\link{spsur3sls}}, \code{\link{spsurtime}}
#' @examples
#'
#' ## VIP: The output of the whole set of the examples can be examined
#' ## by executing demo(demo_dgp_spsur, package="spsur")
#'
#' ################################################
#' ### PANEL DATA (Tm = 1 or G = 1) ##
#' ################################################
#'
#' ################################################
#' #### Example 1: DGP SLM model. G equations
#' ################################################
#' rm(list = ls()) # Clean memory
#' Tm <- 1 # Number of time periods
#' G <- 3 # Number of equations
#' N <- 200 # Number of spatial elements
#' p <- 3 # Number of independent variables
#' Sigma <- matrix(0.3, ncol = G, nrow = G)
#' diag(Sigma) <- 1
#' Betas <- c(1, 2, 3, 1, -1, 0.5, 1, -0.5, 2)
#' rho <- 0.5 # level of spatial dependence
#' lambda <- 0.0 # spatial autocorrelation error term = 0
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP <- dgp_spsur(Sigma = Sigma, Betas = Betas,
#' rho = rho, lambda = lambda, Tm = Tm,
#' G = G, N = N, p = p, listw = lw)
#' \donttest{
#' SLM <- spsurml(X = DGP$X, Y = DGP$Y, Tm = Tm, N = N, G = G,
#' p = c(3, 3, 3), listw = lw, type = "slm")
#' summary(SLM)
#'
#' ################################################
#' #### Example 2: DGP SEM model with Tm>1; G=1 and
#' #### different p for each equation
#' ################################################
#' rm(list = ls()) # Clean memory
#' Tm <- 3 # Number of time periods
#' G <- 1 # Number of equations
#' N <- 500 # Number of spatial elements
#' p <- c(2,3,4) # Number of independent variables
#' Sigma <- matrix(0.8, ncol = Tm, nrow = Tm)
#' diag(Sigma) <- 1
#' Betas <- c(1,2,1,2,3,1,2,3,4)
#' rho <- 0 # level of spatial dependence = 0
#' lambda <- c(0.2,0.5,0.8)
#' ## spatial autocorrelation error terms for each equation
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP2 <- dgp_spsur(Sigma = Sigma, Betas = Betas, rho = rho,
#' lambda = lambda, Tm = Tm, G = G, N = N, p = p,
#' listw = lw)
#' SLM2 <- spsurml(X = DGP2$X, Y = DGP2$Y, Tm = Tm, N = N, G = G,
#' p = c(2,3,4), listw = lw, type = "slm")
#' summary(SLM2)
#' SEM2 <- spsurml(X = DGP2$X, Y = DGP2$Y, Tm = Tm, N = N, G = G,
#' p = c(2,3,4), listw = lw, type = "sem")
#' summary(SEM2)
#'
#' ################################################
#' #### Example 3: DGP SEM model with Tm>1; G=1 and
#' #### different p for each equation. Output "df"
#' ################################################
#' rm(list = ls()) # Clean memory
#' Tm <- 3 # Number of time periods
#' G <- 1 # Number of equations
#' N <- 500 # Number of spatial elements
#' p <- c(2,3,4) # Number of independent variables
#' Sigma <- matrix(0.8, ncol = Tm, nrow = Tm)
#' diag(Sigma) <- 1
#' Betas <- c(1,2,1,2,3,1,2,3,4)
#' rho <- 0 # level of spatial dependence = 0
#' lambda <- c(0.2,0.5,0.8)
#' ## spatial autocorrelation error terms for each equation
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP3 <- dgp_spsur(Sigma = Sigma, Betas = Betas, rho = rho,
#' lambda = lambda, Tm = Tm, G = G, N = N, p = p,
#' listw = lw, type = "df")
#' formula <- Y_1 | Y_2 | Y_3 ~ X_11 | X_21 + X_22 | X_31 + X_32 + X_33
#' SLM3 <- spsurml(formula = formula, data = DGP3$df,
#' listw = lw, type = "slm")
#' summary(SLM3)
#' SEM3 <- spsurml(formula = formula, data = DGP3$df,
#' listw = lw, type = "sem")
#' summary(SEM3)
#'
#' ################################################
#' ### MULTI-DIMENSIONAL PANEL DATA G>1 and Tm>1 ##
#' ################################################
#'
#' rm(list = ls()) # Clean memory
#' Tm <- 10 # Number of time periods
#' G <- 3 # Number of equations
#' N <- 100 # Number of spatial elements
#' p <- 3 # Number of independent variables
#' Sigma <- matrix(0.5, ncol = G, nrow = G)
#' diag(Sigma) <- 1
#' Betas <- rep(1:3, G)
#' rho <- c(0.5, 0.1, 0.8)
#' lambda <- 0.0 # spatial autocorrelation error term = 0
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP4 <- dgp_spsur(Sigma = Sigma, Betas = Betas, rho = rho,
#' lambda = lambda, Tm = Tm, G = G, N = N, p = p,
#' listw = lw)
#' SLM4 <- spsurml(Y = DGP4$Y, X = DGP4$X, G = G, N = N, Tm = Tm,
#' p = p, listw = lw, type = "slm")
#' summary(SLM4)
#' }
#' @export
dgp_spsur <- function(Sigma, Tm = 1, G, N, Betas,
                      Thetas = NULL, rho = NULL,
                      lambda = NULL, p = NULL,
                      listw = NULL, X = NULL,
                      type = "matrix",
                      pdfU = "nvrnorm", pdfX = "nvrnorm") {
  type <- match.arg(type, c("matrix", "df", "panel", "all"))
  pdfX <- match.arg(pdfX, c("nvrunif", "nvrnorm"))
  pdfU <- match.arg(pdfU, c("lognvrnorm", "nvrnorm"))
  ## --- Spatial weights: accept a listw object or a (sparse) matrix and
  ## always end up with both forms (W as a Matrix, listw as a listw).
  if (is.null(listw) || !inherits(listw, c("listw", "Matrix", "matrix")))
    stop("listw format unknown or NULL")
  if (inherits(listw, "listw")) {
    W <- Matrix::Matrix(spdep::listw2mat(listw))
  } else if (inherits(listw, "matrix")) {
    W <- Matrix::Matrix(listw)
    listw <- spdep::mat2listw(W)
  } else if (inherits(listw, "Matrix")) {
    W <- listw
    listw <- spdep::mat2listw(as.matrix(W))
  } else W <- Matrix::Diagonal(N)  # unreachable (guard above); kept defensively
  xxx <- Tm  # original Tm, used later to label the panel output
  if (Tm > 1 && G == 1) {  # recast a single-equation panel: equations <- periods
    G <- Tm
    Tm <- 1
  }
  durbin <- !is.null(Thetas)  # Durbin-type model when lagged-X coefs supplied
  if (!is.null(p) && length(p) == 1) p <- matrix(p, nrow = G, ncol = 1)
  if (is.null(lambda)) lambda <- rep(0, G)
  if (is.null(rho)) rho <- rep(0, G)
  if (length(lambda) == 1) lambda <- as.numeric(matrix(lambda,
                                                       nrow = G, ncol = 1))
  if (length(rho) == 1) rho <- as.numeric(matrix(rho,
                                                 nrow = G, ncol = 1))
  ## --- Regressors: generate X (block-diagonal by equation, stacked over
  ## time) unless supplied. XF keeps the non-intercept columns side by
  ## side and is reused later for the "df"/"panel" output layouts.
  if (is.null(X)) {
    if (is.null(p)) stop("Arguments X and p can not be NULL simultaneously")
    if (pdfX == "nvrunif") {
      X0 <- matrix(runif(N * (p[1] - 1)), N, (p[1] - 1))
      colnames(X0) <- paste0("X_1", seq_len(ncol(X0)))
      X <- cbind(matrix(1, N, 1), X0)
      XF <- X0  # fixed: was 'Xf', which broke the cbind(XF, X0) below
      for (i in seq_len(G - 1)) {
        X0 <- matrix(runif(N * (p[i + 1] - 1)), N,
                     (p[i + 1] - 1))
        colnames(X0) <- paste0("X_", (i + 1), seq_len(ncol(X0)))
        X <- Matrix::bdiag(X, cbind(matrix(1, N, 1), X0))
        XF <- cbind(XF, X0)
      }
      if (Tm > 1) {
        for (k in seq_len(Tm - 1)) {  # one extra block per time period
          X2 <- cbind(matrix(1, N, 1),
                      matrix(runif(N * (p[1] - 1)),
                             N, (p[1] - 1)))
          for (j in seq_len(G - 1)) {  # fixed: no longer shadows outer index
            X2 <- Matrix::bdiag(X2, cbind(matrix(1, N, 1),
                                          matrix(runif(N * (p[j + 1] - 1)),
                                                 N, (p[j + 1] - 1))))
          }
          X <- rbind(X, X2)
        }
      }
    } else if (pdfX == "nvrnorm") {
      X0 <- matrix(rnorm(N * (p[1] - 1), 0, 1), N, (p[1] - 1))
      colnames(X0) <- paste0("X_1", seq_len(ncol(X0)))
      X <- cbind(matrix(1, N, 1), X0)
      XF <- X0
      for (i in seq_len(G - 1)) {
        X0 <- matrix(rnorm(N * (p[i + 1] - 1), 0, 1),
                     N, (p[i + 1] - 1))
        colnames(X0) <- paste0("X_", (i + 1), seq_len(ncol(X0)))
        X <- Matrix::bdiag(X, cbind(matrix(1, N, 1), X0))
        XF <- cbind(XF, X0)
      }
      if (Tm > 1) {
        for (k in seq_len(Tm - 1)) {
          X2 <- cbind(matrix(1, N, 1),
                      matrix(rnorm(N * (p[1] - 1), 0, 1), N,
                             (p[1] - 1)))
          for (j in seq_len(G - 1)) {
            X2 <- Matrix::bdiag(X2, cbind(matrix(1, N, 1),
                                          matrix(rnorm(N * (p[j + 1] - 1),
                                                       0, 1),
                                                 N, (p[j + 1] - 1))))
          }
          X <- rbind(X, X2)
        }
      }
    } else stop("pdfX only can be nvrnorm or nvrunif")
    ## Name the columns of X: one intercept plus X<g>_<k> per equation g
    nam <- c(paste0("Intercep_", 1),
             paste0("X", 1, "_", seq_len(p[1] - 1)))
    if (length(p) > 1) {  # fixed: was length(p > 1), which is always truthy
      for (i in 2:length(p)) {
        nam <- c(nam, paste0("Intercep_", i),
                 paste0("X", i, "_", seq_len(p[i] - 1)))
      }
    }
    dimnames(X)[[2]] <- nam
  }
  if (is.null(p)) {
    if ((ncol(X) %% G) != 0) stop("Argument p need to be set")
    p <- rep(ncol(X) / G, G)
  }
  IT <- Matrix::Diagonal(Tm)
  IR <- Matrix::Diagonal(N)
  IG <- Matrix::Diagonal(G)
  IGR <- Matrix::Diagonal(G * N)
  ## --- Durbin case: build the spatially lagged regressors (no lagged
  ## intercept) and interleave Betas/Thetas into a single Coeff vector.
  if (durbin) {
    WX <- (IT %x% IG %x% W) %*% X
    dimnames(WX)[[2]] <- paste0("lag.", colnames(X))
    Xdurbin <- NULL
    pdurbin <- p - 1  # lagged regressors per equation (no intercept)
    for (i in seq_along(p)) {
      if (i == 1) {
        Xdurbin <- cbind(X[, 1:p[i]], WX[, 2:p[i]])
        Coeff <- c(Betas[1:p[1]], Thetas[1:pdurbin[1]])
      } else {
        Xdurbin <- cbind(Xdurbin,
                         X[, (cumsum(p)[i - 1] + 1):cumsum(p)[i]],
                         WX[, (cumsum(p)[i - 1] + 2):cumsum(p)[i]])
        Coeff <- c(Coeff,
                   Betas[(cumsum(p)[i - 1] + 1):cumsum(p)[i]],
                   Thetas[(cumsum(pdurbin)[i - 1] + 1):cumsum(pdurbin)[i]])
      }
    }
  }
  ## --- Errors: draw U ~ N(0, I_Tm %x% Sigma %x% I_N) through the sparse
  ## Cholesky factor; exponentiate for the log-normal alternative.
  S <- Sigma
  OME <- Matrix::kronecker(Matrix::kronecker(IT, S), IR)
  chol_OME <- Matrix::Cholesky(OME)
  M <- Matrix::Matrix(0, ncol = 1, nrow = Tm * G * N)
  U <- matrix(sparseMVN::rmvn.sparse(n = 1, mu = M,
                                     CH = chol_OME, prec = FALSE),
              ncol = 1)
  U <- Matrix::Matrix(U)
  if (pdfU == "lognvrnorm") U <- exp(U)
  ## Spatially filtered errors: (I - diag(lambda) %x% W)^{-1} U.
  ## NOTE: very large Tm*G*N (> ~30000-40000) may be numerically problematic.
  IBU <- Matrix::solve(Matrix::kronecker(IT,
                                         (IGR - Matrix::kronecker(
                                           Matrix::Diagonal(length(lambda), lambda),
                                           W))), U)
  ## Dependent variable: invert the spatial-lag filter on the mean part.
  if (durbin) {
    Y <- Matrix::solve(Matrix::kronecker(IT,
                                         (IGR - Matrix::kronecker(
                                           Matrix::Diagonal(length(rho), rho),
                                           W))),
                       (Xdurbin %*% Coeff + IBU))
  } else {
    Y <- Matrix::solve(Matrix::kronecker(IT,
                                         (IGR - Matrix::kronecker(
                                           Matrix::Diagonal(length(rho), rho),
                                           W))),
                       (X %*% Betas + IBU))
  }
  ## --- Output ------------------------------------------------------------
  if (Tm == 1) {
    ## NOTE(review): the "df"/"panel" layouts rely on XF, which only exists
    ## when X was generated internally; a user-supplied X with Tm == 1 will
    ## fail here -- confirm intended behaviour upstream.
    index_indiv <- rep(1:N, Tm)
    YY <- matrix(Y[1:(N * G)], ncol = G)
    ## "panel" layout only for balanced designs (same p in every equation)
    if (all(p == p[1])) {
      if (xxx != 1) eq <- c("year_", "year") else eq <- c("eq_", "equation")
      YYY <- as.data.frame(cbind(paste0("Indv_", rep(1:N, each = G)),
                                 rep(paste0(eq[1], 1:G), N)))
      YYY$Y <- c(rbind(t(YY)))
      ## Interleave regressor k of every equation, one block per k
      h <- c(rbind(t(XF[, substr(colnames(XF), 4, 4) == 1])))
      for (i in 2:p[1]) {
        h <- rbind(h, c(rbind(t(XF[, substr(colnames(XF), 4, 4) == i]))))
      }
      h <- t(h)
      colnames(h) <- paste0("X_", seq_len(ncol(h)))
      names(YYY) <- c("index_indiv", eq[2], "Y")
      YYY <- cbind(YYY, h)
      YYY$index_indiv <- as.factor(YYY$index_indiv)
      YYY[, 2] <- as.factor(YYY[, 2])
    } else {
      YYY <- NULL
      if (type == "panel" || type == "all")
        warning("Unbalanced panel data. Panel output = NULL")
    }
    ## "df" layout: one row per individual, one Y_g column per equation
    YY <- cbind(index_indiv, YY)
    colnames(YY) <- c("index_indiv", paste0("Y_", 1:G))
    YY <- as.data.frame(cbind(YY, XF))
    results0 <- switch(type,
                       df     = list(df = YY),
                       panel  = list(panel = YYY),
                       matrix = list(X = as.matrix(X), Y = as.matrix(Y)),
                       all    = list(X = as.matrix(X), Y = as.matrix(Y),
                                     df = YY, panel = YYY))
  } else {
    results0 <- list(Y = as.matrix(Y), X = as.matrix(X))
  }
  invisible(results0)  # returned invisibly, as in the original assignment idiom
}
| /R/dgp_spSUR.R | no_license | rsbivand/spsur | R | false | false | 26,165 | r | #' @name dgp_spsur
#' @rdname dgp_spsur
#'
#' @title Generation of a random dataset with a spatial SUR structure.
#'
#' @description
#' The purpose of the function \code{dgp_spsur} is to generate a random
#' dataset with the dimensions and spatial structure decided by the user.
#' This function may be useful in pure simulation experiments or with the
#' aim of showing specific properties and characteristics
#' of a spatial SUR dataset and inferential procedures related to them.
#'
#' The user of \code{dgp_spsur} should think in terms of a Monte Carlo
#' experiment. The arguments of the function specify the dimensions of the
#' dataset to be generated, the spatial mechanism underlying the data, the
#' intensity of the SUR structure among the equations and the values of the
#' parameters to be used to obtain the simulated data, which includes the
#' error terms, the regressors and the explained variables.
#'
#' @usage dgp_spsur(Sigma, Tm = 1, G, N, Betas, Thetas = NULL,
#' rho = NULL, lambda = NULL, p = NULL, listw = NULL,
#' X = NULL, type = "matrix", pdfU = "nvrnorm",
#' pdfX = "nvrnorm")
#'
#' @param G Number of equations.
#' @param N Number of cross-section or spatial units
#' @param Tm Number of time periods. Default = \code{1}
#' @param p Number of regressors by equation, including the intercept.
#' \emph{p} can be a row vector of order \emph{(1xG)}, if the number of
#' regressors is not the same for all the equations, or a scalar, if the
#' \emph{G} equations have the same number of regressors.
#' @param listw A \code{listw} object created for example by
#' \code{\link[spdep]{nb2listw}} from \pkg{spatialreg} package; if
#' \code{\link[spdep]{nb2listw}} not given, set to
#' the same spatial weights as the \code{listw} argument. It can
#' also be a spatial weighting matrix of order \emph{(NxN)} instead of
#' a \code{listw} object. Default = \code{NULL}.
#' @param Sigma Covariance matrix between the \emph{G} equations of the
#' SUR model. This matrix should be definite positive and the user must
#' check for that.
#' @param Betas A row vector of order \eqn{(1xP)} showing the values for
#' the \emph{beta} coefficients.
#' The first \eqn{P_{1}} terms correspond to the first equation (where
#' the first element is the intercept), the second \eqn{P_{2}} terms to
#' the coefficients of the second equation and so on.
#' @param Thetas Values for the \eqn{\theta} coefficients in the
#' \emph{G} equations of the model, when the type of spatial SUR model to
#' be simulated is a "slx", "sdm" or "sdem". \emph{Thetas} is a
#' row vector of order \emph{\eqn{1xPTheta}}, where
#' \emph{\eqn{PThetas=p-G}}; let us note that the intercept cannot
#' appear among the spatial lags of the regressors. The first
#' \emph{\eqn{1xKTheta_{1}}} terms correspond to the first equation,
#' the second \emph{\eqn{1xPTheta_{2}}} terms correspond to the
#' second equation, and so on. Default = \code{NULL}.
#' @param rho Values of the coefficients \eqn{\rho_{g}; g=1,2,..., G}
#' related to the spatial lag of the explained variable of the g-th equation.
#' If \eqn{rho} is an scalar and there are \emph{G} equations in the
#' model, the same value will be used for all the equations. If \eqn{rho}
#' is a row vector, of order \emph{(1xG)}, the function \code{dgp_spsur}
#' will use these values, one for each equation. Default = \code{NULL}.
#' @param lambda Values of the coefficients \eqn{\lambda_{g}; g=1,2,..., G}
#' related to the spatial lag of the errors in the \emph{G} equations.
#' If \eqn{lambda} is an scalar and there are \emph{G} equations
#' in the model, the same value will be used for all the equations.
#' If \eqn{lambda} is a row vector, of order \emph{(1xG)}, the function
#' \code{dgp_spsur} will use these values, one for each equation of the
#' spatial errors. Default = \code{NULL}.
#' @param X This argument tells the function \code{dgp_spsur} which \emph{X}
#' matrix should be used to generate the SUR dataset. If \emph{X} is
#' different from \code{NULL}, \code{{dgp_spsur}} will upload the \emph{X}
#' matrix selected in this argument. Note that the \emph{X} must be consistent
#' with the dimensions of the model. If \emph{X} is \code{NULL},
#' \code{dgp_spsur} will generate the desired matrix of regressors from a
#' multivariate Normal distribution with mean value zero and identity
#' \eqn{(PxP)} covariance matrix. As an alternative, the user may change
#' this probability distribution function to the uniform case, \eqn{U(0,1)},
#' through the argument \emph{pdfX}. Default = \code{NULL}.
#' @param type Selection of the type of output. The alternatives are
#' \code{matrix}, \code{df}, \code{panel}, \code{all}. Default \code{matrix}
#' @param pdfX Multivariate probability distribution function (Mpdf), from
#' which the values of the regressors will be drawn. The regressors are
#' assumed to be independent. \code{dgp_spsur} provides two Mpdf,
#' the multivariate Normal, which is the default, and the uniform in the
#' interval \eqn{U[0,1]}, using the dunif function.
#' \code{\link[stats]{dunif}}, from the \pkg{stats} package. Two alternatives
#' \code{"nvrunif"}, \code{"nvrnorm"}. Default \code{"nvrnorm"}.
#' @param pdfU Multivariate probability distribution function, Mpdf, from
#' which the values of the error terms will be drawn. The covariance matrix
#' is the \eqn{\Sigma} matrix specified by the user in the argument. Two alternatives
#' \code{"lognvrnorm"}, \code{"nvrnorm"}. Default \code{"nvrnorm"}.
#'
#' \emph{Sigma}.
#' The function \code{dgp_spsur} provides two Mpdf, the multivariate Normal,
#' which is the default, and the log-Normal distribution function which
#' means just exponentiate the sample drawn from a \eqn{N(0,\Sigma)}
#' distribution. Default = \code{"nvrnorm"}.
#'
#'
#' @details
#' The purpose of the function \code{dgp_spsur} is to generate random
#' datasets, of a SUR nature, with the spatial structure decided by the user.
#' The function requires certain information to be supplied externally
#' because, in fact, \code{dgp_spsur} constitutes a Data Generation
#' Process, DGP. The following aspects should be addressed:
#' \itemize{
#' \item The user must define the dimensions of the dataset, that is,
#' number of equations, \emph{G}, number of time periods, \emph{Tm}, and number of
#' cross-sectional units, \emph{N}.
#' \item The user must choose the type of spatial structure desired
#' for the model from among the list of candidates of "sim", "slx",
#' "slm", "sem", "sdm", "sdem" or "sarar". The default is the "sim"
#' specification which does not have spatial structure. The decision is
#' made implicitly, just omitting the specification of the spatial
#' parameters which are not involved in the model (i.e., in a "slm"
#' there are no \eqn{\lambda} parameters but appear \eqn{\rho}
#' parameters; in a "sdem" model there are \eqn{\lambda} and \eqn{\theta}
#' parameters but no \eqn{\rho} coefficients).
#' \item If the user needs a model with spatial structure, a \emph{(NxN)} weighting
#' matrix, \emph{W}, should be chosen.
#' \item The next step builds the equations of the SUR model. In this
#' case, the user must specify the number of regressors that intervene
#' in each equation and the coefficients, \eqn{\beta} parameters,
#' associated with each regressor. The \emph{first} question is solved
#' through the argument \emph{p} which, if a scalar, indicates that
#' the same number of regressors should appear in all the equations
#' of the model; if the user seeks for a model with different number
#' of regressors in the \emph{G} equations, the argument \emph{p} must
#' be a \emph{(1xG)} row vector with the required information. It must
#' be remembered that \code{dgp_spsur} assumes that an
#' intercept appears in all equations of the model.
#'
#' The \emph{second} part of the problem posited above is solved through
#' the argument \emph{Betas}, which is a row vector of order \emph{(1xp)}
#' with the information required for this set of coefficients.
#' \item The user must specify, also, the values of the spatial parameters
#' corresponding to the chosen specification; we are referring to the
#' \eqn{\rho_{g}}, \eqn{\lambda_{g}} and \eqn{\theta_{g}},
#' for \eqn{g=1, ..., G and k=1,..., K_{g}} parameters. This is done
#' thought the arguments \emph{rho}, \emph{lambda} and \emph{theta}.
#' The first two, \emph{rho} and \emph{lambda}, work as \emph{K}: if
#' they are scalar, the same value will be used in the \emph{G}
#' equations of the SUR model; if they are \emph{(1xG)} row vectors,
#' a different value will be assigned for each equation.
#'
#' Moreover, \emph{Theta} works like the argument \emph{Betas}. The user
#' must define a row vector of order \eqn{1xPTheta} showing these values.
#' It is worth to remember that in no case the intercept will appear
#' among the lagged regressors.
#' \item With the argument \code{type} the user take the decision of the
#' output format. See Value section.
#' \item Finally, the user must decide which values of the regressors and
#' of the error terms are to be used in the simulation. The regressors
#' can be uploaded from an external matrix generated previously by the
#' user. This is the argument \emph{X}. It is the responsibility of the
#' user to check that the dimensions of the external matrix are consistent
#' with the dataset required for the SUR model. A second possibility
#' implies the regressors to be generated randomly by the function
#' \code{\link{dgp_spsur}}.
#' In this case, the user must select the probability distribution
#' function from which the corresponding data (of the regressors and
#' the error terms) are to be drawn.\cr
#'}
#' \code{dgp_spsur} provides two multivariate distribution functions,
#' namely, the Normal and the log-Normal for the errors (the second
#' should be taken as a clear departure from the standard assumption of
#' normality). In both cases, random matrices of order \emph{(TmNxG)}
#' are obtained from a multivariate normal distribution, with a mean
#' value of zero and the covariance matrix specified in the argument
#' \emph{Sigma}; then, this matrix is exponentiated for the log-Normal
#' case. Roughly, the same procedure applies for drawing the values of
#' the regressor. There are two distribution functions available, the
#' normal and the uniform in the interval \eqn{U[0,1]}; the regressors
#' are always independent.
#'
#'
#' @return
#' The default output ("matrix") is a list with a vector \eqn{Y} of order
#' \emph{(TmNGx1)} with the values
#' generated for the explained variable in the G equations of the SUR and
#' a matrix \eqn{XX} of order (\emph{(TmNGxsum(p))}, with the values
#' generated for the regressors of the SUR, including an intercept for
#' each equation.
#'
#' In case of Tm = 1 or G = 1 several alternatives
#' output can be select:
#'\itemize{
#' \item If the user select \code{type = "df"} the output is a data frame where each
#' column is a variable.
#'
#' \item If the user select \code{type = "panel"} the output is a data frame in
#' panel format including two factors. The first factor point out the observation
#' of the individual and the second the equation for different Tm or G.
#'
#' \item Finally, if \code{type = "all"} is select the output is a list including all
#' alternatives format.
#' }
#'
#' @author
#' \tabular{ll}{
#' Fernando López \tab \email{fernando.lopez@@upct.es} \cr
#' Román Mínguez \tab \email{roman.minguez@@uclm.es} \cr
#' Jesús Mur \tab \email{jmur@@unizar.es} \cr
#' }
#' @references
#' \itemize{
#' \item López, F. A., Mínguez, R., Mur, J. (2020). ML versus IV estimates
#' of spatial SUR models: evidence from the case of Airbnb in Madrid urban
#' area. \emph{The Annals of Regional Science}, 64(2), 313-347.
#' <doi:10.1007/s00168-019-00914-1>
#'
#' }
#' @seealso
#' \code{\link{spsurml}}, \code{\link{spsur3sls}}, \code{\link{spsurtime}}
#' @examples
#'
#' ## VIP: The output of the whole set of the examples can be examined
#' ## by executing demo(demo_dgp_spsur, package="spsur")
#'
#' ################################################
#' ### PANEL DATA (Tm = 1 or G = 1) ##
#' ################################################
#'
#' ################################################
#' #### Example 1: DGP SLM model. G equations
#' ################################################
#' rm(list = ls()) # Clean memory
#' Tm <- 1 # Number of time periods
#' G <- 3 # Number of equations
#' N <- 200 # Number of spatial elements
#' p <- 3 # Number of independent variables
#' Sigma <- matrix(0.3, ncol = G, nrow = G)
#' diag(Sigma) <- 1
#' Betas <- c(1, 2, 3, 1, -1, 0.5, 1, -0.5, 2)
#' rho <- 0.5 # level of spatial dependence
#' lambda <- 0.0 # spatial autocorrelation error term = 0
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP <- dgp_spsur(Sigma = Sigma, Betas = Betas,
#' rho = rho, lambda = lambda, Tm = Tm,
#' G = G, N = N, p = p, listw = lw)
#' \donttest{
#' SLM <- spsurml(X = DGP$X, Y = DGP$Y, Tm = Tm, N = N, G = G,
#' p = c(3, 3, 3), listw = lw, type = "slm")
#' summary(SLM)
#'
#' ################################################
#' #### Example 2: DGP SEM model with Tm>1; G=1 and
#' #### different p for each equation
#' ################################################
#' rm(list = ls()) # Clean memory
#' Tm <- 3 # Number of time periods
#' G <- 1 # Number of equations
#' N <- 500 # Number of spatial elements
#' p <- c(2,3,4) # Number of independent variables
#' Sigma <- matrix(0.8, ncol = Tm, nrow = Tm)
#' diag(Sigma) <- 1
#' Betas <- c(1,2,1,2,3,1,2,3,4)
#' rho <- 0 # level of spatial dependence = 0
#' lambda <- c(0.2,0.5,0.8)
#' ## spatial autocorrelation error terms for each equation
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP2 <- dgp_spsur(Sigma = Sigma, Betas = Betas, rho = rho,
#' lambda = lambda, Tm = Tm, G = G, N = N, p = p,
#' listw = lw)
#' SLM2 <- spsurml(X = DGP2$X, Y = DGP2$Y, Tm = Tm, N = N, G = G,
#' p = c(2,3,4), listw = lw, type = "slm")
#' summary(SLM2)
#' SEM2 <- spsurml(X = DGP2$X, Y = DGP2$Y, Tm = Tm, N = N, G = G,
#' p = c(2,3,4), listw = lw, type = "sem")
#' summary(SEM2)
#'
#' ################################################
#' #### Example 3: DGP SEM model with Tm>1; G=1 and
#' #### different p for each equation. Output "df"
#' ################################################
#' rm(list = ls()) # Clean memory
#' Tm <- 3 # Number of time periods
#' G <- 1 # Number of equations
#' N <- 500 # Number of spatial elements
#' p <- c(2,3,4) # Number of independent variables
#' Sigma <- matrix(0.8, ncol = Tm, nrow = Tm)
#' diag(Sigma) <- 1
#' Betas <- c(1,2,1,2,3,1,2,3,4)
#' rho <- 0 # level of spatial dependence = 0
#' lambda <- c(0.2,0.5,0.8)
#' ## spatial autocorrelation error terms for each equation
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP3 <- dgp_spsur(Sigma = Sigma, Betas = Betas, rho = rho,
#' lambda = lambda, Tm = Tm, G = G, N = N, p = p,
#' listw = lw, type = "df")
#' formula <- Y_1 | Y_2 | Y_3 ~ X_11 | X_21 + X_22 | X_31 + X_32 + X_33
#' SLM3 <- spsurml(formula = formula, data = DGP3$df,
#' listw = lw, type = "slm")
#' summary(SLM3)
#' SEM3 <- spsurml(formula = formula, data = DGP3$df,
#' listw = lw, type = "sem")
#' summary(SEM3)
#'
#' ################################################
#' ### MULTI-DIMENSIONAL PANEL DATA G>1 and Tm>1 ##
#' ################################################
#'
#' rm(list = ls()) # Clean memory
#' Tm <- 10 # Number of time periods
#' G <- 3 # Number of equations
#' N <- 100 # Number of spatial elements
#' p <- 3 # Number of independent variables
#' Sigma <- matrix(0.5, ncol = G, nrow = G)
#' diag(Sigma) <- 1
#' Betas <- rep(1:3, G)
#' rho <- c(0.5, 0.1, 0.8)
#' lambda <- 0.0 # spatial autocorrelation error term = 0
#' ## random coordinates
#' co <- cbind(runif(N,0,1),runif(N,0,1))
#' lw <- spdep::nb2listw(spdep::knn2nb(spdep::knearneigh(co, k = 5,
#' longlat = FALSE)))
#' DGP4 <- dgp_spsur(Sigma = Sigma, Betas = Betas, rho = rho,
#' lambda = lambda, Tm = Tm, G = G, N = N, p = p,
#' listw = lw)
#' SLM4 <- spsurml(Y = DGP4$Y, X = DGP4$X, G = G, N = N, Tm = Tm,
#' p = p, listw = lw, type = "slm")
#' summary(SLM4)
#' }
#' @export
dgp_spsur <- function(Sigma, Tm = 1, G, N, Betas,
                      Thetas = NULL, rho = NULL,
                      lambda = NULL, p = NULL,
                      listw = NULL, X = NULL,
                      type = "matrix",
                      pdfU = "nvrnorm", pdfX = "nvrnorm") {
  type <- match.arg(type, c("matrix", "df", "panel", "all"))
  pdfX <- match.arg(pdfX, c("nvrunif", "nvrnorm"))
  pdfU <- match.arg(pdfU, c("lognvrnorm", "nvrnorm"))
  ## --- Spatial weights: accept a listw object or a (sparse) matrix and
  ## always end up with both forms (W as a Matrix, listw as a listw).
  if (is.null(listw) || !inherits(listw, c("listw", "Matrix", "matrix")))
    stop("listw format unknown or NULL")
  if (inherits(listw, "listw")) {
    W <- Matrix::Matrix(spdep::listw2mat(listw))
  } else if (inherits(listw, "matrix")) {
    W <- Matrix::Matrix(listw)
    listw <- spdep::mat2listw(W)
  } else if (inherits(listw, "Matrix")) {
    W <- listw
    listw <- spdep::mat2listw(as.matrix(W))
  } else W <- Matrix::Diagonal(N)  # unreachable (guard above); kept defensively
  xxx <- Tm  # original Tm, used later to label the panel output
  if (Tm > 1 && G == 1) {  # recast a single-equation panel: equations <- periods
    G <- Tm
    Tm <- 1
  }
  durbin <- !is.null(Thetas)  # Durbin-type model when lagged-X coefs supplied
  if (!is.null(p) && length(p) == 1) p <- matrix(p, nrow = G, ncol = 1)
  if (is.null(lambda)) lambda <- rep(0, G)
  if (is.null(rho)) rho <- rep(0, G)
  if (length(lambda) == 1) lambda <- as.numeric(matrix(lambda,
                                                       nrow = G, ncol = 1))
  if (length(rho) == 1) rho <- as.numeric(matrix(rho,
                                                 nrow = G, ncol = 1))
  ## --- Regressors: generate X (block-diagonal by equation, stacked over
  ## time) unless supplied. XF keeps the non-intercept columns side by
  ## side and is reused later for the "df"/"panel" output layouts.
  if (is.null(X)) {
    if (is.null(p)) stop("Arguments X and p can not be NULL simultaneously")
    if (pdfX == "nvrunif") {
      X0 <- matrix(runif(N * (p[1] - 1)), N, (p[1] - 1))
      colnames(X0) <- paste0("X_1", seq_len(ncol(X0)))
      X <- cbind(matrix(1, N, 1), X0)
      XF <- X0  # fixed: was 'Xf', which broke the cbind(XF, X0) below
      for (i in seq_len(G - 1)) {
        X0 <- matrix(runif(N * (p[i + 1] - 1)), N,
                     (p[i + 1] - 1))
        colnames(X0) <- paste0("X_", (i + 1), seq_len(ncol(X0)))
        X <- Matrix::bdiag(X, cbind(matrix(1, N, 1), X0))
        XF <- cbind(XF, X0)
      }
      if (Tm > 1) {
        for (k in seq_len(Tm - 1)) {  # one extra block per time period
          X2 <- cbind(matrix(1, N, 1),
                      matrix(runif(N * (p[1] - 1)),
                             N, (p[1] - 1)))
          for (j in seq_len(G - 1)) {  # fixed: no longer shadows outer index
            X2 <- Matrix::bdiag(X2, cbind(matrix(1, N, 1),
                                          matrix(runif(N * (p[j + 1] - 1)),
                                                 N, (p[j + 1] - 1))))
          }
          X <- rbind(X, X2)
        }
      }
    } else if (pdfX == "nvrnorm") {
      X0 <- matrix(rnorm(N * (p[1] - 1), 0, 1), N, (p[1] - 1))
      colnames(X0) <- paste0("X_1", seq_len(ncol(X0)))
      X <- cbind(matrix(1, N, 1), X0)
      XF <- X0
      for (i in seq_len(G - 1)) {
        X0 <- matrix(rnorm(N * (p[i + 1] - 1), 0, 1),
                     N, (p[i + 1] - 1))
        colnames(X0) <- paste0("X_", (i + 1), seq_len(ncol(X0)))
        X <- Matrix::bdiag(X, cbind(matrix(1, N, 1), X0))
        XF <- cbind(XF, X0)
      }
      if (Tm > 1) {
        for (k in seq_len(Tm - 1)) {
          X2 <- cbind(matrix(1, N, 1),
                      matrix(rnorm(N * (p[1] - 1), 0, 1), N,
                             (p[1] - 1)))
          for (j in seq_len(G - 1)) {
            X2 <- Matrix::bdiag(X2, cbind(matrix(1, N, 1),
                                          matrix(rnorm(N * (p[j + 1] - 1),
                                                       0, 1),
                                                 N, (p[j + 1] - 1))))
          }
          X <- rbind(X, X2)
        }
      }
    } else stop("pdfX only can be nvrnorm or nvrunif")
    ## Name the columns of X: one intercept plus X<g>_<k> per equation g
    nam <- c(paste0("Intercep_", 1),
             paste0("X", 1, "_", seq_len(p[1] - 1)))
    if (length(p) > 1) {  # fixed: was length(p > 1), which is always truthy
      for (i in 2:length(p)) {
        nam <- c(nam, paste0("Intercep_", i),
                 paste0("X", i, "_", seq_len(p[i] - 1)))
      }
    }
    dimnames(X)[[2]] <- nam
  }
  if (is.null(p)) {
    if ((ncol(X) %% G) != 0) stop("Argument p need to be set")
    p <- rep(ncol(X) / G, G)
  }
  IT <- Matrix::Diagonal(Tm)
  IR <- Matrix::Diagonal(N)
  IG <- Matrix::Diagonal(G)
  IGR <- Matrix::Diagonal(G * N)
  ## --- Durbin case: build the spatially lagged regressors (no lagged
  ## intercept) and interleave Betas/Thetas into a single Coeff vector.
  if (durbin) {
    WX <- (IT %x% IG %x% W) %*% X
    dimnames(WX)[[2]] <- paste0("lag.", colnames(X))
    Xdurbin <- NULL
    pdurbin <- p - 1  # lagged regressors per equation (no intercept)
    for (i in seq_along(p)) {
      if (i == 1) {
        Xdurbin <- cbind(X[, 1:p[i]], WX[, 2:p[i]])
        Coeff <- c(Betas[1:p[1]], Thetas[1:pdurbin[1]])
      } else {
        Xdurbin <- cbind(Xdurbin,
                         X[, (cumsum(p)[i - 1] + 1):cumsum(p)[i]],
                         WX[, (cumsum(p)[i - 1] + 2):cumsum(p)[i]])
        Coeff <- c(Coeff,
                   Betas[(cumsum(p)[i - 1] + 1):cumsum(p)[i]],
                   Thetas[(cumsum(pdurbin)[i - 1] + 1):cumsum(pdurbin)[i]])
      }
    }
  }
  ## --- Errors: draw U ~ N(0, I_Tm %x% Sigma %x% I_N) through the sparse
  ## Cholesky factor; exponentiate for the log-normal alternative.
  S <- Sigma
  OME <- Matrix::kronecker(Matrix::kronecker(IT, S), IR)
  chol_OME <- Matrix::Cholesky(OME)
  M <- Matrix::Matrix(0, ncol = 1, nrow = Tm * G * N)
  U <- matrix(sparseMVN::rmvn.sparse(n = 1, mu = M,
                                     CH = chol_OME, prec = FALSE),
              ncol = 1)
  U <- Matrix::Matrix(U)
  if (pdfU == "lognvrnorm") U <- exp(U)
  ## Spatially filtered errors: (I - diag(lambda) %x% W)^{-1} U.
  ## NOTE: very large Tm*G*N (> ~30000-40000) may be numerically problematic.
  IBU <- Matrix::solve(Matrix::kronecker(IT,
                                         (IGR - Matrix::kronecker(
                                           Matrix::Diagonal(length(lambda), lambda),
                                           W))), U)
  ## Dependent variable: invert the spatial-lag filter on the mean part.
  if (durbin) {
    Y <- Matrix::solve(Matrix::kronecker(IT,
                                         (IGR - Matrix::kronecker(
                                           Matrix::Diagonal(length(rho), rho),
                                           W))),
                       (Xdurbin %*% Coeff + IBU))
  } else {
    Y <- Matrix::solve(Matrix::kronecker(IT,
                                         (IGR - Matrix::kronecker(
                                           Matrix::Diagonal(length(rho), rho),
                                           W))),
                       (X %*% Betas + IBU))
  }
  ## --- Output ------------------------------------------------------------
  if (Tm == 1) {
    ## NOTE(review): the "df"/"panel" layouts rely on XF, which only exists
    ## when X was generated internally; a user-supplied X with Tm == 1 will
    ## fail here -- confirm intended behaviour upstream.
    index_indiv <- rep(1:N, Tm)
    YY <- matrix(Y[1:(N * G)], ncol = G)
    ## "panel" layout only for balanced designs (same p in every equation)
    if (all(p == p[1])) {
      if (xxx != 1) eq <- c("year_", "year") else eq <- c("eq_", "equation")
      YYY <- as.data.frame(cbind(paste0("Indv_", rep(1:N, each = G)),
                                 rep(paste0(eq[1], 1:G), N)))
      YYY$Y <- c(rbind(t(YY)))
      ## Interleave regressor k of every equation, one block per k
      h <- c(rbind(t(XF[, substr(colnames(XF), 4, 4) == 1])))
      for (i in 2:p[1]) {
        h <- rbind(h, c(rbind(t(XF[, substr(colnames(XF), 4, 4) == i]))))
      }
      h <- t(h)
      colnames(h) <- paste0("X_", seq_len(ncol(h)))
      names(YYY) <- c("index_indiv", eq[2], "Y")
      YYY <- cbind(YYY, h)
      YYY$index_indiv <- as.factor(YYY$index_indiv)
      YYY[, 2] <- as.factor(YYY[, 2])
    } else {
      YYY <- NULL
      if (type == "panel" || type == "all")
        warning("Unbalanced panel data. Panel output = NULL")
    }
    ## "df" layout: one row per individual, one Y_g column per equation
    YY <- cbind(index_indiv, YY)
    colnames(YY) <- c("index_indiv", paste0("Y_", 1:G))
    YY <- as.data.frame(cbind(YY, XF))
    results0 <- switch(type,
                       df     = list(df = YY),
                       panel  = list(panel = YYY),
                       matrix = list(X = as.matrix(X), Y = as.matrix(Y)),
                       all    = list(X = as.matrix(X), Y = as.matrix(Y),
                                     df = YY, panel = YYY))
  } else {
    results0 <- list(Y = as.matrix(Y), X = as.matrix(X))
  }
  invisible(results0)  # returned invisibly, as in the original assignment idiom
}
|
# Simulate a small track without a CRS, plus derived objects
# (steps, random points, MCP and KDE home ranges).
library(amt)
library(lubridate)
set.seed(123)
sim_pts <- tibble(x = cumsum(rnorm(20)), y = cumsum(rnorm(20)),
                  ts = ymd_hm("2019-01-01 00:00") + hours(0:19))
trk_nocrs <- make_track(sim_pts, x, y, ts)
steps_nocrs <- steps(trk_nocrs)
rnd_nocrs <- random_points(trk_nocrs)
mcp_nocrs <- hr_mcp(trk_nocrs)
kde_nocrs <- hr_kde(trk_nocrs)
# The bundled deer data set carries a projected CRS.
data(deer)
trk_crs <- deer[1:100, ]
steps_crs <- steps(trk_crs)
mcp_crs <- hr_mcp(trk_crs)
kde_crs <- hr_kde(trk_crs)
akde_crs <- hr_akde(trk_crs)
# get_crs(): NA for the CRS-less objects ...
expect_true(is.na(get_crs(trk_nocrs)))
expect_true(is.na(get_crs(steps_nocrs)))
expect_true(is.na(get_crs(mcp_nocrs)))
expect_true(is.na(get_crs(kde_nocrs)))
# ... and a "CRS"/"crs" object for the projected ones.
expect_true(is(get_crs(trk_crs), "CRS"))
expect_true(is(get_crs(steps_crs), "CRS"))
expect_true(is(get_crs(mcp_crs), "crs"))
expect_true(is(get_crs(kde_crs), "crs"))
expect_true(is(get_crs(akde_crs), "crs"))
# has_crs() mirrors get_crs().
expect_false(has_crs(trk_nocrs))
expect_false(has_crs(steps_nocrs))
expect_false(has_crs(mcp_nocrs))
expect_false(has_crs(kde_nocrs))
expect_true(has_crs(trk_crs))
expect_true(has_crs(steps_crs))
expect_true(has_crs(mcp_crs))
expect_true(has_crs(kde_crs))
expect_true(has_crs(akde_crs))
| /amt/inst/tinytest/test_crs.R | no_license | akhikolla/InformationHouse | R | false | false | 973 | r | # Simulate data
library(amt)
library(lubridate)
# Simulate a small track without a CRS, plus derived objects
# (steps, random points, MCP and KDE home ranges).
set.seed(123)
sim_pts <- tibble(x = cumsum(rnorm(20)), y = cumsum(rnorm(20)),
                  ts = ymd_hm("2019-01-01 00:00") + hours(0:19))
trk_nocrs <- make_track(sim_pts, x, y, ts)
steps_nocrs <- steps(trk_nocrs)
rnd_nocrs <- random_points(trk_nocrs)
mcp_nocrs <- hr_mcp(trk_nocrs)
kde_nocrs <- hr_kde(trk_nocrs)
# The bundled deer data set carries a projected CRS.
data(deer)
trk_crs <- deer[1:100, ]
steps_crs <- steps(trk_crs)
mcp_crs <- hr_mcp(trk_crs)
kde_crs <- hr_kde(trk_crs)
akde_crs <- hr_akde(trk_crs)
# get_crs(): NA for the CRS-less objects ...
expect_true(is.na(get_crs(trk_nocrs)))
expect_true(is.na(get_crs(steps_nocrs)))
expect_true(is.na(get_crs(mcp_nocrs)))
expect_true(is.na(get_crs(kde_nocrs)))
# ... and a "CRS"/"crs" object for the projected ones.
expect_true(is(get_crs(trk_crs), "CRS"))
expect_true(is(get_crs(steps_crs), "CRS"))
expect_true(is(get_crs(mcp_crs), "crs"))
expect_true(is(get_crs(kde_crs), "crs"))
expect_true(is(get_crs(akde_crs), "crs"))
# has_crs() mirrors get_crs().
expect_false(has_crs(trk_nocrs))
expect_false(has_crs(steps_nocrs))
expect_false(has_crs(mcp_nocrs))
expect_false(has_crs(kde_nocrs))
expect_true(has_crs(trk_crs))
expect_true(has_crs(steps_crs))
expect_true(has_crs(mcp_crs))
expect_true(has_crs(kde_crs))
expect_true(has_crs(akde_crs))
|
#' @title Plot a tab object
#' @description Plot a frequency or cumulative frequency table
#' @param x An object of class \code{tab}
#' @param fill Fill color for bars
#' @param size numeric. Size of bar text labels.
#' @param ... Parameters passed to a function
#' @importFrom stats reorder
#' @return a ggplot2 graph
#' @examples
#' tbl1 <- tab(cars74, carb)
#' plot(tbl1)
#'
#' tbl2 <- tab(cars74, carb, sort = TRUE)
#' plot(tbl2)
#'
#' tbl3 <- tab(cars74, carb, cum=TRUE)
#' plot(tbl3)
#' @rdname plot.tab
#' @import ggplot2
#' @export
plot.tab <- function(x, fill = "deepskyblue2", size = 3.5, ...) {
  # Plot a frequency ("tab") table as a horizontal bar chart. Cumulative
  # tables additionally show grey background bars for cumulative percent.
  if (!inherits(x, "tab")) stop("Must be class 'tab'")
  # Record the table's row order; reorder() below uses it so bars keep
  # the table's ordering instead of alphabetical level order.
  x$ord <- seq_len(nrow(x))
  vname <- attr(x, "vname")
  if (length(x) == 4) {
    # Simple frequency table (level / n / percent, plus the ord column)
    p <- ggplot(x, aes(x = reorder(.data[["level"]], .data[["ord"]]),
                       y = .data[["percent"]])) +
      geom_bar(stat = "identity", fill = fill) +
      labs(x = vname, y = "percent") + coord_flip() +
      geom_text(aes(label = paste0(round(.data[["percent"]]), "%")),
                hjust = 1, size = size, color = "grey30")
  } else if (length(x) == 6) {
    # Cumulative table: grey bars = cumulative percent, colored = percent
    p <- ggplot(x,
                aes(x = reorder(.data[["level"]], .data[["ord"]]),
                    y = .data[["cum_percent"]])) +
      geom_bar(fill = "grey", alpha = .6, stat = "identity") +
      geom_bar(aes(x = reorder(.data[["level"]], .data[["ord"]]),
                   y = .data[["percent"]]),
               fill = fill, stat = "identity") +
      labs(x = vname, y = "cumulative percent") + coord_flip() +
      geom_text(aes(label = paste0(round(.data[["cum_percent"]]), "%")),
                hjust = 1, size = size, color = "grey30")
  } else {
    # Fixed: previously fell through to return(p) with 'p' never assigned,
    # failing with the unhelpful "object 'p' not found".
    stop("Unexpected 'tab' structure: ", length(x), " columns")
  }
  return(p)
}
| /R/plot.tab.R | permissive | Rkabacoff/qacr | R | false | false | 1,632 | r | #' @title Plot a tab object
#' @description Plot a frequency or cumulative frequency table
#' @param x An object of class \code{tab}
#' @param fill Fill color for bars
#' @param size numeric. Size of bar text labels.
#' @param ... Parameters passed to a function
#' @importFrom stats reorder
#' @return a ggplot2 graph
#' @examples
#' tbl1 <- tab(cars74, carb)
#' plot(tbl1)
#'
#' tbl2 <- tab(cars74, carb, sort = TRUE)
#' plot(tbl2)
#'
#' tbl3 <- tab(cars74, carb, cum=TRUE)
#' plot(tbl3)
#' @rdname plot.tab
#' @import ggplot2
#' @export
plot.tab <- function(x, fill = "deepskyblue2", size = 3.5, ...) {
  # Plot a frequency ("tab") table as a horizontal bar chart. Cumulative
  # tables additionally show grey background bars for cumulative percent.
  if (!inherits(x, "tab")) stop("Must be class 'tab'")
  # Record the table's row order; reorder() below uses it so bars keep
  # the table's ordering instead of alphabetical level order.
  x$ord <- seq_len(nrow(x))
  vname <- attr(x, "vname")
  if (length(x) == 4) {
    # Simple frequency table (level / n / percent, plus the ord column)
    p <- ggplot(x, aes(x = reorder(.data[["level"]], .data[["ord"]]),
                       y = .data[["percent"]])) +
      geom_bar(stat = "identity", fill = fill) +
      labs(x = vname, y = "percent") + coord_flip() +
      geom_text(aes(label = paste0(round(.data[["percent"]]), "%")),
                hjust = 1, size = size, color = "grey30")
  } else if (length(x) == 6) {
    # Cumulative table: grey bars = cumulative percent, colored = percent
    p <- ggplot(x,
                aes(x = reorder(.data[["level"]], .data[["ord"]]),
                    y = .data[["cum_percent"]])) +
      geom_bar(fill = "grey", alpha = .6, stat = "identity") +
      geom_bar(aes(x = reorder(.data[["level"]], .data[["ord"]]),
                   y = .data[["percent"]]),
               fill = fill, stat = "identity") +
      labs(x = vname, y = "cumulative percent") + coord_flip() +
      geom_text(aes(label = paste0(round(.data[["cum_percent"]]), "%")),
                hjust = 1, size = size, color = "grey30")
  } else {
    # Fixed: previously fell through to return(p) with 'p' never assigned,
    # failing with the unhelpful "object 'p' not found".
    stop("Unexpected 'tab' structure: ", length(x), " columns")
  }
  return(p)
}
|
# This R code was created by Ed Ryan on 27th August 2020 (revised in review).
# It builds three multinomial logistic regression models which will be used as
# part of a larger model that will simulate a simple cricket game.  For
# simplicity we use the same set of values for the input covariates
# (BattingAverage, PowerPlay and SpinBowler) to determine the possible outcome
# for each of the 120 (20 overs) balls that we simulate.
#
# Submodel 1 predicts {0 runs, 1-6 runs, wicket}; submodel 2, given that runs
# were scored, predicts {1-3 runs, 4 runs, 6 runs}; submodel 3, given 1-3
# runs, predicts {1 run, 2 runs, 3 runs}.  The model summaries s1/s2/s3 are
# saved to CricketModel_parameters.RData at the end.
#
# Review fixes relative to the original script:
#   * factor conversions are now applied to the data-frame columns that
#     multinom() actually uses (the old free-standing copies were masked by
#     the 'data=' argument and never reached the model);
#   * submodel 3 subsets with indices derived from Ind2, not Ind1 (the old
#     code silently recycled a length-N2 logical index against Ind1);
#   * submodel 3 takes its outputs from outputs_m2 (not outputs_m1), keeping
#     inputs and outputs row-aligned, and keeps only the run_1/run_2/run_3
#     indicator columns as the comment intended.

# Remove all current objects stored in the R environment:
rm(list = ls())

# Install (first run only) and load the packages that are needed:
# install.packages("nnet")
library(nnet)

# Set the current working directory (i.e. where the R code and dataset are stored):
setwd("C:/Work/Rcourse/Session4")

# Read in the data and look at the first few rows of it:
cdata.IN <- read.csv("Cricket_data_v2_Durham_home_matches_training.csv")
head(cdata.IN)

####################################################################
# BUILD SUBMODEL 1: PREDICTING 0 RUNS, 1-6 RUNS, OR A WICKET.      #
####################################################################

# Keep only the rows with a known batting average:
Ind.noNA <- which(!is.na(cdata.IN$BattingAverage))
inputs_m1 <- cdata.IN[Ind.noNA, 1:9]

# Collapse the seven outcome-indicator columns (10:16) into 3 possible outcomes:
outputs_m1 <- cdata.IN[Ind.noNA, 10:16]
outcome1 <- as.vector(1 * outputs_m1[, 1])                   # outcome 1 = 0 runs
outcome2 <- as.vector(2 * apply(outputs_m1[, 2:6], 1, sum))  # outcome 2 = 1,2,3,4 or 6 runs
outcome3 <- as.vector(3 * outputs_m1[, 7])                   # outcome 3 = wicket
outcome_m1 <- outcome1 + outcome2 + outcome3

# Put the four columns into a new data frame, converting the categorical
# variables in place so that multinom() actually sees factors:
cdata_m1 <- data.frame(PowerPlay      = factor(inputs_m1$PowerPlay),
                       SpinBowler     = factor(inputs_m1$SpinBowler),
                       BattingAverage = inputs_m1$BattingAverage,
                       Outcome        = factor(outcome_m1))

# Train the multinomial logistic regression model and keep its summary:
model1 <- multinom(Outcome ~ PowerPlay + SpinBowler + BattingAverage, data = cdata_m1)
s1 <- summary(model1)

####################################################################################################
# BUILD SUBMODEL 2: WHERE THERE IS AT LEAST 1 RUN, PREDICT WHETHER IT'S 1-3 RUNS, 4 RUNS OR 6 RUNS.#
####################################################################################################

# Keep the submodel-1 rows that record 1-6 runs (no dot ball, no wicket):
Ind1.onlyruns <- which(outputs_m1$runs_0 == 0 & outputs_m1$Wicket == 0)
inputs_m2 <- inputs_m1[Ind1.onlyruns, ]

# outputs_m1 has 7 columns; only columns 2-6 (the run indicators) are needed:
outputs_m2 <- outputs_m1[Ind1.onlyruns, 2:6]
outcome1 <- as.vector(1 * apply(outputs_m2[, 1:3], 1, sum))  # outcome 1 = 1, 2 or 3 runs
outcome2 <- as.vector(2 * outputs_m2[, 4])                   # outcome 2 = 4 runs (touches ground before boundary)
outcome3 <- as.vector(3 * outputs_m2[, 5])                   # outcome 3 = 6 runs (clears the boundary)
outcome_m2 <- outcome1 + outcome2 + outcome3

cdata_m2 <- data.frame(PowerPlay      = factor(inputs_m2$PowerPlay),
                       SpinBowler     = factor(inputs_m2$SpinBowler),
                       BattingAverage = inputs_m2$BattingAverage,
                       Outcome        = factor(outcome_m2))

model2 <- multinom(Outcome ~ PowerPlay + SpinBowler + BattingAverage, data = cdata_m2)
s2 <- summary(model2)

############################################################################################
# BUILD SUBMODEL 3: WHERE THERE ARE 1-3 RUNS, PREDICT WHETHER IT'S 1 RUN, 2 RUNS OR 3 RUNS.#
############################################################################################

# Keep the submodel-2 rows that record 1-3 runs (no boundary).
# (Fixed: the original indexed Ind1 with a length-N2 logical vector,
#  which R silently recycled, selecting the wrong rows.)
Ind2.onlyruns <- which(outputs_m2$runs_4 == 0 & outputs_m2$runs_6 == 0)
inputs_m3 <- inputs_m2[Ind2.onlyruns, ]

# (Fixed: the original took rows from outputs_m1, misaligning them with
#  inputs_m3; only the first three columns of outputs_m2 are needed here.)
outputs_m3 <- outputs_m2[Ind2.onlyruns, 1:3]
outcome1 <- as.vector(1 * outputs_m3[, 1])  # outcome 1 = 1 run
outcome2 <- as.vector(2 * outputs_m3[, 2])  # outcome 2 = 2 runs
outcome3 <- as.vector(3 * outputs_m3[, 3])  # outcome 3 = 3 runs
outcome_m3 <- outcome1 + outcome2 + outcome3

cdata_m3 <- data.frame(PowerPlay      = factor(inputs_m3$PowerPlay),
                       SpinBowler     = factor(inputs_m3$SpinBowler),
                       BattingAverage = inputs_m3$BattingAverage,
                       Outcome        = factor(outcome_m3))

model3 <- multinom(Outcome ~ PowerPlay + SpinBowler + BattingAverage, data = cdata_m3)
s3 <- summary(model3)

###########################################################################
# SAVE ALL THE MODEL SUMMARIES (STORED IN s1, s2 AND s3) AS AN RData FILE #
###########################################################################
save(s1, s2, s3, file = "CricketModel_parameters.RData")
| /Rcourse_Session4_ParameterEstimation.R | no_license | edmundryan/Rcourse_session4 | R | false | false | 6,233 | r | #This R code was created by Ed Ryan on 27th August 2020
#It builds three multinomial logistic regression models which will be used as part of a larger
#model that will simulate a simple cricket game. For simplicity we use the same set of values
#for the input covariates (Batting average, Powerplay and SpinBowler) to determine the possible
#outcome for each of the 120 (20 overs) balls that we simulate.
#Remove all current objects stored in R environment:
rm(list = ls())
#install and load any R packages that are needed:
#install.packages("nnet") #uncomment this for the first time you run R code.
library(nnet)
#Set the current working directory (i.e. where the R code and dataset is stored):
setwd("C:/Work/Rcourse/Session4")
# Read in data and look at the first few rows of it
cdata.IN <- read.csv("Cricket_data_v2_Durham_home_matches_training.csv")
head(cdata.IN)
####################################################################
#BUILD SUBMODEL 1: PREDICTING 0 RUNS, 1-6 RUNs, OR A WICKET. #
####################################################################
#Create an index and find the row numbers of those that have NAs in the Batting Average column:
N=dim(cdata.IN)[1]
Ind=c(1:N)
Ind.noNA=Ind[is.na(cdata.IN$BattingAverage)==FALSE]
inputs_m1=cdata.IN[Ind.noNA,1:9]
#Process the outputs that we only get 3 possible outcomes:
outputs_m1=cdata.IN[Ind.noNA,10:16]
outcome1=as.vector(1*outputs_m1[,1]) #outcome1 = 0 runs
outcome2=as.vector(2*apply(outputs_m1[,2:6],1,sum)) #outcome2 = 1,2,3,4 or 6 runs
outcome3=as.vector(3*outputs_m1[,7]) #outcome3 = wicket
outcome_m1=outcome1+outcome2+outcome3
#For clarity we'll put the four columns of data into a new data frame which we'll call cdata:
cdata_m1=as.data.frame(cbind(inputs_m1$PowerPlay,inputs_m1$SpinBowler,inputs_m1$BattingAverage,outcome_m1))
names(cdata_m1)=c("PowerPlay","SpinBowler","BattingAverage","Outcome")
names(cdata_m1)
#Format categorical variables
PowerPlay=factor(cdata_m1$PowerPlay)
SpinBowler=factor(cdata_m1$SpinBowler)
Outcome=factor(cdata_m1$Outcome)
#Train the logistic regression model:
model1 <- multinom(Outcome ~ PowerPlay + SpinBowler + BattingAverage, data=cdata_m1)
#Check out the results to the model:
s1=summary(model1)
####################################################################################################
#BUILD SUBMODEL 2: WHERE THERE IS AT LEAST 1 RUN, PREDICT WHETHER IT'S 1-3 RUNS, 4 RUNS OR 6 RUNS. #
####################################################################################################
#Create an index and find the row numbers of those that record 1-6 runs
#(recall that 'outputs_m1' was calculated at the start of the R code for submodel 1)
N1=dim(outputs_m1)[1]
Ind1=c(1:N1)
names(outputs_m1)
Ind1.onlyruns=Ind1[(outputs_m1$runs_0==0) & (outputs_m1$Wicket==0)]
inputs_m2=inputs_m1[Ind1.onlyruns,]
#Process the outputs that we only get 3 possible outcomes:
names(outputs_m1)
outputs_m2=outputs_m1[Ind1.onlyruns,2:6] #outputs_m1 consists of 7 columns but only need columns 2-6.
names(outputs_m2)
outcome1=as.vector(1*apply(outputs_m2[,1:3],1,sum)) #outcome1 = 1,2 or 3 runs. check with names(outputs_m2).
outcome2=as.vector(2*outputs_m2[,4]) #outcome2 = 4 runs (boundary and touches ground beforehand)
outcome3=as.vector(3*outputs_m2[,5]) #outcome3 = 6 runs (boundary without touching ground beforehand)
outcome_m2=outcome1+outcome2+outcome3
#Put the four columns of data into a new data frame which we'll call cdata_m2:
cdata_m2=as.data.frame(cbind(inputs_m2$PowerPlay,inputs_m2$SpinBowler,inputs_m2$BattingAverage,outcome_m2))
names(cdata_m2)=c("PowerPlay","SpinBowler","BattingAverage","Outcome")
names(cdata_m2)
#Format categorical variables
PowerPlay=factor(cdata_m2$PowerPlay)
SpinBowler=factor(cdata_m2$SpinBowler)
Outcome=factor(cdata_m2$Outcome)
#Train the logistic regression model:
model2 <- multinom(Outcome ~ PowerPlay + SpinBowler + BattingAverage, data=cdata_m2)
#Check out the results to the model:
s2=summary(model2)
############################################################################################
#BUILD SUBMODEL 3: WHERE THERE are 1-3 RUNS, PREDICT WHETHER IT'S 1 RUN, 2 RUNS OR 3 RUNS. #
############################################################################################
#Create an index and find the row numbers of those that record 1-3 runs
#(recall that 'outputs_m2' was calculated at the start of the R code for submodel 2)
N2=dim(outputs_m2)[1]
Ind2=c(1:N2)
names(outputs_m1)
Ind2.onlyruns=Ind1[(outputs_m2$runs_4==0) & (outputs_m2$runs_6==0)]
inputs_m3=inputs_m2[Ind2.onlyruns,]
#Process the outputs that we only get 3 possible outcomes:
names(outputs_m2)
outputs_m2=outputs_m1[Ind2.onlyruns,2:6] #outputs_m1 consists of 5 columns but only need columns 1-3
names(outputs_m2)
outcome1=as.vector(1*outputs_m2[,1]) #outcome1 = 1 runs
outcome2=as.vector(2*outputs_m2[,2]) #outcome2 = 2 runs
outcome3=as.vector(3*outputs_m2[,3]) #outcome3 = 3 runs
outcome_m3=outcome1+outcome2+outcome3
#Put the four columns of data into a new data frame which we'll call cdata_m3:
cdata_m3=as.data.frame(cbind(inputs_m3$PowerPlay,inputs_m3$SpinBowler,inputs_m3$BattingAverage,outcome_m3))
names(cdata_m3)=c("PowerPlay","SpinBowler","BattingAverage","Outcome")
names(cdata_m3)
#Format categorical variables
PowerPlay=factor(cdata_m3$PowerPlay)
SpinBowler=factor(cdata_m3$SpinBowler)
Outcome=factor(cdata_m3$Outcome)
#Train the logistic regression model:
model3 <- multinom(Outcome ~ PowerPlay + SpinBowler + BattingAverage, data=cdata_m3)
#Check out the results to the model:
s3=summary(model3)
###########################################################################
#SAVE ALL THE MODEL PARAMETERS (STORED IN s1, s2 AND s3) AS AN RData file #
###########################################################################
save(s1, s2, s3, file = "CricketModel_parameters.RData")
|
## per treballar amb factors
xxx$sex <- as.factor(with(xxx, ifelse(sexe_b==1, "Male", "Female")))
xxx$sex <- relevel(xxx$sex, ref= "Male")
intervals(lm(lnimta_CCA ~ C(as.factor(sexe_b),base= 1), data = xxx))
intervals(lm(lnimta_CCA ~ sex, data = xxx))
## per llegir excel
dat <- readWorksheetFromFile( "./dat/Basedades2filtradasensedades4.xlsx", sheet = "Full 1", header = T,dateTimeFormat = "%d-%m-%Y", startRow=1, endRow = 386)
redcap
ronald multistage
## per installar RODBC
http://superuser.com/questions/283272/problem-with-rodbc-installation-in-ubuntu
rm(list=ls())
#RutinesLocals<-"C:/Users/jvila/Dropbox//rutines"
RutinesLocals<-"/home/jvila/Dropbox/rutines"
RutinesLocals <- "/Users/jvila/Dropbox/rutines"
install.packages("readr")
date_names_langs()
parse_date("1 enero 2015", "%d %B %Y", locale = locale("es"))
install.packages("haven")
source(file.path(RutinesLocals,"table2.r"))
source(file.path(RutinesLocals,"subset2.r"))
source(file.path(RutinesLocals,"carrega.llibreria.r"))
source(file.path(RutinesLocals,"calculadora.risc.r"))
source(file.path(RutinesLocals,"merge2.r"))
source(file.path(RutinesLocals,"intervals.r"))
source(file.path(RutinesLocals,"prepare.r"))
source(file.path(RutinesLocals,"export.SPSS.r"))
source(file.path(RutinesLocals,"arregla.formats.r"))
source(file.path(RutinesLocals,"import.ACCESS2.r"))
source(file.path(RutinesLocals,"merge2.r"))
source(file.path(RutinesLocals,"add.cases.r"))
source(file.path(RutinesLocals,"format2.r"))
source(file.path(RutinesLocals,"order2.r"))
source(file.path(RutinesLocals,"print2.r"))
source(file.path(RutinesLocals,"read.spss4.r"))
source(file.path(RutinesLocals,"spss_varlist.r"))
####packages
install.packages("shiny")
install.packages("compareGroups")
install.packages("gam")
install.packages("png")
install.packages("epitools")
install.packages("pROC")
install.packages("psych")
install.packages("plotrix")
install.packages("knitr")
install.packages("chron")
## pgirmess
## primer he hagut d'instalar "gdal"
## gdal-config em mostra que no existeix
## sudo apt-get install libgdal-dev
## sudo apt-get install libgdal1-dev libproj-dev
## sudo apt-get update
install.packages("rgdal")
install.packages("pgirmess")
install.packages("stringr")
install.packages("MASS")
install.packages("nnet")
install.packages("car")
install.packages("RODBC")
install.packages("survival")
install.packages("lattice")
install.packages("cluster")
install.packages("Hmisc")
install.packages("xtable")
install.packages("gdata")
install.packages("oce")
install.packages("tcltk2")
##install.packages("odfWeave")
install.packages("Rcmdr")
install.packages("extrafont")
###############################################################################
############## rJava 20/10/2015 ########################################
###############################################################################
## veure: http://tecadmin.net/install-oracle-java-8-jdk-8-ubuntu-via-ppa/
## per veure on es el JAVA
whereis java
## s'ha d'executar:
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer
## comprovar la versio instalada
java -version
## si el resultat no ?s 1.8
sudo update-alternatives --config java # i seleccionar 1.8
## un cop haguem comprovat que es la 1.8
sudo apt-get install oracle-java8-set-default
## par tal de que el R l'incorpori
R CMD javareconf
###############################################################################
###############################################################################
###############################################################################
install.packages("rJava")
install.packages("xlsx")
#deb
#http://cran.rstudio.com/bin/linux/ubuntu
#lucid/
R.Version()
rm(list=ls(mgcv))
http://cran.rstudio.com/
library(frailtypack) # la llibreria de Juan Ramon de Supervivencia
## png
install.packages("png")
library(png)
## lme4
install.packages("epitools")
## pROC
install.packages("pROC")
## psych
install.packages("psych")
library(psych)
## plotrix
install.packages("plotrix")
library(plotrix)
install.packages("knitr")
library(knitr)
##chron
install.packages("chron")
## pgirmess
## primer he hagut d'instalar "gdal"
## gdal-config em mostra que no existeix
## sudo apt-get install libgdal-dev
## sudo apt-get install libgdal1-dev libproj-dev
## sudo apt-get update
install.packages("rgdal")
install.packages("pgirmess")
install.packages("rgdal")
install.packages("stringr")
install.packages('stringr', repos='http://cran.us.r-project.org')
## per instal.lar "car" a linux
install.packages("MASS")
install.packages("nnet")
install.packages("car")
library(car)
## per instal.lar "RODBC"
## sudo aptitude install unixodbc-dev
install.packages("RODBC")
library(RODBC)
install.packages("survival")
library(survival)
install.packages("gam")
library(gam)
## per instal.lar "Hmisc"
install.packages("lattice")
install.packages("cluster")
install.packages("Hmisc")
library(Hmisc)
install.packages("xtable", dependencies=TRUE)
library(xtable)
install.packages("gdata", dependencies=TRUE)
library(gdata)
install.packages("oce", dependencies=TRUE)
library(oce)
install.packages("tcltk2", dependencies=TRUE)
library(tcltk2)
install.packages("odfWeave", dependencies=TRUE)
library(odfWeave)
install.packages("compareGroups")
library(compareGroups)
install.packages("Rcmdr", dependencies=TRUE)
library(Rcmdr)
install.packages("extrafont")
library(extrafont)
font_import()
fonts()
## rjava / xlsx / XLConnect
## des del promt:
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java7-installer
sudo apt-get update
sudo R CMD javareconf
## des de R
install.packages("rJava", dependencies=TRUE)
install.packages("XLConnect", dependencies=TRUE)
install.packages("XLConnectJars", dependencies=TRUE)
################################################################################
#################### r Java ##############################################
################################################################################
## veure si es la versio de 32 o 64 bits amb
## sessionInfo()
## baixar-se la versi? de 64-bits de:
## http://java.com/en/download/manual.jsp
## ho he instal.lat a C:/Programs/Java64/
## he posat aquesta adre?a al path d'inici de windows
library(rJava)
################################################################################
################################################################################
################################################################################
Sys.setenv(JAVA_HOME='C:/Programs/Java64') # for 64-bit version
Sys.setenv(JAVA_HOME='/usr/lib/jvm/java-7-oracle/jre')
## .libPaths()
## .libPaths(c("/home/ars/R/x86_64-pc-linux-gnu-library/2.15","/usr/local/lib/R/site-library","/usr/lib/R/site-library","/usr/lib/R/library"))
## veure:
## http://www.r-statistics.com/2012/08/how-to-load-the-rjava-package-after-the-error-java_home-cannot-be-determined-from-the-registry/
## Sys.setenv(JAVA_HOME='C:\\Program Files (x86)\\Java\\jre7') # for 32-bit version
Sys.setenv(JAVA_HOME=': /usr/lib/jvm/java-7-openjdk-i386/jre')
Sys.setenv(JAVA_HOME='C:/Programs/Java/bin')
Sys.getenv("JAVA_HOME")
Sys.setenv(JAVA_HOME='C:/ProgramData/Oracle/Java')
if(Sys.getenv("JAVA_HOME")!="") Sys.setenv(JAVA_HOME="")
install.packages("rJava")
library(rJava)
install.packages("xlsx", dependencies=TRUE)
library(xlsx)
## exemple d'escriure un xlsx
dades<-as.data.frame(cbind(c(1,1,2,3,4,5), c(11,11,12,13,14,15)))
write.xlsx(dades, file= "xxx.xlsx", sheetName="Sheet1") ## exporta a XLSX
## llegir fitxer de EXCEL
xfile<-"U:/ULEC/Exemples_estadistica/register/dat/tasques.xls"
channel<- odbcConnectExcel(xfile)
sqlTables(channel)
dat<-sqlFetch(channel, sqtable="Hoja1$")
close(channel)
## guardar fitxer de EXCEL
xxx<-as.data.frame(cbind(c(1,2,3,4,5,6), c(11,12,13,14,15,16)))
setwd("/home/jvila/xxx")
channel<- odbcConnectExcel("xxx.xls", readOnly=FALSE)
sqlSave(channel, catok, tablename="Participants",append = FALSE,safer= FALSE,rownames=FALSE,colnames=FALSE)
close(channel)
## library(sos)
findFn("open office")
save(clin, file ="xxx.RData")
## per triar el fitxer on es vol savel
save(exemple, file= file.choose())
# quan dos numeros no son iguals pero afecta a un decimal a prendre pel cul
rp17<-ifelse(isTRUE(all.equal(sup17a, (sup17ok*100))), 1,
ifelse(isTRUE(all.equal(sup17b, (sup17ok*100))), 2,
ifelse(isTRUE(all.equal(sup17c, (sup17ok*100))), 3, 999)))
rp17
format(sup17c,digits=16,nsmall=16)
# una altre opci? es definir la funci? "==" perque faci aix?:
"==" <- function(x,y) isTRUE(all.equal(x, y))
# per desactivar un grafic
graphics.off()
# per borrar una lliberia
search() # miro a quina posicio es, p.e. la 2
detach(2)
a<--2.9841282
b<-sqrt(0.4656142)
c<-qnorm(0.1,a,b)
d<-round(1/(1+exp(-c)),4)
#posar ordre
## per mes d'una variable:
problems <- problems[order(problems$centreid, problems$paccentreid, -problems$type), ]
xxx <- subdat[order(subdat[,"centro"],subdat[,"paci"]),]
xxx$ordre <- seq(1, nrow(subdat))
subdat <- merge2(subdat, xxx[, c("idrepe", "ordre")], by.id=c("idrepe"),all.x=TRUE, sort= FALSE)
subdat <- order2(subdat, c("ordre"))
subdat <- remove.vars(subdat, "ordre")
head(ictus[order(ictus[,"id"],order(ictus$ancestor, decreasing = TRUE)),])
tots2<-tots2[order(tots2[,"parella"],-tots2[,"cascon"]),]
xxx<-xxx[order(xxx[,"id"]),]
x<-c( 1, 2, 3, 4, 5, 6, 7, 1, 3, 4, 5,8)
y<-c(22,23,23,24,25,26,27,28,24,22,20,21)
z<-cbind(x,y)
t(z[order(z[,1],-z[,2]),])
pred<-order2(pred, c("id"))
xxx[order(c(xxx$font,xxx$lp)),]
vari<-scan(what="character", sep="\n")
idpaci
estudi
idepisodi
nom
ape1
ape2
f_ing
iamseg
diamseg
iam2a
toiam
xxx<-tot[!is.na(tot$iam2a) & tot$iam2a==1, vari]
xxx[order(xxx[,"estudi"],-xxx[,"idepisodi"]),]
packageDescription("gnm")
example(rpanel)
library()
search()
ls(4)
help(solve)
?solve
help("[[")
help.start()
example("hclust")
source("c:\\jvila\\r\\comandos.R")
sink("resultado.txt")
sink()
# punto de corte
qt(.975, df = 24)
# calcular "p" para un valors de "t", bilateral
(1-pt(2.063899,df=24))*2
## generar una random variable and testing normality
library(MASS)
x<-rt(300,df=5)
fitdistr(x,"t")
qqnorm(x); qqline(x)
qqplot(qt(ppoints(length(x)),df=5.55),x)
qqline(x)
# exemple de plots amb distribucio normal
x<-seq(-6,6,by=0.1)
plot(x,dnorm(x),type="l",xlim=c(-6,6),ylim=c(0,0.9))
lines(x,dnorm(x,mean=0,sd=2),col="red")
x<-seq(0,40,by=0.01)
curve(dgamma(x,shape=2,scale=3),from=0,to=40)
abline(v=2*3,lty=2)
x<-0:20
plot(x,dpois(x,lambda=4),type="h")
plot(x,ppois(x,lambda=4),type="s")
# calular mitjanes per linia
muestras<-matrix(rnorm(1000),nrow=100,byrow=T)
medias<-apply(muestras,1,mean)
# calcular el nombre de missings per linia
answer$na<-apply(t(apply(answer[,8:57],1,is.na)), 1, sum)
answer$na<-apply(is.na(answer[,8:57]),1,sum))
## crea una variable que indica si hi ha missing o no
lpa$keep<-apply(!is.na(lpa),1,all)
# calcula mitjana i t-Student
with(pred,by(edad,sexo,function(x) c(mean(x,na.rm=TRUE),sd(x,na.rm=TRUE))))
t.test(edad ~ sexo, data = subset2(pred, "sexo<999 & edad <999"),var.equal = TRUE)
# generar numeros de una binomial
x<-rbinom(20,size=1,prob= 0.2)
## seleccionar
x<-sample(c("A","B","C"),200,replace=T,prob=c(0.5,0.4,0.1))
wom<-wom[sample(1:nrow(wom),16),]
#exemple de buscar una variable
agrep("diarev",names(mcr),value = TRUE)
keep.var<-c("aparece","atencion","fllega","primeringr","fcoro1","ptcaprim","ptcaresca", "ptcaelec","ptcafarma","ptcasincla","frevas")
keep.var[!keep.var%in%names(mcr)]
# per fer moltes taules
xxx<-names(pred)
for(i in 12:length(pred))print(cbind(table2(pred[,xxx[i]],pred$sexo)))
cbind(table2(pred$hta,pred$sexo))
sink(file="xxx.doc")
for(i in 3:length(xxx))print(cbind(table2(pred[,xxx[i]])))
sink()
file.show("xxx.doc")
shell.exec("xxx.doc")
# per borrar variables
dades<-remove.vars(dades,"resucoro")
## localitzar registres i variables
which(names(pred)=="hta")
pred$numcase<-1:nrow(pred)
rownames(pred)
# per fer un excel dels resultats
write.table(datos, file = "c:/jvila/r/r.xls",append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE)
write.table(jsanchez, file = paste(treball,"jsanchez.xls", sep=""),append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE, na="")
shell.exec("c:/jvila/r/r.xls")
#exemple de recode, rename, attrib
x1$colmed<-car::recode(x1$colmed,"2=0;1=1;else=NA")
casos<-rename.vars(casos, from="hipolip6", to="colmed")
attr(ic.coef,"vari.label")<-c("Identificador", "x2", "ss")
attr(x3$colmed,"vari.label")<-"Hipolipemiantes (en casos a 6 meses)"
attr(x3$colmed,"value.labels")<-c("No"=0, "Si" =1)
#seleccionar pacients i variables
vari<-scan(what="character", sep="\n")
id
nodo
edad
xxx<-subset2(pred, "nodo ==1 & edad >70")[,vari]
fix2(clin[is.na(clin$edad), c("fechini","fechnac","xxx1","edad")])
# salvar atributs
atri.ahtam<-attributes(clin$ahtam)
attributes(clin$ahtam)<-atri.ahtam
# subset
clin<-subset2(clin,"clin$lugartto==1 | clin$lugartto==6")
vari<-c("idepisodi","estudi", "nombrepa", "hsintmon", "msintmon", "infosinmon","fechini", "sint")
subset2(dat, "infosinmon ==1 & hsintmon >24")[,vari]
#merge
clin<-merge2(clin,fili,by.id=c("estudi","idepisodi"),all.x=TRUE, sort= FALSE)
# dates
library(chron)
seg6m$xxx1<-paste(as.character(seg6m$adhospdtdy),"-", as.character(seg6m$adhospdtmo),"-", as.character(seg6m$adhospdtyr),sep="")
seg6m$recruitdat<-chron(seg6m$xxx1,format=c(dates="d-m-y"),out.format=c(dates="day-mon-year"))
tot$f_ing<-chron(tot$f_ing,format=c(dates="d-m-y"),out.format=c(dates="day-mon-year"))
min(tot[tot$origen==3,]$f_ing)
tot$f_ing<-chron(tot$f_ing,out.format=c(date="d-mon-Y"))
xx<-chron(paste("31","-","12","-","2002",sep=""),format=c(dates="d-m-y"),out.format=c(dates="day-mon-year"))
xxx<-c("06/01/2012 20:36:25" "06/01/2012 20:36:25" "12/01/2012 01:38:33" "10/01/2012 11:23:16" "08/01/2012 22:14:22" "08/01/2012 22:14:22")
dts<-substr(xxx, 1, 10)
tms<-substr(xxx, 12, 20)
x1<-chron(dates=dts,format=c("d/m/Y"),out.format=c("d-mon-y"))
x2<-chron(times=tms,format=c("h:m:s"),out.format=c("h:m:s"))
answer$moment<-chron(dates = x1, times = x2,format=c(dates="d/m/Y", times = "h:m:s"),out.format=c(dates="day-mon-year", times = "h:m:s"))
ini<-chron(c("4/6/2004","8/12/1995","1/1/2004"),format=c("d/m/Y"),out.format=c("d-mon-y"))
fi<-chron(c("1/11/2003","31/12/1997","31/12/2007"),format=c("d/m/Y"),out.format=c("d-mon-y"))
df<-data.frame(ini,fi)
df$res<-rep(NA,nrow(df))
for (i in 1:nrow(df)){
df$res[i]<-trunc(runif(1,df$ini[i],df$fi[i]))
}
df$res<-chron(df$res,out.format=c("d-mon-y"))
df
#funcio
# Double both arguments and return both results.
# NOTE: the original body ended with `return (v,w)`, which is a runtime error
# in R -- return() takes exactly one value.  The two results are therefore
# returned as a named list, which is the idiomatic multi-value return.
f1 <- function(a, b) {
  v <- a * 2
  w <- b * 2
  list(v = v, w = w)
}
x<-f1(3,5)
# Multiply two values (vectorised, since `*` is elementwise in R).
f2 <- function(a, b) {
  prod_val <- a * b
  prod_val
}
xxx<-f2(2,9)
## escriure una taula
write.table(datos, file = "c:/jvila/r/r.xls",append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE)
shell.exec("c:/jvila/r/r.xls")
########################################################################
################## importo SPSS i exporto acces ########
########################################################################
vari<-tolower(scan(what="character"))
rescate
n_h
HOSP1
NUM_PACIENTE
caso
ape1
ape2
nom
edad
sex
RTRSIMO
admi
ahtai
acoli
fitxer<-"U:\\Estudis\\Epidemiologia\\REGICOR\\POBLACIONAL\\dades\\regi78_actual\\original\\bases de dades procedencia fusio\\78-95 procedeix de investigats.sav"
hola<-read.spss4(fitxer,keep.var=vari)
acces<-paste(treball, "problemes.mdb", sep="")
export.ACCESS(taula=gedaps, file.mdb=acces, table.name="gedaps", table.dict = "dicgedaps")
shell.exec(acces)
#### importar acces
import.ACCESS2(
file.mbd="U:\\Estudis\\Clinic\\BASICMAR\\dades\\DEA Jordi\\JJimenez.mdb",
nom.taula=c("basic","m3","gen"),
nom.variables=list(c("ALL"),
c("ALL"),
c("partic", "K406", "K1444", "K375", "K246","K201")),
nom.dicc="Dic",
file.spss="",
var.dicc=c("nombre","etiqueta_variable","etiqueta_valor","tabla2"),
noms.taules=c("basic","m3","gen"),
fix.formats=TRUE)
# per buscar repetits
(repes <- with(stud,table(dni)))[repes>1]
repes<-with(check1,table(id))
repes<-as.double(names(repes)[repes>1])
check1$exclu<-with(check1, ifelse(check1$id%in%repes, 74, exclu))
sum(with(cascon,table(idfortiam))>1)
t<-with(fortiam,table(colest))
sum(t>1)
t[t>1]
valors.repes<-as.double(names(t)[t>1])
fortiam$num_paci[fortiam$colest%in%valors.repes]
xxx<-subset(fortiam,colest%in%valors.repes)[,c("num_paci","colest")]
fix2(xxx[order(xxx$colest),])
# correlacions
vari<-scan(what="character")
nkg2a_cd3_
nkg2c_cd3_
x2a_2c_cd3_
nkg2c_en_cd3__cd56_
nkg2a_en_cd3__cd56_
nkg2c_en_cd56__cd3_
nkg2a_en_cd56__cd3_
nkg2c_en_cd3__cd56__1
nkg2a_en_cd3__cd56__1
x2a_2c_cd3__cd56_
x2a_2c_cd3__cd56__1
ilt2_cd3__cd56_
ilt2_cd3__cd56__1
ilt2_cd3__cd56__2
ilt2_cd3_
nkg2c_en_nk
nkg2a_en_nk
ilt2_en_nk
x2a_2c_en_nk
xxx<-dades[,vari]
res<-NULL
for (i in 2:ncol(xxx)){
for (j in 1:(i-1)){
x<-xxx[,i]
y<-xxx[,j]
ct<-cor.test(x,y,method = "spearm")
r<-ct$estimate
pvalor<-ct$p.value
n<-sum(!is.na(x) & !is.na(y))
label.x<-attr(x,"vari.label")
label.y<-attr(y,"vari.label")
label<-paste(label.x,label.y,sep=" vs. ")
res<-rbind(res,c(label, r,pvalor,n))
}
}
colnames(res)<-c("Variables2","rho","pvalor","n")
write.table(res,
file = "U:\\Estudis\\Externs\\NKG2C M Lopez Botet\\Dades\\cor.xls",append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE)
# per fer LR univariades
vari<-scan(what="character")
edad
C(as.factor(sexo),base=1)
C(as.factor(period),base=1)
write.table("Univariat", file = paste(treball,"LRuni.xls",sep=""),col.names=FALSE,row.names=FALSE)
write.table(rbind(c("Variable", "OR", "95%CI inf", "95%CI sup", "p-value")), sep="\t",file = paste(treball,"LRuni.xls",sep=""),append= TRUE, col.names=FALSE,row.names=FALSE)
for (i in 1:length(vari)){
formul<-paste("def"," ~ ", noquote(vari[i]), sep="")
mod<-glm(
formula=formul,
family="binomial",
data=dat,
na.action=na.exclude
)
write.table(intervals(mod)[2,,drop=FALSE], file = paste(treball,"LRuni.xls",sep=""),append=TRUE,sep="\t",col.names=FALSE,row.names=TRUE)
}
shell.exec(paste(treball,"LRuni.xls",sep=""))
## per fer moltes tab
for (i in 2:length(vari)){
eval(parse(text=paste("with(clin,table2(",noquote(vari[i]),"))",sep="")))
}
for (i in 2:length(vari)){
cat("\n_______",vari[i],"_________\n")
table2(clin[,vari[i]])
cat("\n\n\n")
}
for (i in 2:length(vari)){
clin[,vari[i]]<-car::recode(clin[,vari[i]],"NA=999")
}
# per imprimir molts resultats
sink(file = "c:\\jvila\\xxx.txt")
for (i in 1:length(vari)){
cat("\n_______",vari[i],"_________\n")
print(table(clin[,vari[i]],clin$a?oini))
cat("\n\n\n")
}
sink()
shell.exec("c:\\jvila\\xxx.doc")
# per comprovar linealitat
####################################
# tria explicativa, outcome i les dades
explicativa<-"imc"
outcome<-"itb_cutrec"
nom.dades<-"hermesok"
# aqui fa el model
temp<-eval(parse(text=paste("subset(",nom.dades,",!is.na(",outcome,") & !is.na(",explicativa,"))",sep="")))
formul<-paste(noquote(outcome), "~ s(", noquote(explicativa),")",sep="")
mod.lin<-gam(
formula=as.formula(noquote(formul)),
family="binomial",
data=temp,
#subset =sexe==1,
na.action=na.exclude
)
# grafic
res.mod<-preplot.gam(mod.lin,type="terms",terms=paste("s(",noquote(explicativa),")",sep=""),se.fit=TRUE)[[1]]
ci<-cbind(res.mod$y,res.mod$y-qnorm(1-0.05/2)*res.mod$se.y,res.mod$y+qnorm(1-0.05/2)*res.mod$se.y)
orden<-order(res.mod$x)
ci<-ci[orden,]
matplot(sort(res.mod$x),ci,type="l",lty=c(1,2,2),col="black",xlab=explicativa,ylab="logit smooth estimate")
title("gam logistica")
rug(jitter(res.mod$x))
#####################################
### sumar per columnes
x1<-colSums(with(fusio,table(smoker,font)))
x2<-with(fusio,apply(table(smoker,font),2,sum))
# taules bivariades
var.taula<-"VARIABLE\tKEEP\tDIGITS\tMETHOD\tELIM\tTIPUS\tLOGPTREND
hours\tNULL\t1\t2\tNULL\tNULL\tFALSE"
write(var.taula,file="C:\\xxx.doc")
file.show("C:\\xxx.doc")
taules.bivariades(file.input = NULL, var.taula = var.taula, nom.col = "group",
dades = oren, nom.arxiu = "C:\\jvila\\oren\\resu", dec.car = ",", plot.norm = TRUE,
lim.p.value = 0.05)
##genera noms del tipus xp01, xp02, etc.
grep("^xp[0-9]+$",names(notes),value=TRUE)
toupper(letters[1:8])
## per omplir de 0
xxx<-tr05lab$id
xxx<-c(99999, xxx)
xxx<-format(xxx)
xxx<-gsub(" ", "0", xxx)
xxx<-xxx[-1]
tr05lab$xxx<-xxx
## pastes varis
xxx<-rbind(paste(rep("p", 8), as.character(seq(1,8, 1)), sep=""))
lettercode<-cbind(paste(rep(toupper(letters[1:8]), 12), rep(as.character(seq(1,12, 1)),each= 8), sep=""))
numbercode<-cbind(seq(1,length(lettercode), 1))
convert<-cbind(lettercode, numbercode)
# genera cadenes del tipu an01, an02, etc.
cbind(paste(rep("an", 50), num.pract<-gsub(" ","0",format(1:50)), sep=""))
c(paste(rep("r", 20), gsub(" ","0",format(1:20)), sep=""))
result<-54
paste("La respuesta es",result,sep=": ")
x<-c(1,3,4)
paste(x,collapse="/")
paste(x,sep="/")
x<-c(1,2,3)
y<-c(4,5,6)
z<-c(7,8,9)
paste(x,y,z,sep="+")
paste(paste("Pregunta",1:5,sep=""),collapse="\t")
toupper(letters[1:8])
paste(paste("Pregunta",letters[1:5],sep=" "),collapse="\n")
paste(paste("Pregunta",LETTERS[1:5],sep=" "),collapse="\n")
write(rbind(paste(paste("Pregunta",1:npreg,sep=""),collapse="\t")),file="xxx")
file.show("xxx")
## legir un fitxer EXCEL
regiair<-read.xls( paste(treball,"alea.xls", sep =""),colNames = FALSE,sheet = 1)
# replicates
numok$xxx<-rep(1:19, each= 40)
rep(c("a","b","c"),c(10,20,5))
save(dat,file = file.path(treball,"dat.Rdata"))
# per llegir un excel
jsanchez<-xlsReadWrite::read.xls( paste(treball, "Muestras empleadas para pools.xls", sep=""),
colNames = TRUE,
sheet = 1,
type = "data.frame",
from = 1,
rowNames = NA, colClasses = NA, checkNames = TRUE,
dateTimeAs = "numeric",
stringsAsFactors = default.stringsAsFactors())
# per salvar com etiquetes els valors d'una variable de cadena
xxx<-levels(flow$situ2)
flow$situ2<-as.integer(as.factor(flow$situ))
attr(flow$situ2,"value.labels")<-structure(1:length(xxx), names=xxx)
### per buscar alguna sintaxis (p.e. casos.RData) feta amb R
### Search every .R script under a folder for the snippet "loc<-" and print
### the path of each file that contains it.
xxx <- list.files("/home/jvila/gdrivelars/d449/MU/MUAnalysis/MuscEsque/empresa", pattern= ".R$", recursive=TRUE, full.names = TRUE)
for (i in seq_along(xxx)) {  # seq_along(): safe when no file matches (1:length(xxx) would yield c(1, 0))
  contingut <- scan(xxx[i], what = "character", sep = "\n")  # one element per line
  if (length(grep("loc<-", contingut)) > 0) print(xxx[i])    # explicit length test instead of implicit truthiness
}
### per veure les característiques de les variables
lapply(jm, class)
### per exportar a SPSS
export.SPSS (m4, file.save = NULL, var.keep = "ALL", run.spss = FALSE)
export.SPSS (par1a1, file.dict = NULL, file.save = "U:/Estudis/Clinic/FORTIAM - RESCATE II/FORTIAM/analisi/MG?mez/Article 2/par1a1.sav"
, var.keep = "ALL", file.runsyntax = "C:/Archivos de programa/SPSS Evaluation/runsyntx.exe")
## per que no sorti en format cient?fic
format((prec/100)^2,scientific = FALSE)
# Data per imputar
##############################################
#data aleatoria entre inici i final de l'estudi
n<-nrow(segok)
segok$temp<-with(segok,chron(iam_ind + round(runif(nrow(segok),0,d_ult2-iam_ind),0),out.format="d-mon-Y"))
## calcular la data maxima
surv11$timemax<-with(surv11, ifelse(event>=1, apply(surv11[,c("datiam", "dataltraci", "datavc", "datdef")], 1, min), apply(surv11[,c("datiam", "dataltraci", "datavc", "datdef")], 1, max)))
# 4 dimensional plot
m<-matrix(unlist(with(countries,by(event,eventq,function(x) c(min(x,na.rm=TRUE),max(x,na.rm=TRUE))))),
ncol=2,byrow=TRUE)
m<-format(round(m,3))
m<-apply(m,1,function(x) paste("[",x[1],";",x[2],"]",sep=""))
colors<-c("blue", "green", "yellow", "red")
plot(countries$gross,countries$cvdeath
,cex=sqrt(countries$n/100)
,col=colors[countries$eventq]
,xlab="Yearly gross national income per capita ($)"
,ylab="Age-standardized mortality rate for cardiovascular diseases",pch=19)
points(countries$gross,countries$cvdeath,cex=sqrt(countries$n/100))
legend("topright",legend=paste("Q",1:4,": ",m,sep=""),
fill=colors,title="in-hospital mortality")
par(xpd=NA)
identify(countries$gross,countries$cvdeath,countries$name,cex=0.8,col="black",font=2)
# nova finestra gràfica
win.graph()
## funcions i classess
> print.isaac<-function(x) cat("hola qu? tal",x,"\n")
> x<-3
> class(x)<-"isaac"
> x
hola qu? tal 3
> print(x)
hola qu? tal 3
> unclass(x)
[1] 3
> class(x)
[1] "isaac"
> class(unclass(x))
[1] "numeric"
> print.default
function (x, digits = NULL, quote = TRUE, na.print = NULL, print.gap = NULL,
right = FALSE, max = NULL, useSource = TRUE, ...)
{
noOpt <- missing(digits) && missing(quote) && missing(na.print) &&
missing(print.gap) && missing(right) && missing(max) &&
missing(useSource) && length(list(...)) == 0
.Internal(print.default(x, digits, quote, na.print, print.gap,
right, max, useSource, noOpt))
}
<environment: namespace:base>
> methods(class="isaac")
[1] print.isaac
> methods(class="cox.zph")
[1] [.cox.zph* plot.cox.zph* print.cox.zph*
Non-visible functions are asterisked
> methods(class="glm")
[1] add1.glm* anova.glm Anova.glm*
[4] av.plot.glm* ceres.plot.glm* confidence.ellipse.glm*
[7] confint.glm* cooks.distance.glm* cr.plot.glm*
[10] deviance.glm drop1.glm* effects.glm*
[13] extractAIC.glm* family.glm* formula.glm*
[16] influence.glm* intervals.glm leverage.plot.glm*
[19] linear.hypothesis.glm* logLik.glm* model.frame.glm
[22] ncv.test.glm* outlier.test.glm* predict.glm
[25] print.glm qq.plot.glm* residuals.glm
[28] rstandard.glm rstudent.glm summary.glm
[31] Var.glm* Varcov.glm vcov.glm*
[34] weights.glm*
Non-visible functions are asterisked
> add1.glm
Error: objeto "add1.glm" no encontrado
> ?add1.glm
> getAnywhere(add1.glm) # i surt tota la funcio add1.glm
#### per treure espais en blanc
ibespss$poblaci_<-with(ibespss, sub(" +$","", poblaci_))
albaspss<-subset2(ibespss, "poblaci_=='ALBACETE'")
### per truere el punt al final de un carcater
alldat$tropo_peak<- with(alldat, sub("\\.+$", "", tropo_peak, fixed = FALSE ))
## per saber els valors que no es poden convertir a numeric
## Finds the strings that become NA only *because* numeric conversion failed
## (e.g. "2,2" with a decimal comma), excluding values that were already
## missing or empty beforehand.
x<-c("2.1","2,2",NA)
x<-trim(x)  # trim(): project/gdata helper, presumably strips surrounding whitespace -- TODO confirm
x<-ifelse(x=='',NA,x)  # treat empty strings as missing too
ww1<-which(is.na(x))  # positions missing BEFORE conversion
x2<-as.double(x)  # "2,2" -> NA (with a coercion warning)
ww2<-which(is.na(x2))  # positions missing AFTER conversion
ww<-ww2[!ww2%in%ww1]  # newly-missing = the unconvertible values
x[ww]
### per calcular or, rr, etc.
library(epicalc)
help(package="epicalc")
example(cs)
## la data del sistema
Sys.Date()
## attributs
cbind(lapply(euphoric3, function(x) attr(x,"vari.label")))
cbind(unlist(lapply(dexa, function(x) attr(x, "vari.label"))))
## per treure els espais en blanc
dades$xxx <- ifelse(sub(" +$", "", dades$comentario)=="tercera generaci?n",1,0)
## taules varies
install.packages("Epi")
install.packages("catspec")
install.packages("gmodels")
install.packages("epitools")
library("Epi")
library("catspec")
library("gmodels")
library("epitools")
example(stat.table)
example(ctab)
example(CrossTable)
example(riskratio)
### per treure els missing
macrowom<-macrowom[apply(t(apply(macrowom,1,is.na)), 1, sum) == 0, ]
### per dibuixar un grafic de barres
par(las=1, mar=c(5, 6, 4, 2), xpd=FALSE)
mehta<-48.2
lohta<-47.4
uphta<-49.1
hta<-c(42.8, 46.6, 48.3, 51.2, 50.2, 43.7, 51.2, 52.6, 43.1)
centers<-c("REGICOR", "HERMEX", "TALAVERA", "CDC", "RIVANA", "RECCyL", "CORSAIB", "DINO", "DRECA")
htac<-hta-mehta
color<-ifelse(hta<lohta, "green", ifelse(hta>uphta, "red", "blue"))
xxx<-barplot(htac,horiz=TRUE,axes=F,col=color, xlim= c(-6,5),
main="Age-standardized Hypertension prevalence: MEN")
axis(1,pretty(range(htac)),(pretty(range(htac))+ mehta))
axis(2,xxx, centers)
abline(v=c(lohta, mehta, uphta)-mehta, lty=c(2,1,2))
par(xpd=NA)
legend(mean(par()$usr[1:2]),par()$usr[3]-diff(par()$usr[3:4])*0.1,c("Overall","95%CI"),xjust=0.5,lty=1:2,bty="n")
## per veure el que fa un package
help(package="survival")
OR<-c(1.13,3.75,4.32,5.54,5.01)
selogOR<-c(0.2,0.3,0.25,0.12,0.2)
meta.DSL(OR,selogOR)
meta.DSL(OR[-1],selogOR[-1])
### per buscar un tros de sintaxis en tots el tinn-R d'una carpeta
carpeta<-"U:/Estudis/Colaboracions/2009 DARIOS Prevalencia FRCV Espa?a siglo XXI/Analisis"
arxius<-list.files(carpeta, pattern=".r$", full.names=T, recursive=T)
for (i in 1:length(arxius) ){
xxx<-scan(file=arxius[i], what="character", sep="\n")
print(grep("Comparaci?n de resultados",xxx))
}
## per calcular mitjanes per fila
offv01$dbp<-with(offv01,apply(cbind(a56,a58),1, mean, na.rm=TRUE))
## Per fer taules amb totals
xxx<-as.matrix(with( fusio, table2(flow2, font, margin=0)))
cbind(xxx,apply(with(fusio, table (flow2, font)), 1, function(x) sum(x)))
## per definir l'amplada de la consola
options(width = 60)
seq(1, 100, 1)
options(width = 32)
seq(1, 100, 1)
## compare groups
library(foreign)
library(compareGroups)
setwd("C:/cursR/data")
datos<-read.spss("partoFin.sav",
use.value.labels = FALSE,
to.data.frame = TRUE)
datos$naci_ca<-factor(datos$naci_ca,labels= names(attr(datos$naci_ca,"value.labels")))
datos$sexo<-factor(datos$sexo,labels= names(attr(datos$sexo,"value.labels")))
res <- compareGroups(tx ~ edad + peso + sexo + naci_ca, data = datos,
selec = c(peso = "datos$edad < 40"),
method = c(peso=2))
restab <- createTable(res, show.n = TRUE,
hide = c(sexo =1),
digits = c(edad=3))
export2latex(restab, file = "C:/xxx/table1", dec = ",")
export2csv(restab, file = "C:/xxx/table1", sep = ";")
# un altres exemple
# primer fer un scan . . . .
dat<-fusio[, vari]
dat<-prepare(dat)
res <- compareGroups(font ~ ., data = dat, subset = fusio$st3c==0 | fusio$st3c==1)
restab <- createTable(res, show.n = TRUE, hide = c(sexo= 1,ant_dm= 1,ant_tab= 1,ant_col= 1,ant_hta= 1,ant_iam=1 ,ant_rev= 1,onda_q= 1,loc_ar= 1,ucc_exit= 1,mort28= 1,mort6= 1,hemodin= 1))
export2csv(restab, file = "C:/xxx/xxx", sep = ";")
shell.exec("c:/xxx/xxx.csv")
## update
res<-update(res, font ~ . -hemodin, subset = fusio$st3c==0)
restab <- createTable(res, show.n = TRUE, hide = c(sexo= 1,ant_dm= 1,ant_tab= 1,ant_col= 1,ant_hta= 1,ant_iam=1 ,ant_rev= 1,onda_q= 1,loc_ar= 1,ucc_exit= 1,mort28= 1,mort6= 1,hemodin= 1), show.p.trend=TRUE)
# restab <- update(restab, show.all = FALSE)
export2csv(restab, file = "C:/xxx/xxx", sep = ";")
shell.exec("c:/xxx/xxx.csv")
## per saber les etiquetes de les variables
## Builds a one-column matrix of strings "i) varname: label", one per column
## of `fusio`, reading each column's "vari.label" attribute. Both `fusio`
## and trim() come from elsewhere in this file -- TODO confirm they are loaded.
## NOTE(review): growing varnames with rbind() inside the loop is O(n^2);
## a vapply() + cbind() would build the same result in one pass.
varnames<-NULL
for (i in 1:ncol(fusio) ) {
varnames<-rbind(varnames, trim(paste(paste(i, ") ", names(fusio[i]), sep=""), attributes(fusio[ , i])$vari.label, sep=": ")))
}
## per esborrar packages
remove.packages("compareGroups")
## per instal?lar un tar.gz
install.packages("C:/CursR/menorca/packages/tar.gz/compareGroups_0.1-5.tar.gz", repos=NULL, type="source")
install.packages("/xxx/compareGroups_2.0.3.tar.gz", repos=NULL, type="source")
install.packages("SNPassoc")
install.packages("XLConnect")
install.packages("shiny")
install.packages("HardyWeinberg")
install.packages("/home/jvila/Dropbox/CompareGroups/package/compareGroups_without_odfWeave/compareGroups_2.1.tar.gz",
repos=NULL, type="source")
## ajuda sobre un package
help(package=oce)
## exemple de if else
## if/else used as an expression: the chosen branch's value is assigned.
alpha <- 0
x <- if (alpha > 1) 88 else -88
x
## per fer comparacions m?ltiples
p.adjust(c(0.004, 0.0003, 0.005), "BH")
## exemple de factors
gender<-rbinom(10,1,0.5)
gender<-c(gender,9)
table(gender)
gender<-factor(gender,levels=c(0,1),labels=c('home','dona'))
table(gender)
## per saber les dades que hi ha al R
data()
########### spss.get2 ############
source(file.path(RutinesLocals,"spss_varlist.r"))
source(file.path(RutinesLocals,"prepare.r"))
source(file.path(RutinesLocals,"arregla.formats.r"))
library(Hmisc)
xfile<-"./dat/cancer_incidente_npnm_enviado.sav"
dict<-spss_varlist(xfile)
xdates<-dict[grep("^DATE",dict[,2]),"longname"]
dat<-spss.get(xfile,allow="_",use.value.labels=FALSE,datevars=xdates)
dat[,xdates]<-arregla.formats(dat[,xdates])
for (i in 1:ncol(dat)) attr(dat[,i],"vari.label")<-label(dat[,i])
##################################
## per guardar els factors com etiquetes
x1$abo<-as.factor(x1$abo)
ll<-levels(x1$abo)
x1$abo<-as.integer(x1$abo)
attr(x1$abo,"value.labels")<-structure(1:length(ll),names=ll)
attr(x1$abo,"vari.label")<-"ABO"
## per substituir els valor d'edat < 40
sapply(age, function(x) if (x<40) runif(1,40,45) else x)
## per calcular el temps que triga a fer-se una cosa
system.time({ qnorm(0.05/2)})
## per posar numero d'ordre
## Assigns a within-run order number: count restarts at 1 whenever the id
## changes between consecutive rows. rle() measures the runs of consecutive
## identical ids and sequence() numbers the rows inside each run -- a
## vectorized replacement for the original row-by-row loop (which was also
## fragile for a 1-row data frame, where 1:(nrow-1) yields c(1, 0)).
## NOTE(review): NA ids form one-row runs here, whereas the old loop
## propagated NA into count -- confirm xalib$id has no missings.
xalib$count <- as.numeric(sequence(rle(xalib$id)$lengths))
# per buscar una funcio, especialment les que estan amagades (son les que tenen un asterix)
getAnywhere(mean)
getAnywhere(print.coxph.penal)
# per buscar en els packages intal?lats
help.search("ancova")
# per buscar a la p?gina web del CRAN
RSiteSearch("ancova")
# utilitzant el paquet SOS
library(sos)
findFn("ancova")
## regular expressions
######################
## busca exactament "36." al comen??ament
x <- c("736.0", "36.", "366.1", "366.")
x[grep("^36\\.", x)]
# busca la primera vegada (^) que surt un numero [0-9] i el substitueix per xxx
sub("^[0-9]","xxx","0124hola")
[1] "xxx124hola"
# busca la primera vegada que surt una sequencia de numeros [0-9]+ i aquesta sequencia la substitueix per xxx
sub("[0-9]+","xxx","0124hola123")
[1] "xxxhola123"
# busca qualsevol (gsub) numero [0-9] i el substitueix per xxx
gsub("[0-9]","xxx","0124hola04")
[1] "xxxxxxxxxxxxholaxxxxxx"
# busca qualsevol (gsub) sequencia de numeros [0-9]+ i la substitueix per xxx
> gsub("[0-9]+","xxx","0124hola04")
[1] "xxxholaxxx"
# busca la primera (sub) sequencia de numeros [0-9]+ i la substitueix per xxx
sub("[0-9]+","xxx","aaaaa0124hola04")
[1] "aaaaaxxxhola04"
# busca la primera (sub) sequencia de numeros [0-9]+ que esta a comen??ament, pero no n'hi ha cap
sub("^[0-9]+","xxx","aaaaa0124hola04")
[1] "aaaaa0124hola04"
sub(" $","","apoefhawpehf ")
[1] "apoefhawpehf"
sub(" $","","apoefhawpehf ")
[1] "apoefhawpehf "
sub("[ ]+$","","apoefhawpehf ")
[1] "apoefhawpehf"
> sub("[ ]+","","apo efhawpe hf")
[1] "apoefhawpe hf"
> sub("[ ]","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("[ ]","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("[ ]","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("[ ]2","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("^[ ]+",""," wapoeufhapuwef")
[1] "wapoeufhapuwef"
> sub("^[ ]+",""," wapoeufhapu wef")
[1] "wapoeufhapu wef"
> gsub(" ",""," wapoeufhapu wef ")
[1] "wapoeufhapuwef"
gsub("^[0-9]+","","10987561023asdof?341525iwhapfohe")
[1] "asdof?341525iwhapfohe"
> sub("^[0-9]+","","10987561023asdof?341525iwhapfohe")
[1] "asdof?341525iwhapfohe"
> gsub("[0-9]+","","10987561023asdof?341525iwhapfohe")
[1] "asdof?iwhapfohe"
> gsub("[0-9]","","10987561023asdof?341525iwhapfohe")
[1] "asdof?iwhapfohe"
> grep("[0-9]",c("asd?ofih","askoufh21938"))
[1] 2
> grep("^[0-9]",c("asd?ofih","askoufh21938"))
integer(0)
> grep("[0-9]$",c("asd?ofih","askoufh21938"))
[1] 2
> grep("[0-9]",c("asd?ofih","askoufh21938"))
[1] 2
> grep("[0-9]",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2 3
> grep(".[0-9]+.",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2 3
> grep(".[0-9].",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2 3
> grep(".[0-9].$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep(".[0-9]+.$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep(".[0-9]$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep(".[0-9].$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep("^.[0-9].$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
integer(0)
> grep(".",c("apofh","apesoh.apoeh"))
[1] 1 2
> grep("\\.",c("apofh","apesoh.apoeh"))
[1] 2
> sub("\\.","[","apesoh.apoeh")
[1] "apesoh[apoeh"
> grep("[",c("apofh","apesoh[apoeh"))
Error in grep("[", c("apofh", "apesoh[apoeh")) :
invalid regular expression '[', reason 'Missing ']''
> grep("\\[",c("apofh","apesoh[apoeh"))
[1] 2
#### apply i sapply
####################
N<-100000
donant<-as.data.frame(1:N)
names(donant)<-"parti"
donant$aliq<-rpois(N,3)
## repeteix una fila varies vegades
system.time({
x<-NULL
for (i in 1:nrow(donant)){
x <- c(x, rep(donant$parti[i],donant$aliq[i]))
}
})
system.time(
x2 <- sapply(1:nrow(donant), function(i) rep(donant$parti[i],donant$aliq[i]))
)
x2<-unlist(x2)
## enumera les vegades que surt un individu
x2<-sort(x2)
tt<-table(x2)
system.time(
ordre <- sapply(1:length(tt), function(i) 1:tt[i])
)
ordre<-unlist(ordre)
cbind(x2,ordre)[1:100,]
## per indicar quin es el registre ultim
## Flag the last record of each id: a row is "ultimo" when its sequence
## number equals the total number of rows observed for that id.
id <- rep(c(1, 2, 3), times = c(4, 2, 5))
sequ <- as.numeric(c(seq_len(4), seq_len(2), seq_len(5)))
dat <- data.frame(id, sequ)
counts <- table(dat$id)
dat2 <- data.frame(id = names(counts), freq = as.integer(counts))
dat <- merge(dat, dat2, by = "id", all.x = TRUE)
dat$ultimo <- as.numeric(dat$freq == dat$sequ)
##################################################################
########### selccionar ultima entrada ########################
##################################################################
## partim d'una base de dades: els individus = id_unic; estan enurants com "id"
## vull quedar-me l'ultim "id" de cada "id_unic"
id_unic <- c(rep("AAA", 3), rep("BBB", 4), rep("CCC",1), rep("DDD", 2))
id <- sample(seq(1:length(id_unic)))
xdat <- as.data.frame(cbind(id, id_unic))
xdat$id <- as.numeric(as.character(xdat$id))
xdat$id_unic <- as.character(xdat$id_unic)
## la poso per ordre
xdat <- xdat[order(xdat$id_unic, xdat$id), ]
## li poso la variable "orden"
kk <- table(sort(xdat$id_unic))
orden <- sapply(1:length(kk), function(i) 1:kk[i])
xdat$orden <- unlist(orden)
## calculo les vegades que surt cada id_unic
tt <- table(xdat$id_unic)
dat2<-data.frame(id_unic=names(tt),freq=as.integer(tt))
## afageixo la informacio de les vegades que surt cada id_unic
xdat<-merge(xdat,dat2,by="id_unic",all.x=TRUE)
## els que orden==freq es el ultim
xdat$ultimo<-as.numeric(with(xdat,freq==orden))
##################################################################
##################################################################
##################################################################
## per posar una data a cadena
(fecha <- chron("15-05-2016", format="d-m-Y", out.format=c(dates="day-mon-year")))
class(fecha)
(fecha2 <- format(as.Date(fecha), "%d-%m-%Y"))
class(fecha2)
## per saber quin es converteix a missing a transformar a numero
x<-c("2.1","2,2",NA)
x<-trim(x)
x<-ifelse(x=='',NA,x)
ww1<-which(is.na(x))
x2<-as.double(x)
ww2<-which(is.na(x2))
ww<-ww2[!ww2%in%ww1]
x[ww]
## per guardar amb cadena les estiquetes de les variables
## Converts the coded values of wik$flow into their text labels by looking
## each value up in the "value.labels" attribute (a named vector that maps
## label name -> code); the result is stored as plain character in wik$flow2.
## NOTE(review): growing xxx with rbind() per element is O(n^2); the same
## lookup is labs <- attr(wik$flow, "value.labels"); names(labs)[match(wik$flow, labs)]
## -- verify (including NA handling) before swapping.
xxx<-NULL
x2<-wik$flow
for (i in 1:length(x2)){
x1<-names(attr(wik$flow,"value.labels")[attr(wik$flow,"value.labels")==x2[i]])
xxx<-rbind(xxx,x1)
}
wik$flow2<-as.vector(xxx)
## per cambiar l'rodre dels levels d'un factor
dat$bmicat<-factor(dat$bmicat, c("<24", "[24-30)", "30+"))
## la data del systema
Sys.Date()
## per llegir dades d'un servidor
setwd("/run/user/jvila/gvfs/sftp:host=134.0.8.34,user=ars/home/ars/ESTUDIS/ALTRES/jvila/mgil/partners/")
dat<-read.csv("partners.csv", sep=";", header = TRUE, allowEscapes=FALSE)
## per llegir MySQL
install.packages("DBI")
install.packages("RMySQL",lib= "/home/jvila/R/i486-pc-linux-gnu-library/3.1/lib")
## he anat a UBUNTU Software centre i he installat
## libmysqlclient-dev
## he instal.lat el package linux (previament ho'havia baixat el tar.gz
## R CMD INSTALL /home/jvila/Downloads/RMySQL_0.9-3.tar.gz
library(RMySQL)
con2 <- dbConnect(MySQL(), user="web", password="ieT6io9z", dbname="web", host="134.0.8.34")
con2 <- dbConnect(MySQL(), user="userdbcr", password="7437fgs78", dbname="iCRDvas", host="crd.ivascular.es")
dbGetQuery(con2, "SET NAMES utf8")
con2 <- dbConnect(MySQL(), user="root", password="xxx127",
dbname="Modul1", host="localhost")
dbListTables(con2)
dbListFields(con2, "congelador")
mydata <- dbReadTable(con2, "congelador")
dbWriteTable(con2, "mmar", subtr9500)
dbDisconnect(con2)
## per trobar un caracter en una cadena
regexpr("a", "bcvgdhdbbfassss")[[1]]
##
install.packages("png",lib= "/home/jvila/R/i486-pc-linux-gnu-library/3.1/lib")
library(png)
## per instalar un tar.gz
install.packages("C:/programs/Dropbox/JVila/compareGroups_2.1.tar.gz", repos= NULL,
type= "source")
cGroupsWUI()
## per retardar l'execucio
?Sys.sleep
## per treure els warning
options(warn=-1)
## per carregar una base de dades de la web
setwd("/run/user/jvila/gvfs/sftp:host=134.0.8.34,user=ars/home/ars/ESTUDIS/L02_MUTUA/Analisi/screening/")
load("./dat/2013-11-13.RData")
## per ordenar un factor
value.lab<-c("<35"=1, "35-44"=2, "45-54"=3, "55+"=4)
dat$agegr<-factor(dat$agegr,levels=sort(value.lab),labels=names(sort(value.lab)))
## per buscar una cadena entre fitxers
## Scan every .r file under a folder and print the lines containing ">8",
## preceded by a banner showing the file path.
ff <- list.files("U:/Estudis/Tancats/A37_GEDAPS", pattern = ".r$", recursive = TRUE, full.names = TRUE)
for (path in ff) {
  file_lines <- scan(what = "character", file = path, sep = "\n", quiet = TRUE)
  hits <- grep(">8", file_lines)
  if (length(hits) > 0) {
    cat("---------", path, "-----------\n")
    print(file_lines[hits])
    cat("\n")
  }
}
## per saber la versió
sessionInfo()
## per fer vanilla
/usr/bin/R --vanilla --slave --args "Hospital de la Monta??a", "67676767678" < /home/ars/ESTUDIS/L02_MUTUA/Analisi/Cardiovascular/empresa/Maker.R
/usr/bin/R --vanilla --slave < /home/ars/ESTUDIS/L01_DEMCOM/Analisi/queries/maker.R
## per codis ascci i utf8
library(oce)
integerToAscii(126L)
paste(rep("??", 10), collapse="")
paste(rep(integerToAscii(175L), 10), collapse="")
cat(integerToAscii(194L), integerToAscii(175L), sep="" )
today<-chron(as.character(Sys.Date()), format="Y-m-d", out.format="d-mon-Y")
sessionInfo()
## Per a calcular la memoria
library(memuse)
howbig(10000, 500)
## retraasar un segons l'execucio
?Sys.sleep()
################################################################################
############ EXEMPLE Inserir dades a MySQL ##################################
################################################################################
## insert bd sql
library(RMySQL)
library(chron)
# db connect
con<- dbConnect(MySQL(), user="web", password="ieT6io9z",dbname="web", host="localhost")
taula<-"salut_laboral_tabac"
ndatasql<-dbListFields(con,taula)
dat<-smk
ndatar<-names(dat)
xxx<-ndatar[ndatar%in%ndatasql]
yyy<-ndatasql[ndatasql%nin%ndatar]
dat$idu<-""
dat$time<-format(Sys.time(), "%Y-%m-%d %H:%M:%S")
# ordena y elige las variables.
varilab<-scan(what="character", sep="\n")
idu
id
cigar
fuma
inifum
puros
pipas
minutes
dificul
whatcigar
smkmorning
smkill
hasta1
morning
cigar2
ncigar
fager
fagercat
situ
time
dat<-dat[, varilab]
# insert taula
cadena<-paste("INSERT INTO ", taula," VALUES('",paste(dat[1,],collapse=","),"')",sep ="")
cadena<-gsub(",","','",cadena)
#dbGetQuery(con,cadena)
## llegir, dins de un path, la part del nom del fitxer
indiv<-basename(xfile)
## la part inicial i la part final
indiv<-sub("^indiv_","",indiv)
indiv<-sub("\\.csv$","",indiv)
#############################################################################
### afegir casos a un ACCESS
#############################################################################
setwd("c:/xxx")
a <- c(1,2,3,4,5)
b <- c("a", "b", "c", "d", "e")
dat <- as.data.frame(cbind(a, b))
names(dat) <- c("numero", "caracter")
dat$numero <- as.numeric(dat$numero)
dat$caracter <- as.character(dat$caracter)
dat2 <- dat
dat2$numero <- dat2$numero*10
export.ACCESS (dat, "xxx.mdb", table.name= "mitabla")
con <- odbcConnectAccess("xxx.mdb")
sqlSave(con, dat=dat2, tablename = "mitabla", append = TRUE,
rownames = FALSE, safer = FALSE)
###
## per netajar la consola
cat("\014")
###############################################################################
## per saber el que pesen els objectes
## List the objects in an environment together with their memory footprint.
## Returns a data frame (Type, Size in megabytes, Rows, Columns), sorted by
## `order.by` (default "Size", largest first) and truncated to the top `n`
## rows when `head` is TRUE. `pos` is the search-list position to inspect
## (1 = global environment); `pattern` is an optional regex forwarded to
## ls() -- note that its missingness propagates safely into ls().
.ls.objects <- function (pos = 1, pattern, order.by = "Size", decreasing=TRUE, head = TRUE, n = 10) {
# based on postings by Petr Pikal and David Hinds to the r-help list in 2004
# modified by: Dirk Eddelbuettel (http://stackoverflow.com/questions/1358003/tricks-to-manage-the-available-memory-in-an-r-session)
# I then gave it a few tweaks (show size as megabytes and use defaults that I like)
# a data frame of the objects and their associated storage needs.
# helper: apply fn to each object named in `names`, fetched from `pos`
napply <- function(names, fn) sapply(names, function(x)
fn(get(x, pos = pos)))
names <- ls(pos = pos, pattern = pattern)
# first class only, so ifelse() below receives one value per object
obj.class <- napply(names, function(x) as.character(class(x))[1])
obj.mode <- napply(names, mode)
obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
obj.size <- napply(names, object.size) / 10^6 # megabytes
# first two dimensions of each object (NA for dimensionless vectors)
obj.dim <- t(napply(names, function(x)
as.numeric(dim(x))[1:2]))
# dimensionless objects (except functions) report their length as Rows
vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
obj.dim[vec, 1] <- napply(names, length)[vec]
out <- data.frame(obj.type, obj.size, obj.dim)
names(out) <- c("Type", "Size", "Rows", "Columns")
out <- out[order(out[[order.by]], decreasing=decreasing), ]
if (head)
out <- head(out, n)
out
}
.ls.objects()
################################################################################
## per canviar el codi a UTF-8
Encoding(attr(dat$pesfuer, "vari.label")) <- "latin1"
attr(dat$pesfuer, "vari.label") <- iconv(attr(dat$pesfuer, "vari.label"), "latin1", "UTF-8")
Encoding(names(attr(dat$pesfuer, "value.labels"))) <- "latin1"
names(attr(dat$pesfuer, "value.labels"))<- iconv(names(attr(dat$pesfuer, "value.labels")), "latin1", "UTF-8")
####packages
install.packages("shiny")
install.packages("compareGroups")
install.packages("gam")
install.packages("png")
install.packages("epitools")
install.packages("pROC")
install.packages("psych")
install.packages("plotrix")
install.packages("knitr")
install.packages("chron")
install.packages("rgdal")
install.packages("pgirmess")
install.packages("stringr")
install.packages("MASS")
install.packages("nnet")
install.packages("car")
install.packages("RODBC")
install.packages("survival")
install.packages("lattice")
install.packages("cluster")
install.packages("Hmisc")
install.packages("xtable")
install.packages("gdata")
install.packages("oce")
install.packages("tcltk2")
install.packages("odfWeave")
install.packages("Rcmdr")
install.packages("extrafont")
install.packages("xlsx")
## per saber la versi?? d'un paquest
packageDescription("shiny")
## fer una taula d'un table2
<<echo=FALSE, results='hide', warning=FALSE, message=FALSE>>=
xdat <- prepare(dat[, c("id", "idcentro")])
x <- table2(xdat$idcentro)
yy <- cbind(unlist(attr(x, "dimnames")[1]), x[1:length(x)])
xtable(yy)
@
\begin{table}[H]
\centering
\caption{Recruited participants by center}
\
\\
\begin{tabular}{lr}
\hline
&\\
& N (\%)\\
&\\
<<echo=FALSE, results='asis', warning=FALSE, message=FALSE>>=
print.xtable(xtable(yy), only.contents=TRUE, include.rownames = FALSE,
include.colnames=FALSE, hline.after=FALSE)
@
\hline
\end{tabular}
\end{table}
## per que no surti un output
{sink("/dev/null"); x <- table2(dat$avulsio, margin=0); sink()}
## per treballar contra el servidor
setwd("/run/user/1000/gvfs/sftp:host=134.0.8.34,user=ars/home/ars")
list.files()
################################################################################
################################################################################
## per posar el s??mbol major o igual
plot(0, 0)
title(main=
eval(parse(text='expression(phantom("")<=phantom(""))'))
)
aaa <- "xxx"
plot(0, 0)
title(main=
eval(substitute(expression( a + phantom("")<=phantom("")), list(a = aaa)))
)
aaa <- "xxx"
bbb <- "yyy"
plot(0, 0)
title(main=
eval(substitute(expression(paste(a, phantom("")<=phantom(""), b)),
list(a = aaa, b= bbb)))
)
bbb <- "750 Kcal/sem."
plot(0, 0)
title(main=
eval(substitute(expression(paste(phantom("")>=phantom(""), b)),
list(b= bbb)))
)
bbb <- "750 Kcal/sem."
ccc <- " = 80%"
plot(0, 0)
text(0,-0.2, eval(substitute(expression(paste(phantom("")>=phantom(""), b, c)),
list(b= bbb, c=ccc)))
)
################################################################################
################################################################################
## per canviar el code
Encoding(dat$puesto) <- "latin1"
dat$puesto <- iconv(dat$puesto, "latin1", "UTF-8")
################################################################################
################################################################################
## per modificar celles de un EXCEL
rm(list=ls())
setwd("/DATA/scratch_isaac/EUROTRACS")
options(java.parameters = "-Xmx4g")
library(XLConnect)
library(xlsx)
file.remove("suma2.xlsx")
file.copy("suma.xlsx", "suma2.xlsx",overwrite=TRUE)
# read input and ouput
wb <- XLConnect::loadWorkbook("suma2.xlsx")
XLConnect::readWorksheet(wb, sheet = "Hoja1",header=FALSE,startRow=1,startCol=1,endRow=8,endCol=4)
#xlsx::read.xlsx(file="suma2.xlsx", sheetName="Hoja1", rowIndex=1:3,colIndex=1,header=FALSE)
# modify cells
writeNamedRegionToFile("suma2.xlsx",2, name="yyy",formula = "Hoja1!$A$1",header=FALSE,rownames=NULL)
wb <- XLConnect::loadWorkbook("suma2.xlsx")
XLConnect::setForceFormulaRecalculation(wb, sheet = "Hoja1", TRUE)
XLConnect::readWorksheet(wb, sheet = "Hoja1",header=FALSE,startRow=1,startCol=1,endRow=4,endCol=1)
## per augmentar la memoria
options(java.parameters = "-Xmx4000m")
## per llegir xlsx
installXLSXsupport()
## per llegir dates des de EXCEL que entren numeros
chron(as.numeric(as.character(dat$fecha))-365.5*70+16, out.format = "d/m/yy")
## Per fer un bucle (la funcio Recall())
## Rejection sampler: simulates two groups of "mcguill" scores and keeps
## re-drawing (via Recall(), i.e. the function calling itself) until the
## two-sample t-test comparing the groups has p > 0.2, so the returned data
## frame shows no significant group difference. Depends on SimCon() (project
## helper, not base R) to simulate a bounded continuous variable -- assumed
## signature SimCon(n, mean, sd, min, max, ...); TODO confirm.
## NOTE(review): there is no recursion cap; if the acceptance condition is
## rarely met this can exhaust the call stack.
mydata <- function() {
# group sizes drawn uniformly between 180 and 190
n1<-round(runif(1, 180, 190), 0)
mcguill1<-SimCon(n1,29.8,11.9,10,100,0)
n0<-round(runif(1, 180, 190), 0)
mcguill0<-SimCon(n0,29.8,11.9,10,100,0)
# stack the two simulated groups into one data frame with a group indicator
group<-c(rep(1,n1), rep(0,n0))
dat<-as.data.frame(cbind(c(mcguill1, mcguill0), group))
names(dat)<-c("mcguill", "group")
# formatted group means (not used in the acceptance test; kept for inspection)
m1<-format(signif(mean(subset(dat, group==1)$mcguill), digits=3), scientific=FALSE)
m0<-format(signif(mean(subset(dat, group==0)$mcguill), digits=3), scientific=FALSE)
tval<-signif(with(dat, t.test(mcguill~group, var.equal=TRUE))$statistic, 4)
pval<-with(dat, t.test(mcguill~group, var.equal=TRUE))$p.value
# accept this draw only when the groups are NOT significantly different
if (pval > 0.2) return(dat)
Recall()
}
dat <- mydata()
| /comandos.R | no_license | xxxjvila/rutines | R | false | false | 50,733 | r |
## per treballar amb factors
xxx$sex <- as.factor(with(xxx, ifelse(sexe_b==1, "Male", "Female")))
xxx$sex <- relevel(xxx$sex, ref= "Male")
intervals(lm(lnimta_CCA ~ C(as.factor(sexe_b),base= 1), data = xxx))
intervals(lm(lnimta_CCA ~ sex, data = xxx))
## per llegir excel
dat <- readWorksheetFromFile( "./dat/Basedades2filtradasensedades4.xlsx", sheet = "Full 1", header = T,dateTimeFormat = "%d-%m-%Y", startRow=1, endRow = 386)
redcap
ronald multistage
## per installar RODBC
http://superuser.com/questions/283272/problem-with-rodbc-installation-in-ubuntu
rm(list=ls())
#RutinesLocals<-"C:/Users/jvila/Dropbox//rutines"
RutinesLocals<-"/home/jvila/Dropbox/rutines"
RutinesLocals <- "/Users/jvila/Dropbox/rutines"
install.packages("readr")
date_names_langs()
parse_date("1 enero 2015", "%d %B %Y", locale = locale("es"))
install.packages("haven")
source(file.path(RutinesLocals,"table2.r"))
source(file.path(RutinesLocals,"subset2.r"))
source(file.path(RutinesLocals,"carrega.llibreria.r"))
source(file.path(RutinesLocals,"calculadora.risc.r"))
source(file.path(RutinesLocals,"merge2.r"))
source(file.path(RutinesLocals,"intervals.r"))
source(file.path(RutinesLocals,"prepare.r"))
source(file.path(RutinesLocals,"export.SPSS.r"))
source(file.path(RutinesLocals,"arregla.formats.r"))
source(file.path(RutinesLocals,"import.ACCESS2.r"))
source(file.path(RutinesLocals,"merge2.r"))
source(file.path(RutinesLocals,"add.cases.r"))
source(file.path(RutinesLocals,"format2.r"))
source(file.path(RutinesLocals,"order2.r"))
source(file.path(RutinesLocals,"print2.r"))
source(file.path(RutinesLocals,"read.spss4.r"))
source(file.path(RutinesLocals,"spss_varlist.r"))
####packages
install.packages("shiny")
install.packages("compareGroups")
install.packages("gam")
install.packages("png")
install.packages("epitools")
install.packages("pROC")
install.packages("psych")
install.packages("plotrix")
install.packages("knitr")
install.packages("chron")
## pgirmess
## primer he hagut d'instalar "gdal"
## gdal-config em mostra que no existeix
## sudo apt-get install libgdal-dev
## sudo apt-get install libgdal1-dev libproj-dev
## sudo apt-get update
install.packages("rgdal")
install.packages("pgirmess")
install.packages("stringr")
install.packages("MASS")
install.packages("nnet")
install.packages("car")
install.packages("RODBC")
install.packages("survival")
install.packages("lattice")
install.packages("cluster")
install.packages("Hmisc")
install.packages("xtable")
install.packages("gdata")
install.packages("oce")
install.packages("tcltk2")
##install.packages("odfWeave")
install.packages("Rcmdr")
install.packages("extrafont")
###############################################################################
############## rJava 20/10/2015 ########################################
###############################################################################
## veure: http://tecadmin.net/install-oracle-java-8-jdk-8-ubuntu-via-ppa/
## per veure on es el JAVA
whereis java
## s'ha d'executar:
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer
## comprovar la versio instalada
java -version
## si el resultat no ?s 1.8
sudo update-alternatives --config java # i seleccionar 1.8
## un cop haguem comprovat que es la 1.8
sudo apt-get install oracle-java8-set-default
## par tal de que el R l'incorpori
R CMD javareconf
###############################################################################
###############################################################################
###############################################################################
install.packages("rJava")
install.packages("xlsx")
#deb
#http://cran.rstudio.com/bin/linux/ubuntu
#lucid/
R.Version()
rm(list=ls(mgcv))
http://cran.rstudio.com/
library(frailtypack) # la llibreria de Juan Ramon de Supervivencia
## png
install.packages("png")
library(png)
## lme4
install.packages("epitools")
## pROC
install.packages("pROC")
## psych
install.packages("psych")
library(psych)
## plotrix
install.packages("plotrix")
library(plotrix)
install.packages("knitr")
library(knitr)
##chron
install.packages("chron")
## pgirmess
## primer he hagut d'instalar "gdal"
## gdal-config em mostra que no existeix
## sudo apt-get install libgdal-dev
## sudo apt-get install libgdal1-dev libproj-dev
## sudo apt-get update
install.packages("rgdal")
install.packages("pgirmess")
install.packages("rgdal")
install.packages("stringr")
install.packages('stringr', repos='http://cran.us.r-project.org')
## per instal.lar "car" a linux
install.packages("MASS")
install.packages("nnet")
install.packages("car")
library(car)
## per instal.lar "RODBC"
## sudo aptitude install unixodbc-dev
install.packages("RODBC")
library(RODBC)
install.packages("survival")
library(survival)
install.packages("gam")
library(gam)
## per instal.lar "Hmisc"
install.packages("lattice")
install.packages("cluster")
install.packages("Hmisc")
library(Hmisc)
install.packages("xtable", dependencies=TRUE)
library(xtable)
install.packages("gdata", dependencies=TRUE)
library(gdata)
install.packages("oce", dependencies=TRUE)
library(oce)
install.packages("tcltk2", dependencies=TRUE)
library(tcltk2)
install.packages("odfWeave", dependencies=TRUE)
library(odfWeave)
install.packages("compareGroups")
library(compareGroups)
install.packages("Rcmdr", dependencies=TRUE)
library(Rcmdr)
install.packages("extrafont")
library(extrafont)
font_import()
fonts()
## rjava / xlsx / XLConnect
## des del promt:
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java7-installer
sudo apt-get update
sudo R CMD javareconf
## des de R
install.packages("rJava", dependencies=TRUE)
install.packages("XLConnect", dependencies=TRUE)
install.packages("XLConnectJars", dependencies=TRUE)
################################################################################
#################### r Java ##############################################
################################################################################
## veure si es la versio de 32 o 64 bits amb
## sessionInfo()
## baixar-se la versió de 64-bits de:
## http://java.com/en/download/manual.jsp
## ho he instal.lat a C:/Programs/Java64/
## he posat aquesta adreça al path d'inici de windows
library(rJava)
################################################################################
################################################################################
################################################################################
Sys.setenv(JAVA_HOME='C:/Programs/Java64') # for 64-bit version
Sys.setenv(JAVA_HOME='/usr/lib/jvm/java-7-oracle/jre')
## .libPaths()
## .libPaths(c("/home/ars/R/x86_64-pc-linux-gnu-library/2.15","/usr/local/lib/R/site-library","/usr/lib/R/site-library","/usr/lib/R/library"))
## veure:
## http://www.r-statistics.com/2012/08/how-to-load-the-rjava-package-after-the-error-java_home-cannot-be-determined-from-the-registry/
## Sys.setenv(JAVA_HOME='C:\\Program Files (x86)\\Java\\jre7') # for 32-bit version
Sys.setenv(JAVA_HOME=': /usr/lib/jvm/java-7-openjdk-i386/jre')
Sys.setenv(JAVA_HOME='C:/Programs/Java/bin')
Sys.getenv("JAVA_HOME")
Sys.setenv(JAVA_HOME='C:/ProgramData/Oracle/Java')
if(Sys.getenv("JAVA_HOME")!="") Sys.setenv(JAVA_HOME="")
install.packages("rJava")
library(rJava)
install.packages("xlsx", dependencies=TRUE)
library(xlsx)
## exemple d'escriure un xlsx
dades<-as.data.frame(cbind(c(1,1,2,3,4,5), c(11,11,12,13,14,15)))
write.xlsx(dades, file= "xxx.xlsx", sheetName="Sheet1") ## exporta a XLSX
## llegir fitxer de EXCEL
xfile<-"U:/ULEC/Exemples_estadistica/register/dat/tasques.xls"
channel<- odbcConnectExcel(xfile)
sqlTables(channel)
dat<-sqlFetch(channel, sqtable="Hoja1$")
close(channel)
## guardar fitxer de EXCEL
xxx<-as.data.frame(cbind(c(1,2,3,4,5,6), c(11,12,13,14,15,16)))
setwd("/home/jvila/xxx")
channel<- odbcConnectExcel("xxx.xls", readOnly=FALSE)
sqlSave(channel, catok, tablename="Participants",append = FALSE,safer= FALSE,rownames=FALSE,colnames=FALSE)
close(channel)
## library(sos)
findFn("open office")
save(clin, file ="xxx.RData")
## per triar el fitxer on es vol savel
save(exemple, file= file.choose())
# quan dos numeros no son iguals pero afecta a un decimal a prendre pel cul
rp17<-ifelse(isTRUE(all.equal(sup17a, (sup17ok*100))), 1,
ifelse(isTRUE(all.equal(sup17b, (sup17ok*100))), 2,
ifelse(isTRUE(all.equal(sup17c, (sup17ok*100))), 3, 999)))
rp17
format(sup17c,digits=16,nsmall=16)
# Another option is to redefine the "==" operator so it compares with
# numeric tolerance (all.equal) instead of exact equality.
# WARNING: this shadows base `==` for EVERY comparison in the session,
# including non-numeric ones, and it collapses vector comparisons to a
# single TRUE/FALSE (isTRUE) -- use with extreme care and remove it
# afterwards with rm("==").
"==" <- function(x,y) isTRUE(all.equal(x, y))
# per desactivar un grafic
graphics.off()
# per borrar una lliberia
search() # miro a quina posicio es, p.e. la 2
detach(2)
a<--2.9841282
b<-sqrt(0.4656142)
c<-qnorm(0.1,a,b)
d<-round(1/(1+exp(-c)),4)
#posar ordre
## per mes d'una variable:
problems <- problems[order(problems$centreid, problems$paccentreid, -problems$type), ]
xxx <- subdat[order(subdat[,"centro"],subdat[,"paci"]),]
xxx$ordre <- seq(1, nrow(subdat))
subdat <- merge2(subdat, xxx[, c("idrepe", "ordre")], by.id=c("idrepe"),all.x=TRUE, sort= FALSE)
subdat <- order2(subdat, c("ordre"))
subdat <- remove.vars(subdat, "ordre")
head(ictus[order(ictus[,"id"],order(ictus$ancestor, decreasing = TRUE)),])
tots2<-tots2[order(tots2[,"parella"],-tots2[,"cascon"]),]
xxx<-xxx[order(xxx[,"id"]),]
x<-c( 1, 2, 3, 4, 5, 6, 7, 1, 3, 4, 5,8)
y<-c(22,23,23,24,25,26,27,28,24,22,20,21)
z<-cbind(x,y)
t(z[order(z[,1],-z[,2]),])
pred<-order2(pred, c("id"))
xxx[order(c(xxx$font,xxx$lp)),]
vari<-scan(what="character", sep="\n")
idpaci
estudi
idepisodi
nom
ape1
ape2
f_ing
iamseg
diamseg
iam2a
toiam
xxx<-tot[!is.na(tot$iam2a) & tot$iam2a==1, vari]
xxx[order(xxx[,"estudi"],-xxx[,"idepisodi"]),]
packageDescription("gnm")
example(rpanel)
library()
search()
ls(4)
help(solve)
?solve
help("[[")
help.start()
example("hclust")
source("c:\\jvila\\r\\comandos.R")
sink("resultado.txt")
sink()
# punto de corte
qt(.975, df = 24)
# calcular "p" para un valors de "t", bilateral
(1-pt(2.063899,df=24))*2
## generar una random variable and testing normality
library(MASS)
x<-rt(300,df=5)
fitdistr(x,"t")
qqnorm(x); qqline(x)
qqplot(qt(ppoints(length(x)),df=5.55),x)
qqline(x)
# exemple de plots amb distribucio normal
x<-seq(-6,6,by=0.1)
plot(x,dnorm(x),type="l",xlim=c(-6,6),ylim=c(0,0.9))
lines(x,dnorm(x,mean=0,sd=2),col="red")
x<-seq(0,40,by=0.01)
curve(dgamma(x,shape=2,scale=3),from=0,to=40)
abline(v=2*3,lty=2)
x<-0:20
plot(x,dpois(x,lambda=4),type="h")
plot(x,ppois(x,lambda=4),type="s")
# calular mitjanes per linia
muestras<-matrix(rnorm(1000),nrow=100,byrow=T)
medias<-apply(muestras,1,mean)
# Count the number of missing values per row (columns 8 to 57).
answer$na<-apply(t(apply(answer[,8:57],1,is.na)), 1, sum)
# Equivalent and simpler: apply sum over the logical NA matrix directly.
# (The original line had an unbalanced trailing ")" -- a syntax error.)
answer$na<-apply(is.na(answer[,8:57]),1,sum)
## flag rows that have no missing value in any column (TRUE = complete row)
lpa$keep<-apply(!is.na(lpa),1,all)
# calcula mitjana i t-Student
with(pred,by(edad,sexo,function(x) c(mean(x,na.rm=TRUE),sd(x,na.rm=TRUE))))
t.test(edad ~ sexo, data = subset2(pred, "sexo<999 & edad <999"),var.equal = TRUE)
# generar numeros de una binomial
x<-rbinom(20,size=1,prob= 0.2)
## seleccionar
x<-sample(c("A","B","C"),200,replace=T,prob=c(0.5,0.4,0.1))
wom<-wom[sample(1:nrow(wom),16),]
#exemple de buscar una variable
agrep("diarev",names(mcr),value = TRUE)
keep.var<-c("aparece","atencion","fllega","primeringr","fcoro1","ptcaprim","ptcaresca", "ptcaelec","ptcafarma","ptcasincla","frevas")
keep.var[!keep.var%in%names(mcr)]
# per fer moltes taules
xxx<-names(pred)
for(i in 12:length(pred))print(cbind(table2(pred[,xxx[i]],pred$sexo)))
cbind(table2(pred$hta,pred$sexo))
sink(file="xxx.doc")
for(i in 3:length(xxx))print(cbind(table2(pred[,xxx[i]])))
sink()
file.show("xxx.doc")
shell.exec("xxx.doc")
# per borrar variables
dades<-remove.vars(dades,"resucoro")
## localitzar registres i variables
which(names(pred)=="hta")
pred$numcase<-1:nrow(pred)
rownames(pred)
# per fer un excel dels resultats
write.table(datos, file = "c:/jvila/r/r.xls",append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE)
write.table(jsanchez, file = paste(treball,"jsanchez.xls", sep=""),append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE, na="")
shell.exec("c:/jvila/r/r.xls")
#exemple de recode, rename, attrib
x1$colmed<-car::recode(x1$colmed,"2=0;1=1;else=NA")
casos<-rename.vars(casos, from="hipolip6", to="colmed")
attr(ic.coef,"vari.label")<-c("Identificador", "x2", "ss")
attr(x3$colmed,"vari.label")<-"Hipolipemiantes (en casos a 6 meses)"
attr(x3$colmed,"value.labels")<-c("No"=0, "Si" =1)
#seleccionar pacients i variables
vari<-scan(what="character", sep="\n")
id
nodo
edad
xxx<-subset2(pred, "nodo ==1 & edad >70")[,vari]
fix2(clin[is.na(clin$edad), c("fechini","fechnac","xxx1","edad")])
# salvar atributs
atri.ahtam<-attributes(clin$ahtam)
attributes(clin$ahtam)<-atri.ahtam
# subset
clin<-subset2(clin,"clin$lugartto==1 | clin$lugartto==6")
vari<-c("idepisodi","estudi", "nombrepa", "hsintmon", "msintmon", "infosinmon","fechini", "sint")
subset2(dat, "infosinmon ==1 & hsintmon >24")[,vari]
#merge
clin<-merge2(clin,fili,by.id=c("estudi","idepisodi"),all.x=TRUE, sort= FALSE)
# dates
library(chron)
seg6m$xxx1<-paste(as.character(seg6m$adhospdtdy),"-", as.character(seg6m$adhospdtmo),"-", as.character(seg6m$adhospdtyr),sep="")
seg6m$recruitdat<-chron(seg6m$xxx1,format=c(dates="d-m-y"),out.format=c(dates="day-mon-year"))
tot$f_ing<-chron(tot$f_ing,format=c(dates="d-m-y"),out.format=c(dates="day-mon-year"))
min(tot[tot$origen==3,]$f_ing)
tot$f_ing<-chron(tot$f_ing,out.format=c(date="d-mon-Y"))
xx<-chron(paste("31","-","12","-","2002",sep=""),format=c(dates="d-m-y"),out.format=c(dates="day-mon-year"))
# Example date-time strings in "dd/mm/yyyy hh:mm:ss" form.
# (The original c() call was missing the commas between elements,
# which is a syntax error.)
xxx<-c("06/01/2012 20:36:25", "06/01/2012 20:36:25", "12/01/2012 01:38:33",
       "10/01/2012 11:23:16", "08/01/2012 22:14:22", "08/01/2012 22:14:22")
# Split each string into its date part (chars 1-10) and time part (chars 12-20).
dts<-substr(xxx, 1, 10)
tms<-substr(xxx, 12, 20)
# Parse with chron: x1 holds the dates, x2 the times.
x1<-chron(dates=dts,format=c("d/m/Y"),out.format=c("d-mon-y"))
x2<-chron(times=tms,format=c("h:m:s"),out.format=c("h:m:s"))
# Combine date + time into a single chron date-time column.
answer$moment<-chron(dates = x1, times = x2,format=c(dates="d/m/Y", times = "h:m:s"),out.format=c(dates="day-mon-year", times = "h:m:s"))
ini<-chron(c("4/6/2004","8/12/1995","1/1/2004"),format=c("d/m/Y"),out.format=c("d-mon-y"))
fi<-chron(c("1/11/2003","31/12/1997","31/12/2007"),format=c("d/m/Y"),out.format=c("d-mon-y"))
df<-data.frame(ini,fi)
df$res<-rep(NA,nrow(df))
for (i in 1:nrow(df)){
df$res[i]<-trunc(runif(1,df$ini[i],df$fi[i]))
}
df$res<-chron(df$res,out.format=c("d-mon-y"))
df
#funcio
# Example function returning two values.
# R does not support multi-value return: `return(v, w)` is an error
# (the multi-argument form of return() was removed in R 1.8.0), so the
# two results are wrapped in a named list instead.
#
# a, b : numeric scalars or vectors
# returns list(v = a*2, w = b*2)
f1 <- function(a, b) {
  v <- a * 2
  w <- b * 2
  list(v = v, w = w)
}
x<-f1(3,5)
# Multiply two numbers (or vectors, elementwise) and return the product.
f2 <- function(a, b) a * b
xxx<-f2(2,9)
## escriure una taula
write.table(datos, file = "c:/jvila/r/r.xls",append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE)
shell.exec("c:/jvila/r/r.xls")
########################################################################
################## importo SPSS i exporto acces ########
########################################################################
vari<-tolower(scan(what="character"))
rescate
n_h
HOSP1
NUM_PACIENTE
caso
ape1
ape2
nom
edad
sex
RTRSIMO
admi
ahtai
acoli
fitxer<-"U:\\Estudis\\Epidemiologia\\REGICOR\\POBLACIONAL\\dades\\regi78_actual\\original\\bases de dades procedencia fusio\\78-95 procedeix de investigats.sav"
hola<-read.spss4(fitxer,keep.var=vari)
acces<-paste(treball, "problemes.mdb", sep="")
export.ACCESS(taula=gedaps, file.mdb=acces, table.name="gedaps", table.dict = "dicgedaps")
shell.exec(acces)
#### importar acces
import.ACCESS2(
file.mbd="U:\\Estudis\\Clinic\\BASICMAR\\dades\\DEA Jordi\\JJimenez.mdb",
nom.taula=c("basic","m3","gen"),
nom.variables=list(c("ALL"),
c("ALL"),
c("partic", "K406", "K1444", "K375", "K246","K201")),
nom.dicc="Dic",
file.spss="",
var.dicc=c("nombre","etiqueta_variable","etiqueta_valor","tabla2"),
noms.taules=c("basic","m3","gen"),
fix.formats=TRUE)
# per buscar repetits
(repes <- with(stud,table(dni)))[repes>1]
repes<-with(check1,table(id))
repes<-as.double(names(repes)[repes>1])
check1$exclu<-with(check1, ifelse(check1$id%in%repes, 74, exclu))
sum(with(cascon,table(idfortiam))>1)
t<-with(fortiam,table(colest))
sum(t>1)
t[t>1]
valors.repes<-as.double(names(t)[t>1])
fortiam$num_paci[fortiam$colest%in%valors.repes]
xxx<-subset(fortiam,colest%in%valors.repes)[,c("num_paci","colest")]
fix2(xxx[order(xxx$colest),])
# correlacions
vari<-scan(what="character")
nkg2a_cd3_
nkg2c_cd3_
x2a_2c_cd3_
nkg2c_en_cd3__cd56_
nkg2a_en_cd3__cd56_
nkg2c_en_cd56__cd3_
nkg2a_en_cd56__cd3_
nkg2c_en_cd3__cd56__1
nkg2a_en_cd3__cd56__1
x2a_2c_cd3__cd56_
x2a_2c_cd3__cd56__1
ilt2_cd3__cd56_
ilt2_cd3__cd56__1
ilt2_cd3__cd56__2
ilt2_cd3_
nkg2c_en_nk
nkg2a_en_nk
ilt2_en_nk
x2a_2c_en_nk
xxx<-dades[,vari]
## Pairwise Spearman correlations between all columns of `xxx`,
## accumulated into `res` (one row per pair: label, rho, p-value, n).
## Because `label` is character, rbind() coerces the whole matrix to character.
res<-NULL
for (i in 2:ncol(xxx)){
# j runs over the columns before i, so each unordered pair is visited once
for (j in 1:(i-1)){
x<-xxx[,i]
y<-xxx[,j]
# "spearm" relies on partial matching of method = "spearman"
ct<-cor.test(x,y,method = "spearm")
r<-ct$estimate
pvalor<-ct$p.value
# number of complete (non-missing in both columns) pairs actually used
n<-sum(!is.na(x) & !is.na(y))
# "vari.label" attributes are presumably set by the SPSS import helpers
# used elsewhere in these notes -- TODO confirm
label.x<-attr(x,"vari.label")
label.y<-attr(y,"vari.label")
label<-paste(label.x,label.y,sep=" vs. ")
# NOTE(review): growing res with rbind() is O(n^2) in the number of
# pairs; fine for a few dozen columns, preallocate for larger inputs
res<-rbind(res,c(label, r,pvalor,n))
}
}
colnames(res)<-c("Variables2","rho","pvalor","n")
write.table(res,
file = "U:\\Estudis\\Externs\\NKG2C M Lopez Botet\\Dades\\cor.xls",append=FALSE,sep="\t",col.names=TRUE,row.names=FALSE)
# per fer LR univariades
vari<-scan(what="character")
edad
C(as.factor(sexo),base=1)
C(as.factor(period),base=1)
write.table("Univariat", file = paste(treball,"LRuni.xls",sep=""),col.names=FALSE,row.names=FALSE)
write.table(rbind(c("Variable", "OR", "95%CI inf", "95%CI sup", "p-value")), sep="\t",file = paste(treball,"LRuni.xls",sep=""),append= TRUE, col.names=FALSE,row.names=FALSE)
for (i in 1:length(vari)){
formul<-paste("def"," ~ ", noquote(vari[i]), sep="")
mod<-glm(
formula=formul,
family="binomial",
data=dat,
na.action=na.exclude
)
write.table(intervals(mod)[2,,drop=FALSE], file = paste(treball,"LRuni.xls",sep=""),append=TRUE,sep="\t",col.names=FALSE,row.names=TRUE)
}
shell.exec(paste(treball,"LRuni.xls",sep=""))
## per fer moltes tab
for (i in 2:length(vari)){
eval(parse(text=paste("with(clin,table2(",noquote(vari[i]),"))",sep="")))
}
for (i in 2:length(vari)){
cat("\n_______",vari[i],"_________\n")
table2(clin[,vari[i]])
cat("\n\n\n")
}
for (i in 2:length(vari)){
clin[,vari[i]]<-car::recode(clin[,vari[i]],"NA=999")
}
# per imprimir molts resultats
sink(file = "c:\\jvila\\xxx.txt")
for (i in 1:length(vari)){
cat("\n_______",vari[i],"_________\n")
print(table(clin[,vari[i]],clin$a?oini))
cat("\n\n\n")
}
sink()
shell.exec("c:\\jvila\\xxx.doc")
# per comprovar linealitat
####################################
# tria explicativa, outcome i les dades
explicativa<-"imc"
outcome<-"itb_cutrec"
nom.dades<-"hermesok"
# aqui fa el model
temp<-eval(parse(text=paste("subset(",nom.dades,",!is.na(",outcome,") & !is.na(",explicativa,"))",sep="")))
formul<-paste(noquote(outcome), "~ s(", noquote(explicativa),")",sep="")
mod.lin<-gam(
formula=as.formula(noquote(formul)),
family="binomial",
data=temp,
#subset =sexe==1,
na.action=na.exclude
)
# grafic
res.mod<-preplot.gam(mod.lin,type="terms",terms=paste("s(",noquote(explicativa),")",sep=""),se.fit=TRUE)[[1]]
ci<-cbind(res.mod$y,res.mod$y-qnorm(1-0.05/2)*res.mod$se.y,res.mod$y+qnorm(1-0.05/2)*res.mod$se.y)
orden<-order(res.mod$x)
ci<-ci[orden,]
matplot(sort(res.mod$x),ci,type="l",lty=c(1,2,2),col="black",xlab=explicativa,ylab="logit smooth estimate")
title("gam logistica")
rug(jitter(res.mod$x))
#####################################
### sumar per columnes
x1<-colSums(with(fusio,table(smoker,font)))
x2<-with(fusio,apply(table(smoker,font),2,sum))
# taules bivariades
var.taula<-"VARIABLE\tKEEP\tDIGITS\tMETHOD\tELIM\tTIPUS\tLOGPTREND
hours\tNULL\t1\t2\tNULL\tNULL\tFALSE"
write(var.taula,file="C:\\xxx.doc")
file.show("C:\\xxx.doc")
taules.bivariades(file.input = NULL, var.taula = var.taula, nom.col = "group",
dades = oren, nom.arxiu = "C:\\jvila\\oren\\resu", dec.car = ",", plot.norm = TRUE,
lim.p.value = 0.05)
##genera noms del tipus xp01, xp02, etc.
grep("^xp[0-9]+$",names(notes),value=TRUE)
toupper(letters[1:8])
## per omplir de 0
xxx<-tr05lab$id
xxx<-c(99999, xxx)
xxx<-format(xxx)
xxx<-gsub(" ", "0", xxx)
xxx<-xxx[-1]
tr05lab$xxx<-xxx
## pastes varis
xxx<-rbind(paste(rep("p", 8), as.character(seq(1,8, 1)), sep=""))
lettercode<-cbind(paste(rep(toupper(letters[1:8]), 12), rep(as.character(seq(1,12, 1)),each= 8), sep=""))
numbercode<-cbind(seq(1,length(lettercode), 1))
convert<-cbind(lettercode, numbercode)
# genera cadenes del tipu an01, an02, etc.
cbind(paste(rep("an", 50), num.pract<-gsub(" ","0",format(1:50)), sep=""))
c(paste(rep("r", 20), gsub(" ","0",format(1:20)), sep=""))
result<-54
paste("La respuesta es",result,sep=": ")
x<-c(1,3,4)
paste(x,collapse="/")
paste(x,sep="/")
x<-c(1,2,3)
y<-c(4,5,6)
z<-c(7,8,9)
paste(x,y,z,sep="+")
paste(paste("Pregunta",1:5,sep=""),collapse="\t")
toupper(letters[1:8])
paste(paste("Pregunta",letters[1:5],sep=" "),collapse="\n")
paste(paste("Pregunta",LETTERS[1:5],sep=" "),collapse="\n")
write(rbind(paste(paste("Pregunta",1:npreg,sep=""),collapse="\t")),file="xxx")
file.show("xxx")
## legir un fitxer EXCEL
regiair<-read.xls( paste(treball,"alea.xls", sep =""),colNames = FALSE,sheet = 1)
# replicates
numok$xxx<-rep(1:19, each= 40)
rep(c("a","b","c"),c(10,20,5))
save(dat,file = file.path(treball,"dat.Rdata"))
# per llegir un excel
jsanchez<-xlsReadWrite::read.xls( paste(treball, "Muestras empleadas para pools.xls", sep=""),
colNames = TRUE,
sheet = 1,
type = "data.frame",
from = 1,
rowNames = NA, colClasses = NA, checkNames = TRUE,
dateTimeAs = "numeric",
stringsAsFactors = default.stringsAsFactors())
# per salvar com etiquetes els valors d'una variable de cadena
xxx<-levels(flow$situ2)
flow$situ2<-as.integer(as.factor(flow$situ))
attr(flow$situ2,"value.labels")<-structure(1:length(xxx), names=xxx)
### per buscar alguna sintaxis (p.e. casos.RData) feta mab R
xxx<-list.files("/home/jvila/gdrivelars/d449/MU/MUAnalysis/MuscEsque/empresa", pattern= ".R$", recursive=TRUE, full.names = TRUE)
for (i in 1:length(xxx)){
contingut<-scan(xxx[i],what="character",sep="\n")
if (length(grep("loc<-",contingut))) print(xxx[i])
}
### per veure les característiques de les variables
lapply(jm, class)
### per exportar a SPSS
export.SPSS (m4, file.save = NULL, var.keep = "ALL", run.spss = FALSE)
export.SPSS (par1a1, file.dict = NULL, file.save = "U:/Estudis/Clinic/FORTIAM - RESCATE II/FORTIAM/analisi/MG?mez/Article 2/par1a1.sav"
, var.keep = "ALL", file.runsyntax = "C:/Archivos de programa/SPSS Evaluation/runsyntx.exe")
## per que no sorti en format cient?fic
format((prec/100)^2,scientific = FALSE)
# Data per imputar
##############################################
#data aleatoria entre inici i final de l'estudi
n<-nrow(segok)
segok$temp<-with(segok,chron(iam_ind + round(runif(nrow(segok),0,d_ult2-iam_ind),0),out.format="d-mon-Y"))
## calcular la data maxima
surv11$timemax<-with(surv11, ifelse(event>=1, apply(surv11[,c("datiam", "dataltraci", "datavc", "datdef")], 1, min), apply(surv11[,c("datiam", "dataltraci", "datavc", "datdef")], 1, max)))
# 4 dimensional plot
m<-matrix(unlist(with(countries,by(event,eventq,function(x) c(min(x,na.rm=TRUE),max(x,na.rm=TRUE))))),
ncol=2,byrow=TRUE)
m<-format(round(m,3))
m<-apply(m,1,function(x) paste("[",x[1],";",x[2],"]",sep=""))
colors<-c("blue", "green", "yellow", "red")
plot(countries$gross,countries$cvdeath
,cex=sqrt(countries$n/100)
,col=colors[countries$eventq]
,xlab="Yearly gross national income per capita ($)"
,ylab="Age-standardized mortality rate for cardiovascular diseases",pch=19)
points(countries$gross,countries$cvdeath,cex=sqrt(countries$n/100))
legend("topright",legend=paste("Q",1:4,": ",m,sep=""),
fill=colors,title="in-hospital mortality")
par(xpd=NA)
identify(countries$gross,countries$cvdeath,countries$name,cex=0.8,col="black",font=2)
# nova finestra gr?fica
win.graph()
## funcions i classess
> print.isaac<-function(x) cat("hola qué tal",x,"\n")
> x<-3
> class(x)<-"isaac"
> x
hola qué tal 3
> print(x)
hola qué tal 3
> unclass(x)
[1] 3
> class(x)
[1] "isaac"
> class(unclass(x))
[1] "numeric"
> print.default
function (x, digits = NULL, quote = TRUE, na.print = NULL, print.gap = NULL,
right = FALSE, max = NULL, useSource = TRUE, ...)
{
noOpt <- missing(digits) && missing(quote) && missing(na.print) &&
missing(print.gap) && missing(right) && missing(max) &&
missing(useSource) && length(list(...)) == 0
.Internal(print.default(x, digits, quote, na.print, print.gap,
right, max, useSource, noOpt))
}
<environment: namespace:base>
> methods(class="isaac")
[1] print.isaac
> methods(class="cox.zph")
[1] [.cox.zph* plot.cox.zph* print.cox.zph*
Non-visible functions are asterisked
> methods(class="glm")
[1] add1.glm* anova.glm Anova.glm*
[4] av.plot.glm* ceres.plot.glm* confidence.ellipse.glm*
[7] confint.glm* cooks.distance.glm* cr.plot.glm*
[10] deviance.glm drop1.glm* effects.glm*
[13] extractAIC.glm* family.glm* formula.glm*
[16] influence.glm* intervals.glm leverage.plot.glm*
[19] linear.hypothesis.glm* logLik.glm* model.frame.glm
[22] ncv.test.glm* outlier.test.glm* predict.glm
[25] print.glm qq.plot.glm* residuals.glm
[28] rstandard.glm rstudent.glm summary.glm
[31] Var.glm* Varcov.glm vcov.glm*
[34] weights.glm*
Non-visible functions are asterisked
> add1.glm
Error: objeto "add1.glm" no encontrado
> ?add1.glm
> getAnywhere(add1.glm) # i surt tota la funcio add1.glm
#### per treure espais en blanc
ibespss$poblaci_<-with(ibespss, sub(" +$","", poblaci_))
albaspss<-subset2(ibespss, "poblaci_=='ALBACETE'")
### per truere el punt al final de un carcater
alldat$tropo_peak<- with(alldat, sub("\\.+$", "", tropo_peak, fixed = FALSE ))
## per saber els valors que no es poden convertir a numeric
x<-c("2.1","2,2",NA)
x<-trim(x)
x<-ifelse(x=='',NA,x)
ww1<-which(is.na(x))
x2<-as.double(x)
ww2<-which(is.na(x2))
ww<-ww2[!ww2%in%ww1]
x[ww]
### per calcular or, rr, etc.
library(epicalc)
help(package="epicalc")
example(cs)
## la data del sistema
Sys.Date()
## attributs
cbind(lapply(euphoric3, function(x) attr(x,"vari.label")))
cbind(unlist(lapply(dexa, function(x) attr(x, "vari.label"))))
## per treure els espais en blanc
dades$xxx <- ifelse(sub(" +$", "", dades$comentario)=="tercera generaci?n",1,0)
## taules varies
install.packages("Epi")
install.packages("catspec")
install.packages("gmodels")
install.packages("epitools")
library("Epi")
library("catspec")
library("gmodels")
library("epitools")
example(stat.table)
example(ctab)
example(CrossTable)
example(riskratio)
### per treure els missing
macrowom<-macrowom[apply(t(apply(macrowom,1,is.na)), 1, sum) == 0, ]
### per dibuixar un grafic de barres
par(las=1, mar=c(5, 6, 4, 2), xpd=FALSE)
mehta<-48.2
lohta<-47.4
uphta<-49.1
hta<-c(42.8, 46.6, 48.3, 51.2, 50.2, 43.7, 51.2, 52.6, 43.1)
centers<-c("REGICOR", "HERMEX", "TALAVERA", "CDC", "RIVANA", "RECCyL", "CORSAIB", "DINO", "DRECA")
htac<-hta-mehta
color<-ifelse(hta<lohta, "green", ifelse(hta>uphta, "red", "blue"))
xxx<-barplot(htac,horiz=TRUE,axes=F,col=color, xlim= c(-6,5),
main="Age-standardized Hypertension prevalence: MEN")
axis(1,pretty(range(htac)),(pretty(range(htac))+ mehta))
axis(2,xxx, centers)
abline(v=c(lohta, mehta, uphta)-mehta, lty=c(2,1,2))
par(xpd=NA)
legend(mean(par()$usr[1:2]),par()$usr[3]-diff(par()$usr[3:4])*0.1,c("Overall","95%CI"),xjust=0.5,lty=1:2,bty="n")
## per veure el que fa un package
help(package="survival")
OR<-c(1.13,3.75,4.32,5.54,5.01)
selogOR<-c(0.2,0.3,0.25,0.12,0.2)
meta.DSL(OR,selogOR)
meta.DSL(OR[-1],selogOR[-1])
### per buscar un tros de sintaxis en tots el tinn-R d'una carpeta
carpeta<-"U:/Estudis/Colaboracions/2009 DARIOS Prevalencia FRCV Espa?a siglo XXI/Analisis"
arxius<-list.files(carpeta, pattern=".r$", full.names=T, recursive=T)
for (i in 1:length(arxius) ){
xxx<-scan(file=arxius[i], what="character", sep="\n")
print(grep("Comparaci?n de resultados",xxx))
}
## per calcular mitjanes per fila
offv01$dbp<-with(offv01,apply(cbind(a56,a58),1, mean, na.rm=TRUE))
## Per fer taules amb totals
xxx<-as.matrix(with( fusio, table2(flow2, font, margin=0)))
cbind(xxx,apply(with(fusio, table (flow2, font)), 1, function(x) sum(x)))
## per definir l'amplada de la consola
options(width = 60)
seq(1, 100, 1)
options(width = 32)
seq(1, 100, 1)
## compare groups
library(foreign)
library(compareGroups)
setwd("C:/cursR/data")
datos<-read.spss("partoFin.sav",
use.value.labels = FALSE,
to.data.frame = TRUE)
datos$naci_ca<-factor(datos$naci_ca,labels= names(attr(datos$naci_ca,"value.labels")))
datos$sexo<-factor(datos$sexo,labels= names(attr(datos$sexo,"value.labels")))
res <- compareGroups(tx ~ edad + peso + sexo + naci_ca, data = datos,
selec = c(peso = "datos$edad < 40"),
method = c(peso=2))
restab <- createTable(res, show.n = TRUE,
hide = c(sexo =1),
digits = c(edad=3))
export2latex(restab, file = "C:/xxx/table1", dec = ",")
export2csv(restab, file = "C:/xxx/table1", sep = ";")
# un altres exemple
# primer fer un scan . . . .
dat<-fusio[, vari]
dat<-prepare(dat)
res <- compareGroups(font ~ ., data = dat, subset = fusio$st3c==0 | fusio$st3c==1)
restab <- createTable(res, show.n = TRUE, hide = c(sexo= 1,ant_dm= 1,ant_tab= 1,ant_col= 1,ant_hta= 1,ant_iam=1 ,ant_rev= 1,onda_q= 1,loc_ar= 1,ucc_exit= 1,mort28= 1,mort6= 1,hemodin= 1))
export2csv(restab, file = "C:/xxx/xxx", sep = ";")
shell.exec("c:/xxx/xxx.csv")
## update
res<-update(res, font ~ . -hemodin, subset = fusio$st3c==0)
restab <- createTable(res, show.n = TRUE, hide = c(sexo= 1,ant_dm= 1,ant_tab= 1,ant_col= 1,ant_hta= 1,ant_iam=1 ,ant_rev= 1,onda_q= 1,loc_ar= 1,ucc_exit= 1,mort28= 1,mort6= 1,hemodin= 1), show.p.trend=TRUE)
# restab <- update(restab, show.all = FALSE)
export2csv(restab, file = "C:/xxx/xxx", sep = ";")
shell.exec("c:/xxx/xxx.csv")
## per saber les etiquetes de les variables
varnames<-NULL
for (i in 1:ncol(fusio) ) {
varnames<-rbind(varnames, trim(paste(paste(i, ") ", names(fusio[i]), sep=""), attributes(fusio[ , i])$vari.label, sep=": ")))
}
## per esborrar packages
remove.packages("compareGroups")
## per instal?lar un tar.gz
install.packages("C:/CursR/menorca/packages/tar.gz/compareGroups_0.1-5.tar.gz", repos=NULL, type="source")
install.packages("/xxx/compareGroups_2.0.3.tar.gz", repos=NULL, type="source")
install.packages("SNPassoc")
install.packages("XLConnect")
install.packages("shiny")
install.packages("HardyWeinberg")
install.packages("/home/jvila/Dropbox/CompareGroups/package/compareGroups_without_odfWeave/compareGroups_2.1.tar.gz",
repos=NULL, type="source")
## ajuda sobre un package
help(package=oce)
## exemple de if else
alpha <- 0
if (alpha > 1) {x <- 88} else {x <- -88}
x
## per fer comparacions m?ltiples
p.adjust(c(0.004, 0.0003, 0.005), "BH")
## exemple de factors
gender<-rbinom(10,1,0.5)
gender<-c(gender,9)
table(gender)
gender<-factor(gender,levels=c(0,1),labels=c('home','dona'))
table(gender)
## per saber les dades que hi ha al R
data()
########### spss.get2 ############
source(file.path(RutinesLocals,"spss_varlist.r"))
source(file.path(RutinesLocals,"prepare.r"))
source(file.path(RutinesLocals,"arregla.formats.r"))
library(Hmisc)
xfile<-"./dat/cancer_incidente_npnm_enviado.sav"
dict<-spss_varlist(xfile)
xdates<-dict[grep("^DATE",dict[,2]),"longname"]
dat<-spss.get(xfile,allow="_",use.value.labels=FALSE,datevars=xdates)
dat[,xdates]<-arregla.formats(dat[,xdates])
for (i in 1:ncol(dat)) attr(dat[,i],"vari.label")<-label(dat[,i])
##################################
## per guardar els factors com etiquetes
x1$abo<-as.factor(x1$abo)
ll<-levels(x1$abo)
x1$abo<-as.integer(x1$abo)
attr(x1$abo,"value.labels")<-structure(1:length(ll),names=ll)
attr(x1$abo,"vari.label")<-"ABO"
## per substituir els valor d'edat < 40
sapply(age, function(x) if (x<40) runif(1,40,45) else x)
## per calcular el temps que triga a fer-se una cosa
system.time({ qnorm(0.05/2)})
## Assign a within-id sequence number ("count"), assuming the rows of
## `xalib` are already sorted by id: the counter restarts at 1 each time
## the id changes and increments while it repeats.
xalib$count <- NA
xalib$count[1] <- 1
xnum <- 1
# seq_len() handles the single-row case safely: with nrow == 1 the loop
# body is skipped, whereas 1:(nrow(xalib)-1) would iterate over c(1, 0).
for (i in seq_len(nrow(xalib) - 1)) {
  x1 <- xalib$id[i]
  # scalar condition, so plain if/else instead of ifelse()
  xnum <- if (xalib$id[i + 1] == x1) xnum + 1 else 1
  xalib$count[i + 1] <- xnum
}
# per buscar una funcio, especialment les que estan amagades (son les que tenen un asterix)
getAnywhere(mean)
getAnywhere(print.coxph.penal)
# per buscar en els packages intal?lats
help.search("ancova")
# per buscar a la p?gina web del CRAN
RSiteSearch("ancova")
# utilitzant el paquet SOS
library(sos)
findFn("ancova")
## regular expressions
######################
## busca exactament "36." al comen??ament
x <- c("736.0", "36.", "366.1", "366.")
x[grep("^36\\.", x)]
# busca la primera vegada (^) que surt un numero [0-9] i el substitueix per xxx
sub("^[0-9]","xxx","0124hola")
[1] "xxx124hola"
# busca la primera vegada que surt una sequencia de numeros [0-9]+ i aquesta sequencia la substitueix per xxx
sub("[0-9]+","xxx","0124hola123")
[1] "xxxhola123"
# busca qualsevol (gsub) numero [0-9] i el substitueix per xxx
gsub("[0-9]","xxx","0124hola04")
[1] "xxxxxxxxxxxxholaxxxxxx"
# busca qualsevol (gsub) sequencia de numeros [0-9]+ i la substitueix per xxx
> gsub("[0-9]+","xxx","0124hola04")
[1] "xxxholaxxx"
# busca la primera (sub) sequencia de numeros [0-9]+ i la substitueix per xxx
sub("[0-9]+","xxx","aaaaa0124hola04")
[1] "aaaaaxxxhola04"
# busca la primera (sub) sequencia de numeros [0-9]+ que esta a comen??ament, pero no n'hi ha cap
sub("^[0-9]+","xxx","aaaaa0124hola04")
[1] "aaaaa0124hola04"
sub(" $","","apoefhawpehf ")
[1] "apoefhawpehf"
sub(" $","","apoefhawpehf ")
[1] "apoefhawpehf "
sub("[ ]+$","","apoefhawpehf ")
[1] "apoefhawpehf"
> sub("[ ]+","","apo efhawpe hf")
[1] "apoefhawpe hf"
> sub("[ ]","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("[ ]","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("[ ]","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("[ ]2","","apo efhawpe hf")
[1] "apo efhawpe hf"
> sub("^[ ]+",""," wapoeufhapuwef")
[1] "wapoeufhapuwef"
> sub("^[ ]+",""," wapoeufhapu wef")
[1] "wapoeufhapu wef"
> gsub(" ",""," wapoeufhapu wef ")
[1] "wapoeufhapuwef"
gsub("^[0-9]+","","10987561023asdof?341525iwhapfohe")
[1] "asdof?341525iwhapfohe"
> sub("^[0-9]+","","10987561023asdof?341525iwhapfohe")
[1] "asdof?341525iwhapfohe"
> gsub("[0-9]+","","10987561023asdof?341525iwhapfohe")
[1] "asdof?iwhapfohe"
> gsub("[0-9]","","10987561023asdof?341525iwhapfohe")
[1] "asdof?iwhapfohe"
> grep("[0-9]",c("asd?ofih","askoufh21938"))
[1] 2
> grep("^[0-9]",c("asd?ofih","askoufh21938"))
integer(0)
> grep("[0-9]$",c("asd?ofih","askoufh21938"))
[1] 2
> grep("[0-9]",c("asd?ofih","askoufh21938"))
[1] 2
> grep("[0-9]",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2 3
> grep(".[0-9]+.",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2 3
> grep(".[0-9].",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2 3
> grep(".[0-9].$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep(".[0-9]+.$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep(".[0-9]$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep(".[0-9].$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
[1] 2
> grep("^.[0-9].$",c("asd?ofih","askoufh21938","a?sdlfh039465aposdf"))
integer(0)
> grep(".",c("apofh","apesoh.apoeh"))
[1] 1 2
> grep("\\.",c("apofh","apesoh.apoeh"))
[1] 2
> sub("\\.","[","apesoh.apoeh")
[1] "apesoh[apoeh"
> grep("[",c("apofh","apesoh[apoeh"))
Error in grep("[", c("apofh", "apesoh[apoeh")) :
invalid regular expression '[', reason 'Missing ']''
> grep("\\[",c("apofh","apesoh[apoeh"))
[1] 2
#### apply and sapply
####################
## Timing demo: growing a vector with c() in a loop vs. sapply()+unlist().
N<-100000
donant<-as.data.frame(1:N)
names(donant)<-"parti"
donant$aliq<-rpois(N,3)
## repeat each row several times -- slow version on purpose:
## growing x with c() inside the loop copies it on every iteration (O(n^2)).
system.time({
x<-NULL
for (i in 1:nrow(donant)){
x <- c(x, rep(donant$parti[i],donant$aliq[i]))
}
})
## faster version: build a list of pieces with sapply(), flatten once at the end
system.time(
x2 <- sapply(1:nrow(donant), function(i) rep(donant$parti[i],donant$aliq[i]))
)
x2<-unlist(x2)
## number the occurrences of each individual (1..k within each id)
x2<-sort(x2)
tt<-table(x2)
system.time(
ordre <- sapply(1:length(tt), function(i) 1:tt[i])
)
ordre<-unlist(ordre)
cbind(x2,ordre)[1:100,]
## flag which record is the last one for each id:
## merge in the per-id frequency, then compare it with the running sequence
id <- c(rep(1,4), rep(2, 2), rep(3, 5))
sequ <- c(1,2,3,4,1,2,1,2,3,4,5)
dat <- data.frame(id,sequ)
tt<-table(dat$id)
dat2<-data.frame(id=names(tt),freq=as.integer(tt))
dat<-merge(dat,dat2,by="id",all.x=TRUE)
dat$ultimo<-as.numeric(with(dat,freq==sequ))
##################################################################
########### select the last entry per individual ################
##################################################################
## Starting point: individuals = id_unic; their records are numbered as "id".
## Goal: keep the last "id" of each "id_unic".
id_unic <- c(rep("AAA", 3), rep("BBB", 4), rep("CCC",1), rep("DDD", 2))
id <- sample(seq(1:length(id_unic)))
xdat <- as.data.frame(cbind(id, id_unic))
xdat$id <- as.numeric(as.character(xdat$id))
xdat$id_unic <- as.character(xdat$id_unic)
## sort by individual and then by record number
xdat <- xdat[order(xdat$id_unic, xdat$id), ]
## add the running counter "orden" (1..k within each id_unic)
kk <- table(sort(xdat$id_unic))
orden <- sapply(1:length(kk), function(i) 1:kk[i])
xdat$orden <- unlist(orden)
## count how many times each id_unic appears
tt <- table(xdat$id_unic)
dat2<-data.frame(id_unic=names(tt),freq=as.integer(tt))
## merge the per-individual frequency back in
xdat<-merge(xdat,dat2,by="id_unic",all.x=TRUE)
## rows where orden==freq are the last record of each individual
xdat$ultimo<-as.numeric(with(xdat,freq==orden))
##################################################################
##################################################################
##################################################################
## format a date as a character string
(fecha <- chron("15-05-2016", format="d-m-Y", out.format=c(dates="day-mon-year")))
class(fecha)
(fecha2 <- format(as.Date(fecha), "%d-%m-%Y"))
class(fecha2)
## find which values become missing when converting character to numeric
## (e.g. "2,2" with a decimal comma).  trim() is presumably gdata::trim -- confirm.
x<-c("2.1","2,2",NA)
x<-trim(x)
x<-ifelse(x=='',NA,x)
ww1<-which(is.na(x))
x2<-as.double(x)
ww2<-which(is.na(x2))
ww<-ww2[!ww2%in%ww1]
x[ww]
## store the SPSS-style value labels of a variable as character strings
## (assumes wik$flow carries a "value.labels" attribute -- confirm with the data)
xxx<-NULL
x2<-wik$flow
for (i in 1:length(x2)){
x1<-names(attr(wik$flow,"value.labels")[attr(wik$flow,"value.labels")==x2[i]])
xxx<-rbind(xxx,x1)
}
wik$flow2<-as.vector(xxx)
## change the order of the levels of a factor
dat$bmicat<-factor(dat$bmicat, c("<24", "[24-30)", "30+"))
## the system date
Sys.Date()
## read data from a (mounted sftp) server
setwd("/run/user/jvila/gvfs/sftp:host=134.0.8.34,user=ars/home/ars/ESTUDIS/ALTRES/jvila/mgil/partners/")
dat<-read.csv("partners.csv", sep=";", header = TRUE, allowEscapes=FALSE)
## read from MySQL
install.packages("DBI")
install.packages("RMySQL",lib= "/home/jvila/R/i486-pc-linux-gnu-library/3.1/lib")
## via the Ubuntu Software Centre I installed
## libmysqlclient-dev
## then installed the linux package (after downloading the tar.gz):
## R CMD INSTALL /home/jvila/Downloads/RMySQL_0.9-3.tar.gz
library(RMySQL)
## NOTE(review): plaintext credentials in source -- rotate/remove before sharing.
con2 <- dbConnect(MySQL(), user="web", password="ieT6io9z", dbname="web", host="134.0.8.34")
con2 <- dbConnect(MySQL(), user="userdbcr", password="7437fgs78", dbname="iCRDvas", host="crd.ivascular.es")
dbGetQuery(con2, "SET NAMES utf8")
con2 <- dbConnect(MySQL(), user="root", password="xxx127",
dbname="Modul1", host="localhost")
dbListTables(con2)
dbListFields(con2, "congelador")
mydata <- dbReadTable(con2, "congelador")
dbWriteTable(con2, "mmar", subtr9500)
dbDisconnect(con2)
## find the position of a character inside a string
regexpr("a", "bcvgdhdbbfassss")[[1]]
##
install.packages("png",lib= "/home/jvila/R/i486-pc-linux-gnu-library/3.1/lib")
library(png)
## install from a tar.gz
install.packages("C:/programs/Dropbox/JVila/compareGroups_2.1.tar.gz", repos= NULL,
type= "source")
cGroupsWUI()
## delay execution
?Sys.sleep
## suppress warnings
options(warn=-1)
## load an .RData from the (mounted) server
setwd("/run/user/jvila/gvfs/sftp:host=134.0.8.34,user=ars/home/ars/ESTUDIS/L02_MUTUA/Analisi/screening/")
load("./dat/2013-11-13.RData")
## order a factor by its value labels
value.lab<-c("<35"=1, "35-44"=2, "45-54"=3, "55+"=4)
dat$agegr<-factor(dat$agegr,levels=sort(value.lab),labels=names(sort(value.lab)))
## search for a string across files
## (note: pattern=".r$" matches any character followed by "r"; "\\.r$" would
##  match only the .r extension -- presumably the intent)
ff<-list.files("U:/Estudis/Tancats/A37_GEDAPS",pattern=".r$",recursive=TRUE,full=TRUE)
for (i in ff){
temp<-scan(what="character",file=i,sep="\n",quiet=TRUE)
if(length(ww<-grep(">8",temp))>0){
cat("---------",i,"-----------\n")
print(temp[ww])
cat("\n")
}
}
## show the R session/version information
sessionInfo()
## run R "vanilla" from the command line (the next two lines are SHELL commands,
## not R code -- kept verbatim as notes)
/usr/bin/R --vanilla --slave --args "Hospital de la Monta??a", "67676767678" < /home/ars/ESTUDIS/L02_MUTUA/Analisi/Cardiovascular/empresa/Maker.R
/usr/bin/R --vanilla --slave < /home/ars/ESTUDIS/L01_DEMCOM/Analisi/queries/maker.R
## ascii and utf8 codes (integerToAscii() comes from the oce package)
library(oce)
integerToAscii(126L)
paste(rep("??", 10), collapse="")
paste(rep(integerToAscii(175L), 10), collapse="")
cat(integerToAscii(194L), integerToAscii(175L), sep="" )
today<-chron(as.character(Sys.Date()), format="Y-m-d", out.format="d-mon-Y")
sessionInfo()
## estimate memory requirements of an object of a given size
library(memuse)
howbig(10000, 500)
## delay execution for some seconds
?Sys.sleep()
################################################################################
############ EXAMPLE: insert data into MySQL ##################################
################################################################################
## insert into a SQL database
library(RMySQL)
library(chron)
# db connect (NOTE(review): plaintext credentials -- rotate/remove)
con<- dbConnect(MySQL(), user="web", password="ieT6io9z",dbname="web", host="localhost")
taula<-"salut_laboral_tabac"
ndatasql<-dbListFields(con,taula)
dat<-smk
ndatar<-names(dat)
## columns present in both R data and SQL table / only in the SQL table
## (%nin% is presumably Hmisc::%nin% -- confirm it is loaded)
xxx<-ndatar[ndatar%in%ndatasql]
yyy<-ndatasql[ndatasql%nin%ndatar]
dat$idu<-""
dat$time<-format(Sys.time(), "%Y-%m-%d %H:%M:%S")
# order and select the variables; the bare names below are typed interactively
# as input to scan() (they are data, not R statements)
varilab<-scan(what="character", sep="\n")
idu
id
cigar
fuma
inifum
puros
pipas
minutes
dificul
whatcigar
smkmorning
smkill
hasta1
morning
cigar2
ncigar
fager
fagercat
situ
time
dat<-dat[, varilab]
# build the INSERT statement
# NOTE(review): string-built SQL is vulnerable to injection/quoting bugs;
# prefer a parameterized query (dbExecute with params) for untrusted values.
cadena<-paste("INSERT INTO ", taula," VALUES('",paste(dat[1,],collapse=","),"')",sep ="")
cadena<-gsub(",","','",cadena)
#dbGetQuery(con,cadena)
## extract, from a path, the file-name part
indiv<-basename(xfile)
## strip the leading and trailing parts of the name
indiv<-sub("^indiv_","",indiv)
indiv<-sub("\\.csv$","",indiv)
#############################################################################
### append rows to an ACCESS database
#############################################################################
setwd("c:/xxx")
a <- c(1,2,3,4,5)
b <- c("a", "b", "c", "d", "e")
## cbind(a, b) coerces everything to character; pre-R-4.0 the data frame then
## holds factors, so as.numeric() below may return level codes -- TODO confirm.
dat <- as.data.frame(cbind(a, b))
names(dat) <- c("numero", "caracter")
dat$numero <- as.numeric(dat$numero)
dat$caracter <- as.character(dat$caracter)
dat2 <- dat
dat2$numero <- dat2$numero*10
export.ACCESS (dat, "xxx.mdb", table.name= "mitabla")
con <- odbcConnectAccess("xxx.mdb")
sqlSave(con, dat=dat2, tablename = "mitabla", append = TRUE,
rownames = FALSE, safer = FALSE)
###
## clear the console (form feed)
cat("\014")
###############################################################################
## List the objects in an environment together with their memory footprint.
##
## Based on postings by Petr Pikal and David Hinds to the r-help list in 2004,
## later tweaked by Dirk Eddelbuettel
## (http://stackoverflow.com/questions/1358003/tricks-to-manage-the-available-memory-in-an-r-session),
## with a few local tweaks (size in megabytes, preferred defaults).
##
## Args:
##   pos        search-path position to inspect (1 = global environment).
##   pattern    optional regular expression forwarded to ls().
##   order.by   output column to sort by: "Type", "Size", "Rows" or "Columns".
##   decreasing sort direction for order.by.
##   head, n    if head is TRUE, keep only the first n rows after sorting.
## Returns a data frame (one row per object, row names = object names) with
## columns Type, Size (megabytes), Rows and Columns.
.ls.objects <- function (pos = 1, pattern, order.by = "Size", decreasing=TRUE, head = TRUE, n = 10) {
    # helper: apply fn to each object named in `names`, looked up at `pos`
    napply <- function(names, fn) sapply(names, function(x)
                                         fn(get(x, pos = pos)))
    names <- ls(pos = pos, pattern = pattern)
    # Guard: with no matching objects, sapply() returns list() and the
    # data.frame construction below fails; return an empty, correctly
    # shaped result instead.
    if (length(names) == 0L) {
        return(data.frame(Type = character(0), Size = numeric(0),
                          Rows = numeric(0), Columns = numeric(0)))
    }
    obj.class <- napply(names, function(x) as.character(class(x))[1])
    obj.mode <- napply(names, mode)
    # fall back to mode() when class() is unavailable
    obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
    obj.size <- napply(names, object.size) / 10^6 # megabytes
    obj.dim <- t(napply(names, function(x)
                        as.numeric(dim(x))[1:2]))
    # dimensionless objects (plain vectors): report their length as "Rows"
    vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
    obj.dim[vec, 1] <- napply(names, length)[vec]
    out <- data.frame(obj.type, obj.size, obj.dim)
    names(out) <- c("Type", "Size", "Rows", "Columns")
    out <- out[order(out[[order.by]], decreasing=decreasing), ]
    if (head)
        out <- head(out, n)
    out
}
.ls.objects()
################################################################################
## convert variable/value labels to UTF-8 (from latin1)
Encoding(attr(dat$pesfuer, "vari.label")) <- "latin1"
attr(dat$pesfuer, "vari.label") <- iconv(attr(dat$pesfuer, "vari.label"), "latin1", "UTF-8")
Encoding(names(attr(dat$pesfuer, "value.labels"))) <- "latin1"
names(attr(dat$pesfuer, "value.labels"))<- iconv(names(attr(dat$pesfuer, "value.labels")), "latin1", "UTF-8")
#### packages routinely installed on a fresh machine
install.packages("shiny")
install.packages("compareGroups")
install.packages("gam")
install.packages("png")
install.packages("epitools")
install.packages("pROC")
install.packages("psych")
install.packages("plotrix")
install.packages("knitr")
install.packages("chron")
install.packages("rgdal")
install.packages("pgirmess")
install.packages("stringr")
install.packages("MASS")
install.packages("nnet")
install.packages("car")
install.packages("RODBC")
install.packages("survival")
install.packages("lattice")
install.packages("cluster")
install.packages("Hmisc")
install.packages("xtable")
install.packages("gdata")
install.packages("oce")
install.packages("tcltk2")
install.packages("odfWeave")
install.packages("Rcmdr")
install.packages("extrafont")
install.packages("xlsx")
## find the version of an installed package
packageDescription("shiny")
## build a LaTeX table from a table2() result
## (the <<...>>= / @ markers below are knitr chunk delimiters and the \... lines
##  are LaTeX: this fragment belongs in an .Rnw file, kept here verbatim as notes)
<<echo=FALSE, results='hide', warning=FALSE, message=FALSE>>=
xdat <- prepare(dat[, c("id", "idcentro")])
x <- table2(xdat$idcentro)
yy <- cbind(unlist(attr(x, "dimnames")[1]), x[1:length(x)])
xtable(yy)
@
\begin{table}[H]
\centering
\caption{Recruited participants by center}
\
\\
\begin{tabular}{lr}
\hline
&\\
& N (\%)\\
&\\
<<echo=FALSE, results='asis', warning=FALSE, message=FALSE>>=
print.xtable(xtable(yy), only.contents=TRUE, include.rownames = FALSE,
include.colnames=FALSE, hline.after=FALSE)
@
\hline
\end{tabular}
\end{table}
## suppress the printed output of an expression by sinking to /dev/null
{sink("/dev/null"); x <- table2(dat$avulsio, margin=0); sink()}
## work against the (mounted sftp) server
setwd("/run/user/1000/gvfs/sftp:host=134.0.8.34,user=ars/home/ars")
list.files()
################################################################################
################################################################################
## draw the greater-than-or-equal / less-than-or-equal symbol in plot titles
## using plotmath; phantom("") supplies the empty operands around <= / >=
plot(0, 0)
title(main=
eval(parse(text='expression(phantom("")<=phantom(""))'))
)
aaa <- "xxx"
plot(0, 0)
title(main=
eval(substitute(expression( a + phantom("")<=phantom("")), list(a = aaa)))
)
aaa <- "xxx"
bbb <- "yyy"
plot(0, 0)
title(main=
eval(substitute(expression(paste(a, phantom("")<=phantom(""), b)),
list(a = aaa, b= bbb)))
)
bbb <- "750 Kcal/sem."
plot(0, 0)
title(main=
eval(substitute(expression(paste(phantom("")>=phantom(""), b)),
list(b= bbb)))
)
bbb <- "750 Kcal/sem."
ccc <- " = 80%"
plot(0, 0)
text(0,-0.2, eval(substitute(expression(paste(phantom("")>=phantom(""), b, c)),
list(b= bbb, c=ccc)))
)
################################################################################
################################################################################
## convert the encoding of a character column (latin1 -> UTF-8)
Encoding(dat$puesto) <- "latin1"
dat$puesto <- iconv(dat$puesto, "latin1", "UTF-8")
################################################################################
################################################################################
## modify cells of an EXCEL workbook (XLConnect / xlsx; both need Java)
rm(list=ls())
setwd("/DATA/scratch_isaac/EUROTRACS")
# must be set BEFORE the Java-based packages are loaded
options(java.parameters = "-Xmx4g")
library(XLConnect)
library(xlsx)
# work on a copy so the original workbook is preserved
file.remove("suma2.xlsx")
file.copy("suma.xlsx", "suma2.xlsx",overwrite=TRUE)
# read input and ouput
wb <- XLConnect::loadWorkbook("suma2.xlsx")
XLConnect::readWorksheet(wb, sheet = "Hoja1",header=FALSE,startRow=1,startCol=1,endRow=8,endCol=4)
#xlsx::read.xlsx(file="suma2.xlsx", sheetName="Hoja1", rowIndex=1:3,colIndex=1,header=FALSE)
# modify cells, then force Excel formulas to be recalculated before re-reading
writeNamedRegionToFile("suma2.xlsx",2, name="yyy",formula = "Hoja1!$A$1",header=FALSE,rownames=NULL)
wb <- XLConnect::loadWorkbook("suma2.xlsx")
XLConnect::setForceFormulaRecalculation(wb, sheet = "Hoja1", TRUE)
XLConnect::readWorksheet(wb, sheet = "Hoja1",header=FALSE,startRow=1,startCol=1,endRow=4,endCol=1)
## increase the Java heap for the Excel packages
options(java.parameters = "-Xmx4000m")
## enable xlsx reading support (gdata)
installXLSXsupport()
## read Excel dates that arrive as serial numbers
chron(as.numeric(as.character(dat$fecha))-365.5*70+16, out.format = "d/m/yy")
## Rejection-sampling loop using Recall(): keep simulating two groups until the
## t-test between them is clearly non-significant (p > 0.2), then return the data.
## SimCon() is an external/project function (not defined here) -- presumably it
## simulates a continuous variable with the given n, mean and sd; confirm.
mydata <- function() {
# group sizes drawn uniformly in [180, 190]
n1<-round(runif(1, 180, 190), 0)
mcguill1<-SimCon(n1,29.8,11.9,10,100,0)
n0<-round(runif(1, 180, 190), 0)
mcguill0<-SimCon(n0,29.8,11.9,10,100,0)
group<-c(rep(1,n1), rep(0,n0))
dat<-as.data.frame(cbind(c(mcguill1, mcguill0), group))
names(dat)<-c("mcguill", "group")
# m1/m0/tval are computed but not used below (kept for interactive inspection)
m1<-format(signif(mean(subset(dat, group==1)$mcguill), digits=3), scientific=FALSE)
m0<-format(signif(mean(subset(dat, group==0)$mcguill), digits=3), scientific=FALSE)
tval<-signif(with(dat, t.test(mcguill~group, var.equal=TRUE))$statistic, 4)
pval<-with(dat, t.test(mcguill~group, var.equal=TRUE))$p.value
if (pval > 0.2) return(dat)
# otherwise try again; Recall() re-invokes the enclosing function
Recall()
}
dat <- mydata()
|
## ---------------------------
##
## Purpose of script:
##
## Author: Ari-Pekka Jokinen
##
## Date Created: 2020-10-26
##
## Copyright (c) Ari-Pekka Jokinen, 2020
## Email: ari-pekka.jokinen@helsinki.fi
##
## ---------------------------
##
## Notes:
##
##
## ---------------------------
#
#
#
#
#
library(raster)
library(rgdal)
# set work dir
# NOTE(review): absolute setwd() makes the script machine-specific -- consider
# relative paths or a project root.
setwd("C:/Users/Ap/Documents/ProtectedAreas/Bhutan/")
# read raster file (Hansen forest cover 2018, thresholded at >30%)
r <- raster("Hansen_forestcover2018_above30.tif")
# set time limit (seconds) for the long-running distance computation
setTimeLimit(1200)
# calculate, for every cell, the distance to the nearest cell with value 1
# (i.e. distance to forest)
distgrid <- gridDistance(r, origin=1)
# write output as 32-bit float GeoTIFF
writeRaster(distgrid, filename="dist_to_forest_above30_2018.tif", format="GTiff", datatype="FLT4S")
| /R/distGrid.R | no_license | aripekkj/protected_areas | R | false | false | 752 | r | ## ---------------------------
##
## Purpose of script:
##
## Author: Ari-Pekka Jokinen
##
## Date Created: 2020-10-26
##
## Copyright (c) Ari-Pekka Jokinen, 2020
## Email: ari-pekka.jokinen@helsinki.fi
##
## ---------------------------
##
## Notes:
##
##
## ---------------------------
#
#
#
#
#
library(raster)
library(rgdal)
# set work dir
# NOTE(review): absolute setwd() makes the script machine-specific -- consider
# relative paths or a project root.
setwd("C:/Users/Ap/Documents/ProtectedAreas/Bhutan/")
# read raster file (Hansen forest cover 2018, thresholded at >30%)
r <- raster("Hansen_forestcover2018_above30.tif")
# set time limit (seconds) for the long-running distance computation
setTimeLimit(1200)
# calculate, for every cell, the distance to the nearest cell with value 1
# (i.e. distance to forest)
distgrid <- gridDistance(r, origin=1)
# write output as 32-bit float GeoTIFF
writeRaster(distgrid, filename="dist_to_forest_above30_2018.tif", format="GTiff", datatype="FLT4S")
|
# -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
library(ggplot2)
## Register a FeaturesAnalysis that plots hydroxyl-donor hydrogen-bond geometry
## (A-H distance, BAH angle, AHD angle, chi torsion) per sample source.
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "OHdonor_AHdist_morse_fit",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){
# Query the four geometry columns for hydrogen bonds whose donor is a hydroxyl
# (hbdon_AHX / hbdon_HXL); backbone acceptors (hbacc_PBA) are excluded in SQL.
extract_transform_features <- function(sample_sources){
sele <-"
SELECT
geom.AHdist,
geom.cosBAH,
geom.cosAHD,
geom.chi
FROM
hbond_geom_coords AS geom,
hbonds AS hb,
hbond_sites AS don, hbond_sites AS acc
WHERE
geom.struct_id = hb.struct_id AND geom.hbond_id = hb.hbond_id AND
don.struct_id = hb.struct_id AND don.site_id = hb.don_id AND
acc.struct_id = hb.struct_id AND acc.site_id = hb.acc_id AND
acc.HBChemType != 'hbacc_PBA' AND
(don.HBChemType = 'hbdon_AHX' OR don.HBChemType = 'hbdon_HXL');"
query_sample_sources(sample_sources, sele)
}
all_geom <- extract_transform_features(sample_sources)
# ---- A-H distance, all acceptor types ----
plot_id <- "OHdonor_AHdist_all_acceptor_types"
dens <- estimate_density_1d(
data = all_geom,
ids = c("sample_source"),
variable = "AHdist",
weight_fun = radial_3d_normalization)
# NOTE(review): each plot is assigned to `p` but `p` is never passed to
# save_plots(); presumably save_plots() saves the last ggplot -- confirm.
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=x, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds A-H Distance\n(normalized for equal weight per unit distance)") +
scale_y_continuous(
"Feature Density",
limits=c(0,2.9), breaks=0:2) +
scale_x_continuous(
expression(paste('Acceptor -- Hydrogen Distance (', ring(A), ')')),
limits=c(1.4,2.7), breaks=c(1.6, 1.9, 2.2, 2.6))
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- BAH angle, all acceptor types ----
plot_id <- "OHdonor_cosBAH_all_acceptor_types"
dens <- estimate_density_1d(
data = all_geom,
ids = c("sample_source"),
variable = "cosBAH")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*180/pi, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle \n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Base -- Acceptor -- Hydrogen (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- AHD angle, all acceptor types ----
# NOTE(review): the title says "BAH Angle" but the plotted variable is cosAHD;
# looks like a copy/paste from the previous section -- confirm.
plot_id <- "OHdonor_cosAHD_all_acceptor_types"
dens <- estimate_density_1d(
data = all_geom,
ids = c("sample_source"),
variable = "cosAHD")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*180/pi, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle \n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor -- Hydrogen -- Donor (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- chi torsion, all acceptor types ----
# NOTE(review): acos(x)*360/pi here vs acos(x)*180/pi in the angle plots above;
# confirm which scaling is intended for the wrapped chi density.
plot_id <- "OHdonor_chi_all_acceptor_types"
dens <- estimate_density_1d_wrap(
data = all_geom,
ids = c("sample_source"),
variable = "chi")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle \n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor Base -- Acceptor Torsion (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- sidechain-acceptor subset ----
# NOTE(review): the query selects only AHdist/cosBAH/cosAHD/chi, so
# all_geom$acc_chem_type is NULL here and this filter keeps zero rows; the SQL
# WHERE clause already excludes hbacc_PBA, so the subset may be redundant --
# confirm the intended column / behavior.
sidechain_geom <- all_geom[all_geom$acc_chem_type != "hbacc_PBA",]
plot_id <- "OHdonor_AHdist_sidechain_acceptor_types"
dens <- estimate_density_1d(
data = sidechain_geom,
ids = c("sample_source"),
variable = "AHdist",
weight_fun = radial_3d_normalization)
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=x, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds A-H Distance to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
scale_y_continuous("Feature Density", limits=c(0,2.9), breaks=0:2) +
scale_x_continuous(
expression(paste('Acceptor -- Proton Distance (', ring(A), ')')),
limits=c(1.4,2.7), breaks=c(1.6, 1.9, 2.2, 2.6))
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
plot_id <- "OHdonor_cosBAH_sidechain_acceptor_types"
dens <- estimate_density_1d(
data = sidechain_geom,
ids = c("sample_source"),
variable = "cosBAH")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Base -- Acceptor -- Hydrogen (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
plot_id <- "OHdonor_cosAHD_sidechain_acceptor_types"
dens <- estimate_density_1d(
data = sidechain_geom,
ids = c("sample_source"),
variable = "cosAHD")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds AHD Angle to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor -- Hydrogen -- Donor (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
plot_id <- "OHdonor_chi_sidechain_acceptor_types"
dens <- estimate_density_1d_logspline(
data = sidechain_geom,
ids = c("sample_source"),
variable = "chi")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds chi Torsion Angle to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor Base -- Acceptor Torsion (degrees)')),
y="log(FeatureDensity + 1)")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
})) # end FeaturesAnalysis
| /inst/scripts/analysis/plots/hbonds/hydroxyl_sites/OHdonor_AHdist_morse_fit.R | no_license | momeara/RosettaFeatures | R | false | false | 6,847 | r | # -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
library(ggplot2)
## Register a FeaturesAnalysis that plots hydroxyl-donor hydrogen-bond geometry
## (A-H distance, BAH angle, AHD angle, chi torsion) per sample source.
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "OHdonor_AHdist_morse_fit",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){
# Query the four geometry columns for hydrogen bonds whose donor is a hydroxyl
# (hbdon_AHX / hbdon_HXL); backbone acceptors (hbacc_PBA) are excluded in SQL.
extract_transform_features <- function(sample_sources){
sele <-"
SELECT
geom.AHdist,
geom.cosBAH,
geom.cosAHD,
geom.chi
FROM
hbond_geom_coords AS geom,
hbonds AS hb,
hbond_sites AS don, hbond_sites AS acc
WHERE
geom.struct_id = hb.struct_id AND geom.hbond_id = hb.hbond_id AND
don.struct_id = hb.struct_id AND don.site_id = hb.don_id AND
acc.struct_id = hb.struct_id AND acc.site_id = hb.acc_id AND
acc.HBChemType != 'hbacc_PBA' AND
(don.HBChemType = 'hbdon_AHX' OR don.HBChemType = 'hbdon_HXL');"
query_sample_sources(sample_sources, sele)
}
all_geom <- extract_transform_features(sample_sources)
# ---- A-H distance, all acceptor types ----
plot_id <- "OHdonor_AHdist_all_acceptor_types"
dens <- estimate_density_1d(
data = all_geom,
ids = c("sample_source"),
variable = "AHdist",
weight_fun = radial_3d_normalization)
# NOTE(review): each plot is assigned to `p` but `p` is never passed to
# save_plots(); presumably save_plots() saves the last ggplot -- confirm.
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=x, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds A-H Distance\n(normalized for equal weight per unit distance)") +
scale_y_continuous(
"Feature Density",
limits=c(0,2.9), breaks=0:2) +
scale_x_continuous(
expression(paste('Acceptor -- Hydrogen Distance (', ring(A), ')')),
limits=c(1.4,2.7), breaks=c(1.6, 1.9, 2.2, 2.6))
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- BAH angle, all acceptor types ----
plot_id <- "OHdonor_cosBAH_all_acceptor_types"
dens <- estimate_density_1d(
data = all_geom,
ids = c("sample_source"),
variable = "cosBAH")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*180/pi, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle \n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Base -- Acceptor -- Hydrogen (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- AHD angle, all acceptor types ----
# NOTE(review): the title says "BAH Angle" but the plotted variable is cosAHD;
# looks like a copy/paste from the previous section -- confirm.
plot_id <- "OHdonor_cosAHD_all_acceptor_types"
dens <- estimate_density_1d(
data = all_geom,
ids = c("sample_source"),
variable = "cosAHD")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*180/pi, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle \n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor -- Hydrogen -- Donor (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- chi torsion, all acceptor types ----
# NOTE(review): acos(x)*360/pi here vs acos(x)*180/pi in the angle plots above;
# confirm which scaling is intended for the wrapped chi density.
plot_id <- "OHdonor_chi_all_acceptor_types"
dens <- estimate_density_1d_wrap(
data = all_geom,
ids = c("sample_source"),
variable = "chi")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle \n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor Base -- Acceptor Torsion (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
# ---- sidechain-acceptor subset ----
# NOTE(review): the query selects only AHdist/cosBAH/cosAHD/chi, so
# all_geom$acc_chem_type is NULL here and this filter keeps zero rows; the SQL
# WHERE clause already excludes hbacc_PBA, so the subset may be redundant --
# confirm the intended column / behavior.
sidechain_geom <- all_geom[all_geom$acc_chem_type != "hbacc_PBA",]
plot_id <- "OHdonor_AHdist_sidechain_acceptor_types"
dens <- estimate_density_1d(
data = sidechain_geom,
ids = c("sample_source"),
variable = "AHdist",
weight_fun = radial_3d_normalization)
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=x, y=y, colour=sample_source)) +
geom_indicator(aes(indicator=counts, colour=sample_source, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds A-H Distance to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
scale_y_continuous("Feature Density", limits=c(0,2.9), breaks=0:2) +
scale_x_continuous(
expression(paste('Acceptor -- Proton Distance (', ring(A), ')')),
limits=c(1.4,2.7), breaks=c(1.6, 1.9, 2.2, 2.6))
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
plot_id <- "OHdonor_cosBAH_sidechain_acceptor_types"
dens <- estimate_density_1d(
data = sidechain_geom,
ids = c("sample_source"),
variable = "cosBAH")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds BAH Angle to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Base -- Acceptor -- Hydrogen (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
plot_id <- "OHdonor_cosAHD_sidechain_acceptor_types"
dens <- estimate_density_1d(
data = sidechain_geom,
ids = c("sample_source"),
variable = "cosAHD")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds AHD Angle to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor -- Hydrogen -- Donor (degrees)')),
y="Feature Density")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
plot_id <- "OHdonor_chi_sidechain_acceptor_types"
dens <- estimate_density_1d_logspline(
data = sidechain_geom,
ids = c("sample_source"),
variable = "chi")
p <- ggplot(data=dens) + theme_bw() +
geom_line(aes(x=acos(x)*360/pi, y=y, colour=sample_source)) +
geom_indicator(aes(colour=sample_source, indicator=counts, group=sample_source)) +
ggtitle("Hydroxyl Donor Hydrogen Bonds chi Torsion Angle to Sidechain Acceptors\n(normalized for equal weight per unit distance)") +
labs(x=expression(paste('Acceptor Base -- Acceptor Torsion (degrees)')),
y="log(FeatureDensity + 1)")
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
})) # end FeaturesAnalysis
|
###############################################################################
## R LECTURE 2: Regression and Testing ##
## Quantitative Analysis - 2019 Spring 3/4 ##
###############################################################################
#
#
###############################################################################
## Author ##
## 王元翰 Spencer Wang ##
###############################################################################
####################################################################
# Section 1: List and Data Frames #
####################################################################
#------
# List
#------
#Lists are a special type of vector that can contain elements of different classes.
#Lists are a very important data type in R and you should get to know them well.
n <-c(2,3,5)
s <-c("aa","bb","cc","dd","ee")
b <-c(TRUE,FALSE,TRUE,FALSE,FALSE)
x <-list(n,s,b,3) # list() is the code to create list
x
x = list(1, "a", TRUE, 1 + 4i)
x
class(x)
y = c(1, "a", TRUE, 1 + 4i)
y
class(y) # Observe that elements in y are now "characters".
x[[1]]+1
y[1]+1 # since y[1] = "1", and "1" are character to R, NOT numbers.
#----------------
# Indexing a list
#----------------
v=list(bob=c(2,3,5),john=c("aa","bb"))
v
names(v) # call out the names in the list
v$bob # "$" will extract the value under "bob"
##Example
x = rnorm(100)
y = rnorm(100)
LM = summary(lm(y~x)) # This is the summary of the simple linear regression x on y.
# In this summary we can see the coefficients, p-value, R-squared,...
LM
names(LM) # Suppose we want to extract the value of R-squared in this summary, then the code is
LM$residuals # we can extract the residuals form the model.
#------------
# Data frames
#------------
# A data frame is similar to a matrix but more useful. Basically, it's just like a spreadsheet.
N = 10
u = rnorm(N)
x1 = rnorm(N)
x2 = rnorm(N)
y = 1 + x1 + x2 + u
mydataframe = data.frame(y,x1,x2) # data.frame() is the code to create a data frame
mydataframe
names(mydataframe) # Note that the columns in a data frame are "named"
mydataframe["x1"] # which is equivalent to "mydataframe$x1"
mydataframe$x1
mymatrix <- cbind(y, x1, x2)
mymatrix
names(mymatrix) # NULL: a plain matrix has no names attribute (only dimnames)
mymatrix["x1"] # Note that here the data are not named, hence we can not extract the data this way (returns NA)
mymatrix$x1 # this errors: the "$" operator is invalid for atomic vectors/matrices
## Example
id <- c(1, 2, 3, 4)
age <- c(15, 25, 26, 38)
sex <- c("M", "F", "M", "M")
pay <- c(20, 3, 67, 98)
X.dataframe <- data.frame(id, age, sex, pay)
X.dataframe
X.dataframe [3, 2]
X.dataframe$age # refer to the content in age
X.dataframe[2] # refer to the second column
edit(X.dataframe) # click the cell twice, then edit like excel
## Example: R in-built data set
data(mtcars) # load the data set "mtcars" from R
mtcars # use help(mtcars) to get help from the definition of this dataset in R
mtcars["Mazda RX4", "cyl"] # select a specific value, identical to "mtcars[1,2]"
mtcars["Mazda RX4",] # select a row
mtcars$cyl # select a column
nrow(mtcars) # number of rows
ncol(mtcars)
#------------------
# Naming Data frams
#------------------
x = data.frame(company_A = 1:4, familyfirm = c(T, T, F, F))
rownames(x) = c("1998", "1999", "2000", "2001")
x
row.names(x) # row names
names(x) # column names
#---------------------------------
# Logical indexing for Data frams
#---------------------------------
## Question: how do we select the column "drat" in mtcar with the condition that "am=0"?
mtcars$am # the column "am" in mtcar
mtcars$am==0 # indicate the location where am=0 as "True"
L1 = mtcars$am==0
mtcars[L1,] # here is the full data set with "am=0"
mtcars[L1,]$drat # job done!
# Here's a short cut, using the function "subset()"
SC = subset(mtcars,am==0)
SC$drat
# Here's the data set with both "am=0" and "gear=4"
subset(mtcars,am==0 & gear==4)
#------------------------------
# Importing and Exporting Data
#------------------------------
# Import csv files. "C:\\Users\\User\\Desktop" is the directory while "crime.csv" is the file name.
read.csv("C:\\Users\\User\\Desktop\\crime.csv")
# Export the data set "Crime" as a csv file (the object "Crime" must already exist in the workspace).
# "C:\\Users\\User\\Desktop" is the directory while "crime.csv" is the file name.
write.table(Crime, file = "C:\\Users\\User\\Desktop\\crime.csv", sep = ",")
####################################################################
# Section 2: Regrssion and Testing #
####################################################################
#----------------
# Regression: OLS
#----------------
# Clear the workspace. Use TRUE rather than T: T is an ordinary variable that
# can be reassigned, while TRUE is a reserved word.
rm(list = ls(all = TRUE))
## Simple linear regression with intercept
# Simulate data from the model y = 2 + 5*x + epsilon
n <- 5000
x <- rnorm(n, mean = 0, sd = 10)
epsilon <- rnorm(n, mean = 0, sd = 1)
y <- 2 + 5*x + epsilon
# regress "by hand" using the normal equations: beta_hat = (X'X)^(-1) X'y
X <- cbind(1, x)
beta_hat <- solve(t(X) %*% X) %*% t(X) %*% y
beta_hat # should be close to the true values (2, 5)
## Run linear regression: lm()
lm(y ~ x)     # regress y on x (with intercept)
lm(y ~ x - 1) # regress y on x without intercept
## Call the data stored automatically by R
lm(y ~ x)
summary(lm(y ~ x))
coef(lm(y ~ x))
coef(summary(lm(y ~ x)))
names(summary(lm(y ~ x))) # see the data that is stored in "summary(lm(y~x))"
#------------
# Coef Tests
#------------
rm(list = ls(all = TRUE)) # use TRUE rather than T (T can be reassigned)
# constructing data
n <- 500
x <- rnorm(n, mean = 0, sd = 1)
epsilon <- rnorm(n, mean = 0, sd = 5)
y <- 2 + 5*x + epsilon
# Now in order to test the coefficients, we first have to install the package "lmtest"
library(lmtest) # call the package
lm.fit <- lm(y ~ x)
coeftest(lm.fit) # t-tests of the coefficients under the IID assumptions
#---------------------------
# Testing linear hypothesis
#---------------------------
# we first construct the data
rm(list = ls(all = TRUE))
# constructing data
n <- 500
x_1 <- rnorm(n, mean = 0, sd = 1)
x_2 <- rnorm(n, mean = 0, sd = 1)
x_3 <- rnorm(n, mean = 0, sd = 1)
x_4 <- rnorm(n, mean = 0, sd = 1)
epsilon <- rnorm(n, mean = 0, sd = 5)
y <- 5 + 1*x_1 + 2*x_2 + 3*x_3 + 4*x_4 + epsilon
lm.fit <- lm(y ~ x_1 + x_2 + x_3 + x_4)
# First we need to install the package "car"
# Now we wish to test "b1=b2=b3=b4=1", the following code will do the trick
library(car)
library(sandwich)
linearHypothesis(lm.fit, c("x_1 = 1", "x_2 = 1", "x_3 = 1", "x_4 = 1")) # linear hypothesis (F test) with IID assumption
# For the test "b1=1, b2=2, b3=3, b4=4"
linearHypothesis(lm.fit, c("x_1 = 1", "x_2 = 2", "x_3 = 3", "x_4 = 4"))
# For the test "b1+b2+b3 = 6"
linearHypothesis(lm.fit, c("x_1 + x_2 + x_3 = 6"))
| /HW2/R_Lec-2_20190304.R | no_license | JustinBear99/QA | R | false | false | 6,615 | r | ###############################################################################
## R LECTURE 2: Regression and Testing ##
## Quantitative Analysis - 2019 Spring 3/4 ##
###############################################################################
#
#
###############################################################################
## Author ##
## 王元翰 Spencer Wang ##
###############################################################################
####################################################################
# Section 1: List and Data Frames #
####################################################################
#------
# List
#------
#Lists are a special type of vector that can contain elements of different classes.
#Lists are a very important data type in R and you should get to know them well.
n <-c(2,3,5)
s <-c("aa","bb","cc","dd","ee")
b <-c(TRUE,FALSE,TRUE,FALSE,FALSE)
x <-list(n,s,b,3) # list() is the code to create list
x
x = list(1, "a", TRUE, 1 + 4i)
x
class(x)
y = c(1, "a", TRUE, 1 + 4i)
y
class(y) # Observe that elements in y are now "characters".
x[[1]]+1
y[1]+1 # since y[1] = "1", and "1" are character to R, NOT numbers.
#----------------
# Indexing a list
#----------------
v=list(bob=c(2,3,5),john=c("aa","bb"))
v
names(v) # call out the names in the list
v$bob # "$" will extract the value under "bob"
##Example
x = rnorm(100)
y = rnorm(100)
LM = summary(lm(y~x)) # This is the summary of the simple linear regression of y on x.
# In this summary we can see the coefficients, p-value, R-squared,...
LM
names(LM) # list the components stored in the summary; each can be extracted with "$", e.g. LM$r.squared gives the R-squared
LM$residuals # we can extract the residuals from the model.
#-----------
# Data frams
#-----------
#Data frame is similiar as the matrix but more useful. Bascially, it's just like a sheet.
N = 10
u = rnorm(N)
x1 = rnorm(N)
x2 = rnorm(N)
y = 1 + x1 + x2 + u
mydataframe = data.frame(y,x1,x2) # data.frame() is the code to create data frame
mydataframe
names(mydataframe) # Note that the data in data frame are "named"
mydataframe["x1"] # which is equivalent as "mydataframe$x1"
mydataframe$x1
mymatrix <- cbind(y, x1, x2)
mymatrix
names(mymatrix)
mymatrix["x1"] # Note that here the data are not named, hence we can not extract the data
mymatrix$x1
## Example
id <- c(1, 2, 3, 4)
age <- c(15, 25, 26, 38)
sex <- c("M", "F", "M", "M")
pay <- c(20, 3, 67, 98)
X.dataframe <- data.frame(id, age, sex, pay)
X.dataframe
X.dataframe [3, 2]
X.dataframe$age # refer to the content in age
X.dataframe[2] # refer to the second column
edit(X.dataframe) # click the cell twice, then edit like excel
## Example: R in-built data set
data(mtcars) # load the data set "mtcars" from R
mtcars # use help(mtcars) to get help from the definition of this dataset in R
mtcars["Mazda RX4", "cyl"] # select a specific value, identical to "mtcars[1,2]"
mtcars["Mazda RX4",] # select a row
mtcars$cyl # select a column
nrow(mtcars) # number of rows
ncol(mtcars)
#------------------
# Naming Data frams
#------------------
x = data.frame(company_A = 1:4, familyfirm = c(T, T, F, F))
rownames(x) = c("1998", "1999", "2000", "2001")
x
row.names(x) # row names
names(x) # column names
#---------------------------------
# Logical indexing for Data frams
#---------------------------------
## Question: how do we select the column "drat" in mtcar with the condition that "am=0"?
mtcars$am # the column "am" in mtcar
mtcars$am==0 # indicate the location where am=0 as "True"
L1 = mtcars$am==0
mtcars[L1,] # here is the full data set with "am=0"
mtcars[L1,]$drat # job done!
# Here's a short cut, using the function "subset()"
SC = subset(mtcars,am==0)
SC$drat
# Here's the data set with both "am=0" and "gear=4"
subset(mtcars,am==0 & gear==4)
#------------------------------
# Importing and Exporting Data
#------------------------------
# Import cvs files. "C:\\Users\\User\\Desktop" is the directory while "crime.csv" is the files name.
read.csv("C:\\Users\\User\\Desktop\\crime.csv")
# Export data set "Crime" as cvs files. "C:\\Users\\User\\Desktop" is the directory while "crime.csv" is the files name.
write.table(Crime, file = "C:\\Users\\User\\Desktop\\crime.csv", sep = ",")
####################################################################
# Section 2: Regrssion and Testing #
####################################################################
#----------------
# Regression: OLS
#----------------
rm(list=ls(all=T))
## Simple linear regression with intercept
n=5000
x = rnorm(n, mean=0, sd=10)
epsilon = rnorm(n, mean=0, sd=1)
y = 2 + 5*x + epsilon
# regress by hand
X=cbind(1,x)
beta_hat = solve(t(X)%*%X)%*%t(X)%*%y
beta_hat
## Run linear regression: lm()
lm(y~x) # regress y on x (with intercept)
lm(y~x-1) # regress y on x without intercept
## Call the data stored automatically by R
lm(y~x)
summary(lm(y~x))
coef(lm(y~x))
coef(summary(lm(y~x)))
names(summary(lm(y~x))) # see the data that is stored in "summary(lm(y~x)) "
#------------
# Coef Tests
#------------
rm(list=ls(all=T))
# constructing data
n=500
x = rnorm(n, mean=0, sd=1)
epsilon = rnorm(n, mean=0, sd=5)
y = 2 + 5*x + epsilon
# Now in order to test the coefficients, we first has to install the package "lmtest"
library(lmtest) # call the package
lm.fit = lm(y~x)
coeftest(lm.fit) # IID assumptions
#---------------------------
# Testing linear hypothesis
#---------------------------
# we first construct the data
rm(list=ls(all=T))
# constructing data
n=500
x_1 = rnorm(n, mean=0, sd=1)
x_2 = rnorm(n, mean=0, sd=1)
x_3 = rnorm(n, mean=0, sd=1)
x_4 = rnorm(n, mean=0, sd=1)
epsilon = rnorm(n, mean=0, sd=5)
y = 5 + 1*x_1 + 2*x_2 + 3*x_3 + 4*x_4 + epsilon
lm.fit = lm(y~x_1+x_2+x_3+x_4)
# First we need to install the package "car"
# Now we wish to test "b1=b2=b3=b4=1", the following code will do the trick
library(car)
library(sandwich)
linearHypothesis(lm.fit,c("x_1 = 1", "x_2 = 1", "x_3 = 1", "x_4 = 1")) # linear hypothesis with IID assumption
# For the test "b1=1, b2=2, b3=3, b4=4"
linearHypothesis(lm.fit,c("x_1 = 1", "x_2 = 2", "x_3 = 3", "x_4 = 4"))
# For the test "b1+b2+b3 = 6"
linearHypothesis(lm.fit,c("x_1 + x_2 + x_3 = 6"))
|
##' QGIS Algorithm provided by GRASS r.in.lidar.info (grass7:r.in.lidar.info)
##'
##' @title QGIS algorithm r.in.lidar.info
##'
##' @param input `file` - LAS input file. Path to a file.
##' @param html `fileDestination` - LAS information. Path for new file.
##' @param GRASS_REGION_PARAMETER `extent` - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifing if complete out of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or first output (most likely the main) should read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * html - outputHtml - LAS information
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
grass7_r_in_lidar_info <- function(input = qgisprocess::qgis_default_value(), html = qgisprocess::qgis_default_value(), GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Make sure the "grass7:r.in.lidar.info" algorithm and its provider are usable.
  check_algorithm_necessities("grass7:r.in.lidar.info")
  # Run the GRASS algorithm, forwarding any extra arguments untouched.
  result <- qgisprocess::qgis_run_algorithm("grass7:r.in.lidar.info", `input` = input, `html` = html, `GRASS_REGION_PARAMETER` = GRASS_REGION_PARAMETER, ...)
  if (.complete_output) {
    result
  } else {
    # Caller asked for just the main ("html") output element.
    qgisprocess::qgis_output(result, "html")
  }
}
} | /R/grass7_r_in_lidar_info.R | permissive | VB6Hobbyst7/r_package_qgis | R | false | false | 1,465 | r | ##' QGIS Algorithm provided by GRASS r.in.lidar.info (grass7:r.in.lidar.info)
##'
##' @title QGIS algorithm r.in.lidar.info
##'
##' @param input `file` - LAS input file. Path to a file.
##' @param html `fileDestination` - LAS information. Path for new file.
##' @param GRASS_REGION_PARAMETER `extent` - GRASS GIS 7 region extent. A comma delimited string of x min, x max, y min, y max. E.g. '4,10,101,105'. Path to a layer. The extent of the layer is used..
##' @param ... further parameters passed to `qgisprocess::qgis_run_algorithm()`
##' @param .complete_output logical specifing if complete out of `qgisprocess::qgis_run_algorithm()` should be used (`TRUE`) or first output (most likely the main) should read (`FALSE`). Default value is `TRUE`.
##'
##' @details
##' ## Outputs description
##' * html - outputHtml - LAS information
##'
##'
##' @export
##' @md
##' @importFrom qgisprocess qgis_run_algorithm qgis_default_value
grass7_r_in_lidar_info <- function(input = qgisprocess::qgis_default_value(), html = qgisprocess::qgis_default_value(), GRASS_REGION_PARAMETER = qgisprocess::qgis_default_value(),..., .complete_output = TRUE) {
  # Verify that the "grass7:r.in.lidar.info" algorithm (and its provider) is available.
  check_algorithm_necessities("grass7:r.in.lidar.info")
  # Run the algorithm; extra arguments in ... are forwarded to qgis_run_algorithm().
  output <- qgisprocess::qgis_run_algorithm("grass7:r.in.lidar.info", `input` = input, `html` = html, `GRASS_REGION_PARAMETER` = GRASS_REGION_PARAMETER,...)
  if (.complete_output) {
    return(output)
  }
  else{
    # Otherwise return only the "html" output element of the result.
    qgisprocess::qgis_output(output, "html")
  }
}
} |
# Build the "Inclusion" dashboard tab: labour-force indicators for key groups
# (women and men, young people, long-term unemployed). This is UI definition
# only; each djpr_plot_ui() ID is rendered by a matching server-side module.
page_inclusion <- function(...) {
  djpr_tab_panel(
    title = "Inclusion",
    h1("Key groups"),
    # tagList(
    #   "This page contains information about the labour force status of key groups of ",
    #   "Victorians, such as women, and young people. ",
    #   htmltools::tags$b("More information will be included with future releases. "),
    #   "For more information about overall labour force indicators ",
    #   "see the ",
    #   actionLink("link_indicators", "indicators page"),
    #   ". For information about how employment and unemployment varies across Victoria, see the ",
    #   actionLink("link_regions", "regions page"), "."
    # ),
    br(),
    # --- Section: women and men ---
    h2("Women and men"),
    djpr_plot_ui("gr_gen_emp_bar"),
    djpr_plot_ui("gr_emppopratio_line"),
    djpr_plot_ui("gr_gen_unemp_line"),
    djpr_plot_ui("gr_gen_partrate_line"),
    # --- Section: young people (two plots side by side) ---
    h2("Young people"),
    fluidRow(column(6,
                    djpr_plot_ui("gr_yth_emp_sincecovid_line")),
             column(6,
                    djpr_plot_ui("gr_yth_lfpartrate_vicaus_line"))),
    br(),
    # Focus box: indicator selector drives the three youth charts below.
    focus_box(
      shiny::selectInput("youth_focus",
        "Select an indicator",
        choices = c(
          "Unemployment rate" = "unemp_rate",
          "Participation rate" = "part_rate",
          "Employment-to-population ratio" = "emp_pop"
        ),
        width = "100%"
      ),
      column(
        6,
        djpr_plot_ui("gr_youth_states_dot",
          height = "600px"
        )
      ),
      column(
        6,
        djpr_plot_ui("gr_ages_line",
          height = "300px"
        ),
        djpr_plot_ui("gr_yth_melbvrest_line",
          height = "300px"
        )
      )
    ),
    br(),
    # --- Section: long-term unemployed (text only for now) ---
    h2("Long-term unemployed"),
    htmlOutput("inclusion_footnote"),
    br()
  )
}
| /R/page_inclusion.R | permissive | monikasarder/djprlabourdash | R | false | false | 1,774 | r | page_inclusion <- function(...) {
djpr_tab_panel(
title = "Inclusion",
h1("Key groups"),
# tagList(
# "This page contains information about the labour force status of key groups of ",
# "Victorians, such as women, and young people. ",
# htmltools::tags$b("More information will be included with future releases. "),
# "For more information about overall labour force indicators ",
# "see the ",
# actionLink("link_indicators", "indicators page"),
# ". For information about how employment and unemployment varies across Victoria, see the ",
# actionLink("link_regions", "regions page"), "."
# ),
br(),
h2("Women and men"),
djpr_plot_ui("gr_gen_emp_bar"),
djpr_plot_ui("gr_emppopratio_line"),
djpr_plot_ui("gr_gen_unemp_line"),
djpr_plot_ui("gr_gen_partrate_line"),
h2("Young people"),
fluidRow(column(6,
djpr_plot_ui("gr_yth_emp_sincecovid_line")),
column(6,
djpr_plot_ui("gr_yth_lfpartrate_vicaus_line"))),
br(),
focus_box(
shiny::selectInput("youth_focus",
"Select an indicator",
choices = c(
"Unemployment rate" = "unemp_rate",
"Participation rate" = "part_rate",
"Employment-to-population ratio" = "emp_pop"
),
width = "100%"
),
column(
6,
djpr_plot_ui("gr_youth_states_dot",
height = "600px"
)
),
column(
6,
djpr_plot_ui("gr_ages_line",
height = "300px"
),
djpr_plot_ui("gr_yth_melbvrest_line",
height = "300px"
)
)
),
br(),
h2("Long-term unemployed"),
htmlOutput("inclusion_footnote"),
br()
)
}
|
library(dplyr)
library(reshape2)
library(ggplot2)
library(plotly)
library(tidyr)
library(shiny)
# Introductory tab for the dashboard: describes the project scope and the
# three inequality indices (Theil, Gini, Atkinson) and links to the data source.
# Fixes several typos in the user-facing text ("indicies", "Inequlity",
# "measurment", "parmenter", "societys").
intro <- tabPanel(
  "Dashboard Overview",
  h3(
    id = "headers",
    "This is a project that explores how inequality indices have changed in
    the USA from 1930 - 2015. The 'National Inequality' tab explores how the change
    in equality indices compared to the change in real GDP (a measure of economic
    growth). The 'Inequality By State' shows how the indices have changed by state
    as well as showing a comparison of selected indices of two states in a given
    year."
  ),
  p(id = "headers",
    "The three indices being analyzed are: "),
  p(
    h5( id = "TheilHeader",
        "The Theil Index"),
  ),
  p("The Theil Index is a type of general entropy measurement that has values
    varying between perfect equality of 0, and perfectly inequal at infinity or
    1 (depending on if it is normalized). These measurements can be decomposed by
    population groups or income sources. It essentially measures the distance a
    population is from a state of everyone having the same income."),
  p(h5(id = "GiniHeader",
       "The Gini Index")),
  p("The Gini Coefficient was developed as a measure of
  economic inequality by measuring wealth distribution among a population (Dorfman).
    The value will range from 0 (perfect equality- every person has equal amount
    of income) to 1 (perfect inequality- one person has all the income) and if
    there is a value over 1 there are negative incomes. A higher
    Gini Index means that there is greater inequality which means high income
    individuals receiving a larger percentage of the total income.
    A country will try to have a lower Gini because that means
    there is not an overly unequal income across their population."),
  p(h5(id = "AtkinHeader",
       "The Atkin Index")),
  p("The Atkin Index represents the percentage of total income a society has to forgo
    to have more equal shares of incomes between citizens. The measure depends on the
    researchers, as they choose a theoretical parameter whose value is society's willingness
    to accept smaller incomes for equal distribution. "),
  p(h5(id = "headers", "Data Source")),
  tags$a(href = "https://www.shsu.edu/eco_mwf/inequality.html", "View Source")
)
| /introTab.R | no_license | rajc90/Inequality-Analysis | R | false | false | 2,402 | r | library(dplyr)
library(reshape2)
library(ggplot2)
library(plotly)
library(tidyr)
library(shiny)
intro <- tabPanel(
"Dashboard Overview",
h3(
id = "headers",
"This is a project that explores how inequality indicies have changed in
the USA from 1930 - 2015. The 'National Inequlity' tab explores how the change
in equality indicies compared to the change in real GDP (a measure of economic
growth). The 'Inequlity By State' shows how the indicies have changed by state
as well as showing a comparison of selected indicies of two states in a given
year."
),
p(id = "headers",
"The three indicies being analyzed are: "),
p(
h5( id = "TheilHeader",
"The Theil Index"),
),
p("The Theil Index is a a type of general entropy measurement that has values
varying between perfect equality of 0, and perfectly inequal at infinity or
1 (depending on if it is normalized). These measurment can be decomposed by
population groups or income sources. It essentially measures the distance a
population is from state of everyone having the same income."),
p(h5(id = "GiniHeader",
"The Gini Index")),
p("The Gini Coefficient was developed as a measure of
economic inequality by measuring wealth distribution among a population (Dorfman).
The value will range from 0 (perfect equality- every person has equal amount
of income) to 1 (perfect inequality- one person has all the income) and if
there is a value over 1 there are negative incomes . A higher
Gini Index means that there is greater inequality which means high income
individuals receiving a larger percentage of the total income.
A country will try to have a lower Gini because that means
there is not an overly unequal income across their population."),
p(h5(id = "AtkinHeader",
"The Atkin Index")),
p("The Atkin Index represents percentage of total income a society has to forgo
to have more equal shares of incomes between citizens. Measure depends on the
researchers, as they choose a theoretical parmenter whose value is societys willingness
to accept smaller incomes for equal distribution. "),
p(h5(id = "headers", "Data Source")),
tags$a(href = "https://www.shsu.edu/eco_mwf/inequality.html", "View Source")
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_get_default_credit_specification}
\alias{ec2_get_default_credit_specification}
\title{Describes the default credit option for CPU usage of a burstable
performance instance family}
\usage{
ec2_get_default_credit_specification(DryRun, InstanceFamily)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{InstanceFamily}{[required] The instance family.}
}
\value{
A list with the following syntax:\preformatted{list(
InstanceFamilyCreditSpecification = list(
InstanceFamily = "t2"|"t3"|"t3a"|"t4g",
CpuCredits = "string"
)
)
}
}
\description{
Describes the default credit option for CPU usage of a burstable
performance instance family.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html}{Burstable performance instances}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
}
\section{Request syntax}{
\preformatted{svc$get_default_credit_specification(
DryRun = TRUE|FALSE,
InstanceFamily = "t2"|"t3"|"t3a"|"t4g"
)
}
}
\keyword{internal}
| /cran/paws.compute/man/ec2_get_default_credit_specification.Rd | permissive | TWarczak/paws | R | false | true | 1,380 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_get_default_credit_specification}
\alias{ec2_get_default_credit_specification}
\title{Describes the default credit option for CPU usage of a burstable
performance instance family}
\usage{
ec2_get_default_credit_specification(DryRun, InstanceFamily)
}
\arguments{
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{InstanceFamily}{[required] The instance family.}
}
\value{
A list with the following syntax:\preformatted{list(
InstanceFamilyCreditSpecification = list(
InstanceFamily = "t2"|"t3"|"t3a"|"t4g",
CpuCredits = "string"
)
)
}
}
\description{
Describes the default credit option for CPU usage of a burstable
performance instance family.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html}{Burstable performance instances}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
}
\section{Request syntax}{
\preformatted{svc$get_default_credit_specification(
DryRun = TRUE|FALSE,
InstanceFamily = "t2"|"t3"|"t3a"|"t4g"
)
}
}
\keyword{internal}
|
\name{hypervolume_prune}
\alias{hypervolume_prune}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Removes small hypervolumes from a HypervolumeList
}
\description{
Identifies hypervolumes characterized either by a number of uniformly random points or a volume below a user-specified value and removes them from a \code{HypervolumeList}.
This function is useful for removing small features that can occur stochastically during segmentation after set operations or hole detection.
}
\usage{
hypervolume_prune(hvlist, num.points.min = NULL, volume.min = NULL, return.ids=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{hvlist}{
A \code{HypervolumeList} object.
}
\item{num.points.min}{
The minimum number of points in each input hypervolume.
}
\item{volume.min}{
The minimum volume in each input hypervolume
}
\item{return.ids}{
If \code{TRUE}, returns indices of input list as well as a pruned hypervolume list
}
}
\details{
Either \code{num.points.min} or \code{volume.min} (but not both) must be specified.
}
\value{
A \code{HypervolumeList} pruned to only those hypervolumes of sizes above the desired value. If \code{return.ids=TRUE}, instead returns a list structure with first item being the \code{HypervolumeList} and the second item being the indices of the retained hypervolumes.
}
\examples{
\dontrun{
data(penguins,package='palmerpenguins')
penguins_no_na = as.data.frame(na.omit(penguins))
penguins_adelie = penguins_no_na[penguins_no_na$species=="Adelie",
c("bill_length_mm","bill_depth_mm","flipper_length_mm")]
hv = hypervolume_gaussian(penguins_adelie,name='Adelie')
hv_segmented <- hypervolume_segment(hv,
num.points.max=200, distance.factor=1,
check.memory=FALSE) # intentionally under-segment
hv_segmented_pruned <- hypervolume_prune(hv_segmented,
num.points.min=20)
plot(hv_segmented_pruned)
}
}
\seealso{
\code{\link{hypervolume_holes}}, \code{\link{hypervolume_segment}}
} | /man/hypervolume_prune.Rd | no_license | bblonder/hypervolume | R | false | false | 2,053 | rd | \name{hypervolume_prune}
\alias{hypervolume_prune}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Removes small hypervolumes from a HypervolumeList
}
\description{
Identifies hypervolumes characterized either by a number of uniformly random points or a volume below a user-specified value and removes them from a \code{HypervolumeList}.
This function is useful for removing small features that can occur stochastically during segmentation after set operations or hole detection.
}
\usage{
hypervolume_prune(hvlist, num.points.min = NULL, volume.min = NULL, return.ids=FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{hvlist}{
A \code{HypervolumeList} object.
}
\item{num.points.min}{
The minimum number of points in each input hypervolume.
}
\item{volume.min}{
The minimum volume in each input hypervolume
}
\item{return.ids}{
If \code{TRUE}, returns indices of input list as well as a pruned hypervolume list
}
}
\details{
Either \code{num.points.min} or \code{volume.min} (but not both) must be specified.
}
\value{
A \code{HypervolumeList} pruned to only those hypervolumes of sizes above the desired value. If \code{return.ids=TRUE}, instead returns a list structure with first item being the \code{HypervolumeList} and the second item being the indices of the retained hypervolumes.
}
\examples{
\dontrun{
data(penguins,package='palmerpenguins')
penguins_no_na = as.data.frame(na.omit(penguins))
penguins_adelie = penguins_no_na[penguins_no_na$species=="Adelie",
c("bill_length_mm","bill_depth_mm","flipper_length_mm")]
hv = hypervolume_gaussian(penguins_adelie,name='Adelie')
hv_segmented <- hypervolume_segment(hv,
num.points.max=200, distance.factor=1,
check.memory=FALSE) # intentionally under-segment
hv_segmented_pruned <- hypervolume_prune(hv_segmented,
num.points.min=20)
plot(hv_segmented_pruned)
}
}
\seealso{
\code{\link{hypervolume_holes}}, \code{\link{hypervolume_segment}}
} |
\name{graph_predictions}
\alias{graph_predictions}
\title{
Scatterplot of observed and predicted batting averages
}
\description{
Scatterplot of observed and predicted batting averages
}
\usage{
graph_predictions(d2)
}
\arguments{
\item{d2}{
output from component_predict() function
}
}
\value{
ggplot2 object of the scatterplot
}
\author{
Jim Albert
}
\examples{
## Not run:
d <- collect_hitting_data()
S <- component_predict(d)
graph_predictions(S)
}
| /man/graph_predictions.Rd | no_license | karthy257/BApredict | R | false | false | 479 | rd | \name{graph_predictions}
\alias{graph_predictions}
\title{
Scatterplot of observed and predicted batting averages
}
\description{
Scatterplot of observed and predicted batting averages
}
\usage{
graph_predictions(d2)
}
\arguments{
\item{d2}{
output from component_predict() function
}
}
\value{
ggplot2 object of the scatterplot
}
\author{
Jim Albert
}
\examples{
## Not run:
d <- collect_hitting_data()
S <- component_predict(d)
graph_predictions(S)
}
|
################################################################################################
## Extract the environmental conditions for all grid cells in resolution of 200km;
## and then calculate the the mean of environmental conditions across 3*3 or 5*5 grid cells
################################################################################################
rm(list = ls())
# Set user dependent working directories
user <- Sys.info()["nodename"]
path2wd <- switch(user,
"IDIVNB341" = "C:/Dropbox/AU/global_tree_beta_2022",
"IDIVTS01" = "H:/wubing/AU/global_tree_beta_2022")
setwd(path2wd)
# load packages
needed_libs <- c("tidyverse","letsR", "raster", "spdep", "sp")
# Install package `p` from CRAN if it is not already installed, then attach it.
# NOTE: require() returns FALSE (rather than erroring) if attaching fails;
# the result is ignored by the sapply() caller below, so a failed install
# would go unnoticed until a function from the package is first used.
usePackage <- function(p) {
  if (!is.element(p, installed.packages()[,1])) {
    install.packages(p)
  }
  require(p, character.only = TRUE)
}
sapply(needed_libs, usePackage)
rm(usePackage)
## get spatial grid cells
load("data/tree_pam/tree_pam6_final.RDATA")
## load environmental variables
# elevation
elev_dir <-paste("data/environment_rasters/elevation/wc2.1_30s_elev.tif")
elev <- stack(elev_dir)
# current climates
bioc_dir <-paste("data/environment_rasters/current_climate/wc2.1_5m_bio/wc2.1_5m_bio_", 1:19, ".tif", sep="")
bioc <- stack(bioc_dir)
# LGM climates
lgmc_dir <-paste("data/environment_rasters/LGM_climate/chelsa_LGM_v1_2B_r5m/5min/bio_", c(1,12), ".tif", sep="")
lgmc <- stack(lgmc_dir)
# Human Modification index
hmi_dir <- "data/environment_rasters/Global_Human_Modification/gHM/gHM.tif"
hmi <- stack(hmi_dir)
# Summarise a raster of environmental layers within each polygon: the raster
# is reprojected to the polygon CRS at resolution `res`, then `fun` (area-
# weighted if `weights = TRUE`, NAs removed) is applied per polygon.
# Returns a data frame with one row per polygon.
extract_env <- function(env, mypolygon, res, fun = mean, weights = FALSE) {
  target_crs <- projection(mypolygon)
  env_proj <- projectRaster(env, crs = target_crs, res = res)
  extract(env_proj, mypolygon, fun = fun, weights = weights, na.rm = TRUE, df = TRUE)
}
# Calculate current and LGM climates for each grid cell
bioc_grid <- extract_env(env=bioc, mypolygon=grid_land, res=10)
bioc_grid[, c(5)] <- bioc_grid[, 5]/100 #the raw unit is standard deviation*100
bioc_grid[, 1] <- grid_land@data[, 1]
lgmc_grid <- extract_env(env=lgmc, mypolygon=grid_land, res=10)
lgmc_grid[, c(2)] <- lgmc_grid[, 2]/10 #the raw unit is 1/10 degree
lgmc_grid[, 1] <- grid_land@data[, 1]
## Calculate mean and range of elevation
# Spread of x: max(x) - min(x), used as the elevational range per grid cell.
# `na.rm` is applied consistently to both extremes (the original hard-coded
# na.rm = TRUE in min() only, so na.rm = FALSE was honoured by max() but not
# min()); the local variable no longer shadows base::range.
get_range <- function(x, na.rm = TRUE) {
  diff(range(x, na.rm = na.rm))
}
# elevational range
elev_range_grid200 <- extract_env(env=elev, mypolygon=grid_land, res=1, fun=get_range)
colnames(elev_range_grid200)[2] <- "topography"
elev_range_grid200[, 1] <- grid_land@data[, 1]
# mean elevatioins
elev_5m <- aggregate(elev, 10)
elev_mean_grid200 <- extract_env(env=elev_5m, mypolygon=grid_land, res=10, fun=mean)
colnames(elev_mean_grid200)[2] <- "elevation"
elev_mean_grid200[, 1] <- grid_land@data[, 1]
# mean HMI for each grid cell
hmi_10km <- aggregate(hmi, 10)
hmi_grid <- extract_env(env=hmi_10km, mypolygon=grid_land, res=10, fun=mean)
colnames(hmi_grid)[2] <- "hmi"
hmi_grid[, 1] <- grid_land@data[, 1]
save(bioc_grid, lgmc_grid, elev_range_grid200, elev_mean_grid200, hmi_grid,
file="intermediate_results/environments_allCells.RDATA")
load("intermediate_results/environments_allCells.RDATA")
#################
## Assemble environmental variables
# the projected coordinates
xy <- coordinates(tree_pam6[[2]])[grid_land@data[, 1], ]
colnames(xy) <- c("x", "y")
# get longitude and latitude
cell_points <- as.data.frame(xy)
coordinates(cell_points) <- c("x","y")
projection(cell_points) <- projection(tree_pam6[[2]])
cell_points_longlat <- spTransform(cell_points, CRS("+proj=longlat +datum=WGS84"))
long_lat <- coordinates(cell_points_longlat)
colnames(long_lat) <- c("longitude", "latitude")
# temperature and precipitation anomaly since the LGM
lgmcc_grid <- data.frame(ID = grid_land@data[, 1],
mat.anomaly = bioc_grid[, 2] - lgmc_grid[, 2],
map.anomaly = bioc_grid[, 13] - lgmc_grid[, 3])
# change names of bioclimatic variables
colnames(bioc_grid)[-1] <- paste0("bio_", 1:19)
# combine environmental variables
env200 <- data.frame(ID = grid_land@data[, 1],
xy, long_lat, bioc_grid[, -1], lgmcc_grid[, -1],
elevation = elev_mean_grid200[,-1],
topography = elev_range_grid200[,-1],
hmi = hmi_grid[,-1]) %>%
as_tibble() %>%
# remove data of grid-cells with small part in the land
mutate(land_area = rgeos::gArea(grid_land, byid = TRUE)) %>%
filter(land_area >= 4000) %>%
dplyr::select(-land_area)
##############################################
## Calculate the mean of environmental conditions of focal cells and their
## neighbouring cells (3x3 and 5x5 windows)
# Grid cells that actually hold tree distribution records (the cells used to
# calculate beta diversity)
tree_cells <- which(!is.na(tree_pam6$Richness_Raster[]))
env200_treecell <- env200 %>%
  filter(ID %in% tree_cells)
# 8 nearest neighbouring cells per focal cell (columns 2:3 are the projected
# x/y coordinates; d2 = 300 presumably spans the 3x3 window of the grid --
# confirm CRS units). diag(...) <- 1 adds the focal cell to its own neighbourhood.
nb8 <- dnearneigh(x = as.matrix(env200_treecell[,2:3]), d1 = 0, d2 = 300, longlat = FALSE)
nb8mat <- nb2mat(neighbours = nb8, style = "B", zero.policy = TRUE)
diag(nb8mat) <- 1
# 24 nearest neighbouring cells per focal cell (5x5 window; d2 = 570),
# again including the focal cell itself
nb24 <- dnearneigh(x = as.matrix(env200_treecell[,2:3]), d1 = 0, d2 = 570, longlat = FALSE)
nb24mat <- nb2mat(neighbours = nb24, style = "B", zero.policy = TRUE)
diag(nb24mat) <- 1
# Same 24-neighbour definition but built over ALL grid cells (not just the
# cells that have tree observations)
nb24_all <- dnearneigh(x = as.matrix(env200[,2:3]), d1 = 0, d2 = 570, longlat = FALSE)
nb24mat_all <- nb2mat(neighbours = nb24_all, style = "B", zero.policy = TRUE)
diag(nb24mat_all) <- 1
# A function to calculate environmental conditions of focal and neighboring cells based on a function
get_env_nb_summ <- function(envdata = envdata, nbmat = NA, fun){
envdata.res <- envdata # a new data frame to store results
envdata.res[, -1] <- NA
get_env_nb_cell <- function(x){
id <- which(x == 1)
if(ncol(envdata) == 2) env.nb.cell <- apply(as.data.frame(envdata[id,-1]), 2 ,fun, na.rm=TRUE)
if(ncol(envdata) > 2) env.nb.cell <- apply(envdata[id,-1], 2, fun, na.rm=TRUE)
return(env.nb.cell)
}
env.nb.cells <- apply(nbmat, 1, get_env_nb_cell)
if(ncol(envdata)>2) env.nb.cells <- t(env.nb.cells)
envdata.res[, -1] <- env.nb.cells
return(envdata.res)
}
# mean environmental conditions
env200_mean_nn8 <- env200_treecell
env200_mean_nn8[,c(1, 6:29)] <- get_env_nb_summ(envdata = env200_treecell[,c(1, 6:29)], nbmat = nb8mat, fun=mean)
env200_mean_nn24 <- env200_treecell
env200_mean_nn24[,c(1, 6:29)] <- get_env_nb_summ(envdata = env200_treecell[,c(1, 6:29)], nbmat = nb24mat, fun=mean)
env200_mean_nn24_all <- env200
env200_mean_nn24_all[,c(1, 6:29)] <- get_env_nb_summ(envdata = env200[,c(1, 6:29)], nbmat = nb24mat_all, fun=mean)
# save all calculated output
save(env200, env200_mean_nn8, env200_mean_nn24, env200_mean_nn24_all, file = "intermediate_results/environments_final.RDATA")
| /Rcode/4_environments.R | no_license | Wubing-Xu/Global_tree_beta-diversity | R | false | false | 7,237 | r | ################################################################################################
## Extract the environmental conditions for all grid cells in resolution of 200km;
## and then calculate the the mean of environmental conditions across 3*3 or 5*5 grid cells
################################################################################################
rm(list = ls())
# Set user dependent working directories
user <- Sys.info()["nodename"]
path2wd <- switch(user,
"IDIVNB341" = "C:/Dropbox/AU/global_tree_beta_2022",
"IDIVTS01" = "H:/wubing/AU/global_tree_beta_2022")
setwd(path2wd)
# load packages
needed_libs <- c("tidyverse","letsR", "raster", "spdep", "sp")
# Load a package, installing it first when it is absent from the library.
# Returns the value of require() (TRUE/FALSE), so the surrounding
# sapply(needed_libs, usePackage) reports which packages loaded.
usePackage <- function(p) {
  already_installed <- p %in% installed.packages()[, 1]
  if (!already_installed) {
    install.packages(p)
  }
  require(p, character.only = TRUE)
}
sapply(needed_libs, usePackage)
rm(usePackage)
## get spatial grid cells
load("data/tree_pam/tree_pam6_final.RDATA")
## load environmental variables
# elevation
elev_dir <-paste("data/environment_rasters/elevation/wc2.1_30s_elev.tif")
elev <- stack(elev_dir)
# current climates
bioc_dir <-paste("data/environment_rasters/current_climate/wc2.1_5m_bio/wc2.1_5m_bio_", 1:19, ".tif", sep="")
bioc <- stack(bioc_dir)
# LGM climates
lgmc_dir <-paste("data/environment_rasters/LGM_climate/chelsa_LGM_v1_2B_r5m/5min/bio_", c(1,12), ".tif", sep="")
lgmc <- stack(lgmc_dir)
# Human Modification index
hmi_dir <- "data/environment_rasters/Global_Human_Modification/gHM/gHM.tif"
hmi <- stack(hmi_dir)
# A function to extract the value of environments for each mypolygon
# Extract per-polygon summary values from an environmental raster.
# Reprojects the raster to the polygon layer's CRS at resolution `res`,
# then summarises the covered cells inside each polygon with `fun`
# (na.rm=TRUE), returning a data frame with one row per polygon.
extract_env <- function(env, mypolygon, res, fun=mean, weights=FALSE){
  CRS_mypolygon <- projection(mypolygon)
  env <- projectRaster(env, crs=CRS_mypolygon, res=res)
  env_mypolygon_value <- extract(env, mypolygon, fun=fun, weights=weights, na.rm=TRUE, df=TRUE)
  return(env_mypolygon_value)
}
# Calculate current and LGM climates for each grid cell
bioc_grid <- extract_env(env=bioc, mypolygon=grid_land, res=10)
bioc_grid[, c(5)] <- bioc_grid[, 5]/100 #the raw unit is standard deviation*100
bioc_grid[, 1] <- grid_land@data[, 1]
lgmc_grid <- extract_env(env=lgmc, mypolygon=grid_land, res=10)
lgmc_grid[, c(2)] <- lgmc_grid[, 2]/10 #the raw unit is 1/10 degree
lgmc_grid[, 1] <- grid_land@data[, 1]
## Calculate mean and range of elevation
# Range width of a numeric vector: max(x) - min(x).
#
# Args:
#   x:     numeric vector.
#   na.rm: drop NA values before computing both extremes. (Fix: the original
#          hard-coded na.rm=TRUE for min() only, ignoring the parameter there.)
# Returns: a single numeric value.
get_range <- function(x, na.rm=TRUE) {
  max(x, na.rm = na.rm) - min(x, na.rm = na.rm)
}
# elevational range
elev_range_grid200 <- extract_env(env=elev, mypolygon=grid_land, res=1, fun=get_range)
colnames(elev_range_grid200)[2] <- "topography"
elev_range_grid200[, 1] <- grid_land@data[, 1]
# mean elevations
elev_5m <- aggregate(elev, 10)
elev_mean_grid200 <- extract_env(env=elev_5m, mypolygon=grid_land, res=10, fun=mean)
colnames(elev_mean_grid200)[2] <- "elevation"
elev_mean_grid200[, 1] <- grid_land@data[, 1]
# mean HMI for each grid cell
hmi_10km <- aggregate(hmi, 10)
hmi_grid <- extract_env(env=hmi_10km, mypolygon=grid_land, res=10, fun=mean)
colnames(hmi_grid)[2] <- "hmi"
hmi_grid[, 1] <- grid_land@data[, 1]
save(bioc_grid, lgmc_grid, elev_range_grid200, elev_mean_grid200, hmi_grid,
file="intermediate_results/environments_allCells.RDATA")
load("intermediate_results/environments_allCells.RDATA")
#################
## Assemble environmental variables
# the projected coordinates
xy <- coordinates(tree_pam6[[2]])[grid_land@data[, 1], ]
colnames(xy) <- c("x", "y")
# get longitude and latitude
cell_points <- as.data.frame(xy)
coordinates(cell_points) <- c("x","y")
projection(cell_points) <- projection(tree_pam6[[2]])
cell_points_longlat <- spTransform(cell_points, CRS("+proj=longlat +datum=WGS84"))
long_lat <- coordinates(cell_points_longlat)
colnames(long_lat) <- c("longitude", "latitude")
# temperature and precipitation anomaly since the LGM
lgmcc_grid <- data.frame(ID = grid_land@data[, 1],
mat.anomaly = bioc_grid[, 2] - lgmc_grid[, 2],
map.anomaly = bioc_grid[, 13] - lgmc_grid[, 3])
# change names of bioclimatic variables
colnames(bioc_grid)[-1] <- paste0("bio_", 1:19)
# combine environmental variables
env200 <- data.frame(ID = grid_land@data[, 1],
xy, long_lat, bioc_grid[, -1], lgmcc_grid[, -1],
elevation = elev_mean_grid200[,-1],
topography = elev_range_grid200[,-1],
hmi = hmi_grid[,-1]) %>%
as_tibble() %>%
# remove data of grid-cells with small part in the land
mutate(land_area = rgeos::gArea(grid_land, byid = TRUE)) %>%
filter(land_area >= 4000) %>%
dplyr::select(-land_area)
##############################################
## Calculate the mean of environmental conditions of focal cells and their neighboring cells
# the environment subset of cells with tree distributions that were used to calculate beta
tree_cells <- which(!is.na(tree_pam6$Richness_Raster[]))
env200_treecell <- env200 %>%
filter(ID %in% tree_cells)
# define the 8 nearest neighboring cells for each focal cell, including the focal cell itself
nb8 <- dnearneigh(x = as.matrix(env200_treecell[,2:3]), d1 = 0, d2 = 300, longlat = FALSE)
nb8mat <- nb2mat(neighbours = nb8, style = "B", zero.policy = TRUE)
diag(nb8mat) <- 1
# define the 24 nearest neighboring cells for each focal cell, including the focal cell itself
nb24 <- dnearneigh(x = as.matrix(env200_treecell[,2:3]), d1 = 0, d2 = 570, longlat = FALSE)
nb24mat <- nb2mat(neighbours = nb24, style = "B", zero.policy = TRUE)
diag(nb24mat) <- 1
# define the 24 nearest neighboring cells for each focal cell using all grid cells (not just cells that have tree observations)
nb24_all <- dnearneigh(x = as.matrix(env200[,2:3]), d1 = 0, d2 = 570, longlat = FALSE)
nb24mat_all <- nb2mat(neighbours = nb24_all, style = "B", zero.policy = TRUE)
diag(nb24mat_all) <- 1
# A function to calculate environmental conditions of focal and neighboring cells based on a function
# Summarise environmental conditions over each focal cell and its neighbourhood.
# `envdata`: data frame with a cell-ID first column and numeric value columns,
#   rows ordered to match the rows/columns of `nbmat`.
# `nbmat`: square binary matrix; [i, j] == 1 marks cell j as a neighbour of
#   cell i (diagonal set to 1 upstream so the focal cell is included).
# `fun`: column-wise summary function (e.g. mean); must accept `na.rm`.
# Returns a data frame shaped like `envdata` (ID column untouched).
# NOTE(review): the default `envdata = envdata` is self-referential and would
# error if ever relied on; callers always pass `envdata` explicitly.
get_env_nb_summ <- function(envdata = envdata, nbmat = NA, fun){
  envdata.res <- envdata # a new data frame to store results
  envdata.res[, -1] <- NA
  # Summarise one neighbourhood; x is one row of nbmat
  get_env_nb_cell <- function(x){
    id <- which(x == 1)
    # A 2-column envdata drops to a vector on subsetting, hence the
    # as.data.frame() wrap in the first branch
    if(ncol(envdata) == 2) env.nb.cell <- apply(as.data.frame(envdata[id,-1]), 2 ,fun, na.rm=TRUE)
    if(ncol(envdata) > 2) env.nb.cell <- apply(envdata[id,-1], 2, fun, na.rm=TRUE)
    return(env.nb.cell)
  }
  env.nb.cells <- apply(nbmat, 1, get_env_nb_cell)
  # With >1 value column apply() returns variables x cells; transpose back
  if(ncol(envdata)>2) env.nb.cells <- t(env.nb.cells)
  envdata.res[, -1] <- env.nb.cells
  return(envdata.res)
}
# mean environmental conditions
env200_mean_nn8 <- env200_treecell
env200_mean_nn8[,c(1, 6:29)] <- get_env_nb_summ(envdata = env200_treecell[,c(1, 6:29)], nbmat = nb8mat, fun=mean)
env200_mean_nn24 <- env200_treecell
env200_mean_nn24[,c(1, 6:29)] <- get_env_nb_summ(envdata = env200_treecell[,c(1, 6:29)], nbmat = nb24mat, fun=mean)
env200_mean_nn24_all <- env200
env200_mean_nn24_all[,c(1, 6:29)] <- get_env_nb_summ(envdata = env200[,c(1, 6:29)], nbmat = nb24mat_all, fun=mean)
# save all calculated output
save(env200, env200_mean_nn8, env200_mean_nn24, env200_mean_nn24_all, file = "intermediate_results/environments_final.RDATA")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slab_functions.R
\name{read_vo1}
\alias{read_vo1}
\title{Title}
\usage{
read_vo1(vfile = "~/Dropbox/data/global/volcanoes/", limsx = c(-180, 180),
limsy = c(-90, 90), ppp = TRUE, to = NA)
}
\arguments{
\item{to}{}
}
\description{
Title
}
| /man/read_vo1.Rd | no_license | msandifo/slab | R | false | true | 318 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slab_functions.R
\name{read_vo1}
\alias{read_vo1}
\title{Title}
\usage{
read_vo1(vfile = "~/Dropbox/data/global/volcanoes/", limsx = c(-180, 180),
limsy = c(-90, 90), ppp = TRUE, to = NA)
}
\arguments{
\item{to}{}
}
\description{
Title
}
|
library(CARBayesST)
library(CARBayesdata)
library(sp)
library(tidyverse)
library(ggplot2)
library(spdep)
library(lubridate)
library(sf)
library(tmap)
library(janitor)
library(here)
library(ggridges)
library(rgdal)
library(broom)
library(car)
library(rmapshaper)
library(ggdist)
#### PRE-PROCESSING PARTY DATA ####
df <- read_csv(here::here('data',
'scotland-house-parties-2020.csv'))
df <- df %>%
clean_names()
#Converting date column to datetime
df[['date']] <- as.Date(df[['date']], format='%d/%m/%Y') #why did this take so long
#Creating df without non-spatially referenced rows
df_spatialref <- df %>%
dplyr::filter(!is.na(area_commands))
#Creating table of total house parties attended by date
#only for visualisation purposes
house_gatherings_by_date <- df %>%
dplyr::select(date, house_gatherings_attended, house_gatherings_in_breach_of_restrictions) %>%
group_by(date) %>%
summarise_at(c("house_gatherings_attended", "house_gatherings_in_breach_of_restrictions"), sum, na.rm = TRUE) %>%
pivot_longer(cols=2:3,
names_to='category',
values_to='gathering_count')
#Plotting daily house parties attended and parties recorded as breaching restrictions
daily_plot <- ggplot(house_gatherings_by_date,
aes(x=date, y=gathering_count, fill=category)) +
geom_bar(stat='identity') +
scale_fill_brewer(palette='Paired',
name="",
labels=c("Total house gatherings attended",
"House gatherings in breach of restrictions")) +
xlab('Date') +
ylab('Number of house gatherings attended by police') +
geom_vline(aes(xintercept = as.Date('2020-09-01'),
linetype='Household visits banned in Glasgow,\nWest Dunbartonshire, and\nEast Renfrewshire'),
color='red') +
geom_vline(aes(xintercept = as.Date('2020-09-23'),
linetype='Household visits banned nationwide'),
color='red') +
scale_linetype_manual(name = 'Restrictions introduced',
values = c('Household visits banned in Glasgow,\nWest Dunbartonshire, and\nEast Renfrewshire' = 'dashed',
'Household visits banned nationwide' = 'solid')) +
theme(legend.title=element_blank(),
axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)))
#Another version of daily plot
daily_plot <- ggplot(house_gatherings_by_date,
aes(x=date, y=gathering_count, fill=category)) +
geom_bar(stat='identity') +
scale_fill_brewer(palette='Paired',
name="",
labels=c("Total house gatherings attended",
"House gatherings in breach of restrictions")) +
xlab('Date') +
ylab('Number of house gatherings attended by police') +
geom_vline(aes(xintercept = as.Date('2020-09-01')),
linetype='dashed',
color='red') +
geom_vline(aes(xintercept = as.Date('2020-09-23')),
linetype='solid',
color='red') +
annotate("text", x = as.Date('2020-09-02'),
y = 295,
size = 3,
label = "Household gatherings banned in\nGlasgow, West Dunbartonshire,\nand East Renfrewshire",
colour='red',
hjust=0) +
annotate("text", x = as.Date('2020-09-24'),
y = 298,
size = 3,
label = "Household gatherings banned\nnationwide",
colour='red',
hjust=0) +
theme(legend.title=element_blank(),
axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)))
daily_plot
ggsave('daily_plot2.png', plot=daily_plot, height = 21 , width = 33.87, units='cm')
#Creating table of house gatherings by week in each area command
#this will be used for analysis later
area_command_house_gatherings_weekly <- df_spatialref %>%
dplyr::select(date, area_commands, house_gatherings_in_breach_of_restrictions) %>%
mutate(week = floor_date(date, unit="week", week_start=getOption('lubridate.week.start', 5))) %>%
group_by(week, area_commands) %>%
summarise_at("house_gatherings_in_breach_of_restrictions", sum, na.rm = TRUE) %>%
dplyr::filter(week != as.Date('2020-10-09') & area_commands != 'Western Isles') %>%
dplyr::filter(area_commands != 'Orkney') %>%
dplyr::filter(area_commands != 'Shetland')
#### CONSTRUCTING LOOKUP TABLE ####
#Read in lookup table
lookup <- read_csv(here::here('data',
'Datazone2011lookup.csv'))
#Read in population data for electoral wards
wardpop <- read_csv(here::here('data',
'electoral-wards-19-tabs',
'electoral-wards-19-tabs_2019.csv'), skip = 3)
#We only want the ward population data for all ages, not split by gender
wardpop <- wardpop %>%
clean_names() %>%
dplyr::filter((sex == 'Persons') & (area_name != 'Scotland'))
#dealing with duplicated ward names in different councils
wardpop[268, 'area_name'] <- "North East (Glasgow)"
wardpop[269, 'area_name'] <- "North Isles (Orkney)"
wardpop[270, 'area_name'] <- "North Isles (Shetland)"
#Join ward name to local authority, adding information on population aged 18-29 in the process
agecols = c('x18', 'x19', 'x20', 'x21', 'x22', 'x23', 'x24', 'x25', 'x26', 'x27', 'x28', 'x29')
wardpop_la <- left_join(wardpop,
lookup,
by=c("area_code" = "MMWard_Code")) %>%
dplyr::select(c(area_code, area_name, all_ages, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, LA_Code, LA_Name, SPD_Code, SPD_Name)) %>%
distinct(area_code, .keep_all=TRUE) %>%
mutate(pop_18_29 = rowSums(.[agecols]))
#Function for processing raw text pasted from Police Scotland website
create_ward_list <- function(string) {
string <- str_split(string, "\\n")
for (i in 1:length(string)) {
string[[i]] <- str_replace(string[[i]], "&", "and")
string[[i]] <- str_replace(string[[i]], "(:|-|–).*", "")
string[[i]] <- str_replace(string[[i]], "\\s(([[:graph:]]+@[[:graph:]]+)( /\\s+[[:graph:]]+@[[:graph:]]+)?)", "")
string[[i]] <- str_replace(string[[i]], "\\s+$", "")
}
return(unlist(string)) #turn list output into character vector
}
#Function for processing wards where area commands covers one council
council_ward_list <- function(council) {
wards <- wardpop_la %>%
dplyr::filter(LA_Name == council) %>%
pull(area_name)
return(wards)
}
#Create lookup list
area_commands.levels <- stack(list(
'Aberdeen City North' = c('Dyce/Bucksburn/Danestone',
'Bridge of Don',
'Kingswells/Sheddocksley/Summerhill',
'Northfield/Mastrick North',
'Hilton/Woodside/Stockethill',
'Tillydrone/Seaton/Old Aberdeen',
'George St/Harbour'),
'Aberdeen City South' = c('Midstocket/Rosemount',
'Lower Deeside',
'Hazlehead/Queens Cross/Countesswells',
'Airyhall/Broomhill/Garthdee',
'Torry/Ferryhill',
'Kincorth/Nigg/Cove'),
'Aberdeenshire North' = create_ward_list('Banff and District - BanffDistrictCPT@Scotland.pnn.police.uk
Troup - TroupCPT@scotland.pnn.police.uk
Fraserburgh and District - FraserburghDistrictCPT@scotland.pnn.police.uk
Central Buchan - CentralBuchanCPT@Scotland.pnn.police.uk
Peterhead North and Rattray - PeterheadNorthRattrayCPT@Scotland.pnn.police.uk
Peterhead South and Cruden - PeterheadSouthCrudenCPT@Scotland.pnn.police.uk
Turriff and District - TurriffDistrictCPT@Scotland.pnn.police.uk
Mid Formartine - MidFormartineCPT@Scotland.pnn.police.uk
Ellon and District - EllonDistrictCPT@Scotland.pnn.police.uk'),
'Aberdeenshire South' = create_ward_list('West Garioch - WestGariochCPT@Scotland.pnn.police.uk
Inverurie and District - InverurieDistrictCPT@Scotland.pnn.police.uk
East Garioch - EastGariochCPT@Scotland.pnn.police.uk
Westhill and District - WesthillDistrictCPT@Scotland.pnn.police.uk
Huntly, Strathbogie and Howe of Alford - HuntlyStrathbogieHoweofAlfordCPT@Scotland.pnn.police.uk
Aboyne, Upper Deeside and Donside - AboyneUpperDeesideDonsideCPT@Scotland.pnn.police.uk
Banchory and Mid Deeside - BanchoryMidDeesideCPT@Scotland.pnn.police.uk
North Kincardine - NorthKincardineCPT@Scotland.pnn.police.uk
Stonehaven and Lower Deeside - StonehavenLowerDeesideCPT@Scotland.pnn.police.uk
Mearns - MearnsCPT@Scotland.pnn.police.uk'),
'Angus' = council_ward_list('Angus'),
'Central' = create_ward_list('Kirkcaldy Central
Kirkcaldy East
Kirkcaldy North
Burntisland, Kinghorn and Western Kirkcaldy
Glenrothes West and Kinglassie
Glenrothes Central and Thornton
Glenrothes North, Leslie and Markinch'),
'Clackmannanshire' = create_ward_list('Clackmannanshire East - ClackmannanshireEastCPT@scotland.pnn.police.uk
Clackmannanshire North - ClackmannanshireNorthCPT@scotland.pnn.police.uk
Clackmannanshire South - ClackmannanshireSouthCPT@scotland.pnn.police.uk
Clackmannanshire West - ClackmannanshireWestCPT@scotland.pnn.police.uk
Clackmannanshire Central'),
'Dumfriesshire' = create_ward_list('North West Dumfries
Mid and Upper Nithsdale
Lochar
Nith
Annandale South
Annandale North
Annandale East and Eskdale'),
'Dundee' = council_ward_list('Dundee City'),
'East' = create_ward_list('Tay Bridgehead
St. Andrews
East Neuk and Landward
Cupar
Howe of Fife and Tay Coast
Leven, Kennoway and Largo
Buckhaven, Methil and Wemyss Villages'),
'East Ayrshire' = create_ward_list('Annick – AyrshireLPSTAnnick@scotland.pnn.police.uk
Kilmarnock North – AyrshireLPSETKilmarnock@scotland.pnn.police.uk
Kilmarnock West and Crosshouse – AyrshireLPSTKilmarnock@scotland.pnn.police.uk
Kilmarnock East and Hurlford - AyrshireLPSTKilmarnock@scotland.pnn.police.uk
Hurlford - AyrshireLPSTIrvineValley@scotland.pnn.police.uk
Kilmarnock South – AyrshireLPSTKilmarnock@scotland.pnn.police.uk
Irvine Valley – AyrshireLPSTIrvineValley@scotland.pnn.police.uk
Ballochmyle – AyrshireLPSTCumnock@scotland.pnn.uk
Cumnock and New Cumnock – AyrshireLPSTCumnock@scotland.pnn.police.uk
Doon Valley – AyrshireLPSTDoonValley@scotland.pnn.police.uk'),
'East Dunbartonshire' = create_ward_list('Milngavie
Bearsden North
Bearsden South
Bishopbriggs North and Campsie
Bishopbriggs South
Lenzie and Kirkintilloch South
Kirkintilloch East and North and Twechar'),
'East Kilbride, Cambuslang and Rutherglen' = create_ward_list('East Kilbride Central North
East Kilbride Central South
East Kilbride West
East Kilbride South
East Kilbride East
Rutherglen Central and North
Rutherglen South
Cambuslang East
Cambuslang West'),
'East Lothian' = create_ward_list('Musselburgh - MusselburghWestCPT@scotland.pnn.police.uk, MusselburghEastCarberryCPT@scotland.pnn.police.uk
Preston, Seton and Gosford - PrestonSetonCPT@scotland.pnn.police.uk
Tranent, Wallyford and Macmerry - FasideCPT@scotland.pnn.police.uk
Haddington and Lammermuir - HaddingtonLammermuirCPT@scotland.pnn.police.uk
North Berwick Coastal - NorthBerwickCoastalCPT@scotland.pnn.police.uk
Dunbar and East Linton - DunbarEastLintonCPT@scotland.pnn.police.uk'),
'East Renfrewshire' = create_ward_list('Barrhead, Liboside and Uplawmoor
Newton Mearns North and Neilston
Giffnock and Thornliebank
Clarkston, Netherlee and Williamwood
Newton Mearns South and Eaglesham'),
'Falkirk' = create_ward_list("Bo'ness and Blackness - Bo'NessBlacknessCPT@scotland.pnn.police.uk
Bonnybridge and Larbert - BonnybridgeLarbertCPT@scotland.pnn.police.uk
Carse, Kinnaird and Tryst - CarseKinnairdTrystCPT@scotland.pnn.police.uk
Denny and Banknock - DennyBanknockCPT@scotland.pnn.police.uk
Falkirk North - FalkirkNorthCPT@scotland.pnn.police.uk
Falkirk South - FalkirkSouthCPT@scotland.pnn.police.uk
Grangemouth - GrangemouthCPT@scotland.pnn.police.uk
Lower Braes - LowerBraesCPT@scotland.pnn.police.uk
Upper Braes - UpperBraesCPT@Scotland.pnn.police.uk"),
'Galloway' = create_ward_list('Stranraer and the Rhins
Mid Galloway and Wigtown West
Dee and Glenkens
Castle Douglas and Crocketford
Abbey'),
'Glasgow City Centre' = 'Anderston/City/Yorkhill',
'Glasgow East' = create_ward_list('Calton GreaterGlasgowLPSTLondonRoad@scotland.pnn.police.uk
East Centre GreaterGlasgowLPSTLondonRoad@scotland.pnn.police.uk
Dennistoun'), #figured out that Dennistoun was in Glasgow East by looking up the ward councillor's FB page
'Glasgow North' = create_ward_list('Maryhill
Canal
Springburn/Robroyston'),
'Glasgow North East' = c("Baillieston",
"Shettleston",
"North East (Glasgow)"), #figured out Glasgow NE wards through process of elimination
'Glasgow North West' = create_ward_list('Hillhead - GreaterGlasgowLPSTPartick@scotland.pnn.police.uk
Victoria Park - GreaterGlasgowLPSTDrumchapel@scotland.pnn.police.uk
Garscadden/Scotstounhill - GreaterGlasgowLPSTDrumchapel@scotland.pnn.police.uk
Drumchapel/Anniesland - GreaterGlasgowLPSTDrumchapel@scotland.pnn.police.uk
Partick East/Kelvindale - GreaterGlasgowLPSTPartick@scotland.pnn.police.uk'),
'Glasgow South East' = create_ward_list('Linn - GreaterGlasgowLPSTCathcart@scotland.pnn.police.uk
Pollokshields - GreaterGlasgowLPSTGorbals@scotland.pnn.police.uk
Langside - GreaterGlasgowLPSTCathcart@scotland.pnn.police.uk
Southside Central - GreaterGlasgowLPSTGorbals@scotland.pnn.police.uk'),
'Glasgow South West' = create_ward_list('Newlands/Auldburn GreaterGlasgowLPSTPollok@scotland.pnn.police.uk
Greater Pollok GreaterGlasgowLPSTPollok@scotland.pnn.police.uk
Cardonald GreaterGlasgowLPSTGovan@scotland.pnn.police.uk
Govan GreaterGlasgowLPSTGovan@scotland.pnn.police.uk'),
'Hamilton & Clydesdale' = create_ward_list('Hamilton North and East
Hamilton South
Hamilton West and Earnock
Larkhall
Avondale and Stonehouse
Blantyre
Bothwell and Uddingston
Clydesdale North
Clydesdale East
Clydesdale South
Clydesdale West'),
'Inverclyde' = create_ward_list('Inverclyde East: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde East Central: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde North: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde South: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde West: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde South West: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde Central'),
'Inverness' = c('Aird and Loch Ness',
'Culloden and Ardersier',
'Inverness South',
'Inverness Millburn',
'Inverness Ness-side',
'Inverness Central',
'Inverness West'),
'Mid-Argyll, Kintyre, Oban, Lorn and the Islands' = create_ward_list('Oban North and Lorn ObanNorthLornCPT@scotland.pnn.police.uk
Oban South and the Isles ObanSouthTheIslesCPT@scotland.pnn.police.uk
South Kintyre SouthKintyreCPT@scotland.pnn.police.uk
Kintyre and the Islands KintyreTheIslandsCPT@scotland.pnn.police.uk
Mid Argyll midargyllcpt@scotland.pnn.police.uk'),
'Midlothian' = council_ward_list('Midlothian'),
'Monklands & Cumbernauld' = create_ward_list('Airdrie Central
Airdrie North
Airdrie South
Gartcosh, Glenboig and Moodiesburn
Coatbridge South
Coatbridge West
Coatbridge North
Cumbernauld North
Kilsyth
Cumbernauld South
Stepps, Chryston and Muirhead
Cumbernauld East'),
'Moray' = create_ward_list('Speyside Glenlivet - SpeysideGlenlivetCPT@Scotland.pnn.police.uk
Keith and Cullen - KeithCullenCPT@Scotland.pnn.police.uk
Buckie - BuckieCPT@Scotland.pnn.police.uk
Fochabers Lhanbryde - FochabersLhanbrydeCPT@Scotland.pnn.police.uk
Heldon and Laich - HeldonLaichCPT@Scotland.pnn.police.uk
Elgin City North - ElginCityNorthCPT@Scotland.pnn.police.uk
Elgin City South - ElginCitySouthCPT@Scotland.pnn.police.uk
Forres - ForresCPT@Scotland.pnn.police.uk'),
'Motherwell, Wishaw and Bellshill' = create_ward_list('Motherwell South East and Ravenscraig
Wishaw
Murdostoun
Motherwell West
Motherwell North
Fortissat
Thorniewood
Bellshill
Mossend and Holytown'),
'North Ayrshire' = create_ward_list('Irvine West – AyrshireLPSTIrvine@scotland.pnn.police.uk
Irvine East – AyrshireLPSTIrvine@scotland.pnn.police.uk
Kilwinning – AyrshireLPSTKilwinning@scotland.pnn.police.uk
Stevenston – AyrshireLPST3Towns@scotland.pnn.police.uk / AyrshireLPSTArran@scotland.pnn.police.uk
Ardrossan and Arran - AyrshireLPST3Towns@scotland.pnn.police.uk / AyrshireLPSTArran@scotland.pnn.police.uk
Dalry & West Kilbride - AyrshireLPSTGarnockValley@scotland.pnn.police.uk / AyrshireLPSTNorthCoast&Cumbraes@scotland.pnn.police.uk
Kilbirnie & Beith – AyrshireLPSTGarnockVAlley@scotland.pnn.police.uk
North Coast & Cumbraes - AyrshireLPSTNorthCoast&Cumbraes@scotland.pnn.police.uk
Irvine South – AyrshireLPSTIrvine@scotland.pnn.police.uk
Saltcoats – AyrshireLPST3Towns@Scotland.pnn.police.uk / AyrshireLPSTArran@scotland.pnn.police.uk'),
'North East' = create_ward_list('Leith
Leith Walk
Craigentinny/Duddingston
Portobello/Craigmillar'),
'North Highlands' = c('Thurso and Northwest Caithness',
'Wick and East Caithness',
'North, West and Central Sutherland',
'East Sutherland and Edderton',
'Wester Ross, Strathpeffer and Lochalsh',
'Cromarty Firth',
'Tain and Easter Ross',
'Dingwall and Seaforth',
'Black Isle'),
'North West' = create_ward_list('Almond
Drum Brae/Gyle
Corstorphine/Murrayfield
Forth
Inverleith'),
'Orkney' = council_ward_list('Orkney Islands'),
'Paisley' = create_ward_list('Paisley East and Central: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Northwest: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Southeast: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Northeast and Ralston: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Southwest'),
'Perth & Kinross' = council_ward_list('Perth and Kinross'),
'Renfrew' = create_ward_list('Renfrew North and Braehead: RenfrewshireInverclydeLPSTRenfrew@Scotland.pnn.police.uk
Renfrew South and Gallowhill: RenfrewshireInverclydeLPSTRenfrew@Scotland.pnn.police.uk
Johnstone South and Elderslie: RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Johnstone North, Kilbarchan, Howwood and Lochwinnoch: RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Houston, Crosslee and Linwood RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Bishopton, Bridge of Weir and Langbank: RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Erskine and Inchinnan: RenfrewshireInverclydeLPSTRenfrew@Scotland.pnn.police.uk'),
'Scottish Borders' = council_ward_list('Scottish Borders'),
'Shetland' = council_ward_list('Shetland Islands'),
'South Argyll, Helensburgh, Lomond, Bute and Cowal.' = create_ward_list('Cowal - CowalCPT@scotland.pnn.police.uk
Dunoon - DunoonCPT@scotland.pnn.police.uk
Isle of Bute - IsleofButeCPT@scotland.pnn.police.uk
Lomond North - LomondNorthCPT@scotland.pnn.police.uk
Helensburgh Central - HelensburghCentralCPT@scotland.pnn.police.uk
Helensburgh and Lomond South - HelensburghLomondSouthCPT@scotland.pnn.police.uk'),
'South Ayrshire' = create_ward_list('Troon – AyrshireLPSTTroon@scotland.pnn.police.uk
Prestwick – AyrshireLPSTPrestwick@scotland.pnn.police.uk
Ayr North – AyrshireLPSTAyrNorth@scotland.pnn.police.uk
Ayr East – AyrshireLPSTSouthCoylton@scotland.pnn.police.uk
Ayr West – AyrshireLPSTSouthCoylton@scotland.pnn.police.uk
Symington and Monkton - AyrshireLPSTPrestwick@scotland.pnn.police.uk
Tarbolton, Mossblow, Craigie, Failford and St Quivox - AyrshireLPSTAyrNorth@scotland.pnn.police.uk
Maybole, North Carrick & Coylton – AyrshireLPSTMayboleNorthCarrick@scotland.pnn.police.uk or AyrshireLPSTGirvanSouthCarrick@scotland.pnn.police.uk
Girvan & South Carrick - AyrshireLPSTMayboleNorthCarrick@scotland.pnn.police.uk
Kyle'),
'South East' = create_ward_list('City Centre
Morningside
Southside/Newington
Liberton/Gilmerton'),
'South Highlands' = c("Caol and Mallaig",
"Fort William and Ardnamurchan",
"Eilean a'Cheo",
"Badenoch and Strathspey",
"Nairn and Cawdor"),
'South West' = create_ward_list('Pentland Hills
Sighthill/Gorgie
Colinton/Fairmilehead
Fountainbridge/Craiglockhart'),
'Stirling' = create_ward_list('Bannockburn - BannockburnCPT@Scotland.pnn.police.uk
Dunblane and Bridge of Allan - DunblaneBridgeofAllanCPT@scotland.pnn.police.uk
Forth and Endrick - ForthEndrickCPT@scotland.pnn.police.uk
Stirling East - StirlingEastCPT@Scotland.pnn.police.uk
Stirling North - StirlingNorthCPT@Scotland.pnn.police.uk
Stirling West - StirlingWestCPT@Scotland.pnn.police.uk
Trossachs and Teith - TrossachsTeithCPT@scotland.pnn.police.uk'),
'West' = create_ward_list('Dunfermline South DunfermlineSouthCPT@Scotland.pnn.police.uk
Dunfermline Central DunfermlineCentralCPT@Scotland.pnn.police.uk
Dunfermline North DunfermlineNorthCPT@Scotland.pnn.police.uk
Cowdenbeath CowdenbeathCPT@Scotland.pnn.police.uk
The Lochs TheLochsCPT@Scotland.pnn.police.uk
Lochgelly, Cardenden and Benarty LochgellyCardendenCPT@Scotland.pnn.police.uk
West Fife & Coastal Villages WestFifeCoastalVillagesCPT@scotland.pnn.police.uk
Rosyth RosythCPT@Scotland.pnn.police.uk
Inverkeithing & Dalgety Bay InverkeithingDalgetyBayCPT@Scotland.pnn.police.uk'),
'West Dumbartonshire' = create_ward_list('Clydebank Central - ClydebankCentralCPT@scotland.pnn.police.uk
Clydebank Waterfront - ClydebankWaterfrontCPT@scotland.pnn.police.uk
Kilpatrick - KilpatrickCPT@scotland.pnn.police.uk
Dumbarton - DumbartonCPT@scotland.pnn.police.uk
Leven - LevenCPT@scotland.pnn.police.uk
Lomond – lomondCPT@scotland.pnn.police.uk'),
'West Lothian' = council_ward_list('West Lothian'),
'Western Isles' = council_ward_list('Na h-Eileanan Siar')
))
#Join wards to area commands using lookup table
wardpop_area_commands <- wardpop_la %>%
dplyr::select(area_name, area_code, all_ages, pop_18_29, SPD_Name) %>%
left_join(., area_commands.levels, by=c("area_name"="values")) %>%
dplyr::rename(area_commands=ind) %>%
mutate(all_ages = as.numeric(gsub(',', '', all_ages))) #not dropping islands yet bc i need them for an accurate estimate of police officers per area command
#Find number of house parties per 100,000 residents
area_command_pop <- wardpop_area_commands %>%
group_by(area_commands) %>%
summarise_at(c('all_ages', 'pop_18_29'), sum, na.rm=TRUE) %>%
mutate(pc_18_29 = (pop_18_29/all_ages)*100) %>%
dplyr::select(area_commands, all_ages, pc_18_29) %>%
dplyr::filter(area_commands != 'Western Isles') %>%
dplyr::filter(area_commands != 'Orkney') %>%
dplyr::filter(area_commands != 'Shetland')
#Control variable - number of police officers per 10,000 residents
SPD_lookup <- wardpop_area_commands %>%
dplyr::select(SPD_Name, area_commands) %>%
distinct(area_commands, .keep_all=TRUE)
police_officers <- SPD_lookup %>%
group_by(SPD_Name) %>%
summarise(area_command_count = n_distinct(area_commands)) %>%
mutate(police_officer_count = case_when(
SPD_Name == 'North East' ~ 1103, #local police officer counts used bc they reflect police officers who would respond to house parties
SPD_Name == 'Tayside' ~ 916,
SPD_Name == 'Highlands and Islands' ~ 652,
SPD_Name == 'Forth Valley' ~ 641,
SPD_Name == 'Edinburgh' ~ 1125,
SPD_Name == 'The Lothians and Scottish Borders' ~ 907,
SPD_Name == 'Fife' ~ 775,
SPD_Name == 'Greater Glasgow' ~ 2452,
SPD_Name == 'Ayrshire' ~ 831,
SPD_Name == 'Lanarkshire' ~ 1385,
SPD_Name == 'Argyll and West Dunbartonshire' ~ 553,
SPD_Name == 'Renfrewshire and Inverclyde' ~ 611,
SPD_Name == 'Dumfries and Galloway' ~ 401
)) %>%
mutate(police_officers_per_area_command = floor(police_officer_count/area_command_count))
police_by_area_command <- SPD_lookup %>%
left_join(., police_officers, by='SPD_Name') %>%
dplyr::select(area_commands, police_officers_per_area_command) %>%
dplyr::filter(area_commands != 'Western Isles') %>%
dplyr::filter(area_commands != 'Orkney') %>%
dplyr::filter(area_commands != 'Shetland')
#Create df of ALL area commands per week
#a) vector of weeks, each repeated 51 times
weeks <- area_command_house_gatherings_weekly %>%
distinct(week) %>%
pull() %>%
rep(., each=49)
#b) vector of area commands, the whole set repeated 6 times (number of weeks)
areas <- area_command_pop %>%
dplyr::select(area_commands) %>%
#dplyr::filter(area_commands != 'Western Isles') %>%
#dplyr::filter(area_commands != 'Orkney') %>%
#dplyr::filter(area_commands != 'Shetland') %>%
pull() %>%
rep(., 6)
#c) concatenate two vectors into data frame
area_command_gatherings_per_100k <- data.frame(weeks, areas) %>%
dplyr::rename(week = weeks,
area_commands = areas)
#Storing dates for constructing regulation dummy variables
glasgow_ban_date <- as.Date('2020-09-01')
scotland_ban_date <- as.Date('2020-09-23')
area_command_gatherings_per_100k <- area_command_gatherings_per_100k %>%
merge(.,
area_command_house_gatherings_weekly,
by.x=c('week', 'area_commands'),
by.y=c('week', 'area_commands'),
all=TRUE) %>%
replace_na(list(house_gatherings_in_breach_of_restrictions = 0)) %>%
left_join(., area_command_pop, by="area_commands") %>%
mutate(illegal_gatherings_rate = (house_gatherings_in_breach_of_restrictions / all_ages)*100000, #only for visualisation
pop_over_100k = all_ages/100000, #used as offset in regression analysis
household_visits_banned = case_when(
week >= glasgow_ban_date & grepl('Glasgow|East Renfrewshire|West Dumbartonshire', area_commands) ~ 1,
week >= scotland_ban_date ~ 1,
TRUE ~ 0 #the reference level is a restriction on house gatherings of over 15 people
)) %>%
left_join(., police_by_area_command, by="area_commands") %>%
mutate(police_per_10k = floor((police_officers_per_area_command/all_ages)*10000))
#Plotting variation in house gatherings per week
weekly_plot <- ggplot(area_command_gatherings_per_100k, aes(x = as.factor(week), y=illegal_gatherings_rate)) +
geom_boxplot(fill='#1f78b4', color='#12486C', lwd=0.25) +
xlab('Week (first day shown)') +
ylab('Number of house gatherings breaching restrictions\nper 100,000 residents') +
theme(axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)))
weekly_plot
ggsave('weekly_plot.png', plot=weekly_plot, height = 21 , width = 33.87, units='cm')
#histogram shows that every week, the rates of house gatherings are positively skewed
#variance increases with the median, suggesting a poisson process
#should be noted that the numbers are very low
#### MAPPING ####
#Load in ward boundaries and merge them into area commands
#will need it as an sp object later, but sf is easier to work with
area_commands_sf <- st_read(here::here('data',
'bdline_essh_gb',
'Data',
'GB',
'district_borough_unitary_ward_region.shp')) %>%
filter(str_detect(CODE, "^S13")) %>%
left_join(.,
wardpop_area_commands,
by=c("CODE"="area_code")) %>%
group_by(area_commands) %>%
summarise() %>%
dplyr::filter(area_commands != 'Western Isles') %>%
dplyr::filter(area_commands != 'Orkney') %>%
dplyr::filter(area_commands != 'Shetland')
st_write(area_commands_sf, here::here('data',
'area_commands.geojson'))
#With premade GeoJSON file
area_commands_sf <- st_read(here::here('data',
'area_commands.geojson'))
#Convert to sp and join area command illegal gatherings data
area_commands_sp <- area_commands_sf %>%
as(., "Spatial")
#### REGRESSION ANALYSIS ####
#Find mean for each areal unit over the time period
mean_weekly_gatherings <- area_command_gatherings_per_100k %>%
group_by(area_commands) %>%
summarise_at('illegal_gatherings_rate', mean)
#Create summary statistics table
area_command_pop <- area_command_pop %>%
left_join(.,
mean_weekly_gatherings,
by="area_commands") %>%
left_join(.,
police_by_area_command,
by="area_commands")
summary(area_command_pop)
#Map summary statistics
area_commands_sf <- area_commands_sf %>%
dplyr::select(!(c(area, police_per_km2)))
#Simplify outline, because it's not important for this stage and it takes forever
simple_area_commands_sf <- area_commands_sf %>%
ms_simplify(.,keep=0.05)
#Bring population attributes into simplified sf object
simple_area_commands_sf <- simple_area_commands_sf %>%
left_join(.,
area_command_pop,
by="area_commands")
tmap_mode('plot')
party_map <- tm_shape(simple_area_commands_sf) +
tm_fill(col = 'illegal_gatherings_rate',
style = 'quantile',
palette = 'PuBu',
legend.hist = TRUE,
title = "",
legend.format = list(fun=function(x) paste0(formatC(x, digits=2, format="f")))) +
tm_borders(col = 'white', lwd = 0.5, alpha = 0.6) +
tm_layout(legend.hist.height = 0.2,
legend.hist.width = 0.3,
title = 'Mean rate of parties\nper 100,000 residents',
title.fontface = 2,
legend.text.size = 0.7) +
tm_scale_bar(position = c(0.6,0.02), text.size = 0.6) +
tm_compass(north=0, position=c(0.9, 0.9))
age_map <- tm_shape(simple_area_commands_sf) +
tm_fill(col = 'pc_18_29',
style='quantile',
palette = 'YlOrBr',
legend.hist = TRUE,
title="",
legend.format = list(fun=function(x) paste0(formatC(x, digits=2, format="f"))),
legend.position = c('left', 'bottom')) +
tm_borders(col = 'white', lwd = 0.5, alpha = 0.6) +
tm_layout(legend.hist.height = 0.2,
legend.hist.width = 0.3,
title = '% aged 18-29',
title.fontface = 2,
legend.text.size = 0.7) +
tm_scale_bar(position = c(0.6,0.02), text.size = 0.6) +
tm_compass(north=0, position=c(0.9, 0.9))
var_maps <- tmap_arrange(party_map, age_map, ncol=2)
var_maps
tmap_save(var_maps, 'var_maps.png', width=12.46, height=7)
#Join mean weekly gatherings per area command to sp data frame
#order is v important! the order of polygons in the sp data frame MUST match the order of spatial units in mean data frame
area_commands_sp@data$mean_weekly_gatherings <- mean_weekly_gatherings$illegal_gatherings_rate
#Create binary spatial weights matrix using sp dataframe
weights.nb <- poly2nb(area_commands_sp, row.names=mean_weekly_gatherings$area_commands)
weights <- nb2mat(weights.nb, style='B')
#Create vector of unique weeks
unique_weeks <- unique(weeks)
#### RESULTS - TIME W NO OTHER VARIABLES ####
#Run regression analysis using temporal data, with spatial weights matrix from sp object
formula1 <- house_gatherings_in_breach_of_restrictions ~ offset(log(pop_over_100k)) + police_officers_per_area_command
chain1 <- ST.CARsepspatial(formula=formula1,
family='poisson',
data=area_command_gatherings_per_100k,
W=weights,
burnin=3000,
n.sample=450000,
thin=100)
print(chain1)
summary(chain1$samples)
#beta = coefficients for covariates
#phi = spatial random effect for each time period to account for autocorrelation
#tau2 = spatial variance for each time period
#delta = overall temporal trend
#rho.S and rho.T = spatial and temporal autcorrelation parameters (common to all time periods)
#in bayesian inference, parameters are assumed to be drawn from prior distributions
#normally, the prior distribution of these parameters is constructed using existing knowledge on potential effect sizes, e.g. through systematic reviews
#for the autocorrelation parameters, the CAR.sepspatial model assumes a 'flat' distribution - no external information is included when calculating these parameters
#for the spatial variance parameters, a conjugate prior distribution is used -
#the posterior distribution is assumed to be the prior
#Visualising median rate over time
#create data frame of each temporal unit, with a column corresponding to the fitted median,
#lower + upper credibility intervals
trend.median <- data.frame(Week=unique_weeks, array(NA, c(6,3))) #first number is the number of temporal units
colnames(trend.median) <- c("Week", "Median", "LCI", "UCI")
#Visualising spatial SD over time
#create another data frame
trend.sd <- data.frame(Week=unique_weeks, array(NA, c(6,3)))
colnames(trend.sd) <- c("Week", "Median", "LCI", "UCI")
#Populate data frames using data from model
for(i in 1:6) { #i in the range of temporal units
#create posterior distribution of estimated rates across space for each year through matrix addition
posterior <- exp(chain1$samples$phi[ , ((i-1) * 49 + 1):(i * 49)] +
#samples$phi is a matrix, with rows corresponding to number of samples
#and columns corresponding to number of spatial units for each year i
#e.g. for the first week, the code will extract all the phi samples generated for each spatial unit
matrix(rep(chain1$samples$beta[,1] + chain1$samples$beta[,2] + chain1$samples$delta[ , i], 49),
ncol=49, byrow=FALSE))
#all beta samples are added to the delta samples for year i and repeated 271 times (rows of matrix)
#number of columns is the number of areal units
#posterior is the matrix of phi + beta + delta for each spatial unit in year i?
trend.median[i, 2:4] <- quantile(apply(posterior, 1, mean),
c(0.5, 0.025, 0.975))
#apply(posterior, 1, mean) finds the mean of each row in the posterior mean matrix for that year
#quantile() finds the median, lower credibility interval, and upper credibility interval for all the means
trend.sd[i, 2:4] <- quantile(apply(posterior, 1, sd),
c(0.5, 0.025, 0.975))
}
trend.median_long <- trend.median %>%
pivot_longer(cols=2:4,
names_to='category',
values_to='estimate') %>%
mutate(category = gsub('UCI|LCI', 'CI', category))
#Plot median over time
medianplot <- ggplot(aes(x = factor(week), y = illegal_gatherings_rate),
data=area_command_gatherings_per_100k) +
geom_jitter(color='#1f78b4') +
scale_x_discrete(name = "Week (first day shown)") +
scale_y_continuous(name = "Rate of illegal house gatherings") +
geom_line(data=trend.median, mapping=aes(x=factor(Week), y=Median,
group=1), colour='#990000', lwd=1) +
geom_line(data=trend.median, mapping=aes(x=factor(Week), y=LCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
geom_line(data=trend.median, mapping=aes(x=factor(Week), y=UCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
theme(axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)),
title = element_text(margin=margin(b=10), face='bold')) +
ggtitle('Predicted mean rate of illegal house gatherings\nper 100,000 residents')
medianplot
ggsave('medianplot.png', plot=medianplot, width=16.33, height=7)
#Plot SD over time
sdplot <- ggplot() +
scale_x_discrete(name = "Year") +
scale_y_continuous(name = "Spatial standard deviation") +
geom_line(data=trend.sd, mapping=aes(x=factor(Week), y=Median,
group=1), colour='#990000', lwd=1) +
geom_line(data=trend.sd, mapping=aes(x=factor(Week), y=LCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
geom_line(data=trend.sd, mapping=aes(x=factor(Week), y=UCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
theme(axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)),
plot.title = element_text(margin=margin(b=10), face='bold')) +
ggtitle('Standard deviation of estimated mean rates')
sdplot
ggsave('sdplot.png', plot=sdplot, width=16.33, height=7)
#### RESULTS - W COEFFICIENTS ####
#Model with coefficients
formula2 <- house_gatherings_in_breach_of_restrictions ~ offset(log(pop_over_100k)) + police_per_10k + pc_18_29 + household_visits_banned
chain2 <- ST.CARsepspatial(formula=formula2,
family='poisson',
data=area_command_gatherings_per_100k,
W=weights,
burnin=3000,
n.sample=450000,
thin=100)
print(chain2)
summary(chain2$samples)
#Visualising median rate over time
#create data frame of each temporal unit, with a column corresponding to the fitted median,
#lower + upper credibility intervals
trend.median2 <- data.frame(Week=unique_weeks, array(NA, c(6,3))) #first number is the number of temporal units
colnames(trend.median2) <- c("Week", "Median", "LCI", "UCI")
#Visualising spatial SD over time
#create another data frame
trend.sd2 <- data.frame(Week=unique_weeks, array(NA, c(6,3)))
colnames(trend.sd2) <- c("Week", "Median", "LCI", "UCI")
#Populate data frames using data from model
for(i in 1:6) { #i in the range of temporal units
#create posterior distribution of estimated rates across space for each year through matrix addition
posterior2 <- exp(chain2$samples$phi[ , ((i-1) * 49 + 1):(i * 49)] +
#samples$phi is a matrix, with rows corresponding to number of samples
#and columns corresponding to number of spatial units for each year i
#e.g. for the first week, the code will extract all the phi samples generated for each spatial unit
matrix(rep(chain2$samples$beta[,1] + chain2$samples$beta[,2] + chain2$samples$beta[,3] + chain2$samples$beta[,4] + chain2$samples$delta[ , i], 49),
ncol=49, byrow=FALSE))
#all beta samples are added to the delta samples for year i and repeated 271 times (rows of matrix)
#number of columns is the number of areal units
#posterior is the matrix of phi + beta + delta for each spatial unit in year i?
trend.median2[i, 2:4] <- quantile(apply(posterior2, 1, mean),
c(0.5, 0.025, 0.975))
#apply(posterior, 1, mean) finds the mean of each row in the posterior mean matrix for that year
#quantile() finds the median, lower credibility interval, and upper credibility interval for all the means
trend.sd2[i, 2:4] <- quantile(apply(posterior2, 1, sd),
c(0.5, 0.025, 0.975))
}
#Plot median over time
medianplot2 <- ggplot(aes(x = factor(week), y = illegal_gatherings_rate),
data=area_command_gatherings_per_100k) +
geom_jitter(color='#1f78b4') +
scale_x_discrete(name = "Week (first day shown)") +
scale_y_continuous(name = "Rate of illegal house gatherings") +
geom_line(data=trend.median2, mapping=aes(x=factor(Week), y=Median,
group=1), colour='#990000', lwd=1) +
geom_line(data=trend.median2, mapping=aes(x=factor(Week), y=LCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
geom_line(data=trend.median2, mapping=aes(x=factor(Week), y=UCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
theme(axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)),
title = element_text(margin=margin(b=10), face='bold')) +
ggtitle('Predicted mean rate of illegal house gatherings\nper 100,000 residents')
medianplot2
ggsave('medianplot2.png', plot=medianplot2, width=16.33, height=7)
#variable coefficients would be interpreted as the percent change in y for a unit change in x
#e to the power of the coefficient would give the ratio of y with predictor value x+1 to y with predictor value x
#e.g. if the coefficient were -0.0047, e^-0.0047 would be 0.995,
#meaning that for a one unit change in x, the corresponding value of y would be 99.5% of the preceding value
#or, more intuitively, 0.5% lower | /methodology.R | no_license | caranvr/GIS-final-public | R | false | false | 41,041 | r | library(CARBayesST)
library(CARBayesdata)
library(sp)
library(tidyverse)
library(ggplot2)
library(spdep)
library(lubridate)
library(sf)
library(tmap)
library(janitor)
library(here)
library(ggridges)
library(rgdal)
library(broom)
library(car)
library(rmapshaper)
library(ggdist)
#### PRE-PROCESSING PARTY DATA ####
df <- read_csv(here::here('data',
'scotland-house-parties-2020.csv'))
df <- df %>%
clean_names()
#Converting date column to datetime
df[['date']] <- as.Date(df[['date']], format='%d/%m/%Y') #why did this take so long
#Creating df without non-spatially referenced rows
df_spatialref <- df %>%
dplyr::filter(!is.na(area_commands))
#Creating table of total house parties attended by date
#only for visualisation purposes
house_gatherings_by_date <- df %>%
dplyr::select(date, house_gatherings_attended, house_gatherings_in_breach_of_restrictions) %>%
group_by(date) %>%
summarise_at(c("house_gatherings_attended", "house_gatherings_in_breach_of_restrictions"), sum, na.rm = TRUE) %>%
pivot_longer(cols=2:3,
names_to='category',
values_to='gathering_count')
#Plotting daily house parties attended and parties recorded as breaching restrictions
daily_plot <- ggplot(house_gatherings_by_date,
aes(x=date, y=gathering_count, fill=category)) +
geom_bar(stat='identity') +
scale_fill_brewer(palette='Paired',
name="",
labels=c("Total house gatherings attended",
"House gatherings in breach of restrictions")) +
xlab('Date') +
ylab('Number of house gatherings attended by police') +
geom_vline(aes(xintercept = as.Date('2020-09-01'),
linetype='Household visits banned in Glasgow,\nWest Dunbartonshire, and\nEast Renfrewshire'),
color='red') +
geom_vline(aes(xintercept = as.Date('2020-09-23'),
linetype='Household visits banned nationwide'),
color='red') +
scale_linetype_manual(name = 'Restrictions introduced',
values = c('Household visits banned in Glasgow,\nWest Dunbartonshire, and\nEast Renfrewshire' = 'dashed',
'Household visits banned nationwide' = 'solid')) +
theme(legend.title=element_blank(),
axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)))
#Another version of daily plot
daily_plot <- ggplot(house_gatherings_by_date,
aes(x=date, y=gathering_count, fill=category)) +
geom_bar(stat='identity') +
scale_fill_brewer(palette='Paired',
name="",
labels=c("Total house gatherings attended",
"House gatherings in breach of restrictions")) +
xlab('Date') +
ylab('Number of house gatherings attended by police') +
geom_vline(aes(xintercept = as.Date('2020-09-01')),
linetype='dashed',
color='red') +
geom_vline(aes(xintercept = as.Date('2020-09-23')),
linetype='solid',
color='red') +
annotate("text", x = as.Date('2020-09-02'),
y = 295,
size = 3,
label = "Household gatherings banned in\nGlasgow, West Dunbartonshire,\nand East Renfrewshire",
colour='red',
hjust=0) +
annotate("text", x = as.Date('2020-09-24'),
y = 298,
size = 3,
label = "Household gatherings banned\nnationwide",
colour='red',
hjust=0) +
theme(legend.title=element_blank(),
axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)))
daily_plot
ggsave('daily_plot2.png', plot=daily_plot, height = 21 , width = 33.87, units='cm')
#Creating table of house gatherings by week in each area command
#this will be used for analysis later
area_command_house_gatherings_weekly <- df_spatialref %>%
dplyr::select(date, area_commands, house_gatherings_in_breach_of_restrictions) %>%
mutate(week = floor_date(date, unit="week", week_start=getOption('lubridate.week.start', 5))) %>%
group_by(week, area_commands) %>%
summarise_at("house_gatherings_in_breach_of_restrictions", sum, na.rm = TRUE) %>%
dplyr::filter(week != as.Date('2020-10-09') & area_commands != 'Western Isles') %>%
dplyr::filter(area_commands != 'Orkney') %>%
dplyr::filter(area_commands != 'Shetland')
#### CONSTRUCTING LOOKUP TABLE ####
#Read in lookup table
lookup <- read_csv(here::here('data',
'Datazone2011lookup.csv'))
#Read in population data for electoral wards
wardpop <- read_csv(here::here('data',
'electoral-wards-19-tabs',
'electoral-wards-19-tabs_2019.csv'), skip = 3)
#We only want the ward population data for all ages, not split by gender
wardpop <- wardpop %>%
clean_names() %>%
dplyr::filter((sex == 'Persons') & (area_name != 'Scotland'))
#dealing with duplicated ward names in different councils
wardpop[268, 'area_name'] <- "North East (Glasgow)"
wardpop[269, 'area_name'] <- "North Isles (Orkney)"
wardpop[270, 'area_name'] <- "North Isles (Shetland)"
#Join ward name to local authority, adding information on population aged 18-29 in the process
agecols = c('x18', 'x19', 'x20', 'x21', 'x22', 'x23', 'x24', 'x25', 'x26', 'x27', 'x28', 'x29')
wardpop_la <- left_join(wardpop,
lookup,
by=c("area_code" = "MMWard_Code")) %>%
dplyr::select(c(area_code, area_name, all_ages, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, LA_Code, LA_Name, SPD_Code, SPD_Name)) %>%
distinct(area_code, .keep_all=TRUE) %>%
mutate(pop_18_29 = rowSums(.[agecols]))
#Function for processing raw text pasted from Police Scotland website
create_ward_list <- function(string) {
string <- str_split(string, "\\n")
for (i in 1:length(string)) {
string[[i]] <- str_replace(string[[i]], "&", "and")
string[[i]] <- str_replace(string[[i]], "(:|-|–).*", "")
string[[i]] <- str_replace(string[[i]], "\\s(([[:graph:]]+@[[:graph:]]+)( /\\s+[[:graph:]]+@[[:graph:]]+)?)", "")
string[[i]] <- str_replace(string[[i]], "\\s+$", "")
}
return(unlist(string)) #turn list output into character vector
}
#Function for processing wards where area commands covers one council
council_ward_list <- function(council) {
wards <- wardpop_la %>%
dplyr::filter(LA_Name == council) %>%
pull(area_name)
return(wards)
}
#Create lookup list
area_commands.levels <- stack(list(
'Aberdeen City North' = c('Dyce/Bucksburn/Danestone',
'Bridge of Don',
'Kingswells/Sheddocksley/Summerhill',
'Northfield/Mastrick North',
'Hilton/Woodside/Stockethill',
'Tillydrone/Seaton/Old Aberdeen',
'George St/Harbour'),
'Aberdeen City South' = c('Midstocket/Rosemount',
'Lower Deeside',
'Hazlehead/Queens Cross/Countesswells',
'Airyhall/Broomhill/Garthdee',
'Torry/Ferryhill',
'Kincorth/Nigg/Cove'),
'Aberdeenshire North' = create_ward_list('Banff and District - BanffDistrictCPT@Scotland.pnn.police.uk
Troup - TroupCPT@scotland.pnn.police.uk
Fraserburgh and District - FraserburghDistrictCPT@scotland.pnn.police.uk
Central Buchan - CentralBuchanCPT@Scotland.pnn.police.uk
Peterhead North and Rattray - PeterheadNorthRattrayCPT@Scotland.pnn.police.uk
Peterhead South and Cruden - PeterheadSouthCrudenCPT@Scotland.pnn.police.uk
Turriff and District - TurriffDistrictCPT@Scotland.pnn.police.uk
Mid Formartine - MidFormartineCPT@Scotland.pnn.police.uk
Ellon and District - EllonDistrictCPT@Scotland.pnn.police.uk'),
'Aberdeenshire South' = create_ward_list('West Garioch - WestGariochCPT@Scotland.pnn.police.uk
Inverurie and District - InverurieDistrictCPT@Scotland.pnn.police.uk
East Garioch - EastGariochCPT@Scotland.pnn.police.uk
Westhill and District - WesthillDistrictCPT@Scotland.pnn.police.uk
Huntly, Strathbogie and Howe of Alford - HuntlyStrathbogieHoweofAlfordCPT@Scotland.pnn.police.uk
Aboyne, Upper Deeside and Donside - AboyneUpperDeesideDonsideCPT@Scotland.pnn.police.uk
Banchory and Mid Deeside - BanchoryMidDeesideCPT@Scotland.pnn.police.uk
North Kincardine - NorthKincardineCPT@Scotland.pnn.police.uk
Stonehaven and Lower Deeside - StonehavenLowerDeesideCPT@Scotland.pnn.police.uk
Mearns - MearnsCPT@Scotland.pnn.police.uk'),
'Angus' = council_ward_list('Angus'),
'Central' = create_ward_list('Kirkcaldy Central
Kirkcaldy East
Kirkcaldy North
Burntisland, Kinghorn and Western Kirkcaldy
Glenrothes West and Kinglassie
Glenrothes Central and Thornton
Glenrothes North, Leslie and Markinch'),
'Clackmannanshire' = create_ward_list('Clackmannanshire East - ClackmannanshireEastCPT@scotland.pnn.police.uk
Clackmannanshire North - ClackmannanshireNorthCPT@scotland.pnn.police.uk
Clackmannanshire South - ClackmannanshireSouthCPT@scotland.pnn.police.uk
Clackmannanshire West - ClackmannanshireWestCPT@scotland.pnn.police.uk
Clackmannanshire Central'),
'Dumfriesshire' = create_ward_list('North West Dumfries
Mid and Upper Nithsdale
Lochar
Nith
Annandale South
Annandale North
Annandale East and Eskdale'),
'Dundee' = council_ward_list('Dundee City'),
'East' = create_ward_list('Tay Bridgehead
St. Andrews
East Neuk and Landward
Cupar
Howe of Fife and Tay Coast
Leven, Kennoway and Largo
Buckhaven, Methil and Wemyss Villages'),
'East Ayrshire' = create_ward_list('Annick – AyrshireLPSTAnnick@scotland.pnn.police.uk
Kilmarnock North – AyrshireLPSETKilmarnock@scotland.pnn.police.uk
Kilmarnock West and Crosshouse – AyrshireLPSTKilmarnock@scotland.pnn.police.uk
Kilmarnock East and Hurlford - AyrshireLPSTKilmarnock@scotland.pnn.police.uk
Hurlford - AyrshireLPSTIrvineValley@scotland.pnn.police.uk
Kilmarnock South – AyrshireLPSTKilmarnock@scotland.pnn.police.uk
Irvine Valley – AyrshireLPSTIrvineValley@scotland.pnn.police.uk
Ballochmyle – AyrshireLPSTCumnock@scotland.pnn.uk
Cumnock and New Cumnock – AyrshireLPSTCumnock@scotland.pnn.police.uk
Doon Valley – AyrshireLPSTDoonValley@scotland.pnn.police.uk'),
'East Dunbartonshire' = create_ward_list('Milngavie
Bearsden North
Bearsden South
Bishopbriggs North and Campsie
Bishopbriggs South
Lenzie and Kirkintilloch South
Kirkintilloch East and North and Twechar'),
'East Kilbride, Cambuslang and Rutherglen' = create_ward_list('East Kilbride Central North
East Kilbride Central South
East Kilbride West
East Kilbride South
East Kilbride East
Rutherglen Central and North
Rutherglen South
Cambuslang East
Cambuslang West'),
'East Lothian' = create_ward_list('Musselburgh - MusselburghWestCPT@scotland.pnn.police.uk, MusselburghEastCarberryCPT@scotland.pnn.police.uk
Preston, Seton and Gosford - PrestonSetonCPT@scotland.pnn.police.uk
Tranent, Wallyford and Macmerry - FasideCPT@scotland.pnn.police.uk
Haddington and Lammermuir - HaddingtonLammermuirCPT@scotland.pnn.police.uk
North Berwick Coastal - NorthBerwickCoastalCPT@scotland.pnn.police.uk
Dunbar and East Linton - DunbarEastLintonCPT@scotland.pnn.police.uk'),
'East Renfrewshire' = create_ward_list('Barrhead, Liboside and Uplawmoor
Newton Mearns North and Neilston
Giffnock and Thornliebank
Clarkston, Netherlee and Williamwood
Newton Mearns South and Eaglesham'),
'Falkirk' = create_ward_list("Bo'ness and Blackness - Bo'NessBlacknessCPT@scotland.pnn.police.uk
Bonnybridge and Larbert - BonnybridgeLarbertCPT@scotland.pnn.police.uk
Carse, Kinnaird and Tryst - CarseKinnairdTrystCPT@scotland.pnn.police.uk
Denny and Banknock - DennyBanknockCPT@scotland.pnn.police.uk
Falkirk North - FalkirkNorthCPT@scotland.pnn.police.uk
Falkirk South - FalkirkSouthCPT@scotland.pnn.police.uk
Grangemouth - GrangemouthCPT@scotland.pnn.police.uk
Lower Braes - LowerBraesCPT@scotland.pnn.police.uk
Upper Braes - UpperBraesCPT@Scotland.pnn.police.uk"),
'Galloway' = create_ward_list('Stranraer and the Rhins
Mid Galloway and Wigtown West
Dee and Glenkens
Castle Douglas and Crocketford
Abbey'),
'Glasgow City Centre' = 'Anderston/City/Yorkhill',
'Glasgow East' = create_ward_list('Calton GreaterGlasgowLPSTLondonRoad@scotland.pnn.police.uk
East Centre GreaterGlasgowLPSTLondonRoad@scotland.pnn.police.uk
Dennistoun'), #figured out that Dennistoun was in Glasgow East by looking up the ward councillor's FB page
'Glasgow North' = create_ward_list('Maryhill
Canal
Springburn/Robroyston'),
'Glasgow North East' = c("Baillieston",
"Shettleston",
"North East (Glasgow)"), #figured out Glasgow NE wards through process of elimination
'Glasgow North West' = create_ward_list('Hillhead - GreaterGlasgowLPSTPartick@scotland.pnn.police.uk
Victoria Park - GreaterGlasgowLPSTDrumchapel@scotland.pnn.police.uk
Garscadden/Scotstounhill - GreaterGlasgowLPSTDrumchapel@scotland.pnn.police.uk
Drumchapel/Anniesland - GreaterGlasgowLPSTDrumchapel@scotland.pnn.police.uk
Partick East/Kelvindale - GreaterGlasgowLPSTPartick@scotland.pnn.police.uk'),
'Glasgow South East' = create_ward_list('Linn - GreaterGlasgowLPSTCathcart@scotland.pnn.police.uk
Pollokshields - GreaterGlasgowLPSTGorbals@scotland.pnn.police.uk
Langside - GreaterGlasgowLPSTCathcart@scotland.pnn.police.uk
Southside Central - GreaterGlasgowLPSTGorbals@scotland.pnn.police.uk'),
'Glasgow South West' = create_ward_list('Newlands/Auldburn GreaterGlasgowLPSTPollok@scotland.pnn.police.uk
Greater Pollok GreaterGlasgowLPSTPollok@scotland.pnn.police.uk
Cardonald GreaterGlasgowLPSTGovan@scotland.pnn.police.uk
Govan GreaterGlasgowLPSTGovan@scotland.pnn.police.uk'),
'Hamilton & Clydesdale' = create_ward_list('Hamilton North and East
Hamilton South
Hamilton West and Earnock
Larkhall
Avondale and Stonehouse
Blantyre
Bothwell and Uddingston
Clydesdale North
Clydesdale East
Clydesdale South
Clydesdale West'),
'Inverclyde' = create_ward_list('Inverclyde East: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde East Central: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde North: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde South: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde West: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde South West: RenfrewshireInverclydeLPSTGreenock@Scotland.pnn.police.uk
Inverclyde Central'),
'Inverness' = c('Aird and Loch Ness',
'Culloden and Ardersier',
'Inverness South',
'Inverness Millburn',
'Inverness Ness-side',
'Inverness Central',
'Inverness West'),
'Mid-Argyll, Kintyre, Oban, Lorn and the Islands' = create_ward_list('Oban North and Lorn ObanNorthLornCPT@scotland.pnn.police.uk
Oban South and the Isles ObanSouthTheIslesCPT@scotland.pnn.police.uk
South Kintyre SouthKintyreCPT@scotland.pnn.police.uk
Kintyre and the Islands KintyreTheIslandsCPT@scotland.pnn.police.uk
Mid Argyll midargyllcpt@scotland.pnn.police.uk'),
'Midlothian' = council_ward_list('Midlothian'),
'Monklands & Cumbernauld' = create_ward_list('Airdrie Central
Airdrie North
Airdrie South
Gartcosh, Glenboig and Moodiesburn
Coatbridge South
Coatbridge West
Coatbridge North
Cumbernauld North
Kilsyth
Cumbernauld South
Stepps, Chryston and Muirhead
Cumbernauld East'),
'Moray' = create_ward_list('Speyside Glenlivet - SpeysideGlenlivetCPT@Scotland.pnn.police.uk
Keith and Cullen - KeithCullenCPT@Scotland.pnn.police.uk
Buckie - BuckieCPT@Scotland.pnn.police.uk
Fochabers Lhanbryde - FochabersLhanbrydeCPT@Scotland.pnn.police.uk
Heldon and Laich - HeldonLaichCPT@Scotland.pnn.police.uk
Elgin City North - ElginCityNorthCPT@Scotland.pnn.police.uk
Elgin City South - ElginCitySouthCPT@Scotland.pnn.police.uk
Forres - ForresCPT@Scotland.pnn.police.uk'),
'Motherwell, Wishaw and Bellshill' = create_ward_list('Motherwell South East and Ravenscraig
Wishaw
Murdostoun
Motherwell West
Motherwell North
Fortissat
Thorniewood
Bellshill
Mossend and Holytown'),
'North Ayrshire' = create_ward_list('Irvine West – AyrshireLPSTIrvine@scotland.pnn.police.uk
Irvine East – AyrshireLPSTIrvine@scotland.pnn.police.uk
Kilwinning – AyrshireLPSTKilwinning@scotland.pnn.police.uk
Stevenston – AyrshireLPST3Towns@scotland.pnn.police.uk / AyrshireLPSTArran@scotland.pnn.police.uk
Ardrossan and Arran - AyrshireLPST3Towns@scotland.pnn.police.uk / AyrshireLPSTArran@scotland.pnn.police.uk
Dalry & West Kilbride - AyrshireLPSTGarnockValley@scotland.pnn.police.uk / AyrshireLPSTNorthCoast&Cumbraes@scotland.pnn.police.uk
Kilbirnie & Beith – AyrshireLPSTGarnockVAlley@scotland.pnn.police.uk
North Coast & Cumbraes - AyrshireLPSTNorthCoast&Cumbraes@scotland.pnn.police.uk
Irvine South – AyrshireLPSTIrvine@scotland.pnn.police.uk
Saltcoats – AyrshireLPST3Towns@Scotland.pnn.police.uk / AyrshireLPSTArran@scotland.pnn.police.uk'),
'North East' = create_ward_list('Leith
Leith Walk
Craigentinny/Duddingston
Portobello/Craigmillar'),
'North Highlands' = c('Thurso and Northwest Caithness',
'Wick and East Caithness',
'North, West and Central Sutherland',
'East Sutherland and Edderton',
'Wester Ross, Strathpeffer and Lochalsh',
'Cromarty Firth',
'Tain and Easter Ross',
'Dingwall and Seaforth',
'Black Isle'),
'North West' = create_ward_list('Almond
Drum Brae/Gyle
Corstorphine/Murrayfield
Forth
Inverleith'),
'Orkney' = council_ward_list('Orkney Islands'),
'Paisley' = create_ward_list('Paisley East and Central: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Northwest: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Southeast: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Northeast and Ralston: RenfrewshireInverclydeLPSTPaisley@Scotland.pnn.police.uk
Paisley Southwest'),
'Perth & Kinross' = council_ward_list('Perth and Kinross'),
'Renfrew' = create_ward_list('Renfrew North and Braehead: RenfrewshireInverclydeLPSTRenfrew@Scotland.pnn.police.uk
Renfrew South and Gallowhill: RenfrewshireInverclydeLPSTRenfrew@Scotland.pnn.police.uk
Johnstone South and Elderslie: RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Johnstone North, Kilbarchan, Howwood and Lochwinnoch: RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Houston, Crosslee and Linwood RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Bishopton, Bridge of Weir and Langbank: RenfrewshireInverclydeLPSTJohnstone@Scotland.pnn.police.uk
Erskine and Inchinnan: RenfrewshireInverclydeLPSTRenfrew@Scotland.pnn.police.uk'),
'Scottish Borders' = council_ward_list('Scottish Borders'),
'Shetland' = council_ward_list('Shetland Islands'),
'South Argyll, Helensburgh, Lomond, Bute and Cowal.' = create_ward_list('Cowal - CowalCPT@scotland.pnn.police.uk
Dunoon - DunoonCPT@scotland.pnn.police.uk
Isle of Bute - IsleofButeCPT@scotland.pnn.police.uk
Lomond North - LomondNorthCPT@scotland.pnn.police.uk
Helensburgh Central - HelensburghCentralCPT@scotland.pnn.police.uk
Helensburgh and Lomond South - HelensburghLomondSouthCPT@scotland.pnn.police.uk'),
'South Ayrshire' = create_ward_list('Troon – AyrshireLPSTTroon@scotland.pnn.police.uk
Prestwick – AyrshireLPSTPrestwick@scotland.pnn.police.uk
Ayr North – AyrshireLPSTAyrNorth@scotland.pnn.police.uk
Ayr East – AyrshireLPSTSouthCoylton@scotland.pnn.police.uk
Ayr West – AyrshireLPSTSouthCoylton@scotland.pnn.police.uk
Symington and Monkton - AyrshireLPSTPrestwick@scotland.pnn.police.uk
Tarbolton, Mossblow, Craigie, Failford and St Quivox - AyrshireLPSTAyrNorth@scotland.pnn.police.uk
Maybole, North Carrick & Coylton – AyrshireLPSTMayboleNorthCarrick@scotland.pnn.police.uk or AyrshireLPSTGirvanSouthCarrick@scotland.pnn.police.uk
Girvan & South Carrick - AyrshireLPSTMayboleNorthCarrick@scotland.pnn.police.uk
Kyle'),
'South East' = create_ward_list('City Centre
Morningside
Southside/Newington
Liberton/Gilmerton'),
'South Highlands' = c("Caol and Mallaig",
"Fort William and Ardnamurchan",
"Eilean a'Cheo",
"Badenoch and Strathspey",
"Nairn and Cawdor"),
'South West' = create_ward_list('Pentland Hills
Sighthill/Gorgie
Colinton/Fairmilehead
Fountainbridge/Craiglockhart'),
'Stirling' = create_ward_list('Bannockburn - BannockburnCPT@Scotland.pnn.police.uk
Dunblane and Bridge of Allan - DunblaneBridgeofAllanCPT@scotland.pnn.police.uk
Forth and Endrick - ForthEndrickCPT@scotland.pnn.police.uk
Stirling East - StirlingEastCPT@Scotland.pnn.police.uk
Stirling North - StirlingNorthCPT@Scotland.pnn.police.uk
Stirling West - StirlingWestCPT@Scotland.pnn.police.uk
Trossachs and Teith - TrossachsTeithCPT@scotland.pnn.police.uk'),
'West' = create_ward_list('Dunfermline South DunfermlineSouthCPT@Scotland.pnn.police.uk
Dunfermline Central DunfermlineCentralCPT@Scotland.pnn.police.uk
Dunfermline North DunfermlineNorthCPT@Scotland.pnn.police.uk
Cowdenbeath CowdenbeathCPT@Scotland.pnn.police.uk
The Lochs TheLochsCPT@Scotland.pnn.police.uk
Lochgelly, Cardenden and Benarty LochgellyCardendenCPT@Scotland.pnn.police.uk
West Fife & Coastal Villages WestFifeCoastalVillagesCPT@scotland.pnn.police.uk
Rosyth RosythCPT@Scotland.pnn.police.uk
Inverkeithing & Dalgety Bay InverkeithingDalgetyBayCPT@Scotland.pnn.police.uk'),
'West Dumbartonshire' = create_ward_list('Clydebank Central - ClydebankCentralCPT@scotland.pnn.police.uk
Clydebank Waterfront - ClydebankWaterfrontCPT@scotland.pnn.police.uk
Kilpatrick - KilpatrickCPT@scotland.pnn.police.uk
Dumbarton - DumbartonCPT@scotland.pnn.police.uk
Leven - LevenCPT@scotland.pnn.police.uk
Lomond – lomondCPT@scotland.pnn.police.uk'),
'West Lothian' = council_ward_list('West Lothian'),
'Western Isles' = council_ward_list('Na h-Eileanan Siar')
))
#Join wards to area commands using the lookup table.
#Island councils are kept at this stage: they are needed later for an
#accurate estimate of police officers per area command.
wardpop_area_commands <- wardpop_la %>%
  dplyr::select(area_name, area_code, all_ages, pop_18_29, SPD_Name) %>%
  left_join(area_commands.levels, by = c("area_name" = "values")) %>%
  dplyr::rename(area_commands = ind) %>%
  mutate(all_ages = as.numeric(gsub(",", "", all_ages, fixed = TRUE)))
#Denominators for the house-gathering rates: population totals and the
#share of residents aged 18-29 per area command.
#The island area commands are excluded from the modelling data set.
area_command_pop <- wardpop_area_commands %>%
  group_by(area_commands) %>%
  summarise_at(c("all_ages", "pop_18_29"), sum, na.rm = TRUE) %>%
  mutate(pc_18_29 = (pop_18_29 / all_ages) * 100) %>%
  dplyr::select(area_commands, all_ages, pc_18_29) %>%
  dplyr::filter(!area_commands %in% c("Western Isles", "Orkney", "Shetland"))
#Control variable - number of police officers per 10,000 residents.
#One row per area command with its Scottish Police Division (SPD).
SPD_lookup <- wardpop_area_commands %>%
  dplyr::select(SPD_Name, area_commands) %>%
  distinct(area_commands, .keep_all = TRUE)
#Divide each division's local officer head-count evenly across its area
#commands. Local counts are used because they reflect the officers who
#would actually respond to house parties.
police_officers <- SPD_lookup %>%
  group_by(SPD_Name) %>%
  summarise(area_command_count = n_distinct(area_commands)) %>%
  mutate(
    police_officer_count = case_when(
      SPD_Name == 'North East' ~ 1103,
      SPD_Name == 'Tayside' ~ 916,
      SPD_Name == 'Highlands and Islands' ~ 652,
      SPD_Name == 'Forth Valley' ~ 641,
      SPD_Name == 'Edinburgh' ~ 1125,
      SPD_Name == 'The Lothians and Scottish Borders' ~ 907,
      SPD_Name == 'Fife' ~ 775,
      SPD_Name == 'Greater Glasgow' ~ 2452,
      SPD_Name == 'Ayrshire' ~ 831,
      SPD_Name == 'Lanarkshire' ~ 1385,
      SPD_Name == 'Argyll and West Dunbartonshire' ~ 553,
      SPD_Name == 'Renfrewshire and Inverclyde' ~ 611,
      SPD_Name == 'Dumfries and Galloway' ~ 401
    ),
    police_officers_per_area_command = floor(police_officer_count / area_command_count)
  )
#Officer allocation per area command; island commands dropped for modelling.
police_by_area_command <- SPD_lookup %>%
  left_join(police_officers, by = 'SPD_Name') %>%
  dplyr::select(area_commands, police_officers_per_area_command) %>%
  dplyr::filter(!area_commands %in% c('Western Isles', 'Orkney', 'Shetland'))
#Build the full week x area-command grid so that week/area combinations with
#zero recorded gatherings still appear as rows.
#a) every distinct week, each repeated once per area command (49 commands)
weeks <- area_command_house_gatherings_weekly %>%
  distinct(week) %>%
  pull() %>%
  rep(each = 49)
#b) the full set of area commands, cycled once per week (6 weeks)
areas <- area_command_pop %>%
  pull(area_commands) %>%
  rep(times = 6)
#c) combine the two vectors into one long data frame
area_command_gatherings_per_100k <- data.frame(week = weeks,
                                               area_commands = areas)
#Dates used to construct the regulation dummy variables.
glasgow_ban_date <- as.Date('2020-09-01')
scotland_ban_date <- as.Date('2020-09-23')
#Merge observed weekly counts onto the full week x area-command grid; week/
#area combinations with no recorded gatherings become zero counts, then
#derive the rate, offset, regulation dummy and policing covariates.
area_command_gatherings_per_100k <- area_command_gatherings_per_100k %>%
  merge(area_command_house_gatherings_weekly,
        by = c('week', 'area_commands'),
        all = TRUE) %>%
  replace_na(list(house_gatherings_in_breach_of_restrictions = 0)) %>%
  left_join(area_command_pop, by = "area_commands") %>%
  mutate(
    #rate per 100k residents - used for visualisation only
    illegal_gatherings_rate = (house_gatherings_in_breach_of_restrictions / all_ages) * 100000,
    #offset term for the Poisson regression
    pop_over_100k = all_ages / 100000,
    #1 once household visits were banned in that area command; the
    #reference level (0) is the restriction on gatherings of over 15 people
    household_visits_banned = case_when(
      week >= glasgow_ban_date & grepl('Glasgow|East Renfrewshire|West Dumbartonshire', area_commands) ~ 1,
      week >= scotland_ban_date ~ 1,
      TRUE ~ 0
    )
  ) %>%
  left_join(police_by_area_command, by = "area_commands") %>%
  mutate(police_per_10k = floor((police_officers_per_area_command / all_ages) * 10000))
#Box plots of the weekly illegal-gathering rate across area commands.
weekly_plot <- ggplot(area_command_gatherings_per_100k,
                      aes(x = as.factor(week), y = illegal_gatherings_rate)) +
  geom_boxplot(fill = '#1f78b4', color = '#12486C', lwd = 0.25) +
  labs(x = 'Week (first day shown)',
       y = 'Number of house gatherings breaching restrictions\nper 100,000 residents') +
  theme(axis.title.x = element_text(margin = margin(t = 10)),
        axis.title.y = element_text(margin = margin(r = 10)))
weekly_plot
ggsave('weekly_plot.png', plot = weekly_plot, height = 21, width = 33.87, units = 'cm')
#Each week the rates are positively skewed and the variance increases with
#the median, suggesting a Poisson process; note the counts are very low.
#### MAPPING ####
#Ward boundaries merged up to area commands.
#sf is used for manipulation; an sp copy is made below because the spatial
#weights functions used later need an sp object.
area_commands_sf <- st_read(here::here('data',
                                       'bdline_essh_gb',
                                       'Data',
                                       'GB',
                                       'district_borough_unitary_ward_region.shp')) %>%
  filter(str_detect(CODE, "^S13")) %>%
  left_join(wardpop_area_commands, by = c("CODE" = "area_code")) %>%
  group_by(area_commands) %>%
  summarise() %>%
  dplyr::filter(!area_commands %in% c('Western Isles', 'Orkney', 'Shetland'))
st_write(area_commands_sf, here::here('data',
                                      'area_commands.geojson'))
#Reload from the premade GeoJSON written above
area_commands_sf <- st_read(here::here('data',
                                       'area_commands.geojson'))
#Convert to sp for the neighbourhood/weights functions
area_commands_sp <- area_commands_sf %>%
  as("Spatial")
#### REGRESSION ANALYSIS ####
#Mean weekly gathering rate for each area command over the study period.
mean_weekly_gatherings <- area_command_gatherings_per_100k %>%
  group_by(area_commands) %>%
  summarise_at('illegal_gatherings_rate', mean)
#Summary-statistics table: population, age share, mean rate and policing.
area_command_pop <- area_command_pop %>%
  left_join(mean_weekly_gatherings, by = "area_commands") %>%
  left_join(police_by_area_command, by = "area_commands")
summary(area_command_pop)
#Map summary statistics
#Drop columns that are not needed for mapping.
#NOTE(review): 'area' and 'police_per_km2' are not created anywhere in this
#script, so they presumably come from the premade GeoJSON - confirm.
area_commands_sf <- area_commands_sf %>%
  dplyr::select(-c(area, police_per_km2))
#Simplify the outline: full detail is slow and unnecessary for choropleths.
simple_area_commands_sf <- area_commands_sf %>%
  ms_simplify(keep = 0.05)
#Attach the population/rate attributes to the simplified geometry.
simple_area_commands_sf <- simple_area_commands_sf %>%
  left_join(area_command_pop, by = "area_commands")
#Choropleths of the two key variables per area command (static output).
tmap_mode('plot')
#Mean illegal-gathering rate per 100,000 residents, quantile class breaks.
party_map <- tm_shape(simple_area_commands_sf) +
tm_fill(col = 'illegal_gatherings_rate',
style = 'quantile',
palette = 'PuBu',
legend.hist = TRUE,
title = "",
legend.format = list(fun=function(x) paste0(formatC(x, digits=2, format="f")))) +
tm_borders(col = 'white', lwd = 0.5, alpha = 0.6) +
tm_layout(legend.hist.height = 0.2,
legend.hist.width = 0.3,
title = 'Mean rate of parties\nper 100,000 residents',
title.fontface = 2,
legend.text.size = 0.7) +
tm_scale_bar(position = c(0.6,0.02), text.size = 0.6) +
tm_compass(north=0, position=c(0.9, 0.9))
#Percentage of residents aged 18-29, quantile class breaks.
age_map <- tm_shape(simple_area_commands_sf) +
tm_fill(col = 'pc_18_29',
style='quantile',
palette = 'YlOrBr',
legend.hist = TRUE,
title="",
legend.format = list(fun=function(x) paste0(formatC(x, digits=2, format="f"))),
legend.position = c('left', 'bottom')) +
tm_borders(col = 'white', lwd = 0.5, alpha = 0.6) +
tm_layout(legend.hist.height = 0.2,
legend.hist.width = 0.3,
title = '% aged 18-29',
title.fontface = 2,
legend.text.size = 0.7) +
tm_scale_bar(position = c(0.6,0.02), text.size = 0.6) +
tm_compass(north=0, position=c(0.9, 0.9))
#Side-by-side panel of the two maps, then save to disk.
var_maps <- tmap_arrange(party_map, age_map, ncol=2)
var_maps
tmap_save(var_maps, 'var_maps.png', width=12.46, height=7)
#Join mean weekly gatherings per area command to the sp data frame.
#Order is critical: the values are assigned positionally, so the polygons in
#the sp data frame MUST be in the same order as the spatial units in the
#mean data frame.
area_commands_sp@data$mean_weekly_gatherings <- mean_weekly_gatherings$illegal_gatherings_rate
#Contiguity-based neighbour list and binary (style 'B') spatial weights
#matrix, built from the sp polygons.
weights.nb <- poly2nb(area_commands_sp, row.names=mean_weekly_gatherings$area_commands)
weights <- nb2mat(weights.nb, style='B')
#Vector of the unique study weeks, used to label the temporal units below.
unique_weeks <- unique(weeks)
#### RESULTS - TIME W NO OTHER VARIABLES ####
#Spatio-temporal Poisson model with only the policing covariate, using the
#spatial weights matrix built from the sp object.
formula1 <- house_gatherings_in_breach_of_restrictions ~ offset(log(pop_over_100k)) + police_officers_per_area_command
#MCMC settings: 450,000 samples, 3,000 burn-in, thinned to every 100th draw.
chain1 <- ST.CARsepspatial(formula=formula1,
family='poisson',
data=area_command_gatherings_per_100k,
W=weights,
burnin=3000,
n.sample=450000,
thin=100)
print(chain1)
summary(chain1$samples)
#Parameter glossary:
#beta = coefficients for covariates
#phi = spatial random effect for each time period to account for autocorrelation
#tau2 = spatial variance for each time period
#delta = overall temporal trend
#rho.S and rho.T = spatial and temporal autocorrelation parameters (common to all time periods)
#In Bayesian inference, parameters are assumed to be drawn from prior
#distributions; normally these priors encode existing knowledge of effect
#sizes. The CAR.sepspatial model uses flat priors for the autocorrelation
#parameters (no external information) and conjugate priors for the spatial
#variance parameters.
#Visualising the fitted median rate over time: one row per temporal unit
#holding the fitted median and the lower/upper 95% credible interval.
trend.median <- data.frame(Week=unique_weeks, array(NA, c(6,3))) #first number is the number of temporal units
colnames(trend.median) <- c("Week", "Median", "LCI", "UCI")
#Same layout for the spatial standard deviation over time.
trend.sd <- data.frame(Week=unique_weeks, array(NA, c(6,3)))
colnames(trend.sd) <- c("Week", "Median", "LCI", "UCI")
#Populate both data frames from the posterior samples.
#49 = number of area commands, 6 = number of weeks.
for(i in 1:6) { #i in the range of temporal units
#Posterior distribution of estimated rates across space for week i.
#samples$phi has one row per MCMC sample and, per week, one column per
#spatial unit; columns ((i-1)*49+1):(i*49) are week i's 49 area commands.
posterior <- exp(chain1$samples$phi[ , ((i-1) * 49 + 1):(i * 49)] +
#NOTE(review): beta[,2] (the policing coefficient) is added directly rather
#than multiplied by the covariate value, i.e. the covariate is implicitly
#evaluated at 1 - confirm this is intended.
matrix(rep(chain1$samples$beta[,1] + chain1$samples$beta[,2] + chain1$samples$delta[ , i], 49),
ncol=49, byrow=FALSE))
#Row means give, per MCMC sample, the spatial mean rate for week i;
#quantile() then yields the median and the 95% credible interval bounds.
trend.median[i, 2:4] <- quantile(apply(posterior, 1, mean),
c(0.5, 0.025, 0.975))
#Row standard deviations summarise spatial variability for week i.
trend.sd[i, 2:4] <- quantile(apply(posterior, 1, sd),
c(0.5, 0.025, 0.975))
}
#Long format for plotting; both interval bounds share the label "CI".
#NOTE(review): trend.median_long does not appear to be used below.
trend.median_long <- trend.median %>%
pivot_longer(cols=2:4,
names_to='category',
values_to='estimate') %>%
mutate(category = gsub('UCI|LCI', 'CI', category))
#Plot the fitted median rate over time (solid red line) with its 95%
#credible interval (dashed lines) over the observed weekly rates (jitter).
medianplot <- ggplot(aes(x = factor(week), y = illegal_gatherings_rate),
data=area_command_gatherings_per_100k) +
geom_jitter(color='#1f78b4') +
scale_x_discrete(name = "Week (first day shown)") +
scale_y_continuous(name = "Rate of illegal house gatherings") +
geom_line(data=trend.median, mapping=aes(x=factor(Week), y=Median,
group=1), colour='#990000', lwd=1) +
geom_line(data=trend.median, mapping=aes(x=factor(Week), y=LCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
geom_line(data=trend.median, mapping=aes(x=factor(Week), y=UCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
theme(axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)),
title = element_text(margin=margin(b=10), face='bold')) +
ggtitle('Predicted mean rate of illegal house gatherings\nper 100,000 residents')
medianplot
ggsave('medianplot.png', plot=medianplot, width=16.33, height=7)
#Plot the spatial standard deviation of the fitted rates over time, with
#its 95% credible interval (dashed lines).
sdplot <- ggplot() +
  #fixed x-axis label: the temporal unit here is the week, not the year
  scale_x_discrete(name = "Week (first day shown)") +
  scale_y_continuous(name = "Spatial standard deviation") +
  geom_line(data=trend.sd, mapping=aes(x=factor(Week), y=Median,
                                       group=1), colour='#990000', lwd=1) +
  geom_line(data=trend.sd, mapping=aes(x=factor(Week), y=LCI,
                                       group=1), lwd=0.5, linetype='dashed', colour='black') +
  geom_line(data=trend.sd, mapping=aes(x=factor(Week), y=UCI,
                                       group=1), lwd=0.5, linetype='dashed', colour='black') +
  theme(axis.title.x = element_text(margin = margin(t=10)),
        axis.title.y = element_text(margin = margin(r=10)),
        plot.title = element_text(margin=margin(b=10), face='bold')) +
  ggtitle('Standard deviation of estimated mean rates')
sdplot
ggsave('sdplot.png', plot=sdplot, width=16.33, height=7)
#### RESULTS - W COEFFICIENTS ####
#Full model: policing rate, age structure and the household-visit ban dummy.
formula2 <- house_gatherings_in_breach_of_restrictions ~ offset(log(pop_over_100k)) + police_per_10k + pc_18_29 + household_visits_banned
#Same MCMC settings as chain1.
chain2 <- ST.CARsepspatial(formula=formula2,
family='poisson',
data=area_command_gatherings_per_100k,
W=weights,
burnin=3000,
n.sample=450000,
thin=100)
print(chain2)
summary(chain2$samples)
#Fitted-median and spatial-SD summaries per week, as for chain1: one row
#per temporal unit with median and lower/upper 95% credible interval.
trend.median2 <- data.frame(Week=unique_weeks, array(NA, c(6,3))) #first number is the number of temporal units
colnames(trend.median2) <- c("Week", "Median", "LCI", "UCI")
trend.sd2 <- data.frame(Week=unique_weeks, array(NA, c(6,3)))
colnames(trend.sd2) <- c("Week", "Median", "LCI", "UCI")
#Populate both data frames from the posterior samples
#(49 area commands, 6 weeks).
for(i in 1:6) { #i in the range of temporal units
#Posterior distribution of estimated rates across space for week i;
#phi columns ((i-1)*49+1):(i*49) are week i's 49 area commands.
posterior2 <- exp(chain2$samples$phi[ , ((i-1) * 49 + 1):(i * 49)] +
#NOTE(review): as in the chain1 loop, the beta coefficients are summed
#without being multiplied by their covariate values - confirm intent.
matrix(rep(chain2$samples$beta[,1] + chain2$samples$beta[,2] + chain2$samples$beta[,3] + chain2$samples$beta[,4] + chain2$samples$delta[ , i], 49),
ncol=49, byrow=FALSE))
#Median and 95% credible interval of the spatial mean for week i.
trend.median2[i, 2:4] <- quantile(apply(posterior2, 1, mean),
c(0.5, 0.025, 0.975))
#Median and 95% credible interval of the spatial SD for week i.
trend.sd2[i, 2:4] <- quantile(apply(posterior2, 1, sd),
c(0.5, 0.025, 0.975))
}
#Plot the fitted median over time against the observed weekly rates.
medianplot2 <- ggplot(aes(x = factor(week), y = illegal_gatherings_rate),
data=area_command_gatherings_per_100k) +
geom_jitter(color='#1f78b4') +
scale_x_discrete(name = "Week (first day shown)") +
scale_y_continuous(name = "Rate of illegal house gatherings") +
geom_line(data=trend.median2, mapping=aes(x=factor(Week), y=Median,
group=1), colour='#990000', lwd=1) +
geom_line(data=trend.median2, mapping=aes(x=factor(Week), y=LCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
geom_line(data=trend.median2, mapping=aes(x=factor(Week), y=UCI,
group=1), lwd=0.5, linetype='dashed', colour='black') +
theme(axis.title.x = element_text(margin = margin(t=10)),
axis.title.y = element_text(margin = margin(r=10)),
title = element_text(margin=margin(b=10), face='bold')) +
ggtitle('Predicted mean rate of illegal house gatherings\nper 100,000 residents')
medianplot2
ggsave('medianplot2.png', plot=medianplot2, width=16.33, height=7)
#Interpreting Poisson coefficients: a coefficient b means that a one-unit
#change in x multiplies the expected rate by exp(b). E.g. if b = -0.0047,
#exp(-0.0047) = 0.995, so the rate falls by roughly 0.5% per unit of x.
/pla/inst/R.xtables/fun.R | no_license | ingted/R-Examples | R | false | false | 3,292 | r | ||
#==============================================================================
#Metric Sensitivity
#==============================================================================
#'Metric Sensitivity
#'
#'@param metrics.df = data frame of metric values for each station. Columns
#'1-6 are assumed to hold station descriptors (including CATEGORY); metric
#'values are assumed to start in column 7.
#'@param upper.class = The site classification that represents better
#'environmental conditions.
#'@param lower.class = The site classification that represents the degraded
#'environmental conditions.
#'@param method = the sensitivity function to be used during the assessment
#'("ODE" (default), "DE", "CMA", "SSE", or "BARBOUR").
#'@return Determines the threshold at which a metric best categorizes
#'reference and degraded stations.
#'@export
#'
sensitivity <- function(metrics.df, upper.class, lower.class, method = "ODE"){
  # Drop the unidentified-taxa helper column; it is not a scoreable metric.
  if("PCT_UNIDENTIFIED" %in% names(metrics.df)){
    metrics.df <- metrics.df[, !grepl("PCT_UNIDENTIFIED", names(metrics.df))]
  }
  # Clean up merge artifacts: drop ".y" duplicate columns and strip the ".x"
  # suffix. The patterns are anchored and the dot escaped; the previous
  # unescaped ".y"/".x" patterns matched ANY character followed by y/x and
  # could drop or rename unrelated columns.
  if(any(grepl("\\.y$", names(metrics.df)))){
    metrics.df <- metrics.df[, !grepl("\\.y$", names(metrics.df))]
  }
  if(any(grepl("\\.x$", names(metrics.df)))){
    names(metrics.df) <- gsub("\\.x$", "", names(metrics.df))
  }
  # Split the stations into the degraded and reference classes.
  # %in% is used throughout (the >= 8 branch previously used ==, which
  # breaks when a class argument contains more than one level).
  deg.df <- metrics.df[metrics.df$CATEGORY %in% lower.class, ]
  ref.df <- metrics.df[metrics.df$CATEGORY %in% upper.class, ]
  # BARBOUR does not use the reference-percentile table; return immediately
  # instead of computing it for nothing.
  if(method == "BARBOUR"){
    return(barbour(metrics.df, ref.df, deg.df))
  }
  # length() of a data frame is its column count: fewer than 8 columns
  # means exactly one metric column (column 7).
  if(length(ref.df) < 8){
    # Median of the single metric for each class.
    ref_50 <- quantile(ref.df[, 7], 0.50, na.rm = TRUE)
    deg_50 <- quantile(deg.df[, 7], 0.50, na.rm = TRUE)
    # Reference-distribution percentiles (0-100%) for the metric.
    quant.ref <- data.frame(quantile(ref.df[, 7], probs = seq(0, 1, by = 0.01), na.rm = TRUE))
    colnames(quant.ref) <- colnames(ref.df)[7]
    # One row per metric: METRICS column plus the 101 reference percentiles.
    quant.df <- cbind(data.frame(colnames(metrics.df[7])), t(quant.ref))
    names(quant.df)[1] <- "METRICS"
  }
  if(length(ref.df) >= 8){
    # Multiple metric columns (7:ncol): column-wise medians per class.
    ref_50 <- sapply(ref.df[, 7:ncol(ref.df)], quantile, 0.50, na.rm = TRUE)
    deg_50 <- sapply(deg.df[, 7:ncol(deg.df)], quantile, 0.50, na.rm = TRUE)
    # Reference-distribution percentiles (0-100%) for every metric.
    quant.ref <- data.frame(apply(ref.df[, 7:ncol(ref.df)], 2, function(x){
      quantile(x, probs = seq(0, 1, by = 0.01), na.rm = TRUE)
    }))
    # One row per metric: METRICS column plus the 101 reference percentiles.
    quant.df <- cbind(data.frame(colnames(metrics.df[7:ncol(metrics.df)])), t(quant.ref))
    names(quant.df)[1] <- "METRICS"
  }
  # Direction of response to disturbance: reference median above the
  # degraded median means the metric DECREASEs with disturbance, below
  # means it INCREASEs, and equal medians provide no discrimination (EQUAL).
  quant.df$DISTURBANCE <- ifelse(ref_50 > deg_50, "DECREASE",
                                 ifelse(ref_50 < deg_50, "INCREASE", "EQUAL"))
  # Dispatch to the requested sensitivity assessment. An unrecognized
  # method leaves final.df undefined and errors at return(), as before.
  if(method == "DE"){
    final.df <- d_e(deg.df, quant.df)
  }
  if(method == "ODE"){
    final.df <- ode(metrics.df, quant.df, upper.class, lower.class,
                    ref.df, quant.ref)
  }
  if(method == "CMA"){
    final.df <- cma(metrics.df, quant.df, upper.class, lower.class,
                    ref.df, quant.ref)
  }
  if(method == "SSE"){
    final.df <- sse(metrics.df, quant.df, upper.class, lower.class,
                    ref.df, quant.ref)
  }
  return(final.df)
}
#==============================================================================
#'
#'Chunk Sensitivity
#'
#'@param metrics.df = data frame of metric values for each station, with a
#'column of site classes defined by environmental variables.
#'@param upper.class = the site class that represents the better condition.
#'@param lower.class = the site class that represents the poorer condition.
#'@param method = the sensitivity function to be used during the assessment.
#'@return Determines the threshold at which a metric best categorizes
#'two defined environmental conditions.
#'@export
chunk_sensitivity <- function(metrics.df, upper.class = "REF", lower.class = "SEV", method){
  # Split the metrics into chunks via break.me (presumably 100 metric
  # columns per chunk after 6 descriptor columns - confirm against break.me),
  # score each chunk independently, then stack the per-chunk results.
  metrics.list <- break.me(metrics.df, 100, 6)
  chunk.results <- lapply(metrics.list, function(chunk.df){
    sensitivity(chunk.df, upper.class, lower.class, method)
  })
  do.call(rbind, chunk.results)
}
#==============================================================================
#'Pairwise Sensitivity
#'
#'@param metrics.df = data frame of metric values for each station, with a
#'column of site classes defined by environmental variables.
#'@param method = the sensitivity function to be used during the assessment.
#'@return Determines the threshold at which a metric best categorizes
#'two defined environmental conditions.
#'@export
pairwise_sensitivity <- function(metrics.df, method){
#Sensitivity of every metric for each pairwise class comparison. Several
#alternative comparison sets were tried and left commented out.
#rn.df <- chunk_sensitivity(metrics.df, "REF", "NEAR", method)
#nmin.df <- chunk_sensitivity(metrics.df, "NEAR", "MIN", method)
rm.df <- chunk_sensitivity(metrics.df, "REF", "MIN", method)
mm.df <- chunk_sensitivity(metrics.df, "MIN", "MOD", method)
ms.df <- chunk_sensitivity(metrics.df, "MOD", "SEV", method)
rs.df <- chunk_sensitivity(metrics.df, "REF", "SEV", method)
#rs.df <- chunk_sensitivity(metrics.df, "NEAR", "SEV", method)
#rn.df <- chunk_sensitivity(metrics.df, "REF", "NEAR", method)
#nmin.df <- chunk_sensitivity(metrics.df, "REF", "MIN", method)
#mm.df <- chunk_sensitivity(metrics.df, "REF", "MOD", method)
#ms.df <- chunk_sensitivity(metrics.df, "REF", "SEV", method)
#Threshold-producing methods: keep SENSITIVITY + THRESHOLD per comparison
#and rename them so the comparison is encoded in the column name.
#NOTE(review): "CMA" is accepted by the cbind branch below but skipped
#here, so for CMA the SENSITIVITY_* columns never exist and the rowSums()
#call at the bottom will fail.
if(method %in% c("ODE", "SSE")){
#rn.df <- rn.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
#names(rn.df) <- c("METRICS", "SENSITIVITY_REF_NEAR", "THRESH_REF_NEAR")
#nmin.df <- nmin.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
#names(nmin.df) <- c("METRICS", "SENSITIVITY_NEAR_MIN", "THRESH_NEAR_MIN")
rm.df <- rm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MIN", "THRESH_REF_MIN")
mm.df <- mm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD", "THRESH_MIN_MOD")
ms.df <- ms.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV", "THRESH_MOD_SEV")
rs.df <- rs.df[,c("METRICS", "SENSITIVITY", "THRESHOLD",
"DISTURBANCE")]
names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV",
"THRESH_REF_SEV", "DISTURBANCE")
}
#NOTE(review): this branch references rn.df and nmin.df, whose assignments
#are commented out above - calling with method "DE" or "BARBOUR" fails with
#"object 'rn.df' not found". It likely needs rm.df/mm.df/ms.df instead.
if(method %in% c("DE", "BARBOUR")){
rn.df <- rn.df[, c("METRICS", "SENSITIVITY")]
names(rn.df) <- c("METRICS", "SENSITIVITY_REF_NEAR")
nmin.df <- nmin.df[, c("METRICS", "SENSITIVITY")]
names(nmin.df) <- c("METRICS", "SENSITIVITY_NEAR_MIN")
mm.df <- mm.df[, c("METRICS", "SENSITIVITY")]
names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD")
ms.df <- ms.df[, c("METRICS", "SENSITIVITY")]
names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV")
rs.df <- rs.df[,c("METRICS", "SENSITIVITY", "DISTURBANCE")]
names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV", "DISTURBANCE")
}
#Combine the four comparisons column-wise into one table.
if(method %in% c("ODE", "SSE", "CMA")){
m3 <- cbind(rm.df,
#rn.df, nmin.df[, c(2,3)],
mm.df[, c(2,3)],
ms.df[, c(2,3)], rs.df[, 2:4])
}
#NOTE(review): besides the rn.df/nmin.df issue, this cbind() yields seven
#columns while names() supplies six labels - verify once the branch above
#is repaired.
if(method %in% c("DE", "BARBOUR")){
m3 <- cbind(rn.df, nmin.df[, c(2)], mm.df[, c(2)], ms.df[, c(2)], rs.df[, 2:3])
names(m3) <- c("METRICS",
#"SENSITIVITY_REF_NEAR", "SENSITIVITY_NEAR_MIN",
"SENSITIVITY_REF_MIN",
"SENSITIVITY_MIN_MOD","SENSITIVITY_MOD_SEV",
"SENSITIVITY_REF_SEV", "DISTURBANCE")
}
#Overall sensitivity = mean of the four pairwise sensitivities.
m3$SENSITIVITY <- (rowSums(m3[, c("SENSITIVITY_REF_MIN",
#"SENSITIVITY_REF_NEAR", "SENSITIVITY_NEAR_MIN",
"SENSITIVITY_MIN_MOD","SENSITIVITY_MOD_SEV",
"SENSITIVITY_REF_SEV")])) / 4
return(m3)
}
#==============================================================================
#'Pairwise Sensitivity 2
#'
#'@param metrics.df = data frame of metric values for each station, with a
#'column of site classes defined by environmental variables.
#'@param method = the sensitivity function to be used during the assessment
#'("ODE", "SSE", "DE", or "BARBOUR"; "CMA" is not handled here).
#'@return Determines the threshold at which a metric best categorizes
#'two defined environmental conditions, using the NEAR vs SEV, REF vs MOD
#'and REF vs SEV comparisons.
#'@export
pairwise_sensitivity2 <- function(metrics.df, method){
  # Sensitivity of every metric for each of the three class comparisons.
  ns.df <- chunk_sensitivity(metrics.df, "NEAR", "SEV", method)
  rm.df <- chunk_sensitivity(metrics.df, "REF", "MOD", method)
  rs.df <- chunk_sensitivity(metrics.df, "REF", "SEV", method)
  # Threshold-producing methods: keep SENSITIVITY + THRESHOLD per comparison
  # and encode the comparison in the column names.
  if(method %in% c("ODE", "SSE")){
    ns.df <- ns.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(ns.df) <- c("METRICS", "SENSITIVITY_NEAR_SEV", "THRESH_NEAR_SEV")
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MOD", "THRESH_REF_MOD")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "THRESHOLD",
                       "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV",
                      "THRESH_REF_SEV", "DISTURBANCE")
  }
  # Threshold-free methods keep only the sensitivity scores.
  if(method %in% c("DE", "BARBOUR")){
    ns.df <- ns.df[, c("METRICS", "SENSITIVITY")]
    names(ns.df) <- c("METRICS", "SENSITIVITY_NEAR_SEV")
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MOD")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV", "DISTURBANCE")
  }
  # Combine the three comparisons column-wise into one table.
  if(method %in% c("ODE", "SSE")){
    m3 <- cbind(ns.df, rm.df[, c(2, 3)], rs.df[, 2:4])
  }
  if(method %in% c("DE", "BARBOUR")){
    # BUG FIX: the leading block was previously rs.df, which produced six
    # columns against the five names below (and duplicated the REF vs SEV
    # results); ns.df (NEAR vs SEV) is the intended leading block, as the
    # names vector shows.
    m3 <- cbind(ns.df, rm.df[, c(2)], rs.df[, 2:3])
    names(m3) <- c("METRICS", "SENSITIVITY_NEAR_SEV", "SENSITIVITY_REF_MOD",
                   "SENSITIVITY_REF_SEV", "DISTURBANCE")
  }
  # Overall sensitivity = mean of the three pairwise sensitivities.
  m3$SENSITIVITY <- rowSums(m3[, c("SENSITIVITY_NEAR_SEV", "SENSITIVITY_REF_MOD",
                                   "SENSITIVITY_REF_SEV")]) / 3
  return(m3)
}
#==============================================================================
#'Pairwise Sensitivity 3
#'
#'@param metrics.df = data frame of metric values for each station with
#'a column of site classes (CATEGORY) defined by environmental variables.
#'@param method = the sensitivity function to be used during the assessment
#'("ODE", "SSE", "DE", or "BARBOUR").
#'@return A data frame of pairwise sensitivity scores (REF vs. MIN,
#'MIN vs. MOD, MOD vs. SEV, and REF vs. SEV) and a combined SENSITIVITY score.
#'@export
pairwise_sensitivity3 <- function(metrics.df, method){
  # Sensitivity for each pair of adjacent site classes, plus REF vs. SEV.
  rm.df <- chunk_sensitivity(metrics.df, "REF", "MIN", method)
  mm.df <- chunk_sensitivity(metrics.df, "MIN", "MOD", method)
  ms.df <- chunk_sensitivity(metrics.df, "MOD", "SEV", method)
  rs.df <- chunk_sensitivity(metrics.df, "REF", "SEV", method)
  if(method %in% c("ODE", "SSE")){
    # ODE/SSE also report the threshold associated with each comparison.
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MIN", "THRESH_REF_MIN")
    mm.df <- mm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD", "THRESH_MIN_MOD")
    ms.df <- ms.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV", "THRESH_MOD_SEV")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "THRESHOLD",
                       "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV",
                      "THRESH_REF_SEV", "DISTURBANCE")
    m3 <- cbind(rm.df, mm.df[, c(2, 3)], ms.df[, c(2, 3)], rs.df[, 2:4])
  }
  if(method %in% c("DE", "BARBOUR")){
    # DE/BARBOUR only report a sensitivity value (no threshold).
    # Bug fix: this branch previously subset and bound rn.df/nmin.df, which
    # are never created in this function (their chunk_sensitivity() calls are
    # commented out), causing an "object not found" error. rm.df is used
    # instead, matching the SENSITIVITY_REF_MIN label assigned below.
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MIN")
    mm.df <- mm.df[, c("METRICS", "SENSITIVITY")]
    names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD")
    ms.df <- ms.df[, c("METRICS", "SENSITIVITY")]
    names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV", "DISTURBANCE")
    m3 <- cbind(rm.df, mm.df[, 2], ms.df[, 2], rs.df[, 2:3])
    names(m3) <- c("METRICS", "SENSITIVITY_REF_MIN",
                   "SENSITIVITY_MIN_MOD", "SENSITIVITY_MOD_SEV",
                   "SENSITIVITY_REF_SEV", "DISTURBANCE")
  }
  # NOTE(review): SENSITIVITY_REF_SEV appears twice in this sum but the
  # divisor is 4, so this is a weighted score rather than a mean of the
  # four comparisons -- confirm the double weighting of REF vs. SEV is
  # intended (compare with pairwise_sensitivity(), which divides 4 terms by 4).
  m3$SENSITIVITY <- (rowSums(m3[, c("SENSITIVITY_REF_MIN",
                                    "SENSITIVITY_MIN_MOD", "SENSITIVITY_MOD_SEV",
                                    "SENSITIVITY_REF_SEV")]) +
                       m3$SENSITIVITY_REF_SEV) / 4
  return(m3)
}
#==============================================================================
#'Range and Variability Test
#'
#'@param metrics.df = data frame of metric values for each station (metric
#'columns are assumed to begin in column 7) with a column of site classes
#'(CATEGORY) defined by environmental variables.
#'@return A data frame with one row per metric reporting the reference (REF)
#'5th-95th percentile range (RANGE = LOW/HIGH) and the interquartile
#'variability of the reference distribution (VARIABILITY = LOW/HIGH).
#'Tests that the range of the reference condition is not too low and
#'that variability is not too high.
#'@export
range_variability <- function(metrics.df){
  # Remove bookkeeping/excluded columns before treating columns as metrics.
  if("NO_MATCH" %in% names(metrics.df)){
    metrics.df <- metrics.df[, !(names(metrics.df) %in% "NO_MATCH")]
  }
  if("EFFECTIVE_RICH_SIMPSON" %in% names(metrics.df)){
    metrics.df <- metrics.df[, !(names(metrics.df) %in% "EFFECTIVE_RICH_SIMPSON")]
  }
  ref <- metrics.df[metrics.df$CATEGORY %in% "REF", ]
  # Rescale proportion-type metrics (0-1) to percentages so the
  # percent-metric range thresholds below apply to them as well.
  if("PIELOU" %in% names(ref)){
    ref$PIELOU <- ref$PIELOU * 100
  }
  if("HURLBERTS_PIE" %in% names(ref)){
    ref$HURLBERTS_PIE <- ref$HURLBERTS_PIE * 100
  }
  if("SIMPSONS" %in% names(ref)){
    ref$SIMPSONS <- ref$SIMPSONS * 100
  }
  # 5th/95th percentiles of each reference metric (columns 7+).
  if(ncol(ref) > 7){
    df <- data.frame(METRICS = names(ref[, 7:ncol(ref)]))
    df$MIN <- apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.05, na.rm = TRUE))
    df$MAX <- apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.95, na.rm = TRUE))
  }
  if(ncol(ref) == 7){
    # Single metric column: handle directly (apply() would drop dimensions).
    df <- data.frame(METRICS = names(ref)[7])
    df$MIN <- quantile(ref[, 7], probs = 0.05, na.rm = TRUE)
    df$MAX <- quantile(ref[, 7], probs = 0.95, na.rm = TRUE)
  }
  df$DIFF <- abs(df$MIN - df$MAX)
  # Regex groups classifying each metric by its measurement type; each type
  # has its own minimum acceptable reference range.
  pct.m <- paste(c("PCT", "PIELOU", "GOLD", "SIMPSON", "HURLBERT"), collapse = "|")
  rich.m <- paste(c("RICH", "BECK"), collapse = "|")
  div.m <- paste(c("SHANNON", "MENHINICKS", "MARGALEFS"), collapse = "|")
  tol.m <- paste(c("HBI", "ASPT"), collapse = "|")
  # Bug fix: these tests previously used df$METRIC, which only resolved to
  # the METRICS column through data-frame partial name matching; the column
  # name is now spelled out in full.
  df$RANGE <- ifelse(grepl(pct.m, df$METRICS) & df$DIFF <= 10, "LOW",
                     ifelse(grepl(pct.m, df$METRICS) & df$DIFF > 10, "HIGH",
                            ifelse(grepl(div.m, df$METRICS) & df$DIFF < 1, "LOW",
                                   ifelse(grepl(div.m, df$METRICS) & df$DIFF >= 1, "HIGH",
                                          ifelse(grepl(tol.m, df$METRICS) & df$DIFF < 2, "LOW",
                                                 ifelse(grepl(tol.m, df$METRICS) & df$DIFF >= 2, "HIGH",
                                                        ifelse(grepl(rich.m, df$METRICS) & df$DIFF < 3, "LOW",
                                                               ifelse(grepl(rich.m, df$METRICS) & df$DIFF >= 3, "HIGH",
                                                                      ifelse(!grepl(pct.m, df$METRICS) & !grepl(rich.m, df$METRICS) &
                                                                               !grepl(tol.m, df$METRICS) & !grepl(div.m, df$METRICS), "Not Measured", "ERROR")))))))))
  # Quartiles of the reference distribution (rounded to whole numbers).
  if(ncol(ref) > 7){
    df$Q25 <- round(apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.25, na.rm = TRUE)), 0)
    df$Q75 <- round(apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.75, na.rm = TRUE)), 0)
  }
  if(ncol(ref) == 7){
    df$Q25 <- round(quantile(ref[, 7], probs = 0.25, na.rm = TRUE), 0)
    df$Q75 <- round(quantile(ref[, 7], probs = 0.75, na.rm = TRUE), 0)
  }
  df$Q_DIFF <- df$Q75 - df$Q25
  # Variability is judged by the IQR relative to the 25th percentile; an IQR
  # of zero is treated as LOW up front to avoid a 0/0 division.
  df$VARIABILITY <- ifelse((df$Q_DIFF) == 0, "LOW",
                           ifelse((df$Q_DIFF / df$Q25) > 3, "HIGH",
                                  ifelse((df$Q_DIFF / df$Q25) <= 3, "LOW", "ERROR")))
  return(df)
}
#==============================================================================
#'Summary of Metric Tests
#'
#'@param metrics.df = data frame of metric values for each station with
#'a column of site classes defined by environmental variables.
#'@param bioregion = the bioregion to perform the analysis.
#'@param de.method = the sensitivity method used for the primary binary
#'(REF vs. SEV) comparison; defaults to "CMA".
#'@return Summarizes multiple metric tests into a single table.
#'@export
metrics_summary <- function(metrics.df, bioregion, de.method = "CMA"){
  # Keep only adequately sampled stations (>= 70 individuals) in the
  # requested bioregion; BIOREGION is then dropped so downstream functions
  # see metric columns in the expected positions.
  metrics.df <- metrics.df[metrics.df$ABUNDANCE >= 70, ]
  metrics.df <- metrics.df[metrics.df$BIOREGION %in% bioregion, ]
  metrics.df <- metrics.df[, !names(metrics.df) %in% "BIOREGION"]
  #pair.cma <- unique(pairwise_sensitivity(metrics.df, de.method))
  #names(pair.cma)[names(pair.cma) %in% "SENSITIVITY"] <- "PAIRWISE_CMA"
  # Binary (REF vs. SEV) sensitivity under the primary method.
  # NOTE(review): the renaming below assumes chunk_sensitivity() returns
  # exactly these columns in this order for this method -- confirm.
  # "PRECENTILE" is an existing typo kept because other code may reference
  # this column name.
  bi.cma <- unique(chunk_sensitivity(metrics.df, "REF", "SEV", de.method))
  names(bi.cma) <- c("METRICS", "DISTURBANCE", "BINARY_CMA",
                     "PRECENTILE_BINARY_CMA",
                     "PCT_REF_BI_CMA", "PCT_DEG_BI_CMA",
                     "REF_MEDIAN", "THRESHOLD_BI_CMA", "BOUND_BI_CMA")
  # Binary sensitivity under the DE and Barbour methods for comparison.
  bi.de <- unique(chunk_sensitivity(metrics.df, "REF", "SEV", "DE"))
  names(bi.de) <- c("METRICS", "DISTURBANCE", "BINARY_DE")
  bi_barbour <- unique(chunk_sensitivity(metrics.df, "REF", "SEV", "BARBOUR"))
  names(bi_barbour) <- c("METRICS", "DISTURBANCE", "BINARY_BARBOUR")
  # Reference range/variability screen (see range_variability()).
  range.var <- unique(range_variability(metrics.df))
  names(range.var) <- c("METRICS", "REF_MIN", "REF_MAX", "REF_RANGE_VALUE",
                        "REF_RANGE_CLASS", "REF_Q25", "REF_Q75",
                        "REF_VARIABILITY_VALUE", "REF_VARIABILITY_CLASS")
  # Zero-inflation screen uses the Barbour disturbance direction.
  zero.inflate <- zero_inflate(metrics.df, bi_barbour)
  # Join all test results into one row per metric.
  final.df <- plyr::join_all(list(#pair.cma,
                                  bi.cma[, c(1, 3:9)],
                                  bi.de[, c(1, 3)],
                                  bi_barbour[, c(1, 3)],
                                  range.var, zero.inflate), "METRICS")
  # Overall quality call: HIGH requires strong sensitivity, adequate
  # reference range, low reference variability, and no zero inflation;
  # REVIEW differs only in a borderline zero-inflation result.
  final.df$QUALITY <- ifelse(#final.df$SENSITIVITY >= 70 &
                             final.df$BINARY_CMA >= 70 &
                               final.df$BINARY_BARBOUR >= 2 &
                               final.df$REF_RANGE_CLASS %in% "HIGH" &
                               final.df$REF_VARIABILITY_CLASS %in% "LOW" &
                               final.df$ZERO_INFLATE %in% "GOOD", "HIGH",
                             ifelse(#final.df$SENSITIVITY >= 70 &
                                    final.df$BINARY_CMA >= 70 &
                                      final.df$BINARY_BARBOUR >= 2 &
                                      final.df$REF_RANGE_CLASS %in% "HIGH" &
                                      final.df$REF_VARIABILITY_CLASS %in% "LOW" &
                                      final.df$ZERO_INFLATE %in% "REVIEW", "REVIEW", "POOR"))
  final.df <- final.df[!final.df$METRICS %in% "EFFECTIVE_RICH_SIMPSON", ]
  return(final.df)
}
#==============================================================================
#'Zero Inflation Test
#'
#'@param metrics.df = data frame of metric values for each station (metric
#'columns are assumed to begin in column 7) with a column of site classes
#'defined by environmental variables.
#'@param bi.barbour = a data frame created within another function and used for
#'the calculated disturbance value (must contain METRICS and DISTURBANCE).
#'@return Tests the influence of zeros on the results.
#'@export
zero_inflate <- function(metrics.df, bi.barbour){
  # Reference stations; metric columns (7+) are sorted alphabetically so
  # the per-column percentages below line up with merge()'s sorted METRICS.
  ref.df <- metrics.df[metrics.df$CATEGORY %in% "REF", ]
  if(ncol(ref.df) > 7){
    ref.df <- ref.df[, c(names(ref.df[, 1:6]), sort(names(ref.df[, 7:ncol(ref.df)])))]
  }
  # Severely degraded stations, with the same column ordering.
  deg.df <- metrics.df[metrics.df$CATEGORY %in% "SEV", ]
  if(ncol(deg.df) > 7){
    deg.df <- deg.df[, c(names(deg.df[, 1:6]), sort(names(deg.df[, 7:ncol(deg.df)])))]
  }
  # Disturbance direction (INCREASE/DECREASE) per metric from the Barbour test.
  barb <- bi.barbour[, c("METRICS", "DISTURBANCE")]
  new.df <- data.frame(METRICS = names(metrics.df[, 7:ncol(metrics.df)]))
  # NOTE(review): merge() keeps only metrics present in bi.barbour and sorts
  # by METRICS -- confirm bi.barbour covers every metric column.
  new.df <- merge(new.df , barb, by = "METRICS")
  # Percentage of zero values per metric among reference and degraded stations.
  if(ncol(ref.df) > 7){
    new.df$PCT_0_REF <- apply(ref.df[, 7:ncol(ref.df)], 2, function(x){
      round((sum(x == 0) / length(x)) * 100, 0)
    })
    new.df$PCT_0_DEG <- apply(deg.df[, 7:ncol(deg.df)], 2, function(x){
      round((sum(x == 0) / length(x)) * 100, 0)
    })
  }
  if(ncol(ref.df) == 7){
    # Single metric column: compute directly (apply() would drop dimensions).
    new.df$PCT_0_REF <- round((sum(ref.df[, 7] == 0) / length(ref.df[, 7])) * 100, 0)
    new.df$PCT_0_DEG <- round((sum(deg.df[, 7] == 0) / length(deg.df[, 7])) * 100, 0)
  }
  # Classify each metric: many zeros in both classes is POOR/REVIEW; few
  # zeros in both is GOOD; otherwise the verdict depends on whether the
  # zero pattern agrees with the metric's disturbance direction.
  new.df$ZERO_INFLATE <- ifelse(new.df$PCT_0_REF > 10 & new.df$PCT_0_REF <= 50 &
                                  new.df$PCT_0_DEG > 10 & new.df$PCT_0_DEG <= 50, "REVIEW",
                                ifelse(new.df$PCT_0_REF > 10 & new.df$PCT_0_REF <= 50 & new.df$PCT_0_DEG > 50, "REVIEW",
                                       ifelse(new.df$PCT_0_REF > 50 & new.df$PCT_0_DEG > 10 & new.df$PCT_0_DEG <= 50, "REVIEW",
                                              ifelse(new.df$PCT_0_REF > 50 & new.df$PCT_0_DEG > 50, "POOR",
                                                     ifelse(new.df$PCT_0_REF <= 10 & new.df$PCT_0_DEG <= 10, "GOOD",
                                                            ifelse(new.df$DISTURBANCE %in% "DECREASE" &
                                                                     new.df$PCT_0_REF > 10 & new.df$PCT_0_DEG <= 10, "POOR",
                                                                   ifelse(new.df$DISTURBANCE %in% "DECREASE" &
                                                                            new.df$PCT_0_REF <= 10 & new.df$PCT_0_DEG > 10, "GOOD",
                                                                          ifelse(new.df$DISTURBANCE %in% "INCREASE" &
                                                                                   new.df$PCT_0_REF > 10 & new.df$PCT_0_DEG <= 10, "GOOD",
                                                                                 ifelse(new.df$DISTURBANCE %in% "INCREASE" &
                                                                                          new.df$PCT_0_REF <= 10 & new.df$PCT_0_DEG > 10, "POOR", "ERROR")))))))))
  return(new.df)
} | /R/metric_sensitivity.R | no_license | InterstateCommissionPotomacRiverBasin/BIBI | R | false | false | 26,794 | r | #==============================================================================
#Metric Sensitivity
#==============================================================================
#'Metric Sensitivity
#'
#'@param metrics.df = data frame of metric values for each station (metric
#'columns are assumed to begin in column 7) with a column of site classes
#'(CATEGORY) defined by environmental variables.
#'@param upper.class = The site classification that represents better
#'environmental conditions.
#'@param lower.class = The site classification that represents the degraded
#'environmental conditions.
#'@param method = the sensitivity function to be used during the assessment
#'("ODE", "DE", "CMA", "SSE", or "BARBOUR").
#'@return Determines the threshold at which a metric best categorizes
#'reference and degraded stations.
#'@export
#'
sensitivity <- function(metrics.df, upper.class, lower.class, method = "ODE"){
  # Drop the placeholder metric for unidentified taxa.
  if("PCT_UNIDENTIFIED" %in% names(metrics.df)){
    metrics.df <- metrics.df[, !grepl("PCT_UNIDENTIFIED", names(metrics.df))]
  }
  # Clean up merge artifacts: drop duplicated ".y" columns and strip the
  # ".x" suffix from names. Bug fix: the patterns previously used an
  # unescaped dot (".y"/".x"), which in a regex matches ANY character
  # followed by "y"/"x" and could drop or rename unrelated columns; the
  # suffixes are now escaped and anchored to the end of the name.
  if(any(grepl("\\.y$", names(metrics.df)))){
    metrics.df <- metrics.df[, !grepl("\\.y$", names(metrics.df))]
  }
  if(any(grepl("\\.x$", names(metrics.df)))){
    names(metrics.df) <- gsub("\\.x$", "", names(metrics.df))
  }
  #Create new data frames specific for Degraded and Reference sites
  deg.df <- metrics.df[metrics.df$CATEGORY %in% lower.class, ]
  ref.df <- metrics.df[metrics.df$CATEGORY %in% upper.class, ]
  if(method == "BARBOUR"){
    final.df <- barbour(metrics.df, ref.df, deg.df)
  }
  # length() of a data frame is its column count; fewer than 8 columns means
  # a single metric column (columns 1-6 are assumed to be metadata).
  if(length(ref.df) < 8){
    # Median of the reference and degraded distributions.
    ref_50 <- quantile(ref.df[, 7], 0.50, na.rm = TRUE)
    deg_50 <- quantile(deg.df[, 7], 0.50, na.rm = TRUE)
    # Reference percentiles (0-100%) for the single metric.
    quant.ref <- data.frame(quantile(ref.df[, 7], probs = seq(0, 1, by = 0.01), na.rm = TRUE))
    colnames(quant.ref) <- colnames(ref.df)[7]
    # One row per metric with its reference percentile values.
    quant.df <- cbind(data.frame(colnames(metrics.df[7])), t(quant.ref))
    names(quant.df)[1] <- "METRICS"
  }
  if(length(ref.df) >= 8){
    # Median of the reference and degraded distributions for each metric.
    ref_50 <- sapply(ref.df[, 7:ncol(ref.df)], quantile, 0.50, na.rm = TRUE)
    deg_50 <- sapply(deg.df[, 7:ncol(deg.df)], quantile, 0.50, na.rm = TRUE)
    # Reference percentiles (0-100%) for every metric column.
    quant.ref <- data.frame(apply(ref.df[, 7:ncol(ref.df)], 2, function(x){
      quantile(x, probs = seq(0, 1, by = 0.01), na.rm = TRUE)
    }))
    # One row per metric with its reference percentile values.
    quant.df <- cbind(data.frame(colnames(metrics.df[7:ncol(metrics.df)])), t(quant.ref))
    names(quant.df)[1] <- "METRICS"
  }
  # DISTURBANCE describes how the metric responds to degradation: DECREASE
  # when the reference median exceeds the degraded median, INCREASE when it
  # is lower, and EQUAL when the medians do not differ (no discrimination).
  # Note: the original recomputed the reference/degraded frames (one copy
  # using `==`, which retains NA rows when CATEGORY contains NA) and took
  # the medians a second time; the values are identical, so the duplicate
  # computation was removed and the `%in%`-based medians are reused.
  quant.df$DISTURBANCE <- ifelse(ref_50 > deg_50, "DECREASE",
                                 ifelse(ref_50 < deg_50, "INCREASE", "EQUAL"))
  # Dispatch to the requested sensitivity method (defined elsewhere).
  if(method == "DE"){
    final.df <- d_e(deg.df, quant.df)
  }
  if(method == "ODE"){
    final.df <- ode(metrics.df, quant.df, upper.class, lower.class,
                    ref.df, quant.ref)
  }
  if(method == "CMA"){
    final.df <- cma(metrics.df, quant.df, upper.class, lower.class,
                    ref.df, quant.ref)
  }
  if(method == "SSE"){
    final.df <- sse(metrics.df, quant.df, upper.class, lower.class,
                    ref.df, quant.ref)
  }
  return(final.df)
}
#==============================================================================
#'
#'Chunk Sensitivity
#'
#'@param metrics.df = data frame of metric values for each station with
#'a column of site classes (CATEGORY) defined by environmental variables.
#'@param upper.class = the site class that represents the better condition.
#'@param lower.class = the site class that represents the poorer condition.
#'@param method = the sensitivity function to be used during the assessment.
#'@return Determines the threshold at which a metric best categorizes
#'two defined environmental conditions.
#'@export
chunk_sensitivity <- function(metrics.df, upper.class = "REF", lower.class = "SEV", method){
  # break.me() (defined elsewhere) presumably splits metrics.df into chunks
  # of at most 100 metric columns after 6 metadata columns -- TODO confirm.
  metrics.list <- break.me(metrics.df, 100, 6)
  # Idiom/bug fix: the previous `for(j in 1:length(...))` loop fails on an
  # empty list (1:0 iterates over c(1, 0)); lapply() handles any length and
  # preallocates the result list.
  datalist <- lapply(metrics.list, function(chunk.df){
    sensitivity(chunk.df, upper.class, lower.class, method)
  })
  # Stack the per-chunk results back into a single data frame.
  final.df <- do.call(rbind, datalist)
  return(final.df)
}
#==============================================================================
#'Pairwise Sensitivity
#'
#'@param metrics.df = data frame of metric values for each station with
#'a column of site classes (CATEGORY) defined by environmental variables.
#'@param method = the sensitivity function to be used during the assessment
#'("ODE", "SSE", "CMA", "DE", or "BARBOUR").
#'@return A data frame of pairwise sensitivity scores (REF vs. MIN,
#'MIN vs. MOD, MOD vs. SEV, and REF vs. SEV) and their mean (SENSITIVITY).
#'@export
pairwise_sensitivity <- function(metrics.df, method){
  # Sensitivity for each pair of adjacent site classes, plus REF vs. SEV.
  rm.df <- chunk_sensitivity(metrics.df, "REF", "MIN", method)
  mm.df <- chunk_sensitivity(metrics.df, "MIN", "MOD", method)
  ms.df <- chunk_sensitivity(metrics.df, "MOD", "SEV", method)
  rs.df <- chunk_sensitivity(metrics.df, "REF", "SEV", method)
  if(method %in% c("ODE", "SSE")){
    # ODE/SSE also report the threshold associated with each comparison.
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MIN", "THRESH_REF_MIN")
    mm.df <- mm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD", "THRESH_MIN_MOD")
    ms.df <- ms.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV", "THRESH_MOD_SEV")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "THRESHOLD",
                       "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV",
                      "THRESH_REF_SEV", "DISTURBANCE")
  }
  if(method %in% c("DE", "BARBOUR")){
    # DE/BARBOUR only report a sensitivity value (no threshold).
    # Bug fix: this branch previously subset and bound rn.df/nmin.df, which
    # are never created in this function (their chunk_sensitivity() calls are
    # commented out), causing an "object not found" error. rm.df is used
    # instead, matching the SENSITIVITY_REF_MIN label assigned below.
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MIN")
    mm.df <- mm.df[, c("METRICS", "SENSITIVITY")]
    names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD")
    ms.df <- ms.df[, c("METRICS", "SENSITIVITY")]
    names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV", "DISTURBANCE")
  }
  if(method %in% c("ODE", "SSE", "CMA")){
    # NOTE(review): for method "CMA" the frames are bound WITHOUT the column
    # subsetting above (only ODE/SSE are subset) -- confirm that
    # chunk_sensitivity()'s CMA output already has the layout expected here.
    m3 <- cbind(rm.df, mm.df[, c(2, 3)], ms.df[, c(2, 3)], rs.df[, 2:4])
  }
  if(method %in% c("DE", "BARBOUR")){
    m3 <- cbind(rm.df, mm.df[, 2], ms.df[, 2], rs.df[, 2:3])
    names(m3) <- c("METRICS", "SENSITIVITY_REF_MIN",
                   "SENSITIVITY_MIN_MOD", "SENSITIVITY_MOD_SEV",
                   "SENSITIVITY_REF_SEV", "DISTURBANCE")
  }
  # Overall sensitivity is the mean of the four pairwise comparisons.
  m3$SENSITIVITY <- (rowSums(m3[, c("SENSITIVITY_REF_MIN",
                                    "SENSITIVITY_MIN_MOD", "SENSITIVITY_MOD_SEV",
                                    "SENSITIVITY_REF_SEV")])) / 4
  return(m3)
}
#==============================================================================
#'Pairwise Sensitivity 2
#'
#'@param metrics.df = data frame of metric values for each station with
#'a column of site classes (CATEGORY) defined by environmental variables.
#'@param method = the sensitivity function to be used during the assessment
#'("ODE", "SSE", "DE", or "BARBOUR").
#'@return A data frame of pairwise sensitivity scores (NEAR vs. SEV,
#'REF vs. MOD, and REF vs. SEV) and their mean (SENSITIVITY).
#'@export
pairwise_sensitivity2 <- function(metrics.df, method){
  # Sensitivity for each pairwise comparison of site classes.
  ns.df <- chunk_sensitivity(metrics.df, "NEAR", "SEV", method)
  rm.df <- chunk_sensitivity(metrics.df, "REF", "MOD", method)
  rs.df <- chunk_sensitivity(metrics.df, "REF", "SEV", method)
  if(method %in% c("ODE", "SSE")){
    # ODE/SSE also report the threshold associated with each comparison.
    ns.df <- ns.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(ns.df) <- c("METRICS", "SENSITIVITY_NEAR_SEV", "THRESH_NEAR_SEV")
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MOD", "THRESH_REF_MOD")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "THRESHOLD",
                       "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV",
                      "THRESH_REF_SEV", "DISTURBANCE")
    m3 <- cbind(ns.df, rm.df[, c(2, 3)], rs.df[, 2:4])
  }
  if(method %in% c("DE", "BARBOUR")){
    # DE/BARBOUR only report a sensitivity value (no threshold).
    ns.df <- ns.df[, c("METRICS", "SENSITIVITY")]
    names(ns.df) <- c("METRICS", "SENSITIVITY_NEAR_SEV")
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MOD")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV", "DISTURBANCE")
    # Bug fix: the first frame bound here was previously rs.df, which
    # duplicated the REF/SEV scores, dropped the NEAR/SEV scores, and
    # produced one more column than the five names assigned below.
    m3 <- cbind(ns.df, rm.df[, 2], rs.df[, 2:3])
    names(m3) <- c("METRICS", "SENSITIVITY_NEAR_SEV", "SENSITIVITY_REF_MOD",
                   "SENSITIVITY_REF_SEV", "DISTURBANCE")
  }
  # Overall sensitivity is the mean of the three pairwise comparisons.
  m3$SENSITIVITY <- rowSums(m3[, c("SENSITIVITY_NEAR_SEV", "SENSITIVITY_REF_MOD",
                                   "SENSITIVITY_REF_SEV")]) / 3
  return(m3)
}
#==============================================================================
#'Pairwise Sensitivity 3
#'
#'@param metrics.df = data frame of metric values for each station with
#'a column of site classes (CATEGORY) defined by environmental variables.
#'@param method = the sensitivity function to be used during the assessment
#'("ODE", "SSE", "DE", or "BARBOUR").
#'@return A data frame of pairwise sensitivity scores (REF vs. MIN,
#'MIN vs. MOD, MOD vs. SEV, and REF vs. SEV) and a combined SENSITIVITY score.
#'@export
pairwise_sensitivity3 <- function(metrics.df, method){
  # Sensitivity for each pair of adjacent site classes, plus REF vs. SEV.
  rm.df <- chunk_sensitivity(metrics.df, "REF", "MIN", method)
  mm.df <- chunk_sensitivity(metrics.df, "MIN", "MOD", method)
  ms.df <- chunk_sensitivity(metrics.df, "MOD", "SEV", method)
  rs.df <- chunk_sensitivity(metrics.df, "REF", "SEV", method)
  if(method %in% c("ODE", "SSE")){
    # ODE/SSE also report the threshold associated with each comparison.
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MIN", "THRESH_REF_MIN")
    mm.df <- mm.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD", "THRESH_MIN_MOD")
    ms.df <- ms.df[, c("METRICS", "SENSITIVITY", "THRESHOLD")]
    names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV", "THRESH_MOD_SEV")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "THRESHOLD",
                       "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV",
                      "THRESH_REF_SEV", "DISTURBANCE")
    m3 <- cbind(rm.df, mm.df[, c(2, 3)], ms.df[, c(2, 3)], rs.df[, 2:4])
  }
  if(method %in% c("DE", "BARBOUR")){
    # DE/BARBOUR only report a sensitivity value (no threshold).
    # Bug fix: this branch previously subset and bound rn.df/nmin.df, which
    # are never created in this function (their chunk_sensitivity() calls are
    # commented out), causing an "object not found" error. rm.df is used
    # instead, matching the SENSITIVITY_REF_MIN label assigned below.
    rm.df <- rm.df[, c("METRICS", "SENSITIVITY")]
    names(rm.df) <- c("METRICS", "SENSITIVITY_REF_MIN")
    mm.df <- mm.df[, c("METRICS", "SENSITIVITY")]
    names(mm.df) <- c("METRICS", "SENSITIVITY_MIN_MOD")
    ms.df <- ms.df[, c("METRICS", "SENSITIVITY")]
    names(ms.df) <- c("METRICS", "SENSITIVITY_MOD_SEV")
    rs.df <- rs.df[, c("METRICS", "SENSITIVITY", "DISTURBANCE")]
    names(rs.df) <- c("METRICS", "SENSITIVITY_REF_SEV", "DISTURBANCE")
    m3 <- cbind(rm.df, mm.df[, 2], ms.df[, 2], rs.df[, 2:3])
    names(m3) <- c("METRICS", "SENSITIVITY_REF_MIN",
                   "SENSITIVITY_MIN_MOD", "SENSITIVITY_MOD_SEV",
                   "SENSITIVITY_REF_SEV", "DISTURBANCE")
  }
  # NOTE(review): SENSITIVITY_REF_SEV appears twice in this sum but the
  # divisor is 4, so this is a weighted score rather than a mean of the
  # four comparisons -- confirm the double weighting of REF vs. SEV is
  # intended (compare with pairwise_sensitivity(), which divides 4 terms by 4).
  m3$SENSITIVITY <- (rowSums(m3[, c("SENSITIVITY_REF_MIN",
                                    "SENSITIVITY_MIN_MOD", "SENSITIVITY_MOD_SEV",
                                    "SENSITIVITY_REF_SEV")]) +
                       m3$SENSITIVITY_REF_SEV) / 4
  return(m3)
}
#==============================================================================
#'Range and Variability Test
#'
#'@param metrics.df = data frame of metric values for each station (metric
#'columns are assumed to begin in column 7) with a column of site classes
#'(CATEGORY) defined by environmental variables.
#'@return A data frame with one row per metric reporting the reference (REF)
#'5th-95th percentile range (RANGE = LOW/HIGH) and the interquartile
#'variability of the reference distribution (VARIABILITY = LOW/HIGH).
#'Tests that the range of the reference condition is not too low and
#'that variability is not too high.
#'@export
range_variability <- function(metrics.df){
  # Remove bookkeeping/excluded columns before treating columns as metrics.
  if("NO_MATCH" %in% names(metrics.df)){
    metrics.df <- metrics.df[, !(names(metrics.df) %in% "NO_MATCH")]
  }
  if("EFFECTIVE_RICH_SIMPSON" %in% names(metrics.df)){
    metrics.df <- metrics.df[, !(names(metrics.df) %in% "EFFECTIVE_RICH_SIMPSON")]
  }
  ref <- metrics.df[metrics.df$CATEGORY %in% "REF", ]
  # Rescale proportion-type metrics (0-1) to percentages so the
  # percent-metric range thresholds below apply to them as well.
  if("PIELOU" %in% names(ref)){
    ref$PIELOU <- ref$PIELOU * 100
  }
  if("HURLBERTS_PIE" %in% names(ref)){
    ref$HURLBERTS_PIE <- ref$HURLBERTS_PIE * 100
  }
  if("SIMPSONS" %in% names(ref)){
    ref$SIMPSONS <- ref$SIMPSONS * 100
  }
  # 5th/95th percentiles of each reference metric (columns 7+).
  if(ncol(ref) > 7){
    df <- data.frame(METRICS = names(ref[, 7:ncol(ref)]))
    df$MIN <- apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.05, na.rm = TRUE))
    df$MAX <- apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.95, na.rm = TRUE))
  }
  if(ncol(ref) == 7){
    # Single metric column: handle directly (apply() would drop dimensions).
    df <- data.frame(METRICS = names(ref)[7])
    df$MIN <- quantile(ref[, 7], probs = 0.05, na.rm = TRUE)
    df$MAX <- quantile(ref[, 7], probs = 0.95, na.rm = TRUE)
  }
  df$DIFF <- abs(df$MIN - df$MAX)
  # Regex groups classifying each metric by its measurement type; each type
  # has its own minimum acceptable reference range.
  pct.m <- paste(c("PCT", "PIELOU", "GOLD", "SIMPSON", "HURLBERT"), collapse = "|")
  rich.m <- paste(c("RICH", "BECK"), collapse = "|")
  div.m <- paste(c("SHANNON", "MENHINICKS", "MARGALEFS"), collapse = "|")
  tol.m <- paste(c("HBI", "ASPT"), collapse = "|")
  # Bug fix: these tests previously used df$METRIC, which only resolved to
  # the METRICS column through data-frame partial name matching; the column
  # name is now spelled out in full.
  df$RANGE <- ifelse(grepl(pct.m, df$METRICS) & df$DIFF <= 10, "LOW",
                     ifelse(grepl(pct.m, df$METRICS) & df$DIFF > 10, "HIGH",
                            ifelse(grepl(div.m, df$METRICS) & df$DIFF < 1, "LOW",
                                   ifelse(grepl(div.m, df$METRICS) & df$DIFF >= 1, "HIGH",
                                          ifelse(grepl(tol.m, df$METRICS) & df$DIFF < 2, "LOW",
                                                 ifelse(grepl(tol.m, df$METRICS) & df$DIFF >= 2, "HIGH",
                                                        ifelse(grepl(rich.m, df$METRICS) & df$DIFF < 3, "LOW",
                                                               ifelse(grepl(rich.m, df$METRICS) & df$DIFF >= 3, "HIGH",
                                                                      ifelse(!grepl(pct.m, df$METRICS) & !grepl(rich.m, df$METRICS) &
                                                                               !grepl(tol.m, df$METRICS) & !grepl(div.m, df$METRICS), "Not Measured", "ERROR")))))))))
  # Quartiles of the reference distribution (rounded to whole numbers).
  if(ncol(ref) > 7){
    df$Q25 <- round(apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.25, na.rm = TRUE)), 0)
    df$Q75 <- round(apply(ref[, 7:ncol(ref)], 2, function(x) quantile(x, probs = 0.75, na.rm = TRUE)), 0)
  }
  if(ncol(ref) == 7){
    df$Q25 <- round(quantile(ref[, 7], probs = 0.25, na.rm = TRUE), 0)
    df$Q75 <- round(quantile(ref[, 7], probs = 0.75, na.rm = TRUE), 0)
  }
  df$Q_DIFF <- df$Q75 - df$Q25
  # Variability is judged by the IQR relative to the 25th percentile; an IQR
  # of zero is treated as LOW up front to avoid a 0/0 division.
  df$VARIABILITY <- ifelse((df$Q_DIFF) == 0, "LOW",
                           ifelse((df$Q_DIFF / df$Q25) > 3, "HIGH",
                                  ifelse((df$Q_DIFF / df$Q25) <= 3, "LOW", "ERROR")))
  return(df)
}
#==============================================================================
#'Summary of Metric Tests
#'
#'@param metrics.df = data frame of metric values for each station with
#'a column of site classes defined by environmental variables.
#'@param bioregion = the bioregion to perform the analysis.
#'@param de.method = the sensitivity method used for the primary binary
#'(REF vs. SEV) comparison; defaults to "CMA".
#'@return Summarizes multiple metric tests into a single table.
#'@export
metrics_summary <- function(metrics.df, bioregion, de.method = "CMA"){
  # Keep only adequately sampled stations (>= 70 individuals) in the
  # requested bioregion; BIOREGION is then dropped so downstream functions
  # see metric columns in the expected positions.
  metrics.df <- metrics.df[metrics.df$ABUNDANCE >= 70, ]
  metrics.df <- metrics.df[metrics.df$BIOREGION %in% bioregion, ]
  metrics.df <- metrics.df[, !names(metrics.df) %in% "BIOREGION"]
  #pair.cma <- unique(pairwise_sensitivity(metrics.df, de.method))
  #names(pair.cma)[names(pair.cma) %in% "SENSITIVITY"] <- "PAIRWISE_CMA"
  # Binary (REF vs. SEV) sensitivity under the primary method.
  # NOTE(review): the renaming below assumes chunk_sensitivity() returns
  # exactly these columns in this order for this method -- confirm.
  # "PRECENTILE" is an existing typo kept because other code may reference
  # this column name.
  bi.cma <- unique(chunk_sensitivity(metrics.df, "REF", "SEV", de.method))
  names(bi.cma) <- c("METRICS", "DISTURBANCE", "BINARY_CMA",
                     "PRECENTILE_BINARY_CMA",
                     "PCT_REF_BI_CMA", "PCT_DEG_BI_CMA",
                     "REF_MEDIAN", "THRESHOLD_BI_CMA", "BOUND_BI_CMA")
  # Binary sensitivity under the DE and Barbour methods for comparison.
  bi.de <- unique(chunk_sensitivity(metrics.df, "REF", "SEV", "DE"))
  names(bi.de) <- c("METRICS", "DISTURBANCE", "BINARY_DE")
  bi_barbour <- unique(chunk_sensitivity(metrics.df, "REF", "SEV", "BARBOUR"))
  names(bi_barbour) <- c("METRICS", "DISTURBANCE", "BINARY_BARBOUR")
  # Reference range/variability screen (see range_variability()).
  range.var <- unique(range_variability(metrics.df))
  names(range.var) <- c("METRICS", "REF_MIN", "REF_MAX", "REF_RANGE_VALUE",
                        "REF_RANGE_CLASS", "REF_Q25", "REF_Q75",
                        "REF_VARIABILITY_VALUE", "REF_VARIABILITY_CLASS")
  # Zero-inflation screen uses the Barbour disturbance direction.
  zero.inflate <- zero_inflate(metrics.df, bi_barbour)
  # Join all test results into one row per metric.
  final.df <- plyr::join_all(list(#pair.cma,
                                  bi.cma[, c(1, 3:9)],
                                  bi.de[, c(1, 3)],
                                  bi_barbour[, c(1, 3)],
                                  range.var, zero.inflate), "METRICS")
  # Overall quality call: HIGH requires strong sensitivity, adequate
  # reference range, low reference variability, and no zero inflation;
  # REVIEW differs only in a borderline zero-inflation result.
  final.df$QUALITY <- ifelse(#final.df$SENSITIVITY >= 70 &
                             final.df$BINARY_CMA >= 70 &
                               final.df$BINARY_BARBOUR >= 2 &
                               final.df$REF_RANGE_CLASS %in% "HIGH" &
                               final.df$REF_VARIABILITY_CLASS %in% "LOW" &
                               final.df$ZERO_INFLATE %in% "GOOD", "HIGH",
                             ifelse(#final.df$SENSITIVITY >= 70 &
                                    final.df$BINARY_CMA >= 70 &
                                      final.df$BINARY_BARBOUR >= 2 &
                                      final.df$REF_RANGE_CLASS %in% "HIGH" &
                                      final.df$REF_VARIABILITY_CLASS %in% "LOW" &
                                      final.df$ZERO_INFLATE %in% "REVIEW", "REVIEW", "POOR"))
  final.df <- final.df[!final.df$METRICS %in% "EFFECTIVE_RICH_SIMPSON", ]
  return(final.df)
}
#==============================================================================
#'Zero Inflation Test
#'
#'@param metrics.df = data frame of metric values for each station, with a
#'column of site classes (CATEGORY) defined by environmental variables.
#'Metric columns are assumed to start at column 7.
#'@param bi.barbour = a data frame created within another function and used for
#'the calculated disturbance value (must contain METRICS and DISTURBANCE).
#'@return Tests the influence of zeros on the results: per metric, the percent
#'of zero values at reference (REF) and severely degraded (SEV) sites, plus a
#'ZERO_INFLATE class of "GOOD"/"REVIEW"/"POOR".
#'@export
zero_inflate <- function(metrics.df, bi.barbour){
  # Reference sites; metric columns sorted alphabetically so they line up
  # with the METRICS order produced by merge() below.
  ref.df <- metrics.df[metrics.df$CATEGORY %in% "REF", ]
  if(ncol(ref.df) > 7){
    ref.df <- ref.df[, c(names(ref.df[, 1:6]), sort(names(ref.df[, 7:ncol(ref.df)])))]
  }
  # Severely degraded sites, sorted the same way.
  deg.df <- metrics.df[metrics.df$CATEGORY %in% "SEV", ]
  if(ncol(deg.df) > 7){
    deg.df <- deg.df[, c(names(deg.df[, 1:6]), sort(names(deg.df[, 7:ncol(deg.df)])))]
  }
  barb <- bi.barbour[, c("METRICS", "DISTURBANCE")]
  new.df <- data.frame(METRICS = names(metrics.df[, 7:ncol(metrics.df)]))
  # NOTE(review): merge() sorts rows by METRICS; the apply() results below are
  # assigned positionally, so alignment relies on the alphabetical column sort
  # above matching merge()'s row order -- fragile but consistent as written.
  # Metrics absent from bi.barbour are silently dropped here; confirm intended.
  new.df <- merge(new.df , barb, by = "METRICS")
  # Percent of zero values per metric at REF and SEV sites.
  if(ncol(ref.df) > 7){
    new.df$PCT_0_REF <- apply(ref.df[, 7:ncol(ref.df)], 2, function(x){
      round((sum(x == 0) / length(x)) * 100, 0)
    })
    new.df$PCT_0_DEG <- apply(deg.df[, 7:ncol(deg.df)], 2, function(x){
      round((sum(x == 0) / length(x)) * 100, 0)
    })
  }
  # Single-metric case: column 7 is a plain vector, apply() is not needed.
  if(ncol(ref.df) == 7){
    new.df$PCT_0_REF <- round((sum(ref.df[, 7] == 0) / length(ref.df[, 7])) * 100, 0)
    new.df$PCT_0_DEG <- round((sum(deg.df[, 7] == 0) / length(deg.df[, 7])) * 100, 0)
  }
  # Classification: moderate zeros at both ends -> REVIEW; mostly zeros at
  # both ends -> POOR; few zeros at both ends -> GOOD. When only one side has
  # zeros, GOOD/POOR depends on whether the metric is expected to DECREASE or
  # INCREASE with disturbance (zeros on the "expected" side are fine).
  new.df$ZERO_INFLATE <- ifelse(new.df$PCT_0_REF > 10 & new.df$PCT_0_REF <= 50 &
                                  new.df$PCT_0_DEG > 10 & new.df$PCT_0_DEG <= 50, "REVIEW",
                                ifelse(new.df$PCT_0_REF > 10 & new.df$PCT_0_REF <= 50 & new.df$PCT_0_DEG > 50, "REVIEW",
                                       ifelse(new.df$PCT_0_REF > 50 & new.df$PCT_0_DEG > 10 & new.df$PCT_0_DEG <= 50, "REVIEW",
                                              ifelse(new.df$PCT_0_REF > 50 & new.df$PCT_0_DEG > 50, "POOR",
                                                     ifelse(new.df$PCT_0_REF <= 10 & new.df$PCT_0_DEG <= 10, "GOOD",
                                                            ifelse(new.df$DISTURBANCE %in% "DECREASE" &
                                                                     new.df$PCT_0_REF > 10 & new.df$PCT_0_DEG <= 10, "POOR",
                                                                   ifelse(new.df$DISTURBANCE %in% "DECREASE" &
                                                                            new.df$PCT_0_REF <= 10 & new.df$PCT_0_DEG > 10, "GOOD",
                                                                          ifelse(new.df$DISTURBANCE %in% "INCREASE" &
                                                                                   new.df$PCT_0_REF > 10 & new.df$PCT_0_DEG <= 10, "GOOD",
                                                                                 ifelse(new.df$DISTURBANCE %in% "INCREASE" &
                                                                                          new.df$PCT_0_REF <= 10 & new.df$PCT_0_DEG > 10, "POOR", "ERROR")))))))))
  return(new.df)
} |
library(MCMC4Extremes)
### Name: barcelos
### Title: 30-day maxima rainfall at Barcelos Station
### Aliases: barcelos
### Keywords: datasets
### ** Examples
# Load the bundled dataset and plot its empirical distribution.
data(barcelos)
hist(barcelos, main=NULL)
| /data/genthat_extracted_code/MCMC4Extremes/examples/barcelos.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 205 | r | library(MCMC4Extremes)
### Name: barcelos
### Title: 30-day maxima rainfall at Barcelos Station
### Aliases: barcelos
### Keywords: datasets
### ** Examples
data(barcelos)
hist(barcelos, main=NULL)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize_pbp_data.R
\name{compute_turnover_rate}
\alias{compute_turnover_rate}
\title{Compute the turnover rate for a given period of pbp time}
\usage{
compute_turnover_rate(dat)
}
\arguments{
\item{dat}{pbp data}
}
\value{
turnover rate for the given chunk of pbp data
}
\description{
Compute the turnover rate for a given period of pbp time
}
\seealso{
Other summarize_pbp_data:
\code{\link{compute_average_plus_minus}()},
\code{\link{compute_defensive_free_throw_rate}()},
\code{\link{compute_defensive_rebound_rate}()},
\code{\link{compute_defensive_turnover_rate}()},
\code{\link{compute_effective_defensive_fgp}()},
\code{\link{compute_effective_fgp}()},
\code{\link{compute_free_throw_rate}()},
\code{\link{compute_plus_minus}()},
\code{\link{compute_rebound_rate}()},
\code{\link{estimate_team_possessions_basic}()},
\code{\link{estimate_team_possessions_custom}()}
}
\concept{summarize_pbp_data}
| /man/compute_turnover_rate.Rd | no_license | kburnham/tidynbadata | R | false | true | 985 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarize_pbp_data.R
\name{compute_turnover_rate}
\alias{compute_turnover_rate}
\title{Compute the turnover rate for a given period of pbp time}
\usage{
compute_turnover_rate(dat)
}
\arguments{
\item{dat}{pbp data}
}
\value{
turnover rate for the given chunk of pbp data
}
\description{
Compute the turnover rate for a given period of pbp time
}
\seealso{
Other summarize_pbp_data:
\code{\link{compute_average_plus_minus}()},
\code{\link{compute_defensive_free_throw_rate}()},
\code{\link{compute_defensive_rebound_rate}()},
\code{\link{compute_defensive_turnover_rate}()},
\code{\link{compute_effective_defensive_fgp}()},
\code{\link{compute_effective_fgp}()},
\code{\link{compute_free_throw_rate}()},
\code{\link{compute_plus_minus}()},
\code{\link{compute_rebound_rate}()},
\code{\link{estimate_team_possessions_basic}()},
\code{\link{estimate_team_possessions_custom}()}
}
\concept{summarize_pbp_data}
|
# Packages used for data wrangling and the presence/absence heatmap.
library(pheatmap)
library(ggplot2)  # fixed: the package name is 'ggplot2'; 'ggplot' is not on CRAN
library(magrittr)
library(dplyr)
library(reshape2)
library(readr)
library(OneR)
library(microbenchmark)
#path_to_pais <- "/Users/maxcummins/Dropbox/Doctorate/Manuscripts/Salmonella_AMR/SG17-135/analysis/abricate/abricate_PAIs.txt"
path_to_pais <- "/Users/maxcummins/Dropbox/Doctorate/Manuscripts/Salmonella_AMR/Submission_2-mSphere/SG17-135/analysis/abricate/abricate_PAIs_CT18.txt"
#Read in the abricate genotype data sheet (small number of rows for colname reassignment)
pais_df <-
  read_delim(
    path_to_pais,
    "\t",
    escape_double = FALSE,
    trim_ws = TRUE,
    n_max = 10
  )
#Colname reassignment
colnames(pais_df)[c(1, 10:11)] <-
  c("name", "perc_coverage", "perc_identity")
pais_df_colnames <- colnames(pais_df)
#Re-read in PAI abricate genotype data sheet
pais_df <-
  read_delim(
    path_to_pais,
    "\t",
    escape_double = FALSE,
    trim_ws = TRUE,
    col_names = FALSE,
    skip = 1
  )
#Remove cases where there are multiple headers from concatenation of abricate reports
pais_df <- pais_df %>% filter(X2 != "SEQUENCE")
#Colname reassignment
colnames(pais_df) <- pais_df_colnames
#Convert percent coverage and identity to numeric type to allow filtering
pais_df$perc_coverage <- as.numeric(pais_df$perc_coverage)
pais_df$perc_identity <- as.numeric(pais_df$perc_identity)
#Filter to perc_identity > 95%
#pais_df <-
pais_df <- pais_df %>% filter(perc_identity > 95)
#Trim excess characters the assembly names and reassign this to rownames
pais_df$name <- gsub("\\..*", "", pais_df$name)
#Replace "SAL_HC4750AA_AS" with SG17-135
pais_df$name <- gsub("SAL_HC4750AA_AS", "SG17-135", pais_df$name)
# COVERAGE looks like "start-end/length"; split it into the hit interval
# ("start:end") and the reference gene length.
pais_df$newcov <- gsub("\\/.*","", pais_df$COVERAGE)
pais_df$length_gene <- gsub(".*\\/","", pais_df$COVERAGE)
pais_df$newcov <- gsub("-",":", pais_df$newcov)
# One row per (sample, gene): collapse all hit intervals into a single
# comma-separated string (start and end hold the same string at this point).
new_df <- pais_df %>% group_by(name, GENE, length_gene) %>% filter(perc_coverage > 5) %>% summarise(start =paste(sort(unique(newcov)), collapse=","), end = paste(sort(unique(newcov)), collapse=",")) #%>% filter(grepl("SPI-1_", GENE))
#new_df <- pais_df %>% group_by(name, GENE, length_gene) %>% summarise(start =paste(sort(unique(newcov)), collapse=","), end = paste(sort(unique(newcov)), collapse=",")) #%>% filter(grepl("SPI-1_", GENE))
# Strip the other half of each "start:end" pair so start/end hold only
# the start and end coordinates respectively.
new_df$end <- gsub("[0-9]+:","", new_df$end)
new_df$start <- gsub(":[0-9]+","", new_df$start)
# Widen the comma-separated start coordinates into up to 8 "chunk" columns
# (one per hit interval), then melt back to one row per hit.
spl <-strsplit(as.character(new_df$start), ",")
start_coord <- data.frame(name = new_df$name, gene = new_df$GENE,
                          length_gene = new_df$length_gene,
                          chunk1 = sapply(spl, "[", 1),
                          chunk2 = sapply(spl, "[", 2),
                          chunk3 = sapply(spl, "[", 3),
                          chunk4= sapply(spl, "[", 4),
                          chunk5 = sapply(spl, "[", 5),
                          chunk6 = sapply(spl, "[", 6),
                          chunk7 = sapply(spl, "[", 7),
                          chunk8 = sapply(spl, "[", 8))
start_coord <- melt(start_coord, id=1:3, value.name = "start")
start_coord <- start_coord %>% select(-starts_with("variable"))
# Same widening/melting for the end coordinates.
spl <-strsplit(as.character(new_df$end), ",")
end_coord <- data.frame(name = new_df$name, gene = new_df$GENE,
                        length_gene = new_df$length_gene,
                        chunk1 = sapply(spl, "[", 1),
                        chunk2 = sapply(spl, "[", 2),
                        chunk3 = sapply(spl, "[", 3),
                        chunk4= sapply(spl, "[", 4),
                        chunk5 = sapply(spl, "[", 5),
                        chunk6 = sapply(spl, "[", 6),
                        chunk7 = sapply(spl, "[", 7),
                        chunk8 = sapply(spl, "[", 8))
end_coord <- melt(end_coord, id=1:3, value.name = "end")
end_coord <- end_coord %>% select(-starts_with("variable"))
# Combine start/end (row order matches: both frames were built identically),
# then drop rows with missing chunks.
coords <- start_coord
coords$end <- end_coord$end
coords <- coords[complete.cases(coords),]
unique(coords$length_gene)
coords$start <- as.numeric(coords$start)
coords$end <- as.numeric(coords$end)
# NOTE(review): as.numeric(levels(...))[...] implies length_gene is a factor;
# this relies on data.frame()'s stringsAsFactors default of R < 4.0 -- confirm
# under the R version in use.
coords$length_gene <- as.numeric(levels(coords$length_gene))[coords$length_gene]
coords$percentage <- (((coords$end-coords$start)+1)/coords$length_gene)*100
test <- coords# %>% filter(name == "SAL_AB7542AA_AS", gene == "SPI-12_NC_006905_P4") %>% arrange(desc(end))
list_ <- vector(mode = "list", length = 0)
# For every (sample, gene) pair, build a 0/1 presence vector over the gene
# length, mark every aligned interval, and record the percent of positions
# covered by at least one hit.
for(sample in unique(test$name)){
  test2 <- test %>% filter(name == sample)
  for(gene_ in unique(test$gene)){
    test3 <- test2 %>% filter(gene == gene_)
    length_of_gene <- test3$length_gene[1]
    if(is.na(length_of_gene) == FALSE){
      range_matrix <- rep(0, times = length_of_gene)
      for(hit in 1:nrow(test3)){
        start_ <- test3[hit, 4]
        end_ <- test3[hit, 5]
        # Overlapping hits simply overwrite with 1, so positions covered by
        # several hits are counted once.
        range_matrix[start_:end_] <- 1
      }
      coverage_pct <- round((sum(range_matrix) / length_of_gene) * 100, digits = 3)
    } else {
      # Gene absent for this sample: record NA (converted to 0 downstream).
      # Fix: the original computed this from the *previous* gene's
      # range_matrix (always NA after dividing by NA, but an error on the
      # very first iteration, when range_matrix did not yet exist).
      coverage_pct <- NA
    }
    list_ <- append(list_, c(sample, gene_, coverage_pct))
  }
}
# list_ holds flat triples of (name, gene, coverage); reshape into a
# three-column data frame.
abc <- length(list_)/3
df <- data.frame(matrix(unlist(list_), nrow = abc, byrow=T), stringsAsFactors = F)
colnames(df) <- c("name","GENE","Coverage_percentage")
# Genes with no computable coverage become 0%.
df$Coverage_percentage[is.na(df$Coverage_percentage)] <- 0
df$Coverage_percentage <- as.numeric(df$Coverage_percentage)
# Wide table: one row per sample, one column per gene.
final_table <- dcast(df, name ~ GENE)
final_final_table <- final_table[1:nrow(final_table),2:ncol(final_table)]
final_final_table_2 <- final_final_table
# Binarise: a gene is "present" when >= 60% of its length is covered.
final_final_table[final_final_table < 60] <- 0
final_final_table[final_final_table >= 60] <- 1
rownames(final_final_table) <- final_table$name
final_table <- final_final_table
write.csv(final_table, "analysis/PAIs_present_absent.csv")
pheatmap(final_final_table, fontsize_row = 2)
| /scripts/SPI-analysis.R | no_license | maxlcummins/SG17-135 | R | false | false | 5,999 | r | library(pheatmap)
library(ggplot)
library(magrittr)
library(dplyr)
library(reshape2)
library(readr)
library(OneR)
library(microbenchmark)
#path_to_pais <- "/Users/maxcummins/Dropbox/Doctorate/Manuscripts/Salmonella_AMR/SG17-135/analysis/abricate/abricate_PAIs.txt"
path_to_pais <- "/Users/maxcummins/Dropbox/Doctorate/Manuscripts/Salmonella_AMR/Submission_2-mSphere/SG17-135/analysis/abricate/abricate_PAIs_CT18.txt"
#Read in the abricate genotype data sheet (small number of rows for colname reassignment)
pais_df <-
read_delim(
path_to_pais,
"\t",
escape_double = FALSE,
trim_ws = TRUE,
n_max = 10
)
#Colname reassignment
colnames(pais_df)[c(1, 10:11)] <-
c("name", "perc_coverage", "perc_identity")
pais_df_colnames <- colnames(pais_df)
#Re-read in PAI abricate genotype data sheet
pais_df <-
read_delim(
path_to_pais,
"\t",
escape_double = FALSE,
trim_ws = TRUE,
col_names = FALSE,
skip = 1
)
#Remove cases where there are multiple headers from concatenation of abricate reports
pais_df <- pais_df %>% filter(X2 != "SEQUENCE")
#Colname reassignment
colnames(pais_df) <- pais_df_colnames
#Convert percent coverage and identity to numeric type to allow filtering
pais_df$perc_coverage <- as.numeric(pais_df$perc_coverage)
pais_df$perc_identity <- as.numeric(pais_df$perc_identity)
#Filter to perc_identity > 95%
#pais_df <-
pais_df <- pais_df %>% filter(perc_identity > 95)
#Trim excess characters the assembly names and reassign this to rownames
pais_df$name <- gsub("\\..*", "", pais_df$name)
#Replace "SAL_HC4750AA_AS" with SG17-135
pais_df$name <- gsub("SAL_HC4750AA_AS", "SG17-135", pais_df$name)
pais_df$newcov <- gsub("\\/.*","", pais_df$COVERAGE)
pais_df$length_gene <- gsub(".*\\/","", pais_df$COVERAGE)
pais_df$newcov <- gsub("-",":", pais_df$newcov)
new_df <- pais_df %>% group_by(name, GENE, length_gene) %>% filter(perc_coverage > 5) %>% summarise(start =paste(sort(unique(newcov)), collapse=","), end = paste(sort(unique(newcov)), collapse=",")) #%>% filter(grepl("SPI-1_", GENE))
#new_df <- pais_df %>% group_by(name, GENE, length_gene) %>% summarise(start =paste(sort(unique(newcov)), collapse=","), end = paste(sort(unique(newcov)), collapse=",")) #%>% filter(grepl("SPI-1_", GENE))
new_df$end <- gsub("[0-9]+:","", new_df$end)
new_df$start <- gsub(":[0-9]+","", new_df$start)
spl <-strsplit(as.character(new_df$start), ",")
start_coord <- data.frame(name = new_df$name, gene = new_df$GENE,
length_gene = new_df$length_gene,
chunk1 = sapply(spl, "[", 1),
chunk2 = sapply(spl, "[", 2),
chunk3 = sapply(spl, "[", 3),
chunk4= sapply(spl, "[", 4),
chunk5 = sapply(spl, "[", 5),
chunk6 = sapply(spl, "[", 6),
chunk7 = sapply(spl, "[", 7),
chunk8 = sapply(spl, "[", 8))
start_coord <- melt(start_coord, id=1:3, value.name = "start")
start_coord <- start_coord %>% select(-starts_with("variable"))
spl <-strsplit(as.character(new_df$end), ",")
end_coord <- data.frame(name = new_df$name, gene = new_df$GENE,
length_gene = new_df$length_gene,
chunk1 = sapply(spl, "[", 1),
chunk2 = sapply(spl, "[", 2),
chunk3 = sapply(spl, "[", 3),
chunk4= sapply(spl, "[", 4),
chunk5 = sapply(spl, "[", 5),
chunk6 = sapply(spl, "[", 6),
chunk7 = sapply(spl, "[", 7),
chunk8 = sapply(spl, "[", 8))
end_coord <- melt(end_coord, id=1:3, value.name = "end")
end_coord <- end_coord %>% select(-starts_with("variable"))
coords <- start_coord
coords$end <- end_coord$end
coords <- coords[complete.cases(coords),]
unique(coords$length_gene)
coords$start <- as.numeric(coords$start)
coords$end <- as.numeric(coords$end)
coords$length_gene <- as.numeric(levels(coords$length_gene))[coords$length_gene]
coords$percentage <- (((coords$end-coords$start)+1)/coords$length_gene)*100
test <- coords# %>% filter(name == "SAL_AB7542AA_AS", gene == "SPI-12_NC_006905_P4") %>% arrange(desc(end))
list_ <- vector(mode = "list", length = 0)
# For every (sample, gene) pair, build a 0/1 presence vector over the gene
# length, mark every aligned interval, and record the percent of positions
# covered by at least one hit.
for(sample in unique(test$name)){
  test2 <- test %>% filter(name == sample)
  for(gene_ in unique(test$gene)){
    test3 <- test2 %>% filter(gene == gene_)
    length_of_gene <- test3$length_gene[1]
    if(is.na(length_of_gene) == FALSE){
      range_matrix <- rep(0, times = length_of_gene)
      for(hit in 1:nrow(test3)){
        start_ <- test3[hit, 4]
        end_ <- test3[hit, 5]
        # Overlapping hits simply overwrite with 1, so positions covered by
        # several hits are counted once.
        range_matrix[start_:end_] <- 1
      }
      coverage_pct <- round((sum(range_matrix) / length_of_gene) * 100, digits = 3)
    } else {
      # Gene absent for this sample: record NA (converted to 0 downstream).
      # Fix: the original computed this from the *previous* gene's
      # range_matrix (always NA after dividing by NA, but an error on the
      # very first iteration, when range_matrix did not yet exist).
      coverage_pct <- NA
    }
    list_ <- append(list_, c(sample, gene_, coverage_pct))
  }
}
abc <- length(list_)/3
df <- data.frame(matrix(unlist(list_), nrow = abc, byrow=T), stringsAsFactors = F)
colnames(df) <- c("name","GENE","Coverage_percentage")
df$Coverage_percentage[is.na(df$Coverage_percentage)] <- 0
df$Coverage_percentage <- as.numeric(df$Coverage_percentage)
final_table <- dcast(df, name ~ GENE)
final_final_table <- final_table[1:nrow(final_table),2:ncol(final_table)]
final_final_table_2 <- final_final_table
final_final_table[final_final_table < 60] <- 0
final_final_table[final_final_table >= 60] <- 1
rownames(final_final_table) <- final_table$name
final_table <- final_final_table
write.csv(final_table, "analysis/PAIs_present_absent.csv")
pheatmap(final_final_table, fontsize_row = 2)
|
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here. lubridate is used below via ::
# without being loaded/declared here.
require(httr)
require(RCurl)
require(stringr)
# Read AWS credentials from the environment at call time (not cached).
aws_key = function() Sys.getenv('AWS_KEY')
aws_secret = function() Sys.getenv('AWS_SECRET_KEY')
# Miscellaneous functions to format time and date
# (ISO 8601 basic format used throughout SigV4: YYYYMMDD'T'HHMMSS'Z').
now <- function() format(lubridate::now(), '%Y%m%dT%H%M%SZ')
today <- function() format(lubridate::today(), '%Y%m%d')
# Timestamp captured once when this file is sourced; callers may pass their own.
request_date = now()
#' Reorder a query and URI encode the parameter names and values
#'
#' AWS Signature Version 4 requires query parameters sorted by ASCII byte
#' value, so the sort is performed under the "C" collation locale.
#' @param query the raw query string, e.g. "b=2&a=1"
#' @return the canonicalised query string, e.g. "a=1&b=2"
construct_query <- function(query) {
  # Split query on '&' and '='
  split_query <- str_split(str_split(query, pattern = '&')[[1]], pattern = '=')
  query_df <- do.call(rbind, split_query)
  # URI encode parameter names and values
  query_df <- apply(query_df, 2, curlEscape)
  # Sort on ASCII byte value; on.exit() restores the caller's locale on
  # *every* exit path (the original restored it only after the matrix branch,
  # leaking LC_COLLATE = "C" when a single-parameter query returned early).
  old_locale <- Sys.getlocale("LC_COLLATE")
  Sys.setlocale("LC_COLLATE", "C")
  on.exit(Sys.setlocale("LC_COLLATE", old_locale), add = TRUE)
  if (!is.matrix(query_df)) {
    # Single parameter: nothing to sort.
    return(str_c(query_df[1], "=", query_df[2]))
  }
  query_df <- query_df[order(query_df[, 1]), ]
  str_c(query_df[, 1], "=", query_df[, 2], collapse = "&")
}
#' Create a canonical request and hashed canonical request.
#'
#' This function puts together an http request into a standardised (canonical) form,
#' to ensure that the signature calculated by AWS when it receives the request
#' matches the one calculated by us. This is the equivalent of Task 1 in the AWS
#' API Signature Version 4 signing process
#' (http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html).
#' @param request_method the HTTP verb used for the request (GET/POST/PUT etc)
#' @param headers list of headers to be included in the request. Must include a
#'  \code{host} header. See examples for correct format
#' @param payload the payload from the body of the HTTP/HTTPS request
#' @param uri the absolute path component of the uri
#' @param query the query string of the request. May be empty if the query is in
#'  the payload instead (the default)
#' @return a list with elements \code{canonical_request},
#'  \code{hashed_canonical_request} and \code{signed_headers}
#' @examples
#' headers <- list(
#'    'content-type' = 'application/x-www-form-urlencoded; charset=utf-8',
#'    'x-amz-date' = '20110909T233600Z',
#'    'host' = 'iam.amazonaws.com')
#' create_request('POST', headers, 'Action=ListUsers&Version=2010-05-08', '/', '')
create_request = function(request_method, headers, payload,
                          uri = '/', query = '') {
  # Only encode query if it's given
  if (query != ''){
    query <- construct_query(query)
  }
  # Canonicalise the headers. SigV4 requires headers sorted by their
  # *lowercase* names, so lowercase first and then sort (the original sorted
  # the mixed-case names first, which mis-orders e.g. 'X-Amz-Date' vs 'host').
  names(headers) <- tolower(names(headers))
  headers <- headers[order(names(headers))]
  canonical_headers <- str_c(names(headers), ':', unlist(headers), collapse = '\n')
  canonical_headers <- str_c(canonical_headers, '\n')
  signed_headers <- str_c(names(headers), collapse = ';')
  # SigV4 hashes the (possibly empty) payload with hex-encoded SHA-256.
  hashed_payload = digest::digest(payload, algo="sha256", serialize = FALSE)
  canonical_request = str_c(request_method, '\n',
                            uri, '\n',
                            query, '\n',
                            canonical_headers, '\n',
                            signed_headers, '\n',
                            hashed_payload)
  hashed_canonical_request = digest::digest(canonical_request, algo="sha256", serialize = FALSE)
  out <- list(canonical_request = canonical_request,
              hashed_canonical_request = hashed_canonical_request,
              signed_headers = signed_headers)
  return(out)
}
#' Create the credential scope string.
#'
#' Helper function for concatenating strings into the right format for the
#' credential scope value: "<date>/<region>/<service>/aws4_request".
#' @param date_stamp date in the form YYYYMMDD - must match that used in other
#'  steps. Defaults to the current date; the original default
#'  (\code{date_stamp = date_stamp}) was self-referential and errored with
#'  "promise already under evaluation" whenever the argument was omitted.
#' @param region region being targeted
#' @param service service being targeted
#' @examples
#' create_credential_scope('20110909', 'us-east-1', 'iam')
create_credential_scope = function(date_stamp = today(), region, service) {
  # paste() with sep = "/" is base R and, for scalar character inputs,
  # identical to the previous stringr::str_c() call.
  paste(date_stamp, region, service, 'aws4_request', sep = "/")
}
#' Create a string to sign.
#'
#' This function is the equivalent of Task 2 in the AWS API Signature Version 4
#' signing process
#' (http://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html).
#' It currently only uses SHA256; this can be easily changed in future if
#' necessary.
#'
#' @param full_canonical_request list produced by \code{create_request}; its
#'  \code{hashed_canonical_request} element is used here. (The previous docs
#'  described a non-existent \code{hashed_canonical_request} argument.)
#' @param credential_scope credential_scope string calculated by the function
#'  of the same name
#' @param request_date string containing the date and time of the request,
#'  matching the value used in previous steps, in the form YYYYMMDDTHHMMSSZ.
#'  Defaults to the current time; the original default
#'  (\code{request_date = request_date}) was self-referential and errored
#'  whenever the argument was omitted.
#' @examples
#' create_string_to_sign(list(hashed_canonical_request =
#'   '3511de7e95d28ecd39e9513b642aee07e54f4941150d8df8bf94b328ef7e55e2'),
#'   '20110909/us-east-1/iam/aws4_request',
#'   '20110909T233600Z')
create_string_to_sign = function(full_canonical_request,
                                 credential_scope,
                                 request_date = now()) {
  # paste0() is base R and identical to str_c() for scalar character inputs.
  paste0('AWS4-HMAC-SHA256\n',
         request_date, '\n',
         credential_scope, '\n',
         full_canonical_request$hashed_canonical_request)
}
#' Calculate the signing key.
#'
#' This function is the equivalent of Task 3 in the AWS API Signature Version 4
#' signing process
#' (http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html).
#' It currently only uses SHA256; this can be easily changed in future if
#' necessary.
#'
#' @param date_stamp request date in the form YYYYMMDD; defaults to the current
#'  date. This must match the date used in the credential scope when creating
#'  the string to sign
#' @param region_name name of the AWS region being targeted, e.g. 'eu-west-1'
#' @param service_name name of the AWS service being targeted, e.g. 'ec2'
#' @return the derived signing key as raw bytes
#' @examples
#' create_signing_key('20120215', 'us-east-1', 'iam')
create_signing_key = function(date_stamp = today(), region_name, service_name) {
  # Chained HMAC-SHA256 derivation: "AWS4" + secret -> date -> region ->
  # service -> "aws4_request". raw = TRUE keeps each intermediate key as raw
  # bytes, as required for the next HMAC round.
  key_date = digest::hmac(str_c('AWS4', aws_secret()), date_stamp, algo = 'sha256', raw = TRUE)
  key_region = digest::hmac(key_date, region_name, algo = 'sha256', raw = TRUE)
  key_service = digest::hmac(key_region, service_name, algo = 'sha256', raw = TRUE)
  key_signing = digest::hmac(key_service, 'aws4_request', algo = 'sha256', raw = TRUE)
  key_signing
}
#' Create the final signature to be added to the HTTP header as Authorization.
#'
#' This is the final step in the authorization procedure, where the three tasks
#' are put together to create the authorization value.
#' @param request_method the HTTP verb used for the request (GET/POST/PUT etc)
#' @param headers list of headers to be included in the request. Must include a
#'  \code{host} header. See examples for correct format
#' @param payload the payload from the body of the HTTP/HTTPS request
#' @param uri the absolute path component of the uri
#' @param query the query string of the request; may be empty if the query is
#'  carried in the payload
#' @param date_stamp request date in the form YYYYMMDD; defaults to the current
#'  date.
#' @param region_name name of the AWS region being targeted, e.g. 'eu-west-1'
#' @param service_name name of the AWS service being targeted, e.g. 'ec2'
#' @param request_date string containing the date and time of the request,
#'  matching the value used in previous steps, in the form YYYYMMDDTHHMMSSZ
#' @return an httr request object (from \code{add_headers}) carrying the
#'  Authorization header
#' @examples
#' headers <- list(
#'    'content-type' = 'application/x-www-form-urlencoded; charset=utf-8',
#'    'x-amz-date' = '20110909T233600Z',
#'    'host' = 'iam.amazonaws.com')
#' create_auth('POST', headers, 'Action=ListUsers&Version=2010-05-08', '/',
#'   '', '20110909', 'us-east-1', 'iam', '20110909T233600Z')
#' create_auth('GET',
#'   list('Date'='Mon, 09 Sep 2011 23:36:00 GMT','Host'='host.foo.com'),
#'   '', '/', 'foo=Zoo&foo=aha', '20110909', 'us-east-1', 'host',
#'   '20110909T233600Z')
create_auth <- function(request_method, headers, payload, uri, query,
                        date_stamp, region_name, service_name,
                        request_date) {
  # Task 1: canonical request (also yields the signed-headers list).
  full_request <- create_request(request_method, headers, payload, uri, query)
  credential_scope <- create_credential_scope(date_stamp, region_name, service_name)
  # Task 2: string to sign; Task 3: derive the signing key and sign.
  string_to_sign <- create_string_to_sign(full_request, credential_scope, request_date)
  signing_key <- create_signing_key(date_stamp, region_name, service_name)
  signature <- digest::hmac(signing_key, string_to_sign, algo="sha256")
  auth <- str_c('AWS4-HMAC-SHA256 Credential=', aws_key(), '/',
                credential_scope, ', SignedHeaders=',
                full_request$signed_headers, ", Signature=", signature)
  # The assignment's value is returned (invisibly) as the function result.
  out <- add_headers(Authorization = auth)
}
| /R/create_auth.R | no_license | TotallyBullshit/awsr | R | false | false | 8,567 | r | require(httr)
require(RCurl)
require(stringr)
aws_key = function() Sys.getenv('AWS_KEY')
aws_secret = function() Sys.getenv('AWS_SECRET_KEY')
# Miscellaneous functions to format time and date
now <- function() format(lubridate::now(), '%Y%m%dT%H%M%SZ')
today <- function() format(lubridate::today(), '%Y%m%d')
request_date = now()
#' Reorder a query and URI encode the parameter names and values
#'
#' AWS Signature Version 4 requires query parameters sorted by ASCII byte
#' value, so the sort is performed under the "C" collation locale.
#' @param query the raw query string, e.g. "b=2&a=1"
#' @return the canonicalised query string, e.g. "a=1&b=2"
construct_query <- function(query) {
  # Split query on '&' and '='
  split_query <- str_split(str_split(query, pattern = '&')[[1]], pattern = '=')
  query_df <- do.call(rbind, split_query)
  # URI encode parameter names and values
  query_df <- apply(query_df, 2, curlEscape)
  # Sort on ASCII byte value; on.exit() restores the caller's locale on
  # *every* exit path (the original restored it only after the matrix branch,
  # leaking LC_COLLATE = "C" when a single-parameter query returned early).
  old_locale <- Sys.getlocale("LC_COLLATE")
  Sys.setlocale("LC_COLLATE", "C")
  on.exit(Sys.setlocale("LC_COLLATE", old_locale), add = TRUE)
  if (!is.matrix(query_df)) {
    # Single parameter: nothing to sort.
    return(str_c(query_df[1], "=", query_df[2]))
  }
  query_df <- query_df[order(query_df[, 1]), ]
  str_c(query_df[, 1], "=", query_df[, 2], collapse = "&")
}
#' Create a canonical request and hashed canonical request.
#'
#' This function puts together an http request into a standardised (canonical) form,
#' to ensure that the signature calculated by AWS when it receives the request
#' matches the one calculated by us. This is the equivalent of Task 1 in the AWS
#' API Signature Version 4 signing process
#' (http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html).
#' @param request_method the HTTP verb used for the request (GET/POST/PUT etc)
#' @param headers list of headers to be included in the request. Must include a
#' \code{host} header. See examples for correct format
#' @param payload the payload from the body of the HTTP/HTTPS request
#' @param uri the absolute path component of the uri
#' @param query the query string of the request. May be empty if the query is in
#' the payload instead (the default)
#' @examples
#' headers <- list(
#' 'content-type' = 'application/x-www-form-urlencoded; charset=utf-8',
#' 'x-amz-date' = '20110909T233600Z',
#' 'host' = 'iam.amazonaws.com')
#' create_request('POST', headers, 'Action=ListUsers&Version=2010-05-08', '/', '')
create_request = function(request_method, headers, payload,
uri = '/', query = '') {
# Only encode query if it's given
if (query != ''){
query <- construct_query(query)
}
# Canonicalise the headers
headers <- headers[order(names(headers))]
names(headers) <- tolower(names(headers))
canonical_headers <- str_c(names(headers), ':', unlist(headers), collapse = '\n')
canonical_headers <- str_c(canonical_headers, '\n')
signed_headers <- str_c(names(headers), collapse = ';')
hashed_payload = digest::digest(payload, algo="sha256", serialize = FALSE)
canonical_request = str_c(request_method, '\n',
uri, '\n',
query, '\n',
canonical_headers, '\n',
signed_headers, '\n',
hashed_payload)
hashed_canonical_request = digest::digest(canonical_request, algo="sha256", serialize = FALSE)
out <- list(canonical_request = canonical_request,
hashed_canonical_request = hashed_canonical_request,
signed_headers = signed_headers)
return(out)
}
#' Create the credential scope string.
#'
#' Helper function for concatenating strings into the right format for the
#' credential scope value: "<date>/<region>/<service>/aws4_request".
#' @param date_stamp date in the form YYYYMMDD - must match that used in other
#'  steps. Defaults to the current date; the original default
#'  (\code{date_stamp = date_stamp}) was self-referential and errored with
#'  "promise already under evaluation" whenever the argument was omitted.
#' @param region region being targeted
#' @param service service being targeted
#' @examples
#' create_credential_scope('20110909', 'us-east-1', 'iam')
create_credential_scope = function(date_stamp = today(), region, service) {
  # paste() with sep = "/" is base R and, for scalar character inputs,
  # identical to the previous stringr::str_c() call.
  paste(date_stamp, region, service, 'aws4_request', sep = "/")
}
#' Create a string to sign.
#'
#' This function is the equivalent of Task 2 in the AWS API Signature Version 4
#' signing process
#' (http://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html).
#' It currently only uses SHA256; this can be easily changed in future if
#' necessary.
#'
#' @param full_canonical_request list produced by \code{create_request}; its
#'  \code{hashed_canonical_request} element is used here. (The previous docs
#'  described a non-existent \code{hashed_canonical_request} argument.)
#' @param credential_scope credential_scope string calculated by the function
#'  of the same name
#' @param request_date string containing the date and time of the request,
#'  matching the value used in previous steps, in the form YYYYMMDDTHHMMSSZ.
#'  Defaults to the current time; the original default
#'  (\code{request_date = request_date}) was self-referential and errored
#'  whenever the argument was omitted.
create_string_to_sign = function(full_canonical_request,
                                 credential_scope,
                                 request_date = now()) {
  # paste0() is base R and identical to str_c() for scalar character inputs.
  paste0('AWS4-HMAC-SHA256\n',
         request_date, '\n',
         credential_scope, '\n',
         full_canonical_request$hashed_canonical_request)
}
#' Calculate the signing key.
#'
#' This function is the equivalent of Task 3 in the AWS API Signature Version 4
#' signing process
#' (http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html).
#' It currently only uses SHA256; this can be easily changed in future if
#' necessary.
#'
#' @param date_stamp request date in the form YYYYMMDD; defaults to the current
#' date. This must match the date used in the credential scope when creating
#' the string to sign
#' @param region_name name of the AWS region being targeted, e.g. 'eu-west-1'
#' @param service_name name of the AWS service being targeted, e.g. 'ec2'
#' @examples
#' create_signing_key('20120215', 'us-east-1', 'iam')
create_signing_key = function(date_stamp = today(), region_name, service_name) {
key_date = digest::hmac(str_c('AWS4', aws_secret()), date_stamp, algo = 'sha256', raw = TRUE)
key_region = digest::hmac(key_date, region_name, algo = 'sha256', raw = TRUE)
key_service = digest::hmac(key_region, service_name, algo = 'sha256', raw = TRUE)
key_signing = digest::hmac(key_service, 'aws4_request', algo = 'sha256', raw = TRUE)
key_signing
}
#' Create the final signature to be added to the HTTP header as Authorization.
#'
#' This is the final step in the authorization procedure, where the three tasks
#' are put together to create the authorization value.
#' @param request_method the HTTP verb used for the request (GET/POST/PUT etc)
#' @param headers list of headers to be included in the request. Must include a
#'   \code{host} header. See examples for correct format
#' @param payload the payload from the body of the HTTP/HTTPS request
#' @param uri the path component of the request URL (e.g. '/'), used when
#'   building the canonical request
#' @param query the URL query string (may be the empty string), used when
#'   building the canonical request
#' @param date_stamp request date in the form YYYYMMDD; defaults to the current
#'   date.
#' @param region_name name of the AWS region being targeted, e.g. 'eu-west-1'
#' @param service_name name of the AWS service being targeted, e.g. 'ec2'
#' @param request_date string containing the date and time of the request,
#'   matching the value used in previous steps, in the form YYYYMMDDTHHMMSSZ
#' @return the result of \code{add_headers()} carrying the Authorization value,
#'   ready to be included in an httr request.
#' @examples
#' headers <- list(
#'   'content-type' = 'application/x-www-form-urlencoded; charset=utf-8',
#'   'x-amz-date' = '20110909T233600Z',
#'   'host' = 'iam.amazonaws.com')
#' create_auth('POST', headers, 'Action=ListUsers&Version=2010-05-08', '/',
#'   '', '20110909', 'us-east-1', 'iam', '20110909T233600Z')
#' create_auth('GET',
#'   list('Date'='Mon, 09 Sep 2011 23:36:00 GMT','Host'='host.foo.com'),
#'   '', '/', 'foo=Zoo&foo=aha', '20110909', 'us-east-1', 'host',
#'   '20110909T233600Z')
create_auth <- function(request_method, headers, payload, uri, query,
                        date_stamp, region_name, service_name,
                        request_date) {
  # Assemble the canonical request, credential scope and string to sign,
  # then sign with the derived key (SigV4 procedure).
  full_request <- create_request(request_method, headers, payload, uri, query)
  credential_scope <- create_credential_scope(date_stamp, region_name, service_name)
  string_to_sign <- create_string_to_sign(full_request, credential_scope, request_date)
  signing_key <- create_signing_key(date_stamp, region_name, service_name)
  # raw = FALSE (default) so the signature is hex-encoded, as AWS expects.
  signature <- digest::hmac(signing_key, string_to_sign, algo = "sha256")
  auth <- str_c('AWS4-HMAC-SHA256 Credential=', aws_key(), '/',
                credential_scope, ', SignedHeaders=',
                full_request$signed_headers, ", Signature=", signature)
  # Return the header object visibly.  (Previously this was assigned to a
  # dead local `out`, which returned the same value but invisibly.)
  add_headers(Authorization = auth)
}
|
#!/usr/bin/env Rscript
####################
### vectors
####################
######## vectorized operations and recycling
nums <- c(10, 20, 30, 40)
mult <- c(10, -10)
print(nums * mult) # 100, -200, 300, 400
## single elements are length-1 vectors:
mult <- 100 # a length-1 vector
print(nums * mult) # 1000, 2000, 3000, 4000
## other vector types:
# character vector (vector of strings)
names <- c("Joe", "Jim", "Kim")
# logical
checkit <- c(FALSE, TRUE, FALSE)
## vectors can't mix types - you'll get autoconversion
test <- c(1.2, as.integer(4), "hi") # "1.2", "4", "hi"
######### selection/subsetting
## by index #
subnums <- nums[c(3, 2)] # second and third element (300, 200)
subnum <- nums[3] # just the 3rd (300)
## replacement:
nums[c(3, 2)] <- c(-1, -2) # change third and second element
nums[c(1, 2, 3)] <- NA # entries are recycled if they are shorter; NA is a special not available type
## by logical
nums[c(TRUE, FALSE, TRUE, FALSE)] <- c(52, 42)
# combining vectorized operations with local operators
large_values <- nums[nums > median(nums)] # select w/ logical selection, produced by recycled > operator
########### named vectors: a bit weird
nums <- c(10, 20, 30, 40)
# setting the names attribute
attr(nums, "names") <- c("A", "B", "C", "D")
# more canonical:
names(nums) <- c("A", "B", "C", "D")
# now we can index by character vector:
nums[c("C", "A")] <- c(3, 1)
############### handy vector functions, logic
nums <- seq(1, 20, 0.2) # 1 to 20 in steps of 0.2
nums <- seq(1, 20) # steps of 1
nums <- 1:20 # sugar
print(length(nums)) # 20 (also works on lists)
nums_sample <- sample(nums) # a random permutation
nums_sample <- sample(nums, size = 5) # get 5 random elements
nums_sample <- sample(nums, size = 5, replace = TRUE) # sample w/ replacement
nums_sample <- rnorm(100, mean = 20, sd = 4) # sample from a normal distribution
nums_sample[4] <- NA # replace the 4th entry with NA (unknown/not available) for exposition
# return a logical indicating which entries are NA
where_na <- is.na(nums_sample) # FALSE FALSE FALSE TRUE FALSE FALSE FALSE ...
nums_mean <- mean(nums_sample) # will be NA since there's an NA in the mix
nums_mean <- mean(nums_sample, na.rm = TRUE) # remove NA's during computation
nums_mean <- mean(nums_sample[!is.na(nums_sample)]) # no thank you, I can remove NAs myself. (! for negation)
# see also: sd(), median(), sum(), max(), min()
# logical operators are & and | (&& and || exist but don't operate in the same vectorized way as other operators like +, *, >, etc)
a<- c(TRUE, TRUE, FALSE, FALSE)
b<- c(TRUE, FALSE, TRUE, FALSE)
print(a & b) # T F F F
print(a | b) # T T T F
####################
### lists
####################
## lists can hold anything - other lists, etc. they are often named
person_list <- list(36, "male", c("Fido", "Fluffy"))
names(person_list) <- c("age", "gender", "pets")
# directly:
person_list <- list(age = 36,
                    gender = "male",
                    pets = c("Fido", "Fluffy"))
# alias: the examples below (and later sections) refer to this list as `person`
person <- person_list
# accessing w/ [] returns a sublist:
person_no_pets <- person[c(2, 3)]
# aka, by name
person_no_pets <- person[c("gender", "pets")]
# but you'll get a list of 1 if you ask for it this way:
pets_only_list <- person[3] # not a vector of len 2, but rather a list of length 1 holding a vector of length 2
# double-brackets are used for that
pets <- person[[3]] # vector of length 2
# by name
pets <- person[["pets"]]
# syntactic sugar:
pets <- person$"pets"
# syntactic sugar (if name is simple, no funky chars)
pets <- person$pets
# we can work with items inside lists
person$pets[2] <- "DemonCat" # renaming pet # 2
# and add new entries by name
person$pet_types <- c("dog", "cat")
############## lists as hashes
# we can use lists like hashes (lookup is fast, but they don't grow efficiently, see the hash package for a better alternative: https://cran.r-project.org/web/packages/hash/index.html)
# to so though, we need to watch out for the sugar
myhash <- list() # an empty list
new_key <- "Joe"
new_value <- 36
# this won't work because myhash$new_key is sugar for myhash$"new_key" (is sugar for myhash[["new_key"]])
# myhash$new_key <- new_value
# but this does
myhash[[new_key]] <- new_value
############## lists as objects
# lists are often complex, and they're the de-facto way to store structured (non-rectangular) data. the str() function prints their structure summary
str(person)
# many R functions actually return lists;
samp1 <- rnorm(40, mean = 4, sd = 1)
samp2 <- rnorm(40, mean = 8, sd = 5)
result <- t.test(samp1, samp2)
print(result) # fancy formatting
str(result) # show the list structure
# the "class" attribute determines what methods will be dispatched to from generic functions. (in the S3 object system)
attr(person, "class") <- c("Adult", "Person")
# canonically:
class(person) <- c("Adult", "Person")
# when we run
print(person)
# because print is generic, it will try print.Adult(person), if not found print.Person(person), and finally if not found print.default(person).
print(methods(print)) # show all print.* functions
print(methods(class = "list")) # show all *.list functions
######### lapply (map)
# the lapply() function acts as a map; first param is a list, second is a function to call on each element
samples <- list(s1 = rnorm(4),
s2 = rnorm(50),
s3 = rnorm(25))
medians <- lapply(samples, median)
# optional follow-on parameters can be specified for each call in the call to lapply:
medians_ignore_nas <- lapply(samples, median, na.rm = TRUE)
###################
### Misc data types and their caveats
###################
######## matrices and arrays
m <- matrix(1:4, nrow = 2, ncol = 2)
multidimArray <- array(1:12, dim = c(2, 3, 3))
# matrix is a special case of array
print(class(m)) # "matrix", "array"
print(class(multidimArray)) # "array"
## WARNING
# both matrices and arrays are backed by vectors (with metadata on dimensionality for lookup by index),
# meaning both types are limited in the same way as vectors: they can't mix types; there are numeric matrices, character matrices, logical
# matrices, etc.
## WARNING 2
# R's lapply() is nice, and it can also work on a vector input (producing a list output for each element of the vector)
# R also has apply() and sapply() - sapply() tries to convert the output into an appropriate type (vector, list, matrix...) by guessing
# ugh
# apply() applies a function over dimensions of a matrix or array
# don't use it on a dataframe: it will first convert the dataframe to a matrix (coercing all the data to be the same type)
# https://www.talyarkoni.org/blog/2012/06/08/r-the-master-troll-of-statistical-languages/
######## factors
# factors are annoying, basically a way to efficiently store string vectors and put restrictions on them.
s <- as.factor(c("Good", "Bad", "OK", "Bad"))
print(s)
# to see what's really going on, we remove the class attribute (so we don't get dispatched to factor-specific output)
str(unclass(s))
# output:
# int [1:4] 2 1 3 1
# - attr(*, "levels")= chr [1:3] "Bad" "Good" "OK"
# thus: a factor is an integer vector, with an attribute called "levels" that maps integers to their representation
# too much to say about factors here... normally they aren't worth worrying about at first but machine learning in R uses them frequently
# more on factors (in factors section, they broke my anchor links): https://open.oregonstate.education/computationalbiology/chapter/character-and-categorical-data/
########### dates and times
# R has native support for these w/ POSIXct and POSIXlt vector types
# the lubridate package adds functions for these types that are actually reasonable
####################
### data frames
####################
# data frames are lists of vectors, one per column, and they keep their columns the same length (recyling entries when creating new columns if necessary,
# or throwing an error if you try to add a column that's too long)
data <- data.frame(colA = c("A", "B", "C"),
colB = c(1, 2, 3),
colC = c(TRUE, FALSE, TRUE),
stringsAsFactors = FALSE) # set this if you don't want your char cols turned to factors (this if finally default to false in R 4.0)
# because dataframes are lists of vectors, we can all the stuff we can w/ lists
print(names(data)) # names are the column names
print(data$colB) # 1 2 3
data$colC[1] <- FALSE # set an entry to false
# when we create a new entry, e.g. by name, it's recycled:
data$likes_music <- NA # recycled to NA NA NA
print(data)
# colA colB colC likes_music
#1 A 1 FALSE NA
#2 B 2 FALSE NA
#3 C 3 TRUE NA
# colnames() is the column vector names (same as returned by names())
print(colnames(data)) # "colA" "colB" "colC" "likes_music"
# the 1, 2, 3 on the left are not row indices, they are row *names* - stored as a character vector
print(rownames(data)) # "1" "2" "3"
rownames(data) <- c("A1", "A2", "A3")
print(data)
# colA colB colC likes_music
#A1 A 1 FALSE NA
#A2 B 2 FALSE NA
#A3 C 3 TRUE NA
# Notice that the quotations are also left off of colA, making it hard to distinguish column types (is colB a character, factor, integer, or numeric vector?!)
# the tidyverse folks have created tibbles - an extension of data.frames that inherit data frame methods but provide nicer versions for
# some operations
library(tibble)
print(as_tibble(data))
## A tibble: 3 x 4
# colA colB colC likes_music
# <chr> <dbl> <lgl> <lgl>
#1 A 1 FALSE NA
#2 B 2 FALSE NA
#3 C 3 TRUE NA
##### base-R indexing
# vectors and lists can be indexed with [], lists and dataframes can be indexed with [[]], and dataframes can be indexed with [ , ]
# where the syntax is [<row_selector>, <col_selector>]; either of these can be a numeric or integer vector (to select by row or column
# index), character vector (to select by row or column name), or logical vector (to select by logical keep/don't keep)
subdata <- data[c(1, 3), c(TRUE, FALSE, FALSE, TRUE)] # rows 1 and 3, cols 1 and 4
# get rows with colB greater than the median, all cols
subdata <- data[data$colB >= median(data$colB), ]
########################33
### tidyverse
##########################
# in the last ~decade there's been a growth of packages aimed at cleaning up the R user experience,
# particularly around common data-munging tasks
# since many default R functions have varying parameter names for common parameters, etc.
# these also aimed at providing a user-friendliness & compactness
# the main downside is they tend to verge on being DSLs with specialized functions, sometimes broad API for each one
# most tidyverse packages have focused on dataframes, but more recent additions have expanded to include functions for lists,
# arrays, etc.
entries <- data.frame(colA = c("A", "B", "C"),
colB = c(1, 2, 3),
colC = c(TRUE, FALSE, TRUE),
stringsAsFactors = FALSE)
# this is a base R function that illustrates R's use of non-standard-evaluation to allow working with column names
# as unquoted entries
# changes colB >= median(colB) to entries[["colB"]] >= median(entries[["colB"]] ) before execution
sub_data <- subset(entries, colB >= median(colB))
# tidyverse *loves* these unqouted things
# regular R functions are spotty about doing this, and spotty about which argument is the data (here it's first)
# tidyverse functions strive to take the data argument first; here's dplyrs filter which does the same thing
library(dplyr)
sub_data <- filter(entries, colB >= median(colB))
# to create a new column which is colX = 5 * colB
sub_data$colX <- sub_data$colB * 5 # old-school
sub_data <- mutate(sub_data, colX = 5 * colB)
# the %>% supplies the result of it's left hand side as the first argument to the function on the right-hand side (also using non-
# standard evaluation to accept the calling-form of the right side funtion)
sub_data <- filter(entries, colB >= median(colB)) %>%
mutate(colX = 5 * colB) # could also be mutate(., colX = 5. colB), where . is interpreted to mean the input from the LHS
| /rstuff/crash_course.R | no_license | oneilsh/covid-19-dream | R | false | false | 12,365 | r | #!/usr/bin/env Rscript
####################
### vectors
####################
######## vectorized operations and recycling
nums <- c(10, 20, 30, 40)
mult <- c(10, -10)
print(nums * mult) # 100, -200, 300, 400
## single elements are length-1 vectors:
mult <- 100 # a length-1 vector
print(nums * mult) # 1000, 2000, 3000, 4000
## other vector types:
# character vector (vector of strings)
names <- c("Joe", "Jim", "Kim")
# logical
checkit <- c(FALSE, TRUE, FALSE)
## vectors can't mix types - you'll get autoconversion
test <- c(1.2, as.integer(4), "hi") # "1.2", "4", "hi"
######### selection/subsetting
## by index #
subnums <- nums[c(3, 2)] # second and third element (300, 200)
subnum <- nums[3] # just the 3rd (300)
## replacement:
nums[c(3, 2)] <- c(-1, -2) # change third and second element
nums[c(1, 2, 3)] <- NA # entries are recyled if they are shorter; NA is a special not available type
## by logical
nums[c(TRUE, FALSE, TRUE, FALSE)] <- c(52, 42)
# combining vectorized operations with local operators
large_values <- nums[nums > median(nums)] # select w/ logical selection, produced by recycled > operator
########### named vectors: a bit weird
nums <- c(10, 20, 30, 40)
# setting the names attribute
attr(nums, "names") <- c("A", "B", "C", "D")
# more canonical:
names(nums) <- c("A", "B", "C", "D")
# now we can index by character vector:
nums[c("C", "A")] <- c(3, 1)
############### handy vector functions, logic
nums <- seq(1, 20, 0.2) # 1 to 20 in steps of 0.2
nums <- seq(1, 20) # steps of 1
nums <- 1:20 # sugar
print(length(nums)) # 20 (also works on lists)
nums_sample <- sample(nums) # a random permutation
nums_sample <- sample(nums, size = 5) # get 5 random elements
nums_sample <- sample(nums, size = 5, replace = TRUE) # sample w/ replacement
nums_sample <- rnorm(100, mean = 20, sd = 4) # sample from a normal distribution
nums_sample[4] <- NA # replace the 4th entry with NA (unknown/not available) for exposition
# return a logical indicating which entries are NA
where_na <- is.na(nums_sample) # FALSE FALSE FALSE TRUE FALSE FALSE FALSE ...
nums_mean <- mean(nums_sample) # will be NA since there's an NA in the mix
nums_mean <- mean(nums_sample, na.rm = TRUE) # remove NA's during computation
nums_mean <- mean(nums_sample[!is.na(nums_sample)]) # no thank you, I can remove NAs myself. (! for negation)
# see also: sd(), median(), sum(), max(), min()
# logical operators are & and | (&& and || exist but don't operate in the same vectorized way as other operators like +, *, >, etc)
a<- c(TRUE, TRUE, FALSE, FALSE)
b<- c(TRUE, FALSE, TRUE, FALSE)
print(a & b) # T F F F
print(a | b) # T T T F
####################
### lists
####################
## lists can hold anything - other lists, etc. they are often named
person_list <- list(36, "male", c("Fido", "Fluffy"))
names(person_list) <- c("age", "gender", "pets")
# directly:
person_list <- list(age = 36,
                    gender = "male",
                    pets = c("Fido", "Fluffy"))
# alias: the examples below (and later sections) refer to this list as `person`
person <- person_list
# accessing w/ [] returns a sublist:
person_no_pets <- person[c(2, 3)]
# aka, by name
person_no_pets <- person[c("gender", "pets")]
# but you'll get a list of 1 if you ask for it this way:
pets_only_list <- person[3] # not a vector of len 2, but rather a list of length 1 holding a vector of length 2
# double-brackets are used for that
pets <- person[[3]] # vector of length 2
# by name
pets <- person[["pets"]]
# syntactic sugar:
pets <- person$"pets"
# syntactic sugar (if name is simple, no funky chars)
pets <- person$pets
# we can work with items inside lists
person$pets[2] <- "DemonCat" # renaming pet # 2
# and add new entries by name
person$pet_types <- c("dog", "cat")
############## lists as hashes
# we can use lists like hashes (lookup is fast, but they don't grow efficiently, see the hash package for a better alternative: https://cran.r-project.org/web/packages/hash/index.html)
# to so though, we need to watch out for the sugar
myhash <- list() # an empty list
new_key <- "Joe"
new_value <- 36
# this won't work because myhash$new_key is sugar for myhash$"new_key" (is sugar for myhash[["new_key"]])
# myhash$new_key <- new_value
# but this does
myhash[[new_key]] <- new_value
############## lists as objects
# lists are often complex, and they're the de-facto way to store structured (non-rectangular) data. the str() function prints their structure summary
str(person)
# many R functions actually return lists;
samp1 <- rnorm(40, mean = 4, sd = 1)
samp2 <- rnorm(40, mean = 8, sd = 5)
result <- t.test(samp1, samp2)
print(result) # fancy formatting
str(result) # show the list structure
# the "class" attribute determines what methods will be dispatched to from generic functions. (in the S3 object system)
attr(person, "class") <- c("Adult", "Person")
# canonically:
class(person) <- c("Adult", "Person")
# when we run
print(person)
# because print is generic, it will try print.Adult(person), if not found print.Person(person), and finally if not found print.default(person).
print(methods(print)) # show all print.* functions
print(methods(class = "list")) # show all *.list functions
######### lapply (map)
# the lapply() function acts as a map; first param is a list, second is a function to call on each element
samples <- list(s1 = rnorm(4),
s2 = rnorm(50),
s3 = rnorm(25))
medians <- lapply(samples, median)
# optional follow-on parameters can be specifid for each call in the call to lapply:
medians_ignore_nas <- lapply(samples, median, na.rm = TRUE)
###################
### Misc data types and their caveats
###################
######## matrices and arrays
m <- matrix(1:4, nrow = 2, ncol = 2)
multidimArray <- array(1:12, dim = c(2, 3, 3))
# matrix is a special case of array
print(class(m)) # "matrix", "array"
print(class(multidimArray)) # "array"
## WARNING
# both matrices and arrays are backed by vectors (with metadata on dimensionality for lookup by index),
# meaning both types are limited in the same way as vectors: they can't mix types; there are numeric matrices, character matrices, logical
# matrices, etc.
## WARNING 2
# R's lapply() is nice, and it can also work on a vector input (producing a list output for each element of the vector)
# R also has apply() and sapply() - sapply() tries to convert the output into an appropriate type (vector, list, matrix...) by guessing
# ugh
# apply() applies a function over dimensions of a matrix or array
# don't use it on a dataframe: it will first convert the dataframe to a matrix (coercing all the data to be the same type)
# https://www.talyarkoni.org/blog/2012/06/08/r-the-master-troll-of-statistical-languages/
######## factors
# factors are annoying, basically a way to efficiently store string vectors and put restrictions on them.
s <- as.factor(c("Good", "Bad", "OK", "Bad"))
print(s)
# to see what's really going on, we remove the class attribute (so we don't get dispatched to factor-specific output)
str(unclass(s))
# output:
# int [1:4] 2 1 3 1
# - attr(*, "levels")= chr [1:3] "Bad" "Good" "OK"
# thus: a factor is an integer vector, with an attribute called "levels" that maps integers to their representation
# too much to say about factors here... normally they aren't worth worrying about at first but machine learning in R uses them frequently
# more on factors (in factors section, they broke my anchor links): https://open.oregonstate.education/computationalbiology/chapter/character-and-categorical-data/
########### dates and times
# R has native support for these w/ POSIXct and POSIXlt vector types
# the lubridate package adds functions for these types that are actually reasonable
####################
### data frames
####################
# data frames are lists of vectors, one per column, and they keep their columns the same length (recyling entries when creating new columns if necessary,
# or throwing an error if you try to add a column that's too long)
data <- data.frame(colA = c("A", "B", "C"),
colB = c(1, 2, 3),
colC = c(TRUE, FALSE, TRUE),
stringsAsFactors = FALSE) # set this if you don't want your char cols turned to factors (this if finally default to false in R 4.0)
# because dataframes are lists of vectors, we can all the stuff we can w/ lists
print(names(data)) # names are the column names
print(data$colB) # 1 2 3
data$colC[1] <- FALSE # set an entry to false
# when we create a new entry, e.g. by name, it's recycled:
data$likes_music <- NA # recycled to NA NA NA
print(data)
# colA colB colC likes_music
#1 A 1 FALSE NA
#2 B 2 FALSE NA
#3 C 3 TRUE NA
# colnames() is the column vector names (same as returned by names())
print(colnames(data)) # "colA" "colB" "colC" "likes_music"
# the 1, 2, 3 on the left are not row indices, they are row *names* - stored as a character vector
print(rownames(data)) # "1" "2" "3"
rownames(data) <- c("A1", "A2", "A3")
print(data)
# colA colB colC likes_music
#A1 A 1 FALSE NA
#A2 B 2 FALSE NA
#A3 C 3 TRUE NA
# Notice that the quotations are also left off of colA, making it hard to distinguish column types (is colB a character, factor, integer, or numeric vector?!)
# the tidyverse folks have created tibbles - an extension of data.frames that inherit data frame methods but provide nicer versions for
# some operations
library(tibble)
print(as_tibble(data))
## A tibble: 3 x 4
# colA colB colC likes_music
# <chr> <dbl> <lgl> <lgl>
#1 A 1 FALSE NA
#2 B 2 FALSE NA
#3 C 3 TRUE NA
##### base-R indexing
# vectors and lists can be indexed with [], lists and dataframes can be indexed with [[]], and dataframes can be indexed with [ , ]
# where the syntax is [<row_selector>, <col_selector>]; either of these can be a numeric or integer vector (to select by row or column
# index), character vector (to select by row or column name), or logical vector (to select by logical keep/don't keep)
subdata <- data[c(1, 3), c(TRUE, FALSE, FALSE, TRUE)] # rows 1 and 3, cols 1 and 4
# get rows with colB greater than the median, all cols
subdata <- data[data$colB >= median(data$colB), ]
########################33
### tidyverse
##########################
# in the last ~decade there's been a growth of packages aimed at cleaning up the R user experience,
# particularly around common data-munging tasks
# since many default R functions have varying parameter names for common parameters, etc.
# these also aimed at providing a user-friendliness & compactness
# the main downside is they tend to verge on being DSLs with specialized functions, sometimes broad API for each one
# most tidyverse packages have focused on dataframes, but more recent additions have expanded to include functions for lists,
# arrays, etc.
entries <- data.frame(colA = c("A", "B", "C"),
colB = c(1, 2, 3),
colC = c(TRUE, FALSE, TRUE),
stringsAsFactors = FALSE)
# this is a base R function that illustrates R's use of non-standard-evaluation to allow working with column names
# as unquoted entries
# changes colB >= median(colB) to entries[["colB"]] >= median(entries[["colB"]] ) before execution
sub_data <- subset(entries, colB >= median(colB))
# tidyverse *loves* these unqouted things
# regular R functions are spotty about doing this, and spotty about which argument is the data (here it's first)
# tidyverse functions strive to take the data argument first; here's dplyrs filter which does the same thing
library(dplyr)
sub_data <- filter(entries, colB >= median(colB))
# to create a new column which is colX = 5 * colB
sub_data$colX <- sub_data$colB * 5 # old-school
sub_data <- mutate(sub_data, colX = 5 * colB)
# the %>% supplies the result of it's left hand side as the first argument to the function on the right-hand side (also using non-
# standard evaluation to accept the calling-form of the right side funtion)
sub_data <- filter(entries, colB >= median(colB)) %>%
mutate(colX = 5 * colB) # could also be mutate(., colX = 5. colB), where . is interpreted to mean the input from the LHS
|
# Abstract base class (R.oo/S3) for a partially ordered collection of
# statistical models; concrete collections extend this and implement the
# generics declared below.
# Fixes: extend() must register the new class name ("ModelPoset", not
# "Object") so inherits(x, "ModelPoset") works, and abstract should use the
# proper logical constant rather than the reassignable alias T.
R.oo::setConstructorS3("ModelPoset",
                       function() { extend(Object(), "ModelPoset") },
                       abstract = TRUE)
#' Topological ordering of models.
#'
#' Gives the models of the collection arranged in a topological order that is
#' consistent with the poset structure.
#'
#' @name getTopOrder
#' @export
#'
#' @param this the model poset object.
getTopOrder <- function(this) UseMethod("getTopOrder")
#' The prior on the models.
#'
#' Returns the unnormalized prior on the collection.
#'
#' @name getPrior
#' @export
#'
#' @param this the model poset object.
getPrior <- function(this) {
UseMethod("getPrior")
}
#' Number of models.
#'
#' Returns the number of models in the collection.
#'
#' @name getNumModels
#' @export
#'
#' @param this the model poset object.
getNumModels <- function(this) {
UseMethod("getNumModels")
}
#' Set data for a model poset.
#'
#' Sets the data to be used by a poset of models when computing MLEs.
#'
#' @name setData
#' @export
#'
#' @param this the model poset object.
#' @param data the data to be set.
setData <- function(this, data) {
UseMethod("setData")
}
#' Return the set data.
#'
#' If data has been set for the object using the setData() function
#' then will return that data, otherwise will throw an error.
#'
#' @name getData
#' @export
#'
#' @param this the object from which to get the data.
getData <- function(this) {
UseMethod("getData")
}
#' Number of samples in the set data.
#'
#' If data has been set using the setData method then returns the
#' number of samples in the data. Otherwise throws an error.
#'
#' @name getNumSamples
#' @export
#'
#' @param this the object from which to get the number of samples.
getNumSamples <- function(this) {
UseMethod("getNumSamples")
}
#' Parents of a model.
#'
#' Returns the immediate parents of a given model, i.e. those models
#' M that are (in the poset ordering) less than the given model but for
#' which there exists no other model M' such that M < M' < (given model).
#'
#' @name parents
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the parents should be found.
parents <- function(this, model) {
UseMethod("parents")
}
#' Maximum likelihood for data.
#'
#' Computes the maximum likelihood of a model in the model poset for the
#' data set using the setData command.
#'
#' @name logLikeMle
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the maximum likelihood should be computed.
#' @param ... further parameters to be passed to methods
logLikeMle <- function(this, model, ...) {
UseMethod("logLikeMle")
}
#' Maximum likelihood estimator.
#'
#' Computes the maximum likelihood estimator of the model parameters (for a
#' given model in the collection) given the data set with setData.
#'
#' @name mle
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the maximum likelihood should be computed.
mle <- function(this, model) {
UseMethod("mle")
}
#' Learning coefficient
#'
#' Computes the learning coefficient for a model with respect to one of the
#' model's submodels.
#'
#' @name learnCoef
#' @export
#'
#' @param this the object representing the model poset.
#' @param superModel the larger model of the two input models.
#' @param subModel the submodel of the larger model.
learnCoef <- function(this, superModel, subModel) {
UseMethod("learnCoef")
}
#' Model dimension.
#'
#' Computes the dimension of a model in the model poset.
#'
#' @name getDimension
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the dimension should be computed.
getDimension <- function(this, model) {
UseMethod("getDimension")
}
| /R/ModelPoset.R | no_license | yuinityk/sBIC | R | false | false | 3,809 | r | R.oo::setConstructorS3("ModelPoset",
function() { extend(Object(), "Object") },
abstract = T)
#' Topological ordering of models.
#'
#' Returns a topological ordering of models in the collection.
#'
#' @name getTopOrder
#' @export
#'
#' @param this the model poset object.
getTopOrder <- function(this) {
UseMethod("getTopOrder")
}
#' The prior on the models.
#'
#' Returns the unnormalized prior on the collection.
#'
#' @name getPrior
#' @export
#'
#' @param this the model poset object.
getPrior <- function(this) {
UseMethod("getPrior")
}
#' Number of models.
#'
#' Returns the number of models in the collection.
#'
#' @name getNumModels
#' @export
#'
#' @param this the model poset object.
getNumModels <- function(this) {
UseMethod("getNumModels")
}
#' Set data for a model poset.
#'
#' Sets the data to be used by a poset of models when computing MLEs.
#'
#' @name setData
#' @export
#'
#' @param this the model poset object.
#' @param data the data to be set.
setData <- function(this, data) {
UseMethod("setData")
}
#' Return the set data.
#'
#' If data has been set for the object using the setData() function
#' then will return that data, otherwise will throw an error.
#'
#' @name getData
#' @export
#'
#' @param this the object from which to get the data.
getData <- function(this) {
UseMethod("getData")
}
#' Number of samples in the set data.
#'
#' If data has been set using the setData method then returns the
#' number of samples in the data. Otherwise throws an error.
#'
#' @name getNumSamples
#' @export
#'
#' @param this the object from which to get the number of samples.
getNumSamples <- function(this) {
UseMethod("getNumSamples")
}
#' Parents of a model.
#'
#' Returns the immediate parents of a given model, i.e. those models
#' M that are (in the poset ordering) less than the given model but for
#' which there exists no other model M' such that M < M' < (given model).
#'
#' @name parents
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the parents should be found.
parents <- function(this, model) {
UseMethod("parents")
}
#' Maximum likelihood for data.
#'
#' Computes the maximum likelihood of a model in the model poset for the
#' data set using the setData command.
#'
#' @name logLikeMle
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the maximum likelihood should be computed.
#' @param ... further parameters to be passed to methods
logLikeMle <- function(this, model, ...) {
UseMethod("logLikeMle")
}
#' Maximum likelihood estimator.
#'
#' Computes the maximum likelihood estimator of the model parameters (for a
#' given model in the collection) given the data set with setData.
#'
#' @name mle
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the maximum likelihood should be computed.
mle <- function(this, model) {
UseMethod("mle")
}
#' Learning coefficient
#'
#' Computes the learning coefficient for a model with respect to one of the
#' model's submodels.
#'
#' @name learnCoef
#' @export
#'
#' @param this the object representing the model poset.
#' @param superModel the larger model of the two input models.
#' @param subModel the submodel of the larger model.
learnCoef <- function(this, superModel, subModel) {
UseMethod("learnCoef")
}
#' Model dimension.
#'
#' Computes the dimension of a model in the model poset.
#'
#' @name getDimension
#' @export
#'
#' @param this the object representing the model poset.
#' @param model the model for which the dimension should be computed.
getDimension <- function(this, model) {
UseMethod("getDimension")
}
|
# Plot Files from other under index directory
#
library("dplyr")
library("ggplot2")
if(!exists("book_words")) {
book_words <- read.csv(file = "data/deepNLP.csv",
stringsAsFactors = FALSE)
}
# Plot the (up to 50) highest tf-idf words of one indexed file, with word
# text sized and coloured by tf-idf, and return a quadratic fit of tf-idf
# against word rank.
#
# lfile: file name to look up in the global `book_words` index.
# corte: tf-idf cutoff; words at or below it are dropped.
plotWords <- function(lfile, corte = 0) {
  doc <- subset(book_words, file == lfile)
  doc <- subset(doc, tf_idf > corte)
  # Rank index before truncation; seq_len() is safe when no words remain
  # (1:length(x) would yield c(1, 0) on an empty subset).
  doc$i <- seq_len(nrow(doc))
  if (nrow(doc) > 50)
    doc <- doc[1:50, ]
  doc <- doc[order(doc$tf_idf, decreasing = FALSE), ]
  # Map columns directly (not via doc$...) so aes() stays tied to the data.
  p <- ggplot(doc, aes(i, tf_idf, label = word)) +
    geom_text(check_overlap = TRUE,
              size = (doc$tf_idf * 10) / max(doc$tf_idf),
              aes(colour = tf_idf)) +
    theme(legend.position = "none")
  print(p)
  # Quadratic model of tf-idf as a function of rank, returned to the caller.
  fit <- lm(doc$tf_idf ~ doc$i + I(doc$i^2))
  fit
}
# Read a previously saved centroid for a document class from
# data/centroid.<class>.  Returns the centroid as a data frame, or
# (explicitly) NULL when no centroid file exists for that class.
readCentroid <- function(classCentroid) {
  path <- paste0("data/centroid.", classCentroid)
  if (file.exists(path)) {
    return(read.csv(path, stringsAsFactors = FALSE))
  }
  # Previously the missing-file case fell off the end of the function and
  # returned NULL invisibly; make the contract explicit.
  NULL
}
# Compare the tf-idf profile of file1 against a reference profile (a saved
# class centroid if classCentroid is given, otherwise file2), plot both
# profiles with quadratic fits, and return the absolute correlation between
# the fitted curves.  Returns a message string when fewer than 3 words are
# shared, or an error marker when wplot = FALSE.
plotFile <- function(file1 = file1, file2 = NULL, wplot = TRUE,
                     classCentroid = NULL) {
  source("loadConfig.R")
  doc1 <- subset(book_words, file == file1)
  # A stored centroid takes precedence over a second file.
  if (!is.null(classCentroid))
    ni <- readCentroid(classCentroid)
  if (is.null(classCentroid) && !is.null(file2))
    ni <- subset(book_words, file == file2)
  corM <- 0
  ni$tfidf <- 0
  ni$i <- 0
  if (wplot) {
    # Copy file1's tf-idf onto every word it shares with the reference.
    # seq_len() is empty-safe (1:length(x)[1] was not).
    for (i in seq_len(nrow(doc1))) {
      ind <- which(ni$word == doc1[i, ]$word)
      if (length(ind) > 0)
        ni[ind, ]$tfidf <- doc1[i, ]$tf_idf
    }
    ni <- ni[order(ni$mean, decreasing = FALSE), ]
    ni$i <- seq_len(nrow(ni))
    ni <- subset(ni, tfidf > 0)
    ni <- subset(ni, mean > 0)
    if (length(ni$word) < 3) {
      return(paste0("File ", file2, " has fewer than 3 words in common"))
    }
    # Quadratic fits of each profile over shared-word rank; the correlation
    # of the fitted curves is the similarity score.
    model1 <- lm(ni$mean ~ ni$i + I(ni$i^2))
    model2 <- lm(ni$tfidf ~ ni$i + I(ni$i^2))
    corM <- abs(cor(predict(model1), predict(model2)))
    plot(ni$i, ni$mean, col = "blue",
         type = "p", main = paste(file1, file2),
         xlim = c(0, max(ni$i)), ylim = c(0, max(ni$tfidf)),
         xlab = paste("correlation: ", corM), ylab = "TF-IDF")
    lines(ni$i, predict(model1), col = "blue")
    # par(new = ...) requires a logical; the old code passed the string "T".
    par(new = TRUE)
    plot(ni$i, ni$tfidf, col = "red",
         type = "p",
         xlim = c(0, max(ni$i)), ylim = c(0, max(ni$tfidf)),
         xlab = paste("correlation: ", corM), ylab = "TF-IDF")
    lines(ni$i, predict(model2), col = "red")
    return(corM)
  }
  # The wplot = FALSE branch has no implementation (the old code referenced
  # an undefined variable `compare` here, which raised an error); report the
  # situation explicitly instead.
  c("ERROR", "wplot = FALSE is not implemented")
}
| /plotFiles.R | no_license | TheScientistBr/deep-NLP | R | false | false | 3,115 | r | # Plot Files from other under index directory
#
library("dplyr")
library("ggplot2")
if(!exists("book_words")) {
book_words <- read.csv(file = "data/deepNLP.csv",
stringsAsFactors = FALSE)
}
plotWords <- function(lfile, corte = 0) {
doc <- subset(book_words,file == lfile)
doc <- subset(doc, tf_idf > corte)
doc$i <- 1:length(doc$word)
if(dim(doc)[1] > 50)
doc <- doc[1:50,]
doc <- doc[order(doc$tf_idf,decreasing = FALSE),]
p <- ggplot(doc, aes(i, tf_idf, label = doc$word)) +
geom_text(check_overlap = TRUE,size = (doc$tf_idf*10)/max(doc$tf_idf), aes(colour = doc$tf_idf)) +
theme(legend.position="none")
print(p)
corM <- lm(doc$tf_idf ~ doc$i + I(doc$i^2))
return(corM)
}
readCentroid <- function(classCentroid) {
if(file.exists(paste0("data/centroid.",classCentroid))) {
return(read.csv(paste0("data/centroid.",classCentroid),stringsAsFactors = FALSE))
}
}
plotFile <- function(file1 = file1, file2 = NULL, wplot = TRUE,
classCentroid = NULL) {
source("loadConfig.R")
doc1 <- subset(book_words,file == file1)
if(!is.null(classCentroid))
ni <- readCentroid(classCentroid)
if(is.null(classCentroid) && !is.null(file2))
ni <- subset(book_words,file == file2)
corM <- 0
ni$tfidf <- 0
ni$i <- 0
if(wplot) {
soma <- 0
for(i in 1:length(doc1$word)[1]) {
ind <- which(ni$word == doc1[i,]$word)
if(length(ind))
ni[ind,]$tfidf <- doc1[i,]$tf_idf
}
ni <- ni[order(ni$mean,decreasing = FALSE),]
ni$i <- 1:length(ni$word)
ni <- subset(ni, tfidf > 0)
ni <- subset(ni, mean > 0)
if(length(ni$word) < 3) {
return(paste0("File ",file2,"has less than 10 characters"))
}
model1 <- lm(ni$mean ~ ni$i + I(ni$i^2))
model2 <- lm(ni$tfidf ~ ni$i + I(ni$i^2))
corM <- abs(cor(predict(model1),predict(model2)))
plot(ni$i, ni$mean, col = "blue",
type = "p", main = paste(file1,file2),
xlim = c(0,max(ni$i)), ylim = c(0,max(ni$tfidf)),
xlab = paste("correlation: ",corM), ylab = "TF-IDF")
lines(ni$i, predict(lm(ni$mean ~ ni$i + I(ni$i^2))), col = c("blue"))
par(new = "T")
plot(ni$i, ni$tfidf, col = "red",
type = "p",
xlim = c(0,max(ni$i)), ylim = c(0,max(ni$tfidf)),
xlab = paste("correlation: ",corM), ylab = "TF-IDF")
lines(ni$i, predict(lm(ni$tfidf ~ ni$i + I(ni$i^2))), col = c("red"))
return(corM)
}
return(c("ERRO",length(compare)[1]))
}
|
## Download and unzip the data if zip or txt file does not exist
filename <- "electric_power_consumption.zip"
if (!file.exists(filename)){
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(url, destfile = "electric_power_consumption.zip")
  unzip("electric_power_consumption.zip")
}
if (!file.exists("household_power_consumption.txt")) {
  unzip(filename)
}
## Read the data ("?" marks missing values in the raw file)
library("data.table")
full_data <- fread("household_power_consumption.txt", sep = ";",header = TRUE,na.strings="?")
# Subset data from 01/02/2007 and 02/02/2007 (dates are still raw d/m/Y strings here)
data <- full_data[(full_data$Date=="1/2/2007" | full_data$Date=="2/2/2007" ), ]
## Adapt the date and time format
# Convert the char date as a date date
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Concatenate date and time in a char vector
date_time <- paste(data$Date, data$Time)
# Transform the char vector into a date-time variables and add it to the dataset
data$Date_time <- as.POSIXct(date_time)
## Define the graphic device (all four panels go to plot4.png)
png("plot4.png",width=480,height=480)
## Create the plots (it will be added to the defined graphic device)
# In case your computer language is not english (English weekday labels on the x axis)
Sys.setlocale("LC_TIME", "English")
# Define the 4X4 frame: 2 x 2 grid of panels, filled row by row
par(mfrow = c(2, 2))
# Plot 1
plot(data$Date_time, data$Global_active_power,type = "l", ylab="Global Active Power", xlab="")
# Plot 2
plot(data$Date_time, data$Voltage, type = "l", ylab="Voltage", xlab="dateTime")
# Plot 3: the three sub-metering series overlaid on one panel
with(data,plot(Date_time,Sub_metering_1,type="l",ylab="Energy sub metering",xlab=""))
with(data,lines(Date_time,Sub_metering_2,col='Red'))
with(data,lines(Date_time,Sub_metering_3,col='Blue'))
legend("topright", lty=1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
# Plot 4
plot(data$Date_time, data$Global_reactive_power,type = "l", ylab="Global_reactive_power",xlab="dateTime")
dev.off() | /plot4.R | no_license | AmelieRu/ExData_Plotting1 | R | false | false | 1,940 | r | ## Download and unzip the data if zip or txt file does not exist
filename <- "electric_power_consumption.zip"
if (!file.exists(filename)){
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, destfile = "electric_power_consumption.zip")
unzip("electric_power_consumption.zip")
}
if (!file.exists("household_power_consumption.txt")) {
unzip(filename)
}
## Read the data
library("data.table")
full_data <- fread("household_power_consumption.txt", sep = ";",header = TRUE,na.strings="?")
# Subset data from 01/02/2007 and 02/02/2007
data <- full_data[(full_data$Date=="1/2/2007" | full_data$Date=="2/2/2007" ), ]
## Adapt the date and time format
# Convert the char date as a date date
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# Concatenate date and time in a char vector
date_time <- paste(data$Date, data$Time)
# Transform the char vector into a date-time variables and add it to the dataset
data$Date_time <- as.POSIXct(date_time)
## Define the graphic device
png("plot4.png",width=480,height=480)
## Create the plots (it will be added to the defined graphic device)
# In case your computer language is not english
Sys.setlocale("LC_TIME", "English")
# Define the 4X4 frame
par(mfrow = c(2, 2))
# Plot 1
plot(data$Date_time, data$Global_active_power,type = "l", ylab="Global Active Power", xlab="")
# Plot 2
plot(data$Date_time, data$Voltage, type = "l", ylab="Voltage", xlab="dateTime")
# Plot 3
with(data,plot(Date_time,Sub_metering_1,type="l",ylab="Energy sub metering",xlab=""))
with(data,lines(Date_time,Sub_metering_2,col='Red'))
with(data,lines(Date_time,Sub_metering_3,col='Blue'))
legend("topright", lty=1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"))
# Plot 4
plot(data$Date_time, data$Global_reactive_power,type = "l", ylab="Global_reactive_power",xlab="dateTime")
dev.off() |
# P(lower < X1 < upper, lower < X2 < upper) for a standard bivariate normal
# with correlation rho, vectorized over rho; `mean` shifts both bounds.
# Relies on biv.nt.prob() (package mnormt); df = Inf gives the Gaussian case.
# Removed dead stores (`nu`, `infin`, `prob`) left over from an older
# mvtnorm-style interface; they were never read.
bivprob = function(rho, lower, upper = -lower, mean = 0) {
    low = rep(as.double((lower - mean)), 2)
    upp = rep(as.double((upper - mean)), 2)
    # a degenerate rectangle has probability zero
    if (any(lower == upper))
        return(0)
    # placeholder convention inherited from the old interface: infinite
    # bounds are replaced by 0 before the call
    low = replace(low, low == -Inf, 0)
    upp = replace(upp, upp == Inf, 0)
    rho = as.double(rho)
    a = lapply(rho, function(r, low, upp) biv.nt.prob(df = Inf, lower = low, upper = upp,
        mean = rep(0, 2), S = matrix(c(1, r, r, 1), 2, 2)), low = low, upp = upp)
    return(unlist(a))
}
# Standardized excess joint probability at two-sided level 0.05: for each
# correlation in `rho`, [P(both |X| < u) - (1 - t)^2] / (t * (1 - t)),
# where u is the two-sided 5% normal quantile. Vectorized over `rho`.
Dt = function(rho) {
    threshold = 0.05
    ut = qnorm(1 - threshold/2)
    joint = unlist(lapply(rho, bivprob, lower = -ut))
    (joint - (1 - threshold)^2)/(threshold * (1 - threshold))
}
# Variance-inflation criterion for choosing the number of factors: for each
# candidate loading matrix in `Blist` (0..maxnbfactors factors), measure the
# average dependence left in the residual correlations of (at most 1000
# randomly sampled) variables. Smaller is better.
#
# NOTE(review): uses sample() without a fixed seed, so repeated calls give
# slightly different criteria when m > 1000.
VarInflation <- function(data.train, Blist, maxnbfactors, dig) {
    m <- ncol(data.train)
    n <- nrow(data.train)
    # grid of correlation values with step 10^-dig, and the dependence
    # measure Dt() precomputed on that grid
    vecrho <- round(seq(10^(-dig), 1, 10^(-dig)), digits = dig)
    vecdt <- unlist(lapply(vecrho, Dt))
    sampled <- sample(1:m, min(1000, m))
    sampsize <- length(sampled)
    # cross-products of the sampled columns (no centering here — presumably
    # the caller supplies standardized data; verify against callers)
    cordata <- crossprod(data.train[, sampled, drop = FALSE])/(n - 1)
    sdt <- sapply(1:(maxnbfactors + 1), function(i) {
        B <- matrix(Blist[[i]][sampled, ], nrow = sampsize)
        sdb <- sqrt(1 - rowSums(B^2))
        # residual correlations after removing the factor part tcrossprod(B),
        # rescaled by the specific standard deviations
        matrho <- cordata - tcrossprod(B)
        matrho <- sweep(matrho, 2, FUN = "/", STATS = sdb)
        matrho <- sweep(matrho, 1, FUN = "/", STATS = sdb)
        rho <- matrho[col(matrho) > row(matrho)]
        rho[abs(rho) >= 1] <- 1
        # look the rounded |rho| values up on the precomputed Dt grid,
        # handling duplicated values through their multiplicities
        veccor <- sort(round(abs(rho), digits = dig))
        duplic <- duplicated(veccor)
        vduplic <- sort(unique(veccor[duplic]))
        vunic <- setdiff(unique(veccor), vduplic)
        dtunic <- vecdt[is.element(vecrho, vunic)]
        dtduplic <- vecdt[is.element(vecrho, vduplic)]
        vmatch <- match(vecrho, veccor, 0)
        nboccur <- diff(c(vmatch[vmatch > 0], length(veccor) + 1))
        nboccur <- nboccur[nboccur > 1]
        tmp <- 2 * (m - 1) * (sum(dtunic) + crossprod(nboccur, dtduplic))/(sampsize *
            (sampsize - 1))
        return(tmp)
    })
    names(sdt) <- paste(0:maxnbfactors, "factors")
    return(sdt)
}
# Invert the factor-analysis covariance Sigma = B %*% t(B) + diag(Psi)
# via the Woodbury identity, so only a q x q system is solved (q = #factors).
#
# Psi: vector of specific variances; entries with |Psi| <= 1e-05 are treated
#      as exactly zero (their precision is set to 0).
# B:   m x q loading matrix; a plain vector is interpreted as one column.
#
# Returns list(iS = solve(Sigma) (m x m), iSB = solve(Sigma) %*% B (m x q)).
ifa = function(Psi, B) {
    # fix: `class(B) == "numeric"` yields a length-2 condition for matrices
    # on R >= 4 (class is c("matrix", "array")) and errors since R 4.2;
    # test the shape directly, which also covers integer vectors.
    if (!is.matrix(B))
        B = matrix(B, ncol = 1)
    q = ncol(B)
    Phi = rep(0, length(Psi))
    Phi[abs(Psi) > 1e-05] = 1/Psi[abs(Psi) > 1e-05]
    # PhiB[i, j] = Phi[i] * B[i, j]
    PhiB = tcrossprod(Phi, rep(1, q)) * B
    # q x q capacitance matrix of the Woodbury identity (symmetric)
    G = diag(q) + crossprod(B, PhiB)
    Ginv = solve(G)
    iS = diag(Phi) - PhiB %*% tcrossprod(Ginv, PhiB)
    iSB = PhiB - PhiB %*% (Ginv %*% crossprod(PhiB, B))
    return(list(iS = iS, iSB = iSB))
}
# Maximum-likelihood fit of a Gaussian factor-analysis model.
#
# data:    n x m data matrix (rows = observations).
# nbf:     number of factors; 0 returns B = NULL and unit specific variances.
# EM:      force the EM algorithm even when factanal() would be applicable.
# minerr:  convergence threshold on the mean squared change of Psi.
# verbose: print progress messages during the EM iterations.
#
# Returns list(B = loading matrix rescaled to the original data scale,
#              Psi = vector of specific variances on the original scale).
#
# NOTE(review): `cdata`, `sB` and `G` are computed but never used or returned.
emfa = function(data, nbf, EM = TRUE, minerr = 1e-06, verbose = FALSE) {
    n = nrow(data)
    m = ncol(data)
    # column means and unbiased column variances via matrix products
    my = crossprod(rep(1, n), data)/n
    vy = crossprod(rep(1, n), data^2)/n - my^2
    vy = (n/(n - 1)) * vy
    cdata = scale(data, center = my, scale = FALSE)
    csdata = scale(data, center = my, scale = sqrt(vy))
    # correlation matrix of the standardized data
    S = crossprod(csdata)/(n - 1)
    # small-m / large-n case: delegate to stats::factanal
    if (((n > m) & (m <= 200) & (m >= 3)) & (!EM)) {
        if (nbf == 0) {
            B = NULL
            Psi = rep(1, m)
        }
        if (nbf > 0) {
            fa = factanal(csdata, factors = nbf, rotation = "varimax")
            B = fa$loadings
            class(B) = "matrix"
            Psi = fa$uniquenesses
            # rescale uniquenesses and loadings back to the data scale
            Psi = Psi * vy
            B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
            sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
            G = solve(diag(nbf) + tcrossprod(sB))
            sB = scale(t(B), center = FALSE, scale = Psi)
        }
    }
    # high-dimensional (or forced) case: EM algorithm
    if ((n <= m) | (m > 200) | EM) {
        if (nbf == 0) {
            B = NULL
            Psi = rep(1, m)
        }
        if (nbf > 0) {
            if (verbose)
                print(paste("Fitting EM Factor Analysis Model with", nbf, "factors"))
            # initialize loadings from the SVD of the scaled data
            # (fast.svd comes from package corpcor)
            eig = fast.svd((1/sqrt((n - 1))) * t(csdata))
            evectors = eig$u[, 1:nbf]
            evalues = eig$d^2
            if (nbf > 1)
                B = evectors[, 1:nbf] * matrix(sqrt(evalues[1:nbf]), ncol = nbf,
                    nrow = m, byrow = TRUE)
            if (nbf == 1)
                B = matrix(evectors, nrow = m, ncol = 1) * sqrt(evalues[1])
            b2 = rowSums(B^2)
            Psi = 1 - b2
            crit = 1
            # EM iterations: E-step quantities via the Woodbury inverse ifa(),
            # M-step updates of B and Psi, until Psi stabilizes
            while (crit > minerr) {
                inv = ifa(Psi, B)
                Cyz = crossprod(S, inv$iSB)
                Czz = crossprod(inv$iSB, Cyz) + diag(nbf) - crossprod(B, inv$iSB)
                Bnew = tcrossprod(Cyz, solve(Czz))
                Psinew = 1 - rowSums(Bnew * Cyz)
                crit = mean((Psi - Psinew)^2)
                B = Bnew
                Psi = Psinew
                if (verbose)
                    print(paste("Objective criterion in EM-FA : ", signif(crit, 6)))
                }
            # rescale the fitted model back to the data scale
            Psi = Psi * vy
            B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
            sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
            G = solve(diag(nbf) + tcrossprod(sB))
            sB = scale(t(B), center = FALSE, scale = Psi)
        }
    }
    res = list(B = B, Psi = Psi)
    return(res)
}
# Select the number of factors for the factor-analysis model: fit models with
# 0..maxnbfactors factors (reusing one SVD), score each with the variance
# inflation criterion, and pick the largest model whose relative improvement
# ("jump") still exceeds jumps.nbfactor.
#
# Returns list(criterion = named criterion vector, optimalnbfactors = choice).
#
# NOTE(review): `cdata` is computed but never used.
nbfactors <- function(data.train, maxnbfactors = 12, diagnostic.plot, minerr = 0.001,
    EM = TRUE, jumps.nbfactor = 0.05) {
    dig <- 2
    m <- ncol(data.train)
    n <- nrow(data.train)
    # column means/variances and the standardized data, as in emfa()
    my = crossprod(rep(1, n), data.train)/n
    vy = crossprod(rep(1, n), data.train^2)/n - my^2
    vy = (n/(n - 1)) * vy
    cdata = scale(data.train, center = my, scale = FALSE)
    csdata = scale(data.train, center = my, scale = sqrt(vy))
    S = crossprod(csdata)/(n - 1)
    # SVD computed once (fast.svd from package corpcor) and shared by all fits
    eig = fast.svd((1/sqrt((n - 1))) * t(csdata))
    # falist[[k+1]] holds the model with k factors; k = 0 is a zero loading
    falist <- vector(length = maxnbfactors + 1, "list")
    falist[[1]] <- list(B = matrix(0, ncol = 1, nrow = m))
    falist[-1] <- lapply(1:maxnbfactors, emfa.nbf, csdata = csdata, S = S, eig = eig,
        vy = vy, minerr = minerr, EM = EM, verbose = FALSE)
    Blist <- lapply(falist, function(fa, m) matrix(fa$B, nrow = m), m = m)
    sdt <- VarInflation(data.train, Blist, maxnbfactors, dig)
    if (diagnostic.plot) {
        dev.new()
        plot(0:maxnbfactors, sdt, ylab = "Variance Inflation Criterion", xlab = "Number of factors",
            bty = "l", lwd = 1.25, type = "b", pch = 16, cex.lab = 1.25, cex = 1.25,
            cex.axis = 1.25)
    }
    # criterion minimized at 0 factors -> keep 0; otherwise take the largest
    # size whose relative drop of the criterion exceeds jumps.nbfactor
    if (which.min(sdt) == 1)
        opt <- 0
    if (which.min(sdt) > 1) {
        jumps <- -diff(sdt)/sdt[-length(sdt)]
        opt <- max((1:maxnbfactors)[jumps > jumps.nbfactor])
    }
    list(criterion = sdt, optimalnbfactors = opt)
}
# Worker variant of emfa() used by nbfactors(): identical fitting logic, but
# takes the standardized data (csdata), its correlation matrix (S), the SVD
# (eig) and the column variances (vy) precomputed by the caller, so repeated
# fits with different nbf do not redo that work.
#
# NOTE(review): this is a manual near-copy of emfa(); the two must be kept
# in sync by hand. `sB` and `G` are computed but never used or returned.
emfa.nbf = function(csdata, S, eig, vy, nbf, EM = TRUE, minerr = 1e-06, verbose = FALSE) {
    n <- nrow(csdata)
    m <- ncol(csdata)
    # small-m / large-n case: delegate to stats::factanal
    if (((n > m) & (m <= 200) & (m >= 3)) & (!EM)) {
        if (nbf == 0) {
            B = NULL
            Psi = rep(1, m)
        }
        if (nbf > 0) {
            fa = factanal(csdata, factors = nbf, rotation = "varimax")
            B = fa$loadings
            class(B) = "matrix"
            Psi = fa$uniquenesses
            # rescale uniquenesses and loadings back to the data scale
            Psi = Psi * vy
            B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
            sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
            G = solve(diag(nbf) + tcrossprod(sB))
            sB = scale(t(B), center = FALSE, scale = Psi)
        }
    }
    # high-dimensional (or forced) case: EM algorithm
    if ((n <= m) | (m > 200) | EM) {
        if (nbf == 0) {
            B = NULL
            Psi = rep(1, m)
        }
        if (nbf > 0) {
            if (verbose)
                print(paste("Fitting EM Factor Analysis Model with", nbf, "factors"))
            # initialize loadings from the caller-supplied SVD
            evectors = eig$u[, 1:nbf]
            evalues = eig$d^2
            if (nbf > 1)
                B = evectors[, 1:nbf] * matrix(sqrt(evalues[1:nbf]), ncol = nbf,
                    nrow = m, byrow = TRUE)
            if (nbf == 1)
                B = matrix(evectors, nrow = m, ncol = 1) * sqrt(evalues[1])
            b2 = rowSums(B^2)
            Psi = 1 - b2
            crit = 1
            # EM iterations, as in emfa(): stop once Psi stabilizes
            while (crit > minerr) {
                inv = ifa(Psi, B)
                Cyz = crossprod(S, inv$iSB)
                Czz = crossprod(inv$iSB, Cyz) + diag(nbf) - crossprod(B, inv$iSB)
                Bnew = tcrossprod(Cyz, solve(Czz))
                Psinew = 1 - rowSums(Bnew * Cyz)
                crit = mean((Psi - Psinew)^2)
                B = Bnew
                Psi = Psinew
                if (verbose)
                    print(paste("Objective criterion in EM-FA : ", signif(crit, 6)))
            }
            # rescale the fitted model back to the data scale
            Psi = Psi * vy
            B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
            sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
            G = solve(diag(nbf) + tcrossprod(sB))
            sB = scale(t(B), center = FALSE, scale = Psi)
        }
    }
    res = list(B = B, Psi = Psi)
    return(res)
}
# Fit an L1-penalized (lasso) logistic or multinomial regression with the
# penalty chosen by cross-validation on misclassification error.
#
# data.train: list with x (predictor matrix) and y (class labels coded 1..K).
# ...: further arguments passed on to glmnet().
#
# Returns list(proba.train = n x K matrix of fitted class probabilities,
#              model = the glmnet fit at lambda.min).
LassoML <- function(data.train, ...) {
    nbclass <- length(unique(data.train$y))
    cl <- sort(unique(data.train$y))
    if (!all(cl == seq_len(nbclass))) {
        stop("Group variable must be 1,2, ...")
    }
    # binomial family for 2 classes, multinomial beyond
    family <- if (nbclass == 2) "binomial" else "multinomial"
    cvmod <- cv.glmnet(x = as.matrix(data.train$x), y = data.train$y, family = family,
        type.measure = "class")
    lambda.min <- cvmod$lambda.min
    mod <- glmnet(x = as.matrix(data.train$x), y = data.train$y, family = family,
        lambda = lambda.min, ...)
    proba.train <- predict(mod, newx = as.matrix(data.train$x), type = "response")
    if (nbclass == 2) {
        # glmnet returns P(class 2) only; build the full two-column matrix
        proba.train <- matrix(c(1 - proba.train, proba.train), ncol = 2, byrow = FALSE)
    }
    if (nbclass > 2) {
        # drop the third (lambda) dimension of the prediction array
        proba.train <- proba.train[, , 1]
    }
    return(list(proba.train = proba.train, model = mod))
}
# Run the chosen classifier ("glmnet", "sda" or "sparseLDA") on decorrelated
# (factor-adjusted) training and testing data held in `faobject`, and return
# the selected variables, fitted probabilities, test predictions and model.
#
# NOTE(review): in the "glmnet" branch, `out$selected` and `out$mod` do not
# exist on the value returned by LassoML() (its fields are `proba.train` and
# `model`), so `selected` is NULL and the predict() call on `out$mod` fails.
FADA.tmp <- function(faobject, method, sda.method, alpha,...) {
    fadta <- faobject$fa.training
    fatest <- faobject$fa.testing
    groups <- faobject$groups
    p <- ncol(faobject$fa.training)
    nbclass <- length(unique(groups))
    if (method == "glmnet") {
        out <- LassoML(list(x = fadta, y = groups), ...)
        selected <- out$selected
        proba.test <- predict(out$mod, newx = as.matrix(fatest), type = "response")
        if (nbclass == 2) {
            # binomial predictions come back as P(class 2) only
            proba.test <- matrix(c(1 - proba.test, proba.test), ncol = 2, byrow = FALSE)
        }
        predict.test <- apply(proba.test, 1, which.max)
        out <- out$model
        proba.train <- predict(out, fadta, type = "response")
    }
    if (method == "sda") {
        # rank variables, keep those below the lfdr cutoff or up to the
        # higher-criticism maximum within the first alpha*p ranks
        ranking.LDA <- sda::sda.ranking(fadta, groups, verbose = FALSE,...)
        if (sda.method == "lfdr") {
            selected <- as.numeric(ranking.LDA[ranking.LDA[, "lfdr"] < 0.8, "idx"])
        } else {
            thr <- which.max(ranking.LDA[1:round(alpha * p), "HC"])
            selected <- as.numeric(ranking.LDA[1:thr, "idx"])
        }
        out <- sda::sda(fadta[, selected, drop = FALSE], groups, verbose = FALSE,...)
        pred <- sda::predict.sda(out, fatest[, selected, drop = FALSE], verbose = FALSE)
        proba.test <- pred$posterior
        predict.test <- pred$class
        proba.train <- sda::predict.sda(out, fadta[, selected, drop = FALSE],
            verbose = FALSE)$posterior
    }
    if (method == "sparseLDA") {
        # sparseLDA expects normalized predictors; reuse the training
        # normalization on the test set
        Xc <- normalize(fadta)
        Xn <- Xc$Xc
        out <- sparseLDA::sda(Xn, factor(groups), ...)
        Xctest <- normalizetest(fatest, Xc)
        Xctest <- matrix(Xctest, nrow = nrow(fatest), byrow = FALSE)
        colnames(Xctest) <- colnames(Xn)
        pred <- sparseLDA::predict.sda(out, Xctest)
        selected <- out$varIndex
        proba.test <- pred$posterior
        predict.test <- pred$class
        proba.train <- sparseLDA::predict.sda(out, Xn)$posterior
    }
    return(list(method = method, selected = selected, proba.train = proba.train,
        proba.test = proba.test, predict.test = predict.test, mod = out))
}
# One cross-validation fold for FADA: fit the factor decorrelation on the
# training fold only, project the held-out fold into the same factor space,
# run the chosen classifier, and return the misclassification rate on the
# held-out observations. (The closing brace lives on the following line.)
cv.FADA <- function(train.x, train.y, test.x, test.y, nbf.cv, method,sda.method,maxnbfactors,
    min.err, EM, maxiter, alpha,...) {
    # factor model estimated on the training fold (no diagnostic plot)
    fa.train <- decorrelate.train(list(x = train.x, y = train.y),
        nbf = nbf.cv, maxnbfactors = maxnbfactors, diagnostic.plot = FALSE,
        min.err = min.err, verbose = FALSE, EM = EM, maxiter = maxiter,...)
    # apply the fitted decorrelation to the held-out fold
    fa.test <- decorrelate.test(fa.train, list(x = test.x))
    fada <- FADA.tmp(fa.test, method, sda.method, alpha,...)
    return(mean(fada$predict.test != test.y))
} | /FADA/R/func.R | no_license | ingted/R-Examples | R | false | false | 10,833 | r | bivprob = function(rho, lower, upper = -lower, mean = 0) {
nu = 0
low = rep(as.double((lower - mean)), 2)
upp = rep(as.double((upper - mean)), 2)
if (any(lower == upper))
return(0)
infin = c(2, 2)
infin = as.integer(infin)
low = replace(low, low == -Inf, 0)
upp = replace(upp, upp == Inf, 0)
rho = as.double(rho)
prob = as.double(0)
a = lapply(rho, function(r, low, upp) biv.nt.prob(df = Inf, lower = low, upper = upp,
mean = rep(0, 2), S = matrix(c(1, r, r, 1), 2, 2)), low = low, upp = upp)
return(unlist(a))
}
Dt = function(rho) {
threshold = 0.05
ut = qnorm(1 - threshold/2)
delta = unlist(lapply(rho, bivprob, lower = -ut)) - (1 - threshold)^2
dt <- delta/(threshold * (1 - threshold))
return(dt)
}
VarInflation <- function(data.train, Blist, maxnbfactors, dig) {
m <- ncol(data.train)
n <- nrow(data.train)
vecrho <- round(seq(10^(-dig), 1, 10^(-dig)), digits = dig)
vecdt <- unlist(lapply(vecrho, Dt))
sampled <- sample(1:m, min(1000, m))
sampsize <- length(sampled)
cordata <- crossprod(data.train[, sampled, drop = FALSE])/(n - 1)
sdt <- sapply(1:(maxnbfactors + 1), function(i) {
B <- matrix(Blist[[i]][sampled, ], nrow = sampsize)
sdb <- sqrt(1 - rowSums(B^2))
matrho <- cordata - tcrossprod(B)
matrho <- sweep(matrho, 2, FUN = "/", STATS = sdb)
matrho <- sweep(matrho, 1, FUN = "/", STATS = sdb)
rho <- matrho[col(matrho) > row(matrho)]
rho[abs(rho) >= 1] <- 1
veccor <- sort(round(abs(rho), digits = dig))
duplic <- duplicated(veccor)
vduplic <- sort(unique(veccor[duplic]))
vunic <- setdiff(unique(veccor), vduplic)
dtunic <- vecdt[is.element(vecrho, vunic)]
dtduplic <- vecdt[is.element(vecrho, vduplic)]
vmatch <- match(vecrho, veccor, 0)
nboccur <- diff(c(vmatch[vmatch > 0], length(veccor) + 1))
nboccur <- nboccur[nboccur > 1]
tmp <- 2 * (m - 1) * (sum(dtunic) + crossprod(nboccur, dtduplic))/(sampsize *
(sampsize - 1))
return(tmp)
})
names(sdt) <- paste(0:maxnbfactors, "factors")
return(sdt)
}
ifa = function(Psi, B) {
if (class(B) == "numeric")
B = matrix(B, ncol = 1)
q = ncol(B)
Phi = rep(0, length(Psi))
Phi[abs(Psi) > 1e-05] = 1/Psi[abs(Psi) > 1e-05]
PhiB = tcrossprod(Phi, rep(1, q))
PhiB = PhiB * B
G = diag(q) + t(B) %*% PhiB
GinvtPhiB = tcrossprod(solve(G), PhiB)
Phib2 = tcrossprod(PhiB, t(GinvtPhiB))
iS = diag(Phi) - Phib2
PhiB2 = crossprod(PhiB, B)
GinvtPhiB2 = crossprod(solve(G), PhiB2)
Phib2 = tcrossprod(PhiB, t(GinvtPhiB2))
iSB = PhiB - Phib2
return(list(iS = iS, iSB = iSB))
}
emfa = function(data, nbf, EM = TRUE, minerr = 1e-06, verbose = FALSE) {
n = nrow(data)
m = ncol(data)
my = crossprod(rep(1, n), data)/n
vy = crossprod(rep(1, n), data^2)/n - my^2
vy = (n/(n - 1)) * vy
cdata = scale(data, center = my, scale = FALSE)
csdata = scale(data, center = my, scale = sqrt(vy))
S = crossprod(csdata)/(n - 1)
if (((n > m) & (m <= 200) & (m >= 3)) & (!EM)) {
if (nbf == 0) {
B = NULL
Psi = rep(1, m)
}
if (nbf > 0) {
fa = factanal(csdata, factors = nbf, rotation = "varimax")
B = fa$loadings
class(B) = "matrix"
Psi = fa$uniquenesses
Psi = Psi * vy
B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
G = solve(diag(nbf) + tcrossprod(sB))
sB = scale(t(B), center = FALSE, scale = Psi)
}
}
if ((n <= m) | (m > 200) | EM) {
if (nbf == 0) {
B = NULL
Psi = rep(1, m)
}
if (nbf > 0) {
if (verbose)
print(paste("Fitting EM Factor Analysis Model with", nbf, "factors"))
eig = fast.svd((1/sqrt((n - 1))) * t(csdata))
evectors = eig$u[, 1:nbf]
evalues = eig$d^2
if (nbf > 1)
B = evectors[, 1:nbf] * matrix(sqrt(evalues[1:nbf]), ncol = nbf,
nrow = m, byrow = TRUE)
if (nbf == 1)
B = matrix(evectors, nrow = m, ncol = 1) * sqrt(evalues[1])
b2 = rowSums(B^2)
Psi = 1 - b2
crit = 1
while (crit > minerr) {
inv = ifa(Psi, B)
Cyz = crossprod(S, inv$iSB)
Czz = crossprod(inv$iSB, Cyz) + diag(nbf) - crossprod(B, inv$iSB)
Bnew = tcrossprod(Cyz, solve(Czz))
Psinew = 1 - rowSums(Bnew * Cyz)
crit = mean((Psi - Psinew)^2)
B = Bnew
Psi = Psinew
if (verbose)
print(paste("Objective criterion in EM-FA : ", signif(crit, 6)))
}
Psi = Psi * vy
B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
G = solve(diag(nbf) + tcrossprod(sB))
sB = scale(t(B), center = FALSE, scale = Psi)
}
}
res = list(B = B, Psi = Psi)
return(res)
}
nbfactors <- function(data.train, maxnbfactors = 12, diagnostic.plot, minerr = 0.001,
EM = TRUE, jumps.nbfactor = 0.05) {
dig <- 2
m <- ncol(data.train)
n <- nrow(data.train)
my = crossprod(rep(1, n), data.train)/n
vy = crossprod(rep(1, n), data.train^2)/n - my^2
vy = (n/(n - 1)) * vy
cdata = scale(data.train, center = my, scale = FALSE)
csdata = scale(data.train, center = my, scale = sqrt(vy))
S = crossprod(csdata)/(n - 1)
eig = fast.svd((1/sqrt((n - 1))) * t(csdata))
falist <- vector(length = maxnbfactors + 1, "list")
falist[[1]] <- list(B = matrix(0, ncol = 1, nrow = m))
falist[-1] <- lapply(1:maxnbfactors, emfa.nbf, csdata = csdata, S = S, eig = eig,
vy = vy, minerr = minerr, EM = EM, verbose = FALSE)
Blist <- lapply(falist, function(fa, m) matrix(fa$B, nrow = m), m = m)
sdt <- VarInflation(data.train, Blist, maxnbfactors, dig)
if (diagnostic.plot) {
dev.new()
plot(0:maxnbfactors, sdt, ylab = "Variance Inflation Criterion", xlab = "Number of factors",
bty = "l", lwd = 1.25, type = "b", pch = 16, cex.lab = 1.25, cex = 1.25,
cex.axis = 1.25)
}
if (which.min(sdt) == 1)
opt <- 0
if (which.min(sdt) > 1) {
jumps <- -diff(sdt)/sdt[-length(sdt)]
opt <- max((1:maxnbfactors)[jumps > jumps.nbfactor])
}
list(criterion = sdt, optimalnbfactors = opt)
}
emfa.nbf = function(csdata, S, eig, vy, nbf, EM = TRUE, minerr = 1e-06, verbose = FALSE) {
n <- nrow(csdata)
m <- ncol(csdata)
if (((n > m) & (m <= 200) & (m >= 3)) & (!EM)) {
if (nbf == 0) {
B = NULL
Psi = rep(1, m)
}
if (nbf > 0) {
fa = factanal(csdata, factors = nbf, rotation = "varimax")
B = fa$loadings
class(B) = "matrix"
Psi = fa$uniquenesses
Psi = Psi * vy
B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
G = solve(diag(nbf) + tcrossprod(sB))
sB = scale(t(B), center = FALSE, scale = Psi)
}
}
if ((n <= m) | (m > 200) | EM) {
if (nbf == 0) {
B = NULL
Psi = rep(1, m)
}
if (nbf > 0) {
if (verbose)
print(paste("Fitting EM Factor Analysis Model with", nbf, "factors"))
evectors = eig$u[, 1:nbf]
evalues = eig$d^2
if (nbf > 1)
B = evectors[, 1:nbf] * matrix(sqrt(evalues[1:nbf]), ncol = nbf,
nrow = m, byrow = TRUE)
if (nbf == 1)
B = matrix(evectors, nrow = m, ncol = 1) * sqrt(evalues[1])
b2 = rowSums(B^2)
Psi = 1 - b2
crit = 1
while (crit > minerr) {
inv = ifa(Psi, B)
Cyz = crossprod(S, inv$iSB)
Czz = crossprod(inv$iSB, Cyz) + diag(nbf) - crossprod(B, inv$iSB)
Bnew = tcrossprod(Cyz, solve(Czz))
Psinew = 1 - rowSums(Bnew * Cyz)
crit = mean((Psi - Psinew)^2)
B = Bnew
Psi = Psinew
if (verbose)
print(paste("Objective criterion in EM-FA : ", signif(crit, 6)))
}
Psi = Psi * vy
B = matrix(rep(sqrt(vy), ncol(B)), nrow = nrow(B)) * B
sB = scale(t(B), center = FALSE, scale = sqrt(Psi))
G = solve(diag(nbf) + tcrossprod(sB))
sB = scale(t(B), center = FALSE, scale = Psi)
}
}
res = list(B = B, Psi = Psi)
return(res)
}
LassoML <- function(data.train, ...) {
p <- ncol(data.train$x)
n <- nrow(data.train$x)
nbclass <- length(unique(data.train$y))
cl <- sort(unique(data.train$y))
if (!all(cl == c(1:nbclass))) {
stop("Group variable must be 1,2, ...")
}
family <- ifelse(nbclass == 2, "binomial", "multinomial")
cvmod <- cv.glmnet(x = as.matrix(data.train$x), y = data.train$y, family = family,
type.measure = "class")
lambda.min <- cvmod$lambda.min
mod <- glmnet(x = as.matrix(data.train$x), y = data.train$y, family = family,
lambda = lambda.min, ...)
proba.train <- predict(mod, newx = as.matrix(data.train$x), type = "response")
if (nbclass == 2) {
proba.train <- matrix(c(1 - proba.train, proba.train), ncol = 2, byrow = FALSE)
}
if (nbclass > 2) {
proba.train <- proba.train[, , 1]
}
return(list(proba.train = proba.train, model = mod))
}
FADA.tmp <- function(faobject, method, sda.method, alpha,...) {
fadta <- faobject$fa.training
fatest <- faobject$fa.testing
groups <- faobject$groups
p <- ncol(faobject$fa.training)
nbclass <- length(unique(groups))
if (method == "glmnet") {
out <- LassoML(list(x = fadta, y = groups), ...)
selected <- out$selected
proba.test <- predict(out$mod, newx = as.matrix(fatest), type = "response")
if (nbclass == 2) {
proba.test <- matrix(c(1 - proba.test, proba.test), ncol = 2, byrow = FALSE)
}
predict.test <- apply(proba.test, 1, which.max)
out <- out$model
proba.train <- predict(out, fadta, type = "response")
}
if (method == "sda") {
ranking.LDA <- sda::sda.ranking(fadta, groups, verbose = FALSE,...)
if (sda.method == "lfdr") {
selected <- as.numeric(ranking.LDA[ranking.LDA[, "lfdr"] < 0.8, "idx"])
} else {
thr <- which.max(ranking.LDA[1:round(alpha * p), "HC"])
selected <- as.numeric(ranking.LDA[1:thr, "idx"])
}
out <- sda::sda(fadta[, selected, drop = FALSE], groups, verbose = FALSE,...)
pred <- sda::predict.sda(out, fatest[, selected, drop = FALSE], verbose = FALSE)
proba.test <- pred$posterior
predict.test <- pred$class
proba.train <- sda::predict.sda(out, fadta[, selected, drop = FALSE],
verbose = FALSE)$posterior
}
if (method == "sparseLDA") {
Xc <- normalize(fadta)
Xn <- Xc$Xc
out <- sparseLDA::sda(Xn, factor(groups), ...)
Xctest <- normalizetest(fatest, Xc)
Xctest <- matrix(Xctest, nrow = nrow(fatest), byrow = FALSE)
colnames(Xctest) <- colnames(Xn)
pred <- sparseLDA::predict.sda(out, Xctest)
selected <- out$varIndex
proba.test <- pred$posterior
predict.test <- pred$class
proba.train <- sparseLDA::predict.sda(out, Xn)$posterior
}
return(list(method = method, selected = selected, proba.train = proba.train,
proba.test = proba.test, predict.test = predict.test, mod = out))
}
cv.FADA <- function(train.x, train.y, test.x, test.y, nbf.cv, method,sda.method,maxnbfactors,
min.err, EM, maxiter, alpha,...) {
fa.train <- decorrelate.train(list(x = train.x, y = train.y),
nbf = nbf.cv, maxnbfactors = maxnbfactors, diagnostic.plot = FALSE,
min.err = min.err, verbose = FALSE, EM = EM, maxiter = maxiter,...)
fa.test <- decorrelate.test(fa.train, list(x = test.x))
fada <- FADA.tmp(fa.test, method, sda.method, alpha,...)
return(mean(fada$predict.test != test.y))
} |
# constructor
# E.Blondel - 2013/06/09
#=======================
# Build an "SDMX" S4 object from a parsed XML document. The schema, header
# and footer slots are each derived from the same document before the
# object is assembled.
SDMX <- function(xmlObj){
	the_schema <- SDMXSchema(xmlObj)
	the_header <- SDMXHeader(xmlObj)
	the_footer <- SDMXFooter(xmlObj)
	new("SDMX", xmlObj = xmlObj, schema = the_schema,
			header = the_header, footer = the_footer)
}
#generics
# Declare each S4 generic only when it is not already defined, so that
# re-sourcing this file does not redefine an existing generic.
if (!isGeneric("as.XML"))
	setGeneric("as.XML", function(obj) standardGeneric("as.XML"));
if (!isGeneric("getSDMXSchema"))
	setGeneric("getSDMXSchema", function(obj) standardGeneric("getSDMXSchema"));
if (!isGeneric("getSDMXHeader"))
	setGeneric("getSDMXHeader", function(obj) standardGeneric("getSDMXHeader"));
if (!isGeneric("getSDMXType"))
	setGeneric("getSDMXType", function(obj) standardGeneric("getSDMXType"));
if (!isGeneric("getNamespaces"))
	setGeneric("getNamespaces", function(obj) standardGeneric("getNamespaces"));
if (!isGeneric("getSDMXFooter"))
	setGeneric("getSDMXFooter", function(obj) standardGeneric("getSDMXFooter"));
#methods
# Accessors on the "SDMX" class. The first three return stored slots; the
# last two compute their result from the stored XML on every call.
setMethod(f = "as.XML", signature = "SDMX", function(obj){
    # the raw parsed XML document backing this object
    return(obj@xmlObj);
  }
)
setMethod(f = "getSDMXSchema", signature = "SDMX", function(obj){
    return(obj@schema);
  }
)
setMethod(f = "getSDMXHeader", signature = "SDMX", function(obj){
    return(obj@header);
  }
)
setMethod(f = "getSDMXType", signature = "SDMX", function(obj){
    return(SDMXType(obj@xmlObj));
  }
)
setMethod(f = "getSDMXFooter", signature = "SDMX", function(obj){
    return(SDMXFooter(obj@xmlObj));
  }
)
# Collect all XML namespace declarations of the document into a data.frame
# with character columns "id" (prefix) and "uri", keeping the first
# declaration of each distinct URI.
namespaces.SDMX <- function(xmlObj){
  nsFromXML <- xmlNamespaceDefinitions(xmlObj, recursive = TRUE, simplify = FALSE)
  nsDefs.df <- do.call("rbind",
                       lapply(nsFromXML,
                              function(x){
                                c(x$id, x$uri)
                              }))
  row.names(nsDefs.df) <- seq_len(nrow(nsDefs.df))
  # fix: the argument was misspelled "stringAsFactors" and silently ignored
  nsDefs.df <- as.data.frame(nsDefs.df, stringsAsFactors = FALSE)
  if(nrow(nsDefs.df) > 0){
    colnames(nsDefs.df) <- c("id","uri")
    # kept as a safety net in case factors sneak in on older R versions
    nsDefs.df$id <- as.character(nsDefs.df$id)
    nsDefs.df$uri <- as.character(nsDefs.df$uri)
  }
  nsDefs.df <- unique(nsDefs.df)
  nsDefs.df <- nsDefs.df[!duplicated(nsDefs.df$uri),]
  return(nsDefs.df)
}
# Namespace declarations of the underlying document, as a data.frame
# computed by namespaces.SDMX() on each call.
setMethod(f = "getNamespaces", signature = "SDMX", function(obj){
    return(namespaces.SDMX(obj@xmlObj));
  }
)
#others non-S4 methods
#====================
#findNamespace
# Look up the namespace URI(s) ending in `messageType` in a namespace
# data.frame (columns "id", "uri") and return them as a character vector
# named "ns"; empty when nothing matches.
findNamespace <- function(namespaces, messageType){
	pattern <- paste0(messageType, "$")
	matched <- namespaces$uri[grep(pattern, namespaces$uri)]
	c(ns = matched)
}
#isSoapRequestEnvelope
# TRUE when at least one declared namespace URI contains "soap", i.e. the
# document is wrapped in a SOAP envelope rather than being a bare message.
isSoapRequestEnvelope <- function(xmlObj){
	nsDefs <- namespaces.SDMX(xmlObj)
	soapUris <- grep("soap", nsDefs$uri, value = TRUE)
	length(soapUris) > 0
}
#getSoapRequestResult
# Unwrap a SOAP response (Envelope > Body > Response > Result) and return
# the embedded message as a standalone XML document.
getSoapRequestResult <- function(xmlObj){
	soapBody <- xmlChildren(xmlRoot(xmlObj))[[1]]
	soapResponse <- xmlChildren(soapBody)[[1]]
	soapResult <- xmlChildren(soapResponse)[[1]]
	xmlDoc(xmlChildren(soapResult)[[1]])
}
| /R/SDMX-methods.R | no_license | h-kipple/rsdmx | R | false | false | 3,069 | r | # constructor
# E.Blondel - 2013/06/09
#=======================
SDMX <- function(xmlObj){
schema <- SDMXSchema(xmlObj);
header <- SDMXHeader(xmlObj);
footer <- SDMXFooter(xmlObj);
new("SDMX",
xmlObj = xmlObj,
schema = schema,
header = header,
footer = footer);
}
#generics
if (!isGeneric("as.XML"))
setGeneric("as.XML", function(obj) standardGeneric("as.XML"));
if (!isGeneric("getSDMXSchema"))
setGeneric("getSDMXSchema", function(obj) standardGeneric("getSDMXSchema"));
if (!isGeneric("getSDMXHeader"))
setGeneric("getSDMXHeader", function(obj) standardGeneric("getSDMXHeader"));
if (!isGeneric("getSDMXType"))
setGeneric("getSDMXType", function(obj) standardGeneric("getSDMXType"));
if (!isGeneric("getNamespaces"))
setGeneric("getNamespaces", function(obj) standardGeneric("getNamespaces"));
if (!isGeneric("getSDMXFooter"))
setGeneric("getSDMXFooter", function(obj) standardGeneric("getSDMXFooter"));
#methods
setMethod(f = "as.XML", signature = "SDMX", function(obj){
return(obj@xmlObj);
}
)
setMethod(f = "getSDMXSchema", signature = "SDMX", function(obj){
return(obj@schema);
}
)
setMethod(f = "getSDMXHeader", signature = "SDMX", function(obj){
return(obj@header);
}
)
setMethod(f = "getSDMXType", signature = "SDMX", function(obj){
return(SDMXType(obj@xmlObj));
}
)
setMethod(f = "getSDMXFooter", signature = "SDMX", function(obj){
return(SDMXFooter(obj@xmlObj));
}
)
namespaces.SDMX <- function(xmlObj){
nsFromXML <- xmlNamespaceDefinitions(xmlObj, recursive = TRUE, simplify = FALSE)
nsDefs.df <- do.call("rbind",
lapply(nsFromXML,
function(x){
c(x$id, x$uri)
}))
row.names(nsDefs.df) <- 1:nrow(nsDefs.df)
nsDefs.df <-as.data.frame(nsDefs.df, stringAsFactors = FALSE)
if(nrow(nsDefs.df) > 0){
colnames(nsDefs.df) <- c("id","uri")
nsDefs.df$id <- as.character(nsDefs.df$id)
nsDefs.df$uri <- as.character(nsDefs.df$uri)
}
nsDefs.df <- unique(nsDefs.df)
nsDefs.df <- nsDefs.df[!duplicated(nsDefs.df$uri),]
return(nsDefs.df)
}
setMethod(f = "getNamespaces", signature = "SDMX", function(obj){
return(namespaces.SDMX(obj@xmlObj));
}
)
#others non-S4 methods
#====================
#findNamespace
# Return the namespace URI(s) from the `namespaces` data.frame whose URI
# ends with the given SDMX message type, as a character vector named "ns".
findNamespace <- function(namespaces, messageType){
  pattern <- paste0(messageType, "$")
  c(ns = namespaces$uri[grep(pattern, namespaces$uri)])
}
#isSoapRequestEnvelope
# TRUE when the parsed document declares any namespace whose URI contains
# "soap", i.e. when the payload is wrapped in a SOAP envelope.
isSoapRequestEnvelope <- function(xmlObj){
  nsDefs <- namespaces.SDMX(xmlObj)
  soapMatches <- grep("soap", nsDefs$uri)
  length(soapMatches) > 0
}
#getSoapRequestResult
# Unwrap a SOAP envelope and return the embedded SDMX payload as a new,
# standalone XML document.  Descends four levels by taking the first child
# at each step (envelope body -> response -> result -> payload).
getSoapRequestResult <- function(xmlObj){
  soapBody <- xmlChildren(xmlRoot(xmlObj))[[1]]
  soapResponse <- xmlChildren(soapBody)[[1]]
  soapResult <- xmlChildren(soapResponse)[[1]]
  xmlDoc(xmlChildren(soapResult)[[1]])
}
|
# Negative log-likelihood for the GETS model, evaluated at the working
# parameter vector `thetain`.  Written as an optimizer callback: the model
# specification, life data, and iteration counter are pulled from the
# global workspace `.frame0` rather than passed as arguments.
#
# Censor codes in the data (inferred from the part names below):
#   1 = exact failure, 2 = right censored, 3 = left censored,
#   4 = interval censored.
#
# Returns Uminus(loglikelihood) (Uminus presumably negates -- confirm),
# or the penalty value 1e+10 when varzeta is out of range.
gets.log.like <-
function (thetain)
{
    # guarded log: substitutes a large negative number for log(x), x <= 0
    logcheck <- function (x) { ifelse(x > 0, logb(x), -1e+21) }
    # bump the global iteration counter kept in .frame0
    iter.count <- get(envir = .frame0, "iter.count") + 1
    assign(envir = .frame0, inherits = !TRUE,"iter.count", iter.count )
    model <- get(envir = .frame0, "model")
    f.origparam <- model$f.origparam
    distribution <- model$sub.distribution
    # NOTE(review): logtp1 and logtp2 are never used, and sigma is
    # overwritten from theta.origparam below -- these look like dead code.
    logtp1 <- thetain[1]
    logtp2 <- thetain[2]
    sigma <- thetain[3]
    # trace incoming parameters for the first few iterations, or always at
    # high debug levels
    if ((iter.count < 4 && map.SMRDDebugLevel() >= 1) || map.SMRDDebugLevel() >
        4)
        print(paste("in gets.log.like", paste(model$t.param.names,
            collapse = " "), "=", paste(format(thetain), collapse = " ")))
    # map the optimizer's working parameters back to the original scale
    theta.origparam <- f.origparam(thetain, model)
    alpha <- theta.origparam[1]
    sigma <- theta.origparam[2]
    varzeta <- theta.origparam[3]
    # penalize (near-)zero varzeta instead of evaluating the likelihood
    if (varzeta < 1e-05)
        return(1e+10)
    data.ld <- get(envir = .frame0, "data.ld")
    z <-Response(data.ld)
    the.censor.codes <- censor.codes(data.ld)
    the.case.weights <- case.weights(data.ld)
    fail.part <- 0
    rcensor.part <- 0
    lcensor.part <- 0
    icensor.part <- 0
    # exact failures: density contribution (dlgets presumably returns the
    # log density -- confirm against the SMRD sources)
    if (any(the.censor.codes == 1))
        fail.part <- sum(the.case.weights[the.censor.codes ==
            1] * dlgets(z[the.censor.codes == 1, 1], alpha, sigma,
            varzeta, distribution = distribution))
    # right-censored: log of the survival function
    if (any(the.censor.codes == 2))
        rcensor.part <- sum(the.case.weights[the.censor.codes ==
            2] * logcheck(sgets(z[the.censor.codes == 2, 1],
            alpha, sigma, varzeta, distribution = distribution)))
    # left-censored: log of the cdf
    if (any(the.censor.codes == 3))
        lcensor.part <- sum(the.case.weights[the.censor.codes ==
            3] * logcheck(pgets(z[the.censor.codes == 3, 1],
            alpha, sigma, varzeta, distribution = distribution)))
    # interval-censored: log of the cdf difference over [z[,1], z[,2]]
    if (any(the.censor.codes == 4))
        icensor.part <- sum(the.case.weights[the.censor.codes ==
            4] * (logcheck(pgets(z[the.censor.codes == 4, 2],
            alpha, sigma, varzeta, distribution = distribution) -
            pgets(z[the.censor.codes == 4, 1], alpha, sigma,
                varzeta, distribution = distribution))))
    loglikelihood <- (fail.part + rcensor.part + lcensor.part +
        icensor.part)
    # NOTE(review): this debug guard uses >= 4 where the one above uses
    # > 4 -- possibly an unintended inconsistency.
    if ((iter.count < 4 && map.SMRDDebugLevel() >= 1) || map.SMRDDebugLevel() >=
        4) {
        print(paste("in gets.log.like", paste(model$orig.param.names,
            collapse = " "), "=", paste(format(c(alpha, sigma,
            varzeta)), collapse = " ")))
        print(paste("in gets.log.like, likelihood=", paste(format(c(loglikelihood,
            fail.part, rcensor.part, lcensor.part, icensor.part)),
            collapse = " ")))
    }
    # the optimizer minimizes, so hand back the negated log-likelihood
    return(Uminus(loglikelihood))
}
| /R/gets.log.like.R | no_license | anhnguyendepocen/SMRD | R | false | false | 2,705 | r | gets.log.like <-
function (thetain)
{
    # Negative log-likelihood for the GETS model at working parameters
    # `thetain`.  Optimizer-callback convention: model, data and iteration
    # counter are read from the global workspace `.frame0`.
    # guarded log: large negative number instead of log of a non-positive
    logcheck <- function (x) { ifelse(x > 0, logb(x), -1e+21) }
    iter.count <- get(envir = .frame0, "iter.count") + 1
    assign(envir = .frame0, inherits = !TRUE,"iter.count", iter.count )
    model <- get(envir = .frame0, "model")
    f.origparam <- model$f.origparam
    distribution <- model$sub.distribution
    # NOTE(review): logtp1/logtp2 are unused and sigma is overwritten below
    # from theta.origparam -- apparently dead code.
    logtp1 <- thetain[1]
    logtp2 <- thetain[2]
    sigma <- thetain[3]
    if ((iter.count < 4 && map.SMRDDebugLevel() >= 1) || map.SMRDDebugLevel() >
        4)
        print(paste("in gets.log.like", paste(model$t.param.names,
            collapse = " "), "=", paste(format(thetain), collapse = " ")))
    # convert working parameters back to the original parameterization
    theta.origparam <- f.origparam(thetain, model)
    alpha <- theta.origparam[1]
    sigma <- theta.origparam[2]
    varzeta <- theta.origparam[3]
    # penalty return for out-of-range varzeta
    if (varzeta < 1e-05)
        return(1e+10)
    data.ld <- get(envir = .frame0, "data.ld")
    z <-Response(data.ld)
    the.censor.codes <- censor.codes(data.ld)
    the.case.weights <- case.weights(data.ld)
    fail.part <- 0
    rcensor.part <- 0
    lcensor.part <- 0
    icensor.part <- 0
    # censor code 1: exact failures (density term)
    if (any(the.censor.codes == 1))
        fail.part <- sum(the.case.weights[the.censor.codes ==
            1] * dlgets(z[the.censor.codes == 1, 1], alpha, sigma,
            varzeta, distribution = distribution))
    # censor code 2: right censored (log survival term)
    if (any(the.censor.codes == 2))
        rcensor.part <- sum(the.case.weights[the.censor.codes ==
            2] * logcheck(sgets(z[the.censor.codes == 2, 1],
            alpha, sigma, varzeta, distribution = distribution)))
    # censor code 3: left censored (log cdf term)
    if (any(the.censor.codes == 3))
        lcensor.part <- sum(the.case.weights[the.censor.codes ==
            3] * logcheck(pgets(z[the.censor.codes == 3, 1],
            alpha, sigma, varzeta, distribution = distribution)))
    # censor code 4: interval censored (log of cdf difference)
    if (any(the.censor.codes == 4))
        icensor.part <- sum(the.case.weights[the.censor.codes ==
            4] * (logcheck(pgets(z[the.censor.codes == 4, 2],
            alpha, sigma, varzeta, distribution = distribution) -
            pgets(z[the.censor.codes == 4, 1], alpha, sigma,
                varzeta, distribution = distribution))))
    loglikelihood <- (fail.part + rcensor.part + lcensor.part +
        icensor.part)
    if ((iter.count < 4 && map.SMRDDebugLevel() >= 1) || map.SMRDDebugLevel() >=
        4) {
        print(paste("in gets.log.like", paste(model$orig.param.names,
            collapse = " "), "=", paste(format(c(alpha, sigma,
            varzeta)), collapse = " ")))
        print(paste("in gets.log.like, likelihood=", paste(format(c(loglikelihood,
            fail.part, rcensor.part, lcensor.part, icensor.part)),
            collapse = " ")))
    }
    # optimizer minimizes, so return the negated log-likelihood
    return(Uminus(loglikelihood))
}
|
#' @title Compute the maximum diameter of the profile and its location
#'
#' @param image_profile a numeric 2D matrix. The binary matrix with the drawing cropped at its bounding box, as a result of get_vessel.bbox.
#' @param xx a dataframe. Melted profile matrix as a result of get_transforms.
#'
#' @return A list with the following items:
#' \itemize{
#' \item max.diameter - a numeric scalar. The maximum diameter of the profile in pixels.
#' \item max.diameter.loc - a numeric scalar. The location of the maximum diameter from the top of the profile in pixels.
#' \item YposXmin - a numeric 1D vector. The y position in the image_profile of the maximum diameter on the left of the profile.
#' \item YposXmax - a numeric 1D vector. The y position in the image_profile of the maximum diameter on the right of the profile.
#' }
#' @export
#'
#' @author Danai Kafetzaki
#'
#' @examples
#' get_max_diameter(image_profile = m7$image_profile, xx = m10)
# Compute the maximum diameter (horizontal extent of drawn pixels) of the
# profile and its location from the top, plus the y positions of the drawn
# pixels at the leftmost and rightmost x.
get_max_diameter = function(image_profile, xx){
  # x positions (Var1) that contain at least one drawn pixel
  drawn = xx$value == 1
  x_drawn = unique(xx[drawn, "Var1"])
  x_left = x_drawn[which.min(x_drawn)]
  x_right = x_drawn[which.max(x_drawn)]
  # diameter in pixels, inclusive of both end columns
  max_diam = x_right - x_left + 1
  # y positions (Var2) of the drawn pixels at the extreme x positions
  y_at_left = xx$Var2[drawn & (xx$Var1 == x_left)]
  y_at_right = xx$Var2[drawn & (xx$Var1 == x_right)]
  # location of the diameter: second image dimension minus the last y at
  # the leftmost x
  max_diam_loc = dim(image_profile)[2] - y_at_left[length(y_at_left)]
  return(list("max.diameter" = max_diam,
              "max.diameter.loc" = max_diam_loc,
              "YposXmin" = y_at_left,
              "YposXmax" = y_at_right))
}
| /R/get_max_diameter.R | permissive | kafetzakid/morphotype | R | false | false | 1,653 | r | #' @title Compute the maximum diameter of the profile and its location
#'
#' @param image_profile a numeric 2D matrix. The binary matrix with the drawing cropped at its bounding box, as a result of get_vessel.bbox.
#' @param xx a dataframe. Melted profile matrix as a result of get_transforms.
#'
#' @return A list with the following items:
#' \itemize{
#' \item max.diameter - a numeric scalar. The maximum diameter of the profile in pixels.
#' \item max.diameter.loc - a numeric scalar. The location of the maximum diameter from the top of the profile in pixels.
#' \item YposXmin - a numeric 1D vector. The y position in the image_profile of the maximum diameter on the left of the profile.
#' \item YposXmax - a numeric 1D vector. The y position in the image_profile of the maximum diameter on the right of the profile.
#' }
#' @export
#'
#' @author Danai Kafetzaki
#'
#' @examples
#' get_max_diameter(image_profile = m7$image_profile, xx = m10)
get_max_diameter = function(image_profile, xx){
  # Maximum horizontal extent (diameter) of the drawn profile.
  # xx is the melted binary matrix: Var1 = x position, Var2 = y position,
  # value = 1 for drawn pixels.
  indexX = unique(xx[(xx$value == 1), "Var1"])
  # leftmost and rightmost x positions containing drawn pixels
  ind_X1 = indexX[which.min(indexX)]
  ind_X2 = indexX[which.max(indexX)]
  # diameter in pixels, inclusive of both end columns
  sherd_maxDiam = ind_X2 - ind_X1 + 1
  # y positions of the drawn pixels at the extreme x positions
  YposXmin = xx$Var2[(xx$Var1 == ind_X1) & (xx$value == 1)]
  YposXmax = xx$Var2[(xx$Var1 == ind_X2) & (xx$value == 1)]
  # location from the top: second image dimension minus the last y at the
  # leftmost x (assumes xx rows are ordered by increasing y -- TODO confirm)
  sherd_maxDiam_loc = dim(image_profile)[2] - YposXmin[length(YposXmin)]
  returns = list("max.diameter" = sherd_maxDiam, "max.diameter.loc" = sherd_maxDiam_loc, "YposXmin" = YposXmin, "YposXmax" = YposXmax)
  # are YposXmin and YposXmax needed? There are 6 int in this example - test of sequential ?! - These 6 here are the tip thickness.
  return(returns)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ecdfHT.R
\name{ecdfHT.draw}
\alias{ecdfHT.axes}
\alias{ecdfHT.draw}
\alias{ecdfHT.g}
\alias{ecdfHT.h}
\title{Graph and annotate an ecdfHT plot}
\usage{
ecdfHT.draw(transform.info, x, p, show.plot = TRUE, new.plot = FALSE,
show.ci = FALSE, xlab = "x", ylab = "", ...)
ecdfHT.axes(transform.info, x.labels = c(), y.labels = c(),
show.vert.gridlines = FALSE, show.horiz.gridlines = FALSE, ...)
ecdfHT.h(x, t)
ecdfHT.g(p, q)
}
\arguments{
\item{transform.info}{A list with information about the transformation, computed in \code{ecdfHT}}
\item{x}{The data, a vector of double precision numbers. Assumed to be sorted and have distinct values.}
\item{p}{Probabilities, a vector of doubles. Typically p[i]=(i-0.5)/length(x), unless there are repeats in x.}
\item{show.plot}{Boolean value: indicates whether to plot or not.}
\item{new.plot}{Boolean value: indicates whether to produce a new plot or add to an existing plot.}
\item{show.ci}{Boolean value: indicates whether or not confidence intervals are shown.}
\item{xlab}{String to label the horizontal axis.}
\item{ylab}{String to label the vertical axis.}
\item{...}{Optional parameters for the plot, e.g. col='red'.}
\item{x.labels}{Vector of numbers specifying the location of the labels on the horizontal axis}
\item{y.labels}{Vector of numbers specifying the location of the labels on the vertical axis}
\item{show.vert.gridlines}{Boolean value indicating whether or not vertical grid lines should be drawn.}
\item{show.horiz.gridlines}{Boolean value indicating whether or not horizontal grid lines should be drawn.}
\item{t}{A vector of length 3 that specifies the x values that determine the left tail, middle, and right tail}
\item{q}{A vector of length 3 that specifies the quantile values that determine the left tail, middle, and right tail.}
}
\value{
A list of values used in the plot, see return value of \code{ecdfHT}.
\code{ecdfHT.h} returns the vector y=h(x;t), \code{ecdfHT.g} returns the vector y=g(p;q)
}
\description{
Does the computations and plotting for \code{ecdfHT} and can be used to add to an existing plot.
}
\details{
\code{ecdfHT.draw} computes transform and plots.
\code{ecdfHT.axes} draws axes on the plot; it can be used to manually select tick marks, etc.
\code{ecdfHT.h} computes the function h(x) for the transformation of the horizontal axis.
\code{ecdfHT.g} computes the function g(p) for the transformation of the vertical axis.
Always call \code{ecdfHT} first to produce the basic plot, then use \code{ecdfHT.draw}
to add other curves to the plot as in the examples below
}
\examples{
set.seed(1)
x <- rcauchy( 1000 )
t.info <- ecdfHT( x, show.axes=FALSE )
ecdfHT.axes( t.info, x.labels=c(-50,-5,0,5,50), y.labels=c(.001,.01,.1,.5,.9,.99,.999),
  show.vert.gridlines=TRUE, show.horiz.gridlines=TRUE, lty=2 )
q1 <- qcauchy(t.info$ecdf) # Cauchy quantiles
ecdfHT.draw( t.info, q1, t.info$ecdf, col='red',show.ci=TRUE)
q2 <- qnorm(t.info$ecdf,sd=sd(x)) # Gaussian quantiles
ecdfHT.draw( t.info, q2, t.info$ecdf, col='green',show.ci=TRUE)
title(paste("simulated Cauchy data, n=",length(x),"\\nred=Cauchy cdf, green=normal cdf"))
x <- seq(-5,5,1)
t <- c(-3,0,3)
ecdfHT.h(x,t)
p <- seq(0.05,.95,.1)
q <- c(.1,.5,.9)
ecdfHT.g(p,q)
}
| /man/ecdfHT.draw.Rd | no_license | cran/ecdfHT | R | false | true | 3,408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ecdfHT.R
\name{ecdfHT.draw}
\alias{ecdfHT.axes}
\alias{ecdfHT.draw}
\alias{ecdfHT.g}
\alias{ecdfHT.h}
\title{Graph and annotate an ecdfHT plot}
\usage{
ecdfHT.draw(transform.info, x, p, show.plot = TRUE, new.plot = FALSE,
show.ci = FALSE, xlab = "x", ylab = "", ...)
ecdfHT.axes(transform.info, x.labels = c(), y.labels = c(),
show.vert.gridlines = FALSE, show.horiz.gridlines = FALSE, ...)
ecdfHT.h(x, t)
ecdfHT.g(p, q)
}
\arguments{
\item{transform.info}{A list with information about the transformation, computed in \code{ecdfHT}}
\item{x}{The data, a vector of double precision numbers. Assumed to be sorted and have distinct values.}
\item{p}{Probabilities, a vector of doubles. Typically p[i]=(i-0.5)/length(x), unless there are repeats in x.}
\item{show.plot}{Boolean value: indicates whether to plot or not.}
\item{new.plot}{Boolean value: indicates whether to produce a new plot or add to an existing plot.}
\item{show.ci}{Boolean value: indicates whether or not confidence intervals are shown.}
\item{xlab}{String to label the horizontal axis.}
\item{ylab}{String to label the vertical axis.}
\item{...}{Optional parameters for the plot, e.g. col='red'.}
\item{x.labels}{Vector of numbers specifying the location of the labels on the horizontal axis}
\item{y.labels}{Vector of numbers specifying the location of the labels on the vertical axis}
\item{show.vert.gridlines}{Boolean value indicating whether or not vertical grid lines should be drawn.}
\item{show.horiz.gridlines}{Boolean value indicating whether or not horizontal grid lines should be drawn.}
\item{t}{A vector of length 3 that specifies the x values that determine the left tail, middle, and right tail}
\item{q}{A vector of length 3 that specifies the quantile values that determine the left tail, middle, and right tail.}
}
\value{
A list of values used in the plot, see return value of \code{ecdfHT}.
\code{ecdfHT.h} returns the vector y=h(x;t), \code{ecdfHT.g} returns the vector y=g(p;q)
}
\description{
Does the computations and plotting for \code{ecdfHT} and can be used to add to an existing plot.
}
\details{
\code{ecdfHT.draw} computes transform and plots.
\code{ecdfHT.axes} draws axes on the plot; it can be used to manually select tick marks, etc.
\code{ecdfHT.h} computes the function h(x) for the transformation of the horizontal axis.
\code{ecdfHT.g} computes the function g(p) for the transformation of the vertical axis.
Always call \code{ecdfHT} first to produce the basic plot, then use \code{ecdfHT.draw}
to add other curves to the plot as in the examples below
}
\examples{
set.seed(1)
x <- rcauchy( 1000 )
t.info <- ecdfHT( x, show.axes=FALSE )
ecdfHT.axes( t.info, x.labels=c(-50,-5,0,5,50), y.labels=c(.001,.01,.1,.5,.9,.99,.999),
  show.vert.gridlines=TRUE, show.horiz.gridlines=TRUE, lty=2 )
q1 <- qcauchy(t.info$ecdf) # Cauchy quantiles
ecdfHT.draw( t.info, q1, t.info$ecdf, col='red',show.ci=TRUE)
q2 <- qnorm(t.info$ecdf,sd=sd(x)) # Gaussian quantiles
ecdfHT.draw( t.info, q2, t.info$ecdf, col='green',show.ci=TRUE)
title(paste("simulated Cauchy data, n=",length(x),"\\nred=Cauchy cdf, green=normal cdf"))
x <- seq(-5,5,1)
t <- c(-3,0,3)
ecdfHT.h(x,t)
p <- seq(0.05,.95,.1)
q <- c(.1,.5,.9)
ecdfHT.g(p,q)
}
|
\name{OUwie}
\alias{OUwie}
\title{Generalized Hansen models}
\description{Fits generalized Ornstein-Uhlenbeck-based Hansen models of continuous characters evolving under discrete selective regimes.}
\usage{
OUwie(phy, data, model=c("BM1","BMS","OU1","OUM","OUMV","OUMA","OUMVA",
"TrendyM","TrendyMS"), simmap.tree=FALSE, root.age=NULL,scaleHeight=FALSE,
root.station=TRUE, clade=NULL, mserr="none", starting.vals=NULL, diagn=FALSE,
quiet=FALSE, warn=TRUE)
}
\arguments{
\item{phy}{a phylogenetic tree, in \code{ape} \dQuote{phylo} format and with internal nodes labeled denoting the ancestral selective regimes.}
\item{data}{a data.frame containing species information (see Details).}
\item{model}{models to fit to comparative data (see Details).}
\item{simmap.tree}{a logical indicating whether the input tree is in SIMMAP format. The default is \code{FALSE}.}
\item{root.age}{indicates the age of the tree. This is to be used in cases where the "tips" are not contemporary, such as in cases for fossil trees. Default is \code{NULL} meaning latest tip is modern day.}
\item{scaleHeight}{a logical indicating whether the total tree height should be scaled to 1 (see Details). The default is \code{FALSE}.}
\item{root.station}{a logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, should be estimated (see Details).}
\item{clade}{a list containing a pair of taxa whose MRCA is the clade of interest (see Details).}
\item{mserr}{designates whether a fourth column in the data matrix contains measurement error for each species value ("known"). The measurement error is assumed to be the standard error of the species mean. The default is "none".}
\item{starting.vals}{a vector of initial values for the optimization search. For OU models, two must be supplied, with the first being the initial alpha value and the second being the initial sigma squared. For BM models, just a single value is needed.}
\item{diagn}{a logical indicating whether the full diagnostic analysis should be carried out. The default is \code{FALSE}.}
\item{quiet}{a logical indicating whether progress should be written to the screen. The default is \code{FALSE}.}
\item{warn}{a logical indicating whether a warning should be printed if the number of parameters exceeds ntips/10. The default is \code{TRUE}.}
}
\details{
This function fits various likelihood models for continuous characters evolving under discrete selective regimes. The function returns parameter estimates and their approximate standard errors. The R package \code{nloptr} provides a common interface to NLopt, an open-source library for nonlinear optimization. The likelihood function is maximized using the bounded subplex optimization routine (\code{NLOPT_LN_SBPLX}). As input all \code{OUwie} requires is a tree and a trait data.frame. The tree must be of class \dQuote{phylo} and must contain the ancestral selective regimes as internal node labels. Internal node labels can be applied manually or from some sort of ancestral state reconstruction procedure (BayesTraits, \code{ape}, \code{diversitree}, SIMMAP, etc.), which would then be brought into OUwie. This is essentially what is required by \code{ouch} and Brownie (though Brownie provides built-in ancestral state reconstruction capabilities). The trait data.frame must have column entries in the following order: [,1] species names, [,2] current selective regime, and [,3] the continuous trait of interest. Alternatively, if the user wants to incorporate measurement error (\code{mserr}="known"), then a fourth column, [,4] must be included that provides the standard error estimates for each species mean. However, a global measurement error for all taxa can be estimated from the data (\code{mserr}="est"); is not well tested, so use at your own risk. Also, a user can specify a particular clade as being in a different selective regime, by inputting a pair of species whose mrca is the root of the clade of interest [e.g., \code{clade}=c("taxaA","taxaB")]. OUwie will automatically assign internal node labels and update the data matrix according to this clade designation.
The initial implementation followed \code{ouch} in that the tree is automatically rescaled so that the branch lengths were in proportion to the total height of the tree. However, this makes the results inconsistent with other implementations such as Brownie or \code{geiger}. Therefore, we allow the user to choose whether the tree should be rescaled or not. Note that the when \code{scaleHeight=FALSE} the bounds will have to be adjusted to the appropriate scale.
Possible models are as follows: single-rate Brownian motion (\code{model=BM1}), Brownian motion with different rate parameters for each state on a tree (\code{model=BMS}), Ornstein-Uhlenbeck model with a single optimum for all species (\code{model=OU1}), Ornstein-Uhlenbeck model with different state means and a single \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} acting all selective regimes (\code{model=OUM}), and new Ornstein-Uhlenbeck models that assume different state means as well as either multiple \eqn{\sigma^2}{sigma^2} (\code{model=OUMV}), multiple \eqn{\alpha}{alpha} (\code{model=OUMA}), or multiple \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} per selective regime (\code{model=OUMVA}).
If \code{root.station} is \code{TRUE} (the default), \eqn{\theta_0}{theta_0} is dropped from the model. Under these conditions it is assumed that the starting value is distributed according to the stationary distribution of the OU process. This would not fit a biological scenario involving moving away from an ancestral state, but it does fit a scenario of evolution at a steady state. Dropping \eqn{\theta_0}{theta_0} from the model can sometimes stabilize estimates of the primary optima, especially in situations where the estimates of \eqn{\theta}{theta} in the full model are non-sensical. In regards to the accuracy of estimating \eqn{\theta_0}{theta_0}, it is important to note that in simulation, as \eqn{\alpha}{alpha} increases estimates of \eqn{\theta_0}{theta_0} converge to zero. Thus, when \eqn{\alpha}{alpha} is large (i.e. \eqn{\alpha}{alpha}>2) it is likely that any inference of an evolutionary trend will be an artifact and positively misleading.
Also note, when specifying the BMS model be mindful of the root.station flag. When root.station=FALSE, the non-censored model of O'Meara et al. 2006 is invoked (i.e., a single regime at the root is estimated), and when root.station==TRUE the group mean model of Thomas et al. 2006 (i.e., the number of means equals the number of regimes). The latter case appears to be a strange special case of OU, in that it behaves similarly to the OUMV model, but without selection. I would say that this is more consistent with the censored test of O'Meara et al. (2006), as opposed to having any real connection to OU. In any case, more work is clearly needed to understand the behavior of the group means model, and therefore, I recommend setting root.station=FALSE in the BMS case.
The Hessian matrix is used as a means to estimate the approximate standard errors of the model parameters and to assess whether they are the maximum likelihood estimates. The variance-covariance matrix of the estimated values of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} are computed as the inverse of the Hessian matrix and the standard errors are the square roots of the diagonals of this matrix. The Hessian is a matrix of second-order derivatives and is approximated in the R package \code{numDeriv}. So, if changes in the value of a parameter results in sharp changes in the slope around the maximum of the log-likelihood function, the second-order derivative will be large, the standard error will be small, and the parameter estimate is considered stable. On the other hand, if the second-order derivative is nearly zero, then the change in the slope around the maximum is also nearly zero, indicating that the parameter value can be moved in any direction without greatly affecting the log-likelihood. In such situations, the standard error of the parameter will be large.
For models that allow \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} to vary (i.e., \code{OUMV}, \code{OUMA}, and \code{OUMVA}), the complexity of the model can often times be greater than the information that is contained within the data. As a result one or many parameters are poorly estimated, which can cause the function to return a log-likelihood that is suboptimal. This has great potential for poor model choice and incorrect biological interpretations. An eigendecomposition of the Hessian can provide an indication of whether the search returned the maximum likelihood estimates. If all the eigenvalues of the Hessian are positive, then the Hessian is positive definite, and all parameter estimates are considered reliable. However, if there are both positive and negative eigenvalues, then the objective function is at a saddlepoint and one or several parameters cannot be estimated adequately. One solution is to just fit a simpler model. Another is to actually identify the offending parameters. This can be done through the examination of the eigenvectors. The row order corresponds to the entries in \code{index.matrix}, the columns correspond to the order of values in \code{eigval}, and the larger the value of the row entry the greater the association between the corresponding parameter and the eigenvalue. Thus, the largest values in the columns associated with negative eigenvalues are the parameters that are causing the objective function to be at a saddlepoint.
}
\value{
\code{OUwie} returns an object of class \code{OUwie}. This is a list with elements:
\item{$loglik}{the maximum log-likelihood.}
\item{$AIC}{Akaike information criterion.}
\item{$AICc}{Akaike information criterion corrected for sample-size.}
\item{$model}{The model being fit}
\item{$param.count}{The number of parameters counted in the model.}
\item{$solution}{a matrix containing the maximum likelihood estimates of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}.}
\item{$theta}{a matrix containing the maximum likelihood estimates of \eqn{\theta}{theta} and its standard error.}
\item{$solution.se}{a matrix containing the approximate standard errors of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}. The standard error is calculated as the diagonal of the inverse of the Hessian matrix.}
\item{$tot.state}{A vector of names for the different regimes}
\item{$index.mat}{The indices of the parameters being estimated are returned. The numbers correspond to the row in the \code{eigvect} and can useful for identifying the parameters that are causing the objective function to be at a saddlepoint (see Details)}
\item{$simmap.tree}{A logical indicating whether the input phylogeny is a SIMMAP formatted tree.}
\item{$root.age}{The user-supplied age at the root of the tree.}
\item{$opts}{Internal settings of the likelihood search}
\item{$data}{User-supplied dataset}
\item{$phy}{User-supplied tree}
\item{$root.station}{A logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, was estimated}
\item{$starting.vals}{A vector of user-supplied initial search parameters.}
\item{$lb}{The lower bound set}
\item{$ub}{The upper bound set}
\item{$iterations}{Number of iterations of the likelihood search that were executed}
\item{$mserr.est}{The estimated measurement error if mserr="est". Otherwise, the value is NULL.}
\item{$res}{A vector of residuals from the model fit. The residuals are ordered in the same way as the tips in the tree.}
\item{$eigval}{The eigenvalues from the decomposition of the Hessian of the likelihood function. If any \code{eigval<0} then one or more parameters were not optimized during the likelihood search (see Details)}
\item{$eigvect}{The eigenvectors from the decomposition of the Hessian of the likelihood function is returned (see Details)}
}
\examples{
data(tworegime)
#Plot the tree and the internal nodes to highlight the selective regimes:
select.reg<-character(length(tree$node.label))
select.reg[tree$node.label == 1] <- "black"
select.reg[tree$node.label == 2] <- "red"
plot(tree)
nodelabels(pch=21, bg=select.reg)
#Not run
#To see the first 5 lines of the data matrix to see what how to
#structure the data:
#trait[1:5,]
#Now fit an OU model that allows different sigma^2:
#OUwie(tree,trait,model=c("OUMV"),root.station=TRUE)
#Fit an OU model based on a clade of interest:
#OUwie(tree,trait,model=c("OUMV"), root.station=TRUE, clade=c("t50", "t64"))
}
\references{
Beaulieu J.M., Jhwueng D.C., Boettiger C., and O'Meara B.C. 2012. Modeling stabilizing selection: Expanding the Ornstein-Uhlenbeck model of adaptive evolution. Evolution 66:2369-2383.
O'Meara B.C., Ane C., Sanderson P.C., Wainwright P.C. 2006. Testing for different rates of continuous trait evolution using likelihood. Evolution 60:922-933.
Butler M.A., King A.A. 2004. Phylogenetic comparative analysis: A modeling approach for adaptive evolution. American Naturalist 164:683-695.
Thomas G.H., Freckleton R.P., and Szekely T. 2006. Comparative analysis of the influence of developmental mode on phenotypic diversification rates in shorebirds. Proceedings of the Royal Society, B. 273:1619-1624.
}
\author{Jeremy M. Beaulieu and Brian C. O'Meara}
\keyword{models}
| /man/OUwie.Rd | no_license | chloerobins/OUwie | R | false | false | 13,307 | rd | \name{OUwie}
\alias{OUwie}
\title{Generalized Hansen models}
\description{Fits generalized Ornstein-Uhlenbeck-based Hansen models of continuous characters evolving under discrete selective regimes.}
\usage{
OUwie(phy, data, model=c("BM1","BMS","OU1","OUM","OUMV","OUMA","OUMVA",
"TrendyM","TrendyMS"), simmap.tree=FALSE, root.age=NULL,scaleHeight=FALSE,
root.station=TRUE, clade=NULL, mserr="none", starting.vals=NULL, diagn=FALSE,
quiet=FALSE, warn=TRUE)
}
\arguments{
\item{phy}{a phylogenetic tree, in \code{ape} \dQuote{phylo} format and with internal nodes labeled denoting the ancestral selective regimes.}
\item{data}{a data.frame containing species information (see Details).}
\item{model}{models to fit to comparative data (see Details).}
\item{simmap.tree}{a logical indicating whether the input tree is in SIMMAP format. The default is \code{FALSE}.}
\item{root.age}{indicates the age of the tree. This is to be used in cases where the "tips" are not contemporary, such as in cases for fossil trees. Default is \code{NULL} meaning latest tip is modern day.}
\item{scaleHeight}{a logical indicating whether the total tree height should be scaled to 1 (see Details). The default is \code{FALSE}.}
\item{root.station}{a logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, should be estimated (see Details).}
\item{clade}{a list containing a pair of taxa whose MRCA is the clade of interest (see Details).}
\item{mserr}{designates whether a fourth column in the data matrix contains measurement error for each species value ("known"). The measurement error is assumed to be the standard error of the species mean. The default is "none".}
\item{starting.vals}{a vector of initial values for the optimization search. For OU models, two must be supplied, with the first being the initial alpha value and the second being the initial sigma squared. For BM models, just a single value is needed.}
\item{diagn}{a logical indicating whether the full diagnostic analysis should be carried out. The default is \code{FALSE}.}
\item{quiet}{a logical indicating whether progress should be written to the screen. The default is \code{FALSE}.}
\item{warn}{a logical indicating whether a warning should be printed if the number of parameters exceeds ntips/10. The default is \code{TRUE}.}
}
\details{
This function fits various likelihood models for continuous characters evolving under discrete selective regimes. The function returns parameter estimates and their approximate standard errors. The R package \code{nloptr} provides a common interface to NLopt, an open-source library for nonlinear optimization. The likelihood function is maximized using the bounded subplex optimization routine (\code{NLOPT_LN_SBPLX}). As input all \code{OUwie} requires is a tree and a trait data.frame. The tree must be of class \dQuote{phylo} and must contain the ancestral selective regimes as internal node labels. Internal node labels can be applied manually or from some sort of ancestral state reconstruction procedure (BayesTraits, \code{ape}, \code{diversitree}, SIMMAP, etc.), which would then be brought into OUwie. This is essentially what is required by \code{ouch} and Brownie (though Brownie provides built-in ancestral state reconstruction capabilities). The trait data.frame must have column entries in the following order: [,1] species names, [,2] current selective regime, and [,3] the continuous trait of interest. Alternatively, if the user wants to incorporate measurement error (\code{mserr}="known"), then a fourth column, [,4] must be included that provides the standard error estimates for each species mean. However, a global measurement error for all taxa can be estimated from the data (\code{mserr}="est"); is not well tested, so use at your own risk. Also, a user can specify a particular clade as being in a different selective regime, by inputting a pair of species whose mrca is the root of the clade of interest [e.g., \code{clade}=c("taxaA","taxaB")]. OUwie will automatically assign internal node labels and update the data matrix according to this clade designation.
The initial implementation followed \code{ouch} in that the tree is automatically rescaled so that the branch lengths were in proportion to the total height of the tree. However, this makes the results inconsistent with other implementations such as Brownie or \code{geiger}. Therefore, we allow the user to choose whether the tree should be rescaled or not. Note that the when \code{scaleHeight=FALSE} the bounds will have to be adjusted to the appropriate scale.
Possible models are as follows: single-rate Brownian motion (\code{model=BM1}), Brownian motion with different rate parameters for each state on a tree (\code{model=BMS}), Ornstein-Uhlenbeck model with a single optimum for all species (\code{model=OU1}), Ornstein-Uhlenbeck model with different state means and a single \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} acting all selective regimes (\code{model=OUM}), and new Ornstein-Uhlenbeck models that assume different state means as well as either multiple \eqn{\sigma^2}{sigma^2} (\code{model=OUMV}), multiple \eqn{\alpha}{alpha} (\code{model=OUMA}), or multiple \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} per selective regime (\code{model=OUMVA}).
If \code{root.station} is \code{TRUE} (the default), \eqn{\theta_0}{theta_0} is dropped from the model. Under these conditions it is assumed that the starting value is distributed according to the stationary distribution of the OU process. This would not fit a biological scenario involving moving away from an ancestral state, but it does fit a scenario of evolution at a steady state. Dropping \eqn{\theta_0}{theta_0} from the model can sometimes stabilize estimates of the primary optima, especially in situations where the estimates of \eqn{\theta}{theta} in the full model are non-sensical. In regards to the accuracy of estimating \eqn{\theta_0}{theta_0}, it is important to note that in simulation, as \eqn{\alpha}{alpha} increases estimates of \eqn{\theta_0}{theta_0} converge to zero. Thus, when \eqn{\alpha}{alpha} is large (i.e. \eqn{\alpha}{alpha}>2) it is likely that any inference of an evolutionary trend will be an artifact and positively misleading.
Also note, when specifying the BMS model be mindful of the root.station flag. When root.station=FALSE, the non-censored model of O'Meara et al. 2006 is invoked (i.e., a single regime at the root is estimated), and when root.station==TRUE the group mean model of Thomas et al. 2006 (i.e., the number of means equals the number of regimes). The latter case appears to be a strange special case of OU, in that it behaves similarly to the OUMV model, but without selection. I would say that this is more consistent with the censored test of O'Meara et al. (2006), as opposed to having any real connection to OU. In any case, more work is clearly needed to understand the behavior of the group means model, and therefore, I recommend setting root.station=FALSE in the BMS case.
The Hessian matrix is used as a means to estimate the approximate standard errors of the model parameters and to assess whether they are the maximum likelihood estimates. The variance-covariance matrix of the estimated values of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} are computed as the inverse of the Hessian matrix and the standard errors are the square roots of the diagonals of this matrix. The Hessian is a matrix of second-order derivatives and is approximated in the R package \code{numDeriv}. So, if changes in the value of a parameter results in sharp changes in the slope around the maximum of the log-likelihood function, the second-order derivative will be large, the standard error will be small, and the parameter estimate is considered stable. On the other hand, if the second-order derivative is nearly zero, then the change in the slope around the maximum is also nearly zero, indicating that the parameter value can be moved in any direction without greatly affecting the log-likelihood. In such situations, the standard error of the parameter will be large.
For models that allow \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2} to vary (i.e., \code{OUMV}, \code{OUMA}, and \code{OUMVA}), the complexity of the model can often times be greater than the information that is contained within the data. As a result one or many parameters are poorly estimated, which can cause the function to return a log-likelihood that is suboptimal. This has great potential for poor model choice and incorrect biological interpretations. An eigendecomposition of the Hessian can provide an indication of whether the search returned the maximum likelihood estimates. If all the eigenvalues of the Hessian are positive, then the Hessian is positive definite, and all parameter estimates are considered reliable. However, if there are both positive and negative eigenvalues, then the objective function is at a saddlepoint and one or several parameters cannot be estimated adequately. One solution is to just fit a simpler model. Another is to actually identify the offending parameters. This can be done through the examination of the eigenvectors. The row order corresponds to the entries in \code{index.matrix}, the columns correspond to the order of values in \code{eigval}, and the larger the value of the row entry the greater the association between the corresponding parameter and the eigenvalue. Thus, the largest values in the columns associated with negative eigenvalues are the parameters that are causing the objective function to be at a saddlepoint.
}
\value{
\code{OUwie} returns an object of class \code{OUwie}. This is a list with elements:
\item{$loglik}{the maximum log-likelihood.}
\item{$AIC}{Akaike information criterion.}
\item{$AICc}{Akaike information criterion corrected for sample-size.}
\item{$model}{The model being fit}
\item{$param.count}{The number of parameters counted in the model.}
\item{$solution}{a matrix containing the maximum likelihood estimates of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}.}
\item{$theta}{a matrix containing the maximum likelihood estimates of \eqn{\theta}{theta} and its standard error.}
\item{$solution.se}{a matrix containing the approximate standard errors of \eqn{\alpha}{alpha} and \eqn{\sigma^2}{sigma^2}. The standard error is calculated as the diagonal of the inverse of the Hessian matrix.}
\item{$tot.state}{A vector of names for the different regimes}
\item{$index.mat}{The indices of the parameters being estimated are returned. The numbers correspond to the row in the \code{eigvect} and can be useful for identifying the parameters that are causing the objective function to be at a saddlepoint (see Details)}
\item{$simmap.tree}{A logical indicating whether the input phylogeny is a SIMMAP formatted tree.}
\item{$root.age}{The user-supplied age at the root of the tree.}
\item{$opts}{Internal settings of the likelihood search}
\item{$data}{User-supplied dataset}
\item{$phy}{User-supplied tree}
\item{$root.station}{A logical indicating whether the starting state, \eqn{\theta_0}{theta_0}, was estimated}
\item{$starting.vals}{A vector of user-supplied initial search parameters.}
\item{$lb}{The lower bound set}
\item{$ub}{The upper bound set}
\item{$iterations}{Number of iterations of the likelihood search that were executed}
\item{$mserr.est}{The estimated measurement error if mserr="est". Otherwise, the value is NULL.}
\item{$res}{A vector of residuals from the model fit. The residuals are ordered in the same way as the tips in the tree.}
\item{$eigval}{The eigenvalues from the decomposition of the Hessian of the likelihood function. If any \code{eigval<0} then one or more parameters were not optimized during the likelihood search (see Details)}
\item{$eigvect}{The eigenvectors from the decomposition of the Hessian of the likelihood function is returned (see Details)}
}
\examples{
data(tworegime)
#Plot the tree and the internal nodes to highlight the selective regimes:
select.reg<-character(length(tree$node.label))
select.reg[tree$node.label == 1] <- "black"
select.reg[tree$node.label == 2] <- "red"
plot(tree)
nodelabels(pch=21, bg=select.reg)
#Not run
#To see the first 5 lines of the data matrix to see how to
#structure the data:
#trait[1:5,]
#Now fit an OU model that allows different sigma^2:
#OUwie(tree,trait,model=c("OUMV"),root.station=TRUE)
#Fit an OU model based on a clade of interest:
#OUwie(tree,trait,model=c("OUMV"), root.station=TRUE, clade=c("t50", "t64"))
}
\references{
Beaulieu J.M., Jhwueng D.C., Boettiger C., and O'Meara B.C. 2012. Modeling stabilizing selection: Expanding the Ornstein-Uhlenbeck model of adaptive evolution. Evolution 66:2369-2383.
O'Meara B.C., Ane C., Sanderson P.C., Wainwright P.C. 2006. Testing for different rates of continuous trait evolution using likelihood. Evolution 60:922-933.
Butler M.A., King A.A. 2004. Phylogenetic comparative analysis: A modeling approach for adaptive evolution. American Naturalist 164:683-695.
Thomas G.H., Freckleton R.P., and Szekely T. 2006. Comparative analysis of the influence of developmental mode on phenotypic diversification rates in shorebirds. Proceedings of the Royal Society, B. 273:1619-1624.
}
\author{Jeremy M. Beaulieu and Brian C. O'Meara}
\keyword{models}
|
# Script Description --------------------
# This script creates a stacked area graph of multi-temporal land cover data, particularly the land cover maps produced
# using Landsat data at four time-points: 1988, 2000, 2010, and 2015 for Mindoro Island, Philippines. The land cover maps
# consist of 8 categories including: forest, mangrove, grassland, rice paddy/bare soil, exposed rock, shrubs/other
# vegetation, built-up area, and water.
#
# Script By: Jose Don T De Alban
# Date Created: 20 Nov 2017
# Last Modified: 08 Apr 2021
# Set Working Directories ---------------
# Absolute paths on the original author's machine -- update before reuse.
Dir1 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/mindoro_island/"  # island-wide summaries
Dir2 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/pa_mcws/"  # Mt. Calavite Wildlife Sanctuary
Dir3 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/kba_siburan/"  # Mt. Siburan Key Biodiversity Area
Dir4 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/pa_mibnp/"  # Mts. Iglit-Baco National Park
DirMAIN <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/"  # output location for the combined figure
# Load Libraries and Data ---------------
library(egg)
library(ggplot2)
library(grid)
library(gtable)
library(reshape2)
library(tidyverse)
# Function to Read Data Files -----------
# Read one tab-separated land-cover summary table and return a named numeric
# vector: values are the per-class percentages (column 3), names are the
# land-cover class codes (column 1).
readdata <- function(filename) {
  tbl <- read.csv(filename, sep = "\t")
  # Column 3 holds the percentage of landscape; column 1 holds the class code
  # (coerced to character when used as names).
  setNames(tbl[[3]], tbl[[1]])
}
# Generate Study Area Plots -------------
# MINDORO ISLAND
# Read every per-year summary table in the island-wide directory.
setwd(Dir1)
island_files <- list.files()
# Stack the per-year vectors into a year x class matrix of percentages.
island_wide <- do.call(rbind, lapply(island_files, readdata))
colnames(island_wide) <- c("1", "2", "3", "4", "5", "6", "7", "8")
row.names(island_wide) <- c("1988", "2000", "2010", "2015")
# Reshape to long format: one row per (year, class) with its percentage.
data1 <- melt(island_wide, id.vars="years", variable.name="class", value.name="percentage")
colnames(data1) <- c("Years", "Class", "Percentage")
# Build the stacked area chart for the whole island in one chained expression.
plot1 <- ggplot() +
  geom_area(aes(x = Years, y = Percentage,
                fill = factor(Class,
                              labels = c("Forest",
                                         "Mangrove",
                                         "Grassland",
                                         "Rice Paddy / Bare Soil",
                                         "Exposed Rock",
                                         "Shrub / Other Vegetation",
                                         "Built-up Area",
                                         "Water Body"))),
            data = data1) +
  labs(title="Mindoro Island", x="Year", y="Percentage of Landscape", fill="Land Cover Category") +
  scale_fill_manual(values=c("#246a24","#6666ff","#c6f800","#ffff66","#bcbdbc","#07d316","#ff0000","#66ccff")) +
  scale_x_continuous(breaks=c(1988,2000,2010,2015)) +
  theme_bw() +
  theme(legend.position="none") +
  theme(legend.title=element_text(size=13), legend.text=element_text(size=13)) +
  theme(axis.title=element_text(size=13), axis.text=element_text(size=11)) +
  theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# MT CALAVITE WILDLIFE SANCTUARY
# Read every per-year summary table for the Mt. Calavite WS directory.
setwd(Dir2)
mcws_files <- list.files()
# Stack the per-year vectors into a year x class matrix of percentages.
mcws_wide <- do.call(rbind, lapply(mcws_files, readdata))
colnames(mcws_wide) <- c("1", "2", "3", "4", "5", "6", "7", "8")
row.names(mcws_wide) <- c("1988", "2000", "2010", "2015")
# Reshape to long format: one row per (year, class) with its percentage.
data2 <- melt(mcws_wide, id.vars="years", variable.name="class", value.name="percentage")
colnames(data2) <- c("Years", "Class", "Percentage")
# Build the stacked area chart in one chained expression; the y-axis title
# is suppressed (axis.title.y) because this panel shares the y axis of the
# island panel to its left in the final 2x2 arrangement.
plot2 <- ggplot() +
  geom_area(aes(x = Years, y = Percentage,
                fill = factor(Class,
                              labels = c("Forest",
                                         "Mangrove",
                                         "Grassland",
                                         "Rice Paddy / Bare Soil",
                                         "Exposed Rock",
                                         "Shrub / Other Vegetation",
                                         "Built-up Area",
                                         "Water Body"))),
            data = data2) +
  labs(title="Mt. Calavite WS", x="Year", y="Percentage of Landscape", fill="Land Cover Category") +
  scale_fill_manual(values=c("#246a24","#6666ff","#c6f800","#ffff66","#bcbdbc","#07d316","#ff0000","#66ccff")) +
  scale_x_continuous(breaks=c(1988,2000,2010,2015)) +
  theme_bw() +
  theme(legend.position="none") +
  theme(legend.title=element_text(size=13), legend.text=element_text(size=13)) +
  theme(axis.title=element_text(size=13), axis.text=element_text(size=11), axis.title.y=element_blank()) +
  theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# MT SIBURAN KEY BIODIVERSITY AREA
# Read csv files in the directory and store as a list
setwd(Dir3)
filenames3 <- list.files()
# Keep each year's file separately because the yearly tables here do not
# all contain the same set of classes.
# (Relies on list.files() returning the files in chronological order --
# TODO confirm the file naming sorts by year.)
period1s <- filenames3[1] # 1988
period2s <- filenames3[2] # 2000
period3s <- filenames3[3] # 2010
period4s <- filenames3[4] # 2015
# Read each file into a named 1-row matrix of class percentages
vec1s <- do.call(rbind, lapply(period1s, readdata))
vec2s <- do.call(rbind, lapply(period2s, readdata))
vec3s <- do.call(rbind, lapply(period3s, readdata))
vec4s <- do.call(rbind, lapply(period4s, readdata))
# The 2000 and 2010 tables have only 6 classes; insert a zero column at
# position 6 (mapped to "Built-up Area" by the plot labels below) so that
# every year lines up as a 1x7 row.
mat1s <- t(as.matrix(c(vec1s[,1:ncol(vec1s)]))) # transposed 1x7 matrix
mat2s <- t(as.matrix(c(vec2s[,1:5], 0, vec2s[,6]))) # transposed 1x7 matrix
mat3s <- t(as.matrix(c(vec3s[,1:5], 0, vec3s[,6]))) # transposed 1x7 matrix
mat4s <- t(as.matrix(c(vec4s[,1:ncol(vec4s)]))) # transposed 1x7 matrix
# Stack the four yearly rows into a 4x7 year-by-class matrix
temp3 <- rbind(mat1s, mat2s, mat3s, mat4s)
colnames(temp3) <- c("1","2","3","4","5","6","7")
row.names(temp3) <- c("1988","2000","2010","2015") # Label each row with its year
# Convert wide format matrix into long format data frame.
# NOTE(review): melt() on a matrix appears to derive the first two columns
# from the dimnames and ignore id.vars/variable.name -- hence the explicit
# renaming on the next line; confirm against the reshape2 docs.
data3 <- melt(temp3, id.vars="years", variable.name="class", value.name="percentage")
colnames(data3) <- c("Years","Class","Percentage")
# Create stacked area plot (7 categories; this site has no "Mangrove" class)
plot3 <- ggplot() + geom_area(aes(x=Years, y=Percentage, fill=factor(Class,
                                  labels=c("Forest",
                                           "Grassland",
                                           "Rice Paddy / Bare Soil",
                                           "Exposed Rock",
                                           "Shrub / Other Vegetation",
                                           "Built-up Area",
                                           "Water Body"))),
                              data=data3)
plot3 <- plot3 + labs(title="Mt. Siburan KBA", x="Year", y="Percentage of Landscape", fill="Land Cover Category")
plot3 <- plot3 + scale_fill_manual(values=c("#246a24","#c6f800","#ffff66",
                                            "#bcbdbc","#07d316","#ff0000","#66ccff"))
plot3 <- plot3 + scale_x_continuous(breaks=c(1988,2000,2010,2015))
plot3 <- plot3 + theme_bw()
plot3 <- plot3 + theme(legend.position="none")
plot3 <- plot3 + theme(legend.title=element_text(size=13), legend.text=element_text(size=13))
plot3 <- plot3 + theme(axis.title=element_text(size=13), axis.text=element_text(size=11), axis.title.y=element_blank())
plot3 <- plot3 + theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# MTS IGLIT-BACO NATIONAL PARK
# Read csv files in the directory and store as a list
setwd(Dir4)
filenames4 <- list.files()
# Keep each year's file separately because the yearly tables contain
# different subsets of classes (see the column removals below).
period1b <- filenames4[1] # 1988
period2b <- filenames4[2] # 2000
period3b <- filenames4[3] # 2010
period4b <- filenames4[4] # 2015
# Read each file into a named 1-row matrix of class percentages
vec1b <- do.call(rbind, lapply(period1b, readdata))
vec2b <- do.call(rbind, lapply(period2b, readdata))
vec3b <- do.call(rbind, lapply(period3b, readdata))
vec4b <- do.call(rbind, lapply(period4b, readdata))
# Reduce every year to the same five categories (Forest, Grassland,
# Rice Paddy / Bare Soil, Exposed Rock, Shrub / Other Vegetation -- see
# the plot labels below).
# NOTE(review): the dropped indices differ per year, presumably because
# each year's table holds a different subset of classes; verify that the
# five retained columns really align across the four years.
mat1b <- t(as.matrix(vec1b[,-c(2,7)]))
mat2b <- t(as.matrix(vec2b[,-c(2,7)]))
mat3b <- t(as.matrix(vec3b[,-c(6)]))
mat4b <- t(as.matrix(vec4b[,-c(6:7)]))
# Stack the four yearly rows into a 4x5 year-by-class matrix
temp4 <- rbind(mat1b, mat2b, mat3b, mat4b)
colnames(temp4) <- c("1","2","3","4","5")
row.names(temp4) <- c("1988","2000","2010","2015") # Label each row with its year
# Convert wide format matrix into long format data frame
# (melt() on a matrix takes the first two columns from the dimnames;
# id.vars/variable.name look inert here, hence the renaming below)
data4 <- melt(temp4, id.vars="years", variable.name="class", value.name="percentage")
colnames(data4) <- c("Years","Class","Percentage")
# Create stacked area plot (5 categories only for this site)
plot4 <- ggplot() + geom_area(aes(x=Years, y=Percentage, fill=factor(Class,
                                  labels=c("Forest",
                                           "Grassland",
                                           "Rice Paddy / Bare Soil",
                                           "Exposed Rock",
                                           "Shrub / Other Vegetation"))),
                              data=data4)
plot4 <- plot4 + labs(title="Mts. Iglit-Baco NP", x="Year", y="Percentage of Landscape", fill="Land Cover Category")
plot4 <- plot4 + scale_fill_manual(values=c("#246a24","#c6f800","#ffff66","#bcbdbc","#07d316"))
plot4 <- plot4 + scale_x_continuous(breaks=c(1988,2000,2010,2015))
plot4 <- plot4 + theme_bw()
plot4 <- plot4 + theme(legend.position="none")
plot4 <- plot4 + theme(legend.title=element_text(size=13), legend.text=element_text(size=13))
plot4 <- plot4 + theme(axis.title=element_text(size=13), axis.text=element_text(size=11), axis.title.y=element_blank())
plot4 <- plot4 + theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# Expose ggplot2 Layouts -----------------
# Preview the four panels in a 2x2 grid of equal-width columns.
# expose_layout() is from egg; the two positional FALSE arguments suppress
# drawing/new-page behaviour (presumably draw/newpage -- confirm against
# the egg documentation).
plotlayout <- lapply(list(plot1, plot2, plot3, plot4), expose_layout, FALSE, FALSE)
grid.arrange(
  grobs = plotlayout,
  widths = c(2,2),
  layout_matrix = rbind(c(1,2),
                        c(3,4))
)
# Axis-aligned arrangement via egg::ggarrange.
# NOTE(review): mergeplot is not referenced again below -- confirm whether
# this line can be removed.
mergeplot <- ggarrange(plot1, plot2, plot3, plot4, widths=c(1,1), heights=c(1,1))
# Function to Combine Legend -------------
# Arrange any number of ggplots in an ncol x nrow grid that shares a single
# legend. The legend is taken from the first plot (all plots are assumed to
# have equivalent legends) and placed at `position` ("bottom" or "right").
# The combined figure is drawn on a new grid page and the assembled gtable
# is returned invisibly so callers (e.g. ggsave) can reuse it.
grid_arrange_shared_legend <-
  function(...,
           ncol = length(list(...)),
           nrow = 1,
           position = c("bottom", "right"))
  {
    plots <- list(...)
    position <- match.arg(position)
    # Render the first plot with its legend enabled and extract the
    # "guide-box" grob (the legend) from the resulting gtable.
    g <-
      ggplotGrob(plots[[1]] + theme(legend.position = position))$grobs
    legend <- g[[which(vapply(g, function(x)
      x$name, character(1)) == "guide-box")]]
    # Space the legend needs. The gtable fields are `heights`/`widths`
    # (plural); the previous code used $height/$width, which only worked
    # through `$` partial matching on lists -- spell the names out.
    lheight <- sum(legend$heights)
    lwidth <- sum(legend$widths)
    # Strip the legend from every panel so it is drawn exactly once.
    gl <- lapply(plots, function(x)
      x + theme(legend.position = "none"))
    gl <- c(gl, ncol = ncol, nrow = nrow)
    # Stack the panel grid and the legend, giving the legend only the room
    # it needs and the panels the rest of the page.
    combined <- switch(
      position,
      "bottom" = arrangeGrob(
        do.call(arrangeGrob, gl),
        legend,
        ncol = 1,
        heights = unit.c(unit(1, "npc") - lheight, lheight)
      ),
      "right" = arrangeGrob(
        do.call(arrangeGrob, gl),
        legend,
        ncol = 2,
        widths = unit.c(unit(1, "npc") - lwidth, lwidth)
      )
    )
    grid.newpage()
    grid.draw(combined)
    # return gtable invisibly
    invisible(combined)
  }
# Combine legend of merged plot. Build the shared-legend figure once and
# keep the returned gtable so it can be written to disk below without
# re-running (and re-drawing) the whole arrangement a second time.
combined_plot <- grid_arrange_shared_legend(plot1, plot2, plot3, plot4)
# Save Plots -----------------------------
setwd(DirMAIN)
# ggsave()'s first parameter is `filename`; the previous call passed the
# plot positionally and relied on partial matching of `file=` -- name both
# arguments explicitly instead.
ggsave(filename="StackedArea_Combined_v1.pdf", plot=combined_plot, width=30, height=15, units="cm", dpi=300)
| /scripts/R_Net-Change-Stacked-Area_Combined.R | no_license | dondealban/mindoro | R | false | false | 11,413 | r | # Script Description --------------------
# This script creates a stacked area graph of multi-temporal land cover data, particularly the land cover maps produced
# using Landsat data at four time-points: 1988, 2000, 2010, and 2015 for Mindoro Island, Philippines. The land cover maps
# consist of 8 categories including: forest, mangrove, grassland, rice paddy/bare soil, exposed rock, shrubs/other
# vegetation, built-up area, and water.
#
# Script By: Jose Don T De Alban
# Date Created: 20 Nov 2017
# Last Modified: 08 Apr 2021
# Set Working Directories ---------------
# Absolute paths on the original author's machine -- update before reuse.
Dir1 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/mindoro_island/"  # island-wide summaries
Dir2 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/pa_mcws/"  # Mt. Calavite Wildlife Sanctuary
Dir3 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/kba_siburan/"  # Mt. Siburan Key Biodiversity Area
Dir4 <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/pa_mibnp/"  # Mts. Iglit-Baco National Park
DirMAIN <- "/Users/dondealban/Dropbox/Research/mindoro/stacked area/"  # output location for the combined figure
# Load Libraries and Data ---------------
library(egg)
library(ggplot2)
library(grid)
library(gtable)
library(reshape2)
library(tidyverse)
# Function to Read Data Files -----------
# Read one tab-separated land-cover summary table and return a named numeric
# vector: values are the per-class percentages (column 3), names are the
# land-cover class codes (column 1).
readdata <- function(filename) {
  tbl <- read.csv(filename, sep = "\t")
  # Column 3 holds the percentage of landscape; column 1 holds the class code
  # (coerced to character when used as names).
  setNames(tbl[[3]], tbl[[1]])
}
# Generate Study Area Plots -------------
# MINDORO ISLAND
# Read every per-year summary table in the island-wide directory.
setwd(Dir1)
island_files <- list.files()
# Stack the per-year vectors into a year x class matrix of percentages.
island_wide <- do.call(rbind, lapply(island_files, readdata))
colnames(island_wide) <- c("1", "2", "3", "4", "5", "6", "7", "8")
row.names(island_wide) <- c("1988", "2000", "2010", "2015")
# Reshape to long format: one row per (year, class) with its percentage.
data1 <- melt(island_wide, id.vars="years", variable.name="class", value.name="percentage")
colnames(data1) <- c("Years", "Class", "Percentage")
# Build the stacked area chart for the whole island in one chained expression.
plot1 <- ggplot() +
  geom_area(aes(x = Years, y = Percentage,
                fill = factor(Class,
                              labels = c("Forest",
                                         "Mangrove",
                                         "Grassland",
                                         "Rice Paddy / Bare Soil",
                                         "Exposed Rock",
                                         "Shrub / Other Vegetation",
                                         "Built-up Area",
                                         "Water Body"))),
            data = data1) +
  labs(title="Mindoro Island", x="Year", y="Percentage of Landscape", fill="Land Cover Category") +
  scale_fill_manual(values=c("#246a24","#6666ff","#c6f800","#ffff66","#bcbdbc","#07d316","#ff0000","#66ccff")) +
  scale_x_continuous(breaks=c(1988,2000,2010,2015)) +
  theme_bw() +
  theme(legend.position="none") +
  theme(legend.title=element_text(size=13), legend.text=element_text(size=13)) +
  theme(axis.title=element_text(size=13), axis.text=element_text(size=11)) +
  theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# MT CALAVITE WILDLIFE SANCTUARY
# Read every per-year summary table for the Mt. Calavite WS directory.
setwd(Dir2)
mcws_files <- list.files()
# Stack the per-year vectors into a year x class matrix of percentages.
mcws_wide <- do.call(rbind, lapply(mcws_files, readdata))
colnames(mcws_wide) <- c("1", "2", "3", "4", "5", "6", "7", "8")
row.names(mcws_wide) <- c("1988", "2000", "2010", "2015")
# Reshape to long format: one row per (year, class) with its percentage.
data2 <- melt(mcws_wide, id.vars="years", variable.name="class", value.name="percentage")
colnames(data2) <- c("Years", "Class", "Percentage")
# Build the stacked area chart in one chained expression; the y-axis title
# is suppressed (axis.title.y) because this panel shares the y axis of the
# island panel to its left in the final 2x2 arrangement.
plot2 <- ggplot() +
  geom_area(aes(x = Years, y = Percentage,
                fill = factor(Class,
                              labels = c("Forest",
                                         "Mangrove",
                                         "Grassland",
                                         "Rice Paddy / Bare Soil",
                                         "Exposed Rock",
                                         "Shrub / Other Vegetation",
                                         "Built-up Area",
                                         "Water Body"))),
            data = data2) +
  labs(title="Mt. Calavite WS", x="Year", y="Percentage of Landscape", fill="Land Cover Category") +
  scale_fill_manual(values=c("#246a24","#6666ff","#c6f800","#ffff66","#bcbdbc","#07d316","#ff0000","#66ccff")) +
  scale_x_continuous(breaks=c(1988,2000,2010,2015)) +
  theme_bw() +
  theme(legend.position="none") +
  theme(legend.title=element_text(size=13), legend.text=element_text(size=13)) +
  theme(axis.title=element_text(size=13), axis.text=element_text(size=11), axis.title.y=element_blank()) +
  theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# MT SIBURAN KEY BIODIVERSITY AREA
# Read csv files in the directory and store as a list
setwd(Dir3)
filenames3 <- list.files()
# Keep each year's file separately because the yearly tables here do not
# all contain the same set of classes.
# (Relies on list.files() returning the files in chronological order --
# TODO confirm the file naming sorts by year.)
period1s <- filenames3[1] # 1988
period2s <- filenames3[2] # 2000
period3s <- filenames3[3] # 2010
period4s <- filenames3[4] # 2015
# Read each file into a named 1-row matrix of class percentages
vec1s <- do.call(rbind, lapply(period1s, readdata))
vec2s <- do.call(rbind, lapply(period2s, readdata))
vec3s <- do.call(rbind, lapply(period3s, readdata))
vec4s <- do.call(rbind, lapply(period4s, readdata))
# The 2000 and 2010 tables have only 6 classes; insert a zero column at
# position 6 (mapped to "Built-up Area" by the plot labels below) so that
# every year lines up as a 1x7 row.
mat1s <- t(as.matrix(c(vec1s[,1:ncol(vec1s)]))) # transposed 1x7 matrix
mat2s <- t(as.matrix(c(vec2s[,1:5], 0, vec2s[,6]))) # transposed 1x7 matrix
mat3s <- t(as.matrix(c(vec3s[,1:5], 0, vec3s[,6]))) # transposed 1x7 matrix
mat4s <- t(as.matrix(c(vec4s[,1:ncol(vec4s)]))) # transposed 1x7 matrix
# Stack the four yearly rows into a 4x7 year-by-class matrix
temp3 <- rbind(mat1s, mat2s, mat3s, mat4s)
colnames(temp3) <- c("1","2","3","4","5","6","7")
row.names(temp3) <- c("1988","2000","2010","2015") # Label each row with its year
# Convert wide format matrix into long format data frame.
# NOTE(review): melt() on a matrix appears to derive the first two columns
# from the dimnames and ignore id.vars/variable.name -- hence the explicit
# renaming on the next line; confirm against the reshape2 docs.
data3 <- melt(temp3, id.vars="years", variable.name="class", value.name="percentage")
colnames(data3) <- c("Years","Class","Percentage")
# Create stacked area plot (7 categories; this site has no "Mangrove" class)
plot3 <- ggplot() + geom_area(aes(x=Years, y=Percentage, fill=factor(Class,
                                  labels=c("Forest",
                                           "Grassland",
                                           "Rice Paddy / Bare Soil",
                                           "Exposed Rock",
                                           "Shrub / Other Vegetation",
                                           "Built-up Area",
                                           "Water Body"))),
                              data=data3)
plot3 <- plot3 + labs(title="Mt. Siburan KBA", x="Year", y="Percentage of Landscape", fill="Land Cover Category")
plot3 <- plot3 + scale_fill_manual(values=c("#246a24","#c6f800","#ffff66",
                                            "#bcbdbc","#07d316","#ff0000","#66ccff"))
plot3 <- plot3 + scale_x_continuous(breaks=c(1988,2000,2010,2015))
plot3 <- plot3 + theme_bw()
plot3 <- plot3 + theme(legend.position="none")
plot3 <- plot3 + theme(legend.title=element_text(size=13), legend.text=element_text(size=13))
plot3 <- plot3 + theme(axis.title=element_text(size=13), axis.text=element_text(size=11), axis.title.y=element_blank())
plot3 <- plot3 + theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# MTS IGLIT-BACO NATIONAL PARK
# Read csv files in the directory and store as a list
setwd(Dir4)
filenames4 <- list.files()
# Keep each year's file separately because the yearly tables contain
# different subsets of classes (see the column removals below).
period1b <- filenames4[1] # 1988
period2b <- filenames4[2] # 2000
period3b <- filenames4[3] # 2010
period4b <- filenames4[4] # 2015
# Read each file into a named 1-row matrix of class percentages
vec1b <- do.call(rbind, lapply(period1b, readdata))
vec2b <- do.call(rbind, lapply(period2b, readdata))
vec3b <- do.call(rbind, lapply(period3b, readdata))
vec4b <- do.call(rbind, lapply(period4b, readdata))
# Reduce every year to the same five categories (Forest, Grassland,
# Rice Paddy / Bare Soil, Exposed Rock, Shrub / Other Vegetation -- see
# the plot labels below).
# NOTE(review): the dropped indices differ per year, presumably because
# each year's table holds a different subset of classes; verify that the
# five retained columns really align across the four years.
mat1b <- t(as.matrix(vec1b[,-c(2,7)]))
mat2b <- t(as.matrix(vec2b[,-c(2,7)]))
mat3b <- t(as.matrix(vec3b[,-c(6)]))
mat4b <- t(as.matrix(vec4b[,-c(6:7)]))
# Stack the four yearly rows into a 4x5 year-by-class matrix
temp4 <- rbind(mat1b, mat2b, mat3b, mat4b)
colnames(temp4) <- c("1","2","3","4","5")
row.names(temp4) <- c("1988","2000","2010","2015") # Label each row with its year
# Convert wide format matrix into long format data frame
# (melt() on a matrix takes the first two columns from the dimnames;
# id.vars/variable.name look inert here, hence the renaming below)
data4 <- melt(temp4, id.vars="years", variable.name="class", value.name="percentage")
colnames(data4) <- c("Years","Class","Percentage")
# Create stacked area plot (5 categories only for this site)
plot4 <- ggplot() + geom_area(aes(x=Years, y=Percentage, fill=factor(Class,
                                  labels=c("Forest",
                                           "Grassland",
                                           "Rice Paddy / Bare Soil",
                                           "Exposed Rock",
                                           "Shrub / Other Vegetation"))),
                              data=data4)
plot4 <- plot4 + labs(title="Mts. Iglit-Baco NP", x="Year", y="Percentage of Landscape", fill="Land Cover Category")
plot4 <- plot4 + scale_fill_manual(values=c("#246a24","#c6f800","#ffff66","#bcbdbc","#07d316"))
plot4 <- plot4 + scale_x_continuous(breaks=c(1988,2000,2010,2015))
plot4 <- plot4 + theme_bw()
plot4 <- plot4 + theme(legend.position="none")
plot4 <- plot4 + theme(legend.title=element_text(size=13), legend.text=element_text(size=13))
plot4 <- plot4 + theme(axis.title=element_text(size=13), axis.text=element_text(size=11), axis.title.y=element_blank())
plot4 <- plot4 + theme(panel.grid.major=element_blank(), panel.grid.minor=element_blank())
# Expose ggplot2 Layouts -----------------
# Preview the four panels in a 2x2 grid of equal-width columns.
# expose_layout() is from egg; the two positional FALSE arguments suppress
# drawing/new-page behaviour (presumably draw/newpage -- confirm against
# the egg documentation).
plotlayout <- lapply(list(plot1, plot2, plot3, plot4), expose_layout, FALSE, FALSE)
grid.arrange(
  grobs = plotlayout,
  widths = c(2,2),
  layout_matrix = rbind(c(1,2),
                        c(3,4))
)
# Axis-aligned arrangement via egg::ggarrange.
# NOTE(review): mergeplot is not referenced again below -- confirm whether
# this line can be removed.
mergeplot <- ggarrange(plot1, plot2, plot3, plot4, widths=c(1,1), heights=c(1,1))
# Function to Combine Legend -------------
# Arrange any number of ggplots in an ncol x nrow grid that shares a single
# legend. The legend is taken from the first plot (all plots are assumed to
# have equivalent legends) and placed at `position` ("bottom" or "right").
# The combined figure is drawn on a new grid page and the assembled gtable
# is returned invisibly so callers (e.g. ggsave) can reuse it.
grid_arrange_shared_legend <-
  function(...,
           ncol = length(list(...)),
           nrow = 1,
           position = c("bottom", "right"))
  {
    plots <- list(...)
    position <- match.arg(position)
    # Render the first plot with its legend enabled and extract the
    # "guide-box" grob (the legend) from the resulting gtable.
    g <-
      ggplotGrob(plots[[1]] + theme(legend.position = position))$grobs
    legend <- g[[which(vapply(g, function(x)
      x$name, character(1)) == "guide-box")]]
    # Space the legend needs. The gtable fields are `heights`/`widths`
    # (plural); the previous code used $height/$width, which only worked
    # through `$` partial matching on lists -- spell the names out.
    lheight <- sum(legend$heights)
    lwidth <- sum(legend$widths)
    # Strip the legend from every panel so it is drawn exactly once.
    gl <- lapply(plots, function(x)
      x + theme(legend.position = "none"))
    gl <- c(gl, ncol = ncol, nrow = nrow)
    # Stack the panel grid and the legend, giving the legend only the room
    # it needs and the panels the rest of the page.
    combined <- switch(
      position,
      "bottom" = arrangeGrob(
        do.call(arrangeGrob, gl),
        legend,
        ncol = 1,
        heights = unit.c(unit(1, "npc") - lheight, lheight)
      ),
      "right" = arrangeGrob(
        do.call(arrangeGrob, gl),
        legend,
        ncol = 2,
        widths = unit.c(unit(1, "npc") - lwidth, lwidth)
      )
    )
    grid.newpage()
    grid.draw(combined)
    # return gtable invisibly
    invisible(combined)
  }
# Combine legend of merged plot. Build the shared-legend figure once and
# keep the returned gtable so it can be written to disk below without
# re-running (and re-drawing) the whole arrangement a second time.
combined_plot <- grid_arrange_shared_legend(plot1, plot2, plot3, plot4)
# Save Plots -----------------------------
setwd(DirMAIN)
# ggsave()'s first parameter is `filename`; the previous call passed the
# plot positionally and relied on partial matching of `file=` -- name both
# arguments explicitly instead.
ggsave(filename="StackedArea_Combined_v1.pdf", plot=combined_plot, width=30, height=15, units="cm", dpi=300)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_td.R
\name{create_td}
\alias{create_td}
\title{Create Topics Docs JSON}
\usage{
create_td(x, count = 15)
}
\arguments{
\item{x}{A doc topic matrix.}
\item{count}{**optional** Number of documents per topic to store (default=15)}
}
\value{
A string containing the JSON.
}
\description{
A function to create a string containing one array which has
the top docs per topic.
If doc topics is incredibly large, then just pass a sample.
Output string has this format:
var td_inds = [11,23...];
}
\examples{
\dontrun{
td_small = create_td(dt)
td_small = create_td(dt, 20)
}
}
| /man/create_td.Rd | no_license | ucdavisdatalab/ldaviewer | R | false | true | 664 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_td.R
\name{create_td}
\alias{create_td}
\title{Create Topics Docs JSON}
\usage{
create_td(x, count = 15)
}
\arguments{
\item{x}{A doc topic matrix.}
\item{count}{**optional** Number of documents per topic to store (default=15)}
}
\value{
A string containing the JSON.
}
\description{
A function to create a string containing one array which has
the top docs per topic.
If doc topics is incredibly large, then just pass a sample.
Output string has this format:
var td_inds = [11,23...];
}
\examples{
\dontrun{
td_small = create_td(dt)
td_small = create_td(dt, 20)
}
}
|
# simulate 100 intervals and plot them.
# CIsim() simulates `samples` datasets of size n from rdist (here a
# Normal(500, 100) population) and computes a confidence interval for each
# using `method` (ci() with the known sd = 100); `estimand` is the true
# value the intervals should cover.
results <- CIsim(n=20, samples=100, estimand=500,
                 rdist=rnorm, args=list(mean=500,sd=100),
                 method=ci, method.args=list(sd=100))
# Draw the 100 intervals (xYplot, presumably from Hmisc -- confirm) against
# their sample index, colour-coded by the `cover` grouping variable (the
# colour-to-level mapping follows the factor levels of cover).
coverPlot <- xYplot(Cbind(estimate,lower,upper) ~ sample, results,
                    groups=cover, col=c('black','gray40'),cap=0,lwd=2,pch=16)
| /inst/snippet/CI-vis.R | no_license | cran/fastR | R | false | false | 297 | r | # simulate 100 intervals and plot them.
# CIsim() simulates `samples` datasets of size n from rdist (here a
# Normal(500, 100) population) and computes a confidence interval for each
# using `method` (ci() with the known sd = 100); `estimand` is the true
# value the intervals should cover.
results <- CIsim(n=20, samples=100, estimand=500,
                 rdist=rnorm, args=list(mean=500,sd=100),
                 method=ci, method.args=list(sd=100))
# Draw the 100 intervals (xYplot, presumably from Hmisc -- confirm) against
# their sample index, colour-coded by the `cover` grouping variable (the
# colour-to-level mapping follows the factor levels of cover).
coverPlot <- xYplot(Cbind(estimate,lower,upper) ~ sample, results,
                    groups=cover, col=c('black','gray40'),cap=0,lwd=2,pch=16)
|
# Name of the job-script template; the ALL-CAPS placeholders (RUNDIR,
# PERCENTILE, LOWER, UPPER) are substituted further below.
template_file = 'run_compute_exceedance_rates_at_epistemic_uncertainty_RUNDIR_PERCENTILE_LOWER_UPPER.sh'
template_script = readLines(template_file)
# Model-run output directory whose rasters are post-processed.
rundir = 'ptha18-BunburyBusseltonRevised-sealevel60cm'
# Determine the number of domains in the multidomain by counting depth raster files in one case.
# (Assumes every scenario has the same number of domains as the first
# random_outerrisesunda run matched by Sys.glob -- TODO confirm.)
testfile = Sys.glob(paste0('../../swals/OUTPUTS/', rundir, '/random_outerrisesunda/*/raster_output_files.tar'))[1]
max_domains = as.numeric(system(paste0('tar --list -f ', testfile, ' | grep "depth_" | wc -w'), intern=TRUE))
# Define sets of domains by upper/lower indices. Each set will be run on a separate job.
# Notice we skip domain 1 -- it is too large, we get memory failures. Solution
# would be to not merge subdomains during raster creation.
NJOBS = 12 # Number of jobs (for each percentile)
dbounds = round(seq(1, max_domains, length=NJOBS))
n = length(dbounds)
uppers = dbounds[-1]
lowers = dbounds[-n] + 1 # Skip domain 1
percentiles = c("0.16", "0.84")
for(pp in percentiles){
for(i in 1:length(lowers)){
script = template_script
script = gsub('_PERCENTILE_', pp, script)
script = gsub('_LOWER_', lowers[i], script)
script = gsub('_UPPER_', uppers[i], script)
script = gsub('_RUNDIR_', rundir, script)
outfile = template_file
outfile = gsub('PERCENTILE', pp, outfile)
outfile = gsub('LOWER', lowers[i], outfile)
outfile = gsub('UPPER', uppers[i], outfile)
outfile = gsub('RUNDIR', rundir, outfile)
cat(script, file=outfile, sep="\n")
}
}
| /misc/SW_WA_2021_2024/bunbury_busselton/analysis/probabilistic_inundation/make_exceedance_rate_jobs.R | permissive | GeoscienceAustralia/ptha | R | false | false | 1,567 | r | template_file = 'run_compute_exceedance_rates_at_epistemic_uncertainty_RUNDIR_PERCENTILE_LOWER_UPPER.sh'
# Read the template job script (placeholders are replaced per job below).
template_script = readLines(template_file)
rundir = 'ptha18-BunburyBusseltonRevised-sealevel60cm'
# Determine the number of domains in the multidomain by counting depth raster files in one case.
testfile = Sys.glob(paste0('../../swals/OUTPUTS/', rundir, '/random_outerrisesunda/*/raster_output_files.tar'))[1]
max_domains = as.numeric(system(paste0('tar --list -f ', testfile, ' | grep "depth_" | wc -w'), intern=TRUE))
# Define sets of domains by upper/lower indices. Each set will be run on a separate job.
# Notice we skip domain 1 -- it is too large, we get memory failures. Solution
# would be to not merge subdomains during raster creation.
# NOTE(review): NJOBS break points produce NJOBS - 1 scripts per percentile.
NJOBS = 12 # Number of jobs (for each percentile)
dbounds = round(seq(1, max_domains, length=NJOBS))
n = length(dbounds)
uppers = dbounds[-1]
lowers = dbounds[-n] + 1 # Skip domain 1
percentiles = c("0.16", "0.84")
# Substitute placeholders both in the script body (_PERCENTILE_ etc.) and in
# the template file name (PERCENTILE etc.), then write one script per job.
for(pp in percentiles){
for(i in 1:length(lowers)){
script = template_script
script = gsub('_PERCENTILE_', pp, script)
script = gsub('_LOWER_', lowers[i], script)
script = gsub('_UPPER_', uppers[i], script)
script = gsub('_RUNDIR_', rundir, script)
outfile = template_file
outfile = gsub('PERCENTILE', pp, outfile)
outfile = gsub('LOWER', lowers[i], outfile)
outfile = gsub('UPPER', uppers[i], outfile)
outfile = gsub('RUNDIR', rundir, outfile)
cat(script, file=outfile, sep="\n")
}
}
|
# Is normality testing 'essentially useless'?
#https://stats.stackexchange.com/questions/2492/is-normality-testing-essentially-useless
#The last line checks which fraction of the simulations for every
#sample size deviate significantly from normality.
#So in 83% of the cases, a sample of 5000 observations deviates
#significantly from normality according to Shapiro-Wilks.
#Yet, if you see the qq plots, you would never ever decide on a deviation from normality.
#Below you see as an example the qq-plots for one set of random samples
set.seed(981677672)
# 100 replicates; each records Shapiro-Wilk p-values for samples of size
# 10/100/1000/5000 drawn from a normal shifted by a small periodic offset.
x <- replicate(100, { # generates 100 different tests on each distribution
c(shapiro.test(rnorm(10)+c(1,0,2,0,1))$p.value, #$
shapiro.test(rnorm(100)+c(1,0,2,0,1))$p.value, #$
shapiro.test(rnorm(1000)+c(1,0,2,0,1))$p.value, #$
shapiro.test(rnorm(5000)+c(1,0,2,0,1))$p.value) #$
} # rnorm gives a random draw from the normal distribution
)
rownames(x) <- c("n10","n100","n1000","n5000")
rowMeans(x<0.05) # the proportion of significant deviations
#--------------------------------------------------------------------------------------------
# BUG FIX: the reference lines previously called qqline(y, ...), but `y` is
# not defined until later in the script (error at run time); the line must be
# fitted to the same data that qqnorm() just plotted.
qqnorm(x[1,]); qqline(x[1,], col = 2)
qqnorm(x[2,]); qqline(x[2,], col = 2)
qqnorm(x[3,]); qqline(x[3,], col = 2)
qqnorm(x[4,]); qqline(x[4,], col = 2)
#--------------------------------------------------------------------------------------------
library(nortest)
# Same experiment with the Anderson-Darling test (nortest::ad.test).
y <- replicate(100, { # generates 100 different tests on each distribution
c(ad.test(rnorm(10)+c(1,0,2,0,1))$p.value, #$
ad.test(rnorm(100)+c(1,0,2,0,1))$p.value, #$
ad.test(rnorm(1000)+c(1,0,2,0,1))$p.value, #$
ad.test(rnorm(5000)+c(1,0,2,0,1))$p.value) #$
} # rnorm gives a random draw from the normal distribution
)
rownames(y) <- c("n10","n100","n1000","n5000")
rowMeans(y<0.05) # the proportion of significant deviations
#--------------------------------------------------------------------------------------------
# BUG FIX: qqline() was passed the whole matrix `y`; use the row being plotted.
qqnorm(y[1,]); qqline(y[1,], col = 2)
qqnorm(y[2,]); qqline(y[2,], col = 2)
qqnorm(y[3,]); qqline(y[3,], col = 2)
qqnorm(y[4,]); qqline(y[4,], col = 2)
#--------------------------------------------------------------------------------------------
# Use the Shapiro Wilk because it's often powerful, widely available and many people are familiar with it (removing the need to explain in detail what it is if you use it in a paper) -- just don't use it under the illusion that it's "the best normality test". There isn't one best normality test.
| /Livro de Estatistica Basica/Teste de normalidade.R | no_license | DATAUNIRIO/Modelos_Basicos_e_Testes | R | false | false | 2,440 | r |
# Is normality testing 'essentially useless'?
#https://stats.stackexchange.com/questions/2492/is-normality-testing-essentially-useless
#The last line checks which fraction of the simulations for every
#sample size deviate significantly from normality.
#So in 83% of the cases, a sample of 5000 observations deviates
#significantly from normality according to Shapiro-Wilks.
#Yet, if you see the qq plots, you would never ever decide on a deviation from normality.
#Below you see as an example the qq-plots for one set of random samples
set.seed(981677672)
# 100 replicates; each records Shapiro-Wilk p-values for samples of size
# 10/100/1000/5000 drawn from a normal shifted by a small periodic offset.
x <- replicate(100, { # generates 100 different tests on each distribution
c(shapiro.test(rnorm(10)+c(1,0,2,0,1))$p.value, #$
shapiro.test(rnorm(100)+c(1,0,2,0,1))$p.value, #$
shapiro.test(rnorm(1000)+c(1,0,2,0,1))$p.value, #$
shapiro.test(rnorm(5000)+c(1,0,2,0,1))$p.value) #$
} # rnorm gives a random draw from the normal distribution
)
rownames(x) <- c("n10","n100","n1000","n5000")
rowMeans(x<0.05) # the proportion of significant deviations
#--------------------------------------------------------------------------------------------
# BUG FIX: qqline(y, ...) referenced `y` before it was defined (run-time
# error); fit the reference line to the data just plotted instead.
qqnorm(x[1,]); qqline(x[1,], col = 2)
qqnorm(x[2,]); qqline(x[2,], col = 2)
qqnorm(x[3,]); qqline(x[3,], col = 2)
qqnorm(x[4,]); qqline(x[4,], col = 2)
#--------------------------------------------------------------------------------------------
library(nortest)
# Same experiment with the Anderson-Darling test (nortest::ad.test).
y <- replicate(100, { # generates 100 different tests on each distribution
c(ad.test(rnorm(10)+c(1,0,2,0,1))$p.value, #$
ad.test(rnorm(100)+c(1,0,2,0,1))$p.value, #$
ad.test(rnorm(1000)+c(1,0,2,0,1))$p.value, #$
ad.test(rnorm(5000)+c(1,0,2,0,1))$p.value) #$
} # rnorm gives a random draw from the normal distribution
)
rownames(y) <- c("n10","n100","n1000","n5000")
rowMeans(y<0.05) # the proportion of significant deviations
#--------------------------------------------------------------------------------------------
# BUG FIX: qqline() was passed the whole matrix `y`; use the row being plotted.
qqnorm(y[1,]); qqline(y[1,], col = 2)
qqnorm(y[2,]); qqline(y[2,], col = 2)
qqnorm(y[3,]); qqline(y[3,], col = 2)
qqnorm(y[4,]); qqline(y[4,], col = 2)
#--------------------------------------------------------------------------------------------
# Use the Shapiro Wilk because it's often powerful, widely available and many people are familiar with it (removing the need to explain in detail what it is if you use it in a paper) -- just don't use it under the illusion that it's "the best normality test". There isn't one best normality test.
|
Restore <-
function (break_point, gold_std) 
{
    # Restore the state of an interrupted HSROC Gibbs sampler run.
    #
    # break_point : integer code for the posterior-sample file that was being
    #   written when the run stopped (0 = nothing to restore):
    #     1 alpha          2 theta         3 Sens1 (S1)    4 Spec1 (C1)
    #     5 PI             6 LAMBDA        7 sigma.alpha   8 capital_THETA
    #     9 sigma.theta   10 beta         11 Sens2 (S2)   12 Spec2 (C2)
    # gold_std : if FALSE, the reference-standard draws (Sens2/Spec2) are
    #   restored as well (written to Restore3.txt).
    #
    # Side effects: appends the last complete draw of every parameter to
    # Restore.txt / Restore2.txt (and Restore3.txt when gold_std == FALSE)
    # and appends a human-readable key to Restore_index.txt, all in the
    # working directory.
    if (break_point > 0) {
        # File holding the draws for each break_point code.  BUG FIX: the
        # original nested chain tested `break_point == 11` twice, so code 12
        # (Spec2) never set `k` and the function failed with
        # "object 'k' not found"; the second test must be against 12.
        trace_files <- c("alpha.txt", "theta.txt", "Sens1.txt", "Spec1.txt", 
            "PI.txt", "LAMBDA.txt", "sigma.alpha.txt", "capital_THETA.txt", 
            "sigma.theta.txt", "beta.txt", "Sens2.txt", "Spec2.txt")
        trace <- read.table(trace_files[break_point])
        # Scan upward from the bottom of the interrupted file until a row
        # containing NA is met; `k` then indexes the last usable iteration.
        # This reproduces the original while-loop semantics exactly,
        # including reading only column 1 of beta.txt for code 10.
        k <- length(trace[, 1])
        x <- 0
        while (is.na(x) == FALSE) {
            if (break_point == 10) {
                x <- trace[k, 1]
            }
            else {
                x <- sum(trace[k, ])
            }
            k <- k - 1
        }
        # Pull row k (the last complete iteration) of every parameter file.
        vec.alpha = read.table("alpha.txt")[k, ]
        vec.theta = as.vector(read.table("theta.txt")[k, ])
        vec.S1 = read.table("Sens1.txt")[k, ]
        vec.C1 = read.table("Spec1.txt")[k, ]
        vec.PI = read.table("PI.txt")[k, ]
        vec.LAMBDA = read.table("LAMBDA.txt")[k, 1]
        vec.sigma.alpha = read.table("sigma.alpha.txt")[k, 1]
        vec.THETA = read.table("capital_THETA.txt")[k, 1]
        vec.sigma.theta = read.table("sigma.theta.txt")[k, 1]
        vec.beta = read.table("beta.txt")[k, 1]
        # Number of studies M.  (The original computed length(vec.alpha[k, ]),
        # re-indexing the already-extracted row: same value for multi-study
        # files, but an error for a single-study run; length() of the
        # extracted row is equivalent and safe.)
        columns = length(vec.alpha)
        write.table(rbind(vec.alpha, vec.theta, vec.S1, vec.C1, 
            vec.PI), file = "Restore.txt", append = TRUE, row.names = FALSE, 
            col.names = FALSE)
        write(c(vec.LAMBDA, vec.sigma.alpha, vec.THETA, vec.sigma.theta, 
            vec.beta), file = "Restore2.txt", append = TRUE)
        # Human-readable description of the layout of the Restore files.
        write(paste("______________________________________________________"), 
            file = "Restore_index.txt", append = TRUE)
        write(paste("\t Restore.txt "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Row 1 : alpha parameters for all M = ", 
            columns, " study(ies)\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Row 2 : theta parameters for all M = ", 
            columns, " study(ies)\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Row 3 : sensitivity of test under evaluation (S1) for all M = ", 
            columns, " study(ies)\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Row 4 : specificity of test under evaluation (C1) for all M = ", 
            columns, " study(ies)\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Row 5 : prevalence for all M = ", columns, 
            " study(ies)\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("______________________________________________________"), 
            file = "Restore_index.txt", append = TRUE)
        write(paste("\t Restore2.txt "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Column 1 : LAMBDA parameter\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Column 2 : sigma alpha parameter\t "), 
            file = "Restore_index.txt", append = TRUE)
        write(paste("Column 3 : THETA parameter\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("Column 4 : sigma theta parameter\t "), 
            file = "Restore_index.txt", append = TRUE)
        write(paste("Column 5 : beta parameter\t "), file = "Restore_index.txt", 
            append = TRUE)
        write(paste("______________________________________________________"), 
            file = "Restore_index.txt", append = TRUE)
        if (gold_std == FALSE) {
            # No gold standard: also restore the reference test's draws.
            vec.S2 = read.table("Sens2.txt")[k, ]
            vec.C2 = read.table("Spec2.txt")[k, ]
            refstd = length(read.table("Sens2.txt")[k, ])
            write(t(cbind(vec.S2, vec.C2)), file = "Restore3.txt", 
                append = TRUE, ncolumns = refstd)
            write(paste("\t Restore3.txt "), file = "Restore_index.txt", 
                append = TRUE)
            write(paste("Row 1 : sensitivity of reference test (S2) \t "), 
                file = "Restore_index.txt", append = TRUE)
            write(paste("Row 2 : specificity of reference test (C2) \t "), 
                file = "Restore_index.txt", append = TRUE)
        }
    }
    else {
        if (break_point == 0) {
            columns = NULL
        }
    }
}
| /src/R/HSROC/R/Restore.R | no_license | bwallace/OpenMeta-analyst- | R | false | false | 9,462 | r | Restore <-
function (break_point, gold_std)
{
if (break_point > 0) {
if (break_point == 1) {
alpha = read.table("alpha.txt")
count = length(alpha[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(alpha[k, ])
k = k - 1
}
}
else {
if (break_point == 2) {
theta = read.table("theta.txt")
count = length(theta[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(theta[k, ])
k = k - 1
}
}
else {
if (break_point == 3) {
s1 = read.table("Sens1.txt")
count = length(s1[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(s1[k, ])
k = k - 1
}
}
else {
if (break_point == 4) {
c1 = read.table("Spec1.txt")
count = length(c1[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(c1[k, ])
k = k - 1
}
}
else {
if (break_point == 5) {
pi = read.table("PI.txt")
count = length(pi[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(pi[k, ])
k = k - 1
}
}
else {
if (break_point == 6) {
lambda = read.table("LAMBDA.txt")
count = length(lambda[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(lambda[k, ])
k = k - 1
}
}
else {
if (break_point == 7) {
sig.alph = read.table("sigma.alpha.txt")
count = length(sig.alph[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(sig.alph[k, ])
k = k - 1
}
}
else {
if (break_point == 8) {
ctheta = read.table("capital_THETA.txt")
count = length(ctheta[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(ctheta[k, ])
k = k - 1
}
}
else {
if (break_point == 9) {
sig.thet = read.table("sigma.theta.txt")
count = length(sig.thet[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(sig.thet[k, ])
k = k - 1
}
}
else {
if (break_point == 10) {
beta = read.table("beta.txt")
count = length(beta[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = beta[k, 1]
k = k - 1
}
}
else {
if (break_point == 11) {
s2 = read.table("Sens2.txt")
count = length(s2[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(s2[k, ])
k = k - 1
}
}
else {
if (break_point == 11) {
c2 = read.table("Spec2.txt")
count = length(c2[, 1])
k = count
x = 0
while (is.na(x) == FALSE) {
x = sum(c2[k, ])
k = k - 1
}
}
}
}
}
}
}
}
}
}
}
}
}
vec.alpha = read.table("alpha.txt")[k, ]
vec.theta = as.vector(read.table("theta.txt")[k, ])
vec.S1 = read.table("Sens1.txt")[k, ]
vec.C1 = read.table("Spec1.txt")[k, ]
vec.PI = read.table("PI.txt")[k, ]
vec.LAMBDA = read.table("LAMBDA.txt")[k, 1]
vec.sigma.alpha = read.table("sigma.alpha.txt")[k, 1]
vec.THETA = read.table("capital_THETA.txt")[k, 1]
vec.sigma.theta = read.table("sigma.theta.txt")[k, 1]
vec.beta = read.table("beta.txt")[k, 1]
columns = length(vec.alpha[k, ])
write.table(rbind(vec.alpha, vec.theta, vec.S1, vec.C1,
vec.PI), file = "Restore.txt", append = TRUE, row.names = FALSE,
col.names = FALSE)
write(c(vec.LAMBDA, vec.sigma.alpha, vec.THETA, vec.sigma.theta,
vec.beta), file = "Restore2.txt", append = TRUE)
write(paste("______________________________________________________"),
file = "Restore_index.txt", append = TRUE)
write(paste("\t Restore.txt "), file = "Restore_index.txt",
append = TRUE)
write(paste("Row 1 : alpha parameters for all M = ",
columns, " study(ies)\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("Row 2 : theta parameters for all M = ",
columns, " study(ies)\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("Row 3 : sensitivity of test under evaluation (S1) for all M = ",
columns, " study(ies)\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("Row 4 : specificity of test under evaluation (C1) for all M = ",
columns, " study(ies)\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("Row 5 : prevalence for all M = ", columns,
" study(ies)\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("______________________________________________________"),
file = "Restore_index.txt", append = TRUE)
write(paste("\t Restore2.txt "), file = "Restore_index.txt",
append = TRUE)
write(paste("Column 1 : LAMBDA parameter\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("Column 2 : sigma alpha parameter\t "),
file = "Restore_index.txt", append = TRUE)
write(paste("Column 3 : THETA parameter\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("Column 4 : sigma theta parameter\t "),
file = "Restore_index.txt", append = TRUE)
write(paste("Column 5 : beta parameter\t "), file = "Restore_index.txt",
append = TRUE)
write(paste("______________________________________________________"),
file = "Restore_index.txt", append = TRUE)
if (gold_std == FALSE) {
vec.S2 = read.table("Sens2.txt")[k, ]
vec.C2 = read.table("Spec2.txt")[k, ]
refstd = length(read.table("Sens2.txt")[k, ])
write(t(cbind(vec.S2, vec.C2)), file = "Restore3.txt",
append = TRUE, ncolumns = refstd)
write(paste("\t Restore3.txt "), file = "Restore_index.txt",
append = TRUE)
write(paste("Row 1 : sensitivity of reference test (S2) \t "),
file = "Restore_index.txt", append = TRUE)
write(paste("Row 2 : specificity of reference test (C2) \t "),
file = "Restore_index.txt", append = TRUE)
}
}
else {
if (break_point == 0) {
columns = NULL
}
}
}
|
#' Create favicons from package logo
#'
#' This function auto-detects the location of your package logo (with the name
#' `logo.svg` (recommended format) or `logo.png`) and runs it through the
#' <https://realfavicongenerator.net> API to build a complete set of favicons
#' with different sizes, as needed for modern web usage.
#'
#' You only need to run the function once. The favicon set will be stored in
#' `pkgdown/favicon` and copied by [init_site()] to the relevant location when
#' the website is rebuilt.
#'
#' Once complete, you should add `pkgdown/` to `.Rbuildignore ` to avoid a NOTE
#' during package checking.
#'
#' @inheritParams as_pkgdown
#' @param overwrite If `TRUE`, re-create favicons from package logo.
#' @export
build_favicons <- function(pkg = ".", overwrite = FALSE) {
  # openssl is only needed here (base64-encoding the logo), so it is an
  # optional dependency checked at call time.
  rlang::check_installed("openssl")
  pkg <- as_pkgdown(pkg)
  rule("Building favicons")
  # Locate logo.svg / logo.png in the package root or man/figures.
  logo_path <- find_logo(pkg$src_path)
  if (is.null(logo_path)) {
    stop("Can't find package logo PNG or SVG to build favicons.", call. = FALSE)
  }
  # Favicons are cached in pkgdown/favicon; only regenerate on request.
  if (has_favicons(pkg) && !overwrite) {
    message("Favicons already exist in `pkgdown/`. Set `overwrite = TRUE` to re-create.")
    return(invisible())
  }
  message("Building favicons with realfavicongenerator.net...")
  # Read the logo as raw bytes so it can be inlined (base64) in the request.
  logo <- readBin(logo_path, what = "raw", n = fs::file_info(logo_path)$size)
  # Request payload for the realfavicongenerator.net API: inline master
  # picture, desktop icons plus iOS7+ icons only.
  json_request <- list(
    "favicon_generation" = list(
      "api_key" = "87d5cd739b05c00416c4a19cd14a8bb5632ea563",
      "master_picture" = list(
        "type" = "inline",
        "content" = openssl::base64_encode(logo)
      ),
      "favicon_design" = list(
        "desktop_browser" = list(),
        "ios" = list(
          "picture_aspect" = "no_change",
          "assets" = list(
            "ios6_and_prior_icons" = FALSE,
            "ios7_and_later_icons" = TRUE,
            "precomposed_icons" = FALSE,
            "declare_only_default_icon" = TRUE
          )
        )
      )
    )
  )
  # RETRY gives resilience against transient network/API hiccups.
  resp <- httr::RETRY(
    "POST",
    "https://realfavicongenerator.net/api/favicon",
    body = json_request,
    encode = "json",
    quiet = TRUE
  )
  if (httr::http_error(resp)) {
    stop("API request failed.", call. = FALSE)
  }
  content <- httr::content(resp)
  result <- content$favicon_generation_result
  # The API reports success in-band even for HTTP 200 responses.
  if (!identical(result$result$status, "success")) {
    stop(
      "API request failed. ", "
Please submit bug report to <https://github.com/r-lib/pkgdown/issues>",
      call. = FALSE
    )
  }
  # The response carries a URL to a zip of the generated icons; download it
  # to a temp file (cleaned up on exit) and unpack into pkgdown/favicon.
  tmp <- tempfile()
  on.exit(unlink(tmp))
  result <- httr::RETRY(
    "GET",
    result$favicon$package_url,
    httr::write_disk(tmp),
    quiet = TRUE
  )
  # unzip() warns rather than errors on a corrupt archive, so trap both
  # conditions and surface a single actionable message.
  tryCatch({
    utils::unzip(tmp, exdir = path(pkg$src_path, "pkgdown", "favicon"))
  },
  warning = function(e) {
    stop("Your logo file couldn't be processed and may be corrupt.", call. = FALSE)
  },
  error = function(e) {
    stop("Your logo file couldn't be processed and may be corrupt.", call. = FALSE)
  })
  invisible()
}
#' Deprecated as of pkgdown 1.4.0
#' @rdname build_favicons
#' @inheritParams build_favicons
#' @export
build_favicon <- function(pkg, overwrite) {
  # Soft-deprecated alias kept for backwards compatibility: notify the user,
  # then forward both arguments unchanged to build_favicons().
  deprecation_note <- paste0(
    "`build_favicon()` is deprecated as of pkgdown 1.4.0. ",
    "Please use `build_favicons()` instead."
  )
  message(deprecation_note)
  build_favicons(pkg, overwrite)
}
copy_favicons <- function(pkg = ".") {
  # Copy the generated pkgdown/favicon directory into the built site.
  # Silently does nothing when no favicons have been generated yet.
  pkg <- as_pkgdown(pkg)
  favicon_dir <- path(pkg$src_path, "pkgdown", "favicon")
  if (!dir_exists(favicon_dir)) {
    return()
  }
  dir_copy_to(pkg, favicon_dir, pkg$dst_path)
}
has_favicons <- function(pkg = ".") {
  # TRUE when a pkgdown/favicon entry already exists in the package source.
  pkg <- as_pkgdown(pkg)
  favicon_path <- path(pkg$src_path, "pkgdown", "favicon")
  file.exists(favicon_path)
}
find_logo <- function(path) {
  # Candidate logo locations in priority order: SVG before PNG, package root
  # before man/figures.  Returns the first that exists, or NULL.
  candidates <- c(
    path(path, "logo.svg"),
    path(path, "man", "figures", "logo.svg"),
    path(path, "logo.png"),
    path(path, "man", "figures", "logo.png")
  )
  path_first_existing(candidates)
}
has_logo <- function(pkg) {
  # A package "has a logo" when find_logo() locates one of the known files.
  !is.null(find_logo(pkg$src_path))
}
| /R/build-favicons.R | permissive | isabella232/pkgdown | R | false | false | 3,928 | r | #' Create favicons from package logo
#'
#' This function auto-detects the location of your package logo (with the name
#' `logo.svg` (recommended format) or `logo.png`) and runs it through the
#' <https://realfavicongenerator.net> API to build a complete set of favicons
#' with different sizes, as needed for modern web usage.
#'
#' You only need to run the function once. The favicon set will be stored in
#' `pkgdown/favicon` and copied by [init_site()] to the relevant location when
#' the website is rebuilt.
#'
#' Once complete, you should add `pkgdown/` to `.Rbuildignore ` to avoid a NOTE
#' during package checking.
#'
#' @inheritParams as_pkgdown
#' @param overwrite If `TRUE`, re-create favicons from package logo.
#' @export
build_favicons <- function(pkg = ".", overwrite = FALSE) {
  # openssl (base64 encoding) is an optional dependency checked at call time.
  rlang::check_installed("openssl")
  pkg <- as_pkgdown(pkg)
  rule("Building favicons")
  logo_path <- find_logo(pkg$src_path)
  if (is.null(logo_path)) {
    stop("Can't find package logo PNG or SVG to build favicons.", call. = FALSE)
  }
  # Existing favicons are kept unless overwrite = TRUE.
  if (has_favicons(pkg) && !overwrite) {
    message("Favicons already exist in `pkgdown/`. Set `overwrite = TRUE` to re-create.")
    return(invisible())
  }
  message("Building favicons with realfavicongenerator.net...")
  # Inline the raw logo bytes (base64) in the API request below.
  logo <- readBin(logo_path, what = "raw", n = fs::file_info(logo_path)$size)
  json_request <- list(
    "favicon_generation" = list(
      "api_key" = "87d5cd739b05c00416c4a19cd14a8bb5632ea563",
      "master_picture" = list(
        "type" = "inline",
        "content" = openssl::base64_encode(logo)
      ),
      "favicon_design" = list(
        "desktop_browser" = list(),
        "ios" = list(
          "picture_aspect" = "no_change",
          "assets" = list(
            "ios6_and_prior_icons" = FALSE,
            "ios7_and_later_icons" = TRUE,
            "precomposed_icons" = FALSE,
            "declare_only_default_icon" = TRUE
          )
        )
      )
    )
  )
  # RETRY absorbs transient network failures.
  resp <- httr::RETRY(
    "POST",
    "https://realfavicongenerator.net/api/favicon",
    body = json_request,
    encode = "json",
    quiet = TRUE
  )
  if (httr::http_error(resp)) {
    stop("API request failed.", call. = FALSE)
  }
  content <- httr::content(resp)
  result <- content$favicon_generation_result
  # The API also signals failure in-band, independent of the HTTP status.
  if (!identical(result$result$status, "success")) {
    stop(
      "API request failed. ", "
Please submit bug report to <https://github.com/r-lib/pkgdown/issues>",
      call. = FALSE
    )
  }
  # Download the zip of generated icons to a temp file and unpack it into
  # pkgdown/favicon; the temp file is removed on exit.
  tmp <- tempfile()
  on.exit(unlink(tmp))
  result <- httr::RETRY(
    "GET",
    result$favicon$package_url,
    httr::write_disk(tmp),
    quiet = TRUE
  )
  # unzip() warns (not errors) on a corrupt archive, so trap both conditions.
  tryCatch({
    utils::unzip(tmp, exdir = path(pkg$src_path, "pkgdown", "favicon"))
  },
  warning = function(e) {
    stop("Your logo file couldn't be processed and may be corrupt.", call. = FALSE)
  },
  error = function(e) {
    stop("Your logo file couldn't be processed and may be corrupt.", call. = FALSE)
  })
  invisible()
}
#' Deprecated as of pkgdown 1.4.0
#' @rdname build_favicons
#' @inheritParams build_favicons
#' @export
build_favicon <- function(pkg, overwrite) {
  # Deprecated entry point retained for old callers; emits the standard
  # deprecation notice and delegates to build_favicons().
  message(paste0(
    "`build_favicon()` is deprecated as of pkgdown 1.4.0. ",
    "Please use `build_favicons()` instead."
  ))
  build_favicons(pkg, overwrite)
}
copy_favicons <- function(pkg = ".") {
  # Ship pkgdown/favicon (if generated) into the site's destination dir.
  pkg <- as_pkgdown(pkg)
  src <- path(pkg$src_path, "pkgdown", "favicon")
  if (dir_exists(src)) {
    dir_copy_to(pkg, src, pkg$dst_path)
  }
}
has_favicons <- function(pkg = ".") {
  # Does pkgdown/favicon already exist in the package source tree?
  pkg <- as_pkgdown(pkg)
  file.exists(path(pkg$src_path, "pkgdown", "favicon"))
}
find_logo <- function(path) {
  # First existing logo file, preferring SVG over PNG and the package root
  # over man/figures; NULL when no logo is found.
  svg_paths <- c(path(path, "logo.svg"), path(path, "man", "figures", "logo.svg"))
  png_paths <- c(path(path, "logo.png"), path(path, "man", "figures", "logo.png"))
  path_first_existing(c(svg_paths, png_paths))
}
has_logo <- function(pkg) {
  # TRUE when any recognised logo file is present in the package source.
  found <- find_logo(pkg$src_path)
  !is.null(found)
}
|
# functions to manipulate the estimation model.
#' Change dataset from OM into format for EM
#' @param OM_datfile Filename of the datfile produced by the OM within the
#' EM_dir.
#' @param EM_datfile Filename of the datfile from the original EM within the
#' EM_dir.
#' @param EM_dir Absolute or relative path to the Estimation model directory.
#' @param do_checks Should checks on the data be performed? Defaults to TRUE.
#' @template verbose
#' @author Kathryn Doering
#' @importFrom r4ss SS_readstarter SS_readdat SS_writedat SS_writestarter
#' @return the new EM data file. Side effect is saving over the OM_dat file in
#' EM_dir.
#' @examples
#' \dontrun{
#' # TODO: Add example
#' }
change_dat <- function(OM_datfile, EM_datfile, EM_dir, do_checks = TRUE,
                       verbose = FALSE) {
  EM_dir <- normalizePath(EM_dir)
  # Validate inputs before touching the file system.
  assertive.types::assert_is_a_string(OM_datfile)
  assertive.types::assert_is_a_string(EM_dir)
  check_dir(EM_dir)
  assertive.types::assert_is_a_bool(do_checks)
  assertive.types::assert_is_a_bool(verbose)
  # Load both SS data files from the EM directory.
  dat_OM <- SS_readdat(file.path(EM_dir, OM_datfile), verbose = FALSE)
  dat_EM <- SS_readdat(file.path(EM_dir, EM_datfile), verbose = FALSE)
  # Restructure the OM data to match the EM's expected format/years.
  dat_new <- get_EM_dat(OM_dat = dat_OM, EM_dat = dat_EM,
                        do_checks = do_checks)
  # Overwrite the OM-format data file so future EM runs pick it up.
  SS_writedat(dat_new, file.path(EM_dir, OM_datfile), verbose = FALSE,
              overwrite = TRUE)
  dat_new
}
#' Change the OM data to match the format of the EM data
#'
#' This does the technical part of changing the EM data
#' @param OM_dat An SS data file read in by as a list read in using r4ss from
#' the operating model
#' @param EM_dat An SS data file read in by as a list read in using r4ss from
#' the estimation model
#' @param do_checks Should checks on the data be performed? Defaults to TRUE.
#' @author Kathryn Doering
#' @return A data list in the same format that can be read/written by r4ss that
#' has index. lcomps, and age comps from OM_dat, but with the same structure as
#' EM_dat.
get_EM_dat <- function(OM_dat, EM_dat, do_checks = TRUE) {
  # Build a new data list: EM_dat's structure/settings, with index (CPUE),
  # length-comp and age-comp observations taken from OM_dat for exactly the
  # (year, season, fleet, ...) combinations that EM_dat contains.
  new_dat <- EM_dat # start by copying over to get the correct formatting.
  # TODO: add in code to copy over mean size and mean size at age obs.
  # add in index
  if (do_checks) {
    check_OM_dat(OM_dat, EM_dat)
  }
  dat <- list(OM_dat = OM_dat, EM_dat = EM_dat)
  # combine_cols() appends a "combo" key column built from the listed columns,
  # which is used below to match OM rows to EM rows.
  CPUEs <- lapply(dat, function(x) {
    tmp <- combine_cols(x, "CPUE", c("year", "seas", "index"))
  })
  # match 1 way: match each EM obs with an OM obs. extract only these OM obs.
  matches <- which(CPUEs[[1]][, "combo"] %in% CPUEs[[2]][, "combo"])
  # extract only the rows of interest and get rid of the "combo" column
  new_dat$CPUE <- CPUEs[[1]][matches, -ncol(CPUEs[[1]])]
  # add in lcomps (only present when the model uses length comps)
  if (OM_dat$use_lencomp == 1) {
    lcomps <- lapply(dat, function(x) {
      tmp <- combine_cols(x, "lencomp",
        c("Yr", "Seas", "FltSvy", "Gender", "Part"))
    })
    matches_l <- which(lcomps[[1]][, "combo"] %in% lcomps[[2]][, "combo"])
    new_dat$lencomp <- lcomps[[1]][matches_l, -ncol(lcomps[[1]])]
  }
  # add in age comps; the Lbin columns are part of the matching key here
  acomps <- lapply(dat, function(x) {
    tmp <- combine_cols(x, "agecomp",
      c("Yr", "Seas", "FltSvy", "Gender", "Part", "Lbin_lo", "Lbin_hi"))
  })
  matches_a <- which(acomps[[1]][, "combo"] %in% acomps[[2]][, "combo"])
  new_dat$agecomp <- acomps[[1]][matches_a, -ncol(acomps[[1]])]
  # TODO: check this for other types of data, esp. mean size at age, k
  # and mean size.
  # return
  new_dat
}
#' Run the estimation model
#'
#' Runs the estimation model and performs checks if desired.
#'
#' @param EM_dir Absolute or relative path to the estimation model directory
#' @param hess Get the hessian during model run? Defaults to FALSE. Not
#' estimating the hessian will speed up the run, but no estimates of error will
#' be generated.
#' @param check_converged Perform checks to see if the model converged? Defaults
#' to TRUE.
#' @param set_use_par Should input values be read from the .par file? If TRUE,
#' will change setting in the starter file; otherwise, will use the setting
#' already in the starter file, which may or may not read from the .par file.
#' @template verbose
#' @export
#' @author Kathryn Doering
#' @importFrom r4ss SS_readforecast SS_writeforecast SS_readstarter SS_writestarter SS_read_summary
run_EM <- function(EM_dir,
                   hess = FALSE,
                   check_converged = TRUE,
                   set_use_par = FALSE,
                   verbose = FALSE) {
  EM_dir <- normalizePath(EM_dir)
  check_dir(EM_dir)
  if (set_use_par) {
    # Point the starter file at the .par file for initial values.
    starter <- SS_readstarter(file.path(EM_dir, "starter.ss"), verbose = FALSE)
    starter$init_values_src <- 1
    SS_writestarter(starter, dir = EM_dir, overwrite = TRUE, verbose = FALSE,
                    warn = FALSE)
  }
  # Skip the hessian unless requested; "-nohess" speeds up the run but
  # produces no error estimates.
  run_opts <- if (hess) "" else "-nohess"
  run_ss_model(EM_dir, run_opts, verbose = verbose)
  if (check_converged) {
    # TODO: add additional checks for convergence, and if additional model runs
    # should be done. perhaps user defined?
    # Flag runs whose final gradient exceeded the starter-file tolerance.
    warn_lines <- readLines(file.path(EM_dir, "warning.sso"))
    grad_hits <- grep("^Final gradient\\:\\s+\\d*\\.\\d*\\sis larger than final_conv\\:",
                      warn_lines)
    if (length(grad_hits) > 0) {
      warning("Estimation model did not converge this iteration based on the",
              " convergence criterion set in the starter.ss file.")
    }
  }
}
#' Add new data to an existing EM dataset
#'
#' This should be used for the feedback loops when an EM is used.
#' @param OM_dat An valid SS data file read in using r4ss. In particular,
#' this should be sampled data.
#' @param EM_datfile Datafile name run in previous iterations with the EM.
#' Assumed to exist in EM_dir.
#' @param sample_struct Includes which years and fleets should be
#' added from the OM into the EM for different types of data. If NULL, the data
#' structure will try to be infered from the pattern found for each of the
#' datatypes within EM_datfile.
#' @param EM_dir Absolute or relative path to the Estimation model directory.
#' @param do_checks Should checks on the data be performed? Defaults to TRUE.
#' @param new_datfile_name An optional name of a file to write the new datafile
#' to. If NULL, a new datafile will not be written.
#' @template verbose
#' @return A new SS datafile containing the data in EM_datfile with new data
#' from OM_dat appended
#' @importFrom r4ss SS_readdat SS_writedat
#' @importFrom stats na.omit
#' @author Kathryn Doering
add_new_dat <- function(OM_dat,
                        EM_datfile,
                        sample_struct,
                        EM_dir,
                        do_checks = TRUE,
                        new_datfile_name = NULL,
                        verbose = FALSE) {
  # Append newly sampled OM observations to a previously used EM data file.
  # sample_struct is a named list (names = data-frame components of the SS
  # data list, e.g. CPUE/lencomp/agecomp) giving the year/fleet combinations
  # to pull from OM_dat.
  if (do_checks) {
    # TODO: do input checks: check OM_dat is valid r4ss list, check data. only do if
    # do_checks = TRUE?
    if (OM_dat$type != "Stock_Synthesis_data_file") {
      r4ss_obj_err("OM_dat", "data list")
    }
  }
  # Read in EM_datfile
  EM_dat <- SS_readdat(file.path(EM_dir, EM_datfile), verbose = FALSE)
  new_EM_dat <- EM_dat
  new_EM_dat$endyr <- OM_dat$endyr # want to be the same as the OM
  # add the data from OM_dat into EM_dat
  # checks in relation to OM_dat: check that years, fleets, etc. are valid
  # extract data from OM_dat based on valid data structure
  extracted_dat <-
    mapply(
      function(df, df_name, OM_dat) {
        OM_df <- OM_dat[[df_name]]
        OM_df[, 3] <- abs(OM_df[, 3]) # get rid of negative fleet values from OM
        # Left-join the requested combinations onto the OM observations;
        # requested rows with no OM match come back as NA.
        new_dat <- merge(df, OM_df, all.x = TRUE, all.y = FALSE)
        # warn if there were matches not found for OM_df, but remove to continue
        if (any(is.na(new_dat))) {
          warning("Some values specified in sample_struct (list component ", df_name,
                  ") were not found in OM_dat, so they will not be added to ",
                  "the EM_dat.")
          new_dat <- na.omit(new_dat)
        }
        new_dat
      },
      df = sample_struct, df_name = names(sample_struct),
      MoreArgs = list(OM_dat = OM_dat),
      SIMPLIFY = FALSE, USE.NAMES = TRUE)
  # insert this data into the EM_datfile (append rows per component)
  for (n in names(extracted_dat)) {
    new_EM_dat[[n]] <- rbind(new_EM_dat[[n]], extracted_dat[[n]])
  }
  # write the new datafile if new_datfile_name isn't NULL
  if (!is.null(new_datfile_name)) {
    SS_writedat(new_EM_dat,
                file.path(EM_dir, new_datfile_name),
                overwrite = TRUE,
                verbose = FALSE)
  }
  new_EM_dat
}
#' Change the years in the forecast file
#'
#' This is both to increment years forward and/or to change absolute years to
#' relative years.
#' @param fore A forecasting file read into R using r4ss::SS_readforecast()
#' @param make_yrs_rel Should the absolute years in the forecast file be changed
#' to relative years? Defaults to TRUE.
#' @param nyrs_increment The number of years to increment forecasting period years.
#' If NULL (the default value), will not be incremented.
#' @param nyrs_fore The number of years of forecasting to do. If NULL, do not
#' change the number of forecasting years already specified in \code{fore}
#' @param mod_styr The first year of the model
#' @param mod_endyr The last year of the model \code{fore} assumes when read in.
#' Note that the assumed model year will be different for the output if
#' nyrs_increment is not NULL.
#' @author Kathryn Doering
#' @importFrom assertive.base assert_is_identical_to_true
#' @return A forecasting file as an R list object
change_yrs_fcast <- function(fore,
                             make_yrs_rel = TRUE,
                             nyrs_increment = NULL,
                             nyrs_fore = NULL,
                             mod_styr,
                             mod_endyr) {
  if (make_yrs_rel == TRUE) {
    # Convert a single absolute year x to SS relative-year notation:
    # -999 = model start year, 0 = model end year, otherwise relative to the
    # end year. Years already <= 0 are assumed relative and passed through.
    # (Named differently from the make_yrs_rel argument to avoid shadowing it.)
    to_rel_yr <- function(x, styr, endyr) {
      if (x > 0) { # means these are absolute years and not relative.
        if (x == styr) {
          x <- -999
        } else if (x == endyr) {
          x <- 0
        } else if (x > styr && x < endyr) {
          x <- x - endyr # make it relative to endyr
        } else {
          stop("Year in fcast file out of range. Please change to be within ",
               "start and end yrs. Check Bmark_years, Fcast_years")
        }
      }
      x
    }
    # change benchmark and forecast years, preserving element names
    for (yr_set in c("Bmark_years", "Fcast_years")) {
      new_yrs <- unlist(lapply(fore[[yr_set]],
                               to_rel_yr,
                               styr = mod_styr,
                               endyr = mod_endyr))
      names(new_yrs) <- names(fore[[yr_set]])
      fore[[yr_set]] <- new_yrs
    }
  }
  if (!is.null(nyrs_increment)) {
    # first year for caps and allocations moves forward with the model
    fore[["FirstYear_for_caps_and_allocations"]] <-
      fore[["FirstYear_for_caps_and_allocations"]] + nyrs_increment
    assert_is_identical_to_true(
      fore[["FirstYear_for_caps_and_allocations"]] > mod_endyr)
    # deal with allocation
    if (fore[["N_allocation_groups"]] > 0) {
      tmp_allocation <- fore[["allocation_among_groups"]]
      if (any(tmp_allocation$Year < mod_endyr)) {
        if (length(tmp_allocation$Year) == 1) { # increment forward if only one assignment
          fore$allocation_among_groups$Year <-
            fore$allocation_among_groups$Year + nyrs_increment
        } else {
          # TODO: develop smarter ways to deal with time-varying allocation
          stop("Time-varying allocation in the forecasting file cannot yet be",
               " used in SSMSE. Please request development of this feature.")
        }
      }
    }
  }
  if (!is.null(nyrs_fore)) {
    fore[["Nforecastyrs"]] <- nyrs_fore
  }
  # Get rid of ForeCatch, if any, and warn the user about it.
  # May need to treat this differently in the future.
  if (!is.null(fore[["ForeCatch"]])) {
    warning("Removing ForeCatch from the EM forecasting file.")
    fore[["ForeCatch"]] <- NULL
  }
  fore
}
| /R/manipulate_EM.R | no_license | doering-kat/SSMSE | R | false | false | 12,696 | r | # functions to manipulate the estimation model.
#' Change dataset from OM into format for EM
#'
#' Reads the OM-generated data file and the original EM data file from
#' \code{EM_dir}, trims the OM data down to the observations present in the
#' EM data (via \code{get_EM_dat}), and writes the result back over the OM
#' data file so it can be used in the next EM run.
#' @param OM_datfile Filename of the datfile produced by the OM within the
#' EM_dir.
#' @param EM_datfile Filename of the datfile from the original EM within the
#' EM_dir.
#' @param EM_dir Absolute or relative path to the Estimation model directory.
#' @param do_checks Should checks on the data be performed? Defaults to TRUE.
#' @template verbose
#' @author Kathryn Doering
#' @importFrom r4ss SS_readstarter SS_readdat SS_writedat SS_writestarter
#' @return the new EM data file. Side effect is saving over the OM_dat file in
#' EM_dir.
#' @examples
#' \dontrun{
#' # TODO: Add example
#' }
change_dat <- function(OM_datfile, EM_datfile, EM_dir, do_checks = TRUE,
                       verbose = FALSE) {
  EM_dir <- normalizePath(EM_dir)
  # validate inputs before touching any files
  assertive.types::assert_is_a_string(OM_datfile)
  assertive.types::assert_is_a_string(EM_dir)
  check_dir(EM_dir)
  assertive.types::assert_is_a_bool(do_checks)
  assertive.types::assert_is_a_bool(verbose)
  # load both data files from the EM directory
  em_data <- SS_readdat(file.path(EM_dir, EM_datfile), verbose = FALSE)
  om_data <- SS_readdat(file.path(EM_dir, OM_datfile), verbose = FALSE)
  # keep only the OM observations that the EM data file expects
  trimmed_dat <- get_EM_dat(OM_dat = om_data, EM_dat = em_data,
                            do_checks = do_checks)
  # overwrite the OM data file so a future EM run picks up the trimmed data
  SS_writedat(trimmed_dat, file.path(EM_dir, OM_datfile), verbose = FALSE,
              overwrite = TRUE)
  trimmed_dat
}
#' Change the OM data to match the format of the EM data
#'
#' This does the technical part of changing the EM data
#' @param OM_dat An SS data file, as a list read in using r4ss from
#' the operating model
#' @param EM_dat An SS data file, as a list read in using r4ss from
#' the estimation model
#' @param do_checks Should checks on the data be performed? Defaults to TRUE.
#' @author Kathryn Doering
#' @return A data list in the same format that can be read/written by r4ss that
#' has index, lcomps, and age comps from OM_dat, but with the same structure as
#' EM_dat.
get_EM_dat <- function(OM_dat, EM_dat, do_checks = TRUE) {
  new_dat <- EM_dat # start from the EM data to keep the correct formatting.
  # TODO: add in code to copy over mean size and mean size at age obs.
  if (do_checks) {
    check_OM_dat(OM_dat, EM_dat)
  }
  dat <- list(OM_dat = OM_dat, EM_dat = EM_dat)
  # Match each EM observation with an OM observation on the "combo" key column
  # appended by combine_cols(); return only the matching OM rows, dropping
  # the temporary "combo" column (assumed to be the last column).
  extract_matching_OM_rows <- function(tables) {
    matches <- which(tables[[1]][, "combo"] %in% tables[[2]][, "combo"])
    tables[[1]][matches, -ncol(tables[[1]])]
  }
  # add in index (CPUE) observations
  CPUEs <- lapply(dat, function(x) {
    combine_cols(x, "CPUE", c("year", "seas", "index"))
  })
  new_dat$CPUE <- extract_matching_OM_rows(CPUEs)
  # add in length comps, if the model uses them
  if (OM_dat$use_lencomp == 1) {
    lcomps <- lapply(dat, function(x) {
      combine_cols(x, "lencomp",
                   c("Yr", "Seas", "FltSvy", "Gender", "Part"))
    })
    new_dat$lencomp <- extract_matching_OM_rows(lcomps)
  }
  # add in age comps
  acomps <- lapply(dat, function(x) {
    combine_cols(x, "agecomp",
                 c("Yr", "Seas", "FltSvy", "Gender", "Part", "Lbin_lo", "Lbin_hi"))
  })
  new_dat$agecomp <- extract_matching_OM_rows(acomps)
  # TODO: check this for other types of data, esp. mean size at age, k
  # and mean size.
  new_dat
}
#' Run the estimation model
#'
#' Executes the estimation model found in \code{EM_dir}, optionally requesting
#' the hessian and optionally checking the SS warning file afterwards for a
#' failure to meet the convergence criterion.
#'
#' @param EM_dir Absolute or relative path to the estimation model directory
#' @param hess Get the hessian during model run? Defaults to FALSE. Not
#' estimating the hessian will speed up the run, but no estimates of error will
#' be generated.
#' @param check_converged Perform checks to see if the model converged? Defaults
#' to TRUE.
#' @param set_use_par Should input values be read from the .par file? If TRUE,
#' will change setting in the starter file; otherwise, will use the setting
#' already in the starter file, which may or may not read from the .par file.
#' @template verbose
#' @export
#' @author Kathryn Doering
#' @importFrom r4ss SS_readforecast SS_writeforecast SS_readstarter SS_writestarter SS_read_summary
run_EM <- function(EM_dir,
                   hess = FALSE,
                   check_converged = TRUE,
                   set_use_par = FALSE,
                   verbose = FALSE) {
  EM_dir <- normalizePath(EM_dir)
  check_dir(EM_dir)
  # If requested, flip the starter file so SS reads initial values from .par.
  if (set_use_par == TRUE) {
    starter <- SS_readstarter(file.path(EM_dir, "starter.ss"), verbose = FALSE)
    starter$init_values_src <- 1
    SS_writestarter(starter, dir = EM_dir, overwrite = TRUE, verbose = FALSE,
                    warn = FALSE)
  }
  # Skipping the hessian (-nohess) is faster but yields no error estimates.
  run_options <- if (hess == TRUE) "" else "-nohess"
  run_ss_model(EM_dir, run_options, verbose = verbose)
  if (check_converged == TRUE) {
    # TODO: add additional checks for convergence, and if additional model runs
    # should be done. perhaps user defined?
    warning_lines <- readLines(file.path(EM_dir, "warning.sso"))
    gradient_hits <- grep("^Final gradient\\:\\s+\\d*\\.\\d*\\sis larger than final_conv\\:", warning_lines)
    if (length(gradient_hits) > 0) {
      warning("Estimation model did not converge this iteration based on the",
              " convergence criterion set in the starter.ss file.")
    }
  }
}
#' Add new data to an existing EM dataset
#'
#' This should be used for the feedback loops when an EM is used.
#' @param OM_dat A valid SS data file read in using r4ss. In particular,
#' this should be sampled data.
#' @param EM_datfile Datafile name run in previous iterations with the EM.
#' Assumed to exist in EM_dir.
#' @param sample_struct Includes which years and fleets should be
#' added from the OM into the EM for different types of data. If NULL, the data
#' structure will try to be inferred from the pattern found for each of the
#' datatypes within EM_datfile.
#' @param EM_dir Absolute or relative path to the Estimation model directory.
#' @param do_checks Should checks on the data be performed? Defaults to TRUE.
#' @param new_datfile_name An optional name of a file to write the new datafile
#' to. If NULL, a new datafile will not be written.
#' @template verbose
#' @return A new SS datafile containing the data in EM_datfile with new data
#' from OM_dat appended
#' @importFrom r4ss SS_readdat SS_writedat
#' @importFrom stats na.omit
#' @author Kathryn Doering
add_new_dat <- function(OM_dat,
                        EM_datfile,
                        sample_struct,
                        EM_dir,
                        do_checks = TRUE,
                        new_datfile_name = NULL,
                        verbose = FALSE) {
  if (do_checks) {
    # TODO: do input checks: check OM_dat is valid r4ss list, check data. only do if
    # do_checks = TRUE?
    if (OM_dat$type != "Stock_Synthesis_data_file") {
      r4ss_obj_err("OM_dat", "data list")
    }
  }
  # Read in EM_datfile
  EM_dat <- SS_readdat(file.path(EM_dir, EM_datfile), verbose = FALSE)
  new_EM_dat <- EM_dat
  new_EM_dat$endyr <- OM_dat$endyr # want to be the same as the OM
  # add the data from OM_dat into EM_dat
  # checks in relation to OM_dat: check that years, fleets, etc. are valid
  # extract data from OM_dat based on valid data structure
  # For each data type listed in sample_struct (e.g., CPUE, lencomp, agecomp),
  # pull the matching observations out of the corresponding OM_dat data frame.
  extracted_dat <-
    mapply(
      function(df, df_name, OM_dat) {
        OM_df <- OM_dat[[df_name]]
        # NOTE(review): assumes fleet is always the 3rd column of the OM data
        # frame for every data type in sample_struct -- confirm this holds.
        OM_df[, 3] <- abs(OM_df[, 3]) # get rid of negative fleet values from OM
        # Left join: keep every row requested in sample_struct; requested rows
        # with no OM match come back with NAs and are warned about and dropped.
        new_dat <- merge(df, OM_df, all.x = TRUE, all.y = FALSE)
        # warn if there were matches not found for OM_df, but remove to continue
        if (any(is.na(new_dat))) {
          warning("Some values specified in sample_struct (list component ", df_name,
                  ") were not found in OM_dat, so they will not be added to ",
                  "the EM_dat.")
          new_dat <- na.omit(new_dat)
        }
        new_dat
      },
      df = sample_struct, df_name = names(sample_struct),
      MoreArgs = list(OM_dat = OM_dat),
      SIMPLIFY = FALSE, USE.NAMES = TRUE)
  # insert this data into the EM_datfile (append below the existing rows)
  for (n in names(extracted_dat)) {
    new_EM_dat[[n]] <- rbind(new_EM_dat[[n]], extracted_dat[[n]])
  }
  # write the new datafile if new_datfile_name isn't NULL
  if (!is.null(new_datfile_name)) {
    SS_writedat(new_EM_dat,
                file.path(EM_dir, new_datfile_name),
                overwrite = TRUE,
                verbose = FALSE)
  }
  new_EM_dat
}
#' Change the years in the forecast file
#'
#' This is both to increment years forward and/or to change absolute years to
#' relative years.
#' @param fore A forecasting file read into R using r4ss::SS_readforecast()
#' @param make_yrs_rel Should the absolute years in the forecast file be changed
#' to relative years? Defaults to TRUE.
#' @param nyrs_increment The number of years to increment forecasting period years.
#' If NULL (the default value), will not be incremented.
#' @param nyrs_fore The number of years of forecasting to do. If NULL, do not
#' change the number of forecasting years already specified in \code{fore}
#' @param mod_styr The first year of the model
#' @param mod_endyr The last year of the model \code{fore} assumes when read in.
#' Note that the assumed model year will be different for the output if
#' nyrs_increment is not NULL.
#' @author Kathryn Doering
#' @importFrom assertive.base assert_is_identical_to_true
#' @return A forecasting file as an R list object
change_yrs_fcast <- function(fore,
                             make_yrs_rel = TRUE,
                             nyrs_increment = NULL,
                             nyrs_fore = NULL,
                             mod_styr,
                             mod_endyr) {
  if (make_yrs_rel == TRUE) {
    # Convert a single absolute year x to SS relative-year notation:
    # -999 = model start year, 0 = model end year, otherwise relative to the
    # end year. Years already <= 0 are assumed relative and passed through.
    # (Named differently from the make_yrs_rel argument to avoid shadowing it.)
    to_rel_yr <- function(x, styr, endyr) {
      if (x > 0) { # means these are absolute years and not relative.
        if (x == styr) {
          x <- -999
        } else if (x == endyr) {
          x <- 0
        } else if (x > styr && x < endyr) {
          x <- x - endyr # make it relative to endyr
        } else {
          stop("Year in fcast file out of range. Please change to be within ",
               "start and end yrs. Check Bmark_years, Fcast_years")
        }
      }
      x
    }
    # change benchmark and forecast years, preserving element names
    for (yr_set in c("Bmark_years", "Fcast_years")) {
      new_yrs <- unlist(lapply(fore[[yr_set]],
                               to_rel_yr,
                               styr = mod_styr,
                               endyr = mod_endyr))
      names(new_yrs) <- names(fore[[yr_set]])
      fore[[yr_set]] <- new_yrs
    }
  }
  if (!is.null(nyrs_increment)) {
    # first year for caps and allocations moves forward with the model
    fore[["FirstYear_for_caps_and_allocations"]] <-
      fore[["FirstYear_for_caps_and_allocations"]] + nyrs_increment
    assert_is_identical_to_true(
      fore[["FirstYear_for_caps_and_allocations"]] > mod_endyr)
    # deal with allocation
    if (fore[["N_allocation_groups"]] > 0) {
      tmp_allocation <- fore[["allocation_among_groups"]]
      if (any(tmp_allocation$Year < mod_endyr)) {
        if (length(tmp_allocation$Year) == 1) { # increment forward if only one assignment
          fore$allocation_among_groups$Year <-
            fore$allocation_among_groups$Year + nyrs_increment
        } else {
          # TODO: develop smarter ways to deal with time-varying allocation
          stop("Time-varying allocation in the forecasting file cannot yet be",
               " used in SSMSE. Please request development of this feature.")
        }
      }
    }
  }
  if (!is.null(nyrs_fore)) {
    fore[["Nforecastyrs"]] <- nyrs_fore
  }
  # Get rid of ForeCatch, if any, and warn the user about it.
  # May need to treat this differently in the future.
  if (!is.null(fore[["ForeCatch"]])) {
    warning("Removing ForeCatch from the EM forecasting file.")
    fore[["ForeCatch"]] <- NULL
  }
  fore
}
|
test_that("Rescaling x coordinates works", {
  dimensions <- list(pitch_opta, pitch_wyscout, pitch_statsbomb, pitch_international)
  # Build the rescaler for the actual pair under test (dim1 -> dim2) rather
  # than the previously hard-coded pitch_opta -> pitch_international, so that
  # every pair in the loop below is genuinely exercised.
  expect_equal_rescaled_x <- function(dim1, dim2) {
    rescaler <- rescale_coordinates(dim1, dim2)
    x_dimensions <- c("length", "penalty_box_length", "penalty_spot_distance",
                      "six_yard_box_length", "origin_x")
    for (dim_name in x_dimensions) {
      expect_equal(
        rescaler$x(dim1[[dim_name]]),
        dim2[[dim_name]]
      )
    }
  }
  for (dim1 in dimensions) {
    for (dim2 in dimensions) {
      expect_equal_rescaled_x(dim1, dim2)
    }
  }
})
test_that("Rescaling y coordinates works", {
  dimensions <- list(pitch_opta, pitch_wyscout, pitch_statsbomb, pitch_international)
  # Rescaling should map the finite y-axis breakpoints of the source pitch
  # exactly onto the corresponding breakpoints of the target pitch.
  check_y_rescaling <- function(from_dim, to_dim) {
    rescaler <- rescale_coordinates(from_dim, to_dim)
    from_breaks <- ggsoccer:::get_ybreaks(from_dim)
    to_breaks <- ggsoccer:::get_ybreaks(to_dim)
    expect_equal(
      rescaler$y(from_breaks[is.finite(from_breaks)]),
      to_breaks[is.finite(to_breaks)]
    )
  }
  for (from_dim in dimensions) {
    for (to_dim in dimensions) {
      check_y_rescaling(from_dim, to_dim)
    }
  }
})
| /tests/testthat/test-rescale-coordinates.R | permissive | RobWHickman/ggsoccer | R | false | false | 1,265 | r | test_that("Rescaling x coordinates works", {
dimensions <- list(pitch_opta, pitch_wyscout, pitch_statsbomb, pitch_international)
expect_equal_rescaled_x <- function(dim1, dim2) {
rescaler <- rescale_coordinates(pitch_opta, pitch_international)
x_dimensions <- c("length", "penalty_box_length", "penalty_spot_distance",
"six_yard_box_length", "origin_x")
for (dim in x_dimensions) {
expect_equal(
rescaler$x(pitch_opta[[dim]]),
pitch_international[[dim]]
)
}
}
for (dim1 in dimensions) {
for (dim2 in dimensions) {
expect_equal_rescaled_x(dim1, dim2)
}
}
})
test_that("Rescaling y coordinates works", {
  dimensions <- list(pitch_opta, pitch_wyscout, pitch_statsbomb, pitch_international)
  # Rescaling should map the finite y-axis breakpoints of the source pitch
  # exactly onto the corresponding breakpoints of the target pitch.
  check_y_rescaling <- function(from_dim, to_dim) {
    rescaler <- rescale_coordinates(from_dim, to_dim)
    from_breaks <- ggsoccer:::get_ybreaks(from_dim)
    to_breaks <- ggsoccer:::get_ybreaks(to_dim)
    expect_equal(
      rescaler$y(from_breaks[is.finite(from_breaks)]),
      to_breaks[is.finite(to_breaks)]
    )
  }
  for (from_dim in dimensions) {
    for (to_dim in dimensions) {
      check_y_rescaling(from_dim, to_dim)
    }
  }
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spinBayes-package.R
\docType{package}
\name{spinBayes-package}
\alias{spinBayes-package}
\title{Semi-Parametric Gene-Environment Interaction via Bayesian Variable Selection}
\description{
Within the Bayesian framework, we propose a partially linear varying coefficient model (PLVC) for G×E interactions. The varying coefficient functions capture the possible non-linear G×E interaction, and the linear component models the G×E interactions with linear assumptions. The changing of basis with B splines is adopted to separate the coefficient functions with varying, non-zero constant and zero forms, corresponding to cases of nonlinear interaction, main effect only (no interaction) and no genetic interaction at all.
}
\details{
The user-friendly, integrated interface BVCfit() allows users to flexibly choose the fitting methods they prefer. There are three arguments in BVCfit() that control the fitting method:
\tabular{rl}{
sparse: \tab whether to use the spike-and-slab priors to achieve sparsity. \cr\cr
VC: \tab whether to separate the coefficient functions with varying effects \cr \tab and non-zero constant (main) effects.\cr\cr
structural: \tab whether to use varying coefficient functions for modeling \cr \tab non-linear GxE interactions.
}
BVCfit() returns a BVCfit object that contains the posterior estimates of each coefficients.
S3 generic functions BVSelection(), predict() and print() are implemented for BVCfit objects.
BVSelection() takes a BVCfit object and returns the variable selection results.
predict() takes a BVCfit object and returns the predicted values for new observations.
}
\references{
Ren, J., Zhou, F., Li, X., Chen, Q., Zhang, H., Ma, S., Jiang, Y., Wu, C. (2019) Semi-parametric Bayesian variable selection for gene-environment interactions.
\url{https://arxiv.org/abs/1906.01057}
Wu, C., Li, S., and Cui, Y. (2012). Genetic Association Studies: An Information Content Perspective.
\href{https://doi.org/10.2174/138920212803251382}{\emph{Current Genomics}, 13(7), 566–573}
Wu, C. and Cui, Y. (2013). A novel method for identifying nonlinear gene–environment interactions in case–control association studies.
\href{https://doi.org/10.1007/s00439-013-1350-z}{\emph{Human Genetics}, 132(12):1413–1425}
Wu, C. and Cui, Y. (2013). Boosting signals in gene–based association studies via efficient SNP selection.
\href{https://doi.org/10.1093/bib/bbs087}{\emph{Briefings in Bioinformatics}, 15(2):279–291}
Wu, C., Cui, Y., and Ma, S. (2014). Integrative analysis of gene–environment interactions under a multi–response partially linear varying coefficient model.
\href{https://doi.org/10.1002/sim.6287}{\emph{Statistics in Medicine}, 33(28), 4988–4998}
Wu, C., and Ma, S. (2015). A selective review of robust variable selection with applications in bioinformatics.
\href{https://doi.org/10.1093/bib/bbu046}{\emph{Briefings in Bioinformatics}, 16(5), 873–883}
Wu, C., Shi, X., Cui, Y. and Ma, S. (2015). A penalized robust semiparametric approach for gene-environment interactions.
\href{https://doi.org/10.1002/sim.6609}{\emph{Statistics in Medicine}, 34 (30): 4016–4030}
Wu, C., Zhong, P.-S., and Cui, Y. (2018). Additive varying–coefficient model for nonlinear gene–environment interactions.
{\emph{ Statistical Applications in Genetics and Molecular Biology}, 17(2)}
Wu, C., Jiang, Y., Ren, J., Cui, Y., Ma, S. (2018). Dissecting gene-environment interactions: A penalized robust approach accounting for hierarchical structures.
\href{https://doi.org/10.1002/sim.7518}{\emph{Statistics in Medicine}, 37:437–456}
Wu, C., Zhou, F., Ren, J., Li, X., Jiang, Y., Ma, S. (2019). A Selective Review of Multi-Level Omics Data Integration Using Variable Selection.
\href{https://doi.org/10.3390/ht8010004}{\emph{High-Throughput}, 8(1)}
}
\seealso{
\code{\link{BVCfit}}
}
\keyword{overview}
| /fuzzedpackages/spinBayes/man/spinBayes-package.Rd | no_license | akhikolla/testpackages | R | false | true | 3,994 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spinBayes-package.R
\docType{package}
\name{spinBayes-package}
\alias{spinBayes-package}
\title{Semi-Parametric Gene-Environment Interaction via Bayesian Variable Selection}
\description{
Within the Bayesian framework, we propose a partially linear varying coefficient model (PLVC) for G×E interactions. The varying coefficient functions capture the possible non-linear G×E interaction, and the linear component models the G×E interactions with linear assumptions. The changing of basis with B splines is adopted to separate the coefficient functions with varying, non-zero constant and zero forms, corresponding to cases of nonlinear interaction, main effect only (no interaction) and no genetic interaction at all.
}
\details{
The user-friendly, integrated interface BVCfit() allows users to flexibly choose the fitting methods they prefer. There are three arguments in BVCfit() that control the fitting method:
\tabular{rl}{
sparse: \tab whether to use the spike-and-slab priors to achieve sparsity. \cr\cr
VC: \tab whether to separate the coefficient functions with varying effects \cr \tab and non-zero constant (main) effects.\cr\cr
structural: \tab whether to use varying coefficient functions for modeling \cr \tab non-linear GxE interactions.
}
BVCfit() returns a BVCfit object that contains the posterior estimates of each coefficients.
S3 generic functions BVSelection(), predict() and print() are implemented for BVCfit objects.
BVSelection() takes a BVCfit object and returns the variable selection results.
predict() takes a BVCfit object and returns the predicted values for new observations.
}
\references{
Ren, J., Zhou, F., Li, X., Chen, Q., Zhang, H., Ma, S., Jiang, Y., Wu, C. (2019) Semi-parametric Bayesian variable selection for gene-environment interactions.
\url{https://arxiv.org/abs/1906.01057}
Wu, C., Li, S., and Cui, Y. (2012). Genetic Association Studies: An Information Content Perspective.
\href{https://doi.org/10.2174/138920212803251382}{\emph{Current Genomics}, 13(7), 566–573}
Wu, C. and Cui, Y. (2013). A novel method for identifying nonlinear gene–environment interactions in case–control association studies.
\href{https://doi.org/10.1007/s00439-013-1350-z}{\emph{Human Genetics}, 132(12):1413–1425}
Wu, C. and Cui, Y. (2013). Boosting signals in gene–based association studies via efficient SNP selection.
\href{https://doi.org/10.1093/bib/bbs087}{\emph{Briefings in Bioinformatics}, 15(2):279–291}
Wu, C., Cui, Y., and Ma, S. (2014). Integrative analysis of gene–environment interactions under a multi–response partially linear varying coefficient model.
\href{https://doi.org/10.1002/sim.6287}{\emph{Statistics in Medicine}, 33(28), 4988–4998}
Wu, C., and Ma, S. (2015). A selective review of robust variable selection with applications in bioinformatics.
\href{https://doi.org/10.1093/bib/bbu046}{\emph{Briefings in Bioinformatics}, 16(5), 873–883}
Wu, C., Shi, X., Cui, Y. and Ma, S. (2015). A penalized robust semiparametric approach for gene-environment interactions.
\href{https://doi.org/10.1002/sim.6609}{\emph{Statistics in Medicine}, 34 (30): 4016–4030}
Wu, C., Zhong, P.-S., and Cui, Y. (2018). Additive varying–coefficient model for nonlinear gene–environment interactions.
{\emph{ Statistical Applications in Genetics and Molecular Biology}, 17(2)}
Wu, C., Jiang, Y., Ren, J., Cui, Y., Ma, S. (2018). Dissecting gene-environment interactions: A penalized robust approach accounting for hierarchical structures.
\href{https://doi.org/10.1002/sim.7518}{\emph{Statistics in Medicine}, 37:437–456}
Wu, C., Zhou, F., Ren, J., Li, X., Jiang, Y., Ma, S. (2019). A Selective Review of Multi-Level Omics Data Integration Using Variable Selection.
\href{https://doi.org/10.3390/ht8010004}{\emph{High-Throughput}, 8(1)}
}
\seealso{
\code{\link{BVCfit}}
}
\keyword{overview}
|
#' Dirichlet multinomial GLM likelihood ratio test for a single cluster
#'
#' Fits null and full Dirichlet-multinomial GLMs (via Stan optimization) and
#' performs a likelihood ratio test. For apparently significant results the
#' null model is refit starting from the full-model solution, guarding
#' against the initial null optimization having found a poorer local optimum.
#'
#' @param xFull [samples] x [covariates] matrix for the alternative model
#' @param xNull [samples] x [covariates] matrix for the null model
#' @param y [samples] x [introns] matrix of intron usage counts
#' @param concShape Gamma shape parameter for concentration parameter
#' @param concRate Gamma rate parameter for concentration parameter
#' @param fit_null Optionally the fitted null model (used in \code{\link{splicing_qtl}} to save repeatedly fitting the null for each cis-SNP)
#' @param robust Whether to include an outlier model (i.e. use dm_glm_ht_robust rather than dm_glm_heavy_tail)
#' @param outlier_prior_a Only used for robust model. The outlier probability outlier_prob ~ Beta(outlier_prior_a,outlier_prior_b)
#' @param outlier_prior_b Only used for robust model. The outlier probability outlier_prob ~ Beta(outlier_prior_a,outlier_prior_b)
#' @param M Passed to Stan as data (heavy-tail degrees-of-freedom setting)
#' @param ... Additional arguments passed on to \code{rstan::optimizing}
#' @return A list with the log likelihood ratio (loglr), degrees of freedom
#'   (df), LRT p-value (lrtp), the fitted null and full models, and a flag
#'   indicating whether the null model was refit.
#' @importFrom rstan optimizing
#' @importFrom stats pchisq
#' @export
dirichlet_multinomial_anova_ht <- function(xFull, xNull, y,
                                           concShape = 1.0001, concRate = 1e-4,
                                           fit_null = NULL, robust = TRUE,
                                           outlier_prior_a = 1.01,
                                           outlier_prior_b = 100, M = 2, ...) {
  K <- ncol(y)
  model_to_use <- if (robust) stanmodels$dm_glm_ht_robust else stanmodels$dm_glm_heavy_tail
  dat_null <- list(N = nrow(xNull), K = K, M = M, P = ncol(xNull), y = y,
                   x = xNull, concShape = concShape, concRate = concRate,
                   outlier_prior_a = outlier_prior_a,
                   outlier_prior_b = outlier_prior_b)
  # fit null model, unless a previously fitted one was supplied
  if (is.null(fit_null)) {
    fit_null <- rstan::optimizing(model_to_use, data = dat_null, as_vector = FALSE, ...)
  }
  colnames(fit_null$par$beta_raw) <- colnames(y)
  rownames(fit_null$par$beta_raw) <- colnames(xNull)
  dat_full <- dat_null
  dat_full$P <- ncol(xFull)
  dat_full$x <- xFull
  # initialize the full model from the null solution
  init <- fit_null$par
  init$beta_scale <- rep(1, ncol(xFull))
  init$theta <- sanitize_simplex(fit_null$par$theta)
  # beta_raw must live _in_ the simplex
  init$beta_raw <- matrix(1e-4, ncol(xFull), K)
  init$beta_raw[seq_len(ncol(xNull)), ] <- t(apply(fit_null$par$beta_raw, 1, sanitize_simplex))
  init$beta_raw <- sweep(init$beta_raw, 1, rowSums(init$beta_raw), "/")
  init$beta_scale[seq_len(ncol(xNull))] <- fit_null$par$beta_scale
  stopifnot(all(is.finite(unlist(init))))
  # fit full model
  fit_full <- rstan::optimizing(model_to_use, data = dat_full, init = init, as_vector = FALSE, ...)
  colnames(fit_full$par$beta_raw) <- colnames(y)
  rownames(fit_full$par$beta_raw) <- colnames(xFull)
  loglr <- fit_full$value - fit_null$value
  df <- (ncol(xFull) - ncol(xNull)) * (K - 1)
  refit_null_flag <- FALSE
  lrtp <- pchisq(2.0 * loglr, lower.tail = FALSE, df = df)
  if (lrtp < .001) {
    # Refit the null starting from the full-model solution; keep the refit
    # only if it improves on the original null fit.
    init <- fit_full$par
    init$beta_raw <- init$beta_raw[seq_len(dat_null$P), , drop = FALSE]
    init$beta_raw <- t(apply(init$beta_raw, 1, sanitize_simplex))
    init$beta_scale <- as.array(init$beta_scale[seq_len(dat_null$P)])
    refit_null <- rstan::optimizing(model_to_use, data = dat_null, init = init, as_vector = FALSE, ...)
    if (refit_null$value > fit_null$value) {
      refit_null_flag <- TRUE
      fit_null <- refit_null
      loglr <- fit_full$value - fit_null$value
      lrtp <- pchisq(2.0 * loglr, lower.tail = FALSE, df = df)
    }
  }
  list(loglr = loglr, df = df, lrtp = lrtp, fit_null = fit_null,
       fit_full = fit_full, refit_null_flag = refit_null_flag)
}
| /other_ideas/dm_glm_heavy_tail.R | no_license | rmagoglia/leafcutter | R | false | false | 3,167 | r |
#' Dirichlet multinomial GLM likelihood ratio test for a single cluster
#'
#' @param xFull [samples] x [covariates] matrix for the alternative model
#' @param xNull [samples] x [covariates] matrix for the null model
#' @param y [samples] x [introns] matrix of intron usage counts
#' @param concShape Gamma shape parameter for concentration parameter
#' @param concShape Gamma rate parameter for concentration parameter
#' @param robust Whether to include an outlier model (i.e. use dm_glm_multi_conc_robust rather than dm_glm_multi_conc)
#' @param outlier_prior_a Only used for robust model. The outlier probability outlier_prob ~ Beta(outlier_prior_a,outlier_prior_b)
#' @param outlier_prior_b Only used for robust model. The outlier probability outlier_prob ~ Beta(outlier_prior_a,outlier_prior_b)
#' @param fit_null Optionally the fitted null model (used in \code{\link{splicing_qtl}} to save repeatedly fitting the null for each cis-SNP)
#' @importFrom rstan optimizing
#' @export
# Fits null and full Dirichlet-multinomial GLMs (via Stan optimization) and
# performs a likelihood ratio test; refits the null from the full-model
# solution when the result looks significant, to guard against a poor null
# local optimum. Returns loglr, df, lrtp, the two fits, and a refit flag.
dirichlet_multinomial_anova_ht <- function(xFull, xNull, y,
                                           concShape = 1.0001, concRate = 1e-4,
                                           fit_null = NULL, robust = TRUE,
                                           outlier_prior_a = 1.01,
                                           outlier_prior_b = 100, M = 2, ...) {
  K <- ncol(y)
  model_to_use <- if (robust) stanmodels$dm_glm_ht_robust else stanmodels$dm_glm_heavy_tail
  dat_null <- list(N = nrow(xNull), K = K, M = M, P = ncol(xNull), y = y,
                   x = xNull, concShape = concShape, concRate = concRate,
                   outlier_prior_a = outlier_prior_a,
                   outlier_prior_b = outlier_prior_b)
  # fit null model, unless a previously fitted one was supplied
  if (is.null(fit_null)) {
    fit_null <- rstan::optimizing(model_to_use, data = dat_null, as_vector = FALSE, ...)
  }
  colnames(fit_null$par$beta_raw) <- colnames(y)
  rownames(fit_null$par$beta_raw) <- colnames(xNull)
  dat_full <- dat_null
  dat_full$P <- ncol(xFull)
  dat_full$x <- xFull
  # initialize the full model from the null solution
  init <- fit_null$par
  init$beta_scale <- rep(1, ncol(xFull))
  init$theta <- sanitize_simplex(fit_null$par$theta)
  # beta_raw must live _in_ the simplex
  init$beta_raw <- matrix(1e-4, ncol(xFull), K)
  init$beta_raw[seq_len(ncol(xNull)), ] <- t(apply(fit_null$par$beta_raw, 1, sanitize_simplex))
  init$beta_raw <- sweep(init$beta_raw, 1, rowSums(init$beta_raw), "/")
  init$beta_scale[seq_len(ncol(xNull))] <- fit_null$par$beta_scale
  stopifnot(all(is.finite(unlist(init))))
  # fit full model
  fit_full <- rstan::optimizing(model_to_use, data = dat_full, init = init, as_vector = FALSE, ...)
  colnames(fit_full$par$beta_raw) <- colnames(y)
  rownames(fit_full$par$beta_raw) <- colnames(xFull)
  loglr <- fit_full$value - fit_null$value
  df <- (ncol(xFull) - ncol(xNull)) * (K - 1)
  refit_null_flag <- FALSE
  lrtp <- pchisq(2.0 * loglr, lower.tail = FALSE, df = df)
  if (lrtp < .001) {
    # Refit the null starting from the full-model solution; keep the refit
    # only if it improves on the original null fit.
    init <- fit_full$par
    init$beta_raw <- init$beta_raw[seq_len(dat_null$P), , drop = FALSE]
    init$beta_raw <- t(apply(init$beta_raw, 1, sanitize_simplex))
    init$beta_scale <- as.array(init$beta_scale[seq_len(dat_null$P)])
    refit_null <- rstan::optimizing(model_to_use, data = dat_null, init = init, as_vector = FALSE, ...)
    if (refit_null$value > fit_null$value) {
      refit_null_flag <- TRUE
      fit_null <- refit_null
      loglr <- fit_full$value - fit_null$value
      lrtp <- pchisq(2.0 * loglr, lower.tail = FALSE, df = df)
    }
  }
  list(loglr = loglr, df = df, lrtp = lrtp, fit_null = fit_null,
       fit_full = fit_full, refit_null_flag = refit_null_flag)
}
|
# Integration tests for updating variable values in a remote Crunch dataset.
# Gated on run.integration.tests: they require a live authenticated session.
# NOTE(review): the test_that blocks below are order-dependent -- each update
# to `ds` sets up the state the next block asserts on. Do not reorder.
context("Update a dataset")
if (run.integration.tests) {
with_test_authentication({
# `df` is a fixture data.frame provided by the package's test setup (not
# visible in this file); test.dataset(df) uploads it and binds the remote
# dataset as `ds` for the duration of the with() block.
with(test.dataset(df), {
test_that("Can update numeric variable with values", {
ds$v3 <- 9:28
# presumably df$v3 is 8:27, so every element should differ by exactly 1
test <- as.vector(ds$v3) - df$v3
expect_true(all(test == 1))
})
# a scalar assignment should recycle to every row, as in base R
ds$v3 <- 1
test_that("Value recycling on insert is consistent with R", {
expect_true(all(as.vector(ds$v3) == 1))
})
# first 10 rows -> 2, remaining 10 rows stay 1
ds$v3[1:10] <- 2
test_that("Update numeric with R numeric filter and values", {
expect_equivalent(mean(ds$v3), 1.5)
})
# the filter expression is evaluated server-side as a LogicalExpression
ds$v3[ds$v3 == 1] <- 3
test_that("Update numeric with LogicalExpression filter", {
expect_equivalent(mean(ds$v3), 2.5)
})
ds[ds$v3 == 2, "v3"] <- 4
test_that("Update with LogicalExpression within dataset", {
expect_equivalent(mean(ds$v3), 3.5)
})
ds$v3 <- c(rep(5, 10), rep(7, 10))
test_that("Just update the values", {
expect_equivalent(mean(ds$v3), 6)
})
# (the typo "expresssion" in the test name below is a runtime string and is
# deliberately left as-is)
test_that("Can update numeric variable with expresssion", {
ds$v3 <- ds$v3 + 2
expect_equivalent(as.vector(ds$v3), c(rep(7, 10), rep(9, 10)))
})
test_that("Can filter on is.na", {
# presumably the last 5 rows of df$v2 are NA -- those v3 values become 0
ds$v3[is.na(ds$v2)] <- 0
expect_equivalent(as.vector(ds$v3),
c(rep(7, 10), rep(9, 5), rep(0, 5)))
})
test_that("Can update text", {
ds$v2[is.na(ds$v1)] <- "z"
expect_identical(as.vector(ds$v2)[1:8],
c(rep("z", 5), "f", "g", "h"))
ds[ds$v2 %in% "z", "v2"] <- "y"
expect_identical(as.vector(ds$v2)[1:8],
c(rep("y", 5), "f", "g", "h"))
})
test_that("Can update datetime", {
newvals <- as.Date(0:12, origin="1985-10-26")
ds$v5[ds$v5 >= as.Date("1955-11-12")] <- newvals
expect_identical(max(ds$v5), as.Date("1985-11-07"))
})
# a second, independent dataset exercising date updates in isolation
date.before <- rep(c("2014-04-15", "2014-08-15"), 2)
date.after <- c("2014-04-15", "2014-09-15", "2014-04-15",
"2014-09-15")
date.df <- data.frame(wave=as.Date(date.before))
with(test.dataset(date.df, "date.ds"), {
test_that("Another datetime update", {
expect_identical(as.vector(date.ds$wave),
as.Date(date.before))
date.ds$wave[date.ds$wave == as.Date("2014-08-15")] <- as.Date("2014-09-15")
expect_identical(as.vector(date.ds$wave),
as.Date(date.after))
})
})
## Categorical: values may be given as character (category names), factor,
## or numeric (category ids)
ds$v4[is.na(ds$v2)] <- "B"
test_that("Can update categorical variables with character", {
expect_equivalent(table(ds$v4)["B"], c(B=13L))
})
ds$v4[is.na(ds$v2)] <- factor("C")
test_that("Can update categorical with factor", {
expect_equivalent(table(ds$v4)["C"], c(C=12L))
})
ds$v4[is.na(ds$v2)] <- c(2,1,2,1,2)
test_that("Can update categorical with numeric (ids)", {
expect_equivalent(table(ds$v4), table(df$v4))
})
test_that("Validation on categorical update", {
# names not already defined as categories must be rejected server-side
expect_error(ds$v4[is.na(ds$v2)] <- as.factor(LETTERS[1:5]),
"Input values A, D, and E are not present in the category names of variable")
})
})
})
}
| /tests/testthat/test-update.R | no_license | digideskio/rcrunch | R | false | false | 3,770 | r | context("Update a dataset")
# Integration tests for updating variable values in a remote Crunch dataset.
# Gated on run.integration.tests: they require a live authenticated session.
# NOTE(review): the test_that blocks below are order-dependent -- each update
# to `ds` sets up the state the next block asserts on. Do not reorder.
if (run.integration.tests) {
with_test_authentication({
# `df` is a fixture data.frame provided by the package's test setup (not
# visible in this file); test.dataset(df) uploads it and binds the remote
# dataset as `ds` for the duration of the with() block.
with(test.dataset(df), {
test_that("Can update numeric variable with values", {
ds$v3 <- 9:28
# presumably df$v3 is 8:27, so every element should differ by exactly 1
test <- as.vector(ds$v3) - df$v3
expect_true(all(test == 1))
})
# a scalar assignment should recycle to every row, as in base R
ds$v3 <- 1
test_that("Value recycling on insert is consistent with R", {
expect_true(all(as.vector(ds$v3) == 1))
})
# first 10 rows -> 2, remaining 10 rows stay 1
ds$v3[1:10] <- 2
test_that("Update numeric with R numeric filter and values", {
expect_equivalent(mean(ds$v3), 1.5)
})
# the filter expression is evaluated server-side as a LogicalExpression
ds$v3[ds$v3 == 1] <- 3
test_that("Update numeric with LogicalExpression filter", {
expect_equivalent(mean(ds$v3), 2.5)
})
ds[ds$v3 == 2, "v3"] <- 4
test_that("Update with LogicalExpression within dataset", {
expect_equivalent(mean(ds$v3), 3.5)
})
ds$v3 <- c(rep(5, 10), rep(7, 10))
test_that("Just update the values", {
expect_equivalent(mean(ds$v3), 6)
})
# (the typo "expresssion" in the test name below is a runtime string and is
# deliberately left as-is)
test_that("Can update numeric variable with expresssion", {
ds$v3 <- ds$v3 + 2
expect_equivalent(as.vector(ds$v3), c(rep(7, 10), rep(9, 10)))
})
test_that("Can filter on is.na", {
# presumably the last 5 rows of df$v2 are NA -- those v3 values become 0
ds$v3[is.na(ds$v2)] <- 0
expect_equivalent(as.vector(ds$v3),
c(rep(7, 10), rep(9, 5), rep(0, 5)))
})
test_that("Can update text", {
ds$v2[is.na(ds$v1)] <- "z"
expect_identical(as.vector(ds$v2)[1:8],
c(rep("z", 5), "f", "g", "h"))
ds[ds$v2 %in% "z", "v2"] <- "y"
expect_identical(as.vector(ds$v2)[1:8],
c(rep("y", 5), "f", "g", "h"))
})
test_that("Can update datetime", {
newvals <- as.Date(0:12, origin="1985-10-26")
ds$v5[ds$v5 >= as.Date("1955-11-12")] <- newvals
expect_identical(max(ds$v5), as.Date("1985-11-07"))
})
# a second, independent dataset exercising date updates in isolation
date.before <- rep(c("2014-04-15", "2014-08-15"), 2)
date.after <- c("2014-04-15", "2014-09-15", "2014-04-15",
"2014-09-15")
date.df <- data.frame(wave=as.Date(date.before))
with(test.dataset(date.df, "date.ds"), {
test_that("Another datetime update", {
expect_identical(as.vector(date.ds$wave),
as.Date(date.before))
date.ds$wave[date.ds$wave == as.Date("2014-08-15")] <- as.Date("2014-09-15")
expect_identical(as.vector(date.ds$wave),
as.Date(date.after))
})
})
## Categorical: values may be given as character (category names), factor,
## or numeric (category ids)
ds$v4[is.na(ds$v2)] <- "B"
test_that("Can update categorical variables with character", {
expect_equivalent(table(ds$v4)["B"], c(B=13L))
})
ds$v4[is.na(ds$v2)] <- factor("C")
test_that("Can update categorical with factor", {
expect_equivalent(table(ds$v4)["C"], c(C=12L))
})
ds$v4[is.na(ds$v2)] <- c(2,1,2,1,2)
test_that("Can update categorical with numeric (ids)", {
expect_equivalent(table(ds$v4), table(df$v4))
})
test_that("Validation on categorical update", {
# names not already defined as categories must be rejected server-side
expect_error(ds$v4[is.na(ds$v2)] <- as.factor(LETTERS[1:5]),
"Input values A, D, and E are not present in the category names of variable")
})
})
})
}
|
#### FIRST LOOK of df_6 ####
# df_6_camp_event (campaign email events) is presumably loaded by an earlier
# script in this pipeline, as are df_1_cli_fid_clean and df_5_camp_cat_clean
# used below -- confirm load order before running this file standalone.
str(df_6_camp_event)
summary(df_6_camp_event)
#### START CLEANING df_6 ####
# work on a copy so the raw input stays untouched
df_6_camp_event_clean <- df_6_camp_event
#### CLEANING DATA TYPES in df_6 ####
## formatting dates and times ##
# EVENT_DATE arrives as an ISO-8601 string with a "T" separator; derive a
# POSIXct timestamp, the hour of day, and a plain Date from it
df_6_camp_event_clean <- df_6_camp_event_clean %>%
mutate(EVENT_DATETIME = as.POSIXct(EVENT_DATE, format="%Y-%m-%dT%H:%M:%S")) %>%
mutate(EVENT_HOUR = hour(EVENT_DATETIME)) %>%
mutate(EVENT_DATE = as.Date(EVENT_DATETIME))
#### CONSISTENCY CHECK ID_CLI in df_1/df_6 ####
# full outer join of the distinct client ids of both tables; the resulting
# NA pattern in (is_in_df_1, is_in_df_6) shows which ids appear where
cons_idcli_df1_df6 <- df_1_cli_fid_clean %>%
select(ID_CLI) %>%
distinct() %>%
mutate(is_in_df_1 = 1) %>%
distinct() %>%
full_join(df_6_camp_event_clean %>%
select(ID_CLI) %>%
distinct() %>%
mutate(is_in_df_6 = 1) %>%
distinct()
, by = "ID_CLI"
) %>%
group_by(is_in_df_1, is_in_df_6) %>%
summarise(NUM_ID_CLIs = n_distinct(ID_CLI)) %>%
as.data.frame()
cons_idcli_df1_df6
#!!! NOTE: all ID_CLI in df_6 are mapped in df_1, but not all ID_CLI in df_1 are mapped in df_6 !!!#
#### CONSISTENCY CHECK ID_CAMP in df_5/df_6 ####
# same outer-join technique for campaign ids against the campaign catalog
cons_idcamp_df5_df6 <- df_5_camp_cat_clean %>%
select(ID_CAMP) %>%
distinct() %>%
mutate(is_in_df_5 = 1) %>%
distinct() %>%
full_join(df_6_camp_event_clean %>%
select(ID_CAMP) %>%
distinct() %>%
mutate(is_in_df_6 = 1) %>%
distinct()
, by = "ID_CAMP"
) %>%
group_by(is_in_df_5, is_in_df_6) %>%
summarise(NUM_ID_CAMPs = n_distinct(ID_CAMP)) %>%
as.data.frame()
cons_idcamp_df5_df6
#!!! NOTE: all ID_CAMP in df_6 are mapped in df_5, but not all ID_CAMP in df_5 are mapped in df_6 !!!#
#### RESHAPING df_6 ####
## remapping TYP_EVENT values "E" [ERROR] and "B" [BOUNCE] into a single level "F" [FAILURE] ##
df_6_camp_event_clean <- df_6_camp_event_clean %>%
mutate(TYP_EVENT = as.factor(if_else(TYP_EVENT == "E" | TYP_EVENT == "B", "F", as.character(TYP_EVENT))))
## adding the campaign type (TYP_CAMP) from the df_5 campaign catalog ##
df_6_camp_event_clean <- df_6_camp_event_clean %>%
left_join(df_5_camp_cat_clean
, by = "ID_CAMP")
## organize the data adding to each sending event the corresponding opens/clicks/fails
# sends: one row per delivered communication ("S" events); this becomes the
# spine that opens/clicks/fails are attached to below
df_sends <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "S") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_S = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, SEND_DATE = EVENT_DATE) %>%
as.data.frame()
# opens ("V" events)
# there could be multiple opens of the same communication
# 1- count the open events
# 2- consider explicitly only the first open
df_opens_prep <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "V") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_O = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, OPEN_DATETIME = EVENT_DATETIME
, OPEN_DATE = EVENT_DATE)
total_opens <- df_opens_prep %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
summarise(NUM_OPENs = n_distinct(ID_EVENT_O))
# keep only the earliest open per (client, campaign, delivery); the extra
# row_number() == 1 filter breaks ties when two opens share the same timestamp
df_opens <- df_opens_prep %>%
left_join(total_opens
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY")) %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
filter(OPEN_DATETIME == min(OPEN_DATETIME)) %>%
filter(row_number() == 1) %>%
ungroup() %>%
as.data.frame()
# clicks ("C" events)
# there could be multiple clicks of the same communication
# 1- count the click events
# 2- consider explicitly only the first click
df_clicks_prep <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "C") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_C = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, CLICK_DATETIME = EVENT_DATETIME
, CLICK_DATE = EVENT_DATE)
total_clicks <- df_clicks_prep %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
summarise(NUM_CLICKs = n_distinct(ID_EVENT_C))
df_clicks <- df_clicks_prep %>%
left_join(total_clicks
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY")) %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
filter(CLICK_DATETIME == min(CLICK_DATETIME)) %>%
filter(row_number() == 1) %>%
ungroup() %>%
as.data.frame()
# fails ("F" events, i.e. the remapped errors/bounces); only the first failure
# per (client, campaign, delivery) is kept, with no count column
df_fails <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "F") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_F = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, FAIL_DATETIME = EVENT_DATETIME
, FAIL_DATE = EVENT_DATE) %>%
group_by(ID_CLI, ID_CAMP, ID_DELIVERY) %>%
filter(FAIL_DATETIME == min(FAIL_DATETIME)) %>%
filter(row_number() == 1) %>%
ungroup() %>%
as.data.frame()
# combine sends opens clicks and fails into one row per send event.
# The filters drop rows whose event dates are out of order (e.g. an open
# recorded before its send).
# NOTE(review): filter(is.na(CLICK_DATE) | OPEN_DATE <= CLICK_DATE) also drops
# rows where CLICK_DATE is present but OPEN_DATE is NA (the comparison yields
# NA) -- i.e. clicks without a recorded open are discarded. Confirm this is
# intended.
df_6_camp_event_clean_final <- df_sends %>%
left_join(df_opens
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY", "TYP_CAMP")
) %>%
filter(is.na(OPEN_DATE) | SEND_DATE <= OPEN_DATE) %>%
left_join(df_clicks
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY", "TYP_CAMP")
) %>%
filter(is.na(CLICK_DATE) | OPEN_DATE <= CLICK_DATE) %>%
left_join(df_fails
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY", "TYP_CAMP")
) %>%
filter(is.na(FAIL_DATE) | SEND_DATE <= FAIL_DATE) %>%
mutate(OPENED = !is.na(ID_EVENT_O)) %>%
mutate(CLICKED = !is.na(ID_EVENT_C)) %>%
mutate(FAILED = !is.na(ID_EVENT_F)) %>%
mutate(DAYS_TO_OPEN = as.integer(OPEN_DATE - SEND_DATE)) %>%
select(ID_EVENT_S
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, SEND_DATE
, OPENED
, OPEN_DATE
, DAYS_TO_OPEN
, NUM_OPENs
, CLICKED
, CLICK_DATE
, NUM_CLICKs
, FAILED
)
#### EXPLORE VARIABLES in df_6 ####
### GENERAL OVERVIEW ###
## compute aggregate
df6_overview <- df_6_camp_event_clean_final %>%
summarise(MIN_DATE = min(SEND_DATE) # earliest send date
, MAX_DATE = max(SEND_DATE) # latest send date
, TOT_EVENTs = n_distinct(ID_EVENT_S) # total number of send events
, TOT_CLIs = n_distinct(ID_CLI)) # total number of distinct clients
df6_overview
### GENERAL OVERVIEW by TYP_CAMP ###
## compute aggregate
df6_overviewbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP) %>%
summarise(MIN_DATE = min(SEND_DATE)
, MAX_DATE = max(SEND_DATE)
, TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI))
df6_overviewbytyp
## plot aggregate: bar chart of send events per campaign type
plot_df6_overviewbytyp <- (
ggplot(data=df6_overviewbytyp
, aes(x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", fill="steelblue") +
theme_minimal()
)
plot_df6_overviewbytyp
### Variable OPENED ###
## compute aggregate: share of opened vs unopened sends over the whole table
## (TYP_CAMP = 'ALL' is a dummy x value so the plot below has a single bar)
df6_dist_opened <- df_6_camp_event_clean_final %>%
group_by(OPENED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
mutate(TYP_CAMP = 'ALL') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/df6_overview$TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/df6_overview$TOT_CLIs)
df6_dist_opened
## plot aggregate
plot_df6_dist_opened <- (
ggplot(data=df6_dist_opened
, aes(fill=OPENED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", position="fill") +
theme_minimal()
)
plot_df6_dist_opened
### Variable OPENED by TYP_CAMP ###
## compute aggregate: opened/unopened counts and shares within each campaign
## type (denominators come from df6_overviewbytyp)
df6_dist_openedbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP, OPENED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
left_join(df6_overviewbytyp %>%
select(TYP_CAMP
, ALL_TOT_EVENTs = TOT_EVENTs
, ALL_TOT_CLIs = TOT_CLIs)
, by='TYP_CAMP') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/ALL_TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/ALL_TOT_CLIs) %>%
select(TYP_CAMP
, OPENED
, TOT_EVENTs
, TOT_CLIs
, PERCENT_EVENTs
, PERCENT_CLIs
)
df6_dist_openedbytyp
## plot aggregate (absolute counts)
plot_df6_dist_openedbytyp <- (
ggplot(data=df6_dist_openedbytyp
, aes(fill=OPENED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity") +
theme_minimal()
)
plot_df6_dist_openedbytyp
## plot aggregate percent (stacked to 100%)
plot_df6_dist_openedbytyp_percent <- (
ggplot(data=df6_dist_openedbytyp
, aes(fill=OPENED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(position="fill", stat="identity") +
theme_minimal()
)
plot_df6_dist_openedbytyp_percent
### Variable DAYS_TO_OPEN
## compute aggregate: per client, the (floored) mean number of days between
## send and first open, then how many clients fall on each value
df6_dist_daystoopen <- df_6_camp_event_clean_final %>%
filter(OPENED) %>%
group_by(ID_CLI) %>%
summarise(AVG_DAYS_TO_OPEN = floor(mean(DAYS_TO_OPEN))) %>%
ungroup() %>%
group_by(AVG_DAYS_TO_OPEN) %>%
summarise(TOT_CLIs = n_distinct(ID_CLI))
df6_dist_daystoopen
## plot aggregate (restricted to < 14 days for readability)
plot_df6_dist_daystoopen <- (
ggplot(data=df6_dist_daystoopen %>%
filter(AVG_DAYS_TO_OPEN < 14)
, aes(x=AVG_DAYS_TO_OPEN, y=TOT_CLIs)) +
geom_bar(stat="identity", fill="steelblue") +
theme_minimal()
)
plot_df6_dist_daystoopen
### DAYS_TO_OPEN vs CUMULATE PERCENT ###
## compute aggregate: cumulative share of clients covered within x days
df6_dist_daystoopen_vs_cumulate <- df6_dist_daystoopen %>%
arrange(AVG_DAYS_TO_OPEN) %>%
mutate(PERCENT_COVERED = cumsum(TOT_CLIs)/sum(TOT_CLIs))
## plot aggregate
plot_df6_dist_daystoopen_vs_cumulate <- (
ggplot(data=df6_dist_daystoopen_vs_cumulate %>%
filter(AVG_DAYS_TO_OPEN < 14)
, aes(x=AVG_DAYS_TO_OPEN, y=PERCENT_COVERED)) +
geom_line() +
geom_point() +
labs(title = "Average Days to Open a Mail",
x = "Average Days to Open",
y = "Percent Covered") + #-- Labs
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_continuous(breaks=seq(0,14,2), minor_breaks=0:14) +
theme_minimal()
)
plot_df6_dist_daystoopen_vs_cumulate
#### ???? TO DO df_6 ???? ####
# EXPLORE the following relevant variables in df_6_camp_event_clean_final:
# - CLICKED/CLICKED by TYP_CAMP
## compute the distribution of the variable CLICKED (overall; TYP_CAMP = 'ALL'
## is a dummy x value so the single-bar plot below works)
df6_dist_clicked <- df_6_camp_event_clean_final %>%
group_by(CLICKED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
mutate(TYP_CAMP = 'ALL') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/df6_overview$TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/df6_overview$TOT_CLIs)
df6_dist_clicked
## plot distribution of the variable CLICKED
plot_df6_dist_clicked <- (
ggplot(data=df6_dist_clicked
, aes(fill=CLICKED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", position="fill") +
theme_minimal()
)
plot_df6_dist_clicked
### Variable CLICKED by TYP_CAMP ###
## compute the distribution of the variable CLICKED by TYP_CAMP
## (denominators per campaign type come from df6_overviewbytyp)
df6_dist_clickedbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP, CLICKED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
left_join(df6_overviewbytyp %>%
select(TYP_CAMP
, ALL_TOT_EVENTs = TOT_EVENTs
, ALL_TOT_CLIs = TOT_CLIs)
, by='TYP_CAMP') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/ALL_TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/ALL_TOT_CLIs) %>%
select(TYP_CAMP
, CLICKED
, TOT_EVENTs
, TOT_CLIs
, PERCENT_EVENTs
, PERCENT_CLIs
)
df6_dist_clickedbytyp
## plot distribution of the variable CLICKED by TYP_CAMP (absolute counts)
plot_df6_dist_clickedbytyp <- (
ggplot(data=df6_dist_clickedbytyp
, aes(fill=CLICKED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity") +
theme_minimal()
)
plot_df6_dist_clickedbytyp
## plot percent distribution of the variable CLICKED by TYP_CAMP
plot_df6_dist_clickedbytyp_percent <- (
ggplot(data=df6_dist_clickedbytyp
, aes(fill=CLICKED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(position="fill", stat="identity") +
theme_minimal()
)
plot_df6_dist_clickedbytyp_percent
# - FAILED/FAILED by TYP_CAMP
## compute the distribution of the variable FAILED (overall)
df6_dist_failed <- df_6_camp_event_clean_final %>%
group_by(FAILED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
mutate(TYP_CAMP = 'ALL') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/df6_overview$TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/df6_overview$TOT_CLIs)
df6_dist_failed
## plot distribution of the variable FAILED
plot_df6_dist_failed <- (
ggplot(data=df6_dist_failed
, aes(fill=FAILED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", position="fill") +
theme_minimal()
)
plot_df6_dist_failed
### Variable FAILED by TYP_CAMP ###
## compute the distribution of the variable FAILED by TYP_CAMP
df6_dist_failedbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP, FAILED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
left_join(df6_overviewbytyp %>%
select(TYP_CAMP
, ALL_TOT_EVENTs = TOT_EVENTs
, ALL_TOT_CLIs = TOT_CLIs)
, by='TYP_CAMP') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/ALL_TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/ALL_TOT_CLIs) %>%
select(TYP_CAMP
, FAILED
, TOT_EVENTs
, TOT_CLIs
, PERCENT_EVENTs
, PERCENT_CLIs
)
df6_dist_failedbytyp
## plot distribution of the variable FAILED by TYP_CAMP (absolute counts)
plot_df6_dist_failedbytyp <- (
ggplot(data=df6_dist_failedbytyp
, aes(fill=FAILED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity") +
theme_minimal()
)
plot_df6_dist_failedbytyp
## plot percent distribution of the variable FAILED by TYP_CAMP
plot_df6_dist_failedbytyp_percent <- (
ggplot(data=df6_dist_failedbytyp
, aes(fill=FAILED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(position="fill", stat="identity") +
theme_minimal()
)
plot_df6_dist_failedbytyp_percent
## compute the distribution of the variable NUM_OPENs
## (NUM_OPENs is NA for never-opened sends; those rows form their own group)
df6_dist_num_opens <- df_6_camp_event_clean_final %>%
group_by(NUM_OPENs) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)) %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/sum(TOT_EVENTs))
df6_dist_num_opens
## plot distribution of the variable NUM_OPENs (x axis clipped to [0, 15])
plot_df6_dist_num_opens <- (
ggplot(data=df6_dist_num_opens
, aes(x=NUM_OPENs, y=TOT_EVENTs)) +
geom_bar(stat="identity", fill="steelblue" ) +
xlim(0,15)+
theme_minimal()
)
plot_df6_dist_num_opens
## compute the distribution of the variable NUM_CLICKs
df6_dist_num_clicks <- df_6_camp_event_clean_final %>%
group_by(NUM_CLICKs) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)) %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/sum(TOT_EVENTs))%>%
arrange(desc(PERCENT_EVENTs))
df6_dist_num_clicks
## plot distribution of the variable NUM_CLICKs (x axis clipped to [0, 15])
plot_df6_dist_num_clicks <- (
ggplot(data=df6_dist_num_clicks
, aes(x=NUM_CLICKs, y=TOT_EVENTs)) +
geom_bar(stat="identity", fill="steelblue" ) +
xlim(0,15)+
theme_minimal()
)
plot_df6_dist_num_clicks
#### FINAL REVIEW df_6_clean ####
str(df_6_camp_event_clean_final)
summary(df_6_camp_event_clean_final)
| /script/C06_preparation_df6.R | no_license | gmuoio/Digital_and_Web_Marketing | R | false | false | 15,191 | r | #### FIRST LOOK of df_6 ####
#### FIRST LOOK of df_6 ####
# df_6_camp_event (campaign email events) is presumably loaded by an earlier
# script in this pipeline, as are df_1_cli_fid_clean and df_5_camp_cat_clean
# used below -- confirm load order before running this file standalone.
str(df_6_camp_event)
summary(df_6_camp_event)
#### START CLEANING df_6 ####
# work on a copy so the raw input stays untouched
df_6_camp_event_clean <- df_6_camp_event
#### CLEANING DATA TYPES in df_6 ####
## formatting dates and times ##
# EVENT_DATE arrives as an ISO-8601 string with a "T" separator; derive a
# POSIXct timestamp, the hour of day, and a plain Date from it
df_6_camp_event_clean <- df_6_camp_event_clean %>%
mutate(EVENT_DATETIME = as.POSIXct(EVENT_DATE, format="%Y-%m-%dT%H:%M:%S")) %>%
mutate(EVENT_HOUR = hour(EVENT_DATETIME)) %>%
mutate(EVENT_DATE = as.Date(EVENT_DATETIME))
#### CONSISTENCY CHECK ID_CLI in df_1/df_6 ####
# full outer join of the distinct client ids of both tables; the resulting
# NA pattern in (is_in_df_1, is_in_df_6) shows which ids appear where
cons_idcli_df1_df6 <- df_1_cli_fid_clean %>%
select(ID_CLI) %>%
distinct() %>%
mutate(is_in_df_1 = 1) %>%
distinct() %>%
full_join(df_6_camp_event_clean %>%
select(ID_CLI) %>%
distinct() %>%
mutate(is_in_df_6 = 1) %>%
distinct()
, by = "ID_CLI"
) %>%
group_by(is_in_df_1, is_in_df_6) %>%
summarise(NUM_ID_CLIs = n_distinct(ID_CLI)) %>%
as.data.frame()
cons_idcli_df1_df6
#!!! NOTE: all ID_CLI in df_6 are mapped in df_1, but not all ID_CLI in df_1 are mapped in df_6 !!!#
#### CONSISTENCY CHECK ID_CAMP in df_5/df_6 ####
# same outer-join technique for campaign ids against the campaign catalog
cons_idcamp_df5_df6 <- df_5_camp_cat_clean %>%
select(ID_CAMP) %>%
distinct() %>%
mutate(is_in_df_5 = 1) %>%
distinct() %>%
full_join(df_6_camp_event_clean %>%
select(ID_CAMP) %>%
distinct() %>%
mutate(is_in_df_6 = 1) %>%
distinct()
, by = "ID_CAMP"
) %>%
group_by(is_in_df_5, is_in_df_6) %>%
summarise(NUM_ID_CAMPs = n_distinct(ID_CAMP)) %>%
as.data.frame()
cons_idcamp_df5_df6
#!!! NOTE: all ID_CAMP in df_6 are mapped in df_5, but not all ID_CAMP in df_5 are mapped in df_6 !!!#
#### RESHAPING df_6 ####
## remapping TYP_EVENT values "E" [ERROR] and "B" [BOUNCE] into a single level "F" [FAILURE] ##
df_6_camp_event_clean <- df_6_camp_event_clean %>%
mutate(TYP_EVENT = as.factor(if_else(TYP_EVENT == "E" | TYP_EVENT == "B", "F", as.character(TYP_EVENT))))
## adding the campaign type (TYP_CAMP) from the df_5 campaign catalog ##
df_6_camp_event_clean <- df_6_camp_event_clean %>%
left_join(df_5_camp_cat_clean
, by = "ID_CAMP")
## organize the data adding to each sending event the corresponding opens/clicks/fails
# sends: one row per delivered communication ("S" events); this becomes the
# spine that opens/clicks/fails are attached to below
df_sends <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "S") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_S = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, SEND_DATE = EVENT_DATE) %>%
as.data.frame()
# opens ("V" events)
# there could be multiple opens of the same communication
# 1- count the open events
# 2- consider explicitly only the first open
df_opens_prep <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "V") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_O = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, OPEN_DATETIME = EVENT_DATETIME
, OPEN_DATE = EVENT_DATE)
total_opens <- df_opens_prep %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
summarise(NUM_OPENs = n_distinct(ID_EVENT_O))
# keep only the earliest open per (client, campaign, delivery); the extra
# row_number() == 1 filter breaks ties when two opens share the same timestamp
df_opens <- df_opens_prep %>%
left_join(total_opens
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY")) %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
filter(OPEN_DATETIME == min(OPEN_DATETIME)) %>%
filter(row_number() == 1) %>%
ungroup() %>%
as.data.frame()
# clicks ("C" events)
# there could be multiple clicks of the same communication
# 1- count the click events
# 2- consider explicitly only the first click
df_clicks_prep <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "C") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_C = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, CLICK_DATETIME = EVENT_DATETIME
, CLICK_DATE = EVENT_DATE)
total_clicks <- df_clicks_prep %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
summarise(NUM_CLICKs = n_distinct(ID_EVENT_C))
df_clicks <- df_clicks_prep %>%
left_join(total_clicks
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY")) %>%
group_by(ID_CLI
, ID_CAMP
, ID_DELIVERY) %>%
filter(CLICK_DATETIME == min(CLICK_DATETIME)) %>%
filter(row_number() == 1) %>%
ungroup() %>%
as.data.frame()
# fails ("F" events, i.e. the remapped errors/bounces); only the first failure
# per (client, campaign, delivery) is kept, with no count column
df_fails <- df_6_camp_event_clean %>%
filter(TYP_EVENT == "F") %>%
select(-TYP_EVENT) %>%
select(ID_EVENT_F = ID_EVENT
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, FAIL_DATETIME = EVENT_DATETIME
, FAIL_DATE = EVENT_DATE) %>%
group_by(ID_CLI, ID_CAMP, ID_DELIVERY) %>%
filter(FAIL_DATETIME == min(FAIL_DATETIME)) %>%
filter(row_number() == 1) %>%
ungroup() %>%
as.data.frame()
# combine sends opens clicks and fails into one row per send event.
# The filters drop rows whose event dates are out of order (e.g. an open
# recorded before its send).
# NOTE(review): filter(is.na(CLICK_DATE) | OPEN_DATE <= CLICK_DATE) also drops
# rows where CLICK_DATE is present but OPEN_DATE is NA (the comparison yields
# NA) -- i.e. clicks without a recorded open are discarded. Confirm this is
# intended.
df_6_camp_event_clean_final <- df_sends %>%
left_join(df_opens
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY", "TYP_CAMP")
) %>%
filter(is.na(OPEN_DATE) | SEND_DATE <= OPEN_DATE) %>%
left_join(df_clicks
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY", "TYP_CAMP")
) %>%
filter(is.na(CLICK_DATE) | OPEN_DATE <= CLICK_DATE) %>%
left_join(df_fails
, by = c("ID_CLI", "ID_CAMP", "ID_DELIVERY", "TYP_CAMP")
) %>%
filter(is.na(FAIL_DATE) | SEND_DATE <= FAIL_DATE) %>%
mutate(OPENED = !is.na(ID_EVENT_O)) %>%
mutate(CLICKED = !is.na(ID_EVENT_C)) %>%
mutate(FAILED = !is.na(ID_EVENT_F)) %>%
mutate(DAYS_TO_OPEN = as.integer(OPEN_DATE - SEND_DATE)) %>%
select(ID_EVENT_S
, ID_CLI
, ID_CAMP
, TYP_CAMP
, ID_DELIVERY
, SEND_DATE
, OPENED
, OPEN_DATE
, DAYS_TO_OPEN
, NUM_OPENs
, CLICKED
, CLICK_DATE
, NUM_CLICKs
, FAILED
)
#### EXPLORE VARIABLES in df_6 ####
### GENERAL OVERVIEW ###
## compute aggregate
df6_overview <- df_6_camp_event_clean_final %>%
summarise(MIN_DATE = min(SEND_DATE) # earliest send date
, MAX_DATE = max(SEND_DATE) # latest send date
, TOT_EVENTs = n_distinct(ID_EVENT_S) # total number of send events
, TOT_CLIs = n_distinct(ID_CLI)) # total number of distinct clients
df6_overview
### GENERAL OVERVIEW by TYP_CAMP ###
## compute aggregate
df6_overviewbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP) %>%
summarise(MIN_DATE = min(SEND_DATE)
, MAX_DATE = max(SEND_DATE)
, TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI))
df6_overviewbytyp
## plot aggregate: bar chart of send events per campaign type
plot_df6_overviewbytyp <- (
ggplot(data=df6_overviewbytyp
, aes(x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", fill="steelblue") +
theme_minimal()
)
plot_df6_overviewbytyp
### Variable OPENED ###
## compute aggregate: share of opened vs unopened sends over the whole table
## (TYP_CAMP = 'ALL' is a dummy x value so the plot below has a single bar)
df6_dist_opened <- df_6_camp_event_clean_final %>%
group_by(OPENED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
mutate(TYP_CAMP = 'ALL') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/df6_overview$TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/df6_overview$TOT_CLIs)
df6_dist_opened
## plot aggregate
plot_df6_dist_opened <- (
ggplot(data=df6_dist_opened
, aes(fill=OPENED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", position="fill") +
theme_minimal()
)
plot_df6_dist_opened
### Variable OPENED by TYP_CAMP ###
## compute aggregate: opened/unopened counts and shares within each campaign
## type (denominators come from df6_overviewbytyp)
df6_dist_openedbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP, OPENED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
left_join(df6_overviewbytyp %>%
select(TYP_CAMP
, ALL_TOT_EVENTs = TOT_EVENTs
, ALL_TOT_CLIs = TOT_CLIs)
, by='TYP_CAMP') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/ALL_TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/ALL_TOT_CLIs) %>%
select(TYP_CAMP
, OPENED
, TOT_EVENTs
, TOT_CLIs
, PERCENT_EVENTs
, PERCENT_CLIs
)
df6_dist_openedbytyp
## plot aggregate (absolute counts)
plot_df6_dist_openedbytyp <- (
ggplot(data=df6_dist_openedbytyp
, aes(fill=OPENED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity") +
theme_minimal()
)
plot_df6_dist_openedbytyp
## plot aggregate percent (stacked to 100%)
plot_df6_dist_openedbytyp_percent <- (
ggplot(data=df6_dist_openedbytyp
, aes(fill=OPENED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(position="fill", stat="identity") +
theme_minimal()
)
plot_df6_dist_openedbytyp_percent
### Variable DAYS_TO_OPEN
## compute aggregate: per client, the (floored) mean number of days between
## send and first open, then how many clients fall on each value
df6_dist_daystoopen <- df_6_camp_event_clean_final %>%
filter(OPENED) %>%
group_by(ID_CLI) %>%
summarise(AVG_DAYS_TO_OPEN = floor(mean(DAYS_TO_OPEN))) %>%
ungroup() %>%
group_by(AVG_DAYS_TO_OPEN) %>%
summarise(TOT_CLIs = n_distinct(ID_CLI))
df6_dist_daystoopen
## plot aggregate (restricted to < 14 days for readability)
plot_df6_dist_daystoopen <- (
ggplot(data=df6_dist_daystoopen %>%
filter(AVG_DAYS_TO_OPEN < 14)
, aes(x=AVG_DAYS_TO_OPEN, y=TOT_CLIs)) +
geom_bar(stat="identity", fill="steelblue") +
theme_minimal()
)
plot_df6_dist_daystoopen
### DAYS_TO_OPEN vs CUMULATE PERCENT ###
## compute aggregate: cumulative share of clients covered within x days
df6_dist_daystoopen_vs_cumulate <- df6_dist_daystoopen %>%
arrange(AVG_DAYS_TO_OPEN) %>%
mutate(PERCENT_COVERED = cumsum(TOT_CLIs)/sum(TOT_CLIs))
## plot aggregate
plot_df6_dist_daystoopen_vs_cumulate <- (
ggplot(data=df6_dist_daystoopen_vs_cumulate %>%
filter(AVG_DAYS_TO_OPEN < 14)
, aes(x=AVG_DAYS_TO_OPEN, y=PERCENT_COVERED)) +
geom_line() +
geom_point() +
labs(title = "Average Days to Open a Mail",
x = "Average Days to Open",
y = "Percent Covered") + #-- Labs
theme(plot.title = element_text(hjust = 0.5)) +
scale_x_continuous(breaks=seq(0,14,2), minor_breaks=0:14) +
theme_minimal()
)
plot_df6_dist_daystoopen_vs_cumulate
#### ???? TO DO df_6 ???? ####
# EXPLORE the following relevant variables in df_6_camp_event_clean_final:
# - CLICKED/CLICKED by TYP_CAMP
## compute the distribution of the variable CLICKED (overall; TYP_CAMP = 'ALL'
## is a dummy x value so the single-bar plot below works)
df6_dist_clicked <- df_6_camp_event_clean_final %>%
group_by(CLICKED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
mutate(TYP_CAMP = 'ALL') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/df6_overview$TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/df6_overview$TOT_CLIs)
df6_dist_clicked
## plot distribution of the variable CLICKED
plot_df6_dist_clicked <- (
ggplot(data=df6_dist_clicked
, aes(fill=CLICKED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", position="fill") +
theme_minimal()
)
plot_df6_dist_clicked
### Variable CLICKED by TYP_CAMP ###
## compute the distribution of the variable CLICKED by TYP_CAMP
## (denominators per campaign type come from df6_overviewbytyp)
df6_dist_clickedbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP, CLICKED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
left_join(df6_overviewbytyp %>%
select(TYP_CAMP
, ALL_TOT_EVENTs = TOT_EVENTs
, ALL_TOT_CLIs = TOT_CLIs)
, by='TYP_CAMP') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/ALL_TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/ALL_TOT_CLIs) %>%
select(TYP_CAMP
, CLICKED
, TOT_EVENTs
, TOT_CLIs
, PERCENT_EVENTs
, PERCENT_CLIs
)
df6_dist_clickedbytyp
## plot distribution of the variable CLICKED by TYP_CAMP (absolute counts)
plot_df6_dist_clickedbytyp <- (
ggplot(data=df6_dist_clickedbytyp
, aes(fill=CLICKED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity") +
theme_minimal()
)
plot_df6_dist_clickedbytyp
## plot percent distribution of the variable CLICKED by TYP_CAMP
plot_df6_dist_clickedbytyp_percent <- (
ggplot(data=df6_dist_clickedbytyp
, aes(fill=CLICKED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(position="fill", stat="identity") +
theme_minimal()
)
plot_df6_dist_clickedbytyp_percent
# - FAILED/FAILED by TYP_CAMP
## compute the distribution of the variable FAILED (overall)
df6_dist_failed <- df_6_camp_event_clean_final %>%
group_by(FAILED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
mutate(TYP_CAMP = 'ALL') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/df6_overview$TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/df6_overview$TOT_CLIs)
df6_dist_failed
## plot distribution of the variable FAILED
plot_df6_dist_failed <- (
ggplot(data=df6_dist_failed
, aes(fill=FAILED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity", position="fill") +
theme_minimal()
)
plot_df6_dist_failed
### Variable FAILED by TYP_CAMP ###
## compute the distribution of the variable FAILED by TYP_CAMP
df6_dist_failedbytyp <- df_6_camp_event_clean_final %>%
group_by(TYP_CAMP, FAILED) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)
, TOT_CLIs = n_distinct(ID_CLI)) %>%
left_join(df6_overviewbytyp %>%
select(TYP_CAMP
, ALL_TOT_EVENTs = TOT_EVENTs
, ALL_TOT_CLIs = TOT_CLIs)
, by='TYP_CAMP') %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/ALL_TOT_EVENTs
, PERCENT_CLIs = TOT_CLIs/ALL_TOT_CLIs) %>%
select(TYP_CAMP
, FAILED
, TOT_EVENTs
, TOT_CLIs
, PERCENT_EVENTs
, PERCENT_CLIs
)
df6_dist_failedbytyp
## plot distribution of the variable FAILED by TYP_CAMP (absolute counts)
plot_df6_dist_failedbytyp <- (
ggplot(data=df6_dist_failedbytyp
, aes(fill=FAILED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(stat="identity") +
theme_minimal()
)
plot_df6_dist_failedbytyp
## plot percent distribution of the variable FAILED by TYP_CAMP
plot_df6_dist_failedbytyp_percent <- (
ggplot(data=df6_dist_failedbytyp
, aes(fill=FAILED, x=TYP_CAMP, y=TOT_EVENTs)) +
geom_bar(position="fill", stat="identity") +
theme_minimal()
)
plot_df6_dist_failedbytyp_percent
## compute the distribution of the variable NUM_OPENs
## (NUM_OPENs is NA for never-opened sends; those rows form their own group)
df6_dist_num_opens <- df_6_camp_event_clean_final %>%
group_by(NUM_OPENs) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)) %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/sum(TOT_EVENTs))
df6_dist_num_opens
## plot distribution of the variable NUM_OPENs (x axis clipped to [0, 15])
plot_df6_dist_num_opens <- (
ggplot(data=df6_dist_num_opens
, aes(x=NUM_OPENs, y=TOT_EVENTs)) +
geom_bar(stat="identity", fill="steelblue" ) +
xlim(0,15)+
theme_minimal()
)
plot_df6_dist_num_opens
## compute the distribution of the variable NUM_CLICKs
df6_dist_num_clicks <- df_6_camp_event_clean_final %>%
group_by(NUM_CLICKs) %>%
summarise(TOT_EVENTs = n_distinct(ID_EVENT_S)) %>%
mutate(PERCENT_EVENTs = TOT_EVENTs/sum(TOT_EVENTs))%>%
arrange(desc(PERCENT_EVENTs))
df6_dist_num_clicks
## plot distribution of the variable NUM_CLICKs (x axis clipped to [0, 15])
plot_df6_dist_num_clicks <- (
ggplot(data=df6_dist_num_clicks
, aes(x=NUM_CLICKs, y=TOT_EVENTs)) +
geom_bar(stat="identity", fill="steelblue" ) +
xlim(0,15)+
theme_minimal()
)
plot_df6_dist_num_clicks
#### FINAL REVIEW df_6_clean ####
str(df_6_camp_event_clean_final)
summary(df_6_camp_event_clean_final)
|
#' Get all answer options for a question in a survey
#'
#' (This convenience function is not directly mapped to a remote procedure.)
#'
#' @param questionID ID of the question
#' @param lang Language code for the survey language (\strong{Note:} Due to a
#'   LimeSurvey API bug, the API expects one of the survey languages as part of
#'   the request rather than falling back to the default language of the
#'   survey. However, you can look up that default language using
#'   \code{\link{lsGetSurveyProperties}()})
#' @param lsAPIurl \emph{(optional)} The URL of the \emph{LimeSurvey RemoteControl 2} JSON-RPC API
#' @param sessionKey \emph{(optional)} Authentication token, see \code{\link{lsGetSessionKey}()}
#'
#' @return A data frame of answer options with columns \code{answerCode} and
#'   \code{answerText} (zero rows if the question has an empty answer list).
#'
#' @examples \dontrun{
#' ls_getAnswerOptions("13", "en")
#' ls_getAnswerOptions(questionID = "13", lang = "en")
#' }
#'
#' @seealso \code{\link{lsGetQuestionProperties}()}
#'
#' @export
#'
ls_getAnswerOptions <- function(questionID,
                                lang,
                                lsAPIurl = getOption("lsAPIurl"),
                                sessionKey = NULL) {
  if (is.null(questionID)) {
    stop("Need to specify questionID.")
  }
  ## Fetch only the "answeroptions" property for the requested language.
  ## lsAPIurl/sessionKey stay in the public interface; the downstream call
  ## presumably resolves them from options/session state -- TODO confirm.
  properties <- lsGetQuestionProperties(questionID,
                                        lang = lang,
                                        properties = list("answeroptions"))
  answerOptionsList <- properties$answeroptions
  if (!is.list(answerOptionsList)) {
    stop("No available answer options for this specific question.")
  }
  ## FIX(review): build the result vectorised instead of growing the data
  ## frame with `for (i in 1:nAnswers)`, which misbehaves when the answer
  ## list is empty (1:0 iterates over c(1, 0)). as.character() maps the
  ## NULL names of an empty list to character(0), yielding a 0-row result.
  data.frame(
    answerCode = as.character(names(answerOptionsList)),
    answerText = vapply(answerOptionsList, function(a) a$answer, character(1),
                        USE.NAMES = FALSE),
    stringsAsFactors = FALSE
  )
}
| /R/ls_getAnswerOptions.R | permissive | k127/LimeRick | R | false | false | 1,948 | r | #' Get all answer options for a question in a survey
#'
#' (This convenience function is not directly mapped to a remote procedure.)
#'
#' @param questionID ID of the question
# [LimeSurvey API BUG]
#' @param lang Language code for the survey language (\strong{Note:} The API expects
#' one of the survey languages as part of the request rather than falling back to
#' the default language of the survey. However, you can look up that default
#' language using \code{\link{lsGetSurveyProperties}()})
#' @param lsAPIurl \emph{(optional)} The URL of the \emph{LimeSurvey RemoteControl 2} JSON-RPC API
#' @param sessionKey \emph{(optional)} Authentication token, see \code{\link{lsGetSessionKey}()}
#'
#' @return A table of answer options
#'
#' @examples \dontrun{
#' lsGetQuestionProperties("13", "en")
#' lsGetQuestionProperties(questionID = "13", lang = "en", properties = list("mandatory"))
#' }
#'
#' @seealso \code{\link{lsGetQuestionProperties}()}
#'
#' @export
#'
ls_getAnswerOptions <- function(questionID,
                                lang,
                                lsAPIurl = getOption("lsAPIurl"),
                                sessionKey = NULL) {
  ## Returns the answer options of one question as a data frame with columns
  ## answerCode and answerText. lsAPIurl/sessionKey stay in the public
  ## interface; the downstream call presumably resolves them from
  ## options/session state -- TODO confirm.
  if (is.null(questionID)) {
    stop("Need to specify questionID.")
  }
  properties <- lsGetQuestionProperties(questionID,
                                        lang = lang,
                                        properties = list("answeroptions"))
  answerOptionsList <- properties$answeroptions
  if (!is.list(answerOptionsList)) {
    stop("No available answer options for this specific question.")
  }
  ## FIX(review): build the result vectorised instead of growing the data
  ## frame with `for (i in 1:nAnswers)`, which misbehaves when the answer
  ## list is empty (1:0 iterates over c(1, 0)).
  data.frame(
    answerCode = as.character(names(answerOptionsList)),
    answerText = vapply(answerOptionsList, function(a) a$answer, character(1),
                        USE.NAMES = FALSE),
    stringsAsFactors = FALSE
  )
}
|
library(splitr)
library(dplyr)
library(RPostgres)
library(DBI)
library(purrr)
library(furrr)
library(future)
devtools::load_all()
# Load elements to database.
# Connection for the ad-hoc queries below; all credentials come from env vars.
# FIX(review): Sys.getenv('HOST',) had a stray trailing comma, which passed an
# empty `unset` argument; it only worked because `unset` has a default ("").
con <- dbConnect(
  drv = RPostgres::Postgres(),
  user = Sys.getenv("USER"),
  password = Sys.getenv("PASSWORD"),
  host = Sys.getenv("HOST"),
  port = Sys.getenv("PORT"),
  dbname = Sys.getenv("DBNAME")
)
# Alternative to connection object
# cred <- list(drv=RPostgres::Postgres(),
#              user=Sys.getenv('USER'),
#              password=Sys.getenv('PASSWORD'),
#              host=Sys.getenv('HOST'),
#              port=Sys.getenv('PORT'),
#              dbname=Sys.getenv('DBNAME'),
#              options=glue::glue("-c search_path=hysplit"),
#              maxSize=30
#              )
# Credential list handed to worker processes (presumably so each worker can
# open its own connection rather than sharing `con` -- TODO confirm against
# the dirtywind helpers).
cred <- list(
  drv = RPostgres::Postgres(),
  user = Sys.getenv("USER"),
  password = Sys.getenv("PASSWORD"),
  host = Sys.getenv("HOST"),
  port = Sys.getenv("PORT"),
  dbname = Sys.getenv("DBNAME"),
  maxSize = 30
)
# dirtywind::load_plant_data(conn = con,
#                            schema = 'hysplit',
#                            table_name = 'coal_plants',
#                            save_local = TRUE,
#                            overwrite = FALSE)
# Build parameter data.frame to run HYSPLIT: one row per distinct 2005
# facility/location, expanded over the requested date range.
query <- "
select distinct on (facility_id, latitude, longitude, facility_name) facility_id,
latitude,
longitude,
facility_name,
stack_height
from hysplit.coal_plants
where year = 2005;
"
query_df <- dbGetQuery(con, query)
# NOTE(review): "paramemter_df" is a typo for parameter_df; the misspelling is
# kept because later code in this script refers to the object by this name.
paramemter_df <- model_inputs_unit(
  query = query,
  con = con,
  timedelta = '1 month',
  start_date = as.Date('2005-01-01'),
  end_date = as.Date('2005-12-31'),
  duration = 72,
  daily_hours = c(0, 6, 12, 18)
)
###############################################################################
################################### NOT RUN ###################################
###############################################################################
## Build the 2006 plant inventory CSV from the all-years inventory file.
## NOTE(review): the pipeline ends in write.csv(), which returns NULL
## invisibly, so `plants_2006` ends up NULL; the useful output is the CSV.
plants_2006 <- read.csv('data/coal_plant_inventory_all_years.csv') %>%
  dplyr::select(facility_id,
                latitude,
                longitude,
                facility_name,
                stack_height,
                year) %>%
  filter(year == 2006) %>%
  group_by(facility_id,
           latitude,
           longitude,
           facility_name,
           stack_height) %>%
  distinct() %>%
  write.csv('data/plants_2006.csv',
            row.names = FALSE)
## Rebuild the HYSPLIT parameter table from the local 2006 CSV instead of the
## database query used above.
paramemter_df <- model_inputs_unit(timedelta = '1 month',
                                   start_date = as.Date('2006-01-01'),
                                   end_date = as.Date('2006-12-31'),
                                   duration = 72,
                                   daily_hours = c(0, 6, 12, 18),
                                   local_file = 'data/plants_2006.csv')
###############################################################################
###############################################################################
## Public IPs of the EC2 worker instances.
public_ids <- c(
  '52.35.6.124',
  '34.208.111.91',
  '34.209.41.9'
)
## Two-level future topology: an outer cluster across the EC2 hosts, with
## multisession parallelism inside each host.
cls <- make_cluster_ec2(public_ids)
plan(list(tweak(cluster, workers = cls), multisession))
## Credentials handed to the remote workers; the DB host is hard-coded here,
## everything else comes from environment variables.
creds_aws <- list(
  user = Sys.getenv('USER'),
  password = Sys.getenv('PASSWORD'),
  host = 'db.cicala-projects.com',
  port = '5432',
  dbname = Sys.getenv('DBNAME'),
  maxSize = 30
)
## Time a distributed trajectory run: for each row of the parameter table,
## dispatch hysplit_trajectory_parallel_master() on the future workers, each
## writing into hysplit_partitions.trajectories_master.
system.time(
  test_hysplit <-
    paramemter_df %>%
    mutate(model_traj = furrr::future_pmap(list(
      'lat' = latitude,
      'lon' = longitude,
      'height' = stack_height,
      'name_source' = facility_name,
      'id_source' = facility_id,
      'duration' = duration,
      'days' = seq_dates,
      'daily_hours' = daily_hours,
      'direction' = 'forward',
      'met_type' = 'reanalysis',
      'met_dir' = '/home/ubuntu/met',
      'exec_dir' = "/home/ubuntu/hysplit",
      'clean_up' = FALSE,
      'db' = TRUE,
      'schema' = 'hysplit_partitions',
      'table_name' = 'trajectories_master',
      'cred' = list(creds_aws)
    ),
    dirtywind::hysplit_trajectory_parallel_master)
    )
)
## NOTE(review): `parameter_barry` and `creds` are not defined anywhere in
## this script -- presumably created interactively or in another file;
## confirm before running.
parallel_hysplit(parameters_df = parameter_barry,
                 creds = creds,
                 met_type = 'reanalysis',
                 clean_up = TRUE,
                 public_ip = c("34.220.174.56", "34.219.10.249"),
                 ec2 = FALSE)
| /examples/distribute_computing_ec2.R | permissive | vrathi0/dirtywind | R | false | false | 5,374 | r | library(splitr)
library(dplyr)
library(RPostgres)
library(DBI)
library(purrr)
library(furrr)
library(future)
devtools::load_all()
# Load elements to database.
# Connection for the ad-hoc queries below; all credentials come from env vars.
# FIX(review): Sys.getenv('HOST',) had a stray trailing comma, which passed an
# empty `unset` argument; it only worked because `unset` has a default ("").
con <- dbConnect(
  drv = RPostgres::Postgres(),
  user = Sys.getenv("USER"),
  password = Sys.getenv("PASSWORD"),
  host = Sys.getenv("HOST"),
  port = Sys.getenv("PORT"),
  dbname = Sys.getenv("DBNAME")
)
# Alternative to connection object
# cred <- list(drv=RPostgres::Postgres(),
#              user=Sys.getenv('USER'),
#              password=Sys.getenv('PASSWORD'),
#              host=Sys.getenv('HOST'),
#              port=Sys.getenv('PORT'),
#              dbname=Sys.getenv('DBNAME'),
#              options=glue::glue("-c search_path=hysplit"),
#              maxSize=30
#              )
# Credential list handed to worker processes (presumably so each worker can
# open its own connection rather than sharing `con` -- TODO confirm against
# the dirtywind helpers).
cred <- list(
  drv = RPostgres::Postgres(),
  user = Sys.getenv("USER"),
  password = Sys.getenv("PASSWORD"),
  host = Sys.getenv("HOST"),
  port = Sys.getenv("PORT"),
  dbname = Sys.getenv("DBNAME"),
  maxSize = 30
)
# dirtywind::load_plant_data(conn = con,
#                            schema = 'hysplit',
#                            table_name = 'coal_plants',
#                            save_local = TRUE,
#                            overwrite = FALSE)
# Build parameter data.frame to run HYSPLIT: one row per distinct 2005
# facility/location, expanded over the requested date range.
query <- "
select distinct on (facility_id, latitude, longitude, facility_name) facility_id,
latitude,
longitude,
facility_name,
stack_height
from hysplit.coal_plants
where year = 2005;
"
query_df <- dbGetQuery(con, query)
# NOTE(review): "paramemter_df" is a typo for parameter_df; the misspelling is
# kept because later code in this script refers to the object by this name.
paramemter_df <- model_inputs_unit(
  query = query,
  con = con,
  timedelta = '1 month',
  start_date = as.Date('2005-01-01'),
  end_date = as.Date('2005-12-31'),
  duration = 72,
  daily_hours = c(0, 6, 12, 18)
)
###############################################################################
################################### NOT RUN ###################################
###############################################################################
## Build the 2006 plant inventory CSV from the all-years inventory file.
## NOTE(review): the pipeline ends in write.csv(), which returns NULL
## invisibly, so `plants_2006` ends up NULL; the useful output is the CSV.
plants_2006 <- read.csv('data/coal_plant_inventory_all_years.csv') %>%
  dplyr::select(facility_id,
                latitude,
                longitude,
                facility_name,
                stack_height,
                year) %>%
  filter(year == 2006) %>%
  group_by(facility_id,
           latitude,
           longitude,
           facility_name,
           stack_height) %>%
  distinct() %>%
  write.csv('data/plants_2006.csv',
            row.names = FALSE)
## Rebuild the HYSPLIT parameter table from the local 2006 CSV instead of the
## database query used above.
paramemter_df <- model_inputs_unit(timedelta = '1 month',
                                   start_date = as.Date('2006-01-01'),
                                   end_date = as.Date('2006-12-31'),
                                   duration = 72,
                                   daily_hours = c(0, 6, 12, 18),
                                   local_file = 'data/plants_2006.csv')
###############################################################################
###############################################################################
## Public IPs of the EC2 worker instances.
public_ids <- c(
  '52.35.6.124',
  '34.208.111.91',
  '34.209.41.9'
)
## Two-level future topology: an outer cluster across the EC2 hosts, with
## multisession parallelism inside each host.
cls <- make_cluster_ec2(public_ids)
plan(list(tweak(cluster, workers = cls), multisession))
## Credentials handed to the remote workers; the DB host is hard-coded here,
## everything else comes from environment variables.
creds_aws <- list(
  user = Sys.getenv('USER'),
  password = Sys.getenv('PASSWORD'),
  host = 'db.cicala-projects.com',
  port = '5432',
  dbname = Sys.getenv('DBNAME'),
  maxSize = 30
)
## Time a distributed trajectory run: for each row of the parameter table,
## dispatch hysplit_trajectory_parallel_master() on the future workers, each
## writing into hysplit_partitions.trajectories_master.
system.time(
  test_hysplit <-
    paramemter_df %>%
    mutate(model_traj = furrr::future_pmap(list(
      'lat' = latitude,
      'lon' = longitude,
      'height' = stack_height,
      'name_source' = facility_name,
      'id_source' = facility_id,
      'duration' = duration,
      'days' = seq_dates,
      'daily_hours' = daily_hours,
      'direction' = 'forward',
      'met_type' = 'reanalysis',
      'met_dir' = '/home/ubuntu/met',
      'exec_dir' = "/home/ubuntu/hysplit",
      'clean_up' = FALSE,
      'db' = TRUE,
      'schema' = 'hysplit_partitions',
      'table_name' = 'trajectories_master',
      'cred' = list(creds_aws)
    ),
    dirtywind::hysplit_trajectory_parallel_master)
    )
)
## NOTE(review): `parameter_barry` and `creds` are not defined anywhere in
## this script -- presumably created interactively or in another file;
## confirm before running.
parallel_hysplit(parameters_df = parameter_barry,
                 creds = creds,
                 met_type = 'reanalysis',
                 clean_up = TRUE,
                 public_ip = c("34.220.174.56", "34.219.10.249"),
                 ec2 = FALSE)
|
### R script for "Data I/O"
### Course "Introduction to Modern Data Analysis with R"
### Date: February 2020
### Author: The R Bootcamp
### Read data from disk ----------------------------
# Find the file Tourismus.csv on your computer.
# Read the file using read.csv(). Remember the auto-complete trick!
read.csv("1_Data/Tourismus.csv")
# Make sure the data are stored in an object named `daten`.
# FIX(review): readr's read_csv() was used here without loading readr, which
# errors in a fresh session; the exercise is about base read.csv().
daten <- read.csv("1_Data/Tourismus.csv")
### Data live in data.frames ----------------------------
# Check the class of `daten` using class()
class(daten)
# Check the dimensions of `daten` using dim()
dim(daten)
# Show the names of the variables in `daten` using names()
names(daten)
# Extract the variable `Land` using $
daten$Land
# Create a new object that contains the variable `Land`.
land <- daten$Land
# Create a new variable named 'Naechte' with $ as the product of Dauer and Besucher.
daten$Naechte <- daten$Dauer * daten$Besucher
### Write data to disk ----------------------------
# Write the Tourismus data set back to disk with write.csv()
write.csv(daten, "1_Data/Tourismus_neu.csv")
| /TheRBootcamp/.Rproj.user/F47E2A59/sources/s-147F6DF9/72E3282F-contents | no_license | therbootcamp/EDA_2021Sep | R | false | false | 1,231 | ### R Skript zu "Data I/O"
### Course "Introduction to Modern Data Analysis with R"
### Date: February 2020
### Author: The R Bootcamp
### Read data from disk ----------------------------
# Find the file Tourismus.csv on your computer.
# Read the file using read.csv(). Remember the auto-complete trick!
read.csv("1_Data/Tourismus.csv")
# Make sure the data are stored in an object named `daten`.
# FIX(review): readr's read_csv() was used here without loading readr, which
# errors in a fresh session; the exercise is about base read.csv().
daten <- read.csv("1_Data/Tourismus.csv")
### Data live in data.frames ----------------------------
# Check the class of `daten` using class()
class(daten)
# Check the dimensions of `daten` using dim()
dim(daten)
# Show the names of the variables in `daten` using names()
names(daten)
# Extract the variable `Land` using $
daten$Land
# Create a new object that contains the variable `Land`.
land <- daten$Land
# Create a new variable named 'Naechte' with $ as the product of Dauer and Besucher.
daten$Naechte <- daten$Dauer * daten$Besucher
### Write data to disk ----------------------------
# Write the Tourismus data set back to disk with write.csv()
write.csv(daten, "1_Data/Tourismus_neu.csv")
| |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{get_datasets}
\alias{get_datasets}
\title{Get a data frame with information on all available datasets.}
\usage{
get_datasets(...)
}
\arguments{
\item{...}{Additional parameters passed to \code{data.frame} (e.g.
stringsAsFactors = FALSE).}
}
\value{
A data frame.
}
\description{
Returns a data frame with two variables: \code{id} and \code{description}
}
\examples{
\dontrun{datasets <- get_datasets()}
\dontrun{head(datasets)}
}
\seealso{
\code{\link{search_dataset}} to search for a specific data set or a
keyword in the description, and \code{\link{get_data_structure}} to get the
dimensions of specified data set.
}
| /man/get_datasets.Rd | no_license | kevin11h/OECD | R | false | true | 718 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{get_datasets}
\alias{get_datasets}
\title{Get a data frame with information on all available datasets.}
\usage{
get_datasets(...)
}
\arguments{
\item{...}{Additional parameters passed to \code{data.frame} (e.g.
stringsAsFactors = FALSE).}
}
\value{
A data frame.
}
\description{
Returns a data frame with two variables: \code{id} and \code{description}
}
\examples{
\dontrun{datasets <- get_datasets()}
\dontrun{head(datasets)}
}
\seealso{
\code{\link{search_dataset}} to search for a specific data set or a
keyword in the description, and \code{\link{get_data_structure}} to get the
dimensions of specified data set.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/globalstd.R
\name{getcorcluster}
\alias{getcorcluster}
\title{Get Pseudo-Spectrum as peaks cluster based on correlation analysis.}
\usage{
getcorcluster(list, corcutoff = 0.9, rtcutoff = 10, accuracy = 4)
}
\arguments{
\item{list}{a list with peaks intensity}
\item{corcutoff}{cutoff of the correlation coefficient, default 0.9}
\item{rtcutoff}{cutoff of the distances within a cluster, default 10}
\item{accuracy}{measured mass or mass to charge ratio in digits, default 4}
}
\value{
list with Pseudo-Spectrum index
}
\description{
Get Pseudo-Spectrum as peaks cluster based on correlation analysis.
}
\examples{
data(spmeinvivo)
cluster <- getcorcluster(spmeinvivo)
}
| /man/getcorcluster.Rd | no_license | yufree/pmd | R | false | true | 745 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/globalstd.R
\name{getcorcluster}
\alias{getcorcluster}
\title{Get Pseudo-Spectrum as peaks cluster based on correlation analysis.}
\usage{
getcorcluster(list, corcutoff = 0.9, rtcutoff = 10, accuracy = 4)
}
\arguments{
\item{list}{a list with peaks intensity}
\item{corcutoff}{cutoff of the correlation coefficient, default 0.9}
\item{rtcutoff}{cutoff of the distances in cluster, default 10}
\item{accuracy}{measured mass or mass to charge ratio in digits, default 4}
}
\value{
list with Pseudo-Spectrum index
}
\description{
Get Pseudo-Spectrum as peaks cluster based on correlation analysis.
}
\examples{
data(spmeinvivo)
cluster <- getcorcluster(spmeinvivo)
}
|
## import required libraries
library("here")
library("SpatialExperiment")
library("scran")
library("scater")
library("dplyr")
library("spatialLIBD")
library("sessioninfo")
library("tidyr")
## Load the post-QC whole-genome SpatialExperiment object.
spe <- readRDS(
  here::here("processed-data", "07_spot_qc", "spe_wholegenome_postqc.rds")
)
## Control (non-AD) samples; the last one shouldn't be used for pTau.
controls <- c(
  "V10A27106_A1_Br3874",
  "V10A27004_A1_Br3874",
  "V10T31036_A1_Br3874"
)
## One row per spot with the pathology metrics analysed below.
## (N* appear to be counts and P* proportions, judging by the value ranges
## printed further down -- TODO confirm against the upstream pipeline.)
path_df <- data.frame(
  spot_id = colnames(spe),
  diagnosis = spe$diagnosis,
  sample_id = spe$sample_id,
  NAbeta = spe$NAbeta,
  NpTau = spe$NpTau,
  PAbeta = spe$PAbeta,
  PpTau = spe$PpTau
)
## Max of every pathology metric across all controls (relevant for NpTau/PpTau).
## FIX(review): superseded summarise_if() replaced with across().
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  summarise(across(where(is.numeric), \(x) max(x, na.rm = TRUE)))
# previously: NAbeta 4, NpTau 8, PAbeta 0.1983471, PpTau 0.01433482
## Same maxima restricted to controls 1 and 3 (relevant for NAbeta/PAbeta).
path_df |>
  dplyr::filter(sample_id %in% controls[c(1, 3)]) |>
  summarise(across(where(is.numeric), \(x) max(x, na.rm = TRUE)))
# previously: NAbeta 3, NpTau 8, PAbeta 0.149126, PpTau 0.01433482
## Frequency of unique NAbeta values across all controls.
## FIX(review): the original grouped by NAbeta before prop.table(n), so every
## proportion came out as 1 (visible in the old output); the proportion must
## be taken over ALL control spots.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  count(NAbeta) |>
  mutate(prop = n / sum(n))
## Quantiles for NAbeta per control sample.
## FIX(review): na.rm was passed to summarise() (creating a literal `na.rm`
## column, visible in the old output) instead of to quantile().
## NOTE(review): summarise() returning several rows per group is deprecated
## in dplyr >= 1.1; switch to reframe() once that version is pinned.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(q = list(quantile(NAbeta, na.rm = TRUE))) |>
  unnest_wider(q)
## High percentiles for NAbeta per control sample.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(
    percentiles = scales::percent(c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999)),
    NAbeta = quantile(NAbeta, c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999),
                      na.rm = TRUE)
  )
# Everything zero except 0.999 where NAbeta = 1
## Quantiles for PAbeta per control sample (same na.rm fix as above).
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(q = list(quantile(PAbeta, na.rm = TRUE))) |>
  unnest_wider(q)
## High percentiles for PAbeta per control sample.
## FIX(review): the output column was mislabelled NAbeta although it is
## computed from PAbeta; renamed accordingly.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(
    percentiles = scales::percent(c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999)),
    PAbeta = quantile(PAbeta, c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999),
                      na.rm = TRUE)
  )
## for 004 and 1036 99.9% is 0.108 and 0.0543 respectively. Zeros for everything else.
## AD (non-control) spots only.
path_df_AD <- path_df |> dplyr::filter(!sample_id %in% controls)
count(path_df_AD) # 25124 total spots in all AD samples
## Spots strictly above the control-derived thresholds
## (NAbeta > 1 count, PAbeta > 0.108 proportion).
thresholded <- path_df_AD |> dplyr::filter(NAbeta > 1 | PAbeta > 0.108)
count(thresholded) # 2004
## Same thresholds applied inclusively (>=) for comparison.
path_df_AD |>
  dplyr::filter(NAbeta >= 1 | PAbeta >= 0.108) |>
  count() # 2861
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
session_info()
| /code/09_pathology_vs_BayesSpace/02_pathology_thresholds.R | no_license | LieberInstitute/Visium_SPG_AD | R | false | false | 3,692 | r | ## import required libraries
library("here")
library("SpatialExperiment")
library("scran")
library("scater")
library("dplyr")
library("spatialLIBD")
library("sessioninfo")
library("tidyr")
## Load the post-QC whole-genome SpatialExperiment object.
spe <- readRDS(
  here::here("processed-data", "07_spot_qc", "spe_wholegenome_postqc.rds")
)
## Control (non-AD) samples; the last one shouldn't be used for pTau.
controls <- c(
  "V10A27106_A1_Br3874",
  "V10A27004_A1_Br3874",
  "V10T31036_A1_Br3874"
)
## One row per spot with the pathology metrics analysed below.
## (N* appear to be counts and P* proportions, judging by the value ranges
## printed further down -- TODO confirm against the upstream pipeline.)
path_df <- data.frame(
  spot_id = colnames(spe),
  diagnosis = spe$diagnosis,
  sample_id = spe$sample_id,
  NAbeta = spe$NAbeta,
  NpTau = spe$NpTau,
  PAbeta = spe$PAbeta,
  PpTau = spe$PpTau
)
## Max of every pathology metric across all controls (relevant for NpTau/PpTau).
## FIX(review): superseded summarise_if() replaced with across().
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  summarise(across(where(is.numeric), \(x) max(x, na.rm = TRUE)))
# previously: NAbeta 4, NpTau 8, PAbeta 0.1983471, PpTau 0.01433482
## Same maxima restricted to controls 1 and 3 (relevant for NAbeta/PAbeta).
path_df |>
  dplyr::filter(sample_id %in% controls[c(1, 3)]) |>
  summarise(across(where(is.numeric), \(x) max(x, na.rm = TRUE)))
# previously: NAbeta 3, NpTau 8, PAbeta 0.149126, PpTau 0.01433482
## Frequency of unique NAbeta values across all controls.
## FIX(review): the original grouped by NAbeta before prop.table(n), so every
## proportion came out as 1 (visible in the old output); the proportion must
## be taken over ALL control spots.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  count(NAbeta) |>
  mutate(prop = n / sum(n))
## Quantiles for NAbeta per control sample.
## FIX(review): na.rm was passed to summarise() (creating a literal `na.rm`
## column, visible in the old output) instead of to quantile().
## NOTE(review): summarise() returning several rows per group is deprecated
## in dplyr >= 1.1; switch to reframe() once that version is pinned.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(q = list(quantile(NAbeta, na.rm = TRUE))) |>
  unnest_wider(q)
## High percentiles for NAbeta per control sample.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(
    percentiles = scales::percent(c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999)),
    NAbeta = quantile(NAbeta, c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999),
                      na.rm = TRUE)
  )
# Everything zero except 0.999 where NAbeta = 1
## Quantiles for PAbeta per control sample (same na.rm fix as above).
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(q = list(quantile(PAbeta, na.rm = TRUE))) |>
  unnest_wider(q)
## High percentiles for PAbeta per control sample.
## FIX(review): the output column was mislabelled NAbeta although it is
## computed from PAbeta; renamed accordingly.
path_df |>
  dplyr::filter(sample_id %in% controls) |>
  group_by(sample_id) |>
  summarise(
    percentiles = scales::percent(c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999)),
    PAbeta = quantile(PAbeta, c(0.95, 0.96, 0.97, 0.98, 0.99, 0.999),
                      na.rm = TRUE)
  )
## for 004 and 1036 99.9% is 0.108 and 0.0543 respectively. Zeros for everything else.
## AD (non-control) spots only.
path_df_AD <- path_df |> dplyr::filter(!sample_id %in% controls)
count(path_df_AD) # 25124 total spots in all AD samples
## Spots strictly above the control-derived thresholds
## (NAbeta > 1 count, PAbeta > 0.108 proportion).
thresholded <- path_df_AD |> dplyr::filter(NAbeta > 1 | PAbeta > 0.108)
count(thresholded) # 2004
## Same thresholds applied inclusively (>=) for comparison.
path_df_AD |>
  dplyr::filter(NAbeta >= 1 | PAbeta >= 0.108) |>
  count() # 2861
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
session_info()
|
#' Normalise a numeric vector so that its elements sum to one.
#'
#' @param x A numeric vector. If `sum(x)` is zero the result contains
#'   `NaN`/`Inf` values (unchanged from the previous behaviour).
#' @return A vector of the same length as `x`.
normalise <- function(x) {
  total <- sum(x)
  x / total
}
| /R/normalise.R | permissive | pmcharrison/seqopt | R | false | false | 42 | r | normalise <- function(x) {
x / sum(x)
}
|
## Dependencies
library(shiny)
library(leaflet)
source('BaseR.R') ## Source of all the photos and such
## Page layout: a narrow (width 3) trip-selector column on the left and the
## map in the main panel on the right.
ui <- fluidPage(
  titlePanel('World Tour 2018'),
  fluidRow(
    column(
      3,
      h4('Trips'),
      ## Dropdown of trip names; `tbl` is loaded from BaseR.R.
      selectInput('trips', 'Choose Trip',
                  choices = tbl[, 1],
                  selected = NULL),
      textOutput('copy'),   ## trip description text
      tags$br(),
      uiOutput('pics')      ## photos for the selected trip
    ),
    mainPanel(
      h4('Map'),
      leafletOutput('Map')  ## leaflet map of the trips
    )
  )
)
| /World Tour/ui.R | no_license | jnt0009/World-Tour | R | false | false | 683 | r |
## Dependencies
library(shiny)
library(leaflet)
source('BaseR.R') ## Source of all the photos and such
ui <- fluidPage(
titlePanel('World Tour 2018'), ## title
fluidRow(
column(3,
h4('Trips'), ## Subtitle
selectInput('trips','Choose Trip', ## Dropdown
choices = tbl[,1], ## Names of places
selected = NULL
),
textOutput('copy'), ## Content of trips goes here
tags$br(),
uiOutput('pics') ### Photos displayed here
),
mainPanel(
h4('Map'),
leafletOutput('Map') ## My map of trips
)
)
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.