content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Read the sites data set (UTF-8 encoded) and print a quick structural summary:
# whether read.csv produced a data frame, its column/row counts, and the data.
data <- read.csv("sites.csv", encoding = "UTF-8")
for (summary_value in list(is.data.frame(data), ncol(data), nrow(data), data)) {
  print(summary_value)
}
| /r/csv.r | no_license | summerfang/study | R | false | false | 123 | r | data <- read.csv("sites.csv", encoding="UTF-8")
print(is.data.frame(data))
print(ncol(data))
print(nrow(data))
print(data)
|
#
# Contains methods for calculating distance matrices
#
# Computes a distance matrix for 16S gene sequences:
# Computes an identity/distance matrix for the 16S gene sequences in `filename`
# by running a USEARCH all-vs-all global alignment.
#
# Relies on globals defined elsewhere in the package: `tmp_dir` (scratch
# directory), `usearch` (path to the USEARCH binary), and the helper
# BuildIdentityMatrixUSEARCH().
#
# @param filename Path to a FASTA file of 16S sequences.
# @return A numeric identity matrix. Duplicated rows are perturbed with tiny
#   uniform jitter so downstream methods never see two identical points.
# (Earlier commented-out experiments with -calc_distmx/-distmx_brute removed.)
generate_distance_matrix16S <- function(filename) {
  usearch_out <- paste(tmp_dir, '/', basename(filename), "_out.mx", sep = '')
  system(paste(usearch, "-acceptall -allpairs_global", filename, "-uc", usearch_out))
  identity_matrix <- BuildIdentityMatrixUSEARCH(filename, filename, usearch_out, FALSE)
  # Compute the duplicated-row indices once (the original recomputed
  # which(duplicated(...) == T) three times) and add negligible jitter;
  # the vector recycles column-wise, shifting each duplicated row by a constant.
  dup_rows <- which(duplicated(identity_matrix))
  if (length(dup_rows) > 0) {
    identity_matrix[dup_rows, ] <- identity_matrix[dup_rows, ] +
      runif(length(dup_rows), 0.0000001, 0.00001)
  }
  identity_matrix
}
generate_distance_matrix <- function(fnameREF16S, fnameREF="", fnameEMP="", dt=0.04) {
  # Calculates the identity/distance matrix between reference marker
  # sequences and empirical reads, then merges highly similar objects.
  #
  # fnameREF16S - 16S guide sequences (passed to BuildIdentityMatrixUSEARCH)
  # fnameREF    - guide (reference marker) reads
  # fnameEMP    - empirical reads of the microbial community of interest
  # dt          - distance threshold below which objects are merged
  #
  # Relies on globals defined elsewhere: `tmp_dir`, `usearch`, `score16S`
  # and the helper BuildIdentityMatrixUSEARCH().
  #
  # Returns list(identity_matrix, m_table): the pruned identity matrix and a
  # 2-column matrix of (kept, dropped) sequence names, or an empty vector when
  # nothing was merged.

  # Merge reference and empirical (consensus) sequences into one file:
  ref_emp <- paste(tmp_dir, '/', basename(fnameREF), "_", basename(fnameEMP), sep='' )
  system(paste("cat", fnameREF, fnameEMP, '>', ref_emp))
  # All-vs-all global alignment of the combined sequences:
  dmx_out <- paste(tmp_dir, '/', basename(ref_emp), ".dmx", sep='')
  system(paste(usearch, "-acceptall -allpairs_global", ref_emp, "-uc", dmx_out))
  identity_matrix <- BuildIdentityMatrixUSEARCH(fnameREF16S, ref_emp, dmx_out, TRUE)
  # Perturb duplicated rows with negligible jitter (indices computed once):
  dup_rows <- which(duplicated(identity_matrix))
  identity_matrix[dup_rows, ] <- identity_matrix[dup_rows, ] +
    runif(length(dup_rows), 0.0000001, 0.00001)
  # Merge highly similar objects: an empirical row (index > nrow(score16S))
  # closer than `dt` to a reference column is dropped and recorded in m_table.
  d1 <- identity_matrix
  sz <- dim(d1)[1]
  del <- c()
  m_table <- c()
  for (i in seq_len(sz)) {
    for (j in seq_len(dim(score16S)[1])) {  # score16S: global reference score matrix
      if ((d1[i, j] < dt) && (i > dim(score16S)[1]) && (i != j)) {
        del <- c(del, i)
        if (!(rownames(identity_matrix)[i] %in% m_table))
          m_table <- c(m_table, colnames(identity_matrix)[j], rownames(identity_matrix)[i])
      }
    }
  }
  keep.idx <- setdiff(seq_len(nrow(d1)), del)
  identity_matrix <- d1[keep.idx, keep.idx]
  # m_table grows in (kept, dropped) pairs; reshape it to a 2-column matrix.
  # BUG FIX: the original tested length(m_table >= 2) -- the length of a
  # logical comparison vector -- instead of length(m_table) >= 2.
  if (length(m_table) >= 2) {
    m_table <- matrix(m_table, ncol=2, byrow=TRUE)
  }
  list(identity_matrix, m_table)
}
# This function reads/generates distance matrices for all provided guide sequences.
#readDistanceMatrices <- function(use_custom_matrix = F, work_libs) {
readDistanceMatrices <- function(ref_16S, work_libs, dt=0.04) {
  # Reads/generates distance matrices for all provided guide sequences.
  #
  # ref_16S   - path to the file of 16S guide sequences
  # work_libs - table with one row per library; its first column holds the
  #             path to that library's empirical reads
  # dt        - merge threshold, forwarded to generate_distance_matrix()
  #
  # NOTE(review): `refs` (per-library reference files) is still taken from the
  # calling environment; consider passing it as an argument.
  #
  # Returns list(scoresV, merged_table), one element per library.
  scoresV <- list()
  merged_table <- list()
  # BUG FIX: the original iterated over the global `libs`
  # (for (i in 1:length(libs))) instead of the `work_libs` argument that the
  # loop body actually indexes.
  for (i in seq_len(nrow(work_libs))) {
    cat("Library: ", basename(work_libs[i, 1]), '\n')
    distances <- generate_distance_matrix(fnameREF16S=ref_16S, fnameREF=refs[i],
                                          fnameEMP=work_libs[i, 1], dt)
    scoresV[[i]] <- distances[[1]]
    merged_table[[i]] <- distances[[2]]
  }
  list(scoresV, merged_table)
}
# Contains methods for calculating distance matrices
#
# Computes a distance matrix for 16S gene sequences:
generate_distance_matrix16S <- function(filename) {
usearch_out <- paste(tmp_dir, '/', basename(filename),"_out.mx",sep='')
#num_threads <- detectCores()
#if (num_threads > max_threads)
# num_threads <- max_threads
#system(paste(usearch, "-distmx_brute -calc_distmx", filename, "-distmxout", usearch_out, "-format square"))
#system(paste(usearch, "-calc_distmx", filename, "-distmxout", usearch_out, "-format square -threads", num_threads))
#reads <- read.fasta(filename)
#identity_matrix <- BuildDistanceMatrixUSEARCH_v8(filename, usearch_out)
#identity_matrix[which(duplicated(identity_matrix)==T),] <- identity_matrix[which(duplicated(identity_matrix)==T),] + runif(length(which(duplicated(identity_matrix)==T)), 0.0001, 0.1)
#identity_matrix
system(paste(usearch, "-acceptall -allpairs_global", filename, "-uc", usearch_out))
identity_matrix <- BuildIdentityMatrixUSEARCH(filename, filename, usearch_out, F)
identity_matrix[which(duplicated(identity_matrix)==T),] <- identity_matrix[which(duplicated(identity_matrix)==T),] + runif(length(which(duplicated(identity_matrix)==T)), 0.0000001, 0.00001)
identity_matrix
}
generate_distance_matrix <- function(fnameREF16S, fnameREF="", fnameEMP="", dt=0.04) {
# This function calculates distance matrix between reference marker sequences and empirical reads.
# fnameREF - guide (reference marker) reads
# fnameEMP - empirical reads of microbial community of interest
# Merging reference and empirical (consensus) sequences together:
ref_emp <- paste(tmp_dir, '/', basename(fnameREF), "_", basename(fnameEMP), sep='' )
system(paste("cat", fnameREF, fnameEMP, '>', ref_emp))
# Computing distance matrices for both reference and empirical (clustered) sequences:
dmx_out <- paste(tmp_dir, '/', basename(ref_emp), ".dmx",sep='')
#-distmx_brute
#system(paste(usearch, "-distmx_brute -calc_distmx", ref_emp, "-distmxout", dmx_out, "-format square"))
#system(paste(usearch, "-calc_distmx", ref_emp, "-distmxout", dmx_out, "-format square -threads", num_threads))
#identity_matrix <- BuildDistanceMatrixUSEARCH_v8(ref_emp, dmx_out)
#identity_matrix[which(duplicated(identity_matrix)==T),] <- identity_matrix[which(duplicated(identity_matrix)==T),] + runif(length(which(duplicated(identity_matrix)==T)), 0.0001, 0.1)
#identity_matrix
system(paste(usearch, "-acceptall -allpairs_global", ref_emp, "-uc", dmx_out)) # &
identity_matrix <- BuildIdentityMatrixUSEARCH(fnameREF16S, ref_emp, dmx_out, T)
identity_matrix[which(duplicated(identity_matrix)==T),] <- identity_matrix[which(duplicated(identity_matrix)==T),] + runif(length(which(duplicated(identity_matrix)==T)), 0.0000001, 0.00001)
# Merge highly similar objects:
d1 = identity_matrix
sz = dim(d1)[1]
del = c()
m_table <- c()
for (i in 1:sz) {
for (j in 1:dim(score16S)[1]) {
if ((d1[i,j]<dt) && (i > dim(score16S)[1]) && (i != j)) {
#print(c(i,j))
del = c(del,i)
if ((rownames(identity_matrix)[i] %in% m_table) == F)
m_table = c(m_table, colnames(identity_matrix)[j], rownames(identity_matrix)[i])
}
}
}
keep.idx <- setdiff(seq_len(nrow(d1)), del)
identity_matrix = d1[keep.idx, keep.idx]
identity_matrix
if (length(m_table>=2)) {
m_table <- matrix(m_table, ncol=2, byrow=T)
}
list(identity_matrix, m_table)
}
# This function reads/generates distance matrices for all provided guide sequences.
#readDistanceMatrices <- function(use_custom_matrix = F, work_libs) {
readDistanceMatrices <- function(ref_16S, work_libs, dt=0.04) {
scoresV <- list()
merged_table <- list()
for(i in 1:length(libs)) {
cat("Library: ", basename(work_libs[i,1]),'\n')
distances <- generate_distance_matrix(fnameREF16S=ref_16S, fnameREF=refs[i], fnameEMP=work_libs[i,1], dt)
scoresV[[i]] <- distances[[1]]
merged_table[[i]] <- distances[[2]]
}
list(scoresV, merged_table)
} |
\name{jsCanvas}
\alias{jsCanvas}
\title{Open JavaScript Canvas Graphics Device}
\description{
This function opens a new R graphics device and
becomes the active device to which graphics
commands are sent. This device maps the graphical
commands to JavaScript code. When the device is closed,
the JavaScript is either written to a file
or made available as a character vector.
This JavaScript code can then be added to an HTML document
and rendered on a JavaScript canvas.
}
\usage{
jsCanvas(file = character(), dim = c(1000, 800), col = "black", fill = "transparent",
ps = 10, wrapup = writeCode,
canvasId = "canvas", multiCanvas = FALSE, ...,
runPlotCommand = !missing(file) && !is.character(substitute(file)))
}
\arguments{
\item{file}{the name of a file or a connection object to which the
generated JavaScript code will be written when the device is closed.
This can be an empty vector (the default) in which case the code is
not written to a file. Instead, the code can be accessed from the
device.
This argument can also be an R call or expression and
it will be evaluated and the resulting JavaScript code
returned directly. This allows one to open, plot and close
the device in a single action and retrieve the JavaScript
code in one operation.
}
\item{dim}{the dimensions to use for the HTML canvas device. These are pixels.}
\item{col}{the default drawing color}
\item{fill}{the default fill color}
\item{ps}{the initial point size}
\item{wrapup}{a function that is invoked when the device is closed.
    This is called with a list containing the code for each of the
separate "pages" created in R, i.e. each time we call NewFrame.
The content of each element of the page list is a character vector
containing the JavaScript code for that page.
The second argument is the file or connection to which to write the
generated JavaScript code. Additional parameters passed to
\code{jsCanvas} are also passed directly in this function call.
The default is to call \code{writeCode} in the package.
This turns the code into functions. One might want to use the code
directly, e.g. to evaluate the commands rather than defining a function.
}
\item{canvasId}{the identifier(s) (i.e. name(s)) of the canvases on
which to draw.}
\item{multiCanvas}{a logical value that controls whether the functions
arrange to draw on the separate canvases for each plot (\code{TRUE})
or on the same canvas (\code{FALSE})}
\item{\dots}{additional arguments that are passed on to the function
specified by \code{wrapup}}
\item{runPlotCommand}{this is a logical value that controls whether to
interpret the \code{file} argument as R commands to run to create
the plot or as the name of a file.
}
}
%\details{}
\value{
If \code{runPlotCommand} is \code{TRUE} (e.g. if one passes a
  non-literal value for \code{file}), a character vector
containing the generated JavaScript code is returned.
Otherwise, an object of class \code{JavaScriptCanvasDevice}
is returned. This contains a reference to the C-level
data structure representing the device and one can access
the fields in the device and the R functions that implement
the device.
}
\references{
R Internals Manual for details on graphics devices.
}
\author{
Duncan Temple Lang
}
\seealso{
\code{\link{htmlCanvas}}
The RGraphics Device package.
}
\examples{
js.code = jsCanvas({ plot(1:10); plot(density(rnorm(100)))})
dev = jsCanvas()
plot(1:10)
plot(density(rnorm(100)))
f = getCode(dev) # currently need to return the function before device is closed
dev.off()
f()
jsCanvas("myJSCode.js")
plot(1:10)
plot(density(rnorm(100)))
dev.off()
# This version writes to a file, but also controls the name of the
# javascript functions - myPlot1 and myPlot2 - and arranges that
# each of them is to draw on the canvas named plotCanvas.
# We can then call these JavaScript functions at different times and
# they will draw on that canvas.
# We also inline the supporting JavaScript code.
jsCanvas("myJSCode.js", funName = "myPlot", canvasId = 'plotCanvas',
multiCanvas = FALSE, inlineJS = TRUE)
plot(1:10)
plot(density(rnorm(100)))
dev.off()
}
\keyword{graphics}
\keyword{device}
\keyword{dynamic}
| /man/jsCanvas.Rd | no_license | omegahat/RJSCanvasDevice | R | false | false | 4,379 | rd | \name{jsCanvas}
\alias{jsCanvas}
\title{Open JavaScript Canvas Graphics Device}
\description{
This function opens a new R graphics device and
becomes the active device to which graphics
commands are sent. This device maps the graphical
commands to JavaScript code. When the device is closed,
the JavaScript is either written to a file
or made available as a character vector.
This JavaScript code can then be added to an HTML document
and rendered on a JavaScript canvas.
}
\usage{
jsCanvas(file = character(), dim = c(1000, 800), col = "black", fill = "transparent",
ps = 10, wrapup = writeCode,
canvasId = "canvas", multiCanvas = FALSE, ...,
runPlotCommand = !missing(file) && !is.character(substitute(file)))
}
\arguments{
\item{file}{the name of a file or a connection object to which the
generated JavaScript code will be written when the device is closed.
This can be an empty vector (the default) in which case the code is
not written to a file. Instead, the code can be accessed from the
device.
This argument can also be an R call or expression and
it will be evaluated and the resulting JavaScript code
returned directly. This allows one to open, plot and close
the device in a single action and retrieve the JavaScript
code in one operation.
}
\item{dim}{the dimensions to use for the HTML canvas device. These are pixels.}
\item{col}{the default drawing color}
\item{fill}{the default fill color}
\item{ps}{the initial point size}
\item{wrapup}{a function that is invoked when the device is closed.
    This is called with a list containing the code for each of the
separate "pages" created in R, i.e. each time we call NewFrame.
The content of each element of the page list is a character vector
containing the JavaScript code for that page.
The second argument is the file or connection to which to write the
generated JavaScript code. Additional parameters passed to
\code{jsCanvas} are also passed directly in this function call.
The default is to call \code{writeCode} in the package.
This turns the code into functions. One might want to use the code
directly, e.g. to evaluate the commands rather than defining a function.
}
\item{canvasId}{the identifier(s) (i.e. name(s)) of the canvases on
which to draw.}
\item{multiCanvas}{a logical value that controls whether the functions
arrange to draw on the separate canvases for each plot (\code{TRUE})
or on the same canvas (\code{FALSE})}
\item{\dots}{additional arguments that are passed on to the function
specified by \code{wrapup}}
\item{runPlotCommand}{this is a logical value that controls whether to
interpret the \code{file} argument as R commands to run to create
the plot or as the name of a file.
}
}
%\details{}
\value{
If \code{runPlotCommand} is \code{TRUE} (e.g. if one passes a
  non-literal value for \code{file}), a character vector
containing the generated JavaScript code is returned.
Otherwise, an object of class \code{JavaScriptCanvasDevice}
is returned. This contains a reference to the C-level
data structure representing the device and one can access
the fields in the device and the R functions that implement
the device.
}
\references{
R Internals Manual for details on graphics devices.
}
\author{
Duncan Temple Lang
}
\seealso{
\code{\link{htmlCanvas}}
The RGraphics Device package.
}
\examples{
js.code = jsCanvas({ plot(1:10); plot(density(rnorm(100)))})
dev = jsCanvas()
plot(1:10)
plot(density(rnorm(100)))
f = getCode(dev) # currently need to return the function before device is closed
dev.off()
f()
jsCanvas("myJSCode.js")
plot(1:10)
plot(density(rnorm(100)))
dev.off()
# This version writes to a file, but also controls the name of the
# javascript functions - myPlot1 and myPlot2 - and arranges that
# each of them is to draw on the canvas named plotCanvas.
# We can then call these JavaScript functions at different times and
# they will draw on that canvas.
# We also inline the supporting JavaScript code.
jsCanvas("myJSCode.js", funName = "myPlot", canvasId = 'plotCanvas',
multiCanvas = FALSE, inlineJS = TRUE)
plot(1:10)
plot(density(rnorm(100)))
dev.off()
}
\keyword{graphics}
\keyword{device}
\keyword{dynamic}
|
#' Feature Extraction
#'
#' Builds per-match opening/closing odds features from a long table of
#' bookmaker odds updates, dropping bookmakers with too many missing values.
#'
#' @param matches data.table of matches; must contain matchId, Match_Date,
#'   Total_Score and the score/result columns that are dropped below.
#' @param odd_details data.table of odds updates with columns matchId,
#'   betType, oddtype, bookmaker, odd and OddChangeDateTime.
#' @param pMissThreshold maximum allowed proportion of missing closing odds
#'   (since trainStart) for a bookmaker to be kept.
#' @param trainStart date from which missingness proportions are evaluated.
#' @param testStart date from which bookmakers must have zero missing odds.
#' @return data.table of per-match Odd_Open/Odd_Close features with an
#'   `outcome` column ("OVER"/"under") derived from Total_Score > 2.5.
extract_features.openclose <- function(matches,odd_details,pMissThreshold=0.01,trainStart,testStart){
# Work on copies so the caller's data.tables are not modified by reference.
details = copy(odd_details)
matches = copy(matches)
# Order updates chronologically so odd[1] is the opening odd and odd[.N]
# the closing odd within each (match, betType, oddtype, bookmaker) group.
details=details[order(OddChangeDateTime)]
feature_odd_details=details[,list(Odd_Open=odd[1],Odd_Close=odd[.N]),list(matchId,betType,oddtype,bookmaker)]
feature_odd_details = merge(matches[,list(matchId,Match_Date)], feature_odd_details,by="matchId")
#HANDLE MISSINGS
# Cast to one column per bookmaker/oddtype closing odd, then melt back so
# that missing (match, bookmaker, oddtype) combinations become explicit NAs.
details_temp = dcast(feature_odd_details, matchId+betType ~ paste0("Odd_Close_",bookmaker)+oddtype, value.var = c("Odd_Close"))
details_melt = melt(details_temp, id.vars = c("matchId","betType"), measure.vars = names(details_temp)[names(details_temp) %like% "Odd_Close"], value.name = "odd")
# Recover bookmaker/oddtype from the generated column names ("Odd_Close_<bookmaker>_<oddtype>").
details_melt[,c("OpenClose","bookmaker","oddtype"):=tstrsplit(variable,split="_",keep=c(2:4))]
details_melt[,variable:=NULL]
details_melt = merge(matches[,list(matchId,Match_Date)], details_melt,by="matchId",all=T)
# Proportion of missing closing odds per bookmaker/betType since trainStart:
bookieMissingness = details_melt[Match_Date >= trainStart,list(.N,percMiss=sum(is.na(odd))/.N),by=list(bookmaker,betType)]
bookiesToKeep = unique(bookieMissingness[percMiss <= pMissThreshold]$bookmaker)
cat("Number of bookmakers with proportion of missings below",pMissThreshold,"since",as.character(trainStart),":",length(bookiesToKeep),"\n")
# Additionally require zero missing odds in the test period:
nonmissingBookmakers_sinceTestStart = unique(details_melt[Match_Date >= testStart, list(.N,NA_SUM=sum(is.na(odd))),by=list(bookmaker,betType)][NA_SUM==0]$bookmaker)
bookiesToKeep = intersect(bookiesToKeep,nonmissingBookmakers_sinceTestStart)
cat("Number of bookmakers with no missings since testStart", as.character(testStart), ":", length(bookiesToKeep), "\n")
# Back to wide format, keeping only the columns of the retained bookmakers.
# NOTE(review): grep() over an alternation of bookmaker names also matches a
# bookmaker whose name contains another's as a substring -- verify.
details = dcast(feature_odd_details,matchId~oddtype+bookmaker,value.var = c("Odd_Open","Odd_Close"))
columnsToKeep = grep(paste(bookiesToKeep,collapse="|"),names(details),value=T)
details = details[,c('matchId',columnsToKeep),with=F]
#HANDLE MISSINGS END
# Attach match metadata; score/result columns are dropped (target leakage).
details = merge(matches[,-c('Home','Away','Home_Score','Away_Score','Match_Result','Result_Home','Result_Tie','Result_Away','type'),with=F],
details,by="matchId",all=T)
# Binary target: total goals over/under 2.5.
details[,outcome:=ifelse(Total_Score>2.5,"OVER","under")]
return(features = details)
}
#temp4=extract_features.openclose(temp2, temp3, testStart=as.Date('2018-11-16'), trainStart=as.Date('2012-07-15'))
#features = temp4
| /files/feature_extraction.r | no_license | pjournal/etm01-TheDenominator | R | false | false | 2,452 | r | #' Feature Extraction
extract_features.openclose <- function(matches,odd_details,pMissThreshold=0.01,trainStart,testStart){
details = copy(odd_details)
matches = copy(matches)
details=details[order(OddChangeDateTime)]
feature_odd_details=details[,list(Odd_Open=odd[1],Odd_Close=odd[.N]),list(matchId,betType,oddtype,bookmaker)]
feature_odd_details = merge(matches[,list(matchId,Match_Date)], feature_odd_details,by="matchId")
#HANDLE MISSINGS
details_temp = dcast(feature_odd_details, matchId+betType ~ paste0("Odd_Close_",bookmaker)+oddtype, value.var = c("Odd_Close"))
details_melt = melt(details_temp, id.vars = c("matchId","betType"), measure.vars = names(details_temp)[names(details_temp) %like% "Odd_Close"], value.name = "odd")
details_melt[,c("OpenClose","bookmaker","oddtype"):=tstrsplit(variable,split="_",keep=c(2:4))]
details_melt[,variable:=NULL]
details_melt = merge(matches[,list(matchId,Match_Date)], details_melt,by="matchId",all=T)
bookieMissingness = details_melt[Match_Date >= trainStart,list(.N,percMiss=sum(is.na(odd))/.N),by=list(bookmaker,betType)]
bookiesToKeep = unique(bookieMissingness[percMiss <= pMissThreshold]$bookmaker)
cat("Number of bookmakers with proportion of missings below",pMissThreshold,"since",as.character(trainStart),":",length(bookiesToKeep),"\n")
nonmissingBookmakers_sinceTestStart = unique(details_melt[Match_Date >= testStart, list(.N,NA_SUM=sum(is.na(odd))),by=list(bookmaker,betType)][NA_SUM==0]$bookmaker)
bookiesToKeep = intersect(bookiesToKeep,nonmissingBookmakers_sinceTestStart)
cat("Number of bookmakers with no missings since testStart", as.character(testStart), ":", length(bookiesToKeep), "\n")
details = dcast(feature_odd_details,matchId~oddtype+bookmaker,value.var = c("Odd_Open","Odd_Close"))
columnsToKeep = grep(paste(bookiesToKeep,collapse="|"),names(details),value=T)
details = details[,c('matchId',columnsToKeep),with=F]
#HANDLE MISSINGS END
details = merge(matches[,-c('Home','Away','Home_Score','Away_Score','Match_Result','Result_Home','Result_Tie','Result_Away','type'),with=F],
details,by="matchId",all=T)
details[,outcome:=ifelse(Total_Score>2.5,"OVER","under")]
return(features = details)
}
#temp4=extract_features.openclose(temp2, temp3, testStart=as.Date('2018-11-16'), trainStart=as.Date('2012-07-15'))
#features = temp4
|
### params
### Configuration for the genetic-algorithm gene-selection run. These are
### top-level globals read by the rest of the project.
datasetSize <- 5000; # which dataset to use (1000,5000,10000)
maxGeneSelectionSize <- 30; # lower to emphasize smaller chromosomes from the first population; it takes 1 when 0 and 0 when 2*maxGeneSelectionSize
populationSize <- 1000;
numGenerations <- 100;
# The three weights below are combined into a fitness score; presumably they
# should sum to 1 -- TODO confirm against the fitness function.
weight.chromosomeLength <- 0.1; # higher to emphasize smaller chromosomes (less genes involved)
weight.accuracy <- 0.3; # higher to emphasize general classificator performance
weight.specificity <- 0.6; # higher to emphasize better crossed classification performance
# in cols, the guessed classes
# in rows, the actual classes
# matrix[actual,guessed] == the relative score of having guessed a class for the corresponding actual value
# NOTE(review): data.frame() treats each c(...) below as a COLUMN, not a row,
# so the stored values are the TRANSPOSE of the row-wise layout sketched in
# the comments. The diagonal (1.00) is unaffected, but off-diagonal lookups
# scoreByClassesMatrix[actual, guessed] may be swapped -- verify intent.
scoreByClassesMatrix <- data.frame(
#c("real/guessed","AML","CML","ALL","CLL","NO"),
#c( "AML", - , - , - , - , - ),
#c( "CML", - , - , - , - , - ),
#c( "ALL", - , - , - , - , - ),
#c( "CLL", - , - , - , - , - ),
#c( "NO", - , - , - , - , - )
c(1.00, 0.25, 0.90, 0.10, 0.00),
c(0.90, 1.00, 0.80, 0.70, 0.25),
c(0.90, 0.10, 1.00, 0.25, 0.00),
c(0.70, 0.80, 0.90, 1.00, 0.25),
c(0.50, 0.75, 0.50, 0.75, 1.00)
);
colnames(scoreByClassesMatrix) <- c("AML","CML","ALL","CLL","NO");
rownames(scoreByClassesMatrix) <- c("AML","CML","ALL","CLL","NO");
datasetSize <- 5000; # which dataset to use (1000,5000,10000)
maxGeneSelectionSize <- 30; # lower to emphasize smaller chromosomes from the first population; it takes 1 when 0 and 0 when 2*maxGeneSelectionSize
populationSize <- 1000;
numGenerations <- 100;
weight.chromosomeLength <- 0.1; # higher to emphasize smaller chromosomes (less genes involved)
weight.accuracy <- 0.3; # higher to emphasize general classificator performance
weight.specificity <- 0.6; # higher to emphasize better crossed classification performance
# in cols, the guessed classes
# in rows, the actual classes
# matrix[actual,guessed] == the relative score of having guessed a class for the corrsponding actual value
scoreByClassesMatrix <- data.frame(
#c("real/guessed","AML","CML","ALL","CLL","NO"),
#c( "AML", - , - , - , - , - ),
#c( "CML", - , - , - , - , - ),
#c( "ALL", - , - , - , - , - ),
#c( "CLL", - , - , - , - , - ),
#c( "NO", - , - , - , - , - )
c(1.00, 0.25, 0.90, 0.10, 0.00),
c(0.90, 1.00, 0.80, 0.70, 0.25),
c(0.90, 0.10, 1.00, 0.25, 0.00),
c(0.70, 0.80, 0.90, 1.00, 0.25),
c(0.50, 0.75, 0.50, 0.75, 1.00)
);
colnames(scoreByClassesMatrix) <- c("AML","CML","ALL","CLL","NO");
rownames(scoreByClassesMatrix) <- c("AML","CML","ALL","CLL","NO"); |
# plot1.R -- Exploratory Data Analysis course project, plot 1
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# NOTE(review): the hard-coded setwd() makes the script machine-specific;
# kept here for compatibility with the original workflow.
setwd("C:\\Users\\jmugeiro\\Documents\\R_Working_Directory\\Coursera")
# Read the raw power data; '?' marks missing values in this data set.
data <- read.csv("household_power_consumption.txt",
                 sep = ";",
                 colClasses = c(rep("character", 2), rep("numeric", 7)),
                 na.strings = "?")
# BUG FIX: the original format string "%d/%m/%Y" silently dropped the
# time-of-day component (the commented-out "%H:%M:%S" shows the intent);
# parse the full timestamp instead.
data$Timestamp <- strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
data$Date <- NULL
data$Time <- NULL
# Keep only the two days of interest (Feb 1-2, 2007).
jcm_data <- subset(data, as.Date(data$Timestamp) >= "2007-02-01"
                       & as.Date(data$Timestamp) < "2007-02-03")
png(filename = "plot1.png", height = 480, width = 480, bg = "transparent")
hist(jcm_data$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency")
dev.off()
| /plot1.R | no_license | jmugeiro/Exploratory-Data-Analysis | R | false | false | 806 | r | Enter file contents here
setwd("C:\\Users\\jmugeiro\\Documents\\R_Working_Directory\\Coursera")
data <- read.csv("household_power_consumption.txt",
sep=";",
colClasses=c(rep("character",2),rep("numeric",7)),
na.strings="?")
data$Timestamp <- strptime(paste(data$Date,data$Time),format="%d/%m/%Y") ##%H:%M:%S")
data$Date=NULL
data$Time=NULL
jcm_data = subset(data,as.Date(data$Timestamp) >= "2007-02-01"
& as.Date(data$Timestamp) < "2007-02-03")
png(filename="plot1.png", height=480, width=480, bg="transparent")
hist(jcm_data$Global_active_power,
col="red",
main="Global Active Power",
##sub=" Feito por José Mugeiro ",
xlab="Global Active Power (kilowatts)" ,
ylab="Frequency"
)
dev.off()
|
\encoding{UTF-8}
\name{demo_datasets_gen}
\alias{demo_datasets_gen}
\title{To generate all demo datasets for testing purposes}
\description{
To generate all demo datasets for testing purposes
On the top menu, enter '99' (no quote) to run this.
}
\keyword{misc} | /man/demo_datasets_gen.rd | no_license | cran/bear | R | false | false | 286 | rd | \encoding{UTF-8}
\name{demo_datasets_gen}
\alias{demo_datasets_gen}
\title{To generate all demo datasets for testing purposes}
\description{
To generate all demo datasets for testing purposes
On the top menu, enter '99' (no quote) to run this.
}
\keyword{misc} |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/add_files.R
\name{add_files}
\alias{add_files}
\alias{delete_files}
\alias{rename_files}
\alias{update_files}
\title{Add files to a gist object}
\usage{
add_files(gist, ...)
update_files(gist, ...)
delete_files(gist, ...)
rename_files(gist, ...)
}
\arguments{
\item{gist}{A gist object or something coerceable to a gist}
\item{...}{Curl options passed on to \code{\link[httr]{GET}}}
}
\description{
Add files to a gist object
}
\examples{
\dontrun{
add_files("~/stuff.Rmd")
update_files()
delete_files()
rename_files()
}
}
| /man/files.Rd | permissive | FranzKrah/gistr | R | false | false | 614 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/add_files.R
\name{add_files}
\alias{add_files}
\alias{delete_files}
\alias{rename_files}
\alias{update_files}
\title{Add files to a gist object}
\usage{
add_files(gist, ...)
update_files(gist, ...)
delete_files(gist, ...)
rename_files(gist, ...)
}
\arguments{
\item{gist}{A gist object or something coerceable to a gist}
\item{...}{Curl options passed on to \code{\link[httr]{GET}}}
}
\description{
Add files to a gist object
}
\examples{
\dontrun{
add_files("~/stuff.Rmd")
update_files()
delete_files()
rename_files()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tslib.R
\name{ggtsdiag}
\alias{ggtsdiag}
\title{Plots time-series diagnostics}
\usage{
ggtsdiag(
object,
gof.lag = 10,
conf.int = TRUE,
conf.int.colour = "#0000FF",
conf.int.linetype = "dashed",
conf.int.fill = NULL,
conf.int.alpha = 0.3,
ad.colour = "#888888",
ad.linetype = "dashed",
ad.size = 0.2,
nrow = NULL,
ncol = 1,
...
)
}
\arguments{
\item{object}{A fitted time-series model}
\item{gof.lag}{The maximum number of lags for a Portmanteau goodness-of-fit test}
\item{conf.int}{Logical flag indicating whether to plot confidence intervals}
\item{conf.int.colour}{line colour for confidence intervals}
\item{conf.int.linetype}{line type for confidence intervals}
\item{conf.int.fill}{fill colour for confidence intervals}
\item{conf.int.alpha}{alpha for confidence intervals}
\item{ad.colour}{Line colour for additional lines}
\item{ad.linetype}{Line type for additional lines}
\item{ad.size}{Fill colour for additional lines}
\item{nrow}{Number of facet/subplot rows}
\item{ncol}{Number of facet/subplot columns}
\item{...}{other keywords}
}
\value{
ggplot
}
\description{
Plots time-series diagnostics
}
\examples{
ggtsdiag(arima(AirPassengers))
}
| /man/ggtsdiag.Rd | no_license | dereksonderegger/ggfortify | R | false | true | 1,271 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tslib.R
\name{ggtsdiag}
\alias{ggtsdiag}
\title{Plots time-series diagnostics}
\usage{
ggtsdiag(
object,
gof.lag = 10,
conf.int = TRUE,
conf.int.colour = "#0000FF",
conf.int.linetype = "dashed",
conf.int.fill = NULL,
conf.int.alpha = 0.3,
ad.colour = "#888888",
ad.linetype = "dashed",
ad.size = 0.2,
nrow = NULL,
ncol = 1,
...
)
}
\arguments{
\item{object}{A fitted time-series model}
\item{gof.lag}{The maximum number of lags for a Portmanteau goodness-of-fit test}
\item{conf.int}{Logical flag indicating whether to plot confidence intervals}
\item{conf.int.colour}{line colour for confidence intervals}
\item{conf.int.linetype}{line type for confidence intervals}
\item{conf.int.fill}{fill colour for confidence intervals}
\item{conf.int.alpha}{alpha for confidence intervals}
\item{ad.colour}{Line colour for additional lines}
\item{ad.linetype}{Line type for additional lines}
\item{ad.size}{Fill colour for additional lines}
\item{nrow}{Number of facet/subplot rows}
\item{ncol}{Number of facet/subplot columns}
\item{...}{other keywords}
}
\value{
ggplot
}
\description{
Plots time-series diagnostics
}
\examples{
ggtsdiag(arima(AirPassengers))
}
|
with(adde3b9d0d46c4c25ba250fce659ba5dd, {ROOT <- 'D:/ATS2.0/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/1c4fa71c-191c-4da9-8102-b247ffddc5d3';FRAME909970$DATA_COLLECTION_TIME[FRAME909970$DATA_COLLECTION_TIME == 0] <- 0;}); | /1c4fa71c-191c-4da9-8102-b247ffddc5d3/R/Temp/aDHo73VvHxrMb.R | no_license | ayanmanna8/test | R | false | false | 272 | r | with(adde3b9d0d46c4c25ba250fce659ba5dd, {ROOT <- 'D:/ATS2.0/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/1c4fa71c-191c-4da9-8102-b247ffddc5d3';FRAME909970$DATA_COLLECTION_TIME[FRAME909970$DATA_COLLECTION_TIME == 0] <- 0;}); |
# TODO: enforce use of #iterator syntax in init section for vars with length > 1
# TODO: Check array tags used in the init and body sections for validity.
# TODO: Make sure that classify tags accurately interprets all tags and errors if uninterpretable tag.
# TODO: Allow for conditional tags to use a list, such as [[nclass#class == 5]]
#note that there's a bit of trickery in interpreting list tags
#the varnames are stored as only the prefix in the initCollection (no #iterator)
#and they are referenced in the body as var#iterator
#At this point, doesn't enforce proper use of iterator with a list
#setwd("C:/Users/Michael Hallquist/Documents/Automation_Sandbox")
#createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/LSPD Covariate Template.txt")
#system.time(createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/LSPD Template.txt"))
#createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/LSPD Template New Init.txt")
#createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/L2 Multimodel Template No iter.txt")
#need to sort out why is.na is working for lookupValue in replaceBodyTags
#in particular, why isn't the current value carrying over from the previous looping iteration?
#SOME THOUGHTS RE DOCUMENTATION
#foreach tags may only be with respect to an iterator... could not have some random foreach var
#' Split a data frame into a list by rows
#'
#' Takes a data frame and returns a list with an element for each row of the data frame.
#' This is an internal function.
#'
#' @param df An object inheriting from class \code{data.frame}
#'
#' @return A list where each element is a one row data frame
#' @keywords internal
#' @examples
#' # small example using built in data
#' MplusAutomation:::splitDFByRow(mtcars)
splitDFByRow <- function(df) {
  #Split a data frame into a list with one single-row data frame per row.
  #drop = FALSE is required so that single-column inputs still yield
  #one-row data frames (without it, df[i, ] collapses to a bare vector).
  #seq_len (rather than seq.int) returns integer(0) for zero-row input,
  #so an empty data frame yields an empty list instead of erroring.
  stopifnot(inherits(df, "data.frame"))
  lapply(seq_len(nrow(df)), function(i) df[i, , drop = FALSE])
}
#' Classifies Tags
#'
#' Accepts a vector of tags to be classified as well as the iterators.
#' Tags are classified as \sQuote{iterator}, \sQuote{array}, \sQuote{conditional}, or
#' \sQuote{simple}. This is an internal function.
#'
#' @param tagVector A vector of tags to be classified
#' @param iteratorsVector a vector of the iterators to correctly classify tags
#' @return A character vector the same length as the vectors to be tagged
#' @keywords internal
classifyTags <- function(tagVector, iteratorsVector) {
  #Classify each [[...]] tag string as one of "iterator", "array",
  #"conditional", or "simple". Tags that match none of the patterns keep
  #the default empty string. iteratorsVector supplies the iterator names
  #used to distinguish the tag classes.
  tagType <- character(length(tagVector))

  #alternation of all iterator names, reused by every pattern below
  itAlternation <- paste(iteratorsVector, collapse = "|")

  patterns <- list(
    #a bare iterator name, e.g. [[group]]
    iterator = paste0("\\[\\[\\s*(", itAlternation, ")\\s*\\]\\]"),
    #a variable indexed by an iterator, e.g. [[groupnames#group]]
    array = paste0("\\[\\[\\s*\\b([\\w\\.]+)#(", itAlternation, ")\\b\\s*\\]\\]"),
    #opening or closing (leading /) comparison against a number,
    #e.g. [[group == 5]] or [[/group == 5]]
    conditional = paste0("\\[\\[\\s*/*(", itAlternation, ")\\s*[!><=]+\\s*\\d+\\s*\\]\\]"),
    #any other plain tag; negative lookahead excludes bare iterator names
    simple = paste0("\\[\\[\\s*(?!", itAlternation, ")[\\w+\\.]+\\s*\\]\\]"))

  hits <- lapply(patterns, function(p) grep(p, tagVector, perl = TRUE))

  #a tag matching more than one class indicates an ambiguous template
  stopifnot(!any(duplicated(unlist(hits))))

  for (kind in names(hits)) {
    tagType[hits[[kind]]] <- kind
  }
  tagType
}
#' Get Initial Tags
#'
#' An internal function
#'
#' @param initCollection A list?
#' @return The initMatches
#' @keywords internal
getInitTags <- function(initCollection) {
  #Scan every character-mode element of the parsed init section for
  #[[...]] tags and return a data.frame describing each match
  #(element, start, end, tag, listpos, tagType), or an empty data.frame
  #when no tags are present.
  #
  #seq_along (not 1:length) is safe for an empty collection, and matches
  #are accumulated in a list and bound once rather than grown with
  #repeated rbind calls.
  matchList <- list()
  for (i in seq_along(initCollection)) {
    if (storage.mode(initCollection[[i]]) == "character") {
      matches <- friendlyGregexpr("\\[\\[\\s*[\\s\\w=><!#/]+\\s*\\]\\]", initCollection[[i]], perl=TRUE)
      #listpos records which element of initCollection the match came from;
      #the element column (from friendlyGregexpr) indexes within that vector,
      #so a match in initCollection[[5]][3] has listpos 5, element 3.
      if (!is.null(matches)) {
        matches$listpos <- i
        matchList[[length(matchList) + 1L]] <- matches
      }
    }
  }
  if (length(matchList) == 0) return(data.frame())
  initMatches <- do.call(rbind, matchList)
  if (nrow(initMatches) > 0) {
    #classify tags as simple, array, iterator, or conditional
    initMatches$tagType <- classifyTags(initMatches$tag, initCollection$iterators)
    #strip the [[ ]] delimiters (and surrounding space) so downstream
    #substitution code can work with bare tag names
    initMatches$tag <- sapply(initMatches$tag, function(tag) {
      sub("\\[\\[\\s*([\\s\\w=><!#/]+)\\s*\\]\\]", "\\1", tag, perl=TRUE)
    })
  }
  initMatches
}
#' Parses tags in the body section
#'
#' Parses tags in the body section (character vector) and
#' init collection (list of vars defined in the init section).
#' This is an internal function.
#'
#' @param bodySection The body
#' @param initCollection The initial collection
#' @return A list with three elements, where each list represents the location,
#' start character, end character, tag type, etc. of each tag.
#' \describe{
#' \item{initTags}{initMatches}
#' \item{bodyTags}{bodyMatches}
#' \item{bodyText}{bodySection}
#' }
#' @keywords internal
parseTags <- function(bodySection, initCollection) {
  #Locate and classify every [[...]] tag in both the init collection and
  #the body text. Returns a three-element list:
  #  initTags - data.frame of tags found in the init section
  #  bodyTags - data.frame of tags found in the body text
  #  bodyText - the (unmodified) body character vector

  #tags defined in the init section
  initMatches <- getInitTags(initCollection)
  initMatches$currentValue <- NA_character_

  #every [[...]] occurrence in the body text
  tagPattern <- "\\[\\[\\s*[\\s\\w=><!#/]+\\s*\\]\\]"
  bodyMatches <- friendlyGregexpr(tagPattern, bodySection, perl=TRUE)
  if (is.null(bodyMatches)) stop("No tags found in body section of template file.")

  bodyMatches$tagType <- classifyTags(bodyMatches$tag, initCollection$iterators)

  #currentValue holds the running substitution value for each tag;
  #values are filled in per model as the iterators are traversed
  bodyMatches$currentValue <- NA_character_

  #strip the [[ ]] delimiters (and surrounding space) so sub() can be
  #used later to splice in current values
  bodyMatches$tag <- sapply(bodyMatches$tag, function(t) {
    sub("\\[\\[\\s*([\\s\\w=><!#/]+)\\s*\\]\\]", "\\1", t, perl=TRUE)
  })

  list(initTags=initMatches, bodyTags=bodyMatches, bodyText=bodySection)
}
#' Create Mplus Input Files from Template
#'
#' The \code{createModels} function processes a single Mplus template file and creates a group of related
#' model input files. Definitions and examples for the template language are provided in the MplusAutomation
#' vignette and are not duplicated here at the moment. See this documentation: \code{vignette("Vignette", package="MplusAutomation")}
#'
#' @param templatefile The filename (absolute or relative path) of an Mplus template file to be processed. Example \dQuote{C:/MplusTemplate.txt}
#' @return No value is returned by this function. It is solely used to process an Mplus template file.
#' @author Michael Hallquist
#' @keywords interface
#' @export
#' @examples
#' \dontrun{
#' createModels("L2 Multimodel Template No iter.txt")
#' }
createModels <- function(templatefile) {
  #Process an Mplus template file: split it into the [[init]] section and
  #the body, parse all tags, and recursively expand the iterators to write
  #one Mplus input file per combination of iterator values.
  if (!file.exists(templatefile)) stop("Template file not found.")
  readfile <- scan(templatefile, what="character", sep="\n", strip.white=FALSE, blank.lines.skip=FALSE, quiet=TRUE)

  #locate the [[init]] ... [[/init]] block (fixed strings, not regexes)
  startinit <- grep("[[init]]", readfile, fixed=TRUE)
  endinit <- grep("[[/init]]", readfile, fixed=TRUE)

  #require exactly one well-ordered init section
  if (length(startinit) != 1 || length(endinit) != 1 || endinit <= startinit) {
    stop("Unable to find init section in template file.")
  }

  #extract init section (between, not including, the markers)
  initSection <- readfile[(startinit+1):(endinit-1)]

  #everything after [[/init]] is the body
  bodySection <- readfile[(endinit+1):length(readfile)]

  #convert the init text into a list of parsed init instructions
  initCollection <- processInit(initSection)

  templateTags <- parseTags(bodySection, initCollection)

  #simple tags do not vary by iterator, so resolve them once up front
  templateTags <- lookupSimpleTags(templateTags, initCollection)

  #kick off the recursive replacement over all iterator combinations
  if (length(initCollection$iterators) > 0) {
    recurseReplace(templateTags, initCollection)
  }
}
#' Simple tag lookup
#'
#' The purpose of this function is to set the currentValue column
#' for the bodyTags and initTags data.frames for simple tags only.
#' Most values will be replaced at the bottom level of recursion,
#' but simple tags do not change over iterations, so can be set one time.
#'
#' @param templateTags The template tags
#' @param initCollection The initial collection
#' @return A tag.
#' @keywords internal
lookupSimpleTags <- function(templateTags, initCollection) {
  #Set the currentValue column for simple tags in the init tag data.frame.
  #Simple tags do not change across iterator loops, so they can be resolved
  #one time before recursion begins. Body-tag simple values are handled at
  #the bottom level of recursion (replaceBodyTags/lookupValue) instead.
  simpleInitPositions <- which(templateTags$initTags$tagType == "simple")

  #guard: sapply over character(0) returns an empty list, which cannot be
  #assigned into a character column, so only substitute when tags exist
  if (length(simpleInitPositions) > 0) {
    templateTags$initTags$currentValue[simpleInitPositions] <- sapply(
      templateTags$initTags$tag[simpleInitPositions],
      function(value) {
        #direct list lookup replaces eval(parse(text=...)): same result for
        #exact names, without evaluating arbitrary text as code
        initCollection[[value]]
      })
  }
  return(templateTags)
}
#' Updates current values
#'
#' Body tags currentValues are substituted at the bottom-most level
#' after init collection is finalized (recursively process any nested tags)
#'
#' @param templateTags The template tags
#' @param initCollection Initial collection
#' @return Updated current value or the original if no match.
#' @keywords internal
updateCurrentValues <- function(templateTags, initCollection) {
#Refresh the currentValue column of the init tag data.frame (and the
#iterator tags of the body data.frame) for the iterator that is currently
#active. Only tags belonging to initCollection$curIteratorName are touched,
#so values set for other iterators persist across loop levels. Body tag
#values other than iterators are substituted at the bottom of the recursion
#(replaceBodyTags) after the init collection is finalized.
#
#first handle iterator tags in the init section: the current value is this
#iterator's position in the looping process
initIteratorPositions <- which(
templateTags$initTags$tagType=="iterator" &
templateTags$initTags$tag == initCollection$curIteratorName)
#curItPos is indexed here by numeric depth; names are appended to curItPos
#in depth order by recurseReplace, so position curIteratorDepth is assumed
#to correspond to this iterator -- NOTE(review): verify if iterators are
#ever re-entered out of order
templateTags$initTags$currentValue[initIteratorPositions] <- initCollection$curItPos[initCollection$curIteratorDepth]
#same replacement for iterator tags appearing directly in the body
bodyIteratorPositions <- which(
templateTags$bodyTags$tagType == "iterator" &
templateTags$bodyTags$tag == initCollection$curIteratorName)
templateTags$bodyTags$currentValue[bodyIteratorPositions] <- initCollection$curItPos[initCollection$curIteratorDepth]
#next, handle array tags in the init section; each tag names its own
#iterator (var#iterator), so only those matching the current iterator are
#updated below
initArrayPositions <- which(templateTags$initTags$tagType=="array")
#only update values if any array tags are found
#(zero matches would produce a malformed split below)
if (length(initArrayPositions) > 0) {
#split the matching rows into a list of one-row data frames
divideByRow <- splitDFByRow(templateTags$initTags[initArrayPositions,])
#for each row, if the array tag belongs to the current iterator, look up
#the element of the named init variable at this iterator's position;
#otherwise leave the current value unchanged
templateTags$initTags$currentValue[initArrayPositions] <- unlist(sapply(divideByRow,
function(row) {
split <- strsplit(row$tag, split="#", fixed=TRUE)[[1]]
if (length(split) != 2) stop("array tag missing iterator: ", row$tag)
if (split[2] == initCollection$curIteratorName) {
#e.g. initCollection$groupnames[2] for tag groupnames#group at position 2
currentValue <- eval(parse(text =
paste0("initCollection$", split[1], "[",
initCollection$curItPos[initCollection$curIteratorDepth], "]")))
if (is.null(currentValue)) {
stop("When replacing tag: ", row$tag, ", could not find corresponding value.")
}
return(currentValue)
} else {
# return unchanged current value if not this iterator
return(row$currentValue)
}
}))
}
#(a parallel block that updated body array tags was removed from service;
#body tag values are resolved at the bottom recursion level instead)
return(templateTags)
}
#' Recursive replace
#'
#' To do: fill in some details
#'
#' @param templateTags The template tags
#' @param initCollection The list of all arguments parsed from the init section
#' @param curiterator An integer that tracks of the depth of recursion through the iterators. Defaults to 1.
#' @return Does not look like it returns anything
#' @keywords internal
recurseReplace <- function(templateTags, initCollection, curiterator=1L) {
  #Recursively loop over every combination of iterator values. At each
  #level, update the current tag values for the active iterator; at the
  #deepest level, resolve conditionals, finalize the init collection,
  #substitute body tags, and write one Mplus input file.
  #
  #templateTags: list(initTags, bodyTags, bodyText) from parseTags
  #initCollection: list of parsed init-section variables plus iterators
  #curiterator: 1-based depth into initCollection$iterators
  if (!is.list(initCollection)) {
    stop("Argument list passed to recurseReplace is not a list")
  }
  #check that curiterator is indeed a whole number
  stopifnot(curiterator %% 1 == 0)

  thisIterator <- initCollection$iterators[curiterator]
  #record the active iterator for use by updateCurrentValues and friends
  initCollection$curIteratorName <- thisIterator
  initCollection$curIteratorDepth <- curiterator

  for (i in initCollection[[thisIterator]]) {
    #curItPos is a named vector of positions, one per iterator; names allow
    #lookup by iterator name in downstream functions
    initCollection$curItPos[thisIterator] <- i

    #update init-collection tag values for this iterator and iteration;
    #only tags matching this iterator are touched, avoiding redundant work
    templateTags <- updateCurrentValues(templateTags, initCollection)

    if (curiterator < length(initCollection$iterators)) {
      #not at the deepest level: recurse to the next iterator
      recurseReplace(templateTags, initCollection, curiterator = curiterator + 1)
    } else {
      #bottom of the iteration tree: all iterator/array values are current.
      #Work on a separate copy (finalTemplateTags) so substitutions do not
      #leak into subsequent loop iterations.
      finalTemplateTags <- processConditionalTags(templateTags, initCollection)

      #the body section to write is stored in the templateTags collection
      toWrite <- finalTemplateTags$bodyText

      #substitute values into the init collection, then resolve any nested
      #tags that remain (e.g. tags whose replacement contained more tags)
      finalInitCollection <- replaceInitTags(finalTemplateTags$initTags, initCollection)
      finalInitCollection <- finalizeInitCollection(finalInitCollection)

      #update bodySection with tag values from the finalized init tags
      toWrite <- replaceBodyTags(toWrite, finalTemplateTags$bodyTags, finalInitCollection)

      filename <- finalInitCollection$filename
      cat(paste("writing file: ", filename, "\n", sep=""))

      outputDir <- finalInitCollection$outputDirectory
      if (!file.exists(outputDir)) {
        dir.create(outputDir, recursive=TRUE)
      }

      curdir <- getwd()
      #restore the working directory even if writing fails part-way
      on.exit(setwd(curdir), add = TRUE)
      setwd(outputDir)

      #Mplus rejects input lines longer than 90 characters, so wrap them
      toWrite <- unlist(lapply(toWrite, function(line) {
        if (nchar(line) > 90) {
          strwrap(line, width=85, exdent=5)
        } else {
          line
        }
      }))

      writeLines(toWrite, con = filename, sep = "\n")
      setwd(curdir)
    }
  }
}
#' Replace Init Tags
#'
#' To do: fill in some details
#'
#' @param initTags Init tags
#' @param initCollection The list of all arguments parsed from the init section
#' @return Returns updated initCollection
#' @keywords internal
replaceInitTags <- function(initTags, initCollection) {
  #Splice the currentValue of each simple/iterator/array tag into the init
  #collection strings the tags were found in, adjusting the recorded
  #start/end offsets of later tags on the same string as replacements
  #change its length. Returns the updated initCollection.
  targetRows <- which(initTags$tagType %in% c("simple", "iterator", "array"))

  #nothing to substitute (original 1:nrow loop would have misfired here)
  if (length(targetRows) == 0) return(initCollection)

  targetTags <- initTags[targetRows, ]
  targetTags$rownumber <- seq_len(nrow(targetTags))

  for (i in seq_len(nrow(targetTags))) {
    row <- targetTags[i, ]
    #the string containing this tag: element row$element of the
    #initCollection member at position row$listpos
    stringToChange <- initCollection[[row$listpos]][row$element]

    preTag <- if (row$start > 1) substr(stringToChange, 1, row$start - 1) else ""
    postTag <- if (row$end < nchar(stringToChange)) substr(stringToChange, row$end + 1, nchar(stringToChange)) else ""

    initCollection[[row$listpos]][row$element] <- paste0(preTag, row$currentValue, postTag)

    #later tags on the same string must be shifted by the difference
    #between the tag length and its replacement length
    subsequentRows <- which(
      targetTags$rownumber > i &
      targetTags$listpos == row$listpos &
      targetTags$element == row$element)
    if (length(subsequentRows) > 0) {  #paren fixed: was length(subsequentRows > 0)
      diffLength <- nchar(row$currentValue) - (row$end - row$start + 1)
      targetTags[subsequentRows, "start"] <- targetTags[subsequentRows, "start"] + diffLength
      targetTags[subsequentRows, "end"] <- targetTags[subsequentRows, "end"] + diffLength
    }
  }
  #(the original also wrote the substituted rows back into the local
  #initTags copy, which had no effect on the returned value)
  return(initCollection)
}
#' Replace Body Tags
#'
#' To do: fill in some details
#'
#' @param bodySection character vector of body section of Mplus syntax
#' @param bodyTags collection of tags used inside of the template body
#' @param initCollection The list of all arguments parsed from the init section
#' @return Returns updated bodySection
#' @keywords internal
replaceBodyTags <- function(bodySection, bodyTags, initCollection) {
  #Splice tag values into the body text. For each simple/iterator/array
  #tag, use its recorded currentValue, or look the value up in the
  #finalized initCollection when it has not been set yet (NA). Offsets of
  #later tags on the same line are shifted as replacements change the line
  #length. Returns the updated body character vector.
  if (length(bodySection) <= 0) stop("Empty body section")

  targetTags <- with(bodyTags, bodyTags[tagType %in% c("simple", "iterator", "array"), ])

  #no replaceable tags: return the body unchanged (the original 1:nrow
  #loop would have iterated over c(1, 0) and failed)
  if (nrow(targetTags) == 0) return(bodySection)

  targetTags$rownumber <- seq_len(nrow(targetTags))

  for (i in seq_len(nrow(targetTags))) {
    row <- targetTags[i, ]
    stringToChange <- bodySection[row$element]

    preTag <- if (row$start > 1) substr(stringToChange, 1, row$start - 1) else ""
    postTag <- if (row$end < nchar(stringToChange)) substr(stringToChange, row$end + 1, nchar(stringToChange)) else ""

    #deferred values (still NA) are resolved against the finalized
    #init collection at this point
    if (is.na(row$currentValue)) {
      row$currentValue <- lookupValue(row$tag, row$tagType, initCollection)
    }

    bodySection[row$element] <- paste0(preTag, row$currentValue, postTag)

    #shift later tags on this line by the replacement length difference
    diffLength <- nchar(row$currentValue) - (row$end - row$start + 1)
    subsequentRows <- which(
      targetTags$rownumber > i &
      targetTags$element == row$element)
    if (length(subsequentRows) > 0) {  #paren fixed: was length(subsequentRows > 0)
      targetTags[subsequentRows, "start"] <- targetTags[subsequentRows, "start"] + diffLength
      targetTags[subsequentRows, "end"] <- targetTags[subsequentRows, "end"] + diffLength
    }
  }
  return(bodySection)
}
#' Lookup values
#'
#' To do: fill in some details
#'
#' @param tag name of tag for which we want to know the current value
#' @param tagType type of tag (simple, array, etc.) for the tag to lookup
#' @param initCollection The list of all arguments parsed from the init section
#' @return Current value
#' @keywords internal
lookupValue <- function(tag, tagType, initCollection) {
  #Look up the current value for a tag in the init collection.
  #  simple tags: the init variable of that name
  #  array tags (var#iterator): the element of var at the iterator's
  #    current position (initCollection$curItPos, a named vector)
  #Other tag types return NULL, as in the original implementation.
  #
  #Fixes: error messages previously referenced row$tag, but no `row`
  #exists in this scope (would have raised "object 'row' not found");
  #eval(parse(text=...)) replaced with direct list/vector indexing.
  if (missing(tag)) stop("No tag provided")
  if (missing(tagType)) stop("No tag type provided")

  if (tagType == "simple") {
    return(initCollection[[tag]])
  } else if (tagType == "array") {
    split <- strsplit(tag, split="#", fixed=TRUE)[[1]]
    if (length(split) != 2) stop("array tag missing iterator: ", tag)

    #named-vector lookup of the iterator's current loop position
    #(padding performed by processInit handles non-contiguous positions)
    iteratorPosition <- initCollection$curItPos[split[2]]
    currentValue <- initCollection[[split[1]]][iteratorPosition]
    if (is.null(currentValue)) {
      stop("When replacing tag: ", tag, ", could not find corresponding value.")
    }
    return(currentValue)
  }
  invisible(NULL)
}
#' Finalize Init Collection
#'
#' this function should handle initTags that still contain tags
#' once the initCollection is finalized, then process the deferred body tags
#' the notion is that the substitutions will be handled in an inefficient manner -- using lots
#' of regular expression parsing, not using the matched tags data.frame
#'
#' we only need to handle simple and array tags
#' iterators should always be integers
#' foreach and conditional are not relevant
#'
#' iterate over init tags until no tags are left
#' here, the init collection should already have had most of its tags substituted by
#' replaceInitTags above.
#'
#' @param initCollection The list of all arguments parsed from the init section
#' @return Finalized initCollection
#' @keywords internal
finalizeInitCollection <- function(initCollection) {
#Resolve nested tags left in the init collection after replaceInitTags:
#a tag's replacement value may itself contain tags, so re-scan and
#substitute repeatedly until no replaceable tags remain (fixpoint loop).
#Only simple, iterator, and array tags are replaced; iterator positions
#come from initCollection$curItPos (named by iterator).
tagsRemain <- TRUE
numIterations <- 1
while(tagsRemain) {
#re-parse the collection; earlier substitutions may have exposed new tags
initTags <- getInitTags(initCollection)
if (nrow(initTags) == 0) break #if no tags found, then substitution complete
#iterator tags can be nested within other tag types and only resolved here
initTags <- with(initTags, initTags[tagType %in% c("simple", "iterator", "array"),])
if (nrow(initTags) == 0) break #some tags remain, but none of the replaceable kinds
#split into one-row data frames so each tag can be resolved individually
divideByRow <- splitDFByRow(initTags)
#compute the replacement value for each remaining tag
initTags$currentValue <- unlist(sapply(divideByRow,
function(row) {
if (row$tagType == "simple") {
#value of the init variable of the same name
return(eval(parse(text=paste0("initCollection$", row$tag))))
}
else if (row$tagType == "iterator") {
#a nested iterator tag: current loop position for that iterator
return(initCollection$curItPos[row$tag])
}
else if (row$tagType == "array") {
split <- strsplit(row$tag, split="#", fixed=TRUE)[[1]]
if (length(split) != 2) stop("array tag missing iterator: ", row$tag)
#named-vector lookup of where this iterator currently stands
iteratorPosition <- initCollection$curItPos[split[2]]
currentValue <- eval(parse(text=paste0("initCollection$", split[1], "[", iteratorPosition, "]")))
if (is.null(currentValue)) {
stop("When replacing tag: ", row$tag, ", could not find corresponding value.")
}
return(currentValue)
}
}
))
#splice the computed values back into the collection, as in the main pass
initCollection <- replaceInitTags(initTags, initCollection)
numIterations <- numIterations + 1
#hard cap guards against circular definitions (a -> b -> a) looping forever
if (numIterations > 20) stop("While replacing tags in init section, looped over variables 20 times without completing substitutions.\n Check for circular definitions within init section.")
}
return(initCollection)
}
#' Evaluate Conditional
#'
#' Note that at this point the comparator must be a number (not another variable).
#'
#' @param tag A tag
#' @param initCollection The list of all arguments parsed from the init section
#' @return A boolean value indicating whether the conditional is true
#' @keywords internal
evaluateConditional <- function(tag, initCollection) {
  #Evaluate a conditional tag such as "group == 2" against the current
  #iterator positions (initCollection$curItPos, a named vector). The
  #comparator must be a number, not another variable. Returns TRUE/FALSE.
  #
  #Uses base regexec/regmatches instead of gsubfn::strapply (drops the
  #third-party dependency) and applies the operator via match.fun instead
  #of eval(parse(text=...)).
  #divide into variable name, comparison operator, and value
  regexp <- "(\\w+)\\s*([!><=]+)\\s*(\\w+)"
  m <- regmatches(tag, regexec(regexp, tag))[[1]]
  #m is c(full match, group1, group2, group3) or character(0) on no match
  conditional <- if (length(m) > 0) m[-1] else character(0)
  if (length(conditional) < 3) {
    stop("Error in conditional tag: does not contain variable, operator, and value. Tag = ", tag)
  }
  #convert simple equals to logical equals
  if (conditional[2] == "=") conditional[2] <- "=="
  validOps <- c("==", "!=", ">", "<", ">=", "<=")
  if (!(conditional[2] %in% validOps)) {
    stop("Unrecognized comparison operator in conditional tag: ", tag)
  }
  #current loop position of the named iterator, compared numerically
  lhs <- initCollection$curItPos[[conditional[1]]]
  rhs <- as.numeric(conditional[3])
  return(match.fun(conditional[2])(lhs, rhs))
}
#' Clip String
#'
#' To do: add any details.
#'
#' @param string A string to be clipped
#' @param start The character position to start at
#' @param end The character position to end at
#' @return A string from start to end
#' @keywords internal
clipString <- function(string, start, end) {
  #Remove the characters from position start through end (inclusive) and
  #return the concatenation of what precedes and follows the clipped span.
  clipLen <- end - start + 1

  #when the string is no longer than the clipped span, nothing survives
  if (nchar(string) <= clipLen) {
    return("")
  }

  leading <- if (start > 1) substr(string, 1, start - 1) else ""
  trailing <- if (end < nchar(string)) substr(string, end + 1, nchar(string)) else ""

  paste0(leading, trailing)
}
#' Process Conditional Tags
#'
#' To do: add details.
#'
#' @param templateTags A template tag
#' @param initCollection The list of all arguments parsed from the init section
#' @return Processed templateTags
#' @keywords internal
processConditionalTags <- function(templateTags, initCollection) {
#require(gsubfn) #moving to import strategy
#find all conditional tags in the body section and remove them from the templateTags and bodyText pieces...
conditionalTagIndices <- which(templateTags$bodyTags$tagType=="conditional")
#return templateTags unharmed if there are no conditional tags (creates error below otherwise)
if (length(conditionalTagIndices) == 0) return(templateTags)
openClose <- ifelse(substr(templateTags$bodyTags$tag[conditionalTagIndices], 1, 1)=="/", "close", "open")
allOpen <- conditionalTagIndices[openClose=="open"]
bodyTagsToDrop <- c()
bodyLinesToDrop <- c()
for (i in allOpen) {
#should be able to decide whether to skip an iteration if the affected lines are already in bodyLinesToDrop
thisTag <- templateTags$bodyTags$tag[i]
#evaluate truth of conditional
conditionalTrue <- evaluateConditional(thisTag, initCollection)
#only look for closing tags after the opening and accept the first exact match
close <- conditionalTagIndices[
templateTags$bodyTags$tag[conditionalTagIndices] == paste0("/", thisTag) &
templateTags$bodyTags$element[conditionalTagIndices] >= templateTags$bodyTags$element[i]][1]
sameLine <- FALSE
#in case of same line match, check to make sure close follows opening on that line
#the conditions above could match when a closing tag precedes opening tag on the same line
if (templateTags$bodyTags$element[close]==templateTags$bodyTags$element[i]) {
sameLine <- TRUE
close <- conditionalTagIndices[
openClose == "close" &
templateTags$bodyTags$tag[conditionalTagIndices] == paste0("/", thisTag) &
templateTags$bodyTags$element[conditionalTagIndices] == templateTags$bodyTags$element[i] &
templateTags$bodyTags$start[conditionalTagIndices] > templateTags$bodyTags$end[i]][1]
if (!close > 0) stop("Could not find closing tag for conditional:", thisTag)
}
#skip this iteration if the opening and closing tags in question are already in the drop pile
#these lines (and the lines between, if necessary) will already be dropped, so don't process
if (templateTags$bodyTags$element[i] %in% bodyLinesToDrop &&
templateTags$bodyTags$element[close] %in% bodyLinesToDrop) next
#first check for tags to drop from the bodyTags collection (don't want these parsed later)
if (conditionalTrue) {
#only remove starting and ending tags
bodyTagsToDrop <- c(bodyTagsToDrop, i, close)
} else {
#if conditional false, then remove all tags between conditional tags
#first, dump all lines in the bodyTags section that fall between elements
bodyTagsToDrop <- c(bodyTagsToDrop, i:close)
#conditional is not true
#so dump the tags and all space between
#really, the only difference here from the calculation below is that
#bodyLinesToDrop should encompass the space between opening and closing
#and the clips below should dump the rest of the line when multiple tags on same line
#no need to rewrite code for clipping out tags
#don't clip the tag lines themselves because this is handled below (whole line goes if nchar <= 0)
#print(bodyLinesToDrop)
#browser()
#only drop lines between matching open/close tags if not on the same line
#otherwise, the clipping code below handles everything correctly
#if on the same line, then element + 1:close - 1 will lead to something like 58:56, which is bad
if (!sameLine) {
bodyLinesToDrop <- c(bodyLinesToDrop,
(templateTags$bodyTags$element[i]+1):(templateTags$bodyTags$element[close]-1))
}
}
#then dump lines from the syntax section itself
#handle same line issues, then delete whole lines between tags
#as with replaceTags substitution, need to handle situation where tag is on line with other stuff
#thus, need to update bodyTags collection, too to reflect new start/stop positions
#when the conditional is true, just remove the tags and leave the syntax
#dump the opening tag on the line
#if the conditional is true, just use the last pos of the opening tag for the clip
if (conditionalTrue) endPos <- templateTags$bodyTags$end[i]
#want to clip the rest of the line
else if (!conditionalTrue && sameLine == FALSE) endPos <- nchar(templateTags$bodyText[templateTags$bodyTags$element[i]])
#just clip anything between open tag and first element of close tag (close tag itself handled by code below)
else if (!conditionalTrue && sameLine == TRUE) endPos <- templateTags$bodyTags$start[close] - 1
templateTags$bodyText[templateTags$bodyTags$element[i]] <- clipString(
templateTags$bodyText[templateTags$bodyTags$element[i]],
templateTags$bodyTags$start[i], endPos)
if (nchar(trimSpace(templateTags$bodyText[templateTags$bodyTags$element[i]])) <= 0) {
#no characters remain, so dump line
bodyLinesToDrop <- c(bodyLinesToDrop, templateTags$bodyTags$element[i])
} else {
#if there is other text on this line, it may contain tags that need to be adjusted given the clip
subsequentTags <- which(
templateTags$bodyTags$element == templateTags$bodyTags$element[i] &
templateTags$bodyTags$start > endPos)
if (length(subsequentTags > 0)) {
#calculate length of opening tag
openLength <- endPos - templateTags$bodyTags$start[i] + 1
templateTags$bodyTags[subsequentTags,"start"] <- templateTags$bodyTags[subsequentTags,"start"] - openLength
templateTags$bodyTags[subsequentTags,"end"] <- templateTags$bodyTags[subsequentTags,"end"] - openLength
#print("openlength")
#browser()
}
}
#okay, we've handled issues related to the opening tag, now handle closing tag
#for the closing tag, just need to clip the tag itself (spacing handled above)
templateTags$bodyText[templateTags$bodyTags$element[close]] <- clipString(
templateTags$bodyText[templateTags$bodyTags$element[close]],
templateTags$bodyTags$start[close],
templateTags$bodyTags$end[close])
if (nchar(trimSpace(templateTags$bodyText[templateTags$bodyTags$element[close]])) <= 0) {
#no characters remain, so dump line
bodyLinesToDrop <- c(bodyLinesToDrop, templateTags$bodyTags$element[close])
} else {
#only look for additional tags if nchar > 0
#redundant code with above... must be a way to consolidate
#if there is other text on then end line, it may contain tags that need to be adjusted given the clip
subsequentTags <- which(
templateTags$bodyTags$element == templateTags$bodyTags$element[close] &
templateTags$bodyTags$start > templateTags$bodyTags$end[close])
if (length(subsequentTags > 0)) {
closeLength <- templateTags$bodyTags$end[close] - templateTags$bodyTags$start[close] + 1
templateTags$bodyTags[subsequentTags,"start"] <- templateTags$bodyTags[subsequentTags,"start"] - closeLength
templateTags$bodyTags[subsequentTags,"end"] <- templateTags$bodyTags[subsequentTags,"end"] - closeLength
#print("closelength")
#browser()
}
}
}
#print(bodyLinesToDrop)
#print(bodyTagsToDrop)
#drop all bad body lines
#only keep unique bodyTagsToDrop (and sort for clarity in debugging)
#hard to imagine that bodyTagsToDrop could be NULL at this point (given the return when no conditional tags above)
#but if it were NULL, the bodyTags collection would be dumped by the NULL*-1 evaluation
if (!is.null(bodyTagsToDrop)) {
bodyTagsToDrop <- sort(unique(bodyTagsToDrop))
templateTags$bodyTags <- templateTags$bodyTags[bodyTagsToDrop*-1, ]
}
#need to check whether bodyLinesToDrop is NULL. If it is, then we must not attempt the subset
#(it will delete the whole character vector)
if (!is.null(bodyLinesToDrop)) {
#only retain unique bodyLinesToDrop (in theory handled by the "next" code above, but good to be safe)
bodyLinesToDrop <- sort(unique(bodyLinesToDrop))
templateTags$bodyText <- templateTags$bodyText[bodyLinesToDrop*-1]
#need to move up the line markers in the bodyTags collection based on the lines dropped
templateTags$bodyTags <- ddply(templateTags$bodyTags, "element", function(subDF) {
numMoveUp <- length(which(bodyLinesToDrop < subDF$element[1]))
subDF$element <- subDF$element - numMoveUp
return(subDF)
})
}
return(templateTags)
}
#' Process the Init Section
#'
#' Parses the [[init]] section of a template file into a named list of
#' variables. Assignments may span multiple lines and must be terminated by a
#' semicolon. Values are split on whitespace (double-quoted strings stay
#' intact). A variable defined as name#iterator is a list variable; its
#' iterator name is stored in the "iterator" attribute of the value vector.
#' Iterator definitions support colon notation (e.g., 1:5), and list
#' variables are padded with empty strings so their indices align with the
#' iterator values.
#'
#' @param initsection Character vector containing the lines of the init section
#' @return A named list (one element per variable). Always includes
#'   \code{iterators} (required), \code{filename} (required), and
#'   \code{outputDirectory} (defaults to the current directory).
#' @importFrom gsubfn strapply
#' @keywords internal
processInit <- function(initsection) {
    #lines that begin a variable assignment: something = ...
    assignments <- grep("^\\s*.+\\s*=", initsection, perl=TRUE)
    if (length(assignments) == 0) {
        stop("No variable assignments found in init section.")
    }
    #validate variable names: must begin with a letter or period; may contain
    #word characters, periods, and a single # (marking a list variable)
    valid <- grep("^\\s*[A-Za-z\\.]+[\\w\\.#]*\\s*=", initsection[assignments], perl=TRUE)
    if (length(valid) < length(assignments)) {
        badvars <- initsection[assignments[which(!seq_along(assignments) %in% valid)]]
        stop(paste(c("Invalid variable definitions in init section.",
            "Variables must begin with a letter or a period.",
            "Variables may contain only the following characters: letters, numbers, underscores, periods, and a single pound sign for list variables.",
            "Problematic variable(s):", badvars), collapse="\n "))
    }
    #join multi-line statements: read forward until a terminating semicolon
    argstoprocess <- vector("character", length(assignments))
    for (i in seq_along(assignments)) {
        argstoprocess[i] <- initsection[assignments[i]]
        #start scanning at the line after the assignment
        filepos <- assignments[i] + 1
        while (!grepl(";\\s*$", argstoprocess[i], perl=TRUE)) {
            #guard against running off the end of the section; without this,
            #an unterminated statement would loop forever on NA lines
            if (filepos > length(initsection)) {
                stop("Assignment not terminated by a semicolon: ", argstoprocess[i])
            }
            argstoprocess[i] <- paste(argstoprocess[i], initsection[filepos])
            filepos <- filepos + 1
        }
    }
    #parse each statement into a three-element vector (name, iterator, value);
    #the regexp implicitly drops the semicolon and any trailing spaces
    arglist <- strapply(argstoprocess, "^\\s*(\\w+[\\w\\.]*)(#[\\w\\.]+)?\\s*=\\s*(.+);\\s*$",
        function(name, iterator, value) {
            return(c(name, iterator, value))
        }, perl=TRUE)
    #copy the first element (name) of each vector into the list names
    names(arglist) <- make.names(sapply(arglist, '[', 1))
    #1. split values into vectors on whitespace, honoring double quotes
    #2. attach the iterator name (minus the leading #) as an attribute
    #3. implicitly drop the name by not including element[1]
    arglist <- lapply(arglist, function(element) {
        output <- friendlyGregexpr("(\"[^\"]*\"|[^\\s]+)", element[3])$tag
        output <- gsub("\"", "", output)
        #the regexp above captures the # itself; trim it off when an
        #iterator was defined
        if (nchar(element[2]) > 0) {
            element[2] <- substr(element[2], 2, nchar(element[2]))
        }
        attr(output, "iterator") <- element[2]
        return(output)
    })
    if (is.null(arglist$iterators)) {
        stop("No iterators in init section. Cannot process template.")
    }
    #expand and validate each iterator definition
    for (thisIt in arglist$iterators) {
        if (is.null(arglist[[thisIt]])) {
            stop("Variable specified in iterators list, but not defined: ", thisIt)
        }
        #expand colon notation as needed; do.call combines the per-element
        #results into one vector (e.g., c("1:3", "7") -> 1 2 3 7)
        arglist[[thisIt]] <- do.call("c", lapply(arglist[[thisIt]], function(x) {
            if (grepl(":", x, fixed=TRUE)) {
                return(strapply(x, "(\\d+)\\s*:\\s*(\\d+)", function(start, stop) return(start:stop))[[1]])
            } else {
                return(as.numeric(x))
            }
        }))
        #iterator values must be unique
        if (length(unique(arglist[[thisIt]])) < length(arglist[[thisIt]])) {
            stop("Problem with iterator: ", thisIt, "\n  Non-unique values specified: ",
                paste(arglist[[thisIt]], collapse=", "))
        }
        #store ascending for a predictable iteration order
        arglist[[thisIt]] <- sort(unique(arglist[[thisIt]]))
    }
    #now that iterators are defined, ensure list variables align with them;
    #pad vectors accordingly
    arglist <- lapply(arglist, function(element) {
        #a non-empty iterator attribute marks a list variable
        iteratorAttr <- attr(element, "iterator")
        if (!is.null(iteratorAttr) && nchar(iteratorAttr) > 0) {
            iteratorValues <- arglist[[iteratorAttr]]
            #the values vector must be exactly as long as the iterator vector
            if (length(element) != length(iteratorValues)) {
                stop("Variable locked to iterator: ", iteratorAttr,
                    ", but has different length.\n Values: ",
                    paste(element, collapse=", "),
                    "\n Should be length: ", length(iteratorValues))
            }
            if (length(element) < max(iteratorValues)) {
                #preallocate a vector as long as the largest iterator value;
                #positions not named by the iterator stay as empty strings
                updatedElement <- character(max(iteratorValues))
                updatedElement[iteratorValues] <- element
                element <- updatedElement
                attr(element, "iterator") <- iteratorAttr #re-add attribute
            }
        }
        return(element)
    })
    #default output directory to the current directory
    if (is.null(arglist$outputDirectory)) {
        warning("No output directory specified. Defaulting to the current directory.")
        arglist$outputDirectory <- getwd()
    }
    if (is.null(arglist$filename)) {
        stop("No definition provided for the output filename. The filename definition is required.")
    }
    return(arglist)
}
| /R/createModels.R | no_license | michaelhallquist/MplusAutomation | R | false | false | 47,191 | r | # TODO: enforce use of #iterator syntax in init section for vars with length > 1
# TODO: Check array tags used in the init and body sections for validity.
# TODO: Make sure that classify tags accurately interprets all tags and errors if uninterpretable tag.
# TODO: Allow for conditional tags to use a list, such as [[nclass#class == 5]]
#note that there's a bit of trickery in interpreting list tags
#they varnames are stored as only the prefix in the initCollection (no #iterator)
#and they are referenced in the body as var#iterator
#At this point, doesn't enforce proper use of iterator with a list
#setwd("C:/Users/Michael Hallquist/Documents/Automation_Sandbox")
#createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/LSPD Covariate Template.txt")
#system.time(createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/LSPD Template.txt"))
#createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/LSPD Template New Init.txt")
#createModels("C:/Users/Michael Hallquist/Documents/Automation_Sandbox/L2 Multimodel Template No iter.txt")
#need to sort out why is.na is working for lookupValue in replaceBodyTags
#in particular, why isn't the current value carrying over from the previous looping iteration?
#SOME THOUGHTS RE DOCUMENTATION
#foreach tags may only be with respect to an iterator... could not have some random foreach var
#' Split a data frame into a list by rows
#'
#' Takes a data frame and returns a list with one element per row, each
#' element itself a one-row data frame. This is an internal function.
#'
#' @param df An object inheriting from class \code{data.frame}
#'
#' @return A list where each element is a one-row data frame. An empty list
#'   when \code{df} has zero rows.
#' @keywords internal
#' @examples
#' # small example using built in data
#' MplusAutomation:::splitDFByRow(mtcars)
splitDFByRow <- function(df) {
    stopifnot(inherits(df, "data.frame"))
    #drop = FALSE keeps each element a one-row data.frame even when df has a
    #single column (plain df[i, ] would collapse to a bare vector);
    #seq_len is safe for zero-row inputs
    lapply(seq_len(nrow(df)), function(i) df[i, , drop = FALSE])
}
#' Classifies Tags
#'
#' Assigns each tag in \code{tagVector} to one of four classes:
#' \sQuote{iterator}, \sQuote{array}, \sQuote{conditional}, or \sQuote{simple}.
#' Tags matching none of the patterns keep an empty-string label.
#'
#' @param tagVector A vector of tags to be classified
#' @param iteratorsVector a vector of the iterators used to build the
#'   classification patterns
#' @return A character vector of class labels, the same length as
#'   \code{tagVector}
#' @keywords internal
classifyTags <- function(tagVector, iteratorsVector) {
    #alternation of iterator names shared by all four patterns
    itAlt <- paste(iteratorsVector, collapse="|")
    #patterns defining each tag class; the list names become the labels
    patterns <- list(
        #a bare iterator, e.g. [[group]]
        iterator = paste0("\\[\\[\\s*(", itAlt, ")\\s*\\]\\]"),
        #variable#iterator, e.g. [[groupnames#group]]
        array = paste0("\\[\\[\\s*\\b([\\w\\.]+)#(", itAlt, ")\\b\\s*\\]\\]"),
        #comparison against an iterator, e.g. [[group > 1]]; the optional
        #leading slash also matches closing conditional tags
        conditional = paste0("\\[\\[\\s*/*(", itAlt, ")\\s*[!><=]+\\s*\\d+\\s*\\]\\]"),
        #anything else that is a plain name; the negative lookahead keeps
        #iterator tags out of this class
        simple = paste0("\\[\\[\\s*(?!", itAlt, ")[\\w+\\.]+\\s*\\]\\]"))
    #default label for unmatched tags is the empty string
    labels <- vector(mode="character", length=length(tagVector))
    hits <- lapply(patterns, grep, x = tagVector, perl = TRUE)
    #a tag must not belong to more than one class
    stopifnot(!any(duplicated(unlist(hits))))
    for (cls in names(hits)) {
        labels[hits[[cls]]] <- cls
    }
    return(labels)
}
#' Get Initial Tags
#'
#' Scans every character-mode element of the init collection for [[tag]]
#' markers and returns a data.frame describing each match. \code{listpos}
#' records which list element (variable) the tag came from and \code{element}
#' records the position within that variable's vector, so a match can occur
#' for, e.g., initCollection[[5]][3].
#'
#' @param initCollection Named list of parsed init-section variables
#' @return A data.frame with columns element, start, end, tag, listpos, and
#'   tagType (tags stripped of their [[ ]] delimiters); an empty data.frame
#'   when no tags are present.
#' @keywords internal
getInitTags <- function(initCollection) {
    #collect matches per list element (avoids growing a data.frame by rbind
    #inside a loop); non-character elements contribute NULL
    matchList <- lapply(seq_along(initCollection), function(i) {
        if (storage.mode(initCollection[[i]]) != "character") return(NULL)
        matches <- friendlyGregexpr("\\[\\[\\s*[\\s\\w=><!#/]+\\s*\\]\\]", initCollection[[i]], perl=T)
        #record which list element the tag was found in
        if (!is.null(matches)) matches$listpos <- i
        matches
    })
    #NULL entries (no matches) are silently dropped by rbind
    initMatches <- do.call(rbind, matchList)
    #classify tags as simple, array, iterator, or conditional, then chop off
    #the [[ ]] portion (with surrounding space) so that later substitution
    #code can work with bare tag names
    if (!is.null(initMatches) && nrow(initMatches) > 0) {
        initMatches$tagType <- classifyTags(initMatches$tag, initCollection$iterators)
        initMatches$tag <- sapply(initMatches$tag, function(tag) {
            return(sub("\\[\\[\\s*([\\s\\w=><!#/]+)\\s*\\]\\]", "\\1", tag, perl=TRUE))
        })
    }
    #return empty data frame if no matches
    if (is.null(initMatches)) return(data.frame())
    else return(initMatches)
}
#' Parses tags in the body section
#'
#' Locates and classifies all [[tag]] markers in the body section (character
#' vector) and in the init collection (list of vars defined in the init
#' section). This is an internal function.
#'
#' @param bodySection The body
#' @param initCollection The initial collection
#' @return A list with three elements describing the location, start
#' character, end character, tag type, etc. of each tag.
#' \describe{
#'   \item{initTags}{data.frame of tags found in the init collection}
#'   \item{bodyTags}{data.frame of tags found in the body section}
#'   \item{bodyText}{the unmodified body section}
#' }
#' @keywords internal
parseTags <- function(bodySection, initCollection) {
    #tags look like [[ ... ]] and may contain words, spaces, and the
    #characters = > < ! # / used by conditional and array tags
    tagPattern <- "\\[\\[\\s*[\\s\\w=><!#/]+\\s*\\]\\]"
    #init-section tags first; currentValue will hold running substitutions
    initMatches <- getInitTags(initCollection)
    initMatches$currentValue <- NA_character_
    #body-section tags next; at least one tag is required for processing
    bodyMatches <- friendlyGregexpr(tagPattern, bodySection, perl=TRUE)
    if (is.null(bodyMatches)) stop("No tags found in body section of template file.")
    bodyMatches$tagType <- classifyTags(bodyMatches$tag, initCollection$iterators)
    #running value substituted per model; mass-replaced for each output file
    bodyMatches$currentValue <- NA_character_
    #strip the [[ ]] delimiters (and surrounding space) so later substitution
    #code can work with bare tag names
    bodyMatches$tag <- sapply(bodyMatches$tag, function(tag) {
        sub("\\[\\[\\s*([\\s\\w=><!#/]+)\\s*\\]\\]", "\\1", tag, perl=TRUE)
    })
    #three-element list with constituent data frames for init and body tags
    list(initTags=initMatches, bodyTags=bodyMatches, bodyText=bodySection)
}
#' Create Mplus Input Files from Template
#'
#' The \code{createModels} function processes a single Mplus template file and creates a group of related
#' model input files. Definitions and examples for the template language are provided in the MplusAutomation
#' vignette and are not duplicated here at the moment. See this documentation: \code{vignette("Vignette", package="MplusAutomation")}
#'
#' @param templatefile The filename (absolute or relative path) of an Mplus template file to be processed. Example \dQuote{C:/MplusTemplate.txt}
#' @return No value is returned by this function. It is solely used to process an Mplus template file.
#' @author Michael Hallquist
#' @keywords interface
#' @export
#' @examples
#' \dontrun{
#' createModels("L2 Multimodel Template No iter.txt")
#' }
createModels <- function(templatefile) {
    # should probably have the function cd to wherever the template file is located (if given as abs path)
    # todo: allow for direct runs?
    if (!file.exists(templatefile)) stop("Template file not found.")
    #read the template, preserving blank lines (they matter for body layout)
    readfile <- scan(templatefile, what="character", sep="\n", strip.white=FALSE,
        blank.lines.skip=FALSE, quiet=TRUE)
    #locate the [[init]] ... [[/init]] section
    startinit <- grep("[[init]]", readfile, fixed=TRUE)
    endinit <- grep("[[/init]]", readfile, fixed=TRUE)
    #require exactly one init section with [[/init]] after [[init]]
    if (length(startinit) != 1 || length(endinit) != 1 || endinit <= startinit) {
        stop("Unable to find init section in template file.")
    }
    #a body section must follow the init section
    if (endinit >= length(readfile)) {
        stop("Template file contains no body section after [[/init]].")
    }
    # extract init section
    initSection <- readfile[(startinit+1):(endinit-1)]
    # extract body section
    bodySection <- readfile[(endinit+1):length(readfile)]
    # convert the init text into a list object containing parsed init instructions
    initCollection <- processInit(initSection)
    #locate and classify all tags in init and body
    templateTags <- parseTags(bodySection, initCollection)
    # simple tags do not vary across iterations, so resolve them once up front
    templateTags <- lookupSimpleTags(templateTags, initCollection)
    # kick off the recursive replace (one output file per iterator combination)
    if (length(initCollection$iterators) > 0) {
        recurseReplace(templateTags, initCollection)
    }
    invisible(NULL)
}
#' Simple tag lookup
#'
#' Sets the currentValue column of the initTags data.frame for simple tags
#' only. Most tag values are replaced at the bottom level of recursion, but
#' simple tags do not change across iterations, so they can be resolved once
#' up front by direct name lookup in the init collection.
#'
#' @param templateTags The template tags (list with initTags/bodyTags/bodyText)
#' @param initCollection The initial collection (named list of variables)
#' @return templateTags with currentValue filled in for simple init tags.
#' @keywords internal
lookupSimpleTags <- function(templateTags, initCollection) {
    #locate simple tags in init
    simpleInitPositions <- which(templateTags$initTags$tagType=="simple")
    #guard against zero simple tags (sapply over an empty vector returns a
    #list, which cannot be assigned into a character column)
    if (length(simpleInitPositions) > 0) {
        templateTags$initTags$currentValue[simpleInitPositions] <- sapply(
            templateTags$initTags$tag[simpleInitPositions],
            function(value) {
                #direct list lookup by exact name; replaces the previous
                #eval(parse(text=paste0("initCollection$", value))) construct
                return(initCollection[[value]])
            })
    }
    return(templateTags)
}
#' Updates current values
#'
#' Refreshes the currentValue column of the init (and body iterator) tag
#' collections for the CURRENT iterator only. Values tied to other iterators
#' are left untouched so that they persist correctly across nested loops.
#' Body tag currentValues are substituted at the bottom-most level after the
#' init collection is finalized.
#'
#' @param templateTags The template tags (list with initTags/bodyTags/bodyText)
#' @param initCollection Initial collection; must carry curIteratorName,
#'   curIteratorDepth, and curItPos describing the active loop state
#' @return templateTags with updated currentValue entries for this iterator.
#' @keywords internal
updateCurrentValues <- function(templateTags, initCollection) {
    #active iterator name and its current loop position
    curName <- initCollection$curIteratorName
    curPos <- initCollection$curItPos[initCollection$curIteratorDepth]
    #iterator tags in init: set to the current position in the looping process
    initIteratorPositions <- which(
        templateTags$initTags$tagType=="iterator" &
        templateTags$initTags$tag == curName)
    templateTags$initTags$currentValue[initIteratorPositions] <- curPos
    #iterator tags in body: same treatment (allows iterator lookups later)
    bodyIteratorPositions <- which(
        templateTags$bodyTags$tagType == "iterator" &
        templateTags$bodyTags$tag == curName)
    templateTags$bodyTags$currentValue[bodyIteratorPositions] <- curPos
    #array tags in init: figure out the iterator for each array tag and only
    #update those relevant to the current iterator
    initArrayPositions <- which(templateTags$initTags$tagType=="array")
    #only update values if any array tags are found
    if (length(initArrayPositions) > 0) {
        #process one tag row at a time
        divideByRow <- splitDFByRow(templateTags$initTags[initArrayPositions,])
        templateTags$initTags$currentValue[initArrayPositions] <- unlist(sapply(divideByRow,
            function(row) {
                split <- strsplit(row$tag, split="#", fixed=TRUE)[[1]]
                if (length(split) != 2) stop("array tag missing iterator: ", row$tag)
                if (split[2] == curName) {
                    #direct list lookup and vector index; replaces the previous
                    #eval(parse(text=...)) construct
                    currentValue <- initCollection[[split[1]]][curPos]
                    if (is.null(currentValue)) {
                        stop("When replacing tag: ", row$tag, ", could not find corresponding value.")
                    }
                    return(currentValue)
                } else {
                    # return unchanged current value if not this iterator
                    return(row$currentValue)
                }
            }))
    }
    #body array tags are intentionally NOT updated here; they are resolved at
    #the bottom of the recursion once the init collection is finalized
    return(templateTags)
}
#' Recursive replace
#'
#' Walks the iterator tree depth-first. At each level the current iterator
#' position is recorded in \code{initCollection$curItPos} and the
#' iterator/array tag values for that iterator are refreshed. At the deepest
#' level, conditional tags are resolved, remaining tags are substituted, and
#' one Mplus input file is written per iterator combination.
#'
#' @param templateTags The template tags (list with initTags, bodyTags, and
#'   bodyText as produced by \code{parseTags})
#' @param initCollection The list of all arguments parsed from the init section
#' @param curiterator An integer that tracks of the depth of recursion through the iterators. Defaults to 1.
#' @return No useful return value; called for its side effect of writing
#'   model input files to the output directory.
#' @keywords internal
recurseReplace <- function(templateTags, initCollection, curiterator=1L) {
    #bodySection is the character vector representing each line of the body section
    #bodyTags is a data.frame documenting the location and type of all tags in bodySection
    #initTags is a data.frame documenting the location and type of all tags in initCollection
    if (!is.list(initCollection)) {
        stop("Argument list passed to recurseReplace is not a list")
    }
    # check that curiterator is indeed a whole number
    stopifnot(curiterator %% 1 == 0)
    #the iterator handled at this recursion depth
    thisIterator <- initCollection$iterators[curiterator]
    #set the current iterator for the collection (used by replaceTags)
    initCollection$curIteratorName <- thisIterator
    initCollection$curIteratorDepth <- curiterator
    #loop over each value of this iterator (e.g., each class count)
    for (i in initCollection[[thisIterator]]) {
        #record the current position within this iterator under the iterator's
        #name, building up a named vector as recursion deepens; this allows
        #name-based lookup by replaceTags
        initCollection$curItPos[thisIterator] <- i
        #update current values for this iterator and this iteration; applies
        #at every depth, not just the deepest level. The function only touches
        #array and iterator tags matching this iterator, minimizing redundant
        #work. Only init-collection tags are updated here; body tags are
        #replaced at the bottom level after the init collection is finalized.
        templateTags <- updateCurrentValues(templateTags, initCollection)
        if (curiterator < length(initCollection$iterators)) {
            #not at the deepest level: recurse to the next iterator
            recurseReplace(templateTags, initCollection, curiterator = curiterator+1)
        } else {
            #we have reached the bottom of the iteration tree:
            #simple, array, and iterator tags are up to date in templateTags.
            #First resolve conditional tags; use a separate copy so this loop
            #iteration does not affect subsequent ones.
            finalTemplateTags <- processConditionalTags(templateTags, initCollection)
            #the body section to write is stored in the templateTags collection
            toWrite <- finalTemplateTags$bodyText
            #create a separate init collection with values substituted in
            finalInitCollection <- replaceInitTags(finalTemplateTags$initTags, initCollection)
            #finalize init collection values (resolves nested tags)
            finalInitCollection <- finalizeInitCollection(finalInitCollection)
            #update bodySection with tag values from the finalized init tags
            toWrite <- replaceBodyTags(toWrite, finalTemplateTags$bodyTags, finalInitCollection)
            filename <- finalInitCollection$filename
            cat(paste("writing file: ", filename, "\n", sep=""))
            curdir <- getwd()
            #figure out (and create, if needed) the output directory
            outputDir <- finalInitCollection$outputDirectory
            if (!file.exists(outputDir)) {
                dir.create(outputDir, recursive=TRUE)
            }
            #NOTE(review): if writeLines below errors, the working directory is
            #not restored -- consider on.exit(setwd(curdir)) here
            setwd(outputDir)
            #Mplus rejects long input lines; wrap anything over 90 characters
            toWrite <- unlist(lapply(toWrite, function(line) {
                if (nchar(line) > 90) {
                    strwrap(line, width=85, exdent=5)
                } else {
                    line
                }
            }))
            writeLines(toWrite, con = filename, sep = "\n")
            setwd(curdir)
        }
    }
}
#' Replace Init Tags
#'
#' Substitutes the resolved currentValue of each simple, iterator, and array
#' tag into the strings of the init collection. When several tags occur in
#' the same string, the start/end positions of later tags are shifted by the
#' length difference introduced by each substitution.
#'
#' @param initTags data.frame of init-section tags (with currentValue set)
#' @param initCollection The list of all arguments parsed from the init section
#' @return The init collection with tag text replaced by current values.
#' @keywords internal
replaceInitTags <- function(initTags, initCollection) {
    #rows whose tags have concrete current values to substitute
    targetRows <- which(initTags$tagType %in% c("simple", "iterator", "array"))
    #nothing to do when no such tags exist (also avoids 1:0 loop on zero rows)
    if (length(targetRows) == 0) return(initCollection)
    targetTags <- initTags[targetRows, ]
    targetTags$rownumber <- seq_len(nrow(targetTags))
    for (i in seq_len(nrow(targetTags))) {
        row <- targetTags[i, ]
        #the string containing this tag: list element listpos, vector
        #position element
        stringToChange <- initCollection[[row$listpos]][row$element]
        #text before the tag (empty when the tag starts the string)
        preTag <- if (row$start > 1) substr(stringToChange, 1, row$start - 1) else ""
        #text after the tag (empty when the tag ends the string)
        postTag <- if (row$end < nchar(stringToChange)) {
            substr(stringToChange, row$end + 1, nchar(stringToChange))
        } else ""
        initCollection[[row$listpos]][row$element] <- paste0(preTag, row$currentValue, postTag)
        #later tags in the same string must be shifted by the difference
        #between the tag length and its replacement length
        subsequentRows <- which(
            targetTags$rownumber > i &
            targetTags$listpos == row$listpos &
            targetTags$element == row$element)
        if (length(subsequentRows) > 0) {
            diffLength <- nchar(row$currentValue) - (row$end - row$start + 1)
            targetTags[subsequentRows, "start"] <- targetTags[subsequentRows, "start"] + diffLength
            targetTags[subsequentRows, "end"] <- targetTags[subsequentRows, "end"] + diffLength
        }
    }
    #(the original also wrote the updated positions back into a local copy of
    #initTags, which was never returned; that dead code is omitted)
    return(initCollection)
}
#' Replace Body Tags
#'
#' Substitutes the current values of simple, iterator, and array tags into
#' the body section of the Mplus syntax. When a tag's \code{currentValue}
#' is \code{NA}, the value is resolved lazily via \code{lookupValue}.
#' Start/end positions of later tags on the same line are shifted to
#' account for the length difference of each replacement.
#'
#' @param bodySection character vector of body section of Mplus syntax
#' @param bodyTags collection of tags used inside of the template body
#' @param initCollection The list of all arguments parsed from the init section
#' @return Returns updated bodySection
#' @keywords internal
replaceBodyTags <- function(bodySection, bodyTags, initCollection) {
  if (length(bodySection) <= 0) stop("Empty body section")
  targetTags <- bodyTags[bodyTags$tagType %in% c("simple", "iterator", "array"), ]
  #no replaceable tags: return the body unchanged
  #(previously 1:nrow() on an empty subset caused an error here)
  if (nrow(targetTags) == 0L) return(bodySection)
  targetTags$rownumber <- seq_len(nrow(targetTags))
  for (i in seq_len(nrow(targetTags))) {
    row <- targetTags[i, ]
    stringToChange <- bodySection[row$element]
    #text preceding the tag (empty when the tag starts the line)
    preTag <- if (row$start > 1) substr(stringToChange, 1, row$start - 1) else ""
    #text following the tag (empty when the tag ends the line)
    postTag <- if (row$end < nchar(stringToChange)) {
      substr(stringToChange, row$end + 1, nchar(stringToChange))
    } else {
      ""
    }
    #deferred tags carry NA and are resolved against the init collection here
    if (is.na(row$currentValue)) {
      row$currentValue <- lookupValue(row$tag, row$tagType, initCollection)
    }
    bodySection[row$element] <- paste0(preTag, row$currentValue, postTag)
    #offset subsequent start/stop positions on the same line by the
    #difference between the tag and replacement lengths
    diffLength <- nchar(row$currentValue) - (row$end - row$start + 1)
    subsequentRows <- which(
      targetTags$rownumber > i &
        targetTags$element == row$element)
    if (length(subsequentRows) > 0) { #was length(subsequentRows > 0): misplaced paren
      targetTags[subsequentRows, "start"] <- targetTags[subsequentRows, "start"] + diffLength
      targetTags[subsequentRows, "end"] <- targetTags[subsequentRows, "end"] + diffLength
    }
  }
  return(bodySection)
}
#' Lookup values
#'
#' Resolves the current value of a tag against the init collection.
#' Simple tags map directly to a named element of \code{initCollection};
#' array tags of the form \code{name#iterator} index the named element by
#' the iterator's current position (\code{initCollection$curItPos}).
#'
#' @param tag name of tag for which we want to know the current value
#' @param tagType type of tag (simple, array, etc.) for the tag to lookup
#' @param initCollection The list of all arguments parsed from the init section
#' @return Current value; NULL (invisibly via fall-through) for unsupported tag types
#' @keywords internal
lookupValue <- function(tag, tagType, initCollection) {
  if (missing(tag)) stop("No tag provided")
  if (missing(tagType)) stop("No tag type provided")
  if (tagType == "simple") {
    #direct exact-name lookup; avoids eval(parse(...)) on template-supplied text
    return(initCollection[[tag]])
  } else if (tagType == "array") {
    split <- strsplit(tag, split="#", fixed=TRUE)[[1]]
    #was stop(..., row$tag): 'row' is undefined in this function, so the
    #error path itself crashed with "object 'row' not found"
    if (length(split) != 2) stop("array tag missing iterator: ", tag)
    #use named array look-up to find the iterator's current position
    iteratorPosition <- initCollection$curItPos[split[2]]
    #the padding performed by processInit handles non-contiguous iterator values
    currentValue <- initCollection[[split[1]]][iteratorPosition]
    if (is.null(currentValue)) {
      stop("When replacing tag: ", tag, ", could not find corresponding value.")
    }
    return(currentValue)
  }
}
#' Finalize Init Collection
#'
#' Handles init values that still contain tags after the first substitution
#' pass: once the initCollection is finalized, the deferred body tags can be
#' processed. The substitutions here are handled in an inefficient manner --
#' re-scanning for tags on every pass rather than reusing the matched tags
#' data.frame.
#'
#' Only simple and array tags need to be resolved here; iterator tags are
#' integers looked up from \code{curItPos}, and foreach/conditional tags are
#' not relevant to the init section.
#'
#' The loop iterates until no replaceable tags are left, bailing out after 20
#' passes to guard against circular definitions. On entry, most tags should
#' already have been substituted by \code{replaceInitTags}.
#'
#' @param initCollection The list of all arguments parsed from the init section
#' @return Finalized initCollection (all simple/iterator/array tags replaced
#'   by literal values)
#' @keywords internal
finalizeInitCollection <- function(initCollection) {
  #tagsRemain is never set FALSE; the loop exits via the break statements below
  tagsRemain <- TRUE
  numIterations <- 1
  while(tagsRemain) {
    initTags <- getInitTags(initCollection)
    if (nrow(initTags) == 0) break #if no tags found, then substitution complete
    #update: iterator tags can be nested within other tag types and not updated until here.
    initTags <- with(initTags, initTags[tagType %in% c("simple", "iterator", "array"),])
    if (nrow(initTags) == 0) break #some tags, but none of the simple or array variety, which we want to replace
    #divide the tag data.frame into a list with one element per row
    divideByRow <- splitDFByRow(initTags)
    #for each tag row, resolve its current value from the init collection
    initTags$currentValue <- unlist(sapply(divideByRow,
      function(row) {
        if (row$tagType == "simple") {
          #NOTE: eval(parse(...)) resolves the tag with $ partial matching
          return(eval(parse(text=paste0("initCollection$", row$tag))))
        }
        else if (row$tagType == "iterator") {
          #an iterator tag was nested; its value is the current position
          return(initCollection$curItPos[row$tag])
        }
        else if (row$tagType == "array") {
          #array tags take the form name#iterator
          split <- strsplit(row$tag, split="#", fixed=TRUE)[[1]]
          if (length(split) != 2) stop("array tag missing iterator: ", row$tag)
          #use named array look-up to find the iterator's current position
          iteratorPosition <- initCollection$curItPos[split[2]]
          currentValue <- eval(parse(text=paste0("initCollection$", split[1], "[", iteratorPosition, "]")))
          if (is.null(currentValue)) {
            stop("When replacing tag: ", row$tag, ", could not find corresponding value.")
          }
          return(currentValue)
        }
      }
    ))
    #now we have current values for all init tags; splice them back into
    #the init collection, just as in the first pass
    initCollection <- replaceInitTags(initTags, initCollection)
    numIterations <- numIterations + 1
    if (numIterations > 20) stop("While replacing tags in init section, looped over variables 20 times without completing substitutions.\n  Check for circular definitions within init section.")
  }
  return(initCollection)
}
#' Evaluate Conditional
#'
#' Evaluates a conditional tag of the form \code{variable op value}
#' (e.g. \code{i > 2}) against the iterator's current position stored in
#' \code{initCollection$curItPos}. Note that at this point the comparator
#' must be a literal value (not another variable).
#'
#' @param tag A tag
#' @param initCollection The list of all arguments parsed from the init section
#' @return A boolean value indicating whether the conditional is true
#' @keywords internal
evaluateConditional <- function(tag, initCollection) {
  #evaluate whether tag is true
  #first divide up into name, operator, and value
  regexp <- "(\\w+)\\s*([!><=]+)\\s*(\\w+)"
  conditional <- unlist(strapply(tag, regexp, c))
  if (length(conditional) < 3) {
    stop("Error in conditional tag: does not contain variable, operator, and value. Tag = ", tag)
  }
  #convert simple equals to logical equals
  if (conditional[2] == "=") conditional[2] <- "=="
  #look up the iterator's current position by name and apply the comparison
  #operator directly; this avoids eval(parse(...)) on template-supplied text.
  #The comparator stays a character string, which matches the coercion
  #behavior of the previously parsed expression.
  return(do.call(conditional[2],
                 list(initCollection$curItPos[conditional[1]], conditional[3])))
}
#' Clip String
#'
#' Removes the characters between positions \code{start} and \code{end}
#' (inclusive) from \code{string}, returning the concatenation of whatever
#' precedes and follows the clipped region ("" when nothing remains).
#'
#' @param string A string to be clipped
#' @param start The character position to start at
#' @param end The character position to end at
#' @return The string with positions start..end removed
#' @keywords internal
clipString <- function(string, start, end) {
  #text before the clip (empty when the clip starts at position 1)
  preString <- if (start > 1) substr(string, 1, start - 1) else ""
  #text after the clip (empty when the clip runs to/past the end of the string)
  postString <- if (end < nchar(string)) substr(string, end + 1, nchar(string)) else ""
  #when the clip covers the whole string, both pieces are empty and "" results.
  #The previous shortcut (nchar(string) <= end-start+1 -> "") got this wrong
  #for out-of-range end values: it discarded a surviving prefix.
  return(paste0(preString, postString))
}
#' Process Conditional Tags
#'
#' Evaluates every conditional tag pair in the body section. When a
#' conditional is true, only the opening/closing tags are clipped out and the
#' enclosed syntax is retained; when false, the tags and all text between
#' them are removed. Start/end positions of any remaining tags are shifted to
#' reflect the clipped text, and line indices are compacted after whole lines
#' are dropped.
#'
#' @param templateTags A list containing \code{bodyText} (character vector of
#'   syntax lines) and \code{bodyTags} (data.frame of matched tags with
#'   \code{tag}, \code{tagType}, \code{element}, \code{start}, \code{end})
#' @param initCollection The list of all arguments parsed from the init section
#' @return Processed templateTags
#' @keywords internal
processConditionalTags <- function(templateTags, initCollection) {
  #require(gsubfn) #moving to import strategy
  #find all conditional tags in the body section and remove them from the templateTags and bodyText pieces...
  conditionalTagIndices <- which(templateTags$bodyTags$tagType=="conditional")
  #return templateTags unharmed if there are no conditional tags (creates error below otherwise)
  if (length(conditionalTagIndices) == 0) return(templateTags)
  #tags beginning with "/" are closing tags; everything else opens a conditional
  openClose <- ifelse(substr(templateTags$bodyTags$tag[conditionalTagIndices], 1, 1)=="/", "close", "open")
  allOpen <- conditionalTagIndices[openClose=="open"]
  bodyTagsToDrop <- c()
  bodyLinesToDrop <- c()
  for (i in allOpen) {
    #should be able to decide whether to skip an iteration if the affected lines are already in bodyLinesToDrop
    thisTag <- templateTags$bodyTags$tag[i]
    #evaluate truth of conditional
    conditionalTrue <- evaluateConditional(thisTag, initCollection)
    #only look for closing tags after the opening and accept the first exact match
    close <- conditionalTagIndices[
      templateTags$bodyTags$tag[conditionalTagIndices] == paste0("/", thisTag) &
      templateTags$bodyTags$element[conditionalTagIndices] >= templateTags$bodyTags$element[i]][1]
    sameLine <- FALSE
    #in case of same line match, check to make sure close follows opening on that line
    #the conditions above could match when a closing tag precedes opening tag on the same line
    if (templateTags$bodyTags$element[close]==templateTags$bodyTags$element[i]) {
      sameLine <- TRUE
      close <- conditionalTagIndices[
        openClose == "close" &
        templateTags$bodyTags$tag[conditionalTagIndices] == paste0("/", thisTag) &
        templateTags$bodyTags$element[conditionalTagIndices] == templateTags$bodyTags$element[i] &
        templateTags$bodyTags$start[conditionalTagIndices] > templateTags$bodyTags$end[i]][1]
      if (!close > 0) stop("Could not find closing tag for conditional:", thisTag)
    }
    #skip this iteration if the opening and closing tags in question are already in the drop pile
    #these lines (and the lines between, if necessary) will already be dropped, so don't process
    if (templateTags$bodyTags$element[i] %in% bodyLinesToDrop &&
        templateTags$bodyTags$element[close] %in% bodyLinesToDrop) next
    #first check for tags to drop from the bodyTags collection (don't want these parsed later)
    if (conditionalTrue) {
      #only remove starting and ending tags
      bodyTagsToDrop <- c(bodyTagsToDrop, i, close)
    } else {
      #if conditional false, then remove all tags between conditional tags
      #first, dump all lines in the bodyTags section that fall between elements
      bodyTagsToDrop <- c(bodyTagsToDrop, i:close)
      #conditional is not true, so dump the tags and all space between.
      #The only difference here from the calculation below is that
      #bodyLinesToDrop should encompass the space between opening and closing,
      #and the clips below should dump the rest of the line when multiple tags
      #share a line -- so no need to rewrite code for clipping out tags.
      #Don't clip the tag lines themselves because this is handled below
      #(the whole line goes if nchar <= 0).
      #Only drop lines between matching open/close tags if not on the same line;
      #otherwise, the clipping code below handles everything correctly.
      #If on the same line, element + 1:close - 1 would yield something like 58:56, which is bad.
      if (!sameLine) {
        bodyLinesToDrop <- c(bodyLinesToDrop,
          (templateTags$bodyTags$element[i]+1):(templateTags$bodyTags$element[close]-1))
      }
    }
    #then dump lines from the syntax section itself:
    #handle same line issues, then delete whole lines between tags.
    #As with replaceTags substitution, need to handle situation where tag is on
    #a line with other stuff -- thus, update the bodyTags collection, too, to
    #reflect new start/stop positions.
    #When the conditional is true, just remove the tags and leave the syntax:
    #use the last position of the opening tag for the clip
    if (conditionalTrue) endPos <- templateTags$bodyTags$end[i]
    #conditional false, tags on different lines: clip the rest of the line
    else if (!conditionalTrue && sameLine == FALSE) endPos <- nchar(templateTags$bodyText[templateTags$bodyTags$element[i]])
    #conditional false, same line: clip anything between open tag and first
    #character of close tag (close tag itself handled by code below)
    else if (!conditionalTrue && sameLine == TRUE) endPos <- templateTags$bodyTags$start[close] - 1
    templateTags$bodyText[templateTags$bodyTags$element[i]] <- clipString(
      templateTags$bodyText[templateTags$bodyTags$element[i]],
      templateTags$bodyTags$start[i], endPos)
    if (nchar(trimSpace(templateTags$bodyText[templateTags$bodyTags$element[i]])) <= 0) {
      #no characters remain, so dump line
      bodyLinesToDrop <- c(bodyLinesToDrop, templateTags$bodyTags$element[i])
    } else {
      #if there is other text on this line, it may contain tags that need to be adjusted given the clip
      subsequentTags <- which(
        templateTags$bodyTags$element == templateTags$bodyTags$element[i] &
        templateTags$bodyTags$start > endPos)
      if (length(subsequentTags > 0)) {
        #calculate length of opening tag and shift later tags left by it
        openLength <- endPos - templateTags$bodyTags$start[i] + 1
        templateTags$bodyTags[subsequentTags,"start"] <- templateTags$bodyTags[subsequentTags,"start"] - openLength
        templateTags$bodyTags[subsequentTags,"end"] <- templateTags$bodyTags[subsequentTags,"end"] - openLength
      }
    }
    #okay, we've handled issues related to the opening tag, now handle closing tag
    #for the closing tag, just need to clip the tag itself (spacing handled above)
    templateTags$bodyText[templateTags$bodyTags$element[close]] <- clipString(
      templateTags$bodyText[templateTags$bodyTags$element[close]],
      templateTags$bodyTags$start[close],
      templateTags$bodyTags$end[close])
    if (nchar(trimSpace(templateTags$bodyText[templateTags$bodyTags$element[close]])) <= 0) {
      #no characters remain, so dump line
      bodyLinesToDrop <- c(bodyLinesToDrop, templateTags$bodyTags$element[close])
    } else {
      #only look for additional tags if nchar > 0
      #(redundant with the opening-tag logic above; candidate for consolidation)
      #if there is other text on the end line, it may contain tags that need to be adjusted given the clip
      subsequentTags <- which(
        templateTags$bodyTags$element == templateTags$bodyTags$element[close] &
        templateTags$bodyTags$start > templateTags$bodyTags$end[close])
      if (length(subsequentTags > 0)) {
        closeLength <- templateTags$bodyTags$end[close] - templateTags$bodyTags$start[close] + 1
        templateTags$bodyTags[subsequentTags,"start"] <- templateTags$bodyTags[subsequentTags,"start"] - closeLength
        templateTags$bodyTags[subsequentTags,"end"] <- templateTags$bodyTags[subsequentTags,"end"] - closeLength
      }
    }
  }
  #drop all bad body lines.
  #Only keep unique bodyTagsToDrop (and sort for clarity in debugging).
  #Hard to imagine that bodyTagsToDrop could be NULL at this point (given the
  #return when no conditional tags above), but if it were NULL, the bodyTags
  #collection would be dumped by the NULL*-1 evaluation.
  if (!is.null(bodyTagsToDrop)) {
    bodyTagsToDrop <- sort(unique(bodyTagsToDrop))
    templateTags$bodyTags <- templateTags$bodyTags[bodyTagsToDrop*-1, ]
  }
  #need to check whether bodyLinesToDrop is NULL. If it is, then we must not attempt the subset
  #(it will delete the whole character vector)
  if (!is.null(bodyLinesToDrop)) {
    #only retain unique bodyLinesToDrop (in theory handled by the "next" code above, but good to be safe)
    bodyLinesToDrop <- sort(unique(bodyLinesToDrop))
    templateTags$bodyText <- templateTags$bodyText[bodyLinesToDrop*-1]
    #need to move up the line markers in the bodyTags collection based on the lines dropped
    templateTags$bodyTags <- ddply(templateTags$bodyTags, "element", function(subDF) {
      numMoveUp <- length(which(bodyLinesToDrop < subDF$element[1]))
      subDF$element <- subDF$element - numMoveUp
      return(subDF)
    })
  }
  return(templateTags)
}
#' Process the Init Section
#'
#' Parses the init section of a template into a named argument list:
#' joins multi-line statements (terminated by semicolons), validates variable
#' names, splits each assignment into name/iterator/value, tokenizes values
#' (respecting double quotes), expands colon ranges for iterator variables,
#' and pads iterator-locked list variables so that they can be indexed
#' directly by iterator position.
#'
#' @param initsection character vector of lines from the template's init section
#' @return arglist: a named list of parsed values; list elements locked to an
#'   iterator carry an "iterator" attribute
#' @importFrom gsubfn strapply
#' @keywords internal
processInit <- function(initsection) {
  #combine multi-line statements by searching for semi-colon
  assignments <- grep("^\\s*.+\\s*=", initsection, perl=TRUE)
  #check for valid variable names
  valid <- grep("^\\s*[A-Za-z\\.]+[\\w\\.#]*\\s*=", initsection[assignments], perl=TRUE)
  if (length(valid) < length(assignments)) {
    badvars <- initsection[assignments[which(!1:length(assignments) %in% valid)]]
    stop(paste(c("Invalid variable definitions in init section.",
      "Variables must begin with a letter or a period.",
      "Variables may contain only the following characters: letters, numbers, underscores, periods, and a single pound sign for list variables.",
      "Problematic variable(s):", badvars), collapse="\n "))
  }
  #preallocate vector of strings to process
  argstoprocess <- vector("character", length(assignments))
  #loop through each line containing an assignment
  for (i in 1:length(assignments)) {
    argstoprocess[i] = initsection[assignments[i]]
    #if line does not terminate in semicolon, then read subsequent lines until semicolon found
    #start file position at n+1 line
    filepos = assignments[i] + 1
    while (length(grep(";\\s*$", argstoprocess[i], perl=TRUE)) != 1) {
      #cat("multi-line: ", unlist(argstoprocess[i]), fill=T)
      argstoprocess[i] = paste(argstoprocess[i], initsection[filepos])
      filepos = filepos + 1
    }
  }
  #will return a list (one element per argstoprocess) with a three-element vector (name, iterator, value)
  #note that the regexp implicitly dumps the semicolon and any trailing spaces
  arglist <- strapply(argstoprocess, "^\\s*(\\w+[\\w\\.]*)(#[\\w\\.]+)?\\s*=\\s*(.+);\\s*$",
    function(name, iterator, value) {
      return(c(name, iterator, value))
    }, perl=TRUE)
  #copy the first element (name) of each vector into the list names
  names(arglist) <- make.names(sapply(arglist, '[', 1))
  #1. parse values into vectors according to spaces and quotes
  #2. add iterator attribute to be processed after iterators are setup below
  #3. implicitly drop name by not including element[1]
  arglist <- lapply(arglist, function(element) {
    output <- friendlyGregexpr("(\"[^\"]*\"|[^\\s]+)", element[3])$tag
    output <- gsub("\"", "", output)
    #the regexp above matches the # itself.
    #need to trim off in cases where iterator defined
    if (nchar(element[2]) > 0) {
      element[2] <- substr(element[2], 2, nchar(element[2]))
    }
    attr(output, "iterator") <- element[2]
    return(output)
  })
  if (is.null(arglist$iterators)) {
    stop("No iterators in init section. Cannot process template.")
  }
  #process sequence text for each iterator (e.g. expand "1:5" to 1 2 3 4 5)
  for (thisIt in arglist$iterators) {
    if (is.null(arglist[[thisIt]])) {
      stop("Variable specified in iterators list, but not defined: ", thisIt)
    }
    #expand colon notation as needed
    #use do.call to combine elements of list returned by lapply
    #if there are many elements (e.g., 1 3 5), then lapply returns an element for each
    #one, but we just want a combined array. In the case of colon expansion, want to c that together
    #with any other elements... Maybe in the future when we support non-contiguous iterators.
    arglist[[thisIt]] <- do.call("c", lapply(arglist[[thisIt]], function(x) {
      if (length(grep(":", x)) > 0) {
        return(strapply(x, "(\\d+)\\s*:\\s*(\\d+)", function(start, stop) return(start:stop))[[1]])
      } else {
        return(as.numeric(x))
      }
    }))
    #sort as ascending and only keep unique values
    if (length(unique(arglist[[thisIt]])) < length(arglist[[thisIt]])) {
      stop("Problem with iterator: ", thisIt, "\n  Non-unique values specified: ",
        paste(arglist[[thisIt]], collapse=", "))
    }
    arglist[[thisIt]] <- sort(unique(arglist[[thisIt]]))
  }
  #now that iterators are defined, ensure that list tags match
  #pad vectors accordingly
  arglist <- lapply(arglist, function(element) {
    #if the iterator is defined, then this is a list tag
    #need to make sure it is properly padded
    iteratorAttr <- attr(element, "iterator")
    if (!is.null(iteratorAttr) && nchar(iteratorAttr) > 0) {
      iteratorValues <- arglist[[iteratorAttr]]
      #make sure that the length of the values vector
      #matches the length of the iterator vector
      if (length(element) != length(iteratorValues)) {
        stop("Variable locked to iterator: ", iteratorAttr,
          ", but has different length.\n  Values: ",
          paste(element, collapse=", "),
          "\n  Should be length: ", length(iteratorValues))
      }
      if (length(element) < max(iteratorValues)) {
        #pad so the vector can be indexed directly by iterator value
        updatedElement <- c()
        listElement <- 1
        #build a vector of the same length as the max of the iterator
        #only insert list values for defined indices. Otherwise pad
        for (i in 1:max(iteratorValues)) {
          if (i %in% iteratorValues) {
            updatedElement[i] <- element[listElement]
            listElement <- listElement + 1
          } else {
            updatedElement[i] <- ""
          }
        }
        element <- updatedElement
        attr(element, "iterator") <- iteratorAttr #re-add attribute
      }
    }
    return(element)
  })
  #default output directory to the current directory
  if (is.null(arglist$outputDirectory)) {
    warning("No output directory specified. Defaulting to the current directory.")
    arglist$outputDirectory <- getwd()
  }
  if (is.null(arglist$filename)) {
    stop("No definition provided for the output filename. The filename definition is required.")
  }
  return(arglist)
}
|
# Necessary packages
library(data.table)
library(rvest)
library(xml2)
# Website to scrape: numberFire NFL fantasy projections (offensive players)
url <- "https://www.numberfire.com/nfl/fantasy/fantasy-football-projections"
# Read in data: parse every HTML table on the page (first row as header,
# ragged rows filled)
page <- rvest::html_table(xml2::read_html(url)
                          , header = TRUE
                          , fill = TRUE)
# Combine and reshape files: the two tables appear to sit side by side on
# the page, so bind them column-wise
projections1 <- data.table::data.table(page[[1]])
projections2 <- data.table::data.table(page[[2]])
projections_dfs <- cbind(projections1, projections2)
# assign positional column names; the meaning of each column is fixed by
# the site's table layout
colnames(projections_dfs) <- c("v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24")
# drop the repeated header row
projections_dfs <- projections_dfs[-1, ]
# v1 appears to hold the player cell with embedded tabs/newlines:
# strip tabs, then convert newlines to "_" so the parts can be split
projections_dfs <- projections_dfs[, gsub1 := gsub("\t", "", v1), by = v1][, gsub2 := gsub("\n", "_", gsub1), by = gsub1]
projections_dfs <- projections_dfs[, c("v25", "v26", "v27") := tstrsplit(gsub2, "_", fixed = TRUE)]
# keep player name, position/team (parsed from the "(POS, TEAM)" piece of
# v27), and DraftKings points/salary (v19/v20) with "$" and "," stripped
projections_dfs <- projections_dfs[, .(PLAYER = v25
                                       , POSITION = substr(gsub(".*\\((.*)\\).*", "\\1", v27), 1, 2)
                                       , TEAM = trimws(substr(gsub(".*\\((.*)\\).*", "\\1", v27), 4, nchar(gsub(".*\\((.*)\\).*", "\\1", v27))))
                                       , POINTS_DK = as.numeric(gsub("[\\$,]","", v19))
                                       , SALARY_DK = as.numeric(gsub("[\\$,]","", v20)))]
# Second website to scrape: defense/special-teams projections
url <- "https://www.numberfire.com/nfl/fantasy/fantasy-football-projections/d"
# Read in data
page <- rvest::html_table(xml2::read_html(url)
                          , header = TRUE
                          , fill = TRUE)
# Combine and reshape files (same layout as above, fewer stat columns)
projections1 <- data.table::data.table(page[[1]])
projections2 <- data.table::data.table(page[[2]])
projections_dst <- cbind(projections1, projections2)
colnames(projections_dst) <- c("v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21")
projections_dst <- projections_dst[-1, ]
projections_dst <- projections_dst[, gsub1 := gsub("\t", "", v1), by = v1][, gsub2 := gsub("\n", "_", gsub1), by = gsub1]
projections_dst <- projections_dst[, c("v22", "v23", "v24") := tstrsplit(gsub2, "_", fixed = TRUE)]
# position is fixed to "DST"; DK points/salary are in v16/v17 here
projections_dst <- projections_dst[, .(PLAYER = v22
                                       , POSITION = "DST"
                                       , TEAM = trimws(substr(gsub(".*\\((.*)\\).*", "\\1", v24), 4, nchar(gsub(".*\\((.*)\\).*", "\\1", v24))))
                                       , POINTS_DK = as.numeric(gsub("[\\$,]","", v16))
                                       , SALARY_DK = as.numeric(gsub("[\\$,]","", v17)))]
# stack offense and DST projections into one table
df_merge <- data.table::rbindlist(list(projections_dfs, projections_dst))
# Export data (assumes an "Output/" directory exists -- TODO confirm)
data.table::fwrite(df_merge, "Output/example_scrape_output.csv")
| /example_scrape.R | permissive | tomasokal/github_actions_examples | R | false | false | 2,980 | r | # Necessary packages
library(data.table)
library(rvest)
library(xml2)
# Website to scrape: numberFire NFL fantasy projections (offensive players)
url <- "https://www.numberfire.com/nfl/fantasy/fantasy-football-projections"
# Read in data: parse every HTML table on the page (first row as header,
# ragged rows filled)
page <- rvest::html_table(xml2::read_html(url)
                          , header = TRUE
                          , fill = TRUE)
# Combine and reshape files: the two tables appear to sit side by side on
# the page, so bind them column-wise
projections1 <- data.table::data.table(page[[1]])
projections2 <- data.table::data.table(page[[2]])
projections_dfs <- cbind(projections1, projections2)
# assign positional column names; the meaning of each column is fixed by
# the site's table layout
colnames(projections_dfs) <- c("v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24")
# drop the repeated header row
projections_dfs <- projections_dfs[-1, ]
# v1 appears to hold the player cell with embedded tabs/newlines:
# strip tabs, then convert newlines to "_" so the parts can be split
projections_dfs <- projections_dfs[, gsub1 := gsub("\t", "", v1), by = v1][, gsub2 := gsub("\n", "_", gsub1), by = gsub1]
projections_dfs <- projections_dfs[, c("v25", "v26", "v27") := tstrsplit(gsub2, "_", fixed = TRUE)]
# keep player name, position/team (parsed from the "(POS, TEAM)" piece of
# v27), and DraftKings points/salary (v19/v20) with "$" and "," stripped
projections_dfs <- projections_dfs[, .(PLAYER = v25
                                       , POSITION = substr(gsub(".*\\((.*)\\).*", "\\1", v27), 1, 2)
                                       , TEAM = trimws(substr(gsub(".*\\((.*)\\).*", "\\1", v27), 4, nchar(gsub(".*\\((.*)\\).*", "\\1", v27))))
                                       , POINTS_DK = as.numeric(gsub("[\\$,]","", v19))
                                       , SALARY_DK = as.numeric(gsub("[\\$,]","", v20)))]
# Second website to scrape: defense/special-teams projections
url <- "https://www.numberfire.com/nfl/fantasy/fantasy-football-projections/d"
# Read in data
page <- rvest::html_table(xml2::read_html(url)
                          , header = TRUE
                          , fill = TRUE)
# Combine and reshape files (same layout as above, fewer stat columns)
projections1 <- data.table::data.table(page[[1]])
projections2 <- data.table::data.table(page[[2]])
projections_dst <- cbind(projections1, projections2)
colnames(projections_dst) <- c("v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21")
projections_dst <- projections_dst[-1, ]
projections_dst <- projections_dst[, gsub1 := gsub("\t", "", v1), by = v1][, gsub2 := gsub("\n", "_", gsub1), by = gsub1]
projections_dst <- projections_dst[, c("v22", "v23", "v24") := tstrsplit(gsub2, "_", fixed = TRUE)]
# position is fixed to "DST"; DK points/salary are in v16/v17 here
projections_dst <- projections_dst[, .(PLAYER = v22
                                       , POSITION = "DST"
                                       , TEAM = trimws(substr(gsub(".*\\((.*)\\).*", "\\1", v24), 4, nchar(gsub(".*\\((.*)\\).*", "\\1", v24))))
                                       , POINTS_DK = as.numeric(gsub("[\\$,]","", v16))
                                       , SALARY_DK = as.numeric(gsub("[\\$,]","", v17)))]
# stack offense and DST projections into one table
df_merge <- data.table::rbindlist(list(projections_dfs, projections_dst))
# Export data (assumes an "Output/" directory exists -- TODO confirm)
data.table::fwrite(df_merge, "Output/example_scrape_output.csv")
|
context("canvasXpress Web Charts - Correlation")

# Source the shared chart-generating helpers; the relative path differs
# depending on whether tests run interactively (from the package root) or
# under testthat (from tests/testthat). A plain if/else is used here:
# ifelse() is a vectorized value selector and should not be used for
# scalar control flow with side effects.
if (interactive()) {
  source("tests/cX-function.R")
} else {
  source("../cX-function.R")
}

test_that("cXcorrelation1", {
  result <- cXcorrelation1()
  if (interactive()) { print(result) }
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})

test_that("cXcorrelation2", {
  result <- cXcorrelation2()
  if (interactive()) { print(result) }
  warning('no legend showing')
  expect_s3_class(result, "canvasXpress")
  expect_s3_class(result, "htmlwidget")
})
| /tests/testthat/test-correlation.R | no_license | ginberg/canvasXpress | R | false | false | 558 | r | context("canvasXpress Web Charts - Correlation")
ifelse(interactive(), source("tests/cX-function.R"), source("../cX-function.R"))
test_that("cXcorrelation1", {
result <- cXcorrelation1()
if (interactive()) { print(result) }
expect_s3_class(result, "canvasXpress")
expect_s3_class(result, "htmlwidget")
})
test_that("cXcorrelation2", {
result <- cXcorrelation2()
if (interactive()) { print(result) }
warning('no legend showing')
expect_s3_class(result, "canvasXpress")
expect_s3_class(result, "htmlwidget")
})
|
# Unit tests for in_month(): builds month-based schedule predicates from
# month names, abbreviations, integers, or string numbers; test_date()
# checks a single date against a predicate and schedule() enumerates the
# matching dates within a year.
context("test_month_functions")

test_that("in_month function work", {
  # abbreviated month name
  in_july <- in_month("Jul")
  expect_true(test_date(dmy("12/07/1990"), in_july))
  expect_true(test_date(dmy("13/07/1990"), in_july))
  expect_false(test_date(dmy("12/06/1990"), in_july))
  expect_false(test_date(dmy("13/06/1990"), in_july))
})

test_that("in_month function works with numbers", {
  # integer month specification
  in_july <- in_month(7)
  expect_true(test_date(dmy("12/07/1990"), in_july))
  expect_true(test_date(dmy("13/07/1990"), in_july))
  expect_false(test_date(dmy("12/06/1990"), in_july))
  expect_false(test_date(dmy("13/06/1990"), in_july))
})

test_that("in_month function works with two numbers", {
  # multiple months combine as a union
  in_july_or_aug <- in_month(7, 8)
  expect_true(test_date(dmy("12/07/1990"), in_july_or_aug))
  expect_true(test_date(dmy("12/08/1990"), in_july_or_aug))
  expect_true(test_date(dmy("13/07/1990"), in_july_or_aug))
  expect_true(test_date(dmy("13/08/1990"), in_july_or_aug))
  expect_false(test_date(dmy("12/06/1990"), in_july_or_aug))
  expect_false(test_date(dmy("13/06/1990"), in_july_or_aug))
  # scheduling over a year should yield every day in July and August
  result <- schedule(in_july_or_aug, during = 2000)
  expected_result <- seq.Date(from = dmy("01/07/2000"),
                              to = dmy("31/08/2000"),
                              by = "1 day")
  expect_equal(result, expected_result)
})

test_that("in_month function works with two numbers, one in Feb", {
  # 2001 is not a leap year, so February ends on the 28th
  in_feb_march <- in_month(2, 3)
  result <- schedule(in_feb_march, during = 2001)
  expected_result <- seq.Date(from = dmy("01/02/2001"),
                              to = dmy("31/03/2001"),
                              by = "1 day")
  expect_equal(result, expected_result)
})

test_that("in_month function works with string numbers", {
  # month given as a numeric string
  in_july <- in_month("7")
  expect_true(test_date(dmy("12/07/1990"), in_july))
  expect_true(test_date(dmy("13/07/1990"), in_july))
  expect_false(test_date(dmy("12/06/1990"), in_july))
  expect_false(test_date(dmy("13/06/1990"), in_july))
})

test_that("in_month function works with two string numbers", {
  in_july_or_aug <- in_month("7", "8")
  expect_true(test_date(dmy("12/07/1990"), in_july_or_aug))
  expect_true(test_date(dmy("12/08/1990"), in_july_or_aug))
  expect_true(test_date(dmy("13/07/1990"), in_july_or_aug))
  expect_true(test_date(dmy("13/08/1990"), in_july_or_aug))
  expect_false(test_date(dmy("12/06/1990"), in_july_or_aug))
  expect_false(test_date(dmy("13/06/1990"), in_july_or_aug))
})

test_that("in_month function errors when given invalid spec", {
  # unrecognized name and out-of-range month number should both error
  expect_error(in_month("Febr"))
  expect_error(in_month(13))
})

test_that("in_month function works with two months", {
  # 2000 is a leap year, so February ends on the 29th
  expected_dates <- seq.Date(from = dmy("01/01/2000"),
                             to = dmy("29/02/2000"),
                             by = "1 day")
  Jan_and_Feb <- in_month("Jan", "Feb")
  expect_equal(expected_dates, schedule(Jan_and_Feb, during = 2000))
})

test_that("in_month function works with mutliple months", {
  # non-contiguous months produce disjoint date ranges
  expected_dates <- c(seq.Date(from = dmy("01/01/2000"),
                               to = dmy("29/02/2000"),
                               by = "1 day"),
                      seq.Date(from = dmy("01/12/2000"),
                               to = dmy("31/12/2000"),
                               by = "1 day"))
  Jan_Feb_and_Dec <- in_month("Jan", "Feb", "Dec")
  expect_equal(schedule(Jan_Feb_and_Dec, during = 2000), expected_dates)
})

test_that("in_month function works with mutliple months specified as integers", {
  expected_dates <- c(seq.Date(from = dmy("01/01/2000"),
                               to = dmy("29/02/2000"),
                               by = "1 day"),
                      seq.Date(from = dmy("01/12/2000"),
                               to = dmy("31/12/2000"),
                               by = "1 day"))
  Jan_Feb_and_Dec <- in_month(1, 2, 12)
  expect_equal(schedule(Jan_Feb_and_Dec, during = 2000), expected_dates)
})

test_that("in_month function works with mixed month specifications", {
  # integers, abbreviations, and full names may be mixed in one call
  expected_dates <- c(seq.Date(from = dmy("01/01/2000"),
                               to = dmy("29/02/2000"),
                               by = "1 day"),
                      seq.Date(from = dmy("01/12/2000"),
                               to = dmy("31/12/2000"),
                               by = "1 day"))
  Jan_Feb_and_Dec <- in_month(1, "Feb", "December")
  expect_equal(schedule(Jan_Feb_and_Dec, during = 2000), expected_dates)
})
| /tests/testthat/test_month_functions.R | permissive | jameslairdsmith/gs | R | false | false | 4,451 | r | context("test_month_functions")
test_that("in_month function work", {
in_july <- in_month("Jul")
expect_true(test_date(dmy("12/07/1990"), in_july))
expect_true(test_date(dmy("13/07/1990"), in_july))
expect_false(test_date(dmy("12/06/1990"), in_july))
expect_false(test_date(dmy("13/06/1990"), in_july))
})
test_that("in_month function works with numbers", {
in_july <- in_month(7)
expect_true(test_date(dmy("12/07/1990"), in_july))
expect_true(test_date(dmy("13/07/1990"), in_july))
expect_false(test_date(dmy("12/06/1990"), in_july))
expect_false(test_date(dmy("13/06/1990"), in_july))
})
test_that("in_month function works with two numbers", {
in_july_or_aug <- in_month(7, 8)
expect_true(test_date(dmy("12/07/1990"), in_july_or_aug))
expect_true(test_date(dmy("12/08/1990"), in_july_or_aug))
expect_true(test_date(dmy("13/07/1990"), in_july_or_aug))
expect_true(test_date(dmy("13/08/1990"), in_july_or_aug))
expect_false(test_date(dmy("12/06/1990"), in_july_or_aug))
expect_false(test_date(dmy("13/06/1990"), in_july_or_aug))
result <- schedule(in_july_or_aug, during = 2000)
expected_result <- seq.Date(from = dmy("01/07/2000"),
to = dmy("31/08/2000"),
by = "1 day")
expect_equal(result, expected_result)
})
test_that("in_month function works with two numbers, one in Feb", {
in_feb_march <- in_month(2, 3)
result <- schedule(in_feb_march, during = 2001)
expected_result <- seq.Date(from = dmy("01/02/2001"),
to = dmy("31/03/2001"),
by = "1 day")
expect_equal(result, expected_result)
})
test_that("in_month function works with string numbers", {
in_july <- in_month("7")
expect_true(test_date(dmy("12/07/1990"), in_july))
expect_true(test_date(dmy("13/07/1990"), in_july))
expect_false(test_date(dmy("12/06/1990"), in_july))
expect_false(test_date(dmy("13/06/1990"), in_july))
})
test_that("in_month function works with two string numbers", {
in_july_or_aug <- in_month("7", "8")
expect_true(test_date(dmy("12/07/1990"), in_july_or_aug))
expect_true(test_date(dmy("12/08/1990"), in_july_or_aug))
expect_true(test_date(dmy("13/07/1990"), in_july_or_aug))
expect_true(test_date(dmy("13/08/1990"), in_july_or_aug))
expect_false(test_date(dmy("12/06/1990"), in_july_or_aug))
expect_false(test_date(dmy("13/06/1990"), in_july_or_aug))
})
test_that("in_month function errors when given invalid spec", {
expect_error(in_month("Febr"))
expect_error(in_month(13))
})
test_that("in_month function works with two months", {
expected_dates <- seq.Date(from = dmy("01/01/2000"),
to = dmy("29/02/2000"),
by = "1 day")
Jan_and_Feb <- in_month("Jan", "Feb")
expect_equal(expected_dates, schedule(Jan_and_Feb, during = 2000))
})
# Fixed typo in the test description: "mutliple" -> "multiple".
test_that("in_month function works with multiple months", {
  expected_dates <- c(seq.Date(from = dmy("01/01/2000"),
                               to = dmy("29/02/2000"),
                               by = "1 day"),
                      seq.Date(from = dmy("01/12/2000"),
                               to = dmy("31/12/2000"),
                               by = "1 day"))
  Jan_Feb_and_Dec <- in_month("Jan", "Feb", "Dec")
  expect_equal(schedule(Jan_Feb_and_Dec, during = 2000), expected_dates)
})
# Fixed typo in the test description: "mutliple" -> "multiple".
test_that("in_month function works with multiple months specified as integers", {
  expected_dates <- c(seq.Date(from = dmy("01/01/2000"),
                               to = dmy("29/02/2000"),
                               by = "1 day"),
                      seq.Date(from = dmy("01/12/2000"),
                               to = dmy("31/12/2000"),
                               by = "1 day"))
  Jan_Feb_and_Dec <- in_month(1, 2, 12)
  expect_equal(schedule(Jan_Feb_and_Dec, during = 2000), expected_dates)
})
test_that("in_month function works with mixed month specifications", {
expected_dates <- c(seq.Date(from = dmy("01/01/2000"),
to = dmy("29/02/2000"),
by = "1 day"),
seq.Date(from = dmy("01/12/2000"),
to = dmy("31/12/2000"),
by = "1 day"))
Jan_Feb_and_Dec <- in_month(1, "Feb", "December")
expect_equal(schedule(Jan_Feb_and_Dec, during = 2000), expected_dates)
})
|
##' Compute frequencies
##'
##' @param x factor
##' @param showNA showNA
##' @param total total
##' @param digits digits
##' @author David Hajage
##' @keywords internal
freq <- function(x, showNA = c("no", "ifany", "always"), total = FALSE, digits = 2) {
  # sum() coerces the logical `total` to 0/1 so it can drive the `if` below
  total <- sum(total)
  # Counts, optionally including an NA cell (showNA is passed straight to
  # table(useNA = ); the full default vector is match.arg'ed to "no" there)
  nn <- table(x, useNA = showNA)
  n <- as.character(nn)
  names(n) <- rownames(nn)
  # Percentages are computed from table(x), which DROPS the NA cell, so `p`
  # may be one element shorter than `n` ...
  p <- paste("(", as.character(round(100*prop.table(table(x)), digits)), "%)", sep = "")
  if (length(n) != length(p)) {
    # ... in that case pad with an empty percentage for the NA row
    p <- c(p, "NA" = "")
  }
  if (total) {
    # Total count also excludes NA (table(x) has no NA cell)
    n <- c(n, Total = as.character(sum(table(x))))
    p <- c(p, Total = "(100%)")
  }
  # paste.matrix() is a package-local helper gluing "n" and "(p%)" together
  value <- paste.matrix(n, p)
  nom <- names(n)
  nom[is.na(nom)] <- "NA"
  results <- data.frame("variable" = nom, value = value)
  # If NA is not an actual level of the factor, move the NA row after "Total"
  if ((any(results$variable == "NA") & any(results$variable == "Total")) & !anyNA(levels(x))) {
    tmp <- results[results$variable == "NA", ]
    results <- rbind(results[results$variable != "NA", ], tmp)
  }
  results
}
##' Compute frequencies (data.frame input)
##'
##' Applies \code{freq} to every column of a data.frame and stacks the
##' per-column results, recording column names, labels and row counts in
##' attributes for downstream table layout.
##'
##' @param df data.frame
##' @param showNA passed to \code{freq}: "no", "ifany" or "always"
##' @param total logical; if TRUE \code{freq} appends a "Total" row
##' @param digits number of digits for percentages (passed to \code{freq})
##' @param label logical; if TRUE use Hmisc variable labels instead of names
##' @return a stacked data.frame with columns ".id", "label", "variable" and
##'   "value", plus layout attributes ("noms.lig", "labs.lig", "n.lig", ...)
##' @author David Hajage
##' @keywords internal
##' @importFrom Hmisc label
##' @importFrom plyr mapvalues
freq.data.frame <- function(df, showNA = c("no", "ifany", "always"), total = FALSE, digits = 2, label = FALSE) {
  noms.df <- names(df)
  if (label) {
    # NOTE: the logical argument `label` shadows Hmisc::label here, but
    # sapply()/match.fun() resolve the name with mode = "function", so the
    # Hmisc accessor is still found.  Confusing but correct.
    labs.df <- sapply(df, label)
    # columns without a label fall back to their name
    labs.df[labs.df == ""] <- noms.df[labs.df == ""]
    # names(df) <- noms.df
  } else {
    labs.df <- noms.df
  }
  dfl <- as.list(df)
  results <- llply(dfl, freq, showNA = showNA, total = total, digits = digits)
  n.df <- sapply(results, nrow)
  # prepend the originating column name to each per-column result
  for (i in seq_along(results)) {  # was 1:length(results): unsafe when empty
    results[[i]] <- cbind(".id" = noms.df[i], results[[i]])
  }
  results <- rbind.list(results)
  results$label <- mapvalues(results$`.id`, from = noms.df, to = labs.df)
  # put ".id" and "label" first, keep remaining columns in original order
  results <- results[, c(".id", "label", names(results)[!(names(results) %in% c(".id", "label"))])]
  attr(results, "noms.lig") <- noms.df
  attr(results, "noms.col") <- character(0)
  attr(results, "labs.lig") <- labs.df
  attr(results, "labs.col") <- character(0)
  attr(results, "n.lig") <- n.df
  attr(results, "n.col") <- numeric(0)
  results
}
## ##' Ascii for freq object.
## ##'
## ##' Ascii method for freq object (internal).
## ##'
## ##' @export
## ##' @method ascii freq
## ##' @import ascii
## ##' @param x a freq object
## ##' @param format see \code{?ascii} in \code{ascii} package
## ##' @param digits see \code{?ascii} in \code{ascii} package
## ##' @param include.rownames see \code{?ascii} in \code{ascii} package
## ##' @param rownames see \code{?ascii} in \code{ascii} package
## ##' @param include.colnames see \code{?ascii} in \code{ascii} package
## ##' @param header see \code{?ascii} in \code{ascii} package
## ##' @param lgroup see \code{?ascii} in \code{ascii} package
## ##' @param n.lgroup see \code{?ascii} in \code{ascii} package
## ##' @param ... other arguments passed to \code{ascii}
## ##' @author David Hajage
## ##' @keywords univar
## ascii.freq <- function(x, format = "nice", digits = 3, include.rownames = FALSE, include.colnames = TRUE, header = TRUE, lgroup = attr(x, "lgroup"), n.lgroup = attr(x, "n.lgroup"), ...) {
## class(x) <- class(x)[-1]
## ascii(x, include.colnames = include.colnames, include.rownames = include.rownames, header = header, lgroup = lgroup, n.lgroup = n.lgroup, format = format, digits = digits, ...)
## }
## ##' Print freq object.
## ##'
## ##' Print freq object (internal).
## ##'
## ##' @export
## ##' @import ascii
## ##' @method print freq
## ##' @param x a freq object
## ##' @param type type of output (see \code{?ascii} in \code{ascii}
## ##' package)
## ##' @param lstyle see \code{?ascii} in \code{ascii} package
## ##' @param ... other arguments passed to \code{ascii}
## ##' @author David Hajage
## ##' @keywords univar
## print.freq <- function(x, type = "rest", lstyle = "", ...) {
## print(ascii.freq(x, lstyle = lstyle, ...), type = type)
## invisible(x)
## }
## ##' as.data.frame for freq object.
## ##'
## ##' as.data.frame for freq object (internal).
## ##'
## ##' @export
## ##' @param x a freq object
## ##' @param ... not used
## ##' @author David Hajage
## ##' @keywords internal
## as.data.frame.freq <- function(x, ...) {
## xx <- unclass(x)
## var <- unlist(mapply(rep, attr(x, "lgroup")[[2]], attr(x, "n.lgroup")[[2]], SIMPLIFY = FALSE))
## levels <- attr(x, "lgroup")[[1]]
## data.frame(var = var, levels = levels, xx, row.names = NULL, check.names = FALSE)
## }
## ##' Test if \code{x} is an freq object
## ##'
## ##' @export
## ##' @param x a freq object
## is.freq <- function(x)
## inherits(x, "freq")
| /R/freq.r | no_license | DanChaltiel/biostat2 | R | false | false | 4,760 | r | ##' Compute frequencies
##'
##' @param x factor
##' @param showNA showNA
##' @param total total
##' @param digits digits
##' @author David Hajage
##' @keywords internal
freq <- function(x, showNA = c("no", "ifany", "always"), total = FALSE, digits = 2) {
  # sum() coerces the logical `total` to 0/1 so it can drive the `if` below
  total <- sum(total)
  # Counts, optionally including an NA cell (showNA is passed straight to
  # table(useNA = ); the full default vector is match.arg'ed to "no" there)
  nn <- table(x, useNA = showNA)
  n <- as.character(nn)
  names(n) <- rownames(nn)
  # Percentages are computed from table(x), which DROPS the NA cell, so `p`
  # may be one element shorter than `n` ...
  p <- paste("(", as.character(round(100*prop.table(table(x)), digits)), "%)", sep = "")
  if (length(n) != length(p)) {
    # ... in that case pad with an empty percentage for the NA row
    p <- c(p, "NA" = "")
  }
  if (total) {
    # Total count also excludes NA (table(x) has no NA cell)
    n <- c(n, Total = as.character(sum(table(x))))
    p <- c(p, Total = "(100%)")
  }
  # paste.matrix() is a package-local helper gluing "n" and "(p%)" together
  value <- paste.matrix(n, p)
  nom <- names(n)
  nom[is.na(nom)] <- "NA"
  results <- data.frame("variable" = nom, value = value)
  # If NA is not an actual level of the factor, move the NA row after "Total"
  if ((any(results$variable == "NA") & any(results$variable == "Total")) & !anyNA(levels(x))) {
    tmp <- results[results$variable == "NA", ]
    results <- rbind(results[results$variable != "NA", ], tmp)
  }
  results
}
##' Compute frequencies (data.frame input)
##'
##' Applies \code{freq} to every column of a data.frame and stacks the
##' per-column results, recording column names, labels and row counts in
##' attributes for downstream table layout.
##'
##' @param df data.frame
##' @param showNA passed to \code{freq}: "no", "ifany" or "always"
##' @param total logical; if TRUE \code{freq} appends a "Total" row
##' @param digits number of digits for percentages (passed to \code{freq})
##' @param label logical; if TRUE use Hmisc variable labels instead of names
##' @return a stacked data.frame with columns ".id", "label", "variable" and
##'   "value", plus layout attributes ("noms.lig", "labs.lig", "n.lig", ...)
##' @author David Hajage
##' @keywords internal
##' @importFrom Hmisc label
##' @importFrom plyr mapvalues
freq.data.frame <- function(df, showNA = c("no", "ifany", "always"), total = FALSE, digits = 2, label = FALSE) {
  noms.df <- names(df)
  if (label) {
    # NOTE: the logical argument `label` shadows Hmisc::label here, but
    # sapply()/match.fun() resolve the name with mode = "function", so the
    # Hmisc accessor is still found.  Confusing but correct.
    labs.df <- sapply(df, label)
    # columns without a label fall back to their name
    labs.df[labs.df == ""] <- noms.df[labs.df == ""]
    # names(df) <- noms.df
  } else {
    labs.df <- noms.df
  }
  dfl <- as.list(df)
  results <- llply(dfl, freq, showNA = showNA, total = total, digits = digits)
  n.df <- sapply(results, nrow)
  # prepend the originating column name to each per-column result
  for (i in seq_along(results)) {  # was 1:length(results): unsafe when empty
    results[[i]] <- cbind(".id" = noms.df[i], results[[i]])
  }
  results <- rbind.list(results)
  results$label <- mapvalues(results$`.id`, from = noms.df, to = labs.df)
  # put ".id" and "label" first, keep remaining columns in original order
  results <- results[, c(".id", "label", names(results)[!(names(results) %in% c(".id", "label"))])]
  attr(results, "noms.lig") <- noms.df
  attr(results, "noms.col") <- character(0)
  attr(results, "labs.lig") <- labs.df
  attr(results, "labs.col") <- character(0)
  attr(results, "n.lig") <- n.df
  attr(results, "n.col") <- numeric(0)
  results
}
## ##' Ascii for freq object.
## ##'
## ##' Ascii method for freq object (internal).
## ##'
## ##' @export
## ##' @method ascii freq
## ##' @import ascii
## ##' @param x a freq object
## ##' @param format see \code{?ascii} in \code{ascii} package
## ##' @param digits see \code{?ascii} in \code{ascii} package
## ##' @param include.rownames see \code{?ascii} in \code{ascii} package
## ##' @param rownames see \code{?ascii} in \code{ascii} package
## ##' @param include.colnames see \code{?ascii} in \code{ascii} package
## ##' @param header see \code{?ascii} in \code{ascii} package
## ##' @param lgroup see \code{?ascii} in \code{ascii} package
## ##' @param n.lgroup see \code{?ascii} in \code{ascii} package
## ##' @param ... other arguments passed to \code{ascii}
## ##' @author David Hajage
## ##' @keywords univar
## ascii.freq <- function(x, format = "nice", digits = 3, include.rownames = FALSE, include.colnames = TRUE, header = TRUE, lgroup = attr(x, "lgroup"), n.lgroup = attr(x, "n.lgroup"), ...) {
## class(x) <- class(x)[-1]
## ascii(x, include.colnames = include.colnames, include.rownames = include.rownames, header = header, lgroup = lgroup, n.lgroup = n.lgroup, format = format, digits = digits, ...)
## }
## ##' Print freq object.
## ##'
## ##' Print freq object (internal).
## ##'
## ##' @export
## ##' @import ascii
## ##' @method print freq
## ##' @param x a freq object
## ##' @param type type of output (see \code{?ascii} in \code{ascii}
## ##' package)
## ##' @param lstyle see \code{?ascii} in \code{ascii} package
## ##' @param ... other arguments passed to \code{ascii}
## ##' @author David Hajage
## ##' @keywords univar
## print.freq <- function(x, type = "rest", lstyle = "", ...) {
## print(ascii.freq(x, lstyle = lstyle, ...), type = type)
## invisible(x)
## }
## ##' as.data.frame for freq object.
## ##'
## ##' as.data.frame for freq object (internal).
## ##'
## ##' @export
## ##' @param x a freq object
## ##' @param ... not used
## ##' @author David Hajage
## ##' @keywords internal
## as.data.frame.freq <- function(x, ...) {
## xx <- unclass(x)
## var <- unlist(mapply(rep, attr(x, "lgroup")[[2]], attr(x, "n.lgroup")[[2]], SIMPLIFY = FALSE))
## levels <- attr(x, "lgroup")[[1]]
## data.frame(var = var, levels = levels, xx, row.names = NULL, check.names = FALSE)
## }
## ##' Test if \code{x} is an freq object
## ##'
## ##' @export
## ##' @param x a freq object
## is.freq <- function(x)
## inherits(x, "freq")
|
# figure/title captioner
# t.cap / f.cap generate numbered captions ("Table. N: ...", "Figure. N: ...").
t.cap <- captioner::captioner(prefix="Table.")
f.cap <- captioner::captioner(prefix="Figure.")
# t.ref returns only the reference part of a table caption, i.e. everything
# before the first ":" ("Table. N"), for use in cross-references.
t.ref <- function(label){
  stringr::str_extract(t.cap(label), "[^:]*")
}
f.ref <- function(label){
stringr::str_extract(f.cap(label), "[^:]*")
} | /project5_wine/source/captioner.R | no_license | Lidiia25/Predictive_Modeling_in_R | R | false | false | 269 | r | # figure/title captioner
t.cap <- captioner::captioner(prefix="Table.")
f.cap <- captioner::captioner(prefix="Figure.")
t.ref <- function(label){
stringr::str_extract(t.cap(label), "[^:]*")
}
f.ref <- function(label){
stringr::str_extract(f.cap(label), "[^:]*")
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_nflverse.R
\name{load_teams}
\alias{load_teams}
\title{Load NFL Team Graphics, Colors, and Logos}
\usage{
load_teams()
}
\value{
A tibble of team-level image URLs and hex color codes.
}
\description{
Loads team graphics, colors, and logos - useful for plots!
}
\examples{
\donttest{
load_teams()
}
}
\seealso{
\url{https://github.com/nflverse/nflfastr-data}
}
| /man/load_teams.Rd | permissive | ssavala/nflreadr | R | false | true | 446 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_nflverse.R
\name{load_teams}
\alias{load_teams}
\title{Load NFL Team Graphics, Colors, and Logos}
\usage{
load_teams()
}
\value{
A tibble of team-level image URLs and hex color codes.
}
\description{
Loads team graphics, colors, and logos - useful for plots!
}
\examples{
\donttest{
load_teams()
}
}
\seealso{
\url{https://github.com/nflverse/nflfastr-data}
}
|
% Generated by roxygen2 (4.0.2.9000): do not edit by hand
% Please edit documentation in R/require.package.R
\name{require.package}
\alias{.require.package}
\alias{require.package}
\title{Require a package for use in the project}
\usage{
require.package(package.name, attach = TRUE)
.require.package(package.name)
}
\arguments{
\item{package.name}{A character vector containing the package name.
Must be a valid package name installed on the system.}
\item{attach}{Should the package be attached to the search path (as with
\code{\link{library}}) or not (as with \code{\link{loadNamespace}})?
Defaults to \code{TRUE}. (Internal code will use \code{FALSE} by default
unless a compatibility switch is set, see below.)}
}
\value{
No value is returned; this function is called for its side effects.
}
\description{
This function will require the given package. If the package is not installed
it will stop execution and print a message to the user instructing them which
package to install and which function caused the error.
}
\details{
The function \code{.require.package} is called by internal code. It will
attach the package to the search path (with a warning) only if the
compatibility configuration \code{attach_internal_libraries} is set to
\code{TRUE}. Normally, packages used for loading data are not
needed on the search path, but not loading them might break existing code.
In a forthcoming version this compatibility setting will be removed,
and no packages will be attached to the search path by internal code.
}
\examples{
\dontrun{require.package('PackageName')}
}
| /man/require.package.Rd | no_license | krlmlr/LoadMyData | R | false | false | 1,583 | rd | % Generated by roxygen2 (4.0.2.9000): do not edit by hand
% Please edit documentation in R/require.package.R
\name{require.package}
\alias{.require.package}
\alias{require.package}
\title{Require a package for use in the project}
\usage{
require.package(package.name, attach = TRUE)
.require.package(package.name)
}
\arguments{
\item{package.name}{A character vector containing the package name.
Must be a valid package name installed on the system.}
\item{attach}{Should the package be attached to the search path (as with
\code{\link{library}}) or not (as with \code{\link{loadNamespace}})?
Defaults to \code{TRUE}. (Internal code will use \code{FALSE} by default
unless a compatibility switch is set, see below.)}
}
\value{
No value is returned; this function is called for its side effects.
}
\description{
This function will require the given package. If the package is not installed
it will stop execution and print a message to the user instructing them which
package to install and which function caused the error.
}
\details{
The function \code{.require.package} is called by internal code. It will
attach the package to the search path (with a warning) only if the
compatibility configuration \code{attach_internal_libraries} is set to
\code{TRUE}. Normally, packages used for loading data are not
needed on the search path, but not loading them might break existing code.
In a forthcoming version this compatibility setting will be removed,
and no packages will be attached to the search path by internal code.
}
\examples{
\dontrun{require.package('PackageName')}
}
|
# Load required packages
library(RMySQL)
library(dplyr)
library(rvest)
library(RCurl)
# Close open connections
all_cons <- dbListConnections(MySQL())
for(con in all_cons) dbDisconnect(con)
# Connect to db via ssh ssh stats@looker.transferwise.com -L 3307:172.18.1.43:3306
con <- dbConnect(MySQL(),
user = .env$usr,
password = .env$pw,
host = '127.0.0.1',
port = 3307,
dbname='obfuscated')
# Select new businesses with a website
query = "select user_profile.user_id, webpage from user_profile left join tmp_website_description w on w.user_id = user_profile.user_id where length(webpage) > 1 and date_created > '2016-06-01' and w.user_id is null;"
users = dbGetQuery(con, query)
users$user_id = as.numeric(as.character(users$user_id))
# Fix URLs
www = which(tolower(substring(users$webpage,1,3))=="www")
users$webpage[www] = sapply(users$webpage[www], function(x) paste('http://', x, sep=''))
http = which(tolower(substring(users$webpage,1,4))!="http")
users$webpage[http] = sapply(users$webpage[http], function(x) paste('http://', x, sep=''))
# Create list to store descriptions if it doesn't exist
description = list()
# Fetch descriptions from companies' websites
for(i in (length(description)+1):nrow(users)){
print(paste(i, i/nrow(users)))
result = tryCatch({
read_html(users$webpage[i])
}, warning = function(w) {
read_html(users$webpage[i])
}, error = function(e) {
"error"
}, finally = {
})
if(result[1]=='error') next()
description[[i]] = result %>% html_nodes('meta[name=description]') %>% as.character
if(length(description[[i]])==0){
description[[i]] = result %>% html_nodes('p[class="hero textalign-c"]') %>% as.character
}
if(length(description[[i]])==0){
description[[i]] = result %>% html_nodes('title') %>% as.character
}
print(description[[i]])
}
# Create a backup just in case
description_save = description
#description = description_save
# If there are multiple metatags named desription, select the last one
for(i in 1:length(description)){
k = length(description[[i]])
if(k == 0){
description[[i]] = NA
} else {
description[[i]] = description[[i]][k]
}
}
# Convert list to vector and clean out HTML
content_start = sapply(description, function(x) gregexpr('content=', x, ignore.case = TRUE))
content_start = sapply(content_start, `[[`, 1)
names(content_start) = 1:length(content_start)
content_node = grep('content', description, ignore.case=T)
description[content_node] = substring(description[content_node], content_start[content_node]+7, nchar(description[content_node]))
description = unlist(description)
names(description) = 1:length(description)
description = sapply(description, URLdecode)
description = gsub("<.*?>", '', description, ignore.case=T)
description = gsub("[^0-9A-Za-z.,///' ]", "", description)
description = gsub("/", "", description)
description = gsub("[ ]{2,}", "", description)
# Add user id
x = data.frame(user_id = users$user_id[1:length(description)], website_description = description)
# Close open connections and open a new one
all_cons <- dbListConnections(MySQL())
for(con in all_cons) dbDisconnect(con)
con <- dbConnect(MySQL(),
user = .env$usr,
password = .env$pw,
host = '127.0.0.1',
port = 3307,
dbname='obfuscated')
# Write to db and add index
dbWriteTable(con, 'tmp_website_description', x, row.names=F, append=T)
dbSendQuery(con, 'alter table tmp_website_description add index (user_id)')
| /website description scraping.R | no_license | 321k/website-description-scraper | R | false | false | 3,615 | r | # Load required packages
library(RMySQL)
library(dplyr)
library(rvest)
library(RCurl)
# Close open connections
all_cons <- dbListConnections(MySQL())
for(con in all_cons) dbDisconnect(con)
# Connect to db via ssh ssh stats@looker.transferwise.com -L 3307:172.18.1.43:3306
con <- dbConnect(MySQL(),
user = .env$usr,
password = .env$pw,
host = '127.0.0.1',
port = 3307,
dbname='obfuscated')
# Select new businesses with a website
query = "select user_profile.user_id, webpage from user_profile left join tmp_website_description w on w.user_id = user_profile.user_id where length(webpage) > 1 and date_created > '2016-06-01' and w.user_id is null;"
users = dbGetQuery(con, query)
users$user_id = as.numeric(as.character(users$user_id))
# Fix URLs
www = which(tolower(substring(users$webpage,1,3))=="www")
users$webpage[www] = sapply(users$webpage[www], function(x) paste('http://', x, sep=''))
http = which(tolower(substring(users$webpage,1,4))!="http")
users$webpage[http] = sapply(users$webpage[http], function(x) paste('http://', x, sep=''))
# Create list to store descriptions if it doesn't exist
description = list()
# Fetch descriptions from companies' websites.
# The loop is resumable: it starts after the last element already present in
# `description`, so a crashed run can be restarted without losing progress.
# NOTE(review): if `description` is empty and `users` has 0 rows, the range
# (1:0) would iterate backwards -- assumed not to happen in practice.
for(i in (length(description)+1):nrow(users)){
  print(paste(i, i/nrow(users)))  # progress: index and fraction done
  result = tryCatch({
    read_html(users$webpage[i])
  }, warning = function(w) {
    # on a warning, retry the fetch once
    read_html(users$webpage[i])
  }, error = function(e) {
    "error"  # sentinel value checked below; this URL is then skipped
  }, finally = {
  })
  if(result[1]=='error') next()
  # preferred source: the <meta name="description"> tag ...
  description[[i]] = result %>% html_nodes('meta[name=description]') %>% as.character
  if(length(description[[i]])==0){
    # ... then a site-specific hero paragraph ...
    description[[i]] = result %>% html_nodes('p[class="hero textalign-c"]') %>% as.character
  }
  if(length(description[[i]])==0){
    # ... finally fall back to the page <title>
    description[[i]] = result %>% html_nodes('title') %>% as.character
  }
  print(description[[i]])
}
# Create a backup just in case
description_save = description
#description = description_save
# If there are multiple metatags named desription, select the last one
for(i in 1:length(description)){
k = length(description[[i]])
if(k == 0){
description[[i]] = NA
} else {
description[[i]] = description[[i]][k]
}
}
# Convert list to vector and clean out HTML
content_start = sapply(description, function(x) gregexpr('content=', x, ignore.case = TRUE))
content_start = sapply(content_start, `[[`, 1)
names(content_start) = 1:length(content_start)
content_node = grep('content', description, ignore.case=T)
description[content_node] = substring(description[content_node], content_start[content_node]+7, nchar(description[content_node]))
description = unlist(description)
names(description) = 1:length(description)
description = sapply(description, URLdecode)
description = gsub("<.*?>", '', description, ignore.case=T)
description = gsub("[^0-9A-Za-z.,///' ]", "", description)
description = gsub("/", "", description)
description = gsub("[ ]{2,}", "", description)
# Add user id
x = data.frame(user_id = users$user_id[1:length(description)], website_description = description)
# Close open connections and open a new one
all_cons <- dbListConnections(MySQL())
for(con in all_cons) dbDisconnect(con)
con <- dbConnect(MySQL(),
user = .env$usr,
password = .env$pw,
host = '127.0.0.1',
port = 3307,
dbname='obfuscated')
# Write to db and add index
dbWriteTable(con, 'tmp_website_description', x, row.names=F, append=T)
dbSendQuery(con, 'alter table tmp_website_description add index (user_id)')
|
#' Split a purl()-generated R script into one snippet file per named chunk.
#'
#' Scans `file` for knitr chunk headers ("## ----name----") and writes the
#' body of each suitably named chunk to `<path>/<name>.R`.  Chunks whose
#' name is empty, contains "-sol" (solution chunks), or contains "="
#' (chunk options, no name) are skipped.
#'
#' @param file path to a purl()-ed R script.
#' @param path directory into which snippet files are written.
#' @param ... unused (kept for backward compatibility).
#' @return character vector of the snippet file paths written.
snippify <- function(file, path, ...) {
  f <- file(file)
  lines <- readLines(f)
  close(f)
  results <- character(0)
  headers <- which(grepl("## ----", lines))
  if (length(headers) < 1) return(results)
  start <- headers
  # each chunk runs to the line before the next header (or end of file)
  end <- c(tail(headers, -1) - 1, length(lines))
  for (i in seq_along(start)) {
    message(lines[start[i]])
    # strip the "## ----" prefix, then trailing dashes, to get the chunk name
    chunkName <- sub("(## ----)(?<chunk>[^,]*)(.*)", "\\2", lines[start[i]],
                     perl = TRUE)
    chunkName <- sub("-*$", "", chunkName)
    # BUG FIX: this used to read `nchar(chunkName > 0)`, i.e. nchar("TRUE"),
    # which is always truthy, so unnamed chunks were written to ".R" too.
    if (is.character(chunkName) &&
        nchar(chunkName) > 0 &&
        ! grepl("-sol", chunkName) &&
        ! grepl("=", chunkName)) {
      chunkPath <- file.path(path, paste0(chunkName, ".R"))
      message(paste(" ** Writing", chunkPath))
      results <- c(results, chunkPath)
      con <- file(chunkPath, "w")
      writeLines(lines[(start[i] + 1):end[i]], con)
      close(con)
    }
  }
  # previously the collected paths were never returned; return them so
  # callers can see what was written
  results
}
require(knitr)
setwd("snipping")
purl("/Users/rpruim/projects/github/fast2e/Rnw/amsfast2.Rnw")
snippify("amsfast2.R", path="../../inst/snippet")
setwd("..")
| /bin/snippify.R | no_license | anhnguyendepocen/fastR2 | R | false | false | 1,068 | r |
#' Split a purl()-generated R script into one snippet file per named chunk.
#'
#' Scans `file` for knitr chunk headers ("## ----name----") and writes the
#' body of each suitably named chunk to `<path>/<name>.R`.  Chunks whose
#' name is empty, contains "-sol" (solution chunks), or contains "="
#' (chunk options, no name) are skipped.
#'
#' @param file path to a purl()-ed R script.
#' @param path directory into which snippet files are written.
#' @param ... unused (kept for backward compatibility).
#' @return character vector of the snippet file paths written.
snippify <- function(file, path, ...) {
  f <- file(file)
  lines <- readLines(f)
  close(f)
  results <- character(0)
  headers <- which(grepl("## ----", lines))
  if (length(headers) < 1) return(results)
  start <- headers
  # each chunk runs to the line before the next header (or end of file)
  end <- c(tail(headers, -1) - 1, length(lines))
  for (i in seq_along(start)) {
    message(lines[start[i]])
    # strip the "## ----" prefix, then trailing dashes, to get the chunk name
    chunkName <- sub("(## ----)(?<chunk>[^,]*)(.*)", "\\2", lines[start[i]],
                     perl = TRUE)
    chunkName <- sub("-*$", "", chunkName)
    # BUG FIX: this used to read `nchar(chunkName > 0)`, i.e. nchar("TRUE"),
    # which is always truthy, so unnamed chunks were written to ".R" too.
    if (is.character(chunkName) &&
        nchar(chunkName) > 0 &&
        ! grepl("-sol", chunkName) &&
        ! grepl("=", chunkName)) {
      chunkPath <- file.path(path, paste0(chunkName, ".R"))
      message(paste(" ** Writing", chunkPath))
      results <- c(results, chunkPath)
      con <- file(chunkPath, "w")
      writeLines(lines[(start[i] + 1):end[i]], con)
      close(con)
    }
  }
  # previously the collected paths were never returned; return them so
  # callers can see what was written
  results
}
# Driver script: extract snippets from the book source.
require(knitr)  # NOTE(review): prefer library(); require() only warns on failure
# NOTE(review): changing the working directory globally is fragile (and the
# hard-coded absolute path below only works on the author's machine);
# consider building absolute paths instead of the setwd()/setwd("..") pair.
setwd("snipping")
purl("/Users/rpruim/projects/github/fast2e/Rnw/amsfast2.Rnw")
snippify("amsfast2.R", path="../../inst/snippet")
setwd("..")
|
#' RR and their confidence intervals for Cox models
#'
#' Computes risk ratios and their confidence intervals for Cox models
#'
#' @param model a \code{coxph} object
#' @param alpha type I error, 0.05 by default
#' @param sided 1 or 2 for one or two-sided
#' @return A matrix with the estimated coefficients of the Cox model, their s.e., z-values, p-values, RR and CI of the RR
#' @author Hugo Varet
#' @examples
#' cgd$time=cgd$tstop-cgd$tstart
#' IC_RR_coxph(coxph(Surv(time,status)~sex+age,data=cgd),alpha=0.05,sided=1)
IC_RR_coxph=function(model,alpha=0.05,sided=2){
  # model must come from the coxph() function
  # Output matrix: the summary(coxph) coefficient table plus three extra
  # columns for the risk ratio and its Wald confidence bounds.
  tab=matrix(nrow=nrow(summary(model)$coefficients),ncol=ncol(summary(model)$coefficients)+3,
             dimnames = list(c(rownames(summary(model)$coefficients)),c(colnames(summary(model)$coefficients),"RR","IC.inf","IC.sup")))
  tab[,1:ncol(summary(model)$coefficients)]=round(summary(model)$coefficients,digits=3)
  # RR = exp(coef); CI from the normal (Wald) approximation exp(coef +/- z*se)
  tab[,"RR"]=round(exp(summary(model)$coefficients[,"coef"]),digits=3)
  tab[,"IC.inf"]=round(exp(summary(model)$coefficients[,"coef"]-qnorm(1-alpha/2)*summary(model)$coefficients[,"se(coef)"]),digits=3)
  tab[,"IC.sup"]=round(exp(summary(model)$coefficients[,"coef"]+qnorm(1-alpha/2)*summary(model)$coefficients[,"se(coef)"]),digits=3)
  # warning: if one-sided test, we divide the p-value by 2
  # NOTE(review): column 5 is assumed to be "Pr(>|z|)" in summary.coxph output
  if (sided==1){
    tab[,5]=tab[,5]/2
  }
  # remove exp(coef) column; t(as.matrix(...)) keeps a 1-row result a matrix
  if (nrow(tab)==1){tab=t(as.matrix(tab[,-2]))} else{tab=tab[,-2]}
  rownames(tab)=rownames(summary(model)$coefficients)
  # significance stars derived from the (possibly halved) p-values
  signif=ifelse(tab[,"Pr(>|z|)"]>=0.1,"",ifelse(tab[,"Pr(>|z|)"]>=0.05,".",ifelse(tab[,"Pr(>|z|)"]>=0.01,"*",ifelse(tab[,"Pr(>|z|)"]>=0.001,"**","***"))))
  res=cbind(tab,signif)
  colnames(res)=c(colnames(tab),"")
  # noquote: print the character matrix without surrounding quotes
  return(noquote(res))
}
#N=100
#time=rexp(N)
#eps=sample(0:1,N,T)
#x=rnorm(N)
#y=rbinom(N,1,0.5)
#
#library(survival)
#IC_RR_coxph(coxph(Surv(time,eps)~x+y))
#IC_RR_coxph(coxph(Surv(time,eps)~x))
| /R/IC_RR_coxph.r | no_license | cran/packHV | R | false | false | 1,978 | r | #' RR and their confidence intervals for Cox models
#'
#' Computes risk ratios and their confidence intervals for Cox models
#'
#' @param model a \code{coxph} object
#' @param alpha type I error, 0.05 by default
#' @param sided 1 or 2 for one or two-sided
#' @return A matrix with the estimated coefficients of the Cox model, their s.e., z-values, p-values, RR and CI of the RR
#' @author Hugo Varet
#' @examples
#' cgd$time=cgd$tstop-cgd$tstart
#' IC_RR_coxph(coxph(Surv(time,status)~sex+age,data=cgd),alpha=0.05,sided=1)
IC_RR_coxph=function(model,alpha=0.05,sided=2){
  # model must come from the coxph() function
  # Output matrix: the summary(coxph) coefficient table plus three extra
  # columns for the risk ratio and its Wald confidence bounds.
  tab=matrix(nrow=nrow(summary(model)$coefficients),ncol=ncol(summary(model)$coefficients)+3,
             dimnames = list(c(rownames(summary(model)$coefficients)),c(colnames(summary(model)$coefficients),"RR","IC.inf","IC.sup")))
  tab[,1:ncol(summary(model)$coefficients)]=round(summary(model)$coefficients,digits=3)
  # RR = exp(coef); CI from the normal (Wald) approximation exp(coef +/- z*se)
  tab[,"RR"]=round(exp(summary(model)$coefficients[,"coef"]),digits=3)
  tab[,"IC.inf"]=round(exp(summary(model)$coefficients[,"coef"]-qnorm(1-alpha/2)*summary(model)$coefficients[,"se(coef)"]),digits=3)
  tab[,"IC.sup"]=round(exp(summary(model)$coefficients[,"coef"]+qnorm(1-alpha/2)*summary(model)$coefficients[,"se(coef)"]),digits=3)
  # warning: if one-sided test, we divide the p-value by 2
  # NOTE(review): column 5 is assumed to be "Pr(>|z|)" in summary.coxph output
  if (sided==1){
    tab[,5]=tab[,5]/2
  }
  # remove exp(coef) column; t(as.matrix(...)) keeps a 1-row result a matrix
  if (nrow(tab)==1){tab=t(as.matrix(tab[,-2]))} else{tab=tab[,-2]}
  rownames(tab)=rownames(summary(model)$coefficients)
  # significance stars derived from the (possibly halved) p-values
  signif=ifelse(tab[,"Pr(>|z|)"]>=0.1,"",ifelse(tab[,"Pr(>|z|)"]>=0.05,".",ifelse(tab[,"Pr(>|z|)"]>=0.01,"*",ifelse(tab[,"Pr(>|z|)"]>=0.001,"**","***"))))
  res=cbind(tab,signif)
  colnames(res)=c(colnames(tab),"")
  # noquote: print the character matrix without surrounding quotes
  return(noquote(res))
}
#N=100
#time=rexp(N)
#eps=sample(0:1,N,T)
#x=rnorm(N)
#y=rbinom(N,1,0.5)
#
#library(survival)
#IC_RR_coxph(coxph(Surv(time,eps)~x+y))
#IC_RR_coxph(coxph(Surv(time,eps)~x))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_kpv.R
\name{read_kpv}
\alias{read_kpv}
\title{read_kpv Function}
\usage{
read_kpv(file = "/Volumes/langdoc/langs/kpv/kpv_udo20120330SazinaJS-dream/kpv_udo20120330SazinaJS-dream.eaf",
SS_tier = "wordT", SA_tier = "orthT", DEF_tier = "refT",
simplify = TRUE)
}
\arguments{
\item{file}{The path to ELAN file which we want to parse}
\item{SS_tier}{Linguistic type of the Symbolic Subdivision tier (usually contains the tokenized wordforms)}
\item{SA_tier}{Linguistic type of the Symbolic Association tier}
\item{DEF_tier}{Linguistic type of the independent tier}
}
\description{
This is an updated version of the old function, which has been renamed to read_eaf_old(). Instead of trying to do everything this function is supposedly used with `plyr` in order to parse multiple files. The function parses ELAN files. Dummy tokens that contain no information are erased automatically. The files that are for some reason not parsable are skipped. In actual use the biggest problems are connected to structural irregularity of ELAN files in corpus. It is used ideally in connection with scripts that are able to parse IMDI or CMDI files. Please use `log_eaf()` function to see which files have been changed recently, those are usually the ones containing problems.
}
\examples{
read_kpv(file = "corpora/kpv/session_1.eaf", DEF_tier = "refT", SA_tier = "orthT", SS_tier = "wordT")
}
\keyword{ELAN}
| /man/read_kpv.Rd | permissive | langdoc/FRelan | R | false | true | 1,481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_kpv.R
\name{read_kpv}
\alias{read_kpv}
\title{read_kpv Function}
\usage{
read_kpv(file = "/Volumes/langdoc/langs/kpv/kpv_udo20120330SazinaJS-dream/kpv_udo20120330SazinaJS-dream.eaf",
SS_tier = "wordT", SA_tier = "orthT", DEF_tier = "refT",
simplify = TRUE)
}
\arguments{
\item{file}{The path to ELAN file which we want to parse}
\item{SS_tier}{Linguistic type of the Symbolic Subdivision tier (usually contains the tokenized wordforms)}
\item{SA_tier}{Linguistic type of the Symbolic Association tier}
\item{DEF_tier}{Linguistic type of the independent tier}
}
\description{
This is an updated version of the old function, which has been renamed to read_eaf_old(). Instead of trying to do everything this function is supposedly used with `plyr` in order to parse multiple files. The function parses ELAN files. Dummy tokens that contain no information are erased automatically. The files that are for some reason not parsable are skipped. In actual use the biggest problems are connected to structural irregularity of ELAN files in corpus. It is used ideally in connection with scripts that are able to parse IMDI or CMDI files. Please use `log_eaf()` function to see which files have been changed recently, those are usually the ones containing problems.
}
\examples{
read_kpv(file = "corpora/kpv/session_1.eaf", DEF_tier = "refT", SA_tier = "orthT", SS_tier = "wordT")
}
\keyword{ELAN}
|
#' @title R client for the CKAN API
#'
#' @description ckanr is a full client for the CKAN API, wrapping all
#' APIs, including for reading and writing data. Please get in touch
#' (\url{https://github.com/ropensci/ckanr/issues}) if you have problems, or
#' have use cases that we don't cover yet.
#'
#' @section CKAN API:
#'
#' Documentation for the CKAN API is at \url{http://docs.ckan.org/en/latest/api/index.html}.
#' We'll always be following the latest version of the API.
#'
#' @section ckanr package API:
#'
#' The functions can be grouped into those for setup, packages,
#' resources, tags, organizations, and groups.
#'
#' \itemize{
#' \item Setup - The main one is \code{\link{ckanr_setup}} - and many related
#' functions, e.g., \code{\link{get_default_key}}
#' \item Packages - Create a package with \code{\link{package_create}}, and see
#' other functions starting with \code{package_*}
#' \item Resources - Create a package with \code{\link{resource_create}}, and see
#' other functions starting with \code{resource_*}
#' \item Tags - List tags with \code{\link{tag_list}}, and see
#' other functions starting with \code{tag_*}
#' \item Organizations - List organizations with \code{\link{organization_list}},
#' and show a specific organization with \code{\link{organization_show}}
#' }
#'
#' @importFrom methods is
#' @importFrom stats na.omit
#' @importFrom utils read.csv unzip
#' @importFrom httr GET POST upload_file write_disk add_headers content
#' stop_for_status http_condition content_type_json
#' @importFrom jsonlite fromJSON
#' @name ckanr-package
#' @aliases ckanr
#' @docType package
#' @author Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @author Imanuel Costigan \email{i.costigan@@me.com}
#' @author Wush Wu
#' @author Florian Mayer \email{florian.wendelin.mayer@@gmail.com}
#' @keywords package
NULL
| /R/ckanr-package.R | permissive | dspim/ckanr | R | false | false | 1,850 | r | #' @title R client for the CKAN API
#'
#' @description ckanr is a full client for the CKAN API, wrapping all
#' APIs, including for reading and writing data. Please get in touch
#' (\url{https://github.com/ropensci/ckanr/issues}) if you have problems, or
#' have use cases that we don't cover yet.
#'
#' @section CKAN API:
#'
#' Documentation for the CKAN API is at \url{http://docs.ckan.org/en/latest/api/index.html}.
#' We'll always be following the latest version of the API.
#'
#' @section ckanr package API:
#'
#' The functions can be grouped into those for setup, packages,
#' resources, tags, organizations, and groups.
#'
#' \itemize{
#' \item Setup - The main one is \code{\link{ckanr_setup}} - and many related
#' functions, e.g., \code{\link{get_default_key}}
#' \item Packages - Create a package with \code{\link{package_create}}, and see
#' other functions starting with \code{package_*}
#' \item Resources - Create a package with \code{\link{resource_create}}, and see
#' other functions starting with \code{resource_*}
#' \item Tags - List tags with \code{\link{tag_list}}, and see
#' other functions starting with \code{tag_*}
#' \item Organizations - List organizations with \code{\link{organization_list}},
#' and show a specific organization with \code{\link{organization_show}}
#' }
#'
#' @importFrom methods is
#' @importFrom stats na.omit
#' @importFrom utils read.csv unzip
#' @importFrom httr GET POST upload_file write_disk add_headers content
#' stop_for_status http_condition content_type_json
#' @importFrom jsonlite fromJSON
#' @name ckanr-package
#' @aliases ckanr
#' @docType package
#' @author Scott Chamberlain \email{myrmecocystus@@gmail.com}
#' @author Imanuel Costigan \email{i.costigan@@me.com}
#' @author Wush Wu
#' @author Florian Mayer \email{florian.wendelin.mayer@@gmail.com}
#' @keywords package
NULL
|
# Snapshot test: run recipes' extension check against the textrecipes
# package and record its report, so any change to the output surfaces in
# review. Comments stay outside expect_snapshot() on purpose: the snapshot
# captures the deparsed call, so touching its contents would invalidate it.
test_that("recipes_extension_check", {
  expect_snapshot(
    recipes::recipes_extension_check(
      pkg = "textrecipes"
    )
  )
})
| /tests/testthat/test-extension_check.R | permissive | tidymodels/textrecipes | R | false | false | 135 | r | test_that("recipes_extension_check", {
expect_snapshot(
recipes::recipes_extension_check(
pkg = "textrecipes"
)
)
})
|
# Plot 4: a 2x2 panel of household power-consumption graphs for the two
# days 2007-02-01 and 2007-02-02, written to plot4.png.
# Load the raw data; missing readings are encoded as "?".
power <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Build one timestamp per row from the separate Date and Time columns.
datetime <- strptime(paste(as.character(power$Date), as.character(power$Time)), "%d/%m/%Y %H:%M:%S")
# Keep only the measurement columns and prepend the timestamp column.
energy <- data.frame(datetime, subset(power, select = Global_active_power:Sub_metering_3))
# Restrict to the two days of interest.
feb <- subset(energy, datetime >= "2007-02-01" & datetime < "2007-02-03")
# Open the PNG device and lay the four panels out row by row.
png(file = "plot4.png")
par(mfrow = c(2, 2))
# Top-left: global active power over time.
with(feb, plot(datetime, Global_active_power, xlab = "", ylab = "Global Active Power", type = "n"))
with(feb, lines(datetime, Global_active_power))
# Top-right: voltage over time.
with(feb, plot(datetime, Voltage, ylab = "Voltage", type = "n"))
with(feb, lines(datetime, Voltage))
# Bottom-left: all three sub-metering series on a single panel.
with(feb, plot(datetime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "n"))
with(feb, lines(datetime, Sub_metering_1))
with(feb, lines(datetime, Sub_metering_2, col = "red"))
with(feb, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
# Bottom-right: global reactive power over time.
with(feb, plot(datetime, Global_reactive_power, type = "n"))
with(feb, lines(datetime, Global_reactive_power))
# Close the device, flushing the file to disk.
dev.off()
| /plot4.R | no_license | yaseminyf/ExData_Plotting1 | R | false | false | 1,667 | r | # read in the file
hpc <- read.table("./household_power_consumption.txt", header=TRUE, sep = ";", na.strings="?")
# combine the Date and Time variables in a string
dt <- paste(as.character(hpc$Date), as.character(hpc$Time))
# convert the type of the newly formed string to class Date
datetime <- strptime(dt, "%d/%m/%Y %H:%M:%S")
# strip the Date and Time variables from the original data set
mydata <- subset(hpc, select = Global_active_power:Sub_metering_3)
# add the newly formed datetime variable to the stripped data set
mydata <- data.frame(datetime, mydata)
# take the subset of the data that interests us
usedata <- subset(mydata, datetime >= "2007-02-01" & datetime < "2007-02-03")
# open the png device
png(file="plot4.png")
# decide on the geometry of the 4 plots: (2,2)- row oriented
par(mfrow=c(2,2))
# plot (1,1)
with(usedata, plot(datetime, Global_active_power, xlab="", ylab="Global Active Power", type="n"))
with(usedata, lines(datetime, Global_active_power))
# plot (1,2)
with(usedata, plot(datetime, Voltage, ylab="Voltage", type="n"))
with(usedata, lines(datetime, Voltage))
# plot (2,1)
with(usedata, plot(datetime, Sub_metering_1, xlab="", ylab="Energy sub metering", type="n"))
with(usedata, lines(datetime, Sub_metering_1))
with(usedata, lines(datetime, Sub_metering_2, col="red"))
with(usedata, lines(datetime, Sub_metering_3, col="blue"))
legend("topright", lty=1, col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), bty="n")
# plot (2,2)
with(usedata, plot(datetime, Global_reactive_power, type="n"))
with(usedata, lines(datetime, Global_reactive_power))
# close the png device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsplot-class.R
\name{gsplot}
\alias{gsplot}
\alias{gsplot.default}
\alias{gsplot.list}
\title{gsplot}
\usage{
gsplot(x = NULL, ...)
\method{gsplot}{default}(..., created = Sys.Date(),
gsplot.version = packageDescription(getPackageName(), fields = "Version"),
config.file = NA, theme = NA, frame.plot = TRUE)
\method{gsplot}{list}(x)
}
\arguments{
\item{x}{list}
\item{\dots}{Further graphical parameters may also be supplied as arguments. See 'Details'.}
\item{created}{vector of length one giving the date the gsplot object was created. Defaults to
using \code{Sys.Date()}. Output class matches that of the input.}
\item{gsplot.version}{vector of length one giving the version of the gsplot package used to create the
object. Defaults to calling \code{packageDescription()}. Output class matches that of the input.}
\item{config.file}{path to the file that will only be used for setting
par in this one gsplot object. If \code{NA} (default), par is set by the global options set by
load_config().}
\item{theme}{There are several built-in themes (see \link{Themes}). Additionally, the user can create a \code{gsplot}
object in their workspace. This argument then takes the name of that object (either built-in or custom).
If \code{NA} (default), no theme is used.}
\item{frame.plot}{a logical indicating whether a box should be drawn around the plot. Default is \code{TRUE}.}
}
\value{
gsplot
}
\description{
Used to change the class of inputs to "gsplot".
}
\examples{
gsplot()
gsplot(theme = theme.hadley)
gs_config <- gsplot(config.file =
system.file("extdata", "lineScatter.yaml", package = "gsplot")) \%>\%
lines(1:10, 1:10)
gs_config
gs <- gsplot(theme = theme.hadley) \%>\%
points(1:10, 1:10, xlab="Index")
gs
}
| /man/gsplot.Rd | permissive | USGS-R/gsplot | R | false | true | 1,824 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsplot-class.R
\name{gsplot}
\alias{gsplot}
\alias{gsplot.default}
\alias{gsplot.list}
\title{gsplot}
\usage{
gsplot(x = NULL, ...)
\method{gsplot}{default}(..., created = Sys.Date(),
gsplot.version = packageDescription(getPackageName(), fields = "Version"),
config.file = NA, theme = NA, frame.plot = TRUE)
\method{gsplot}{list}(x)
}
\arguments{
\item{x}{list}
\item{\dots}{Further graphical parameters may also be supplied as arguments. See 'Details'.}
\item{created}{vector of length one giving the date the gsplot object was created. Defaults to
using \code{Sys.Date()}. Output class matches that of the input.}
\item{gsplot.version}{vector of length one giving the version of the gsplot package used to create the
object. Defaults to calling \code{packageDescription()}. Output class matches that of the input.}
\item{config.file}{path to the file that will only be used for setting
par in this one gsplot object. If \code{NA} (default), par is set by the global options set by
load_config().}
\item{theme}{There are several built-in themes (see \link{Themes}). Additionally, the user can create a \code{gsplot}
object in their workspace. This argument then takes the name of that object (either built-in or custom).
If \code{NA} (default), no theme is used.}
\item{frame.plot}{a logical indicating whether a box should be drawn around the plot. Default is \code{TRUE}.}
}
\value{
gsplot
}
\description{
Used to change the class of inputs to "gsplot".
}
\examples{
gsplot()
gsplot(theme = theme.hadley)
gs_config <- gsplot(config.file =
system.file("extdata", "lineScatter.yaml", package = "gsplot")) \%>\%
lines(1:10, 1:10)
gs_config
gs <- gsplot(theme = theme.hadley) \%>\%
points(1:10, 1:10, xlab="Index")
gs
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_x_density.R
\name{calc_x_density}
\alias{calc_x_density}
\title{Calculate the density for x given parameterization theta_x}
\usage{
calc_x_density(x, theta_x)
}
\arguments{
\item{x}{A vector of ages at which to calculate the density}
\item{theta_x}{List with the fit type and parameter vector}
}
\value{
The vector of densities
}
\description{
\code{calc_x_density} calculates the density at x given the parameterization theta_x for the density of x. Currently, the exponential distribution, Weibull mixtures, and the uniform distribution are supported.
}
\author{
Michael Holton Price <MichaelHoltonPrice@gmail.com>
}
| /man/calc_x_density.Rd | permissive | eehh-stanford/yada | R | false | true | 694 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_x_density.R
\name{calc_x_density}
\alias{calc_x_density}
\title{Calculate the density for x given parameterization theta_x}
\usage{
calc_x_density(x, theta_x)
}
\arguments{
\item{x}{A vector of ages at which to calculate the density}
\item{theta_x}{List with the fit type and parameter vector}
}
\value{
The vector of densities
}
\description{
\code{calc_x_density} calculates the density at x given the parameterization theta_x for the density of x. Currently, the exponential distribution, Weibull mixtures, and the uniform distribution are supported.
}
\author{
Michael Holton Price <MichaelHoltonPrice@gmail.com>
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{lcs}
\alias{lcs}
\title{Compute the length of the longest common subsequence of two numeric vectors.}
\usage{
lcs(first, second, trace = FALSE)
}
\arguments{
\item{first}{numeric vector}
\item{second}{numeric vector}
\item{trace}{logical; whether to emit trace output (default \code{FALSE})}
}
\value{
length of the lcs
}
\description{
Compute the length of the longest common subsequence of two numeric vectors.
}
\examples{
lcs(charToRaw("GAGAGTAGATAG"), charToRaw("ATA"))
lcs(charToRaw("FOOBAR"), charToRaw("BARFOO"))
}
| /man/lcs.Rd | permissive | jtattersall09403/r-snippets | R | false | false | 576 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{lcs}
\alias{lcs}
\title{Compute the length of the longest common subsequence of two numeric vectors.}
\usage{
lcs(first, second, trace = FALSE)
}
\arguments{
\item{first}{numeric vector}
\item{second}{numeric vector}
\item{trace}{logical; whether to emit trace output (default \code{FALSE})}
}
\value{
length of the lcs
}
\description{
Compute the length of the longest common subsequence of two numeric vectors.
}
\examples{
lcs(charToRaw("GAGAGTAGATAG"), charToRaw("ATA"))
lcs(charToRaw("FOOBAR"), charToRaw("BARFOO"))
}
|
\name{fac.ar1mat}
\alias{fac.ar1mat}
\title{forms the ar1 correlation matrix for a (generalized) factor}
\description{Form the correlation matrix for a (generalized) factor where the
correlation between the levels follows an autocorrelation of order 1 (ar1)
pattern.}
\usage{fac.ar1mat(factor, rho)}
\arguments{
\item{factor}{The (generalized) \code{\link{factor}} for which the correlation between
its levels displays an ar1 pattern.}
\item{rho}{The correlation parameter for the ar1 process.}
}
\value{An n x n \code{\link{matrix}}, where n is the length of the
\code{\link{factor}}.}
\details{
The method is:
a) form an \code{n x n} matrix of all pairwise differences in the numeric values
corresponding to the observed levels of the factor by taking the
   difference between the following two n x n matrices: 1) each row
contains the numeric values corresponding to the observed levels of the
factor, and 2) each column contains the numeric values corresponding to
the observed levels of the factor,
b) replace each element of the pairwise difference matrix with rho raised to
the absolute value of the difference.
}
\author{Chris Brien}
\seealso{\code{\link{fac.vcmat}}, \code{\link{fac.meanop}},
\code{\link{fac.sumop}} in package \pkg{dae}.}
\examples{
## set up a two-level factor and a three-level factor, both of length 12
A <- factor(rep(1:2, each=6))
B <- factor(rep(1:3, each=2, times=2))
## create a 12 x 12 ar1 matrix corresponding to B
ar1.B <- fac.ar1mat(B, 0.6)
}
\keyword{array}
| /man/fac.ar1mat.Rd | no_license | cran/dae | R | false | false | 1,628 | rd | \name{fac.ar1mat}
\alias{fac.ar1mat}
\title{forms the ar1 correlation matrix for a (generalized) factor}
\description{Form the correlation matrix for a (generalized) factor where the
correlation between the levels follows an autocorrelation of order 1 (ar1)
pattern.}
\usage{fac.ar1mat(factor, rho)}
\arguments{
\item{factor}{The (generalized) \code{\link{factor}} for which the correlation between
its levels displays an ar1 pattern.}
\item{rho}{The correlation parameter for the ar1 process.}
}
\value{An n x n \code{\link{matrix}}, where n is the length of the
\code{\link{factor}}.}
\details{
The method is:
a) form an \code{n x n} matrix of all pairwise differences in the numeric values
corresponding to the observed levels of the factor by taking the
   difference between the following two n x n matrices: 1) each row
contains the numeric values corresponding to the observed levels of the
factor, and 2) each column contains the numeric values corresponding to
the observed levels of the factor,
b) replace each element of the pairwise difference matrix with rho raised to
the absolute value of the difference.
}
\author{Chris Brien}
\seealso{\code{\link{fac.vcmat}}, \code{\link{fac.meanop}},
\code{\link{fac.sumop}} in package \pkg{dae}.}
\examples{
## set up a two-level factor and a three-level factor, both of length 12
A <- factor(rep(1:2, each=6))
B <- factor(rep(1:3, each=2, times=2))
## create a 12 x 12 ar1 matrix corresponding to B
ar1.B <- fac.ar1mat(B, 0.6)
}
\keyword{array}
|
setwd("~/Desktop/coursera/R/data")
# Mean of one pollutant ("sulfate" or "nitrate") across the monitor files
# <directory>/<NNN>.csv for the requested ids, ignoring missing values.
#
# Args:
#   directory: path containing the monitor CSV files.
#   pollutant: name of the column to average.
#   id: integer vector of monitor ids (file names are zero-padded, "001.csv").
# Returns a single numeric mean (NaN when no non-missing value exists).
pollutantmean <- function(directory, pollutant, id=1:332){
  # Collect the non-missing readings per file and average once at the end;
  # this avoids the O(n^2) cost of growing a vector with c() in a loop.
  values <- lapply(id, function(idx) {
    filename <- paste(directory, sprintf("%03d.csv", idx), sep='/')
    filedata <- read.csv(filename)[, pollutant]
    filedata[!is.na(filedata)]
  })
  mean(unlist(values))
}
# Count the complete (no-NA) observations in each requested monitor file.
#
# Args:
#   directory: path containing the monitor CSV files.
#   id: integer vector of monitor ids.
# Returns a data frame with columns `id` and `nobs`, one row per id in the
# order given -- same shape as the original rbind-in-a-loop output.
complete <- function(directory, id=1:332) {
  # vapply guarantees one integer per file and avoids re-copying a growing
  # data frame with rbind() on every iteration (quadratic in length(id)).
  nobs <- vapply(id, function(idx) {
    filename <- paste(directory, sprintf("%03d.csv", idx), sep='/')
    filedata <- read.csv(filename)
    # complete.cases() flags rows with no missing value in any column.
    sum(complete.cases(filedata))
  }, integer(1))
  data.frame(id = id, nobs = nobs)
}
# Correlation between sulfate and nitrate for each monitor whose number of
# complete observations is at least `threshold`.
#
# Args:
#   directory: path containing the monitor CSV files.
#   threshold: minimum number of complete rows a monitor needs to qualify.
#   id: integer vector of monitor ids to scan. Defaults to the full 1:332
#       range the original hardcoded, so existing callers are unchanged.
# Returns a numeric vector of correlations, or NULL when no monitor
# qualifies (matching the original grow-a-vector implementation).
corr <- function(directory, threshold=0, id=1:332) {
  cors <- lapply(id, function(idx) {
    filename <- paste(directory, sprintf("%03d.csv", idx), sep='/')
    filedata <- read.csv(filename)
    filedata <- filedata[complete.cases(filedata), ]
    if (nrow(filedata) >= threshold && nrow(filedata) > 0) {
      cor(filedata$sulfate, filedata$nitrate)
    } else {
      NULL  # dropped by unlist() below
    }
  })
  unlist(cors)
}
| /R/programming_assignment1.R | no_license | AntoniRamon/datasciencecoursera | R | false | false | 1,106 | r | setwd("~/Desktop/coursera/R/data")
# Mean of one pollutant ("sulfate" or "nitrate") across the monitor files
# <directory>/<id>.csv, with missing observations dropped before averaging.
pollutantmean <- function(directory, pollutant, id=1:332){
  data <- c()
  for (idx in id) {
    # Monitor files are zero-padded to three digits, e.g. "001.csv".
    filename <- paste(directory, sprintf("%03d.csv", idx), sep='/')
    filedata <- read.csv(filename)[, pollutant]
    filedata <- filedata[!is.na(filedata)]
    # NOTE(review): growing a vector with c() inside a loop is O(n^2);
    # collecting with lapply() + unlist() would be linear.
    data <- c(data, filedata)
  }
  mean(data)
}
# Count the complete (no-NA) observations in each requested monitor file,
# returned as a data frame with one row per monitor id.
complete <- function(directory, id=1:332) {
  response <- data.frame()
  for (idx in id) {
    filename <- paste(directory, sprintf("%03d.csv", idx), sep='/')
    filedata <- read.csv(filename)
    # complete.cases() flags rows with no missing value in any column.
    filedata <- filedata[complete.cases(filedata), ]
    # NOTE(review): rbind() inside a loop re-copies the frame each pass;
    # building a list and binding once would be linear instead.
    response <- rbind(response, c(idx, nrow(filedata)))
  }
  names(response) <- c("id", "nobs")
  response
}
# Correlation between sulfate and nitrate for every monitor whose number of
# complete observations meets `threshold`; returns a numeric vector, or
# NULL when no monitor qualifies.
corr <- function(directory, threshold=0) {
  response <- c()
  # NOTE(review): the monitor range 1:332 is hardcoded here (pollutantmean
  # and complete take an `id` argument) -- all 332 files must exist.
  for (idx in 1:332) {
    filename <- paste(directory, sprintf("%03d.csv", idx), sep='/')
    filedata <- read.csv(filename)
    filedata <- filedata[complete.cases(filedata), ]
    if (nrow(filedata) >= threshold && nrow(filedata) > 0) {
      cr <- cor(filedata$sulfate, filedata$nitrate)
      response <- c(response, cr)
    }
  }
  response
}
|
# QualityControlStats.R
# Geoffrey Hannigan
# Schloss Lab
# University of Michigan
###################
# Set Environment #
###################
# Progress messages go to stderr so they do not pollute piped stdout.
write("PROGRESS: Calculating QC Stats", stderr())
library("optparse")
library("ggplot2")
library("wesanderson")
library("cowplot")
# Command-line interface definition.
option_list <- list(
  make_option(c("-i", "--input"),
    type = "character",
    default = NULL,
    help = "Input count file.",
    metavar = "character"), make_option(c("-m", "--metadata"),
    type = "character",
    default = NULL,
    help = "Metadata table with disease states.",
    metavar = "character"),
  make_option(c("-o", "--out"),
    type = "character",
    default = NULL,
    help = "Output pdf to draw resulting plot.",
    metavar = "character"),
  make_option(c("-s", "--sdepth"),
    type = "integer",
    default = 10000,
    help = "Subsampling depth.",
    metavar = "character")
)
opt_parser <- OptionParser(option_list = option_list);
opt <- parse_args(opt_parser);
# NOTE(review): `opt` is parsed but never used below -- the input and
# output paths are hardcoded. Confirm whether the CLI options should
# drive them instead.
################
# Run Analysis #
################
# Per-sample sequence depth (no header: V1 = count, V2 = sample id,
# judging by the merge and aes() calls below).
input <- read.delim("./data/ProjectSeqDepth.tsv", header=FALSE, sep="\t")
# Keep sample id (V2), DNA yield (V26), V27 (selected but unused below?)
# and disease state (V30).
metadata <- read.delim("./data/metadata/NexteraXT003Map.tsv", header=FALSE, sep="\t")[,c(2,26,27,30)]
head(metadata)
# DNA concentration
# Panel A: DNA yield per sample as horizontal bars, colored by disease.
dnaconcplot <- ggplot(metadata, aes(x = V2, y = V26, fill = V30)) +
  theme_classic() +
  theme(
    axis.text.y=element_blank(),
    axis.ticks.y=element_blank(),
    legend.title=element_blank(),
    legend.position="none"
  ) +
  geom_bar(stat="identity") +
  coord_flip() +
  scale_fill_manual(values = c(wes_palette("Royal1")[c(1,2,4)], "lightblue"), name = "Disease") +
  ylab("VLP Genomic DNA Yield (ng/uL)") +
  xlab("Prepared Samples")
# Sampling Depth
# Panel B: sequence counts per sample, joined to the metadata on sample id.
inputmerge <- merge(input, metadata, by.x="V2", by.y="V2")
depthplot <- ggplot(inputmerge, aes(x = V2, y = V1, fill = V30)) +
  theme_classic() +
  theme(
    axis.text.y=element_blank(),
    axis.ticks.y=element_blank(),
    legend.title=element_blank()
  ) +
  geom_bar(stat="identity") +
  coord_flip() +
  scale_fill_manual(values = c(wes_palette("Royal1")[c(1,2,4)], "lightblue"), name = "Disease") +
  ylab("VLP Sequence Count Yield") +
  xlab("") +
  # Dashed reference line at one million reads.
  geom_hline(yintercept = 1000000, linetype = "dashed")
# Combine the two panels side by side, then write them to a PDF.
gridplot <- plot_grid(dnaconcplot, depthplot, labels = c("A", "B"))
gridplot
pdf("./figures/qualitycontrol.pdf", height=4, width=10)
gridplot
dev.off()
| /bin/QualityControlStats.R | permissive | skjq/Hannigan_CRCVirome_mBio_2018 | R | false | false | 2,386 | r | # QualityControlStats.R
# Geoffrey Hannigan
# Schloss Lab
# University of Michigan
###################
# Set Environment #
###################
write("PROGRESS: Calculating QC Stats", stderr())
library("optparse")
library("ggplot2")
library("wesanderson")
library("cowplot")
option_list <- list(
make_option(c("-i", "--input"),
type = "character",
default = NULL,
help = "Input count file.",
metavar = "character"), make_option(c("-m", "--metadata"),
type = "character",
default = NULL,
help = "Metadata table with disease states.",
metavar = "character"),
make_option(c("-o", "--out"),
type = "character",
default = NULL,
help = "Output pdf to draw resulting plot.",
metavar = "character"),
make_option(c("-s", "--sdepth"),
type = "integer",
default = 10000,
help = "Subsampling depth.",
metavar = "character")
)
opt_parser <- OptionParser(option_list = option_list);
opt <- parse_args(opt_parser);
################
# Run Analysis #
################
input <- read.delim("./data/ProjectSeqDepth.tsv", header=FALSE, sep="\t")
metadata <- read.delim("./data/metadata/NexteraXT003Map.tsv", header=FALSE, sep="\t")[,c(2,26,27,30)]
head(metadata)
# DNA concentration
dnaconcplot <- ggplot(metadata, aes(x = V2, y = V26, fill = V30)) +
theme_classic() +
theme(
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
legend.title=element_blank(),
legend.position="none"
) +
geom_bar(stat="identity") +
coord_flip() +
scale_fill_manual(values = c(wes_palette("Royal1")[c(1,2,4)], "lightblue"), name = "Disease") +
ylab("VLP Genomic DNA Yield (ng/uL)") +
xlab("Prepared Samples")
# Sampling Depth
inputmerge <- merge(input, metadata, by.x="V2", by.y="V2")
depthplot <- ggplot(inputmerge, aes(x = V2, y = V1, fill = V30)) +
theme_classic() +
theme(
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
legend.title=element_blank()
) +
geom_bar(stat="identity") +
coord_flip() +
scale_fill_manual(values = c(wes_palette("Royal1")[c(1,2,4)], "lightblue"), name = "Disease") +
ylab("VLP Sequence Count Yield") +
xlab("") +
geom_hline(yintercept = 1000000, linetype = "dashed")
gridplot <- plot_grid(dnaconcplot, depthplot, labels = c("A", "B"))
gridplot
pdf("./figures/qualitycontrol.pdf", height=4, width=10)
gridplot
dev.off()
|
# Quick look at the diamonds data: load it, draw carat vs price as a
# hex-binned density, and export both the figure and the raw table.
library(tidyverse)
data("diamonds")
# hexbin backs ggplot2's geom_hex().
library(hexbin)
ggplot(diamonds,aes(carat,price))+geom_hex()
# ggsave() writes the most recently displayed plot.
ggsave("diamonds.jpg")
write_csv(diamonds,"diamonds.csv")
# view() opens the table in the data viewer (interactive sessions only).
view(diamonds)
| /Programacion_en_R/MI PRIMER SCRIPT.R | no_license | jbartolo97/MasterDataScienceCunef2019 | R | false | false | 170 | r | library(tidyverse)
data("diamonds")
library(hexbin)
ggplot(diamonds,aes(carat,price))+geom_hex()
ggsave("diamonds.jpg")
write_csv(diamonds,"diamonds.csv")
view(diamonds)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parametry_zadan.R
\name{parametry_zadan}
\alias{parametry_zadan}
\title{Parametry zadan}
\usage{
parametry_zadan(x, maks = NULL, min = NULL, na.rm = TRUE,
verbose = TRUE)
}
\arguments{
\item{x}{macierz typu \code{numeric} lub ramka danych (data frame)
zawierająca zmienne typu \code{numeric}}
\item{maks}{opcjonalnie wektor liczb całkowitych opisujący maksymalną
liczbę puntków możliwych do uzyskania za poszczególne zadania}
\item{min}{opcjonalnie wektor liczb całkowitych opisujący minimalną
wartość, jaką może przyjąć wynik poszczególnych zadań}
\item{na.rm}{wartość logiczna - czy przy obliczeniach ignorować braki danych}
\item{verbose}{wartość logiczna - czy wydrukować wyniki analizy}
}
\value{
Funkcja zwraca milcząco listę z parametrami zadań.
}
\description{
Funkcja szacuje parametry zadań: łatwości/trudności, moc
różnicującą, rzetelność testu z wyłączeniem zadania.
}
\examples{
parametry_zadan(wynikiSymTest)
}
\seealso{
\code{\link{latwosc}}, \code{\link{moc_roznicujaca}},
\code{\link{alfa_c}}, \code{\link{wykres_tmr}}
}
| /man/parametry_zadan.Rd | permissive | tzoltak/KTT | R | false | true | 1,163 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parametry_zadan.R
\name{parametry_zadan}
\alias{parametry_zadan}
\title{Parametry zadan}
\usage{
parametry_zadan(x, maks = NULL, min = NULL, na.rm = TRUE,
verbose = TRUE)
}
\arguments{
\item{x}{macierz typu \code{numeric} lub ramka danych (data frame)
zawierająca zmienne typu \code{numeric}}
\item{maks}{opcjonalnie wektor liczb całkowitych opisujący maksymalną
liczbę puntków możliwych do uzyskania za poszczególne zadania}
\item{min}{opcjonalnie wektor liczb całkowitych opisujący minimalną
wartość, jaką może przyjąć wynik poszczególnych zadań}
\item{na.rm}{wartość logiczna - czy przy obliczeniach ignorować braki danych}
\item{verbose}{wartość logiczna - czy wydrukować wyniki analizy}
}
\value{
Funkcja zwraca milcząco listę z parametrami zadań.
}
\description{
Funkcja szacuje parametry zadań: łatwości/trudności, moc
różnicującą, rzetelność testu z wyłączeniem zadania.
}
\examples{
parametry_zadan(wynikiSymTest)
}
\seealso{
\code{\link{latwosc}}, \code{\link{moc_roznicujaca}},
\code{\link{alfa_c}}, \code{\link{wykres_tmr}}
}
|
library(crayon)
get_symbols <- function() {
  # One pull of the slot machine: draw three symbols independently, with
  # replacement, using the machine's weighted odds per symbol.
  symbols <- c('DD', '7', 'BBB', 'BB', 'B', 'C', '0')
  odds <- c(0.03, 0.03, 0.06, 0.1, 0.25, 0.01, 0.52)
  sample(symbols, size = 3, replace = TRUE, prob = odds)
}
score <- function(symbols){
  # Prize for one pull of three symbols. Three-of-a-kind pays a fixed
  # amount per symbol, all bars pays 5, otherwise cherries pay; each
  # diamond ("DD") then doubles the prize.
  payouts <- c("DD" = 100, "7" = 80, "BBB" = 40, "BB" = 25,
               "B" = 10, "C" = 10, "0" = 0)
  all_same <- length(unique(symbols)) == 1
  all_bars <- all(symbols %in% c('B', 'BB', 'BBB'))
  if (all_same) {
    prize <- unname(payouts[symbols[1]])
  } else if (all_bars) {
    prize <- 5
  } else {
    # 0, 1 or 2 cherries pay 0, 2 or 5 respectively.
    n_cherries <- sum(symbols == 'C')
    prize <- c(0, 2, 5)[n_cherries + 1]
  }
  # Each diamond doubles the prize.
  prize * 2 ^ sum(symbols == 'DD')
}
slot_display <- function(prize){
  # Pretty-print a slot result: the three symbols on one line, then the
  # prize amount and a "###" footer, each prefixed with "$".
  reels <- paste(attr(prize, 'symbols'), collapse = " ")
  cat(paste(reels, prize, '###', sep = "\n$"))
}
# S3 print method for class "slots": delegate to slot_display() so results
# render as a slot-machine readout instead of a bare number.
print.slots <- function(x, ...) {
  slot_display(x)
}
# Play one round: spin the wheel, score it, and return a "slots" object
# that carries the drawn symbols as an attribute so print.slots() can
# display them alongside the prize.
play <- function() {
  symbols <- get_symbols()
  # prize <- score(symbols)
  # attr(prize, 'symbols') <- symbols
  # prize
  structure(score(symbols), symbols = symbols, class = 'slots')
}
| /03Hands-On Programming with R/ch8S3/i5add_slots_to_play.R | no_license | greatabel/RStudy | R | false | false | 1,460 | r | library(crayon)
# Spin the slot machine: draw three symbols (with replacement) from the
# wheel, using the machine's weighted odds per symbol.
get_symbols <- function() {
  wheel <- c('DD', '7', 'BBB', 'BB', 'B', 'C', '0')
  sample(wheel, size = 3, replace = TRUE,
         prob = c(0.03, 0.03, 0.06, 0.1, 0.25, 0.01, 0.52))
}
# Prize for one pull of three symbols: three-of-a-kind pays a fixed amount
# per symbol, all bars pays 5, otherwise cherries pay; each diamond ("DD")
# then doubles the prize.
score <- function(symbols){
  # Identify the hand: three identical symbols, or bar symbols.
  same <- symbols[1] == symbols[2] && symbols[2] == symbols[3]
  bars <- symbols %in% c('B', 'BB', 'BBB')
  # Compute the base prize.
  if (same) {
    payouts <- c("DD" = 100,
                 "7" = 80,
                 "BBB" = 40,
                 "BB" = 25,
                 "B" = 10,
                 "C" = 10,
                 "0" = 0
    )
    prize <- unname(payouts[symbols[1]])
  } else if (all(bars)) {
    prize <- 5
  } else {
    # 0, 1 or 2 cherries pay 0, 2 or 5 respectively.
    cherries <- sum(symbols == 'C')
    prize <- c(0, 2, 5)[cherries + 1]
  }
  # Adjust the prize by the number of diamonds: each one doubles it.
  diamonds <- sum(symbols == 'DD')
  prize * 2 ^ diamonds
}
# Pretty-print a slot result: the three symbols on one line, then the
# prize amount and a "###" footer, each prefixed with "$".
slot_display <- function(prize){
  # Extract the symbols attached to the prize by play().
  symbols <- attr(prize, 'symbols')
  # Collapse the three symbols into a single display string.
  symbols <- paste(symbols, collapse = " ")
  string <- paste(symbols, prize, '###', sep = "\n$")
  cat(string)
}
print.slots <- function(x, ...) {
slot_display(x)
}
play <- function() {
symbols <- get_symbols()
# prize <- score(symbols)
# attr(prize, 'symbols') <- symbols
# prize
structure(score(symbols), symbols = symbols, class = 'slots')
}
|
# Wrapper that builds bandwidth / threshold inputs and delegates the actual
# adaptive smoothing to aspV2NYU() (defined elsewhere in this package).
# Returns list(fnlBeta = ..., test = ...) taken from components 1 and 3 of
# aspV2NYU()'s result.
# NOTE(review): argument semantics are inferred from names only -- covEvector
# / covEvalue look like a covariance eigen-decomposition and the vxl*
# arguments like voxel neighbourhood structure; confirm against aspV2NYU().
smoothParameterRealdata<-function(mxBeta,covEvector,vxlNberSeq,vxlNbySeqid,vxlDistSeq,xMatrix,covEvalue,sigmaError)
{
  result=list(fnlBeta=0,test=0);
  inlBeta = t(mxBeta);
  xDesign = xMatrix;
  # Candidate bandwidths: geometric grid h = 1.1^s for s = 1..10.
  ch = 1.1;
  ss = 1:10;
  hSeq = ch^ss;
  # Chi-square thresholds for the later smoothing steps (s = 4..10).
  chiSeq =qchisq(0.8/(ss[4:10]-2),1);
  n = dim(xDesign)[1];
  # Critical value scaled by log(sample size).
  Cn =log(n)*qchisq(0.95,1);
  IXX=solve(t(xDesign)%*%xDesign);  # (X'X)^{-1}
  result1=aspV2NYU(inlBeta,covEvector,hSeq,vxlNberSeq,vxlNbySeqid,vxlDistSeq,xDesign,chiSeq,sigmaError,Cn,IXX,covEvalue);
  result[[1]]=result1[[1]];
  result[[2]]=result1[[3]];
  result
} | /R/smoothParameterRealdata.R | no_license | BIG-S2/SVCM | R | false | false | 583 | r | smoothParameterRealdata<-function(mxBeta,covEvector,vxlNberSeq,vxlNbySeqid,vxlDistSeq,xMatrix,covEvalue,sigmaError)
{
  # Builds the bandwidth grid (1.1^(1:10)) and chi-square/critical-value
  # thresholds, then delegates the smoothing to the external aspV2NYU().
  result=list(fnlBeta=0,test=0);
  inlBeta = t(mxBeta);
  xDesign = xMatrix;
  ch = 1.1;
  ss = 1:10;
  hSeq = ch^ss;
  chiSeq =qchisq(0.8/(ss[4:10]-2),1);
  n = dim(xDesign)[1];
  Cn =log(n)*qchisq(0.95,1);
  IXX=solve(t(xDesign)%*%xDesign);  # (X'X)^{-1}
  result1=aspV2NYU(inlBeta,covEvector,hSeq,vxlNberSeq,vxlNbySeqid,vxlDistSeq,xDesign,chiSeq,sigmaError,Cn,IXX,covEvalue);
  result[[1]]=result1[[1]];
  # Component 3 of aspV2NYU()'s result holds the test statistics.
  result[[2]]=result1[[3]];
  result
} |
##############################################################
##################### Install packages #######################
##############################################################
# Install a package (one-time per machine; comment out afterwards)
install.packages("lme4")
# Load a package (needed in every session)
library(lme4)
##############################################################
##################### Organize dataFrame #####################
##############################################################
# Your data frame should always be in a "long" format,
# i.e. each line is one observation.
# You should have
#  subject sex condition measurement
#        1   M   control         7.9
#        1   M     cond1        12.3
#        1   M     cond2        10.7
#        2   F   control         6.3
#        2   F     cond1        10.6
#        2   F     cond2        11.1
#        3   F   control         9.5
#        3   F     cond1        13.1
#        3   F     cond2        13.8
#        4   M   control        11.5
#        4   M     cond1        13.4
#        4   M     cond2        12.9
# and NOT :
#  subject sex control cond1 cond2
#        1   M     7.9  12.3  10.7
#        2   F     6.3  10.6  11.1
#        3   F     9.5  13.1  13.8
#        4   M    11.5  13.4  12.9
# To go from wide to long, you can :
library(tidyr) # You need the "tidyr" library
# The arguments to gather():
# - data: Data object
# - key: Name of new key column (made from names of data columns)
# - value: Name of new value column
# - ...: Names of source columns that contain values
# - factor_key: Treat the new key column as a factor (instead of character vector)
# FIX: the source columns must be separate strings; the original
# c('control, cond1, cond2') was a single (nonexistent) column name.
df_long = gather(df_bad, key=condition, value=measurement,
                 c("control", "cond1", "cond2"), factor_key=TRUE)
##############################################################
##################### Load a dataFrame #######################
##############################################################
# Set working directory
dname = "/media/jacques/DATA/2019_statsLearning" # Include your path
setwd(dname)
# Load using read.csv for CSV files
fname = "dataFrame.csv" # Include your file name
sep = "," # By default the separator is ","
df = read.csv(fname, sep = sep)
# Or load using read.xls for XLS files
# NOTE(review): read.xls() comes from the "gdata" package -- run
# library(gdata) before calling it.
fname = "dataFrame.xls" # Include your file name
sheet = 1
df = read.xls(fname, sheet = sheet)
# Or load using read.xlsx for XLSX files
library(xlsx) # You need the "xlsx" library
fname = "dataFrame.xlsx" # Include your file name
# FIX: sheetName was used below without ever being defined, which errors.
sheetName = "Sheet1" # Name of the worksheet to read
df = read.xlsx(fname, sheetName = sheetName)
##############################################################
######### For this tutorial, a preloaded dataset #############
##############################################################
library(MASS)
data("birthwt")
df = birthwt
##############################################################
##################### Explore dataFrame ######################
##############################################################
# Access lines (rows)
df[4,] # Line
df[1:10,] # Multiple lines
df[df$age < 16,] # Specific lines (logical filter)
# Access one column
df[,2]
df$age
# Access one data point
df[4, 2]
df[4,]$age
# Summary of the model columns
summary(df) # Print summary of all variables
head(df, 4) # Show the first 4 lines
# Get data class
class(df$race)
# From class numeric to class factor
df$race_factor = factor(df$race, labels = c("white", "black", "other"))
df$smoke_factor = factor(df$smoke, labels = c('non-smoker', 'smoker'))
# levels(df$condition_factor) = c('Control', 'Experimental') # Change factor order
# From class factor to class numeric
# NOTE(review): birthwt has no "session" column -- this line is a template
# example and will error if run as-is on this dataset.
df$session = as.numeric(as.character(df$session)) # Example
##############################################################
#################### Summarize dataFrame #####################
##############################################################
# Aggregate data (average of bwt within each level of low)
FUN = mean # Aggregate using averaging
df_average = aggregate(bwt ~ low, data=df, FUN=FUN)
df_average
# Using summarySE (adds N, sd, se and confidence interval)
library(Rmisc) # You need the "Rmisc" library
df_average = summarySE(measurevar="bwt", groupvars=c("smoke_factor"), data=df)
df_average
##############################################################
################# Basic plotting functions ###################
##############################################################
# Basic plotting
plot(df_average$smoke_factor, df_average$bwt)
# Better plotting
library(ggplot2) # You need the "ggplot2" library
colors = c("red", "#2371AE") # Define colors by group (see http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf)
p <- ggplot(data=df, aes(x=smoke_factor, y=bwt)) + # Basic data
  geom_boxplot(fill=colors) + # Boxplot
  geom_jitter(width=0.2, size=1) + # Add points and jitter
  scale_x_discrete(name = "Smoking status") + # x-axis name
  scale_y_continuous(name = "Birth weight (g)") + # y-axis name
  ggtitle("Birth weight \n by smoking status") + # Title
  theme_bw() + # Remove background
  theme(panel.grid.major = element_blank(), # Esthetics ++
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        plot.title = element_text(size=14, face="bold", hjust=0.5),
        axis.line = element_line(size=0.5, colour="black"))
p
# Save plot (bmp device: open, print, close)
bmp('bwt_smoke.bmp')
p
dev.off()
| /1_basicManipulations.R | no_license | DCP-INS/basic_R_scripts | R | false | false | 5,365 | r |
##############################################################
##############################################################
##################### Install packages #######################
##############################################################
# Install a package (one-time; comment out after the first run)
install.packages("lme4")
# Load a package
library(lme4)
##############################################################
##################### Organize dataFrame #####################
##############################################################
# Your data frame should always be in a "long" format,
# i.e. each line is one observation.
# You should have
#  subject sex condition measurement
#        1   M   control         7.9
#        1   M     cond1        12.3
#        1   M     cond2        10.7
#        2   F   control         6.3
#        2   F     cond1        10.6
#        2   F     cond2        11.1
#        3   F   control         9.5
#        3   F     cond1        13.1
#        3   F     cond2        13.8
#        4   M   control        11.5
#        4   M     cond1        13.4
#        4   M     cond2        12.9
# and NOT :
#  subject sex control cond1 cond2
#        1   M     7.9  12.3  10.7
#        2   F     6.3  10.6  11.1
#        3   F     9.5  13.1  13.8
#        4   M    11.5  13.4  12.9
# To go from wide to long, you can :
library(tidyr) # You need the "tidyr" library
# The arguments to gather():
# - data: Data object
# - key: Name of new key column (made from names of data columns)
# - value: Name of new value column
# - ...: Names of source columns that contain values
# - factor_key: Treat the new key column as a factor (instead of character vector)
# NOTE(review): c('control, cond1, cond2') is ONE string, not three column
# names -- it should probably be c('control', 'cond1', 'cond2').
df_long = gather(df_bad, key=condition, value=measurement,
                 c('control, cond1, cond2'), factor_key=TRUE)
##############################################################
##################### Load a dataFrame #######################
##############################################################
# Set working directory
dname = "/media/jacques/DATA/2019_statsLearning" # Include your path
setwd(dname)
# Load using read.csv for CSV files
fname = "dataFrame.csv" # Include your file name
sep = "," # By default the separator is ","
df = read.csv(fname, sep = sep)
# Or load using read.xls (gdata package) for XLS files
fname = "dataFrame.xls" # Include your file name
sheet = 1
df = read.xls(fname, sheet = sheet)
# Or load using read.xlsx for XLSX files
library(xlsx) # You need the "xlsx" library
fname = "dataFrame.xlsx" # Include your file name
# NOTE(review): sheetName is never defined before this call; set it first,
# e.g. sheetName = "Sheet1".
df = read.xlsx(fname, sheetName = sheetName)
##############################################################
######### For this tutorial, a preloaded dataset #############
##############################################################
library(MASS)
data("birthwt")
df = birthwt
##############################################################
##################### Explore dataFrame ######################
##############################################################
# Access lines
df[4,] # Line
df[1:10,] # Multiple lines
df[df$age < 16,] # Specific lines
# Access one column
df[,2]
df$age
# Access one data point
df[4, 2]
df[4,]$age
# Summary of the model columns
summary(df) # Print summary of all variables
head(df, 4) # Show the first 4 lines
# Get data class
class(df$race)
# From class numeric to class factor
df$race_factor = factor(df$race, labels = c("white", "black", "other"))
df$smoke_factor = factor(df$smoke, labels = c('non-smoker', 'smoker'))
# levels(df$condition_factor) = c('Control', 'Experimental') # Change factor order
# From class factor to class numeric
# NOTE(review): birthwt has no "session" column; template example only.
df$session = as.numeric(as.character(df$session)) # Example
##############################################################
#################### Summarize dataFrame #####################
##############################################################
# Aggregate data (average)
FUN = mean # Aggregate using averaging
df_average = aggregate(bwt ~ low, data=df, FUN=FUN)
df_average
# Using summarySE
library(Rmisc) # You need the "Rmisc" library
df_average = summarySE(measurevar="bwt", groupvars=c("smoke_factor"), data=df)
df_average
##############################################################
################# Basic plotting functions ###################
##############################################################
# Basic plotting
plot(df_average$smoke_factor, df_average$bwt)
# Better plotting
library(ggplot2) # You need the "ggplot2" library
colors = c("red", "#2371AE") # Define colors by group (see http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf)
p <- ggplot(data=df, aes(x=smoke_factor, y=bwt)) + # Basic data
  geom_boxplot(fill=colors) + # Boxplot
  geom_jitter(width=0.2, size=1) + # Add points and jitter
  scale_x_discrete(name = "Smoking status") + # x-axis name
  scale_y_continuous(name = "Birth weight (g)") + # y-axis name
  ggtitle("Birth weight \n by smoking status") + # Title
  theme_bw() + # Remove background
  theme(panel.grid.major = element_blank(), # Esthetics ++
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        plot.title = element_text(size=14, face="bold", hjust=0.5),
        axis.line = element_line(size=0.5, colour="black"))
p
# Save plot
bmp('bwt_smoke.bmp')
p
dev.off()
testlist <- list(x = c(NaN, 2.55883663077293e-307, -1.47411866582824e-154, 9.74576447753751e-113, 5.07588390157017e-116, -5.15256539533595e-36 ), y = c(2.33251001679308e-308, -4.19987381709656e-140, NaN, NaN, NaN, -1.27860979078323e+306, 1.60078058684869e-112, 8.98637298518713e-243, 1.6690200483343e-308, -7.98824203857711e-280, -3.31318983418522e+304, 2.41737079499033e+35, 0, 0, 0, 0, 0, 0, 0, 0, 5.59621318408958e-275, 6.00547027244145e-145, -1.06335755117874e-176, -4.02250795184041e-87, 1.50661711236485e-312, 3.53369545912445e+72, 5.21812868452043e+279, NaN, 1.01126361913189e-314, -1.06820023622602e-255, 1.84938131873151e-207, -1.34765550658401e+28, 7.18029963922638e-228, 7.77537021256653e+36, 1.62969278106055e-309, -6.67115915940654e+306, 8.90771002988687e-159, 9.34665314051611e-307, 0, -2.52682438650838e+24, -1.34765550943381e+28, -6.67115915940654e+306, NaN, 5.0453059431966e+182, 3.91107221590192e-274, -1.34765550943377e+28, 8.38205310411369e-309, 3.75454999954138e+72, 6.20030355631779e-305, -2.63532704496456e-82, 3.53369545917587e+72, -4.02643840005466e-87, 1.03211490025416e-296, 5.2181291954879e+279, -2.42208305039457e+24, -1.34765550943381e+28, -6.67115915940654e+306, 8.90771002988687e-159, 9.34665314051611e-307, 5.65896446765599e+294, -7.29621263978862e-36, -7.72134029854232e-84, 2.18011030958806e-106, NaN, 9.36335270938441e-97, 1.62597454369523e-260, 2.85520664284671e+161, -2.12539132320598e+294, 3.06571538971512e-115, -4.03496294643223e-87, 2.54672248582809e-313, 1.42760928130527e+181, 2.35004462984818e+179, 3.87206480335272e-310, 0, 0, 0, 1.14638071046037e-282, 2.48671376813086e-316, 1.42467979129749e-317, 7.49893742607449e-312, 7.69847017287844e+218, 8.38203192702121e-309, -1.27860844879169e+306, 1.59875023337884e-112, 6.93840843682029e+75, 2.29891511777549e-317, NaN, 2.12199579047121e-314, 0, 0, 0, 3.70898021189245e+299, 1.24998608397835e-321, 0, 2.59032689326815e-318, 1.37437423389239e-309, -1.34497461904945e-284, 5.21813059839958e+279 ))
# Replay a fuzzer-generated crash input against blorr's internal C++ routine
# (valgrind regression check; testlist is the captured argument list above).
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) | /blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609956248-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 2,074 | r | testlist <- list(x = c(NaN, 2.55883663077293e-307, -1.47411866582824e-154, 9.74576447753751e-113, 5.07588390157017e-116, -5.15256539533595e-36 ), y = c(2.33251001679308e-308, -4.19987381709656e-140, NaN, NaN, NaN, -1.27860979078323e+306, 1.60078058684869e-112, 8.98637298518713e-243, 1.6690200483343e-308, -7.98824203857711e-280, -3.31318983418522e+304, 2.41737079499033e+35, 0, 0, 0, 0, 0, 0, 0, 0, 5.59621318408958e-275, 6.00547027244145e-145, -1.06335755117874e-176, -4.02250795184041e-87, 1.50661711236485e-312, 3.53369545912445e+72, 5.21812868452043e+279, NaN, 1.01126361913189e-314, -1.06820023622602e-255, 1.84938131873151e-207, -1.34765550658401e+28, 7.18029963922638e-228, 7.77537021256653e+36, 1.62969278106055e-309, -6.67115915940654e+306, 8.90771002988687e-159, 9.34665314051611e-307, 0, -2.52682438650838e+24, -1.34765550943381e+28, -6.67115915940654e+306, NaN, 5.0453059431966e+182, 3.91107221590192e-274, -1.34765550943377e+28, 8.38205310411369e-309, 3.75454999954138e+72, 6.20030355631779e-305, -2.63532704496456e-82, 3.53369545917587e+72, -4.02643840005466e-87, 1.03211490025416e-296, 5.2181291954879e+279, -2.42208305039457e+24, -1.34765550943381e+28, -6.67115915940654e+306, 8.90771002988687e-159, 9.34665314051611e-307, 5.65896446765599e+294, -7.29621263978862e-36, -7.72134029854232e-84, 2.18011030958806e-106, NaN, 9.36335270938441e-97, 1.62597454369523e-260, 2.85520664284671e+161, -2.12539132320598e+294, 3.06571538971512e-115, -4.03496294643223e-87, 2.54672248582809e-313, 1.42760928130527e+181, 2.35004462984818e+179, 3.87206480335272e-310, 0, 0, 0, 1.14638071046037e-282, 2.48671376813086e-316, 1.42467979129749e-317, 7.49893742607449e-312, 7.69847017287844e+218, 8.38203192702121e-309, -1.27860844879169e+306, 1.59875023337884e-112, 6.93840843682029e+75, 
2.29891511777549e-317, NaN, 2.12199579047121e-314, 0, 0, 0, 3.70898021189245e+299, 1.24998608397835e-321, 0, 2.59032689326815e-318, 1.37437423389239e-309, -1.34497461904945e-284, 5.21813059839958e+279 ))
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) |
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# AUTHOR: Philippe Massicotte
#
# DESCRIPTION: Script to plot daily SST map (requested by Dany Dumont).
#
# ftp://eclipse.ncdc.noaa.gov/pub/OI-daily-v2/NetCDF/2016/AVHRR/
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# Wipe the global environment at startup (script-reset convention used in
# this project; avoid if this file is ever sourced from another script).
rm(list = ls())
# read_sst() (used below to load each NetCDF file) is defined here.
source("R/read_sst.R")
# Render one day of sea-surface temperature as a polar-stereographic map and
# save it as graphs/sst/sst_<date>.pdf.
#   df     : data frame whose first three columns are taken as x / y / value
#            points (the value column must be named "sst" -- it is mapped
#            with aes(fill = sst)); a `date` column supplies the plot title
#            and the output file name.
#   baffin : land polygons (longlat) used both to crop the raster and as the
#            gray background map.
#   grat   : graticule lines to overlay (already projected upstream).
plot_sst <- function(df, baffin, grat) {
  # Target projection: north-polar stereographic centred on 45W.
  proj <- "+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
  # *************************************************************************
  # Create a raster from the XYZ points: rasterize, crop to the land extent,
  # reproject, then flatten back to a data frame for ggplot.
  # *************************************************************************
  r <- raster::rasterFromXYZ(
    df[, 1:3],
    crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"
  ) %>%
    crop(baffin) %>%
    projectRaster(crs = proj) %>%
    rasterToPoints(spatial = TRUE) %>%
    as.data.frame()
  # Put the land polygons in the same projection as the raster.
  baffin <- spTransform(baffin, CRS = proj)
  # *************************************************************************
  # Create the plot.
  # *************************************************************************
  p <- ggplot(data = baffin, aes(x = long, y = lat, group = group)) +
    geom_polygon(fill = "gray") +
    geom_raster(
      data = r,
      aes(x = x, y = y, fill = sst),
      inherit.aes = FALSE,
      interpolate = FALSE
    ) +
    # geom_path(data = track, aes(x = long, y = lat), color = "red") +
    geom_path(
      data = grat,
      aes(x = long, y = lat, group = group),
      color = "gray50",
      lwd = 0.1
    ) +
    coord_fixed(ylim = c(-4.5e06, -1e06),
                xlim = c(-2600000, -100000)) +
    theme(panel.grid.minor = element_blank()) +
    theme(panel.grid.major = element_blank()) +
    theme(panel.background = element_blank()) +
    theme(plot.background = element_rect(fill = "white")) +
    xlab("Longitude") +
    ylab("Latitude") +
    # Clamp the fill scale to [-2, 15]; out-of-range values are squished to
    # the nearest limit rather than shown as NA.
    scale_fill_viridis(na.value = "gray", limits = c(-2, 15), oob = scales::squish) +
    ggtitle(unique(df$date))
  fn <- sprintf("graphs/sst/sst_%s.pdf", unique(df$date))
  ggsave(fn, p)
}
# Land mask used for cropping and as map background.
baffin <- readOGR("data/shapefiles/baffin/", "baffin")
# Same polar-stereographic projection as inside plot_sst().
proj <- "+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
# 10-degree graticules, projected and flattened for ggplot.
grat <- readOGR("data/shapefiles/ne_10m_graticules_all/", "ne_10m_graticules_10") %>%
  spTransform(CRS = proj) %>%
  fortify()
# NOTE(review): track is built but only referenced by a commented-out
# geom_path inside plot_sst(); it is currently unused.
track <- readOGR("data/doc.kml", "Tracks") %>%
  spTransform(CRS = proj) %>%
  fortify()
# Load every daily NetCDF file and render one map per day.
files <- list.files("data/sst/", pattern = ".nc$", recursive = TRUE, full.names = TRUE)
res <- lapply(files, read_sst)
p <- lapply(res, plot_sst, baffin = baffin, grat = grat)
# Animation ---------------------------------------------------------------
# Rasterise each PDF frame to PNG, then assemble them into an animated GIF.
files <- list.files("graphs/sst/", ".pdf", full.names = TRUE)
lapply(files, function(x){
  xx <- tools::file_path_sans_ext(x)
  cmd <- sprintf("convert -density 72 %s -quality 50 %s.png", x, xx)
  system(cmd)
})
# FIX: ImageMagick settings such as -delay/-loop apply only to images read
# AFTER them on the command line; the original placed them after "*.png",
# so the frame delay was silently ignored.
system("convert -delay 3 -loop 0 graphs/sst/*.png /home/pmassicotte/Desktop/annimation.gif")
# Remove png files
files <- list.files("graphs/sst/", ".png", full.names = TRUE)
unlink(files)
| /R/plot_sst.R | no_license | PMassicotte/green-edge | R | false | false | 3,259 | r | # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
# AUTHOR: Philippe Massicotte
#
# DESCRIPTION: Script to plot daily SST map (requested by Dany Dumont).
#
# ftp://eclipse.ncdc.noaa.gov/pub/OI-daily-v2/NetCDF/2016/AVHRR/
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
rm(list = ls())
source("R/read_sst.R")
# Render one day of SST as a polar-stereographic map and save it to
# graphs/sst/sst_<date>.pdf. df: x/y/sst points plus a date column;
# baffin: land polygons; grat: projected graticule lines.
plot_sst <- function(df, baffin, grat) {
  proj <- "+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
  # *************************************************************************
  # Create a raster from the XYZ points.
  # *************************************************************************
  r <- raster::rasterFromXYZ(
    df[, 1:3],
    crs = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs +towgs84=0,0,0"
  ) %>%
    crop(baffin) %>%
    projectRaster(crs = proj) %>%
    rasterToPoints(spatial = TRUE) %>%
    as.data.frame()
  baffin <- spTransform(baffin, CRS = proj)
  # *************************************************************************
  # Create the plot.
  # *************************************************************************
  p <- ggplot(data = baffin, aes(x = long, y = lat, group = group)) +
    geom_polygon(fill = "gray") +
    geom_raster(
      data = r,
      aes(x = x, y = y, fill = sst),
      inherit.aes = FALSE,
      interpolate = FALSE
    ) +
    # geom_path(data = track, aes(x = long, y = lat), color = "red") +
    geom_path(
      data = grat,
      aes(x = long, y = lat, group = group),
      color = "gray50",
      lwd = 0.1
    ) +
    coord_fixed(ylim = c(-4.5e06, -1e06),
                xlim = c(-2600000, -100000)) +
    theme(panel.grid.minor = element_blank()) +
    theme(panel.grid.major = element_blank()) +
    theme(panel.background = element_blank()) +
    theme(plot.background = element_rect(fill = "white")) +
    xlab("Longitude") +
    ylab("Latitude") +
    scale_fill_viridis(na.value = "gray", limits = c(-2, 15), oob = scales::squish) +
    ggtitle(unique(df$date))
  fn <- sprintf("graphs/sst/sst_%s.pdf", unique(df$date))
  ggsave(fn, p)
}
baffin <- readOGR("data/shapefiles/baffin/", "baffin")
proj <- "+proj=stere +lat_0=90 +lat_ts=70 +lon_0=-45 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
grat <- readOGR("data/shapefiles/ne_10m_graticules_all/", "ne_10m_graticules_10") %>%
  spTransform(CRS = proj) %>%
  fortify()
# NOTE(review): track is only used by a commented-out geom_path above.
track <- readOGR("data/doc.kml", "Tracks") %>%
  spTransform(CRS = proj) %>%
  fortify()
files <- list.files("data/sst/", pattern = ".nc$", recursive = TRUE, full.names = TRUE)
res <- lapply(files, read_sst)
p <- lapply(res, plot_sst, baffin = baffin, grat = grat)
# Animation ---------------------------------------------------------------
files <- list.files("graphs/sst/", ".pdf", full.names = TRUE)
lapply(files, function(x){
  xx <- tools::file_path_sans_ext(x)
  cmd <- sprintf("convert -density 72 %s -quality 50 %s.png", x, xx)
  system(cmd)
})
# NOTE(review): ImageMagick settings like -delay/-loop only affect images
# read after them; placed after *.png they are likely ignored -- move them
# before the input pattern.
system("convert graphs/sst/*.png -delay 3 -loop 0 /home/pmassicotte/Desktop/annimation.gif")
# Remove png files
files <- list.files("graphs/sst/", ".png", full.names = TRUE)
unlink(files)
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidycensus)
library(tidyverse)
library(shinythemes)
source("Maps.R")
# Define UI for application: a navbar with Model / Discussion / About tabs.
ui <- navbarPage(
    "Final Project Title",
    tabPanel("Model",
             fluidPage(theme = shinytheme("cyborg"),
                       titlePanel("Model Title"),
                       sidebarLayout(
                           sidebarPanel(
                               # Dropdown whose value ("a"/"b") selects which
                               # county map the server renders below.
                               selectInput(
                                   "plot_type",
                                   "Plot Type",
                                   c("Orange County" = "a", "New York County" = "b")
                               )),
                           mainPanel(plotOutput("map"))))),
    tabPanel("Discussion",
             titlePanel("Discussion Title"),
             p("Tour of the modeling choices you made and
               an explanation of why you made them")),
    tabPanel("About",
             titlePanel("About"),
             h3("Project Background and Motivations"),
             p("Hello, this is a practice shiny app that I have created for my data course"),
             h3("About Me"),
             p("My name is Nana-Korantema Koranteng and I study the Middle East.
             You can reach me at nanakorantema_koranteng@g.harvard.edu.")))
# Define server logic: render whichever map object (map / map_2, presumably
# created by Maps.R sourced above -- verify) matches the selected plot_type.
server <- function(input, output) {
    output$map <- renderPlot({
        if(input$plot_type == "a"){
            map
        }
        else if(input$plot_type == "b"){
            map_2
        }
    })
}
# Run the application
shinyApp(ui = ui, server = server)
| /Recitation-4/app.R | no_license | nanakorantema/Census_Data | R | false | false | 1,770 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(tidycensus)
library(tidyverse)
library(shinythemes)
source("Maps.R")  # presumably defines the map / map_2 objects used below
# Define UI for application: navbar with Model / Discussion / About tabs.
ui <- navbarPage(
    "Final Project Title",
    tabPanel("Model",
             fluidPage(theme = shinytheme("cyborg"),
                       titlePanel("Model Title"),
                       sidebarLayout(
                           sidebarPanel(
                               # "a"/"b" selects which county map is drawn.
                               selectInput(
                                   "plot_type",
                                   "Plot Type",
                                   c("Orange County" = "a", "New York County" = "b")
                               )),
                           mainPanel(plotOutput("map"))))),
    tabPanel("Discussion",
             titlePanel("Discussion Title"),
             p("Tour of the modeling choices you made and
               an explanation of why you made them")),
    tabPanel("About",
             titlePanel("About"),
             h3("Project Background and Motivations"),
             p("Hello, this is a practice shiny app that I have created for my data course"),
             h3("About Me"),
             p("My name is Nana-Korantema Koranteng and I study the Middle East.
             You can reach me at nanakorantema_koranteng@g.harvard.edu.")))
# Define server logic: render the map matching the selected plot_type.
server <- function(input, output) {
    output$map <- renderPlot({
        if(input$plot_type == "a"){
            map
        }
        else if(input$plot_type == "b"){
            map_2
        }
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
\name{TERGMdirTrans-package}
\alias{TERGMdirTrans-package}
\alias{TERGMdirTrans}
\docType{package}
\title{
\packageTitle{TERGMdirTrans}
}
\description{
\packageDescription{TERGMdirTrans}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{TERGMdirTrans}
\packageIndices{TERGMdirTrans}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{TERGMdirTrans}
Maintainer: \packageMaintainer{TERGMdirTrans}
}
\references{
~~ Literature or other references for background information ~~
}
% Optionally add other standard keywords here, one per line, taken from the
% file KEYWORDS in the R documentation directory.
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
| /TERGMdirTrans/man/TERGMdirTrans-package.Rd | no_license | Dawnxissdd/dynERGM_C | R | false | false | 847 | rd | \name{TERGMdirTrans-package}
\alias{TERGMdirTrans-package}
\alias{TERGMdirTrans}
\docType{package}
\title{
\packageTitle{TERGMdirTrans}
}
\description{
\packageDescription{TERGMdirTrans}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{TERGMdirTrans}
\packageIndices{TERGMdirTrans}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
\packageAuthor{TERGMdirTrans}
Maintainer: \packageMaintainer{TERGMdirTrans}
}
\references{
~~ Literature or other references for background information ~~
}
% Optionally add other standard keywords here, one per line, taken from the
% file KEYWORDS in the R documentation directory.
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{cox.old}
\alias{cox.old}
\title{Cox Proportional Hazards Regression (TAKE 2)}
\usage{
cox.old(
data,
yvar,
...,
starttime = NULL,
return.split.data = FALSE,
args5 = list(cens.model = "cox", model = "fg")
)
}
\description{
STILL WRITING THIS
}
| /man/cox.old.Rd | no_license | ying14/yingtools2 | R | false | true | 352 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{cox.old}
\alias{cox.old}
\title{Cox Proportional Hazards Regression (TAKE 2)}
\usage{
cox.old(
data,
yvar,
...,
starttime = NULL,
return.split.data = FALSE,
args5 = list(cens.model = "cox", model = "fg")
)
}
\description{
STILL WRITING THIS
}
|
###########################################
##### Linear regression on Real estate data
###########################################
# 1. Importing data
library(car)
library(MASS)
library(dplyr)
library(tidyr)
setwd("E:\\Study\\R Projects\\day3")
house_prices <- read.csv(file.choose(), sep = "|") # interactive file picker
names(house_prices)
str(house_prices)
#tbl_df(house_prices)
#glimpse(house_prices)
# FIX: attach(house_prices) removed -- columns are referenced explicitly
# below, avoiding masking surprises once more data frames exist.
# 2. Simple linear regression model Price v/s Square feet
lm.1 <- lm(Price ~ SqFt, data=house_prices)
summary(lm.1)
co <- cor(house_prices[c(-7,-8)]) # drop columns 7 and 8 (non-numeric -- verify)
#pairs(house_prices[c(-7,-8)])
plot(house_prices$SqFt, house_prices$Price, main="Scatter plot",
     xlab="Square feet", ylab="Price")
abline(lm.1,col="red",lwd=3)
# 3 Prepare data and split
# 3.1 Create dummy variables (0/1 indicators for brick and neighborhood)
house_prices$brick_d<-ifelse(house_prices$Brick=="Yes",1,0)
house_prices$east<-ifelse(house_prices$Neighborhood=="East",1,0)
house_prices$north<-ifelse(house_prices$Neighborhood=="North",1,0)
# 3.2 Split your dataset: 60% training / 40% validation
set.seed(110)
sub <- sample(nrow(house_prices), floor(nrow(house_prices) * 0.6))
training_data <- house_prices[sub,]
validation_data <- house_prices[-sub,]
sub # autoprints the sampled row indices
# NOTE(review): this seed is set AFTER the split above, so it does not
# control the sampling; it only seeds whatever randomness comes later.
set.seed(100)
# 4 Build multiple regression model
# 4.1 Build model with all variables
lm.fit1 <- lm(Price ~ SqFt+Bathrooms+Bedrooms+Offers+
                north+east+brick_d, data=training_data)
summary(lm.fit1)
# 4.2 Using stepwise, we reduce variables if possible
# NOTE(review): the reduced model is only summarised here; the diagnostics
# and predictions below keep using the full model lm.fit1.
lm.fit1.step <- stepAIC(lm.fit1)
summary(lm.fit1.step)
# 5 Check for multicollinearity (variance inflation factors)
vif(lm.fit1)
# 6 Predict values on training and validation data sets
# 6.1 Predict values on training set
training_data$predict.price <- predict(lm.fit1)
training_data$error <- residuals(lm.fit1) # observed - fitted
# FIX: attach(training_data) removed -- reference columns explicitly.
plot(training_data$predict.price, training_data$error)
# 6.2 Predict values on validation set
validation_data$predict.price <- predict(lm.fit1,newdata=validation_data)
# FIX: keep the same sign convention as the training residuals
# (observed - fitted); the original computed fitted - observed here, so the
# two error histograms below were not directly comparable.
validation_data$error <- validation_data$Price - validation_data$predict.price
# 6.3 Check residual plots
hist(training_data$error)
hist(validation_data$error)
# 6.4 Correlation (squared = R^2 of predictions vs observed)
a<-cor(training_data$Price,training_data$predict.price)
b<-cor(validation_data$Price,validation_data$predict.price)
a*a
b*b
| /linear_regression_real_estate.R | no_license | ashishtele/Linear-regression---Real-estate | R | false | false | 2,151 | r | ###########################################
##### Linear regression on Real estate data
###########################################
# 1. Importing data
library(car)
library(MASS)
library(dplyr)
library(tidyr)
setwd("E:\\Study\\R Projects\\day3")
house_prices <- read.csv(file.choose(), sep = "|")
names(house_prices)
str(house_prices)
#tbl_df(house_prices)
#glimpse(house_prices)
# NOTE(review): attach() exposes columns by bare name below but risks
# masking; prefer explicit house_prices$... references.
attach(house_prices)
# 2. Simple linear regression model Price v/s Square feet
lm.1 <- lm(Price ~ SqFt, data=house_prices)
summary(lm.1)
co <- cor(house_prices[c(-7,-8)]) # drop columns 7 and 8 (non-numeric -- verify)
#pairs(house_prices[c(-7,-8)])
plot(SqFt, Price, main="Scatter plot", xlab="Square feet", ylab="Price")
abline(lm.1,col="red",lwd=3)
# 3 Prepare data and split
# 3.1 Create dummy variables
house_prices$brick_d<-ifelse(house_prices$Brick=="Yes",1,0)
house_prices$east<-ifelse(house_prices$Neighborhood=="East",1,0)
house_prices$north<-ifelse(house_prices$Neighborhood=="North",1,0)
# 3.2 Split your dataset (60% training / 40% validation)
set.seed(110)
sub <- sample(nrow(house_prices), floor(nrow(house_prices) * 0.6))
training_data <- house_prices[sub,]
validation_data <- house_prices[-sub,]
sub
# NOTE(review): set AFTER the split, so it does not control the sampling.
set.seed(100)
# 4 Build multiple regression model
# 4.1 Build model with all variables
lm.fit1 <- lm(Price ~ SqFt+Bathrooms+Bedrooms+Offers+
                north+east+brick_d, data=training_data)
summary(lm.fit1)
# 4.2 Using stepwise, we reduce variables if possible
lm.fit1.step <- stepAIC(lm.fit1)
summary(lm.fit1.step)
# 5 Check for multicollinearity
vif(lm.fit1)
# 6 Predict values on training and validation data sets
# 6.1 Predict values on training set
training_data$predict.price <- predict(lm.fit1)
training_data$error <- residuals(lm.fit1)
attach(training_data)
plot(predict.price, error)
# 6.2 Predict values on validation set
validation_data$predict.price <- predict(lm.fit1,newdata=validation_data)
# NOTE(review): opposite sign convention to the training residuals
# (fitted - observed here vs observed - fitted above).
validation_data$error <- validation_data$predict.price - validation_data$Price
# 6.3 Check residual plots
hist(training_data$error)
hist(validation_data$error)
# 6.4 Correlation
a<-cor(training_data$Price,training_data$predict.price)
b<-cor(validation_data$Price,validation_data$predict.price)
a*a
b*b
|
mapdeckMeshDependency <- function() {
  # Build the HTML dependency (via the package's createHtmlDependency
  # helper) that ships the mesh layer's JavaScript
  # (inst/htmlwidgets/lib/mesh/mesh.js) with a mapdeck map.
  mesh_js <- createHtmlDependency(
    name = "mesh",
    version = "1.0.0",
    src = system.file("htmlwidgets/lib/mesh", package = "mapdeck"),
    script = c("mesh.js"),
    all_files = FALSE
  )
  list(mesh_js)
}
find_mesh_index <- function( data ) {
  # Name of the mesh3d index component for the object's primitive type:
  # quads store vertex indices in "ib", triangles in "it". Any other
  # primitive type falls through the switch (returning NULL).
  primitive <- data[["primitivetype"]]
  switch(
    primitive
    , quad = "ib"
    , triangle = "it"
  )
}
#' Add Mesh
#'
#' Adds polygons to the map from a \code{quadmesh} object
#'
#' @inheritParams add_polygon
#'
#' @inheritSection add_arc legend
#' @inheritSection add_arc id
#'
#' @details
#'
#' \code{add_mesh} supports quadmesh objects
#'
#' @export
add_mesh <- function(
map,
data = get_map_data(map),
fill_opacity = NULL,
elevation = NULL,
tooltip = NULL,
auto_highlight = FALSE,
highlight_colour = "#AAFFFFFF",
light_settings = list(),
layer_id = NULL,
id = NULL,
palette = "viridis",
na_colour = "#808080FF",
legend = FALSE,
legend_options = NULL,
legend_format = NULL,
update_view = TRUE,
focus_layer = FALSE,
digits = 6,
transitions = NULL
) {
#if( is.null( stroke_colour )) stroke_colour <- fill_colour
experimental_layer( "mesh" )
if(!inherits(data, "mesh3d")) {
stop("expecting mesh3d object")
}
l <- list()
fill_colour = "average_z"
l[["fill_colour"]] <- force( fill_colour )
l[["fill_opacity"]] <- resolve_opacity( fill_opacity )
l[["elevation"]] <- force( elevation )
l[["tooltip"]] <- force( tooltip )
l[["id"]] <- force( id )
l[["na_colour"]] <- force( na_colour )
vertex <- "vb"
index <- find_mesh_index( data )
## check:
if ( data[["primitivetype"]] == "quad" & is.null( data[["ib"]] ) ) {
stop("badly formed mesh3d type. Found quad and expecting ib index")
}
if ( data[["primitivetype"]] == "triangle" & is.null( data[["it"]] ) ) {
stop("badly formed mesh3d type. Found triangle and expecting it index")
}
l <- resolve_palette( l, palette )
l <- resolve_legend( l, legend )
l <- resolve_legend_options( l, legend_options )
l <- resolve_data( data, l, c("POLYGON","MULTIPOLYGON") )
bbox <- init_bbox()
update_view <- force( update_view )
focus_layer <- force( focus_layer )
is_extruded <- TRUE
# if( !is.null( l[["stroke_width"]] ) | !is.null( l[["stroke_colour"]] ) ) {
# is_extruded <- FALSE
# if( !is.null( elevation ) ) {
# message("stroke provided, ignoring elevation")
# }
# if( is.null( l[["stroke_width"]] ) ) {
# l[["stroke_width"]] <- 1L
# }
# }
if ( !is.null(l[["data"]]) ) {
data <- l[["data"]]
l[["data"]] <- NULL
}
## sf objects come with a bounding box
if( !is.null(l[["bbox"]] ) ) {
bbox <- l[["bbox"]]
l[["bbox"]] <- NULL
}
checkHexAlpha(highlight_colour)
layer_id <- layerId(layer_id, "polygon")
map <- addDependency(map, mapdeckMeshDependency())
tp <- l[["data_type"]]
l[["data_type"]] <- NULL
jsfunc <- "add_mesh"
if ( tp == "mesh" ) {
# geometry_column <- c( "geometry" )
geometry_column <- c( vertex, index )
shape <- rcpp_mesh_geojson( data, l, geometry_column, digits )
}
# geometry_column <- c( "geometry" ) ## This is where we woudl also specify 'origin' or 'destination'
# shape <- rcpp_polygon_geojson( data, l, geometry_column )
# } else if ( tp == "sfencoded" ) {
# geometry_column <- "polyline"
# shape <- rcpp_polygon_polyline( data, l, geometry_column )
# jsfunc <- "add_polygon_polyline"
# }
# return( shape )
light_settings <- jsonify::to_json(light_settings, unbox = T)
js_transitions <- resolve_transitions( transitions, "polygon" )
if( inherits( legend, "json" ) ) {
shape[["legend"]] <- legend
} else {
shape[["legend"]] <- resolve_legend_format( shape[["legend"]], legend_format )
}
invoke_method(
map, jsfunc, map_type( map ), shape[["data"]], layer_id, light_settings,
auto_highlight, highlight_colour, shape[["legend"]], bbox, update_view, focus_layer,
js_transitions, is_extruded
)
}
#' @rdname clear
#' @export
clear_mesh <- function( map, layer_id = NULL) {
layer_id <- layerId(layer_id, "mesh")
invoke_method(map, "md_layer_clear", map_type( map ), layer_id, "mesh" )
}
| /R/map_layer_mesh.R | no_license | nemochina2008/mapdeck | R | false | false | 4,040 | r | mapdeckMeshDependency <- function() {
list(
createHtmlDependency(
name = "mesh",
version = "1.0.0",
src = system.file("htmlwidgets/lib/mesh", package = "mapdeck"),
script = c("mesh.js"),
all_files = FALSE
)
)
}
find_mesh_index <- function( data ) {
switch(
data[["primitivetype"]]
, "quad" = "ib"
, "triangle" = "it"
)
}
#' Add Mesh
#'
#' Adds polygons to the map from a \code{quadmesh} object
#'
#' @inheritParams add_polygon
#'
#' @inheritSection add_arc legend
#' @inheritSection add_arc id
#'
#' @details
#'
#' \code{add_mesh} supports quadmesh objects
#'
#' @export
add_mesh <- function(
map,
data = get_map_data(map),
fill_opacity = NULL,
elevation = NULL,
tooltip = NULL,
auto_highlight = FALSE,
highlight_colour = "#AAFFFFFF",
light_settings = list(),
layer_id = NULL,
id = NULL,
palette = "viridis",
na_colour = "#808080FF",
legend = FALSE,
legend_options = NULL,
legend_format = NULL,
update_view = TRUE,
focus_layer = FALSE,
digits = 6,
transitions = NULL
) {
#if( is.null( stroke_colour )) stroke_colour <- fill_colour
experimental_layer( "mesh" )
if(!inherits(data, "mesh3d")) {
stop("expecting mesh3d object")
}
l <- list()
fill_colour = "average_z"
l[["fill_colour"]] <- force( fill_colour )
l[["fill_opacity"]] <- resolve_opacity( fill_opacity )
l[["elevation"]] <- force( elevation )
l[["tooltip"]] <- force( tooltip )
l[["id"]] <- force( id )
l[["na_colour"]] <- force( na_colour )
vertex <- "vb"
index <- find_mesh_index( data )
## check:
if ( data[["primitivetype"]] == "quad" & is.null( data[["ib"]] ) ) {
stop("badly formed mesh3d type. Found quad and expecting ib index")
}
if ( data[["primitivetype"]] == "triangle" & is.null( data[["it"]] ) ) {
stop("badly formed mesh3d type. Found triangle and expecting it index")
}
l <- resolve_palette( l, palette )
l <- resolve_legend( l, legend )
l <- resolve_legend_options( l, legend_options )
l <- resolve_data( data, l, c("POLYGON","MULTIPOLYGON") )
bbox <- init_bbox()
update_view <- force( update_view )
focus_layer <- force( focus_layer )
is_extruded <- TRUE
# if( !is.null( l[["stroke_width"]] ) | !is.null( l[["stroke_colour"]] ) ) {
# is_extruded <- FALSE
# if( !is.null( elevation ) ) {
# message("stroke provided, ignoring elevation")
# }
# if( is.null( l[["stroke_width"]] ) ) {
# l[["stroke_width"]] <- 1L
# }
# }
if ( !is.null(l[["data"]]) ) {
data <- l[["data"]]
l[["data"]] <- NULL
}
## sf objects come with a bounding box
if( !is.null(l[["bbox"]] ) ) {
bbox <- l[["bbox"]]
l[["bbox"]] <- NULL
}
checkHexAlpha(highlight_colour)
layer_id <- layerId(layer_id, "polygon")
map <- addDependency(map, mapdeckMeshDependency())
tp <- l[["data_type"]]
l[["data_type"]] <- NULL
jsfunc <- "add_mesh"
if ( tp == "mesh" ) {
# geometry_column <- c( "geometry" )
geometry_column <- c( vertex, index )
shape <- rcpp_mesh_geojson( data, l, geometry_column, digits )
}
# geometry_column <- c( "geometry" ) ## This is where we woudl also specify 'origin' or 'destination'
# shape <- rcpp_polygon_geojson( data, l, geometry_column )
# } else if ( tp == "sfencoded" ) {
# geometry_column <- "polyline"
# shape <- rcpp_polygon_polyline( data, l, geometry_column )
# jsfunc <- "add_polygon_polyline"
# }
# return( shape )
light_settings <- jsonify::to_json(light_settings, unbox = T)
js_transitions <- resolve_transitions( transitions, "polygon" )
if( inherits( legend, "json" ) ) {
shape[["legend"]] <- legend
} else {
shape[["legend"]] <- resolve_legend_format( shape[["legend"]], legend_format )
}
invoke_method(
map, jsfunc, map_type( map ), shape[["data"]], layer_id, light_settings,
auto_highlight, highlight_colour, shape[["legend"]], bbox, update_view, focus_layer,
js_transitions, is_extruded
)
}
#' @rdname clear
#' @export
clear_mesh <- function( map, layer_id = NULL) {
layer_id <- layerId(layer_id, "mesh")
invoke_method(map, "md_layer_clear", map_type( map ), layer_id, "mesh" )
}
|
print(paste0("Loading R packages"))
#Function to download, install and load the required libraries only when needed
packages <- function(x) {
x <- as.character(match.call()[[2]])
if (!require(x, character.only = TRUE)) {
install.packages(pkgs = x, repos = "http://cran.r-project.org")
require(x, character.only = TRUE)
}
}
#move optparse to the top to load opts first
suppressMessages(packages(optparse))
#List of required libraries to be loaded
source("https://bioconductor.org/biocLite.R")
if (!"ShortRead" %in% installed.packages()) biocLite(ShortRead)
if (!"Biostrings" %in% installed.packages()) biocLite(Biostrings)
#List of required libraries to be loaded
suppressMessages(packages(tools))
suppressMessages(packages(ShortRead))
suppressMessages(packages(Biostrings))
suppressMessages(packages(stringi))
suppressMessages(packages(ggplot2))
suppressMessages(packages(cowplot))
#List of pasring options loaded
option_list<- list(
make_option(c("-s", "--start"), type="integer", action = "store", default = NA,
help="Input the first nucleotide reference genomic location."),
make_option(c("-e", "--end"), type="integer", action = "store" ,default = NA,
help="Input the last nucleotide reference genomic location."),
make_option(c("-a", "--adapter"), type="character", action = "store" ,default = NA,
help="Input the RACE adapter nucleotide sequence. [default: NO]"),
make_option(c("-m", "--mismatch"), type="integer", default = 0,
help="Input number of mismatches that are allowed during alignement. [default: %default]"),
make_option(c("-p", "--plot"), action="store_true", default = FALSE,
help="Print output graph between the specified genomic locations. [default: NO]"),
make_option(c("-t", "--tmap"), action="store_true", default = FALSE,
help="Use the Tmap aligner instead of Bowtie. [default: NO]"),
make_option(c("--notsv"), action="store_true", default = FALSE ,
help="Do not write output tsv file. [default: NO]"),
make_option(c("-i", "--iterate"), action="store_true", default= FALSE,
help="Create the alternative references to cover all the possible SNPs \n
of the reference between the genomics locations specified by -s and -e\n
and then perform global alignment and generate graph.[default: NO] ")
)
opt = parse_args(OptionParser(description = "RACE-SEQ-lite\n
This is a custom R script for the downstream analysis of RACE-seq data.\n
The pipeline uses common bioinformatics command line packages such as BOWTIE, SAMTOOLS and BEDTOOLS that should be installed system-wide.\n
The script reads the necessary input files from your workind directory and outputs a graph or a tsv file.\n
One fasta and one fastq file can only be in the working directory",
usage = "Rscript %prog [options] -s <integer> -e <integer> -m <integer> \n",
option_list = option_list,
add_help_option = TRUE,
epilogue = "Thank you for using RACE-SEQ lite.
\nFor documentation visit: https://github.com/pantastheo/RACE-SEQ-lite.
\nAuth: Pantazis Theotokis 2018
\nContact: p.theotokis@imperial.ac.uk
\n"))
str<- opt$s
end<- opt$e
mismatch<- opt$mismatch
RACE_adapter<- opt$a
#Check if the necessary fasta and fastq files are located in the working directory.
#If TRUE load read the files.
#If FALSE exit with a message.
if(!is.na(opt$s) & !is.na(opt$e)) {
#input the reference sequence in .fasta format
reference<- list.files(".", pattern ="fasta", all.files = F, full.names = F)
print(paste0("Reading reference file in fasta format."))
print(paste0("File " ,reference, " found in working directory."))
if ((length(reference))==0) {
stop("No input .fasta reference file available in working directory.")} else if ((length(reference))>=2) {
stop("More than one .fasta reference file in working directory.")
}
#input the data in .fastq or .fastq.gz format
input_data<- list.files(".", pattern="fastq", all.files = F, full.names = F)
print(paste0("Reading data input file in fastq format."))
print(paste0("File " ,input_data, " found in working directory."))
if ((length(input_data))==0) {
stop("No input .fastq file available in working directory.")
} else if ((length(input_data))>=2) {
stop("More than one .fastq file in working directory. \nFor paired end reads please concatenate and run again")
}
} else
stop("Please input Start and End nucleotide reference genomic locations \nOr type [option] -h for help")
#If iterate option is TRUE run the script that will generate all the alternative references and will perform alignment.
if (opt$i==FALSE){
#reading and transforming reference sequence
nt_reference <-strsplit((toString(readBStringSet(reference))), NULL , fixed = T)
nt_reference<- data.frame(lapply(nt_reference, function(x) toupper(x)), stringsAsFactors = F)
#set output names
input_name<- file_path_sans_ext(input_data)
filename <- paste("mm", mismatch, sep = "")
out_name <- paste("read_count_", filename, sep="")
#If tmap TRUE perform alignmment using TMAP
if (opt$t==TRUE){
#check if the tmap aligner is installed
if (system("which tmap")==0) {
print(paste0("Generating tmap index files"))
prefix<-"tmap"
#build the index
CMD_tmapindex<- paste("tmap index -f", reference , sep=" ")
system(CMD_tmapindex)
#perform alignment with tmap and read count using bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", reference," -r ", input_data, " | samtools view -bt ", reference," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)
} else {
#Check if cutadapt installed and perform adapter trimming
if (system("which cutadapt")==0){
#adapter trimming using cutadapt
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("cutadapt -g ", RACE_adapter, " -e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data," |tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", reference," -i fastq | samtools view -bt ", reference," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)
} else {
stop("Cutadapt software is not installed or not in $PATH. Please see documentation for installation.")}
}
} else {
stop("Tmap software is not installed or not in $PATH. Please see documentation for installation.")}
}
else {
#If tmap FALSE perform alignment using bowtie, which is the default option.
#check if bowtie aligner is installed
if (system("which bowtie")==0) {
print(paste0("Generating bowtie index files"))
prefix<-"bowtie"
#build the index
CMD_bowindex<- paste("bowtie-build -q -f", reference, "index", sep=" ")
system(CMD_bowindex)
#perform alignment with bowtie and read count using bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("bowtie -p 4 -S -k 1 -v", mismatch, "index", input_data," | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)
} else {
if (system("which cutadapt")==0) {
#adapter trimming using cutadapt
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("cutadapt -g", RACE_adapter, "-e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data,"|bowtie -p 4 -S -k 1 -v", mismatch, "index - | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)
}else {
stop("Cutadapt software is not installed or not in $PATH. Please see documentation for installation.")}
}
} else {
stop("Bowtie software is not installed or not in $PATH. Please see documentation for installation.")}}
#read and merge ref and reads
reads<- read.delim(out_name, header = F )
dataframe<- data.frame(reads, nt_reference , stringsAsFactors = F)
#calculating the % and log10 columns
dataframe[,5] <- (dataframe[,3]/sum(dataframe[,3])*100)
dataframe[,6] <- (log10(dataframe[,3]))
dataframe[dataframe== -Inf] <-0
#focusing on target region can be ajusted acording to experiment
binding_region <- dataframe[str:end,]
#function to delete files created
del_files<- function(pattern){
fl_rm<-list.files(".", pattern = pattern, all.files = F, full.names = F)
for(i in fl_rm){
i<-paste("rm", i , sep = " ")
system(i)
}
}
#delete generated files using fuunction
del_files("read_count")
del_files("fasta.tmap.")
del_files("aligned.bam")
del_files("out.sam")
del_files("index")
#print the wildtype alignment in tsv format table
if (opt$notsv==FALSE){
print(paste0("Writing results to output tsv"))
write.table(binding_region, file = paste0(input_name,"_",prefix, "_", filename, ".tsv") , sep = "\t",
col.names = c("reference", "position", "count", "nucleotide", "percentage", "log10" ),
row.names = F )
}
if (opt$p==TRUE){
#create wildtype linear & log scale graph
pdf(paste0(input_name,"_",prefix, "_", filename, ".pdf"), width=20)
print(paste0("Generating graph with ",mismatch," mismatches."))
#in 100% linear scale
mp <- barplot(binding_region[,5],
xlab="Binding site",
names.arg=(binding_region[,4]),
las=1,
cex.names = 2.2,
col="darkgrey" ,
main="Novel 5' Ends in linear",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,100))
title(ylab="Novel 5\' Ends (%)", line=2, cex.lab=1.5)
text(mp,binding_region[,5]+5 ,cex = 1.3, adj = 0 ,labels=binding_region[,3] ,srt=90)
#in log10 logarithmic scale
mp <- barplot(binding_region[,6],
xlab="Binding site",
names.arg=(binding_region[,4]),
las=1,
cex.names = 2.2,
col="darkgrey",
main="Novel 5' Ends in logarithmic",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,10))
title(ylab=expression("Novel 5\' Ends (log"[10]*")"), line=2, cex.lab=1.5)
text(mp,binding_region[,6]+0.5 ,cex = 1.3, adj = 0 ,labels=binding_region[,3] ,srt=90)
dev.off()
}
} else {
#Use the combined script to iterate the all the possible SNPs in the reference sequence between the specified genomic locations
#This is a massive code repetition that needs tidying up.
#reading and transforming reference sequence
nt_reference <-strsplit((toString(readBStringSet(reference))), NULL , fixed = T)
nt_reference<- data.frame(lapply(nt_reference, function(x) toupper(x)), stringsAsFactors = F)
#set output names
input_name<- file_path_sans_ext(input_data)
filename <- paste("mm", mismatch, sep = "")
out_name <- paste("read_count_", filename, sep="")
#read the original wildtype reference
replicon_str <- (toString(readBStringSet(reference)))
#read and transform the reference
ref_replace <- function(str, end, reference) {
nt_reference <-strsplit((toString(readBStringSet(reference))), NULL , fixed = T)
nt <-data.frame(lapply(nt_reference, function(x) toupper(x)), stringsAsFactors = F)
a = 0
count_names <- data.frame(NA)
nt_sub<- data.frame(NA)
for (i in nt_reference[str:end]) {
if (nt[str + a, 1] == "A") {
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "C"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "C", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"C"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "T"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "T", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"T"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "G"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "G", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"G"
a = a + 1
}
if (nt[str + a, 1] == "C") {
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "A"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "A", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"A"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "T"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "T", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"T"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "G"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "G", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"G"
a = a + 1
}
if (nt[str + a, 1] == "T") {
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "A"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "A", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"A"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "C"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "C", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"C"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "G"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "G", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"G"
a = a + 1
}
if (nt[str + a, 1] == "G") {
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "A"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "A", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"A"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "T"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "T", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"T"
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- "C"
count_names[nrow(count_names) + 1,] <-
paste((str + a), nt[(str + a), 1], "to", "C", sep = "_")
nt_sub[nrow(nt_sub) + 1,] <-"C"
a = a + 1
}
}
names_list <-
data.frame(count_names[((((end - str) + 1) * 3) + 1):1,], stringsAsFactors = F)
names_list[(nrow(names_list)),] <- "wildtype_siRNA"
nt_sub_list <-
data.frame(nt_sub[((((end - str) + 1) * 3) + 1):1,], stringsAsFactors = F)
#nt_sub_list[(nrow(nt_sub_list)),] <- "substitution"
ref_list <- nt[str:end, 1:((((end - str) + 1) * 3) + 1)]
trans_list <-
as.data.frame(t(ref_list[, ncol(ref_list):1]), stringsAsFactors = F)
for (i in (c(1:nrow(names_list)))) {
names_list[i, 2] <-
paste(trans_list[i, 1:(ncol(trans_list))],sep = "", collapse = "")
}
names_list<- cbind(names_list, nt_sub_list)
colnames(names_list) <- c("name", "target", "nucleotide")
names_list <-
rbind((names_list[(nrow(names_list)),]), names_list)
names_list <- names_list[-c(nrow(names_list)),]
return(names_list)
}
#call the ref_replace function
target <- ref_replace(str, end, reference)
#transform the reference and create the output tables
dataframe_counts <- data.frame(nt_reference, stringsAsFactors = F)
dataframe_log10 <- data.frame(nt_reference , stringsAsFactors = F)
dataframe_linear <- data.frame(nt_reference , stringsAsFactors = F)
#extract the wildtype siRNA sequence
siRNA_ref <- subseq((replicon_str), start = str, end = end)
#check if packages are installed on system
#If not exit with a message
if (system("which bowtie")==0) {
print("Bowtie aligner installed and on $PATH")}
else {stop("Bowtie software is not installed or not on $PATH. Please see documentation for installation.")}
if (opt$t==TRUE ){
if (system("which tmap")==0) {
print("Tmap aligner installed and on $PATH")}
else if (system("which tmap")==1) {
print(paste0("Tmap software is not installed or not on $PATH."))
stop("Please see documentation for installation.")}
}
if (!is.na(opt$a)){
if (system("which cutadapt")==0) {
print("Cutadapter trimmer installed and on $PATH")}
else if (system("which Cutadapt")==1){
print(paste0("Cutadapt trimmer is not installed or not on $PATH."))
stop("Please see documentation for installation.")}
}
#set the counter
counter <- 100
#run the script for mismatch references in loop
for (i in target$target) {
print(paste("working on alignment",counter - 99 ,"of",length(target$target)))
#reference and output prefix name
prefix <- i
#read the mismatch and substitute the original sequence with the mismach sequence
sub_ref <-stri_replace_all_fixed(replicon_str, pattern = siRNA_ref, replacement = i)
new_fasta_ref <- paste(prefix, "_ref.fasta", sep = "")
#write the new reference in .fasta format
writeFasta (DNAStringSet(sub_ref), new_fasta_ref, mode = "w")
#input the reference sequence in .fasta format
mm_ref <-list.files(".",pattern = prefix,all.files = F,full.names = F)
#reading and transforming reference sequence
ref_str <-strsplit((toString(readBStringSet(mm_ref))), NULL , fixed = T)
ref_str <- data.frame(lapply(ref_str, function(x) toupper(x)), stringsAsFactors = F)
if (opt$t==TRUE){
prefix<-"tmap"
#build the index
CMD_tmapindex<- paste("tmap index -f", mm_ref , sep=" ")
system(CMD_tmapindex)
#perform alignment with tmap and read count using bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", mm_ref," -r ", input_data, " | samtools view -bt ", mm_ref," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)
} else {
#adapter trimming using cutadapt
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("cutadapt -g ", RACE_adapter, " -e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data," |tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", mm_ref," -i fastq | samtools view -bt ", mm_ref," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)}
} else {
prefix<-"bowtie"
#build the index
CMD_bowindex<- paste("bowtie-build -q -f", mm_ref, "index", sep=" ")
system(CMD_bowindex)
#perform alignment with bowtie and read count using bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("bowtie -p 4 -S -k 1 -v", mismatch, "index", input_data," | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)
} else {
#adapter trimming using cutadapt
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("cutadapt -g", RACE_adapter, "-e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data,"|bowtie -p 4 -S -k 1 -v", mismatch, "index - | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)}
}
#read and merge ref and reads
reads <- read.delim(out_name, header = F, stringsAsFactors = F)
dataframe_counts <-data.frame(dataframe_counts, reads[, 3], stringsAsFactors = F)
dataframe_log10 <-data.frame(dataframe_log10, (log10(reads[, 3])), stringsAsFactors = F)
dataframe_linear <-data.frame(dataframe_linear, (reads[, 3] / sum(reads[, 3]) * 100), stringsAsFactors = F)
#function to delete read_count and index file created
del_files<- function(pattern){
fl_rm<-list.files(".", pattern = pattern, all.files = F, full.names = F)
for(i in fl_rm){
i<-paste("rm", i , sep = " ")
system(i)
}
}
#delete files generated during alignment
del_files("read_count")
del_files("index")
del_files(new_fasta_ref)
#remove R environment variables
rm(reads)
rm(sub_ref)
rm(mm_ref)
rm(ref_str)
rm(refs)
counter <- counter + 1
}
#create CSV files#
nt_reference <- data.frame(nt_reference, check.rows = T)
dataframe_counts[1] <- NULL
dataframe_log10[1] <- NULL
dataframe_linear[1] <- NULL
#name the mismatches columns acording to position and nt tranformation
colnames(nt_reference) <- "nucleotide"
colnames(dataframe_counts) <- target$name
colnames(dataframe_log10) <- target$name
colnames(dataframe_linear) <- target$name
#merge and write csv tables in log and linear
CSV_log <-data.frame(nt_reference,dataframe_log10,check.names = T,check.rows = T)
CSV_linear <-data.frame(nt_reference,dataframe_linear,check.names = T,check.rows = T)
CSV_log[CSV_log == -Inf] <- 0
#focusing on target region can be ajusted acording to experiment
N <- CSV_log[str:end,]
#create and write wildtype table in .csv
binding_region <-data.frame(nt_reference[str:end,],
dataframe_counts[str:end, 1],
dataframe_linear[str:end, 1],
dataframe_log10[str:end, 1],
stringsAsFactors = F)
binding_region[binding_region == -Inf] <- 0
colnames(binding_region) <- c("nucleotide", "counts", "linear", "log10")
#write log10 siRNA region in .tsv
if (opt$notsv==FALSE){
write.table(binding_region, file = paste0(input_name,"_", filename, ".tsv"),sep = "\t",quote = F,row.names = F)
#binding_region <- read.table("siRNA22_1_mm0.tsv", sep = "\t", quote = F, row.names = F, header = T)
}
if (opt$p==TRUE){
#create wildtype linear & log scale graph
pdf(paste0(input_name,"_",prefix, "_", filename, ".pdf"), width=20)
print(paste0("Generating wildtype graph with ",mismatch," mismatches."))
#in 100% linear scale
mp <- barplot(binding_region[,3],
xlab="Binding site",
names.arg=(binding_region[,1]),
las=1,
cex.names = 2.2,
col="darkgrey" ,
main="Novel 5' Ends in linear",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,100))
title(ylab="Novel 5\' Ends (%)", line=2, cex.lab=1.5)
text(mp,binding_region[,3]+5 ,cex = 1.3, adj = 0 ,labels=binding_region[,2] ,srt=90)
#in log10 logarithmic scale
mp <- barplot(binding_region[,4],
xlab="Binding site",
names.arg=(binding_region[,1]),
las=1,
cex.names = 2.2,
col="darkgrey",
main="Novel 5' Ends in logarithmic",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,10))
title(ylab=expression("Novel 5\' Ends (log"[10]*")"), line=2, cex.lab=1.5)
text(mp,binding_region[,4]+0.5 ,cex = 1.3, adj = 0 ,labels=binding_region[,2] ,srt=90)
dev.off()
}
#ggplot2 graph function
print(paste0("Generating multiplot with ",mismatch," mismatches."))
plot_RACEseq<- function(N){
N[,1]<- as.character(N[,1])
values = c("C"="blue", "G"="black",
"A"="green", "T"= "red", "black"="black")
ggraph4 <- function(N, a, b) {
gg<- ggplot(data = N, aes(x = seq_along(N[,1]))) +
geom_col(aes(y = N[,2]), size=1.4,width = 0.4, fill= "lightgrey", colour="black") +
geom_line(aes(y = N[,a], colour = (strsplit((colnames(N)), "_")[[a]][[4]])), size=1.4) +
geom_line(aes(y = N[,(a+1)], colour = (strsplit((colnames(N)), "_")[[(a+1)]][[4]])), size=1.4) +
geom_line(aes(y = N[,(a+2)], colour = (strsplit((colnames(N)), "_")[[(a+2)]][[4]])), size=1.4) +
scale_colour_manual("Wildtype",
breaks = c("C", "G", "A", "T", "black"),
values = values)+
xlab("Binding Site") +
scale_x_discrete() +
coord_cartesian(xlim = c(1, (nrow(N))) ) +
scale_y_continuous(expression("Novel 5\' Ends (log"[10] * ")"), limits = c(-0.3,8) ) +
geom_text(data = NULL,x = c(1:(nrow(N))),y = -0.4,label = N[, 1], size=5) +
geom_text(data = NULL,x = (((nrow(N)) + 1) - b),y = -0.4,label = N[(((nrow(N)) + 1) - b), 1],size = 5, colour = "orange2") +
theme(legend.position = c(.95, .95),
legend.justification = c("right", "top"),
legend.margin = margin(1, 1, 1, 1))+
labs(title=paste0("Nucleotide position ", b))
return(gg)
}
si01 <- ggraph4(N, 3, 1)
si02 <- ggraph4(N, 6, 2)
si03 <- ggraph4(N, 9, 3)
si04 <- ggraph4(N, 12, 4)
si05 <- ggraph4(N, 15, 5)
si06 <- ggraph4(N, 18, 6)
si07 <- ggraph4(N, 21, 7)
si08 <- ggraph4(N, 24, 8)
si09 <- ggraph4(N, 27, 9)
si10 <- ggraph4(N, 30, 10)
si11 <- ggraph4(N, 33, 11)
si12 <- ggraph4(N, 36, 12)
si13 <- ggraph4(N, 39, 13)
si14 <- ggraph4(N, 43, 14)
si15 <- ggraph4(N, 45, 15)
plot2_8<- plot_grid(si02, si03, si04, si05, si06, si07, labels = "AUTO", label_size = 14 , hjust = 0, ncol = 2)
ggsave(filename=paste0(input_name,"_multiplot_seed_mm",mismatch,".pdf"), plot = plot2_8, scale = 1.8 )
plot9_12<- plot_grid(si09, si10, si11, si12, labels = "AUTO", label_size = 14 , hjust = 0)
ggsave(filename=paste0(input_name,"_multiplot_cleavage_mm",mismatch,".pdf"), plot = plot9_12, scale = 0.8, width= 20.4 ,height = 7.8 )
}
plot_RACEseq(N)
}
| /src/RACEseqMM.r | no_license | fxstubbe/RACE-SEQ-lite | R | false | false | 26,478 | r |
#Tell the user the (potentially slow) package-loading phase has started
print("Loading R packages")
#Helper: attach a library, installing it from CRAN first when it is missing.
#The package name is taken unquoted from the call itself (non-standard
#evaluation via match.call), so call it as packages(pkgname).
#Returns the logical result of require() after an install attempt, or
#invisible NULL when the package was already available.
packages <- function(x) {
  #Capture the bare symbol the caller supplied and convert it to a string;
  #the x argument itself is deliberately ignored.
  x <- as.character(match.call()[[2]])
  if (!require(x, character.only = TRUE)) {
    #https avoids fetching packages over an unencrypted connection
    install.packages(pkgs = x, repos = "https://cran.r-project.org")
    require(x, character.only = TRUE)
  }
}
#optparse is attached ahead of the other libraries so the command-line
#options can be parsed as early as possible
suppressMessages(packages(optparse))
#Bootstrap Bioconductor and install the sequencing packages when missing.
#biocLite() expects the package name as a character string: the previous
#bare symbols (biocLite(ShortRead)) would fail with "object not found"
#precisely in the case where the package was absent and needed installing.
#rownames(installed.packages()) restricts the membership test to actual
#package names rather than every cell of the metadata matrix.
source("https://bioconductor.org/biocLite.R")
if (!"ShortRead" %in% rownames(installed.packages())) biocLite("ShortRead")
if (!"Biostrings" %in% rownames(installed.packages())) biocLite("Biostrings")
#List of required libraries to be loaded.
#Each packages() call attaches the library, installing it from CRAN first
#if needed; the names are passed unquoted and captured via match.call().
#ShortRead and Biostrings are Bioconductor packages installed above via
#biocLite; packages() here only needs to attach them.
suppressMessages(packages(tools))
suppressMessages(packages(ShortRead))
suppressMessages(packages(Biostrings))
suppressMessages(packages(stringi))
suppressMessages(packages(ggplot2))
suppressMessages(packages(cowplot))
#List of parsing options loaded
option_list<- list(
make_option(c("-s", "--start"), type="integer", action = "store", default = NA,
help="Input the first nucleotide reference genomic location."),
make_option(c("-e", "--end"), type="integer", action = "store" ,default = NA,
help="Input the last nucleotide reference genomic location."),
make_option(c("-a", "--adapter"), type="character", action = "store" ,default = NA,
help="Input the RACE adapter nucleotide sequence. [default: NO]"),
make_option(c("-m", "--mismatch"), type="integer", default = 0,
help="Input number of mismatches that are allowed during alignement. [default: %default]"),
make_option(c("-p", "--plot"), action="store_true", default = FALSE,
help="Print output graph between the specified genomic locations. [default: NO]"),
make_option(c("-t", "--tmap"), action="store_true", default = FALSE,
help="Use the Tmap aligner instead of Bowtie. [default: NO]"),
make_option(c("--notsv"), action="store_true", default = FALSE ,
help="Do not write output tsv file. [default: NO]"),
make_option(c("-i", "--iterate"), action="store_true", default= FALSE,
help="Create the alternative references to cover all the possible SNPs \n
of the reference between the genomics locations specified by -s and -e\n
and then perform global alignment and generate graph.[default: NO] ")
)
opt = parse_args(OptionParser(description = "RACE-SEQ-lite\n
This is a custom R script for the downstream analysis of RACE-seq data.\n
The pipeline uses common bioinformatics command line packages such as BOWTIE, SAMTOOLS and BEDTOOLS that should be installed system-wide.\n
The script reads the necessary input files from your workind directory and outputs a graph or a tsv file.\n
One fasta and one fastq file can only be in the working directory",
usage = "Rscript %prog [options] -s <integer> -e <integer> -m <integer> \n",
option_list = option_list,
add_help_option = TRUE,
epilogue = "Thank you for using RACE-SEQ lite.
\nFor documentation visit: https://github.com/pantastheo/RACE-SEQ-lite.
\nAuth: Pantazis Theotokis 2018
\nContact: p.theotokis@imperial.ac.uk
\n"))
# Shorthand copies of the parsed options used throughout the script.
# NOTE(review): `str` masks base::str() for the remainder of the script.
str<- opt$s
end<- opt$e
mismatch<- opt$mismatch
RACE_adapter<- opt$a
#Check if the necessary fasta and fastq files are located in the working directory.
#If TRUE load read the files.
#If FALSE exit with a message.
if(!is.na(opt$s) & !is.na(opt$e)) {
#input the reference sequence in .fasta format
reference<- list.files(".", pattern ="fasta", all.files = F, full.names = F)
print(paste0("Reading reference file in fasta format."))
print(paste0("File " ,reference, " found in working directory."))
# Exactly one .fasta reference must be present (checked after the print above).
if ((length(reference))==0) {
stop("No input .fasta reference file available in working directory.")} else if ((length(reference))>=2) {
stop("More than one .fasta reference file in working directory.")
}
#input the data in .fastq or .fastq.gz format
input_data<- list.files(".", pattern="fastq", all.files = F, full.names = F)
print(paste0("Reading data input file in fastq format."))
print(paste0("File " ,input_data, " found in working directory."))
# Exactly one .fastq input must be present; paired-end reads are concatenated upstream.
if ((length(input_data))==0) {
stop("No input .fastq file available in working directory.")
} else if ((length(input_data))>=2) {
stop("More than one .fastq file in working directory. \nFor paired end reads please concatenate and run again")
}
} else
stop("Please input Start and End nucleotide reference genomic locations \nOr type [option] -h for help")
#When the iterate option is FALSE run the standard single-reference pipeline
#below; the SNP-iterating pipeline lives in the matching else branch further down.
if (opt$i==FALSE){
#reading and transforming reference sequence into a one-column data frame of upper-case bases
nt_reference <-strsplit((toString(readBStringSet(reference))), NULL , fixed = T)
nt_reference<- data.frame(lapply(nt_reference, function(x) toupper(x)), stringsAsFactors = F)
#set output names
input_name<- file_path_sans_ext(input_data)
filename <- paste("mm", mismatch, sep = "")
out_name <- paste("read_count_", filename, sep="")
#If tmap TRUE perform alignment using TMAP
if (opt$t==TRUE){
#check if the tmap aligner is installed (`which` exits 0 when found)
if (system("which tmap")==0) {
print(paste0("Generating tmap index files"))
prefix<-"tmap"
#build the index
CMD_tmapindex<- paste("tmap index -f", reference , sep=" ")
system(CMD_tmapindex)
#perform alignment with tmap, then per-base 5' coverage via samtools + bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", reference," -r ", input_data, " | samtools view -bt ", reference," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)
} else {
#Check if cutadapt installed and perform adapter trimming
if (system("which cutadapt")==0){
#adapter trimming using cutadapt, piped straight into the aligner
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("cutadapt -g ", RACE_adapter, " -e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data," |tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", reference," -i fastq | samtools view -bt ", reference," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)
} else {
stop("Cutadapt software is not installed or not in $PATH. Please see documentation for installation.")}
}
} else {
stop("Tmap software is not installed or not in $PATH. Please see documentation for installation.")}
}
else {
#If tmap FALSE perform alignment using bowtie, which is the default option.
#check if bowtie aligner is installed
if (system("which bowtie")==0) {
print(paste0("Generating bowtie index files"))
prefix<-"bowtie"
#build the index
CMD_bowindex<- paste("bowtie-build -q -f", reference, "index", sep=" ")
system(CMD_bowindex)
#perform alignment with bowtie and read count using bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("bowtie -p 4 -S -k 1 -v", mismatch, "index", input_data," | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)
} else {
if (system("which cutadapt")==0) {
#adapter trimming using cutadapt
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("cutadapt -g", RACE_adapter, "-e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data,"|bowtie -p 4 -S -k 1 -v", mismatch, "index - | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)
}else {
stop("Cutadapt software is not installed or not in $PATH. Please see documentation for installation.")}
}
} else {
stop("Bowtie software is not installed or not in $PATH. Please see documentation for installation.")}}
#read per-base counts (bedtools -d output: ref, position, count) and attach the reference bases
reads<- read.delim(out_name, header = F )
dataframe<- data.frame(reads, nt_reference , stringsAsFactors = F)
#calculating the % (col 5) and log10 (col 6) columns
dataframe[,5] <- (dataframe[,3]/sum(dataframe[,3])*100)
dataframe[,6] <- (log10(dataframe[,3]))
dataframe[dataframe== -Inf] <-0
#focusing on target region; can be adjusted according to experiment
binding_region <- dataframe[str:end,]
# Delete every file in the working directory whose name matches `pattern`
# (interpreted as a regular expression by list.files()).
# Uses file.remove() instead of shelling out to `rm`: portable across
# platforms and safe for file names containing spaces or shell metacharacters.
del_files<- function(pattern){
fl_rm<-list.files(".", pattern = pattern, all.files = F, full.names = F)
if (length(fl_rm) > 0) {
file.remove(fl_rm)
}
invisible(NULL)
}
#delete intermediate files generated by the aligners using the function above
del_files("read_count")
del_files("fasta.tmap.")
del_files("aligned.bam")
del_files("out.sam")
del_files("index")
#print the wildtype alignment in tsv format table
if (opt$notsv==FALSE){
print(paste0("Writing results to output tsv"))
write.table(binding_region, file = paste0(input_name,"_",prefix, "_", filename, ".tsv") , sep = "\t",
col.names = c("reference", "position", "count", "nucleotide", "percentage", "log10" ),
row.names = F )
}
if (opt$p==TRUE){
#create wildtype linear & log scale graph (two pages of one PDF)
pdf(paste0(input_name,"_",prefix, "_", filename, ".pdf"), width=20)
print(paste0("Generating graph with ",mismatch," mismatches."))
#in 100% linear scale (column 5 = percentage, labelled with raw counts from column 3)
mp <- barplot(binding_region[,5],
xlab="Binding site",
names.arg=(binding_region[,4]),
las=1,
cex.names = 2.2,
col="darkgrey" ,
main="Novel 5' Ends in linear",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,100))
title(ylab="Novel 5\' Ends (%)", line=2, cex.lab=1.5)
text(mp,binding_region[,5]+5 ,cex = 1.3, adj = 0 ,labels=binding_region[,3] ,srt=90)
#in log10 logarithmic scale (column 6)
mp <- barplot(binding_region[,6],
xlab="Binding site",
names.arg=(binding_region[,4]),
las=1,
cex.names = 2.2,
col="darkgrey",
main="Novel 5' Ends in logarithmic",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,10))
title(ylab=expression("Novel 5\' Ends (log"[10]*")"), line=2, cex.lab=1.5)
text(mp,binding_region[,6]+0.5 ,cex = 1.3, adj = 0 ,labels=binding_region[,3] ,srt=90)
dev.off()
}
} else {
#Iterate mode: build every possible SNP of the reference between the
#specified genomic locations, align against each alternative reference,
#and collect the per-base counts for all of them.
#reading and transforming reference sequence into a one-column data frame
nt_reference <-strsplit((toString(readBStringSet(reference))), NULL , fixed = T)
nt_reference<- data.frame(lapply(nt_reference, function(x) toupper(x)), stringsAsFactors = F)
#set output names
input_name<- file_path_sans_ext(input_data)
filename <- paste("mm", mismatch, sep = "")
out_name <- paste("read_count_", filename, sep="")
#read the original wildtype reference as a single string (used for substitution below)
replicon_str <- (toString(readBStringSet(reference)))
#read and transform the reference
# Build every single-nucleotide variant of the reference between genomic
# positions `str` and `end` (inclusive).
#
# Returns a data frame with one row per target sequence -- the wildtype row
# first, then 3 variants per position in reverse positional order -- and
# columns:
#   name       - "<pos>_<wt>_to_<alt>" label ("wildtype_siRNA" for row 1)
#   target     - the str:end window of that variant reference, as a string
#   nucleotide - the substituted base (NA for the wildtype row)
ref_replace <- function(str, end, reference) {
# Read the reference and split it into a one-column data frame of upper-case bases.
nt_reference <- strsplit((toString(readBStringSet(reference))), NULL , fixed = T)
nt <- data.frame(lapply(nt_reference, function(x) toupper(x)), stringsAsFactors = F)
# Substitution order per wildtype base, kept exactly as the original
# hand-unrolled code produced it (note: G maps to A,T,C -- not alphabetical).
alt_map <- list(A = c("C", "T", "G"),
                C = c("A", "T", "G"),
                T = c("A", "C", "G"),
                G = c("A", "T", "C"))
# Bookkeeping frames start with a single NA placeholder row; after the
# reversal below it becomes the wildtype entry.
count_names <- data.frame(NA)
nt_sub <- data.frame(NA)
# For every position in [str, end] append one full-length reference column
# per alternative base, plus its label and the substituted nucleotide.
for (a in 0:(end - str)) {
wt <- nt[str + a, 1]
for (alt in alt_map[[wt]]) {
nt[, (ncol(nt) + 1)] <- nt[, 1]
nt[str + a, ncol(nt)] <- alt
count_names[nrow(count_names) + 1, ] <-
paste((str + a), wt, "to", alt, sep = "_")
nt_sub[nrow(nt_sub) + 1, ] <- alt
}
}
# Reverse the bookkeeping vectors; the leading NA placeholder ends up last
# and is relabelled as the wildtype entry.
names_list <-
data.frame(count_names[((((end - str) + 1) * 3) + 1):1,], stringsAsFactors = F)
names_list[(nrow(names_list)),] <- "wildtype_siRNA"
nt_sub_list <-
data.frame(nt_sub[((((end - str) + 1) * 3) + 1):1,], stringsAsFactors = F)
# Extract the siRNA window of every generated reference (wildtype column 1
# plus all variants), reverse to match the reversed labels, and paste each
# column into one target string.
ref_list <- nt[str:end, 1:((((end - str) + 1) * 3) + 1)]
trans_list <-
as.data.frame(t(ref_list[, ncol(ref_list):1]), stringsAsFactors = F)
for (i in (c(1:nrow(names_list)))) {
names_list[i, 2] <-
paste(trans_list[i, 1:(ncol(trans_list))],sep = "", collapse = "")
}
names_list<- cbind(names_list, nt_sub_list)
colnames(names_list) <- c("name", "target", "nucleotide")
# Rotate the wildtype row from the bottom to the top of the table.
names_list <-
rbind((names_list[(nrow(names_list)),]), names_list)
names_list <- names_list[-c(nrow(names_list)),]
return(names_list)
}
#call the ref_replace function to enumerate all SNP references + labels
target <- ref_replace(str, end, reference)
#one accumulator data frame per output flavour, seeded with the reference bases
dataframe_counts <- data.frame(nt_reference, stringsAsFactors = F)
dataframe_log10 <- data.frame(nt_reference , stringsAsFactors = F)
dataframe_linear <- data.frame(nt_reference , stringsAsFactors = F)
#extract the wildtype siRNA sequence (the window that gets substituted per variant)
siRNA_ref <- subseq((replicon_str), start = str, end = end)
#check if the external command-line tools are installed before the long loop
#If not exit with a message
if (system("which bowtie")==0) {
print("Bowtie aligner installed and on $PATH")}
else {stop("Bowtie software is not installed or not on $PATH. Please see documentation for installation.")}
if (opt$t==TRUE ){
if (system("which tmap")==0) {
print("Tmap aligner installed and on $PATH")}
else if (system("which tmap")==1) {
print(paste0("Tmap software is not installed or not on $PATH."))
stop("Please see documentation for installation.")}
}
if (!is.na(opt$a)){
if (system("which cutadapt")==0) {
print("Cutadapter trimmer installed and on $PATH")}
# NOTE(review): this branch probes "Cutadapt" with a capital C, unlike the
# lower-case check above -- it only works because `which` fails for both.
else if (system("which Cutadapt")==1){
print(paste0("Cutadapt trimmer is not installed or not on $PATH."))
stop("Please see documentation for installation.")}
}
#progress counter; starts at 100 so `counter - 99` prints 1-based progress
counter <- 100
#align the reads against every mismatch reference, one loop iteration per variant
for (i in target$target) {
print(paste("working on alignment",counter - 99 ,"of",length(target$target)))
#reference and output prefix name (the variant's target sequence itself)
prefix <- i
#substitute the wildtype siRNA window in the full reference with this variant
sub_ref <-stri_replace_all_fixed(replicon_str, pattern = siRNA_ref, replacement = i)
new_fasta_ref <- paste(prefix, "_ref.fasta", sep = "")
#write the new reference in .fasta format
writeFasta (DNAStringSet(sub_ref), new_fasta_ref, mode = "w")
#locate the reference file just written (matched back by its prefix)
mm_ref <-list.files(".",pattern = prefix,all.files = F,full.names = F)
#reading and transforming reference sequence
ref_str <-strsplit((toString(readBStringSet(mm_ref))), NULL , fixed = T)
ref_str <- data.frame(lapply(ref_str, function(x) toupper(x)), stringsAsFactors = F)
if (opt$t==TRUE){
prefix<-"tmap"
#build the index
CMD_tmapindex<- paste("tmap index -f", mm_ref , sep=" ")
system(CMD_tmapindex)
#perform alignment with tmap and read count using bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", mm_ref," -r ", input_data, " | samtools view -bt ", mm_ref," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)
} else {
#adapter trimming using cutadapt
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using tmap"))
CMD_tmap<- paste("cutadapt -g ", RACE_adapter, " -e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data," |tmap map1 -a 0 -g 3 --max-mismatches ",mismatch," -f ", mm_ref," -i fastq | samtools view -bt ", mm_ref," - | genomeCoverageBed -d -5 -ibam stdin > ",out_name, sep="")
system(CMD_tmap)}
} else {
prefix<-"bowtie"
#build the index
CMD_bowindex<- paste("bowtie-build -q -f", mm_ref, "index", sep=" ")
system(CMD_bowindex)
#perform alignment with bowtie and read count using bedtools
if (is.na(opt$a)){
#no adapter trimming
print(paste0("Performing alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("bowtie -p 4 -S -k 1 -v", mismatch, "index", input_data," | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)
} else {
#adapter trimming using cutadapt
print(paste0("Performing adapter trimming and alignment with ", mismatch, " mismatch using bowtie"))
CMD_bow<- paste("cutadapt -g", RACE_adapter, "-e0 --no-indels -m10 --discard-untrimmed --quiet ", input_data,"|bowtie -p 4 -S -k 1 -v", mismatch, "index - | samtools view -bS - | genomeCoverageBed -d -5 -ibam stdin >", out_name, sep=" ")
system(CMD_bow)}
}
#append this variant's counts as a new column of each accumulator frame
reads <- read.delim(out_name, header = F, stringsAsFactors = F)
dataframe_counts <-data.frame(dataframe_counts, reads[, 3], stringsAsFactors = F)
dataframe_log10 <-data.frame(dataframe_log10, (log10(reads[, 3])), stringsAsFactors = F)
dataframe_linear <-data.frame(dataframe_linear, (reads[, 3] / sum(reads[, 3]) * 100), stringsAsFactors = F)
# Delete every file in the working directory whose name matches `pattern`
# (interpreted as a regular expression by list.files()).
# Uses file.remove() instead of shelling out to `rm`: portable across
# platforms and safe for file names containing spaces or shell metacharacters.
del_files<- function(pattern){
fl_rm<-list.files(".", pattern = pattern, all.files = F, full.names = F)
if (length(fl_rm) > 0) {
file.remove(fl_rm)
}
invisible(NULL)
}
#delete intermediate files generated during this alignment iteration
del_files("read_count")
del_files("index")
del_files(new_fasta_ref)
#remove per-iteration R environment variables before the next pass
rm(reads)
rm(sub_ref)
rm(mm_ref)
rm(ref_str)
# NOTE(review): `refs` is never created anywhere in this loop, so this rm()
# emits a warning on every iteration.
rm(refs)
counter <- counter + 1
}
#assemble the per-variant tables; drop the duplicated reference column (col 1)
nt_reference <- data.frame(nt_reference, check.rows = T)
dataframe_counts[1] <- NULL
dataframe_log10[1] <- NULL
dataframe_linear[1] <- NULL
#name the mismatch columns according to position and nucleotide transformation
colnames(nt_reference) <- "nucleotide"
colnames(dataframe_counts) <- target$name
colnames(dataframe_log10) <- target$name
colnames(dataframe_linear) <- target$name
#merge reference bases with the log and linear tables
CSV_log <-data.frame(nt_reference,dataframe_log10,check.names = T,check.rows = T)
CSV_linear <-data.frame(nt_reference,dataframe_linear,check.names = T,check.rows = T)
CSV_log[CSV_log == -Inf] <- 0
#focusing on target region; can be adjusted according to experiment
N <- CSV_log[str:end,]
#build the wildtype-only summary table (column 1 of each accumulator = wildtype)
binding_region <-data.frame(nt_reference[str:end,],
dataframe_counts[str:end, 1],
dataframe_linear[str:end, 1],
dataframe_log10[str:end, 1],
stringsAsFactors = F)
binding_region[binding_region == -Inf] <- 0
colnames(binding_region) <- c("nucleotide", "counts", "linear", "log10")
#write log10 siRNA region in .tsv
if (opt$notsv==FALSE){
write.table(binding_region, file = paste0(input_name,"_", filename, ".tsv"),sep = "\t",quote = F,row.names = F)
#binding_region <- read.table("siRNA22_1_mm0.tsv", sep = "\t", quote = F, row.names = F, header = T)
}
if (opt$p==TRUE){
#create wildtype linear & log scale graph (two pages of one PDF)
pdf(paste0(input_name,"_",prefix, "_", filename, ".pdf"), width=20)
print(paste0("Generating wildtype graph with ",mismatch," mismatches."))
#in 100% linear scale (column 3 = linear %, labelled with raw counts from column 2)
mp <- barplot(binding_region[,3],
xlab="Binding site",
names.arg=(binding_region[,1]),
las=1,
cex.names = 2.2,
col="darkgrey" ,
main="Novel 5' Ends in linear",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,100))
title(ylab="Novel 5\' Ends (%)", line=2, cex.lab=1.5)
text(mp,binding_region[,3]+5 ,cex = 1.3, adj = 0 ,labels=binding_region[,2] ,srt=90)
#in log10 logarithmic scale (column 4)
mp <- barplot(binding_region[,4],
xlab="Binding site",
names.arg=(binding_region[,1]),
las=1,
cex.names = 2.2,
col="darkgrey",
main="Novel 5' Ends in logarithmic",
cex.main=2.3,
cex.lab=1.5,
ylim=c(0,10))
title(ylab=expression("Novel 5\' Ends (log"[10]*")"), line=2, cex.lab=1.5)
text(mp,binding_region[,4]+0.5 ,cex = 1.3, adj = 0 ,labels=binding_region[,2] ,srt=90)
dev.off()
}
#ggplot2 graph function
print(paste0("Generating multiplot with ",mismatch," mismatches."))
# Build per-position multiplots from the log10 spectra table `N`:
# grey bars show the wildtype 5' end profile (column 2), coloured lines show
# the three single-nucleotide variants at each binding-site position.
plot_RACEseq<- function(N){
N[,1]<- as.character(N[,1])
# line colour per substituted nucleotide
values = c("C"="blue", "G"="black",
"A"="green", "T"= "red", "black"="black")
# One panel for binding-site position `b`; `a` is the index of the first of
# the three variant columns for that position. Variant columns come in
# triples starting at column 3, so a == 3 * b. The legend label for each
# line is the substituted base, recovered as field 4 of the
# "<pos>_<wt>_to_<alt>" column name.
ggraph4 <- function(N, a, b) {
gg<- ggplot(data = N, aes(x = seq_along(N[,1]))) +
geom_col(aes(y = N[,2]), size=1.4,width = 0.4, fill= "lightgrey", colour="black") +
geom_line(aes(y = N[,a], colour = (strsplit((colnames(N)), "_")[[a]][[4]])), size=1.4) +
geom_line(aes(y = N[,(a+1)], colour = (strsplit((colnames(N)), "_")[[(a+1)]][[4]])), size=1.4) +
geom_line(aes(y = N[,(a+2)], colour = (strsplit((colnames(N)), "_")[[(a+2)]][[4]])), size=1.4) +
scale_colour_manual("Wildtype",
breaks = c("C", "G", "A", "T", "black"),
values = values)+
xlab("Binding Site") +
scale_x_discrete() +
coord_cartesian(xlim = c(1, (nrow(N))) ) +
scale_y_continuous(expression("Novel 5\' Ends (log"[10] * ")"), limits = c(-0.3,8) ) +
geom_text(data = NULL,x = c(1:(nrow(N))),y = -0.4,label = N[, 1], size=5) +
geom_text(data = NULL,x = (((nrow(N)) + 1) - b),y = -0.4,label = N[(((nrow(N)) + 1) - b), 1],size = 5, colour = "orange2") +
theme(legend.position = c(.95, .95),
legend.justification = c("right", "top"),
legend.margin = margin(1, 1, 1, 1))+
labs(title=paste0("Nucleotide position ", b))
return(gg)
}
si01 <- ggraph4(N, 3, 1)
si02 <- ggraph4(N, 6, 2)
si03 <- ggraph4(N, 9, 3)
si04 <- ggraph4(N, 12, 4)
si05 <- ggraph4(N, 15, 5)
si06 <- ggraph4(N, 18, 6)
si07 <- ggraph4(N, 21, 7)
si08 <- ggraph4(N, 24, 8)
si09 <- ggraph4(N, 27, 9)
si10 <- ggraph4(N, 30, 10)
si11 <- ggraph4(N, 33, 11)
si12 <- ggraph4(N, 36, 12)
si13 <- ggraph4(N, 39, 13)
# fixed: was ggraph4(N, 43, 14), which broke the a = 3*b column pattern and
# overlapped position 15's columns while skipping column 42 entirely
si14 <- ggraph4(N, 42, 14)
si15 <- ggraph4(N, 45, 15)
plot2_8<- plot_grid(si02, si03, si04, si05, si06, si07, labels = "AUTO", label_size = 14 , hjust = 0, ncol = 2)
ggsave(filename=paste0(input_name,"_multiplot_seed_mm",mismatch,".pdf"), plot = plot2_8, scale = 1.8 )
plot9_12<- plot_grid(si09, si10, si11, si12, labels = "AUTO", label_size = 14 , hjust = 0)
ggsave(filename=paste0(input_name,"_multiplot_cleavage_mm",mismatch,".pdf"), plot = plot9_12, scale = 0.8, width= 20.4 ,height = 7.8 )
}
plot_RACEseq(N)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kzp2.R
\name{kzp2}
\alias{kzp2}
\alias{smooth.kzp2}
\alias{kzp2.summary}
\title{Check Images' Motion Scales with 2D KZ Periodogram Signals}
\usage{
kzp2(x, k = 1, m = dim(x)/k, ...)
smooth.kzp2(rpg, dpct = 0.01, w = round(dim(rpg)/4), k = 1, ...)
kzp2.summary(spg, rg.x, rg.y, num = 10)
}
\arguments{
\item{x}{Data array of 2D wave field. Missing values are allowed.
Limited to 2D arrays for current version.}
\item{k}{The number of iterations for the KZFT. Default is 1.}
\item{m}{The window size for a regular Fourier transform.
Default value is set to data array size.}
\item{...}{Arguments to be passed to methods.
\itemize{
\item \code{k : } The number of iteration times of KZFT
\item \code{n : } The sampling frequency rate as a multiplication
of the Fourier frequencies
\item \code{p : } The distance between two successive intervals as
a percentage of the total length of the data series
}}
\item{rpg}{Array of raw 2D periodogram. Usually it is part of output of \code{kzp2}.}
\item{dpct}{A pre-specified percentage of total variation.
Default value is 1\%.}
\item{w}{Smoothing window size.}
\item{spg}{Array of smoothed 2D periodogram. It could be output of \code{smooth.kzp2}.}
\item{rg.x}{Frequency range for x direction. Defaults to c(0, 0.5).}
\item{rg.y}{Frequency range for y direction.
Defaults to the same value of the range for x direction.}
\item{num}{Wave numbers. Defaults to 10.}
}
\value{
Returned value of function \code{kzp2} is a data list of
periodogram information, including data array \emph{kzp2d} for 2D
periodogram values, and two frequency vectors, \emph{freq.x} and
\emph{freq.y} for \emph{x} and \emph{y} direction, respectively.
\code{smooth.kzp2} only outputs the array of smoothed values.
\code{kzp2.summary} returns a data list for suggested wave
parameters, including frequency and direction values.
}
\description{
Functions used to reveal directional and scale information
with 2D KZ periodograms for spatial motions covered by heavy noises.
One can get 2D raw periodogram with function \code{kzp2}, and smooth the
2D periodogram with function \code{smooth.kzp2}.
Function \code{kzp2.summary} can help to summarize direction and frequency
information from smoothed 2D KZ periodogram. The input should be a 2D KZ
periodogram data with frequency range (0, 0.5] on both x- and y- axis.
}
\details{
KZ 2D raw spectrum is calculated based on \code{kz.ft}.
The smoothing method is an extension of \code{kzft::smooth.kzp}.
See introduction of DZ method in \code{kzft::smooth.kzp} for more
information.
}
\examples{
dx <- 100 # x range
dy <- 120 # y range
b <- expand.grid(x=1:dx, y=1:dy)
q1 <- pi/6; f1 <- 0.2;
b$v1 <- sin(f1*2*pi*(b$x*cos(q1)+b$y*sin(q1))+100*runif(1))
q2 <- pi/4; f2 <- 0.08;
b$v2 <- sin(f2*2*pi*(b$x*cos(q2)+b$y*sin(q2))+100*runif(1))
a <- array(0,c(dx,dy))
a[as.matrix(b[,1:2])] <- b$v1 + 1.5*b$v2
a <- a + 10*matrix(rnorm(dx*dy,0,1),ncol=dy)
rp <- kzp2(a) # raw 2D spectrum
fy <- rp$freq.y; fx <- rp$freq.x; rp <- rp$kzp2d
# smoothing 2D spectrum 2 times
sp <- smooth.kzp2(rp,0.01,k=2)
par(mfrow=c(2,1), cex=0.5)
persp(x=fx, y=fy, z=rp, expand =0.5,
main = "Raw 2D KZ Periodogram", ltheta=40, shade=0.75,
theta=-30, phi=15, zlab="",xlab="x", ylab="y",
ticktype="detailed", col="lightblue")
persp(x=fx, y=fy, z=sp, expand =0.5,
main = "Smoothed 2D KZ Periodogram", ltheta=40, shade=0.75,
theta=-30, phi=25, zlab="",xlab="x", ylab="y",
ticktype="detailed", col="lightblue")
par(mfrow=c(1,1), cex=1)
kzp2.summary(sp) # direction & frequency
}
\seealso{
\code{\link{kzpdr}}, \code{\link{kzpdr.eval}}, \code{\link{kzpdr.spikes}}
}
\concept{
Kolmogorov-Zurbenko periodogram
2-dimensional periodogram
2D periodogram
}
\keyword{2D-periodogram}
| /man/kzp2.Rd | no_license | Rmonsoon/kzfs | R | false | true | 4,021 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kzp2.R
\name{kzp2}
\alias{kzp2}
\alias{smooth.kzp2}
\alias{kzp2.summary}
\title{Check Images' Motion Scales with 2D KZ Periodogram Signals}
\usage{
kzp2(x, k = 1, m = dim(x)/k, ...)
smooth.kzp2(rpg, dpct = 0.01, w = round(dim(rpg)/4), k = 1, ...)
kzp2.summary(spg, rg.x, rg.y, num = 10)
}
\arguments{
\item{x}{Data array of 2D wave field. Missing values are allowed.
Limited to 2D arrays for current version.}
\item{k}{The number of iterations for the KZFT. Default is 1.}
\item{m}{The window size for a regular Fourier transform.
Default value is set to data array size.}
\item{...}{Arguments to be passed to methods.
\itemize{
\item \code{k : } The number of iteration times of KZFT
\item \code{n : } The sampling frequency rate as a multiplication
of the Fourier frequencies
\item \code{p : } The distance between two successive intervals as
a percentage of the total length of the data series
}}
\item{rpg}{Array of raw 2D periodogram. Usually it is part of output of \code{kzp2}.}
\item{dpct}{A pre-specified percentage of total variation.
Default value is 1\%.}
\item{w}{Smoothing window size.}
\item{spg}{Array of smoothed 2D periodogram. It could be output of \code{smooth.kzp2}.}
\item{rg.x}{Frequency range for x direction. Defaults to c(0, 0.5).}
\item{rg.y}{Frequency range for y direction.
Defaults to the same value of the range for x direction.}
\item{num}{Wave numbers. Defaults to 10.}
}
\value{
Returned value of function \code{kzp2} is a data list of
periodogram information, including data array \emph{kzp2d} for 2D
periodogram values, and two frequency vectors, \emph{freq.x} and
\emph{freq.y} for \emph{x} and \emph{y} direction, respectively.
\code{smooth.kzp2} only outputs the array of smoothed values.
\code{kzp2.summary} returns a data list for suggested wave
parameters, including frequency and direction values.
}
\description{
Functions used to reveal directional and scale information
with 2D KZ periodograms for spatial motions covered by heavy noises.
One can get 2D raw periodogram with function \code{kzp2}, and smooth the
2D periodogram with function \code{smooth.kzp2}.
Function \code{kzp2.summary} can help to summarize direction and frequency
information from smoothed 2D KZ periodogram. The input should be a 2D KZ
periodogram data with frequency range (0, 0.5] on both x- and y- axis.
}
\details{
KZ 2D raw spectrum is calculated based on \code{kz.ft}.
The smoothing method is an extension of \code{kzft::smooth.kzp}.
See introduction of DZ method in \code{kzft::smooth.kzp} for more
information.
}
\examples{
dx <- 100 # x range
dy <- 120 # y range
b <- expand.grid(x=1:dx, y=1:dy)
q1 <- pi/6; f1 <- 0.2;
b$v1 <- sin(f1*2*pi*(b$x*cos(q1)+b$y*sin(q1))+100*runif(1))
q2 <- pi/4; f2 <- 0.08;
b$v2 <- sin(f2*2*pi*(b$x*cos(q2)+b$y*sin(q2))+100*runif(1))
a <- array(0,c(dx,dy))
a[as.matrix(b[,1:2])] <- b$v1 + 1.5*b$v2
a <- a + 10*matrix(rnorm(dx*dy,0,1),ncol=dy)
rp <- kzp2(a) # raw 2D spectrum
fy <- rp$freq.y; fx <- rp$freq.x; rp <- rp$kzp2d
# smoothing 2D spectrum 2 times
sp <- smooth.kzp2(rp,0.01,k=2)
par(mfrow=c(2,1), cex=0.5)
persp(x=fx, y=fy, z=rp, expand =0.5,
main = "Raw 2D KZ Periodogram", ltheta=40, shade=0.75,
theta=-30, phi=15, zlab="",xlab="x", ylab="y",
ticktype="detailed", col="lightblue")
persp(x=fx, y=fy, z=sp, expand =0.5,
main = "Smoothed 2D KZ Periodogram", ltheta=40, shade=0.75,
theta=-30, phi=25, zlab="",xlab="x", ylab="y",
ticktype="detailed", col="lightblue")
par(mfrow=c(1,1), cex=1)
kzp2.summary(sp) # direction & frequency
}
\seealso{
\code{\link{kzpdr}}, \code{\link{kzpdr.eval}}, \code{\link{kzpdr.spikes}}
}
\concept{
Kolmogorov-Zurbenko periodogram
2-dimensional periodogram
2D periodogram
}
\keyword{2D-periodogram}
|
# Import a lookup table e.g. mfdb_import_taxonomy(mdb, "species", read.csv('species.csv'))
# data_in should have columns id, name, description (extra_cols controls the
# trailing columns). Inserts rows whose name is new and updates rows whose
# name exists but whose extra columns changed; existing ids are preserved.
mfdb_import_taxonomy <- function (mdb, table_name, data_in, extra_cols = c('description')) {
# Is table_name one of the recognised tables?
if (!(table_name %in% mfdb_taxonomy | table_name %in% mfdb_cs_taxonomy)) {
stop("Unknown taxonomy table ", table_name)
}
# Check there's something to do first
if (nrow(data_in) == 0) {
mdb$logger$info(paste0("Taxonomy ", table_name ," no updates to make"))
return()
}
# Every taxonomy row carries a t_group column; default it to NA when absent.
if (!('t_group' %in% names(data_in))) {
data_in$t_group <- c(NA)
}
extra_cols <- c('t_group', extra_cols)
# Rename the incoming id column to the table-specific "<table>_id" and keep
# only the columns the table stores.
id_col <- paste0(table_name, '_id')
data_in <- data_in[c('id', 'name', extra_cols)]
names(data_in) <- c(id_col, 'name', extra_cols)
# Crush factors in data.frame, convert integer names to character
for (n in names(data_in)) {
if (n == "name" || is.factor(data_in[[n]])) {
data_in[[n]] <- as.character(data_in[[n]])
}
}
# Fetch all existing ids, quit if all are there
existing <- mfdb_fetch(mdb,
"SELECT ", id_col, ", name, ", paste(extra_cols, collapse = ", "),
" FROM ", table_name,
" ORDER BY 1")
# Throw away rows already in the database with identical extra columns:
# merge() keeps only the (name, extra_cols) combinations present in both.
if (nrow(existing) > 0) {
data_in <- data_in[!(data_in$name %in% merge(
existing[, c('name', extra_cols)],
data_in[, c('name', extra_cols)])$name), ]
if (nrow(data_in) == 0) {
mdb$logger$info(paste0("Taxonomy ", table_name ," up-to-date"))
return()
}
}
mdb$logger$info(paste0("Taxonomy ", table_name ," needs updating"))
# Apply inserts and updates atomically.
mfdb_transaction(mdb, {
# Rows whose name is not in the table yet are inserted
new_data <- data_in[data_in$name %in% setdiff(data_in$name, existing$name), ]
# If some ids appear in both new and existing, give the new rows fresh IDs
# past the current maximum to avoid primary-key collisions.
overlapping_ids <- intersect(new_data[[id_col]], existing[[id_col]])
if (length(overlapping_ids) > 0) {
new_data[match(overlapping_ids, new_data[[id_col]]), id_col] <-
seq(max(existing[[id_col]]) + 1, length.out = length(overlapping_ids))
}
mfdb_insert(mdb, table_name, new_data)
# Rows with matching names should be updated, but existing ids kept
if (nrow(existing) > 0) mfdb_update(mdb,
table_name,
merge(existing[, c(id_col, 'name')], data_in[, c('name', extra_cols)]),
where = c())
})
invisible(NULL)
}
# Import any cs_specific taxonomies
# Import a case-study-specific taxonomy (areacell, vessel, tow, ...).
# Selects the extra columns relevant for the given taxonomy, sanitises all
# possible input columns (defaulting missing ones to NA) and delegates to
# mfdb_import_taxonomy, which keeps only id/name/extra_cols.
mfdb_import_cs_taxonomy <- function(mdb, taxonomy_name, data_in) {
    if (!(taxonomy_name %in% mfdb_cs_taxonomy)) {
        stop(
            "Unknown taxonomy name '", taxonomy_name,
            "' should be one of ", paste(mfdb_cs_taxonomy, collapse = ", "))
    }
    # Each taxonomy stores a different set of extra columns
    if (taxonomy_name == 'areacell') {
        extra_cols <- c('size')
    } else if (taxonomy_name == 'vessel') {
        extra_cols <- c('vessel_type_id', 'full_name', 'length', 'power', 'tonnage')
    } else if (taxonomy_name == 'tow') {
        extra_cols <- c('latitude', 'longitude', 'depth', 'length')
    } else {
        extra_cols <- c('description')
    }
    # Build a normalised data.frame covering the union of all columns any
    # cs taxonomy might need; ids default to row numbers when not supplied
    mfdb_import_taxonomy(mdb, taxonomy_name,
        data.frame(
            id = sanitise_col(mdb, data_in, 'id', default = seq_len(length(data_in$name))),
            name = sanitise_col(mdb, data_in, 'name'),
            size = sanitise_col(mdb, data_in, 'size', default = c(NA)),
            vessel_type_id = sanitise_col(mdb, data_in, 'vessel_type', lookup = 'vessel_type', default = c(NA)),
            full_name = sanitise_col(mdb, data_in, 'full_name', default = c(NA)),
            length = sanitise_col(mdb, data_in, 'length', default = c(NA)),
            power = sanitise_col(mdb, data_in, 'power', default = c(NA)),
            tonnage = sanitise_col(mdb, data_in, 'tonnage', default = c(NA)),
            latitude = sanitise_col(mdb, data_in, 'latitude', default = c(NA)),
            longitude = sanitise_col(mdb, data_in, 'longitude', default = c(NA)),
            depth = sanitise_col(mdb, data_in, 'depth', default = c(NA)),
            description = sanitise_col(mdb, data_in, 'description', default = c("")),
            stringsAsFactors = FALSE),
        extra_cols = extra_cols)
    if (taxonomy_name == 'areacell' && 'division' %in% colnames(data_in)) {
        # Import division data if available
        division_data <- data_in[,c('name', 'division'), drop = FALSE]
        colnames(division_data) <- c('areacell', 'division')
        mfdb_import_division(mdb, division_data)
    }
    invisible(NULL)
}
# Convenience wrappers: import a specific case-study taxonomy by name
mfdb_import_area <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'areacell', data_in)
mfdb_import_sampling_type <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'sampling_type', data_in)
mfdb_import_tow_taxonomy <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'tow', data_in)
mfdb_import_vessel_taxonomy <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'vessel', data_in)
# Import divisions
# Import areacell -> division mappings, replacing any existing rows for the
# divisions being (re)defined.
#
# mdb:     mfdb connection object
# data_in: either a data.frame with 'division' and 'areacell' columns, or a
#          named list where each name is a division and each element is a
#          vector of its areacells.
mfdb_import_division <- function (mdb, data_in) {
    if(is.data.frame(data_in)) {
        if (length(intersect(colnames(data_in), c('division', 'areacell'))) < 2) {
            stop("data.frame needs both division and areacell columns")
        }
        data_in <- data.frame(
            division = sanitise_col(mdb, data_in, 'division'),
            areacell_id = sanitise_col(mdb, data_in, 'areacell', lookup = 'areacell'),
            stringsAsFactors = FALSE)
    } else if(is.list(data_in)) {
        # Flatten list-of-vectors into a (division, areacell_id) data.frame
        data_in <- data.frame(
            division = unlist(lapply(names(data_in), function(n) { rep(n, length(data_in[[n]])) })),
            areacell_id = sanitise_col(mdb, data.frame(areacell = unlist(data_in)), 'areacell', lookup = 'areacell'))
    } else {
        stop("data_in should be a list of areacell vectors")
    }
    mfdb_transaction(mdb, {
        # Remove existing mappings for the affected divisions, then insert
        # the new rows. dbExecute (not dbSendQuery) is the correct DBI call
        # for a data-modifying statement; dbSendQuery leaves an open result
        # set that is never cleared here.
        dbExecute(mdb$db, paste0(
            "DELETE FROM division",
            " WHERE division IN ", sql_quote(unique(data_in$division), always_bracket = TRUE),
            ""))
        mfdb_insert(mdb, 'division', data_in)
    })
    # Return invisibly, consistent with the other mfdb_import_* functions
    invisible(NULL)
}
| /R/mfdb_import_taxonomy.R | no_license | pfrater/mfdb | R | false | false | 6,291 | r | # Import a lookup table e.g. mfdb_import_taxonomy(mdb, "species", read.csv('species.csv'))
# data_in should have columns id, name, description
mfdb_import_taxonomy <- function (mdb, table_name, data_in, extra_cols = c('description')) {
# Is table_name one of the recognised tables?
if (!(table_name %in% mfdb_taxonomy | table_name %in% mfdb_cs_taxonomy)) {
stop("Unknown taxonomy table ", table_name)
}
# Check there's something to do first
if (nrow(data_in) == 0) {
mdb$logger$info(paste0("Taxonomy ", table_name ," no updates to make"))
return()
}
if (!('t_group' %in% names(data_in))) {
data_in$t_group <- c(NA)
}
extra_cols <- c('t_group', extra_cols)
# Order incoming data by id
id_col <- paste0(table_name, '_id')
data_in <- data_in[c('id', 'name', extra_cols)]
names(data_in) <- c(id_col, 'name', extra_cols)
# Crush factors in data.frame, convert integer names to character
for (n in names(data_in)) {
if (n == "name" || is.factor(data_in[[n]])) {
data_in[[n]] <- as.character(data_in[[n]])
}
}
# Fetch all existing ids, quit if all are there
existing <- mfdb_fetch(mdb,
"SELECT ", id_col, ", name, ", paste(extra_cols, collapse = ", "),
" FROM ", table_name,
" ORDER BY 1")
# Throw away rows which don't need updating
if (nrow(existing) > 0) {
data_in <- data_in[!(data_in$name %in% merge(
existing[, c('name', extra_cols)],
data_in[, c('name', extra_cols)])$name), ]
if (nrow(data_in) == 0) {
mdb$logger$info(paste0("Taxonomy ", table_name ," up-to-date"))
return()
}
}
mdb$logger$info(paste0("Taxonomy ", table_name ," needs updating"))
mfdb_transaction(mdb, {
# New rows should be inserted
new_data <- data_in[data_in$name %in% setdiff(data_in$name, existing$name), ]
# If some ids appear in both new and existing, give them new IDs.
overlapping_ids <- intersect(new_data[[id_col]], existing[[id_col]])
if (length(overlapping_ids) > 0) {
new_data[match(overlapping_ids, new_data[[id_col]]), id_col] <-
seq(max(existing[[id_col]]) + 1, length.out = length(overlapping_ids))
}
mfdb_insert(mdb, table_name, new_data)
# Rows with matching names should be updated, but existing ids kept
if (nrow(existing) > 0) mfdb_update(mdb,
table_name,
merge(existing[, c(id_col, 'name')], data_in[, c('name', extra_cols)]),
where = c())
})
invisible(NULL)
}
# Import any cs_specific taxonomies
mfdb_import_cs_taxonomy <- function(mdb, taxonomy_name, data_in) {
if (!(taxonomy_name %in% mfdb_cs_taxonomy)) {
stop(
"Unknown taxonomy name '", taxonomy_name,
"' should be one of ", paste(mfdb_cs_taxonomy, collapse = ", "))
}
if (taxonomy_name == 'areacell') {
extra_cols <- c('size')
} else if (taxonomy_name == 'vessel') {
extra_cols <- c('vessel_type_id', 'full_name', 'length', 'power', 'tonnage')
} else if (taxonomy_name == 'tow') {
extra_cols <- c('latitude', 'longitude', 'depth', 'length')
} else {
extra_cols <- c('description')
}
mfdb_import_taxonomy(mdb, taxonomy_name,
data.frame(
id = sanitise_col(mdb, data_in, 'id', default = seq_len(length(data_in$name))),
name = sanitise_col(mdb, data_in, 'name'),
size = sanitise_col(mdb, data_in, 'size', default = c(NA)),
vessel_type_id = sanitise_col(mdb, data_in, 'vessel_type', lookup = 'vessel_type', default = c(NA)),
full_name = sanitise_col(mdb, data_in, 'full_name', default = c(NA)),
length = sanitise_col(mdb, data_in, 'length', default = c(NA)),
power = sanitise_col(mdb, data_in, 'power', default = c(NA)),
tonnage = sanitise_col(mdb, data_in, 'tonnage', default = c(NA)),
latitude = sanitise_col(mdb, data_in, 'latitude', default = c(NA)),
longitude = sanitise_col(mdb, data_in, 'longitude', default = c(NA)),
depth = sanitise_col(mdb, data_in, 'depth', default = c(NA)),
description = sanitise_col(mdb, data_in, 'description', default = c("")),
stringsAsFactors = FALSE),
extra_cols = extra_cols)
if (taxonomy_name == 'areacell' && 'division' %in% colnames(data_in)) {
# Import division data if available
division_data <- data_in[,c('name', 'division'), drop = FALSE]
colnames(division_data) <- c('areacell', 'division')
mfdb_import_division(mdb, division_data)
}
invisible(NULL)
}
mfdb_import_area <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'areacell', data_in)
mfdb_import_sampling_type <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'sampling_type', data_in)
mfdb_import_tow_taxonomy <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'tow', data_in)
mfdb_import_vessel_taxonomy <- function(mdb, data_in) mfdb_import_cs_taxonomy(mdb, 'vessel', data_in)
# Import divisions
mfdb_import_division <- function (mdb, data_in) {
if(is.data.frame(data_in)) {
if (length(intersect(colnames(data_in), c('division', 'areacell'))) < 2) {
stop("data.frame needs both division and areacell columns")
}
data_in <- data.frame(
division = sanitise_col(mdb, data_in, 'division'),
areacell_id = sanitise_col(mdb, data_in, 'areacell', lookup = 'areacell'),
stringsAsFactors = FALSE)
} else if(is.list(data_in)) {
data_in <- data.frame(
division = unlist(lapply(names(data_in), function(n) { rep(n, length(data_in[[n]])) })),
areacell_id = sanitise_col(mdb, data.frame(areacell = unlist(data_in)), 'areacell', lookup = 'areacell'))
} else {
stop("data_in should be a list of areacell vectors")
}
mfdb_transaction(mdb, {
dbSendQuery(mdb$db, paste0(
"DELETE FROM division",
" WHERE division IN ", sql_quote(unique(data_in$division), always_bracket = TRUE),
""))
res <- mfdb_insert(mdb, 'division', data_in)
})
}
|
#!/home/statsadmin/R/bin/Rscript
# Batch script: impose 10% MCAR (missing completely at random) drop-out on a
# simulated full-data set, save the missingness checks, and save diagnostics
# of the missingness mechanism. Run as an array job; the job index is the
# last command-line argument.
source('Step_0_init.R')
args <- commandArgs()
# idx: array-job index, used to pick the input file and to vary the seed
idx <- as.numeric(args[length(args)])
.libPaths('ysidi/lib')
dt.full.X <- readRDS(file = sprintf('dtfullfm_%d.rds',idx))
set.seed(8762+idx)
#generate mcar for 5-25% by 5% DO
# b.trt/b.y/b.X = log(1) means missingness does not depend on treatment,
# outcome or covariates (i.e. MCAR); do = target drop-out proportion
dt.mcar <- miss.apply.do(dt.full.X, b.trt=log(1), b.y=log(1), b.X=log(1), do=0.10)
# Check the achieved drop-out proportion under H0 and H1 for each scenario
dt.mcar.check <- dt.mcar%>%
  dplyr::mutate(do.H0 = purrr::map2(t.H0.m, 0.10, check.miss),
                do.H0 = purrr::map_dbl(do.H0, as.numeric),
                do.H1 = purrr::map2(t.H1.m, 0.10, check.miss),
                do.H1 = purrr::map_dbl(do.H1, as.numeric))%>%
  dplyr::select(-t.H0.m, -t.H1.m)
saveRDS(dt.mcar.check,file = sprintf('fmdochmcar10_%d.rds',idx))
saveRDS(dt.mcar,file = sprintf('dtfmmcar10_%d.rds',idx))
#check missingness mechanism: randomly select two scenarios and calculate
#proportion of assigned missing for each probability value (rounded by 2 decimal points)
# for both MAR and MNAR there should be positive correlation
check.mech.mcar <- dt.mcar%>%
  dplyr::mutate(mech.H0 = purrr::map(t.H0.m , check.mech.p),
                mech.H1 = purrr::map(t.H1.m , check.mech.p))%>%
  dplyr::select(-t.H0.m, -t.H1.m)
saveRDS(check.mech.mcar,file = sprintf('mechmcarfm10_%d.rds',idx))
| /Step3_1_impose_miss/Old pgms/step_3_1_type_fm_missing_mcar_percent_10.R | no_license | yuliasidi/Binomial_PE_Progs | R | false | false | 1,262 | r | #!/home/statsadmin/R/bin/Rscript
source('Step_0_init.R')
args <- commandArgs()
idx <- as.numeric(args[length(args)])
.libPaths('ysidi/lib')
dt.full.X <- readRDS(file = sprintf('dtfullfm_%d.rds',idx))
set.seed(8762+idx)
#generate mcar for 5-25% by 5% DO
dt.mcar <- miss.apply.do(dt.full.X, b.trt=log(1), b.y=log(1), b.X=log(1), do=0.10)
dt.mcar.check <- dt.mcar%>%
dplyr::mutate(do.H0 = purrr::map2(t.H0.m, 0.10, check.miss),
do.H0 = purrr::map_dbl(do.H0, as.numeric),
do.H1 = purrr::map2(t.H1.m, 0.10, check.miss),
do.H1 = purrr::map_dbl(do.H1, as.numeric))%>%
dplyr::select(-t.H0.m, -t.H1.m)
saveRDS(dt.mcar.check,file = sprintf('fmdochmcar10_%d.rds',idx))
saveRDS(dt.mcar,file = sprintf('dtfmmcar10_%d.rds',idx))
#check missingness mechanism: randomly select two scenarios and calculate
#proportion of assigned missing for each probability value (rounded by 2 decimal points)
# for both MAR and MNAR there should be positive correlation
check.mech.mcar <- dt.mcar%>%
dplyr::mutate(mech.H0 = purrr::map(t.H0.m , check.mech.p),
mech.H1 = purrr::map(t.H1.m , check.mech.p))%>%
dplyr::select(-t.H0.m, -t.H1.m)
saveRDS(check.mech.mcar,file = sprintf('mechmcarfm10_%d.rds',idx))
|
# Configuration values consumed by the accompanying simulation scripts
# (their exact meanings are defined by the downstream code, not here).
Scenario <- 6
Rep <- 1
h2 <- 'HH'
corr.archtq <- 'corr.n0.skew1'
seed <- 329
| /beocatintro/setup-1.R | no_license | Brosky1899/CIS625 | R | false | false | 72 | r | Scenario = 6
Rep = 1
h2 = 'HH'
corr.archtq = 'corr.n0.skew1'
seed = 329
|
% Generated by roxygen2 (4.0.1.99): do not edit by hand
\name{gtable_add_rows}
\alias{gtable_add_rows}
\title{Add new rows in specified position.}
\usage{
gtable_add_rows(x, heights, pos = -1)
}
\arguments{
\item{x}{a \code{\link{gtable}} object}
\item{heights}{a unit vector giving the heights of the new rows}
\item{pos}{new row will be added below this position. Defaults to
adding row on bottom. \code{0} adds on the top.}
}
\description{
Add new rows in specified position.
}
\examples{
rect <- rectGrob(gp = gpar(fill = "#00000080"))
tab <- gtable(unit(rep(1, 3), "null"), unit(rep(1, 3), "null"))
tab <- gtable_add_grobs(tab, rect, t = 1, l = 1, r = 3)
tab <- gtable_add_grobs(tab, rect, t = 1, b = 3, l = 1)
tab <- gtable_add_grobs(tab, rect, t = 1, b = 3, l = 3)
dim(tab)
plot(tab)
# Grobs will continue to span over new rows if added in the middle
tab2 <- gtable_add_rows(tab, unit(1, "null"), 1)
dim(tab2)
plot(tab2)
# But not when added to top (0) or bottom (-1, the default)
tab3 <- gtable_add_rows(tab, unit(1, "null"))
tab3 <- gtable_add_rows(tab3, unit(1, "null"), 0)
dim(tab3)
plot(tab3)
}
| /man/gtable_add_rows.Rd | no_license | christinabrady/gtable | R | false | false | 1,112 | rd | % Generated by roxygen2 (4.0.1.99): do not edit by hand
\name{gtable_add_rows}
\alias{gtable_add_rows}
\title{Add new rows in specified position.}
\usage{
gtable_add_rows(x, heights, pos = -1)
}
\arguments{
\item{x}{a \code{\link{gtable}} object}
\item{heights}{a unit vector giving the heights of the new rows}
\item{pos}{new row will be added below this position. Defaults to
adding row on bottom. \code{0} adds on the top.}
}
\description{
Add new rows in specified position.
}
\examples{
rect <- rectGrob(gp = gpar(fill = "#00000080"))
tab <- gtable(unit(rep(1, 3), "null"), unit(rep(1, 3), "null"))
tab <- gtable_add_grobs(tab, rect, t = 1, l = 1, r = 3)
tab <- gtable_add_grobs(tab, rect, t = 1, b = 3, l = 1)
tab <- gtable_add_grobs(tab, rect, t = 1, b = 3, l = 3)
dim(tab)
plot(tab)
# Grobs will continue to span over new rows if added in the middle
tab2 <- gtable_add_rows(tab, unit(1, "null"), 1)
dim(tab2)
plot(tab2)
# But not when added to top (0) or bottom (-1, the default)
tab3 <- gtable_add_rows(tab, unit(1, "null"))
tab3 <- gtable_add_rows(tab3, unit(1, "null"), 0)
dim(tab3)
plot(tab3)
}
|
library(googlesheets4)
library(dplyr, warn.conflicts = FALSE)
library(DBI)
# You may need to run gs_auth() to set this up
# Google Sheet holding the manual PERMNO match data
gs <- "14F6zjJQZRsf5PonOfZ0GJrYubvx5e_eHMV_hCGe42Qg"
# Additional matches from the 2017 repair exercise (rows where the
# automatically matched permco differed)
permnos_addl <-
    read_sheet(gs, sheet = "match_repair.csv") %>%
    filter(!same_permco) %>%
    select(file_name, permno, co_name) %>%
    mutate(comment = "Cases resolved using company names in 2017")
diff_permcos <- read_sheet(gs, sheet = "diff_permcos")
# Matches resolved in 2020: the 'correct' column picks which candidate
# permno (x/y/z) is right; 'NONE' means no valid match, 'DN' rows dropped
add_man_matches <-
    diff_permcos %>%
    filter(correct != 'DN') %>%
    mutate(permno = case_when(correct == 'Y' ~ permno.y,
                              correct == 'X' ~ permno.x,
                              correct == 'Z' ~ permno.z,
                              correct == 'NONE' ~ NA_real_),
           co_name = NA_character_) %>%
    select(file_name, permno, co_name) %>%
    mutate(comment = "Cases resolved using company names in 2020")
# Combine the base manual matches with both repair sets (union de-duplicates)
permnos <-
    read_sheet(gs, sheet = "manual_permno_matches") %>%
    select(file_name, permno, co_name, comment) %>%
    union(permnos_addl) %>%
    union(add_man_matches)
# Attach a PostgreSQL comment to a table, appending the current timestamp.
#
# table:   name of the table to comment on (assumed to be a trusted
#          identifier supplied by this script, not user input)
# comment: comment text; embedded single quotes are escaped so the
#          generated SQL literal stays valid
# con:     DBI connection to use; defaults to the script-global `pg`, so
#          existing two-argument calls behave exactly as before
pg_comment <- function(table, comment, con = pg) {
    # Double any single quotes to keep the SQL string literal well-formed
    text <- gsub("'", "''", paste0(comment, " ON ", Sys.time()), fixed = TRUE)
    sql <- paste0("COMMENT ON TABLE ", table, " IS '", text, "'")
    invisible(dbExecute(con, sql))
}
# Write the combined matches to the se_links schema, de-duplicate, index,
# set permissions and record provenance in a table comment.
pg <- dbConnect(RPostgres::Postgres())
dbExecute(pg, "SET search_path TO se_links")
rs <- dbWriteTable(pg, "manual_permno_matches",
                   permnos,
                   overwrite=TRUE, row.names=FALSE)
rs <- dbExecute(pg, "ALTER TABLE manual_permno_matches OWNER TO se_links_access")
# Where a file_name maps to more than one permno, keep only the rows from
# the January 2015 fix (drop the conflicting later additions)
rs <- dbExecute(pg,
                "DELETE FROM manual_permno_matches
                WHERE file_name IN (
                SELECT file_name
                FROM manual_permno_matches
                GROUP BY file_name
                HAVING count(DISTINCT permno)>1)
                AND comment != 'Fix by Nastia/Vincent in January 2015'")
rs <- dbExecute(pg, "CREATE INDEX ON manual_permno_matches (file_name)")
rs <- dbExecute(pg, "ALTER TABLE manual_permno_matches OWNER TO se_links")
rs <- dbExecute(pg, "GRANT SELECT ON manual_permno_matches TO se_links_access")
rs <- pg_comment("manual_permno_matches",
                 paste0("CREATED USING import_manual_permno_matches.R ON ", Sys.time()))
rs <- dbDisconnect(pg)
| /import_manual_permno_matches.R | no_license | iangow/se_links | R | false | false | 2,219 | r | library(googlesheets4)
library(dplyr, warn.conflicts = FALSE)
library(DBI)
# You may need to run gs_auth() to set this up
gs <- "14F6zjJQZRsf5PonOfZ0GJrYubvx5e_eHMV_hCGe42Qg"
permnos_addl <-
read_sheet(gs, sheet = "match_repair.csv") %>%
filter(!same_permco) %>%
select(file_name, permno, co_name) %>%
mutate(comment = "Cases resolved using company names in 2017")
diff_permcos <- read_sheet(gs, sheet = "diff_permcos")
add_man_matches <-
diff_permcos %>%
filter(correct != 'DN') %>%
mutate(permno = case_when(correct == 'Y' ~ permno.y,
correct == 'X' ~ permno.x,
correct == 'Z' ~ permno.z,
correct == 'NONE' ~ NA_real_),
co_name = NA_character_) %>%
select(file_name, permno, co_name) %>%
mutate(comment = "Cases resolved using company names in 2020")
permnos <-
read_sheet(gs, sheet = "manual_permno_matches") %>%
select(file_name, permno, co_name, comment) %>%
union(permnos_addl) %>%
union(add_man_matches)
pg_comment <- function(table, comment) {
sql <- paste0("COMMENT ON TABLE ", table, " IS '",
comment, " ON ", Sys.time() , "'")
rs <- dbExecute(pg, sql)
}
pg <- dbConnect(RPostgres::Postgres())
dbExecute(pg, "SET search_path TO se_links")
rs <- dbWriteTable(pg, "manual_permno_matches",
permnos,
overwrite=TRUE, row.names=FALSE)
rs <- dbExecute(pg, "ALTER TABLE manual_permno_matches OWNER TO se_links_access")
rs <- dbExecute(pg,
"DELETE FROM manual_permno_matches
WHERE file_name IN (
SELECT file_name
FROM manual_permno_matches
GROUP BY file_name
HAVING count(DISTINCT permno)>1)
AND comment != 'Fix by Nastia/Vincent in January 2015'")
rs <- dbExecute(pg, "CREATE INDEX ON manual_permno_matches (file_name)")
rs <- dbExecute(pg, "ALTER TABLE manual_permno_matches OWNER TO se_links")
rs <- dbExecute(pg, "GRANT SELECT ON manual_permno_matches TO se_links_access")
rs <- pg_comment("manual_permno_matches",
paste0("CREATED USING import_manual_permno_matches.R ON ", Sys.time()))
rs <- dbDisconnect(pg)
|
# Package load hook: load the compiled code, register the default font, and
# initialise package-level state stored in .pkg.env.
.onLoad <- function(libname, pkgname) {
    # Load the shared library containing the C routines used via .Call()
    library.dynam("showtext", pkgname, libname);
    .add.default.font();
    # C-level function pointers for extracting glyph outlines
    .pkg.env$.outline.funs = .Call("showtextLoadOutlineFuns",
                                   PACKAGE = "showtext");
    # Default segment count used by the package (exact semantics defined in
    # the C source -- presumably curve approximation; confirm there)
    .pkg.env$.nseg = 10L;
    # Placeholder/null device pointer and a saved DevDesc structure, both
    # managed by the C side
    .pkg.env$.gdd.save = .Call("showtextReturnNullPointer",
                               PACKAGE = "showtext");
    .pkg.env$.dd.save = .Call("showtextLoadDevDesc",
                              PACKAGE = "showtext");
}
# Package unload hook: release the C-level state created in .onLoad, then
# unload the shared library.
.onUnload <- function(libpath) {
    .Call("showtextCleanOutlineFuns", .pkg.env$.outline.funs,
          PACKAGE = "showtext");
    .Call("showtextCleanDevDesc", .pkg.env$.dd.save,
          PACKAGE = "showtext");
    library.dynam.unload("showtext", libpath);
}
| /R/zzz.R | no_license | baptiste/showtext | R | false | false | 747 | r | .onLoad <- function(libname, pkgname) {
library.dynam("showtext", pkgname, libname);
.add.default.font();
.pkg.env$.outline.funs = .Call("showtextLoadOutlineFuns",
PACKAGE = "showtext");
.pkg.env$.nseg = 10L;
.pkg.env$.gdd.save = .Call("showtextReturnNullPointer",
PACKAGE = "showtext");
.pkg.env$.dd.save = .Call("showtextLoadDevDesc",
PACKAGE = "showtext");
}
.onUnload <- function(libpath) {
.Call("showtextCleanOutlineFuns", .pkg.env$.outline.funs,
PACKAGE = "showtext");
.Call("showtextCleanDevDesc", .pkg.env$.dd.save,
PACKAGE = "showtext");
library.dynam.unload("showtext", libpath);
}
|
# Data are contained in the file: exdata_data_household_power_consumption.zip, which can be unzipped using the R command: unzip("exdata_data_household_power_consumption.zip")
# Data of Electric power consumption were downloaded from the URL: , but originally comes from the UCI web site. The 9 variables are date (dd/mm/yy), time (hh:mm:ss), global active power (minute-averaged active power used by households, in kw), global reactive power (minute-averaged reactive power used by households, in kw), voltage (minute-averaged voltage, in volts), global intensity (minute-averaged current density used by households, in ampere), and sub-metering data: 1 (kitchen use in watt-hour of active energy), 2 (laundry room use in watt-hour of active energy), and 3 (electric water heaters and air-conditioner use in watt-hour of active energy).
# Only data from 2007-02-01 and 2007-02-02 are used in this exploratory data analysis.
#Read the data
colNames <- c("date", "time", "global_active_power", "global_reactive_power",
              "voltage", "global_intensity", "sub_metering_1", "sub_metering_2", "sub_metering_3")
# skip/nrows select exactly the rows for the two target dates -- this
# assumes the raw file's fixed row order (verify if the source file changes)
data <- read.csv("household_power_consumption.txt", sep = ";" , skip = 66636,
                 nrows = 2880, col.names = colNames)
#create a datetime column which combines date and time variables
data$datetime <- NULL
data$datetime <- paste(data$date, data$time)
data$datetime <- strptime(data$datetime, "%d/%m/%Y %H:%M:%S", tz = "GMT")
#Create plot 4, which consists of four grouped plots
library(datasets)
# 480x480 PNG with a 2x2 grid of plots
png(file = "plot4.png", bg = "white", height = 480, width = 480)
par(mfrow = c(2,2))
#first plot
with(data, plot(datetime,global_active_power, type="l", ylab = "Global Active Power",
                xlab = ""))
#second plot
plot(data$datetime, data$voltage, type="l", xlab = "datetime", ylab = "Voltage")
#third plot
with(data, plot(datetime, sub_metering_1, type = "l", col= "black",
                ylab = "Energy sub metering", xlab = ""))
lines(data$datetime, data$sub_metering_2, col = "red")
lines(data$datetime, data$sub_metering_3, col= "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd= 1, bty = "n")
#fourth plot
plot(data$datetime, data$global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()
# Data of Electric power consumption were downloaded from the URL: , but originally comes from the UCI web site. The 9 variables are date (dd/mm/yy), time (hh:mm:ss), global active power (minute-averaged active power used by households, in kw), global reactive power (minute-averaged reactive power used by households, in kw), voltage (minute-averaged voltage, in volts), global intensity (minute-averaged current density used by households, in ampere), and sub-metering data: 1 (kitchen use in watt-hour of active energy), 2 (laundry room use in watt-hour of active energy), and 3 (electric water heaters and air-conditioner use in watt-hour of active energy).
# Only data from 2007-02-01 and 2007-02-02 are used in this exploratory data analysis.
#Read the data
colNames <- c("date", "time", "global_active_power", "global_reactive_power",
"voltage", "global_intensity", "sub_metering_1", "sub_metering_2", "sub_metering_3")
data <- read.csv("household_power_consumption.txt", sep = ";" , skip = 66636,
nrows = 2880, col.names = colNames)
#create a datetime column which combines date and time variables
data$datetime <- NULL
data$datetime <- paste(data$date, data$time)
data$datetime <- strptime(data$datetime, "%d/%m/%Y %H:%M:%S", tz = "GMT")
#Create plot 4, which consists of four grouped plots
library(datasets)
png(file = "plot4.png", bg = "white", height = 480, width = 480)
par(mfrow = c(2,2))
#first plot
with(data, plot(datetime,global_active_power, type="l", ylab = "Global Active Power",
xlab = ""))
#second plot
plot(data$datetime, data$voltage, type="l", xlab = "datetime", ylab = "Voltage")
#third plot
with(data, plot(datetime, sub_metering_1, type = "l", col= "black",
ylab = "Energy sub metering", xlab = ""))
lines(data$datetime, data$sub_metering_2, col = "red")
lines(data$datetime, data$sub_metering_3, col= "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lwd= 1, bty = "n")
#fourth plot
plot(data$datetime, data$global_reactive_power, type = "l", xlab = "datetime",
ylab = "Global_reactive_power")
dev.off() |
#' @import dplyr
# Runs analysis of data points around the centre line.
#
# Adds runs-analysis results to x: the longest run of points on the same
# side of the centre line and the number of crossings. Signals non-random
# variation when the longest run exceeds its predicted maximum
# (Schilling 2012) or the number of crossings is below its predicted
# minimum (Chen 2010).
#
# x: list/data frame with elements y, cl and include.
# Returns x with n.obs, n.useful, runs.signal, longest.run(.max) and
# n.crossings(.min) added.
runs.analysis <- function(x) {
  y <- x$y[x$include]
  cl <- x$cl[x$include]
  # Side of the centre line for each point; points on the line (sign == 0)
  # and missing values carry no information about runs
  runs <- sign(y - cl)
  runs <- runs[runs != 0 & !is.na(runs)]
  n.useful <- length(runs)
  n.obs <- length(y)

  if (n.useful > 0) {  # explicit comparison rather than numeric truthiness
    run.lengths <- rle(runs)$lengths
    n.runs <- length(run.lengths)
    longest.run <- max(run.lengths)
    longest.run.max <- round(log2(n.useful)) + 3  # Schilling 2012
    n.crossings <- max(n.runs - 1, 0)
    n.crossings.min <- stats::qbinom(0.05,        # Chen 2010 (7)
                                     max(n.useful - 1, 0), 0.5)
    runs.signal <- longest.run > longest.run.max ||
      n.crossings < n.crossings.min
  } else {
    # No useful observations: runs analysis is undefined, no signal
    longest.run <- NA
    longest.run.max <- NA
    n.crossings <- NA
    n.crossings.min <- NA
    runs.signal <- FALSE
  }

  x$n.obs <- n.obs
  x$n.useful <- n.useful
  x$runs.signal <- runs.signal
  x$longest.run <- longest.run
  x$longest.run.max <- longest.run.max
  x$n.crossings <- n.crossings
  x$n.crossings.min <- n.crossings.min
  return(x)
}
# Run chart: centre line is the median of the baseline data. A run chart
# has no control limits, so ucl/lcl are set to NA.
qic.run <- function(x) {
  in.base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- stats::median(x$y[in.base], na.rm = TRUE)
  }
  x$ucl <- NA_real_
  x$lcl <- NA_real_
  return(x)
}
# Individuals (I) chart: centre line is the mean of the baseline data;
# control limits are derived from the average moving range of successive
# centred baseline observations.
qic.i <- function(x) {
  in.base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[in.base], na.rm = TRUE)
  }
  # Moving ranges of the centred baseline data
  mrange <- abs(diff(x$y[in.base] - x$cl[in.base]))
  avg.mr <- mean(mrange, na.rm = TRUE)
  # Screen out moving ranges above the upper moving range limit and
  # recompute the average (Nelson 1982)
  mrange <- mrange[mrange < 3.267 * avg.mr]
  avg.mr <- mean(mrange, na.rm = TRUE)
  # Sigma estimate (Montgomery 6.33) and 3-sigma limits
  sigma <- avg.mr / 1.128
  x$lcl <- x$cl - 3 * sigma
  x$ucl <- x$cl + 3 * sigma
  return(x)
}
# Moving range (MR) chart: plots absolute differences between successive
# observations. The first point has no predecessor and becomes NA. Limits
# use the standard MR chart constant D4 = 3.267 with lcl fixed at zero.
qic.mr <- function(x) {
  in.base <- x$baseline & x$include
  x$y <- c(NA, abs(diff(x$y)))
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[in.base], na.rm = TRUE)
  }
  x$lcl <- 0
  x$ucl <- 3.267 * x$cl
  return(x)
}
# Xbar chart of subgroup means. The centre line is the weighted grand mean;
# sigma is pooled across subgroups when subgroup sizes vary, otherwise the
# plain average of the subgroup SDs. Limits use the A3 constant.
qic.xbar <- function(x){
  base <- x$baseline & x$include
  # TRUE when subgroup sizes vary (more than one distinct y.length)
  var.n <- as.logical(length(unique(x$y.length)) - 1)
  # Calculate centre line, Montgomery 6.30
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.length[base] * x$y.mean[base], na.rm = TRUE) /
      sum(x$y.length[base], na.rm = TRUE)
  }
  # Calculate standard deviation and control limits, Montgomery 6.29 or 6.31
  if (var.n) {
    stdev <- sqrt(sum((x$y.length[base] - 1) * x$y.sd[base]^2, na.rm = TRUE) /
                    sum(x$y.length[base] - 1, na.rm = TRUE))
  } else {
    stdev <- mean(x$y.sd[base], na.rm = TRUE)
  }
  A3 <- a3(x$y.length)
  x$ucl <- x$cl + A3 * stdev
  x$lcl <- x$cl - A3 * stdev
  return(x)
}
# S chart of subgroup standard deviations. Centre line is the pooled SD for
# variable subgroup sizes, otherwise the mean SD. Limits use the B3/B4
# constants.
qic.s <- function(x){
  base <- x$baseline & x$include
  # TRUE when subgroup sizes vary (more than one distinct y.length)
  var.n <- as.logical(length(unique(x$y.length)) - 1)
  x$y <- x$y.sd
  # Calculate centre line and control limits
  if (anyNA(x$cl)) {
    if (var.n) { # Variable subgroup size: Montgomery 6.31
      x$cl <- sqrt(sum((x$y.length[base] - 1) * x$y.sd[base]^2, na.rm = TRUE) /
                     sum(x$y.length[base] - 1, na.rm = TRUE))
      # x$cl <- sum(x$y[base] * x$y.length[base]) / sum(x$y.length[base])
    } else { # Constant subgroup size: Montgomery 6.29
      x$cl <- mean(x$y.sd, na.rm = TRUE)
    }
  }
  B3 <- b3(x$y.length)
  B4 <- b4(x$y.length)
  x$ucl <- B4 * x$cl
  x$lcl <- B3 * x$cl
  return(x)
}
# T chart for time between rare events: transforms y with the power 1/3.6
# to approximate normality, reuses the I chart calculations, then back
# transforms centre line and limits to the original scale.
qic.t <- function(x) {
  if (min(x$y, na.rm = TRUE) <= 0) {
    stop('Time between events must be greater than zero')
  }
  pwr <- 3.6
  x$y <- x$y^(1 / pwr)
  x <- qic.i(x)
  # Back transform to the original scale
  x$y <- x$y^pwr
  x$cl <- x$cl^pwr
  x$ucl <- x$ucl^pwr
  x$lcl <- x$lcl^pwr
  # A negative lower limit back-transforms to NaN; neither makes sense for
  # times, so clamp to zero
  x$lcl[x$lcl < 0 | is.nan(x$lcl)] <- 0
  return(x)
}
# P chart for proportions: binomial 3-sigma limits around the pooled
# baseline proportion, truncated to [0, 1].
qic.p <- function(x) {
  in.base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[in.base], na.rm = TRUE) /
      sum(x$n[in.base], na.rm = TRUE)
  }
  # Binomial standard deviation of a proportion
  sigma <- sqrt(x$cl * (1 - x$cl) / x$n)
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  # Proportions cannot leave [0, 1]; non-finite limits are left untouched
  x$ucl[x$ucl > 1 & is.finite(x$ucl)] <- 1
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
# P prime chart (Laney): p chart limits widened by the between-subgroup
# sigma (sigma.z), estimated from the moving ranges of the z scores.
qic.pp <- function(x) {
  in.base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[in.base], na.rm = TRUE) /
      sum(x$n[in.base], na.rm = TRUE)
  }
  # Within-subgroup (binomial) standard deviation
  sigma <- sqrt(x$cl * (1 - x$cl) / x$n)
  # Between-subgroup variation from the average moving range of z scores
  z <- (x$y[in.base] - x$cl[in.base]) / sigma[in.base]
  sigma.z <- mean(abs(diff(z)), na.rm = TRUE) / 1.128
  sigma <- sigma * sigma.z
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  # Proportions cannot leave [0, 1]; non-finite limits are left untouched
  x$ucl[x$ucl > 1 & is.finite(x$ucl)] <- 1
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
# C chart for counts: Poisson-based 3-sigma limits (Montgomery 7.17), with
# the lower limit clamped at zero since counts cannot be negative.
qic.c <- function(x) {
  in.base <- x$baseline & x$include
  x$y <- x$y.sum
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[in.base], na.rm = TRUE)
  }
  sigma <- sqrt(x$cl)
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
# U chart for rates: Poisson-based 3-sigma limits scaled by the area of
# opportunity n (Montgomery 7.19); lower limit clamped at zero.
qic.u <- function(x) {
  in.base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[in.base], na.rm = TRUE) / sum(x$n[in.base], na.rm = TRUE)
  }
  sigma <- sqrt(x$cl / x$n)
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
# U prime chart (Laney): u chart limits widened by the between-subgroup
# sigma (sigma.z), estimated from the moving ranges of the z scores.
qic.up <- function(x) {
  in.base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[in.base], na.rm = TRUE) / sum(x$n[in.base], na.rm = TRUE)
  }
  # Within-subgroup (Poisson) standard deviation, Montgomery 7.19
  sigma <- sqrt(x$cl / x$n)
  # Between-subgroup variation from the average moving range of z scores
  z <- (x$y[in.base] - x$cl[in.base]) / sigma[in.base]
  sigma.z <- mean(abs(diff(z)), na.rm = TRUE) / 1.128
  sigma <- sigma * sigma.z
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  # Rates cannot be negative
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
# G chart for opportunities between rare events: geometric-distribution
# based limits (Montgomery p. 319). The displayed centre line is the
# empirical median, since the geometric distribution is heavily skewed.
qic.g <- function(x) {
  in.base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[in.base], na.rm = TRUE)
  }
  # Geometric standard deviation around the mean
  sigma <- sqrt(x$cl * (x$cl + 1))
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  x$lcl[x$lcl < 0] <- 0
  # # Set centre line to theoretical median, Provost (2011) p. 228
  # x$cl <- 0.693 * x$cl
  # Plot the empirical median as the centre line
  x$cl <- stats::median(x$y, na.rm = TRUE)
  return(x)
}
# Bias-correction constant c4 for estimating sigma from the sample SD.
# Computed via lgamma for numerical stability at large n; undefined (NA)
# for subgroup sizes of one or less.
c4 <- function(n) {
  n[n <= 1] <- NA
  exp(lgamma(n / 2) - lgamma((n - 1) / 2)) * sqrt(2 / (n - 1))
}
# Constant c5 = sqrt(1 - c4^2): companion to c4, used for S chart limit
# factors; NA for subgroup sizes of one or less.
c5 <- function(n) {
  n[n <= 1] <- NA
  sqrt(1 - c4(n) ^ 2)
}
# A3 constant for Xbar chart control limits based on the average sample SD;
# NA for subgroup sizes of one or less.
a3 <- function(n) {
  n[n <= 1] <- NA
  3 / (c4(n) * sqrt(n))
}
# B3 constant (lower control limit factor) for the S chart, floored at
# zero; NA for subgroup sizes of one or less.
b3 <- function(n) {
  n[n <= 1] <- NA
  pmax(0, 1 - 3 * c5(n) / c4(n))
}
# B4 constant (upper control limit factor) for the S chart; NA for
# subgroup sizes of one or less.
b4 <- function(n) {
  n[n <= 1] <- NA
  1 + 3 * c5(n) / c4(n)
}
# a3 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 2.659, 1.954, 1.628, 1.427, 1.287, 1.182,
# 1.099, 1.032, 0.975, 0.927, 0.886, 0.850,
# 0.817, 0.789, 0.763, 0.739, 0.718, 0.698,
# 0.680, 0.663, 0.647, 0.633, 0.619, 0.606)
# # x <- 3 / (4 * (n - 1)) * (4 * n - 3) / sqrt(n)
# x <- 3 / c4(n) / sqrt(n)
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
#
# b3 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 0.000, 0.000, 0.000, 0.000, 0.030, 0.118,
# 0.185, 0.239, 0.284, 0.321, 0.354, 0.382,
# 0.406, 0.428, 0.448, 0.466, 0.482, 0.497,
# 0.510, 0.523, 0.534, 0.545, 0.555, 0.565)
# x <- 1 - (3 / c4(n) / sqrt(2 * (n - 1)))
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
#
# b4 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 3.267, 2.568, 2.266, 2.089, 1.970, 1.882,
# 1.815, 1.761, 1.716, 1.679, 1.646, 1.618,
# 1.594, 1.572, 1.552, 1.534, 1.518, 1.503,
# 1.490, 1.477, 1.466, 1.455, 1.445, 1.435)
# x <- 1 + (3 / c4(n) / sqrt(2 * (n - 1)))
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
#
# c4 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 0.7979, 0.8862, 0.9213, 0.9400, 0.9515, 0.9594,
# 0.9650, 0.9693, 0.9727, 0.9754, 0.9776, 0.9794,
# 0.9810, 0.9823, 0.9835, 0.9845, 0.9854, 0.9862,
# 0.9869, 0.9876, 0.9882, 0.9887, 0.9892, 0.9896)
#
# x <- 4 * (n - 1) / (4 * n - 3)
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
# Format line labels function
# Format a numeric value for line labels: fixed number of decimals and,
# optionally, expressed as a percentage with a trailing '%'.
lab.format <- function(x, decimals = 1, percent = FALSE) {
  fmt <- paste0("%.", decimals, "f")
  if (percent) {
    paste0(sprintf(fmt, x * 100), '%')
  } else {
    sprintf(fmt, x)
  }
}
# Make parts function
# Build a part index vector of length n from part start positions x.
# Position 0 always starts part 1; start positions outside [0, n) are
# dropped, and every subgroup between consecutive starts shares a part
# number.
makeparts <- function(x, n) {
  starts <- sort(unique(c(0, x)))
  starts <- starts[starts >= 0 & starts < n]
  rep(seq_along(starts), diff(c(starts, n)))
}
# Fix notes function
# Tidy up pipe-concatenated notes: collapse repeated pipes, trim leading
# and trailing pipes, pad the remaining pipes with spaces, and turn empty
# strings into NA.
fixnotes <- function(x) {
  collapsed <- gsub("\\|{2, }", "\\|", x)
  trimmed <- gsub("^\\||\\|$", "", collapsed)
  padded <- gsub("\\|", " | ", trimmed)
  gsub("^$", NA, padded)
}
# Function for data aggregation and analysis
qic.agg <- function(d, got.n, part, agg.fun, freeze, exclude,
                    chart.fun, multiply, dots.only, chart, y.neg) {
  # Aggregate raw data to one row per subgroup (x) and facet, split the
  # chart into parts, compute centre line and control limits with the
  # chart-specific function, run the runs analysis, and flag signals.
  #
  #   d         long-format data with columns x, y, n, cl, target, notes,
  #             facet1, facet2
  #   got.n     TRUE when denominators (n) were supplied
  #   part      positions at which to split the chart into parts
  #   agg.fun   aggregation function applied to y when no n is given
  #   freeze    last subgroup included in the baseline period
  #   exclude   subgroup numbers excluded from the computations
  #   chart.fun one of the qic.* functions computing the limits
  #   multiply  scale factor for y and the limits (e.g. 100 for percent)
  #   dots.only, chart, y.neg  behaviour switches (see bottom of function)
  #
  # Quosures for the fixed column names used with tidy evaluation below.
  x <- quo(x)
  y <- quo(y)
  n <- quo(n)
  cl <- quo(cl)
  target <- quo(target)
  notes <- quo(notes)
  facet1 <- quo(facet1)
  facet2 <- quo(facet2)
  # Collapse to one row per subgroup per facet
  d <- d %>%
    filter(!is.na(!!x)) %>%
    group_by(!!x, !!facet1, !!facet2) %>%
    summarise(y.sum = sum(!!y, na.rm = TRUE),
              y.length = sum(!is.na(!!y)),
              y.mean = mean(!!y, na.rm = TRUE),
              y.sd = stats::sd(!!y, na.rm = TRUE),
              n = sum(!!n, na.rm = got.n),
              y = ifelse(got.n,
                         y.sum / n,
                         do.call(agg.fun, list(y, na.rm = TRUE))),
              cl = first(!!cl),
              target = first(!!target),
              notes = paste(!!notes, collapse = '|')
    ) %>%
    group_by(facet1, facet2) %>%
    mutate(part = makeparts(part, n()),
           xx = seq_along(part)) %>%
    ungroup() %>%
    mutate(baseline = xx <= freeze,
           include = !xx %in% exclude,
           notes = fixnotes(notes))
  # Limits and runs analysis are computed per facet and chart part; then
  # y and the limits are rescaled, and the last point in each part gets
  # the line labels.
  d <- split(d, d[c('facet1', 'facet2', 'part')]) %>%
    lapply(chart.fun) %>%
    lapply(runs.analysis) %>%
    lapply(function(x) {
      within(x, {
        y <- y * multiply
        cl <- cl * multiply
        lcl <- lcl * multiply
        ucl <- ucl * multiply
        cl.lab <- ifelse(xx == max(xx), cl, NA)
        lcl.lab <- ifelse(xx == max(xx), lcl, NA)
        ucl.lab <- ifelse(xx == max(xx), ucl, NA)
        target.lab <- ifelse(xx == max(xx), target, NA)
      })
    })
  d <- do.call(rbind, d) %>%
    arrange(!!facet1, !!facet2, !!x)
  # Remove control lines from missing subgroups
  d$ucl[!is.finite(d$ucl)] <- NA
  d$lcl[!is.finite(d$lcl)] <- NA
  # Add sigma signals
  d$sigma.signal <- d$y > d$ucl | d$y < d$lcl
  d$sigma.signal[is.na(d$sigma.signal)] <- FALSE
  # Ignore runs analysis if subgroups are categorical or if chart type is MR
  if (dots.only || chart == 'mr')
    d$runs.signal <- FALSE
  # Prevent negative y axis if y.neg argument is FALSE
  if (!y.neg & min(d$y, na.rm = TRUE) >= 0) {
    d$lcl[d$lcl < 0] <- 0
    d$lcl.lab[d$lcl.lab < 0] <- 0
  }
  return(d)
}
.onAttach <- function(libname, pkgname) {
  # Install the package-level default colours and shading on attach.
  defaults <- list(qic.linecol   = '#5DA5DA',
                   qic.signalcol = '#F15854',
                   qic.targetcol = '#059748',
                   qic.clshade   = TRUE)
  do.call(options, defaults)
}
.onDetach <- function(libpath) {
options(qic.linecol = NULL,
qic.signalcol = NULL,
qic.targetcol = NULL,
qic.clshade = NULL)
} | /R/helper.functions.R | no_license | pwildenhain/qicharts2 | R | false | false | 12,961 | r | #' @import dplyr
runs.analysis <- function(x) {
  # Anhoej runs analysis: flag non-random variation when the longest run
  # of points on the same side of the centre line is too long
  # (Schilling 2012) or when the curve crosses the centre line too few
  # times (Chen 2010).
  y <- x$y[x$include]
  cl <- x$cl[x$include]
  # Signs of deviations from the centre line; points on the line and
  # missing values are not useful observations.
  deviations <- sign(y - cl)
  deviations <- deviations[deviations != 0 & !is.na(deviations)]
  n.useful <- length(deviations)
  if (n.useful) {
    run.lengths <- rle(deviations)$lengths
    n.runs <- length(run.lengths)
    longest.run <- max(run.lengths)
    longest.run.max <- round(log2(n.useful)) + 3  # Schilling 2012
    n.crossings <- max(n.runs - 1, 0)
    n.crossings.min <- stats::qbinom(0.05,        # Chen 2010 (7)
                                     max(n.useful - 1, 0), 0.5)
    runs.signal <- longest.run > longest.run.max ||
      n.crossings < n.crossings.min
  } else {
    longest.run <- NA
    longest.run.max <- NA
    n.crossings <- NA
    n.crossings.min <- NA
    runs.signal <- FALSE
  }
  x$n.obs <- length(y)
  x$n.useful <- n.useful
  x$runs.signal <- runs.signal
  x$longest.run <- longest.run
  x$longest.run.max <- longest.run.max
  x$n.crossings <- n.crossings
  x$n.crossings.min <- n.crossings.min
  return(x)
}
qic.run <- function(x) {
  # Run chart: centre line is the median of the baseline data and there
  # are no control limits.
  base.rows <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- stats::median(x$y[base.rows], na.rm = TRUE)
  }
  x$ucl <- NA_real_
  x$lcl <- NA_real_
  return(x)
}
qic.i <- function(x) {
  # I chart (individual values). Sigma is estimated from the average
  # moving range, after screening out ranges above the upper MR limit
  # (Nelson 1982).
  base.rows <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[base.rows], na.rm = TRUE)
  }
  # Moving ranges of the centred baseline observations
  moving.ranges <- abs(diff(x$y[base.rows] - x$cl[base.rows]))
  avg.mr <- mean(moving.ranges, na.rm = TRUE)
  # Drop ranges above the upper MR limit and re-average
  screened <- moving.ranges[moving.ranges < 3.267 * avg.mr]
  avg.mr <- mean(screened, na.rm = TRUE)
  # Montgomery 6.33: sigma = average MR / d2, with d2 = 1.128 for n = 2
  sigma <- avg.mr / 1.128
  x$lcl <- x$cl - 3 * sigma
  x$ucl <- x$cl + 3 * sigma
  return(x)
}
qic.mr <- function(x) {
  # MR chart: plots the moving ranges of consecutive individual values.
  base.rows <- x$baseline & x$include
  x$y <- c(NA, abs(diff(x$y)))  # first point has no predecessor
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[base.rows], na.rm = TRUE)
  }
  x$lcl <- 0
  x$ucl <- 3.267 * x$cl  # D4 * MR-bar for subgroups of size 2
  return(x)
}
qic.xbar <- function(x) {
  # Xbar chart of subgroup means.
  base.rows <- x$baseline & x$include
  varying.n <- length(unique(x$y.length)) > 1
  # Centre line: weighted grand mean of the baseline subgroups
  # (Montgomery 6.30)
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.length[base.rows] * x$y.mean[base.rows], na.rm = TRUE) /
      sum(x$y.length[base.rows], na.rm = TRUE)
  }
  # Sigma: pooled over baseline subgroups when sizes vary (Montgomery
  # 6.31), otherwise the average subgroup sd (6.29)
  if (varying.n) {
    sigma <- sqrt(sum((x$y.length[base.rows] - 1) * x$y.sd[base.rows]^2, na.rm = TRUE) /
                    sum(x$y.length[base.rows] - 1, na.rm = TRUE))
  } else {
    sigma <- mean(x$y.sd[base.rows], na.rm = TRUE)
  }
  half.width <- a3(x$y.length) * sigma
  x$ucl <- x$cl + half.width
  x$lcl <- x$cl - half.width
  return(x)
}
qic.s <- function(x) {
  # S chart of subgroup standard deviations.
  base.rows <- x$baseline & x$include
  varying.n <- length(unique(x$y.length)) > 1
  x$y <- x$y.sd
  if (anyNA(x$cl)) {
    if (varying.n) {
      # Variable subgroup size: pooled sd over the baseline subgroups
      # (Montgomery 6.31)
      x$cl <- sqrt(sum((x$y.length[base.rows] - 1) * x$y.sd[base.rows]^2, na.rm = TRUE) /
                     sum(x$y.length[base.rows] - 1, na.rm = TRUE))
    } else {
      # Constant subgroup size (Montgomery 6.29).
      # NOTE(review): averages over ALL subgroups, not just the baseline
      # rows -- confirm the asymmetry with the variable-n branch is
      # intended.
      x$cl <- mean(x$y.sd, na.rm = TRUE)
    }
  }
  x$ucl <- b4(x$y.length) * x$cl
  x$lcl <- b3(x$y.length) * x$cl
  return(x)
}
qic.t <- function(x) {
  # T chart (time between rare events): transform towards normality with
  # the 1/3.6 power, compute I chart limits, then back-transform.
  if (min(x$y, na.rm = TRUE) <= 0) {
    stop('Time between events must be greater than zero')
  }
  x$y <- x$y^(1 / 3.6)
  x <- qic.i(x)
  # Back-transform the plotted values, centre line and limits
  for (col in c('y', 'cl', 'ucl', 'lcl')) {
    x[[col]] <- x[[col]]^3.6
  }
  # A negative lcl raised to 3.6 is NaN; clamp to zero either way
  x$lcl[x$lcl < 0 | is.nan(x$lcl)] <- 0
  return(x)
}
qic.p <- function(x) {
  # P chart (proportions). Binomial-based limits, truncated to [0, 1].
  base.rows <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[base.rows], na.rm = TRUE) /
      sum(x$n[base.rows], na.rm = TRUE)
  }
  sigma <- sqrt(x$cl * (1 - x$cl) / x$n)  # binomial sd of a proportion
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  # Proportions cannot leave [0, 1]
  x$ucl[x$ucl > 1 & is.finite(x$ucl)] <- 1
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
qic.pp <- function(x) {
  # P' chart (Laney): P chart limits widened by the between-subgroup
  # variation, for large, overdispersed subgroups.
  base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[base], na.rm = TRUE) /
      sum(x$n[base], na.rm = TRUE)
  }
  # Calculate standard deviation (within-subgroup, binomial)
  stdev <- sqrt(x$cl * (1 - x$cl) / x$n)
  # Calculate standard deviation for Laney's P prime chart, incorporating
  # between-subgroup variation.
  z_i <- (x$y[base] - x$cl[base]) / stdev[base]       # z scores of baseline points
  sigma_z <- mean(abs(diff(z_i)), na.rm = TRUE) / 1.128  # average moving range / d2
  stdev <- stdev * sigma_z
  x$ucl <- x$cl + 3 * stdev
  x$lcl <- x$cl - 3 * stdev
  # Proportions cannot leave [0, 1]
  x$ucl[x$ucl > 1 & is.finite(x$ucl)] <- 1
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
qic.c <- function(x) {
  # C chart (counts of defects). Poisson-based limits (Montgomery 7.17).
  base.rows <- x$baseline & x$include
  x$y <- x$y.sum
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[base.rows], na.rm = TRUE)
  }
  sigma <- sqrt(x$cl)  # Poisson sd
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
qic.u <- function(x) {
  # U chart (rates). Poisson-based limits scaled by subgroup size
  # (Montgomery 7.19).
  base.rows <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[base.rows], na.rm = TRUE) / sum(x$n[base.rows], na.rm = TRUE)
  }
  sigma <- sqrt(x$cl / x$n)
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0
  return(x)
}
qic.up <- function(x){
  # U' chart (Laney): U chart limits widened by the between-subgroup
  # variation, for large, overdispersed subgroups.
  base <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- sum(x$y.sum[base], na.rm = TRUE) / sum(x$n[base], na.rm = TRUE)
  }
  # Calculate standard deviation, Montgomery 7.19 (within-subgroup)
  stdev <- sqrt(x$cl / x$n)
  # Calculate standard deviation for Laney's u-prime chart, incorporating
  # between-subgroup variation.
  z_i <- (x$y[base] - x$cl[base]) / stdev[base]       # z scores of baseline points
  sigma_z <- mean(abs(diff(z_i)), na.rm = TRUE) / 1.128  # average moving range / d2
  stdev <- stdev * sigma_z
  # Calculate limits
  x$ucl <- x$cl + 3 * stdev
  x$lcl <- x$cl - 3 * stdev
  x$lcl[x$lcl < 0 & is.finite(x$lcl)] <- 0  # rates cannot be negative
  return(x)
}
qic.g <- function(x) {
  # G chart (opportunities between rare events), geometric distribution.
  base.rows <- x$baseline & x$include
  if (anyNA(x$cl)) {
    x$cl <- mean(x$y[base.rows], na.rm = TRUE)
  }
  sigma <- sqrt(x$cl * (x$cl + 1))  # geometric sd (Montgomery p. 319)
  x$ucl <- x$cl + 3 * sigma
  x$lcl <- x$cl - 3 * sigma
  x$lcl[x$lcl < 0] <- 0
  # Plot the median as centre line: the geometric distribution is highly
  # skewed, so the mean would bias the runs analysis.
  x$cl <- stats::median(x$y, na.rm = TRUE)
  return(x)
}
c4 <- function(n) {
  # Bias-correction constant c4 for the sample standard deviation,
  # computed on the log scale so gamma() cannot overflow for large n.
  n[n <= 1] <- NA  # undefined for subgroup sizes 0 and 1
  log.ratio <- lgamma(n / 2) - lgamma((n - 1) / 2)
  sqrt(2 / (n - 1)) * exp(log.ratio)
}
c5 <- function(n) {
  # Companion constant: sqrt(1 - c4^2), the sd of the c4-corrected
  # estimator.
  n[n <= 1] <- NA
  correction <- c4(n)
  sqrt(1 - correction ^ 2)
}
a3 <- function(n) {
  # A3 factor for Xbar chart limits: 3 / (c4 * sqrt(n)).
  n[n <= 1] <- NA
  denom <- c4(n) * sqrt(n)
  3 / denom
}
b3 <- function(n) {
  # B3 factor for the S chart lower control limit, floored at zero.
  n[n <= 1] <- NA
  spread <- 3 * c5(n) / c4(n)
  pmax(0, 1 - spread)
}
b4 <- function(n) {
  # B4 factor for the S chart upper control limit: 1 + 3 * c5 / c4.
  n[n <= 1] <- NA  # undefined for subgroup sizes 0 and 1
  spread <- 3 * c5(n) / c4(n)
  1 + spread
}
# a3 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 2.659, 1.954, 1.628, 1.427, 1.287, 1.182,
# 1.099, 1.032, 0.975, 0.927, 0.886, 0.850,
# 0.817, 0.789, 0.763, 0.739, 0.718, 0.698,
# 0.680, 0.663, 0.647, 0.633, 0.619, 0.606)
# # x <- 3 / (4 * (n - 1)) * (4 * n - 3) / sqrt(n)
# x <- 3 / c4(n) / sqrt(n)
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
#
# b3 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 0.000, 0.000, 0.000, 0.000, 0.030, 0.118,
# 0.185, 0.239, 0.284, 0.321, 0.354, 0.382,
# 0.406, 0.428, 0.448, 0.466, 0.482, 0.497,
# 0.510, 0.523, 0.534, 0.545, 0.555, 0.565)
# x <- 1 - (3 / c4(n) / sqrt(2 * (n - 1)))
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
#
# b4 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 3.267, 2.568, 2.266, 2.089, 1.970, 1.882,
# 1.815, 1.761, 1.716, 1.679, 1.646, 1.618,
# 1.594, 1.572, 1.552, 1.534, 1.518, 1.503,
# 1.490, 1.477, 1.466, 1.455, 1.445, 1.435)
# x <- 1 + (3 / c4(n) / sqrt(2 * (n - 1)))
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
#
# c4 <- function(n) {
# n[n == 0] <- NA
# tbl <- c(NA,
# 0.7979, 0.8862, 0.9213, 0.9400, 0.9515, 0.9594,
# 0.9650, 0.9693, 0.9727, 0.9754, 0.9776, 0.9794,
# 0.9810, 0.9823, 0.9835, 0.9845, 0.9854, 0.9862,
# 0.9869, 0.9876, 0.9882, 0.9887, 0.9892, 0.9896)
#
# x <- 4 * (n - 1) / (4 * n - 3)
# w <- which(n <= 25)
# x[w] <- tbl[n[w]]
# x[is.nan(x)] <- NA
# return(x)
# }
# Format line labels function
lab.format <- function(x, decimals = 1, percent = FALSE) {
  # Format numeric line labels with a fixed number of decimals; when
  # percent = TRUE the values are scaled by 100 and suffixed with "%".
  fmt <- paste0("%.", decimals, "f")
  if (percent) {
    paste0(sprintf(fmt, x * 100), "%")
  } else {
    sprintf(fmt, x)
  }
}
# Make parts function
makeparts <- function(x, n) {
  # Translate break positions x into a part index (1, 2, ...) for each
  # of the n subgroups; position 0 always opens part 1.
  breaks <- sort(unique(c(0, x)))
  breaks <- breaks[breaks >= 0 & breaks < n]
  part.lengths <- diff(c(breaks, n))
  rep(seq_along(breaks), part.lengths)
}
# Fix notes function
fixnotes <- function(x) {
  # Tidy the '|'-collapsed note strings produced by qic.agg(): squeeze
  # repeated pipes to one, strip pipes at either end, pad the remaining
  # separators, and turn empty strings into NA.
  # NOTE(review): the bound "{2, }" contains a space; confirm the regex
  # engine reads it as "two or more" -- "{2,}" is the unambiguous
  # spelling.
  x <- gsub("\\|{2, }", "\\|", x)
  x <- gsub("^\\||\\|$", "", x)  # leading/trailing pipes
  x <- gsub("\\|", " | ", x)     # pad separators for readability
  x <- gsub("^$", NA, x)         # empty note -> NA
}
# Function for data aggregation and analysis
qic.agg <- function(d, got.n, part, agg.fun, freeze, exclude,
                    chart.fun, multiply, dots.only, chart, y.neg) {
  # Aggregate raw data to one row per subgroup (x) and facet, split the
  # chart into parts, compute centre line and control limits with the
  # chart-specific function, run the runs analysis, and flag signals.
  #
  #   d         long-format data with columns x, y, n, cl, target, notes,
  #             facet1, facet2
  #   got.n     TRUE when denominators (n) were supplied
  #   part      positions at which to split the chart into parts
  #   agg.fun   aggregation function applied to y when no n is given
  #   freeze    last subgroup included in the baseline period
  #   exclude   subgroup numbers excluded from the computations
  #   chart.fun one of the qic.* functions computing the limits
  #   multiply  scale factor for y and the limits (e.g. 100 for percent)
  #   dots.only, chart, y.neg  behaviour switches (see bottom of function)
  #
  # Quosures for the fixed column names used with tidy evaluation below.
  x <- quo(x)
  y <- quo(y)
  n <- quo(n)
  cl <- quo(cl)
  target <- quo(target)
  notes <- quo(notes)
  facet1 <- quo(facet1)
  facet2 <- quo(facet2)
  # Collapse to one row per subgroup per facet
  d <- d %>%
    filter(!is.na(!!x)) %>%
    group_by(!!x, !!facet1, !!facet2) %>%
    summarise(y.sum = sum(!!y, na.rm = TRUE),
              y.length = sum(!is.na(!!y)),
              y.mean = mean(!!y, na.rm = TRUE),
              y.sd = stats::sd(!!y, na.rm = TRUE),
              n = sum(!!n, na.rm = got.n),
              y = ifelse(got.n,
                         y.sum / n,
                         do.call(agg.fun, list(y, na.rm = TRUE))),
              cl = first(!!cl),
              target = first(!!target),
              notes = paste(!!notes, collapse = '|')
    ) %>%
    group_by(facet1, facet2) %>%
    mutate(part = makeparts(part, n()),
           xx = seq_along(part)) %>%
    ungroup() %>%
    mutate(baseline = xx <= freeze,
           include = !xx %in% exclude,
           notes = fixnotes(notes))
  # Limits and runs analysis are computed per facet and chart part; then
  # y and the limits are rescaled, and the last point in each part gets
  # the line labels.
  d <- split(d, d[c('facet1', 'facet2', 'part')]) %>%
    lapply(chart.fun) %>%
    lapply(runs.analysis) %>%
    lapply(function(x) {
      within(x, {
        y <- y * multiply
        cl <- cl * multiply
        lcl <- lcl * multiply
        ucl <- ucl * multiply
        cl.lab <- ifelse(xx == max(xx), cl, NA)
        lcl.lab <- ifelse(xx == max(xx), lcl, NA)
        ucl.lab <- ifelse(xx == max(xx), ucl, NA)
        target.lab <- ifelse(xx == max(xx), target, NA)
      })
    })
  d <- do.call(rbind, d) %>%
    arrange(!!facet1, !!facet2, !!x)
  # Remove control lines from missing subgroups
  d$ucl[!is.finite(d$ucl)] <- NA
  d$lcl[!is.finite(d$lcl)] <- NA
  # Add sigma signals
  d$sigma.signal <- d$y > d$ucl | d$y < d$lcl
  d$sigma.signal[is.na(d$sigma.signal)] <- FALSE
  # Ignore runs analysis if subgroups are categorical or if chart type is MR
  if (dots.only || chart == 'mr')
    d$runs.signal <- FALSE
  # Prevent negative y axis if y.neg argument is FALSE
  if (!y.neg & min(d$y, na.rm = TRUE) >= 0) {
    d$lcl[d$lcl < 0] <- 0
    d$lcl.lab[d$lcl.lab < 0] <- 0
  }
  return(d)
}
.onAttach <- function(libname, pkgname) {
  # Install the package-level default colours and shading on attach.
  defaults <- list(qic.linecol   = '#5DA5DA',
                   qic.signalcol = '#F15854',
                   qic.targetcol = '#059748',
                   qic.clshade   = TRUE)
  do.call(options, defaults)
}
.onDetach <- function(libpath) {
options(qic.linecol = NULL,
qic.signalcol = NULL,
qic.targetcol = NULL,
qic.clshade = NULL)
} |
# Method: integer exponentiation by repeated multiplication.
pow <- function(x, y)
{
	# y is truncated to an integer, as before. prod(rep(x, 0)) is 1, so
	# pow(x, 0) == 1. Negative exponents are now supported via the
	# reciprocal; previously rep() errored on a negative times argument.
	x <- as.numeric(x)
	y <- as.integer(y)
	if (y < 0) {
		return(1 / prod(rep(x, -y)))
	}
	prod(rep(x, y))
}
# Operator wrapper around the function form
"%pow%" <- function(x, y) pow(x, y)
# Benchmark loop: exercise both call styles repeatedly; results are
# discarded. NOTE(review): the trailing numbers appear to be annotations
# from the original task (presumably timings), not return values.
for (i in 0:1000000000) {
	pow(2017, 12)    # 81
	19.88 %pow% 12   # 6.25
}
| /Scripts/newTask/exponentiation-operator/r/exponentiation-operator.r | no_license | stefanos1316/Rosetta_Code_Research_MSR | R | false | false | 216 | r | # Method
# Integer exponentiation by repeated multiplication.
pow <- function(x, y)
{
	# y is truncated to an integer, as before. prod(rep(x, 0)) is 1, so
	# pow(x, 0) == 1. Negative exponents are now supported via the
	# reciprocal; previously rep() errored on a negative times argument.
	x <- as.numeric(x)
	y <- as.integer(y)
	if (y < 0) {
		return(1 / prod(rep(x, -y)))
	}
	prod(rep(x, y))
}
# Operator wrapper around the function form
"%pow%" <- function(x, y) pow(x, y)
# Benchmark loop: exercise both call styles repeatedly; results are
# discarded. NOTE(review): the trailing numbers appear to be annotations
# from the original task (presumably timings), not return values.
for (i in 0:1000000000) {
	pow(2017, 12)    # 81
	19.88 %pow% 12   # 6.25
}
|
\name{HillP}
\alias{HillP}
\title{
Parallelised Hill's diversity index.
}
\description{
Parallelised Hill's diversity index.
}
\usage{
HillP(rasterm, w, alpha, na.tolerance, debugging)
}
\arguments{
\item{rasterm}{input data.}
\item{w}{half of the side of the square moving window.}
\item{alpha}{alpha value for order of diversity in Hill's Index.}
\item{na.tolerance}{a numeric value \eqn{(0.0-1.0)} which indicates the proportion of NA values that will be tolerated to calculate Rao's index in each moving window over \emph{x}. If the relative proportion of NA's in a moving window is bigger than na.tolerance, then the value of the window will be set as NA, otherwise Rao's index will be calculated considering the non-NA values. Default values is 0.0 (i.e., no tolerance for NA's).}
\item{debugging}{a boolean variable set to FALSE by default. If TRUE, additional messages will be printed. For de-bugging only.}
}
\value{
Matrix or a list of matrices with the Hill index computed through a moving window of the given size.
}
\author{
Marcantonio Matteo \email{marcantoniomatteo@gmail.com} \cr
Martina Iannacito \email{martina.iannacito@inria.fr} \cr
Duccio Rocchini \email{duccio.rocchini@unibo.it} \cr
}
\seealso{
\code{\link{Hill}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{internal}
| /man/HillP.Rd | no_license | yangxhcaf/rasterdiv | R | false | false | 1,350 | rd | \name{HillP}
\alias{HillP}
\title{
Parallelised Hill's diversity index.
}
\description{
Parallelised Hill's diversity index.
}
\usage{
HillP(rasterm, w, alpha, na.tolerance, debugging)
}
\arguments{
\item{rasterm}{input data.}
\item{w}{half of the side of the square moving window.}
\item{alpha}{alpha value for order of diversity in Hill's Index.}
\item{na.tolerance}{a numeric value \eqn{(0.0-1.0)} which indicates the proportion of NA values that will be tolerated to calculate Rao's index in each moving window over \emph{x}. If the relative proportion of NA's in a moving window is bigger than na.tolerance, then the value of the window will be set as NA, otherwise Rao's index will be calculated considering the non-NA values. Default values is 0.0 (i.e., no tolerance for NA's).}
\item{debugging}{a boolean variable set to FALSE by default. If TRUE, additional messages will be printed. For de-bugging only.}
}
\value{
Matrix or a list of matrices with the Hill index computed through a moving window of the given size.
}
\author{
Marcantonio Matteo \email{marcantoniomatteo@gmail.com} \cr
Martina Iannacito \email{martina.iannacito@inria.fr} \cr
Duccio Rocchini \email{duccio.rocchini@unibo.it} \cr
}
\seealso{
\code{\link{Hill}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{internal}
|
# Import libraries ----
library('tidyverse')
library('survival')
source(here::here("lib", "redaction_functions.R"))
# import data ----
# Cohort of vaccinated people prepared by an upstream action
data_vaccinated <- read_rds(
  here::here("output", "data", "data_vaccinated.rds")
)
# 30-day event rates ----
eventrate_table_1 <- function(data, outcome, variable, days=30L){
  # Count events for one outcome occurring within the first `days` of
  # follow-up, broken down by the levels of `variable`. Expects columns
  # tte_<outcome> (time to event) and ind_<outcome> (event indicator).
  # Restrict to people with at least `days` of follow-up so every row
  # contributes a full observation window.
  dat <- data %>%
    filter(tte_end >= days)
  # Redacted cross-tabulation of "event within `days`" against the
  # variable; keep only the rows where an event occurred (.level1 == "1").
  tab <- redacted_summary_catcat(
    as.character((dat[[paste0("tte_", outcome)]] <= days)*dat[[paste0("ind_", outcome)]]),
    dat[[variable]],
    redaction_threshold = 0L,
    redaction_accuracy = 7L
  ) %>%
    filter(.level1=="1") %>%
    select(
      .level2, n, pct
    )
  # Name the count/percentage columns after the outcome
  names(tab) <- c(variable, glue::glue("{outcome}_n"), glue::glue("{outcome}_%"))
  tab
}
eventrate_table_all <- function(data, variable, days=14L){
  # One table per stratifying variable: total follow-up counts joined
  # with the within-`days` event counts for each outcome.
  total_followup <- data %>%
    filter(tte_end >= days) %>%
    {redacted_summary_cat(
      .[[variable]],
      redaction_threshold=0L,
      redaction_accuracy=7L
    )} %>%
    select(.level, n) %>%
    set_names(c(variable, "n"))
  # One event-rate table per outcome, joined on the variable's levels
  list(
    total_followup,
    #eventrate_table_1(data, "seconddose", variable, days),
    eventrate_table_1(data, "posPC", variable, days),
    eventrate_table_1(data, "postest", variable, days),
    eventrate_table_1(data, "admitted", variable, days),
    eventrate_table_1(data, "coviddeath", variable, days),
    eventrate_table_1(data, "noncoviddeath", variable, days),
    eventrate_table_1(data, "death", variable, days)
  ) %>%
    reduce(full_join, by=variable)
}
# output tables ----
dir.create(here::here("output", "tte", "tables"), showWarnings = FALSE, recursive=TRUE)
c(
"sex", "ageband", "imd", "ethnicity", "region",
"bmi",
"infection_before_vax"
# "chronic_cardiac_disease",
# "current_copd",
# "dmards",
# "dialysis",
# "solid_organ_transplantation",
# "chemo_or_radio",
# "intel_dis_incl_downs_syndrome",
# "lung_cancer",
# "cancer_excl_lung_and_haem",
# "haematological_cancer",
# "bone_marrow_transplant",
# "cystic_fibrosis",
# "sickle_cell_disease",
# "permanant_immunosuppression",
# "temporary_immunosuppression",
# "psychosis_schiz_bipolar",
# "asplenia",
# "dementia"
) %>%
set_names(.) %>%
map(~{eventrate_table_all(data_vaccinated, ., 14L)}) %>%
enframe() %>%
transmute(
x=value,
path=paste0(here::here("output", "tte", "tables", paste0("event_rates_at_14days_", name, ".csv"))),
na="-"
) %>%
pwalk(write_csv)
| /analysis/R/tte_tables.R | permissive | opensafely/post-vax-outcomes-report | R | false | false | 2,444 | r | # Import libraries ----
library('tidyverse')
library('survival')
source(here::here("lib", "redaction_functions.R"))
# import data ----
# Cohort of vaccinated people prepared by an upstream action
data_vaccinated <- read_rds(
  here::here("output", "data", "data_vaccinated.rds")
)
# 30-day event rates ----
eventrate_table_1 <- function(data, outcome, variable, days=30L){
  # Count events for one outcome occurring within the first `days` of
  # follow-up, broken down by the levels of `variable`. Expects columns
  # tte_<outcome> (time to event) and ind_<outcome> (event indicator).
  # Restrict to people with at least `days` of follow-up so every row
  # contributes a full observation window.
  dat <- data %>%
    filter(tte_end >= days)
  # Redacted cross-tabulation of "event within `days`" against the
  # variable; keep only the rows where an event occurred (.level1 == "1").
  tab <- redacted_summary_catcat(
    as.character((dat[[paste0("tte_", outcome)]] <= days)*dat[[paste0("ind_", outcome)]]),
    dat[[variable]],
    redaction_threshold = 0L,
    redaction_accuracy = 7L
  ) %>%
    filter(.level1=="1") %>%
    select(
      .level2, n, pct
    )
  # Name the count/percentage columns after the outcome
  names(tab) <- c(variable, glue::glue("{outcome}_n"), glue::glue("{outcome}_%"))
  tab
}
eventrate_table_all <- function(data, variable, days=14L){
  # One table per stratifying variable: total follow-up counts joined
  # with the within-`days` event counts for each outcome.
  total_followup <- data %>%
    filter(tte_end >= days) %>%
    {redacted_summary_cat(
      .[[variable]],
      redaction_threshold=0L,
      redaction_accuracy=7L
    )} %>%
    select(.level, n) %>%
    set_names(c(variable, "n"))
  # One event-rate table per outcome, joined on the variable's levels
  list(
    total_followup,
    #eventrate_table_1(data, "seconddose", variable, days),
    eventrate_table_1(data, "posPC", variable, days),
    eventrate_table_1(data, "postest", variable, days),
    eventrate_table_1(data, "admitted", variable, days),
    eventrate_table_1(data, "coviddeath", variable, days),
    eventrate_table_1(data, "noncoviddeath", variable, days),
    eventrate_table_1(data, "death", variable, days)
  ) %>%
    reduce(full_join, by=variable)
}
# output tables ----
dir.create(here::here("output", "tte", "tables"), showWarnings = FALSE, recursive=TRUE)
c(
"sex", "ageband", "imd", "ethnicity", "region",
"bmi",
"infection_before_vax"
# "chronic_cardiac_disease",
# "current_copd",
# "dmards",
# "dialysis",
# "solid_organ_transplantation",
# "chemo_or_radio",
# "intel_dis_incl_downs_syndrome",
# "lung_cancer",
# "cancer_excl_lung_and_haem",
# "haematological_cancer",
# "bone_marrow_transplant",
# "cystic_fibrosis",
# "sickle_cell_disease",
# "permanant_immunosuppression",
# "temporary_immunosuppression",
# "psychosis_schiz_bipolar",
# "asplenia",
# "dementia"
) %>%
set_names(.) %>%
map(~{eventrate_table_all(data_vaccinated, ., 14L)}) %>%
enframe() %>%
transmute(
x=value,
path=paste0(here::here("output", "tte", "tables", paste0("event_rates_at_14days_", name, ".csv"))),
na="-"
) %>%
pwalk(write_csv)
|
# Helper functions for run_benchmark
#' Helper Function to generate the booleans used to check if the current
#' locations/rds objects are the same as the previous one.
#'
#' @param .design input tibble used to provide the experimental design for each
#' benchmark run
#'
#' @keywords internal
#'
#' @details This is used to limit the number of times that any of the
#' prerequisites is loaded.
#' @export
format_design <- function(.design){
  # Ensure the optional filter columns exist (filled with NA when the
  # user did not supply them), then add booleans marking whether each
  # location column repeats the previous row's value, so that each
  # resource is only reloaded when its location actually changes.
  if (!"noise_crit" %in% colnames(.design)){
    .design <- .design %>%
      add_column('noise_crit' = NA)
  }
  if (!"weight_crit" %in% colnames(.design)){
    .design <- .design %>%
      add_column('weight_crit' = NA)
  }
  .design %>%
    mutate(.source_bln = .data$source_loc %>% check_preced(),
           .expr_bln = .data$bexpr_loc %>% check_preced(),
           .meta_bln = .data$bmeta_loc %>% check_preced())
}
#' Helper Function that checks if the preceding vector element is the same
#' as the current element
#'
#' @param vector_loc character vector with directory paths
#'
#' @return logical values describing whether the location of the loaded files
#' has changed
#'
#' @keywords internal
check_preced <- function(vector_loc){
  # TRUE where an element repeats its immediate predecessor; the first
  # element (no predecessor) is FALSE. Vectorized base-R replacement for
  # the former row-wise pmap over a tibble with a scalar `||` and the
  # `ifelse(cond, FALSE, TRUE)` anti-idiom; same result, no per-row
  # overhead and no tibble/purrr dependency.
  if (length(vector_loc) == 0) {
    return(logical(0))
  }
  behind <- c(NA, vector_loc[-length(vector_loc)])  # lag by one position
  !is.na(behind) & behind == vector_loc
}
#' Helper Function to filter and format the gene set resource
#'
#' @param set_source Set Source (e.g. TF regulon sets, GO:term sets, etc)
#' @inheritParams input_tibble
#' @param .minsize minimum size of each set
#' @param .silent bool whether to silence warning messages
#'
#' @importFrom stringr str_glue
#'
#' @return returns a filtered and formatted set source
#'
#' @details Filtering can be omitted if `filter_col` is `NA`.
filter_sets <- function(set_source,
                        source_col,
                        filter_col,
                        filter_crit,
                        .minsize,
                        .silent){
  # Number of exactly duplicated rows before filtering (reported below)
  n_duprows <- sum(duplicated(set_source))
  na_bool <- is.na(filter_col)  # no filter column supplied -> skip filtering
  gs_filtered <- set_source %>%
    {
      # Either just de-duplicate, or filter on filter_col and drop it
      if(na_bool){distinct(.)}
      else if(!na_bool){
        filter(., .data[[filter_col]] %in% filter_crit) %>%
          distinct_at(vars(-.data[[filter_col]]), .keep_all = FALSE)
      }
    } %>%
    # Drop sets smaller than .minsize
    group_by(.data[[source_col]]) %>%
    add_count() %>%
    filter(n >= .minsize) %>%
    ungroup()
  if (n_duprows & !.silent){
    warning(str_glue("{n_duprows} rows were duplicated in the set resource! ",
                     "{sum(duplicated(gs_filtered))} duplicated rows ",
                     "remain after filtering."))
  }
  return(gs_filtered)
}
#' `base::readRDS` helper function that enables loading files from urls
#'
#' @inheritParams base::readRDS
#' @inheritDotParams base::readRDS
#'
#' @param .url_bool bool whether the location is a url or not
#'
#' @export
readRDS_helper <- function(file, .url_bool=FALSE, ...){
  # readRDS() wrapper that can also read an .rds file from a URL.
  # NOTE(review): in the URL branch the dots are forwarded to url(),
  # not readRDS() -- confirm that asymmetry is intended.
  if (!.url_bool) {
    return(readRDS(file, ...))
  }
  readRDS(url(file, "rb", ...))
}
#' Function to format benchmarking results
#'
#' @param .bench_res benchmarking results
#' @param .silent bool whether to silence warnings or not
#' @returns formatted benchmarking results
#' @importFrom rlang .data
#' @importFrom stringr str_glue_data
#'
#' @details If infinite values are present in the results, this function will
#' notify the user.
#' @export
bench_format <- function(.bench_res, .silent) {
  # Flatten the nested benchmark results to one row per
  # set/benchmark/statistic combination and replace infinite activity
  # scores with 0, warning about how many were replaced.
  res_format <- .bench_res %>%
    unnest(.data$activity) %>%
    # convert filter_criteria from character to string
    rowwise() %>%
    mutate(filter_crit = paste0(unlist(.data$filter_crit), collapse = "")) %>%
    ungroup() %>%
    # get statistic name
    mutate(statistic = .data$activity %>%
             map(function(tib)
               unique(tib[["statistic"]]))) %>%
    unnest(.data$statistic) %>%
    select(.data$set_name, .data$bench_name, .data$filter_crit,
           .data$statistic, .data$activity)
  # Count infinite scores per set/benchmark/statistic combination
  inf_sums <- res_format$activity %>%
    map(function(x) sum(is.infinite(x$score))) %>%
    setNames(
      paste(
        res_format$set_name,
        res_format$bench_name,
        res_format$statistic,
        sep = "_"
      )) %>%
    enframe() %>% unnest(.data$value)
  if (sum(inf_sums$value)) {
    # Zero out the infinite scores and tell the user where they were
    res_format <- res_format %>%
      mutate(activity = .data$activity %>%
               map(function(tib)
                 tib %>%
                   mutate_at(
                     vars(.data$score), ~ replace(., is.infinite(.), 0)
                   )))
    warning(
      inf_sums %>%
        filter(.data$value > 0) %>%
        str_glue_data("{.$value} infinite values were filtered",
                      " in {.$name}. \n ")
    )
  }
  return(res_format)
}
# benchmark input --------------------------------------------------------------
#' Benchmark input tibble containing the (experimental) design for each of the
#' benchmark runs corresponding to rows
#' @name input_tibble
#'
#' @details A tibble with locations, options, and filter options for
#' the desired benchmark setting
#'
#' @param set_name user-defined name of the set resource
#' @param bench_name user-defined name of the benchmark data
#' @param stats_list List of statistics to run
#' @param opts_list Named list containing the options for each stat. method
#' @param bexpr_loc benchmark expression data location (.rds format tibble)
#' @param bmeta_loc benchmark metadata location (.rds format tibble)
#' @param source_loc set source (e.g. network resource, gene ontology sets,
#' kinase sets, etc.) location (.rds format tibble)
#' @param source_col name of the column with the source for the set source
#' @param target_col name of the column with the targets for the set source
#' @param filter_col name of the column by which we wish to filter
#' @param filter_crit criteria by which we wish to filter the `filter_col`
NULL
| /R/utils.R | permissive | ohsu-comp-bio/decoupleRBench | R | false | false | 5,912 | r | # Helper functions for run_benchmark
#' Helper Function to generate the booleans used to check if the current
#' locations/rds objects are the same as the previous one.
#'
#' @param .design input tibble used to provide the experimental design for each
#' benchmark run
#'
#' @keywords internal
#'
#' @details This is used to limit the number of times that any of the
#' prerequisites is loaded.
#' @export
format_design <- function(.design){
  # Ensure the optional filter columns exist (filled with NA when the
  # user did not supply them), then add booleans marking whether each
  # location column repeats the previous row's value, so that each
  # resource is only reloaded when its location actually changes.
  if (!"noise_crit" %in% colnames(.design)){
    .design <- .design %>%
      add_column('noise_crit' = NA)
  }
  if (!"weight_crit" %in% colnames(.design)){
    .design <- .design %>%
      add_column('weight_crit' = NA)
  }
  .design %>%
    mutate(.source_bln = .data$source_loc %>% check_preced(),
           .expr_bln = .data$bexpr_loc %>% check_preced(),
           .meta_bln = .data$bmeta_loc %>% check_preced())
}
#' Helper Function that checks if the preceding vector element is the same
#' as the current element
#'
#' @param vector_loc character vector with directory paths
#'
#' @return logical values describing whether the location of the loaded files
#' has changed
#'
#' @keywords internal
check_preced <- function(vector_loc){
  # TRUE where an element repeats its immediate predecessor; the first
  # element (no predecessor) is FALSE. Vectorized base-R replacement for
  # the former row-wise pmap over a tibble with a scalar `||` and the
  # `ifelse(cond, FALSE, TRUE)` anti-idiom; same result, no per-row
  # overhead and no tibble/purrr dependency.
  if (length(vector_loc) == 0) {
    return(logical(0))
  }
  behind <- c(NA, vector_loc[-length(vector_loc)])  # lag by one position
  !is.na(behind) & behind == vector_loc
}
#' Helper Function to filter and format the gene set resource
#'
#' @param set_source Set Source (e.g. TF regulon sets, GO:term sets, etc)
#' @inheritParams input_tibble
#' @param .minsize minimum size of each set
#' @param .silent bool whether to silence warning messages
#'
#' @importFrom stringr str_glue
#'
#' @return returns a filtered and formatted set source
#'
#' @details Filtering can be omitted if `filter_col` is `NA`.
filter_sets <- function(set_source,
                        source_col,
                        filter_col,
                        filter_crit,
                        .minsize,
                        .silent){
  # Number of exactly duplicated rows before filtering (reported below)
  n_duprows <- sum(duplicated(set_source))
  na_bool <- is.na(filter_col)  # no filter column supplied -> skip filtering
  gs_filtered <- set_source %>%
    {
      # Either just de-duplicate, or filter on filter_col and drop it
      if(na_bool){distinct(.)}
      else if(!na_bool){
        filter(., .data[[filter_col]] %in% filter_crit) %>%
          distinct_at(vars(-.data[[filter_col]]), .keep_all = FALSE)
      }
    } %>%
    # Drop sets smaller than .minsize
    group_by(.data[[source_col]]) %>%
    add_count() %>%
    filter(n >= .minsize) %>%
    ungroup()
  if (n_duprows & !.silent){
    warning(str_glue("{n_duprows} rows were duplicated in the set resource! ",
                     "{sum(duplicated(gs_filtered))} duplicated rows ",
                     "remain after filtering."))
  }
  return(gs_filtered)
}
#' `base::readRDS` helper function that enables loading files from urls
#'
#' @inheritParams base::readRDS
#' @inheritDotParams base::readRDS
#'
#' @param .url_bool bool whether the location is a url or not
#'
#' @export
readRDS_helper <- function(file, .url_bool=FALSE, ...){
  # readRDS() wrapper that can also read an .rds file from a URL.
  # NOTE(review): in the URL branch the dots are forwarded to url(),
  # not readRDS() -- confirm that asymmetry is intended.
  if (!.url_bool) {
    return(readRDS(file, ...))
  }
  readRDS(url(file, "rb", ...))
}
#' Function to format benchmarking results
#'
#' @param .bench_res benchmarking results
#' @param .silent bool whether to silence warnings or not
#' @returns formatted benchmarking results
#' @importFrom rlang .data
#' @importFrom stringr str_glue_data
#'
#' @details If infinite values are present in the results, this function will
#' notify the user.
#' @export
bench_format <- function(.bench_res, .silent) {
  # Flatten the nested benchmark results to one row per
  # set/benchmark/statistic combination and replace infinite activity
  # scores with 0, warning about how many were replaced.
  res_format <- .bench_res %>%
    unnest(.data$activity) %>%
    # convert filter_criteria from character to string
    rowwise() %>%
    mutate(filter_crit = paste0(unlist(.data$filter_crit), collapse = "")) %>%
    ungroup() %>%
    # get statistic name
    mutate(statistic = .data$activity %>%
             map(function(tib)
               unique(tib[["statistic"]]))) %>%
    unnest(.data$statistic) %>%
    select(.data$set_name, .data$bench_name, .data$filter_crit,
           .data$statistic, .data$activity)
  # Count infinite scores per set/benchmark/statistic combination
  inf_sums <- res_format$activity %>%
    map(function(x) sum(is.infinite(x$score))) %>%
    setNames(
      paste(
        res_format$set_name,
        res_format$bench_name,
        res_format$statistic,
        sep = "_"
      )) %>%
    enframe() %>% unnest(.data$value)
  if (sum(inf_sums$value)) {
    # Zero out the infinite scores and tell the user where they were
    res_format <- res_format %>%
      mutate(activity = .data$activity %>%
               map(function(tib)
                 tib %>%
                   mutate_at(
                     vars(.data$score), ~ replace(., is.infinite(.), 0)
                   )))
    warning(
      inf_sums %>%
        filter(.data$value > 0) %>%
        str_glue_data("{.$value} infinite values were filtered",
                      " in {.$name}. \n ")
    )
  }
  return(res_format)
}
# benchmark input --------------------------------------------------------------
#' Benchmark input tibble containing the (experimental) design for each of the
#' benchmark runs corresponding to rows
#' @name input_tibble
#'
#' @details A tibble with locations, options, and filter options for
#' the desired benchmark setting
#'
#' @param set_name user-defined name of the set resource
#' @param bench_name user-defined name of the benchmark data
#' @param stats_list List of statistics to run
#' @param opts_list Named list containing the options for each stat. method
#' @param bexpr_loc benchmark expression data location (.rds format tibble)
#' @param bmeta_loc benchmark metadata location (.rds format tibble)
#' @param source_loc set source (e.g. network resource, gene ontology sets,
#' kinase sets, etc.) location (.rds format tibble)
#' @param source_col name of the column with the source for the set source
#' @param target_col name of the column with the targets for the set source
#' @param filter_col name of the column by which we wish to filter
#' @param filter_crit criteria by which we wish to filter the `filter_col`
NULL
|
library(wpd)
sql <-
wpd_sql(
"
insert into imports_%s
select
aa.page_id,
2008,
string_agg((CASE when bb.page_view_count isnull THEN 0 else bb.page_view_count end)::text, ',')
from
(
select * from (select generate_series('2008-01-01'::date, '2008-12-31'::date, '1 day'::interval)::date as page_view_date) as a
cross join
(select page_id from dict_%s) as b
-- where page_id
) as aa
left join
(
select page_id, page_view_date, sum(page_view_count) as page_view_count
from page_views_%s_2008_import
-- where page_id
group by page_id, page_view_date
) as bb
on aa.page_id = bb.page_id and aa.page_view_date = bb.page_view_date
group by aa.page_id
;
",
sort(wpd_languages),
sort(wpd_languages),
sort(wpd_languages)
)
start <- Sys.time()
dbt_hlp_progress(0, length(sql))
for ( i in seq_along(sql) ){
cat(rev(sql)[i])
wpd_get_query_master(rev(sql)[i])
dbt_hlp_progress(i, length(sql), start)
}
| /scripts/db_import_aggregate_2008.R | no_license | petermeissner/wikipediadumbs | R | false | false | 1,103 | r |
library(wpd)
sql <-
wpd_sql(
"
insert into imports_%s
select
aa.page_id,
2008,
string_agg((CASE when bb.page_view_count isnull THEN 0 else bb.page_view_count end)::text, ',')
from
(
select * from (select generate_series('2008-01-01'::date, '2008-12-31'::date, '1 day'::interval)::date as page_view_date) as a
cross join
(select page_id from dict_%s) as b
-- where page_id
) as aa
left join
(
select page_id, page_view_date, sum(page_view_count) as page_view_count
from page_views_%s_2008_import
-- where page_id
group by page_id, page_view_date
) as bb
on aa.page_id = bb.page_id and aa.page_view_date = bb.page_view_date
group by aa.page_id
;
",
sort(wpd_languages),
sort(wpd_languages),
sort(wpd_languages)
)
start <- Sys.time()
dbt_hlp_progress(0, length(sql))
for ( i in seq_along(sql) ){
cat(rev(sql)[i])
wpd_get_query_master(rev(sql)[i])
dbt_hlp_progress(i, length(sql), start)
}
|
# Class 9: NMDS + PERMANOVA
# Load packages
library(tidyverse)
library(vegan)
# Load data: bird abundances per site plus site metadata.
# NOTE(review): cbind() pairs the two tables positionally — assumes both CSVs
# have the same row order; confirm against the data files.
Abundancia <- read.csv("Clase10_NMDS_Permanova/DAves.csv")
Sitios <- read.csv("Clase10_NMDS_Permanova/Sitios.csv")
UnaTabla <- cbind(Sitios, Abundancia)
# NMDS
# Similarity analysis: non-metric multidimensional scaling on Bray-Curtis
# dissimilarity, 3 dimensions (k = 3), up to 1000 random starts.
Aves_nmds <- metaMDS(Abundancia, try = 1000, k = 3, distance = "bray")
# Stress interpretation guide:
# ~0.1 very good
# ~0.15 - 0.20 good
#~0.21 - 0.29 fair
# >0.30 not good, poor
Aves_nmds$stress
# Prepare data frames of the ordination axes (species scores and site scores)
Especies <- data.frame(Aves_nmds$species)
Especies$Especie <- rownames(Especies)
View(Especies)
Sistema <- data.frame(Aves_nmds$points,
                      Sistema = Sitios$Sistema)
View(Sistema)
# NMDS plot: keep only three systems of interest and one highlighted species
SistemasDAI <- Sistema %>%
  filter(Sistema %in% c("BOS", "DAIB", "SOL"))
EspeciesImp <- Especies %>%
  filter(Especie == "CARPUS")
# Axes 1 vs 2, with per-system ellipses and the highlighted species label
ggplot()+
  geom_point(data= SistemasDAI, aes(x= MDS1, y= MDS2, color= Sistema))+
  stat_ellipse(data= SistemasDAI, size= 1, geom = "polygon", alpha= 0.5,
               aes(x= MDS1, y= MDS2, color = Sistema, fill= Sistema))+
  geom_text(data= EspeciesImp, size= 3, color = "black",
            aes(x= MDS1, y= MDS2, label = Especie))+
  theme_classic()
# Interactive help lookup left in by the author
?stat_ellipse
# Same plot for ordination axes 2 vs 3
ggplot()+
  geom_point(data= SistemasDAI, aes(x= MDS2, y= MDS3, color= Sistema))+
  geom_text(data= EspeciesImp, size= 3, color = "black", alpha= 0.7,
            aes(x= MDS2, y= MDS3, label = Especie))+
  stat_ellipse(data= SistemasDAI, geom = "polygon", alpha= 0.5,
               aes(x= MDS2, y= MDS3, color = Sistema, fill= Sistema))+
  theme_classic()
| /Clase10_NMDS_Permanova/clase9.R | no_license | DavidMurillo94/AEEAE2 | R | false | false | 1,659 | r | # Clase 9. NMDS permanova
#Cargar paquetes
library(tidyverse)
library(vegan)
# Cargar base de datos
Abundancia <- read.csv("Clase10_NMDS_Permanova/DAves.csv")
Sitios <- read.csv("Clase10_NMDS_Permanova/Sitios.csv")
UnaTabla <- cbind(Sitios, Abundancia)
# NMDS
# Analisis de similaridad
Aves_nmds <- metaMDS(Abundancia, try = 1000, k = 3, distance = "bray")
# stress
# ~0.1 muy bueno
# ~0.15 - 0.20 bueno
#~0.21 - 0.29 regular
# >0.30 no es bueno, malo
Aves_nmds$stress
# Prepara las tablas de las dim
Especies <- data.frame(Aves_nmds$species)
Especies$Especie <- rownames(Especies)
View(Especies)
Sistema <- data.frame(Aves_nmds$points,
Sistema = Sitios$Sistema)
View(Sistema)
# Grafico nmds
SistemasDAI <- Sistema %>%
filter(Sistema %in% c("BOS", "DAIB", "SOL"))
EspeciesImp <- Especies %>%
filter(Especie == "CARPUS")
ggplot()+
geom_point(data= SistemasDAI, aes(x= MDS1, y= MDS2, color= Sistema))+
stat_ellipse(data= SistemasDAI, size= 1, geom = "polygon", alpha= 0.5,
aes(x= MDS1, y= MDS2, color = Sistema, fill= Sistema))+
geom_text(data= EspeciesImp, size= 3, color = "black",
aes(x= MDS1, y= MDS2, label = Especie))+
theme_classic()
?stat_ellipse
ggplot()+
geom_point(data= SistemasDAI, aes(x= MDS2, y= MDS3, color= Sistema))+
geom_text(data= EspeciesImp, size= 3, color = "black", alpha= 0.7,
aes(x= MDS2, y= MDS3, label = Especie))+
stat_ellipse(data= SistemasDAI, geom = "polygon", alpha= 0.5,
aes(x= MDS2, y= MDS3, color = Sistema, fill= Sistema))+
theme_classic()
|
# functions to help reduce duplication and increase consistency in the docs
# Each helper returns a glue string meant to be interpolated into a roxygen
# block. The template whitespace is significant — it is emitted verbatim into
# the generated documentation — so the string bodies must not be reformatted.

### ss ----

# Shared roxygen text for the `ss` argument (a Google Sheet identifier).
# `pname` lets a caller document the same concept under a different argument
# name; extra sentences passed via `...` are appended, space-separated.
param_ss <- function(..., pname = "ss") {
  template <- glue("
    @param {pname} \\
    Something that identifies a Google Sheet: its file ID, a URL from
    which we can recover the ID, an instance of `googlesheets4_spreadsheet`
    (returned by [gs4_get()]), or a [`dribble`][googledrive::dribble], which
    is how googledrive represents Drive files. Processed through
    [as_sheets_id()].
    ")
  dots <- list2(...)
  if (length(dots) > 0) {
    template <- c(template, dots)
  }
  glue_collapse(template, sep = " ")
}

### sheet ----

# Shared roxygen text for the `sheet` argument. `action` is spliced into the
# sentence (e.g. "read", "delete"); `pname` and `...` behave as in param_ss().
param_sheet <- function(..., action, pname = "sheet") {
  template <- glue("
    @param {pname} \\
    Sheet to {action}, in the sense of \"worksheet\" or \"tab\". \\
    You can identify a sheet by name, with a string, or by position, \\
    with a number.
    ")
  dots <- list2(...)
  if (length(dots) > 0) {
    template <- c(template, dots)
  }
  glue_collapse(template, sep = " ")
}

# Shared roxygen text for the `.before`/`.after` placement arguments;
# `sheet_text` names the thing being positioned (e.g. "sheet").
param_before_after <- function(sheet_text) {
  glue("
    @param .before,.after \\
    Optional specification of where to put the new {sheet_text}. \\
    Specify, at most, one of `.before` and `.after`. Refer to an existing \\
    sheet by name (via a string) or by position (via a number). If \\
    unspecified, Sheets puts the new {sheet_text} at the end.
    ")
}
| /R/roxygen.R | permissive | sanjmeh/googlesheets4 | R | false | false | 1,390 | r | # functions to help reduce duplication and increase consistency in the docs
### ss ----
param_ss <- function(..., pname = "ss") {
template <- glue("
@param {pname} \\
Something that identifies a Google Sheet: its file ID, a URL from
which we can recover the ID, an instance of `googlesheets4_spreadsheet`
(returned by [gs4_get()]), or a [`dribble`][googledrive::dribble], which
is how googledrive represents Drive files. Processed through
[as_sheets_id()].
")
dots <- list2(...)
if (length(dots) > 0) {
template <- c(template, dots)
}
glue_collapse(template, sep = " ")
}
### sheet ----
param_sheet <- function(..., action, pname = "sheet") {
template <- glue("
@param {pname} \\
Sheet to {action}, in the sense of \"worksheet\" or \"tab\". \\
You can identify a sheet by name, with a string, or by position, \\
with a number.
")
dots <- list2(...)
if (length(dots) > 0) {
template <- c(template, dots)
}
glue_collapse(template, sep = " ")
}
param_before_after <- function(sheet_text) {
glue("
@param .before,.after \\
Optional specification of where to put the new {sheet_text}. \\
Specify, at most, one of `.before` and `.after`. Refer to an existing \\
sheet by name (via a string) or by position (via a number). If \\
unspecified, Sheets puts the new {sheet_text} at the end.
")
}
|
# library(sp)
# library(sf)
# library(rgdal)
library(geojsonio)
# Convert a Spatial* (sp) object into a geojson list accepted by highcharter:
# coerce to sf, reproject to Web Mercator (EPSG:3857), then serialise.
sp2geojsonList <- function(spObject) {
  web_mercator <- st_transform(st_as_sf(spObject), 3857)
  geojson_list(web_mercator)
}
| /scripts/sp2geojsonList.R | no_license | strebuh/spatial_data_reviewer | R | false | false | 334 | r | # library(sp)
# library(sf)
# library(rgdal)
library(geojsonio)
sp2geojsonList <- function(spObject){
# convert to sf
sfObject <- st_as_sf(spObject)
# transform
sfObject <- st_transform(sfObject, 3857)
# get higcharter accepted geojson_list
JSONlistObject <- geojson_list(sfObject)
return(JSONlistObject)
}
|
#call in the raw datasets:
rm(list=ls())
library(rgdal)
library(maptools)
library(raster) ## To convert an "Extent" object to a "SpatialPolygons" object.
library(rgeos)
library(biomod2)
library(raster)
library(sp)
library(raster)
library(rgdal)
library(tidyverse)
#Call in maps as shape files
NAM <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/maps/NAM/NAM_adm1.shp", layer="NAM_adm1")
TZA <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/maps/TZA/TZA_adm2.shp", layer="TZA_adm2")
CAM <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/maps/CAM/CMR_adm2.shp", layer="CMR_adm2")
plot(NAM)
plot(TZA)
plot(CAM)
#names(NAM)
#print(NAM)
#TZA$NAME_2
#Call in the data files from FAO database
nam.df<-read_csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/Namibia_Crops.csv")
tza.df<-read_csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/Tanzania_Crops.csv")
cam.df<-read_csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/Cameroon_Crops.csv")
nam.df
tza.df
cam.df
colnames(nam.df)
#Turn the differently formatted dates int eh different file sinto Julian day (day of year)
colnames(nam.df)
colnames(nam.df)<-c("Country" , "AEZ" ,"Admin_areas","Ag_practices","Crop" , "Scientific_name" ,
"Botanical_family", "Other_names","Add_Info", "Planting_period_onset",
"Planting_period_end", "Sowing_Planting_rate" ,"Sowing_Planting_rate_unit" , "Preferred_sowing_plantin_period",
"Length_cropping_cycle" , "Harvesting_period_onset" ,"Harvesting_period_end", "Comments")
nam_juldays<-nam.df%>%
mutate(Plant_onset_julday=as.POSIXlt(gsub('-', '',Planting_period_onset), format = "%d%b")$yday)%>%
mutate(Plant_end_julday=as.POSIXlt(gsub('-', '',Planting_period_end), format = "%d%b")$yday)%>%
mutate(Harvest_onset_julday=as.POSIXlt(gsub('-', '',Harvesting_period_onset), format = "%d%b")$yday)%>%
mutate(Harvest_end_julday=as.POSIXlt(gsub('-', '',Harvesting_period_end), format = "%d%b")$yday)%>%
select(-Planting_period_onset, -Planting_period_end, -Harvesting_period_onset, -Harvesting_period_end)
cam_tza_df<-rbind(tza.df,cam.df)
colnames(cam_tza_df)<-colnames(nam.df)
others_juldays<-cam_tza_df%>%
mutate(Plant_onset_julday=as.POSIXlt(Planting_period_onset, format = "%d/%m")$yday)%>%
mutate(Plant_end_julday=as.POSIXlt(Planting_period_end, format = "%d/%m")$yday)%>%
mutate(Harvest_onset_julday=as.POSIXlt(Harvesting_period_onset, format = "%d/%m")$yday)%>%
mutate(Harvest_end_julday=as.POSIXlt(Harvesting_period_end, format = "%d/%m")$yday)%>%
select(-Planting_period_onset, -Planting_period_end, -Harvesting_period_onset, -Harvesting_period_end)
#Further clean some of the data, remove unwanted columns, and bind together as one
df<-rbind(nam_juldays,others_juldays)
df<-df%>%
select(-Other_names, -Add_Info, -Preferred_sowing_plantin_period, -Comments)
### Question 0: Does one country have more zones than the others?
# Count distinct agro-ecological zones (AEZ) per country. Grouping was
# previously written as group_by(c(Country)), which creates a column literally
# named "c(Country)" that then had to be renamed via colnames<-; counting the
# distinct (Country, AEZ) pairs directly gives correctly named columns.
zone_num <- df %>%
  distinct(Country, AEZ) %>%
  count(Country, name = "Zone_number")
# Bar chart of zone counts at the highest administrative level (country).
ggplot(zone_num, aes(x = Country, y = Zone_number)) +
  geom_col(width = 0.4) +
  labs(y = "Number of Agricultural Zones")
###QUESTION 1: Is there variation in the diversity of crops (total numbers) between the three countries
#Some of the regions have crops listed twice (for summer and winter seasons, so remove duplicates
tot_crop_num<-df%>%
distinct(AEZ, Crop, .keep_all = TRUE)%>%
group_by(c(Country))%>%
summarise(Diversity=n())
colnames(tot_crop_num)<-c("Country", "C.Diversity")
#ggplot at highest administration area - COUNTRY
ggplot(tot_crop_num, aes(x=Country, y=C.Diversity))+
geom_bar(stat="identity", width = 0.4)+
labs(y="Crop Diversity")
#Cameroon has fewer crops followed by Namibia and Tanzania
#There is not much different between the diversity of crops grown in each county
regional_crop_num<-df%>%
distinct(AEZ, Crop, .keep_all = TRUE)%>%
group_by(c(AEZ))%>%
summarise(Diversity=n())
colnames(regional_crop_num)<-c("AEZ", "R.Diversity")
reg_crop_num<-left_join(regional_crop_num, df, by="AEZ")%>%
group_by(AEZ)
#ggplot diversity within each region in the country
ggplot(reg_crop_num, aes(x=Country, y=R.Diversity))+
geom_boxplot() +
labs(y="Diversity within Agricultural Zones")
##ggplot for the proportion of the total countries crops that are grown in each region
reg_crop_prop<-left_join(reg_crop_num, tot_crop_num, by="Country")%>%
group_by(AEZ)
#ggplot diversity within each region in the country
ggplot(reg_crop_prop, aes(x=Country, y=R.Diversity))+
geom_boxplot() +
labs(y="Proportion of Total Crops grown in Agricultural Zones")
library(lmerTest)
library(emmeans)
# NOTE(review): `df3` is never created earlier in this script — the models
# below will fail unless it exists in the workspace. It looks like it should
# be `reg_crop_prop` (regional diversity joined with country totals); confirm
# and define it explicitly before these calls.
# Poisson GLM: does regional crop diversity differ between countries?
m1<-glm(R.Diversity~Country, data=df3, family=poisson)
anova(m1)
summary(m1)
# Pairwise country contrasts on the model's estimated marginal means
emmeans(m1, pairwise~Country)
plot(m1)#these look ok, except for the outlier in Tanzania
#regional diversity of the countries are all different from each other
#Cameroon>Namibia>Tanzania
# Percentage of the country's total crop diversity grown in each region
# (note: "propotion" is a typo but is used consistently as the column name)
df3$propotion.diversity<-(df3$R.Diversity/df3$C.Diversity)*100
ggplot(df3, aes(x=Country, y=propotion.diversity))+
  geom_boxplot() +
  labs(y="Proportion of Country's crops grown in region")
library(lmerTest)
library(emmeans)
# NOTE(review): `df2` is also undefined at this point — verify before running
str(df2)
# Same Poisson GLM on the proportion measure
m1<-glm(propotion.diversity~Country, data=df3, family=poisson)
anova(m1)
summary(m1)
emmeans(m1, pairwise~Country)
plot(m1)#these look ok, except for the outlier in Tanzania
#
#
#While cameroon has fewer zones (5 vs 9), and grows a smaller number of different crops;
#the crops are grown more evenly across the zones in comparison to Namibia and Tanz,
#who's zones have relatively less diversity
#Agricultural practises:
ggplot(df, aes(x=Country, y=Zone_number))+
geom_boxplot()
###Question 2: Given the differences in regional diversity, what is driving these patterns?
#A) Climate driven - Climate
#B) Socially driven - Cameroon farmers employ a greater range of methods than other two coutries
#C) Economonically driven - Is there a link between subsistence or commercial farming with crop diversity?
#D) Comination of two - Cameroon capitalises on longer/multiple growing seasons - if you count multi9ple season crops - higher diversity in Cameroon than the other two
#A-C required georeferencing information to extract climate/socieconomic data, so start with #D
#Climate and crop diversity - no georeferencing in the datasets, so need to link the regions(zones) to
#pre-existing shape or raster files
#Namibia and the zones - already defined (I think)
#call in zones.csv and rasterize
library(raster)
zones<-read.csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/AEZ16_CLAS--SSA.csv")
head(zones)
#check that these can be plotted:
#select out only Namibia from this file (other two don't have zones matched)
zone_NAM<-subset(zones, ISO3=="NAM")
head(zone_NAM); nrow(zone_NAM)
#turn the "AEZCLAS into a number (for the zone) - allocate a number to each zone (don't worry if it's correlct at the moment)
zns<-levels(zone_NAM$AEZ16_CLAS)
cods<-seq(1,length(zns),1)
zone_dict<-as.data.frame(cbind(zns,cods))
#generate another column in the namibia zone file to include the numeric zone code for mapping
for (i in (1:nrow(zone_NAM))){
zone_NAM$zone_code[i]<-zone_dict$cods[zone_dict$zns==zone_NAM$AEZ16_CLAS[i]]
}
#plot the raster of the AEZs for Namibia
str(RDF)
library(raster)
z<-zone_NAM$zone_code
RR<-as.matrix(cbind(zone_NAM$Y, zone_NAM$X, z), header=TRUE)
colnames(RR)<-c("y","x","z")
# set up an 'empty' raster, here via an extent object derived from your data
e <- extent(RR[,1:2])
num.lats<-length(levels(as.factor(RR[,2])))
num.lons<-length(levels(as.factor(RR[,1])))
r <- raster(e, ncol=num.lons-13, nrow=num.lats-10)
# you need to provide a function 'fun' for when there are multiple points per cell
x <- rasterize(RR[, 1:2], r, RR[,3], fun=mean)
plot(x)
crs.geo <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84") # geographical, datum WGS84
proj4string(x) <- crs.geo
#call in the namibia crop file and allocate diversity to the regions:
reg_crops_NAM<-subset(reg_crop_num, Country=="Namibia")
reg_crops_NAM<-distinct(reg_crops_NAM, AEZ, R.Diversity)
# The 16 candidate AEZ zone labels ("Zone 01" .. "Zone 16").
# NOTE(review): the original built this via data.frame(t("Zone 01", ...)), but
# t() takes a single argument, so that call is a runtime error; construct the
# one-column tibble directly. This also avoids the deprecated as.tibble().
potential_NAM_zones <- tibble(AEZ = sprintf("Zone %02d", 1:16))
### The zones in the Namibia file don't correspond to the zones in the global
# file, so they must be allocated manually based on the administrative-area
# descriptions in the file.
# NOTE(review): the manual allocation below was left unfinished and did not
# parse (`"Zone 11"=` has no value, and `list<-(...)` is not valid syntax),
# which prevented the whole script from being sourced. The object was never
# used downstream, so it is kept here commented out until the FAO-zone ->
# AEZ16-code mapping is completed.
# zone_map <- c("Zone 01" = 6,  "Zone 02" = 2,  "Zone 03" = NA, "Zone 04" = 9,
#               "Zone 05" = NA, "Zone 06" = NA, "Zone 07" = 0,  "Zone 08" = NA,
#               "Zone 09" = NA, "Zone 10" = 0,  "Zone 11" = NA, "Zone 12" = 0,
#               "Zone 13" = 0,  "Zone 14" = 0,  "Zone 15" = 0,  "Zone 16" = 0)
dff<-left_join(potential_NAM_zones,reg_crops_NAM, by="AEZ", .keep_all=TRUE)%>%
mutate(Diversity=ifelse(is.na(R.Diversity)==TRUE, 0 ,R.Diversity))%>%
select(-R.Diversity)
colnames(zone_NAM)[9]<-"AEZ1"
dff$AEZ1<-as.integer(seq(1,nrow(dff),1))
zone_NAM<-as.tibble(zone_NAM)
zone_NAM_diversity<-left_join(zone_NAM,dff, by="AEZ1", .keep_all=TRUE)
#plot the diversity and the zones to make a brick:
# set up an 'empty' raster, here via an extent object derived from your data
z<-zone_NAM_diversity$Diversity
RR<-as.matrix(cbind(zone_NAM_diversity$X, zone_NAM_diversity$Y, z), header=TRUE)
colnames(RR)<-c("x","y","z")
e <- extent(RR[,1:2])
num.lats<-length(levels(as.factor(RR[,2])))
num.lons<-length(levels(as.factor(RR[,1])))
r <- raster(e, ncol=num.lons-13, nrow=num.lats-10)
# need to provide a function 'fun' for when there are multiple points per cell
x <- rasterize(RR[, 1:2], r, RR[,3], fun=mean)
plot(x)
crs.geo <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84") # geographical, datum WGS84
proj4string(x) <- crs.geo
plot(NAM, add=TRUE)
###Question 3: With climate change, will Cameroon be more robust than other two???
gaez <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/GAEZ_shapes/gaez18.shp", layer="gaez18")
plot(gaez)
| /Stats_project/RScripts/data.exploration.R | no_license | madeleine-barton/stats_project | R | false | false | 10,431 | r |
#call in the raw datasets:
rm(list=ls())
library(rgdal)
library(maptools)
library(raster) ## To convert an "Extent" object to a "SpatialPolygons" object.
library(rgeos)
library(biomod2)
library(raster)
library(sp)
library(raster)
library(rgdal)
library(tidyverse)
#Call in maps as shape files
NAM <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/maps/NAM/NAM_adm1.shp", layer="NAM_adm1")
TZA <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/maps/TZA/TZA_adm2.shp", layer="TZA_adm2")
CAM <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/maps/CAM/CMR_adm2.shp", layer="CMR_adm2")
plot(NAM)
plot(TZA)
plot(CAM)
#names(NAM)
#print(NAM)
#TZA$NAME_2
#Call in the data files from FAO database
nam.df<-read_csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/Namibia_Crops.csv")
tza.df<-read_csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/Tanzania_Crops.csv")
cam.df<-read_csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/Cameroon_Crops.csv")
nam.df
tza.df
cam.df
colnames(nam.df)
#Turn the differently formatted dates int eh different file sinto Julian day (day of year)
colnames(nam.df)
colnames(nam.df)<-c("Country" , "AEZ" ,"Admin_areas","Ag_practices","Crop" , "Scientific_name" ,
"Botanical_family", "Other_names","Add_Info", "Planting_period_onset",
"Planting_period_end", "Sowing_Planting_rate" ,"Sowing_Planting_rate_unit" , "Preferred_sowing_plantin_period",
"Length_cropping_cycle" , "Harvesting_period_onset" ,"Harvesting_period_end", "Comments")
nam_juldays<-nam.df%>%
mutate(Plant_onset_julday=as.POSIXlt(gsub('-', '',Planting_period_onset), format = "%d%b")$yday)%>%
mutate(Plant_end_julday=as.POSIXlt(gsub('-', '',Planting_period_end), format = "%d%b")$yday)%>%
mutate(Harvest_onset_julday=as.POSIXlt(gsub('-', '',Harvesting_period_onset), format = "%d%b")$yday)%>%
mutate(Harvest_end_julday=as.POSIXlt(gsub('-', '',Harvesting_period_end), format = "%d%b")$yday)%>%
select(-Planting_period_onset, -Planting_period_end, -Harvesting_period_onset, -Harvesting_period_end)
cam_tza_df<-rbind(tza.df,cam.df)
colnames(cam_tza_df)<-colnames(nam.df)
others_juldays<-cam_tza_df%>%
mutate(Plant_onset_julday=as.POSIXlt(Planting_period_onset, format = "%d/%m")$yday)%>%
mutate(Plant_end_julday=as.POSIXlt(Planting_period_end, format = "%d/%m")$yday)%>%
mutate(Harvest_onset_julday=as.POSIXlt(Harvesting_period_onset, format = "%d/%m")$yday)%>%
mutate(Harvest_end_julday=as.POSIXlt(Harvesting_period_end, format = "%d/%m")$yday)%>%
select(-Planting_period_onset, -Planting_period_end, -Harvesting_period_onset, -Harvesting_period_end)
#Further clean some of the data, remove unwanted columns, and bind together as one
df<-rbind(nam_juldays,others_juldays)
df<-df%>%
select(-Other_names, -Add_Info, -Preferred_sowing_plantin_period, -Comments)
###Question 0: Does one country have more zones than the others?
zone_num<-df%>%
group_by(c(Country))%>%
distinct(AEZ, .keep_all = TRUE)%>%
summarise(Zone_number=n())
colnames(zone_num)<-c("Country", "Zone_number")
#ggplot at highest administration area - COUNTRY
ggplot(zone_num, aes(x=Country, y=Zone_number))+
geom_bar(stat="identity", width = 0.4)+
labs(y="Number of Agricultural Zones")
###QUESTION 1: Is there variation in the diversity of crops (total numbers) between the three countries
#Some of the regions have crops listed twice (for summer and winter seasons, so remove duplicates
tot_crop_num<-df%>%
distinct(AEZ, Crop, .keep_all = TRUE)%>%
group_by(c(Country))%>%
summarise(Diversity=n())
colnames(tot_crop_num)<-c("Country", "C.Diversity")
#ggplot at highest administration area - COUNTRY
ggplot(tot_crop_num, aes(x=Country, y=C.Diversity))+
geom_bar(stat="identity", width = 0.4)+
labs(y="Crop Diversity")
#Cameroon has fewer crops followed by Namibia and Tanzania
#There is not much different between the diversity of crops grown in each county
regional_crop_num<-df%>%
distinct(AEZ, Crop, .keep_all = TRUE)%>%
group_by(c(AEZ))%>%
summarise(Diversity=n())
colnames(regional_crop_num)<-c("AEZ", "R.Diversity")
reg_crop_num<-left_join(regional_crop_num, df, by="AEZ")%>%
group_by(AEZ)
#ggplot diversity within each region in the country
ggplot(reg_crop_num, aes(x=Country, y=R.Diversity))+
geom_boxplot() +
labs(y="Diversity within Agricultural Zones")
##ggplot for the proportion of the total countries crops that are grown in each region
reg_crop_prop<-left_join(reg_crop_num, tot_crop_num, by="Country")%>%
group_by(AEZ)
#ggplot diversity within each region in the country
ggplot(reg_crop_prop, aes(x=Country, y=R.Diversity))+
geom_boxplot() +
labs(y="Proportion of Total Crops grown in Agricultural Zones")
library(lmerTest)
library(emmeans)
m1<-glm(R.Diversity~Country, data=df3, family=poisson)
anova(m1)
summary(m1)
emmeans(m1, pairwise~Country)
plot(m1)#these look ok, except for the outlier in Tanzania
#regional diversity of the countries are all different from each other
#Cameroon>Namibia>Tanzania
df3$propotion.diversity<-(df3$R.Diversity/df3$C.Diversity)*100
ggplot(df3, aes(x=Country, y=propotion.diversity))+
geom_boxplot() +
labs(y="Proportion of Country's crops grown in region")
library(lmerTest)
library(emmeans)
str(df2)
m1<-glm(propotion.diversity~Country, data=df3, family=poisson)
anova(m1)
summary(m1)
emmeans(m1, pairwise~Country)
plot(m1)#these look ok, except for the outlier in Tanzania
#
#
#While cameroon has fewer zones (5 vs 9), and grows a smaller number of different crops;
#the crops are grown more evenly across the zones in comparison to Namibia and Tanz,
#who's zones have relatively less diversity
#Agricultural practises:
ggplot(df, aes(x=Country, y=Zone_number))+
geom_boxplot()
###Question 2: Given the differences in regional diversity, what is driving these patterns?
#A) Climate driven - Climate
#B) Socially driven - Cameroon farmers employ a greater range of methods than other two coutries
#C) Economonically driven - Is there a link between subsistence or commercial farming with crop diversity?
#D) Comination of two - Cameroon capitalises on longer/multiple growing seasons - if you count multi9ple season crops - higher diversity in Cameroon than the other two
#A-C required georeferencing information to extract climate/socieconomic data, so start with #D
#Climate and crop diversity - no georeferencing in the datasets, so need to link the regions(zones) to
#pre-existing shape or raster files
#Namibia and the zones - already defined (I think)
#call in zones.csv and rasterize
library(raster)
zones<-read.csv("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/Raw_Data/AEZ16_CLAS--SSA.csv")
head(zones)
#check that these can be plotted:
#select out only Namibia from this file (other two don't have zones matched)
zone_NAM<-subset(zones, ISO3=="NAM")
head(zone_NAM); nrow(zone_NAM)
#turn the "AEZCLAS into a number (for the zone) - allocate a number to each zone (don't worry if it's correlct at the moment)
zns<-levels(zone_NAM$AEZ16_CLAS)
cods<-seq(1,length(zns),1)
zone_dict<-as.data.frame(cbind(zns,cods))
#generate another column in the namibia zone file to include the numeric zone code for mapping
for (i in (1:nrow(zone_NAM))){
zone_NAM$zone_code[i]<-zone_dict$cods[zone_dict$zns==zone_NAM$AEZ16_CLAS[i]]
}
#plot the raster of the AEZs for Namibia
str(RDF)
library(raster)
z<-zone_NAM$zone_code
RR<-as.matrix(cbind(zone_NAM$Y, zone_NAM$X, z), header=TRUE)
colnames(RR)<-c("y","x","z")
# set up an 'empty' raster, here via an extent object derived from your data
e <- extent(RR[,1:2])
num.lats<-length(levels(as.factor(RR[,2])))
num.lons<-length(levels(as.factor(RR[,1])))
r <- raster(e, ncol=num.lons-13, nrow=num.lats-10)
# you need to provide a function 'fun' for when there are multiple points per cell
x <- rasterize(RR[, 1:2], r, RR[,3], fun=mean)
plot(x)
crs.geo <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84") # geographical, datum WGS84
proj4string(x) <- crs.geo
#call in the namibia crop file and allocate diversity to the regions:
reg_crops_NAM<-subset(reg_crop_num, Country=="Namibia")
reg_crops_NAM<-distinct(reg_crops_NAM, AEZ, R.Diversity)
potential_NAM_zones<-data.frame(t("Zone 01", "Zone 02", "Zone 03", "Zone 04", "Zone 05",
"Zone 06", "Zone 07", "Zone 08", "Zone 09", "Zone 10",
"Zone 11", "Zone 12", "Zone 13", "Zone 14", "Zone 15",
"Zone 16"))
potential_NAM_zones<-as.tibble(t(potential_NAM_zones))
colnames(potential_NAM_zones)<-"AEZ"
###the zones in teh namibia file don't correspond to the zones in the global file,
#so allocate manually based on administrative areas description in the file
""
list<-("Zone 01"=6, "Zone 02"=2, "Zone 03", "Zone 04"=9, "Zone 05",
"Zone 06", "Zone 07"=0, "Zone 08", "Zone 09", "Zone 10"=0,
"Zone 11"=, "Zone 12"=0, "Zone 13"=0, "Zone 14"=0, "Zone 15"=0,
"Zone 16"=0)
dff<-left_join(potential_NAM_zones,reg_crops_NAM, by="AEZ", .keep_all=TRUE)%>%
mutate(Diversity=ifelse(is.na(R.Diversity)==TRUE, 0 ,R.Diversity))%>%
select(-R.Diversity)
colnames(zone_NAM)[9]<-"AEZ1"
dff$AEZ1<-as.integer(seq(1,nrow(dff),1))
zone_NAM<-as.tibble(zone_NAM)
zone_NAM_diversity<-left_join(zone_NAM,dff, by="AEZ1", .keep_all=TRUE)
#plot the diversity and the zones to make a brick:
# set up an 'empty' raster, here via an extent object derived from your data
z<-zone_NAM_diversity$Diversity
RR<-as.matrix(cbind(zone_NAM_diversity$X, zone_NAM_diversity$Y, z), header=TRUE)
colnames(RR)<-c("x","y","z")
e <- extent(RR[,1:2])
num.lats<-length(levels(as.factor(RR[,2])))
num.lons<-length(levels(as.factor(RR[,1])))
r <- raster(e, ncol=num.lons-13, nrow=num.lats-10)
# need to provide a function 'fun' for when there are multiple points per cell
x <- rasterize(RR[, 1:2], r, RR[,3], fun=mean)
plot(x)
crs.geo <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84") # geographical, datum WGS84
proj4string(x) <- crs.geo
plot(NAM, add=TRUE)
###Question 3: With climate change, will Cameroon be more robust than other two???
gaez <- readOGR("C:/Users/bar823/Documents/MGB_docs/Data_School/stats_proj/Stats_project/GAEZ_shapes/gaez18.shp", layer="gaez18")
plot(gaez)
|
library(zbank)
### Name: zb_publications
### Title: Publications
### Aliases: zb_publications zb_publications_
### ** Examples
## Not run:
##D zb_publications(id = "427D7953-E8FC-41E8-BEA7-8AE644E6DE77")
##D zb_publications(query = "pyle")
## End(Not run)
| /data/genthat_extracted_code/zbank/examples/zb_publications.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 264 | r | library(zbank)
### Name: zb_publications
### Title: Publications
### Aliases: zb_publications zb_publications_
### ** Examples
## Not run:
##D zb_publications(id = "427D7953-E8FC-41E8-BEA7-8AE644E6DE77")
##D zb_publications(query = "pyle")
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{longDF}
\alias{longDF}
\title{Longitudinal example dataset}
\format{
A simulated data frame with 329 rows and 21 variables with data from
100 subjects:
\describe{
\item{C1}{continuous, complete baseline variable}
\item{C2}{continuous, incomplete baseline variable}
\item{B1}{binary, complete baseline variable}
\item{B2}{binary, incomplete baseline variable}
\item{M1}{unordered factor; complete baseline variable}
\item{M2}{unordered factor; incomplete baseline variable}
\item{O1}{ordered factor; complete baseline variable}
\item{O2}{ordered factor; incomplete baseline variable}
\item{P1}{count variable; complete baseline variable}
\item{P2}{count variable; incomplete baseline variable}
  \item{c1}{continuous, complete longitudinal variable}
  \item{c2}{continuous, incomplete longitudinal variable}
  \item{b1}{binary, complete longitudinal variable}
  \item{b2}{binary, incomplete longitudinal variable}
\item{o1}{ordered factor; complete longitudinal variable}
\item{o2}{ordered factor; incomplete longitudinal variable}
\item{p1}{count variable; complete longitudinal variable}
\item{p2}{count variable; incomplete longitudinal variable}
\item{id}{id (grouping) variable}
\item{time}{continuous complete longitudinal variable}
\item{y}{continuous, longitudinal (outcome) variable}
}
}
\usage{
data(longDF)
}
\description{
A simulated longitudinal dataset.
}
\keyword{datasets}
| /man/longDF.Rd | no_license | NErler/JointAI | R | false | true | 1,488 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{longDF}
\alias{longDF}
\title{Longitudinal example dataset}
\format{
A simulated data frame with 329 rows and 21 variables with data from
100 subjects:
\describe{
\item{C1}{continuous, complete baseline variable}
\item{C2}{continuous, incomplete baseline variable}
\item{B1}{binary, complete baseline variable}
\item{B2}{binary, incomplete baseline variable}
\item{M1}{unordered factor; complete baseline variable}
\item{M2}{unordered factor; incomplete baseline variable}
\item{O1}{ordered factor; complete baseline variable}
\item{O2}{ordered factor; incomplete baseline variable}
\item{P1}{count variable; complete baseline variable}
\item{P2}{count variable; incomplete baseline variable}
\item{c1}{continuous, complete longitudinal variable}
\item{c2}{continuous incomplete longitudinal variable}
\item{b1}{binary, complete longitudinal variable}
\item{b2}{binary incomplete longitudinal variable}
\item{o1}{ordered factor; complete longitudinal variable}
\item{o2}{ordered factor; incomplete longitudinal variable}
\item{p1}{count variable; complete longitudinal variable}
\item{p2}{count variable; incomplete longitudinal variable}
\item{id}{id (grouping) variable}
\item{time}{continuous complete longitudinal variable}
\item{y}{continuous, longitudinal (outcome) variable}
}
}
\usage{
data(longDF)
}
\description{
A simulated longitudinal dataset.
}
\keyword{datasets}
|
#' @title Double-Selection Plus Instrumental Variable Estimator
#' @description A three-step approach to estimate the endogenous treatment effect using high-dimensional instruments and double selection. It is applicable in the following scenarios: first, there is a known endogeneity problem for the treatment variable. Second, the treatment effect model has a large number of control variables, such as the large micro survey data.
#' @param y Response variable, an N x 1 vector.
#' @param x Control variables, an N x p1 matrix.
#' @param z Instrumental variables, an N x p2 matrix.
#' @param D Endogenous treatment variable.
#' @param family Quantitative for family="gaussian", or family="poisson" (non-negative counts). For family="binomial" should be either a factor with two levels, or a two-column matrix of counts or proportions (the second column is treated as the target class; for a factor, the last level in alphabetical order is the target class). For family="multinomial", can be a nc>=2 level factor, or a matrix with nc columns of counts or proportions. For either "binomial" or "multinomial", if y is presented as a vector, it will be coerced into a factor. For family="cox", y should be a two-column matrix with columns named 'time' and 'status'. The latter is a binary variable, with '1' indicating death, and '0' indicating right censored. The function Surv() in package survival produces such a matrix. For family="mgaussian", y is a matrix of quantitative responses.
#' @param criterion The criterion by which to select the regularization parameter. One of "BIC", "EBIC", default is "EBIC".
#' @param alpha The elasticnet mixing parameter, with 0<=alpha<= 1. alpha=1 is the lasso penalty, and alpha=0 the ridge penalty.
#' @param nlambda The number of lambda values, default is 100.
#' @param ... other arguments, see help(glmnet).
#' @details The DS-IV algorithm consists of the following three steps: In the first step,
#'regress the outcome variable y on control variables x using the
#'regularization method, estimate the coefficients beta and select the important control
#'variables set denoted by c1. In the second step, regress the treatment variable
#'d on instrumental variables w and control variables x, estimate the
#'optimal instrument d and obtain the second important control variables set
#'denoted by cx. In the third step, obtain the DS-IV estimator of the
#'endogenous treatment effect based on the estimated optimal instrument d
#'and the union (c3) of the selected control variables.
#' @return An object of type \code{DSIV} which is a list with the following
#' components:
#' \item{yhat}{The estimated value of y.}
#' \item{betaD}{The coefficient of endogenous variable D.}
#' \item{betaX}{The coefficient of control variables x.}
#' \item{c1}{Variable indication of the selected in the first step (control variables x).}
#' \item{cx}{Variable indication of selected control variables in the second step.}
#' \item{cz}{Variable indication of selected instrumental variables in the second step.}
#' \item{c2}{Variable indication of the selected in the second step. The number less than or equal to p1 is an indication of control variables, the number greater than p1 and less than or equal to (p1 + p2) is an indication of instrument variables.}
#' \item{c3}{Union of c1 and cx on control variables.}
#' \item{family}{Same as above.}
#' \item{criterion}{Same as above.}
#' @author Qingliang Fan, KongYu He, Wei Zhong
#' @references Wei Zhong, Yang Gao, Wei Zhou and Qingliang Fan (2020), “Endogenous Treatment Effect Estimation Using High-Dimensional Instruments and Double Selection”, working paper
#' @examples
#'library(naivereg)
#'data("DSIVdata")
#'y=DSIVdata[,1]
#'x=DSIVdata[,2:51]
#'z=DSIVdata[,52:71]
#'D=DSIVdata[,72]
#'res = DSIV(y,x,z,D,family='gaussian', criterion='EBIC')
#'res$c1 #Variable indication of the selected in the first step (control variables x).
#'res$cx #Variable indication of selected control variables in the second step.
#'res$cz #Variable indication of selected instrumental variables in the second step.
#'res$c3 #Union of c1 and cx on control variables
#' @export
######################################
DSIV <- function(y, x, z, D,
                 family = c("gaussian", "binomial", "poisson", "multinomial",
                            "cox", "mgaussian"),
                 criterion = c("BIC", "EBIC"), alpha = 1, nlambda = 100, ...) {
  # Defaults preserved from the original implementation: when the arguments
  # are missing, criterion falls back to "EBIC" and family to "gaussian".
  if (missing(criterion)) criterion <- "EBIC"
  if (missing(family)) family <- "gaussian"

  n <- length(y)

  # Information criterion along a glmnet lambda path.
  #   loss : per-lambda mean squared prediction error
  #   df   : per-lambda number of nonzero coefficients
  #   p    : number of candidate predictors (enters the EBIC penalty)
  ic_path <- function(loss, df, p) {
    vapply(seq_along(loss), function(k) {
      switch(criterion,
             BIC  = n * log(loss[k]) + df[k] * log(n),
             EBIC = log(loss[k]) + df[k] * (log(n) + 0.2 * log(p)) / n)
    }, numeric(1))
  }

  ## Step 1: regularised regression of y on the controls x; select c1.
  d1 <- as.data.frame(cbind(y, x))
  x1 <- model.matrix(y ~ ., d1)[, -1]
  y1 <- d1[, 1]
  fit1 <- glmnet(x1, y1, family = family, alpha = alpha, nlambda = nlambda, ...)
  pr1 <- predict(fit1, x1, s = NULL, type = "link", exact = FALSE)
  IC1 <- ic_path(colMeans((pr1 - y1)^2), fit1$df, ncol(x1))
  refit1 <- glmnet(x1, y1, family = family, alpha = alpha,
                   lambda = fit1$lambda[which.min(IC1)], nlambda = nlambda, ...)
  c1 <- which(as.numeric(refit1$beta) != 0)

  ## Step 2: regularised regression of D on (x, z); select c2.  Indices
  ## <= ncol(x) refer to controls, larger indices to instruments.
  d2 <- as.data.frame(cbind(D, x, z))
  x2 <- model.matrix(D ~ ., d2)[, -1]
  y2 <- d2[, 1]
  fit2 <- glmnet(x2, y2, family = family, alpha = alpha, nlambda = nlambda, ...)
  pr2 <- predict(fit2, x2, s = NULL, type = "link", exact = FALSE)
  # BUG FIX: this loop previously ran over ncol(fit1$beta).  The two lambda
  # paths need not have the same length, so the criterion for fit2 could be
  # truncated or padded with zeros, selecting the wrong lambda.
  IC2 <- ic_path(colMeans((pr2 - y2)^2), fit2$df, ncol(x2))
  refit2 <- glmnet(x2, y2, family = family, alpha = alpha,
                   lambda = fit2$lambda[which.min(IC2)], nlambda = nlambda, ...)
  c2 <- which(as.numeric(refit2$beta) != 0)

  ## Refit on the selected columns of (x, z) to build the optimal instrument
  ## Dhat (fitted values of the treatment).
  xz <- cbind(x, z)
  Dhat <- lm(D ~ . - 1, as.data.frame(cbind(D, xz[, c2])))$fitted.values

  # Split the step-2 selection into controls (cx) and instruments (cz).
  cx <- c2[c2 <= ncol(x)]
  cz <- c2[c2 > ncol(x)]
  c1 <- c1[c1 < ncol(x) + 1]

  # Union of the controls selected in both steps, keeping c1's order first.
  # BUG FIX: the original zeroed overlapping entries of cx in place, so the
  # returned 'cx' no longer matched its documented meaning; setdiff() keeps
  # cx intact while producing the same union.
  c3 <- c(c1, setdiff(cx, c1))
  xx <- x[, c3]

  ## Step 3: DS-IV estimator of the endogenous treatment effect.
  # m = I - P annihilates the column space of the selected controls.
  pp <- xx %*% solve(t(xx) %*% xx) %*% t(xx)
  m <- diag(nrow(xx)) - pp
  betaD <- solve(t(Dhat) %*% m %*% D) %*% t(Dhat) %*% m %*% y
  refit3 <- lm(y ~ . - 1, as.data.frame(cbind(y, Dhat, xx)))
  betaX <- refit3$coef[-1]
  # NOTE(review): the minus sign below reproduces the original code
  # (yhat = betaD*D - xx %*% betaX); confirm against the paper whether a
  # plus was intended.
  yhat <- as.vector(betaD) * D - xx %*% betaX

  list(yhat = yhat, betaD = betaD, betaX = betaX, c1 = c1, cx = cx, cz = cz,
       c2 = c2, c3 = c3, family = family, criterion = criterion)
}
| /R/DSIV.R | no_license | cran/naivereg | R | false | false | 7,063 | r | #' @title Double-Selection Plus Instrumental Variable Estimator
#' @description A three-step approach to estimate the endogenous treatment effect using high-dimensional instruments and double selection. It is applicable in the following scenarios: first, there is a known endogeneity problem for the treatment variable. Second, the treatment effect model has a large number of control variables, such as the large micro survey data.
#' @param y Response variable, an N x 1 vector.
#' @param x Control variables, an N x p1 matrix.
#' @param z Instrumental variables, an N x p2 matrix.
#' @param D Endogenous treatment variable.
#' @param family Quantitative for family="gaussian", or family="poisson" (non-negative counts). For family="binomial" should be either a factor with two levels, or a two-column matrix of counts or proportions (the second column is treated as the target class; for a factor, the last level in alphabetical order is the target class). For family="multinomial", can be a nc>=2 level factor, or a matrix with nc columns of counts or proportions. For either "binomial" or "multinomial", if y is presented as a vector, it will be coerced into a factor. For family="cox", y should be a two-column matrix with columns named 'time' and 'status'. The latter is a binary variable, with '1' indicating death, and '0' indicating right censored. The function Surv() in package survival produces such a matrix. For family="mgaussian", y is a matrix of quantitative responses.
#' @param criterion The criterion by which to select the regularization parameter. One of "BIC", "EBIC", default is "EBIC".
#' @param alpha The elasticnet mixing parameter, with 0<=alpha<= 1. alpha=1 is the lasso penalty, and alpha=0 the ridge penalty.
#' @param nlambda The number of lambda values, default is 100.
#' @param ... other arguments, see help(glmnet).
#' @details The DS-IV algorithm consists of the following three steps: In the first step,
#'regress the outcome variable y on control variables x using the
#'regularization method, estimate the coefficients beta and select the important control
#'variables set denoted by c1. In the second step, regress the treatment variable
#'d on instrumental variables w and control variables x, estimate the
#'optimal instrument d and obtain the second important control variables set
#'denoted by cx. In the third step, obtain the DS-IV estimator of the
#'endogenous treatment effect based on the estimated optimal instrument d
#'and the union (c3) of the selected control variables.
#' @return An object of type \code{DSIV} which is a list with the following
#' components:
#' \item{yhat}{The estimated value of y.}
#' \item{betaD}{The coefficient of endogenous variable D.}
#' \item{betaX}{The coefficient of control variables x.}
#' \item{c1}{Variable indication of the selected in the first step (control variables x).}
#' \item{cx}{Variable indication of selected control variables in the second step.}
#' \item{cz}{Variable indication of selected instrumental variables in the second step.}
#' \item{c2}{Variable indication of the selected in the second step. The number less than or equal to p1 is an indication of control variables, the number greater than p1 and less than or equal to (p1 + p2) is an indication of instrument variables.}
#' \item{c3}{Union of c1 and cx on control variables.}
#' \item{family}{Same as above.}
#' \item{criterion}{Same as above.}
#' @author Qingliang Fan, KongYu He, Wei Zhong
#' @references Wei Zhong, Yang Gao, Wei Zhou and Qingliang Fan (2020), “Endogenous Treatment Effect Estimation Using High-Dimensional Instruments and Double Selection”, working paper
#' @examples
#'library(naivereg)
#'data("DSIVdata")
#'y=DSIVdata[,1]
#'x=DSIVdata[,2:51]
#'z=DSIVdata[,52:71]
#'D=DSIVdata[,72]
#'res = DSIV(y,x,z,D,family='gaussian', criterion='EBIC')
#'res$c1 #Variable indication of the selected in the first step (control variables x).
#'res$cx #Variable indication of selected control variables in the second step.
#'res$cz #Variable indication of selected instrumental variables in the second step.
#'res$c3 #Union of c1 and cx on control variables
#' @export
######################################
DSIV <- function(y, x, z, D,
                 family = c("gaussian", "binomial", "poisson", "multinomial",
                            "cox", "mgaussian"),
                 criterion = c("BIC", "EBIC"), alpha = 1, nlambda = 100, ...) {
  # Defaults preserved from the original implementation: when the arguments
  # are missing, criterion falls back to "EBIC" and family to "gaussian".
  if (missing(criterion)) criterion <- "EBIC"
  if (missing(family)) family <- "gaussian"

  n <- length(y)

  # Information criterion along a glmnet lambda path.
  #   loss : per-lambda mean squared prediction error
  #   df   : per-lambda number of nonzero coefficients
  #   p    : number of candidate predictors (enters the EBIC penalty)
  ic_path <- function(loss, df, p) {
    vapply(seq_along(loss), function(k) {
      switch(criterion,
             BIC  = n * log(loss[k]) + df[k] * log(n),
             EBIC = log(loss[k]) + df[k] * (log(n) + 0.2 * log(p)) / n)
    }, numeric(1))
  }

  ## Step 1: regularised regression of y on the controls x; select c1.
  d1 <- as.data.frame(cbind(y, x))
  x1 <- model.matrix(y ~ ., d1)[, -1]
  y1 <- d1[, 1]
  fit1 <- glmnet(x1, y1, family = family, alpha = alpha, nlambda = nlambda, ...)
  pr1 <- predict(fit1, x1, s = NULL, type = "link", exact = FALSE)
  IC1 <- ic_path(colMeans((pr1 - y1)^2), fit1$df, ncol(x1))
  refit1 <- glmnet(x1, y1, family = family, alpha = alpha,
                   lambda = fit1$lambda[which.min(IC1)], nlambda = nlambda, ...)
  c1 <- which(as.numeric(refit1$beta) != 0)

  ## Step 2: regularised regression of D on (x, z); select c2.  Indices
  ## <= ncol(x) refer to controls, larger indices to instruments.
  d2 <- as.data.frame(cbind(D, x, z))
  x2 <- model.matrix(D ~ ., d2)[, -1]
  y2 <- d2[, 1]
  fit2 <- glmnet(x2, y2, family = family, alpha = alpha, nlambda = nlambda, ...)
  pr2 <- predict(fit2, x2, s = NULL, type = "link", exact = FALSE)
  # BUG FIX: this loop previously ran over ncol(fit1$beta).  The two lambda
  # paths need not have the same length, so the criterion for fit2 could be
  # truncated or padded with zeros, selecting the wrong lambda.
  IC2 <- ic_path(colMeans((pr2 - y2)^2), fit2$df, ncol(x2))
  refit2 <- glmnet(x2, y2, family = family, alpha = alpha,
                   lambda = fit2$lambda[which.min(IC2)], nlambda = nlambda, ...)
  c2 <- which(as.numeric(refit2$beta) != 0)

  ## Refit on the selected columns of (x, z) to build the optimal instrument
  ## Dhat (fitted values of the treatment).
  xz <- cbind(x, z)
  Dhat <- lm(D ~ . - 1, as.data.frame(cbind(D, xz[, c2])))$fitted.values

  # Split the step-2 selection into controls (cx) and instruments (cz).
  cx <- c2[c2 <= ncol(x)]
  cz <- c2[c2 > ncol(x)]
  c1 <- c1[c1 < ncol(x) + 1]

  # Union of the controls selected in both steps, keeping c1's order first.
  # BUG FIX: the original zeroed overlapping entries of cx in place, so the
  # returned 'cx' no longer matched its documented meaning; setdiff() keeps
  # cx intact while producing the same union.
  c3 <- c(c1, setdiff(cx, c1))
  xx <- x[, c3]

  ## Step 3: DS-IV estimator of the endogenous treatment effect.
  # m = I - P annihilates the column space of the selected controls.
  pp <- xx %*% solve(t(xx) %*% xx) %*% t(xx)
  m <- diag(nrow(xx)) - pp
  betaD <- solve(t(Dhat) %*% m %*% D) %*% t(Dhat) %*% m %*% y
  refit3 <- lm(y ~ . - 1, as.data.frame(cbind(y, Dhat, xx)))
  betaX <- refit3$coef[-1]
  # NOTE(review): the minus sign below reproduces the original code
  # (yhat = betaD*D - xx %*% betaX); confirm against the paper whether a
  # plus was intended.
  yhat <- as.vector(betaD) * D - xx %*% betaX

  list(yhat = yhat, betaD = betaD, betaX = betaX, c1 = c1, cx = cx, cz = cz,
       c2 = c2, c3 = c3, family = family, criterion = criterion)
}
|
# Set the seed so we get the same random numbers every time we run this code
set.seed(007)

# 10000 draws from N(mean = 0, sd = 3): variations in sea level due to
# climate variability (metres).
sea.lvl.var <- rnorm(10000, mean = 0, sd = 3)

# Possible storm surge heights (metres).
surge <- 0:3

# A flood occurs when the total anomaly exceeds 5 m.  At low tide the anomaly
# is (variability - 1.25 + surge); at high tide it is
# (variability + 1.25 + surge).  The flood probability for each surge level is
# the fraction of simulated anomalies above the 5 m threshold.  mean() of a
# logical vector is exactly count/10000, so this vectorised form reproduces
# the original double loop without growing vectors element by element.
flood.prob.low  <- vapply(surge, function(s) mean(sea.lvl.var - 1.25 + s > 5), numeric(1))
flood.prob.high <- vapply(surge, function(s) mean(sea.lvl.var + 1.25 + s > 5), numeric(1))
| /ass_4_model_baseline.R | no_license | madeline-oliver/esm_203_ass_4 | R | false | false | 1,451 | r | # Set the seed so we get the same random numbers every time we run this code
set.seed(007)
# Generate a sequence of 10000 random numbers from a normal distribution with a mean of zero and a standard deviation of 3
# This represents variations in sea level due to climate variability
sea.lvl.var <- rnorm(10000, mean = 0, sd = 3)
# Create a vector of possible storm surges
surge <- 0:3
# Create a vector to store the results of our model run
flood.prob.low <- c()
flood.prob.high <- c()
# Initializing variables
## Counter variables for number of anomalies greater than 5m
n.low <- 0
n.high <- 0
## Store the results of a single anomaly calculation
anomaly.low <- 0
anomaly.high <- 0
for (i in 1:4) { # For each of the four surge possibilities
n.low <- 0 # Set the count back to zero
n.high <- 0
for (j in 1:10000){ # For each of the random numbers
anomaly.low <- sea.lvl.var[j] - 1.25 + surge[i] # Calculate the anomaly for each tide level
anomaly.high <- sea.lvl.var[j] + 1.25 + surge[i]
if (anomaly.low > 5) { # Is the anomaly greater than 5m for low tide?
n.low <- n.low + 1 # If so, increase the count by one
}
if (anomaly.high > 5) { # Is the anomaly greater than 5m for high tide?
n.high <- n.high + 1 # If so, increase the count by one
}
}
flood.prob.low[i] <- n.low/10000 # Finally, store the risk probability for each of the four surge possibilities
flood.prob.high[i] <- n.high/10000
}
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")

# Verify that h2o.coef() / h2o.coef_norm() applied to a ModelSelection model
# return the same coefficients as the underlying per-size best models, for
# both the "allsubsets" and "maxr" modes, and that the two modes agree on the
# standardized coefficients.
testModelSelectionCoeffs <- function() {
  bhexFV <- h2o.uploadFile(locate("smalldata/logreg/prostate.csv"))
  Y <- "GLEASON"
  X <- c("AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS")
  Log.info("Build the MaxRGLM model")
  numModel <- 7
  allsubsetsModel <- h2o.modelSelection(y=Y, x=X, seed=12345, training_frame = bhexFV,
                                        max_predictor_number=numModel, mode="allsubsets")
  coeffsAllsubsets <- h2o.coef(allsubsetsModel)
  coeffsNormAllsubsets <- h2o.coef_norm(allsubsetsModel)
  numModel <- length(coeffsAllsubsets)  # actual number of best models returned
  maxrModel <- h2o.modelSelection(y=Y, x=X, seed=12345, training_frame = bhexFV,
                                  max_predictor_number=numModel, mode="maxr")
  coeffsMaxr <- h2o.coef(maxrModel)
  coeffsNormMaxr <- h2o.coef_norm(maxrModel)
  # Check coefficients obtained in different ways are the same.
  for (ind in seq_len(numModel)) {
    # Fetch each best model once instead of twice per comparison.
    bestAllsubsets <- h2o.getModel(allsubsetsModel@model$best_model_ids[[ind]]$name)
    expect_equal(coeffsAllsubsets[[ind]], h2o.coef(bestAllsubsets), tolerance=1e-6)
    coeffsNormBestAllsubsets <- h2o.coef_norm(bestAllsubsets)
    expect_equal(coeffsNormAllsubsets[[ind]], coeffsNormBestAllsubsets, tolerance=1e-6)
    bestMaxr <- h2o.getModel(maxrModel@model$best_model_ids[[ind]]$name)
    expect_equal(coeffsMaxr[[ind]], h2o.coef(bestMaxr), tolerance=1e-6)
    coeffsNormBestMaxr <- h2o.coef_norm(bestMaxr)
    expect_equal(coeffsNormMaxr[[ind]], coeffsNormBestMaxr, tolerance=1e-6)
    # The two modes must agree on the standardized coefficients; compare
    # sorted, since variable ordering may differ between modes.
    coeffsNormModelAllsubsets <- coeffsNormAllsubsets[[ind]]
    expect_equal(coeffsNormModelAllsubsets[order(coeffsNormModelAllsubsets)],
                 coeffsNormBestMaxr[order(coeffsNormBestMaxr)], tolerance=1e-6)
  }
}
doTest("ModelSelection with allsubsets, maxr: test h2o.coef() and h2o.coef_norm()", testModelSelectionCoeffs)
| /h2o-r/tests/testdir_algos/modelselection/runit_PUBDEV_8427_modelselection_coeffs.R | permissive | timgates42/h2o-3 | R | false | false | 2,120 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
testModelSelectionCoeffs <- function() {
bhexFV <- h2o.uploadFile(locate("smalldata/logreg/prostate.csv"))
Y <- "GLEASON"
X <- c("AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS")
Log.info("Build the MaxRGLM model")
numModel <- 7
allsubsetsModel <- h2o.modelSelection(y=Y, x=X, seed=12345, training_frame = bhexFV, max_predictor_number=numModel,
mode="allsubsets")
coeffsAllsubsets <- h2o.coef(allsubsetsModel)
coeffsNormAllsubsets <- h2o.coef_norm(allsubsetsModel)
numModel = length(coeffsAllsubsets)
maxrModel <- h2o.modelSelection(y=Y, x=X, seed=12345, training_frame = bhexFV, max_predictor_number=numModel,
mode="maxr")
coeffsMaxr <- h2o.coef(maxrModel)
coeffsNormMaxr <- h2o.coef_norm(maxrModel)
# check coefficients obtained in different ways are the same.
for (ind in c(1:numModel)) {
coeffsModelAllsubsets <- coeffsAllsubsets[[ind]]
coeffsNormModelAllsubsets <- coeffsNormAllsubsets[[ind]]
coeffsTempAllsubsets <- h2o.coef(h2o.getModel(allsubsetsModel@model$best_model_ids[[ind]]$name))
coeffsNormTempAllsubsets <- h2o.coef_norm(h2o.getModel(allsubsetsModel@model$best_model_ids[[ind]]$name))
expect_equal(coeffsModelAllsubsets, coeffsTempAllsubsets, tolerance=1e-6)
expect_equal(coeffsNormModelAllsubsets, coeffsNormTempAllsubsets, tolerance=1e-6)
coeffsModelMaxr <- coeffsMaxr[[ind]]
coeffsNormModelMaxr <- coeffsNormMaxr[[ind]]
coeffsTempMaxr <- h2o.coef(h2o.getModel(maxrModel@model$best_model_ids[[ind]]$name))
coeffsNormTempMaxr <- h2o.coef_norm(h2o.getModel(maxrModel@model$best_model_ids[[ind]]$name))
expect_equal(coeffsModelMaxr, coeffsTempMaxr, tolerance=1e-6)
expect_equal(coeffsNormModelMaxr, coeffsNormTempMaxr, tolerance=1e-6)
expect_equal(coeffsNormModelAllsubsets[order(coeffsNormModelAllsubsets)], coeffsNormTempMaxr[order(coeffsNormTempMaxr)], tolerance=1e-6)
}
}
doTest("ModelSelection with allsubsets, maxr: test h2o.coef() and h2o.coef_norm()", testModelSelectionCoeffs)
|
#' @name as.data.frame.dust
#'
#' @title Convert \code{dust} Object to Data Frame
#' @description Sprinkles are applied to the \code{dust} object
#' as if it were being prepared for printing to the console.
#' However, instead of printing, the object is returned
#' as a single data frame.
#'
#' @param x A \code{dust} object.
#' @param ... Arguments to be passed to other methods. Currently unused.
#' @param sprinkled Logical. If \code{TRUE}, the sprinkles attached to the
#' \code{dust} object are applied before returning the data frame.
#' Sprinkles are applied via the same mechanism that prints to the console,
#' so only sprinkles that are applicable to console output are used.
#' When \code{FALSE}, \code{pixiedust} attempts to reconstruct the
#' data frame (or tidied output from \code{broom::tidy}
#' originally given to \code{dust}.
#'
#' @details In its current state, this can be a fairly ineffcient function
#' as the table, if the longtable option is in use, will be built in
#' a \code{for} loop and bound together using \code{rbind}. This isn't
#' really intended for large tables, but may be of assistance when
#' there isn't a sprinkle that does what you want to do. (You can
#' at least pull out the object as a data frame and do your own
#' post processing).
#'
#' @author Benjamin Nutter
#'
#' @examples
#' fit <- lm(mpg ~ qsec + factor(am) + wt * factor(gear), data = mtcars)
#' Dust <- dust(fit) %>%
#' sprinkle(cols = 2:4, round = 2) %>%
#' sprinkle(cols = 5, fn = quote(pvalString(value))) %>%
#' sprinkle(cols = 3, font_color = "#DA70D6") %>%
#' sprinkle_print_method("html")
#'
#' as.data.frame(Dust)
#'
#' @export
as.data.frame.dust <- function(x, ..., sprinkled = TRUE){
  # With sprinkles applied, delegate to the console printer and capture the
  # resulting data frame instead of printing it.
  if (sprinkled) {
    return(print_dust_console(x, return_df = TRUE))
  }

  # Otherwise rebuild the original (unsprinkled) table from the long-format
  # cell table stored in x$body: one row per cell, keyed by (row, col).
  wide <- dplyr::select(x$body, row, col, value) %>%
    tidyr::spread(col, value) %>%
    dplyr::select(-row)

  # Recover each column's display name and original class (first cell of
  # each column carries them).
  meta <- dplyr::group_by(x$body, col) %>%
    dplyr::summarise(col_name = col_name[1], col_class = col_class[1])

  # Coerce every reconstructed column back to its recorded class using the
  # matching as.<class>() coercion function.
  coercers <- sprintf("as.%s", meta$col_class)
  for (i in seq_along(wide)) {
    wide[[i]] <- get(coercers[i])(wide[[i]])
  }

  names(wide) <- meta$col_name
  wide
}
#' @rdname as.data.frame.dust
#' @export
as.data.frame.dust_list <- function(x, ...)
{
  # A dust_list is simply a list of dust objects; convert each in turn.
  lapply(x, as.data.frame.dust)
}
utils::globalVariables(c("value", "col_name", "col_class"))
| /pixiedust/R/as.data.frame.dust.R | no_license | ingted/R-Examples | R | false | false | 2,687 | r | #' @name as.data.frame.dust
#'
#' @title Convert \code{dust} Object to Data Frame
#' @description Sprinkles are applied to the \code{dust} object
#' as if it were being prepared for printing to the console.
#' However, instead of printing, the object is returned
#' as a single data frame.
#'
#' @param x A \code{dust} object.
#' @param ... Arguments to be passed to other methods. Currently unused.
#' @param sprinkled Logical. If \code{TRUE}, the sprinkles attached to the
#' \code{dust} object are applied before returning the data frame.
#' Sprinkles are applied via the same mechanism that prints to the console,
#' so only sprinkles that are applicable to console output are used.
#' When \code{FALSE}, \code{pixiedust} attempts to reconstruct the
#' data frame (or tidied output from \code{broom::tidy}
#' originally given to \code{dust}.
#'
#' @details In its current state, this can be a fairly ineffcient function
#' as the table, if the longtable option is in use, will be built in
#' a \code{for} loop and bound together using \code{rbind}. This isn't
#' really intended for large tables, but may be of assistance when
#' there isn't a sprinkle that does what you want to do. (You can
#' at least pull out the object as a data frame and do your own
#' post processing).
#'
#' @author Benjamin Nutter
#'
#' @examples
#' fit <- lm(mpg ~ qsec + factor(am) + wt * factor(gear), data = mtcars)
#' Dust <- dust(fit) %>%
#' sprinkle(cols = 2:4, round = 2) %>%
#' sprinkle(cols = 5, fn = quote(pvalString(value))) %>%
#' sprinkle(cols = 3, font_color = "#DA70D6") %>%
#' sprinkle_print_method("html")
#'
#' as.data.frame(Dust)
#'
#' @export
as.data.frame.dust <- function(x, ..., sprinkled = TRUE){
if (sprinkled){
return(print_dust_console(x, return_df = TRUE))
}
else {
X <- dplyr::select(x$body,
row, col, value) %>%
tidyr::spread(col, value) %>%
dplyr::select(-row)
col_names <- dplyr::group_by(x$body, col) %>%
dplyr::summarise(col_name = col_name[1])
col_names <- col_names$col_name
classes <- dplyr::group_by(x$body, col) %>%
dplyr::summarise(col_class = col_class[1])
classes <- sprintf("as.%s", classes$col_class)
for (i in seq_along(X)){
X[[i]] <- get(classes[i])(X[[i]])
}
names(X) <- col_names
X
}
}
#' @rdname as.data.frame.dust
#' @export
as.data.frame.dust_list <- function(x, ...)
{
lapply(x,
as.data.frame.dust)
}
utils::globalVariables(c("value", "col_name", "col_class"))
|
# NOTE(review): hard-coded, machine-specific working directory makes this
# script non-portable; prefer an RStudio project or relative paths.
setwd("C:/Users/Sammi-Jo/Desktop/Archive")
library(broom) # Helps make the regression results tidier
library(tidyverse) # Helps make programming with R easier
library(brms) # Allows for Bayesian inference

### Loading in the Poll data
example_poll <- read_csv("outputs/data/example_poll.csv")
head(example_poll)

### Multilevel Logistic Model for different states
# Bayesian multilevel logistic regression: fixed effects for gender and
# age group, with a varying (random) intercept per state.
model_states <- brm(supports_ALP ~ gender + age_group +
                      (1|state),
                    data = example_poll,
                    family = bernoulli())
summary(model_states) | /sample code/PostStratificationCode-brms-multilevel.R | no_license | usmanmasoodsadiq/sta304_ps3 | R | false | false | 587 | r | setwd("C:/Users/Sammi-Jo/Desktop/Archive")
library(broom) # Helps make the regression results tidier
library(tidyverse) # Helps make programming with R easier
library(brms) # Allows for Bayesian inference
### Loading in the Poll data
example_poll <- read_csv("outputs/data/example_poll.csv")
head(example_poll)
### Multilevel Logistic Model for different states
model_states <- brm(supports_ALP ~ gender + age_group +
(1|state),
data = example_poll,
family = bernoulli())
summary(model_states) |
# Rook middleware that serves brew templates found under `root` for request
# paths matching `url`; non-matching requests fall through to the wrapped app.
Brewery <- setRefClass(
  'Brewery',
  contains = 'Middleware',
  fields = c('url','root','opt'),
  methods = list(
    # url  : path prefix to match (anchored at the start of PATH_INFO)
    # root : directory containing the brew templates
    # ...  : extra NAMED values exposed to templates via the `opt` environment
    initialize = function(url,root,...){
      # Anchor the pattern so only path prefixes match.
      url <<- paste('^',url,sep='')
      root <<- root
      opts <- list(...)
      if (length(opts)>0){
        # list2env() errors on unnamed lists; surface that as a clear message.
        opt <<- try(list2env(opts),silent=TRUE)
        if (inherits(opt,'try-error'))
          stop('Optional arguments must be named')
      } else {
        opt <<- new.env()
      }
      callSuper()
    },
    # Rook app interface: brew the matching template file, or delegate to the
    # downstream app when the path does not match or the file is missing.
    call = function(env){
      req <- Rook::Request$new(env)
      res <- Rook::Response$new()
      # Expose the current request/response objects to the template.
      opt[['req']] <<- req;
      opt[['res']] <<- res;
      path = env[["PATH_INFO"]]
      file_path = file.path(root,path)
      # Serve only non-root matches that actually exist on disk.
      if (grepl(url,path) && !grepl(paste(url,'$',sep=''),path) && file.exists(file_path)){
        # brew() from the template's own directory so relative includes work;
        # restore the previous working directory on exit.
        oldwd <- setwd(dirname(file_path))
        on.exit(setwd(oldwd))
        res$write(
          paste(capture.output(brew(basename(file_path),envir=opt)),
                collapse="\n")
        )
        res$finish()
      } else {
        app$call(env)
      }
    }
  )
)
| /R/Brewery.R | no_license | nizze/Rook | R | false | false | 1,183 | r | Brewery <- setRefClass(
'Brewery',
contains = 'Middleware',
fields = c('url','root','opt'),
methods = list(
initialize = function(url,root,...){
url <<- paste('^',url,sep='')
root <<- root
opts <- list(...)
if (length(opts)>0){
opt <<- try(list2env(opts),silent=TRUE)
if (inherits(opt,'try-error'))
stop('Optional arguments must be named')
} else {
opt <<- new.env()
}
callSuper()
},
call = function(env){
req <- Rook::Request$new(env)
res <- Rook::Response$new()
opt[['req']] <<- req;
opt[['res']] <<- res;
path = env[["PATH_INFO"]]
file_path = file.path(root,path)
if (grepl(url,path) && !grepl(paste(url,'$',sep=''),path) && file.exists(file_path)){
oldwd <- setwd(dirname(file_path))
on.exit(setwd(oldwd))
res$write(
paste(capture.output(brew(basename(file_path),envir=opt)),
collapse="\n")
)
res$finish()
} else {
app$call(env)
}
}
)
)
|
context("functional sequences")

test_that("fseq functions work", {
  # A magrittr functional sequence must behave like the equivalent
  # hand-written composition.
  a <- . %>% cos %>% sin %>% tan
  b <- function(x) tan(sin(cos(x)))
  # expect_identical() replaces the deprecated
  # expect_that(..., is_identical_to(...)) idiom.
  expect_identical(a(1:10), b(1:10))
})
| /tests/testthat/test-fseq.r | no_license | moodymudskipper/fastpipe | R | false | false | 192 | r | context("functional sequences")
test_that("fseq functions work", {
a <- . %>% cos %>% sin %>% tan
b <- function(x) tan(sin(cos(x)))
expect_that(a(1:10), is_identical_to(b(1:10)))
})
|
#' \code{BEDMatrix}: A Wrapper for Binary PED Files
#'
#' The BEDMatrix package provides a wrapper around binary PED (also known as
#' BED) files, one of the genotype/phenotype file formats of
#' \href{http://pngu.mgh.harvard.edu/~purcell/plink/}{PLINK}, the whole genome
#' association analysis toolset. \code{BEDMatrix} objects are created by simply
#' providing the path to a BED file and once created, they behave similarly to
#' regular matrices with the advantage that genotypes are retrieved on demand
#' without loading the entire file into memory. This allows handling of very
#' large files with limited use of memory. Technically, a \code{BEDMatrix} is a
#' memory-mapped matrix backed by a binary PED file.
#'
#' @docType package
#' @name BEDMatrix-package
#' @useDynLib BEDMatrix
#' @import methods Rcpp
#' @aliases NULL
NULL
loadModule("mod_BEDMatrix", TRUE)
# Extra pre-release questions (by convention, picked up by
# devtools::release() — confirm against devtools docs).
release_questions <- function() {
  "Have you updated the NEWS file?"
}
| /BEDMatrix/R/package.R | no_license | ingted/R-Examples | R | false | false | 954 | r | #' \code{BEDMatrix}: A Wrapper for Binary PED Files
#'
#' The BEDMatrix package provides a wrapper around binary PED (also known as
#' BED) files, one of the genotype/phenotype file formats of
#' \href{http://pngu.mgh.harvard.edu/~purcell/plink/}{PLINK}, the whole genome
#' association analysis toolset. \code{BEDMatrix} objects are created by simply
#' providing the path to a BED file and once created, they behave similarly to
#' regular matrices with the advantage that genotypes are retrieved on demand
#' without loading the entire file into memory. This allows handling of very
#' large files with limited use of memory. Technically, a \code{BEDMatrix} is a
#' memory-mapped matrix backed by a binary PED file.
#'
#' @docType package
#' @name BEDMatrix-package
#' @useDynLib BEDMatrix
#' @import methods Rcpp
#' @aliases NULL
NULL
loadModule("mod_BEDMatrix", TRUE)
release_questions <- function() {
c("Have you updated the NEWS file?")
}
|
library(lazyWeave)
### Name: lazy.page.break
### Title: Start New Page in LaTeX
### Aliases: lazy.page.break
### ** Examples
## Not run:
##D lazy.write(
##D lazy.file.start(),
##D lazy.text("First we type something on the first page"),
##D lazy.page.break(),
##D lazy.text("Then we type something on the second page"),
##D lazy.file.end(),
##D OutFile="Example 1.tex")
##D
##D unlink("Example 1.tex")
## End(Not run)
| /data/genthat_extracted_code/lazyWeave/examples/lazy.page.break.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 426 | r | library(lazyWeave)
### Name: lazy.page.break
### Title: Start New Page in LaTeX
### Aliases: lazy.page.break
### ** Examples
## Not run:
##D lazy.write(
##D lazy.file.start(),
##D lazy.text("First we type something on the first page"),
##D lazy.page.break(),
##D lazy.text("Then we type something on the second page"),
##D lazy.file.end(),
##D OutFile="Example 1.tex")
##D
##D unlink("Example 1.tex")
## End(Not run)
|
# Constructor: an object that computes and holds expiry/settlement dates for
# a trade in the given FX currency pair, trade date and transaction tenor.
setConstructorS3("FXSettleDates", function(FXCurr=NULL,tradeDate=NULL,transaction=NULL,...)
{
    this <- extend(RObject(), "FXSettleDates",
        .FXCurr = NULL,           # FXCurr currency-pair object
        .tradeDate = NULL,        # trade date, stored as POSIXct
        .transaction = NULL,      # tenor string, e.g. "spot" or a forward tenor
        .spotSettleDates = NULL,
        .settleDate = NULL,       # computed settlement date
        .expiryDate = NULL        # computed expiry date
    )
    constructorNeeds(this,FXCurr = "FXCurr")
    if(!inStaticConstructor(this))
    {
        this$.FXCurr <- FXCurr
        # The transaction must be one of the tenors known to FXTenor.
        transactionList <- FXTenor$getTenors()
        assert(any(transaction == transactionList),paste(transaction,"is not a valid transaction"))
        this$.transaction <- transaction
        this$.tradeDate <- as.POSIXct(tradeDate)
    }
    this
})
## Compute the expiry and settle dates for an FX transaction.
## Returns a list(expiryDate=, settleDate=) of POSIXct values truncated to day.
## For "spot" the settle date is trade date + the spot offset (business days,
## holiday-adjusted); for forward tenors the settle date is rolled forward by
## the tenor and the expiry date is backed out by the same spot offset.
setMethodS3("getExpirySettleDate","FXSettleDates",function(this,FXCurr=NULL, tradeDate=NULL, transaction=NULL,holidayList=NULL,...)
{
    settleDates <- FXSettleDates(FXCurr,tradeDate,transaction)
    ## All spot transaction settle T+2, except usd/cad and usd/try which are T+1
    settleOffset <- 2
    if (settleDates$.FXCurr$over() == "usd" && ((settleDates$.FXCurr$under() == "cad") ||
    (settleDates$.FXCurr$under() == "try"))) settleOffset <- 1
    ## Default holiday calendar comes from the currency pair if none supplied.
    if (is.null(holidayList)) holidayList <- FXHolidayDates$getHolidayDates(settleDates$.FXCurr)
    unit = "d"
    NumUnits <- settleOffset
    ## Spot leg: settle = trade date advanced by the offset, holiday-adjusted.
    settleDates$.expiryDate <- as.POSIXct(tradeDate)
    settleDates$.settleDate <- getFincadDateAdjust(settleDates$.expiryDate,unit,NumUnits,holidayList)
    ## Truncate both dates to midnight so callers compare dates, not timestamps.
    settleDates$.expiryDate <- as.POSIXct(trunc(settleDates$.expiryDate,"day"))
    settleDates$.settleDate <- as.POSIXct(trunc(settleDates$.settleDate,"day"))
    if (transaction=="spot")
    {
        temp.list <- list(expiryDate = settleDates$.expiryDate,settleDate = settleDates$.settleDate)
        return(temp.list)
    }
    ## Forward leg: roll the spot settle date forward by the tenor, then back
    ## out the expiry date by subtracting the spot offset (negative NumUnits).
    settleDates$.settleDate <- settleDates$computeSettleDate(settleDates$.settleDate,settleDates$.transaction,holidayList)
    unit = "d"
    NumUnits <- settleOffset
    settleDates$.expiryDate <- getFincadDateAdjust(settleDates$.settleDate,unit,-NumUnits,holidayList)
    temp.list <- list(expiryDate = settleDates$.expiryDate,settleDate = settleDates$.settleDate)
    return(temp.list)
})
## Advance startDate by the tenor encoded in `transaction` (e.g. "3m", "1y"),
## rolling over the supplied holiday calendar via the Fincad date-adjust routine.
setMethodS3("computeSettleDate","FXSettleDates",function(this,startDate=NULL, transaction=NULL,holidayList=NULL,...)
{
    tenor <- parseSimpleTenor(transaction)
    getFincadDateAdjust(startDate, tenor$unit, tenor$numUnits, holidayList)
})
| /R/src/QFFX/R/FXSettleDates.R | no_license | rsheftel/ratel | R | false | false | 2,521 | r | setConstructorS3("FXSettleDates", function(FXCurr=NULL,tradeDate=NULL,transaction=NULL,...)
{
this <- extend(RObject(), "FXSettleDates",
.FXCurr = NULL,
.tradeDate = NULL,
.transaction = NULL,
.spotSettleDates = NULL,
.settleDate = NULL,
.expiryDate = NULL
)
constructorNeeds(this,FXCurr = "FXCurr")
if(!inStaticConstructor(this))
{
this$.FXCurr <- FXCurr
transactionList <- FXTenor$getTenors()
assert(any(transaction == transactionList),paste(transaction,"is not a valid transaction"))
this$.transaction <- transaction
this$.tradeDate <- as.POSIXct(tradeDate)
}
this
})
setMethodS3("getExpirySettleDate","FXSettleDates",function(this,FXCurr=NULL, tradeDate=NULL, transaction=NULL,holidayList=NULL,...)
{
settleDates <- FXSettleDates(FXCurr,tradeDate,transaction)
## All spot transaction settle T+2, except usd/cad and usd/try which are T+1
settleOffset <- 2
if (settleDates$.FXCurr$over() == "usd" && ((settleDates$.FXCurr$under() == "cad") ||
(settleDates$.FXCurr$under() == "try"))) settleOffset <- 1
if (is.null(holidayList)) holidayList <- FXHolidayDates$getHolidayDates(settleDates$.FXCurr)
unit = "d"
NumUnits <- settleOffset
settleDates$.expiryDate <- as.POSIXct(tradeDate)
settleDates$.settleDate <- getFincadDateAdjust(settleDates$.expiryDate,unit,NumUnits,holidayList)
settleDates$.expiryDate <- as.POSIXct(trunc(settleDates$.expiryDate,"day"))
settleDates$.settleDate <- as.POSIXct(trunc(settleDates$.settleDate,"day"))
if (transaction=="spot")
{
temp.list <- list(expiryDate = settleDates$.expiryDate,settleDate = settleDates$.settleDate)
return(temp.list)
}
settleDates$.settleDate <- settleDates$computeSettleDate(settleDates$.settleDate,settleDates$.transaction,holidayList)
unit = "d"
NumUnits <- settleOffset
settleDates$.expiryDate <- getFincadDateAdjust(settleDates$.settleDate,unit,-NumUnits,holidayList)
temp.list <- list(expiryDate = settleDates$.expiryDate,settleDate = settleDates$.settleDate)
return(temp.list)
})
setMethodS3("computeSettleDate","FXSettleDates",function(this,startDate=NULL, transaction=NULL,holidayList=NULL,...)
{
parse.list <- parseSimpleTenor(transaction)
unit <- parse.list$unit
NumUnits <- parse.list$numUnits
settleDate <- getFincadDateAdjust(startDate,unit,NumUnits,holidayList)
return(settleDate)
})
|
####################################################################################################
####################################################################################################
## Clip time series to desired boxes: Landsat time series, Sentinel, RapidEye, Spot, NDVI+NDWI trend
## Contact remi.dannunzio@fao.org
## 2017/09/11
####################################################################################################
####################################################################################################
# Options -----------------------------------------------------------------
options(stringsAsFactors=FALSE)
library(Hmisc)
library(sp)
library(rgdal)
library(raster)
library(plyr)
library(foreign)
library(dplyr)
library(rgeos)
##########################################################################################################################################################################
################# Directory and FILE : ADAPT TO YOUR CONFIGURATION
##########################################################################################################################################################################
############################################################
############################################################
#################### SET PARAMETERS
## Setup the number of snippets to generate
how_many <- 1
#### Name of the directory where your Landsat data is
lsat_dir <- paste0(data_dir,"time_series_image_dir/landsat/")
#### Name of the directory where your Sentinel data is
stnl_dir <- paste0(data_dir,"time_series_image_dir/sentinel/")
#### Name of the directory where your data will be stored in output
dest_dir <- paste0(data_dir,"time_series_image_dir/clip_time_series/")
#### NAME MUST IN FORMAT paste0(lsat_basename,"YYYY_bbx.tif")
lsat_basename <- "median_roi_clip_lsat_"
stnl_basename <- "median_roi_clip_s2_"
## The export image will be in a 3 (height) x 6 (width) grid box
dim_v_grid <- 4
dim_h_grid <- 6
## setup year start and end for landsat
yr_str_lsat <- 2000
yr_end_lsat <- 2015
## setup year start and end for sentinel
yr_str_stnl <- 2016
yr_end_stnl <- 2017
## setup the visualisation parameters for the interpretation box size. in meters
interpretation_box_size <- 15
## setup the visualisation parameters for the level of zoom. in meters
outside_box_size <- 750
## position in landsat archive name of the "bounding box". Example: "median_hul_clip_lsat_1995_" == 27
bb_pos_lsat <- nchar(lsat_basename)+6
## position in sentinel archive name of the "bounding box". Example: "median_hul_clip_s2_1995_" == 25
bb_pos_stnl <- nchar(stnl_basename)+6
## Read the datafile
pts <- read.csv(paste0(sae_dir,"pts_CE_2018-07-19_example.csv")) ##### CHANGE TO MY VALUE HERE
head(pts)
names(pts)
## setup the correct names for the variables
map_code <- "map_class"
point_id <- "id"
xcoord <- "XCoordinate"
ycoord <- "YCoordinate"
##########################################################################################################################################################################
################## SCRIPT AUTOMATICALLY RUNS FROM HERE
##########################################################################################################################################################################
dir.create(dest_dir)
#proj_utm <- proj4string(raster(paste0(rpdy_dir,list.files(rpdy_dir,pattern=glob2rx("*.tif"))[1])))
dev.off()
################# Create spatial point file
pt_df_geo <- SpatialPointsDataFrame(
coords = pts[,c(xcoord,ycoord)],
data = data.frame(pts[,c(point_id,map_code)]),
proj4string=CRS("+init=epsg:4326")
)
################ Create the spatial indexes of the Landsat and Sentinel tiles
## Helper: build a SpatialPolygonsDataFrame of bounding-box polygons for a set
## of raster tiles.  `img_dir` must end with "/", `file_list` are file names in
## that directory, and `bb_pos` is the character position where the bounding-box
## identifier starts inside each file name (the trailing ".tif" is stripped).
## The returned index has a single attribute column "bb".
build_tile_index <- function(img_dir, file_list, bb_pos){
  lp <- list()
  for(file in file_list){
    tile <- raster(paste0(img_dir, file))        # avoid shadowing raster()
    e <- extent(tile)
    ## Closed ring of the tile's extent corners, labelled with the file name.
    poly <- Polygons(list(Polygon(cbind(
      c(e@xmin,e@xmin,e@xmax,e@xmax,e@xmin),
      c(e@ymin,e@ymax,e@ymax,e@ymin,e@ymin))
    )), file)
    lp <- append(lp, list(poly))
  }
  idx <- SpatialPolygonsDataFrame(
    SpatialPolygons(lp, seq_along(lp)),
    data.frame(bb = file_list),
    match.ID = FALSE
  )
  ## Keep only the bounding-box identifier (drop prefix and ".tif" suffix).
  idx@data$bb <- substr(idx@data$bb, bb_pos, nchar(idx@data$bb) - 4)
  idx
}
## Landsat index: one tile per bounding box for the first year of the series.
list_lsat <- list.files(lsat_dir, pattern = paste0(yr_str_lsat))
lsat_idx  <- build_tile_index(lsat_dir, list_lsat, bb_pos_lsat)
plot(lsat_idx)
## Sentinel-2 index, overlaid on the Landsat footprint plot.
list_s2  <- list.files(stnl_dir, pattern = "s2_")
stnl_idx <- build_tile_index(stnl_dir, list_s2, bb_pos_stnl)
plot(stnl_idx, add = TRUE)
################# Project both into Lat-Lon EPSG:4326
## NOTE(review): pt_df_geo was already constructed with CRS EPSG:4326 above, so
## the first assignment is redundant (harmless) -- confirm before removing.
proj4string(pt_df_geo) <- CRS("+init=epsg:4326")
proj4string(lsat_idx) <- CRS("+init=epsg:4326")
proj4string(stnl_idx) <- CRS("+init=epsg:4326")
################# Intersect points with index of imagery and append ID's of imagery to data.frame
## over() returns, for each sample point, the attributes of the tile polygon it
## falls inside (NA when the point is outside every tile).
pts_lsat <- over(pt_df_geo,lsat_idx)
pts_stnl <- over(pt_df_geo,stnl_idx)
## The appended columns get the literal names "pts_lsat$bb" / "pts_stnl$bb";
## the main interpretation loop looks them up by those exact names.
pts<-cbind(pts,pts_lsat$bb)
pts<-cbind(pts,pts_stnl$bb)
################# Create the outside boundaries box (twice outside_box_size from center of box)
## Build one square "context" polygon per sample point, in degrees.
## BUGFIX: the longitude half-width must be scaled by cos() of EACH point's own
## latitude; the original used pts[1,ycoord] (the first point) for every point,
## which distorts boxes away from the first point's latitude band.
lp <- list()
ysize <- outside_box_size/111321          # half-height in degrees (~m per degree lat)
## Loop through all points
for(i in seq_len(nrow(pts))){
  lat_scale <- cos(pts[i,ycoord]*pi/180)  # shrink x-extent with latitude
  ymin <- pts[i,ycoord]-ysize
  ymax <- pts[i,ycoord]+ysize
  xmin <- pts[i,xcoord]-ysize*lat_scale
  xmax <- pts[i,xcoord]+ysize*lat_scale
  p  <- Polygon(cbind(c(xmin,xmin,xmax,xmax,xmin),c(ymin,ymax,ymax,ymin,ymin)))
  ps <- Polygons(list(p), pts[i,point_id])
  lp <- append(lp,list(ps))
}
## Assemble all squares into one SpatialPolygonsDataFrame in WGS84, carrying
## the map class, point id and coordinates as attributes.
outbox <- SpatialPolygonsDataFrame(
  SpatialPolygons(lp,seq_len(nrow(pts))),
  pts[,c(map_code,point_id,xcoord,ycoord)],
  match.ID = FALSE
)
proj4string(outbox) <- CRS("+init=epsg:4326")
################# Create the interpretation box (interpretation_box_size from center)
## Build one square "interpretation" polygon per sample point, in degrees.
## BUGFIX: as with the outer box, the longitude half-width must use EACH point's
## own latitude (the original used pts[1,ycoord] for every point).  The polygon
## ID also now uses pts[i,point_id] for consistency with the outer-box loop
## (pts[i,1] only worked when the id happened to be the first column).
lp <- list()
ysize <- interpretation_box_size/111321   # half-height in degrees
## Loop through all points
for(i in seq_len(nrow(pts))){
  lat_scale <- cos(pts[i,ycoord]*pi/180)  # shrink x-extent with latitude
  ymin <- pts[i,ycoord]-ysize
  ymax <- pts[i,ycoord]+ysize
  xmin <- pts[i,xcoord]-ysize*lat_scale
  xmax <- pts[i,xcoord]+ysize*lat_scale
  p  <- Polygon(cbind(c(xmin,xmin,xmax,xmax,xmin),c(ymin,ymax,ymax,ymin,ymin)))
  ps <- Polygons(list(p), pts[i,point_id])
  lp <- append(lp,list(ps))
}
## Assemble all squares into one SpatialPolygonsDataFrame in WGS84.
inbox <- SpatialPolygonsDataFrame(
  SpatialPolygons(lp,seq_len(nrow(pts))),
  pts[,c(map_code,point_id,xcoord,ycoord)],
  match.ID = FALSE
)
proj4string(inbox) <- CRS("+init=epsg:4326")
proj4string(inbox) <- proj4string(outbox) <- CRS("+init=epsg:4326")
################ Create the list of ID's to process
list_ids <- pts[,point_id]
# ID to process
listdone <- list()
listdone <- read.table(text=list.files(dest_dir),as.is=T,fill=T,sep="_")[,2]
listdone <- gsub(".png","",listdone)
listodo <- list_ids[!(list_ids %in% listdone)]
head(pts)
#####################################################################################
#####################################################################################
#####################################################################################
# Loop through all IDs ----------------------------------------------------
######################################################################################################
################# Loop through the IDs
## example.... the_id = "18"
head(pts)
dev.off()
to_go <- min(how_many,length(listodo))
for(the_id in listodo[1:to_go]){
print(paste0(to_go," remain to do"))
to_go <- to_go-1
####################################################################
################# Open the image output file
## Check which point is being processed
(the_pt <- pts[pts[,point_id]==the_id,])
out_name <- paste(dest_dir,"pt_",the_id,"_class",the_pt$map_class,".png",sep="")
png(file= out_name,
width= 400*dim_h_grid,
height=400*dim_v_grid)
####################################################################
##### Delimitations of the plot in geographic coordinates
one_poly <- outbox[outbox@data[,point_id]==the_id,]
in_poly <- inbox[inbox@data[,point_id]==the_id,]
margins <- extent(
one_poly@bbox["x","min"]-1/111321,
one_poly@bbox["x","max"]+1/111321,
one_poly@bbox["y","min"]-1/111321,
one_poly@bbox["y","max"]+1/111321)
###################################################################
################# Find the corresponding indexes
tryCatch({lsat_bbox <- the_pt[,"pts_lsat$bb"]},
error=function(e){print(paste0("no image available"))})
tryCatch({stnl_bbox <- the_pt[,"pts_stnl$bb"]},
error=function(e){print(paste0("no image available"))})
################# Set the layout
#dev.off()
## The export image will be in a 4 (height) x 5 (width) grid box
par(mfrow = c(dim_v_grid,dim_h_grid))
par(mar=c(0,0,0,0))
ndvi_trend <- data.frame(matrix(nrow=0,ncol=2))
names(ndvi_trend) <- c("year","mean")
ndwi_trend <- data.frame(matrix(nrow=0,ncol=2))
names(ndwi_trend) <- c("year","mean")
i <- 1
## year <- "2013"
####################################################################
################# Clip the landsat time series
for(year in c(yr_str_lsat:yr_end_lsat)){
print(year)
plot(margins,axes=F,xlab="",ylab="")
tryCatch({
lsat <- brick(paste(lsat_dir,lsat_basename,year,"_",lsat_bbox,".tif",sep=""))
lsat_clip<-crop(lsat,one_poly)
swir <- raster(lsat_clip,4)
nir <- raster(lsat_clip,3)
red <- raster(lsat_clip,2)
green<- raster(lsat_clip,1)
ndvi <- (nir-red)/(nir+red)
ndwi <- (nir-swir)/(nir+swir)
ndvi_trend[i,]$year <- year
ndvi_trend[i,]$mean <- cellStats(crop(ndvi,in_poly),stat='mean')
ndwi_trend[i,]$year <- year
ndwi_trend[i,]$mean <- cellStats(crop(ndwi,in_poly),stat='mean')
i <- i + 1
#Plot natural colours composite (NIR-RED-GREEN == 4-3-2 in L7 nomenclature)
stack <- stack(swir,nir,red)
plotRGB(stack,stretch="hist",add=T)
#plot(ndvi,add=T)
},error=function(e){print(paste0("no image available in ",year," for ",lsat_bbox))})
lines(in_poly,col="red",lwd=2)
rect(
xleft = margins@xmin,
ybottom = margins@ymax - outside_box_size/10/111320,
xright = margins@xmin + outside_box_size/1.9/111320,
ytop = margins@ymax,
col = "white",
border = NA)
title(main=paste("Landsat ",year,sep=""),font.main=2,cex.main=2,line=-3,adj=0.05)
}
####################################################################
################# Clip the sentinel tile
for(year in c(yr_str_stnl:yr_end_stnl)){
plot(margins,axes=F,xlab="",ylab="")
print(year)
the_pt
tryCatch({
stnl <- brick(paste(stnl_dir,stnl_basename,year,"_",stnl_bbox,".tif",sep=""))
stnl_clip<-crop(stnl,one_poly)
blu <- raster(stnl_clip,1)
grn <- raster(stnl_clip,2)
red <- raster(stnl_clip,3)
nir <- raster(stnl_clip,4)
ndvi <- (nir-red)/(nir+red)
#ndwi <- (nir-swir)/(nir+swir)
ndvi_trend[i,]$year <- year
ndvi_trend[i,]$mean <- cellStats(crop(ndvi,in_poly),stat='mean')
#ndwi_trend[i,]$year <- year
#ndwi_trend[i,]$mean <- cellStats(crop(ndwi,in_poly),stat='mean')
i <- i + 1
stackNat <- stack(red,grn,blu)
#stackVeg <- stack(nir,ndvi,grn)
#stackNIR <- stack(nir,red,grn)
plotRGB(stackNat,stretch="hist",add=T)
},error=function(e){print(paste0("no image available in ",year," for sentinel"))})
lines(in_poly,col="red",lwd=2)
rect(
xleft = margins@xmin,
ybottom = margins@ymax-100/111320,
xright = margins@xmin+500/111320,
ytop = margins@ymax,
col = "white",
border = NA)
title(main=paste0("Sentinel ",year),font.main=2,cex.main=2,line=-3,adj=0.05)
}
####################################################################
################# NDVI graph
par(mar=c(2,2,2,2))
tryCatch({
plot(ndvi_trend,
# yaxt='n',
# xaxt='n',
xlab="year",
ylab="",
ylim=c(0,1)
)
lines(ndvi_trend, pch=16,col="blue")
title(main="Annual mean ndvi",font.main=2,cex.main=2)
},error=function(e){print(paste0("problem with NDVI"))})
####################################################################
################# NDWI graph
par(mar=c(2,2,2,2))
tryCatch({
plot(ndwi_trend,
# yaxt='n',
# xaxt='n',
xlab="year",
ylab="",
ylim=c(0,1)
)
lines(ndwi_trend, pch=16,col="blue")
title(main="Annual mean ndwi",font.main=2,cex.main=2)
},error=function(e){print(paste0("problem with NDwI"))})
####################################################################
### Close the image file
dev.off()
####################################################################
### End the points loop
}
the_pt
| /scripts/scriptxx_clip_time_series.R | no_license | okekegenius/ws_nga_20180717 | R | false | false | 13,583 | r | ####################################################################################################
####################################################################################################
## Clip time series to desired boxes: Landsat time series, Sentinel, RapidEye, Spot, NDVI+NDWI trend
## Contact remi.dannunzio@fao.org
## 2017/09/11
####################################################################################################
####################################################################################################
# Options -----------------------------------------------------------------
options(stringsAsFactors=FALSE)
library(Hmisc)
library(sp)
library(rgdal)
library(raster)
library(plyr)
library(foreign)
library(dplyr)
library(rgeos)
##########################################################################################################################################################################
################# Directory and FILE : ADAPT TO YOUR CONFIGURATION
##########################################################################################################################################################################
############################################################
############################################################
#################### SET PARAMETERS
## Setup the number of snippets to generate
how_many <- 1
#### Name of the directory where your Landsat data is
lsat_dir <- paste0(data_dir,"time_series_image_dir/landsat/")
#### Name of the directory where your Sentinel data is
stnl_dir <- paste0(data_dir,"time_series_image_dir/sentinel/")
#### Name of the directory where your data will be stored in output
dest_dir <- paste0(data_dir,"time_series_image_dir/clip_time_series/")
#### NAME MUST IN FORMAT paste0(lsat_basename,"YYYY_bbx.tif")
lsat_basename <- "median_roi_clip_lsat_"
stnl_basename <- "median_roi_clip_s2_"
## The export image will be in a 3 (height) x 6 (width) grid box
dim_v_grid <- 4
dim_h_grid <- 6
## setup year start and end for landsat
yr_str_lsat <- 2000
yr_end_lsat <- 2015
## setup year start and end for sentinel
yr_str_stnl <- 2016
yr_end_stnl <- 2017
## setup the visualisation parameters for the interpretation box size. in meters
interpretation_box_size <- 15
## setup the visualisation parameters for the level of zoom. in meters
outside_box_size <- 750
## position in landsat archive name of the "bounding box". Example: "median_hul_clip_lsat_1995_" == 27
bb_pos_lsat <- nchar(lsat_basename)+6
## position in sentinel archive name of the "bounding box". Example: "median_hul_clip_s2_1995_" == 25
bb_pos_stnl <- nchar(stnl_basename)+6
## Read the datafile
pts <- read.csv(paste0(sae_dir,"pts_CE_2018-07-19_example.csv")) ##### CHANGE TO MY VALUE HERE
head(pts)
names(pts)
## setup the correct names for the variables
map_code <- "map_class"
point_id <- "id"
xcoord <- "XCoordinate"
ycoord <- "YCoordinate"
##########################################################################################################################################################################
################## SCRIPT AUTOMATICALLY RUNS FROM HERE
##########################################################################################################################################################################
dir.create(dest_dir)
#proj_utm <- proj4string(raster(paste0(rpdy_dir,list.files(rpdy_dir,pattern=glob2rx("*.tif"))[1])))
dev.off()
################# Create spatial point file
pt_df_geo <- SpatialPointsDataFrame(
coords = pts[,c(xcoord,ycoord)],
data = data.frame(pts[,c(point_id,map_code)]),
proj4string=CRS("+init=epsg:4326")
)
################ Create the index of the Landsat tiles
list_lsat <- list.files(lsat_dir,pattern=paste0(yr_str_lsat))
lp <- list()
for(file in list_lsat){
raster <- raster(paste(lsat_dir,file,sep=""))
e<-extent(raster)
poly <- Polygons(list(Polygon(cbind(
c(e@xmin,e@xmin,e@xmax,e@xmax,e@xmin),
c(e@ymin,e@ymax,e@ymax,e@ymin,e@ymin))
)),file)
lp <- append(lp,list(poly))
}
## Transform the list into a SPDF PRIMER ERROR
lsat_idx <-SpatialPolygonsDataFrame(
SpatialPolygons(lp,1:length(lp)),
data.frame(list_lsat),
match.ID = F
)
head(lsat_idx)
names(lsat_idx@data) <- "bb"
lsat_idx@data$bb <- substr(lsat_idx@data$bb,bb_pos_lsat,(nchar(lsat_idx@data$bb)-4))
lsat_idx@data
plot(lsat_idx)
################ Create the index of the Sentinel tiles
list_s2 <- list.files(stnl_dir,pattern=paste0("s2_"))
lp<-list()
for(file in list_s2){
raster <- raster(paste(stnl_dir,file,sep=""))
e<-extent(raster)
poly <- Polygons(list(Polygon(cbind(
c(e@xmin,e@xmin,e@xmax,e@xmax,e@xmin),
c(e@ymin,e@ymax,e@ymax,e@ymin,e@ymin))
)),file)
lp <- append(lp,list(poly))
}
## Transform the list into a SPDF
stnl_idx <-SpatialPolygonsDataFrame(
SpatialPolygons(lp,1:length(lp)),
data.frame(list_s2),
match.ID = F
)
names(stnl_idx@data) <- "bb"
stnl_idx@data$bb <- substr(stnl_idx@data$bb,bb_pos_stnl,(nchar(stnl_idx@data$bb)-4))
stnl_idx@data
plot(stnl_idx,add=T)
################# Project both into Lat-Lon EPSG:4326
proj4string(pt_df_geo) <- CRS("+init=epsg:4326")
proj4string(lsat_idx) <- CRS("+init=epsg:4326")
proj4string(stnl_idx) <- CRS("+init=epsg:4326")
################# Intersect points with index of imagery and append ID's of imagery to data.frame
pts_lsat <- over(pt_df_geo,lsat_idx)
pts_stnl <- over(pt_df_geo,stnl_idx)
pts<-cbind(pts,pts_lsat$bb)
pts<-cbind(pts,pts_stnl$bb)
################# Create the outside boundaries box (1km // twice 500m from center of box)
lp<-list()
ysize <- outside_box_size/111321
## Loop through all points
for(i in 1:nrow(pts)){
ymin <- pts[i,ycoord]-ysize
ymax <- pts[i,ycoord]+ysize
xmin <- pts[i,xcoord]-ysize*cos(pts[1,ycoord]*pi/180)
xmax <- pts[i,xcoord]+ysize*cos(pts[1,ycoord]*pi/180)
p <- Polygon(cbind(c(xmin,xmin,xmax,xmax,xmin),c(ymin,ymax,ymax,ymin,ymin)))
ps <- Polygons(list(p), pts[i,point_id])
lp <- append(lp,list(ps))
}
## Transform the list into a SPDF
outbox<-SpatialPolygonsDataFrame(
SpatialPolygons(lp,1:nrow(pts)),
pts[,c(map_code,point_id,xcoord,ycoord)],
match.ID = F
)
proj4string(outbox) <- CRS("+init=epsg:4326")
################# Create the 0.5 ha box (70/2 = 35m shift from center)
lp<-list()
ysize <- interpretation_box_size/111321
## Loop through all points
for(i in 1:nrow(pts)){
ymin <- pts[i,ycoord]-ysize
ymax <- pts[i,ycoord]+ysize
xmin <- pts[i,xcoord]-ysize*cos(pts[1,ycoord]*pi/180)
xmax <- pts[i,xcoord]+ysize*cos(pts[1,ycoord]*pi/180)
p <- Polygon(cbind(c(xmin,xmin,xmax,xmax,xmin),c(ymin,ymax,ymax,ymin,ymin)))
ps <- Polygons(list(p), pts[i,1])
lp <- append(lp,list(ps))
}
## Transform the list into a SPDF
inbox<-SpatialPolygonsDataFrame(
SpatialPolygons(lp,1:nrow(pts)),
pts[,c(map_code,point_id,xcoord,ycoord)],
match.ID = F
)
proj4string(inbox) <- CRS("+init=epsg:4326")
proj4string(inbox) <- proj4string(outbox) <- CRS("+init=epsg:4326")
################ Create the list of ID's to process
list_ids <- pts[,point_id]
# ID to process
listdone <- list()
listdone <- read.table(text=list.files(dest_dir),as.is=T,fill=T,sep="_")[,2]
listdone <- gsub(".png","",listdone)
listodo <- list_ids[!(list_ids %in% listdone)]
head(pts)
#####################################################################################
#####################################################################################
#####################################################################################
# Loop through all IDs ----------------------------------------------------
######################################################################################################
################# Loop through the IDs
## example.... the_id = "18"
head(pts)
dev.off()
to_go <- min(how_many,length(listodo))
for(the_id in listodo[1:to_go]){
print(paste0(to_go," remain to do"))
to_go <- to_go-1
####################################################################
################# Open the image output file
## Check which point is being processed
(the_pt <- pts[pts[,point_id]==the_id,])
out_name <- paste(dest_dir,"pt_",the_id,"_class",the_pt$map_class,".png",sep="")
png(file= out_name,
width= 400*dim_h_grid,
height=400*dim_v_grid)
####################################################################
##### Delimitations of the plot in geographic coordinates
one_poly <- outbox[outbox@data[,point_id]==the_id,]
in_poly <- inbox[inbox@data[,point_id]==the_id,]
margins <- extent(
one_poly@bbox["x","min"]-1/111321,
one_poly@bbox["x","max"]+1/111321,
one_poly@bbox["y","min"]-1/111321,
one_poly@bbox["y","max"]+1/111321)
###################################################################
################# Find the corresponding indexes
tryCatch({lsat_bbox <- the_pt[,"pts_lsat$bb"]},
error=function(e){print(paste0("no image available"))})
tryCatch({stnl_bbox <- the_pt[,"pts_stnl$bb"]},
error=function(e){print(paste0("no image available"))})
################# Set the layout
#dev.off()
## The export image will be in a 4 (height) x 5 (width) grid box
par(mfrow = c(dim_v_grid,dim_h_grid))
par(mar=c(0,0,0,0))
ndvi_trend <- data.frame(matrix(nrow=0,ncol=2))
names(ndvi_trend) <- c("year","mean")
ndwi_trend <- data.frame(matrix(nrow=0,ncol=2))
names(ndwi_trend) <- c("year","mean")
i <- 1
## year <- "2013"
####################################################################
################# Clip the landsat time series
for(year in c(yr_str_lsat:yr_end_lsat)){
print(year)
plot(margins,axes=F,xlab="",ylab="")
tryCatch({
lsat <- brick(paste(lsat_dir,lsat_basename,year,"_",lsat_bbox,".tif",sep=""))
lsat_clip<-crop(lsat,one_poly)
swir <- raster(lsat_clip,4)
nir <- raster(lsat_clip,3)
red <- raster(lsat_clip,2)
green<- raster(lsat_clip,1)
ndvi <- (nir-red)/(nir+red)
ndwi <- (nir-swir)/(nir+swir)
ndvi_trend[i,]$year <- year
ndvi_trend[i,]$mean <- cellStats(crop(ndvi,in_poly),stat='mean')
ndwi_trend[i,]$year <- year
ndwi_trend[i,]$mean <- cellStats(crop(ndwi,in_poly),stat='mean')
i <- i + 1
#Plot natural colours composite (NIR-RED-GREEN == 4-3-2 in L7 nomenclature)
stack <- stack(swir,nir,red)
plotRGB(stack,stretch="hist",add=T)
#plot(ndvi,add=T)
},error=function(e){print(paste0("no image available in ",year," for ",lsat_bbox))})
lines(in_poly,col="red",lwd=2)
rect(
xleft = margins@xmin,
ybottom = margins@ymax - outside_box_size/10/111320,
xright = margins@xmin + outside_box_size/1.9/111320,
ytop = margins@ymax,
col = "white",
border = NA)
title(main=paste("Landsat ",year,sep=""),font.main=2,cex.main=2,line=-3,adj=0.05)
}
####################################################################
################# Clip the sentinel tile
for(year in c(yr_str_stnl:yr_end_stnl)){
plot(margins,axes=F,xlab="",ylab="")
print(year)
the_pt
tryCatch({
stnl <- brick(paste(stnl_dir,stnl_basename,year,"_",stnl_bbox,".tif",sep=""))
stnl_clip<-crop(stnl,one_poly)
blu <- raster(stnl_clip,1)
grn <- raster(stnl_clip,2)
red <- raster(stnl_clip,3)
nir <- raster(stnl_clip,4)
ndvi <- (nir-red)/(nir+red)
#ndwi <- (nir-swir)/(nir+swir)
ndvi_trend[i,]$year <- year
ndvi_trend[i,]$mean <- cellStats(crop(ndvi,in_poly),stat='mean')
#ndwi_trend[i,]$year <- year
#ndwi_trend[i,]$mean <- cellStats(crop(ndwi,in_poly),stat='mean')
i <- i + 1
stackNat <- stack(red,grn,blu)
#stackVeg <- stack(nir,ndvi,grn)
#stackNIR <- stack(nir,red,grn)
plotRGB(stackNat,stretch="hist",add=T)
},error=function(e){print(paste0("no image available in ",year," for sentinel"))})
lines(in_poly,col="red",lwd=2)
rect(
xleft = margins@xmin,
ybottom = margins@ymax-100/111320,
xright = margins@xmin+500/111320,
ytop = margins@ymax,
col = "white",
border = NA)
title(main=paste0("Sentinel ",year),font.main=2,cex.main=2,line=-3,adj=0.05)
}
####################################################################
################# NDVI graph
par(mar=c(2,2,2,2))
tryCatch({
plot(ndvi_trend,
# yaxt='n',
# xaxt='n',
xlab="year",
ylab="",
ylim=c(0,1)
)
lines(ndvi_trend, pch=16,col="blue")
title(main="Annual mean ndvi",font.main=2,cex.main=2)
},error=function(e){print(paste0("problem with NDVI"))})
####################################################################
################# NDWI graph
par(mar=c(2,2,2,2))
tryCatch({
plot(ndwi_trend,
# yaxt='n',
# xaxt='n',
xlab="year",
ylab="",
ylim=c(0,1)
)
lines(ndwi_trend, pch=16,col="blue")
title(main="Annual mean ndwi",font.main=2,cex.main=2)
},error=function(e){print(paste0("problem with NDwI"))})
####################################################################
### Close the image file
dev.off()
####################################################################
### End the points loop
}
the_pt
|
#' Get a list of running/settled bets
#'
#' Queries the Pinnacle API \code{/v1/bets} endpoint once per entry in
#' \code{betlist} and row-binds the per-status results into a single data frame.
#'
#' @param betids a vector of betids (overrides betlist) default = NULL
#' @param betlist Either 'SETTLED' or 'RUNNING' Default Behavior shows both
#' @param fromDate Iso8061 Date Default: 15 days prior in UTC, as.POSIXct(Sys.Date(), tz = 'UTC')-15*24*60*60
#' @param toDate Iso8061 Date Default: 1 day ahead in UTC (to counter possible fencepost situations), as.POSIXct(Sys.Date(), tz = 'UTC') + 24*60*60
#'
#' @return A list of bets and associated details
#' @export
#' @importFrom jsonlite fromJSON
#' @importFrom jsonlite rbind_pages
#' @examples
#' \donttest{
#' SetCredentials("TESTAPI","APITEST")
#' AcceptTermsAndConditions(accepted=TRUE)
#' GetBetsList()}
# NOTE(review): GET/add_headers/content come from httr but there is no
# @importFrom httr tag here -- presumably imported package-wide; verify against
# the NAMESPACE.  The roxygen @param docs say as.POSIXct while the defaults use
# as.POSIXlt -- confirm which is intended.
GetBetsList <-
  function(betids = NULL,
           betlist = c('SETTLED','RUNNING'),
           fromDate = as.POSIXlt(Sys.Date(), tz = 'UTC') - 15*24*60*60,
           toDate = as.POSIXlt(Sys.Date(), tz = 'UTC') + 24*60*60){
    # Errors unless the user has accepted the API terms (see AcceptTermsAndConditions).
    CheckTermsAndConditions()
    # Progress message with the queried date range.
    message(Sys.time(),
            '| Pulling Bet list from Range: ',
            as.character(fromDate), ' to ',
            as.character(toDate)
            )
    # One GET request per bet-list status; results are row-bound together.
    jsonlite::rbind_pages(lapply(betlist, function(betlist_type) {
      r <- GET(paste0(.PinnacleAPI$url ,"/v1/bets"),
               add_headers(Authorization = authorization(),
                           "Content-Type" = "application/json"),
               query =
                 list(
                   betlist = betlist_type,
                   # betids, when given, are sent as a single comma-separated string.
                   betids =
                     if (!is.null(betids)) paste0(betids, collapse = ',') else NULL,
                   fromDate = as.character(fromDate),
                   toDate = as.character(toDate),
                   appId = 'R'
                 )
      )
      # Parse the JSON body and flatten the top level into data-frame columns.
      res <- jsonlite::fromJSON(content(r,type = "text", encoding = "UTF-8"))
      as.data.frame(unlist(res, recursive = FALSE))
    }))
  }
| /R/GetBets.R | no_license | marcoblume/pinnacle.API | R | false | false | 1,870 | r | #' Get a list of running/settled bets
#'
#' @param betids a vector of betids (overrides betlist) default = NULL
#' @param betlist Either 'SETTLED' or 'RUNNING' Default Behavior shows both
#' @param fromDate Iso8061 Date Default: 15 days prior in UTC, as.POSIXct(Sys.Date(), tz = 'UTC')-15*24*60*60
#' @param toDate Iso8061 Date Default: 1 day ahead in UTC (to counter possible fencepost situations), as.POSIXct(Sys.Date(), tz = 'UTC') + 24*60*60
#'
#' @return A list of bets and associated details
#' @export
#' @importFrom jsonlite fromJSON
#' @importFrom jsonlite rbind_pages
#' @examples
#' \donttest{
#' SetCredentials("TESTAPI","APITEST")
#' AcceptTermsAndConditions(accepted=TRUE)
#' GetBetsList()}
GetBetsList <-
function(betids = NULL,
betlist = c('SETTLED','RUNNING'),
fromDate = as.POSIXlt(Sys.Date(), tz = 'UTC') - 15*24*60*60,
toDate = as.POSIXlt(Sys.Date(), tz = 'UTC') + 24*60*60){
CheckTermsAndConditions()
message(Sys.time(),
'| Pulling Bet list from Range: ',
as.character(fromDate), ' to ',
as.character(toDate)
)
jsonlite::rbind_pages(lapply(betlist, function(betlist_type) {
r <- GET(paste0(.PinnacleAPI$url ,"/v1/bets"),
add_headers(Authorization = authorization(),
"Content-Type" = "application/json"),
query =
list(
betlist = betlist_type,
betids =
if (!is.null(betids)) paste0(betids, collapse = ',') else NULL,
fromDate = as.character(fromDate),
toDate = as.character(toDate),
appId = 'R'
)
)
res <- jsonlite::fromJSON(content(r,type = "text", encoding = "UTF-8"))
as.data.frame(unlist(res, recursive = FALSE))
}))
}
|
##Creating Protein part of the matrix the x-axis
pr1 <- abpp_pr[,2]
pr1 <- unique(pr1)
pr2 <- data.frame(Leading.Protein = pr1, stringsAsFactors=F)
##Creating the y-axis metabolites
metab1 <- metab_neg[,1:2]
bad <- str_detect(metab1[,2], "m/z")
metab2 <- metab1[!bad,]
bad <- str_detect(metab2[,2], "[0123456789][0123456789][0123456789][0123456789]n")
metab2 <- metab2[!bad,]
metab2[,3]<- str_c(metab2[,2], metab2[,1], sep="-")
metab3 <- metab2
apply(metabname_ID,1,function(x) {
metab3[metab2 == x[1]] <<- x[2]
})
metab3 <- metab3[,2:3]
metab_ids <- unique(metab3[,1])
##Compiling into a matrix: one row per protein, one column per metabolite id.
## BUGFIX: the column range was hard-coded as 2:22, which breaks whenever the
## number of unique metabolite ids is not 21; derive it from length(metab_ids).
pr2[, 2:(length(metab_ids) + 1)] <- " "
colnames(pr2) <- c("Leading.Protein", metab_ids)
##Populating the matrix with scores
metab_cols <- colnames(pr2)   # hoisted out of the loops
for (j in seq_len(nrow(pr2))){
  ## Extract the UniProt accession from the leading-protein string and subset
  ## the scored interactions to rows mentioning that accession.
  pattern <- str_extract(pr2[j,1],"[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}")
  sub_gra <- scored_gra1[str_detect(scored_gra1[,1], pattern), ]
  for (i in 2:ncol(pr2)){
    ## BUGFIX: metabolite ids are literal names, not regular expressions --
    ## match them with fixed() so ids containing regex metacharacters
    ## (e.g. parentheses) cannot corrupt the lookup.
    sub_sub_gra <- sub_gra[str_detect(sub_gra[,2], fixed(metab_cols[i])), ]
    ## First matching score, or "N/A" when this protein/metabolite pair is absent.
    pr2[j,i] <- if (nrow(sub_sub_gra) > 0) sub_sub_gra[1,5] else "N/A"
  }
}
| /matrix_creation.R | no_license | bornea/gxna_rework | R | false | false | 1,161 | r | ##Creating Protein part of the matrix the x-axis
pr1 <- abpp_pr[,2]
pr1 <- unique(pr1)
pr2 <- data.frame(Leading.Protein = pr1, stringsAsFactors=F)
##Creating the y-axis metabolites
metab1 <- metab_neg[,1:2]
bad <- str_detect(metab1[,2], "m/z")
metab2 <- metab1[!bad,]
bad <- str_detect(metab2[,2], "[0123456789][0123456789][0123456789][0123456789]n")
metab2 <- metab2[!bad,]
metab2[,3]<- str_c(metab2[,2], metab2[,1], sep="-")
metab3 <- metab2
apply(metabname_ID,1,function(x) {
metab3[metab2 == x[1]] <<- x[2]
})
metab3 <- metab3[,2:3]
metab_ids <- unique(metab3[,1])
##Compiling into a matrix
pr2[,2:22] <- " "
colnames(pr2) <- c("Leading.Protein", metab_ids)
##Populating the matrix with scores
for (j in 1:nrow(pr2)){
pattern <- str_extract(pr2[j,1],"[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}")
match <- str_detect(scored_gra1[,1], pattern)
sub_gra <- scored_gra1[match,]
for (i in 2:ncol(pr2)){
match2 <- str_detect(sub_gra[,2], colnames(pr2[i]))
sub_sub_gra <- sub_gra[match2,]
if (nrow(sub_sub_gra) > 0){
pr2[j,i] <- sub_sub_gra[1,5]
}
else {
pr2[j,i] <- "N/A"
}
}
}
|
## Panel performance: per-attribute two-way ANOVA (Product * Session) p-values,
## plotted as a bar chart with non-significant attributes highlighted.
##
## data: a data frame whose first three columns include Product and Session
##       factors; columns 4..ncol(data) are the sensory attributes to test.
## Returns a list of (ggplot object, data.frame of attribute/p.value/notsig).
panelperf <- function(data){
  n_attr <- ncol(data) - 3
  p.value <- numeric(n_attr)
  for (i in seq_len(n_attr)) {
    ## p-value of the Product main effect from the Product*Session ANOVA.
    res.aovsummary <- summary(aov(data[, 3 + i] ~ Product * Session, data = data))
    p.value[i] <- round(res.aovsummary[[1]][[5]][1], 2)
  }
  notsig <- p.value >= 0.05
  attribute <- colnames(data)[-c(1:3)]
  ## BUGFIX: data.frame(cbind(...)) built a character matrix first, silently
  ## coercing p.value and notsig to strings; construct the data frame directly
  ## so the returned p.value stays numeric and the plot y-axis is continuous.
  res.p <- data.frame(attribute, p.value, notsig, stringsAsFactors = FALSE)
  res.p2 <- data.frame(res.p, order = seq_len(nrow(res.p)))
  require(ggplot2)
  g <- ggplot(res.p2, aes(x = order, y = p.value))
  ## BUGFIX: labs(list(...)) is not a supported call form; pass the labels
  ## as named arguments.
  g1 <- g +
    geom_bar(stat = "identity", aes(fill = notsig), width = 0.8) +
    geom_text(aes(label = p.value, vjust = 1.1, hjust = 0.5),
              color = "grey30", size = 3.5) +
    theme(axis.text.x = element_text(size = 10, color = "grey20", angle = 45)) +
    scale_fill_manual(values = c('lightseagreen', 'lightcoral')) +
    scale_x_continuous(breaks = res.p2$order, labels = res.p2$attribute) +
    labs(title = "Panel Performance (P.value<0.05)", y = "P.value", x = " ") +
    guides(fill = "none")
  return(list(g1, res.p))
}
| /R/panelperf.R | no_license | laynelv/isenso | R | false | false | 1,043 | r | panelperf <-function(data){
# Collect the Product-effect p-value for each attribute column (4..ncol).
p.value <- c()
for (i in 1:ncol(data[,-c(1:3)])) {
res.aovsummary<- summary(aov(data[,3+i]~Product*Session,data = data))
# [[1]][[5]][1] extracts Pr(>F) for the first model term (Product).
p.value[i] <- round(res.aovsummary[[1]][[5]][1],2)
}
# NOTE(review): standalone expression inside a function -- evaluated and
# discarded, i.e. dead code.
p.value
notsig <- p.value>=0.05
attribute <- colnames(data[,-c(1:3)])
# NOTE(review): cbind() coerces all columns to character before data.frame().
res.p <- data.frame(cbind(attribute,p.value,notsig))
order <- as.integer(rownames(res.p))
res.p2 <- data.frame(cbind(res.p,order))
#res.p$order <- as.numeric(res.p$order)
require(ggplot2)
g <- ggplot(res.p2,aes(x=order,y=p.value))
# Bar chart of p-values; bars with p >= 0.05 are filled differently, x axis
# labelled with attribute names.
g1 <- g+geom_bar(stat = "identity",aes(fill=notsig),width = 0.8)+geom_text(aes(label =p.value, vjust = 1.1, hjust = 0.5),color="grey30",size = 3.5)+theme(axis.text.x = element_text(size = 10, color = "grey20", angle = 45))+scale_fill_manual(values = c('lightseagreen','lightcoral'))+scale_x_continuous(breaks = res.p2$order,labels =res.p2$attribute)+labs(list(title = "Panel Performance (P.value<0.05)", y = "P.value", x = " "))+ guides(fill = "none")
return(list(g1,res.p))
}
|
library(SyNet)
### Name: outgearth
### Title: Generate a KML File with Marked Set of Points
### Aliases: outgearth
### Keywords: file
### ** Examples
#This example is driven by a new idea of a sympatry network partitioning.
#We will implement an algorithm based on the cliques found on the network and
#we will export the final classification into a KML file.
#######
#Step 1: Infer the network of co-extensive sympatry in the Sciobius example:
data(sciobius2x2)
aux <- gridinfer(dntable = sciobius2x2)
#######
#Step 2: Obtain the cliques
cliques <- netproperties(aux$sm)$Cliques
#######
#Step 3: Perform the new algorithm on the data frame of cliques (1/0 table of species
#by cliques). Here, the maximum clique is selected and its members removed from the
#data frame. This task is repeated until no residual group can be extracted.
inc <- apply(cliques, 1, sum) #Number of cliques where a given species occurs
flag <- sum(cliques) #Total clique memberships still to be assigned
i <- 1 #counter (group number)
classes <- rep(NA, nrow(aux$sm))
while(flag > 0){
size <- apply(cliques, 2, sum) #Size of each clique
clsel <- which.max(size) #Identify a single largest clique
members <- which(cliques[,clsel]==1)
flag <- flag - sum(inc[members])
inc[members] <- 0
cliques[members,] <- 0 #Indirect way for removing species already classified
classes[members] <- paste("Group ", i)
i <- i + 1
}
split(aux$Label, classes) #Print on R console the resulting partition
#######
#Step 4: Create an object of class nampartition by hand and fill its fields.
rslt <- c()
rslt$kind <- "grids"
rslt$status <- cbind(Taxa = aux$Label, Status = classes)
rslt$occupancy <- aux$occupancy
#Next, set coordinates in function of the geographical centre for each cell used in the
#Sciobius' example
rslt$coords <- matrix(c(14, -20), nrow = nrow(aux$coords), ncol = 2, byrow = TRUE) +
matrix(c(2, -2), nrow = nrow(aux$coords), ncol = 2, byrow = TRUE)*aux$coords
class(rslt) <- "nampartition"
#######
#Step 5: Create the KML file
## Not run:
##D outgearth(rslt)
##D #Save and then open the file with Google Earth.
##D
## End(Not run)
#I think that this simple algorithm is worthy of refinement.
| /data/genthat_extracted_code/SyNet/examples/outgearth.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,247 | r | library(SyNet)
### Name: outgearth
### Title: Generate a KML File with Marked Set of Points
### Aliases: outgearth
### Keywords: file
### ** Examples
# Clique-based partitioning of a co-extensive sympatry network, exported at
# the end as a KML file for Google Earth.
#######
# Step 1: infer the sympatry network for the Sciobius data set.
data(sciobius2x2)
inference <- gridinfer(dntable = sciobius2x2)
#######
# Step 2: list the cliques of the network (species x clique 1/0 table).
clique_tbl <- netproperties(inference$sm)$Cliques
#######
# Step 3: repeatedly peel off the largest remaining clique; its members form
# one group and are wiped from the table before the next pass, until every
# clique membership has been consumed.
clique_count <- rowSums(clique_tbl) # cliques each species belongs to
remaining <- sum(clique_tbl) # memberships not yet assigned
grp <- 1 # running group number
group_of <- rep(NA, nrow(inference$sm))
while (remaining > 0) {
clique_size <- colSums(clique_tbl)
biggest <- which.max(clique_size) # one largest clique
picked <- which(clique_tbl[, biggest] == 1)
remaining <- remaining - sum(clique_count[picked])
clique_count[picked] <- 0
clique_tbl[picked, ] <- 0 # remove classified species everywhere
group_of[picked] <- paste("Group ", grp)
grp <- grp + 1
}
split(inference$Label, group_of) # show the resulting partition on the console
#######
# Step 4: assemble a "nampartition" object by hand.
partition <- list()
partition$kind <- "grids"
partition$status <- cbind(Taxa = inference$Label, Status = group_of)
partition$occupancy <- inference$occupancy
# Coordinates: geographic centre of each grid cell used in the example.
partition$coords <- matrix(c(14, -20), nrow = nrow(inference$coords), ncol = 2, byrow = TRUE) +
matrix(c(2, -2), nrow = nrow(inference$coords), ncol = 2, byrow = TRUE)*inference$coords
class(partition) <- "nampartition"
#######
# Step 5: write the KML file.
## Not run:
##D outgearth(partition)
##D #Save and then open the file with Google Earth.
##D
## End(Not run)
# This simple algorithm is worthy of refinement.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tables.R
\name{AddSignificanceHighlightingToDataTable}
\alias{AddSignificanceHighlightingToDataTable}
\title{\code{AddSignificanceHighlightingToDataTable}}
\usage{
AddSignificanceHighlightingToDataTable(dt, columns.to.color, column.to.check,
red.value, blue.value)
}
\arguments{
\item{dt}{An HTML widget DataTable created e.g. by
\code{\link{DataTableWithRItemFormat}}}
\item{columns.to.color}{A character vector containing the column names of the
columns that should be colored.}
\item{column.to.check}{The name of the column whose values will be used to do
the coloring.}
\item{red.value}{A number specifying the upper bound for values in
\code{column.to.check} which will cause cells in \code{columns.to.color} to
be highlighted red. That is, when cells in column.to.check have a value
less than this, cells in columns.to.color will be colored red.}
\item{blue.value}{A number specifying the lower bound for coloring cells
blue, as above.}
}
\description{
Add red and blue highlighting to a data table conditionally on
the values in a specified column. Used to replicate Q's significance
highlighting in the table.
}
\examples{
my.df <- data.frame(First = c(1,2,3), Second = c("a", "b", "c"))
my.dt <- DataTableWithRItemFormat(my.df, caption = "A nice table")
my.dt <- AddSignificanceHighlightingToDataTable(my.dt,
columns.to.color = "Second", column.to.check = "First",
red.value = 1.01, blue.value = 2.99)
}
| /man/AddSignificanceHighlightingToDataTable.Rd | no_license | gkalnytskyi/flipFormat | R | false | true | 1,511 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tables.R
\name{AddSignificanceHighlightingToDataTable}
\alias{AddSignificanceHighlightingToDataTable}
\title{\code{AddSignificanceHighlightingToDataTable}}
\usage{
AddSignificanceHighlightingToDataTable(dt, columns.to.color, column.to.check,
red.value, blue.value)
}
\arguments{
\item{dt}{An HTML widget DataTable created e.g. by
\code{\link{DataTableWithRItemFormat}}}
\item{columns.to.color}{A character vector containing the column names of the
columns that should be colored.}
\item{column.to.check}{The name of the column whose values will be used to do
the coloring.}
\item{red.value}{A number specifying the upper bound for values in
\code{column.to.check} which will cause cells in \code{columns.to.color} to
be highlighted red. That is, when cells in column.to.check have a value
less than this, cells in columns.to.color will be colored red.}
\item{blue.value}{A number specifying the lower bound for coloring cells
blue, as above.}
}
\description{
Add red and blue highlighting to a data table conditionally on
the values in a specified column. Used to replicate Q's significance
highlighting in the table.
}
\examples{
my.df <- data.frame(First = c(1,2,3), Second = c("a", "b", "c"))
my.dt <- DataTableWithRItemFormat(my.df, caption = "A nice table")
my.dt <- AddSignificanceHighlightingToDataTable(my.dt,
columns.to.color = "Second", column.to.check = "First",
red.value = 1.01, blue.value = 2.99)
}
|
# Minimal Shiny app: one checkbox whose current state is echoed back as text.
library(shiny)

ui <- fluidPage(
  checkboxInput("chk",
                label = h3("선택"),
                # The original left `value =` empty (an empty argument, so
                # checkboxInput's default applied); make the FALSE default
                # explicit instead of relying on that quirk.
                value = FALSE
  ),
  hr(),
  fluidRow(column(3, verbatimTextOutput("value")))
)

server <- function(input, output, session) {
  # Mirror the checkbox state ("TRUE"/"FALSE") into the "value" output slot.
  output$value <- renderText({input$chk})
}

shinyApp(ui, server)
| /shinyCheckbox.R | no_license | jxh100200/Shiny | R | false | false | 311 | r | library(shiny)
# UI: a single checkbox plus a verbatim text panel echoing its state.
ui <- fluidPage(
  checkboxInput("chk",
                label = h3("선택"),
                # value left empty: an empty argument, so checkboxInput's
                # FALSE default applies -- TODO write `value = FALSE` explicitly
                value = 
  ),
  hr(),
  fluidRow(column(3,verbatimTextOutput("value")))
)
# Server: mirror the checkbox state ("TRUE"/"FALSE") into the "value" output.
server <- function(input, output, session) {
  output$value <- renderText({input$chk})
}
shinyApp(ui, server)
|
# Exploratory analysis, question 1: have total PM2.5 emissions in the US
# decreased from 1999 to 2008? Produces bar plots of total emissions per year
# (1999, 2002, 2005, 2008) using the base plotting system.

# NOTE(review): hard-coded working directory is fragile; adjust per machine.
setwd ("/DataScienceCourse_Coursera/exploratory-data-analysis_Course 5/SampleCode")
getwd ()

# Read the NEI summary data (fine-particle emission records) with readRDS.
NEI <- readRDS("datafiles/summarySCC_PM25.rds")
# Sanity check -- columns: fips / SCC / Pollutant / Emissions / type / year.
head(NEI)

# Install dplyr only when it is actually missing (the original called
# install.packages() unconditionally on every run).
if (!requireNamespace("dplyr", quietly = TRUE)) {
  install.packages ("dplyr")
}
library(dplyr)

# Wrap the data frame and total emissions within each year.
NEIDF <- tbl_df(NEI)
EmbyYear <- summarize(group_by(NEIDF, year), sum(Emissions))
# Display the 4 x 2 result (year, sum of emissions), then rename the columns.
EmbyYear
colnames(EmbyYear) <- c("Year", "Emissions")

# Bar plot of the raw yearly totals.
barplot(EmbyYear$Emissions,
        names.arg=EmbyYear$Year,
        col="red",
        xlab='Years',
        ylab='Emissions (PM 2.5)',
        main = 'Emissions (PM 2.5) per year')

# Rescale to millions so the y-axis labels stay readable, then replot.
EmbyYear$EmissionsInMillions <- EmbyYear$Emissions / 1000000
barplot(EmbyYear$EmissionsInMillions,
        names.arg=EmbyYear$Year,
        col="red",
        xlab='Years',
        ylab='Emissions (PM 2.5) in Millions',
        main = 'Emissions (PM 2.5) per year')
| /plot1.R | no_license | sohairzaki/coursera-exploratory-data-analysis-course-project-2 | R | false | false | 2,089 | r | # set the working directory
# Question 1: did total US PM2.5 emissions fall between 1999 and 2008?
# Base-graphics bar plots of total emissions for 1999, 2002, 2005 and 2008.
setwd ("/DataScienceCourse_Coursera/exploratory-data-analysis_Course 5/SampleCode")
getwd ()

# Load the National Emissions Inventory summary and peek at it
# (columns: fips / SCC / Pollutant / Emissions / type / year).
nei <- readRDS("datafiles/summarySCC_PM25.rds")
head(nei)

# dplyr keeps the aggregation concise.
install.packages ("dplyr")
library(dplyr)

nei_tbl <- tbl_df(nei)

# Sum emissions within each year, show the 4 x 2 result, then rename columns.
yearly <- nei_tbl %>% group_by(year) %>% summarize(sum(Emissions))
yearly
colnames(yearly) <- c("Year", "Emissions")

# First pass: raw totals.
barplot(yearly$Emissions,
        names.arg = yearly$Year,
        col = "red",
        xlab = 'Years',
        ylab = 'Emissions (PM 2.5)',
        main = 'Emissions (PM 2.5) per year')

# Second pass: rescale to millions so the axis labels stay readable.
yearly$EmissionsInMillions <- yearly$Emissions / 1000000
barplot(yearly$EmissionsInMillions,
        names.arg = yearly$Year,
        col = "red",
        xlab = 'Years',
        ylab = 'Emissions (PM 2.5) in Millions',
        main = 'Emissions (PM 2.5) per year')
|
library(arm)
library(lme4)
library(lattice)
library(tidyverse)
library(dplyr)
library(vioplot)
library(lme4)
library(arm)
library(bbmle) # Ben Bolker's library of mle functions
library(MASS)
# Question 1.
# Using the Char Height data set from class (char_with_fake.csv), construct a model with random effects for these data, using CharHt as the response variable. Assume that Transect is the only relevant grouping factor, and that Steepness (of the topography) and Diameter (of the trees) are the only available predictors. In reporting about the model please include:
# • a) A brief explanation of how you chose variables, and which (if any) you decided to allow to vary by group (Transect).
# • b) An assessment of how much variation there is in the group-level random effects.
# • c) A brief assessment of how well your selected model fits the data.
# Notes about this data set:
# This data set adds a fake, randomly generated predictor that is just noise (rnorm).
# Why are we doing this? Recall that the penalty term in AIC is designed to offset the improvement in fit you would typically get if you added a randomly generated explanator y variable to your model -- one that has no "true" relationship to the response variable.
# Load the char-height data (includes a randomly generated "fake" predictor).
d = read.csv("char_with_fake.csv", header = TRUE)
head(d)
# Look at variation in the response across groups (Transect).
boxplot(CharHt~Transect, d, xlab = "Transect")
# Tons of variation among groups.
# There appears to be some variation in response to both explanatory
# variables -- inspect each predictor vs CharHt, faceted by transect.
ggplot(d, aes(x=Diameter, y=CharHt)) +
geom_point() +
geom_smooth(span=2) +
theme_classic() +
ggtitle("Diameter vs CharHt") +
xlab("Diameter") +
ylab("CharHt") +
facet_wrap(~Transect)
ggplot(d, aes(x=Slope, y=CharHt)) +
geom_point() +
geom_smooth(span=2) +
theme_classic() +
ggtitle("Slope vs CharHt") +
xlab("Slope") +
ylab("CharHt") +
facet_wrap(~Transect)
# Normality of the response?
qqnorm(d$CharHt, main = "Normal Q-Q plot", xlab = "Theoretical quantiles", ylab = "Sample quantiles")
# Seems ok.
# What about collinearity between the two predictors?
x = select(d, Slope, Diameter)
round(cor(x), 2)
# They're only correlated a little (0.2); I think this is ok.
# Fit candidate models and compare them.
# First, decide which predictor(s) to include.
# Fixed effects only:
m1 = lm(CharHt~Slope, data =d)
m2 = lm(CharHt~Diameter, data = d)
m3 = lm(CharHt~Slope+Diameter, data = d)
m4 = lm(CharHt~Slope*Diameter, data = d)
# Mixed models including both fixed and random effects (REML = FALSE so the
# likelihood-based AIC/BIC comparisons across fixed-effect structures are valid):
m5 <- lmer(CharHt~Slope+(1|Transect), data=d, REML = FALSE)
m6 <- lmer(CharHt~Diameter+(1|Transect), data=d, REML = FALSE)
m7 <- lmer(CharHt~Diameter+(1+Slope|Transect), data=d, REML = FALSE)
m8 <- lmer(CharHt~Slope+(1+Diameter|Transect), data=d, REML = FALSE)
m9 <- lmer(CharHt~Diameter+Slope+(1|Transect), data=d, REML=FALSE)
m10 <- lmer(CharHt~Diameter*Slope+(1|Transect), data=d, REML = FALSE)
AIC(m1, m2, m3, m4, m5, m6, m7, m8, m9, m10)
BIC(m1, m2, m3, m4, m5, m6, m7, m8, m9, m10)
# Models that include the interaction are all worse than those without it.
# Stepwise check through all combinations of variables (left unfinished):
# check for missing data
#sum(!complete.cases(d)) # no missing data
#m0 <- lm(CharHt~1, d)
#mfull <- lm(CharHt~., d)
#summary(mfull)
#AIC(m0, mfull)
#BIC(m0, mfull)
#stepAIC(d, )
# How much variation is there in the group-level random effects?
display(m8)
# Do we include the random effects or not?
# Notes from the MuMIn help page:
# The marginal R2 value represents the variance explained by the fixed effects:
# R_GLMM(m)^2 = (sigma_f^2) / (sigma_f^2 + sigma_a^2 + sigma_e^2)
# The conditional R2 value is the variance explained by the entire model,
# including both fixed and random effects:
# R_GLMM(c)^2 = (sigma_f^2 + sigma_a^2) / (sigma_f^2 + sigma_a^2 + sigma_e^2)
# where sigma_f^2 is the fixed-effect variance, sigma_a^2 the random-effect
# variance, and sigma_e^2 the observation-level (residual) variance.
library(MuMIn)
r.squaredGLMM(m5)
r.squaredGLMM(m6)
r.squaredGLMM(m7)
r.squaredGLMM(m8)
r.squaredGLMM(m9)
r.squaredGLMM(m10)
# Could use dredge() to enumerate and rank all combinations.
# Including the random effects does allow the model to explain more of the
# variation in the response variable.
# Testing model assumptions: residuals-vs-fitted and Q-Q plots for m4.
par(mfrow=c(2, 1), mar=rep(3,4), mgp=c(2,1,0))
plot(m4, which=1:2)
# What about centering and scaling, in order to interpret the coefficients?
# Another way to assess model performance is with cross-validation
# (the following mirrors the Class 7 script).
# There are 6 fires in total: withhold 3 and predict the other 3.
d.fit <- filter(d, Fire %in% (1:3)) # training data
d.holdout <- filter(d, Fire %in% (4:6)) # prediction data
# Or, as in class script 7, use data on the 1st fire to predict the 2nd.
# NOTE(review): this immediately overwrites the 3-fire split above -- only
# the 1-vs-2 split is actually used below.
d.fit <- filter(d, Fire ==1) # training data
d.holdout <- filter(d, Fire ==2) # prediction data
# Fit the two competing models (m2 adds the randomly generated "fake" predictor).
char.m1 <- lmer(CharHt~Diameter+(1+Diameter|Transect), data=d.fit)
char.m2 <- lmer(CharHt~Diameter*fake+(1+Diameter|Transect), data=d.fit)
# Compare raw fit error (training) against holdout prediction error.
# Mean squared error of the model fit:
mean(resid(char.m1)^2)
mean(resid(char.m2)^2)
# Mean squared prediction error on the holdout fire:
mean((predict(char.m1, newdata=d.holdout, allow.new.levels = TRUE) - d.holdout$CharHt)^2)
mean((predict(char.m2, newdata=d.holdout, allow.new.levels = TRUE) - d.holdout$CharHt)^2)
# Which model does better in this cross-validation test?
# Visual display of model fits and predictions (observed vs predicted; the
# 1:1 line marks perfect prediction).
par(mfrow=c(2,2))
plot(predict(char.m1)~d.fit$CharHt, main="Model 1 Fit")
abline(0,1)
plot(predict(char.m2)~d.fit$CharHt, main="Model 2 Fit")
abline(0,1)
plot(predict(char.m1, newdata=d.holdout, allow.new.levels = TRUE)~d.holdout$CharHt, main="Model 1 Predictive performance")
abline(0,1)
plot(predict(char.m2, newdata=d.holdout, allow.new.levels = TRUE)~d.holdout$CharHt, main="Model 2 Predictive performance")
abline(0,1)
# Both models look good here -- perhaps the signal is strong enough that the
# model tolerates the noise from the randomly generated predictor?
# Another idea: regenerate the "fake" column many times and see how often the
# evaluation criteria favor a model that includes it.
# The remainder is adapted from the Class 8 script (radon example), kept as
# reference for withholding whole groups.
# NOTE(review): `radon`, `counties` and `n.counties` are never defined in this
# script -- the lines below will error unless that data is loaded first.
radon1 <- filter(radon, county %in% counties[1:50])
radon2 <- filter(radon, county %in% counties[51:n.counties])
# Hold out 10% of the fitting half so we can also predict to "known" counties.
holdout1 <- seq(2, as.integer(nrow(radon1)), by=10)
radon1.fit <- radon1[-holdout1,]
radon1.holdout <- radon1[holdout1,]
# Drop any holdout data that is from a county not included in the fitting data.
radon1.holdout <- filter(radon1.holdout, county %in% unique(radon1.fit$county))
# Fit to the "known" counties minus the holdout rows.
m.sub <- lmer(y ~ x + u.full + (1|county), data=radon1.fit, REML=FALSE)
fits.re <- predict(m.sub, newdata=radon1.holdout) # by default predict includes the random effects in predictions
pdata <- data.frame(y=radon1.holdout$y, fits.re)
p.full <- ggplot(pdata, aes(fits.re, y)) + geom_point() + geom_smooth(method=lm) + theme_bw()
p.full
# 2) For houses in new counties not in the fitting data we have no random
# effect estimates, so predict from the fixed effects alone.
fits.fixed <- predict(m.sub, newdata = radon2, re.form=NA) # tell predict() not to include random effects in prediction
pdata2 = data.frame(y = radon2$y, fits.fixed)
p.fixed <- ggplot(pdata2, aes(fits.fixed, y)) + geom_point() + geom_smooth(method=lm) + theme_bw()
# NOTE(review): grid.arrange() is from gridExtra, which is never loaded here.
grid.arrange(p.full, p.fixed, nrow=1, ncol=2)
# Model averaging?
## Question 2
# Pairs figure skating scores, 1932 Olympics (olympics.csv).
# Question: which is the bigger source of variation in program scores, the
# judges or the skating pair? Fit a mixed model with random effects for both.
d2 = read.csv("olympics.csv", header = TRUE)
# Scores by judge, one line per pair.
ggplot(data=d2, aes(x=judge, y=score, group = pair, color=pair)) +
geom_point()
boxplot(score~judge, d2, xlab = "Judge")
# Just by looking at the graphs, judge 7 appears to give consistently higher
# scores than the others, and perhaps judge 2 as well.
boxplot(score~pair, d2, xlab = "pair")
# Out of curiosity, variation among pairs: not much variation within pairs.
# Fit the models (m16 = judge only; mskate = judge + pair random intercepts).
m16 <- lmer(score~ (1|judge), data = d2)
mskate <- lmer(score~ (1|judge) + (1|pair), data = d2)
# Interpret results
summary(mskate)
# coef.est coef.se
# (Intercept) 6.03 0.31
# pair -0.20 0.04
# judge -0.04 0.06
# Error terms:
# Groups Name Std.Dev.
# judge (Intercept) 0.30
# pair (Intercept) 0.18
# Residual 0.27
# Standard errors of the coefficients for pair and judge are similar, slightly
# higher for judge.
# Pairs are ranked in descending order of average score, so a negative
# slope between pair number and score makes sense.
# The judge number is arbitrary, so its slope coefficient is not meaningful.
# The random-effect standard deviation for judge exceeds that for pair,
# so judges explain more of the variation in scores than pairs do.
| /Homeworks/HW2 Suglia.R | no_license | esuglia/applied-stats-class | R | false | false | 10,654 | r |
library(arm)
library(lme4)
library(lattice)
library(tidyverse)
library(dplyr)
library(vioplot)
library(lme4)
library(arm)
library(bbmle) # Ben Bolker's library of mle functions
library(MASS)
# Question 1.
# Using the Char Height data set from class (char_with_fake.csv), construct a model with random effects for these data, using CharHt as the response variable. Assume that Transect is the only relevant grouping factor, and that Steepness (of the topography) and Diameter (of the trees) are the only available predictors. In reporting about the model please include:
# • a) A brief explanation of how you chose variables, and which (if any) you decided to allow to vary by group (Transect).
# • b) An assessment of how much variation there is in the group-level random effects.
# • c) A brief assessment of how well your selected model fits the data.
# Notes about this data set:
# This data set adds a fake, randomly generated predictor that is just noise (rnorm).
## Homework script: mixed-effects analysis of char height (CharHt) as a
## function of Slope and Diameter across Transects, plus a randomly
## generated "fake" predictor used to probe AIC's penalty term.
## NOTE(review): relies on ggplot2, dplyr, lme4 and arm (for display())
## being attached earlier in the session; only MuMIn is attached below.
# Why are we doing this? Recall that the penalty term in AIC is designed to offset the improvement in fit you would typically get if you added a randomly generated explanatory variable to your model -- one that has no "true" relationship to the response variable.
d = read.csv("char_with_fake.csv", header = TRUE)
head(d)
# Look at variation in data across groups
boxplot(CharHt~Transect, d, xlab = "Transect")
# Tons of variation among groups
# there appears to be some variation in response to both explanatory variables
ggplot(d, aes(x=Diameter, y=CharHt)) +
geom_point() +
geom_smooth(span=2) +
theme_classic() +
ggtitle("Diameter vs CharHt") +
xlab("Diameter") +
ylab("CharHt") +
facet_wrap(~Transect)
ggplot(d, aes(x=Slope, y=CharHt)) +
geom_point() +
geom_smooth(span=2) +
theme_classic() +
ggtitle("Slope vs CharHt") +
xlab("Slope") +
ylab("CharHt") +
facet_wrap(~Transect)
# Normality?
qqnorm(d$CharHt, main = "Normal Q-Q plot", xlab = "Theoretical quantiles", ylab = "Sample quantiles")
# seems ok
# What about collinearity?
x = select(d, Slope, Diameter)
round(cor(x), 2)
# They're only correlated a little (0.2); I think this is ok
# Let's fit some models and compare them
# First, decide which predictor(s) to include
# Fixed effects only
m1 = lm(CharHt~Slope, data =d)
m2 = lm(CharHt~Diameter, data = d)
m3 = lm(CharHt~Slope+Diameter, data = d)
m4 = lm(CharHt~Slope*Diameter, data = d)
# Mixed models including both fixed and random effects
## REML = FALSE so that models with different fixed effects are
## comparable by AIC/BIC (ML, not REML, likelihoods).
m5 <- lmer(CharHt~Slope+(1|Transect), data=d, REML = FALSE)
m6 <- lmer(CharHt~Diameter+(1|Transect), data=d, REML = FALSE)
m7 <- lmer(CharHt~Diameter+(1+Slope|Transect), data=d, REML = FALSE)
m8 <- lmer(CharHt~Slope+(1+Diameter|Transect), data=d, REML = FALSE)
m9 <- lmer(CharHt~Diameter+Slope+(1|Transect), data=d, REML=FALSE)
m10 <- lmer(CharHt~Diameter*Slope+(1|Transect), data=d, REML = FALSE)
AIC(m1, m2, m3, m4, m5, m6, m7, m8, m9, m10)
BIC(m1, m2, m3, m4, m5, m6, m7, m8, m9, m10)
# those that include the interaction are all worse than those without the interaction
# Stepwise check through all combinations of variables
# check for missing data
#sum(!complete.cases(d)) # no missing data
#m0 <- lm(CharHt~1, d)
#mfull <- lm(CharHt~., d)
#summary(mfull)
#AIC(m0, mfull)
#BIC(m0, mfull)
#stepAIC(d, )
# How much variation is there in the group-level random effects?
display(m8)
# Do we include the random effects or not?
# Notes for me from the help page:
# The marginal R2 value represents the variance explained by the fixed effects, defined as:
# R_GLMM(m)² = (σ_f²) / (σ_f² + σ_α² + σ_ε²)
# The conditional R2 value is interpreted as the variance explained by the entire model, including both fixed and random effects, and is calculated according to the equation:
# R_GLMM(c)² = (σ_f² + σ_α²) / (σ_f² + σ_α² + σ_ε²)
# where σ_f² is the variance of the fixed effect components, σ_α² is the variance of the random effects, and σ_ε² is the “observation-level” variance.
library(MuMIn)
r.squaredGLMM(m5)
r.squaredGLMM(m6)
r.squaredGLMM(m7)
r.squaredGLMM(m8)
r.squaredGLMM(m9)
r.squaredGLMM(m10)
# could use dredge to combine all combinations and rank them all
# Including the random effects does allow the model to explain more of the variation in the response variable.
# Testing model assumptions
par(mfrow=c(2, 1), mar=rep(3,4), mgp=c(2,1,0))
plot(m4, which=1:2)
# What about centering and scaling, in order to interpret the coefficients of the model? Should we write down our interpretations of the coefficients?
# Another way to assess model performance is with cross-validation
# The following example is outlined in Class 7 Script
# There are 6 fires in total: let's withhold 3 and allow the model to predict the last 3
d.fit <- filter(d, Fire %in% (1:3)) # training data
d.holdout <- filter(d, Fire %in% (4:6)) # prediction data
# Or, as in class script 7, use data on 1st fire to predict 2nd fire
## NOTE(review): the two lines below overwrite the 3-fire split above;
## only the fire-1 / fire-2 split is actually used from here on.
d.fit <- filter(d, Fire ==1) # training data
d.holdout <- filter(d, Fire ==2) # prediction data
# Fit the models
## char.m2 adds the randomly generated "fake" predictor for comparison.
char.m1 <- lmer(CharHt~Diameter+(1+Diameter|Transect), data=d.fit)
char.m2 <- lmer(CharHt~Diameter*fake+(1+Diameter|Transect), data=d.fit)
# Compare raw sum of squared error and penalized fit terms
# mean squared error of the model fit
mean(resid(char.m1)^2)
mean(resid(char.m2)^2)
## Out-of-sample mean squared prediction error on the holdout fire.
mean((predict(char.m1, newdata=d.holdout, allow.new.levels = TRUE) - d.holdout$CharHt)^2)
mean((predict(char.m2, newdata=d.holdout, allow.new.levels = TRUE) - d.holdout$CharHt)^2)
# Which model does better in this cross-validation test?
# Visual display of model fits and predictions
par(mfrow=c(2,2))
plot(predict(char.m1)~d.fit$CharHt, main="Model 1 Fit")
abline(0,1)
plot(predict(char.m2)~d.fit$CharHt, main="Model 2 Fit")
abline(0,1)
plot(predict(char.m1, newdata=d.holdout, allow.new.levels = TRUE)~d.holdout$CharHt, main="Model 1 Predictive performance")
abline(0,1)
plot(predict(char.m2, newdata=d.holdout, allow.new.levels = TRUE)~d.holdout$CharHt, main="Model 2 Predictive performance")
abline(0,1)
# I'm not sure what is happening here; it appears that both models are good. Perhaps the signal in the data is strong enough that the model can handle the noise produced by the randomly generated data?
# Another you could try: Generate new "fake" data again, many times, and see how often the different model evaluation criteria favor a model that includes the fake data.
# Maybe it's worth trying this...
# There is another example in Class 8 Script that withholds different parts of the data - the equivalent in this data set would be withholding half the transects...
# To set up this comparison, let's first break the radon data set into two parts by county, so we can make predictions to counties that are "new", that is, not contained in the fitting data set.
## NOTE(review): `radon`, `counties` and `n.counties` are not defined in
## this script (pasted from the class example); this section errors if
## run as-is.
radon1 <- filter(radon, county %in% counties[1:50])
radon2 <- filter(radon, county %in% counties[51:n.counties])
# Then we can hold out 10% of data from the part of the data set that we will use to fit the model, so we can also predict to "known" counties -- counties that are in the part of the data set used to fit the model.
holdout1 <- seq(2, as.integer(nrow(radon1)), by=10)
radon1.fit <- radon1[-holdout1,]
radon1.holdout <- radon1[holdout1,]
# Drop any holdout data that is from a county not included in the fitting data.
radon1.holdout <- filter(radon1.holdout, county %in% unique(radon1.fit$county))
# Fit our model to the data from the "known" counties, minus the holdout data. This is the same as model m2, above, but fitted to a subset of the data
m.sub <- lmer(y ~ x + u.full + (1|county), data=radon1.fit, REML=FALSE)
fits.re <- predict(m.sub, newdata=radon1.holdout) # by default predict includes the random effects in predictions
pdata <- data.frame(y=radon1.holdout$y, fits.re)
p.full <- ggplot(pdata, aes(fits.re, y)) + geom_point() + geom_smooth(method=lm) + theme_bw()
p.full
# 2) If the houses are in new counties that are not included in the data we used to fit the model, then we have no random effect estimates. To predict radon levels in these houses, we could use only the covariate values and coefficients.
fits.fixed <- predict(m.sub, newdata = radon2, re.form=NA) # tell predict() not to include random effects in prediction
pdata2 = data.frame(y = radon2$y, fits.fixed)
p.fixed <- ggplot(pdata2, aes(fits.fixed, y)) + geom_point() + geom_smooth(method=lm) + theme_bw()
grid.arrange(p.full, p.fixed, nrow=1, ncol=2)
# Model averaging?
## Question 2
# Get data on performance scores for pairs figure skating in the 1932 olympics (from http://www.stat.columbia.edu/~gelman/arm/examples/olympics/olympics1932.txt). This is formatted for R as “olympics.csv” on Smartsite in the homework folder.
# Let’s assume the question is: which is the bigger source of variation in the scores for skating programs, the judges or the skating pair? Fit a mixed model for these data with “score” as the response variable, and random effects for judge and skating pair (“judge” and “pair”). Interpret the results (coefficients and their standard errors, standard errors of the random effects). Is there a judge that tends to give consistently higher scores?
d2 = read.csv("olympics.csv", header = TRUE)
ggplot(data=d2, aes(x=judge, y=score, group = pair, color=pair)) +
geom_point()
boxplot(score~judge, d2, xlab = "Judge")
# just by looking at the graphs, judge 7 appears to give consistently higher scores than the others, and perhaps judge 2 as well
boxplot(score~pair, d2, xlab = "pair")
# out of curiosity, looked at variation among pairs; not much variation of scores within pairs
# Fit model
m16 <- lmer(score~ (1|judge), data = d2)
mskate <- lmer(score~ (1|judge) + (1|pair), data = d2)
# Interpret results
summary(mskate)
# coef.est coef.se
# (Intercept) 6.03 0.31
# pair -0.20 0.04
# judge -0.04 0.06
# Error terms:
# Groups Name Std.Dev.
# judge (Intercept) 0.30
# pair (Intercept) 0.18
# Residual 0.27
# standard error of coefficients for pair and judge are similar but slightly higher for judge
# pairs are ranked in descending order of average score; so it makes sense that there is a negative correlation between pair and score
# the number assigned to judge doesn't really mean anything so the coefficient for slope between judge and score is not easily interpretable
# the standard deviation in the error term for the random effect judge is again higher than that for pair
# I would conclude that judges explain more of the variation in scores than pairs do
|
######### EXERCISE: #########
## Bayes' theorem: Pr(A | B) = Pr(B | A) * Pr(A) / Pr(B)
# Pr_1: probability that the first son dies of SIDS
Pr_1 <- 1 / 8500
# Pr_2: probability that the second son dies of SIDS
Pr_2 <- 1 / 100
# Pr_B: probability that both sons die of SIDS
Pr_B <- Pr_1 * Pr_2
# Pr_A: rate of mothers that are murderers
Pr_A <- 1 / 1000000
# Pr_BA: probability that two children die without evidence of harm,
# given that their mother is a murderer
Pr_BA <- 0.50
######### ANSWER #########
# Pr_AB: posterior probability that the mother is a murderer, given that
# her two children died with no evidence of physical harm.
Pr_AB <- (Pr_BA * Pr_A) / Pr_B
Pr_AB | /04.DS-Inference and Modeling/S5-Bayesian Statistics/04. Exercise.R | no_license | dgpaniagua/data-science-assessments | R | false | false | 711 | r | ######### EXERCISE: #########
# Define `Pr_1` as the probability of the first son dying of SIDS
Pr_1 <- 1/8500
# Define `Pr_2` as the probability of the second son dying of SIDS
Pr_2 <- 1/100
# Define `Pr_B` as the probability of both sons dying of SIDS
Pr_B <- Pr_1*Pr_2
# Define Pr_A as the rate of mothers that are murderers
Pr_A <- 1/1000000
# Define Pr_BA as the probability that two children die without evidence of harm, given that their mother is a murderer
Pr_BA <- 0.50
# Define Pr_AB as the probability that a mother is a murderer, given that her two children died with no evidence of physical harm. Print this value to the console.
######### ANSWER #########
Pr_AB <- (Pr_BA*Pr_A)/Pr_B
Pr_AB |
suppressPackageStartupMessages(library(tibble))
suppressPackageStartupMessages(library(magrittr))
suppressPackageStartupMessages(library(purrr))
library(ape)
## Plot unrooted trees whose branch lengths are the mean number of
## species-specific DGE genes per list, divided by 6 or 45 (presumably
## million-years divergence for human/chimp and macaque -- TODO confirm).
## NOTE(review): the printed values are hand-copied into the read.tree()
## strings below -- keep them in sync if the inputs change.
load("OUTPUT_DOWNSAMPLE_NeuN/Hsap_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_NeuN/PanTro_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_NeuN/RheMac_Specific_DGE.RData")
round(mean(sapply(human_specific, nrow)))/6 #25.5
round(mean(sapply(pantro_specific, nrow)))/6 #11.3
round(mean(sapply(rhemac_specific, nrow)))/45 #10.3
# TREE
n <- read.tree(text="(RheMac:10.3,(PanTro:11.3,Hsap:25.5):1);")
pdf("ChangedXyear_Tree_NeuN_Downsample_Mean.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(n), type = "unrooted", cex = 0.6,
     edge.color = "cyan4", edge.width = 2, edge.lty = 2,
     tip.color = "cyan4")
edgelabels(n$edge.length, bg = "white",col="black", font=2)
dev.off()
## NOTE(review): rm(list=ls()) wipes the whole workspace mid-script; the
## OLIG2 section below must reload everything it needs (and does).
rm(list=ls())
load("OUTPUT_DOWNSAMPLE_OLIG2/Hsap_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_OLIG2/PanTro_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_OLIG2/RheMac_Specific_DGE.RData")
round(mean(sapply(human_specific, nrow)))/6 #37.8
round(mean(sapply(pantro_specific, nrow)))/6 #17.5
round(mean(sapply(rhemac_specific, nrow)))/45 #10.7
o <- read.tree(text="(RheMac:10.7,(PanTro:17.5,Hsap:37.8):1);")
pdf("ChangedXyear_Tree_OLIG2_Downsample_Mean.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(o), type = "unrooted", cex = 0.6,
     edge.color = "magenta4", edge.width = 2, edge.lty = 2,
     tip.color = "magenta4")
edgelabels(o$edge.length, bg = "white",col="black", font=2)
dev.off()
# Isabel Tree
## Same plots with an alternative set of branch lengths ("Isabel").
n <- read.tree(text="(RheMac:3.9,(PanTro:3.5,Hsap:9.5):1);")
pdf("ChangedXyear_Tree_NeuN_Downsample_Isabel.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(n), type = "unrooted", cex = 0.6,
     edge.color = "cyan4", edge.width = 2, edge.lty = 2,
     tip.color = "cyan4")
edgelabels(n$edge.length, bg = "white",col="black", font=2)
dev.off()
o <- read.tree(text="(RheMac:3.2,(PanTro:3,Hsap:12.7):1);")
pdf("ChangedXyear_Tree_OLIG2_Downsample_Isabel.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(o), type = "unrooted", cex = 0.6,
     edge.color = "magenta4", edge.width = 2, edge.lty = 2,
     tip.color = "magenta4")
edgelabels(o$edge.length, bg = "white",col="black", font=2)
dev.off()
| /DGE/CROSS-VALIDATIONS/DOWNSAMPLE/Tree_Downsample.R | no_license | BioV/Primates_CellType | R | false | false | 2,269 | r | suppressPackageStartupMessages(library(tibble))
suppressPackageStartupMessages(library(magrittr))
suppressPackageStartupMessages(library(purrr))
library(ape)
load("OUTPUT_DOWNSAMPLE_NeuN/Hsap_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_NeuN/PanTro_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_NeuN/RheMac_Specific_DGE.RData")
round(mean(sapply(human_specific, nrow)))/6 #25.5
round(mean(sapply(pantro_specific, nrow)))/6 #11.3
round(mean(sapply(rhemac_specific, nrow)))/45 #10.3
# TREE
n <- read.tree(text="(RheMac:10.3,(PanTro:11.3,Hsap:25.5):1);")
pdf("ChangedXyear_Tree_NeuN_Downsample_Mean.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(n), type = "unrooted", cex = 0.6,
edge.color = "cyan4", edge.width = 2, edge.lty = 2,
tip.color = "cyan4")
edgelabels(n$edge.length, bg = "white",col="black", font=2)
dev.off()
rm(list=ls())
load("OUTPUT_DOWNSAMPLE_OLIG2/Hsap_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_OLIG2/PanTro_Specific_DGE.RData")
load("OUTPUT_DOWNSAMPLE_OLIG2/RheMac_Specific_DGE.RData")
round(mean(sapply(human_specific, nrow)))/6 #37.8
round(mean(sapply(pantro_specific, nrow)))/6 #17.5
round(mean(sapply(rhemac_specific, nrow)))/45 #10.7
o <- read.tree(text="(RheMac:10.7,(PanTro:17.5,Hsap:37.8):1);")
pdf("ChangedXyear_Tree_OLIG2_Downsample_Mean.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(o), type = "unrooted", cex = 0.6,
edge.color = "magenta4", edge.width = 2, edge.lty = 2,
tip.color = "magenta4")
edgelabels(o$edge.length, bg = "white",col="black", font=2)
dev.off()
# Isabel Tree
n <- read.tree(text="(RheMac:3.9,(PanTro:3.5,Hsap:9.5):1);")
pdf("ChangedXyear_Tree_NeuN_Downsample_Isabel.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(n), type = "unrooted", cex = 0.6,
edge.color = "cyan4", edge.width = 2, edge.lty = 2,
tip.color = "cyan4")
edgelabels(n$edge.length, bg = "white",col="black", font=2)
dev.off()
o <- read.tree(text="(RheMac:3.2,(PanTro:3,Hsap:12.7):1);")
pdf("ChangedXyear_Tree_OLIG2_Downsample_Isabel.pdf",width=5,height=5,useDingbats=FALSE)
plot(as.phylo(o), type = "unrooted", cex = 0.6,
edge.color = "magenta4", edge.width = 2, edge.lty = 2,
tip.color = "magenta4")
edgelabels(o$edge.length, bg = "white",col="black", font=2)
dev.off()
|
# sp functions:
## Register empty S4 stubs for spatstat's S3 classes ("ppp", "psp",
## "owin", "im", "tess") so the setAs() coercion methods below can be
## installed; the guards avoid clobbering definitions already present.
if (!isClass("ppp"))
	setClass("ppp")
if (!isClass("psp"))
	setClass("psp")
if (!isClass("owin"))
	setClass("owin")
if (!isClass("im"))
	setClass("im")
if (!isClass("tess"))
	setClass("tess")
#if (!isClass("RasterLayer"))
# setClass("RasterLayer")
## Coerce a spatstat point pattern ("ppp") to sp's SpatialPoints.
## Coordinates are rescaled by the window's unit multiplier when one is
## present; a rectangular window supplies the bounding box.
as.SpatialPoints.ppp = function(from) {
    ## Unit multiplier stored on the window, if any (defaults to 1).
    scale <- 1
    win_units <- from$window$units
    if (!is.null(win_units) && !is.null(win_units$multiplier))
        scale <- win_units$multiplier
    ## Keep the local names "mx"/"my": cbind() deparses them into the
    ## coordinate column names of the resulting object.
    mx <- scale * from$x
    storage.mode(mx) <- "double"
    my <- scale * from$y
    storage.mode(my) <- "double"
    crds <- cbind(mx, my)
    ## Only a rectangular window translates directly into a bounding box.
    if (from$window$type != "rectangle") {
        bbox <- NULL
    } else {
        win <- from$window
        bbox <- rbind(scale * as.double(win$xrange),
                      scale * as.double(win$yrange))
        colnames(bbox) <- c("min", "max")
    }
    SpatialPoints(coords = crds, bbox = bbox)
}
setAs("ppp", "SpatialPoints", as.SpatialPoints.ppp)
## Coerce a marked spatstat point pattern ("ppp") to a
## SpatialPointsDataFrame, carrying the marks as the single data column.
as.SpatialPointsDataFrame.ppp = function(from) {
    pts <- as(from, "SpatialPoints")
    marks_df <- data.frame(marks = from$marks)
    SpatialPointsDataFrame(pts, marks_df)
}
setAs("ppp", "SpatialPointsDataFrame", as.SpatialPointsDataFrame.ppp)
## Coerce the binary mask window of a spatstat "ppp" to a
## SpatialGridDataFrame with one logical column "mask" (FALSE -> NA).
as.SpatialGridDataFrame.ppp = function(from) {
    win <- from$window
    if (win$type != "mask")
        stop("window is not of type mask")
    ## Grid origin is the centre of the lower-left cell.
    cell_origin <- c(win$xrange[1] + 0.5 * win$xstep,
                     win$yrange[1] + 0.5 * win$ystep)
    cell_size <- c(diff(win$xrange) / win$dim[2],
                   diff(win$yrange) / win$dim[1])
    n_cells <- c(win$dim[2], win$dim[1])
    topo <- GridTopology(cell_origin, cell_size, n_cells)
    ## Reverse the rows and transpose so the flattened vector follows
    ## sp's top-down, column-within-row cell order.
    flipped <- t(win$m[nrow(win$m):1, ])
    flipped[!flipped] <- NA
    SpatialGridDataFrame(topo, data.frame(mask = as.vector(flipped)))
}
setAs("ppp", "SpatialGridDataFrame", as.SpatialGridDataFrame.ppp)
## Coerce a spatstat pixel image ("im") to a SpatialGridDataFrame with
## one column "v" holding the pixel values.
as.SpatialGridDataFrame.im = function(from) {
    ## Grid origin is the centre of the lower-left pixel.
    cell_origin <- c(from$xrange[1] + 0.5 * from$xstep,
                     from$yrange[1] + 0.5 * from$ystep)
    cell_size <- c(diff(from$xrange) / from$dim[2],
                   diff(from$yrange) / from$dim[1])
    n_cells <- c(from$dim[2], from$dim[1])
    topo <- GridTopology(cell_origin, cell_size, n_cells)
    ## Reverse the rows and transpose so the flattened vector follows
    ## sp's top-down, column-within-row cell order.
    flipped <- t(from$v[nrow(from$v):1, ])
    SpatialGridDataFrame(topo, data.frame(v = as.vector(flipped)))
}
setAs("im", "SpatialGridDataFrame", as.SpatialGridDataFrame.im)
## Coerce a SpatialGridDataFrame to a spatstat pixel image ("im").
as.im.SpatialGridDataFrame = function(from) {
    check_spatstat("spatstat.geom")
    img <- as.image.SpatialGridDataFrame(from)
    ## as.image.* returns list(x, y, z); im() wants the matrix transposed.
    spatstat.geom::im(t(img$z), xcol = img$x, yrow = img$y)
}
setAs("SpatialGridDataFrame", "im", as.im.SpatialGridDataFrame)
#as.im.RasterLayer <- function(from)
#{
# if (!requireNamespace("spatstat", quietly = TRUE))
# stop("package spatstat required for coercion")
# if (!requireNamespace("raster", quietly = TRUE))
# stop("package raster required for coercion")
# if (!raster::hasValues(from)) stop("values required in RasterLayer object")
# if (raster::rotated(from)) {
# stop("\n Cannot coerce because the object is rotated.\n Either coerce to SpatialPoints* from\n or first use the \"rectify\" function")
# }
# rs <- raster::res(from)
# orig <- bbox(from)[, 1] + 0.5 * rs
# dm <- dim(from)[2:1]
# xx <- unname(orig[1] + cumsum(c(0, rep(rs[1], dm[1]-1))))
# yy <- unname(orig[2] + cumsum(c(0, rep(rs[2], dm[2]-1))))
# im <- spatstat::im(matrix(raster::values(from), ncol=dm[1], nrow=dm[2],
# byrow=TRUE)[dm[2]:1,], xcol=xx, yrow=yy)
# im
#}
# contributed by Matthew Lewis https://github.com/spatstat/spatstat/issues/132
## Coerce a raster::RasterLayer to a spatstat pixel image ("im").
## For a categorical raster the values are turned into a factor using the
## raster's levels table; `factor.col.name` selects which column of that
## table supplies the labels (default: the last column).
as.im.RasterLayer <- function(from, factor.col.name = NULL)
{
  check_spatstat("spatstat.geom")
  if (!requireNamespace("raster", quietly = TRUE))
    stop("package raster required for coercion")
  if (!raster::hasValues(from)) stop("values required in RasterLayer object")
  if (raster::rotated(from)) {
    stop("\n Cannot coerce because the object is rotated.\n Either coerce to SpatialPoints* from\n or first use the \"rectify\" function")
  }
  rs <- raster::res(from)
  ## Cell-centre of the lower-left cell.
  orig <- bbox(from)[, 1] + 0.5 * rs
  ## raster dim() is (nrow, ncol); reverse to (ncol, nrow) for spatstat.
  dm <- dim(from)[2:1]
  ## Cell-centre coordinate vectors along x and y.
  xx <- unname(orig[1] + cumsum(c(0, rep(rs[1], dm[1]-1))))
  yy <- unname(orig[2] + cumsum(c(0, rep(rs[2], dm[2]-1))))
  ## Use `from` to make a vector `val` with input values
  val <- raster::values(from)
  if(is.factor(from)){
    lev <- levels(from)[[1]]
    if(!is.null(factor.col.name)){
      if(factor.col.name %in% colnames(lev)){
        factor.col <- which(colnames(lev) == factor.col.name)
      } else {
        stop("'factor.col.name' is not a column name of the raster 'from'")
      }
    }else{
      ## Default to the last column of the levels table.
      factor.col <- length(lev)
    }
    val <- factor(val, levels = lev$ID, labels = lev[[factor.col]])
  }
  ## Assign dimensions to `val` as a matrix in raster layout:
  dim(val) <- dm
  ## Transform to spatstat format
  val <- spatstat.geom::transmat(val, from = list(x="-i", y="j"), to = "spatstat")
  im <- spatstat.geom::im(val, xcol=xx, yrow=yy)
  return(im)
}
| /R/sp_spat1.R | no_license | cran/maptools | R | false | false | 4,742 | r | # sp functions:
if (!isClass("ppp"))
setClass("ppp")
if (!isClass("psp"))
setClass("psp")
if (!isClass("owin"))
setClass("owin")
if (!isClass("im"))
setClass("im")
if (!isClass("tess"))
setClass("tess")
#if (!isClass("RasterLayer"))
# setClass("RasterLayer")
as.SpatialPoints.ppp = function(from) {
mult <- 1
if (!is.null(from$window$units) && !is.null(from$window$units$multiplier))
mult <- from$window$units$multiplier
mx <- mult*from$x
storage.mode(mx) <- "double"
my <- mult*from$y
storage.mode(my) <- "double"
crds <- cbind(mx, my)
if (from$window$type == "rectangle") {
ow <- from$window
bbox <- rbind(mult*as.double(ow$xrange), mult*as.double(ow$yrange))
colnames(bbox) <- c("min", "max")
} else bbox <- NULL
SpatialPoints(coords=crds, bbox=bbox)
}
setAs("ppp", "SpatialPoints", as.SpatialPoints.ppp)
as.SpatialPointsDataFrame.ppp = function(from) {
SP <- as(from, "SpatialPoints")
SpatialPointsDataFrame(SP, data.frame(marks = from$marks))
}
setAs("ppp", "SpatialPointsDataFrame", as.SpatialPointsDataFrame.ppp)
as.SpatialGridDataFrame.ppp = function(from) {
w = from$window
if (w$type != "mask")
stop("window is not of type mask")
offset = c(w$xrange[1] + 0.5 * w$xstep, w$yrange[1] + 0.5 * w$ystep)
cellsize = c(diff(w$xrange)/w$dim[2], diff(w$yrange)/w$dim[1])
dim = c(w$dim[2], w$dim[1])
gt = GridTopology(offset, cellsize, dim)
m = t(w$m[nrow(w$m):1,])
m[!m] = NA
data = data.frame(mask = as.vector(m))
SpatialGridDataFrame(gt, data)
}
setAs("ppp", "SpatialGridDataFrame", as.SpatialGridDataFrame.ppp)
as.SpatialGridDataFrame.im = function(from) {
offset = c(from$xrange[1] + 0.5 * from$xstep, from$yrange[1] +
0.5 * from$ystep)
cellsize = c(diff(from$xrange)/from$dim[2], diff(from$yrange)/from$dim[1])
dim = c(from$dim[2], from$dim[1])
gt = GridTopology(offset, cellsize, dim)
m = t(from$v[nrow(from$v):1,])
data = data.frame(v = as.vector(m))
SpatialGridDataFrame(gt, data)
}
setAs("im", "SpatialGridDataFrame", as.SpatialGridDataFrame.im)
as.im.SpatialGridDataFrame = function(from) {
check_spatstat("spatstat.geom")
xi <- as.image.SpatialGridDataFrame(from)
spatstat.geom::im(t(xi$z), xcol=xi$x, yrow=xi$y)
}
setAs("SpatialGridDataFrame", "im", as.im.SpatialGridDataFrame)
#as.im.RasterLayer <- function(from)
#{
# if (!requireNamespace("spatstat", quietly = TRUE))
# stop("package spatstat required for coercion")
# if (!requireNamespace("raster", quietly = TRUE))
# stop("package raster required for coercion")
# if (!raster::hasValues(from)) stop("values required in RasterLayer object")
# if (raster::rotated(from)) {
# stop("\n Cannot coerce because the object is rotated.\n Either coerce to SpatialPoints* from\n or first use the \"rectify\" function")
# }
# rs <- raster::res(from)
# orig <- bbox(from)[, 1] + 0.5 * rs
# dm <- dim(from)[2:1]
# xx <- unname(orig[1] + cumsum(c(0, rep(rs[1], dm[1]-1))))
# yy <- unname(orig[2] + cumsum(c(0, rep(rs[2], dm[2]-1))))
# im <- spatstat::im(matrix(raster::values(from), ncol=dm[1], nrow=dm[2],
# byrow=TRUE)[dm[2]:1,], xcol=xx, yrow=yy)
# im
#}
# contributed by Matthew Lewis https://github.com/spatstat/spatstat/issues/132
as.im.RasterLayer <- function(from, factor.col.name = NULL)
{
check_spatstat("spatstat.geom")
if (!requireNamespace("raster", quietly = TRUE))
stop("package raster required for coercion")
if (!raster::hasValues(from)) stop("values required in RasterLayer object")
if (raster::rotated(from)) {
stop("\n Cannot coerce because the object is rotated.\n Either coerce to SpatialPoints* from\n or first use the \"rectify\" function")
}
rs <- raster::res(from)
orig <- bbox(from)[, 1] + 0.5 * rs
dm <- dim(from)[2:1]
xx <- unname(orig[1] + cumsum(c(0, rep(rs[1], dm[1]-1))))
yy <- unname(orig[2] + cumsum(c(0, rep(rs[2], dm[2]-1))))
## Use `from` to make a vector `val` with input values
val <- raster::values(from)
if(is.factor(from)){
lev <- levels(from)[[1]]
if(!is.null(factor.col.name)){
if(factor.col.name %in% colnames(lev)){
factor.col <- which(colnames(lev) == factor.col.name)
} else {
stop("'factor.col.name' is not a column name of the raster 'from'")
}
}else{
factor.col <- length(lev)
}
val <- factor(val, levels = lev$ID, labels = lev[[factor.col]])
}
## Assign dimensions to `val` as a matrix in raster layout:
dim(val) <- dm
## Transform to spatstat format
val <- spatstat.geom::transmat(val, from = list(x="-i", y="j"), to = "spatstat")
im <- spatstat.geom::im(val, xcol=xx, yrow=yy)
return(im)
}
|
#' Return a sorted vector of nodes id
#'
#' Topologically sorts a directed acyclic graph using Kahn's algorithm.
#'
#' @param g An igraph object of a DAG
#' @param random Boolean, whether the order of selected nodes is randomised in the process
#' @return A data frame with two columns: "id" is the names of nodes in g, and "id_num" is the topological ordering
#' @examples
#' df0 <- data.frame(from = c("a", "b"), to = c("b", "c"), stringsAsFactors = FALSE)
#' g0 <- igraph::graph_from_data_frame(df0, directed = TRUE)
#' topo_sort_kahn(g0)
#' @importFrom igraph as_data_frame
#' @importFrom igraph V
#' @importFrom igraph is_dag
#' @export
topo_sort_kahn <- function(g, random = FALSE) {
  if (!is_dag(g)) {
    stop("g has to be a DAG")
  }
  ## Kahn's algorithm on the edge list: repeatedly emit a node with no
  ## remaining incoming edges, removing its outgoing edges as we go.
  e0 <- igraph::as_data_frame(g)
  names(e0) <- c("citing", "cited")
  v <- names(igraph::V(g))
  v1 <- sort(unique(c(e0$citing, e0$cited)))
  l <- setdiff(v, v1)  # isolated vertices can be emitted straight away
  s0 <- sort(setdiff(e0$citing, e0$cited))  # sources: no incoming edges
  while (length(s0) > 0L) {
    if (random) {
      n0 <- sample(s0, 1L)
      s0 <- s0[s0 != n0]
    } else {
      n0 <- s0[1L]
      s0 <- s0[-1L]
    }
    l <- c(l, n0)
    ## outgoing edges of n0
    i0 <- e0$citing == n0
    e1 <- e0[i0, , drop = FALSE]
    e0 <- e0[!i0, , drop = FALSE]
    if (nrow(e1) != 0L) {
      ## targets of n0 with no other remaining incoming edges become sources
      e2 <- setdiff(e1$cited, e0$cited)
      if (random) {
        e2 <- sample(e2, length(e2))
      }
      s0 <- c(s0, e2)
    }
  }
  if (nrow(e0) > 0L) {
    stop("topo_sort_kahn: graph has at least 1 cycle")
  }
  ## Fix: the original also built a reordered adjacency matrix here
  ## (as_adjacency_matrix(g)[o, o]) that was never used -- a potentially
  ## expensive dead computation, now removed.
  data.frame(
    id = l,
    id_num = seq_along(l),
    stringsAsFactors = FALSE
  )
}
#' Construct the giant component of the network from two data frames
#'
#' @param edgelist A data frame with (at least) two columns: from and to
#' @param nodelist NULL, or a data frame with (at least) one column: name, that contains the nodes to include
#' @param gc Boolean, if 'TRUE' (default) then the giant component is extracted, if 'FALSE' then the whole graph is returned
#' @importFrom dplyr semi_join
#' @importFrom igraph graph_from_data_frame decompose.graph gorder
#' @return An igraph object & a connected graph if gc is 'TRUE'
#' @examples
#' from <- c("1", "2", "4")
#' to <- c("2", "3", "5")
#' edges <- data.frame(from = from, to = to, stringsAsFactors = FALSE)
#' nodes <- data.frame(name = c("1", "2", "3", "4", "5"), stringsAsFactors = FALSE)
#' df_to_graph(edges, nodes)
#' @export
df_to_graph <- function(edgelist, nodelist = NULL, gc = TRUE) {
  if (is.null(nodelist)) {
    g <- igraph::graph_from_data_frame(edgelist)
  } else {
    ## semi join as some nodes may have become obsolete
    df <- dplyr::semi_join(edgelist, nodelist, c("to" = "name"))
    g <- igraph::graph_from_data_frame(df)
  }
  if (gc) {
    ## Keep the component with the largest number of vertices.
    ## (vapply replaces the original manual index loop over seq(n).)
    comps <- igraph::decompose.graph(g)
    sizes <- vapply(comps, igraph::gorder, numeric(1))
    g <- comps[[which.max(sizes)]]
  }
  g
}
#' Graph of dependencies of all CRAN packages
#'
#' \code{get_graph_all_packages} returns an igraph object representing the network of one type of dependencies of all CRAN packages.
#' @param type One of the following dependency words: "Depends", "Imports", "LinkingTo", "Suggests", "Reverse depends", "Reverse imports", "Reverse linking to", "Reverse suggests", up to letter case and space replaced by underscore
#' @param gc Boolean, if 'TRUE' (default) then the giant component is extracted, if 'FALSE' then the whole graph is returned
#' @return An igraph object & a connected graph if gc is 'TRUE'
#' @examples
#' \dontrun{
#' g0.cran.depends <- get_graph_all_packages("depends")
#' g1.cran.imports <- get_graph_all_packages("reverse imports")
#' }
#' @export
get_graph_all_packages <- function(type, gc = TRUE) {
  ## change params to align with others
  type <- check_dep_word(type)
  reverse <- substr(type, 1L, 7L) == "Reverse"
  ## Scalar condition: plain if/else instead of vectorised ifelse().
  type <- if (reverse) substr(type, 9L, nchar(type)) else tolower(type)
  ## "LinkingTo" is stored as "linking to" in the dependency table.
  if (type == "linkingto") {
    type <- "linking to"
  }
  df0 <- get_dep_all_packages()
  df1 <- df0[df0$type == type & df0$reverse == reverse, ]  # edgelist
  df2 <- data.frame(name = unique(df0$from))
  df_to_graph(df1, df2, gc)
}
| /crandep/R/graph.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 4,385 | r | #' Return a sorted vector of nodes id
#'
#' @param g An igraph object of a DAG
#' @param random Boolean, whether the order of selected nodes is randomised in the process
#' @return A data frame with two columns: "id" is the names of nodes in g, and "id_num" is the topological ordering
#' @examples
#' df0 <- data.frame(from = c("a", "b"), to = c("b", "c"), stringsAsFactors = FALSE)
#' g0 <- igraph::graph_from_data_frame(df0, directed = TRUE)
#' topo_sort_kahn(g0)
#' @importFrom igraph as_data_frame
#' @importFrom igraph V
#' @importFrom igraph as_adjacency_matrix
#' @importFrom igraph is_dag
#' @export
topo_sort_kahn <- function(g, random = FALSE) {
if (!is_dag(g)) {
stop("g has to be a DAG")
}
e0 <- igraph::as_data_frame(g)
names(e0) <- c("citing", "cited")
v <- names(igraph::V(g))
v1 <- sort(unique(c(e0$citing, e0$cited)))
l <- setdiff(v, v1)
s0 <- sort(setdiff(e0$citing, e0$cited))
while (length(s0) > 0L) {
if (random) {
n0 <- sample(s0, 1L)
s0 <- s0[s0 != n0]
} else {
n0 <- s0[1L]
s0 <- s0[-1L]
}
l <- c(l, n0)
## outgoing edges of n0
i0 <- e0$citing == n0
e1 <- e0[i0, , drop = FALSE]
e0 <- e0[!i0, , drop = FALSE]
if (nrow(e1) != 0L) {
e2 <- setdiff(e1$cited, e0$cited)
if (random) {
e2 <- sample(e2, length(e2))
}
s0 <- c(s0, e2)
}
}
if (nrow(e0) > 0L) {
stop("topo_sort_kahn: graph has at least 1 cycle")
}
o <- match(l, v)
a <- as_adjacency_matrix(g)[o, o]
data.frame(
id = l,
id_num = seq_along(l),
stringsAsFactors = FALSE
)
}
#' Construct the giant component of the network from two data frames
#'
#' @param edgelist A data frame with (at least) two columns: from and to
#' @param nodelist NULL, or a data frame with (at least) one column: name, that contains the nodes to include
#' @param gc Boolean, if 'TRUE' (default) then the giant component is extracted, if 'FALSE' then the whole graph is returned
#' @importFrom dplyr semi_join
#' @importFrom igraph graph_from_data_frame decompose.graph gorder
#' @return An igraph object & a connected graph if gc is 'TRUE'
#' @examples
#' from <- c("1", "2", "4")
#' to <- c("2", "3", "5")
#' edges <- data.frame(from = from, to = to, stringsAsFactors = FALSE)
#' nodes <- data.frame(name = c("1", "2", "3", "4", "5"), stringsAsFactors = FALSE)
#' df_to_graph(edges, nodes)
#' @export
df_to_graph <- function(edgelist, nodelist = NULL, gc = TRUE) {
if (is.null(nodelist)) {
g <- igraph::graph_from_data_frame(edgelist)
} else {
df <- dplyr::semi_join(edgelist, nodelist, c("to" = "name")) # semi join as some nodes may have become obsolete
g <- igraph::graph_from_data_frame(df)
}
if (gc) {
l <- igraph::decompose.graph(g)
n <- length(l)
v <- rep(as.integer(NA), n)
for (i in seq(n)) {
v[i] <- igraph::gorder(l[[i]])
}
g <- l[[which.max(v)]]
}
g
}
#' Graph of dependencies of all CRAN packages
#'
#' \code{get_graph_all_packages} returns an igraph object representing the network of one type of dependencies of all CRAN packages.
#' @param type One of the following dependency words: "Depends", "Imports", "LinkingTo", "Suggests", "Reverse depends", "Reverse imports", "Reverse linking to", "Reverse suggests", up to letter case and space replaced by underscore
#' @param gc Boolean, if 'TRUE' (default) then the giant component is extracted, if 'FALSE' then the whole graph is returned
#' @return An igraph object & a connected graph if gc is 'TRUE'
#' @examples
#' \dontrun{
#' g0.cran.depends <- get_graph_all_packages("depends")
#' g1.cran.imports <- get_graph_all_packages("reverse imports")
#' }
#' @export
get_graph_all_packages <- function(type, gc = TRUE) {
  ## change params to align with others
  type <- check_dep_word(type)
  # `type` is a single string here, so use startsWith() and plain if/else
  # instead of substr() comparison and scalar ifelse() (which evaluates both
  # branches and is intended for vectors).
  reverse <- startsWith(type, "Reverse")
  type <- if (reverse) substr(type, 9L, nchar(type)) else tolower(type)
  if (type == "linkingto") {
    type <- "linking to"
  }
  df0 <- get_dep_all_packages()
  df1 <- df0[df0$type == type & df0$reverse == reverse, ] # edgelist
  df2 <- data.frame(name = unique(df0$from)) # nodelist
  df_to_graph(df1, df2, gc)
}
|
####################################
### how to clean NA in tag_types ###
####################################
# Fill in missing tag_type values in the train/test sets by matching the
# summary/description fields, then consolidate synonymous or rare tags.
# FIX(review): two subset conditions below referenced a non-existent
# `desription` column (compare the correctly spelled `description` used for
# the "bulk" match); `$` returned NULL, so those assignments silently did
# nothing. Corrected to `description`.
### check NA in tag_type ###
test_miss_type<-subset(test, is.na(test$tag_type))
train_miss_type<-subset(train, is.na(train$tag_type))
train$tag_type<-as.character(train$tag_type)
test$tag_type<-as.character(test$tag_type)
### train set
train$tag_type[train$summary=="Abandoned Vehicle"]<-'abandoned_vehicles'
# NOTE(review): this multi-line literal includes the embedded newlines and
# leading spaces exactly as written; confirm it matches the stored values.
train$tag_type[train$description=="This issue was reported to
the City of Oakland Public Works Agency via phone (510-615-5566),
email (pwacallcenter@oaklandnet.com), or web (www.oaklandpw.com)."]<-'trash'
train$tag_type[train$summary=="Trash/bulk Pick-ups"] <-'trash'
train$tag_type[train$summary=="Illegal Dumping - debris, appliances, etc."] <-'trash'
# agrep() does approximate (fuzzy) matching, case-insensitive here
train$tag_type[agrep("Rodent", train$summary, ignore.case=TRUE)] <-'rodents'
train$tag_type[agrep("tree", train$summary, ignore.case=TRUE)] <-'tree'
train$tag_type[agrep("limbs", train$summary, ignore.case=TRUE)] <-'tree'
train$tag_type[agrep("grass", train$summary, ignore.case=TRUE)] <-'tree'
train$tag_type[agrep("pothole", train$summary, ignore.case=TRUE)] <-'pothole'
train$tag_type[agrep("Graffiti", train$summary, ignore.case=TRUE)] <-'graffiti'
train$tag_type[agrep("brush", train$summary, ignore.case=TRUE)] <-'brush_bulk'
train$tag_type[agrep("bulk", train$summary, ignore.case=TRUE)] <-'brush_bulk'
train$tag_type[agrep("bulk", train$description, ignore.case=TRUE)] <-'brush_bulk'
train$tag_type[agrep("light", train$summary, ignore.case=TRUE)] <-'street_light'
train$tag_type[agrep("signal", train$summary, ignore.case=TRUE)] <-'street_signal'
train$tag_type[agrep("prostitutes", train$summary, ignore.case=TRUE)] <-'prostitutes'
## integrate similar tags
train$tag_type[train$tag_type=='abandoned_vehicle']<-'abandoned_vehicles'
train$tag_type[train$tag_type=="roadkill"] <-'road_safety'
train$tag_type[train$tag_type=="pedestrian_light"] <-'street_light'
train$tag_type[train$tag_type=="rodents"] <-'animal_problem'
train$tag_type[train$tag_type=="bad_driving"] <-'road_safety'
train$tag_type[train$tag_type=="noise_complaint"] <-'noise_odor'
train$tag_type[train$tag_type=="odor"] <-'noise_odor'
train$tag_type[train$tag_type=="zoning"] <-'traffic'
train$tag_type[train$tag_type=="drain_problem"] <-'pothole'
## change "small tags" into "other"
train$tag_type[train$tag_type=="public_art"] <-'other'
train$tag_type[train$tag_type=="public_concern"] <-'other'
train$tag_type[train$tag_type=="lost_and_found"] <-'other'
train$tag_type[is.na(train$tag_type)]<- "other"
train.other<-subset(train,tag_type=="other")
### test set
test$tag_type[test$tag_type=='abandoned_vehicle']<-'abandoned_vehicles'
# FIX(review): same `desription` -> `description` typo as above.
test$tag_type[test$description=="This issue was reported to
the City of Oakland Public Works Agency via phone (510-615-5566),
email (pwacallcenter@oaklandnet.com), or web (www.oaklandpw.com)."]<-'trash'
test$tag_type[agrep("Trash/bulk", test$summary, ignore.case=TRUE)] <-'trash'
test$tag_type[agrep("Dumping", test$summary, ignore.case=TRUE)] <-'trash'
test$tag_type[agrep("Rodent", test$summary, ignore.case=TRUE)] <-'rodents'
test$tag_type[agrep("tree", test$summary, ignore.case=TRUE)] <-'tree'
test$tag_type[agrep("limbs", test$summary, ignore.case=TRUE)] <-'tree'
test$tag_type[agrep("grass", test$summary, ignore.case=TRUE)] <-'tree'
test$tag_type[agrep("pothole", test$summary, ignore.case=TRUE)] <-'pothole'
test$tag_type[agrep("Graffiti", test$summary, ignore.case=TRUE)] <-'graffiti'
test$tag_type[agrep("brush", test$summary, ignore.case=TRUE)] <-'brush_bulk'
test$tag_type[agrep("bulk", test$summary, ignore.case=TRUE)] <-'brush_bulk'
test$tag_type[agrep("bulk", test$description, ignore.case=TRUE)] <-'brush_bulk'
test$tag_type[agrep("light", test$summary, ignore.case=TRUE)] <-'street_light'
test$tag_type[agrep("signal", test$summary, ignore.case=TRUE)] <-'street_signal'
test$tag_type[agrep("prostitutes", test$summary, ignore.case=TRUE)] <-'prostitutes'
## integrate similar tags
test$tag_type[test$tag_type=="pedestrian_light"] <-'street_light'
test$tag_type[test$tag_type=="roadkill"] <-'road_safety'
test$tag_type[test$tag_type=="rodents"] <-'animal_problem'
test$tag_type[test$tag_type=="bad_driving"] <-'road_safety'
test$tag_type[test$tag_type=="noise_complaint"] <-'noise_odor'
test$tag_type[test$tag_type=="odor"] <-'noise_odor'
test$tag_type[test$tag_type=="zoning"] <-'traffic'
test$tag_type[test$tag_type=="drain_problem"] <-'pothole'
## change "small tags" into "other"
test$tag_type[test$tag_type=="bus_lane"] <-'other'
test$tag_type[is.na(test$tag_type)]<- "other"
test.other<-subset(test,tag_type=="other")
### combine into full data ###
train$tag_type<-as.factor(train$tag_type)
test$tag_type<-as.factor(test$tag_type)
fulldata<-rbind(test,train)
| /data_clean2.1.r | no_license | angelwu120/Kaggle_311 | R | false | false | 4,862 | r | ####################################
### how to clean NA in tag_types ###
####################################
# Fill in missing tag_type values in the train/test sets by matching the
# summary/description fields, then consolidate synonymous or rare tags.
# FIX(review): two subset conditions below referenced a non-existent
# `desription` column (compare the correctly spelled `description` used for
# the "bulk" match); `$` returned NULL, so those assignments silently did
# nothing. Corrected to `description`.
### check NA in tag_type ###
test_miss_type<-subset(test, is.na(test$tag_type))
train_miss_type<-subset(train, is.na(train$tag_type))
train$tag_type<-as.character(train$tag_type)
test$tag_type<-as.character(test$tag_type)
### train set
train$tag_type[train$summary=="Abandoned Vehicle"]<-'abandoned_vehicles'
# NOTE(review): this multi-line literal includes the embedded newlines and
# leading spaces exactly as written; confirm it matches the stored values.
train$tag_type[train$description=="This issue was reported to
the City of Oakland Public Works Agency via phone (510-615-5566),
email (pwacallcenter@oaklandnet.com), or web (www.oaklandpw.com)."]<-'trash'
train$tag_type[train$summary=="Trash/bulk Pick-ups"] <-'trash'
train$tag_type[train$summary=="Illegal Dumping - debris, appliances, etc."] <-'trash'
# agrep() does approximate (fuzzy) matching, case-insensitive here
train$tag_type[agrep("Rodent", train$summary, ignore.case=TRUE)] <-'rodents'
train$tag_type[agrep("tree", train$summary, ignore.case=TRUE)] <-'tree'
train$tag_type[agrep("limbs", train$summary, ignore.case=TRUE)] <-'tree'
train$tag_type[agrep("grass", train$summary, ignore.case=TRUE)] <-'tree'
train$tag_type[agrep("pothole", train$summary, ignore.case=TRUE)] <-'pothole'
train$tag_type[agrep("Graffiti", train$summary, ignore.case=TRUE)] <-'graffiti'
train$tag_type[agrep("brush", train$summary, ignore.case=TRUE)] <-'brush_bulk'
train$tag_type[agrep("bulk", train$summary, ignore.case=TRUE)] <-'brush_bulk'
train$tag_type[agrep("bulk", train$description, ignore.case=TRUE)] <-'brush_bulk'
train$tag_type[agrep("light", train$summary, ignore.case=TRUE)] <-'street_light'
train$tag_type[agrep("signal", train$summary, ignore.case=TRUE)] <-'street_signal'
train$tag_type[agrep("prostitutes", train$summary, ignore.case=TRUE)] <-'prostitutes'
## integrate similar tags
train$tag_type[train$tag_type=='abandoned_vehicle']<-'abandoned_vehicles'
train$tag_type[train$tag_type=="roadkill"] <-'road_safety'
train$tag_type[train$tag_type=="pedestrian_light"] <-'street_light'
train$tag_type[train$tag_type=="rodents"] <-'animal_problem'
train$tag_type[train$tag_type=="bad_driving"] <-'road_safety'
train$tag_type[train$tag_type=="noise_complaint"] <-'noise_odor'
train$tag_type[train$tag_type=="odor"] <-'noise_odor'
train$tag_type[train$tag_type=="zoning"] <-'traffic'
train$tag_type[train$tag_type=="drain_problem"] <-'pothole'
## change "small tags" into "other"
train$tag_type[train$tag_type=="public_art"] <-'other'
train$tag_type[train$tag_type=="public_concern"] <-'other'
train$tag_type[train$tag_type=="lost_and_found"] <-'other'
train$tag_type[is.na(train$tag_type)]<- "other"
train.other<-subset(train,tag_type=="other")
### test set
test$tag_type[test$tag_type=='abandoned_vehicle']<-'abandoned_vehicles'
# FIX(review): same `desription` -> `description` typo as above.
test$tag_type[test$description=="This issue was reported to
the City of Oakland Public Works Agency via phone (510-615-5566),
email (pwacallcenter@oaklandnet.com), or web (www.oaklandpw.com)."]<-'trash'
test$tag_type[agrep("Trash/bulk", test$summary, ignore.case=TRUE)] <-'trash'
test$tag_type[agrep("Dumping", test$summary, ignore.case=TRUE)] <-'trash'
test$tag_type[agrep("Rodent", test$summary, ignore.case=TRUE)] <-'rodents'
test$tag_type[agrep("tree", test$summary, ignore.case=TRUE)] <-'tree'
test$tag_type[agrep("limbs", test$summary, ignore.case=TRUE)] <-'tree'
test$tag_type[agrep("grass", test$summary, ignore.case=TRUE)] <-'tree'
test$tag_type[agrep("pothole", test$summary, ignore.case=TRUE)] <-'pothole'
test$tag_type[agrep("Graffiti", test$summary, ignore.case=TRUE)] <-'graffiti'
test$tag_type[agrep("brush", test$summary, ignore.case=TRUE)] <-'brush_bulk'
test$tag_type[agrep("bulk", test$summary, ignore.case=TRUE)] <-'brush_bulk'
test$tag_type[agrep("bulk", test$description, ignore.case=TRUE)] <-'brush_bulk'
test$tag_type[agrep("light", test$summary, ignore.case=TRUE)] <-'street_light'
test$tag_type[agrep("signal", test$summary, ignore.case=TRUE)] <-'street_signal'
test$tag_type[agrep("prostitutes", test$summary, ignore.case=TRUE)] <-'prostitutes'
## integrate similar tags
test$tag_type[test$tag_type=="pedestrian_light"] <-'street_light'
test$tag_type[test$tag_type=="roadkill"] <-'road_safety'
test$tag_type[test$tag_type=="rodents"] <-'animal_problem'
test$tag_type[test$tag_type=="bad_driving"] <-'road_safety'
test$tag_type[test$tag_type=="noise_complaint"] <-'noise_odor'
test$tag_type[test$tag_type=="odor"] <-'noise_odor'
test$tag_type[test$tag_type=="zoning"] <-'traffic'
test$tag_type[test$tag_type=="drain_problem"] <-'pothole'
## change "small tags" into "other"
test$tag_type[test$tag_type=="bus_lane"] <-'other'
test$tag_type[is.na(test$tag_type)]<- "other"
test.other<-subset(test,tag_type=="other")
### combine into full data ###
train$tag_type<-as.factor(train$tag_type)
test$tag_type<-as.factor(test$tag_type)
fulldata<-rbind(test,train)
|
# Build a small data frame describing four student-council members.
name <- c("Chika", "Miyuki", "Kaguya", "Yuu")
sex <- c("f", "m", "f", "m")
age <- c(18, 18, 18, 17)
# Assemble the three parallel vectors into one data frame.
council <- data.frame(name = name, sex = sex, age = age)
# Auto-print the result at top level.
council
| /DataFrame.R | no_license | producer82/R-Language-Study | R | false | false | 149 | r | name <- c('Chika', 'Miyuki', 'Kaguya', 'Yuu')
# Remaining member attributes (the `name` vector is defined just above).
sex <- c("f", "m", "f", "m")
age <- c(18, 18, 18, 17)
# Assemble the parallel vectors into one data frame.
council <- data.frame(name = name, sex = sex, age = age)
# Auto-print the result at top level.
council
|
# Functions associated with initialization of the OM.
#' Create the OM
#'
#' This function manipulates the OM as needed so that it can be used as an
#' operating model.
#' @author Kathryn Doering & Nathan Vaughan
#' @template OM_out_dir
#' @template overwrite
#' @param nyrs Number of years beyond the years included in the OM to run the
#'  MSE. A single integer value.
#' @param nyrs_assess The number of years between assessments. This is used to
#'  structure the forecast file for use in the OM.
#' @param nscen The scenario number
#' @param scen_name The scenario name
#' @param niter the iteration number
#' @param writedat Should a new datafile be written?
#' @param future_om_dat An optional data_frame including changes that should
#'  be made after the end year of the input model. Including parameter variations,
#'  recruitment deviations, and implementation errors.
#' @param verify_OM Should the model be run without estimation and some basic
#'  checks done to verify that the OM can run? Defaults to TRUE.
#' @template sample_struct_hist
#' @template sample_struct
#' @template seed
#' @template verbose
#' @return A modified datafile
#' @import r4ss
# NOTE(review): nyrs_assess, nscen, scen_name, and niter are accepted but not
# referenced anywhere in this function body; confirm whether they are retained
# only for interface compatibility with callers.
create_OM <- function(OM_out_dir,
                      overwrite = TRUE,
                      writedat = TRUE,
                      verbose = FALSE,
                      nyrs = NULL,
                      nyrs_assess = NULL,
                      nscen = 1,
                      scen_name = NULL,
                      niter = 1,
                      future_om_dat = NULL,
                      verify_OM = TRUE,
                      sample_struct_hist = NULL,
                      sample_struct = NULL,
                      seed = NULL) {
  start <- r4ss::SS_readstarter(file.path(OM_out_dir, "starter.ss"),
    verbose = FALSE
  )
  # modify starter to use as OM ----
  # Draw a random seed when none is supplied so each OM run is stochastic.
  if (is.null(seed)) {
    seed <- stats::runif(1, 1, 99999999)
  }
  start[["init_values_src"]] <- 1 # 1 = read initial values from ss.par
  start[["detailed_age_structure"]] <- 1
  start[["last_estimation_phase"]] <- 0 # no estimation: OM runs with fixed values
  start[["depl_basis"]] <- 0
  start[["depl_denom_frac"]] <- 1
  start[["SPR_basis"]] <- 0
  start[["F_report_units"]] <- 0
  start[["F_report_basis"]] <- 0
  start[["F_age_range"]] <- NULL
  start[["ALK_tolerance"]] <- 0
  start[["seed"]] <- seed
  r4ss::SS_writestarter(start,
    dir = OM_out_dir, verbose = FALSE,
    overwrite = TRUE
  )
  # run model to get standardized output ----
  # No-estimation run; regenerates Report.sso and the .ss_new files read below.
  run_ss_model(OM_out_dir, "-maxfn 0 -phase 50 -nohess",
    debug_par_run = TRUE,
    verbose = verbose
  )
  # read in files to use ----
  dat <- r4ss::SS_readdat(
    file = file.path(OM_out_dir, start[["datfile"]]),
    verbose = FALSE, section = 1
  )
  forelist <- r4ss::SS_readforecast(
    file = file.path(OM_out_dir, "forecast.ss"),
    readAll = TRUE, verbose = FALSE
  )
  ctl <- r4ss::SS_readctl(file.path(OM_out_dir, start[["ctlfile"]]),
    verbose = FALSE,
    use_datlist = TRUE, datlist = dat
  )
  outlist <- r4ss::SS_output(OM_out_dir,
    verbose = FALSE, printstats = FALSE,
    covar = FALSE
  )
  parlist <- r4ss::SS_readpar_3.30(
    parfile = file.path(OM_out_dir, "ss.par"),
    datsource = dat, ctlsource = ctl,
    verbose = FALSE
  )
  # model checks ----
  if (ctl[["F_Method"]] == 1) {
    stop(
      "SSMSE cannot work with models that use F method 1 (Pope's ",
      "approximation). Please use F method 2 or 3 instead (3 is ",
      "recommended over method 1)."
    )
  }
  # modify forecast file ----
  # NOTE(review): currentNforecast is assigned but not used below.
  currentNforecast <- forelist[["Nforecastyrs"]]
  forelist[["benchmarks"]] <- 0 #
  forelist[["Bmark_relF_Basis"]] <- 1 # error generated by SS3 if forecast off, and this is 2
  forelist[["Forecast"]] <- 0 #
  forelist[["Nforecastyrs"]] <- 1 #
  forelist[["FirstYear_for_caps_and_allocations"]] <- dat[["endyr"]] + nyrs + 2 #
  forelist[["stddev_of_log_catch_ratio"]] <- 0
  # modify ctl file ----
  # in the context of an OM, do not want to use the bias adjustment ramp, so just
  # turn off and make the recdevs years always the same.
  # UPDATE NOTE: For the OM we do not want bias adjustment in the future. However we
  # do want the historic period to be consistent with the original assessment model.
  # We therefore need to add advanced options if not already specified. I am also
  # updating the extend EM process to fix the main recdevs end year. This way all new
  # recdevs become late phase/forecast recdevs which are not subject to sum to zero
  # constraints or bias adjustment.
  if (!all(ctl[["time_vary_auto_generation"]] == 1)) {
    # NOTE(review): sets all five flags to 1; confirm that 1 corresponds to
    # "off" for time-varying autogeneration in this SS3 version.
    warning("Turning off autogeneration of time varying lines in the control file of the OM")
    ctl[["time_vary_auto_generation"]] <- rep(1, times = 5)
  }
  # If the model did not use advanced recdev options, switch them on with
  # settings that reproduce the historic period but disable future bias
  # adjustment and forecast recruitment estimation.
  if (ctl[["recdev_adv"]] == 0) {
    ctl[["recdev_adv"]] <- 1
    ctl[["recdev_early_start"]] <- 0
    ctl[["recdev_early_phase"]] <- -4
    ctl[["Fcast_recr_phase"]] <- 0
    ctl[["lambda4Fcast_recr_like"]] <- 0
    ctl[["last_early_yr_nobias_adj"]] <- ctl[["MainRdevYrFirst"]] - 1
    ctl[["first_yr_fullbias_adj"]] <- ctl[["MainRdevYrFirst"]]
    ctl[["last_yr_fullbias_adj"]] <- ctl[["MainRdevYrLast"]]
    ctl[["first_recent_yr_nobias_adj"]] <- ctl[["MainRdevYrLast"]] + 1
    ctl[["max_bias_adj"]] <- 0.8
    ctl[["period_of_cycles_in_recr"]] <- 0
    ctl[["min_rec_dev"]] <- -10
    ctl[["max_rec_dev"]] <- 10
    ctl[["N_Read_recdevs"]] <- 0
    ctl[["recdev_input"]] <- NULL
  }
  # if (ctl[["recdev_early_start"]] <= 0) {
  #   first_year <- ctl[["MainRdevYrFirst"]] + ctl[["recdev_early_start"]]
  # } else if (ctl[["recdev_early_start"]] < ctl[["MainRdevYrFirst"]]) {
  #   first_year <- ctl[["recdev_early_start"]]
  # } else {
  #   (
  #     first_year <- ctl[["MainRdevYrFirst"]]
  #   )
  # }
  first_year <- ctl[["MainRdevYrFirst"]]
  # modify par file ----
  # note: don't include early recdevs in all_recdevs
  all_recdevs <- as.data.frame(rbind(parlist[["recdev1"]], parlist[["recdev2"]], parlist[["recdev_forecast"]]))
  # get recdevs for all model years
  all_recdevs <- all_recdevs[all_recdevs[["year"]] >= first_year & all_recdevs[["year"]] <= (dat[["endyr"]]), ] #
  # Split devs into main-period (first_year:MainRdevYrLast) and
  # late/forecast-period rows; years with no existing dev get 0.
  new_recdevs_df <- data.frame(year = first_year:ctl[["MainRdevYrLast"]], recdev = NA)
  fore_recdevs_df <- data.frame(year = (ctl[["MainRdevYrLast"]] + 1):(dat[["endyr"]] + nyrs + 1), recdev = NA) #
  temp_yrs <- (first_year:(dat[["endyr"]] + nyrs + 1))
  for (i in seq_along(temp_yrs)) { #
    tmp_yr <- temp_yrs[i] #
    if (tmp_yr <= ctl[["MainRdevYrLast"]]) {
      # 'step' tracks the last main-period index so forecast rows start at 1.
      # NOTE(review): 'step' is only set inside this branch; confirm at least
      # one year <= MainRdevYrLast always exists, otherwise it is undefined.
      step <- i
      if (length(all_recdevs[all_recdevs[["year"]] == tmp_yr, "year"]) == 0) {
        new_recdevs_df[i, "recdev"] <- 0 # just assume no rec devs
      } else {
        new_recdevs_df[i, "recdev"] <-
          all_recdevs[all_recdevs[["year"]] == tmp_yr, "recdev"]
      }
    } else {
      if (length(all_recdevs[all_recdevs[["year"]] == tmp_yr, "year"]) == 0) {
        fore_recdevs_df[(i - step), "recdev"] <- 0
      } else {
        fore_recdevs_df[(i - step), "recdev"] <-
          all_recdevs[all_recdevs[["year"]] == tmp_yr, "recdev"]
      }
    }
  }
  # add recdevs to the parlist
  new_recdevs_mat <- as.matrix(new_recdevs_df)
  new_fore_recdevs_mat <- as.matrix(fore_recdevs_df)
  if (!is.null(parlist[["recdev1"]])) {
    parlist[["recdev1"]] <- new_recdevs_mat
  } else if (!is.null(parlist[["recdev2"]])) {
    parlist[["recdev2"]] <- new_recdevs_mat
  } else {
    stop("no recdevs in initial OM model. something is wrong")
  }
  parlist[["recdev_forecast"]] <- new_fore_recdevs_mat
  # use report.sso time series table to find the F's to put into the parlist.
  F_list <- get_F(
    timeseries = outlist[["timeseries"]],
    fleetnames = dat[["fleetinfo"]][dat[["fleetinfo"]][["type"]] %in% c(1, 2), "fleetname"]
  )
  # SINGLE_RUN_MODS:
  # Extend the catch data frame with placeholder rows (tiny catch, small SE)
  # for every projection year x season x fleet combination.
  update_F_years <- (dat[["endyr"]] + 1):(dat[["endyr"]] + nyrs)
  default_Catch <- data.frame(
    year = sort(rep(update_F_years, (length(unique(dat[["catch"]][, "seas"])) * length(unique(dat[["catch"]][, "fleet"]))))),
    seas = rep(sort(rep(unique(dat[["catch"]][, "seas"]), length(unique(dat[["catch"]][, "fleet"])))), length(update_F_years)),
    fleet = rep(sort(unique(dat[["catch"]][, "fleet"])), (length(unique(dat[["catch"]][, "seas"])) * length(update_F_years))),
    catch = rep(0.001, (length(unique(dat[["catch"]][, "seas"])) * length(unique(dat[["catch"]][, "fleet"])) * length(update_F_years))),
    catch_se = rep(0.01, (length(unique(dat[["catch"]][, "seas"])) * length(unique(dat[["catch"]][, "fleet"])) * length(update_F_years)))
  )
  new_catch <- dat[["catch"]][((dat[["catch"]][, "year"] <= dat[["endyr"]] & dat[["catch"]][, "year"] >= (-dat[["endyr"]])) | dat[["catch"]][, "year"] == -9999), , drop = FALSE]
  new_catch <- rbind(new_catch, default_Catch)
  new_catch <- new_catch[order(new_catch[, "fleet"], new_catch[, "year"], new_catch[, "seas"]), ]
  # Helper: look up a user-specified catch_se for one (year, seas, fleet) row;
  # fall back to the row's existing SE (element 5) when no match is found.
  update_SE_catch <- function(new_catch, sample_struct_in) {
    temp_samp <- sample_struct_in[sample_struct_in[, "year"] == new_catch[1] &
      sample_struct_in[, "seas"] == new_catch[2] &
      sample_struct_in[, "fleet"] == new_catch[3], "catch_se"]
    if (length(temp_samp) == 1) {
      new_SE <- temp_samp
    } else {
      new_SE <- new_catch[5]
    }
    return(new_SE)
  }
  if (!is.null(sample_struct[["catch"]])) {
    new_catch[, "catch_se"] <- apply(new_catch, 1, update_SE_catch, sample_struct_in = sample_struct[["catch"]])
  }
  dat[["catch"]] <- new_catch
  # Back-fill F_rate so every projection year x season x fleet has a row:
  # missing combinations reuse the most recent F for that season/fleet
  # (tracked in default_F), or 0 if that fleet/season has no F at all.
  default_F <- F_list[["F_rate"]][F_list[["F_rate"]][, "year"] == dat[["endyr"]], c("year", "seas", "fleet", "F")]
  new_F_rate <- rbind(F_list[["F_rate"]][, c("year", "seas", "fleet", "F")], F_list[["F_rate_fcast"]][, c("year", "seas", "fleet", "F")])
  rownames(new_F_rate) <- c(F_list[["F_rate"]][, c("name")], F_list[["F_rate_fcast"]][, c("name")])
  for (i in update_F_years) {
    for (j in unique(new_F_rate[, "seas"])) {
      for (k in unique(new_F_rate[, "fleet"])) {
        temp_F_rate <- new_F_rate[new_F_rate[, "year"] == i & new_F_rate[, "seas"] == j & new_F_rate[, "fleet"] == k, , drop = FALSE]
        if (length(temp_F_rate[, 1]) == 0) {
          if (length(default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, "F"]) == 0) {
            temp_F_rate[1, ] <- c(i, j, k, 0)
            rownames(temp_F_rate[1, ]) <- paste0("F_fleet_", k, "_YR_", i, "_s_", j)
            default_F <- rbind(default_F, temp_F_rate[1, , drop = FALSE])
            new_F_rate <- rbind(new_F_rate, temp_F_rate[1, , drop = FALSE])
          } else {
            temp_F_rate[1, ] <- c(i, j, k, default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, "F"][1])
            rownames(temp_F_rate[1, ]) <- paste0("F_fleet_", k, "_YR_", i, "_s_", j)
            new_F_rate <- rbind(new_F_rate, temp_F_rate[1, , drop = FALSE])
          }
        } else {
          if (length(default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, "F"]) == 0) {
            default_F <- rbind(default_F, temp_F_rate[1, , drop = FALSE])
          } else {
            default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, ] <- temp_F_rate[1, , drop = FALSE]
          }
        }
      }
    }
  }
  new_F_rate <- new_F_rate[order(new_F_rate[, "fleet"], new_F_rate[, "year"], new_F_rate[, "seas"]), ]
  rownames(new_F_rate) <- paste0(
    "F_fleet_", new_F_rate[["fleet"]], "_YR_", new_F_rate[["year"]], "_s_",
    new_F_rate[["seas"]]
  )
  # remove any years higher than the update_F_years (could have been in long
  # forecast in original model)
  new_F_rate <- new_F_rate[new_F_rate[["year"]] <= max(update_F_years), , drop = FALSE]
  parlist[["F_rate"]] <- new_F_rate
  parlist[["init_F"]] <- F_list[["init_F"]]
  # note: may need to also add some code in add_OM_devs to use impl error if
  # required in the future.
  parlist[["Fcast_impl_error"]] <- NULL # note: this would not work for v 3.30.16 and before
  ctl[["F_Method"]] <- 2 # Want all OMs to use F_Method = 2.
  ctl[["F_setup"]] <- c(0.05, 1, 0) # need to specify some starting value Fs, although not used in OM
  ctl[["F_iter"]] <- NULL # make sure list components used by other F methods are NULL:
  ctl[["F_setup2"]] <- NULL # make sure list components used by other F methods are NULL:
  if (!is.list(future_om_dat)) {
    future_om_dat <- list()
  }
  # Apply user-specified future deviations (parameters, recdevs, impl error).
  single_run_files <- add_OM_devs(ctl = ctl, dat = dat, parlist = parlist, timeseries = outlist[["timeseries"]], future_om_dat = future_om_dat[["dev_vals"]])
  dat <- single_run_files[["data"]] # SINGLE_RUN_MODS:
  ctl <- single_run_files[["control"]] # SINGLE_RUN_MODS:
  parlist <- single_run_files[["parameter"]] # SINGLE_RUN_MODS:
  impl_error <- single_run_files[["impl_error"]]
  # Default implementation error of 1 (no error) for each projection year.
  if (is.null(impl_error)) {
    impl_error <- data.frame(
      "year" = (dat[["endyr"]] + 1):(dat[["endyr"]] + nyrs),
      "error" = rep(1, nyrs)
    )
  }
  # modify dat file ----
  dat[["endyr"]] <- dat[["endyr"]] + nyrs # because OM goes through the last simulated year.
  # remove the sampling components not needed
  dat <- rm_sample_struct_hist(sample_struct = sample_struct_hist, dat = dat)
  # Add in the historical sampling structure, as defined by the user
  dat <- add_sample_struct(sample_struct = sample_struct_hist, dat = dat)
  dat <- add_sample_struct(sample_struct = sample_struct, dat = dat)
  # make sure tail compression is off.
  # turn off tail compression
  if (isTRUE(any(dat[["len_info"]][["mintailcomp"]] >= 0)) |
    isTRUE(any(dat[["age_info"]][["mintailcomp"]] >= 0))) {
    warning(
      "Tail compression was on for some fleets in length comp and/or age ",
      "comp for the operating model, but needs to be",
      "turned off in an operating model. Turning off tail compression.",
      " Note that this may change expected values for historical age or ",
      " length composition."
    )
    if (!is.null(dat[["len_info"]])) dat[["len_info"]][["mintailcomp"]] <- -1
    if (!is.null(dat[["age_info"]])) dat[["age_info"]][["mintailcomp"]] <- -1
  }
  # write all files
  r4ss::SS_writectl(
    ctllist = ctl, outfile = file.path(OM_out_dir, start[["ctlfile"]]),
    overwrite = TRUE, verbose = FALSE
  )
  r4ss::SS_writeforecast(
    mylist = forelist, dir = OM_out_dir, writeAll = TRUE,
    overwrite = TRUE, verbose = FALSE
  )
  r4ss::SS_writepar_3.30(
    parlist = parlist,
    outfile = file.path(OM_out_dir, "ss.par"),
    overwrite = TRUE
  )
  if (writedat) {
    SS_writedat(dat, file.path(OM_out_dir, start[["datfile"]]),
      overwrite = overwrite,
      verbose = FALSE
    )
  }
  if (verify_OM) {
    # check that model runs and produces a control.ss_new file
    if (file.exists(file.path(OM_out_dir, "control.ss_new"))) {
      file.remove(file.path(OM_out_dir, "control.ss_new"))
    }
    run_ss_model(OM_out_dir, "-maxfn 0 -phase 50 -nohess",
      verbose = verbose,
      debug_par_run = TRUE
    )
    # TODO: maybe add the following check into the debug par run arg of run_ss_model?
    # The line after the "checksum999" marker in ss.par must read 999,
    # otherwise the par file was truncated or malformed.
    check_par <- readLines(file.path(OM_out_dir, "ss.par"))
    check_sum_val <- check_par[grep("checksum999", check_par) + 1]
    if (as.numeric(check_sum_val) != 999) {
      stop(
        "The OM model created is not valid; likely, the par file was not of",
        "the correct length because checksum999 of output is not 999.",
        "Please open an issue in the SSMSE repository for assistance."
      )
    }
    if (!file.exists(file.path(OM_out_dir, "control.ss_new"))) {
      stop(
        "The OM model created is not valid; it did not run and produce a\n",
        "control.ss_new file. Please try running the OM model created\n",
        "with the create_OM function manually with SS to diagnose the\n",
        "problem."
      )
    }
    # check model runs without producing nans in the data file
    tmp_new_dat <- readLines(file.path(OM_out_dir, "data.ss_new"))
    nan_vals <- grep("nan", tmp_new_dat)
    if (length(nan_vals) > 0) {
      stop(
        "NAN values present in the data.ss_new om file, suggesting an issue ",
        "setting up the OM. See ", file.path(OM_out_dir, "data.ss_new")
      )
    }
    # check the names of F parameters in the Parameters section of the report
    # file.
    test_output <- r4ss::SS_output(OM_out_dir,
      forecast = FALSE, verbose = FALSE,
      warn = FALSE, covar = FALSE, readwt = FALSE,
      printstats = FALSE
    )
    # check F's in the assumed order.
    par_df <- test_output[["parameters"]]
    init_F_pars <- par_df[grep("^InitF_", par_df[["Label"]]), ]
    if (NROW(init_F_pars) != length(F_list[["init_F"]])) {
      stop("Wrong number of init_F parameters assumed by create_OM function.")
    }
    if (NROW(init_F_pars) > 0) {
      if (!all(init_F_pars[["Label"]] == names(F_list[["init_F"]]))) {
        stop(
          "Names of init_F parameters assumed by create_OM function and in\n",
          "the PARAMETERS table of Report.sso function do not match."
        )
      }
    }
    F_rate_pars <- par_df[grep("^F_fleet_", par_df[["Label"]]), ]
    if (NROW(F_rate_pars) != NROW(parlist[["F_rate"]])) {
      stop("Wrong number of F_rate parameters assumed by create_OM function.")
    }
    if (NROW(F_rate_pars) > 0) {
      if (!all(F_rate_pars[["Label"]] == rownames(parlist[["F_rate"]]))) {
        stop(
          "Names of F_rate parameters assumed by create_OM function and in\n",
          "the PARAMETERS table of Report.sso function do not match."
        )
      }
    }
  }
  ouput_list <- list(dat = dat, impl_error = impl_error)
  return(ouput_list)
}
#' Initial run of the OM
#'
#' This function is used to initialize the OM and get either expected values
#' or bootstrap.
#' @template OM_dir
#' @param boot Return the bootstrap dataset? If TRUE, function returns the
#'  number bootstrapped dataset specified in \code{nboot}. If FALSE, it returns
#'  the expected values.
#' @param nboot The number bootstrapped data set. This value is only used if
#'  \code{boot = TRUE}. Note that this numbering does NOT correspond with the
#'  numbering in section of r4ss::SS_readdat. E.g., specifying section = 3 in
#'  SS_readdat is equivalent to specifying nboot = 1.
#' @param init_run Is this the initial iteration of the OM? Defaults to FALSE.
#'  (Currently not referenced in the function body.)
#' @template verbose
#' @param debug_par_run If set to TRUE, and the run fails, a new folder called
#'  error_check will be created, and the model will be run from control start
#'  values instead of ss.par. The 2 par files are then compared to help debug
#'  the issue with the model run. Defaults to TRUE.
#' @template sample_catch
#' @template seed
#' @return A data list (as read by r4ss::SS_readdat) containing either the
#'  expected values or the \code{nboot}'th bootstrapped dataset.
#' @author Kathryn Doering
#' @importFrom r4ss SS_readdat SS_readstarter SS_writestarter
run_OM <- function(OM_dir,
                   boot = TRUE,
                   nboot = 1,
                   init_run = FALSE,
                   verbose = FALSE,
                   debug_par_run = TRUE,
                   sample_catch = FALSE,
                   seed = NULL) {
  # make sure OM generates the correct number of data sets:
  # data.ss_new section 1 = input data, 2 = expected values, 3+ = bootstraps,
  # so the nboot'th bootstrap lives in section nboot + 2.
  max_section <- if (boot) nboot + 2 else 2
  if (is.null(seed)) {
    seed <- stats::runif(1, 1, 9999999)
  }
  start <- r4ss::SS_readstarter(file.path(OM_dir, "starter.ss"),
    verbose = FALSE
  )
  start[["N_bootstraps"]] <- max_section
  start[["seed"]] <- seed
  r4ss::SS_writestarter(start,
    dir = OM_dir, verbose = FALSE, overwrite = TRUE,
    warn = FALSE
  )
  # run SS without estimation and read back the requested data section
  run_ss_model(OM_dir, "-maxfn 0 -phase 50 -nohess",
    verbose = verbose,
    debug_par_run = debug_par_run
  )
  dat <- r4ss::SS_readdat(file.path(OM_dir, "data.ss_new"),
    section = max_section,
    verbose = FALSE
  )
  # When bootstrapping but not sampling catch, keep the expected (unsampled)
  # catch values so removals match the OM exactly. Scalar short-circuit
  # operators replace the original elementwise `&` with `== TRUE/FALSE`.
  if (isTRUE(boot) && !isTRUE(sample_catch)) {
    exp_vals <- r4ss::SS_readdat(file.path(OM_dir, "data.ss_new"),
      section = 2,
      verbose = FALSE
    )
    dat[["catch"]] <- exp_vals[["catch"]]
  }
  dat
}
#' Get the sampling scheme in a data file.
#'
#' Determine what the default sampling scheme is for a given data file.
#' Produces a list object with the sampling scheme, which can be modified, if
#' desired.
#' @param dat An SS data file
#' @param dat_types Types of data to include
# get the initial sampling values
# NOTE(review): this is an unimplemented placeholder; the body is empty, so
# calling it currently returns NULL regardless of input.
get_init_samp_scheme <- function(dat,
                                 dat_types = c(
                                   "CPUE", "lencomp", "agecomp",
                                   "meanbodywt", "MeanSize_at_Age_obs"
                                 )) {
  # TODO: write this. Can be used for EM and OM.
}
#' Remove the historical sampling structure
#'
#' Drop observations from the data file whose identifying-column combinations
#' do not appear in the user-supplied historical sampling structure.
#'
#' @template sample_struct_hist
#' @param dat The data file, as read in using r4ss
rm_sample_struct_hist <- function(sample_struct_hist, dat) {
  # Nothing to filter against: leave the data file untouched.
  if (is.null(sample_struct_hist)) {
    return(dat)
  }
  # Identifying (key) columns per data component, used to match rows between
  # the data file and the historical sampling structure.
  key_cols <- list(
    CPUE = c("year", "seas", "index"),
    lencomp = c("Yr", "Seas", "FltSvy", "Gender", "Part"),
    agecomp = c(
      "Yr", "Seas", "FltSvy", "Gender", "Part", "Ageerr", "Lbin_lo",
      "Lbin_hi"
    ),
    meanbodywt = c("Year", "Seas", "Fleet", "Partition", "Type", "Std_in"),
    MeanSize_at_Age_obs = c("Yr", "Seas", "FltSvy", "Gender", "Part", "AgeErr", "N_")
  )
  # Apply the same keep-matching-rows operation to each component in turn.
  for (component in names(key_cols)) {
    dat[[component]] <- rm_vals(
      return_obj = dat,
      compare_obj = sample_struct_hist,
      name_in_obj = component,
      colnames = key_cols[[component]]
    )
  }
  dat
}
#' remove vals in 2 list components with the same name
#'
#' From 2 list components with the same name, remove vals that aren't in the
#' compare object
#'
#' @param return_obj the object (containing list component of name in obj) that
#'  will be modified. Only combinations of the columns found in compare object
#'  will be retained
#' @param compare_obj the object (containing list component of name_in_obj) that
#'  return_obj will be compared to
#' @param name_in_obj the name of the list elements to use; the same name must
#'  be in return_obj and compare_obj. This list element must be a data frame
#'  with the same column names
#' @param colnames The column names within the name_in_obj list components to
#'  compare.
#' @return return_obj[[name_in_obj]], modified to only include elements present
#'  in compare_obj[[name_in_obj]].
#' @author Kathryn Doering
rm_vals <- function(return_obj, compare_obj, name_in_obj, colnames) {
  # Return early if nothing to compare. `||` (scalar, short-circuiting)
  # replaces the original elementwise `|`, which is the idiomatic operator
  # for a scalar `if` condition.
  if (is.null(compare_obj[[name_in_obj]]) || is.null(return_obj[[name_in_obj]])) {
    return(return_obj[[name_in_obj]])
  }
  # combine_cols adds a "combo" key column built from the identifying columns.
  return_obj[[name_in_obj]] <- combine_cols(
    dat_list = return_obj,
    list_item = name_in_obj,
    colnames = colnames
  )
  compare_obj[[name_in_obj]] <- combine_cols(
    dat_list = compare_obj,
    list_item = name_in_obj,
    colnames = colnames
  )
  # Keep only rows whose key appears in both objects, then drop the key
  # column. Note: the `colnames` parameter does not shadow base::colnames()
  # in the call below, because R searches for a *function* when a name is
  # used in call position.
  to_keep <- intersect(
    compare_obj[[name_in_obj]][["combo"]],
    return_obj[[name_in_obj]][["combo"]]
  )
  to_return <- return_obj[[name_in_obj]][
    return_obj[[name_in_obj]][["combo"]] %in% to_keep,
    !(colnames(return_obj[[name_in_obj]]) %in% "combo")
  ]
  to_return
}
#' Add in years of sampling data needed
#'
#' @template sample_struct
#' @param dat A datafile as read in by r4ss::SS_readdat
add_sample_struct <- function(sample_struct, dat) {
  # No sampling requested: return the data file unchanged.
  if (is.null(sample_struct)) {
    return(dat)
  }
  # Only rows within the model's year range are added.
  subset_yr_start <- dat[["styr"]]
  subset_yr_end <- dat[["endyr"]]
  ## Indices (CPUE) ----
  tmp_CPUE <- sample_struct[["CPUE"]]
  if (!is.null(tmp_CPUE)) {
    tmp_CPUE <- tmp_CPUE[tmp_CPUE[["year"]] >= subset_yr_start &
      tmp_CPUE[["year"]] <= subset_yr_end, ]
    if (nrow(tmp_CPUE) > 0) {
      tmp_CPUE[["obs"]] <- 1 # dummy observation
      tmp_CPUE <- tmp_CPUE[, c("year", "seas", "index", "obs", "se_log")]
      # Negative index: SS computes expected values for the row without
      # including it in the likelihood.
      tmp_CPUE[["index"]] <- -abs(tmp_CPUE[["index"]])
      dat[["CPUE"]] <- rbind(dat[["CPUE"]], tmp_CPUE)
    }
  }
  ## Length comps ----
  # This method of adding new data doesn't work if len comp is not already
  # turned on. Add warning for now, but could potentially turn on len comp
  # for the user in the OM?
  if (dat[["use_lencomp"]] == 0 && !is.null(sample_struct[["lencomp"]])) {
    warning(
      "Length composition is not specified in the OM, but the lencomp ",
      "sampling was requested through sample_struct. Please turn on ",
      "length comp in the OM to allow lencomp sampling."
    )
  }
  if (dat[["use_lencomp"]] == 1 && !is.null(sample_struct[["lencomp"]])) {
    tmp_lencomp <- sample_struct[["lencomp"]]
    tmp_lencomp <- tmp_lencomp[tmp_lencomp[["Yr"]] >= subset_yr_start &
      tmp_lencomp[["Yr"]] <= subset_yr_end, ]
    if (nrow(tmp_lencomp) > 0) {
      # Fill the data-bin columns (cols 7+) with dummy 1s; expected values
      # come from running the OM, not from these placeholders.
      lencomp_dat_colnames <- colnames(dat[["lencomp"]])[7:ncol(dat[["lencomp"]])]
      tmp_df_dat <- matrix(1,
        nrow = nrow(tmp_lencomp),
        ncol = length(lencomp_dat_colnames)
      )
      colnames(tmp_df_dat) <- lencomp_dat_colnames
      tmp_lencomp <- cbind(tmp_lencomp, as.data.frame(tmp_df_dat))
      tmp_lencomp[["FltSvy"]] <- -abs(tmp_lencomp[["FltSvy"]]) # make sure negative
      dat[["lencomp"]] <- rbind(dat[["lencomp"]], tmp_lencomp)
    }
  }
  ## Age comps ----
  # TODO: can write code that adds age comp obs when dat[["agecomp"]] is NULL.
  if (is.null(dat[["agecomp"]]) && !is.null(sample_struct[["agecomp"]])) {
    warning(
      "Age composition is not specified in the OM, but the agecomp ",
      "sampling was requested through sample_struct. Please turn on ",
      "age comp in the OM by adding at least to allow agecomp ",
      "sampling."
    )
  }
  if (!is.null(dat[["agecomp"]]) && !is.null(sample_struct[["agecomp"]])) {
    tmp_agecomp <- sample_struct[["agecomp"]]
    tmp_agecomp <- tmp_agecomp[tmp_agecomp[["Yr"]] >= subset_yr_start &
      tmp_agecomp[["Yr"]] <= subset_yr_end, ]
    if (nrow(tmp_agecomp) > 0) {
      # Dummy 1s for the age-bin columns (cols 10+).
      agecomp_dat_colnames <- colnames(dat[["agecomp"]])[10:ncol(dat[["agecomp"]])]
      tmp_df_dat <- matrix(1,
        nrow = nrow(tmp_agecomp),
        ncol = length(agecomp_dat_colnames)
      )
      colnames(tmp_df_dat) <- agecomp_dat_colnames
      tmp_agecomp <- cbind(tmp_agecomp, as.data.frame(tmp_df_dat))
      tmp_agecomp[["FltSvy"]] <- -abs(tmp_agecomp[["FltSvy"]]) # make sure negative
      dat[["agecomp"]] <- rbind(dat[["agecomp"]], tmp_agecomp)
    }
  }
  ## Mean size ----
  if (is.null(dat[["meanbodywt"]]) && !is.null(sample_struct[["meanbodywt"]])) {
    warning(
      "Mean Size data is not specified in the OM, but the ",
      "sampling was requested through sample_struct. Please turn on ",
      "mean size data (i.e., meanbodywt) in the OM to allow mean size data ",
      "sampling."
    )
  }
  if (!is.null(dat[["meanbodywt"]]) && !is.null(sample_struct[["meanbodywt"]])) {
    tmp_meanbodywt <- sample_struct[["meanbodywt"]]
    tmp_meanbodywt <- tmp_meanbodywt[tmp_meanbodywt[["Year"]] >= subset_yr_start &
      tmp_meanbodywt[["Year"]] <= subset_yr_end, ]
    if (nrow(tmp_meanbodywt) > 0) {
      # dummy observation; negative to exclude from NLL? (or should the fleet be neg?)
      tmp_meanbodywt[["Value"]] <- -1
      tmp_meanbodywt <- tmp_meanbodywt[, c(
        "Year", "Seas", "Fleet", "Partition",
        "Type", "Value", "Std_in"
      )]
      tmp_meanbodywt[["Value"]] <- -abs(tmp_meanbodywt[["Value"]])
      dat[["meanbodywt"]] <- rbind(dat[["meanbodywt"]], tmp_meanbodywt)
    }
  }
  ## Mean size at age ----
  if (is.null(dat[["MeanSize_at_Age_obs"]]) && !is.null(sample_struct[["MeanSize_at_Age_obs"]])) {
    warning(
      "Mean Size at age data is not specified in the OM, but the ",
      "sampling was requested through sample_struct. Please turn on ",
      "mean size at age data (i.e., MeanSize_at_Age_obs) in the OM to allow mean size at age data ",
      "sampling."
    )
  }
  if (!is.null(dat[["MeanSize_at_Age_obs"]]) && !is.null(sample_struct[["MeanSize_at_Age_obs"]])) {
    tmp_MeanSize_at_Age_obs <- sample_struct[["MeanSize_at_Age_obs"]]
    tmp_MeanSize_at_Age_obs <- tmp_MeanSize_at_Age_obs[tmp_MeanSize_at_Age_obs[["Yr"]] >= subset_yr_start &
      tmp_MeanSize_at_Age_obs[["Yr"]] <= subset_yr_end, ]
    if (nrow(tmp_MeanSize_at_Age_obs) > 0) {
      # Per-sex sample value columns (e.g., f1..fN, m1..mN).
      sample_colnames <- grep("^[fm]\\d+$", colnames(dat[["MeanSize_at_Age_obs"]]),
        ignore.case = FALSE, value = TRUE
      )
      tmp_sample_df <- matrix(-1, # use negative 1, as this should exclude vals from the neg log likelihood
        nrow = nrow(tmp_MeanSize_at_Age_obs),
        ncol = length(sample_colnames)
      )
      colnames(tmp_sample_df) <- sample_colnames
      # Need a sample size for each of the female and male obs in the row; SSMSE
      # can only use the same sample size for all in a row currently.
      N_colnames <- grep("^N_[fm]\\d+$", colnames(dat[["MeanSize_at_Age_obs"]]),
        ignore.case = FALSE, value = TRUE
      )
      tmp_N_df <- lapply(tmp_MeanSize_at_Age_obs[["N_"]],
        function(x, y) {
          vec <- rep(x, times = length(y))
          tmp_df <- data.frame(matrix(vec,
            nrow = 1,
            ncol = length(vec)
          ))
          colnames(tmp_df) <- N_colnames
          tmp_df
        },
        y = N_colnames
      )
      tmp_N_df <- do.call(rbind, tmp_N_df)
      tmp_MeanSize_at_Age_obs <- tmp_MeanSize_at_Age_obs[, -7] # rm the sample size col
      tmp_MeanSize_at_Age_obs[["Ignore"]] <- 2 # column for future features
      tmp_MeanSize_at_Age_obs <- cbind(
        tmp_MeanSize_at_Age_obs,
        as.data.frame(tmp_sample_df),
        tmp_N_df
      )
      # TODO: need to make fleet negative??? not sure for this data type.
      # tmp_agecomp[["FltSvy"]] <- -abs(tmp_agecomp[["FltSvy"]]) # make sure negative
      dat[["MeanSize_at_Age_obs"]] <- rbind(
        dat[["MeanSize_at_Age_obs"]],
        tmp_MeanSize_at_Age_obs
      )
    }
  }
  dat
}
| /R/initOM.R | permissive | nmfs-fish-tools/SSMSE | R | false | false | 30,252 | r | # Functions associated with initialization of the OM.
#' Create the OM
#'
#' This function manipulates the OM as needed so that it can be used as an
#' operating model.
#' @author Kathryn Doering & Nathan Vaughan
#' @template OM_out_dir
#' @template overwrite
#' @param nyrs Number of years beyond the years included in the OM to run the
#' MSE. A single integer value.
#' @param nyrs_assess The number of years between assessments. This is used to
#' structure the forecast file for use in the OM.
#' @param nscen The scenario number
#' @param scen_name The scenario name
#' @param niter the iteration number
#' @param writedat Should a new datafile be written?
#' @param future_om_dat An optional data_frame including changes that should
#' be made after the end year of the input model. Including parameter variations,
#' recruitment deviations, and implementation errors.
#' @param verify_OM Should the model be run without estimation and some basic
#' checks done to verify that the OM can run? Defaults to TRUE.
#' @template sample_struct_hist
#' @template sample_struct
#' @template seed
#' @template verbose
#' @return A modified datafile
#' @import r4ss
create_OM <- function(OM_out_dir,
                      overwrite = TRUE,
                      writedat = TRUE,
                      verbose = FALSE,
                      nyrs = NULL,
                      nyrs_assess = NULL,
                      nscen = 1,
                      scen_name = NULL,
                      niter = 1,
                      future_om_dat = NULL,
                      verify_OM = TRUE,
                      sample_struct_hist = NULL,
                      sample_struct = NULL,
                      seed = NULL) {
  # NOTE(review): nyrs defaults to NULL but is used below in arithmetic
  # (e.g., dat[["endyr"]] + nyrs) without a check, so callers must supply it.
  # nyrs_assess is not referenced within this function body.
  start <- r4ss::SS_readstarter(file.path(OM_out_dir, "starter.ss"),
    verbose = FALSE
  )
  # modify starter to use as OM ----
  # Settings below: read initial values from ss.par, produce detailed age
  # structure output, and skip estimation entirely (phase 0).
  if (is.null(seed)) {
    seed <- stats::runif(1, 1, 99999999)
  }
  start[["init_values_src"]] <- 1
  start[["detailed_age_structure"]] <- 1
  start[["last_estimation_phase"]] <- 0
  start[["depl_basis"]] <- 0
  start[["depl_denom_frac"]] <- 1
  start[["SPR_basis"]] <- 0
  start[["F_report_units"]] <- 0
  start[["F_report_basis"]] <- 0
  start[["F_age_range"]] <- NULL
  start[["ALK_tolerance"]] <- 0
  start[["seed"]] <- seed
  r4ss::SS_writestarter(start,
    dir = OM_out_dir, verbose = FALSE,
    overwrite = TRUE
  )
  # run model to get standardized output ----
  run_ss_model(OM_out_dir, "-maxfn 0 -phase 50 -nohess",
    debug_par_run = TRUE,
    verbose = verbose
  )
  # read in files to use ----
  dat <- r4ss::SS_readdat(
    file = file.path(OM_out_dir, start[["datfile"]]),
    verbose = FALSE, section = 1
  )
  forelist <- r4ss::SS_readforecast(
    file = file.path(OM_out_dir, "forecast.ss"),
    readAll = TRUE, verbose = FALSE
  )
  ctl <- r4ss::SS_readctl(file.path(OM_out_dir, start[["ctlfile"]]),
    verbose = FALSE,
    use_datlist = TRUE, datlist = dat
  )
  outlist <- r4ss::SS_output(OM_out_dir,
    verbose = FALSE, printstats = FALSE,
    covar = FALSE
  )
  parlist <- r4ss::SS_readpar_3.30(
    parfile = file.path(OM_out_dir, "ss.par"),
    datsource = dat, ctlsource = ctl,
    verbose = FALSE
  )
  # model checks ----
  if (ctl[["F_Method"]] == 1) {
    stop(
      "SSMSE cannot work with models that use F method 1 (Pope's ",
      "approximation). Please use F method 2 or 3 instead (3 is ",
      "recommended over method 1)."
    )
  }
  # modify forecast file ----
  # Turn off benchmarks and forecasting; the MSE loop drives future years.
  # NOTE(review): currentNforecast is assigned but never used below.
  currentNforecast <- forelist[["Nforecastyrs"]]
  forelist[["benchmarks"]] <- 0 #
  forelist[["Bmark_relF_Basis"]] <- 1 # error generated by SS3 if forecast off, and this is 2
  forelist[["Forecast"]] <- 0 #
  forelist[["Nforecastyrs"]] <- 1 #
  forelist[["FirstYear_for_caps_and_allocations"]] <- dat[["endyr"]] + nyrs + 2 # push caps/allocations beyond the simulated horizon
  forelist[["stddev_of_log_catch_ratio"]] <- 0
  # modify ctl file ----
  # in the context of an OM, do not want to use the bias adjustment ramp, so just
  # turn off and make the recdevs years always the same.
  # UPDATE NOTE: For the OM we do not want bias adjustment in the future. However we
  # do want the historic period to be consistent with the original assessment model.
  # We therefore need to add advanced options if not already specified. I am also
  # updating the extend EM process to fix the main recdevs end year. This way all new
  # recdevs become late phase/forecast recdevs which are not subject to sum to zero
  # constraints or bias adjustment.
  if (!all(ctl[["time_vary_auto_generation"]] == 1)) {
    warning("Turning off autogeneration of time varying lines in the control file of the OM")
    ctl[["time_vary_auto_generation"]] <- rep(1, times = 5)
  }
  # If the model did not use advanced recdev options, add them with defaults
  # that reproduce the original main-recdev period.
  if (ctl[["recdev_adv"]] == 0) {
    ctl[["recdev_adv"]] <- 1
    ctl[["recdev_early_start"]] <- 0
    ctl[["recdev_early_phase"]] <- -4
    ctl[["Fcast_recr_phase"]] <- 0
    ctl[["lambda4Fcast_recr_like"]] <- 0
    ctl[["last_early_yr_nobias_adj"]] <- ctl[["MainRdevYrFirst"]] - 1
    ctl[["first_yr_fullbias_adj"]] <- ctl[["MainRdevYrFirst"]]
    ctl[["last_yr_fullbias_adj"]] <- ctl[["MainRdevYrLast"]]
    ctl[["first_recent_yr_nobias_adj"]] <- ctl[["MainRdevYrLast"]] + 1
    ctl[["max_bias_adj"]] <- 0.8
    ctl[["period_of_cycles_in_recr"]] <- 0
    ctl[["min_rec_dev"]] <- -10
    ctl[["max_rec_dev"]] <- 10
    ctl[["N_Read_recdevs"]] <- 0
    ctl[["recdev_input"]] <- NULL
  }
  # if (ctl[["recdev_early_start"]] <= 0) {
  #   first_year <- ctl[["MainRdevYrFirst"]] + ctl[["recdev_early_start"]]
  # } else if (ctl[["recdev_early_start"]] < ctl[["MainRdevYrFirst"]]) {
  #   first_year <- ctl[["recdev_early_start"]]
  # } else {
  #   (
  #     first_year <- ctl[["MainRdevYrFirst"]]
  #   )
  # }
  first_year <- ctl[["MainRdevYrFirst"]]
  # modify par file ----
  # note: don't include early recdevs in in all_recdevs
  all_recdevs <- as.data.frame(rbind(parlist[["recdev1"]], parlist[["recdev2"]], parlist[["recdev_forecast"]]))
  # get recdevs for all model years
  all_recdevs <- all_recdevs[all_recdevs[["year"]] >= first_year & all_recdevs[["year"]] <= (dat[["endyr"]]), ] #
  # Split recdevs into the main period (first_year..MainRdevYrLast) and the
  # forecast period (through endyr + nyrs + 1); missing years become 0.
  new_recdevs_df <- data.frame(year = first_year:ctl[["MainRdevYrLast"]], recdev = NA)
  fore_recdevs_df <- data.frame(year = (ctl[["MainRdevYrLast"]] + 1):(dat[["endyr"]] + nyrs + 1), recdev = NA) #
  temp_yrs <- (first_year:(dat[["endyr"]] + nyrs + 1))
  for (i in seq_along(temp_yrs)) { #
    tmp_yr <- temp_yrs[i] #
    if (tmp_yr <= ctl[["MainRdevYrLast"]]) {
      # step tracks the last main-period index so forecast rows start at 1.
      step <- i
      if (length(all_recdevs[all_recdevs[["year"]] == tmp_yr, "year"]) == 0) {
        new_recdevs_df[i, "recdev"] <- 0 # just assume no rec devs
      } else {
        new_recdevs_df[i, "recdev"] <-
          all_recdevs[all_recdevs[["year"]] == tmp_yr, "recdev"]
      }
    } else {
      if (length(all_recdevs[all_recdevs[["year"]] == tmp_yr, "year"]) == 0) {
        fore_recdevs_df[(i - step), "recdev"] <- 0
      } else {
        fore_recdevs_df[(i - step), "recdev"] <-
          all_recdevs[all_recdevs[["year"]] == tmp_yr, "recdev"]
      }
    }
  }
  # add recdevs to the parlist
  new_recdevs_mat <- as.matrix(new_recdevs_df)
  new_fore_recdevs_mat <- as.matrix(fore_recdevs_df)
  if (!is.null(parlist[["recdev1"]])) {
    parlist[["recdev1"]] <- new_recdevs_mat
  } else if (!is.null(parlist[["recdev2"]])) {
    parlist[["recdev2"]] <- new_recdevs_mat
  } else {
    stop("no recdevs in initial OM model. something is wrong")
  }
  parlist[["recdev_forecast"]] <- new_fore_recdevs_mat
  # use report.sso time series table to find the F's to put into the parlist.
  F_list <- get_F(
    timeseries = outlist[["timeseries"]],
    fleetnames = dat[["fleetinfo"]][dat[["fleetinfo"]][["type"]] %in% c(1, 2), "fleetname"]
  )
  # SINGLE_RUN_MODS:
  # Build placeholder catch rows (0.001 mt, SE 0.01) for every
  # year x season x fleet combination in the future period.
  update_F_years <- (dat[["endyr"]] + 1):(dat[["endyr"]] + nyrs)
  default_Catch <- data.frame(
    year = sort(rep(update_F_years, (length(unique(dat[["catch"]][, "seas"])) * length(unique(dat[["catch"]][, "fleet"]))))),
    seas = rep(sort(rep(unique(dat[["catch"]][, "seas"]), length(unique(dat[["catch"]][, "fleet"])))), length(update_F_years)),
    fleet = rep(sort(unique(dat[["catch"]][, "fleet"])), (length(unique(dat[["catch"]][, "seas"])) * length(update_F_years))),
    catch = rep(0.001, (length(unique(dat[["catch"]][, "seas"])) * length(unique(dat[["catch"]][, "fleet"])) * length(update_F_years))),
    catch_se = rep(0.01, (length(unique(dat[["catch"]][, "seas"])) * length(unique(dat[["catch"]][, "fleet"])) * length(update_F_years)))
  )
  # Keep historical catch (and the -9999 equilibrium-catch marker rows), then
  # append the future placeholders and sort by fleet/year/season.
  new_catch <- dat[["catch"]][((dat[["catch"]][, "year"] <= dat[["endyr"]] & dat[["catch"]][, "year"] >= (-dat[["endyr"]])) | dat[["catch"]][, "year"] == -9999), , drop = FALSE]
  new_catch <- rbind(new_catch, default_Catch)
  new_catch <- new_catch[order(new_catch[, "fleet"], new_catch[, "year"], new_catch[, "seas"]), ]
  # Helper applied per catch row: use the user-specified catch_se from
  # sample_struct when a unique match exists, otherwise keep the current SE
  # (column 5 of the row).
  update_SE_catch <- function(new_catch, sample_struct_in) {
    temp_samp <- sample_struct_in[sample_struct_in[, "year"] == new_catch[1] &
      sample_struct_in[, "seas"] == new_catch[2] &
      sample_struct_in[, "fleet"] == new_catch[3], "catch_se"]
    if (length(temp_samp) == 1) {
      new_SE <- temp_samp
    } else {
      new_SE <- new_catch[5]
    }
    return(new_SE)
  }
  if (!is.null(sample_struct[["catch"]])) {
    new_catch[, "catch_se"] <- apply(new_catch, 1, update_SE_catch, sample_struct_in = sample_struct[["catch"]])
  }
  dat[["catch"]] <- new_catch
  # Fill F_rate for the future period: reuse the model's forecast F where
  # available, otherwise carry forward the terminal-year F for that
  # season/fleet (default_F), or 0 when no terminal-year F exists.
  default_F <- F_list[["F_rate"]][F_list[["F_rate"]][, "year"] == dat[["endyr"]], c("year", "seas", "fleet", "F")]
  new_F_rate <- rbind(F_list[["F_rate"]][, c("year", "seas", "fleet", "F")], F_list[["F_rate_fcast"]][, c("year", "seas", "fleet", "F")])
  rownames(new_F_rate) <- c(F_list[["F_rate"]][, c("name")], F_list[["F_rate_fcast"]][, c("name")])
  for (i in update_F_years) {
    for (j in unique(new_F_rate[, "seas"])) {
      for (k in unique(new_F_rate[, "fleet"])) {
        temp_F_rate <- new_F_rate[new_F_rate[, "year"] == i & new_F_rate[, "seas"] == j & new_F_rate[, "fleet"] == k, , drop = FALSE]
        if (length(temp_F_rate[, 1]) == 0) {
          if (length(default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, "F"]) == 0) {
            temp_F_rate[1, ] <- c(i, j, k, 0)
            rownames(temp_F_rate[1, ]) <- paste0("F_fleet_", k, "_YR_", i, "_s_", j)
            default_F <- rbind(default_F, temp_F_rate[1, , drop = FALSE])
            new_F_rate <- rbind(new_F_rate, temp_F_rate[1, , drop = FALSE])
          } else {
            temp_F_rate[1, ] <- c(i, j, k, default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, "F"][1])
            rownames(temp_F_rate[1, ]) <- paste0("F_fleet_", k, "_YR_", i, "_s_", j)
            new_F_rate <- rbind(new_F_rate, temp_F_rate[1, , drop = FALSE])
          }
        } else {
          # A forecast F exists for this combo; update default_F so later
          # years carry this value forward.
          if (length(default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, "F"]) == 0) {
            default_F <- rbind(default_F, temp_F_rate[1, , drop = FALSE])
          } else {
            default_F[default_F[, "seas"] == j & default_F[, "fleet"] == k, ] <- temp_F_rate[1, , drop = FALSE]
          }
        }
      }
    }
  }
  new_F_rate <- new_F_rate[order(new_F_rate[, "fleet"], new_F_rate[, "year"], new_F_rate[, "seas"]), ]
  rownames(new_F_rate) <- paste0(
    "F_fleet_", new_F_rate[["fleet"]], "_YR_", new_F_rate[["year"]], "_s_",
    new_F_rate[["seas"]]
  )
  # remove any years higher than the update_F_years (could have been in long
  # forecast in original model)
  new_F_rate <- new_F_rate[new_F_rate[["year"]] <= max(update_F_years), , drop = FALSE]
  parlist[["F_rate"]] <- new_F_rate
  parlist[["init_F"]] <- F_list[["init_F"]]
  # note: may need to also add some code in add_OM_devs to use impl error if
  # required in the future.
  parlist[["Fcast_impl_error"]] <- NULL # note: this would not work for v 3.30.16 and before
  ctl[["F_Method"]] <- 2 # Want all OMs to use F_Method = 2.
  ctl[["F_setup"]] <- c(0.05, 1, 0) # need to specify some starting value Fs, although not used in OM
  ctl[["F_iter"]] <- NULL # make sure list components used by other F methods are NULL:
  ctl[["F_setup2"]] <- NULL # make sure list components used by other F methods are NULL:
  if (!is.list(future_om_dat)) {
    future_om_dat <- list()
  }
  # Apply user-specified future deviations (parameter changes, recdevs,
  # implementation error) to the ctl/dat/par files.
  single_run_files <- add_OM_devs(ctl = ctl, dat = dat, parlist = parlist, timeseries = outlist[["timeseries"]], future_om_dat = future_om_dat[["dev_vals"]])
  dat <- single_run_files[["data"]] # SINGLE_RUN_MODS:
  ctl <- single_run_files[["control"]] # SINGLE_RUN_MODS:
  parlist <- single_run_files[["parameter"]] # SINGLE_RUN_MODS:
  impl_error <- single_run_files[["impl_error"]]
  # Default implementation error is 1 (i.e., none) for every future year.
  if (is.null(impl_error)) {
    impl_error <- data.frame(
      "year" = (dat[["endyr"]] + 1):(dat[["endyr"]] + nyrs),
      "error" = rep(1, nyrs)
    )
  }
  # modify dat file ----
  dat[["endyr"]] <- dat[["endyr"]] + nyrs # because OM goes through the last simulated year.
  # remove the sampling components not needed
  dat <- rm_sample_struct_hist(sample_struct = sample_struct_hist, dat = dat)
  # Add in the historical sampling structure, as defined by the user
  dat <- add_sample_struct(sample_struct = sample_struct_hist, dat = dat)
  dat <- add_sample_struct(sample_struct = sample_struct, dat = dat)
  # make sure tail compression is off.
  # turn off tail compression
  if (isTRUE(any(dat[["len_info"]][["mintailcomp"]] >= 0)) |
    isTRUE(any(dat[["age_info"]][["mintailcomp"]] >= 0))) {
    warning(
      "Tail compression was on for some fleets in length comp and/or age ",
      "comp for the operating model, but needs to be",
      "turned off in an operating model. Turning off tail compression.",
      " Note that this may change expected values for historical age or ",
      " length composition."
    )
    if (!is.null(dat[["len_info"]])) dat[["len_info"]][["mintailcomp"]] <- -1
    if (!is.null(dat[["age_info"]])) dat[["age_info"]][["mintailcomp"]] <- -1
  }
  # write all files
  r4ss::SS_writectl(
    ctllist = ctl, outfile = file.path(OM_out_dir, start[["ctlfile"]]),
    overwrite = TRUE, verbose = FALSE
  )
  r4ss::SS_writeforecast(
    mylist = forelist, dir = OM_out_dir, writeAll = TRUE,
    overwrite = TRUE, verbose = FALSE
  )
  r4ss::SS_writepar_3.30(
    parlist = parlist,
    outfile = file.path(OM_out_dir, "ss.par"),
    overwrite = TRUE
  )
  if (writedat) {
    SS_writedat(dat, file.path(OM_out_dir, start[["datfile"]]),
      overwrite = overwrite,
      verbose = FALSE
    )
  }
  if (verify_OM) {
    # check that model runs and produces a control.ss_new file
    if (file.exists(file.path(OM_out_dir, "control.ss_new"))) {
      file.remove(file.path(OM_out_dir, "control.ss_new"))
    }
    run_ss_model(OM_out_dir, "-maxfn 0 -phase 50 -nohess",
      verbose = verbose,
      debug_par_run = TRUE
    )
    # TODO: maybe add the following check into the debug par run arg of run_ss_model?
    # The par file ends with "checksum999" followed by the value 999 when SS
    # read it completely; anything else means a malformed par file.
    check_par <- readLines(file.path(OM_out_dir, "ss.par"))
    check_sum_val <- check_par[grep("checksum999", check_par) + 1]
    if (as.numeric(check_sum_val) != 999) {
      stop(
        "The OM model created is not valid; likely, the par file was not of",
        "the correct length because checksum999 of output is not 999.",
        "Please open an issue in the SSMSE repository for assistance."
      )
    }
    if (!file.exists(file.path(OM_out_dir, "control.ss_new"))) {
      stop(
        "The OM model created is not valid; it did not run and produce a\n",
        "control.ss_new file. Please try running the OM model created\n",
        "with the create_OM function manually with SS to diagnose the\n",
        "problem."
      )
    }
    # check model runs without producing nans in the data file
    tmp_new_dat <- readLines(file.path(OM_out_dir, "data.ss_new"))
    nan_vals <- grep("nan", tmp_new_dat)
    if (length(nan_vals) > 0) {
      stop(
        "NAN values present in the data.ss_new om file, suggesting an issue ",
        "setting up the OM. See ", file.path(OM_out_dir, "data.ss_new")
      )
    }
    # check the names of F parameters in the Parameters section of the report
    # file.
    test_output <- r4ss::SS_output(OM_out_dir,
      forecast = FALSE, verbose = FALSE,
      warn = FALSE, covar = FALSE, readwt = FALSE,
      printstats = FALSE
    )
    # check F's in the assumed order.
    par_df <- test_output[["parameters"]]
    init_F_pars <- par_df[grep("^InitF_", par_df[["Label"]]), ]
    if (NROW(init_F_pars) != length(F_list[["init_F"]])) {
      stop("Wrong number of init_F parameters assumed by create_OM function.")
    }
    if (NROW(init_F_pars) > 0) {
      if (!all(init_F_pars[["Label"]] == names(F_list[["init_F"]]))) {
        stop(
          "Names of init_F parameters assumed by create_OM function and in\n",
          "the PARAMETERS table of Report.sso function do not match."
        )
      }
    }
    F_rate_pars <- par_df[grep("^F_fleet_", par_df[["Label"]]), ]
    if (NROW(F_rate_pars) != NROW(parlist[["F_rate"]])) {
      stop("Wrong number of F_rate parameters assumed by create_OM function.")
    }
    if (NROW(F_rate_pars) > 0) {
      if (!all(F_rate_pars[["Label"]] == rownames(parlist[["F_rate"]]))) {
        stop(
          "Names of F_rate parameters assumed by create_OM function and in\n",
          "the PARAMETERS table of Report.sso function do not match."
        )
      }
    }
  }
  # Return the modified data list plus the implementation error series.
  ouput_list <- list(dat = dat, impl_error = impl_error)
  return(ouput_list)
}
#' Initial run of the OM
#'
#' This function is used to initialize the OM and get either expected values
#' or bootstrap.
#' @template OM_dir
#' @param boot Return the bootstrap dataset? If TRUE, function returns the
#' number bootstrapped dataset specified in \code{nboot}. If FALSE, it returns
#' the expected values.
#' @param nboot The number bootstrapped data set. This value is only used if
#' \code{boot = TRUE}. Note that this numbering does NOT correspond with the
#' numbering in section of r4ss::SS_readdat. E.g., specifying section = 3 in
#' SS_readdat is equivalent to specifying nboot = 1.
#' @param init_run Is this the initial iteration of the OM? Defaults to FALSE.
#' @template verbose
#' @param debug_par_run If set to TRUE, and the run fails, a new folder called
#' error_check will be created, and the model will be run from control start
#' values instead of ss.par. The 2 par files are then compared to help debug
#' the issue with the model run. Defaults to TRUE.
#' @template sample_catch
#' @template seed
#' @author Kathryn Doering
#' @importFrom r4ss SS_readdat SS_readstarter SS_writestarter
run_OM <- function(OM_dir,
                   boot = TRUE,
                   nboot = 1,
                   init_run = FALSE,
                   verbose = FALSE,
                   debug_par_run = TRUE,
                   sample_catch = FALSE,
                   seed = NULL) {
  # Number of data sections SS must generate: section 1 is the input data,
  # section 2 the expected values, and sections 3+ are bootstrap data sets,
  # so nboot bootstraps require nboot + 2 sections.
  # NOTE(review): init_run is accepted for interface compatibility but is not
  # used within this function body.
  max_section <- if (boot) nboot + 2 else 2
  if (is.null(seed)) {
    seed <- stats::runif(1, 1, 9999999)
  }
  start <- r4ss::SS_readstarter(file.path(OM_dir, "starter.ss"),
    verbose = FALSE
  )
  start[["N_bootstraps"]] <- max_section
  start[["seed"]] <- seed
  r4ss::SS_writestarter(start,
    dir = OM_dir, verbose = FALSE, overwrite = TRUE,
    warn = FALSE
  )
  # run SS without estimation and read the requested data section
  run_ss_model(OM_dir, "-maxfn 0 -phase 50 -nohess",
    verbose = verbose,
    debug_par_run = debug_par_run
  )
  dat <- r4ss::SS_readdat(file.path(OM_dir, "data.ss_new"),
    section = max_section,
    verbose = FALSE
  )
  # When bootstrapping but not sampling catch, replace the bootstrapped catch
  # with the expected values from section 2. (Scalar && instead of the
  # vectorized `boot == TRUE & sample_catch == FALSE`.)
  if (boot && !sample_catch) {
    exp_vals <- r4ss::SS_readdat(file.path(OM_dir, "data.ss_new"),
      section = 2,
      verbose = FALSE
    )
    dat[["catch"]] <- exp_vals[["catch"]]
  }
  return(dat)
}
#' Get the sampling scheme in a data file.
#'
#' Determine what the default sampling scheme is for a given data file.
#' Produces a list object with the sampling scheme, which can be modified, if
#' desired.
#' @param dat An SS data file
#' @param dat_types Types of data to include
# get the initial sampling values
get_init_samp_scheme <- function(dat,
                                 dat_types = c(
                                   "CPUE", "lencomp", "agecomp",
                                   "meanbodywt", "MeanSize_at_Age_obs"
                                 )) {
  # TODO: write this. Can be used for EM and OM.
  # NOTE(review): intentionally a stub; the empty body returns NULL, so no
  # caller may rely on its return value until it is implemented.
}
#' Remove the historical sampling structure
#'
#' @template sample_struct_hist
#' @param dat The data file, as read in using r4ss
rm_sample_struct_hist <- function(sample_struct_hist, dat) {
  # No historical sampling structure supplied: leave the data file as-is.
  if (is.null(sample_struct_hist)) {
    return(dat)
  }
  # Key columns that identify a unique observation for each data component.
  key_cols <- list(
    CPUE = c("year", "seas", "index"),
    lencomp = c("Yr", "Seas", "FltSvy", "Gender", "Part"),
    agecomp = c(
      "Yr", "Seas", "FltSvy", "Gender", "Part", "Ageerr",
      "Lbin_lo", "Lbin_hi"
    ),
    meanbodywt = c("Year", "Seas", "Fleet", "Partition", "Type", "Std_in"),
    MeanSize_at_Age_obs = c(
      "Yr", "Seas", "FltSvy", "Gender", "Part", "AgeErr", "N_"
    )
  )
  # For each component, retain only the rows whose key combination also
  # appears in sample_struct_hist.
  for (component in names(key_cols)) {
    dat[[component]] <- rm_vals(
      return_obj = dat,
      compare_obj = sample_struct_hist,
      name_in_obj = component,
      colnames = key_cols[[component]]
    )
  }
  dat
}
#' remove vals in 2 list components with the same name
#'
#' From 2 list components with the same name, remove vals that aren't in the
#' compare object
#'
#' @param return_obj the object (containing list component of name in obj) that
#' will be modified. Only combinations of the columns found in compare object
#' will be retained
#' @param compare_obj the object (containing list component of name_in_obj) that
#' return_obj will be compared to
#' @param name_in_obj the name of the list elements to use; the same name must
#' be in return_obj and compare_obj. This list element must be a data frame
#' with the same column names
#' @param colnames The column names within the name_in_obj list components to
#' compare.
#' @return return_obj[[name_in_obj]], modified to only include elements present
#' in compare_obj[[name_in_obj]].
#' @author Kathryn Doering
rm_vals <- function(return_obj, compare_obj, name_in_obj, colnames) {
  # Nothing to compare when either side lacks the component.
  if (is.null(compare_obj[[name_in_obj]]) | is.null(return_obj[[name_in_obj]])) {
    return(return_obj[[name_in_obj]])
  }
  # Attach a "combo" key column (built from `colnames`) to both data frames.
  ret_df <- combine_cols(
    dat_list = return_obj,
    list_item = name_in_obj,
    colnames = colnames
  )
  cmp_df <- combine_cols(
    dat_list = compare_obj,
    list_item = name_in_obj,
    colnames = colnames
  )
  # Rows whose key appears in both objects survive; drop the helper column.
  shared_keys <- intersect(cmp_df[["combo"]], ret_df[["combo"]])
  keep_rows <- ret_df[["combo"]] %in% shared_keys
  keep_cols <- !(colnames(ret_df) %in% "combo")
  ret_df[keep_rows, keep_cols]
}
#' Add in years of sampling data needed
#'
#' @template sample_struct
#' @param dat A datafile as read in by r4ss::SS_readdat
add_sample_struct <- function(sample_struct, dat) {
  # No sampling requested: return the data file unchanged.
  if (is.null(sample_struct)) {
    return(dat)
  }
  # Only rows within the model's year range are added.
  subset_yr_start <- dat[["styr"]]
  subset_yr_end <- dat[["endyr"]]
  ## Indices (CPUE) ----
  tmp_CPUE <- sample_struct[["CPUE"]]
  if (!is.null(tmp_CPUE)) {
    tmp_CPUE <- tmp_CPUE[tmp_CPUE[["year"]] >= subset_yr_start &
      tmp_CPUE[["year"]] <= subset_yr_end, ]
    if (nrow(tmp_CPUE) > 0) {
      tmp_CPUE[["obs"]] <- 1 # dummy observation
      tmp_CPUE <- tmp_CPUE[, c("year", "seas", "index", "obs", "se_log")]
      # Negative index: SS computes expected values for the row without
      # including it in the likelihood.
      tmp_CPUE[["index"]] <- -abs(tmp_CPUE[["index"]])
      dat[["CPUE"]] <- rbind(dat[["CPUE"]], tmp_CPUE)
    }
  }
  ## Length comps ----
  # This method of adding new data doesn't work if len comp is not already
  # turned on. Add warning for now, but could potentially turn on len comp
  # for the user in the OM?
  if (dat[["use_lencomp"]] == 0 && !is.null(sample_struct[["lencomp"]])) {
    warning(
      "Length composition is not specified in the OM, but the lencomp ",
      "sampling was requested through sample_struct. Please turn on ",
      "length comp in the OM to allow lencomp sampling."
    )
  }
  if (dat[["use_lencomp"]] == 1 && !is.null(sample_struct[["lencomp"]])) {
    tmp_lencomp <- sample_struct[["lencomp"]]
    tmp_lencomp <- tmp_lencomp[tmp_lencomp[["Yr"]] >= subset_yr_start &
      tmp_lencomp[["Yr"]] <= subset_yr_end, ]
    if (nrow(tmp_lencomp) > 0) {
      # Fill the data-bin columns (cols 7+) with dummy 1s; expected values
      # come from running the OM, not from these placeholders.
      lencomp_dat_colnames <- colnames(dat[["lencomp"]])[7:ncol(dat[["lencomp"]])]
      tmp_df_dat <- matrix(1,
        nrow = nrow(tmp_lencomp),
        ncol = length(lencomp_dat_colnames)
      )
      colnames(tmp_df_dat) <- lencomp_dat_colnames
      tmp_lencomp <- cbind(tmp_lencomp, as.data.frame(tmp_df_dat))
      tmp_lencomp[["FltSvy"]] <- -abs(tmp_lencomp[["FltSvy"]]) # make sure negative
      dat[["lencomp"]] <- rbind(dat[["lencomp"]], tmp_lencomp)
    }
  }
  ## Age comps ----
  # TODO: can write code that adds age comp obs when dat[["agecomp"]] is NULL.
  if (is.null(dat[["agecomp"]]) && !is.null(sample_struct[["agecomp"]])) {
    warning(
      "Age composition is not specified in the OM, but the agecomp ",
      "sampling was requested through sample_struct. Please turn on ",
      "age comp in the OM by adding at least to allow agecomp ",
      "sampling."
    )
  }
  if (!is.null(dat[["agecomp"]]) && !is.null(sample_struct[["agecomp"]])) {
    tmp_agecomp <- sample_struct[["agecomp"]]
    tmp_agecomp <- tmp_agecomp[tmp_agecomp[["Yr"]] >= subset_yr_start &
      tmp_agecomp[["Yr"]] <= subset_yr_end, ]
    if (nrow(tmp_agecomp) > 0) {
      # Dummy 1s for the age-bin columns (cols 10+).
      agecomp_dat_colnames <- colnames(dat[["agecomp"]])[10:ncol(dat[["agecomp"]])]
      tmp_df_dat <- matrix(1,
        nrow = nrow(tmp_agecomp),
        ncol = length(agecomp_dat_colnames)
      )
      colnames(tmp_df_dat) <- agecomp_dat_colnames
      tmp_agecomp <- cbind(tmp_agecomp, as.data.frame(tmp_df_dat))
      tmp_agecomp[["FltSvy"]] <- -abs(tmp_agecomp[["FltSvy"]]) # make sure negative
      dat[["agecomp"]] <- rbind(dat[["agecomp"]], tmp_agecomp)
    }
  }
  ## Mean size ----
  if (is.null(dat[["meanbodywt"]]) && !is.null(sample_struct[["meanbodywt"]])) {
    warning(
      "Mean Size data is not specified in the OM, but the ",
      "sampling was requested through sample_struct. Please turn on ",
      "mean size data (i.e., meanbodywt) in the OM to allow mean size data ",
      "sampling."
    )
  }
  if (!is.null(dat[["meanbodywt"]]) && !is.null(sample_struct[["meanbodywt"]])) {
    tmp_meanbodywt <- sample_struct[["meanbodywt"]]
    tmp_meanbodywt <- tmp_meanbodywt[tmp_meanbodywt[["Year"]] >= subset_yr_start &
      tmp_meanbodywt[["Year"]] <= subset_yr_end, ]
    if (nrow(tmp_meanbodywt) > 0) {
      # dummy observation; negative to exclude from NLL? (or should the fleet be neg?)
      tmp_meanbodywt[["Value"]] <- -1
      tmp_meanbodywt <- tmp_meanbodywt[, c(
        "Year", "Seas", "Fleet", "Partition",
        "Type", "Value", "Std_in"
      )]
      tmp_meanbodywt[["Value"]] <- -abs(tmp_meanbodywt[["Value"]])
      dat[["meanbodywt"]] <- rbind(dat[["meanbodywt"]], tmp_meanbodywt)
    }
  }
  ## Mean size at age ----
  if (is.null(dat[["MeanSize_at_Age_obs"]]) && !is.null(sample_struct[["MeanSize_at_Age_obs"]])) {
    warning(
      "Mean Size at age data is not specified in the OM, but the ",
      "sampling was requested through sample_struct. Please turn on ",
      "mean size at age data (i.e., MeanSize_at_Age_obs) in the OM to allow mean size at age data ",
      "sampling."
    )
  }
  if (!is.null(dat[["MeanSize_at_Age_obs"]]) && !is.null(sample_struct[["MeanSize_at_Age_obs"]])) {
    tmp_MeanSize_at_Age_obs <- sample_struct[["MeanSize_at_Age_obs"]]
    tmp_MeanSize_at_Age_obs <- tmp_MeanSize_at_Age_obs[tmp_MeanSize_at_Age_obs[["Yr"]] >= subset_yr_start &
      tmp_MeanSize_at_Age_obs[["Yr"]] <= subset_yr_end, ]
    if (nrow(tmp_MeanSize_at_Age_obs) > 0) {
      # Per-sex sample value columns (e.g., f1..fN, m1..mN).
      sample_colnames <- grep("^[fm]\\d+$", colnames(dat[["MeanSize_at_Age_obs"]]),
        ignore.case = FALSE, value = TRUE
      )
      tmp_sample_df <- matrix(-1, # use negative 1, as this should exclude vals from the neg log likelihood
        nrow = nrow(tmp_MeanSize_at_Age_obs),
        ncol = length(sample_colnames)
      )
      colnames(tmp_sample_df) <- sample_colnames
      # Need a sample size for each of the female and male obs in the row; SSMSE
      # can only use the same sample size for all in a row currently.
      N_colnames <- grep("^N_[fm]\\d+$", colnames(dat[["MeanSize_at_Age_obs"]]),
        ignore.case = FALSE, value = TRUE
      )
      tmp_N_df <- lapply(tmp_MeanSize_at_Age_obs[["N_"]],
        function(x, y) {
          vec <- rep(x, times = length(y))
          tmp_df <- data.frame(matrix(vec,
            nrow = 1,
            ncol = length(vec)
          ))
          colnames(tmp_df) <- N_colnames
          tmp_df
        },
        y = N_colnames
      )
      tmp_N_df <- do.call(rbind, tmp_N_df)
      tmp_MeanSize_at_Age_obs <- tmp_MeanSize_at_Age_obs[, -7] # rm the sample size col
      tmp_MeanSize_at_Age_obs[["Ignore"]] <- 2 # column for future features
      tmp_MeanSize_at_Age_obs <- cbind(
        tmp_MeanSize_at_Age_obs,
        as.data.frame(tmp_sample_df),
        tmp_N_df
      )
      # TODO: need to make fleet negative??? not sure for this data type.
      # tmp_agecomp[["FltSvy"]] <- -abs(tmp_agecomp[["FltSvy"]]) # make sure negative
      dat[["MeanSize_at_Age_obs"]] <- rbind(
        dat[["MeanSize_at_Age_obs"]],
        tmp_MeanSize_at_Age_obs
      )
    }
  }
  dat
}
|
# Assignment: R Project (STAT 6430)
# Names: Sally Gao, Stephen Mortensen, Kennan Grant
# Computing IDs: sg2zv, sam8sp, khg3je
# 1. What seems to be associated with a high rating?
# 2. What groups are most likely to provide higher ratings?
# Join reviews with movie, reviewer and zip-code metadata into one wide table.
# (Assumes `reviews`, `movies`, `reviewers`, `zip_codes` and dplyr/ggplot2 are
# already loaded by an earlier part of the analysis.)
big_table <- reviews %>%
  left_join(movies, by = "movie_id") %>%
  left_join(reviewers, by = "reviewer_id") %>%
  left_join(zip_codes, by = "zip_code")
# Zip codes containing letters are Canadian postal codes, not US ZIPs.
big_table$Location[grepl("[[:alpha:]]", big_table$zip_code)] <- "CANADA"
big_table$occupation <- factor(big_table$occupation)
mean(big_table$rating) # overall mean rating: 3.52986
# Mean/median rating per occupation. FIX: left assignment with `<-` instead of
# the easy-to-miss right assignment `-> occupation_summary`.
occupation_summary <- big_table %>%
  group_by(occupation) %>%
  summarise(mean_rating = mean(rating), median_rating = median(rating))
# healthcare workers give really low ratings - mean of 2.896220!!
# lawyers and doctors tend to give high ratings.
ggplot(data = big_table, mapping = aes(x = occupation, y = rating)) +
geom_boxplot() + coord_flip() | /Rproj-analysis-Sally.R | no_license | sally-gao/STAT-6430-R-Project | R | false | false | 1,011 | r | # Assignment: R Project (STAT 6430)
# Names: Sally Gao, Stephen Mortensen, Kennan Grant
# Computing IDs: sg2zv, sam8sp, khg3je
# 1. What seems to be associated with a high rating?
# 2. What groups are most likely to provide higher ratings?
# Join reviews with movie, reviewer and zip-code metadata into one wide table.
# (Assumes `reviews`, `movies`, `reviewers`, `zip_codes` and dplyr/ggplot2 are
# already loaded by an earlier part of the analysis.)
big_table <- reviews %>%
  left_join(movies, by = "movie_id") %>%
  left_join(reviewers, by = "reviewer_id") %>%
  left_join(zip_codes, by = "zip_code")
# Zip codes containing letters are Canadian postal codes, not US ZIPs.
big_table$Location[grepl("[[:alpha:]]", big_table$zip_code)] <- "CANADA"
big_table$occupation <- factor(big_table$occupation)
mean(big_table$rating) # overall mean rating: 3.52986
# Mean/median rating per occupation. FIX: left assignment with `<-` instead of
# the easy-to-miss right assignment `-> occupation_summary`.
occupation_summary <- big_table %>%
  group_by(occupation) %>%
  summarise(mean_rating = mean(rating), median_rating = median(rating))
# healthcare workers give really low ratings - mean of 2.896220!!
# lawyers and doctors tend to give high ratings.
ggplot(data = big_table, mapping = aes(x = occupation, y = rating)) +
geom_boxplot() + coord_flip() |
#!/home/genesky/software/r/3.5.1/bin/Rscript
# geo_mrna_chip_diff: GEO2R-style differential expression analysis of GEO mRNA
# chip data. Compares a case sample group against a control group with limma
# and writes a diff table plus MA / volcano / heatmap plots.
.libPaths("/home/genesky/software/r/3.5.1/lib64/R/library/")
library(docopt)
"Usage: geo_mrna_chip_diff.r --series_number <string> --case_sample <string> --control_sample <string> --output_prefix <file> [--platform <string> --pvalue_cutoff <numeric> --case_group_name <string> --control_group_name <string> ]
Options:
--series_number <string> GEO 编号列表,包含了case_sample/control_sample 样本的芯片数据。多个GEO编号用逗号分隔。支持不同GEO中的样本进行对比,前提是芯片一致。例如: 'GSE82107'
--case_sample <string> case组样本编号,样本之间用逗号分隔 例如: 'GSM2183539,GSM2183540,GSM2183541,GSM2183542,GSM2183543,GSM2183544,GSM2183545,GSM2183546,GSM2183547,GSM2183548'
--control_sample <string> control组样本编号,样本之间用逗号分隔 例如: 'GSM2183532,GSM2183533,GSM2183534,GSM2183535,GSM2183536,GSM2183537,GSM2183538'
--case_group_name <string> case组名 [default: case]
--control_group_name <string> control组名 [default: control]
--output_prefix <file> 输出文件的前缀,例如: ./result 。 最终生成的结果为 result.diff.xls。
--pvalue_cutoff <numeric> pvalue 阈值,仅输出小于该阈值的结果 [default: 1]
--platform <string> 测序平台编号,例如 GPL96, GPL97 等,因为有的GEO下包含了多个测序平台的数据,不同平台之间是不能合并混用的。默认随机选择第一个平台,如果只有一个平台,可以忽略这个参数
注意:脚本内部使用pvalue 0.05, log2fc 1 进行显著差异判定" -> doc
# Parse the command line and unpack the options into plain variables.
opts <- docopt(doc, version = 'Program : geo_mrna_chip_diff v1.0 \n 甘斌 129\n')
series_number <- opts$series_number
platform <- opts$platform
case_sample_list <- opts$case_sample
control_sample_list <- opts$control_sample
output_prefix <- opts$output_prefix
pvalue_cutoff <- as.numeric(opts$pvalue_cutoff)
case_group_name <- opts$case_group_name
control_group_name <- opts$control_group_name
# Split the comma-separated GSM sample lists into character vectors.
case_samples = unlist(strsplit(case_sample_list, ','))
control_samples = unlist(strsplit(control_sample_list, ','))
# Group label per sample, in the same order as c(case_samples, control_samples).
sample_group <- c(rep(case_group_name, length(case_samples)), rep(control_group_name, length(control_samples)))
# Methodology reference: https://www.ncbi.nlm.nih.gov/geo/info/geo2r.html
##################
# (1) Load required packages
##################
library(Biobase)
library(GEOquery)
library(limma)
library(ggplot2)
library(pheatmap)
##################
# (2) Load the expression data of every requested series
##################
data_matrix <- '' # combined expression matrix (becomes a matrix after the first series)
chip_version <- '' # chip/platform version; must be identical across all series
probe_annotation <- '' # probe annotation, taken from the first series
series_count <- 0
for (series in unlist(strsplit(series_number, ',')) )
{
series_count <- series_count + 1
message("loading series : ", series)
gset_tmp <- getGEO(series, GSEMatrix =TRUE, AnnotGPL=FALSE)
# Platform selection: default to the first platform of the series unless
# --platform was given.
if(is.null(platform))
{
gset_tmp <- gset_tmp[[1]]
}else{
idx <- grep(platform, attr(gset_tmp, "names"))
if(length(idx) == 0)
{
message("[ERROR] 测序平台没有找到, 请仔细确认编号 ", platform)
q()
}
gset_tmp <- gset_tmp[[idx]]
}
# Extract the chip version and expression matrix of this series.
chip_version_tmp <- gset_tmp@annotation
data_matrix_tmp <- exprs(gset_tmp)
message(series, " 选择芯片版本: ", chip_version_tmp)
# All series must share one chip version, otherwise they cannot be merged.
if(chip_version != '' && chip_version != chip_version_tmp)
{
message("输入的series的芯片版本不一致,不能放在一起分析")
q()
}else {
chip_version <- chip_version_tmp
}
# Keep the probe annotation from the first series only.
if(series_count == 1)
{
probe_annotation <- gset_tmp@featureData@data
}
# Expression matrix of the current series.
data_matrix_tmp <- exprs(gset_tmp)
# log2-transform when the values look un-logged (GEO2R auto-detect heuristic
# based on the quantiles of the expression distribution).
qx <- as.numeric(quantile(data_matrix_tmp, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
(qx[6]-qx[1] > 50 && qx[2] > 0) ||
(qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC)
{
data_matrix_tmp[which(data_matrix_tmp <= 0)] <- NaN
data_matrix_tmp <- log2(data_matrix_tmp)
}
if(series_count == 1)
{
data_matrix <- data_matrix_tmp
}else {
# Batches disagree on the number of probes -- cannot merge.
if(nrow(data_matrix) != nrow(data_matrix_tmp))
{
message("输入的series的探针数量不一致,不能放在一起分析");
q();
}
# Batches disagree on the probe identifiers -- cannot merge.
if(identical(sort(rownames(data_matrix)), sort(rownames(data_matrix_tmp))) == FALSE)
{
message("输入的series的探针名称不一致,不能放在一起分析");
q();
}
# Safe to merge: append the new samples column-wise.
match_pos <- match(rownames(data_matrix), rownames(data_matrix_tmp)) # align rows by probe name
data_matrix <- cbind(data_matrix, data_matrix_tmp[match_pos,])
}
}
##################
# (3) Validate the sample IDs and build a fresh Biobase ExpressionSet
##################
message("build BioBase dataset")
# Every requested case/control GSM must exist in the combined matrix.
if( sum( c(case_samples, control_samples) %in% colnames(data_matrix) ) != (length(case_samples) + length(control_samples)) ) { message("部分输入的case/control样本没有在series中找到,请仔细核对是否写错"); q(); }
data_matrix <- data_matrix[, c(case_samples, control_samples)] # keep only the requested samples
gset <- ExpressionSet(assayData = data_matrix,
annotation = chip_version
)
##################
# (4) Differential expression analysis (limma, GEO2R style)
##################
message("diff analysis")
# Group coding: G1 = case samples, G0 = control samples.
sml <- paste("G", c(rep(1, length(case_samples)), rep(0, length(control_samples))), sep="")
fl <- as.factor(sml)
gset$description <- fl
design <- model.matrix(~ description + 0, gset)
colnames(design) <- levels(fl)
fit <- lmFit(gset, design)
cont.matrix <- makeContrasts(G1-G0, levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
fit2 <- eBayes(fit2, 0.01)
tT <- topTable(fit2, adjust="fdr", sort.by="B", n=Inf, p.value=pvalue_cutoff) # final diff table, FDR adjusted, filtered at pvalue_cutoff
##################
# (5) Assemble and write the diff result table
##################
message("write diff result")
# One row per probe: expression values, per-group mean expression, limma
# statistics, and the platform's probe annotation.
result <- data.frame(probe = rownames(tT),
data_matrix[rownames(tT), ],
caseExpr = apply(data_matrix[rownames(tT), case_samples], 1, mean),
controlExpr = apply(data_matrix[rownames(tT), control_samples], 1, mean),
averageExpr = tT$AveExpr,
log2FC = tT$logFC,
pvalue = tT$P.Value,
fdr = tT$adj.P.Val,
probe_annotation[rownames(tT), ]
)
# Classify DEG status with fixed thresholds: pvalue < 0.05 and |log2FC| >= 1.
result$type <- "Not DEG"
result$type[result$pvalue < 0.05 & result$log2FC >= 1 ] <- "Up"
result$type[result$pvalue < 0.05 & result$log2FC <= -1] <- "Down"
result$type <- factor(result$type, levels = c("Up", "Down", "Not DEG"))
file_diff = paste0(output_prefix, ".diff.xls")
message("output diff file : ", file_diff)
write.table(result, file=file_diff, row.names=F, sep="\t", quote = FALSE)
##################
# (6)PCA 暂时不绘制,数据做过log2处理,存在极限值
##################
# mycol <- c(119,132,147,454,89,404,123,463,461,128,139,552,28,54,100,258,558,376,43,652,165,31,610,477,256,588,99,632,81,503,104,562,76,96,495,598,645,507,657,33,179,107,62)
# mycol <- colors()[rep(mycol, 50)]
# myshape <- rep(c(15,16,17,18,19,20,21,22,23,24,25,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14),4)
# pca_coordate <- paste(output_prefix, ".pca.site.txt", sep="")
# pca_pdf <- paste(output_prefix, ".pca.pdf", sep="")
# message("plot pca : ", pca_pdf)
# pca <- prcomp(t(data_matrix))
# summaryInfo <- summary(pca)
# pc1Pro <- 100 * summaryInfo$importance['Proportion of Variance', 'PC1'] # PC1主元解释百分比
# pc2Pro <- 100 * summaryInfo$importance['Proportion of Variance', 'PC2'] # PC2主元解释百分比
# scores <- as.data.frame(pca$x)
# scores$Group = factor(sample_group, levels = unique(sample_group)) # 设定成factor数据,且定义levels与输入顺序一致,这样就可以固定pca图上的分组顺序了
# write.csv(scores, file=pca_coordate)
# pdf(file=pca_pdf, width=13, height=13)
# p = ggplot(data=scores, aes(x = PC1, y = PC2, colour = Group, shape = Group)) +
# geom_hline(yintercept = 0, colour = "gray65") +
# geom_vline(xintercept = 0, colour = "gray65") +
# scale_shape_manual(values = myshape ) +
# scale_color_manual(values = mycol ) +
# geom_point(size = 3, alpha = 1) +
# ggtitle('PCA') + xlab(paste("PC1 ", "(", pc1Pro, "%)", plot_x_lab, sep="")) + ylab(paste("PC2 ", "(", pc2Pro, "%)", sep="")) + theme(plot.title = element_text(hjust = 0.5))
# p
# dev.off()
##################
# (7) MA plot
##################
# Average expression vs log2 fold change, coloured by DEG status.
ma_file <- paste(output_prefix, ".MA.pdf", sep="")
message("plot MA : ", ma_file)
pdf(ma_file)
# FIX: the labs() argument was misspelled 'tilte', which labs() silently
# accepts as an unused label, so the plot title was never shown. Also wrap
# the plot in print() so it renders even when this script is source()d
# (top-level auto-printing only happens under Rscript).
print(
ggplot(result, aes(x = averageExpr, y = log2FC, colour = type)) +
geom_point() +
scale_x_continuous(limits = c(0, 10)) +
scale_y_continuous(limits = c(-20, 20)) +
theme(legend.title = element_blank()) +
labs(x = "averageExpr", y = "log2(FC)", title = "MA plot")
)
dev.off()
##################
# (8) Volcano plot
##################
# -log10(p-value) vs log2 fold change, coloured by DEG status.
volcano_file <- paste(output_prefix, ".volcano.pdf", sep="")
message("plot volcano : ", volcano_file)
pdf(volcano_file)
volcano_title <- paste0("genes: ", case_group_name, '/', control_group_name)
# FIX: map the 'pvalue' column through aes() data masking instead of reaching
# into result$pvalue; referencing the external vector inside aes() is fragile
# (it bypasses ggplot's data masking and breaks under subsetting/faceting).
# print() makes the plot render even when the script is source()d.
print(
ggplot(result, aes(x = log2FC, y = -log10(pvalue), color = type)) +
geom_point() +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5)) +
labs(x = bquote(paste(log[2],"(fold change)",sep="")), y = bquote(paste(-log[10],"(p value)",sep="")), title = volcano_title) +
scale_x_continuous(limits=c(-10,10))
)
dev.off()
##################
# (9) Heatmaps (significant genes only)
##################
message("热图绘制,数据过滤")
message("    仅保留差异显著的基因")
result_diff <- result[result$type != 'Not DEG', ]
# Drop genes whose expression sd is 0; hclust can fail on constant rows.
message("    去掉表达量的标准差为0的基因")
result_diff <- result_diff[apply(result_diff[ , c(case_samples, control_samples)],1, function(x){ sd(x[!is.na(x)]) != 0}), ]
# Drop genes missing in more than 50% of samples; they can break the plot.
message("    去掉表达量缺失样本比例超过50%的基因")
na_perc <- apply(result_diff[ , c(case_samples, control_samples)],1,function(x){sum(is.na(x)) / length(c(case_samples, control_samples))})
result_diff <- result_diff[na_perc <= 0.5, ]
# Top50 heatmap plot
if( nrow(result_diff) < 50 ){
data <- data.matrix(result_diff[ , c(case_samples, control_samples)]) # fewer than 50 DEGs: plot them all
}else{
result_diff <- result_diff[order(result_diff$pvalue), ]
data <- data.matrix(result_diff[1:50, c(case_samples, control_samples)]) # 50 most significant (smallest p-value)
}
heatmap_file <- paste(output_prefix, ".heatmap.top50.pdf", sep="")
message("plot heatmap top50 : ", heatmap_file)
pdf(heatmap_file , width=12, height = 12 )
# Per-sample group annotation for the column side bar; annotation_group and
# myheatcol are reused by the full heatmap below.
annotation_group <- data.frame(Group = factor(sml, levels = unique(sml)))
rownames(annotation_group) <- c(case_samples, control_samples)
myheatcol = colorRampPalette(c('green','black','red'))(100)
pheatmap(data,
scale = 'row',
# margins = c(8,10),
cluster_rows = T,
cluster_cols = T,
color = myheatcol,
show_rownames = T,
show_colnames = T,
annotation_col = annotation_group,
)
dev.off()
# Full heatmap over all retained significant genes.
data <- data.matrix(result_diff[, c(case_samples, control_samples)])
heatmap_file <- paste(output_prefix, ".heatmap.pdf", sep="")
message("plot heatmap all : ", heatmap_file)
pdf(heatmap_file , width=12, height = 12 )
# Row names hidden here (potentially many genes); colours and column
# annotation were defined in the top-50 section above.
pheatmap(data,
scale = 'row',
# margins = c(8,10),
cluster_rows = T,
cluster_cols = T,
color = myheatcol,
show_rownames = F,
show_colnames = T,
annotation_col = annotation_group,
)
dev.off()
| /geo_mrna_chip_diff.r | no_license | ccoo22/personal_tools | R | false | false | 12,482 | r | #!/home/genesky/software/r/3.5.1/bin/Rscript
# geo_mrna_chip_diff: GEO2R-style differential expression analysis of GEO mRNA
# chip data. Compares a case sample group against a control group with limma
# and writes a diff table plus MA / volcano / heatmap plots.
.libPaths("/home/genesky/software/r/3.5.1/lib64/R/library/")
library(docopt)
"Usage: geo_mrna_chip_diff.r --series_number <string> --case_sample <string> --control_sample <string> --output_prefix <file> [--platform <string> --pvalue_cutoff <numeric> --case_group_name <string> --control_group_name <string> ]
Options:
--series_number <string> GEO 编号列表,包含了case_sample/control_sample 样本的芯片数据。多个GEO编号用逗号分隔。支持不同GEO中的样本进行对比,前提是芯片一致。例如: 'GSE82107'
--case_sample <string> case组样本编号,样本之间用逗号分隔 例如: 'GSM2183539,GSM2183540,GSM2183541,GSM2183542,GSM2183543,GSM2183544,GSM2183545,GSM2183546,GSM2183547,GSM2183548'
--control_sample <string> control组样本编号,样本之间用逗号分隔 例如: 'GSM2183532,GSM2183533,GSM2183534,GSM2183535,GSM2183536,GSM2183537,GSM2183538'
--case_group_name <string> case组名 [default: case]
--control_group_name <string> control组名 [default: control]
--output_prefix <file> 输出文件的前缀,例如: ./result 。 最终生成的结果为 result.diff.xls。
--pvalue_cutoff <numeric> pvalue 阈值,仅输出小于该阈值的结果 [default: 1]
--platform <string> 测序平台编号,例如 GPL96, GPL97 等,因为有的GEO下包含了多个测序平台的数据,不同平台之间是不能合并混用的。默认随机选择第一个平台,如果只有一个平台,可以忽略这个参数
注意:脚本内部使用pvalue 0.05, log2fc 1 进行显著差异判定" -> doc
# Parse the command line and unpack the options into plain variables.
opts <- docopt(doc, version = 'Program : geo_mrna_chip_diff v1.0 \n 甘斌 129\n')
series_number <- opts$series_number
platform <- opts$platform
case_sample_list <- opts$case_sample
control_sample_list <- opts$control_sample
output_prefix <- opts$output_prefix
pvalue_cutoff <- as.numeric(opts$pvalue_cutoff)
case_group_name <- opts$case_group_name
control_group_name <- opts$control_group_name
# Split the comma-separated GSM sample lists into character vectors.
case_samples = unlist(strsplit(case_sample_list, ','))
control_samples = unlist(strsplit(control_sample_list, ','))
# Group label per sample, in the same order as c(case_samples, control_samples).
sample_group <- c(rep(case_group_name, length(case_samples)), rep(control_group_name, length(control_samples)))
# Methodology reference: https://www.ncbi.nlm.nih.gov/geo/info/geo2r.html
##################
# (1) Load required packages
##################
library(Biobase)
library(GEOquery)
library(limma)
library(ggplot2)
library(pheatmap)
##################
# (2) Load the expression data of every requested series
##################
data_matrix <- '' # combined expression matrix (becomes a matrix after the first series)
chip_version <- '' # chip/platform version; must be identical across all series
probe_annotation <- '' # probe annotation, taken from the first series
series_count <- 0
for (series in unlist(strsplit(series_number, ',')) )
{
series_count <- series_count + 1
message("loading series : ", series)
gset_tmp <- getGEO(series, GSEMatrix =TRUE, AnnotGPL=FALSE)
# Platform selection: default to the first platform of the series unless
# --platform was given.
if(is.null(platform))
{
gset_tmp <- gset_tmp[[1]]
}else{
idx <- grep(platform, attr(gset_tmp, "names"))
if(length(idx) == 0)
{
message("[ERROR] 测序平台没有找到, 请仔细确认编号 ", platform)
q()
}
gset_tmp <- gset_tmp[[idx]]
}
# Extract the chip version and expression matrix of this series.
chip_version_tmp <- gset_tmp@annotation
data_matrix_tmp <- exprs(gset_tmp)
message(series, " 选择芯片版本: ", chip_version_tmp)
# All series must share one chip version, otherwise they cannot be merged.
if(chip_version != '' && chip_version != chip_version_tmp)
{
message("输入的series的芯片版本不一致,不能放在一起分析")
q()
}else {
chip_version <- chip_version_tmp
}
# Keep the probe annotation from the first series only.
if(series_count == 1)
{
probe_annotation <- gset_tmp@featureData@data
}
# Expression matrix of the current series.
data_matrix_tmp <- exprs(gset_tmp)
# log2-transform when the values look un-logged (GEO2R auto-detect heuristic
# based on the quantiles of the expression distribution).
qx <- as.numeric(quantile(data_matrix_tmp, c(0., 0.25, 0.5, 0.75, 0.99, 1.0), na.rm=T))
LogC <- (qx[5] > 100) ||
(qx[6]-qx[1] > 50 && qx[2] > 0) ||
(qx[2] > 0 && qx[2] < 1 && qx[4] > 1 && qx[4] < 2)
if (LogC)
{
data_matrix_tmp[which(data_matrix_tmp <= 0)] <- NaN
data_matrix_tmp <- log2(data_matrix_tmp)
}
if(series_count == 1)
{
data_matrix <- data_matrix_tmp
}else {
# Batches disagree on the number of probes -- cannot merge.
if(nrow(data_matrix) != nrow(data_matrix_tmp))
{
message("输入的series的探针数量不一致,不能放在一起分析");
q();
}
# Batches disagree on the probe identifiers -- cannot merge.
if(identical(sort(rownames(data_matrix)), sort(rownames(data_matrix_tmp))) == FALSE)
{
message("输入的series的探针名称不一致,不能放在一起分析");
q();
}
# Safe to merge: append the new samples column-wise.
match_pos <- match(rownames(data_matrix), rownames(data_matrix_tmp)) # align rows by probe name
data_matrix <- cbind(data_matrix, data_matrix_tmp[match_pos,])
}
}
##################
# (3) Validate the sample IDs and build a fresh Biobase ExpressionSet
##################
message("build BioBase dataset")
# Every requested case/control GSM must exist in the combined matrix.
if( sum( c(case_samples, control_samples) %in% colnames(data_matrix) ) != (length(case_samples) + length(control_samples)) ) { message("部分输入的case/control样本没有在series中找到,请仔细核对是否写错"); q(); }
data_matrix <- data_matrix[, c(case_samples, control_samples)] # keep only the requested samples
gset <- ExpressionSet(assayData = data_matrix,
annotation = chip_version
)
##################
# (4) Differential expression analysis (limma, GEO2R style)
##################
message("diff analysis")
# Group coding: G1 = case samples, G0 = control samples.
sml <- paste("G", c(rep(1, length(case_samples)), rep(0, length(control_samples))), sep="")
fl <- as.factor(sml)
gset$description <- fl
design <- model.matrix(~ description + 0, gset)
colnames(design) <- levels(fl)
fit <- lmFit(gset, design)
cont.matrix <- makeContrasts(G1-G0, levels=design)
fit2 <- contrasts.fit(fit, cont.matrix)
fit2 <- eBayes(fit2, 0.01)
tT <- topTable(fit2, adjust="fdr", sort.by="B", n=Inf, p.value=pvalue_cutoff) # final diff table, FDR adjusted, filtered at pvalue_cutoff
##################
# (5) Assemble and write the diff result table
##################
message("write diff result")
# One row per probe: expression values, per-group mean expression, limma
# statistics, and the platform's probe annotation.
result <- data.frame(probe = rownames(tT),
data_matrix[rownames(tT), ],
caseExpr = apply(data_matrix[rownames(tT), case_samples], 1, mean),
controlExpr = apply(data_matrix[rownames(tT), control_samples], 1, mean),
averageExpr = tT$AveExpr,
log2FC = tT$logFC,
pvalue = tT$P.Value,
fdr = tT$adj.P.Val,
probe_annotation[rownames(tT), ]
)
# Classify DEG status with fixed thresholds: pvalue < 0.05 and |log2FC| >= 1.
result$type <- "Not DEG"
result$type[result$pvalue < 0.05 & result$log2FC >= 1 ] <- "Up"
result$type[result$pvalue < 0.05 & result$log2FC <= -1] <- "Down"
result$type <- factor(result$type, levels = c("Up", "Down", "Not DEG"))
file_diff = paste0(output_prefix, ".diff.xls")
message("output diff file : ", file_diff)
write.table(result, file=file_diff, row.names=F, sep="\t", quote = FALSE)
##################
# (6)PCA 暂时不绘制,数据做过log2处理,存在极限值
##################
# mycol <- c(119,132,147,454,89,404,123,463,461,128,139,552,28,54,100,258,558,376,43,652,165,31,610,477,256,588,99,632,81,503,104,562,76,96,495,598,645,507,657,33,179,107,62)
# mycol <- colors()[rep(mycol, 50)]
# myshape <- rep(c(15,16,17,18,19,20,21,22,23,24,25,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14),4)
# pca_coordate <- paste(output_prefix, ".pca.site.txt", sep="")
# pca_pdf <- paste(output_prefix, ".pca.pdf", sep="")
# message("plot pca : ", pca_pdf)
# pca <- prcomp(t(data_matrix))
# summaryInfo <- summary(pca)
# pc1Pro <- 100 * summaryInfo$importance['Proportion of Variance', 'PC1'] # PC1主元解释百分比
# pc2Pro <- 100 * summaryInfo$importance['Proportion of Variance', 'PC2'] # PC2主元解释百分比
# scores <- as.data.frame(pca$x)
# scores$Group = factor(sample_group, levels = unique(sample_group)) # 设定成factor数据,且定义levels与输入顺序一致,这样就可以固定pca图上的分组顺序了
# write.csv(scores, file=pca_coordate)
# pdf(file=pca_pdf, width=13, height=13)
# p = ggplot(data=scores, aes(x = PC1, y = PC2, colour = Group, shape = Group)) +
# geom_hline(yintercept = 0, colour = "gray65") +
# geom_vline(xintercept = 0, colour = "gray65") +
# scale_shape_manual(values = myshape ) +
# scale_color_manual(values = mycol ) +
# geom_point(size = 3, alpha = 1) +
# ggtitle('PCA') + xlab(paste("PC1 ", "(", pc1Pro, "%)", plot_x_lab, sep="")) + ylab(paste("PC2 ", "(", pc2Pro, "%)", sep="")) + theme(plot.title = element_text(hjust = 0.5))
# p
# dev.off()
##################
# (7) MA plot
##################
# Average expression vs log2 fold change, coloured by DEG status.
ma_file <- paste(output_prefix, ".MA.pdf", sep="")
message("plot MA : ", ma_file)
pdf(ma_file)
# FIX: the labs() argument was misspelled 'tilte', which labs() silently
# accepts as an unused label, so the plot title was never shown. Also wrap
# the plot in print() so it renders even when this script is source()d
# (top-level auto-printing only happens under Rscript).
print(
ggplot(result, aes(x = averageExpr, y = log2FC, colour = type)) +
geom_point() +
scale_x_continuous(limits = c(0, 10)) +
scale_y_continuous(limits = c(-20, 20)) +
theme(legend.title = element_blank()) +
labs(x = "averageExpr", y = "log2(FC)", title = "MA plot")
)
dev.off()
##################
# (8) Volcano plot
##################
# -log10(p-value) vs log2 fold change, coloured by DEG status.
volcano_file <- paste(output_prefix, ".volcano.pdf", sep="")
message("plot volcano : ", volcano_file)
pdf(volcano_file)
volcano_title <- paste0("genes: ", case_group_name, '/', control_group_name)
# FIX: map the 'pvalue' column through aes() data masking instead of reaching
# into result$pvalue; referencing the external vector inside aes() is fragile
# (it bypasses ggplot's data masking and breaks under subsetting/faceting).
# print() makes the plot render even when the script is source()d.
print(
ggplot(result, aes(x = log2FC, y = -log10(pvalue), color = type)) +
geom_point() +
theme_bw() +
theme(plot.title = element_text(hjust = 0.5)) +
labs(x = bquote(paste(log[2],"(fold change)",sep="")), y = bquote(paste(-log[10],"(p value)",sep="")), title = volcano_title) +
scale_x_continuous(limits=c(-10,10))
)
dev.off()
##################
# (9) Heatmaps (significant genes only)
##################
message("热图绘制,数据过滤")
message("    仅保留差异显著的基因")
result_diff <- result[result$type != 'Not DEG', ]
# Drop genes whose expression sd is 0; hclust can fail on constant rows.
message("    去掉表达量的标准差为0的基因")
result_diff <- result_diff[apply(result_diff[ , c(case_samples, control_samples)],1, function(x){ sd(x[!is.na(x)]) != 0}), ]
# Drop genes missing in more than 50% of samples; they can break the plot.
message("    去掉表达量缺失样本比例超过50%的基因")
na_perc <- apply(result_diff[ , c(case_samples, control_samples)],1,function(x){sum(is.na(x)) / length(c(case_samples, control_samples))})
result_diff <- result_diff[na_perc <= 0.5, ]
# Top50 heatmap plot
if( nrow(result_diff) < 50 ){
data <- data.matrix(result_diff[ , c(case_samples, control_samples)]) # fewer than 50 DEGs: plot them all
}else{
result_diff <- result_diff[order(result_diff$pvalue), ]
data <- data.matrix(result_diff[1:50, c(case_samples, control_samples)]) # 50 most significant (smallest p-value)
}
heatmap_file <- paste(output_prefix, ".heatmap.top50.pdf", sep="")
message("plot heatmap top50 : ", heatmap_file)
pdf(heatmap_file , width=12, height = 12 )
# Per-sample group annotation for the column side bar; annotation_group and
# myheatcol are reused by the full heatmap below.
annotation_group <- data.frame(Group = factor(sml, levels = unique(sml)))
rownames(annotation_group) <- c(case_samples, control_samples)
myheatcol = colorRampPalette(c('green','black','red'))(100)
pheatmap(data,
scale = 'row',
# margins = c(8,10),
cluster_rows = T,
cluster_cols = T,
color = myheatcol,
show_rownames = T,
show_colnames = T,
annotation_col = annotation_group,
)
dev.off()
# Full heatmap over all retained significant genes.
data <- data.matrix(result_diff[, c(case_samples, control_samples)])
heatmap_file <- paste(output_prefix, ".heatmap.pdf", sep="")
message("plot heatmap all : ", heatmap_file)
pdf(heatmap_file , width=12, height = 12 )
# Row names hidden here (potentially many genes); colours and column
# annotation were defined in the top-50 section above.
pheatmap(data,
scale = 'row',
# margins = c(8,10),
cluster_rows = T,
cluster_cols = T,
color = myheatcol,
show_rownames = F,
show_colnames = T,
annotation_col = annotation_group,
)
dev.off()
|
#' Calculate med load
#'
#' This function allows you to start the process of med load/response calculations
#' @keywords med load
#' @export
#' @importFrom lubridate interval
#' @importFrom lubridate dyears
#' @import RMySQL
#' @import ggplot2
#' @import openxlsx
#' @examples
#' calculate_med_load()
# Purpose - This script will take raw med data from the <PATIENT>_MED_DATA_SOURCE file, ranking data from
# the MED_RANKING_SOURCE file, anthropometrics data from the <PATIENT>_ANTHROPOMETRICS_SOURCE
# file, and demographics data from the DEMOGRAPHICS_SOURCE file and calculate daily med loads
# based on the ranking values and anthropometric/demographics data given for that patient.
# The script will prompt you, the user, to specify the identifier signifying
# the patient you wish to run this script for, and the directories where the four needed xlsx files
# can be found.
# When the script finishes calculating daily med loads, you will be asked if you wish to save
# a <PATIENT>_MED_LOAD file into the work directory for your viewing.
calculate_med_load <- function() {
## Read in all relevant data from xlsx files
print("Input the identifier that signify the patient we are doing calculations for")
print("Example: FILA")
patient <- readline(prompt="Enter here: ")
print("Input the directory that you wish to draw this patient's DEMOGRAPHICS_SOURCE file from")
print("Example: C:/Folder_Name/")
directory <- readline(prompt="Enter here: ")
setwd(directory)
demo <- "DEMOGRAPHICS_SOURCE.xlsx"
demo <- read.xlsx(demo,sheet=1,detectDates=TRUE)
print("Type 'yes' if the MED_RANKING_SOURCE file can be found in the same folder as the DEMOGRAPHICS_SOURCE table. Type 'no' if it is in a different folder")
answer <- ""
while(tolower(answer)!="yes" & tolower(answer)!="no") {
answer <- readline(prompt="Enter here: ")
}
if (answer=="no") {
print("Input the directory that you wish to draw the MED_RANKING_SOURCE file from")
print("Example: C:/Folder_Name/")
directory <- readline(prompt="Enter here: ")
setwd(directory)
}
ranking <- "MED_RANKING_SOURCE.xlsx"
ranking <- read.xlsx(ranking,sheet=1,detectDates=TRUE)
print("Type 'yes' if this patient's ANTHROPOMETRICS_SOURCE file can be found in the same folder as the MED_RANKING_SOURCE table. Type 'no' if it is in a different folder")
answer <- ""
while(tolower(answer)!="yes" & tolower(answer)!="no") {
answer <- readline(prompt="Enter here: ")
}
if (answer=="no") {
print("Input the directory that you wish to draw this patient's ANTHROPOMETRICS_SOURCE file from")
print("Example: C:/Folder_Name/")
directory <- readline(prompt="Enter here: ")
setwd(directory)
}
anthro <- "ANTHROPOMETRICS_SOURCE.xlsx"
anthro <- gsub(" ","",paste(patient,"_",anthro))
anthro <- read.xlsx(anthro,sheet=1,detectDates=TRUE)
print("Type 'yes' if this patient's MED_DATA_SOURCE file can be found in the same folder as this patient's ANTHROPOMETRICS_SOURCE table. Type 'no' if it is in a different folder")
answer <- ""
while(tolower(answer)!="yes" & tolower(answer)!="no") {
answer <- readline(prompt="Enter here: ")
}
if (answer=="no") {
print("Input the directory that you wish to draw this patient's MED_DATA_SOURCE file from")
print("Example: C:/Folder_Name/")
directory <- readline(prompt="Enter here: ")
setwd(directory)
}
data <- "MED_DATA_SOURCE.xlsx"
data <- gsub(" ","",paste(patient,"_",data))
data <- read.xlsx(data,sheet=1,detectDates=TRUE)
## Remove all empty rows from each data frame
data <- data[!is.na(data$MRNUMBER),]
anthro <- anthro[!is.na(anthro$MRNUMBER),]
demo <- demo[!is.na(demo$LAST),]
ranking <- ranking[!is.na(ranking$MED_GENERIC_NAME),]
print("Calculating med intake in mg/kg/day, please wait...")
## Save patient's medical record number and birthdate as R objects
mrnumber <- unique(anthro$MRNUMBER)
birthdate <- demo$DOB[demo$MRNUMBER==mrnumber]
birthdate <- as.Date(birthdate,format="%m/%d/%Y")
first <- unique(demo[demo$MRNUMBER==mrnumber,colnames(demo)=="FIRST"])
last <- unique(demo[demo$MRNUMBER==mrnumber,colnames(demo)=="LAST"])
## Ensure that date is formatted properly (removing time stamp if necessary)
for (i in 1:length(data$DATE)) {
if (nchar(as.character(data$DATE[i]))>10) {
data$DATE <- substr(data$DATE,1,nchar(as.character(data$DATE))-5)
}
}
## Split raw data table into two parts: one where day type = 3, and one where day type != 3
data$DATE <- as.Date(data$DATE,format="%m/%d/%Y")
sub <- data[data$DAY_TYPE==3 & data$DATE <= data[data$DAY_TYPE!=3,colnames(data)=="DATE"][1],]
data <- data[!(data$DATE %in% sub$DATE),]
## Save relevant columns of data table as object 'med_dose'
med_dose <- data[,colnames(data)=="DATE" | colnames(data)=="MED_ID" | colnames(data)=="DAILY_MED_DOSE_MG"]
## Obtain interpolated weights for the anthropometrics source table
anthro <- weight_interp(anthro)
anthro$DATE <- as.Date(anthro$DATE,format="%m/%d/%Y")
last_date <- as.Date(max(max(unique(anthro[,colnames(anthro)=="DATE"])),max(unique(med_dose[,1]))))
## Create table with start and end dates for each period of specific med usage and dosage
END_DATE <- data.frame(rep(unique(med_dose$DATE)[2]-1,length(med_dose[med_dose$DATE==unique(med_dose$DATE)[2-1],2])))
colnames(END_DATE) <- "END_DATE"
if (length(unique(med_dose$DATE))>=3) {
for (i in 3:(length(unique(med_dose$DATE)))) {
end_date <- data.frame(rep(unique(med_dose$DATE)[i]-1,length(med_dose[med_dose$DATE==unique(med_dose$DATE)[i-1],1])))
colnames(end_date) <- "END_DATE"
END_DATE <- data.frame(rbind(END_DATE,end_date))
colnames(END_DATE) <- "END_DATE"
}
}
na <- cbind(rep(NA,length(med_dose[med_dose$DATE==unique(med_dose$DATE)[length(unique(med_dose$DATE))],colnames(med_dose)=="DATE"])))
colnames(na) <- "END_DATE"
END_DATE <- data.frame(rbind(END_DATE,na))
med_dose <- cbind(med_dose$DATE,END_DATE,med_dose[,colnames(med_dose)!="DATE"])
colnames(med_dose)[1] <- "START_DATE"
## Create table with start and end dates for each period of specific weight
anthro <- data.frame(anthro,birthdate)
colnames(anthro)[dim(anthro)[2]] <- "BIRTHDATE"
## Create table 'med_intake' in which med intake per med per day will be calculated based on weight and dosage
med_intake <- data.frame(start.date=as.Date(character()),med.id=character(),
dosage=integer(),weight=integer(),med.intake=integer())
temp_date <- med_dose$START_DATE[1]
for (i in 1:((as.integer(last_date-med_dose$START_DATE[1]))+1)) {
compare <- max(unique(med_dose$START_DATE)[unique(med_dose$START_DATE)<=temp_date])
temp_med_intake <- data.frame(temp_date,med_dose[med_dose$START_DATE==compare,colnames(med_dose)=="MED_ID" | colnames(med_dose)=="DAILY_MED_DOSE_MG"],NA,NA)
colnames(temp_med_intake) <- c("DATE","MED_ID","DOSAGE","WEIGHT","MED_INTAKE")
med_intake <- data.frame(rbind(med_intake,temp_med_intake))
if (temp_date %in% anthro$DATE) {
weight <- anthro[anthro$DATE==temp_date,colnames(anthro)=="WT_DAY"]
} else {
weight <- anthro[anthro$DATE==max(anthro$DATE),colnames(anthro)=="WT_DAY"]
}
med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="WEIGHT"] <- weight
med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="MED_INTAKE"] <- med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="DOSAGE"]/med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="WEIGHT"]
temp_date <- med_dose$START_DATE[1]+i
}
print("Calculating minimum dose in mg/kg/day, please wait...")
## Denominator: Minimum dose in mg/kg/day
## Create table 'med_min_dose' where age per day (rounded to the nearest two decimal places) is
## calculated, and then used to determine the lower med limit for each med each day (with the exception)
## of med with MID0003, where weight is used instead of age)
med_min_dose <- data.frame(med_intake[,colnames(med_intake)!="MED_INTAKE"],AGE=NA,MIN_DOSE=NA)
temp_date <- med_dose$START_DATE[1]
for (i in 1:(as.integer(last_date-med_dose[1,1])+1)) {
duration <- interval(birthdate,temp_date)
med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="AGE"] <- round(duration/dyears(1),2)
for (j in med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="MED_ID"]) {
compare <- unique(med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="AGE"])
if (j == "MID0003") {
compare <- unique(med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="WEIGHT"])
}
for (k in 1:(length(ranking[ranking$MED_ID==j,colnames(ranking)=="MED_ID"]))) {
if ((compare >= ranking[ranking$MED_ID==j,][k,colnames(ranking)=="MED_LIMIT_LOW"]) && (compare < ranking[ranking$MED_ID==j,][k,colnames(ranking)=="MED_LIMIT_HIGH"])) {
med_min_dose[med_min_dose$DATE==temp_date & med_min_dose$MED_ID==j,colnames(med_min_dose)=="MIN_DOSE"] <- ranking[ranking$MED_ID==j,][k,colnames(ranking)=="MED_MIN_DOSE"]
break
}
}
}
temp_date <- temp_date+1
}
print("Calculating med load per day, please wait...")
med_min_dose[,colnames(med_min_dose)=="WEIGHT"] <- round(med_min_dose$WEIGHT,2)
## Use numerator and denominator to calculate the med load
## Calculate med load per med and and total med load per day
med_load <- data.frame(med_min_dose[,colnames(med_min_dose)!="MIN_DOSE"],med_intake[,colnames(med_intake)=="MED_INTAKE"],med_min_dose[,colnames(med_min_dose)=="MIN_DOSE"],MED_LOAD_PER_MED=NA,MED_LOAD_DAY=NA)
colnames(med_load)[c(6,7)] <- c("MED_INTAKE","MIN_DOSE")
med_load$MED_LOAD_PER_MED <- round(med_load$MED_INTAKE/med_load$MIN_DOSE,4)
med_load[,colnames(med_load)=="MED_INTAKE"] <- round(med_load$MED_INTAKE,4)
for (i in unique(med_load$DATE)) {
med_load[med_load$DATE==i,colnames(med_load)=="MED_LOAD_DAY"][1] <- sum(med_load[med_load$DATE==i,colnames(med_load)=="MED_LOAD_PER_MED"])
}
## Calculate total med number per day and display it in the first row of each observed date in the column MED_NUMBER_DAY
med.number.per.day <- rep(NA,dim(med_load)[1])
med_load <- data.frame(med_load,med.number.per.day)
colnames(med_load)[10] <- "MED_NUMBER_DAY"
for (i in unique(med_load[,1])) {
med_load[med_load$DATE==i,colnames(med_load)=="MED_NUMBER_DAY"][1] <- length(med_load[med_load$DATE==i & med_load[,colnames(med_load)=="MED_INTAKE"]!=0,colnames(med_load)=="MED_INTAKE"])
}
med_load <- data.frame(mrnumber,med_load)
colnames(med_load)[1] <- "MRNUMBER"
## Create day type column and determine the range of dates each row of the med load table falls into within the
## raw data table to determine which value to put in this row in column DAY_TYPE
med_load$DATE <- as.Date(med_load$DATE,format="%m/%d/%Y")
na <- rep(NA,dim(med_load)[1])
med_load <- data.frame(med_load[,1:2],na,med_load[,3:11])
colnames(med_load)[3] <- "DAY_TYPE"
for (i in 1:dim(med_load)[1]) {
med_load$DAY_TYPE[i] <- unique(data[data$DATE==max(unique(data$DATE[data$DATE<=med_load$DATE[i]])),colnames(data)=="DAY_TYPE"])
}
## Calculate daily med dose per milligram per kilogram, and then create the two versions of the med load
## table: a temp version for your viewing, and one that will be stored in the MySQL database
daily.dosage <- data.frame(DATE=as.Date(as.character()),DAILY_MED_DOSE_MG=integer(),DAILY_MED_DOSE_MG_KG=integer())
daily.dosage[1:dim(med_load)[1],c("DATE")] <- med_load$DATE
daily.dosage$DAILY_MED_DOSE_MG <- med_load$DOSAGE[med_load$DATE==daily.dosage$DATE]
daily.dosage$DAILY_MED_DOSE_MG_KG <- round(daily.dosage$DAILY_MED_DOSE_MG/(med_load$WEIGHT[med_load$DATE==daily.dosage$DATE]),4)
load <- data.frame(med_load[,1:3],med_load[,4],daily.dosage[,2:3],med_load[,9:10])
colnames(load) <- c("MRNUMBER","DATE","DAY_TYPE","MED_ID","DAILY_MED_DOSE_MG","DAILY_MED_DOSE_MG_KG","MED_MIN_DOSE","MED_LOAD_MED")
colnames(med_load) <- c("MRNUMBER","DATE","DAY_TYPE","MED_ID","DAILY_MED_DOSE_MG","WT","AGE","DAILY_MED_DOSE_MG_KG","MED_MIN_DOSE","MED_LOAD_MED","MED_LOAD_DAY","MED_NUMBER_DAY")
## If there are any days in the raw data table with day type = 3, add those rows to the med load table.
## These observations will be found in the MySQL database
if (dim(sub)[1] > 0) {
sub.med_load <- data.frame(sub[,colnames(sub) %in% c("MRNUMBER","DATE","DAY_TYPE")],
sub[,colnames(sub) %in% c("MED_ID","DAILY_MED_DOSE_MG")],
NA,NA,NA,NA,NA,NA,NA)
colnames(sub.med_load) <- colnames(med_load)
sub.med_load$AGE <- round((sub.med_load$DATE-birthdate)/dyears(1),2)
for (i in 1:dim(sub.med_load)[1]) {
if (sub.med_load$MED_ID[i] != "MID0003") {
sub.med_load$MED_MIN_DOSE[i] <- ranking[ranking$MED_ID==sub.med_load$MED_ID[i]
& sub.med_load$AGE[i] >= ranking$MED_LIMIT_LOW
& sub.med_load$AGE[i] <= ranking$MED_LIMIT_HIGH,
colnames(ranking)=="MED_MIN_DOSE"]
}
}
med_load <- rbind.data.frame(sub.med_load,med_load)
sub.load <- sub.med_load[,colnames(sub.med_load) %in% c("MRNUMBER","DATE","DAY_TYPE",
"MED_ID","DAILY_MED_DOSE_MG",
"DAILY_MED_DOSE_MG_KG","MED_MIN_DOSE",
"MED_LOAD_MED")]
colnames(sub.load) <- colnames(load)
load <- rbind.data.frame(sub.load,load)
}
comments <- rep(NA,dim(med_load)[1])
med_load <- cbind.data.frame(med_load,comments)
colnames(med_load)[dim(med_load)[2]] <- "COMMENTS"
for (i in unique(med_load$DATE)) {
if (length(data[data$DATE==i,c("COMMENTS")])>0) {
med_load[med_load$DATE==i,c("COMMENTS")] <- data[data$DATE==i,c("COMMENTS")]
}
}
observe_load <- FALSE
print("Would you like to save a temporary file to look at the med loads?")
print("Type 'YES' to save a file to look at, type 'NO' to move onto next step")
rl <- " "
while (tolower(rl)!="yes" && tolower(rl)!="no") {
rl <- readline(prompt="Enter here: ")
}
if (tolower(rl)=="yes") {
observe_load <- TRUE
}
if (observe_load == TRUE) {
print("Type 'yes' if you wish to save the MED_LOAD file in the same folder as this patient's MED_DATA_SOURCE table. Type 'no' if it is in a different folder")
answer <- ""
while(tolower(answer)!="yes" & tolower(answer)!="no") {
answer <- readline(prompt="Enter here: ")
}
if (answer=="no") {
print("Input the directory that you wish to save this patient's MED_LOAD file in")
print("Example: C:/Folder_Name/")
directory <- readline(prompt="Enter here: ")
setwd(directory)
}
print(paste("Saving med load table as",gsub(" ","",paste(patient,"_MED_LOAD.xlsx")),"in directory",getwd()))
xlsx <- "MED_LOAD.xlsx"
xlsx <- gsub(" ","",paste(patient,"_",xlsx))
xlsx::write.xlsx2(med_load,file=xlsx,showNA=FALSE,row.names=FALSE)
print("Type 'OKAY' whenever you are ready to move on to the next step")
print("Or type 'QUIT' if you would like to exit")
while (tolower(rl)!="okay" && tolower(rl)!="quit") {
rl <- readline(prompt="Enter here: ")
}
} else {
rl <- "okay"
}
if (tolower(rl)=="okay") {
calculate_med_response(patient,data,med_load,load,first,last)
}
}
| /R/calculate_med_load.R | no_license | jonathanelee1993/clinicalresponsepackage | R | false | false | 15,657 | r | #' Calculate med load
#'
#' This function allow you to start the process of med load/response calculations
#' @keywords med load
#' @export
#' @importFrom lubridate interval
#' @importFrom lubridate dyears
#' @import RMySQL
#' @import ggplot2
#' @import openxlsx
#' @examples
#' calculate_seizure_load()
# Purpose - This script will take raw med data from the FILA_MED_DATA_SOURCE file, ranking data from
# the MED_RANKING_SOURCE file, anthropometrics data from the FILA_ANTHROPOMETRICS_CLINICAL
# file, and demographics data from the DEMOGRAPHICS_SOURCE file and calculate daily seizure loads
# based on the ranking values and anthropometric/demographics data given for that patient
# The script will give you as the user the option to specify the identifier signifying
# the patient you wish to run this script for, and the directory where the two needed xlsx files
# can be found
# When the script finishes calculating daily seizure loads, you will be asked if you wish to save
# a FILA_MED_LOAD file into the work directory for your viewing
calculate_med_load <- function() {
  ## Read in all relevant data from xlsx files
  print("Input the identifier that signify the patient we are doing calculations for")
  print("Example: FILA")
  patient <- readline(prompt="Enter here: ")
  print("Input the directory that you wish to draw this patient's DEMOGRAPHICS_SOURCE file from")
  print("Example: C:/Folder_Name/")
  directory <- readline(prompt="Enter here: ")
  setwd(directory)
  demo <- "DEMOGRAPHICS_SOURCE.xlsx"
  demo <- read.xlsx(demo,sheet=1,detectDates=TRUE)
  print("Type 'yes' if the MED_RANKING_SOURCE file can be found in the same folder as the DEMOGRAPHICS_SOURCE table. Type 'no' if it is in a different folder")
  answer <- ""
  while(tolower(answer)!="yes" & tolower(answer)!="no") {
    answer <- readline(prompt="Enter here: ")
  }
  ## BUGFIX: compare case-insensitively -- the validation loop above accepts
  ## "NO"/"No", which the old check (answer=="no") silently ignored, skipping
  ## the directory prompt.
  if (tolower(answer)=="no") {
    print("Input the directory that you wish to draw the MED_RANKING_SOURCE file from")
    print("Example: C:/Folder_Name/")
    directory <- readline(prompt="Enter here: ")
    setwd(directory)
  }
  ranking <- "MED_RANKING_SOURCE.xlsx"
  ranking <- read.xlsx(ranking,sheet=1,detectDates=TRUE)
  print("Type 'yes' if this patient's ANTHROPOMETRICS_SOURCE file can be found in the same folder as the MED_RANKING_SOURCE table. Type 'no' if it is in a different folder")
  answer <- ""
  while(tolower(answer)!="yes" & tolower(answer)!="no") {
    answer <- readline(prompt="Enter here: ")
  }
  if (tolower(answer)=="no") {  ## BUGFIX: was case-sensitive (answer=="no")
    print("Input the directory that you wish to draw this patient's ANTHROPOMETRICS_SOURCE file from")
    print("Example: C:/Folder_Name/")
    directory <- readline(prompt="Enter here: ")
    setwd(directory)
  }
  anthro <- "ANTHROPOMETRICS_SOURCE.xlsx"
  anthro <- gsub(" ","",paste(patient,"_",anthro))
  anthro <- read.xlsx(anthro,sheet=1,detectDates=TRUE)
  print("Type 'yes' if this patient's MED_DATA_SOURCE file can be found in the same folder as this patient's ANTHROPOMETRICS_SOURCE table. Type 'no' if it is in a different folder")
  answer <- ""
  while(tolower(answer)!="yes" & tolower(answer)!="no") {
    answer <- readline(prompt="Enter here: ")
  }
  if (tolower(answer)=="no") {  ## BUGFIX: was case-sensitive (answer=="no")
    print("Input the directory that you wish to draw this patient's MED_DATA_SOURCE file from")
    print("Example: C:/Folder_Name/")
    directory <- readline(prompt="Enter here: ")
    setwd(directory)
  }
  data <- "MED_DATA_SOURCE.xlsx"
  data <- gsub(" ","",paste(patient,"_",data))
  data <- read.xlsx(data,sheet=1,detectDates=TRUE)
  ## Remove all empty rows from each data frame
  data <- data[!is.na(data$MRNUMBER),]
  anthro <- anthro[!is.na(anthro$MRNUMBER),]
  demo <- demo[!is.na(demo$LAST),]
  ranking <- ranking[!is.na(ranking$MED_GENERIC_NAME),]
  print("Calculating med intake in mg/kg/day, please wait...")
  ## Save patient's medical record number and birthdate as R objects
  mrnumber <- unique(anthro$MRNUMBER)
  birthdate <- demo$DOB[demo$MRNUMBER==mrnumber]
  birthdate <- as.Date(birthdate,format="%m/%d/%Y")
  first <- unique(demo[demo$MRNUMBER==mrnumber,colnames(demo)=="FIRST"])
  last <- unique(demo[demo$MRNUMBER==mrnumber,colnames(demo)=="LAST"])
  ## Ensure that date is formatted properly (removing time stamp if necessary)
  for (i in 1:length(data$DATE)) {
    if (nchar(as.character(data$DATE[i]))>10) {
      data$DATE <- substr(data$DATE,1,nchar(as.character(data$DATE))-5)
    }
  }
  ## Split raw data table into two parts: one where day type = 3, and one where day type != 3
  data$DATE <- as.Date(data$DATE,format="%m/%d/%Y")
  sub <- data[data$DAY_TYPE==3 & data$DATE <= data[data$DAY_TYPE!=3,colnames(data)=="DATE"][1],]
  data <- data[!(data$DATE %in% sub$DATE),]
  ## Save relevant columns of data table as object 'med_dose'
  med_dose <- data[,colnames(data)=="DATE" | colnames(data)=="MED_ID" | colnames(data)=="DAILY_MED_DOSE_MG"]
  ## Obtain interpolated weights for the anthropometrics source table
  anthro <- weight_interp(anthro)
  anthro$DATE <- as.Date(anthro$DATE,format="%m/%d/%Y")
  last_date <- as.Date(max(max(unique(anthro[,colnames(anthro)=="DATE"])),max(unique(med_dose[,1]))))
  ## Create table with start and end dates for each period of specific med usage and dosage
  END_DATE <- data.frame(rep(unique(med_dose$DATE)[2]-1,length(med_dose[med_dose$DATE==unique(med_dose$DATE)[2-1],2])))
  colnames(END_DATE) <- "END_DATE"
  if (length(unique(med_dose$DATE))>=3) {
    for (i in 3:(length(unique(med_dose$DATE)))) {
      end_date <- data.frame(rep(unique(med_dose$DATE)[i]-1,length(med_dose[med_dose$DATE==unique(med_dose$DATE)[i-1],1])))
      colnames(end_date) <- "END_DATE"
      END_DATE <- data.frame(rbind(END_DATE,end_date))
      colnames(END_DATE) <- "END_DATE"
    }
  }
  na <- cbind(rep(NA,length(med_dose[med_dose$DATE==unique(med_dose$DATE)[length(unique(med_dose$DATE))],colnames(med_dose)=="DATE"])))
  colnames(na) <- "END_DATE"
  END_DATE <- data.frame(rbind(END_DATE,na))
  med_dose <- cbind(med_dose$DATE,END_DATE,med_dose[,colnames(med_dose)!="DATE"])
  colnames(med_dose)[1] <- "START_DATE"
  ## Create table with start and end dates for each period of specific weight
  anthro <- data.frame(anthro,birthdate)
  colnames(anthro)[dim(anthro)[2]] <- "BIRTHDATE"
  ## Create table 'med_intake' in which med intake per med per day will be calculated based on weight and dosage
  med_intake <- data.frame(start.date=as.Date(character()),med.id=character(),
                           dosage=integer(),weight=integer(),med.intake=integer())
  temp_date <- med_dose$START_DATE[1]
  for (i in 1:((as.integer(last_date-med_dose$START_DATE[1]))+1)) {
    compare <- max(unique(med_dose$START_DATE)[unique(med_dose$START_DATE)<=temp_date])
    temp_med_intake <- data.frame(temp_date,med_dose[med_dose$START_DATE==compare,colnames(med_dose)=="MED_ID" | colnames(med_dose)=="DAILY_MED_DOSE_MG"],NA,NA)
    colnames(temp_med_intake) <- c("DATE","MED_ID","DOSAGE","WEIGHT","MED_INTAKE")
    med_intake <- data.frame(rbind(med_intake,temp_med_intake))
    if (temp_date %in% anthro$DATE) {
      weight <- anthro[anthro$DATE==temp_date,colnames(anthro)=="WT_DAY"]
    } else {
      ## Past the last anthropometric observation: carry the latest weight forward
      weight <- anthro[anthro$DATE==max(anthro$DATE),colnames(anthro)=="WT_DAY"]
    }
    med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="WEIGHT"] <- weight
    med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="MED_INTAKE"] <- med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="DOSAGE"]/med_intake[med_intake$DATE==temp_date,colnames(med_intake)=="WEIGHT"]
    temp_date <- med_dose$START_DATE[1]+i
  }
  print("Calculating minimum dose in mg/kg/day, please wait...")
  ## Denominator: Minimum dose in mg/kg/day
  ## Create table 'med_min_dose' where age per day (rounded to the nearest two decimal places) is
  ## calculated, and then used to determine the lower med limit for each med each day (with the exception)
  ## of med with MID0003, where weight is used instead of age)
  med_min_dose <- data.frame(med_intake[,colnames(med_intake)!="MED_INTAKE"],AGE=NA,MIN_DOSE=NA)
  temp_date <- med_dose$START_DATE[1]
  for (i in 1:(as.integer(last_date-med_dose[1,1])+1)) {
    duration <- interval(birthdate,temp_date)
    med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="AGE"] <- round(duration/dyears(1),2)
    for (j in med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="MED_ID"]) {
      compare <- unique(med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="AGE"])
      if (j == "MID0003") {
        compare <- unique(med_min_dose[med_min_dose$DATE==temp_date,colnames(med_min_dose)=="WEIGHT"])
      }
      for (k in 1:(length(ranking[ranking$MED_ID==j,colnames(ranking)=="MED_ID"]))) {
        if ((compare >= ranking[ranking$MED_ID==j,][k,colnames(ranking)=="MED_LIMIT_LOW"]) && (compare < ranking[ranking$MED_ID==j,][k,colnames(ranking)=="MED_LIMIT_HIGH"])) {
          med_min_dose[med_min_dose$DATE==temp_date & med_min_dose$MED_ID==j,colnames(med_min_dose)=="MIN_DOSE"] <- ranking[ranking$MED_ID==j,][k,colnames(ranking)=="MED_MIN_DOSE"]
          break
        }
      }
    }
    temp_date <- temp_date+1
  }
  print("Calculating med load per day, please wait...")
  med_min_dose[,colnames(med_min_dose)=="WEIGHT"] <- round(med_min_dose$WEIGHT,2)
  ## Use numerator and denominator to calculate the med load
  ## Calculate med load per med and and total med load per day
  med_load <- data.frame(med_min_dose[,colnames(med_min_dose)!="MIN_DOSE"],med_intake[,colnames(med_intake)=="MED_INTAKE"],med_min_dose[,colnames(med_min_dose)=="MIN_DOSE"],MED_LOAD_PER_MED=NA,MED_LOAD_DAY=NA)
  colnames(med_load)[c(6,7)] <- c("MED_INTAKE","MIN_DOSE")
  med_load$MED_LOAD_PER_MED <- round(med_load$MED_INTAKE/med_load$MIN_DOSE,4)
  med_load[,colnames(med_load)=="MED_INTAKE"] <- round(med_load$MED_INTAKE,4)
  for (i in unique(med_load$DATE)) {
    med_load[med_load$DATE==i,colnames(med_load)=="MED_LOAD_DAY"][1] <- sum(med_load[med_load$DATE==i,colnames(med_load)=="MED_LOAD_PER_MED"])
  }
  ## Calculate total med number per day and display it in the first row of each observed date in the column MED_NUMBER_DAY
  med.number.per.day <- rep(NA,dim(med_load)[1])
  med_load <- data.frame(med_load,med.number.per.day)
  colnames(med_load)[10] <- "MED_NUMBER_DAY"
  for (i in unique(med_load[,1])) {
    med_load[med_load$DATE==i,colnames(med_load)=="MED_NUMBER_DAY"][1] <- length(med_load[med_load$DATE==i & med_load[,colnames(med_load)=="MED_INTAKE"]!=0,colnames(med_load)=="MED_INTAKE"])
  }
  med_load <- data.frame(mrnumber,med_load)
  colnames(med_load)[1] <- "MRNUMBER"
  ## Create day type column and determine the range of dates each row of the med load table falls into within the
  ## raw data table to determine which value to put in this row in column DAY_TYPE
  med_load$DATE <- as.Date(med_load$DATE,format="%m/%d/%Y")
  na <- rep(NA,dim(med_load)[1])
  med_load <- data.frame(med_load[,1:2],na,med_load[,3:11])
  colnames(med_load)[3] <- "DAY_TYPE"
  for (i in 1:dim(med_load)[1]) {
    med_load$DAY_TYPE[i] <- unique(data[data$DATE==max(unique(data$DATE[data$DATE<=med_load$DATE[i]])),colnames(data)=="DAY_TYPE"])
  }
  ## Calculate daily med dose per milligram per kilogram, and then create the two versions of the med load
  ## table: a temp version for your viewing, and one that will be stored in the MySQL database
  daily.dosage <- data.frame(DATE=as.Date(as.character()),DAILY_MED_DOSE_MG=integer(),DAILY_MED_DOSE_MG_KG=integer())
  daily.dosage[1:dim(med_load)[1],c("DATE")] <- med_load$DATE
  daily.dosage$DAILY_MED_DOSE_MG <- med_load$DOSAGE[med_load$DATE==daily.dosage$DATE]
  daily.dosage$DAILY_MED_DOSE_MG_KG <- round(daily.dosage$DAILY_MED_DOSE_MG/(med_load$WEIGHT[med_load$DATE==daily.dosage$DATE]),4)
  load <- data.frame(med_load[,1:3],med_load[,4],daily.dosage[,2:3],med_load[,9:10])
  colnames(load) <- c("MRNUMBER","DATE","DAY_TYPE","MED_ID","DAILY_MED_DOSE_MG","DAILY_MED_DOSE_MG_KG","MED_MIN_DOSE","MED_LOAD_MED")
  colnames(med_load) <- c("MRNUMBER","DATE","DAY_TYPE","MED_ID","DAILY_MED_DOSE_MG","WT","AGE","DAILY_MED_DOSE_MG_KG","MED_MIN_DOSE","MED_LOAD_MED","MED_LOAD_DAY","MED_NUMBER_DAY")
  ## If there are any days in the raw data table with day type = 3, add those rows to the med load table.
  ## These observations will be found in the MySQL database
  if (dim(sub)[1] > 0) {
    sub.med_load <- data.frame(sub[,colnames(sub) %in% c("MRNUMBER","DATE","DAY_TYPE")],
                               sub[,colnames(sub) %in% c("MED_ID","DAILY_MED_DOSE_MG")],
                               NA,NA,NA,NA,NA,NA,NA)
    colnames(sub.med_load) <- colnames(med_load)
    sub.med_load$AGE <- round((sub.med_load$DATE-birthdate)/dyears(1),2)
    for (i in 1:dim(sub.med_load)[1]) {
      if (sub.med_load$MED_ID[i] != "MID0003") {
        sub.med_load$MED_MIN_DOSE[i] <- ranking[ranking$MED_ID==sub.med_load$MED_ID[i]
                                                & sub.med_load$AGE[i] >= ranking$MED_LIMIT_LOW
                                                & sub.med_load$AGE[i] <= ranking$MED_LIMIT_HIGH,
                                                colnames(ranking)=="MED_MIN_DOSE"]
      }
    }
    med_load <- rbind.data.frame(sub.med_load,med_load)
    sub.load <- sub.med_load[,colnames(sub.med_load) %in% c("MRNUMBER","DATE","DAY_TYPE",
                                                            "MED_ID","DAILY_MED_DOSE_MG",
                                                            "DAILY_MED_DOSE_MG_KG","MED_MIN_DOSE",
                                                            "MED_LOAD_MED")]
    colnames(sub.load) <- colnames(load)
    load <- rbind.data.frame(sub.load,load)
  }
  ## Carry free-text comments from the raw data table over to the med load table
  comments <- rep(NA,dim(med_load)[1])
  med_load <- cbind.data.frame(med_load,comments)
  colnames(med_load)[dim(med_load)[2]] <- "COMMENTS"
  for (i in unique(med_load$DATE)) {
    if (length(data[data$DATE==i,c("COMMENTS")])>0) {
      med_load[med_load$DATE==i,c("COMMENTS")] <- data[data$DATE==i,c("COMMENTS")]
    }
  }
  observe_load <- FALSE
  print("Would you like to save a temporary file to look at the med loads?")
  print("Type 'YES' to save a file to look at, type 'NO' to move onto next step")
  rl <- " "
  while (tolower(rl)!="yes" && tolower(rl)!="no") {
    rl <- readline(prompt="Enter here: ")
  }
  if (tolower(rl)=="yes") {
    observe_load <- TRUE
  }
  if (observe_load == TRUE) {
    print("Type 'yes' if you wish to save the MED_LOAD file in the same folder as this patient's MED_DATA_SOURCE table. Type 'no' if it is in a different folder")
    answer <- ""
    while(tolower(answer)!="yes" & tolower(answer)!="no") {
      answer <- readline(prompt="Enter here: ")
    }
    if (tolower(answer)=="no") {  ## BUGFIX: was case-sensitive (answer=="no")
      print("Input the directory that you wish to save this patient's MED_LOAD file in")
      print("Example: C:/Folder_Name/")
      directory <- readline(prompt="Enter here: ")
      setwd(directory)
    }
    print(paste("Saving med load table as",gsub(" ","",paste(patient,"_MED_LOAD.xlsx")),"in directory",getwd()))
    xlsx <- "MED_LOAD.xlsx"
    xlsx <- gsub(" ","",paste(patient,"_",xlsx))
    ## BUGFIX: this package imports openxlsx (see roxygen header), not the
    ## Java-based xlsx package, so xlsx::write.xlsx2 could fail at run time.
    ## openxlsx::write.xlsx writes NA cells blank and omits row names by
    ## default, matching the old showNA=FALSE/row.names=FALSE arguments.
    openxlsx::write.xlsx(med_load,file=xlsx)
    print("Type 'OKAY' whenever you are ready to move on to the next step")
    print("Or type 'QUIT' if you would like to exit")
    while (tolower(rl)!="okay" && tolower(rl)!="quit") {
      rl <- readline(prompt="Enter here: ")
    }
  } else {
    rl <- "okay"
  }
  if (tolower(rl)=="okay") {
    calculate_med_response(patient,data,med_load,load,first,last)
  }
}
|
# Load per-file source metrics exported as CSVs (one row per source file).
lineCounts <- read.csv("lineCounts.csv")
vocabCounts <- read.csv("vocabCounts.csv")
commentCounts <- read.csv("commentCounts.csv")
library(ggplot2)
# Scatter plot of comment count vs line count, coloured by vocabulary size;
# files with more than 100 comments are labelled with their file name.
# NOTE(review): `counts` is never defined in this script -- presumably the
# three CSVs read above are meant to be merged (by file) into a single
# `counts` data frame first; confirm against how this script is invoked.
ggplot(counts,aes(x=Lines,y=Comments)) +
  geom_point(aes(color=Vocabulary)) +
geom_text(data=counts[counts$Comments>100,],aes(label=File)) | /dataFiles/visualisationScript.R | no_license | zabdulz/jfreechart | R | false | false | 292 | r | lineCounts <- read.csv("lineCounts.csv")
vocabCounts <- read.csv("vocabCounts.csv")
commentCounts <- read.csv("commentCounts.csv")
library(ggplot2)
ggplot(counts,aes(x=Lines,y=Comments)) +
geom_point(aes(color=Vocabulary)) +
geom_text(data=counts[counts$Comments>100,],aes(label=File)) |
# Smoke test: confirms the package attaches and the test machinery runs.
testthat::context("package loads")
# BDD-style spec (describe/it); the body asserts a tautology (1L == 1L),
# so a failure here indicates a broken test harness rather than broken code.
testthat::describe('package loads',{
  it('an expectation',{
    testthat::expect_true({1L==1L})
  })
}) | /tests/testthat/test-loads.R | permissive | yonicd/reactor | R | false | false | 146 | r | testthat::context("package loads")
testthat::describe('package loads',{
it('an expectation',{
testthat::expect_true({1L==1L})
})
}) |
# function(m) sum(dpois(data, m, log = TRUE))
lambda <- 5.2
# Poisson log-likelihood of each observation `data[i]` at rate `lambda`:
#   log L_i = y_i * log(lambda) - lambda - log(y_i!)
# Vectorized form: lfactorial(y) equals sum(log(1:y)) (and 0 for y == 0),
# so the original nested loops that hand-accumulated log-factorials are
# unnecessary. This also avoids the 1:length(data) / 1:data[i] pitfalls
# (both misbehave when their upper bound is 0) and the locals `c` and
# `log_likelihood` built element by element.
log_likelihood <- data * log(lambda) - lambda - lfactorial(data)
sum(log_likelihood)
| /log_likelihood.R | no_license | yukinagae/midoribon | R | false | false | 317 | r | # function(m) sum(dpois(data, m, log = TRUE))
lambda <- 5.2
log_likelihood <- vector(mode = "integer")
for (i in 1:length(data)) {
a <- data[i]*log(lambda)
b <- lambda
c <- vector(mode = "numeric")
for (k in 1:data[i]) {
c[k] <- log(k)
}
log_likelihood[i] <- (a - b - sum(c))
}
sum(log_likelihood)
|
# Exponential saturation toward an asymptote:
#   c(t) = c_f + (c_0 - c_f) * exp((t0 - t) / Tc)
# starts at c_0 when t = t0 and relaxes toward c_f with time constant Tc.
c_f <- 20   # final (asymptotic) value
c_0 <- 4    # initial value at t = t0
t0 <- 0     # start time
Tc <- 5     # time constant
t <- 0:10
# Use <- for assignment (the original used `=` at top level, against R style).
c <- c_f + (c_0-c_f)*exp((t0-t)/Tc)
plot(t, c)
| /saturate.R | no_license | dushoff/scratch | R | false | false | 94 | r | c_f <- 20
c_0 <- 4
t0 <- 0
Tc <- 5
t <- 0:10
c = c_f + (c_0-c_f)*exp((t0-t)/Tc)
plot(t, c)
|
# Build the main HTML summary page for one MLB team.
#
# mlb.raw  - raw season data (passed through; not used directly in this body).
# mlb.vc   - per-player value-contribution data, consumed via combineTeam().
# mlb.team - one row per team; row names are team identifiers. Columns include
#            win/run totals (W, W.pyth, W.WC, RS, RA), rate stats
#            (OBP, SLG, OPS, ERA, WHIP) and VC.* value-contribution columns.
# team     - team identifier; must match a row name of mlb.team.
#
# Side effect: writes <mlb.par@loc>/<mlb.par@year>/<team>.html and returns
# nothing useful. `mlb.par` is a global object with @model/@loc/@year slots
# defined elsewhere in the package -- presumably model parameters; confirm.
makeTeamMain <- function(mlb.raw, mlb.vc, mlb.team, team) {
  # Logical row selector for the requested team.
  i <- row.names(mlb.team)==team
  tv <- mlb.team
  # WC.* ("win contribution") columns scale VC.* by the model's R parameter.
  tv$WC.Off <- tv$VC.Off * mlb.par@model["R"]
  tv$WC.Def <- tv$VC.Def * mlb.par@model["R"]
  tv$WC.DefPitch <- tv$VC.DefPitch * mlb.par@model["R"]
  tv$WC.DefField <- tv$VC.DefField * mlb.par@model["R"]
  # League-wide ranks: negate so that larger values rank first, except for
  # RA/ERA/WHIP where smaller is better and the plain rank is used.
  tr <- apply(-tv,2,rank)
  tr[,c("RA","ERA","WHIP")] <- apply(tv[,c("RA","ERA","WHIP")],2,rank)
  tr <- round(tr)
  # Round display values to conventional precision per stat family.
  tv[,c("W.pyth","W.WC")] <- round(tv[,c("W.pyth","W.WC")])
  tv[,c("ERA","WHIP")] <- round(tv[,c("ERA","WHIP")],digits=2)
  tv[,c("OBP","SLG","OPS")] <- round(tv[,c("OBP","SLG","OPS")],digits=3)
  tv[,grep("VC",colnames(tv))] <- round(tv[,grep("VC",colnames(tv))],digits=1)
  tv[,grep("WC",colnames(tv))] <- round(tv[,grep("WC",colnames(tv))],digits=1)
  # Four value/rank panels: overall wins & runs, batting, pitching, fielding.
  cat1 <- c("W","W.pyth","W.WC","RS","RA","WC.Off","WC.Def","VC.Off","VC.Def")
  X1 <- cbind(as.character(tv[i,cat1]), as.character(tr[i,cat1]))
  rownames(X1) <- cat1
  cat2 <- c("OBP","SLG","OPS","VC.1B","VC.2B","VC.3B","VC.HR","VC.H","VC.BB","VC.SB")
  X2 <- cbind(as.numeric(tv[i,cat2]), as.character(tr[i,cat2]))
  rownames(X2) <- cat2
  cat3 <- c("ERA","WHIP","VC.SOA","VC.BBA","VC.HRA","VC.BIP","VC.BABIP","VC.DefPitch","WC.DefPitch")
  X3 <- cbind(as.numeric(tv[i,cat3]), as.character(tr[i,cat3]))
  rownames(X3) <- cat3
  cat4 <- c("VC.Arm","VC.DP","VC.Rng","VC.Err","VC.SBA","VC.DefField","WC.DefField")
  X4 <- cbind(as.numeric(tv[i,cat4]), as.character(tr[i,cat4]))
  rownames(X4) <- cat4
  colnames(X1) <- colnames(X2) <- colnames(X3) <- colnames(X4) <- c("Value","Rank")
  aln <- c("r","r")
  # Render each panel as a sortable HTML table.
  display1 <- knitr::kable(X1, format='html', align=aln, table.attr='class="sortable ctable"')
  display2 <- knitr::kable(X2, format='html', align=aln, table.attr='class="sortable ctable"')
  display3 <- knitr::kable(X3, format='html', align=aln, table.attr='class="sortable ctable"')
  display4 <- knitr::kable(X4, format='html', align=aln, table.attr='class="sortable ctable"')
  # Per-player table for this team, sorted by descending win contribution.
  Y <- combineTeam(mlb.vc, team)
  dig <- c(0, rep(1,3))
  ind <- sort(Y[,"WC"],ind=T,dec=T)$ix
  aln <- rep('r', ncol(Y))
  display5 <- knitr::kable(Y[ind,], digits=dig, format='html', align=aln, table.attr='class="sortable ctable"')
  # Assemble the page: YAML front matter, nav links, the four panels side by
  # side in one HTML table row, then the player table underneath.
  f <- paste(mlb.par@loc, "/", mlb.par@year, "/", team, ".html", sep="")
  cat(paste0("---\nyear: ", mlb.par@year, "\nrel: ../\n---\n"), file=f)
  cat("<a href=", team,"_batting.html> Batting </a> <br><br>\n",sep="", file=f, append=TRUE)
  cat("<a href=", team,"_pitching.html> Pitching </a> <br><br>\n",sep="", file=f, append=TRUE)
  cat("<a href=", team,"_fielding.html> Fielding </a> <br><br>\n",sep="", file=f, append=TRUE)
  cat("<TABLE class=\"container\">\n<TR><TD>", file=f, append=TRUE)
  cat(display1, file=f, append=TRUE)
  cat("</TD>\n<TD>", file=f, append=TRUE)
  cat(display2, file=f, append=TRUE)
  cat("</TD>\n<TD>", file=f, append=TRUE)
  cat(display3, file=f, append=TRUE)
  cat("</TD>\n<TD>", file=f, append=TRUE)
  cat(display4, file=f, append=TRUE)
  cat("</TD></TR></TABLE>\n", file=f, append=TRUE)
  cat(display5, file=f, append=TRUE)
}
| /functions/html/makeTeamMain.R | no_license | sportModel/mlb | R | false | false | 3,082 | r | makeTeamMain <- function(mlb.raw, mlb.vc, mlb.team, team) {
i <- row.names(mlb.team)==team
tv <- mlb.team
tv$WC.Off <- tv$VC.Off * mlb.par@model["R"]
tv$WC.Def <- tv$VC.Def * mlb.par@model["R"]
tv$WC.DefPitch <- tv$VC.DefPitch * mlb.par@model["R"]
tv$WC.DefField <- tv$VC.DefField * mlb.par@model["R"]
tr <- apply(-tv,2,rank)
tr[,c("RA","ERA","WHIP")] <- apply(tv[,c("RA","ERA","WHIP")],2,rank)
tr <- round(tr)
tv[,c("W.pyth","W.WC")] <- round(tv[,c("W.pyth","W.WC")])
tv[,c("ERA","WHIP")] <- round(tv[,c("ERA","WHIP")],digits=2)
tv[,c("OBP","SLG","OPS")] <- round(tv[,c("OBP","SLG","OPS")],digits=3)
tv[,grep("VC",colnames(tv))] <- round(tv[,grep("VC",colnames(tv))],digits=1)
tv[,grep("WC",colnames(tv))] <- round(tv[,grep("WC",colnames(tv))],digits=1)
cat1 <- c("W","W.pyth","W.WC","RS","RA","WC.Off","WC.Def","VC.Off","VC.Def")
X1 <- cbind(as.character(tv[i,cat1]), as.character(tr[i,cat1]))
rownames(X1) <- cat1
cat2 <- c("OBP","SLG","OPS","VC.1B","VC.2B","VC.3B","VC.HR","VC.H","VC.BB","VC.SB")
X2 <- cbind(as.numeric(tv[i,cat2]), as.character(tr[i,cat2]))
rownames(X2) <- cat2
cat3 <- c("ERA","WHIP","VC.SOA","VC.BBA","VC.HRA","VC.BIP","VC.BABIP","VC.DefPitch","WC.DefPitch")
X3 <- cbind(as.numeric(tv[i,cat3]), as.character(tr[i,cat3]))
rownames(X3) <- cat3
cat4 <- c("VC.Arm","VC.DP","VC.Rng","VC.Err","VC.SBA","VC.DefField","WC.DefField")
X4 <- cbind(as.numeric(tv[i,cat4]), as.character(tr[i,cat4]))
rownames(X4) <- cat4
colnames(X1) <- colnames(X2) <- colnames(X3) <- colnames(X4) <- c("Value","Rank")
aln <- c("r","r")
display1 <- knitr::kable(X1, format='html', align=aln, table.attr='class="sortable ctable"')
display2 <- knitr::kable(X2, format='html', align=aln, table.attr='class="sortable ctable"')
display3 <- knitr::kable(X3, format='html', align=aln, table.attr='class="sortable ctable"')
display4 <- knitr::kable(X4, format='html', align=aln, table.attr='class="sortable ctable"')
Y <- combineTeam(mlb.vc, team)
dig <- c(0, rep(1,3))
ind <- sort(Y[,"WC"],ind=T,dec=T)$ix
aln <- rep('r', ncol(Y))
display5 <- knitr::kable(Y[ind,], digits=dig, format='html', align=aln, table.attr='class="sortable ctable"')
f <- paste(mlb.par@loc, "/", mlb.par@year, "/", team, ".html", sep="")
cat(paste0("---\nyear: ", mlb.par@year, "\nrel: ../\n---\n"), file=f)
cat("<a href=", team,"_batting.html> Batting </a> <br><br>\n",sep="", file=f, append=TRUE)
cat("<a href=", team,"_pitching.html> Pitching </a> <br><br>\n",sep="", file=f, append=TRUE)
cat("<a href=", team,"_fielding.html> Fielding </a> <br><br>\n",sep="", file=f, append=TRUE)
cat("<TABLE class=\"container\">\n<TR><TD>", file=f, append=TRUE)
cat(display1, file=f, append=TRUE)
cat("</TD>\n<TD>", file=f, append=TRUE)
cat(display2, file=f, append=TRUE)
cat("</TD>\n<TD>", file=f, append=TRUE)
cat(display3, file=f, append=TRUE)
cat("</TD>\n<TD>", file=f, append=TRUE)
cat(display4, file=f, append=TRUE)
cat("</TD></TR></TABLE>\n", file=f, append=TRUE)
cat(display5, file=f, append=TRUE)
}
|
library(jsonlite)
get_pbp_data <- function(game_id, date, lowercase=TRUE){
date <- get_nba_date_id(date)
data <- fromJSON(paste0('http://data.nba.net/json/cms/noseason/game/',
date, '/', game_id, '/pbp_all.json'))
data <- data$sports_content$game$play
if (lowercase)
data <- data %>%
mutate_if(is.character, tolower)
return (data.frame(data))
}
get_games <- function(date, statuses=c(2, 3)){
date <- get_nba_date_id(date)
data <- fromJSON(paste0('http://data.nba.net/10s/prod/v1/', date, '/scoreboard.json'))$games %>%
filter(statusNum %in% statuses) %>% #game is in progress or finished
select(gameId, vTeam, hTeam) %>%
mutate(hTeam=hTeam$triCode,
vTeam=vTeam$triCode)
return (data)
}
get_nba_date_id <- function(date){
format(as.Date(date), format='%Y%m%d')
}
get_team_logo <- function(team_abr){
.check_api_then_save_local(
paste0('input_img/team_logos/', tolower(team_abr), '.png'),
paste0('https://a.espncdn.com/i/teamlogos/nba/500/',
tolower(team_abr), '.png'))
}
get_player_headshot <- function(player_id){
.check_api_then_save_local(
paste0('input_img/player_headshots/', player_id, '.png'),
paste0('https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/',
player_id, '.png'))
}
get_player_action_shot <- function(player_id){
.check_api_then_save_local(
paste0('input_img/player_action_shots/', player_id, '.png'),
paste0('https://ak-static.cms.nba.com/wp-content/uploads/silos/nba/latest/440x700/',
player_id, '.png'))
}
.check_api_then_save_local <- function(fp, url){
if (!file.exists(fp)){
img <- png::readPNG(RCurl::getURLContent(url))
writePNG(img, fp)
} else
img <- png::readPNG(fp)
return (img)
} | /nba_api.R | no_license | ethan9carpenter/NBA-Game-Flow | R | false | false | 1,836 | r | library(jsonlite)
# Fetch the play-by-play feed for one NBA game from data.nba.net and return
# it as a data.frame.
#
# game_id   - NBA game identifier, interpolated into the feed URL.
# date      - game date; converted to the feed's YYYYMMDD id via get_nba_date_id().
# lowercase - if TRUE (default), all character columns are lower-cased.
#
# NOTE(review): %>% and mutate_if() come from dplyr/magrittr, but only
# jsonlite is attached at the top of this file -- confirm dplyr is loaded
# by the caller before this runs.
get_pbp_data <- function(game_id, date, lowercase=TRUE){
  date <- get_nba_date_id(date)
  data <- fromJSON(paste0('http://data.nba.net/json/cms/noseason/game/',
                          date, '/', game_id, '/pbp_all.json'))
  # Drill into the nested JSON structure to reach the list of plays.
  data <- data$sports_content$game$play
  if (lowercase)
    data <- data %>%
      mutate_if(is.character, tolower)
  return (data.frame(data))
}
# Fetch the scoreboard for a date and return one row per game with the game
# id and the home/visitor team tri-codes.
#
# date     - game date; converted to the feed's YYYYMMDD id via get_nba_date_id().
# statuses - statusNum values to keep; defaults to c(2, 3), which the inline
#            comment below identifies as in-progress or finished games.
#
# NOTE(review): filter/select/mutate/%>% come from dplyr, but only jsonlite
# is attached at the top of this file -- confirm dplyr is loaded by the caller.
get_games <- function(date, statuses=c(2, 3)){
  date <- get_nba_date_id(date)
  data <- fromJSON(paste0('http://data.nba.net/10s/prod/v1/', date, '/scoreboard.json'))$games %>%
    filter(statusNum %in% statuses) %>% #game is in progress or finished
    select(gameId, vTeam, hTeam) %>%
    # Collapse the nested team records down to their three-letter codes.
    mutate(hTeam=hTeam$triCode,
           vTeam=vTeam$triCode)
  return (data)
}
# Convert a date (Date object or parseable string) into the compact
# YYYYMMDD identifier used by the data.nba.net URL scheme.
get_nba_date_id <- function(date){
  parsed <- as.Date(date)
  format(parsed, format='%Y%m%d')
}
# Return the logo image for `team_abr`, reading it from the local cache at
# input_img/team_logos/ or downloading it from ESPN on a cache miss.
get_team_logo <- function(team_abr){
  abbrev <- tolower(team_abr)
  cache_path <- paste0('input_img/team_logos/', abbrev, '.png')
  source_url <- paste0('https://a.espncdn.com/i/teamlogos/nba/500/', abbrev, '.png')
  .check_api_then_save_local(cache_path, source_url)
}
# Return the headshot image for `player_id`, reading it from the local cache
# at input_img/player_headshots/ or downloading it from the NBA CDN on a miss.
get_player_headshot <- function(player_id){
  cache_path <- paste0('input_img/player_headshots/', player_id, '.png')
  source_url <- paste0(
    'https://ak-static.cms.nba.com/wp-content/uploads/headshots/nba/latest/260x190/',
    player_id, '.png')
  .check_api_then_save_local(cache_path, source_url)
}
# Return the action-shot image for `player_id`, reading it from the local
# cache at input_img/player_action_shots/ or downloading it from the NBA CDN.
get_player_action_shot <- function(player_id){
  cache_path <- paste0('input_img/player_action_shots/', player_id, '.png')
  source_url <- paste0(
    'https://ak-static.cms.nba.com/wp-content/uploads/silos/nba/latest/440x700/',
    player_id, '.png')
  .check_api_then_save_local(cache_path, source_url)
}
# Return the PNG stored at local path `fp`; if the file does not exist yet,
# download it from `url` and cache it to `fp` first.
#
# fp  - local file path for the cached PNG.
# url - remote URL to fetch when the cache misses.
.check_api_then_save_local <- function(fp, url){
  if (!file.exists(fp)){
    img <- png::readPNG(RCurl::getURLContent(url))
    # BUGFIX: qualify with png:: -- the png package is never attached in this
    # file (readPNG above is already namespaced), so an unqualified
    # writePNG() would fail at run time.
    png::writePNG(img, fp)
  } else
    img <- png::readPNG(fp)
  return (img)
} |
#' Check to make sure all character are valid UTF8 format
#'
#' Checks entire human readable file for any invalid UTF8 format
#'
#' @param filename Character vector. Path to file name
#'
#' @return Null
#' The code just exits with an error and prints the first offending line
#'
check_valid_UTF8 <- function(filename) {
fileContent <- readLines(filename)
# look for <h tags format with hashes
for (aline in fileContent) {
validLine <- validUTF8(aline)
if(!validLine) {
print(aline)
stop()
}
}
} | /R/check_valid_UTF8.R | no_license | andybeet/ESR | R | false | false | 540 | r | #' Check to make sure all character are valid UTF8 format
#'
#' Checks entire human readable file for any invalid UTF8 format
#'
#' @param filename Character vector. Path to file name
#'
#' @return Null
#' The code just exits with an error and prints the first offending line
#'
check_valid_UTF8 <- function(filename) {
fileContent <- readLines(filename)
# look for <h tags format with hashes
for (aline in fileContent) {
validLine <- validUTF8(aline)
if(!validLine) {
print(aline)
stop()
}
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.