content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Jennifer Jain
# Exploratory Data Analysis - Project 1
# January 2016
############################### Loading Data ###################################
# Set working directory
setwd("/Users/JAIN/Desktop/Exploratory_Data_Analysis")
# Download the electric power consumption archive
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile="household_power_consumption.zip", method="curl")
# Unzip and read the semicolon-separated .txt file; "?" marks missing values
unzip("household_power_consumption.zip","household_power_consumption.txt")
power_consumption <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors=FALSE, na.strings= "?", strip.white =TRUE)
# Subset data for the two dates required by the assignment (dd/mm/yyyy in the raw file)
filtered_power_consumption <- subset(power_consumption, Date== "1/2/2007" | Date== "2/2/2007")
# Build proper Date and combined Date/Time columns
filtered_power_consumption$Date <- as.Date(filtered_power_consumption$Date, format="%d/%m/%Y")
filtered_power_consumption$Date_Time <- as.POSIXct(paste(filtered_power_consumption$Date, filtered_power_consumption$Time), format = "%Y-%m-%d %H:%M:%S")
# x-axis range for the plot
timeline <- c(min(filtered_power_consumption$Date_Time), max(filtered_power_consumption$Date_Time))
# Weekday name per observation (POSIXlt$wday is 0-based starting Sunday).
# The previous intermediate numeric assignment was dead code (it was
# immediately overwritten), so only the final form is kept.
filtered_power_consumption$Weekday <- c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")[as.POSIXlt(filtered_power_consumption$Date_Time)$wday +1]
############################### Generate Plot 3 ################################
#png(file= "plot3.png", width =480, height =480)
plot(filtered_power_consumption$Date_Time, as.numeric(as.character(filtered_power_consumption$Sub_metering_1)), type="l", xlab="", ylab="Energy sub metering", xlim = timeline)
lines(filtered_power_consumption$Date_Time, filtered_power_consumption$Sub_metering_2, col="red")
lines(filtered_power_consumption$Date_Time, filtered_power_consumption$Sub_metering_3, col="blue")
legend("topright", lty = c(1,1,1),
       col = c("black", "red","blue"),
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#dev.off()
| /Plot3.R | no_license | JennJain86/ExData_Plotting1 | R | false | false | 2,456 | r | #Jennifer Jain
# Exploratory Data Analysis - Project 1
# January 2016
############################### Loading Data ###################################
# Set working directory
setwd("/Users/JAIN/Desktop/Exploratory_Data_Analysis")
# Download the electric power consumption archive
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile="household_power_consumption.zip", method="curl")
# Unzip and read the semicolon-separated .txt file; "?" marks missing values
unzip("household_power_consumption.zip","household_power_consumption.txt")
power_consumption <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors=FALSE, na.strings= "?", strip.white =TRUE)
# Subset data for the two dates required by the assignment (dd/mm/yyyy in the raw file)
filtered_power_consumption <- subset(power_consumption, Date== "1/2/2007" | Date== "2/2/2007")
# Build proper Date and combined Date/Time columns
filtered_power_consumption$Date <- as.Date(filtered_power_consumption$Date, format="%d/%m/%Y")
filtered_power_consumption$Date_Time <- as.POSIXct(paste(filtered_power_consumption$Date, filtered_power_consumption$Time), format = "%Y-%m-%d %H:%M:%S")
# x-axis range for the plot
timeline <- c(min(filtered_power_consumption$Date_Time), max(filtered_power_consumption$Date_Time))
# Weekday name per observation (POSIXlt$wday is 0-based starting Sunday).
# The previous intermediate numeric assignment was dead code (it was
# immediately overwritten), so only the final form is kept.
filtered_power_consumption$Weekday <- c("Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday")[as.POSIXlt(filtered_power_consumption$Date_Time)$wday +1]
############################### Generate Plot 3 ################################
#png(file= "plot3.png", width =480, height =480)
plot(filtered_power_consumption$Date_Time, as.numeric(as.character(filtered_power_consumption$Sub_metering_1)), type="l", xlab="", ylab="Energy sub metering", xlim = timeline)
lines(filtered_power_consumption$Date_Time, filtered_power_consumption$Sub_metering_2, col="red")
lines(filtered_power_consumption$Date_Time, filtered_power_consumption$Sub_metering_3, col="blue")
legend("topright", lty = c(1,1,1),
       col = c("black", "red","blue"),
       legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
#dev.off()
|
#' @title Dictionary Handling
#' @description \code{loadFields()} loads dictionaries that are available on the web as plain text files.
#' @param fieldnames A list of names for the dictionaries. It is expected that files with that name can be found below the URL.
#' @param baseurl The base path delivering the dictionaries. Should end in a /, field names will be appended and fed into read.csv().
#' @param fileSuffix The suffix for the dictionary files
#' @param directory The last component of the base url.
#' Useful to retrieve enriched word fields from metadata repo.
#' @param fileSep The file separator used to construct the URL
#' Can be overwritten to load local dictionaries.
#' @importFrom utils read.csv
#' @section File Format:
#' Dictionary files should contain one word per line, with no comments or any other meta information.
#' The entry name for the dictionary is given as the file name. It's therefore best if it does not contain
#' special characters. The dictionary must be in UTF-8 encoding, and the file needs to end on .txt.
#' @rdname dictionaryHandling
#' @export
loadFields <- function(fieldnames=c("Liebe","Familie"),
                       baseurl=paste("https://raw.githubusercontent.com/quadrama/metadata/master",
                                     ensureSuffix(directory,fileSep),sep=fileSep),
                       directory="fields/",
                       fileSuffix=".txt",
                       fileSep = "/") {
  fields <- list()
  # Read each word list as a headerless one-column CSV; column V1 holds
  # one word per line.
  for (fieldname in fieldnames) {
    fields[[fieldname]] <- as.character(
      read.csv(paste0(baseurl, fieldname, fileSuffix),
               header = F, fileEncoding = "UTF-8")$V1
    )
  }
  fields
}
#' @description \code{enrichDictionary()} enriches an existing dictionary by addition of similar words, as
#' measured in a word2vec model.
#' @param dictionary The base dictionary, a named list of lists.
#' @param model the loaded word2vec model
#' @param top A maximal number of words that we consider
#' @param minimalSimilarity The minimal similarity for a word in order
#' to be added
#' @importFrom wordVectors closest_to
#' @rdname dictionaryHandling
#' @export
#' @examples
#' \dontrun{
#' # Load base dictionary
#' dict_base <- loadFields(fieldnames=c("Familie","Liebe"))
#' # Load the word2vec model
#' model = read.vectors("models/german-fiction_vectors.bin")
#' # Create a new dictionary with added words
#' dict_enriched <- enrichDictionary(dict_base, model)
#' }
enrichDictionary <- function(dictionary, model, top=100, minimalSimilarity=0.4) {
  r <- dictionary
  # seq_along() instead of 1:length() so that an empty dictionary is a
  # no-op (1:length() would iterate over c(1, 0) and fail).
  for (f in seq_along(dictionary)) {
    fn <- names(dictionary)[[f]]
    # The `top` words most similar to the whole word field ...
    sims <- wordVectors::closest_to(model, dictionary[[f]], n=top, fancy_names = FALSE)
    # ... of which only those at or above the similarity threshold are
    # appended (column 1 of the result holds the word itself).
    r[[fn]] <- c(r[[fn]], sims[sims$similarity >= minimalSimilarity, 1])
  }
  r
}
#' @name dictionaryStatistics
#' @title Dictionary Use
#' @description These methods count the number of occurrences of the words
#' in the dictionaries, across different speakers and/or segments.
#' The function \code{dictionaryStatistics()} calculates statistics for
#' dictionaries with multiple entries, \code{dictionaryStatisticsSingle()} only
#' for a single word list. Functions ending on \code{L} return a list with
#' multiple components.
#' @param t A text (data.frame or data.table)
#' @param fieldnames A list of names for the dictionaries.
#' @param fields A list of lists that contains the actual field names.
#' By default, we try to load the dictionaries using \code{fieldnames} and \code{baseurl}.
#' @param normalizeByFigure Logical. Whether to normalize by figure speech length
#' @param normalizeByField Logical. Whether to normalize by dictionary size. You usually want this.
#' @param names Logical. Whether the resulting table contains figure ids or names.
#' @param boost A scaling factor to generate nicer values (currently not applied).
#' @param baseurl The base path delivering the dictionaries.
#' Should end in a \code{/}.
#' @param column The table column we apply the dictionary on.
#' Should be either "Token.surface" or "Token.lemma".
#' @param ci Whether to ignore case. Defaults to TRUE, i.e., case is ignored.
#' @param asList Logical. Whether to return a list with separated components or a single data.frame.
#' @importFrom stats aggregate
#' @importFrom stats ave
#' @importFrom utils as.roman
#' @seealso \code{\link{loadFields}}
#' @rdname dictionaryStatistics
#' @examples
#' # Check multiple dictionary entries
#' data(rksp.0)
#' dstat <- dictionaryStatistics(rksp.0$mtext, fieldnames=c("Krieg","Familie"), names=TRUE)
#' @export
dictionaryStatistics <- function(t, fields=loadFields(fieldnames,baseurl),
                                 fieldnames=c("Liebe"),
                                 segment=c("Drama","Act","Scene"),
                                 normalizeByFigure = FALSE,
                                 normalizeByField = FALSE,
                                 byFigure = TRUE,
                                 names = FALSE,
                                 boost = 1,
                                 baseurl = "https://raw.githubusercontent.com/quadrama/metadata/master/fields/",
                                 column="Token.surface",
                                 asList = FALSE,
                                 ci = TRUE) {
  # we need this to prevent notes in R CMD check
  .N <- NULL
  . <- NULL
  segment <- match.arg(segment)
  # NOTE(review): `boost` is accepted for interface compatibility but is
  # not applied anywhere.  Dead code has been removed here: an aggregate()
  # result that was unconditionally overwritten, an unused `first` flag,
  # and an unreachable `if (FALSE==TRUE && ...)` normalization branch.
  #
  # One statistics table per dictionary entry.  The table for the first
  # entry keeps its id/segment columns; the subsequent tables contribute
  # only their single value column, so cbind-ing yields one wide table.
  singles <- lapply(names(fields), function(x) {
    dss <- dictionaryStatisticsSingle(t, fields[[x]], ci=ci,
                                      segment=segment,
                                      byFigure = byFigure,
                                      normalizeByFigure = normalizeByFigure,
                                      normalizeByField = normalizeByField,
                                      names=names, column=column)
    colnames(dss)[ncol(dss)] <- x
    if (x == names(fields)[[1]]) {
      dss
    } else {
      dss[,x,with=FALSE]
    }
  })
  r <- Reduce(cbind, singles)
  if (asList == TRUE) {
    # Split the wide table into its id columns (3 to 5 of them, depending
    # on segment granularity) plus a numeric value matrix, one column per
    # dictionary field.
    l <- as.list(r[,1:switch(segment,Drama=3,Act=4,Scene=5)])
    l$mat <- as.matrix(r[,(ncol(r)-length(fields)+1):ncol(r)])
    # Row names encode figure and, where applicable, act (roman) and scene.
    rownames(l$mat) <- switch(segment,
                              Drama=as.character(l$figure),
                              Act=paste(l$figure,utils::as.roman(l$Number.Act)),
                              Scene=paste(l$figure,utils::as.roman(l$Number.Act),l$Number.Scene))
    l
  } else {
    r
  }
}
#' @param wordfield A character vector containing the words or lemmas
#' to be counted (only for \code{*Single}-functions)
#' @param fieldNormalizer defaults to the length of the wordfield
#' @param segment The segment level that should be used. By default,
#' the entire play will be used. Possible values are "Drama" (default),
#' "Act" or "Scene"
#' @param colnames The column names to be used
#' @param byFigure Logical, defaults to TRUE. If false, values will be calculated
#' for the entire segment (play, act, or scene), and not for individual characters.
#' @examples
#' # Check a single dictionary entry
#' data(rksp.0)
#' fstat <- dictionaryStatisticsSingle(rksp.0$mtext, wordfield=c("der"), names=TRUE)
#' @importFrom stats aggregate
#' @importFrom stats na.omit
#' @importFrom reshape2 melt
#' @importFrom stats as.formula
#' @rdname dictionaryStatistics
#' @export
dictionaryStatisticsSingle <- function(t, wordfield=c(),
                                       names = FALSE,
                                       segment=c("Drama","Act","Scene"),
                                       normalizeByFigure = FALSE,
                                       normalizeByField = FALSE,
                                       byFigure = TRUE,
                                       fieldNormalizer=length(wordfield),
                                       column="Token.surface", ci=TRUE,
                                       colnames=NULL)
{
  # we need this to prevent notes in R CMD check
  .N <- NULL
  . <- NULL
  .SD <- NULL
  segment <- match.arg(segment)
  # Grouping columns: corpus and drama always; act/scene numbers are
  # added for the finer segment levels.
  bycolumns <- c("corpus",
                 switch(segment,
                        Drama=c("drama"),
                        Act=c("drama","Number.Act"),
                        Scene=c("drama","Number.Act","Number.Scene"))
  )
  # Additionally group by the speaking character (id or surface form),
  # unless statistics are requested for whole segments.
  if (byFigure == TRUE) {
    bycolumns <- c(bycolumns, ifelse(names==TRUE,
                                     "Speaker.figure_surface",
                                     "Speaker.figure_id"))
  }
  # data.table's keyby accepts the columns as one comma-separated string.
  bylist <- paste(bycolumns,collapse=",")
  dt <- as.data.table(t)
  # Case-insensitive matching lowercases both the word field and the text.
  if (ci) {
    wordfield <- tolower(wordfield)
    casing <- tolower
  } else {
    casing <- identity
  }
  # Without field normalization, divide by 1 (i.e. keep raw counts).
  if (normalizeByField == FALSE) {
    fieldNormalizer <- 1
  }
  # TRUE for every token that belongs to the word field.
  dt$match <- casing(dt[[column]]) %in% wordfield
  # Cross-tabulate token counts per group and match status, then melt the
  # contingency table back into one row per group/match combination.
  form <- stats::as.formula(paste0("~ ", paste(c(bycolumns,"match"), collapse=" + ")))
  xt <- data.table::data.table(reshape2::melt(xtabs(form, data=dt)))
  if (normalizeByFigure == TRUE) {
    # Matching tokens per group, divided by dictionary size and by the
    # group's total token count (speech length).
    r <- xt[,.((sum(.SD[match==TRUE]$value)/fieldNormalizer)/sum(.SD$value)),
            keyby=bylist]
  } else {
    # Matching tokens per group, divided by dictionary size only.
    r <- xt[,.(sum(.SD[match==TRUE]$value)/fieldNormalizer),
            keyby=bylist]
  }
  # Rename the value column and the last grouping column.
  # NOTE(review): this rename is positional, so with byFigure=FALSE the
  # column named "figure" is actually the finest segment column.
  colnames(r)[ncol(r)] <- "x"
  colnames(r)[ncol(r)-1] <- "figure"
  if (! is.null(colnames)) {
    colnames(r) <- colnames
  }
  # Groups with zero tokens yield NaN (0/0) under normalizeByFigure; reset
  # them to 0.  NOTE(review): this lookup assumes the value column is
  # still named "x" -- a custom `colnames` argument that renames it makes
  # r$x NULL; confirm intended behavior for that case.
  r[is.nan(r$x)]$x <- 0
  r
}
dictionaryStatisticsSingleL <- function(...) {
  # Same statistics as dictionaryStatisticsSingle(), but returned as a
  # plain list instead of a table.
  as.list(dictionaryStatisticsSingle(...))
}
#' @description \code{dictionaryStatisticsL()} should not be used
#' anymore. Please use \code{dictionaryStatistics()} with the parameter
#' \code{asList=TRUE}
#' @param ... All parameters are passed to \code{\link{dictionaryStatistics}}
#' @section Returned Lists:
#' The returned list has three named elements:
#' \describe{
#' \item{drama}{The drama in which these counts have been counted}
#' \item{figure}{the figure by whom these values have been spoken}
#' \item{mat}{A matrix containing the actual values}
#' }
#' @rdname dictionaryStatistics
#' @export
dictionaryStatisticsL <- function(...) {
  # Deprecated alias: emit a deprecation warning, then delegate.
  .Deprecated("dictionaryStatistics")
  dictionaryStatistics(..., asList=TRUE)
}
# Deprecated dotted-name alias for dictionaryStatistics(); kept for
# backward compatibility with older calling code.
dictionary.statistics <- function(...) {
  .Deprecated("dictionaryStatistics")
  dictionaryStatistics(...)
}
#' @title regroup
#' @description This function isolates the dictionary statistics for
#' each character. The return value is a list containing lists similar
#' to the output of `dictionaryStatistics()`, but only containing
#' the table for one character.
#' @param dstat A list generated by `dictionaryStatistics()`,
#' using the `asList` parameter
#' @param by A character vector, either "Character" or "Field".
#' Depending on this parameter, we get a list organized by character
#' or a list organized by field. If it's organised by character, it allows
#' comparison of fields for a single character. If organised by field,
#' we can compare different characters for a single field.
#' @export
#' @examples
#' data(rksp.0)
#' field <- list(Liebe=c("liebe","lieben","herz"))
#' dsl <- dictionaryStatistics(rksp.0$mtext,
#' fields=field,
#' normalizeByFigure=TRUE,
#' asList=TRUE,
#' segment="Scene")
#' dslr <- regroup(dsl, by="Field")
#' \dontrun{
#' matplot(apply(dslr$Liebe, 1, cumsum),type="l", main="Liebe", col=rainbow(14))
#' legend(x="topleft", legend=rownames(dslr$Liebe),lty=1:5,col=rainbow(14), cex = 0.4)
#' }
regroup <- function(dstat, by=c("Character","Field")) {
  by = match.arg(by)
  switch(by,
    Character={
      # One list entry per character (one per factor level of
      # dstat$figure), each holding that character's rows of the value
      # matrix plus, where present, the matching act/scene numbers.
      l <- lapply(levels(dstat$figure), function(x) {
        myLines = which(dstat$figure == x)
        innerList <- list()
        innerList$mat <- dstat$mat[myLines,]
        if ("Number.Scene" %in% names(dstat)) {
          innerList$Number.Scene <- dstat$Number.Scene[myLines]
        }
        if ("Number.Act" %in% names(dstat)) {
          innerList$Number.Act <- dstat$Number.Act[myLines]
        }
        innerList
      })
      names(l) <- levels(dstat$figure)
      return(l)
    },
    Field={
      # One list entry per dictionary field: a characters-by-segments
      # table of that field's values.
      l <- lapply(colnames(dstat$mat), function(x) {
        df <- data.frame(Field=dstat$mat[,x])
        df$figure <- dstat$figure
        if ("Number.Scene" %in% names(dstat)) {
          df$Number.Scene <- dstat$Number.Scene
        }
        if ("Number.Act" %in% names(dstat)) {
          df$Number.Act <- dstat$Number.Act
        }
        # Segment label combines roman act number and scene number.
        if ("Number.Act" %in% names(dstat) && "Number.Scene" %in% names(dstat)) {
          df$Segment <- paste(as.roman(df$Number.Act), df$Number.Scene)
        }
        # NOTE(review): reshape() uses timevar="Segment", but Segment is
        # only created when both act and scene numbers exist -- this
        # branch appears to require output produced with segment="Scene";
        # confirm before calling it with coarser segmentation.
        df2 <- reshape(df, direction="wide", timevar=c("Segment"), idvar=c("figure"),drop=c("Number.Act","Number.Scene"))
        rownames(df2) <- df2$figure
        df2$figure <- NULL
        # Strip the "Field." prefix (6 characters) that reshape() prepends
        # to the widened value columns.
        colnames(df2) <- substr(colnames(df2), 7, 100)
        df2
      })
      names(l) <- colnames(dstat$mat)
      return(l)
    });
} | /R/dictionaryStatistics.R | permissive | anhnguyendepocen/DramaAnalysis | R | false | false | 13,850 | r |
#' @title Dictionary Handling
#' @description \code{loadFields()} loads dictionaries that are available on the web as plain text files.
#' @param fieldnames A list of names for the dictionaries. It is expected that files with that name can be found below the URL.
#' @param baseurl The base path delivering the dictionaries. Should end in a /, field names will be appended and fed into read.csv().
#' @param fileSuffix The suffix for the dictionary files
#' @param directory The last component of the base url.
#' Useful to retrieve enriched word fields from metadata repo.
#' @param fileSep The file separator used to construct the URL
#' Can be overwritten to load local dictionaries.
#' @importFrom utils read.csv
#' @section File Format:
#' Dictionary files should contain one word per line, with no comments or any other meta information.
#' The entry name for the dictionary is given as the file name. It's therefore best if it does not contain
#' special characters. The dictionary must be in UTF-8 encoding, and the file needs to end on .txt.
#' @rdname dictionaryHandling
#' @export
loadFields <- function(fieldnames=c("Liebe","Familie"),
                       baseurl=paste("https://raw.githubusercontent.com/quadrama/metadata/master",
                                     ensureSuffix(directory,fileSep),sep=fileSep),
                       directory="fields/",
                       fileSuffix=".txt",
                       fileSep = "/") {
  fields <- list()
  # Read each word list as a headerless one-column CSV; column V1 holds
  # one word per line.
  for (fieldname in fieldnames) {
    fields[[fieldname]] <- as.character(
      read.csv(paste0(baseurl, fieldname, fileSuffix),
               header = F, fileEncoding = "UTF-8")$V1
    )
  }
  fields
}
#' @description \code{enrichDictionary()} enriches an existing dictionary by addition of similar words, as
#' measured in a word2vec model.
#' @param dictionary The base dictionary, a named list of lists.
#' @param model the loaded word2vec model
#' @param top A maximal number of words that we consider
#' @param minimalSimilarity The minimal similarity for a word in order
#' to be added
#' @importFrom wordVectors closest_to
#' @rdname dictionaryHandling
#' @export
#' @examples
#' \dontrun{
#' # Load base dictionary
#' dict_base <- loadFields(fieldnames=c("Familie","Liebe"))
#' # Load the word2vec model
#' model = read.vectors("models/german-fiction_vectors.bin")
#' # Create a new dictionary with added words
#' dict_enriched <- enrichDictionary(dict_base, model)
#' }
enrichDictionary <- function(dictionary, model, top=100, minimalSimilarity=0.4) {
  r <- dictionary
  # seq_along() instead of 1:length() so that an empty dictionary is a
  # no-op (1:length() would iterate over c(1, 0) and fail).
  for (f in seq_along(dictionary)) {
    fn <- names(dictionary)[[f]]
    # The `top` words most similar to the whole word field ...
    sims <- wordVectors::closest_to(model, dictionary[[f]], n=top, fancy_names = FALSE)
    # ... of which only those at or above the similarity threshold are
    # appended (column 1 of the result holds the word itself).
    r[[fn]] <- c(r[[fn]], sims[sims$similarity >= minimalSimilarity, 1])
  }
  r
}
#' @name dictionaryStatistics
#' @title Dictionary Use
#' @description These methods count the number of occurrences of the words
#' in the dictionaries, across different speakers and/or segments.
#' The function \code{dictionaryStatistics()} calculates statistics for
#' dictionaries with multiple entries, \code{dictionaryStatisticsSingle()} only
#' for a single word list. Functions ending on \code{L} return a list with
#' multiple components.
#' @param t A text (data.frame or data.table)
#' @param fieldnames A list of names for the dictionaries.
#' @param fields A list of lists that contains the actual field names.
#' By default, we try to load the dictionaries using \code{fieldnames} and \code{baseurl}.
#' @param normalizeByFigure Logical. Whether to normalize by figure speech length
#' @param normalizeByField Logical. Whether to normalize by dictionary size. You usually want this.
#' @param names Logical. Whether the resulting table contains figure ids or names.
#' @param boost A scaling factor to generate nicer values (currently not applied).
#' @param baseurl The base path delivering the dictionaries.
#' Should end in a \code{/}.
#' @param column The table column we apply the dictionary on.
#' Should be either "Token.surface" or "Token.lemma".
#' @param ci Whether to ignore case. Defaults to TRUE, i.e., case is ignored.
#' @param asList Logical. Whether to return a list with separated components or a single data.frame.
#' @importFrom stats aggregate
#' @importFrom stats ave
#' @importFrom utils as.roman
#' @seealso \code{\link{loadFields}}
#' @rdname dictionaryStatistics
#' @examples
#' # Check multiple dictionary entries
#' data(rksp.0)
#' dstat <- dictionaryStatistics(rksp.0$mtext, fieldnames=c("Krieg","Familie"), names=TRUE)
#' @export
dictionaryStatistics <- function(t, fields=loadFields(fieldnames,baseurl),
                                 fieldnames=c("Liebe"),
                                 segment=c("Drama","Act","Scene"),
                                 normalizeByFigure = FALSE,
                                 normalizeByField = FALSE,
                                 byFigure = TRUE,
                                 names = FALSE,
                                 boost = 1,
                                 baseurl = "https://raw.githubusercontent.com/quadrama/metadata/master/fields/",
                                 column="Token.surface",
                                 asList = FALSE,
                                 ci = TRUE) {
  # we need this to prevent notes in R CMD check
  .N <- NULL
  . <- NULL
  segment <- match.arg(segment)
  # NOTE(review): `boost` is accepted for interface compatibility but is
  # not applied anywhere.  Dead code has been removed here: an aggregate()
  # result that was unconditionally overwritten, an unused `first` flag,
  # and an unreachable `if (FALSE==TRUE && ...)` normalization branch.
  #
  # One statistics table per dictionary entry.  The table for the first
  # entry keeps its id/segment columns; the subsequent tables contribute
  # only their single value column, so cbind-ing yields one wide table.
  singles <- lapply(names(fields), function(x) {
    dss <- dictionaryStatisticsSingle(t, fields[[x]], ci=ci,
                                      segment=segment,
                                      byFigure = byFigure,
                                      normalizeByFigure = normalizeByFigure,
                                      normalizeByField = normalizeByField,
                                      names=names, column=column)
    colnames(dss)[ncol(dss)] <- x
    if (x == names(fields)[[1]]) {
      dss
    } else {
      dss[,x,with=FALSE]
    }
  })
  r <- Reduce(cbind, singles)
  if (asList == TRUE) {
    # Split the wide table into its id columns (3 to 5 of them, depending
    # on segment granularity) plus a numeric value matrix, one column per
    # dictionary field.
    l <- as.list(r[,1:switch(segment,Drama=3,Act=4,Scene=5)])
    l$mat <- as.matrix(r[,(ncol(r)-length(fields)+1):ncol(r)])
    # Row names encode figure and, where applicable, act (roman) and scene.
    rownames(l$mat) <- switch(segment,
                              Drama=as.character(l$figure),
                              Act=paste(l$figure,utils::as.roman(l$Number.Act)),
                              Scene=paste(l$figure,utils::as.roman(l$Number.Act),l$Number.Scene))
    l
  } else {
    r
  }
}
#' @param wordfield A character vector containing the words or lemmas
#' to be counted (only for \code{*Single}-functions)
#' @param fieldNormalizer defaults to the length of the wordfield
#' @param segment The segment level that should be used. By default,
#' the entire play will be used. Possible values are "Drama" (default),
#' "Act" or "Scene"
#' @param colnames The column names to be used
#' @param byFigure Logical, defaults to TRUE. If false, values will be calculated
#' for the entire segment (play, act, or scene), and not for individual characters.
#' @examples
#' # Check a single dictionary entry
#' data(rksp.0)
#' fstat <- dictionaryStatisticsSingle(rksp.0$mtext, wordfield=c("der"), names=TRUE)
#' @importFrom stats aggregate
#' @importFrom stats na.omit
#' @importFrom reshape2 melt
#' @importFrom stats as.formula
#' @rdname dictionaryStatistics
#' @export
dictionaryStatisticsSingle <- function(t, wordfield=c(),
                                       names = FALSE,
                                       segment=c("Drama","Act","Scene"),
                                       normalizeByFigure = FALSE,
                                       normalizeByField = FALSE,
                                       byFigure = TRUE,
                                       fieldNormalizer=length(wordfield),
                                       column="Token.surface", ci=TRUE,
                                       colnames=NULL)
{
  # we need this to prevent notes in R CMD check
  .N <- NULL
  . <- NULL
  .SD <- NULL
  segment <- match.arg(segment)
  # Grouping columns: corpus and drama always; act/scene numbers are
  # added for the finer segment levels.
  bycolumns <- c("corpus",
                 switch(segment,
                        Drama=c("drama"),
                        Act=c("drama","Number.Act"),
                        Scene=c("drama","Number.Act","Number.Scene"))
  )
  # Additionally group by the speaking character (id or surface form),
  # unless statistics are requested for whole segments.
  if (byFigure == TRUE) {
    bycolumns <- c(bycolumns, ifelse(names==TRUE,
                                     "Speaker.figure_surface",
                                     "Speaker.figure_id"))
  }
  # data.table's keyby accepts the columns as one comma-separated string.
  bylist <- paste(bycolumns,collapse=",")
  dt <- as.data.table(t)
  # Case-insensitive matching lowercases both the word field and the text.
  if (ci) {
    wordfield <- tolower(wordfield)
    casing <- tolower
  } else {
    casing <- identity
  }
  # Without field normalization, divide by 1 (i.e. keep raw counts).
  if (normalizeByField == FALSE) {
    fieldNormalizer <- 1
  }
  # TRUE for every token that belongs to the word field.
  dt$match <- casing(dt[[column]]) %in% wordfield
  # Cross-tabulate token counts per group and match status, then melt the
  # contingency table back into one row per group/match combination.
  form <- stats::as.formula(paste0("~ ", paste(c(bycolumns,"match"), collapse=" + ")))
  xt <- data.table::data.table(reshape2::melt(xtabs(form, data=dt)))
  if (normalizeByFigure == TRUE) {
    # Matching tokens per group, divided by dictionary size and by the
    # group's total token count (speech length).
    r <- xt[,.((sum(.SD[match==TRUE]$value)/fieldNormalizer)/sum(.SD$value)),
            keyby=bylist]
  } else {
    # Matching tokens per group, divided by dictionary size only.
    r <- xt[,.(sum(.SD[match==TRUE]$value)/fieldNormalizer),
            keyby=bylist]
  }
  # Rename the value column and the last grouping column.
  # NOTE(review): this rename is positional, so with byFigure=FALSE the
  # column named "figure" is actually the finest segment column.
  colnames(r)[ncol(r)] <- "x"
  colnames(r)[ncol(r)-1] <- "figure"
  if (! is.null(colnames)) {
    colnames(r) <- colnames
  }
  # Groups with zero tokens yield NaN (0/0) under normalizeByFigure; reset
  # them to 0.  NOTE(review): this lookup assumes the value column is
  # still named "x" -- a custom `colnames` argument that renames it makes
  # r$x NULL; confirm intended behavior for that case.
  r[is.nan(r$x)]$x <- 0
  r
}
dictionaryStatisticsSingleL <- function(...) {
  # Same statistics as dictionaryStatisticsSingle(), but returned as a
  # plain list instead of a table.
  as.list(dictionaryStatisticsSingle(...))
}
#' @description \code{dictionaryStatisticsL()} should not be used
#' anymore. Please use \code{dictionaryStatistics()} with the parameter
#' \code{asList=TRUE}
#' @param ... All parameters are passed to \code{\link{dictionaryStatistics}}
#' @section Returned Lists:
#' The returned list has three named elements:
#' \describe{
#' \item{drama}{The drama in which these counts have been counted}
#' \item{figure}{the figure by whom these values have been spoken}
#' \item{mat}{A matrix containing the actual values}
#' }
#' @rdname dictionaryStatistics
#' @export
dictionaryStatisticsL <- function(...) {
  # Deprecated alias: emit a deprecation warning, then delegate.
  .Deprecated("dictionaryStatistics")
  dictionaryStatistics(..., asList=TRUE)
}
# Deprecated dotted-name alias for dictionaryStatistics(); kept for
# backward compatibility with older calling code.
dictionary.statistics <- function(...) {
  .Deprecated("dictionaryStatistics")
  dictionaryStatistics(...)
}
#' @title regroup
#' @description This function isolates the dictionary statistics for
#' each character. The return value is a list containing lists similar
#' to the output of `dictionaryStatistics()`, but only containing
#' the table for one character.
#' @param dstat A list generated by `dictionaryStatistics()`,
#' using the `asList` parameter
#' @param by A character vector, either "Character" or "Field".
#' Depending on this parameter, we get a list organized by character
#' or a list organized by field. If it's organised by character, it allows
#' comparison of fields for a single character. If organised by field,
#' we can compare different characters for a single field.
#' @export
#' @examples
#' data(rksp.0)
#' field <- list(Liebe=c("liebe","lieben","herz"))
#' dsl <- dictionaryStatistics(rksp.0$mtext,
#' fields=field,
#' normalizeByFigure=TRUE,
#' asList=TRUE,
#' segment="Scene")
#' dslr <- regroup(dsl, by="Field")
#' \dontrun{
#' matplot(apply(dslr$Liebe, 1, cumsum),type="l", main="Liebe", col=rainbow(14))
#' legend(x="topleft", legend=rownames(dslr$Liebe),lty=1:5,col=rainbow(14), cex = 0.4)
#' }
regroup <- function(dstat, by=c("Character","Field")) {
  by = match.arg(by)
  switch(by,
    Character={
      # One list entry per character (one per factor level of
      # dstat$figure), each holding that character's rows of the value
      # matrix plus, where present, the matching act/scene numbers.
      l <- lapply(levels(dstat$figure), function(x) {
        myLines = which(dstat$figure == x)
        innerList <- list()
        innerList$mat <- dstat$mat[myLines,]
        if ("Number.Scene" %in% names(dstat)) {
          innerList$Number.Scene <- dstat$Number.Scene[myLines]
        }
        if ("Number.Act" %in% names(dstat)) {
          innerList$Number.Act <- dstat$Number.Act[myLines]
        }
        innerList
      })
      names(l) <- levels(dstat$figure)
      return(l)
    },
    Field={
      # One list entry per dictionary field: a characters-by-segments
      # table of that field's values.
      l <- lapply(colnames(dstat$mat), function(x) {
        df <- data.frame(Field=dstat$mat[,x])
        df$figure <- dstat$figure
        if ("Number.Scene" %in% names(dstat)) {
          df$Number.Scene <- dstat$Number.Scene
        }
        if ("Number.Act" %in% names(dstat)) {
          df$Number.Act <- dstat$Number.Act
        }
        # Segment label combines roman act number and scene number.
        if ("Number.Act" %in% names(dstat) && "Number.Scene" %in% names(dstat)) {
          df$Segment <- paste(as.roman(df$Number.Act), df$Number.Scene)
        }
        # NOTE(review): reshape() uses timevar="Segment", but Segment is
        # only created when both act and scene numbers exist -- this
        # branch appears to require output produced with segment="Scene";
        # confirm before calling it with coarser segmentation.
        df2 <- reshape(df, direction="wide", timevar=c("Segment"), idvar=c("figure"),drop=c("Number.Act","Number.Scene"))
        rownames(df2) <- df2$figure
        df2$figure <- NULL
        # Strip the "Field." prefix (6 characters) that reshape() prepends
        # to the widened value columns.
        colnames(df2) <- substr(colnames(df2), 7, 100)
        df2
      })
      names(l) <- colnames(dstat$mat)
      return(l)
    });
} |
# Load the pre-aggregated school spending data.
avgs <- read.csv('averages.csv')  # NOTE(review): loaded but never used below
years <- read.csv('years.csv')
# Keep only years before 2017.
years <- years[years$Year < 2017,]
# library() errors loudly if ggplot2 is missing; require() would only warn
# and let the ggplot() calls below fail later.
library(ggplot2)
# One school: total spending per year at Brighton High School.
# (A previously assigned, unused `data` variable -- which also shadowed
# base::data -- has been removed; each plot subsets `years` itself.)
brighton <- ggplot(data=years[years$School == "Brighton High School",], aes(x=Year, y=Total,fill = School)) + geom_bar(stat = "identity")
brighton
# Second school: Boston Latin School.
bls <- ggplot(data=years[years$School == "Boston Latin School",], aes(x=Year, y=Total,fill = School)) + geom_bar(stat = "identity")
bls
# Two schools side by side (kept for reference, currently disabled).
#twoBase <- ggplot(data=years[years$School == "Brighton High School" | years$School == "Boston Latin School",], aes(x=Year, y=Total, fill = School))
#twoBase + geom_bar(stat = "identity", position = "dodge")
| /RAnalysis/sample_years.r | no_license | joshua-michel/bps-school-spending-analysis | R | false | false | 699 | r | avgs<-read.csv('averages.csv')
years<-read.csv('years.csv')
# Keep only years before 2017.
years<-years[years$Year < 2017,]
require(ggplot2)
# NOTE(review): `data` is assigned here but never used below (and shadows
# base::data); the plots re-subset `years` themselves.
data=years[years$School == "Brighton High School",]
#One School
brighton <- ggplot(data=years[years$School == "Brighton High School",], aes(x=Year, y=Total,fill = School)) + geom_bar(stat = "identity")
brighton
#second School
bls <- ggplot(data=years[years$School == "Boston Latin School",], aes(x=Year, y=Total,fill = School)) + geom_bar(stat = "identity")
bls
#Two School
#twoBase <- ggplot(data=years[years$School == "Brighton High School" | years$School == "Boston Latin School",], aes(x=Year, y=Total, fill = School))
#twoBase + geom_bar(stat = "identity", position = "dodge")
|
\alias{gFileInfoNew}
\name{gFileInfoNew}
\title{gFileInfoNew}
\description{Creates a new file info structure.}
\usage{gFileInfoNew()}
\value{[\code{\link{GFileInfo}}] a \code{\link{GFileInfo}}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gFileInfoNew.Rd | no_license | lawremi/RGtk2 | R | false | false | 266 | rd | \alias{gFileInfoNew}
\name{gFileInfoNew}
\title{gFileInfoNew}
\description{Creates a new file info structure.}
\usage{gFileInfoNew()}
\value{[\code{\link{GFileInfo}}] a \code{\link{GFileInfo}}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Read whitespace-separated alpha values and write their histogram to an
# EPS figure.
d=read.table('avalues.txt',sep=" ")
# NOTE(review): dim<- NULL on a read.table result is an unusual way to
# flatten it to a plain vector before the numeric coercion below --
# presumably the file is effectively one row/column of values; confirm.
dim(d)<-NULL
d<-as.numeric(d)
# setEPS()/postscript() redirect all subsequent plotting to "alpha.eps".
setEPS()
postscript("alpha.eps")
# Bins of width 0.05 across [-2, 2] plus catch-all bins out to +/-100 for
# outliers; the x-axis is clipped to [-2, 2] and freq=TRUE plots counts.
hist(d,c(-100,(-40:40)/20,100), xlim=c(-2,2), freq=TRUE)
dev.off()
| /alphaHistogram.r | no_license | jjoonathan/shakhnovich-bioinf | R | false | false | 166 | r | d=read.table('avalues.txt',sep=" ")
dim(d)<-NULL
d<-as.numeric(d)
setEPS()
postscript("alpha.eps")
hist(d,c(-100,(-40:40)/20,100), xlim=c(-2,2), freq=TRUE)
dev.off()
|
library(testthat)
context("Data retrieval from web")
dset="descartes_mobility_data"
if(grepl("^google|^apple",dset)) {
test_data_accessor(dset,nrows=100000)
} else {
test_data_accessor(dset)
}
| /tests/testthat/test_accessor_descartes_mobility_data.R | permissive | griffinracey2/sars2pack | R | false | false | 204 | r |
library(testthat)
context("Data retrieval from web")
dset="descartes_mobility_data"
if(grepl("^google|^apple",dset)) {
test_data_accessor(dset,nrows=100000)
} else {
test_data_accessor(dset)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/CostSensClassifWrapper.R
\name{makeCostSensClassifWrapper}
\alias{CostSensClassifModel}
\alias{CostSensClassifWrapper}
\alias{makeCostSensClassifWrapper}
\title{Wraps a classification learner for use in cost-sensitive learning.}
\usage{
makeCostSensClassifWrapper(learner)
}
\arguments{
\item{learner}{[\code{\link{Learner}} | \code{character(1)}]\cr
The classification learner.
If you pass a string the learner will be created via \code{\link{makeLearner}}.}
}
\value{
[\code{\link{Learner}}].
}
\description{
Creates a wrapper, which can be used like any other learner object.
The classification model can easily be accessed via \code{\link{getHomogeneousEnsembleModels}}.
This is a very naive learner, where the costs are transformed into classification labels -
the label for each case is the name of class with minimal costs.
(If ties occur, the label which is better on average w.r.t. costs over all training data is
preferred.)
Then the classifier is fitted to that data and subsequently used for prediction.
}
\seealso{
Other costsens: \code{\link{ClassifTask}},
\code{\link{ClusterTask}}, \code{\link{CostSensTask}},
\code{\link{RegrTask}}, \code{\link{SurvTask}},
\code{\link{Task}}, \code{\link{makeClassifTask}},
\code{\link{makeClusterTask}},
\code{\link{makeCostSensTask}},
\code{\link{makeRegrTask}}, \code{\link{makeSurvTask}};
\code{\link{CostSensRegrModel}},
\code{\link{CostSensRegrWrapper}},
\code{\link{makeCostSensRegrWrapper}};
\code{\link{CostSensWeightedPairsModel}},
\code{\link{CostSensWeightedPairsWrapper}},
\code{\link{makeCostSensWeightedPairsWrapper}}
Other wrapper: \code{\link{CostSensRegrModel}},
\code{\link{CostSensRegrWrapper}},
\code{\link{makeCostSensRegrWrapper}};
\code{\link{makeBaggingWrapper}};
\code{\link{makeDownsampleWrapper}};
\code{\link{makeFeatSelWrapper}};
\code{\link{makeFilterWrapper}};
\code{\link{makeImputeWrapper}};
\code{\link{makeMulticlassWrapper}};
\code{\link{makeOverBaggingWrapper}};
\code{\link{makeOversampleWrapper}},
\code{\link{makeUndersampleWrapper}};
\code{\link{makePreprocWrapperCaret}};
\code{\link{makePreprocWrapper}};
\code{\link{makeSMOTEWrapper}};
\code{\link{makeTuneWrapper}};
\code{\link{makeWeightedClassesWrapper}}
}
| /man/makeCostSensClassifWrapper.Rd | no_license | ppr10/mlr | R | false | false | 2,351 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/CostSensClassifWrapper.R
\name{makeCostSensClassifWrapper}
\alias{CostSensClassifModel}
\alias{CostSensClassifWrapper}
\alias{makeCostSensClassifWrapper}
\title{Wraps a classification learner for use in cost-sensitive learning.}
\usage{
makeCostSensClassifWrapper(learner)
}
\arguments{
\item{learner}{[\code{\link{Learner}} | \code{character(1)}]\cr
The classification learner.
If you pass a string the learner will be created via \code{\link{makeLearner}}.}
}
\value{
[\code{\link{Learner}}].
}
\description{
Creates a wrapper, which can be used like any other learner object.
The classification model can easily be accessed via \code{\link{getHomogeneousEnsembleModels}}.
This is a very naive learner, where the costs are transformed into classification labels -
the label for each case is the name of class with minimal costs.
(If ties occur, the label which is better on average w.r.t. costs over all training data is
preferred.)
Then the classifier is fitted to that data and subsequently used for prediction.
}
\seealso{
Other costsens: \code{\link{ClassifTask}},
\code{\link{ClusterTask}}, \code{\link{CostSensTask}},
\code{\link{RegrTask}}, \code{\link{SurvTask}},
\code{\link{Task}}, \code{\link{makeClassifTask}},
\code{\link{makeClusterTask}},
\code{\link{makeCostSensTask}},
\code{\link{makeRegrTask}}, \code{\link{makeSurvTask}};
\code{\link{CostSensRegrModel}},
\code{\link{CostSensRegrWrapper}},
\code{\link{makeCostSensRegrWrapper}};
\code{\link{CostSensWeightedPairsModel}},
\code{\link{CostSensWeightedPairsWrapper}},
\code{\link{makeCostSensWeightedPairsWrapper}}
Other wrapper: \code{\link{CostSensRegrModel}},
\code{\link{CostSensRegrWrapper}},
\code{\link{makeCostSensRegrWrapper}};
\code{\link{makeBaggingWrapper}};
\code{\link{makeDownsampleWrapper}};
\code{\link{makeFeatSelWrapper}};
\code{\link{makeFilterWrapper}};
\code{\link{makeImputeWrapper}};
\code{\link{makeMulticlassWrapper}};
\code{\link{makeOverBaggingWrapper}};
\code{\link{makeOversampleWrapper}},
\code{\link{makeUndersampleWrapper}};
\code{\link{makePreprocWrapperCaret}};
\code{\link{makePreprocWrapper}};
\code{\link{makeSMOTEWrapper}};
\code{\link{makeTuneWrapper}};
\code{\link{makeWeightedClassesWrapper}}
}
|
# Code to create figures
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('ggplot')
def plot_simple_line():
    """Scatter 20 noisy points around y = 2x + 5 and overlay the fitted line.

    Uses a fixed RNG seed so the figure is reproducible; the fitted
    equation is annotated in the lower-right corner of the axes.
    """
    rng = np.random.RandomState(42)
    x = 10 * rng.rand(20)
    y = 2 * x + 5 + rng.randn(20)
    # Degree-1 least-squares fit; p = (slope, intercept).
    p = np.polyfit(x, y, 1)
    xfit = np.linspace(0, 10)
    yfit = np.polyval(p, xfit)
    plt.plot(x, y, 'ok')  # fixed: a stray "??" after this call was a syntax error
    plt.plot(xfit, yfit, color='gray')
    plt.text(9.8, 1,
             "y = {0:.2f}x + {1:.2f}".format(*p),
             ha='right', size=14)
def plot_underdetermined_fits(p, brange=(-0.5, 1.5), xlim=(-3, 3),
                              plot_conditioned=False):
    """Illustrate an underdetermined degree-p polynomial fit.

    Draws p random data points (too few to pin down a degree-p
    polynomial), then plots 20 gray candidate fits, each obtained by
    adding one extra point at x=0 with a y-value sampled from ``brange``
    to make the fit fully specified.

    Parameters:
        p: polynomial degree, and also the number of random data points.
        brange: (low, high) range for the y-value of the extra anchor point.
        xlim: x-range over which candidate curves are drawn.
        plot_conditioned: if True, overlay in black the ridge-regularized
            least-squares solution (normal equations damped by 1e-3 * I).
    """
    rng = np.random.RandomState(42)
    x, y = rng.rand(2, p).round(2)
    xfit = np.linspace(xlim[0], xlim[1])
    for r in rng.rand(20):
        # add a datapoint to make model specified
        b = brange[0] + r * (brange[1] - brange[0])
        xx = np.concatenate([x, [0]])
        yy = np.concatenate([y, [b]])
        theta = np.polyfit(xx, yy, p)
        yfit = np.polyval(theta, xfit)
        plt.plot(xfit, yfit, color='#BBBBBB')
    plt.plot(x, y, 'ok')
    if plot_conditioned:
        # Vandermonde design matrix [1, x, x^2, ..., x^p] per point.
        X = x[:, None] ** np.arange(p + 1)
        # Ridge-damped normal equations: (X'X + 1e-3 I) theta = X'y.
        theta = np.linalg.solve(np.dot(X.T, X)
                                + 1E-3 * np.eye(X.shape[1]),
                                np.dot(X.T, y))
        Xfit = xfit[:, None] ** np.arange(p + 1)
        yfit = np.dot(Xfit, theta)
        plt.plot(xfit, yfit, color='black', lw=2)
def plot_underdetermined_line():
    """Underdetermined fit demo with a line (degree 1, one data point)."""
    plot_underdetermined_fits(1)
def plot_underdetermined_cubic():
    """Underdetermined fit demo with a cubic (degree 3, three data points)."""
    plot_underdetermined_fits(3, brange=(-1, 2),
                              xlim=(0, 1.2))
def plot_conditioned_line():
plot_underdetermined_fits(1, plot_conditioned=True) | /multiparm.r | no_license | jocompto/RPrograms | R | false | false | 1,602 | r | # Code to create figures
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('ggplot')
def plot_simple_line():
    """Scatter 20 noisy points around y = 2x + 5 and overlay the fitted line.

    Uses a fixed RNG seed so the figure is reproducible; the fitted
    equation is annotated in the lower-right corner of the axes.
    """
    rng = np.random.RandomState(42)
    x = 10 * rng.rand(20)
    y = 2 * x + 5 + rng.randn(20)
    # Degree-1 least-squares fit; p = (slope, intercept).
    p = np.polyfit(x, y, 1)
    xfit = np.linspace(0, 10)
    yfit = np.polyval(p, xfit)
    plt.plot(x, y, 'ok')  # fixed: a stray "??" after this call was a syntax error
    plt.plot(xfit, yfit, color='gray')
    plt.text(9.8, 1,
             "y = {0:.2f}x + {1:.2f}".format(*p),
             ha='right', size=14)
def plot_underdetermined_fits(p, brange=(-0.5, 1.5), xlim=(-3, 3),
plot_conditioned=False):
rng = np.random.RandomState(42)
x, y = rng.rand(2, p).round(2)
xfit = np.linspace(xlim[0], xlim[1])
for r in rng.rand(20):
# add a datapoint to make model specified
b = brange[0] + r * (brange[1] - brange[0])
xx = np.concatenate([x, [0]])
yy = np.concatenate([y, [b]])
theta = np.polyfit(xx, yy, p)
yfit = np.polyval(theta, xfit)
plt.plot(xfit, yfit, color='#BBBBBB')
plt.plot(x, y, 'ok')
if plot_conditioned:
X = x[:, None] ** np.arange(p + 1)
theta = np.linalg.solve(np.dot(X.T, X)
+ 1E-3 * np.eye(X.shape[1]),
np.dot(X.T, y))
Xfit = xfit[:, None] ** np.arange(p + 1)
yfit = np.dot(Xfit, theta)
plt.plot(xfit, yfit, color='black', lw=2)
def plot_underdetermined_line():
plot_underdetermined_fits(1)
def plot_underdetermined_cubic():
plot_underdetermined_fits(3, brange=(-1, 2),
xlim=(0, 1.2))
def plot_conditioned_line():
plot_underdetermined_fits(1, plot_conditioned=True) |
# NOTE(review): rm(list = ls()) and setwd() inside a script are
# discouraged -- they wipe the caller's workspace and hard-code a
# machine-specific path.
rm(list = ls())
options(scipen = 999)  # suppress scientific notation in printed numbers
setwd("C:\\Users\\AXIOM\\Desktop\\data\\PD_Analysis")
# libraries & functions:
# Return the modal (most frequent) value of a vector; on a tie, the
# value that appears first in the input wins (which.max takes the first
# maximum).
GetMode <- function(D) {
  values_seen <- unique(D)
  counts <- tabulate(match(D, values_seen))
  values_seen[which.max(counts)]
}
# data from :
#======> https://code.datasciencedojo.com/datasciencedojo/datasets/raw/master/Default%20of%20Credit%20Card%20Clients/default%20of%20credit%20card%20clients.csv
data = read.csv("https://code.datasciencedojo.com/datasciencedojo/datasets/raw/master/Default%20of%20Credit%20Card%20Clients/default%20of%20credit%20card%20clients.csv",
                header = T,stringsAsFactors = F)
# The file evidently ships an extra header row: promote data row 1 to
# the column names, then drop that row below.
for(i in 1:ncol(data)){
  colnames(data)[i] = paste0(data[1,i])
  print(paste0("fixing col :",colnames(data)[i]))
}
data = data[2:nrow(data),]
# NOT RUN{
"Columns LIMIT_BAL, AGE, PAY_0, PAY_2, PAY_3,
PAY_4, PAY_5, PAY_6, BILL_AMT1, BILL_AMT2, BILL_AMT3,
BILL_AMT4, BILL_AMT5, BILL_AMT6, PAY_AMT1, PAY_AMT2, PAY_AMT3,
PAY_AMT4 ,PAY_AMT5, PAY_AMT6, default payment next month
has type character and must be transformed from character to numeric "
# }
#===== transforming cols: character --> numeric ====>
# Columns 2 and 6:25 were read in as character; coerce them to numeric.
cols_list = colnames(data)[c(2,6:25)]
for(i in 1:length(cols_list)){
  data[,cols_list[i]] = as.numeric(data[,cols_list[i]])
}
#====== data structure ====>
# Build a per-column profile: type, cardinality, missingness and summary
# statistics (mean/median only where the column is not character).
# NOTE(review): growing data_structure with rbind inside the loop is
# O(n^2); collecting rows in a list and binding once would be faster.
data_structure = data.frame()
for(i in 1:ncol(data)){
  datum = data.frame(col_name = colnames(data)[i],
                     entry_type = typeof(data[,i]),
                     unique_values = length(unique(data[,i])),
                     unique_entries = ifelse(length(unique(data[,i])) < 6,paste(unique(data[,i]),collapse = "-"),"Too Many Entries"),
                     na_ = sum(is.na(data[,i])),
                     null_ = sum(is.null(data[,i])),
                     nan_ = sum(is.nan(data[,i])),
                     mean_ = ifelse(typeof(data[,i]) != "character",mean(data[,i],na.rm = T),"Not Applicable"),
                     median_ = ifelse(typeof(data[,i]) != "character",median(data[,i],na.rm = T),"Not Applicable"),
                     mode_ = GetMode(data[,i]),stringsAsFactors = F)
  data_structure = rbind.data.frame(data_structure,datum,make.row.names = F)
  print(paste0("decomposing column :",colnames(data)[i]))
}
#=== write --> csv
# Date-stamped output file, e.g. "data_structure-01-01-2020.csv".
write.csv(data_structure,
          file = paste0("data_structure-",format(Sys.Date(),"%d-%m-%Y"),".csv"))
| /Structural_Analysis.R | no_license | Indranil-Seal/Credit_Default_Analysis | R | false | false | 2,399 | r | rm(list = ls())
options(scipen = 999)
setwd("C:\\Users\\AXIOM\\Desktop\\data\\PD_Analysis")
# libraries & functions:
GetMode = function(D){
UniqD = unique(D)
UniqD[which.max(tabulate(match(D,UniqD)))]
}
# data from :
#======> https://code.datasciencedojo.com/datasciencedojo/datasets/raw/master/Default%20of%20Credit%20Card%20Clients/default%20of%20credit%20card%20clients.csv
data = read.csv("https://code.datasciencedojo.com/datasciencedojo/datasets/raw/master/Default%20of%20Credit%20Card%20Clients/default%20of%20credit%20card%20clients.csv",
header = T,stringsAsFactors = F)
for(i in 1:ncol(data)){
colnames(data)[i] = paste0(data[1,i])
print(paste0("fixing col :",colnames(data)[i]))
}
data = data[2:nrow(data),]
# NOT RUN{
"Columns LIMIT_BAL, AGE, PAY_0, PAY_2, PAY_3,
PAY_4, PAY_5, PAY_6, BILL_AMT1, BILL_AMT2, BILL_AMT3,
BILL_AMT4, BILL_AMT5, BILL_AMT6, PAY_AMT1, PAY_AMT2, PAY_AMT3,
PAY_AMT4 ,PAY_AMT5, PAY_AMT6, default payment next month
has type character and must be transformed from character to numeric "
# }
#===== trnasforming cols: character --> numeric ====>
cols_list = colnames(data)[c(2,6:25)]
for(i in 1:length(cols_list)){
data[,cols_list[i]] = as.numeric(data[,cols_list[i]])
}
#====== data structure ====>
data_structure = data.frame()
for(i in 1:ncol(data)){
datum = data.frame(col_name = colnames(data)[i],
entry_type = typeof(data[,i]),
unique_values = length(unique(data[,i])),
unique_entries = ifelse(length(unique(data[,i])) < 6,paste(unique(data[,i]),collapse = "-"),"Too Many Entries"),
na_ = sum(is.na(data[,i])),
null_ = sum(is.null(data[,i])),
nan_ = sum(is.nan(data[,i])),
mean_ = ifelse(typeof(data[,i]) != "character",mean(data[,i],na.rm = T),"Not Applicable"),
median_ = ifelse(typeof(data[,i]) != "character",median(data[,i],na.rm = T),"Not Applicable"),
mode_ = GetMode(data[,i]),stringsAsFactors = F)
data_structure = rbind.data.frame(data_structure,datum,make.row.names = F)
print(paste0("decomposing column :",colnames(data)[i]))
}
#=== write --> csv
write.csv(data_structure,
file = paste0("data_structure-",format(Sys.Date(),"%d-%m-%Y"),".csv"))
|
# plot4: 2x2 panel of household power consumption plots for 2007-02-01/02.

# skip = 1 drops the header row; column names are assigned manually below.
power <- read.table("household_power_consumption.txt", skip = 1,
                    sep = ";")
# assigning proper names to the columns in the dataset
names(power) <- c("Date", "Time", "Global_active_power",
                  "Global_reactive_power", "Voltage", "Global_intensity",
                  "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")

# Keep only the two days required by the assignment.
subpower <- subset(power, power$Date == "1/2/2007" | power$Date == "2/2/2007")

# converting date and time to operatable formats
subpower$Date <- as.Date(subpower$Date, format = "%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format = "%H:%M:%S")
# Re-stamp each minute of the day with its calendar date (1440 rows/day).
# FIX: the format strings previously ended in %Y (four-digit year) instead
# of %S (seconds), which corrupted the seconds field of every timestamp.
subpower[1:1440, "Time"] <- format(subpower[1:1440, "Time"], "2007-02-01 %H:%M:%S")
subpower[1441:2880, "Time"] <- format(subpower[1441:2880, "Time"], "2007-02-02 %H:%M:%S")

# 2x2 grid of subplots.
par(mfrow = c(2, 2))

# Build the four panels of the figure.
with(subpower, {
  plot(subpower$Time, as.numeric(as.character(subpower$Global_active_power)), type = "l", xlab = "", ylab = "Global Active Power")
  plot(subpower$Time, as.numeric(as.character(subpower$Voltage)), type = "l", xlab = "datetime", ylab = "Voltage")
  plot(subpower$Time, subpower$Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering")
  with(subpower, lines(Time, as.numeric(as.character(Sub_metering_1))))
  with(subpower, lines(Time, as.numeric(as.character(Sub_metering_2)), col = "red"))
  with(subpower, lines(Time, as.numeric(as.character(Sub_metering_3)), col = "blue"))
  legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.6)
  plot(subpower$Time, as.numeric(as.character(subpower$Global_reactive_power)), type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})

# copying to png (dev.off() on the following line closes the device)
dev.copy(png, "plot4.png")
dev.off() | /plot4.R | no_license | Shorzinator/Analysing-household-power-consumption-data | R | false | false | 1,965 | r | power <- read.table("household_power_consumption.txt", skip = 1,
sep = ";")
#assigning proper names to the columns in the dataset
names(power) <- c("Date", "Time", "Global_active_power",
"Global_reactive_power", "Voltage", "Global_intensity",
"Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
#taking the subset of the main set
subpower <- subset(power, power$Date == "1/2/2007" | power$Date == "2/2/2007")
#converting date and time to operatable formats
subpower$Date <- as.Date(subpower$Date, format = "%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format = "%H:%M:%S")
subpower[1:1440, "Time"] <- format(subpower[1:1440,"Time"], "2007-02-01 %H:%M:%Y")
subpower[1441:2880, "Time"] <- format(subpower[1441:2880, "Time"], "2007-02-02 %H:%M:%Y")
#diving the plot plain into parts for subplots
par(mfrow = c(2, 2))
# calling the basic plot function that calls different plot functions to build the 4 plots that form the graph
with(subpower,{
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l", xlab="",ylab="Global Active Power")
plot(subpower$Time,as.numeric(as.character(subpower$Voltage)), type="l",xlab="datetime",ylab="Voltage")
plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), cex = 0.6)
plot(subpower$Time,as.numeric(as.character(subpower$Global_reactive_power)),type="l",xlab="datetime",ylab="Global_reactive_power")
})
#copying to png
dev.copy(png, "plot4.png")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trends_rolling_average.R
\name{rolling_average}
\alias{rolling_average}
\title{Calculate rolling averages}
\usage{
rolling_average(x, window_days = 7)
}
\arguments{
\item{x}{The time series for which to calculate the rolling average (for column "val").}
\item{window_days}{The length of the window in days for the rolling average (default = 7).}
}
\value{
The time series with the rolling average added in new column "rolling_avg".
}
\description{
Calculate rolling averages
}
| /man/rolling_average.Rd | permissive | ccodwg/Covid19CanadaTrends | R | false | true | 556 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trends_rolling_average.R
\name{rolling_average}
\alias{rolling_average}
\title{Calculate rolling averages}
\usage{
rolling_average(x, window_days = 7)
}
\arguments{
\item{x}{The time series for which to calculate the rolling average (for column "val").}
\item{window_days}{The length of the window in days for the rolling average (default = 7).}
}
\value{
The time series with the rolling average added in new column "rolling_avg".
}
\description{
Calculate rolling averages
}
|
####################################################################
#' Update the library
#'
#' This function lets the user update the package from a local source
#' checkout or from the Github repository.
#'
#' @param local Boolean. Install package with local files (TRUE) or Github repository
#' @param force Boolean. Force install if needed
#' @param restart Boolean. Restart session after re-installing the library
#' @export
updateLares <- function(local = FALSE, force = FALSE, restart = FALSE) {
  # requireNamespace() fails loudly when the dependency is missing; the
  # previous require() calls silently returned FALSE and the function
  # would have failed later with a less helpful error. (The unused
  # require(config) was dropped.)
  if (!requireNamespace("devtools", quietly = TRUE)) {
    stop("Package 'devtools' is required to update the library.", call. = FALSE)
  }
  start <- Sys.time()
  message(paste(start, "| Started installation..."))
  if (isTRUE(local)) {
    devtools::install("~/Dropbox (Personal)/Documentos/R/Github/lares")
  } else {
    devtools::install_github("laresbernardo/lares", force = force)
  }
  # .rs.restartR() only exists inside RStudio; guard so the function does
  # not error when called from a plain R session.
  if (isTRUE(restart) && exists(".rs.restartR")) {
    .rs.restartR()
  }
  message(paste(Sys.time(), "| Duration:", round(difftime(Sys.time(), start, units = "secs"), 2), "s"))
}
| /R/update.R | no_license | fxcebx/lares | R | false | false | 949 | r | ####################################################################
#' Update the library
#'
#' This function lets the user update from repository or local source.
#'
#' @param local Boolean. Install package with local files (TRUE) or Github repository
#' @param force Boolean. Force install if needed
#' @param restart Boolean. Restart session after re-installing the library
#' @export
updateLares <- function(local = FALSE, force = FALSE, restart = FALSE) {
suppressMessages(require(devtools))
suppressMessages(require(config))
start <- Sys.time()
message(paste(start,"| Started installation..."))
if (local == TRUE) {
devtools::install("~/Dropbox (Personal)/Documentos/R/Github/lares")
} else {
devtools::install_github("laresbernardo/lares", force = force)
}
if (restart == TRUE) {
.rs.restartR()
}
message(paste(Sys.time(), "| Duration:", round(difftime(Sys.time(), start, units="secs"), 2), "s"))
}
|
library(powerSurvEpi)
### Name: numDEpi
### Title: Calculate Number of Deaths Required for Cox Proportional Hazards
### Regression with Two Covariates for Epidemiological Studies
### Aliases: numDEpi
### Keywords: survival design
### ** Examples
# generate a toy pilot data set
# X1: binary exposure indicator (39 exposed, 61 unexposed subjects).
X1 <- c(rep(1, 39), rep(0, 61))
# Fixed seed so the sampled covariate X2 is reproducible.
set.seed(123456)
X2 <- sample(c(0, 1), 100, replace = TRUE)
# Deaths required for 80% power to detect a hazard ratio (theta) of 2
# at the 5% significance level.
res <- numDEpi(X1, X2, power = 0.8, theta = 2, alpha = 0.05)
print(res)
# proportion of subjects died of the disease of interest.
psi <- 0.505
# total number of subjects required to achieve the desired power
ceiling(res$D / psi)
| /data/genthat_extracted_code/powerSurvEpi/examples/numDEpi.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 631 | r | library(powerSurvEpi)
### Name: numDEpi
### Title: Calculate Number of Deaths Required for Cox Proportional Hazards
### Regression with Two Covariates for Epidemiological Studies
### Aliases: numDEpi
### Keywords: survival design
### ** Examples
# generate a toy pilot data set
X1 <- c(rep(1, 39), rep(0, 61))
set.seed(123456)
X2 <- sample(c(0, 1), 100, replace = TRUE)
res <- numDEpi(X1, X2, power = 0.8, theta = 2, alpha = 0.05)
print(res)
# proportion of subjects died of the disease of interest.
psi <- 0.505
# total number of subjects required to achieve the desired power
ceiling(res$D / psi)
|
#This is Rohan's implementation of xgboost

# Install-and-attach helper. FIX: the original blocks used
# `if (require("pkg")) { install.packages("pkg"); library(pkg) }`, which is
# inverted logic -- require() returns TRUE when the package is ALREADY
# available, so packages were reinstalled when present and never installed
# when missing.
ensure_package <- function(pkg) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}

for (pkg in c("dummies", "plyr", "xgboost", "RCurl", "pROC")) {
  ensure_package(pkg)
}

# Source an R script over HTTPS, verifying the server certificate.
# SECURITY NOTE: eval(parse(...)) executes arbitrary remote code in the
# global environment -- only use with URLs you fully trust.
source_https <- function(url)
{
  eval(parse(text = getURL(url, followlocation = TRUE,
                           cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))),
       envir = .GlobalEnv)
}
source_https("https://raw.githubusercontent.com/rohanrao91/Models_CV/master/XGBoost.R")
#model building
#save test ID in submit variable to be used later
#remove target variable and ID variable (if present) from train and move it to 'y'
#model_xgb_1 <- XGBoost(X_train,y,X_test,cv=5,objective="reg:linear",nrounds=500,max.depth=10,eta=0.1,colsample_bytree=0.5,seed=235,metric="rmse",importance=1)
#
#"reg:linear" --linear regression
#"reg:logistic" --logistic regression
#"binary:logistic" --logistic regression for binary classification, output probability
#"binary:logitraw" --logistic regression for binary classification, output score before logistic transformation
#"count:poisson" --poisson regression for count data, output mean of poisson distribution
#max_delta_step is set to 0.7 by default in poisson regression (used to safeguard optimization)
#"multi:softmax" --set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
#"multi:softprob" --same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probability of each data point belonging to each class.
#"rank:pairwise" --set XGBoost to do ranking task by minimizing the pairwise loss
#submission file
#test_xgb_1 <- model_xgb_1[[2]]
#Adding predictions. submit should have the ID's of row numbers
#submit = data.frame(ID = temp$ID, test_xgb_1$pred_xgb)
#write to output
#write.csv(submit, "./submit.csv", row.names=F)
| /XGBoost with cv.R | no_license | highspirits/my_R_scripts | R | false | false | 2,167 | r | #This is Rohan's implementation of xgboost
if(require("dummies")) {
install.packages("dummies")
library(dummies)
}
if(require("plyr")) {
install.packages("plyr")
library(plyr)
}
if(require("xgboost")) {
install.packages("xgboost")
library(xgboost)
}
if(require("RCurl")) {
install.packages("RCurl")
library(RCurl)
}
if(require("pROC")) {
install.packages("pROC")
library(pROC)
}
source_https <- function(url)
{
eval(parse(text=getURL(url,followlocation=T,cainfo=system.file("CurlSSL","cacert.pem",package="RCurl"))),envir=.GlobalEnv)
}
source_https("https://raw.githubusercontent.com/rohanrao91/Models_CV/master/XGBoost.R")
#model building
#save test ID in submit variable to be used later
#remove target variable and ID variable (if present) from train and move it to 'y'
#model_xgb_1 <- XGBoost(X_train,y,X_test,cv=5,objective="reg:linear",nrounds=500,max.depth=10,eta=0.1,colsample_bytree=0.5,seed=235,metric="rmse",importance=1)
#
#"reg:linear" --linear regression
#"reg:logistic" --logistic regression
#"binary:logistic" --logistic regression for binary classification, output probability
#"binary:logitraw" --logistic regression for binary classification, output score before logistic transformation
#"count:poisson" --poisson regression for count data, output mean of poisson distribution
#max_delta_step is set to 0.7 by default in poisson regression (used to safeguard optimization)
#"multi:softmax" --set XGBoost to do multiclass classification using the softmax objective, you also need to set num_class(number of classes)
#"multi:softprob" --same as softmax, but output a vector of ndata * nclass, which can be further reshaped to ndata, nclass matrix. The result contains predicted probability of each data point belonging to each class.
#"rank:pairwise" --set XGBoost to do ranking task by minimizing the pairwise loss
#submission file
#test_xgb_1 <- model_xgb_1[[2]]
#Adding predictions. submit should have the ID's of row numbers
#submit = data.frame(ID = temp$ID, test_xgb_1$pred_xgb)
#write to output
#write.csv(submit, "./submit.csv", row.names=F)
|
###############################################################################
# Author: Alejandro Camacho
# Info: FIU - CCF
# Date: March 2017
# Version: 1.0
# Used:
# *R version 3.2.3 (2015-12-10)
# *Library: RCurl version 1.95-4.8
#
# Sources:
# - Code:
# * https://github.com/graywh/redcap
#
# - Explanation:
# * http://biostat.mc.vanderbilt.edu/wiki/pub/Main/JoAnnAlvarez/api.pdf
# * https://redcapdev.fiu.edu/api/help/index.php?content=exp_records
#
#
###############################################################################
# Load libraries
library(RCurl)
# FUNCTION: redcapExportRecords (get the data from REDCap)
#
# Parameters:
# - file_name: file name to save the data (with or without complete path)
# - api_url: URL to the API (e.g. https://redcap.fiu.edu/api/)
# - api_token: the API token specific to your REDCap project and username (each token is unique to each user for each project)
# - content: record
# - format: csv, json, xml [default]
# - type: flat [default] - output as one record per row
#
# Export a participant list from REDCap and parse it into a data.frame.
#
# NOTE(review): despite the function name ("Records") and the header
# comment above (content: record), this posts content='participantList' --
# confirm which API endpoint is actually intended.
#
# Args:
#   api_url    - URL of the REDCap API endpoint (e.g. https://redcap.fiu.edu/api/)
#   api_token  - project/user-specific API token
#   instrument - survey instrument name to list participants for
#   event      - unique event name (may be NULL for non-longitudinal projects)
# Returns: a data.frame parsed from the API's CSV response
#          (strings kept as character, empty strings read as NA).
redcapExportRecords <- function(api_url, api_token, instrument, event) {
  if (!require('RCurl')) {
    stop('RCurl is not installed')
  }
  mydata <- read.csv(
    text=postForm(
      # Redcap API required
      uri=api_url
      , token=api_token
      , content='participantList'
      , format='csv'
      , type='flat'
      , instrument=instrument
      # Redcap API optional
      , event=event#NULL
      # RCurl options: verify the server's TLS certificate hostname
      ,.opts=curlOptions(ssl.verifyhost=2)
    )
    ,stringsAsFactors=FALSE
    ,na.strings='')
  return(mydata)
  #write.csv(mydata, file = file_name)
}
# Export a saved REDCap report as CSV and write it to a local file.
#
# NOTE(review): report_id is hard-coded (1865, previously 4) -- consider
# promoting it to a function parameter.
#
# Args:
#   api_url   - URL of the REDCap API endpoint
#   api_token - project/user-specific API token
#   file_name - path of the CSV file to write the report to
# Side effect: writes the parsed report to file_name via write.csv().
redcapExportReport <- function(api_url, api_token, file_name) {
  if (!require('RCurl')) {
    stop('RCurl is not installed')
  }
  text <- postForm(
    # Redcap API required
    uri=api_url
    , token=api_token
    , content='report'
    , format='csv'
    , report_id= 1865#4
    # RCurl options: verify the server's TLS certificate hostname
    ,.opts=curlOptions(ssl.verifyhost=2)
  )
  mydata <- read.csv(
    text = text,
    stringsAsFactors=FALSE,
    na.strings=''
  )
  write.csv(mydata, file = file_name)
}
# Config: URL
#api_url <- 'https://redcapdev.fiu.edu/api/'
# Config: Tokens for each database
# SECURITY NOTE(review): API tokens are committed to source control below
# (even though commented out) -- rotate them and load from environment
# variables or a gitignored config file instead.
#api_token <- "F66E35FDC22C3BE97BD3C5FCE0F5201E" #CCF Programs Database
#api_token <- "09C6537FF5EAFE92BD74E1AA1B9BEF67"
# Set the working directory
#setwd("D:/dev/CCF/redcap_sms_scheduler/")
#setwd("D:/CCF BI Projects/SSIS Projects/SSIS - ETL Clinic DW Project")
#file_name <- paste(getwd(), paste("ccf_programs_",gsub("[[:punct:][:space:]]","",Sys.time()),".csv",sep=""), sep="/")
#setwd("~/")
# Platform-specific paths: bare filenames on Windows, home-relative on UNIX.
# NOTE(review): api_url/api_token are referenced below but their definitions
# above are commented out -- they must be supplied by the calling
# environment. contact_list and randomization are assigned with <<- into
# the global environment, presumably for a calling script to consume;
# confirm that is intended.
if(.Platform$OS.type == "windows") {
  file_name <- "ccf_programs_participant_list.csv"
  redcapExportReport(api_url,api_token, "phone_number_list.csv")
  contact_list <<- read.csv("phone_number_list.csv", stringsAsFactors = F)
  randomization <<- read.csv(
    file = "random.csv",
    stringsAsFactors=FALSE,
    na.strings=''
  )
} else { #UNIX
  file_name <- "~/ccf_programs_participant_list.csv"
  redcapExportReport(api_url,api_token, "~/phone_number_list.csv")
  contact_list <<- read.csv("~/phone_number_list.csv", stringsAsFactors = F)
  randomization <<- read.csv(
    file = "~/random.csv",
    stringsAsFactors=FALSE,
    na.strings=''
  )
}
| /get_participant_list.R | no_license | gladysCJ30/redcap_sms_scheduler | R | false | false | 3,335 | r | ###############################################################################
# Author: Alejandro Camacho
# Info: FIU - CCF
# Date: March 2017
# Version: 1.0
# Used:
# *R version 3.2.3 (2015-12-10)
# *Library: RCurl version 1.95-4.8
#
# Sources:
# - Code:
# * https://github.com/graywh/redcap
#
# - Explanation:
# * http://biostat.mc.vanderbilt.edu/wiki/pub/Main/JoAnnAlvarez/api.pdf
# * https://redcapdev.fiu.edu/api/help/index.php?content=exp_records
#
#
###############################################################################
# Load libraries
library(RCurl)
# FUNCTION: redcapExportRecords (get the data from REDCap)
#
# Parameters:
# - file_name: file name to save the data (with or without complete path)
# - api_url: URL to the API (e.g. https://redcap.fiu.edu/api/)
# - api_token: the API token specific to your REDCap project and username (each token is unique to each user for each project)
# - content: record
# - format: csv, json, xml [default]
# - type: flat [default] - output as one record per row
#
redcapExportRecords <- function(api_url, api_token, instrument, event) {
if (!require('RCurl')) {
stop('RCurl is not installed')
}
mydata <- read.csv(
text=postForm(
# Redcap API required
uri=api_url
, token=api_token
, content='participantList'
, format='csv'
, type='flat'
, instrument=instrument
# Redcap API optional
, event=event#NULL
# RCurl options
,.opts=curlOptions(ssl.verifyhost=2)
)
,stringsAsFactors=FALSE
,na.strings='')
return(mydata)
#write.csv(mydata, file = file_name)
}
redcapExportReport <- function(api_url, api_token, file_name) {
if (!require('RCurl')) {
stop('RCurl is not installed')
}
text <- postForm(
# Redcap API required
uri=api_url
, token=api_token
, content='report'
, format='csv'
, report_id= 1865#4
# RCurl options
,.opts=curlOptions(ssl.verifyhost=2)
)
mydata <- read.csv(
text = text,
stringsAsFactors=FALSE,
na.strings=''
)
write.csv(mydata, file = file_name)
}
# Config: URL
#api_url <- 'https://redcapdev.fiu.edu/api/'
# Config: Tokens for each database
#api_token <- "F66E35FDC22C3BE97BD3C5FCE0F5201E" #CCF Programs Database
#api_token <- "09C6537FF5EAFE92BD74E1AA1B9BEF67"
# Set the working directory
#setwd("D:/dev/CCF/redcap_sms_scheduler/")
#setwd("D:/CCF BI Projects/SSIS Projects/SSIS - ETL Clinic DW Project")
#file_name <- paste(getwd(), paste("ccf_programs_",gsub("[[:punct:][:space:]]","",Sys.time()),".csv",sep=""), sep="/")
#setwd("~/")
# The Windows and UNIX branches were identical except that UNIX paths were
# anchored at the home directory ("~/"); collapse them to one code path.
# NOTE(review): api_url / api_token must be set before this point (the
# config lines above are commented out).
path_prefix <- if (.Platform$OS.type == "windows") "" else "~/"
file_name <- paste0(path_prefix, "ccf_programs_participant_list.csv")
phone_file <- paste0(path_prefix, "phone_number_list.csv")
# Pull the phone-number report from REDCap, then load it and the local
# randomization table. Plain <- replaces the original <<-, which is
# equivalent at top level (both assign into the global environment).
redcapExportReport(api_url, api_token, phone_file)
contact_list <- read.csv(phone_file, stringsAsFactors = FALSE)
randomization <- read.csv(
  file = paste0(path_prefix, "random.csv"),
  stringsAsFactors = FALSE,
  na.strings = ''
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attributes_methods.R
\name{getDatabase}
\alias{getDatabase}
\title{Get database associated with an ftmsData object}
\usage{
getDatabase(ftmsObj)
}
\arguments{
\item{ftmsObj}{an object of type ftmsData}
}
\value{
database name
}
\description{
Get the database associated with an object that has been mapped to
the compound or module level.
}
| /man/getDatabase.Rd | permissive | EMSL-Computing/ftmsRanalysis | R | false | true | 419 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/attributes_methods.R
\name{getDatabase}
\alias{getDatabase}
\title{Get database associated with an ftmsData object}
\usage{
getDatabase(ftmsObj)
}
\arguments{
\item{ftmsObj}{an object of type ftmsData}
}
\value{
database name
}
\description{
Get the database associated with an object that has been mapped to
the compound or module level.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HandleBreathTestDatabase.R
\name{AddAllBreathTestRecords}
\alias{AddAllBreathTestRecords}
\title{Reads and saves multiple 13C Breath test records}
\usage{
AddAllBreathTestRecords(path, con)
}
\arguments{
\item{path}{start path for recursive search; can be a vector of
multiple start paths.}
\item{con}{connection to sqlite database}
}
\value{
A dataframe with columns \code{file}, \code{basename},
\code{recordID} (NULL if not saved) and \code{status}
with levels \code{"saved", "skipped", "invalid"}.
}
\description{
Reads all BreathID and Iris/Wagner data records in a directory.
Computes several fit parameters and a fit, and writes these to the database.
Files that are already in the database are skipped. Note only the base name is tested,
so that files with
the same name in different directories are considered identical without testing.
}
\examples{
if (exists("con")) suppressWarnings(dbDisconnect(con))
sqlitePath = tempfile(pattern = "Gastrobase", tmpdir = tempdir(), fileext = ".sqlite")
unlink(sqlitePath)
CreateEmptyBreathTestDatabase(sqlitePath)
con = OpenSqliteConnection(sqlitePath)
path = dirname(
system.file("extdata", "350_20043_0_GER.txt", package = "D13CBreath"))
AddAllBreathTestRecords(path,con)
dbDisconnect(con)
}
| /man/AddAllBreathTestRecords.Rd | no_license | dmenne/d13cbreath | R | false | true | 1,324 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HandleBreathTestDatabase.R
\name{AddAllBreathTestRecords}
\alias{AddAllBreathTestRecords}
\title{Reads and saves multiple 13C Breath test records}
\usage{
AddAllBreathTestRecords(path, con)
}
\arguments{
\item{path}{start path for recursive search; can be a vector of
multiple start paths.}
\item{con}{connection to sqlite database}
}
\value{
A dataframe with columns \code{file}, \code{basename},
\code{recordID} (NULL if not saved) and \code{status}
with levels \code{"saved", "skipped", "invalid"}.
}
\description{
Reads all BreathID and Iris/Wagner data records in a directory.
Computes several fit parameters and a fit, and writes these to the database.
Files that are already in the database are skipped. Note only the base name is tested,
so that files with
the same name in different directories are considered identical without testing.
}
\examples{
if (exists("con")) suppressWarnings(dbDisconnect(con))
sqlitePath = tempfile(pattern = "Gastrobase", tmpdir = tempdir(), fileext = ".sqlite")
unlink(sqlitePath)
CreateEmptyBreathTestDatabase(sqlitePath)
con = OpenSqliteConnection(sqlitePath)
path = dirname(
system.file("extdata", "350_20043_0_GER.txt", package = "D13CBreath"))
AddAllBreathTestRecords(path,con)
dbDisconnect(con)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{write_stan_json}
\alias{write_stan_json}
\title{Write data to a JSON file readable by CmdStan}
\usage{
write_stan_json(data, file)
}
\arguments{
\item{data}{(list) A named list of \R objects.}
\item{file}{(string) The path to where the data file should be written.}
}
\description{
Write data to a JSON file readable by CmdStan
}
\details{
\code{write_stan_json()} performs several conversions before writing the JSON
file:
\itemize{
\item \code{logical} -> \code{integer} (\code{TRUE} -> \code{1}, \code{FALSE} -> \code{0})
\item \code{data.frame} -> \code{matrix} (via \code{\link[=data.matrix]{data.matrix()}})
\item \code{list} -> \code{array}
\item \code{table} -> \code{vector}, \code{matrix}, or \code{array} (depending on dimensions of table)
}
The \code{list} to \code{array} conversion is intended to make it easier to prepare
the data for certain Stan declarations involving arrays:
\itemize{
\item \verb{vector[J] v[K]} (or equivalently \verb{array[K] vector[J] v } as of Stan 2.27)
can be constructed in \R as a list with \code{K} elements where each element a
vector of length \code{J}
\item \verb{matrix[I,J] v[K]} (or equivalently \verb{array[K] matrix[I,J] m } as of Stan
2.27 ) can be constructed in \R as a list with \code{K} elements where each element
an \code{IxJ} matrix
}
These can also be passed in from \R as arrays instead of lists but the list
option is provided for convenience. Unfortunately for arrays with more than
one dimension, e.g., \verb{vector[J] v[K,L]} (or equivalently
\verb{array[K,L] vector[J] v } as of Stan 2.27) it is not possible to use an \R
list and an array must be used instead. For this example the array in \R
should have dimensions \code{KxLxJ}.
}
\examples{
x <- matrix(rnorm(10), 5, 2)
y <- rpois(nrow(x), lambda = 10)
z <- c(TRUE, FALSE)
data <- list(N = nrow(x), K = ncol(x), x = x, y = y, z = z)
# write data to json file
file <- tempfile(fileext = ".json")
write_stan_json(data, file)
# check the contents of the file
cat(readLines(file), sep = "\n")
# demonstrating list to array conversion
# suppose x is declared as `vector[3] x[2]` (or equivalently `array[2] vector[3] x`)
# we can use a list of length 2 where each element is a vector of length 3
data <- list(x = list(1:3, 4:6))
file <- tempfile(fileext = ".json")
write_stan_json(data, file)
cat(readLines(file), sep = "\n")
}
| /man/write_stan_json.Rd | permissive | alyst/cmdstanr | R | false | true | 2,443 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{write_stan_json}
\alias{write_stan_json}
\title{Write data to a JSON file readable by CmdStan}
\usage{
write_stan_json(data, file)
}
\arguments{
\item{data}{(list) A named list of \R objects.}
\item{file}{(string) The path to where the data file should be written.}
}
\description{
Write data to a JSON file readable by CmdStan
}
\details{
\code{write_stan_json()} performs several conversions before writing the JSON
file:
\itemize{
\item \code{logical} -> \code{integer} (\code{TRUE} -> \code{1}, \code{FALSE} -> \code{0})
\item \code{data.frame} -> \code{matrix} (via \code{\link[=data.matrix]{data.matrix()}})
\item \code{list} -> \code{array}
\item \code{table} -> \code{vector}, \code{matrix}, or \code{array} (depending on dimensions of table)
}
The \code{list} to \code{array} conversion is intended to make it easier to prepare
the data for certain Stan declarations involving arrays:
\itemize{
\item \verb{vector[J] v[K]} (or equivalently \verb{array[K] vector[J] v } as of Stan 2.27)
can be constructed in \R as a list with \code{K} elements where each element a
vector of length \code{J}
\item \verb{matrix[I,J] v[K]} (or equivalently \verb{array[K] matrix[I,J] m } as of Stan
2.27 ) can be constructed in \R as a list with \code{K} elements where each element
an \code{IxJ} matrix
}
These can also be passed in from \R as arrays instead of lists but the list
option is provided for convenience. Unfortunately for arrays with more than
one dimension, e.g., \verb{vector[J] v[K,L]} (or equivalently
\verb{array[K,L] vector[J] v } as of Stan 2.27) it is not possible to use an \R
list and an array must be used instead. For this example the array in \R
should have dimensions \code{KxLxJ}.
}
\examples{
x <- matrix(rnorm(10), 5, 2)
y <- rpois(nrow(x), lambda = 10)
z <- c(TRUE, FALSE)
data <- list(N = nrow(x), K = ncol(x), x = x, y = y, z = z)
# write data to json file
file <- tempfile(fileext = ".json")
write_stan_json(data, file)
# check the contents of the file
cat(readLines(file), sep = "\n")
# demonstrating list to array conversion
# suppose x is declared as `vector[3] x[2]` (or equivalently `array[2] vector[3] x`)
# we can use a list of length 2 where each element is a vector of length 3
data <- list(x = list(1:3, 4:6))
file <- tempfile(fileext = ".json")
write_stan_json(data, file)
cat(readLines(file), sep = "\n")
}
|
# Selection operator of RVEA (Reference Vector guided Evolutionary Algorithm).
# Translated from MATLAB: function [Selection] = F_select(FunctionValue, V, theta0, refV)
#
# Args:
#   FunctionValue: N x M matrix of objective values (N solutions, M objectives)
#   V:             VN x M matrix of unit reference vectors
#   theta0:        penalty parameter for the angle-penalized distance (APD)
#   refV:          per-vector angle normalization terms
#   optimize_func: column-wise Min/Max-style helper used to locate the ideal point
#
# Returns: row indices into FunctionValue of the selected solutions,
#          at most one per reference vector.
#
# NOTE(review): size/repmat/Sum/Max/Min/R/C and the list[a, b] <- destructuring
# come from project utilities (gsubfn-style) defined elsewhere in this package.
F_select <- function(FunctionValue, V, theta0, refV, optimize_func){
  NM <- size(FunctionValue)
  N <- NM[1]
  M <- NM[2]
  VN <- size(V, 1)
  # Only the name is Zmin; it depends on optimize_func and can be min or max.
  Zmin <- optimize_func(FunctionValue ,1)
  # Translation: shift objectives so the ideal point sits at the origin.
  FunctionValue <- (FunctionValue - repmat(Zmin, R(size(FunctionValue,1), 1)) )
  # Association of solutions to reference vectors: normalize each row to
  # unit length, then take cosines against every reference vector.
  div <- repmat(sqrt(Sum(FunctionValue^2,2)), R(1, M))
  uFunctionValue <- FunctionValue / div
  # Matrix multiplication: cosine between each solution and each vector.
  cosine <- uFunctionValue %*% t(V)
  acosine <- acos(cosine)
  # Row-wise max with index: each solution joins its closest (largest-cosine) vector.
  list[maxc, maxcidx] <- Max(cosine, 2, T)
  # class[[k]] accumulates the indices of solutions assigned to vector k (NA = empty).
  class <- as.list(rep(NA, VN))
  for (i in 1:N){
    if (is.na(class[maxcidx[i]])){
      class[maxcidx[i]] <- i
    }
    else {
      class[[maxcidx[i]]] <- c(class[[maxcidx[i]]], i)
    }
  }
  Selection <- NULL
  for (k in 1:VN){
    if (!is.na(class[k])){
      sub <- class[[k]]
      subFunctionValue <- FunctionValue[sub,]
      # APD (angle-penalized distance) calculation
      subacosine <- acosine[sub, k]
      subacosine <- subacosine/refV[k]# angle normalization
      D1 <- sqrt(Sum(subFunctionValue^2,2))# Euclidean distance from solution to the ideal point
      D <- D1*(1 + theta0[1]*(subacosine))# APD
      # Keep the solution with the smallest APD for this reference vector.
      list[mind, mindidx] <- Min(D, 1, T)
      Selection <- C(Selection, sub[mindidx])
    }
  }
  return(Selection)
}
# Build a randomly matched mating pool by shuffling the population rows.
# Translated from MATLAB: function [MatingPool] <- F_mating(Population)
#
# Args:
#   Population: N x D matrix, one candidate solution per row
# Returns: the population in random row order; when N is odd the first
#          shuffled row is appended so the pool has an even number of
#          parents (crossover consumes them in pairs).
F_mating <- function(Population){
  N <- size(Population, 1)
  # The original preallocated zeros(N, D) and immediately overwrote it;
  # that dead assignment has been removed.
  MatingPool <- Population[R(sample(N)), ]
  if (N %% 2 == 1){
    # Odd pool: duplicate the first shuffled row so all parents pair up.
    MatingPool <- C(MatingPool, MatingPool[1,])
  }
  return (MatingPool)
}
# Generate uniformly distributed weight vectors using a two-layer
# simplex-lattice design: an outer layer with p1 divisions and, when
# p2 > 0, an inner layer with p2 divisions shrunk toward the centre.
# Translated from MATLAB: function [N,W] <- F_weight(p1,p2,M)
#
# Returns: list(N, W) -- total vector count and the stacked weight matrix.
F_weight <- function(p1, p2, M){
  outer_layer <- T_weight(p1, M)
  N <- outer_layer[[1]]
  W <- outer_layer[[2]]
  if (p2 > 0){
    inner_layer <- T_weight(p2, M)
    N <- N + inner_layer[[1]]
    # Shrink inner-layer vectors toward the simplex centre, then stack.
    W <- C(W, inner_layer[[2]]*0.5+(1 - 0.5)/(M))
  }
  return (list(N, W))
}
# Simplex-lattice design: enumerate every M-dimensional weight vector whose
# components are non-negative multiples of 1/H summing to 1.
# Translated from MATLAB: function [N,W] <- T_weight(H,M)
#
# Args:
#   H: number of divisions along each objective axis
#   M: number of objectives
# Returns: list(N, W) with N = choose(H+M-1, M-1) vectors and W the
#          N x M weight matrix (each row sums to 1).
T_weight <- function(H, M){
  N <- nchoosek(H+M-1,M-1)
  # Stars-and-bars construction: each combination of M-1 "bar" positions
  # among H+M-1 slots yields one lattice point.
  Temp <- nchoosek(1:(H+M-1),M-1)-repmat(0:(M-2),nchoosek(H+M-1,M-1),1)-1
  W <- zeros(N,M)
  W[,1] <- Temp[,1]-0
  # Interior components are successive differences of the bar positions.
  if ((M-1)>=2)
    for (i in 2: (M-1)) {
      W[,i] <- Temp[,i]-Temp[,i-1]
    }
  W[,size(W, 2)] <- H-Temp[,size(Temp, 2)]
  # Normalize so each row sums to 1.
  W <- W/H
  return (list(N, W))
}
# Efficient non-dominated sort, sequential search strategy (ENS-SS).
# Reference: Xingyi Zhang, Ye Tian, Ran Cheng and Yaochu Jin, IEEE TEVC, 2014.
# Copyright 2014 BCMA Group, written by Mr Ye Tian and Prof Xingyi Zhang.
# Contact: xyzhanghust@gmail.com
#
# Args:
#   FunctionValue: N x M matrix of objective values (minimization assumed)
#   operation:     "" ranks the whole population; "half" stops once N/2
#                  solutions are ranked; "first" stops after the first front
# Returns: list(FrontValue, MaxFront) -- FrontValue[i] is the front number
#          of solution i in the ORIGINAL row order (Inf if unranked due to
#          early stopping); MaxFront is the number of fronts produced.
P_sort <- function(FunctionValue, operation = ""){
  if (operation == 'half'){
    kinds <- 2
  } else if (operation == 'first'){
    kinds <- 3
  } else {
    kinds <- 1
  }
  NM <- size(FunctionValue)
  N <- NM[1]; M <- NM[2]
  MaxFront <- 0
  Sorted <- zeros(1,N)
  # Lexicographic row sort: afterwards a solution can only be dominated by
  # solutions appearing earlier, which is what makes the scan sequential.
  list[FunctionValue,rank] <- Sortrows(FunctionValue)
  FrontValue <- zeros(1,N) + Inf
  # Peel off fronts until the stopping rule for this 'kinds' is satisfied.
  while ((kinds == 1 && sum(Sorted)<N) || (kinds == 2 && sum(Sorted)<N/2) || (kinds == 3 && MaxFront<1)){
    MaxFront <- MaxFront + 1
    ThisFront <- as.logical(zeros(1, N))
    for (i in 1:N){
      if (!Sorted[i]){
        x <- 0
        # Compare i against every member j already on the current front;
        # x == 2 at the end means some j dominates i.
        for (j in 1:N){
          if (ThisFront[j]){
            x <- 2
            # The first objective needs no comparison thanks to the row sort.
            for (j2 in 2 : M){
              if (FunctionValue[i,j2] < FunctionValue[j,j2]){
                x <- 0
                break
              }
            }
            if (x == 2){
              break
            }
          }
        }
        if (x != 2){
          ThisFront[i] <- T
          Sorted[i] <- T
        }
      }
    }
    # Map membership of this front back to the pre-sort row order.
    FrontValue[rank[ThisFront]] <- MaxFront
  }
  return(list(FrontValue,MaxFront))
}
# Variation operator: simulated binary crossover (SBX) followed by
# polynomial mutation, for real-coded chromosomes.
#
# Args:
#   MatingPool:   N x D matrix of parents, consumed in consecutive pairs.
#                 NOTE(review): assumes N is even (F_mating pads odd pools);
#                 MatingPool[i+1,] would overrun otherwise -- confirm callers.
#   Boundary:     2 x D matrix; row 1 = upper bounds, row 2 = lower bounds
#   Coding:       only 'Real' is handled; anything else returns an error
#                 (Offspring is never assigned)
#   MaxOffspring: number of offspring to return; values <1 or >N default to N
# Returns: MaxOffspring x D matrix of offspring clamped to the bounds.
P_generator <- function(MatingPool,Boundary,Coding,MaxOffspring = 0){
  ND <- size(MatingPool)
  N <- ND[1]
  D <- ND[2]
  if (MaxOffspring < 1 || MaxOffspring > N){
    MaxOffspring <- N
  }
  if (Coding == 'Real'){
    # Control parameters: crossover probability, per-gene mutation
    # probability, and the two distribution indices (SBX / mutation).
    ProC <- 1
    ProM <- 1/D
    DisC <- 30
    DisM <- 20
    Offspring <- zeros( N, D)
    # SBX: each consecutive parent pair produces two children via the
    # spread factor beta drawn from the SBX polynomial distribution.
    for (i in seq(1, N, 2) ) {
      beta <- zeros(1, D)
      miu <- rand(1, D)
      beta[miu<=0.5] <- (2*miu[miu<=0.5])^(1/(DisC+1))
      beta[miu>0.5] <- (2-2*miu[miu>0.5])^(-1/(DisC+1))
      # Random sign flip, and genes missing the crossover probability keep
      # beta = 1 (children equal the parents for those genes).
      beta <- beta * (-1)^round(rand(1, D))
      beta[rand(1, D)>ProC] <- 1
      Offspring[i,] <- (MatingPool[i,] +MatingPool[i+1,])/2 + beta * (MatingPool[i,]-MatingPool[i+1,])/2
      Offspring[i+1,] <- (MatingPool[i,]+MatingPool[i+1,])/2 - beta * (MatingPool[i,]-MatingPool[i+1,])/2
    }
    # Truncate to the requested offspring count.
    # NOTE(review): when MaxOffspring == 1 this drops to a vector, which is
    # why the bounds below switch to the un-replicated form.
    Offspring <- Offspring[1:MaxOffspring,]
    if (MaxOffspring == 1){
      MaxValue <- Boundary[1,]
      MinValue <- Boundary[2,]
    } else {
      # repmat defined at utils
      MaxValue <- repmat(Boundary[1,], MaxOffspring,1);
      MinValue <- repmat(Boundary[2,], MaxOffspring,1);
    }
    # Polynomial mutation: genes selected with probability ProM are
    # perturbed toward one bound, split on miu < 0.5 vs miu >= 0.5.
    k <- rand(MaxOffspring, D)
    miu <- rand(MaxOffspring, D)
    Temp <- (k<=ProM & miu<0.5)
    Offspring[Temp] <- Offspring[Temp]+(MaxValue[Temp]-MinValue[Temp]) * ((2*miu[Temp]+(1-2*miu[Temp])*(1-(Offspring[Temp]-MinValue[Temp])/(MaxValue[Temp]-MinValue[Temp]))^(DisM+1))^(1/(DisM+1))-1)
    Temp <- (k<=ProM & miu>=0.5)
    Offspring[Temp] <- Offspring[Temp]+(MaxValue[Temp]-MinValue[Temp]) * (1-(2*(1-miu[Temp])+2*(miu[Temp]-0.5)*(1-(MaxValue[Temp]-Offspring[Temp])/(MaxValue[Temp]-MinValue[Temp]))^(DisM+1))^(1/(DisM+1)))
    # Clamp every gene back into the feasible box.
    Offspring[Offspring>MaxValue] <- MaxValue[Offspring>MaxValue]
    Offspring[Offspring<MinValue] <- MinValue[Offspring<MinValue]
  }
  return (Offspring)
}
| /RVEA_NEW/F_misc.R | no_license | PraveshKoirala/RVEA_R | R | false | false | 6,413 | r | # The selection function in RVEA
#function [Selection] = F_select(FunctionValue, V, theta0, refV)
# Completed!
F_select <- function(FunctionValue, V, theta0, refV, optimize_func){
# disable this entire segment
# values <- readMat("function_value.mat")
# FunctionValue <- values$FunctionValue
# V <- values$V
# theta0 <- values$theta0
# refV <- values$refV
NM <- size(FunctionValue)
N <- NM[1]
M <- NM[2]
VN <- size(V, 1)
# only name is Zmin, but it depends upon the optimize_func.. can be min or max
Zmin <- optimize_func(FunctionValue ,1)
#Translation
FunctionValue <- (FunctionValue - repmat(Zmin, R(size(FunctionValue,1), 1)) )
#Solutions associattion to reference vectors
div <- repmat(sqrt(Sum(FunctionValue^2,2)), R(1, M))
uFunctionValue <- FunctionValue / div
# Matrix multiplication
cosine <- uFunctionValue %*% t(V) #calculate the cosine values between each solution and each vector
acosine <- acos(cosine)
# call max with argument to give index too..
list[maxc, maxcidx] <- Max(cosine, 2, T)
# class <- data.frame(c = rep(NA, VN)) #classification
class <- as.list(rep(NA, VN))
for (i in 1:N){
# empty at first
# if (is.na(class[maxcidx[[i]], 'c'])){
# class[maxcidx[[i]], 'c'] <- R(i)
# }
# else { # append
# class[maxcidx[[i]], 'c'] <- R(class[maxcidx[[i]], 'c'], i)
# }
if (is.na(class[maxcidx[i]])){
class[maxcidx[i]] <- i
}
else {
class[[maxcidx[i]]] <- c(class[[maxcidx[i]]], i)
}
}
Selection <- NULL
for (k in 1:VN){
if (!is.na(class[k])){
sub <- class[[k]]
subFunctionValue <- FunctionValue[sub,]
#APD calculation
subacosine <- acosine[sub, k]
subacosine <- subacosine/refV[k]# angle normalization
D1 <- sqrt(Sum(subFunctionValue^2,2))# Euclidean distance from solution to the ideal point
D <- D1*(1 + theta0[1]*(subacosine))# APD
list[mind, mindidx] <- Min(D, 1, T)
Selection <- C(Selection, sub[mindidx])
}
}
return(Selection)
}
#return randomly mathced mating pool
# function [MatingPool] <- F_mating(Population)
# Completed !!
F_mating <- function(Population){
ND <- size(Population)
N <- ND[1]
D <- ND[2]
MatingPool <- zeros(N,D)
RandList <- R(sample(N))
MatingPool <- Population[RandList, ]
if (N %% 2 == 1){
MatingPool <- C(MatingPool, MatingPool[1,])
}
return (MatingPool)
}
#Function to generate uniformly distributed weight vectors
# function [N,W] <- F_weight(p1,p2,M)
# Completed !!
F_weight <- function(p1, p2, M){
list[N,W] <- T_weight(p1,M)
if (p2 > 0){
list[N2,W2] <- T_weight(p2,M)
N <- N+N2
W <- C(W, W2*0.5+(1 - 0.5)/(M))
}
return (list(N, W))
}
# function [N,W] <- T_weight(H,M)
T_weight <- function(H, M){
N <- nchoosek(H+M-1,M-1)
Temp <- nchoosek(1:(H+M-1),M-1)-repmat(0:(M-2),nchoosek(H+M-1,M-1),1)-1
W <- zeros(N,M)
W[,1] <- Temp[,1]-0
if ((M-1)>=2)
for (i in 2: (M-1)) {
W[,i] <- Temp[,i]-Temp[,i-1]
}
W[,size(W, 2)] <- H-Temp[,size(Temp, 2)]
W <- W/H
return (list(N, W))
}
# checked!
P_sort <- function(FunctionValue, operation = ""){
# Efficient non-dominated sort on sequential search strategy, TEVC, 2014,
# Xingyi Zhang, Ye Tian, Ran Cheng and Yaochu Jin
# Copyright 2014 BCMA Group, Written by Mr Ye Tian and Prof Xingyi Zhang
# Contact: xyzhanghust@gmail.com
if (operation == 'half'){
kinds <- 2
} else if (operation == 'first'){
kinds <- 3
} else {
kinds <- 1
}
NM <- size(FunctionValue)
N <- NM[1]; M <- NM[2]
MaxFront <- 0
Sorted <- zeros(1,N)
list[FunctionValue,rank] <- Sortrows(FunctionValue)
FrontValue <- zeros(1,N) + Inf
while ((kinds == 1 && sum(Sorted)<N) || (kinds == 2 && sum(Sorted)<N/2) || (kinds == 3 && MaxFront<1)){
MaxFront <- MaxFront + 1
ThisFront <- as.logical(zeros(1, N))
for (i in 1:N){
if (!Sorted[i]){
x <- 0
for (j in 1:N){
if (ThisFront[j]){
x <- 2
for (j2 in 2 : M){
if (FunctionValue[i,j2] < FunctionValue[j,j2]){
x <- 0
break
}
}
if (x == 2){
break
}
}
}
if (x != 2){
ThisFront[i] <- T
Sorted[i] <- T
}
}
}
# Potentially problematic?
# index <- 1 * (1 %in% ThisFront)
FrontValue[rank[ThisFront]] <- MaxFront
}
return(list(FrontValue,MaxFront))
}
# checked!
P_generator <- function(MatingPool,Boundary,Coding,MaxOffspring = 0){
# This function includes the SBX crossover operator and the polynomial
# mutation operator.
ND <- size(MatingPool)
N <- ND[1]
D <- ND[2]
if (MaxOffspring < 1 || MaxOffspring > N){
MaxOffspring <- N
}
if (Coding == 'Real'){
ProC <- 1
ProM <- 1/D
DisC <- 30
DisM <- 20
Offspring <- zeros( N, D)
for (i in seq(1, N, 2) ) {
beta <- zeros(1, D)
miu <- rand(1, D)
beta[miu<=0.5] <- (2*miu[miu<=0.5])^(1/(DisC+1))
beta[miu>0.5] <- (2-2*miu[miu>0.5])^(-1/(DisC+1))
beta <- beta * (-1)^round(rand(1, D))
beta[rand(1, D)>ProC] <- 1
Offspring[i,] <- (MatingPool[i,] +MatingPool[i+1,])/2 + beta * (MatingPool[i,]-MatingPool[i+1,])/2
Offspring[i+1,] <- (MatingPool[i,]+MatingPool[i+1,])/2 - beta * (MatingPool[i,]-MatingPool[i+1,])/2
}
Offspring <- Offspring[1:MaxOffspring,]
if (MaxOffspring == 1){
MaxValue <- Boundary[1,]
MinValue <- Boundary[2,]
} else {
# repmat defined at utils
MaxValue <- repmat(Boundary[1,], MaxOffspring,1);
MinValue <- repmat(Boundary[2,], MaxOffspring,1);
}
k <- rand(MaxOffspring, D)
miu <- rand(MaxOffspring, D)
Temp <- (k<=ProM & miu<0.5)
Offspring[Temp] <- Offspring[Temp]+(MaxValue[Temp]-MinValue[Temp]) * ((2*miu[Temp]+(1-2*miu[Temp])*(1-(Offspring[Temp]-MinValue[Temp])/(MaxValue[Temp]-MinValue[Temp]))^(DisM+1))^(1/(DisM+1))-1)
Temp <- (k<=ProM & miu>=0.5)
Offspring[Temp] <- Offspring[Temp]+(MaxValue[Temp]-MinValue[Temp]) * (1-(2*(1-miu[Temp])+2*(miu[Temp]-0.5)*(1-(MaxValue[Temp]-Offspring[Temp])/(MaxValue[Temp]-MinValue[Temp]))^(DisM+1))^(1/(DisM+1)))
Offspring[Offspring>MaxValue] <- MaxValue[Offspring>MaxValue]
Offspring[Offspring<MinValue] <- MinValue[Offspring<MinValue]
}
return (Offspring)
}
|
alpha <- 1.1
gamma <- 0.87 | /RabinovichFabrikant/Input/Classic/Params.R | permissive | CyclicDynamicalSystems/DynamicalSystemsPortraits | R | false | false | 26 | r | alpha <- 1.1
gamma <- 0.87 |
# Solution file for BIS 244 Assignment 02, Fall 2020
# Clear out Console and Environment.
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; kept only
# because course solution scripts are expected to start from a clean slate.
rm(list=ls(all=TRUE))
cat("\014")
# Read the us-states file from covid-19-data using readr (tidyverse) and here.
library(tidyverse)
library(here)
# Reading us-states.csv in as a data frame
STATES <- read_csv(here("covid-19-data","us-states.csv"))
# Using filter() to get just PA
PA <- filter(STATES, state=="Pennsylvania")
# Daily incremental cases/deaths from the cumulative columns: the first day
# keeps its cumulative value, later days are successive differences.
# diff() replaces the original index loop -- same values, and it also works
# when there are fewer than two rows (2:n would misfire for n < 2).
PA$incr_cases  <- c(PA$cases[1],  diff(PA$cases))
PA$incr_deaths <- c(PA$deaths[1], diff(PA$deaths))
# Standard deviation of the daily incremental cases. (The original comment
# claimed this was a "sum of adjusted deaths"; the code computes sd of cases.)
sd(PA$incr_cases)
| /BIS 244 - Assignment 02 - Solution.R | no_license | rstudent2019/BIS-244-Fall-2021 | R | false | false | 1,004 | r | # Solution file for BIS 244 Assignment 02, Fall 2020
# Clear out Console and Environment
rm(list=ls(all=TRUE))
cat("\014")
# Let's read in the us-counties file from covid-19-data
# We'll use packages readr, which is part of the tidyverse,
# and here
library(tidyverse)
library(here)
# Reading the us-states.csv in as a data frame
STATES <- read_csv(here("covid-19-data","us-states.csv"))
# Examining the data
# View(STATES)
# Using filter()to get just PA
PA <- filter(STATES, state=="Pennsylvania")
# View(PA)
# Set n to legth of data set
n <- length(PA$date)
# Initialize new variables in data frame
PA$incr_deaths <- 0
PA$incr_cases <- 0
PA$incr_deaths[1] <- PA$deaths[1]
PA$incr_cases[1] <- PA$cases[1]
# Calculate values for incremental cases and deatchs
for (i in 2:n) {
PA$incr_cases[i] <- PA$cases[i] - PA$cases[i-1]
PA$incr_deaths[i] <- PA$deaths[i] - PA$deaths[i-1]
}
# Calculating sum of all adjusted deaths as checksum
sd(PA$incr_cases)
|
#
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
##################################################################################################
# Saralamba et al. Intrahost modeling of artemisinin resistance
# in Plasmodium falciparum PNAS 397-402,
# doi: 10.1073/pnas.1006113108
#
# R Version adapted from http://demonstrations.wolfram.com/AModelOfPlasmodiumFalciparumPopulationDynamicsInAPatientDuri/
# by sompob@tropmedres.ac
#
#################################################################################################
# Intrahost P. falciparum / artesunate PK-PD model of Saralamba et al.,
# PNAS 108:397-402 (doi: 10.1073/pnas.1006113108). Thin R wrapper that
# validates and unpacks the parameter vectors, then delegates the actual
# simulation to the .NET assembly TreatWithArtesunate.dll via rClr
# (clrCallStatic / clrGet).
#
# Args:
#   initn0:   log10 of the initial parasite load (10^initn0 is passed on)
#   mu, sigma: mean / sd of the initial parasite age distribution
#   pmf:      parasite multiplication factor per life cycle
#   KZ:       kill-zone boundaries c(rb,re,tb,te,sb,se) for the
#             ring / trophozoite / schizont stages
#   concpars: drug-concentration parameters c(xm, ym, ke)
#   everyH:   dosing interval in hours; ndrug: number of doses
#   gamma:    EC-curve slopes c(ring, trophozoite, schizont)
#   ec50:     EC50 values c(ring, trophozoite, schizont)
#   emax:     Emax values c(ring, trophozoite, schizont)
#   Tconst:   scalar passed to the .NET model as T -- presumably the
#             simulation horizon; TODO confirm units against the assembly
#   runmax:   maximum run length (used only when npoint is NULL)
#   outform:  0 -> matrix {observed time, log10(circulating parasites)};
#             1 -> one 48-column age distribution per time step
#   npoint:   if non-NULL, the fixed-npoint overload is called instead and
#             the output is always {time, log10(circulating)}
# Returns: a numeric matrix as described by outform / npoint above.
Model.SompobPNAS2011 <- function(initn0,mu,sigma,pmf,KZ=c(6,26,27,38,39,44),concpars,everyH=24, ndrug=7,
                                 gamma,ec50,emax,Tconst,runmax,outform=0,npoint=NULL,...){
  # Unpack and validate the kill-zone boundaries.
  if(is.vector(KZ)&&length(KZ)==6){
    rb<-KZ[1]
    re<-KZ[2]
    tb<-KZ[3]
    te<-KZ[4]
    sb<-KZ[5]
    se<-KZ[6]
  } else{
    stop("Please check your Kill Zones(KZ). ex c(6,26,27,38,39,44)")
  }
  # Unpack and validate the three stage-specific EC-curve slopes.
  if(is.vector(gamma) && length(gamma)==3){
    gammar<-gamma[1]
    gammat<-gamma[2]
    gammas<-gamma[3]
  }else{
    stop("Please check the slopes of the EC curves.")
  }
  # Unpack and validate the three stage-specific EC50 values.
  if(is.vector(ec50)&&length(ec50)==3){
    ec50r<-ec50[1]
    ec50t<-ec50[2]
    ec50s<-ec50[3]
  }else{
    stop("Please check the EC50 vector.")
  }
  # Unpack and validate the three stage-specific Emax values.
  if(is.vector(emax)&&length(emax)==3){
    emaxr<-emax[1]
    emaxt<-emax[2]
    emaxs<-emax[3]
  }else{
    stop("Please check the Emax vector.")
  }
  # Unpack and validate the drug-concentration parameters.
  if(is.vector(concpars)&&length(concpars)==3){
    xm<-concpars[1]
    ym<-concpars[2]
    ke<-concpars[3]
  }else{
    stop("Please check the drug concentration vector.")
  }
  # Two overloads of the .NET method exist:
  # - npoint NULL: pass runmax/outform; outform selects the output shape.
  # - npoint set:  pass npoint; output is always {time, log10(circulating)}.
  if(is.null(npoint)){
    clrObj<-clrCallStatic("TreatWithArtesunate.Models","SompobPNAS2011",as.double(10^(initn0)),as.double(mu),
                          as.double(sigma),as.double(pmf),as.integer(rb),as.integer(re),as.integer(tb),
                          as.integer(te),as.integer(sb),as.integer(se),as.double(xm),as.double(ym),as.double(ke),
                          as.integer(everyH),as.integer(ndrug),as.double(gammar),as.double(gammat),as.double(gammas),
                          as.double(ec50r),as.double(ec50t),as.double(ec50s),
                          as.double(emaxr),as.double(emaxt),as.double(emaxs),as.double(Tconst),as.integer(runmax),as.integer(outform))
    outvec<-clrGet(clrObj,"Values")
    if(outform==0){
      outdata <- matrix(outvec, ncol=2)
      colnames(outdata)<-c("observed time","log10(circulating parasites)")
    }else if(outform==1){
      # One row per time step, 48 columns = hourly age compartments.
      outdata <- matrix(outvec,ncol=48)
    }
  }else if(!is.null(npoint)){
    clrObj<-clrCallStatic("TreatWithArtesunate.Models","SompobPNAS2011",as.double(10^(initn0)),as.double(mu),
                          as.double(sigma),as.double(pmf),as.integer(rb),as.integer(re),as.integer(tb),
                          as.integer(te),as.integer(sb),as.integer(se),as.double(xm),as.double(ym),as.double(ke),
                          as.integer(everyH),as.integer(ndrug),as.double(gammar),as.double(gammat),as.double(gammas),
                          as.double(ec50r),as.double(ec50t),as.double(ec50s),
                          as.double(emaxr),as.double(emaxt),as.double(emaxs),as.double(Tconst),as.integer(npoint))
    outvec<-clrGet(clrObj,"Values")
    outdata <- matrix(outvec,ncol=2)
    colnames(outdata)<-c("observed time","log10(circulating parasites)")
  }
  return(outdata)
}
| /R/SompobPNAS2011.R | no_license | slphyx/RIndivaria | R | false | false | 4,157 | r | #
# Build and Reload Package: 'Ctrl + Shift + B'
# Check Package: 'Ctrl + Shift + E'
# Test Package: 'Ctrl + Shift + T'
##################################################################################################
# Saralamba et al. Intrahost modeling of artemisinin resistance
# in Plasmodium falciparum PNAS 397-402,
# doi: 10.1073/pnas.1006113108
#
# R Version adapted from http://demonstrations.wolfram.com/AModelOfPlasmodiumFalciparumPopulationDynamicsInAPatientDuri/
# by sompob@tropmedres.ac
#
#################################################################################################
Model.SompobPNAS2011 <- function(initn0,mu,sigma,pmf,KZ=c(6,26,27,38,39,44),concpars,everyH=24, ndrug=7,
gamma,ec50,emax,Tconst,runmax,outform=0,npoint=NULL,...){
# (initN0, mu, sd, pmf, KZ double rb,double re,double tb,double te,double sb,double se,
# double xm,double ym,double ke,double everyH,double ndrug,double gammar,
# double gammat,double gammas,double ec50r,double ec50t,double ec50s,
# double emaxr,double emaxt,double emaxs,double T,double npoint)
# read inputs
if(is.vector(KZ)&&length(KZ)==6){
rb<-KZ[1]
re<-KZ[2]
tb<-KZ[3]
te<-KZ[4]
sb<-KZ[5]
se<-KZ[6]
}
else{
stop("Please check your Kill Zones(KZ). ex c(6,26,27,38,39,44)")
}
if(is.vector(gamma) && length(gamma)==3){
gammar<-gamma[1]
gammat<-gamma[2]
gammas<-gamma[3]
}else{
stop("Please check the slopes of the EC curves.")
}
if(is.vector(ec50)&&length(ec50)==3){
ec50r<-ec50[1]
ec50t<-ec50[2]
ec50s<-ec50[3]
}else{
stop("Please check the EC50 vector.")
}
if(is.vector(emax)&&length(emax)==3){
emaxr<-emax[1]
emaxt<-emax[2]
emaxs<-emax[3]
}else{
stop("Please check the Emax vector.")
}
if(is.vector(concpars)&&length(concpars)==3){
xm<-concpars[1]
ym<-concpars[2]
ke<-concpars[3]
}else{
stop("Please check the drug concentration vector.")
}
###load dll
#clrLoadAssembly('TreatWithArtesunate.dll')
#loaded<-clrGetLoadedAssemblies()
#print(loaded)
# if npoint = NULL
#outform = 0 ---> {time, log10(circulating)}
#outform = 1 ---> {{agedist at t 0},{age dist at t 1},...}
# if npoint != NULL
# outform --> {time, log10(circulating)}
if(is.null(npoint)){
clrObj<-clrCallStatic("TreatWithArtesunate.Models","SompobPNAS2011",as.double(10^(initn0)),as.double(mu),
as.double(sigma),as.double(pmf),as.integer(rb),as.integer(re),as.integer(tb),
as.integer(te),as.integer(sb),as.integer(se),as.double(xm),as.double(ym),as.double(ke),
as.integer(everyH),as.integer(ndrug),as.double(gammar),as.double(gammat),as.double(gammas),
as.double(ec50r),as.double(ec50t),as.double(ec50s),
as.double(emaxr),as.double(emaxt),as.double(emaxs),as.double(Tconst),as.integer(runmax),as.integer(outform))
outvec<-clrGet(clrObj,"Values")
if(outform==0){
outdata <- matrix(outvec, ncol=2)
colnames(outdata)<-c("observed time","log10(circulating parasites)")
}else if(outform==1){
outdata <- matrix(outvec,ncol=48)
}
}else if(!is.null(npoint)){
clrObj<-clrCallStatic("TreatWithArtesunate.Models","SompobPNAS2011",as.double(10^(initn0)),as.double(mu),
as.double(sigma),as.double(pmf),as.integer(rb),as.integer(re),as.integer(tb),
as.integer(te),as.integer(sb),as.integer(se),as.double(xm),as.double(ym),as.double(ke),
as.integer(everyH),as.integer(ndrug),as.double(gammar),as.double(gammat),as.double(gammas),
as.double(ec50r),as.double(ec50t),as.double(ec50s),
as.double(emaxr),as.double(emaxt),as.double(emaxs),as.double(Tconst),as.integer(npoint))
outvec<-clrGet(clrObj,"Values")
outdata <- matrix(outvec,ncol=2)
colnames(outdata)<-c("observed time","log10(circulating parasites)")
}
return(outdata)
}
|
#' @title GameDayPlays
#'
#' @description An S4 class extending \code{data.frame} that holds the
#' play-by-play output returned by \code{getData()}.
#'
#' @exportClass GameDayPlays
#' @examples showClass("GameDayPlays")
setClass("GameDayPlays", contains = "data.frame")
#' @title panel.baseball
#'
#' @description Visualize Balls in Play
#'
#' @details A convenience lattice panel function that draws a generic baseball
#' field on a Cartesian coordinate system scaled in feet, with home plate at
#' the origin. Intended to be called inside the panel function of xyplot().
#'
#' @return nothing
#'
#' @export
#' @examples
#'
#' ds = getData()
#' plot(ds)
panel.baseball <- function () {
  bgcol <- "darkgray"
  # Foul lines, drawn 400 ft out along each line.
  panel.segments(0, 0, -400, 400, col = bgcol)  # LF line
  panel.segments(0, 0, 400, 400, col = bgcol)   # RF line
  # Half-width of the base diamonds, in feet.
  bw <- 2
  # Second base: its midpoint sits sqrt(90^2 + 90^2) ft up the y-axis.
  base2.y <- sqrt(90^2 + 90^2)
  panel.polygon(c(-bw, 0, bw, 0),
                c(base2.y, base2.y - bw, base2.y, base2.y + bw), col = bgcol)
  # First base: back corner is 90 ft away along the RF line.
  base1.x <- 90 * cos(pi/4)
  base1.y <- 90 * sin(pi/4)
  panel.polygon(c(base1.x, base1.x - bw, base1.x - 2*bw, base1.x - bw),
                c(base1.y, base1.y - bw, base1.y, base1.y + bw), col = bgcol)
  # Third base: mirror image, 90 ft along the LF line.
  base3.x <- 90 * cos(3*pi/4)
  panel.polygon(c(base3.x, base3.x + bw, base3.x + 2*bw, base3.x + bw),
                c(base1.y, base1.y - bw, base1.y, base1.y + bw), col = bgcol)
  # Infield cutout: arc 95 ft from the pitching rubber
  # (x here is panel.curve's curve variable, not a local).
  panel.curve(60.5 + sqrt(95^2 - x^2), from = base3.x - 26, to = base1.x + 26, col = bgcol)
  # Pitching rubber.
  panel.rect(-bw, 60.5 - bw/2, bw, 60.5 + bw/2, col = bgcol)
  # Home plate (17 inches wide, expressed in feet).
  panel.polygon(c(0, -8.5/12, -8.5/12, 8.5/12, 8.5/12),
                c(0, 8.5/12, 17/12, 17/12, 8.5/12), col = bgcol)
  # Outfield distance arcs every 100 ft from 200 to 500.
  for (d in seq(from = 200, to = 500, by = 100)) {
    panel.curve(sqrt(d^2 - x^2), from = d * cos(3*pi/4), to = d * cos(pi/4), col = bgcol)
  }
}
#' @title plot.GameDayPlays
#'
#' @description Visualize Balls in Play
#'
#' @details Plots the balls in play from GameDay data. This function will plot (x,y)-coordinates
#' with a generic baseball field plotted in the background. Other lattice options can be passed
#' to xyplot().
#'
#' @param data A GameDayPlays set with fields "our.x" and "our.y"
#' @param batterName A character string containing the last name of a batter
#' @param pitcherName A character string containing the last name of a pitcher
#' @param event A character vector of event types to keep (e.g. "Home Run")
#' @param pch A plotting symbol passed through to xyplot()
#'
#' @return an xyplot()
#'
#' @export
#' @examples
#'
#' ds = getData()
#' plot(ds)
plot.GameDayPlays = function (data, batterName=NULL, pitcherName=NULL, event=NULL, pch=1) {
  require(mosaic)
  # The plot needs the batted-ball coordinates; fail early if they are absent.
  xy.fields = c("our.x", "our.y")
  if (!length(intersect(xy.fields, names(data))) == length(xy.fields)) {
    stop("(x,y) coordinate locations not found.")
  }
  # Optional filters on batter, pitcher and/or event type. %in% is used
  # instead of == because `name == NA` yields NA, and `[` keeps NA-indexed
  # rows as all-NA junk rows; %in% simply drops them. It also accepts a
  # vector of names, which is backward compatible with a single string.
  if (!is.null(batterName)) {
    data = data[data$batterName %in% batterName, ]
  }
  if (!is.null(pitcherName)) {
    data = data[data$pitcherName %in% pitcherName, ]
  }
  if (!is.null(event)) {
    data = data[data$event %in% event, ]
  }
  # Drop rows without coordinates and refactor event so the legend only
  # shows levels that are present after filtering.
  ds <- filter(data, !is.na(our.y) & !is.na(our.x))
  ds$event <- factor(ds$event)
  plot = xyplot(our.y ~ our.x, groups=event, data=ds, pch=pch
    , panel = function(x,y, ...) {
        # Draw the field first so points render on top of it.
        panel.baseball()
        panel.xyplot(x,y, alpha = 0.3, ...)
      }
    , auto.key=list(columns=4)
    , xlim = c(-350, 350), ylim = c(-20, 525)
    , xlab = "Horizontal Distance from Home Plate (ft.)"
    , ylab = "Vertical Distance from Home Plate (ft.)"
  )
  return(plot)
}
#' @title summary.GameDayPlays
#'
#' @description Summarize MLBAM data
#'
#' @details Reports (via message()) how many distinct games a GameDayPlays data
#' set contains, along with the first and last gameId in sorted order, and then
#' returns the standard data.frame column summary.
#'
#' @param data A GameDayPlays data set
#'
#' @return the value of summary.data.frame(data)
#'
#' @export
#' @examples
#'
#' ds = getData()
#' summary(ds)
summary.GameDayPlays = function (data) {
  game.ids <- sort(unique(data$gameId))
  n.games <- length(game.ids)
  message(sprintf("...Contains data from %d games", n.games))
  message(sprintf("...from %s to %s", game.ids[1], game.ids[n.games]))
  summary.data.frame(data)
}
#' @title tabulate.GameDayPlays
#'
#' @description Summarize MLBAM data
#'
#' @details Tabulates Lahman-style statistics by team for the contents of a GameDayPlays data set.
#' Note that defining this S3 generic masks base::tabulate(); a default method
#' is provided that falls through to the base implementation, so existing code
#' that calls tabulate() on ordinary vectors keeps working.
#'
#' @param data A GameDayPlays set
#' @param ... further arguments passed to base::tabulate() by the default method
#'
#' @return A data.frame of seasonal totals for each team
#'
#' @export tabulate.GameDayPlays
#' @examples
#'
#' ds = getData()
#' tabulate(ds)
tabulate = function (data, ...) UseMethod("tabulate")
# Fall through to base::tabulate() for non-GameDayPlays input, so that masking
# the base function does not break unrelated callers.
tabulate.default = function (data, ...) base::tabulate(data, ...)
tabulate.GameDayPlays = function (data) {
  data %>%
    # The batting team is the away team in the top half of an inning.
    mutate(bat_team = ifelse(half == "top", as.character(away_team), as.character(home_team))) %>%
    # gameId embeds the date; characters 5-8 are the season year.
    mutate(yearId = as.numeric(substr(gameId, start = 5, stop = 8))) %>%
    group_by(yearId, bat_team) %>%
    summarise(G = length(unique(gameId))
      , PA = sum(isPA)
      , AB = sum(isAB)
      , R = sum(runsOnPlay)
      , H = sum(isHit)
      , HR = sum(event == "Home Run")
      , BB = sum(event %in% c("Walk", "Intent Walk"))
      , K = sum(event %in% c("Strikeout", "Strikeout - DP"))
      , BA = sum(isHit) / sum(isAB)
      # OBP denominator excludes sacrifice bunts from plate appearances.
      , OBP = sum(isHit | event %in% c("Walk", "Intent Walk", "Hit By Pitch")) / sum(isPA & !event %in% c("Sac Bunt", "Sacrifice Bunt DP"))
      , SLG = (sum(event == "Single") + 2 * sum(event == "Double") + 3 * sum(event == "Triple") + 4 * sum(event == "Home Run")) / sum(isAB)
    )
}
#' @title crosscheck.GameDayPlays
#'
#' @description Cross-check the accuracy of the GameDay data with the Lahman database
#'
#' @details Cross-checks summary statistics with the Lahman database.
#'
#' @param data An MLBAM data set
#'
#' @return The ratio of the Frobenius norm of the matrix of differences to the Frobenius norm of the matrix
#' defined by the Lahman database.
#'
#' @export crosscheck.GameDayPlays
#' @examples
#'
#' ds = getData()
#' crosscheck(ds)
#'
#'
# S3 generic so crosscheck() dispatches on the class of `data`.
crosscheck = function (data) UseMethod("crosscheck")
crosscheck.GameDayPlays = function (data) {
  # Fail fast when the Lahman database package is missing; a bare require()
  # only warns and then fails later with a confusing missing-object error.
  if (!require(Lahman)) {
    stop("package 'Lahman' is required by crosscheck()", call. = FALSE)
  }
  # Seasonal team totals computed from the MLBAM play-by-play data.
  teams = tabulate(data)
  # Comparable seasonal team totals built from the Lahman Batting table.
  lteams <- Batting %>%
    group_by(yearID, teamID) %>%
    summarise(PA = sum(AB + BB + HBP + SH + SF, na.rm = TRUE)
      , AB = sum(AB, na.rm = TRUE)
      , R = sum(R, na.rm = TRUE)
      , H = sum(H, na.rm = TRUE)
      , HR = sum(HR, na.rm = TRUE)
      , BB = sum(BB, na.rm = TRUE)
      , K = sum(SO, na.rm = TRUE)
      , BA = sum(H, na.rm = TRUE) / sum(AB, na.rm = TRUE)
      , OBP = sum(H + BB + HBP, na.rm = TRUE) / sum(AB + BB + HBP + SF, na.rm = TRUE)
      # Total bases = H + X2B + 2*X3B + 3*HR (each hit counts once in H, plus
      # its extra bases). The previous formula (H + X2B + X3B + HR)
      # undercounted triples and home runs.
      , SLG = sum(H + X2B + 2 * X3B + 3 * HR, na.rm = TRUE) / sum(AB, na.rm = TRUE))
  # Attach games played, then align team identifiers with the MLBAM codes
  # (lowercase; the Angels are "ana" in MLBAM but "LAA" in Lahman).
  lteams = merge(x = lteams, y = Teams[, c("yearID", "teamID", "G")], by = c("yearID", "teamID"))
  lteams <- mutate(lteams, teamId = tolower(teamID))
  lteams <- mutate(lteams, teamId = ifelse(teamId == "laa", "ana", as.character(teamId)))
  # Join MLBAM (".x") and Lahman (".y") totals. Named `matched` so we do not
  # shadow base::match() inside this function.
  matched = merge(x = teams, y = lteams, by.x = c("yearId", "bat_team"), by.y = c("yearID", "teamId"), all.x = TRUE)
  # Relative discrepancy of the counting stats: Frobenius norm of the
  # difference over the Frobenius norm of the Lahman matrix.
  A = as.matrix(matched[, c("G.x", "PA.x", "AB.x", "R.x", "H.x", "HR.x", "BB.x", "K.x")])
  B = as.matrix(matched[, c("G.y", "PA.y", "AB.y", "R.y", "H.y", "HR.y", "BB.y", "K.y")])
  return(norm(A - B, "F") / norm(B, "F"))
}
#' @title shakeWAR
#'
#' @description resample a data.frame to obtain variance estimate for WAR
#'
#' @details Resamples the rows of an MLBAM data set
#'
#' @param data An MLBAM data.frame
#' @param resample An element of \code{c("plays", "models", "both")}
#' @param N the number of resamples (default 10)
#' @param ... currently unused
#'
#' @return a data.frame with RAA values
#'
#' @export shakeWAR
#' @export shakeWAR.GameDayPlays
#' @examples
#'
#' ds = getData()
#' res = shakeWAR(ds, resample="plays", N=10)
#' summary(res)
#'
shakeWAR = function (data, resample = "plays", N = 10, ...) UseMethod("shakeWAR")
shakeWAR.GameDayPlays = function (data, resample = "plays", N = 10, ...) {
  require(mosaic)
  # Validate up front: previously an unsupported value (e.g. "models") fell
  # through every branch and died later with "object 'bstrap' not found".
  resample <- match.arg(resample, c("plays", "models", "both"))
  if (resample == "both") {
    # Resample the actual plays AND rebuild the models each time; this
    # captures both measurement error and sampling error.
    bstrap = do(N) * getWAR(makeWAR(resample(data), low.memory = TRUE)$openWAR)
  } else if (resample == "plays") {
    ext = makeWAR(data, verbose = FALSE, low.memory = TRUE)
    # RAA values estimated on the original data; the models stay fixed and we
    # resample these values, which captures the sampling error only.
    reality.raa = ext$openWAR
    bstrap = do(N) * getWAR(resample(reality.raa), verbose = FALSE)
  } else {
    # Isolating the measurement error (rebuilt models applied to the real
    # data) has never been implemented; fail loudly instead of obscurely.
    stop("resample = \"models\" is not yet implemented", call. = FALSE)
  }
  # bstrap is a data.frame with roughly N * M rows, M = number of players.
  class(bstrap) <- c("do.openWARPlayers", "data.frame")
  return(bstrap)
}
| /R/GameDayPlays.R | no_license | djmosfett/openWAR | R | false | false | 10,886 | r | #' @title GameDayPlays
#'
#' @description Contains the output from getData()
#'
#' @exportClass GameDayPlays
#' @examples showClass("GameDayPlays")
setClass("GameDayPlays", contains = "data.frame")
#' @title panel.baseball
#'
#' @description Visualize Balls in Play
#'
#' @details A convenience function for drawing a generic baseball field using a Cartesian coordinate
#' system scaled in feet with home plate at the origin. Intended to be called
#' from within a lattice panel function (see plot.GameDayPlays()).
#'
#'
#' @return nothing
#'
#' @export
#' @examples
#'
#' ds = getData()
#' plot(ds)
panel.baseball <- function () {
  bgcol = "darkgray"
  panel.segments(0, 0, -400, 400, col = bgcol) # LF line
  panel.segments(0, 0, 400, 400, col = bgcol)  # RF line
  # Half-width, in feet, of the diamond-shaped base markers.
  bw = 2
  # second base: its midpoint is sqrt(2) * 90 ~ 127.28' up the y-axis
  base2.y = sqrt(90^2 + 90^2)
  panel.polygon(c(-bw, 0, bw, 0), c(base2.y, base2.y - bw, base2.y, base2.y + bw), col = bgcol)
  # first base: back corner is 90' away on the RF line
  base1.x = 90 * cos(pi/4)
  base1.y = 90 * sin(pi/4)
  panel.polygon(c(base1.x, base1.x - bw, base1.x - 2*bw, base1.x - bw), c(base1.y, base1.y - bw, base1.y, base1.y + bw), col = bgcol)
  # third base: mirror image of first base; same height since sin(3*pi/4) == sin(pi/4)
  base3.x = 90 * cos(3*pi/4)
  panel.polygon(c(base3.x, base3.x + bw, base3.x + 2*bw, base3.x + bw), c(base1.y, base1.y - bw, base1.y, base1.y + bw), col = bgcol)
  # infield cutout is 95' from the pitcher's mound (mound at y = 60.5)
  panel.curve(60.5 + sqrt(95^2 - x^2), from = base3.x - 26, to = base1.x + 26, col = bgcol)
  # pitching rubber
  panel.rect(-bw, 60.5 - bw/2, bw, 60.5 + bw/2, col = bgcol)
  # home plate: a 17-inch-wide pentagon, dimensions converted to feet
  panel.polygon(c(0, -8.5/12, -8.5/12, 8.5/12, 8.5/12), c(0, 8.5/12, 17/12, 17/12, 8.5/12), col = bgcol)
  # outfield distance arcs every 100' from 200' to 500'
  # (iterate values directly instead of the old `for (i in 1:length(...))`)
  for (d in seq(from = 200, to = 500, by = 100)) {
    panel.curve(sqrt(d^2 - x^2), from = d * cos(3*pi/4), to = d * cos(pi/4), col = bgcol)
  }
}
#' @title plot.GameDayPlays
#'
#' @description Visualize Balls in Play
#'
#' @details Plots the balls in play from GameDay data. This function will plot (x,y)-coordinates
#' with a generic baseball field plotted in the background. Other lattice options can be passed
#' to xyplot().
#'
#' @param data A GameDayPlays set with fields "our.x" and "our.y"
#' @param batterName A character string containing the last name of a batter
#' @param pitcherName A character string containing the last name of a pitcher
#' @param pch A numeric
#'
#' @return an xyplot()
#'
#' @export
#' @examples
#'
#' ds = getData()
#' plot(ds)
plot.GameDayPlays = function (data, batterName=NULL,pitcherName=NULL,event=NULL,pch=1) {
require(mosaic)
xy.fields = c("our.x", "our.y")
if (!length(intersect(xy.fields, names(data))) == length(xy.fields)) {
stop("(x,y) coordinate locations not found.")
}
#Code for filtering base on batter, pitcher and/or event type.
if (!is.null(batterName)) {
data = data[data$batterName==batterName,]
}
if (!is.null(pitcherName)) {
data = data[data$pitcherName==pitcherName,]
}
if (!is.null(event)) {
data = data[data$event %in% event,]
}
ds <- filter(data, !is.na(our.y) & !is.na(our.x))
ds$event <- factor(ds$event)
plot = xyplot(our.y ~ our.x, groups=event, data=ds,pch=pch
, panel = function(x,y, ...) {
panel.baseball()
panel.xyplot(x,y, alpha = 0.3, ...)
}
, auto.key=list(columns=4)
, xlim = c(-350, 350), ylim = c(-20, 525)
, xlab = "Horizontal Distance from Home Plate (ft.)"
, ylab = "Vertical Distance from Home Plate (ft.)"
)
return(plot)
}
#' @title summary.GameDayPlays
#'
#' @description Summarize MLBAM data
#'
#' @details Prints information about the contents of an GameDayPlays data set.
#'
#' @param data A GameDayPlays data set
#'
#' @return nothing
#'
#' @export
#' @examples
#'
#' ds = getData()
#' summary(ds)
summary.GameDayPlays = function (data) {
gIds = sort(unique(data$gameId))
message(paste("...Contains data from", length(gIds), "games"))
message(paste("...from", gIds[1], "to", gIds[length(gIds)]))
summary.data.frame(data)
}
#' @title tabulate.GameDayPlays
#'
#' @description Summarize MLBAM data
#'
#' @details Tabulates Lahman-style statistics by team for the contents of a GameDayPlays data set.
#'
#' @param data A GameDayPlays set
#'
#' @return A data.frame of seasonal totals for each team
#'
#' @export tabulate.GameDayPlays
#' @examples
#'
#' ds = getData()
#' tabulate(ds)
tabulate = function (data) UseMethod("tabulate")
tabulate.GameDayPlays = function (data) {
# data$bat_team = with(data, ifelse(half == "top", as.character(away_team), as.character(home_team)))
# data <- mutate(data, yearId = as.numeric(substr(gameId, start=5, stop=8)))
# teams = plyr::ddply(data, ~ yearId + bat_team, summarise, G = length(unique(gameId))
# , PA = sum(isPA), AB = sum(isAB), R = sum(runsOnPlay), H = sum(isHit)
# , HR = sum(event == "Home Run")
# , BB = sum(event %in% c("Walk", "Intent Walk"))
# , K = sum(event %in% c("Strikeout", "Strikeout - DP"))
# , BA = sum(isHit) / sum(isAB)
# , OBP = sum(isHit | event %in% c("Walk", "Intent Walk", "Hit By Pitch")) / sum(isPA & !event %in% c("Sac Bunt", "Sacrifice Bunt DP"))
# , SLG = (sum(event == "Single") + 2*sum(event == "Double") + 3*sum(event == "Triple") + 4*sum(event == "Home Run") ) / sum(isAB)
# )
data %>%
mutate(bat_team = ifelse(half == "top", as.character(away_team), as.character(home_team))) %>%
mutate(yearId = as.numeric(substr(gameId, start=5, stop=8))) %>%
group_by(yearId, bat_team) %>%
summarise(G = length(unique(gameId))
, PA = sum(isPA)
, AB = sum(isAB)
, R = sum(runsOnPlay)
, H = sum(isHit)
, HR = sum(event == "Home Run")
, BB = sum(event %in% c("Walk", "Intent Walk"))
, K = sum(event %in% c("Strikeout", "Strikeout - DP"))
, BA = sum(isHit) / sum(isAB)
, OBP = sum(isHit | event %in% c("Walk", "Intent Walk", "Hit By Pitch")) / sum(isPA & !event %in% c("Sac Bunt", "Sacrifice Bunt DP"))
, SLG = (sum(event == "Single") + 2*sum(event == "Double") + 3*sum(event == "Triple") + 4*sum(event == "Home Run") ) / sum(isAB)
)
}
#' @title crosscheck.GameDayPlays
#'
#' @description Cross-check the accuracy of the GameDay data with the Lahman database
#'
#' @details Cross-checks summary statistics with the Lahman database.
#'
#' @param data An MLBAM data set
#'
#' @return The ratio of the Frobenius norm of the matrix of differences to the Frobenius norm of the matrix
#' defined by the Lahman database.
#'
#' @export crosscheck.GameDayPlays
#' @examples
#'
#' ds = getData()
#' crosscheck(ds)
#'
#'
crosscheck = function (data) UseMethod("crosscheck")
crosscheck.GameDayPlays = function (data) {
require(Lahman)
teams = tabulate(data)
lteams <- Batting %>%
group_by(yearID, teamID) %>%
summarise(PA = sum(AB + BB + HBP + SH + SF, na.rm=TRUE)
, AB = sum(AB, na.rm=TRUE)
, R = sum(R, na.rm=TRUE)
, H = sum(H, na.rm=TRUE)
, HR = sum(HR, na.rm=TRUE)
, BB = sum(BB, na.rm=TRUE)
, K = sum(SO, na.rm=TRUE)
, BA = sum(H, na.rm=TRUE) / sum(AB, na.rm=TRUE)
, OBP = sum(H + BB + HBP, na.rm=TRUE) / sum(AB + BB + HBP + SF, na.rm=TRUE)
, SLG = sum(H + X2B + X3B + HR, na.rm=TRUE) / sum(AB, na.rm=TRUE))
lteams = merge(x=lteams, y=Teams[,c("yearID", "teamID", "G")], by=c("yearID", "teamID"))
lteams <- mutate(lteams, teamId = tolower(teamID))
lteams <- mutate(lteams, teamId = ifelse(teamId == "laa", "ana", as.character(teamId)))
match = merge(x=teams, y=lteams, by.x=c("yearId", "bat_team"), by.y=c("yearID", "teamId"), all.x=TRUE)
# move this out of here eventually
# require(xtable)
# x = xtable(match[,c("bat_team", "G.x", "PA.x", "AB.x", "R.x", "H.x", "HR.x", "BB.x", "K.x", "G.y", "PA.y", "AB.y", "R.y", "H.y", "HR.y", "BB.y", "K.y")]
# , caption=c("Cross-check between MLBAM data (left) and Lahman data (right), 2012"), label="tab:crosscheck"
# , align = rep("c", 18))
# print(x, include.rownames=FALSE)
A = as.matrix(match[,c("G.x", "PA.x", "AB.x", "R.x", "H.x", "HR.x", "BB.x", "K.x")])
B = as.matrix(match[,c("G.y", "PA.y", "AB.y", "R.y", "H.y", "HR.y", "BB.y", "K.y")])
return(norm(A - B, "F") / norm(B, "F"))
}
#' @title shakeWAR
#'
#' @description resample a data.frame to obtain variance estimate for WAR
#'
#' @details Resamples the rows of an MLBAM data set
#'
#' @param data An MLBAM data.frame
#' @param resample An element of \code{c("plays", "models", "both")}
#' @param N the number of resamples (default 5000)
#'
#' @return a data.frame with RAA values
#'
#' @export shakeWAR
#' @export shakeWAR.GameDayPlays
#' @examples
#'
#' ds = getData()
#' res = shakeWAR(ds, resample="plays", N=10)
#' summary(res)
#'
shakeWAR = function (data, resample = "plays", N = 10, ...) UseMethod("shakeWAR")
shakeWAR.GameDayPlays = function (data, resample = "plays", N = 10, ...) {
require(mosaic)
if (resample == "both") {
# resample the actual plays AND rebuild the models each time
# this captures both measurement error and sampling error
bstrap = do(N) * getWAR(makeWAR(resample(data), low.memory=TRUE)$openWAR)
} else {
ext = makeWAR(data, verbose=FALSE, low.memory=TRUE)
# Keep track of the original data
reality = data
# Keep track of the models built on the original data
reality.models = ext$models
# Keep track of the original RAA values
reality.raa = ext$openWAR
if (resample == "plays") {
# assume the models are fixed, and resample the RAA values
# this captures the sampling error
# supposedly the performance of do() is really bad
bstrap = do(N) * getWAR(resample(reality.raa), verbose=FALSE)
# use replicate() instead
# bstrap = rdply(N, getWAR(resample(reality.raa), verbose=FALSE))
# class(bstrap) = c("do.openWARPlayers", class(bstrap))
# } else {
# # to isolate the measurement error, use the models we built on the resampled rows
# # but apply them exclusively to the real data
# ext.list = lapply(sims$models.used, makeWAR, data = reality, verbose=FALSE)
# raa.list = lapply(ext.list, "[[", "openWAR")
# war.list = t(lapply(raa.list, getWAR))
# bstrap = do.call("rbind", war.list)
# class(bstrap) = c("do.openWARPlayers", class(bstrap))
}
}
# bstrap should be a data.frame of class "do.openWARPlayers"
class(bstrap) <- c("do.openWARPlayers", "data.frame")
# with roughly N * M rows, where M is the numbers of players
return(bstrap)
}
|
# Zero out all positive values in a protein-groups matrix, keeping negatives.
setwd("C:\\Users\\bishofij\\Proteomics_Pipeline\\NIH\\casey")
# Load the matrix; the first column holds the row identifiers.
proteinGroups<-read.csv("substract.csv", row.names = 1)
mat <- as.matrix(proteinGroups)
# Clamp strictly positive entries to zero, leaving only negative values.
mat[mat > 0.0] <- 0
write.csv(mat, "negative.csv")
# NOTE(review): this writes the SAME matrix to both files ("postive" is also a
# typo). The positive-values file should presumably zero out the negatives on
# a fresh copy instead (mat[mat < 0.0] <- 0) -- confirm intent.
write.csv(mat, "postive.csv")
| /R_scripts/remove_low_high_values.R | no_license | ibishof/Proteomic_pipeline | R | false | false | 249 | r |
setwd("C:\\Users\\bishofij\\Proteomics_Pipeline\\NIH\\casey")
proteinGroups<-read.csv("substract.csv", row.names = 1)
mat <- as.matrix(proteinGroups)
mat[mat > 0.0] <- 0
write.csv(mat, "negative.csv")
write.csv(mat, "postive.csv")
|
# ranges-eval-utils.R
# some helpers for 'tidy' evaluation on ranges
#' Create an overscoped environment from a Ranges object
#'
#' @param x a Ranges object
#' @param envir the environment to place the Ranges in (default = `parent.frame()`)
#'
#' @details This is the backend for non-standard evaluation in `plyranges`.
#'
#' @seealso [rlang::new_data_mask()], [rlang::eval_tidy()]
#' @return an environment
#'
#' @export
overscope_ranges <- function(x, envir = parent.frame()) {
UseMethod("overscope_ranges")
}
# Plain Ranges case: expose the object as an environment via as.env() and wrap
# it in an rlang data mask so quosures can be eval_tidy()'d against it.
overscope_ranges.Ranges <- function(x, envir = parent.frame()) {
env <- as.env(x, envir)
new_data_mask(env, top = parent.env(env))
}
# Delegating classes unwrap the underlying object and recurse.
overscope_ranges.DelegatingGenomicRanges <- function(x, envir = parent.frame()) {
overscope_ranges(x@delegate, envir)
}
overscope_ranges.DelegatingIntegerRanges <- overscope_ranges.DelegatingGenomicRanges
# Grouped case: each column is split into a List by the group index (x@inx),
# so expressions are evaluated once per group.
overscope_ranges.GroupedGenomicRanges <- function(x, envir = parent.frame()) {
env <- as.env(x@delegate,
envir,
tform = function(col) unname(IRanges::extractList(col, x@inx)))
new_data_mask(env, top = parent.env(env))
}
overscope_ranges.GroupedIntegerRanges <- overscope_ranges.GroupedGenomicRanges
#' @importFrom rlang env_bind := new_data_mask eval_tidy
# Evaluate a list of quosures against a data mask, in order, returning the
# list of results. When bind_envir is TRUE each result is also bound into the
# mask under its name, so later quosures can refer to columns constructed by
# earlier ones.
overscope_eval_update <- function(overscope, dots, bind_envir = TRUE) {
  results <- stats::setNames(vector("list", length(dots)), names(dots))
  for (pos in seq_along(results)) {
    value <- eval_tidy(dots[[pos]], data = overscope)
    results[[pos]] <- value
    if (bind_envir) {
      col_nm <- names(dots)[[pos]]
      rlang::env_bind(overscope, !!col_nm := value)
    }
  }
  results
}
# All variable names available for NSE on a Ranges object: the core range
# variables exposed by as.env(), plus the metadata columns found in the
# parent of that environment.
ranges_vars <- function(x) {
x_env <- as.env(x, parent.frame())
vars_rng <-ls(x_env)
# "names" is excluded from the core set -- presumably to avoid clashing with
# the base accessor of the same name; confirm.
vars_rng <- vars_rng[!(vars_rng %in% "names")]
vars_mcols <- ls(parent.env(x_env))
c(vars_rng, vars_mcols)
}
# Port of dplyrs `n` function
# It works by searching for a vector in the overscope environment
# and calling length on it.
#' Compute the number of ranges in each group.
#'
#' @description This function should only be used
#' within `summarise()`, `mutate()` and `filter()`.
#'
#' @examples
#' ir <- as_iranges(
#' data.frame(start = 1:10,
#' width = 5,
#' name = c(rep("a", 5), rep("b", 3), rep("c", 2))
#' )
#' )
#' by_names <- group_by(ir, name)
#' summarise(by_names, n = n())
#' mutate(by_names, n = n())
#' filter(by_names, n() >= 3)
#' @return `n()` will only be evaluated inside a function call, where it
#' returns an integer.
#'
#' @importFrom rlang env_get env_parent
#' @export
n <- function() {
# Called from inside eval_tidy(): the caller's frame is the data mask, and
# its parent is the environment holding the range variables.
up_env <- parent.frame()
parent_env <- rlang::env_parent(up_env)
# "start" always exists for a Ranges, so its length gives the range count.
if (rlang::env_has(parent_env, "start")) {
.data <- rlang::env_get(parent_env, "start")
# Grouped ranges expose start as an IntegerList: one count per group.
if (is(.data, "IntegerList")) {
return(lengths(.data))
} else {
return(length(.data))
}
}
# Reached when no data mask is on the call stack (direct invocation).
stop("This function should not be called directly")
}
#' Compute the number of distinct unique values in a vector or List
#'
#' @param var a vector of values
#' @return an integer vector
#'
#' @description Counts distinct values. For a List object the count is taken
#' element-wise (one count per list element, equivalent to
#' `lengths(unique(x))`); for a plain vector a single count is returned
#' (equivalent to `length(unique(x))`).
#'
#' @examples
#' x <- CharacterList(c("a", "b", "c", "a"), "d")
#' n_distinct(x)
#' n_distinct(unlist(x))
#'
#' @export
n_distinct <- function(var) {
  # Pick the appropriate length function for the input kind, then apply it
  # to the de-duplicated values.
  counter <- if (inherits(var, "List")) lengths else length
  counter(unique(var))
}
# TRUE when no quosures were captured.
is_empty_quos <- function(quos) {
  identical(length(quos), 0L)
}
# dplyr's join syntax uses a function called tbl_vars to get
# variable names, using this function will enable a Ranges to be copied through
# as a data.frame in a join.
tbl_vars.GenomicRanges <- function(x) {
ranges_vars(x)
} | /R/ranges-eval.R | no_license | iimog/plyranges | R | false | false | 4,000 | r | # ranges-eval-utils.R
# some helpers for 'tidy' evaluation on ranges
#' Create an overscoped environment from a Ranges object
#'
#' @param x a Ranges object
#' @param envir the environment to place the Ranges in (default = `parent.frame()`)
#'
#' @details This is the backend for non-standard evaluation in `plyranges`.
#'
#' @seealso [rlang::new_data_mask()], [rlang::eval_tidy()]
#' @return an environment
#'
#' @export
overscope_ranges <- function(x, envir = parent.frame()) {
UseMethod("overscope_ranges")
}
overscope_ranges.Ranges <- function(x, envir = parent.frame()) {
env <- as.env(x, envir)
new_data_mask(env, top = parent.env(env))
}
overscope_ranges.DelegatingGenomicRanges <- function(x, envir = parent.frame()) {
overscope_ranges(x@delegate, envir)
}
overscope_ranges.DelegatingIntegerRanges <- overscope_ranges.DelegatingGenomicRanges
overscope_ranges.GroupedGenomicRanges <- function(x, envir = parent.frame()) {
env <- as.env(x@delegate,
envir,
tform = function(col) unname(IRanges::extractList(col, x@inx)))
new_data_mask(env, top = parent.env(env))
}
overscope_ranges.GroupedIntegerRanges <- overscope_ranges.GroupedGenomicRanges
#' @importFrom rlang env_bind := new_data_mask eval_tidy
overscope_eval_update <- function(overscope, dots, bind_envir = TRUE) {
update <- vector("list", length(dots))
names(update) <- names(dots)
for (i in seq_along(update)) {
quo <- dots[[i]]
update[[i]] <- eval_tidy(quo, data = overscope)
# sometimes we want to compute on previously constructed columns
# we can do this by binding the evaluated expression to
# the overscope environment
if (bind_envir) {
new_col <- names(dots)[[i]]
rlang::env_bind(overscope, !!new_col := update[[i]])
}
}
return(update)
}
ranges_vars <- function(x) {
x_env <- as.env(x, parent.frame())
vars_rng <-ls(x_env)
vars_rng <- vars_rng[!(vars_rng %in% "names")]
vars_mcols <- ls(parent.env(x_env))
c(vars_rng, vars_mcols)
}
# Port of dplyrs `n` function
# It works by searching for a vector in the overscope environment
# and calling length on it.
#' Compute the number of ranges in each group.
#'
#' @description This function should only be used
#' within `summarise()`, `mutate()` and `filter()`.
#'
#' @examples
#' ir <- as_iranges(
#' data.frame(start = 1:10,
#' width = 5,
#' name = c(rep("a", 5), rep("b", 3), rep("c", 2))
#' )
#' )
#' by_names <- group_by(ir, name)
#' summarise(by_names, n = n())
#' mutate(by_names, n = n())
#' filter(by_names, n() >= 3)
#' @return `n()` will only be evaluated inside a function call, where it
#' returns an integer.
#'
#' @importFrom rlang env_get env_parent
#' @export
n <- function() {
up_env <- parent.frame()
parent_env <- rlang::env_parent(up_env)
if (rlang::env_has(parent_env, "start")) {
.data <- rlang::env_get(parent_env, "start")
if (is(.data, "IntegerList")) {
return(lengths(.data))
} else {
return(length(.data))
}
}
stop("This function should not be called directly")
}
#' Compute the number of distinct unique values in a vector or List
#'
#' @param var a vector of values
#' @return an integer vector
#'
#' @description This is a wrapper to `length(unique(x))` or
#' `lengths(unique(x))` if `x` is a List object
#'
#' @examples
#' x <- CharacterList(c("a", "b", "c", "a"), "d")
#' n_distinct(x)
#' n_distinct(unlist(x))
#'
#' @export
n_distinct <- function(var) {
if (inherits(var, "List")) {
return(lengths(unique(var)))
} else {
return(length(unique(var)))
}
}
is_empty_quos <- function(quos) {
length(quos) == 0L
}
# dplyr's join syntax uses a function called tbl_vars to get
# variable names, using this function will enable a Ranges to be copied through
# as a data.frame in a join.
tbl_vars.GenomicRanges <- function(x) {
ranges_vars(x)
} |
# EXAMPLE 1
# Coelli (2005), p. 165: input-oriented DEA with two inputs and one output.
setwd("C:/Users/...")
rm(list=ls(all=TRUE))
# load the Benchmarking library
library(Benchmarking)
# load data
y <- matrix(c(1,2,3,1,2),ncol=1) # output matrix Mxq
x1 <- matrix(c(2, 2, 6,3,6),ncol=1)
x2<-matrix(c(5,4,6,2,2),ncol=1)
x <- matrix(c(x1,x2),ncol=2) #input matrix Nxp
# plot the input isoquant (Frontier)
# NOTE(review): %/% is INTEGER division; ordinary division (x1/y) was almost
# certainly intended. With this particular data every ratio happens to be a
# whole number, so the plot is unaffected -- confirm before reusing.
dea.plot.isoquant(x1%/%y, x2%/%y, RTS="vrs",main="DEA isoquant",txt=TRUE,xlim=c(0,6))
# envelopment form (input-oriented, constant returns to scale)
env <- dea(x,y, RTS="crs", ORIENTATION="in")
eff(env) # Farrell input efficiency scores
peers(env) # efficient reference units for each DMU
lambda(env) # intensity weights on the peers
# multiplier form (dual problem)
mult <-dea(x,y, RTS="crs", ORIENTATION="in",DUAL=TRUE)
# Print results
print(cbind("theta"=env$eff,peers(env),lambda(env),mult$u, mult$v), digits=3)
# Targets (projections of each DMU onto the frontier)
x_star<-cbind(x1*env$eff, x2*env$eff); x_star # using efficiency scores
# NOTE(review): rows 2 and 5 are hard-coded as the peer units here -- confirm
# they match peers(env) if the data change.
x_star2 <- lambda(env)%*%rbind(x[2,],x[5,]); x_star2 #using lambdas
| /example1.R | no_license | MateoBarletta/eficiencia | R | false | false | 859 | r |
# EXAMPLE 1
# Coelli(2005) p165
setwd("C:/Users/...")
rm(list=ls(all=TRUE))
# load the Benchmarking library
library(Benchmarking)
#load data
y <- matrix(c(1,2,3,1,2),ncol=1) # output matrix Mxq
x1 <- matrix(c(2, 2, 6,3,6),ncol=1)
x2<-matrix(c(5,4,6,2,2),ncol=1)
x <- matrix(c(x1,x2),ncol=2) #input matrix Nxp
# plot the input isoquant (Frontier)
dea.plot.isoquant(x1%/%y, x2%/%y, RTS="vrs",main="DEA isoquant",txt=TRUE,xlim=c(0,6))
# envelopment form
env <- dea(x,y, RTS="crs", ORIENTATION="in")
eff(env)
peers(env)
lambda(env)
# multiplier form
mult <-dea(x,y, RTS="crs", ORIENTATION="in",DUAL=TRUE)
# Print results
print(cbind("theta"=env$eff,peers(env),lambda(env),mult$u, mult$v), digits=3)
# Targets
x_star<-cbind(x1*env$eff, x2*env$eff); x_star # using efficiency scores
x_star2 <- lambda(env)%*%rbind(x[2,],x[5,]); x_star2 #using lambdas
|
# Simulate trajectories from a (possibly non-homogeneous) Markov-Switching
# AutoRegressive model.
#
# Arguments:
#   theta       - fitted MSAR parameter object; regime count (NbRegimes), AR
#                 order, dimension (NbComp), label and link functions are read
#                 from attributes(theta)
#   Y0          - array of initial observations, at least `order` time steps
#   T           - length of each simulated trajectory
#   N.samples   - number of independent trajectories to simulate
#   covar.emis  - covariate for non-homogeneous emissions (2nd label char "N")
#   covar.trans - covariate for non-homogeneous transitions (1st label char
#                 "N"); if a single number, it is interpreted as a lag and the
#                 covariate is built from Y itself with that delay
#   link.ct     - optional link function applied to lagged Y before the
#                 transition function
#   nc          - component index(es) of Y used as the transition covariate
#   S0          - optional vector of initial regimes (one per sample)
#
# Returns a list with S (T x N.samples matrix of simulated regimes) and Y
# (T x N.samples x d array of simulated observations).
simule.nh.MSAR <-
function(theta,Y0,T,N.samples = 1,covar.emis=NULL,covar.trans=NULL,link.ct = NULL,nc=1,S0 = NULL) {
# If length(covar)==1, covar is built from Y with delay covar
# if (!inherits(res, "MSAR"))
# stop("use only with \"MSAR\" objects")
if (!is.null(S0) & length(S0) != N.samples){stop("The length of S0 has to be equal to N.samples")}
M = attributes(theta)$NbRegimes
order = attributes(theta)$order
d <- attributes(theta)$NbComp
if (length(Y0[,1,1]) < order) {stop(paste("Length of Y0 should be equal to ",order,sep=""))}
label = attributes(theta)$label
# L is the covariate delay; defaults to 1 and is overwritten below when
# covar.trans is supplied as a scalar lag.
L = 1
Y = array(0,c(T,N.samples,d))
S = matrix(0,T,N.samples)
Y[1:max(order,1),,] = Y0[1:max(order,1),,]
transition = array(0,c(M,M,T,N.samples))
# Draw the initial regime from the prior unless S0 was supplied.
if (is.null(S0)){
for (ex in 1:N.samples) {
S[1,ex] = which(rmultinom(1, size = 1, prob = theta$prior)==1)
}
} else { S[1,] = S0}
# First label character "N": non-homogeneous transition probabilities.
if (substr(label,1,1) == "N") {
nh_transitions = attributes(theta)$nh.transitions
if (length(covar.trans)==1) {
# Scalar covar.trans is a lag: transitions are computed inside the main
# loop from Y delayed by L time steps.
L = covar.trans
} else {
#transition=nh_transitions(c(covar.trans),theta$par.trans,theta$transmat);
for (ex in 1:N.samples) {transition[,,,ex]=nh_transitions(array(covar.trans[,ex,],c(T,1,dim(covar.trans)[3])),theta$par.trans,theta$transmat);}
}
} else {
# Homogeneous case: replicate the fitted transition matrix over time/samples.
transition = array(theta$transmat,c(M,M,T,N.samples))
}
# Fill in regimes up to the first time step the main loop can handle.
# (which.max of a single multinomial draw returns the sampled category.)
if (max(order,L)>1) {
for (ex in 1:N.samples) {
for (t in 2:max(order,L)){
S[t,ex] = which.max(rmultinom(1, size = 1, prob = theta$transmat[S[t-1,ex],]))
}
}
}
A0 = theta$A0
sigma = theta$sigma
# Per-regime emission offsets; nonzero only when the 2nd label char is "N".
f.emis = array(0,c(T,M,d))
if (substr(label,2,2) == "N") {
par.emis = theta$par.emis
nh_emissions = attributes(theta)$nh.emissions
for (m in 1:M) {
f.emis[,m,] = nh_emissions(covar.emis,as.matrix(par.emis[[m]])) # TODO: check this for d > 1
}
}
d.c = length(nc)
if(order>0){ A = theta$A}
# Pre-compute per-regime "square roots" of the noise covariance: Cholesky
# factors for d > 1, plain standard deviations for d == 1 (where the AR
# coefficients are also reshaped into a per-regime list of scalars).
if (d>1) {
sq_sigma = list()
for(i in 1:M){
sq_sigma[[i]] = chol(sigma[[i]])
}
} else
{ sq_sigma = numeric(M)
for (i in 1:M){sq_sigma[[i]] = sqrt(sigma[[i]])}
A = list()
for (m in 1:M) {
A[[m]] = list()
for (o in 1:order) {A[[m]][[o]] = theta$A[m,o]}}
}
# Main simulation loop over samples and time.
for (ex in 1:N.samples){
for (t in max(c(2,order+1,L+1)):(T)){
# Non-homogeneous transitions driven by lagged Y: recompute the transition
# probabilities for this time step from Y[t-L] (optionally transformed).
if (substr(label,1,1) == "N" & length(covar.trans)==1) {
if (is.null(link.ct)) {z = Y[t-L,ex,nc,drop=FALSE]}
else {
z = link.ct(Y[t-L,ex,,drop=FALSE])
#z = matrix(z,1,length(z))
}
transition[,,t,ex] = nh_transitions(z,theta$par.trans,theta$transmat)
}
# Sample the next regime given the previous one.
S[t,ex] = which.max(rmultinom(1, size = 1, prob = transition[S[t-1,ex], ,t,ex]))
# Autoregressive part: sum over lags of A_o %*% Y[t-o] for the current regime.
if(order>0){
for(o in 1:order){
Y[t,ex,] = Y[t,ex,]+ A[[S[t,ex]]][[o]]%*%Y[t-o,ex,]
}
}
# Add intercept, emission offset, and a Gaussian innovation scaled by the
# regime's covariance square root.
Y[t,ex,] = Y[t,ex,] + A0[S[t,ex],] + f.emis[t,S[t,ex],] + t(t(sq_sigma[[S[t,ex]]])%*%matrix(rnorm(d),d,1))
}
}
return(list(S=S,Y=Y))
}
| /R/simule.nh.MSAR.R | no_license | cran/NHMSAR | R | false | false | 2,734 | r | simule.nh.MSAR <-
function(theta,Y0,T,N.samples = 1,covar.emis=NULL,covar.trans=NULL,link.ct = NULL,nc=1,S0 = NULL) {
# If length(covar)==1, covar is built from Y with delay covar
# if (!inherits(res, "MSAR"))
# stop("use only with \"MSAR\" objects")
if (!is.null(S0) & length(S0) != N.samples){stop("The length of S0 has to be equal to N.samples")}
M = attributes(theta)$NbRegimes
order = attributes(theta)$order
d <- attributes(theta)$NbComp
if (length(Y0[,1,1]) < order) {stop(paste("Length of Y0 should be equal to ",order,sep=""))}
label = attributes(theta)$label
L = 1
Y = array(0,c(T,N.samples,d))
S = matrix(0,T,N.samples)
Y[1:max(order,1),,] = Y0[1:max(order,1),,]
transition = array(0,c(M,M,T,N.samples))
if (is.null(S0)){
for (ex in 1:N.samples) {
S[1,ex] = which(rmultinom(1, size = 1, prob = theta$prior)==1)
}
} else { S[1,] = S0}
if (substr(label,1,1) == "N") {
nh_transitions = attributes(theta)$nh.transitions
if (length(covar.trans)==1) {
L = covar.trans
} else {
#transition=nh_transitions(c(covar.trans),theta$par.trans,theta$transmat);
for (ex in 1:N.samples) {transition[,,,ex]=nh_transitions(array(covar.trans[,ex,],c(T,1,dim(covar.trans)[3])),theta$par.trans,theta$transmat);}
}
} else {
transition = array(theta$transmat,c(M,M,T,N.samples))
}
if (max(order,L)>1) {
for (ex in 1:N.samples) {
for (t in 2:max(order,L)){
S[t,ex] = which.max(rmultinom(1, size = 1, prob = theta$transmat[S[t-1,ex],]))
}
}
}
A0 = theta$A0
sigma = theta$sigma
f.emis = array(0,c(T,M,d))
if (substr(label,2,2) == "N") {
par.emis = theta$par.emis
nh_emissions = attributes(theta)$nh.emissions
for (m in 1:M) {
f.emis[,m,] = nh_emissions(covar.emis,as.matrix(par.emis[[m]])) # A voir si d>1
}
}
d.c = length(nc)
if(order>0){ A = theta$A}
if (d>1) {
sq_sigma = list()
for(i in 1:M){
sq_sigma[[i]] = chol(sigma[[i]])
}
} else
{ sq_sigma = numeric(M)
for (i in 1:M){sq_sigma[[i]] = sqrt(sigma[[i]])}
A = list()
for (m in 1:M) {
A[[m]] = list()
for (o in 1:order) {A[[m]][[o]] = theta$A[m,o]}}
}
for (ex in 1:N.samples){
for (t in max(c(2,order+1,L+1)):(T)){
if (substr(label,1,1) == "N" & length(covar.trans)==1) {
if (is.null(link.ct)) {z = Y[t-L,ex,nc,drop=FALSE]}
else {
z = link.ct(Y[t-L,ex,,drop=FALSE])
#z = matrix(z,1,length(z))
}
transition[,,t,ex] = nh_transitions(z,theta$par.trans,theta$transmat)
}
S[t,ex] = which.max(rmultinom(1, size = 1, prob = transition[S[t-1,ex], ,t,ex]))
if(order>0){
for(o in 1:order){
Y[t,ex,] = Y[t,ex,]+ A[[S[t,ex]]][[o]]%*%Y[t-o,ex,]
}
}
Y[t,ex,] = Y[t,ex,] + A0[S[t,ex],] + f.emis[t,S[t,ex],] + t(t(sq_sigma[[S[t,ex]]])%*%matrix(rnorm(d),d,1))
}
}
return(list(S=S,Y=Y))
}
|
# STATIS analysis of climate-station data (ade4): read one of several Excel
# inputs, standardise measurements within groups, run STATIS, plot the
# inter-structure / compromise space, and export coordinates to Excel.
# NOTE(review): this reads like an interactive session transcript -- several
# lines overwrite each other (alternative inputs and column selections) and
# were presumably run selectively, not top to bottom.
paquetes <- c("ade4", "corrplot", "xlsx", "openxlsx", "readxl")
lapply(paquetes, require, character.only=TRUE)
library(readxl)
# Alternative input data sets; only the last assignment to Data below is used.
Cfbinvierno <- read_excel("CfbEstaciones/Cfb inestable otonho/Cfb inestable Otonho.xlsx")
# NOTE(review): Csa_inestable_otonho is not defined anywhere above -- this
# View() call presumably relies on an object from an earlier session.
View(Csa_inestable_otonho)
Cfbinvierno <- read_excel("C:/Users/Lism_/Desktop/AnaSTATIS/CfbEstaciones/Cfb inestable verano/Cfb inestable verano 2.xlsx")
Csa_todo <- read_excel("C:/Users/Lism_/Desktop/Compromiso inestable/Todo.xlsx")
View(Csa_todo)
Data <- Cfbinvierno
Data <- Csa_todo
str(Data)
Data$fecha <- as.factor(Data$fecha) ### Treat the date as a factor (27 levels).
Data$Clima <- as.factor(Data$Clima)
str(Data$fecha)
clima <- Data$Clima
fecha<-Data$fecha
class(fecha)
# Alternative numeric-column selections; the last one executed wins.
Data.num <- Data[,5:14]
Data.num <- Data[,-c(1,2,3,4,9,11,12,15,18,20)]
Data.num <- Data[,-11]
Data.num <- Data[,-c(1,12)]
str(Data.num)
Data.within <- withinpca(Data.num,clima, scann=FALSE) ## Standardise the data (within climate groups).
Data.ktab<-ktab.within(Data.within)
Data.statis<-statis(Data.ktab, scann=FALSE)
Data.within <- withinpca(Data.num,fecha, scann=FALSE) ## Standardise the data (within dates).
Data.ktab<-ktab.within(Data.within)
Data.statis<-statis(Data.ktab, scann=FALSE)
# RV coefficient matrix between tables (inter-structure) and compromise plots.
cor.plot <- corrplot(Data.statis$RV)
cor.plot2 <- corrplot(Data.statis$RV, order = c("hclust"))
s.corcircle(Data.statis$RV.coo, lab=Data.statis$tab.names, sub="INTERESTRUCTURA")
s.arrow(Data.statis$C.li, sub="VARIABLES EN EL ESPACIO COMPROMISO")
s.arrow(Data.statis$C.li[,2:3], sub="VARIABLES EN EL ESPACIO COMPROMISO")
##################################################
# Export compromise coordinates (transposed, standardised).
library(writexl)
tcoord <- t(Data.statis$C.li)
tcoord.std <- scale(tcoord)
tcoord.cov <- cov(tcoord.std)
corrplot(tcoord.cov)
write_xlsx(Data.ktab$`2019-09-13` ,"C:\\Users\\Lism_\\Desktop\\Compromisos\\Csaprimavera.xlsx")
#################################################
str(tcoord)
tcoord.dataframe <- as.data.frame(tcoord)
write_xlsx(tcoord.dataframe ,"C:\\Users\\Lism_\\Desktop\\Compromiso inestable\\Cfbverano 2.xlsx")
| /Análisis inestable.R | no_license | Lism2992/Clima | R | false | false | 2,113 | r | paquetes <- c("ade4", "corrplot", "xlsx", "openxlsx", "readxl")
# STATIS analysis of climate-station data (ade4) -- session-transcript style;
# several lines overwrite each other and are presumably run selectively.
# NOTE(review): `paquetes` is defined on the preceding line of the file.
lapply(paquetes, require, character.only=TRUE)
library(readxl)
# Alternative input data sets; only the last assignment to Data below is used.
Cfbinvierno <- read_excel("CfbEstaciones/Cfb inestable otonho/Cfb inestable Otonho.xlsx")
# NOTE(review): Csa_inestable_otonho is not defined above -- presumably from
# an earlier session.
View(Csa_inestable_otonho)
Cfbinvierno <- read_excel("C:/Users/Lism_/Desktop/AnaSTATIS/CfbEstaciones/Cfb inestable verano/Cfb inestable verano 2.xlsx")
Csa_todo <- read_excel("C:/Users/Lism_/Desktop/Compromiso inestable/Todo.xlsx")
View(Csa_todo)
Data <- Cfbinvierno
Data <- Csa_todo
str(Data)
Data$fecha <- as.factor(Data$fecha) ### Treat the date as a factor (27 levels).
Data$Clima <- as.factor(Data$Clima)
str(Data$fecha)
clima <- Data$Clima
fecha<-Data$fecha
class(fecha)
# Alternative numeric-column selections; the last one executed wins.
Data.num <- Data[,5:14]
Data.num <- Data[,-c(1,2,3,4,9,11,12,15,18,20)]
Data.num <- Data[,-11]
Data.num <- Data[,-c(1,12)]
str(Data.num)
Data.within <- withinpca(Data.num,clima, scann=FALSE) ## Standardise the data (within climate groups).
Data.ktab<-ktab.within(Data.within)
Data.statis<-statis(Data.ktab, scann=FALSE)
Data.within <- withinpca(Data.num,fecha, scann=FALSE) ## Standardise the data (within dates).
Data.ktab<-ktab.within(Data.within)
Data.statis<-statis(Data.ktab, scann=FALSE)
# Inter-structure (RV matrix) and compromise plots.
cor.plot <- corrplot(Data.statis$RV)
cor.plot2 <- corrplot(Data.statis$RV, order = c("hclust"))
s.corcircle(Data.statis$RV.coo, lab=Data.statis$tab.names, sub="INTERESTRUCTURA")
s.arrow(Data.statis$C.li, sub="VARIABLES EN EL ESPACIO COMPROMISO")
s.arrow(Data.statis$C.li[,2:3], sub="VARIABLES EN EL ESPACIO COMPROMISO")
##################################################
# Export compromise coordinates (transposed, standardised).
library(writexl)
tcoord <- t(Data.statis$C.li)
tcoord.std <- scale(tcoord)
tcoord.cov <- cov(tcoord.std)
corrplot(tcoord.cov)
write_xlsx(Data.ktab$`2019-09-13` ,"C:\\Users\\Lism_\\Desktop\\Compromisos\\Csaprimavera.xlsx")
#################################################
str(tcoord)
tcoord.dataframe <- as.data.frame(tcoord)
write_xlsx(tcoord.dataframe ,"C:\\Users\\Lism_\\Desktop\\Compromiso inestable\\Cfbverano 2.xlsx")
|
# Builds an admissions/patients/ICU-stay table and a filtered lab-events
# extract from MIMIC-style CSV inputs, via sqldf and dplyr.
library("sqldf")
library("anytime")
library("dplyr")
library("sparklyr")
setwd("/Users/jitaekim/Desktop/cse6250/final project/Final Code Files/Backend/CodeBackend/Data/Input")
# NOTE(review): this Spark connection is never used in the visible code.
sc <- spark_connect(master = "local")
#################################### patients/admissions data ######################################
admissions = read.csv(file = "ADMISSIONS.csv", header = T)
patients = read.csv(file = "PATIENTS.csv", header = T)
icu_stays = read.csv(file = "ICUSTAYS.csv", header = T)
names(admissions) <- tolower(names(admissions))
names(patients) <- tolower(names(patients))
names(icu_stays) <- tolower(names(icu_stays))
# query to merge and admissions and patients table - call table 1
query1 = "select a.row_id, a.subject_id, a.admittime, a.dischtime, a.deathtime, a.admission_type,
a.admission_location, a.discharge_location, a.religion, a.marital_status, a.ethnicity, a.diagnosis,
a.hospital_expire_flag, b.gender, b.dob, b.dod, b.dod_hosp, b.expire_flag from admissions as a
left join patients as b on a.subject_id = b.subject_id"
table1 = sqldf(query1)
# Numeric admission/discharge timestamps, used as join keys below.
table1$admit_time_num = as.numeric(as.POSIXct(table1$admittime))
table1$disch_time_num = as.numeric(as.POSIXct(table1$dischtime))
# icu stays time keys
icu_stays$intime_num = as.numeric(as.POSIXct(strptime(icu_stays$intime, "%m/%d/%Y %H:%M")))
icu_stays$outtime_num = as.numeric(as.POSIXct(strptime(icu_stays$outtime, "%m/%d/%Y %H:%M")))
# trying full outer join in dplyr
temp1 = table1 %>%
full_join(icu_stays, by = c("subject_id"))
# NOTE(review): this flag keeps stays that start after admission but END AFTER
# discharge, while query2 below keeps stays fully inside the admission window
# (outtime < dischtime). The two conditions disagree -- confirm which is intended.
temp1$flag = ifelse((temp1$intime_num > temp1$admit_time_num) & (temp1$outtime_num > temp1$disch_time_num), 1, 0)
temp1 = subset(temp1, flag == 1)
colnames(temp1)[1] = "row_id"
# Age: from date of death when no in-hospital deathtime is recorded, else from
# discharge date. Ages above 89 are capped at 91.4 -- presumably de-identified
# ages; TODO confirm against the data dictionary.
temp = ifelse(temp1$deathtime == " ", (anytime(temp1$dod) - anytime(temp1$dob))/(60*60*24*365),
(anytime(temp1$dischtime) - anytime(temp1$dob))/(60*60*24*365))
temp1$age = temp
temp1$age = ifelse(temp1$age > 89, 91.4, temp1$age)
# merging the previous table with ICU stays
query2 = "select a.*, b.intime, b.outtime, b.los from table1 as a left join icu_stays as b on a.subject_id = b.subject_id
and a.admit_time_num < b.intime_num and a.disch_time_num > b.outtime_num"
table2 = sqldf(query2)
drops <- c("admit_time_num", "disch_time_num")
table2 = table2[ , !(names(table2) %in% drops)]
# creating age variable (same rule as above)
temp = ifelse(table2$deathtime == " ", (anytime(table2$dod) - anytime(table2$dob))/(60*60*24*365),
(anytime(table2$dischtime) - anytime(table2$dob))/(60*60*24*365))
table2$age = temp
table2$age = ifelse(table2$age > 89, 91.4, table2$age)
# Rows without a matched ICU stay, appended to the flagged matches above.
temp2 = subset(table2, is.na(table2$intime))
names_filter = colnames(temp2)
final_data = rbind(temp1[, names_filter], temp2)
#################################### lab-events data ######################################
labevents = read.csv(file = "LABEVENTS.csv", header = T)
labitems = read.csv(file = "D_LABITEMS.csv", header = T)
names(labevents) <- tolower(names(labevents))
names(labitems) <- tolower(names(labitems))
query3 = "select a.itemid, a.subject_id, a.charttime, a.value, a.valuenum, a.valueuom, a.flag,
b.label, b.fluid, b.category
from labevents as a left join labitems as b on a.itemid = b.itemid"
table3 = sqldf(query3)
# Keep three lab item ids -- pH / lactate / haemoglobin, judging from the
# output file name; NOTE(review): confirm the mapping against D_LABITEMS.
filter_data = subset(table3, itemid %in% c(50820, 50813, 51222))
write.csv(file = "lab_data_ph_lactate_hb.csv", filter_data, row.names = F)
| /Final Code Files/Backend/CodeBackend/Codes/data_manipulation1.R | no_license | 32101115/BigData | R | false | false | 3,415 | r | library("sqldf")
# Builds an admissions/patients/ICU-stay table and a filtered lab-events
# extract from MIMIC-style CSV inputs (sqldf + dplyr).
# NOTE(review): library("sqldf") is loaded on the preceding line of the file.
library("anytime")
library("dplyr")
library("sparklyr")
setwd("/Users/jitaekim/Desktop/cse6250/final project/Final Code Files/Backend/CodeBackend/Data/Input")
# NOTE(review): this Spark connection is never used in the visible code.
sc <- spark_connect(master = "local")
#################################### patients/admissions data ######################################
admissions = read.csv(file = "ADMISSIONS.csv", header = T)
patients = read.csv(file = "PATIENTS.csv", header = T)
icu_stays = read.csv(file = "ICUSTAYS.csv", header = T)
names(admissions) <- tolower(names(admissions))
names(patients) <- tolower(names(patients))
names(icu_stays) <- tolower(names(icu_stays))
# query to merge and admissions and patients table - call table 1
query1 = "select a.row_id, a.subject_id, a.admittime, a.dischtime, a.deathtime, a.admission_type,
a.admission_location, a.discharge_location, a.religion, a.marital_status, a.ethnicity, a.diagnosis,
a.hospital_expire_flag, b.gender, b.dob, b.dod, b.dod_hosp, b.expire_flag from admissions as a
left join patients as b on a.subject_id = b.subject_id"
table1 = sqldf(query1)
# Numeric admission/discharge timestamps, used as join keys below.
table1$admit_time_num = as.numeric(as.POSIXct(table1$admittime))
table1$disch_time_num = as.numeric(as.POSIXct(table1$dischtime))
# icu stays time keys
icu_stays$intime_num = as.numeric(as.POSIXct(strptime(icu_stays$intime, "%m/%d/%Y %H:%M")))
icu_stays$outtime_num = as.numeric(as.POSIXct(strptime(icu_stays$outtime, "%m/%d/%Y %H:%M")))
# trying full outer join in dplyr
temp1 = table1 %>%
full_join(icu_stays, by = c("subject_id"))
# NOTE(review): this flag requires outtime AFTER discharge, while query2 below
# requires outtime BEFORE discharge -- the two conditions disagree; confirm
# which is intended.
temp1$flag = ifelse((temp1$intime_num > temp1$admit_time_num) & (temp1$outtime_num > temp1$disch_time_num), 1, 0)
temp1 = subset(temp1, flag == 1)
colnames(temp1)[1] = "row_id"
# Age from dod when no in-hospital deathtime, else from discharge date; ages
# above 89 capped at 91.4 (presumably de-identified -- TODO confirm).
temp = ifelse(temp1$deathtime == " ", (anytime(temp1$dod) - anytime(temp1$dob))/(60*60*24*365),
(anytime(temp1$dischtime) - anytime(temp1$dob))/(60*60*24*365))
temp1$age = temp
temp1$age = ifelse(temp1$age > 89, 91.4, temp1$age)
# merging the previous table with ICU stays
query2 = "select a.*, b.intime, b.outtime, b.los from table1 as a left join icu_stays as b on a.subject_id = b.subject_id
and a.admit_time_num < b.intime_num and a.disch_time_num > b.outtime_num"
table2 = sqldf(query2)
drops <- c("admit_time_num", "disch_time_num")
table2 = table2[ , !(names(table2) %in% drops)]
# creating age variable (same rule as above)
temp = ifelse(table2$deathtime == " ", (anytime(table2$dod) - anytime(table2$dob))/(60*60*24*365),
(anytime(table2$dischtime) - anytime(table2$dob))/(60*60*24*365))
table2$age = temp
table2$age = ifelse(table2$age > 89, 91.4, table2$age)
# Rows without a matched ICU stay, appended to the flagged matches above.
temp2 = subset(table2, is.na(table2$intime))
names_filter = colnames(temp2)
final_data = rbind(temp1[, names_filter], temp2)
#################################### lab-events data ######################################
labevents = read.csv(file = "LABEVENTS.csv", header = T)
labitems = read.csv(file = "D_LABITEMS.csv", header = T)
names(labevents) <- tolower(names(labevents))
names(labitems) <- tolower(names(labitems))
query3 = "select a.itemid, a.subject_id, a.charttime, a.value, a.valuenum, a.valueuom, a.flag,
b.label, b.fluid, b.category
from labevents as a left join labitems as b on a.itemid = b.itemid"
table3 = sqldf(query3)
# Keep three lab item ids -- pH / lactate / haemoglobin, judging from the
# output file name; NOTE(review): confirm the mapping against D_LABITEMS.
filter_data = subset(table3, itemid %in% c(50820, 50813, 51222))
write.csv(file = "lab_data_ph_lactate_hb.csv", filter_data, row.names = F)
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/awsFunctions.R
\name{checkStatus}
\alias{checkStatus}
\title{AWS Support Function: Checks the status of a given job on EMR}
\usage{
checkStatus(jobFlowId)
}
\arguments{
\item{jobFlowId}{the Job Flow Id of the job to check}
}
\value{
Job Status
}
\description{
Checks the status of a previously issued job.
}
\author{
James "JD" Long
}
| /man/checkStatus.Rd | no_license | zachmayer/segue2 | R | false | false | 422 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/awsFunctions.R
\name{checkStatus}
\alias{checkStatus}
\title{AWS Support Function: Checks the status of a given job on EMR}
\usage{
checkStatus(jobFlowId)
}
\arguments{
\item{jobFlowId}{the Job Flow Id of the job to check}
}
\value{
Job Status
}
\description{
Checks the status of a previously issued job.
}
\author{
James "JD" Long
}
|
% Auto-generated: do not edit by hand
\name{dashMoreComponents}
\alias{dashMoreComponents}
\title{DashMoreComponents component}
\description{
DashMoreComponents is an example component. It takes a property, `label`, and displays it. It renders an input with the property `value` which is editable by the user.
}
\usage{
dashMoreComponents(id=NULL, label=NULL, value=NULL)
}
\arguments{
\item{id}{Character. The ID used to identify this component in Dash callbacks.}
\item{label}{Character. A label that will be printed when this component is rendered.}
\item{value}{Character. The value displayed in the input.}
}
\value{named list of JSON elements corresponding to React.js properties and their values}
| /man/dashMoreComponents.Rd | no_license | PythonJournalist/dash-more-components | R | false | false | 711 | rd | % Auto-generated: do not edit by hand
\name{dashMoreComponents}
\alias{dashMoreComponents}
\title{DashMoreComponents component}
\description{
DashMoreComponents is an example component. It takes a property, `label`, and displays it. It renders an input with the property `value` which is editable by the user.
}
\usage{
dashMoreComponents(id=NULL, label=NULL, value=NULL)
}
\arguments{
\item{id}{Character. The ID used to identify this component in Dash callbacks.}
\item{label}{Character. A label that will be printed when this component is rendered.}
\item{value}{Character. The value displayed in the input.}
}
\value{named list of JSON elements corresponding to React.js properties and their values}
|
# run_analysis.R -- Getting & Cleaning Data course project.
# Merges the UCI HAR test and training sets, keeps the mean/std measurements,
# relabels the activity codes, and writes a tidy summary containing the mean
# of every retained variable per subject per activity.
rm(list=ls())  # NOTE(review): wipes the caller's workspace; kept for compatibility.
if(!require(dplyr)){
  install.packages("dplyr")
  library(dplyr)
}
if(!require(car)){
  install.packages("car")
  library(car)
}
# Set working directory
path_wd <- paste0("/Users/Bart/Documents/Coursera_data_cleaning/",
                  "project")
setwd(path_wd)
# Read in test data
setwd("./test")
testD <- read.table("X_test.txt")
testDy <- read.table("y_test.txt")          # activity codes
testDsub <- read.table("subject_test.txt")  # subject codes
setwd("../")
# Read in training data
setwd("./train")
trainD <- read.table("X_train.txt")
trainDy <- read.table("y_train.txt")          # activity codes
trainDsub <- read.table("subject_train.txt")  # subject codes
setwd("../")
# Column labels for the feature columns
feat <- read.table("features.txt")
feat <- as.character(feat[,2])
# Combine subject id, activity id and measurements for each cohort
testAll <- cbind(testDsub, testDy, testD)
trainAll <- cbind(trainDsub, trainDy, trainD)
# Stack test and training data sets
mergedD <- rbind(testAll, trainAll)
# Label all columns: two id columns followed by the feature names
newLabs <- c("subject_id", "activity_id")
newLabsAll <- c(newLabs, feat)
colnames(mergedD) <- newLabsAll
# Keep only mean and standard deviation measurements, plus the two ids
a <- mergedD[,grepl("mean", colnames(mergedD))]
b <- mergedD[,grepl("std", colnames(mergedD))]
c <- mergedD[,grepl("Mean", colnames(mergedD))]
d <- mergedD[,c(1,2)]
D <- cbind(d,a,b,c)
# Recode numeric activity ids to descriptive labels.
# (BUG FIX: the original had a bare `as.character(D$activity_id)` here whose
# result was discarded -- a no-op; removed. car::recode handles the numeric
# codes directly.)
D$activity_id <- recode(D$activity_id, "1='WALKING';
                        2='WALKING_UPSTAIRS';
                        3='WALKING_DOWNSTAIRS';
                        4='SITTING';
                        5='STANDING';
                        6='LAYING'")
# Tidy summary: mean of every variable for each activity of each subject.
# (FIX: summarise_each()/funs() are defunct in current dplyr; summarise_all()
# is the supported equivalent.)
tidy <- D %>%
  group_by(subject_id, activity_id) %>%
  summarise_all(mean)
# Write out summary tidy data in .txt format
write.table(tidy, "subject_activity_summary.txt", row.name = FALSE)
# Code to read summary tidy data back in
look <- read.table("subject_activity_summary.txt", header = TRUE)
| /run_analysis.R | no_license | Bartesto/CleanData | R | false | false | 2,211 | r | rm(list=ls())
# run_analysis.R (continued) -- merges the UCI HAR test and training sets,
# keeps mean/std measurements, relabels activities, and writes a tidy summary
# (mean of every retained variable per subject per activity).
if(!require(dplyr)){
  install.packages("dplyr")
  library(dplyr)
}
if(!require(car)){
  install.packages("car")
  library(car)
}
# Set working directory
path_wd <- paste0("/Users/Bart/Documents/Coursera_data_cleaning/",
                  "project")
setwd(path_wd)
# Read in test data
setwd("./test")
testD <- read.table("X_test.txt")
testDy <- read.table("y_test.txt")          # activity codes
testDsub <- read.table("subject_test.txt")  # subject codes
setwd("../")
# Read in training data
setwd("./train")
trainD <- read.table("X_train.txt")
trainDy <- read.table("y_train.txt")          # activity codes
trainDsub <- read.table("subject_train.txt")  # subject codes
setwd("../")
# Column labels for the feature columns
feat <- read.table("features.txt")
feat <- as.character(feat[,2])
# Combine subject id, activity id and measurements for each cohort
testAll <- cbind(testDsub, testDy, testD)
trainAll <- cbind(trainDsub, trainDy, trainD)
# Stack test and training data sets
mergedD <- rbind(testAll, trainAll)
# Label all columns: two id columns followed by the feature names
newLabs <- c("subject_id", "activity_id")
newLabsAll <- c(newLabs, feat)
colnames(mergedD) <- newLabsAll
# Keep only mean and standard deviation measurements, plus the two ids
a <- mergedD[,grepl("mean", colnames(mergedD))]
b <- mergedD[,grepl("std", colnames(mergedD))]
c <- mergedD[,grepl("Mean", colnames(mergedD))]
d <- mergedD[,c(1,2)]
D <- cbind(d,a,b,c)
# Recode numeric activity ids to descriptive labels.
# (BUG FIX: the original had a bare `as.character(D$activity_id)` here whose
# result was discarded -- a no-op; removed. car::recode handles the numeric
# codes directly.)
D$activity_id <- recode(D$activity_id, "1='WALKING';
                        2='WALKING_UPSTAIRS';
                        3='WALKING_DOWNSTAIRS';
                        4='SITTING';
                        5='STANDING';
                        6='LAYING'")
# Tidy summary: mean of every variable for each activity of each subject.
# (FIX: summarise_each()/funs() are defunct in current dplyr; summarise_all()
# is the supported equivalent.)
tidy <- D %>%
  group_by(subject_id, activity_id) %>%
  summarise_all(mean)
# Write out summary tidy data in .txt format
write.table(tidy, "subject_activity_summary.txt", row.name = FALSE)
# Code to read summary tidy data back in
look <- read.table("subject_activity_summary.txt", header = TRUE)
|
% Generated by roxygen2: do not edit by hand
\name{BANOVA.Bernoulli}
\alias{BANOVA.Bernoulli}
\alias{predict.BANOVA.Bernoulli}
\alias{print.BANOVA.Bernoulli}
\alias{summary.BANOVA.Bernoulli}
\title{Estimation of BANOVA with a Bernoulli dependent variable}
\description{
\code{BANOVA.Bernoulli} implements a Bayesian ANOVA for binary dependent variable, using a logit link and a normal heterogeneity distribution.
}
\usage{
BANOVA.Bernoulli(l1_formula = "NA", l2_formula = "NA", data,
id, l2_hyper = c(1, 1, 0.0001), burnin = 5000, sample = 2000, thin = 10,
adapt = 0, conv_speedup = F, jags = runjags.getOption('jagspath'))
\method{summary}{BANOVA.Bernoulli}(object, ...)
\method{predict}{BANOVA.Bernoulli}(object, newdata = NULL,...)
\method{print}{BANOVA.Bernoulli}(x, ...)
}
\arguments{
\item{l1_formula}{formula for level 1 e.g. 'Y~X1+X2'}
\item{l2_formula}{formula for level 2 e.g. '~Z1+Z2',
response variable must not be included}
\item{data}{a data.frame in long format including all features in level 1 and level 2(covariates and categorical factors) and responses}
\item{id}{subject ID of each response unit}
\item{l2_hyper}{level 2 hyperparameters, c(a, b, \eqn{\gamma}), default c(1,1,0.0001)}
\item{burnin}{the number of burn in draws in the MCMC algorithm, default 5000}
\item{sample}{target samples in the MCMC algorithm after thinning, default 2000}
\item{thin}{the number of samples in the MCMC algorithm that needs to be thinned, default 10}
\item{adapt}{the number of adaptive iterations, default 0 (see \link[runjags]{run.jags})}
\item{conv_speedup}{whether to speedup convergence, default F}
\item{jags}{the system call or path for activating 'JAGS'. Default calls findjags() to attempt to locate 'JAGS' on your system}
  \item{object}{object of class \code{BANOVA.Bernoulli} (returned by \code{BANOVA.Bernoulli})}
  \item{newdata}{test data, either a matrix, vector or a
data.frame. It must have the same format as the original data (the same number of features and the same data classes)}
  \item{x}{object of class \code{BANOVA.Bernoulli} (returned by \code{BANOVA.Bernoulli})}
\item{\dots}{additional arguments,currently ignored}
}
\details{
Level 1 model: \cr
\eqn{y_i} {~} \eqn{Binomial(1,p_i)}, \eqn{p_i = logit^{-1}(\eta_i)} \cr
where \eqn{\eta_i = \sum_{p = 0}^{P}\sum_{j=1}^{J_p}X_{i,j}^p\beta_{j,s_i}^p}, \eqn{s_i} is the subject id of data record \eqn{i}. see \code{\link{BANOVA-package}}
}
\value{
\code{BANOVA.Bernoulli} returns an object of class \code{"BANOVA.Bernoulli"}. The returned object is a list containing:
\item{anova.table}{table of effect sizes \code{\link{BAnova}}}
\item{coef.tables}{table of estimated coefficients}
\item{pvalue.table}{table of p-values \code{\link{table.pvalues}}}
\item{dMatrice}{design matrices at level 1 and level 2}
\item{samples_l2_param}{posterior samples of level 2 parameters}
\item{data}{original data.frame}
\item{mf1}{model.frame of level 1}
\item{mf2}{model.frame of level 2}
\item{JAGSmodel}{'JAGS' model}
}
\examples{
\donttest{
data(bernlogtime)
# model with the dependent variable : response
res <- BANOVA.Bernoulli(response~typical, ~blur + color, bernlogtime,
bernlogtime$subject, burnin = 5000, sample = 2000, thin = 10)
summary(res)
}
}
| /man/BANOVA.Bernoulli.Rd | no_license | cran/BANOVA | R | false | true | 3,304 | rd | % Generated by roxygen2: do not edit by hand
\name{BANOVA.Bernoulli}
\alias{BANOVA.Bernoulli}
\alias{predict.BANOVA.Bernoulli}
\alias{print.BANOVA.Bernoulli}
\alias{summary.BANOVA.Bernoulli}
\title{Estimation of BANOVA with a Bernoulli dependent variable}
\description{
\code{BANOVA.Bernoulli} implements a Bayesian ANOVA for binary dependent variable, using a logit link and a normal heterogeneity distribution.
}
\usage{
BANOVA.Bernoulli(l1_formula = "NA", l2_formula = "NA", data,
id, l2_hyper = c(1, 1, 0.0001), burnin = 5000, sample = 2000, thin = 10,
adapt = 0, conv_speedup = F, jags = runjags.getOption('jagspath'))
\method{summary}{BANOVA.Bernoulli}(object, ...)
\method{predict}{BANOVA.Bernoulli}(object, newdata = NULL,...)
\method{print}{BANOVA.Bernoulli}(x, ...)
}
\arguments{
\item{l1_formula}{formula for level 1 e.g. 'Y~X1+X2'}
\item{l2_formula}{formula for level 2 e.g. '~Z1+Z2',
response variable must not be included}
\item{data}{a data.frame in long format including all features in level 1 and level 2(covariates and categorical factors) and responses}
\item{id}{subject ID of each response unit}
\item{l2_hyper}{level 2 hyperparameters, c(a, b, \eqn{\gamma}), default c(1,1,0.0001)}
\item{burnin}{the number of burn in draws in the MCMC algorithm, default 5000}
\item{sample}{target samples in the MCMC algorithm after thinning, default 2000}
\item{thin}{the number of samples in the MCMC algorithm that needs to be thinned, default 10}
\item{adapt}{the number of adaptive iterations, default 0 (see \link[runjags]{run.jags})}
\item{conv_speedup}{whether to speedup convergence, default F}
\item{jags}{the system call or path for activating 'JAGS'. Default calls findjags() to attempt to locate 'JAGS' on your system}
  \item{object}{object of class \code{BANOVA.Bernoulli} (returned by \code{BANOVA.Bernoulli})}
  \item{newdata}{test data, either a matrix, vector or a
data.frame. It must have the same format as the original data (the same number of features and the same data classes)}
  \item{x}{object of class \code{BANOVA.Bernoulli} (returned by \code{BANOVA.Bernoulli})}
\item{\dots}{additional arguments,currently ignored}
}
\details{
Level 1 model: \cr
\eqn{y_i} {~} \eqn{Binomial(1,p_i)}, \eqn{p_i = logit^{-1}(\eta_i)} \cr
where \eqn{\eta_i = \sum_{p = 0}^{P}\sum_{j=1}^{J_p}X_{i,j}^p\beta_{j,s_i}^p}, \eqn{s_i} is the subject id of data record \eqn{i}. see \code{\link{BANOVA-package}}
}
\value{
\code{BANOVA.Bernoulli} returns an object of class \code{"BANOVA.Bernoulli"}. The returned object is a list containing:
\item{anova.table}{table of effect sizes \code{\link{BAnova}}}
\item{coef.tables}{table of estimated coefficients}
\item{pvalue.table}{table of p-values \code{\link{table.pvalues}}}
\item{dMatrice}{design matrices at level 1 and level 2}
\item{samples_l2_param}{posterior samples of level 2 parameters}
\item{data}{original data.frame}
\item{mf1}{model.frame of level 1}
\item{mf2}{model.frame of level 2}
\item{JAGSmodel}{'JAGS' model}
}
\examples{
\donttest{
data(bernlogtime)
# model with the dependent variable : response
res <- BANOVA.Bernoulli(response~typical, ~blur + color, bernlogtime,
bernlogtime$subject, burnin = 5000, sample = 2000, thin = 10)
summary(res)
}
}
|
# phdwhipbot 0.1
# author: simon munzert
# Assembles one random "PHRASE ATTRIBUTE ANIMAL." tweet from three Excel word
# lists and posts it via the Twitter API.
# load packages
library(stringr)
library(XML)
library(twitteR)
library(XLConnect)
library(ROAuth)
# load text bits (first sheet of each workbook, simplified to a vector)
phrases <- readWorksheet(loadWorkbook("kalondozi-phrases.xlsx"), sheet=1,header=F,simplify=T)
animals <- readWorksheet(loadWorkbook("kalondozi-animals.xlsx"), sheet=1,header=F,simplify=T)
attributs <- readWorksheet(loadWorkbook("kalondozi-attributes.xlsx"),sheet=1,header=F,simplify=T)
# setup authentication
# SECURITY(review): these credentials were committed in plain text and must be
# treated as compromised -- revoke them and supply fresh ones via environment
# variables. The literals remain only as fallbacks so existing deployments
# keep working unchanged.
api_key <- Sys.getenv("TWITTER_API_KEY", "AjWcUYDLtPoXvxNDbPTSlK48K")
api_secret <- Sys.getenv("TWITTER_API_SECRET", "RlchPcIvVMtraKANBtz20kginzuWzUmpo374ICT8PoecSiI6Q7")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN", "65899213-DKwjhtpqJRykNtis0Ak12KGwQvgj68cRYowF0fcak")
access_token_secret <- Sys.getenv("TWITTER_ACCESS_TOKEN_SECRET", "8mFiKGo9MdLVBxdrBmOhLQTvtPugRL4BD4IPx4UGOpvkr")
setup_twitter_oauth(api_key, api_secret, access_token, access_token_secret)
# generate tweet: "<phrase> <attribute> <animal>." in upper case
tweettxt <- toupper(str_c(sample(phrases, 1), " ", sample(attributs, 1), " ", sample(animals, 1), "."))
# send tweet
tweettxt
tweet(tweettxt)
| /rbot.R | no_license | tmuhimbisemoses/twitterbot | R | false | false | 1,021 | r | # phdwhipbot 0.1
# author: simon munzert
# phdwhipbot: assembles one random "PHRASE ATTRIBUTE ANIMAL." tweet from three
# Excel word lists and posts it via the Twitter API.
# load packages
library(stringr)
library(XML)
library(twitteR)
library(XLConnect)
library(ROAuth)
# load text bits (first sheet of each workbook, simplified to a vector)
phrases <- readWorksheet(loadWorkbook("kalondozi-phrases.xlsx"), sheet=1,header=F,simplify=T)
animals <- readWorksheet(loadWorkbook("kalondozi-animals.xlsx"), sheet=1,header=F,simplify=T)
attributs <- readWorksheet(loadWorkbook("kalondozi-attributes.xlsx"),sheet=1,header=F,simplify=T)
# setup authentication
# SECURITY(review): these credentials were committed in plain text and must be
# treated as compromised -- revoke them and supply fresh ones via environment
# variables. The literals remain only as fallbacks so existing deployments
# keep working unchanged.
api_key <- Sys.getenv("TWITTER_API_KEY", "AjWcUYDLtPoXvxNDbPTSlK48K")
api_secret <- Sys.getenv("TWITTER_API_SECRET", "RlchPcIvVMtraKANBtz20kginzuWzUmpo374ICT8PoecSiI6Q7")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN", "65899213-DKwjhtpqJRykNtis0Ak12KGwQvgj68cRYowF0fcak")
access_token_secret <- Sys.getenv("TWITTER_ACCESS_TOKEN_SECRET", "8mFiKGo9MdLVBxdrBmOhLQTvtPugRL4BD4IPx4UGOpvkr")
setup_twitter_oauth(api_key, api_secret, access_token, access_token_secret)
# generate tweet: "<phrase> <attribute> <animal>." in upper case
tweettxt <- toupper(str_c(sample(phrases, 1), " ", sample(attributs, 1), " ", sample(animals, 1), "."))
# send tweet
tweettxt
tweet(tweettxt)
|
####### TK - Search and Comparison
########################################################
### Turkish
########################################################
# Preamble for a Shiny/leaflet suburb-comparison app: loads libraries, the
# SA2 shapefile and facility CSVs, colour palettes and map-control labels.
library(rgdal)
library(magrittr)
library(leaflet)
library(htmltools)
library(shinydashboard)
library(dashboardthemes)
library(shinyjs)
library(shiny)
library(leaflet.extras)
library(shinyWidgets)
#devtools::install_github("dreamRs/shinyWidgets")
# library(romato)
# devtools::install_github('andrewsali/shinycssloaders')
library(shinycssloaders) # new package
library(shinyalert) # new package for pop-ups
#### suburb profile ----------------------------------
#
# read in shape file (2016 SA2 boundaries, merged attributes)
vic <- readOGR(dsn = path.expand('data/2016_SA2_shape'), layer = 'merged_all')
# load cuisine ranking file
# cuisine_top10 <- read.csv('data/cuisine_top10.csv', stringsAsFactors = T)
#ranking <- read.csv('data/total_ranking_allbusiness.csv', stringsAsFactors = F)
# load childcare + legal services + school
legal <- read.csv('data/legal_services.csv', stringsAsFactors = F)
childcare <- read.csv('data/childcare.csv', stringsAsFactors = F)
school <- read.csv('data/greaterM_school.csv', stringsAsFactors = F)
# NOTE(review): this first assignment is dead -- it is overwritten on the
# next line before being used.
name <- names(vic)
name <- c('suburb', 'Ratio', 'Population', 'income_class', 'LB', 'ME', 'TK')
names(vic) <- name
# Shapefile subsets restricted to suburbs that have each facility type.
childcare_suburb <- subset(vic, suburb %in% childcare$Suburb)
legal_suburb <- subset(vic, suburb %in% legal$Suburb)
school_suburb <- subset(vic, suburb %in% school$Address_Town)
# cuisine id (Zomato cuisine codes, stored as strings)
cuisine_reference <- list()
cuisine_reference[["MDE"]] <- '137'
cuisine_reference[["TK"]] <- '142'
cuisine_reference[["LB"]] <- '66'
cuisine_to_search <- cuisine_reference[["TK"]]
# city id
cities <- list('Pearcedale','Dromana','Flinders','Hastings','Mornington','Mount Eliza','Rosebud','Somerville')
# SECURITY(review): API keys committed in plain text -- treat as compromised;
# they should be revoked and supplied via environment variables instead.
key1 = 'ff866ef6f69b8e3a15bf229dfaeb6de3'
key2 = '99378b51db2be03b10fcf53fa607f012'
key3 = '436ccd4578d0387765bc95d5aeafda4d'
key4 = '0271743913d22592682a7e8e502daad8'
key5 = 'fe6bcdd36b02e450d7bbc0677b745ab7'
### colour palette for heatmap ----------------------------------
# mypal <- colorQuantile(palette = "Reds", domain = vic$Ratio, n = 5, reverse = TRUE)
mypal_tk <- colorQuantile(palette = "Blues", domain = vic$TK, n = 5, reverse = TRUE)
# mypal_lb <- colorQuantile(palette = "Greens", domain = vic$LB, n = 5, reverse = TRUE)
# mypal_me <- colorQuantile(palette = "Reds", domain = vic$ME, n = 5, reverse = TRUE)
##################################################################################
### New codes - legend html for price
##################################################################################
#html_legend_price <- img(src="https://i.ibb.co/s13tvbN/price-range.jpg", width = 200, high = 100 )
#html_legend_price <- '<img src = "https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png"/>'
### control group: HTML labels (Font Awesome icons) for the layer-control box
control_group <- c("<div style = 'position: relative; display: inline-block'><i class='fa fa-graduation-cap fa-lg'></i></div> School",
                   "<div style = 'display: inline-block'><i class='fa fa-gavel fa-lg'></i></div> Legal Facility",
                   "<div style = 'display: inline-block'><i class='fa fa-child fa-lg'></i></div> Childcare Facility",
                   "<div style = 'display: inline-block'><i class='fa fa-train fa-lg'></i></div> Train Stations",
                   "<div style = 'display: inline-block'><i class='fa fa-subway fa-lg'></i></div> Tram Stations",
                   "<div style = 'display: inline-block'><i class='fa fa-bus fa-lg'></i></div> Bus Stations")
####### business counts per suburb (used by get_business_count below)
business_count <- read.csv('data/hair_food_count.csv', stringsAsFactors = F)
# Return the business count (column 5 of business_count) for one suburb,
# or 0 when the suburb is absent from the table.
# BUG FIX(review): the original filtered with `suburb == suburbs`, where
# `suburbs` is not defined in this scope, and then tested `length()` of the
# resulting data frame -- i.e. its column count, which is always positive.
# We now match the data's suburb column against the argument and test the
# row count instead.
# NOTE(review): assumes business_count has a column named `suburb` -- confirm
# against data/hair_food_count.csv.
get_business_count <- function(suburb){
  business_subset <- business_count[business_count$suburb == suburb, , drop = FALSE]
  no <- 0
  if (nrow(business_subset) > 0){
    no <- business_subset[[5]] ###### require update when changing map type
  }
  return(no)
}
########################################################
#### funtion to get routes of bus and tram##############
########################################################
# Extract the distinct route numbers for trams and buses from a GTFS-style
# table. Rows with route_type 0 are trams and route_type 3 are buses; the
# route numbers come back de-duplicated and sorted (via factor levels),
# as a two-element list with tram routes first.
get_routes <- function(df){
  tram_rows <- df[which(df$route_type == 0), , drop = FALSE]
  bus_rows <- df[which(df$route_type == 3), , drop = FALSE]
  tram_routes <- levels(factor(tram_rows$route_short_name))
  bus_routes <- levels(factor(bus_rows$route_short_name))
  list(tram_routes, bus_routes)
}
#D4AF37
#new UI
ui = fluidPage(
style = 'width: 100%; height: 100%',
####### keep app running #####
tags$head(
HTML(
"
<script>
var socket_timeout_interval
var n = 0
$(document).on('shiny:connected', function(event) {
socket_timeout_interval = setInterval(function(){
Shiny.onInputChange('count', n++)
}, 15000)
});
$(document).on('shiny:disconnected', function(event) {
clearInterval(socket_timeout_interval)
});
</script>
"
)
),
####### keep app running #####
setBackgroundColor('#F0F4FF'),
tags$head(tags$style(HTML('#pop_up{background-color:#D4AF37}'))),
shinyjs::useShinyjs(),
useShinyalert(),
tags$br(),
# checkboxInput('recommendation', HTML({paste('<p class="shinyjs-hide" style="color:#D4AF37; margin-top:-5px; font-size:20px"><strong>Recommended Suburbs</strong></p>')}), FALSE),
mainPanel(style = "background: #F0F4FF; width: 100%; height: 100%",
fluidRow(column(7, wellPanel(style = "background: white; width: 100%;",
leafletOutput(outputId = "map", width = '100%', height = '560px') %>% withSpinner(type = '6'),
div(id = 'controls', uiOutput("reset")))), # return button
# column(1, div(id = 'zoomed', style="margin-top: 100px; text-align: center;", htmlOutput(outputId = 'detail'))), # zoomed in info boses. get information from output$detail
column(5, offset = 0,
wellPanel(style = "background: white; height:600px; width: 100%; margin-left: -4%",
# fluidRow(
# div(style="margin-left : 5%; margin-right: 8%",
# div(id = 'default', htmlOutput(outputId = 'help_text'))# default and help text
# )
# ),
# Right-hand panel header: usage instructions plus the second-suburb picker.
div(id = 'input_Panel',
div(id = 'Description',
# fix: 'color:balck' was an invalid CSS value; corrected to 'color:black'
HTML("<p style = 'color:black; font-size:14px; margin-bottom: 5%;'>1. To select <span style = 'color : #D4AF37'><strong>Map Suburb</strong></span>, you can move your mouse (or click) on the map; Also you could use search bar on the map to locate<br><br>2. To add <span style = 'color : #D4AF37'><strong>Second Suburb</strong></span> for comparison, you could use search bar below</p>")
),
# searchable dropdown over every suburb in the shapefile (drives input$search)
pickerInput(
inputId = "search",
width = '70%',
#label = HTML("<p style = 'color:royalblue; font-size:20px; font-weight:bold'>Search</p>"),
#multiple = TRUE,
choices = levels(vic$suburb)
,
#selected = rownames(mtcars)[1:5]
options = list(`live-search` = T,title = "Second Suburb",showIcon = F)
),
h5(style = 'margin-top: 5%',hr())
),
# Comparison grid: one row per metric, left column = hovered "Map Suburb",
# middle column = metric label, right column = searched "Second Suburb".
div(id = 'comparison_panel',
style = 'height: 85%; margin-top: 6%',
fluidRow(# headings
column(5,
div(HTML("<p style = 'color: #D4AF37; text-align: center; font-size:18px; margin-bottom: 10%; margin-left: -20px;'><strong>Map Suburb</strong></p>"))
),
column(2,
tags$br()
#div(HTML({paste('<p style = "font-size:12px; color:black; font-weight:bold; text-align: center; margin-top: 10px">Suburb Name', '</p>')}))
),
column(5,
div(HTML("<p style = 'color:#D4AF37; text-align: center; font-size:18px; margin-bottom: 10%; margin-right: -15px'><strong>Second Suburb</strong></p>"))
)
),
# row0: suburb names (filled by output$suburb_box / output$match_result)
wellPanel(id = 'row0',
style = 'height: 9%;margin-bottom: 3px',
fluidRow( #suburb names
style = 'margin-top: -8px',
column(5,
div(style='font-size:1.6rem; color:#5067EB; text-align: center; margin-left: -140px; margin-right: -100px', id = 'highlevel1', htmlOutput(outputId = 'suburb_box'))
),
column(2,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center; margin-left: -100px; margin-right: -100px">Name', '</p>')}))
),
column(5,
#div('Suburb found'),
div(style='font-size:1.6rem; color:#5067EB; text-align: center; margin-left: -100px; margin-right: -140px', id = 'matchresult', htmlOutput(outputId = 'match_result'))
)
)
),
# row01: customer size
wellPanel(id = 'row01',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(
id = 'highlevel2', htmlOutput(outputId = 'customer_box')) # customer size of suburb hovered
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center; margin-left: -100px; margin-right: -100px">Customer Size', '</p>')}))
),
column(4,
div(
id = 'matchresult1', htmlOutput(outputId = 'customer_match')) # customer size of suburb searched
)
)),
# row02: income level
wellPanel(id = 'row02',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'highlevel3', htmlOutput(outputId = 'income_box')) # income level of suburb hovered
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Income Level', '</p>')}))
),
column(4,
div(id = 'matchresult2', htmlOutput(outputId = 'income_match')) # income level of suburb searched
)
)),
# row03: school counts
wellPanel(id='row03',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'school_zoomed', htmlOutput(outputId = 'school_text'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Schools', '</p>')}))
),
column(4,
div(id = 'matchresult3', htmlOutput(outputId = 'school_match'))
)
)
),
# row04: childcare counts
wellPanel(id = 'row04',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'childcare_zoomed', htmlOutput(outputId = 'childcare_text'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Childcare Facilities', '</p>')}))
),
column(4,
div(id = 'matchresult4', htmlOutput(outputId = 'childcare_match'))
)
)
),
# row05: legal-service counts
wellPanel(id = 'row05',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'legal_zoomed', htmlOutput(outputId = 'legal_text'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Legal Services', '</p>')}))
),
column(4,
div(id = 'matchresult5', htmlOutput(outputId = 'legal_match'))
)
)
),
# row06: existing-business counts
wellPanel(id = 'row06',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'highlevel4', htmlOutput(outputId = 'existing_business'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Existing Business', '</p>')}))
),
column(4,
div(id = 'matchresult6', htmlOutput(outputId = 'existing_business_match'))
)
)
))
)
)
)
),
# invisible output that consumes the keep-alive heartbeat
div(style = '' ,textOutput("keepAlive"))
)
### Server -----------------------------------------------------
server = function(input, output, session){
#### keep alive: consume the heartbeat counter emitted by the UI script
output$keepAlive <- renderText({
req(input$count)
paste("")
})
# Hide the legal-services hover box until a suburb is hovered.
# Fix: was shinyjs::hide('Legal_zoomed'), which matches no element id —
# the div is 'legal_zoomed' (shown again in the mouseover handler).
shinyjs::hide('legal_zoomed')
startup <- reactiveVal(0)
is_zoomed <- reactiveVal(FALSE) # new code: to track whether is zoomed in view or not to disable mouseover
shinyjs::hide('controls') # hiding the control panel of reset button (whose div id is controls) in ui
shinyjs::hide('recommendation_explain') # hide the panel that has the recommendation text in ui
# (help text)
output$help_text <- renderText({
help_text = HTML('<p style="color:royalblue; font-weight:bold; font-size: 16px;text-align: center">Please hover on suburb to see more </p>')
#help_text = 'Please hover on suburb to see more information'
})
# shinyjs::show('default')
# shinyjs::hide('input_Panel')
# shinyjs::hide('comparison_panel')
# shinyjs::hide('highlevel2')
# shinyjs::hide('highlevel3')
# shinyjs::hide('school_zommed')
# shinyjs::hide('legal_zommed')
# shinyjs::hide('childcare_zommed')
# shinyjs::hide('matchresult')
# shinyjs::hide('matchresult1')
# shinyjs::hide('matchresult2')
# shinyjs::hide('matchresult3')
# shinyjs::hide('matchresult4')
# shinyjs::hide('matchresult5')
# shinyjs::hide('matchresult6')
# ### only show at start up - default suburb information to show in the information boxes -----------------------------------------------------
# suburb_to_show <- subset(vic, suburb == 'Caulfield')
#
# # all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'suburb_box')) in ui
#
# output$suburb_box <- renderUI(HTML({paste('<p style = "font-size:20px; color:#D4AF37">', suburb_to_show[1,]$suburb, '</p>')}))
#
# # all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'income_box')) in ui
# output$income_box <- renderUI(HTML({paste('<p style = "font-size:15px; color:black">', '<strong>Income Class:</strong>', suburb_to_show[1,]$income_class, '</p>')}))
#
# # all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'customer_box')) in ui
# output$customer_box <- renderUI(HTML({paste('<p style = "font-size:15px; color:black">', '<strong>Customer Size:</strong>', suburb_to_show[1,]$TK, '</p>')}))
### initialise a global service side reactiveValues object to store supporting services information and to enable passing/changing this variable between
### observers on the server side
# NOTE(review): detail_information is never read or written in this chunk —
# possibly vestigial; confirm against the rest of the file before removing.
detail_information <- reactiveValues()
### default heatmap -----------------------------------------------------
# Builds the default state-wide choropleth: all suburbs coloured by Turkish
# restaurant market size (vic$TK) with a suburb-search box and a legend.
base_map <- function(){
leaflet(options = leafletOptions(zoomControl = T)) %>%
# basemap - no shapes or markers
addProviderTiles(provider = providers$Stamen.TonerLite,
options = providerTileOptions(opacity = 0.8,detectRetina = T,minZoom = 9)) %>%
fitBounds(lng1 = 144.515897, lng2 = 145.626704, lat1 = -37.20, lat2 = -38.50) %>%
setView(lng = (144.515897 + 145.626704) /2 , lat = (-37.20-38.50)/2, zoom = 9) %>%
# plot all suburbs in polygon. colour the shapes based on mypal_me function
# can change shape colour, label style, highlight colour, opacity, etc.
# basically everything visual about the map can be changed through different options
addPolygons(data = vic,
weight = .7,
stroke = T,
fillColor = ~mypal_tk(vic$TK),
fillOpacity = 0.5,
color = "black",
smoothFactor = 0.2,
label = ~suburb,
labelOptions = labelOptions(
opacity = 0),
highlight = highlightOptions(
fill = T,
fillColor = ~mypal_tk(vic$TK),
fillOpacity = .8,
color = ~mypal_tk(vic$TK),
opacity = .5,
bringToFront = TRUE,
sendToBack = TRUE),
group = 'Turkish Restaurants',
layerId = vic$suburb)%>%
#search functions, can use parameters in options to change the looks and the behaviours
addSearchFeatures(
targetGroups = 'Turkish Restaurants',
options = searchFeaturesOptions(
position = 'topleft',
textPlaceholder = 'Map Suburbs', # default text
zoom=10, openPopup = TRUE, firstTipSubmit = TRUE,
collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE )) %>%
##################################################################################
### New codes - add legend
##################################################################################
addLegend("bottomright",
colors =c("#DCE8FF", "#A0C0F6", "#81A4DF", "#6289CD", "#416FBD "),
labels= c("Less","","","", "More"),
title= "Market Size in Melbourne",
opacity = 1)
}
# holds the current map so the reset button can restore the default view
react_map <- reactiveVal(base_map())
### radio button map — render whatever map object react_map() currently holds
output$map <- renderLeaflet({
react_map()
})
#newUI
# NOTE(review): the paragraph below repeats placeholder filler text —
# looks like an unfinished draft; confirm the intended copy with the author.
output$recommendation_text <- renderText(
HTML('<p style="color:white; font-weight:bold; font-size: 14px; margin-left: -60px">
<br/>Recommendation is calculated based on Recommendation is calculated based on Recommendation is calculated based onRecommendation is calculated based onRecommendation is calculated based onRecommendation is calculated based onRecommendation is calculated based on <span style="color:#D4AF37"></span> are:
<br/><span style="color:red; font-size: 25px; margin-left: 20px; font-weight:bold"></span></p>')
)
### function to subset supporting information -----------------------------------------------------
# Collect the supporting-services data for one suburb.
# Returns an unnamed list (positional, as consumed by the click observer):
#   [[1]] school data frame (or '' when none), [[2]] school count,
#   [[3]] legal data frame (or ''),            [[4]] legal count,
#   [[5]] childcare data frame (or ''),        [[6]] childcare count.
# Counts are integers when data exists, otherwise the display strings
# 'Coming Soon' / 'No Schools Found'.
supporting_info <- function(suburb){
# legal-data coverage is used as a proxy for childcare coverage as well
if (suburb %in% legal$Suburb) {
legal_to_show <- subset(legal, Suburb == suburb)
legal_count <- nrow(legal_to_show)
childcare_to_show <- subset(childcare, Suburb == suburb)
childcare_count <- nrow(childcare_to_show)
} else {
legal_count <- 'Coming Soon'
legal_to_show <- ''
childcare_count <- 'Coming Soon'
childcare_to_show <- ''
}
if (suburb %in% school$Address_Town) {
school_to_show <- subset(school, Address_Town == suburb) # school
school_count <- nrow(school_to_show)
} else {
school_to_show <- ''
school_count <- 'No Schools Found'
}
list(school_to_show,
school_count,
legal_to_show,
legal_count,
childcare_to_show,
childcare_count)
}
#### search behaviour -----------------------------------------------------
# Fill the right-hand "Second Suburb" comparison column whenever a suburb is
# picked from the search dropdown.
observeEvent(input$search, {
selected_suburb <- input$search
suburb_to_show <- subset(vic, suburb == selected_suburb)
if (selected_suburb %in% levels(vic$suburb)){
#### match information box ####
#print(suburb_to_show@data[["Ratio"]])
# # suburb name
output$match_result <- renderText({ input$search })
# # customer size
customer_output = HTML({paste('<p style = "font-size:15px; color:black;text-align: center;;">', suburb_to_show@data[["Ratio"]], '</p>')})
output$customer_match <- renderText({customer_output})
# # income level
income_output = HTML({paste('<p style = "font-size:15px; color:black;text-align: center;;">', suburb_to_show@data[["income_class"]], '</p>')})
output$income_match<- renderText({income_output})
print(income_output)
# # school
school_output = HTML('<p style="color:black; font-size: 15px; text-align: center;">',length(subset(school, Address_Town == selected_suburb)[,1]),'</p>')
output$school_match <- renderText({school_output})
# # childcare — fix: was hard-coded "5"; count rows like the hover handler does
childcare_output = HTML('<p style="color:black; font-size: 15px; text-align: center;">', length(subset(childcare, Suburb == selected_suburb)[,1]), '</p>')
output$childcare_match <- renderText({childcare_output})
# # legal — fix: was hard-coded "5"; count rows like the hover handler does
legal_output = HTML('<p style="color:black; font-size: 15px; text-align: center;">', length(subset(legal, Suburb == selected_suburb)[,1]), '</p>')
output$legal_match <- renderText({legal_output})
####### existing business
output$existing_business_match <- renderText({
HTML({paste('<p style="color:black; font-size: 15px; text-align: center;">',get_business_count(input$search),'</p>')})
})
#### match information box end ####
# show match result when a suburb selected
print(selected_suburb)
shinyjs::show('matchresult')
shinyjs::show('matchresult1')
shinyjs::show('matchresult2')
shinyjs::show('matchresult3')
shinyjs::show('matchresult4')
shinyjs::show('matchresult5')
shinyjs::show('matchresult6')
}
})
#### hoverover behaviour -----------------------------------------------------
#### hoverover suburb to see details (left comparison column); suppressed while zoomed in
observeEvent(input$map_shape_mouseover$id, {
# fix: `startup <- startup() + 1` only made a local copy and never updated
# the reactiveVal; call the setter instead (no observer reads it, so this
# only records the count as intended)
startup(startup() + 1)
if (is_zoomed() == FALSE){
req(input$map_shape_mouseover$id)
#shinyjs::hide('default') # hide help text
# shinyjs::show('input_Panel')
# shinyjs::show('comparison_panel') # correspond to div class id highlevel1 in UI file - just show panel, doesn't change the content
# shinyjs::show('highlevel2') # correspond to div class id highlevel2 in UI file - just show panel, doesn't change the content
# shinyjs::show('highlevel3') # correspond to div class id highlevel3 in UI file - just show panel, doesn't change the content
# shinyjs::show('highlevel4')
selected_suburb <- input$map_shape_mouseover$id
suburb_to_show <- subset(vic, suburb == selected_suburb)
### overwrite the default texts
# all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'suburb_box')) in ui
output$suburb_box <- renderUI(HTML({paste('<p>', suburb_to_show[1,]$suburb, '</p>')}))
# all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'income_box')) in ui
output$income_box <- renderUI(HTML({paste('<p style = "color:black; font-size: 15px; text-align: center;;">', suburb_to_show[1,]$income_class,'</p>')}))
# all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'customer_box')) in ui
output$customer_box <- renderUI(HTML({paste('<p style = "color:black; font-size: 15px; text-align: center;">', suburb_to_show[1,]$TK, '</p>')}))
#### new information box ####
# school count for the hovered suburb
output$school_text <- renderText({
HTML('<p style="color:black; font-size: 15px; text-align: center;">',length(subset(school, Address_Town == selected_suburb)[,1]),'</p>')
})
shinyjs::show('school_zoomed')
# childcare count for the hovered suburb
output$childcare_text <- renderText({
HTML('<p style="color:black; font-size: 15px; text-align: center;">', length(subset(childcare, Suburb == selected_suburb)[,1]),'</p>')
})
shinyjs::show('childcare_zoomed')
# legal-service count for the hovered suburb
output$legal_text <- renderText({
HTML('<p style="color:black; font-size: 15px; text-align: center;">',length(subset(legal, Suburb == selected_suburb)[,1]),'</p>')
})
shinyjs::show('legal_zoomed')
#### new information box end ####
####### existing business
output$existing_business <- renderText({
HTML({paste('<p style="color:black;font-size: 15px; text-align: center;"> ',get_business_count(selected_suburb),'</p>')})
})
}
})
#### observer to listen to the behaviour of reset button, when it's clicked do... -----------------------------------------------------
# Restores the default state-wide heatmap, leaves zoomed mode, and clears the
# legal text box.
observeEvent(input$reset_button, {
react_map(base_map()) # show the default heatmap
output$map <- renderLeaflet({
react_map()
})
is_zoomed(FALSE)
# hide everything and show help text
#shinyjs::show('default')
shinyjs::hide('controls') # hiding the control panel of reset button in ui
# shinyjs::hide('input_Panel')
# shinyjs::hide('comparison_panel')
# shinyjs::hide('highlevel2')
# shinyjs::hide('highlevel3')
# shinyjs::hide('highlevel4')
# shinyjs::hide('school_zoomed')
# shinyjs::hide('Legal_zoomed')
# shinyjs::hide('childcare_zoomed')
#shinyjs::hide('matchresult')
legal_info = ' '
output$legal_text <- renderText({
legal_info
})
# shinyjs::hide('detail1') # correspond to div class id highlevel1 in UI file - just show panel, doesn't change the content
# shinyjs::hide('detail2') # correspond to div class id highlevel2 in UI file - just show panel, doesn't change the content
# shinyjs::hide('detail3') # correspond to div class id highlevel3 in UI file - just show panel, doesn't change the content
# reset and show ui. correspond to div classes in ui
#shinyjs::reset('recommendation') # reset the checkbox option to FALSE in checkboxInput('recommendation', 'Show Recommendations', FALSE)
#shinyjs::show('checkbox1') # show the panel that has the recommendation checkbox in ui
})
### return button is in a panel feed to line 98 -----------------------------------------------------
# renders the floating "Back" button shown only in zoomed view
output$reset <- renderUI({
absolutePanel(id = "controls", top = "auto", left = 50,
right = "auto", bottom = 70, width = "auto", height = "auto",
actionButton(inputId = "reset_button", label = "Back", class = "btn-primary") # can style the button here
)
})
# information boxes for zoomed in version, feed to div class 'zoomed' in UI file. line 101-----------------------------------------------------
### check box UI. text : Show Recommendations, default setting is FALSE (unchecked )
#output$checkbox_rec <- renderUI({checkboxInput('recommendation', HTML({paste('<p style="color:#D4AF37; margin-top:-5px; font-size:20px"><strong>Recommended Suburbs</strong></p>')}), FALSE)})
###################################################
############## For Zomato API - start ############
###################################################
# Map a suburb name to its Zomato city id: towns in the global `cities` list
# (Mornington Peninsula) use id 1543; everything else defaults to 259.
# Replaces the original accumulator loop with a membership test (same result).
get_city_ID <- function(suburb){
if (suburb %in% cities) 1543 else 259
}
## api request function
# Query the Zomato API for restaurants of one cuisine in one city.
# Returns a data frame of selected columns on success, or TRUE — a logical
# sentinel that no_restaurants() interprets as "zero results".
search <- function(cityid, api_key, cuisine,query = NULL) {
zmt <- zomato$new(api_key)
an.error.occured <- FALSE
## catch the error when no result is found
tryCatch( { restaurants <- zmt$search(entity_type = 'city', entity_id = cityid, query = query, cuisine = cuisine)}
, error = function(e) {an.error.occured <<- TRUE})
if (an.error.occured == FALSE){
colnames(restaurants) <- make.unique(names(restaurants))
data <- dplyr::select(restaurants, id,name,cuisines,locality,longitude,latitude,price_range, average_cost_for_two)
return(data)
}
else{
# explicit sentinel return (the original `no_result = TRUE` relied on the
# invisible value of an assignment being the function's result)
return(TRUE)
}
}
## a function to get no_restaurants
# Count restaurants returned by search(); a logical sentinel (the "no result"
# return value of search()) means zero restaurants.
no_restaurants <- function(data) {
if (typeof(data) == 'logical') 0 else length(data$name)
}
###################################################
############## For Zomato API - end ############
###################################################
# observer to listen to clicking on a shape in the map. when there's a click on a suburb, do the following part 1-----------------------------------------------------
# Part 1 only flags zoomed mode (disables the mouseover handler); part 2 below
# does the actual map rebuild. The dead locals `click`/`selected_suburb` were
# removed — part 2 recomputes them itself.
observeEvent(input$map_shape_click, {
is_zoomed(TRUE)
print (paste0('mapshape is', input$map_shape_click$id))
})
# observer to listen to clicking on a shape in the map. when there's a click on a suburb, do the following part 2 -----------------------------------------------------
# Zoom into the clicked suburb and overlay its schools / legal / childcare /
# transport markers.
observeEvent(input$map_shape_click, {
print (paste0('mapshape is', input$map_shape_click$id))
shinyjs::show('controls') # show the absoluatePanel that has the control button object in ui
shinyjs::hide('checkbox1') # # hide the checkbox panel in ui
# subset data based on shape click
click <- input$map_shape_click
selected_suburb <- click$id # return suburb name
### transport data reader (loaded lazily when first used)
transport <- reactive({
df = read.csv('data/transport.csv', stringsAsFactors = F)
})
if (!is.null(selected_suburb)){
suburb_to_show <- subset(vic, suburb == selected_suburb) # suburb df, customer size, and income
boundary <- suburb_to_show@polygons[[1]]@Polygons[[1]]@coords # suburb boundary
# fetch the supporting-services bundle ONCE — the original called
# supporting_info() six times, repeating the same subsets for every element
info <- supporting_info(selected_suburb)
school_to_show <- info[[1]] # school df
school_count <- info[[2]][1]
legal_to_show <- info[[3]] # legal df
legal_count <- info[[4]][1]
childcare_to_show <- info[[5]] # childcare df
childcare_count <- info[[6]][1]
#### tooltip/popup styleing for clicking on a marker (school) ----------------------------------
# Build one HTML popup string per row; with zero rows sapply yields an empty
# list, so the addAwesomeMarkers calls below receive no popups.
labs_school <- sapply(seq(nrow(school_to_show)), function(i) {
# paste0 is used to Concatenate Strings
paste0( '<p>', 'Name: ', school_to_show[i,]$School_Name, '<br/>',
'Address: ', school_to_show[i,]$Address_Line, ' ', school_to_show[i,]$Address_Town,' ', school_to_show[i,]$Address_Postcode, '<br/>',
'Phone: ', school_to_show[i,]$Full_Phone_No, '<br/>',
'Type: ', school_to_show[i,]$School_Type,
'<p>')
})
#### tooltip/popup styleing for clicking on a marker (childcare) ----------------------------------
labs_childcare <- sapply(seq(nrow(childcare_to_show)), function(i) {
# paste0 is used to Concatenate Strings
paste0( '<p>', 'Name: ', childcare_to_show[i,]$Name.of.Business, '<br/>',
'Address: ', childcare_to_show[i,]$Address, '<br/>',
'Phone: ', childcare_to_show[i,]$Phone.Number , '<p>')
})
#### tooltip/popup styleing for clicking on a marker (legal) ----------------------------------
labs_legal <- sapply(seq(nrow(legal_to_show)), function(i) {
# paste0 is used to Concatenate Strings
paste0( '<p>',
'Name: ', legal_to_show[i,]$Name.of.Business, '<br/>',
'Type: ', legal_to_show[i,]$Business.Type, '<br/>',
'Address: ', legal_to_show[i,]$Address, '<br/>',
'Phone: ', legal_to_show[i,]$Phone.Number , '<p>')
})
#### send new commands to the leaflet instance "map" we create in line 133 (base_map) ----------------------------------
#### since they have the same variable names, leaflet will just change it based on the following codes
# leafletProxy('map') %>% # telling leaflet which instance (map) to change
# clearControls() %>% # clear all the control filters
# clearShapes() %>% # clear all the polygons
# clearMarkers() %>% # clear all the markers
# Rebuild the map zoomed to the clicked suburb's bounding box, with layer
# checkboxes for the marker groups (transport layers unchecked by default).
output$map <- renderLeaflet({
#react_map()
leaflet(options = leafletOptions(zoomControl = T)) %>%
# basemap - no shapes or markers
addProviderTiles(provider = providers$Stamen.TonerLite,
options = providerTileOptions(opacity = 0.8,detectRetina = T,minZoom = 9)) %>%
fitBounds(lng1 = max(boundary[,1]),lat1 = max(boundary[,2]), # set the view to only see this suburb
lng2 = min(boundary[,1]),lat2 = min(boundary[,2]),
options = options(zoom = 9)) %>%
##################################################################################
### New codes - argument addLayersControl (add image behind checkbox filters
##################################################################################
addLayersControl(overlayGroups = control_group,
options = layersControlOptions(collapsed = F)) %>%
##################################################################################
### New codes - argument hidecGroup (default setting to uncheck layers)
##################################################################################
hideGroup(group = control_group[5:6]) %>%
# plot all the suburbs polygon but don't show the shapes in order to keep the colouring the same for this suburb
addPolygons(data = vic,
weight = 0,
stroke = 0,
fillColor = ~mypal_tk(vic$TK), # heatmap colour
fillOpacity = 0,
label = ~suburb,
labelOptions = labelOptions(
opacity = 0),
group = 'Turkish Restaurants',
layerId = vic$suburb) %>%
# plot the selected suburb, colour it
addPolygons(data = suburb_to_show,
weight = 4, # the weight of the boundary line
stroke = T, # the boundary
fillColor = ~mypal_tk(vic$TK), # heatmap colour
fillOpacity = 0.003,
color = "black",
smoothFactor = 0.7,
label = ~suburb,
labelOptions = # dont show the label
labelOptions(
opacity = 0))
})
### if the suburb has legal/childcare services AND schools
if (selected_suburb %in% legal$Suburb && selected_suburb %in% school$Address_Town)
{
# change the map view - zoom in and then add schools
leafletProxy('map') %>%
# plot schools in this suburb
addAwesomeMarkers(data = school_to_show,
lng = ~ X,
lat = ~ Y,
icon = awesomeIcons( # use awesome icons. can look up icons online to
icon = "graduation-cap",
library = "fa",
markerColor = "lightred"),
popup = lapply(labs_school, HTML),
popupOptions = popupOptions(noHide = F, # use css to style pop up box
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes - argument group (change group name checkbox display)
##################################################################################
group = control_group[1]) %>%
# plot legal in this suburb
addAwesomeMarkers(data = legal_to_show,
lng = ~ Longitude,
lat = ~ Latitude,
icon = awesomeIcons(
icon = "gavel",
library = "fa",
markerColor = "purple"),
popup = lapply(labs_legal, HTML),
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes - argument group (change group name checkbox display)
##################################################################################
group = control_group[2]) %>%
# plot childcare in this suburb
addAwesomeMarkers(data = childcare_to_show,
lng = ~ Longitude,
lat = ~ Latitude,
icon = awesomeIcons(
icon = "child",
library = "fa",
markerColor = "green"),
popup = lapply(labs_childcare, HTML),
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes : argument group (change group name checkbox display)
##################################################################################
group = control_group[3])
# search function
# addSearchFeatures(
# targetGroups = 'Turkish Restaurants',
# options = searchFeaturesOptions(
# position = 'topleft',
# textPlaceholder = 'Search Suburbs',
# zoom=12, openPopup = TRUE, firstTipSubmit = TRUE,
# collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE ))
}
####transport####
# read and subset transport data based on selected suburb
transport_to_show <- subset(transport(), suburb == selected_suburb)
### if the suburb only has schools but no legal/childcare services, but has transport, do this
if (selected_suburb %in% school$Address_Town && length(transport_to_show$suburb) > 0) {
# train
train_to_show <- subset(transport_to_show,route_type == 2)
print(train_to_show$stop_name)
# tram
tram_to_show <- subset(transport_to_show,route_type == 0)
# bus
bus_to_show <- subset(transport_to_show,route_type == 3)
leafletProxy('map') %>%
# plot schools in this suburb
addAwesomeMarkers(data = school_to_show,
lng = ~ X,
lat = ~ Y,
icon = awesomeIcons(
icon = "graduation-cap",
library = "fa",
markerColor = "lightred"),
popup = lapply(labs_school, HTML),
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes - argument group (change group name checkbox display)
##################################################################################
group = control_group[1]) %>%
# plot Trains in this suburb
addAwesomeMarkers(data = train_to_show,
lat = train_to_show$stop_lat,
lng = train_to_show$stop_lon,
icon = awesomeIcons(
icon = "train",
library = "fa",
markerColor = "blue"),
popup = train_to_show$stop_name,
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes : argument group (change group name checkbox display)
##################################################################################
group = control_group[4]) %>%
# plot Tram in this suburb
addAwesomeMarkers(data = tram_to_show,
lat = tram_to_show$stop_lat,
lng = tram_to_show$stop_lon,
icon = awesomeIcons(
icon = "subway",
library = "fa",
markerColor = "pink"),
popup = tram_to_show$stop_name,
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes : argument group (change group name checkbox display)
##################################################################################
group = control_group[5]) %>%
# plot Bus in this suburb
addAwesomeMarkers(data = bus_to_show,
lat = bus_to_show$stop_lat,
lng = bus_to_show$stop_lon,
icon = awesomeIcons(
icon = "bus",
library = "fa",
markerColor = "orange"),
popup = bus_to_show$stop_name,
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes : argument group (change group name checkbox display)
##################################################################################
group = control_group[6])
# search function
# addSearchFeatures(
# targetGroups = 'Turkish Restaurants',
# options = searchFeaturesOptions(
# position = 'topright',
# textPlaceholder = 'Search suburbs',
# zoom=12, openPopup = TRUE, firstTipSubmit = TRUE,
# collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE ))
#
}
# transport markers + route/station counts (runs whenever the suburb has any
# transport rows; note this can add markers on top of the branch above)
if (length(transport_to_show$suburb) > 0) {
# train #
train_to_show <- subset(transport_to_show,route_type == 2)
# no. of train stations
output$train_stations_count <- renderText({
length(train_to_show[[1]])
})
print(length(train_to_show[[1]]))
# tram #
tram_to_show <- subset(transport_to_show,route_type == 0)
# tram routes count
output$tram_routes_count <-renderText({
length(get_routes(tram_to_show)[[1]])
})
# bus #
bus_to_show <- subset(transport_to_show,route_type == 3)
# bus routes count
# NOTE(review): bus uses get_routes(...)[[2]] while tram uses [[1]] —
# looks inconsistent; confirm against get_routes()'s return structure.
output$bus_routes_count <-renderText({
length(get_routes(bus_to_show)[[2]])
})
leafletProxy('map') %>%
# plot Trains in this suburb
addAwesomeMarkers(data = train_to_show,
lat = train_to_show$stop_lat,
lng = train_to_show$stop_lon,
icon = awesomeIcons(
icon = "train",
library = "fa",
markerColor = "blue"),
popup = train_to_show$stop_name,
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes : argument group (change group name checkbox display)
##################################################################################
group = control_group[4]) %>%
# plot Tram in this suburb
addAwesomeMarkers(data = tram_to_show,
lat = tram_to_show$stop_lat,
lng = tram_to_show$stop_lon,
icon = awesomeIcons(
icon = "subway",
library = "fa",
markerColor = "pink"),
popup = paste('Tram Route:',tram_to_show$route_short_name),
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes : argument group (change group name checkbox display)
##################################################################################
group = control_group[5]) %>%
# plot Bus in this suburb
addAwesomeMarkers(data = bus_to_show,
lat = bus_to_show$stop_lat,
lng = bus_to_show$stop_lon,
icon = awesomeIcons(
icon = "bus",
library = "fa",
markerColor = "orange"),
popup = paste('Bus Route:',bus_to_show$route_short_name),
popupOptions = popupOptions(noHide = F,
direction = "center",
style = list(
"color" = "black",
"font-family" = "open sans",
"box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
"font-size" = "13px",
"border-color" = "rgba(0,0,0,0.5)")),
##################################################################################
### New codes : argument group (change group name checkbox display)
##################################################################################
group = control_group[6])
# search function
# addSearchFeatures(
# targetGroups = 'Turkish Restaurants',
# options = searchFeaturesOptions(
# position = 'topright',
# textPlaceholder = 'Search suburbs',
# zoom=12, openPopup = TRUE, firstTipSubmit = TRUE,
# collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE ))
}
}
})
}
# launch the application with the UI and server defined above
shinyApp(ui = ui, server = server)
| /Food/TK/Search and Comparison/app.R | no_license | di0nys1s/Supporting_New_Australians | R | false | false | 59,766 | r |
####### TK - Search and Comparison
########################################################
### Turkish
########################################################
library(rgdal)
library(magrittr)
library(leaflet)
library(htmltools)
library(shinydashboard)
library(dashboardthemes)
library(shinyjs)
library(shiny)
library(leaflet.extras)
library(shinyWidgets)
#devtools::install_github("dreamRs/shinyWidgets")
# library(romato)
# devtools::install_github('andrewsali/shinycssloaders')
library(shinycssloaders) # loading spinner while the leaflet map renders
library(shinyalert) # package for pop-up dialogs
#### suburb profile ----------------------------------
#
# Read in the merged SA2 suburb-boundary shapefile for Victoria.
vic <- readOGR(dsn = path.expand('data/2016_SA2_shape'), layer = 'merged_all')
# load cuisine ranking file
# cuisine_top10 <- read.csv('data/cuisine_top10.csv', stringsAsFactors = TRUE)
#ranking <- read.csv('data/total_ranking_allbusiness.csv', stringsAsFactors = FALSE)
# Load the childcare, legal-services and school reference tables.
legal <- read.csv('data/legal_services.csv', stringsAsFactors = FALSE)
childcare <- read.csv('data/childcare.csv', stringsAsFactors = FALSE)
school <- read.csv('data/greaterM_school.csv', stringsAsFactors = FALSE)
# Rename the shapefile attribute columns to readable names.
# (Removed the dead `name <- names(vic)` assignment that was immediately
# overwritten by the literal vector below.)
name <- c('suburb', 'Ratio', 'Population', 'income_class', 'LB', 'ME', 'TK')
names(vic) <- name
# Suburb polygons that have at least one childcare / legal / school record.
childcare_suburb <- subset(vic, suburb %in% childcare$Suburb)
legal_suburb <- subset(vic, suburb %in% legal$Suburb)
school_suburb <- subset(vic, suburb %in% school$Address_Town)
# Zomato cuisine ids keyed by cuisine code (used when querying the API).
cuisine_reference <- list()
cuisine_reference[["MDE"]] <- '137'
cuisine_reference[["TK"]] <- '142'
cuisine_reference[["LB"]] <- '66'
cuisine_to_search <- cuisine_reference[["TK"]]
# Suburbs treated as a separate Zomato "city" entity (see get_city_ID below).
cities <- list('Pearcedale','Dromana','Flinders','Hastings','Mornington','Mount Eliza','Rosebud','Somerville')
# SECURITY NOTE(review): API keys are hard-coded in source. They should be
# moved out of the repository (e.g. Sys.getenv) and rotated.
key1 <- 'ff866ef6f69b8e3a15bf229dfaeb6de3'
key2 <- '99378b51db2be03b10fcf53fa607f012'
key3 <- '436ccd4578d0387765bc95d5aeafda4d'
key4 <- '0271743913d22592682a7e8e502daad8'
key5 <- 'fe6bcdd36b02e450d7bbc0677b745ab7'
### colour palette for heatmap ----------------------------------
# mypal <- colorQuantile(palette = "Reds", domain = vic$Ratio, n = 5, reverse = TRUE)
mypal_tk <- colorQuantile(palette = "Blues", domain = vic$TK, n = 5, reverse = TRUE)
# mypal_lb <- colorQuantile(palette = "Greens", domain = vic$LB, n = 5, reverse = TRUE)
# mypal_me <- colorQuantile(palette = "Reds", domain = vic$ME, n = 5, reverse = TRUE)
##################################################################################
### New codes - legend html for price
##################################################################################
#html_legend_price <- img(src="https://i.ibb.co/s13tvbN/price-range.jpg", width = 200, high = 100 )
#html_legend_price <- '<img src = "https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png"/>'
### Checkbox labels (with font-awesome icons) for the leaflet layers control.
control_group <- c("<div style = 'position: relative; display: inline-block'><i class='fa fa-graduation-cap fa-lg'></i></div> School",
"<div style = 'display: inline-block'><i class='fa fa-gavel fa-lg'></i></div> Legal Facility",
"<div style = 'display: inline-block'><i class='fa fa-child fa-lg'></i></div> Childcare Facility",
"<div style = 'display: inline-block'><i class='fa fa-train fa-lg'></i></div> Train Stations",
"<div style = 'display: inline-block'><i class='fa fa-subway fa-lg'></i></div> Tram Stations",
"<div style = 'display: inline-block'><i class='fa fa-bus fa-lg'></i></div> Bus Stations")
####### Per-suburb counts of existing hair/food businesses,
####### consumed by get_business_count() below.
business_count <- read.csv('data/hair_food_count.csv', stringsAsFactors = FALSE)
# Count of existing businesses in a given suburb.
#
# suburb: suburb name (character scalar) to look up in the file-level
#         `business_count` table.
# Returns the value(s) of the 5th column of `business_count` for the matching
# row(s), or 0 when the suburb has no record.
#
# Fix: the original compared against the undefined name `suburbs` inside
# `subset()` and tested `length()` (number of columns) instead of `nrow()`
# (number of matching rows).
get_business_count <- function(suburb){
  # which() drops NA matches, mirroring subset()'s NA-as-FALSE behaviour.
  matches <- business_count[which(business_count$suburb == suburb), , drop = FALSE]
  no <- 0
  if (nrow(matches) > 0){
    no <- matches[[5]] ###### require update when changing map type
  }
  return(no)
}
########################################################
#### function to get routes of bus and tram############
########################################################
# Split a transport data frame into tram and bus route-name lists.
#
# df: data frame with columns `route_type` (0 = tram, 3 = bus) and
#     `route_short_name`.
# Returns a list of two character vectors: the sorted unique tram route
# names, then the sorted unique bus route names.
get_routes <- function(df){
  # Helper: sorted unique route names for one GTFS route_type code.
  # which() drops NA route_type rows, matching subset()'s behaviour.
  routes_of_type <- function(type_code) {
    rows <- df[which(df$route_type == type_code), , drop = FALSE]
    levels(factor(rows$route_short_name))
  }
  list(routes_of_type(0), routes_of_type(3))
}
#D4AF37
#new UI
# UI definition: a full-width page with the leaflet suburb heatmap on the
# left (width 7) and a side-by-side comparison panel on the right (width 5)
# that contrasts the hovered "Map Suburb" against a searched "Second Suburb".
ui = fluidPage(
style = 'width: 100%; height: 100%',
####### keep app running #####
# Heartbeat: injected JS pushes a counter into input$count every 15 seconds
# while the client is connected, so the session is not idle-timed-out.
tags$head(
HTML(
"
<script>
var socket_timeout_interval
var n = 0
$(document).on('shiny:connected', function(event) {
socket_timeout_interval = setInterval(function(){
Shiny.onInputChange('count', n++)
}, 15000)
});
$(document).on('shiny:disconnected', function(event) {
clearInterval(socket_timeout_interval)
});
</script>
"
)
),
####### keep app running #####
setBackgroundColor('#F0F4FF'),
tags$head(tags$style(HTML('#pop_up{background-color:#D4AF37}'))),
shinyjs::useShinyjs(),
useShinyalert(),
tags$br(),
# checkboxInput('recommendation', HTML({paste('<p class="shinyjs-hide" style="color:#D4AF37; margin-top:-5px; font-size:20px"><strong>Recommended Suburbs</strong></p>')}), FALSE),
# Main layout: map column + comparison column.
mainPanel(style = "background: #F0F4FF; width: 100%; height: 100%",
fluidRow(column(7, wellPanel(style = "background: white; width: 100%;",
leafletOutput(outputId = "map", width = '100%', height = '560px') %>% withSpinner(type = '6'),
div(id = 'controls', uiOutput("reset")))), # return button
# column(1, div(id = 'zoomed', style="margin-top: 100px; text-align: center;", htmlOutput(outputId = 'detail'))), # zoomed in info boses. get information from output$detail
column(5, offset = 0,
wellPanel(style = "background: white; height:600px; width: 100%; margin-left: -4%",
# fluidRow(
# div(style="margin-left : 5%; margin-right: 8%",
# div(id = 'default', htmlOutput(outputId = 'help_text'))# default and help text
# )
# ),
# Instructions + the "Second Suburb" search box.
div(id = 'input_Panel',
div(id = 'Description',
# NOTE(review): 'balck' in the inline CSS below is a typo for 'black'
# (runtime string left unchanged here; the colour silently falls back).
HTML("<p style = 'color:balck; font-size:14px; margin-bottom: 5%;'>1. To select <span style = 'color : #D4AF37'><strong>Map Suburb</strong></span>, you can move your mouse (or click) on the map; Also you could use search bar on the map to locate<br><br>2. To add <span style = 'color : #D4AF37'><strong>Second Suburb</strong></span> for comparison, you could use search bar below</p>")
),
# Searchable drop-down of all suburb names; selection drives input$search.
pickerInput(
inputId = "search",
width = '70%',
#label = HTML("<p style = 'color:royalblue; font-size:20px; font-weight:bold'>Search</p>"),
#multiple = TRUE,
choices = levels(vic$suburb)
,
#selected = rownames(mtcars)[1:5]
options = list(`live-search` = T,title = "Second Suburb",showIcon = F)
),
h5(style = 'margin-top: 5%',hr())
),
# Comparison table: one wellPanel row per metric; left cell = hovered
# map suburb, middle cell = metric label, right cell = searched suburb.
div(id = 'comparison_panel',
style = 'height: 85%; margin-top: 6%',
fluidRow(# headings
column(5,
div(HTML("<p style = 'color: #D4AF37; text-align: center; font-size:18px; margin-bottom: 10%; margin-left: -20px;'><strong>Map Suburb</strong></p>"))
),
column(2,
tags$br()
#div(HTML({paste('<p style = "font-size:12px; color:black; font-weight:bold; text-align: center; margin-top: 10px">Suburb Name', '</p>')}))
),
column(5,
div(HTML("<p style = 'color:#D4AF37; text-align: center; font-size:18px; margin-bottom: 10%; margin-right: -15px'><strong>Second Suburb</strong></p>"))
)
),
# Row 0: suburb names.
wellPanel(id = 'row0',
style = 'height: 9%;margin-bottom: 3px',
fluidRow( #suburb names
style = 'margin-top: -8px',
column(5,
div(style='font-size:1.6rem; color:#5067EB; text-align: center; margin-left: -140px; margin-right: -100px', id = 'highlevel1', htmlOutput(outputId = 'suburb_box'))
),
column(2,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center; margin-left: -100px; margin-right: -100px">Name', '</p>')}))
),
column(5,
#div('Suburb found'),
div(style='font-size:1.6rem; color:#5067EB; text-align: center; margin-left: -100px; margin-right: -140px', id = 'matchresult', htmlOutput(outputId = 'match_result'))
)
)
),
# Row 01: customer size.
wellPanel(id = 'row01',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(
id = 'highlevel2', htmlOutput(outputId = 'customer_box')) # customer size of suburb hovered
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center; margin-left: -100px; margin-right: -100px">Customer Size', '</p>')}))
),
column(4,
div(
id = 'matchresult1', htmlOutput(outputId = 'customer_match')) # customer size of suburb searched
)
)),
# Row 02: income level.
wellPanel(id = 'row02',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'highlevel3', htmlOutput(outputId = 'income_box')) # income level of suburb hovered
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Income Level', '</p>')}))
),
column(4,
div(id = 'matchresult2', htmlOutput(outputId = 'income_match')) # income level of suburb searched
)
)),
# Row 03: school counts.
wellPanel(id='row03',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'school_zoomed', htmlOutput(outputId = 'school_text'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Schools', '</p>')}))
),
column(4,
div(id = 'matchresult3', htmlOutput(outputId = 'school_match'))
)
)
),
# Row 04: childcare facility counts.
wellPanel(id = 'row04',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'childcare_zoomed', htmlOutput(outputId = 'childcare_text'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Childcare Facilities', '</p>')}))
),
column(4,
div(id = 'matchresult4', htmlOutput(outputId = 'childcare_match'))
)
)
),
# Row 05: legal service counts.
wellPanel(id = 'row05',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'legal_zoomed', htmlOutput(outputId = 'legal_text'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Legal Services', '</p>')}))
),
column(4,
div(id = 'matchresult5', htmlOutput(outputId = 'legal_match'))
)
)
),
# Row 06: counts of existing competitor businesses.
wellPanel(id = 'row06',
style = 'height: 8%;margin-bottom: 3px',
fluidRow(
style = 'margin-top: -8px',
column(4,
div(id = 'highlevel4', htmlOutput(outputId = 'existing_business'))
),
column(4,
div(HTML({paste('<p style = "font-size:1.5rem; color:black; font-weight:bold; text-align: center;margin-left: -100px; margin-right: -100px">Existing Business', '</p>')}))
),
column(4,
div(id = 'matchresult6', htmlOutput(outputId = 'existing_business_match'))
)
)
))
)
)
)
),
# Hidden output that consumes the keep-alive heartbeat counter.
div(style = '' ,textOutput("keepAlive"))
)
### Server -----------------------------------------------------
server = function(input, output, session){
#### keep alive
output$keepAlive <- renderText({
req(input$count)
paste("")
})
shinyjs::hide('Legal_zoomed')
startup <- reactiveVal(0)
is_zoomed <- reactiveVal(FALSE) # new code: to track whether is zoomed in view or not to disable mouseover
shinyjs::hide('controls') # hiding the control panel of reset button (whose div id is controls) in ui
shinyjs::hide('recommendation_explain') # hide the panel that has the recommendation text in ui
# (help text)
output$help_text <- renderText({
help_text = HTML('<p style="color:royalblue; font-weight:bold; font-size: 16px;text-align: center">Please hover on suburb to see more </p>')
#help_text = 'Please hover on suburb to see more infomation'
})
# shinyjs::show('default')
# shinyjs::hide('input_Panel')
# shinyjs::hide('comparison_panel')
# shinyjs::hide('highlevel2')
# shinyjs::hide('highlevel3')
# shinyjs::hide('school_zommed')
# shinyjs::hide('legal_zommed')
# shinyjs::hide('childcare_zommed')
# shinyjs::hide('matchresult')
# shinyjs::hide('matchresult1')
# shinyjs::hide('matchresult2')
# shinyjs::hide('matchresult3')
# shinyjs::hide('matchresult4')
# shinyjs::hide('matchresult5')
# shinyjs::hide('matchresult6')
# ### only show at start up - default suburb information to show in the information boxes -----------------------------------------------------
# suburb_to_show <- subset(vic, suburb == 'Caulfield')
#
# # all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'suburb_box')) in ui
#
# output$suburb_box <- renderUI(HTML({paste('<p style = "font-size:20px; color:#D4AF37">', suburb_to_show[1,]$suburb, '</p>')}))
#
# # all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'income_box')) in ui
# output$income_box <- renderUI(HTML({paste('<p style = "font-size:15px; color:black">', '<strong>Income Class:</strong>', suburb_to_show[1,]$income_class, '</p>')}))
#
# # all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'customer_box')) in ui
# output$customer_box <- renderUI(HTML({paste('<p style = "font-size:15px; color:black">', '<strong>Customer Size:</strong>', suburb_to_show[1,]$TK, '</p>')}))
### initialise a global service side reactiveValues object to store supporting services information and to enable passing/changing this variable between
### observers on the server side
detail_information <- reactiveValues()
### default heatmap -----------------------------------------------------
base_map <- function(){
leaflet(options = leafletOptions(zoomControl = T)) %>%
# basemap - no shapes or markers
addProviderTiles(provider = providers$Stamen.TonerLite,
options = providerTileOptions(opacity = 0.8,detectRetina = T,minZoom = 9)) %>%
fitBounds(lng1 = 144.515897, lng2 = 145.626704, lat1 = -37.20, lat2 = -38.50) %>%
setView(lng = (144.515897 + 145.626704) /2 , lat = (-37.20-38.50)/2, zoom = 9) %>%
# plot all suburbs in polygon. colour the shapes based on mypal_me function
# can change shape colour, label style, highlight colour, opacity, etc.
# basically everything visual about the map can be changed through different options
addPolygons(data = vic,
weight = .7,
stroke = T,
fillColor = ~mypal_tk(vic$TK),
fillOpacity = 0.5,
color = "black",
smoothFactor = 0.2,
label = ~suburb,
labelOptions = labelOptions(
opacity = 0),
highlight = highlightOptions(
fill = T,
fillColor = ~mypal_tk(vic$TK),
fillOpacity = .8,
color = ~mypal_tk(vic$TK),
opacity = .5,
bringToFront = TRUE,
sendToBack = TRUE),
group = 'Turkish Restaurants',
layerId = vic$suburb)%>%
#search functions, can use parameters in options to change the looks and the behaviours
addSearchFeatures(
targetGroups = 'Turkish Restaurants',
options = searchFeaturesOptions(
position = 'topleft',
textPlaceholder = 'Map Suburbs', # default text
zoom=10, openPopup = TRUE, firstTipSubmit = TRUE,
collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE )) %>%
##################################################################################
### New codes - add legend
##################################################################################
addLegend("bottomright",
colors =c("#DCE8FF", "#A0C0F6", "#81A4DF", "#6289CD", "#416FBD "),
labels= c("Less","","","", "More"),
title= "Market Size in Melbourne",
opacity = 1)
}
react_map <- reactiveVal(base_map())
### readio button map
output$map <- renderLeaflet({
react_map()
})
#newUI
output$recommendation_text <- renderText(
HTML('<p style="color:white; font-weight:bold; font-size: 14px; margin-left: -60px">
<br/>Recommendation is calculated based on Recommendation is calculated based on Recommendation is calculated based onRecommendation is calculated based onRecommendation is calculated based onRecommendation is calculated based onRecommendation is calculated based on <span style="color:#D4AF37"></span> are:
<br/><span style="color:red; font-size: 25px; margin-left: 20px; font-weight:bold"></span></p>')
)
### function to subset supporting information -----------------------------------------------------
supporting_info <- function(suburb){
if (suburb %in% legal$Suburb) {
legal_to_show <- subset(legal, Suburb == suburb)
legal_count <- nrow(legal_to_show)
childcare_to_show <- subset(childcare, Suburb == suburb)
childcare_count <- nrow(childcare_to_show)
}
if (!suburb %in% legal$Suburb)
{
legal_count <- 'Coming Soon'
legal_to_show <- ''
childcare_count <- 'Coming Soon'
childcare_to_show <- ''
}
if (suburb %in% school$Address_Town){
school_to_show <- subset(school, Address_Town == suburb) # school
school_count <- nrow(school_to_show)
}
if (!suburb %in% school$Address_Town) {
school_to_show <- ''
school_count <- 'No Schools Found'
}
list(school_to_show,
school_count,
legal_to_show,
legal_count,
childcare_to_show,
childcare_count)
}
#### search behaviour -----------------------------------------------------
observeEvent(input$search, {
selected_suburb <- input$search
suburb_to_show <- subset(vic, suburb == selected_suburb)
if (selected_suburb %in% levels(vic$suburb)){
#### match information box ####
#print(suburb_to_show@data[["Ratio"]])
# # suburb name
output$match_result <- renderText({ input$search })
# # customer size
customer_output = HTML({paste('<p style = "font-size:15px; color:black;text-align: center;;">', suburb_to_show@data[["Ratio"]], '</p>')})
output$customer_match <- renderText({customer_output})
# # income level
income_output = HTML({paste('<p style = "font-size:15px; color:black;text-align: center;;">', suburb_to_show@data[["income_class"]], '</p>')})
output$income_match<- renderText({income_output})
print(income_output)
# # school
school_output = HTML('<p style="color:black; font-size: 15px; text-align: center;">',length(subset(school, Address_Town == selected_suburb)[,1]),'</p>')
output$school_match <- renderText({school_output})
# # childcare
childcare_output = HTML('<p style="color:black; font-size: 15px; text-align: center;">5</p>')
output$childcare_match <- renderText({childcare_output})
# # legal
legal_output = HTML('<p style="color:black; font-size: 15px; text-align: center;">5</p>')
output$legal_match <- renderText({legal_output})
####### existing business
output$existing_business_match <- renderText({
HTML({paste('<p style="color:black; font-size: 15px; text-align: center;">',get_business_count(input$search),'</p>')})
})
#### match information box end ####
# show match result when a suburb selceted
print(selected_suburb)
shinyjs::show('matchresult')
shinyjs::show('matchresult1')
shinyjs::show('matchresult2')
shinyjs::show('matchresult3')
shinyjs::show('matchresult4')
shinyjs::show('matchresult5')
shinyjs::show('matchresult6')
}
})
#### hoverover behaviour -----------------------------------------------------
#### hoverover suburb to see details
observeEvent(input$map_shape_mouseover$id, {
startup <- startup() + 1
if (is_zoomed() == FALSE){
req(input$map_shape_mouseover$id)
#shinyjs::hide('default') # hide help text
# shinyjs::show('input_Panel')
# shinyjs::show('comparison_panel') # correspond to div class id highlevel1 in UI file - just show panel, doesn't change the content
# shinyjs::show('highlevel2') # correspond to div class id highlevel2 in UI file - just show panel, doesn't change the content
# shinyjs::show('highlevel3') # correspond to div class id highlevel3 in UI file - just show panel, doesn't change the content
# shinyjs::show('highlevel4')
selected_suburb <- input$map_shape_mouseover$id
suburb_to_show <- subset(vic, suburb == selected_suburb)
### overwrite the default texts
# all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'suburb_box')) in ui
output$suburb_box <- renderUI(HTML({paste('<p>', suburb_to_show[1,]$suburb, '</p>')}))
# all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'income_box')) in ui
output$income_box <- renderUI(HTML({paste('<p style = "color:black; font-size: 15px; text-align: center;;">', suburb_to_show[1,]$income_class,'</p>')}))
# all the texts are styled in HTML at the moment then send to htmlOutput(outputId = 'customer_box')) in ui
output$customer_box <- renderUI(HTML({paste('<p style = "color:black; font-size: 15px; text-align: center;">', suburb_to_show[1,]$TK, '</p>')}))
#### new information box ####
##Help @Ting
# school_text
output$school_text <- renderText({
HTML('<p style="color:black; font-size: 15px; text-align: center;">',length(subset(school, Address_Town == selected_suburb)[,1]),'</p>')
#school_text = 'Please hover on suburb to see more infomation'
})
shinyjs::show('school_zoomed')
# childcare_text
output$childcare_text <- renderText({
HTML('<p style="color:black; font-size: 15px; text-align: center;">', length(subset(childcare, Suburb == selected_suburb)[,1]),'</p>')
#school_text = 'Please hover on suburb to see more infomation'
})
shinyjs::show('childcare_zoomed')
# legal_text
output$legal_text <- renderText({
HTML('<p style="color:black; font-size: 15px; text-align: center;">',length(subset(legal, Suburb == selected_suburb)[,1]),'</p>')
#school_text = 'Please hover on suburb to see more infomation'
})
shinyjs::show('legal_zoomed')
#### new information box end ####
####### existing business
output$existing_business <- renderText({
HTML({paste('<p style="color:black;font-size: 15px; text-align: center;"> ',get_business_count(selected_suburb),'</p>')})
})
}
})
#### observer to listen to the behaviour of reset button, when it's clicked do... -----------------------------------------------------
observeEvent(input$reset_button, {
react_map(base_map()) # show the default heatmap
output$map <- renderLeaflet({
react_map()
})
is_zoomed(FALSE)
# hide everything and show helpetxt
#shinyjs::show('default')
shinyjs::hide('controls') # hiding the control panel of reset button in ui
# shinyjs::hide('input_Panel')
# shinyjs::hide('comparison_panel')
# shinyjs::hide('highlevel2')
# shinyjs::hide('highlevel3')
# shinyjs::hide('highlevel4')
# shinyjs::hide('school_zoomed')
# shinyjs::hide('Legal_zoomed')
# shinyjs::hide('childcare_zoomed')
#shinyjs::hide('matchresult')
legal_info = ' '
output$legal_text <- renderText({
legal_info
})
# shinyjs::hide('detail1') # correspond to div class id highlevel1 in UI file - just show panel, doesn't change the content
# shinyjs::hide('detail2') # correspond to div class id highlevel2 in UI file - just show panel, doesn't change the content
# shinyjs::hide('detail3') # correspond to div class id highlevel3 in UI file - just show panel, doesn't change the content
# reset and show ui. correspond to div classes in ui
#shinyjs::reset('recommendation') # reset the checkbox option to FASLE in checkboxInput('recommendation', 'Show Recommendations', FALSE)
#shinyjs::show('checkbox1') # show the panel that has the recommendation checkbox in ui
})
### return button is in a panel feed to line 98 -----------------------------------------------------
output$reset <- renderUI({
absolutePanel(id = "controls", top = "auto", left = 50,
right = "auto", bottom = 70, width = "auto", height = "auto",
actionButton(inputId = "reset_button", label = "Back", class = "btn-primary") # can style the button here
)
})
# information boxes for zoomed in version, feed to div class 'zoomed' in UI file. line 101-----------------------------------------------------
### check box UI. text : Show Recommendations, default setting is FALSE (unchecked )
#output$checkbox_rec <- renderUI({checkboxInput('recommendation', HTML({paste('<p style="color:#D4AF37; margin-top:-5px; font-size:20px"><strong>Recommended Suburbs</strong></p>')}), FALSE)})
###################################################
############## For Zomato API - start ############
###################################################
get_city_ID <- function(suburb){
ID = 259
for (s in cities){
if (s == suburb){
ID = 1543
}
}
return(ID)
}
## api request function
search <- function(cityid, api_key, cuisine,query = NULL) {
zmt <- zomato$new(api_key)
an.error.occured <- FALSE
## catch the error when no result is found
tryCatch( { restaurants <- zmt$search(entity_type = 'city', entity_id = cityid, query = query, cuisine = cuisine)}
, error = function(e) {an.error.occured <<- TRUE})
if (an.error.occured == FALSE){
colnames(restaurants) <- make.unique(names(restaurants))
data <- dplyr::select(restaurants, id,name,cuisines,locality,longitude,latitude,price_range, average_cost_for_two)
return(data)
}
else{
no_result = TRUE
}
}
## a function to get no_restaurants
no_restaurants <- function(data) {
if (typeof(data) =='logical'){
return(0)
}
else{
nn = length(data$name)
return(nn)
}
}
###################################################
############## For Zomato API - end ############
###################################################
# observer to listen to clickig on a shape in the map. when there's a click on a suburb, do the following part 1-----------------------------------------------------
observeEvent(input$map_shape_click, {
is_zoomed(TRUE)
print (paste0('mapshape is', input$map_shape_click$id))
click <- input$map_shape_click
selected_suburb <- click$id # return suburb name
})
# observer to listen to clickig on a shape in the map. when there's a click on a suburb, do the following part 2 -----------------------------------------------------
observeEvent(input$map_shape_click, {
### define trainsport function
print (paste0('mapshape is', input$map_shape_click$id))
shinyjs::show('controls') # show the absoluatePanel that has the control button object in ui
shinyjs::hide('checkbox1') # # hide the checkbox panel in ui
# subset data based on shape click
click <- input$map_shape_click
selected_suburb <- click$id # return suburb name
### define trainsport function
transport <- reactive({
df = read.csv('data/transport.csv', stringsAsFactors = F)
})
if (!is.null(selected_suburb)){
suburb_to_show <- subset(vic, suburb == selected_suburb) # suburb df, customer size, and income
boundary <- suburb_to_show@polygons[[1]]@Polygons[[1]]@coords # suburb boundary
school_to_show <- supporting_info(selected_suburb)[[1]] # school df
school_count <- supporting_info(selected_suburb)[[2]][1]
legal_to_show <- supporting_info(selected_suburb)[[3]]# legal df
legal_count <- supporting_info(selected_suburb)[[4]][1]
childcare_to_show <- supporting_info(selected_suburb)[[5]]# childcare df
childcare_count <- supporting_info(selected_suburb)[[6]][1]
#### tooltip/popup styleing for clicking on a marker (school) ----------------------------------
labs_school <- sapply(seq(nrow(school_to_show)), function(i) {
# paste0 is used to Concatenate Strings
paste0( '<p>', 'Name: ', school_to_show[i,]$School_Name, '<br/>',
'Address: ', school_to_show[i,]$Address_Line, ' ', school_to_show[i,]$Address_Town,' ', school_to_show[i,]$Address_Postcode, '<br/>',
'Phone: ', school_to_show[i,]$Full_Phone_No, '<br/>',
'Type: ', school_to_show[i,]$School_Type,
'<p>')
})
#### tooltip/popup styleing for clicking on a marker (childcare) ----------------------------------
labs_childcare <- sapply(seq(nrow(childcare_to_show)), function(i) {
# paste0 is used to Concatenate Strings
paste0( '<p>', 'Name: ', childcare_to_show[i,]$Name.of.Business, '<br/>',
'Address: ', childcare_to_show[i,]$Address, '<br/>',
'Phone: ', childcare_to_show[i,]$Phone.Number , '<p>')
})
#### tooltip/popup styleing for clicking on a marker (legal) ----------------------------------
labs_legal <- sapply(seq(nrow(legal_to_show)), function(i) {
# paste0 is used to Concatenate Strings
paste0( '<p>',
'Name: ', legal_to_show[i,]$Name.of.Business, '<br/>',
'Type: ', legal_to_show[i,]$Business.Type, '<br/>',
'Address: ', legal_to_show[i,]$Address, '<br/>',
'Phone: ', legal_to_show[i,]$Phone.Number , '<p>')
})
#### send new commands to the leaflet instance "map" we create in line 133 (base_map) ----------------------------------
#### since they have the same variable names, leaflet will just change it based on the following codes
# leafletProxy('map') %>% # telling leaflet which instance (map) to change
# clearControls() %>% # clear all the control filters
# clearShapes() %>% # clear all the polygons
# clearMarkers() %>% # clear all the markers
output$map <- renderLeaflet({
  # Base map for the selected suburb. Marker layers are added later through
  # leafletProxy('map') so the whole widget is not rebuilt on every change.
  leaflet(options = leafletOptions(zoomControl = TRUE)) %>%
    # basemap - no shapes or markers
    addProviderTiles(provider = providers$Stamen.TonerLite,
                     options = providerTileOptions(opacity = 0.8, detectRetina = TRUE, minZoom = 9)) %>%
    # set the view to the bounding box of this suburb
    fitBounds(lng1 = max(boundary[, 1]), lat1 = max(boundary[, 2]),
              lng2 = min(boundary[, 1]), lat2 = min(boundary[, 2]),
              # BUG FIX: the original passed options(zoom = 9), which calls
              # base R options() and silently sets a *global* option named
              # "zoom" as a side effect; a plain list is what was intended.
              options = list(zoom = 9)) %>%
    # checkbox layer filters (one checkbox per element of control_group)
    addLayersControl(overlayGroups = control_group,
                     options = layersControlOptions(collapsed = FALSE)) %>%
    # tram and bus layers (control_group[5:6]) start unchecked
    hideGroup(group = control_group[5:6]) %>%
    # plot all the suburb polygons invisibly (fillOpacity = 0) so hover labels
    # and click ids (layerId) still work without recolouring the map
    addPolygons(data = vic,
                weight = 0,
                stroke = 0,
                fillColor = ~mypal_tk(vic$TK), # heatmap colour
                fillOpacity = 0,
                label = ~suburb,
                labelOptions = labelOptions(
                  opacity = 0),
                group = 'Turkish Restaurants',
                layerId = vic$suburb) %>%
    # outline the selected suburb; the near-zero fillOpacity keeps the fill
    # effectively transparent while the black boundary stays visible
    # NOTE(review): fillColor uses vic$TK although data is suburb_to_show;
    # the vector lengths may not match - confirm this is intended.
    addPolygons(data = suburb_to_show,
                weight = 4, # the weight of the boundary line
                stroke = TRUE, # draw the boundary
                fillColor = ~mypal_tk(vic$TK), # heatmap colour
                fillOpacity = 0.003,
                color = "black",
                smoothFactor = 0.7,
                label = ~suburb,
                labelOptions = # don't show the label
                  labelOptions(
                    opacity = 0))
})
### if the suburb has legal/childcare services AND schools
if (selected_suburb %in% legal$Suburb && selected_suburb %in% school$Address_Town)
{
  # leafletProxy() sends incremental commands to the leaflet widget already
  # rendered as output$map, so markers are added without rebuilding the map.
  leafletProxy('map') %>%
    # plot schools in this suburb
    addAwesomeMarkers(data = school_to_show,
                      lng = ~ X,
                      lat = ~ Y,
                      icon = awesomeIcons( # Font Awesome icon for schools
                        icon = "graduation-cap",
                        library = "fa",
                        markerColor = "lightred"),
                      popup = lapply(labs_school, HTML),
                      popupOptions = popupOptions(noHide = F, # css styling of the popup box
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      # 'group' links this layer to its checkbox in addLayersControl()
                      group = control_group[1]) %>%
    # plot legal services in this suburb
    addAwesomeMarkers(data = legal_to_show,
                      lng = ~ Longitude,
                      lat = ~ Latitude,
                      icon = awesomeIcons(
                        icon = "gavel",
                        library = "fa",
                        markerColor = "purple"),
                      popup = lapply(labs_legal, HTML),
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[2]) %>%
    # plot childcare services in this suburb (the original comment said
    # "legal" here, but the data/icon/group are childcare)
    addAwesomeMarkers(data = childcare_to_show,
                      lng = ~ Longitude,
                      lat = ~ Latitude,
                      icon = awesomeIcons(
                        icon = "child",
                        library = "fa",
                        markerColor = "green"),
                      popup = lapply(labs_childcare, HTML),
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[3])
  # search function (kept disabled, as in the original)
  # addSearchFeatures(
  #   targetGroups = 'Turkish Restaurants',
  #   options = searchFeaturesOptions(
  #     position = 'topleft',
  #     textPlaceholder = 'Search Suburbs',
  #     zoom=12, openPopup = TRUE, firstTipSubmit = TRUE,
  #     collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE ))
}
####transport####
# read and subset transport data based on selected suburb
transport_to_show <- subset(transport(), suburb == selected_suburb)
### if the suburb has schools AND at least one public-transport stop
# NOTE(review): when the suburb also has legal/childcare services, the branch
# above has already plotted the school markers, so this re-adds them - confirm
# the duplication is intended.
if (selected_suburb %in% school$Address_Town && length(transport_to_show$suburb) > 0) {
  # route_type codes: 2 = train, 0 = tram, 3 = bus (per the marker icons below)
  # train
  train_to_show <- subset(transport_to_show, route_type == 2)
  print(train_to_show$stop_name) # NOTE(review): leftover debug print - consider removing
  # tram
  tram_to_show <- subset(transport_to_show, route_type == 0)
  # bus
  bus_to_show <- subset(transport_to_show, route_type == 3)
  leafletProxy('map') %>%
    # plot schools in this suburb
    addAwesomeMarkers(data = school_to_show,
                      lng = ~ X,
                      lat = ~ Y,
                      icon = awesomeIcons(
                        icon = "graduation-cap",
                        library = "fa",
                        markerColor = "lightred"),
                      popup = lapply(labs_school, HTML),
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[1]) %>%
    # plot trains in this suburb (popup shows the stop name)
    addAwesomeMarkers(data = train_to_show,
                      lat = train_to_show$stop_lat,
                      lng = train_to_show$stop_lon,
                      icon = awesomeIcons(
                        icon = "train",
                        library = "fa",
                        markerColor = "blue"),
                      popup = train_to_show$stop_name,
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[4]) %>%
    # plot trams in this suburb
    addAwesomeMarkers(data = tram_to_show,
                      lat = tram_to_show$stop_lat,
                      lng = tram_to_show$stop_lon,
                      icon = awesomeIcons(
                        icon = "subway",
                        library = "fa",
                        markerColor = "pink"),
                      popup = tram_to_show$stop_name,
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[5]) %>%
    # plot buses in this suburb
    addAwesomeMarkers(data = bus_to_show,
                      lat = bus_to_show$stop_lat,
                      lng = bus_to_show$stop_lon,
                      icon = awesomeIcons(
                        icon = "bus",
                        library = "fa",
                        markerColor = "orange"),
                      popup = bus_to_show$stop_name,
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[6])
  # search function (kept disabled, as in the original)
  # addSearchFeatures(
  #   targetGroups = 'Turkish Restaurants',
  #   options = searchFeaturesOptions(
  #     position = 'topright',
  #     textPlaceholder = 'Search suburbs',
  #     zoom=12, openPopup = TRUE, firstTipSubmit = TRUE,
  #     collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE ))
  #
}
# Plot transport markers whenever the suburb has any stops, and publish the
# station/route counts to the UI.
# NOTE(review): this block re-adds train/tram/bus markers even when the
# branch above already plotted them - confirm the duplication is intended.
if (length(transport_to_show$suburb) > 0) {
  # train #
  train_to_show <- subset(transport_to_show, route_type == 2)
  # no. of train stations (rows in the train subset)
  output$train_stations_count <- renderText({
    length(train_to_show[[1]])
  })
  print(length(train_to_show[[1]])) # NOTE(review): leftover debug print
  # tram #
  tram_to_show <- subset(transport_to_show, route_type == 0)
  # tram routes count
  output$tram_routes_count <- renderText({
    length(get_routes(tram_to_show)[[1]])
  })
  # bus #
  bus_to_show <- subset(transport_to_show, route_type == 3)
  # bus routes count
  # NOTE(review): tram counts use get_routes(...)[[1]] but bus uses [[2]];
  # confirm get_routes()'s return structure - this asymmetry looks suspicious.
  output$bus_routes_count <- renderText({
    length(get_routes(bus_to_show)[[2]])
  })
  leafletProxy('map') %>%
    # plot trains in this suburb (popup shows the stop name)
    addAwesomeMarkers(data = train_to_show,
                      lat = train_to_show$stop_lat,
                      lng = train_to_show$stop_lon,
                      icon = awesomeIcons(
                        icon = "train",
                        library = "fa",
                        markerColor = "blue"),
                      popup = train_to_show$stop_name,
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[4]) %>%
    # plot trams in this suburb (popup shows the route number here, unlike the
    # stop-name popups used in the branch above)
    addAwesomeMarkers(data = tram_to_show,
                      lat = tram_to_show$stop_lat,
                      lng = tram_to_show$stop_lon,
                      icon = awesomeIcons(
                        icon = "subway",
                        library = "fa",
                        markerColor = "pink"),
                      popup = paste('Tram Route:', tram_to_show$route_short_name),
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[5]) %>%
    # plot buses in this suburb
    addAwesomeMarkers(data = bus_to_show,
                      lat = bus_to_show$stop_lat,
                      lng = bus_to_show$stop_lon,
                      icon = awesomeIcons(
                        icon = "bus",
                        library = "fa",
                        markerColor = "orange"),
                      popup = paste('Bus Route:', bus_to_show$route_short_name),
                      popupOptions = popupOptions(noHide = F,
                                                  direction = "center",
                                                  style = list(
                                                    "color" = "black",
                                                    "font-family" = "open sans",
                                                    "box-shadow" = "0.1px 0.1px rgba(0,0,0,0.25)",
                                                    "font-size" = "13px",
                                                    "border-color" = "rgba(0,0,0,0.5)")),
                      group = control_group[6])
  # search function (kept disabled, as in the original)
  # addSearchFeatures(
  #   targetGroups = 'Turkish Restaurants',
  #   options = searchFeaturesOptions(
  #     position = 'topright',
  #     textPlaceholder = 'Search suburbs',
  #     zoom=12, openPopup = TRUE, firstTipSubmit = TRUE,
  #     collapsed = FALSE, autoCollapse = FALSE, hideMarkerOnCollapse = TRUE ))
}
}
})
}
# Launch the application; 'ui' and 'server' are defined earlier in this file.
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OctaveFunction-class.R
\docType{class}
\name{OctaveFunction-class}
\alias{OctaveFunction-class}
\alias{OctaveFunction}
\alias{show,OctaveFunction-method}
\title{Wrapping and Defining Octave Functions from R}
\usage{
OctaveFunction(fun, check = TRUE)
}
\arguments{
\item{fun}{the name of an existing Octave function, or Octave code that
defines a function.}
\item{check}{logical that indicates if the existence of the Octave function
should be checked.
If the function does not exist, then an error is thrown if \code{check=TRUE},
or a warning if \code{check=FALSE}.
The existence check can be completely disabled with \code{check=NA}.}
}
\description{
Wrapping and Defining Octave Functions from R
\code{OctaveFunction} objects can be created from existing Octave function
using their name, or directly from their Octave implementation.
In this case, the Octave code is parsed to extract and use the name of the first
function defined therein.
}
\section{Slots}{
\describe{
\item{\code{name}}{name of the wrapped Octave function}
}}
\examples{
osvd <- OctaveFunction('svd')
osvd
osvd(matrix(1:9,3))
orand <- OctaveFunction('rand')
orand()
orand(2)
orand(2, 3)
# From source code
myfun <- OctaveFunction('function [Y] = somefun(x)
Y = x * x;
end
')
myfun
myfun(10)
}
| /man/OctaveFunction-class.Rd | no_license | git-steb/RcppOctave | R | false | true | 1,364 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OctaveFunction-class.R
\docType{class}
\name{OctaveFunction-class}
\alias{OctaveFunction-class}
\alias{OctaveFunction}
\alias{show,OctaveFunction-method}
\title{Wrapping and Defining Octave Functions from R}
\usage{
OctaveFunction(fun, check = TRUE)
}
\arguments{
\item{fun}{the name of an existing Octave function, or Octave code that
defines a function.}
\item{check}{logical that indicates if the existence of the Octave function
should be checked.
If the function does not exist, then an error is thrown if \code{check=TRUE},
or a warning if \code{check=FALSE}.
The existence check can be completely disabled with \code{check=NA}.}
}
\description{
Wrapping and Defining Octave Functions from R
\code{OctaveFunction} objects can be created from existing Octave function
using their name, or directly from their Octave implementation.
In this case, the Octave code is parsed to extract and use the name of the first
function defined therein.
}
\section{Slots}{
\describe{
\item{\code{name}}{name of the wrapped Octave function}
}}
\examples{
osvd <- OctaveFunction('svd')
osvd
osvd(matrix(1:9,3))
orand <- OctaveFunction('rand')
orand()
orand(2)
orand(2, 3)
# From source code
myfun <- OctaveFunction('function [Y] = somefun(x)
Y = x * x;
end
')
myfun
myfun(10)
}
|
library(rhdf5)

# Work on a throw-away HDF5 file; remove any leftover from a previous run so
# h5createFile() is guaranteed to start from a clean slate.
h5File <- tempfile(pattern = "H5D_", fileext = ".h5")
if(file.exists(h5File))
  file.remove(h5File)
expect_true( h5createFile(h5File) )
# create a small dataset "foo" (a 10x2 integer matrix) to query below
expect_silent( h5write(matrix(1:20, ncol = 2), file = h5File, name = "foo") )

############################################################
context("H5D: getting property lists")
###########################################################

## The property list interface is really limited at the moment
## so there aren't many functions that we can check
test_that("Extracting property list", {
  # open file -> dataset -> creation property list, then close in reverse order
  expect_silent( fid <- H5Fopen(h5File) )
  expect_silent( did <- H5Dopen(fid, name = "foo") )
  expect_silent( pid <- H5Dget_create_plist(did) )
  expect_output( print(pid), "HDF5 GENPROP_LST")
  expect_silent( H5Pclose(pid) )
  expect_silent( H5Dclose(did) )
  expect_silent( H5Fclose(fid) )
})
| /tests/testthat/test_H5D.R | no_license | rcastelo/rhdf5 | R | false | false | 871 | r | library(rhdf5)
h5File <- tempfile(pattern = "H5D_", fileext = ".h5")
if(file.exists(h5File))
file.remove(h5File)
expect_true( h5createFile(h5File) )
expect_silent( h5write(matrix(1:20, ncol = 2), file = h5File, name = "foo") )
############################################################
context("H5D: getting property lists")
###########################################################
## The property list interface is really limited at the moment
## so there aren't many functions that we can check
test_that("Extracting property list", {
expect_silent( fid <- H5Fopen(h5File) )
expect_silent( did <- H5Dopen(fid, name = "foo") )
expect_silent( pid <- H5Dget_create_plist(did) )
expect_output( print(pid), "HDF5 GENPROP_LST")
expect_silent( H5Pclose(pid) )
expect_silent( H5Dclose(did) )
expect_silent( H5Fclose(fid) )
})
|
#' Default plot hooks for different output formats
#'
#' These hook functions define how to mark up graphics output in different
#' output formats.
#'
#' Depending on the options passed over, \code{hook_plot_tex} may return the
#' normal \samp{\\includegraphics{}} command, or \samp{\\input{}} (for tikz
#' files), or \samp{\\animategraphics{}} (for animations); it also takes many
#' other options into consideration to align plots and set figure sizes, etc.
#' Similarly, \code{hook_plot_html}, \code{hook_plot_md} and
#' \code{hook_plot_rst} return character strings which are HTML, Markdown, reST
#' code.
#'
#' In most cases we do not need to call these hooks explicitly, and they were
#' designed to be used internally. Sometimes we may not be able to record R
#' plots using \code{\link[grDevices]{recordPlot}}, and we can make use of these
#' hooks to insert graphics output in the output document; see
#' \code{\link{hook_plot_custom}} for details.
#' @param x a character vector of length 2 ; \code{x[1]} is the plot base
#' filename, and \code{x[2]} is the file extension
#' @param options a list of the current chunk options
#' @rdname hook_plot
#' @return A character string (code with plot filenames wrapped)
#' @references \url{http://yihui.name/knitr/hooks}
#' @seealso \code{\link{hook_plot_custom}}
#' @export
#' @examples ## this is what happens for a chunk like this
#'
#' ## <<foo-bar-plot, dev='pdf', fig.align='right'>>=
#' hook_plot_tex(c('foo-bar-plot', 'pdf'), opts_chunk$merge(list(fig.align='right')))
#'
#' ## <<bar, dev='tikz'>>=
#' hook_plot_tex(c('bar', 'tikz'), opts_chunk$merge(list(dev='tikz')))
#'
#' ## <<foo, dev='pdf', fig.show='animate', interval=.1>>=
#'
#' ## 5 plots are generated in this chunk
#' hook_plot_tex(c('foo5', 'pdf'), opts_chunk$merge(list(fig.show='animate',interval=.1,fig.cur=5, fig.num=5)))
hook_plot_tex = function(x, options) {
  # x: c(basename, extension) of the plot file; options: current chunk options.
  # Returns LaTeX markup for the plot (\includegraphics, \input for tikz, or
  # \animategraphics for animations) wrapped in resize/alignment/figure
  # scaffolding driven by the chunk options.
  if (!options$include) return('')
  rw = options$resize.width; rh = options$resize.height
  resize1 = resize2 = ''
  if (!is.null(rw) || !is.null(rh)) {
    # wrap the graphic in \resizebox{w}{h}{...}; '!' preserves aspect ratio
    resize1 = sprintf('\\resizebox{%s}{%s}{', rw %n% '!', rh %n% '!')
    resize2 = '} '
  }
  tikz = is_tikz_dev(options)
  a = options$fig.align
  fig.cur = options$fig.cur %n% 0L; fig.num = options$fig.num %n% 1L
  animate = options$fig.show == 'animate'
  # for animations, emit nothing until the last figure of the chunk is reached
  if (!tikz && animate && fig.cur < fig.num) return('')
  align1 = align2 = ''
  ## multiple plots: begin at 1, end at fig.num
  ai = options$fig.show != 'hold'
  # plot1/plot2: is this the first/last plot of the chunk? They control where
  # the alignment markup is opened and closed when plots are held together.
  plot1 = ai || fig.cur <= 1L; plot2 = ai || fig.cur == 0L || fig.cur == fig.num
  if (plot1) align1 = switch(a, left = '\n\n', center = '\n\n{\\centering ',
                             right = '\n\n\\hfill{}', '')
  if (plot2) align2 = switch(a, left = '\\hfill{}\n\n', center = '\n\n}\n\n',
                             right = '\n\n', '')
  ## figure environment: caption, short caption, label
  cap = options$fig.cap; scap = options$fig.scap; fig1 = fig2 = ''
  mcap = fig.num > 1L && options$fig.show == 'asis'
  if (mcap) {
    cap = rep(cap, length.out = fig.num)[fig.cur] # multiple captions
    scap = rep(scap, length.out = fig.num)[fig.cur]
  } else {
    cap = cap[1L]; scap = scap[1L]
  }
  if(length(cap) && !is.na(cap)) {
    if (plot1) {
      fig1 = sprintf('\\begin{figure}[%s]\n', options$fig.pos)
    }
    if (plot2) {
      lab = str_c(options$fig.lp, options$label, ifelse(mcap, fig.cur, ''))
      # default short caption: the caption text before the first '.', ';' or ':'
      if (is.null(scap)) scap = str_split(cap, '\\.|;|:')[[1L]][1L]
      scap = if(is.na(scap)) '' else str_c('[', scap, ']')
      fig2 = sprintf('\\caption%s{%s\\label{%s}}\n\\end{figure}\n', scap, cap, lab)
    }
  }
  # maxwidth does not work with animations
  if (animate && identical(options$out.width, '\\maxwidth')) options$out.width = NULL
  # optional [width=...,height=...,extra] bracket for the graphics command
  size = paste(c(sprintf('width=%s', options$out.width),
                 sprintf('height=%s', options$out.height),
                 options$out.extra), collapse = ',')
  paste(fig1, align1, resize1,
        if (tikz) {
          sprintf('\\input{%s.tikz}', x[1])
        } else if (animate) {
          ## \animategraphics{} should be inserted only *once*!
          aniopts = options$aniopts
          aniopts = if (is.na(aniopts)) NULL else gsub(';', ',', aniopts)
          size = paste(c(size, sprintf('%s', aniopts)), collapse = ',')
          if (nzchar(size)) size = sprintf('[%s]', size)
          # strip the trailing figure number from the basename to get the
          # common prefix of the animation frames
          sprintf('\\animategraphics%s{%s}{%s}{%s}{%s}', size, 1/options$interval,
                  sub(str_c(fig.num, '$'), '', x[1]), 1L, fig.num)
        } else {
          if (nzchar(size)) size = sprintf('[%s]', size)
          sprintf('\\includegraphics%s{%s} ', size, x[1])
        },
        resize2, align2, fig2, sep = '')
}
.chunk.hook.tex = function(x, options) {
  # Chunk output hook: wrap the chunk text in kframe/knitrout environments,
  # remove empty environments left by the assembly, and optionally split the
  # chunk into its own .tex file (chunk option 'split').
  col = if (ai <- output_asis(x, options)) '' else
    str_c(color_def(options$background), ifelse(is_tikz_dev(options), '', '\\color{fgcolor}'))
  k1 = str_c(col, '\\begin{kframe}\n')
  k2 = '\\end{kframe}'
  x = str_c(k1, x, k2)
  ## rm empty kframe and verbatim environments
  x = gsub('\\\\begin\\{(kframe)\\}\\s*\\\\end\\{\\1\\}', '', x)
  x = gsub('\\\\end\\{(verbatim)\\}\\s*\\\\begin\\{\\1\\}[\n]?', '\n', x)
  # font size command (e.g. \footnotesize); empty for the default size
  size = if (options$size == 'normalsize') '' else str_c('\\', options$size)
  if (!ai) x = str_c('\\begin{knitrout}', size, '\n', x, '\n\\end{knitrout}')
  if (options$split) {
    # write this chunk to its own file and reference it with \input{}
    name = fig_path('.tex', options)
    if (!file.exists(dirname(name)))
      dir.create(dirname(name))
    cat(x, file = name)
    sprintf('\\input{%s}', name)
  } else x
}
## inline hook for tex
.inline.hook.tex = function(x) {
  ## numbers are rendered in LaTeX scientific notation before the generic
  ## inline hook formats the final output
  .inline.hook(if (is.numeric(x)) format_sci(x, 'latex') else x)
}
## single param hook: a function of one argument
.param.hook = function(before, options, envir) {
  ## invoked twice per chunk: once before and once after evaluation
  if (!before) return('do something after the code chunk')
  'do something before the code chunk'
}
.verb.hook = function(x, options) {
  ## wrap chunk text in a LaTeX verbatim environment
  str_c('\\begin{verbatim}\n', x, '\\end{verbatim}\n')
}
#' Set output hooks for different output formats
#'
#' These functions set built-in output hooks for LaTeX, HTML, Markdown and
#' reStructuredText.
#'
#' There are three variants of markdown documents: ordinary markdown
#' (\code{render_markdown(strict = TRUE)}), extended markdown (e.g. GitHub
#' Flavored Markdown and pandoc; \code{render_markdown(strict = FALSE)}), and
#' Jekyll (a blogging system on GitHub; \code{render_jekyll()}). For LaTeX
#' output, there are three variants as well: \pkg{knitr}'s default style
#' (\code{render_latex()}; use the LaTeX \pkg{framed} package), Sweave style
#' (\code{render_sweave()}; use \file{Sweave.sty}) and listings style
#' (\code{render_listings()}; use LaTeX \pkg{listings} package). Default HTML
#' output hooks are set by \code{render_html()}, and reStructuredText uses
#' \code{render_rst()}.
#'
#' These functions can be used before \code{knit()} or in the first chunk of the
#' input document (ideally this chunk has options \code{include = FALSE} and
#' \code{cache = FALSE}) so that all the following chunks will be formatted as
#' expected.
#'
#' You can use \code{\link{knit_hooks}} to further customize output hooks; see
#' references.
#' @rdname output_hooks
#' @return \code{NULL}; corresponding hooks are set as a side effect
#' @export
#' @references See output hooks in \url{http://yihui.name/knitr/hooks}
render_latex = function() {
  # Install knitr's default LaTeX output hooks (framed-package style).
  # Side effects only: mutates opts_chunk, the document header, and knit_hooks.
  if (child_mode()) return()  # child documents inherit the parent's hooks
  test_latex_pkg('framed', system.file('misc', 'framed.sty', package = 'knitr'))
  opts_chunk$set(out.width = '\\maxwidth')
  h = opts_knit$get('header')
  if (!nzchar(h['framed'])) set_header(framed = .header.framed)
  if (!nzchar(h['highlight'])) {
    # without the highlight package, fall back to the alltt LaTeX package
    if (!has_package('highlight') && !str_detect(.header.hi.tex, fixed('\\usepackage{alltt}')))
      .header.hi.tex = str_c(.header.hi.tex, '\\usepackage{alltt}', sep = '\n')
    set_header(highlight = .header.hi.tex)
  }
  knit_hooks$restore()
  knit_hooks$set(source = function(x, options) {
    # non-R engines or highlight=FALSE: plain verbatim source
    if (options$engine != 'R' || !options$highlight)
      return(.verb.hook(x, options))
    if (!has_package('highlight')) return(x)
    ## gsub() makes sure " will not produce an umlaut
    str_c('\\begin{flushleft}\n', gsub('"', '"{}', x, fixed = TRUE),
          '\\end{flushleft}\n')
  }, output = function(x, options) {
    if (output_asis(x, options)) {
      # 'asis' results must escape the surrounding kframe environment
      str_c('\\end{kframe}\n', x, '\n\\begin{kframe}')
    } else .verb.hook(x, options)
  }, warning = .verb.hook, message = .verb.hook, error = .verb.hook,
  inline = .inline.hook.tex, chunk = .chunk.hook.tex,
  plot = function(x, options) {
    ## escape plot environments from kframe
    str_c('\\end{kframe}', hook_plot_tex(x, options), '\\begin{kframe}')
  })
}
#' @rdname output_hooks
#' @export
render_sweave = function() {
  # Emulate classic Sweave output (Sinput/Soutput/Schunk environments).
  # Side effects only: mutates opts_chunk, the document header, and knit_hooks.
  if (child_mode()) return()
  opts_chunk$set(highlight = FALSE, comment = NA, prompt = TRUE) # mimic Sweave settings
  test_latex_pkg('Sweave', file.path(R.home("share"), "texmf", "tex", "latex", "Sweave.sty"))
  set_header(framed = '', highlight = '\\usepackage{Sweave}')
  knit_hooks$restore()
  ## wrap source code in the Sinput environment, output in Soutput
  hook.i = function(x, options) str_c('\\begin{Sinput}\n', x, '\\end{Sinput}\n')
  hook.s = function(x, options) str_c('\\begin{Soutput}\n', x, '\\end{Soutput}\n')
  hook.o = function(x, options) if (output_asis(x, options)) x else hook.s(x, options)
  hook.c = function(x, options) {
    if (output_asis(x, options)) return(x)  # 'asis' chunks bypass Schunk
    str_c('\\begin{Schunk}\n', x, '\\end{Schunk}\n')
  }
  knit_hooks$set(source = hook.i, output = hook.o, warning = hook.s,
                 message = hook.s, error = hook.s, inline = .inline.hook.tex,
                 plot = hook_plot_tex, chunk = hook.c)
}
#' @rdname output_hooks
#' @export
render_listings = function() {
  # Sweave-style output rendered with the LaTeX listings package: reuse the
  # Sweave hooks, then swap in Sweavel.sty and disable the R prompt.
  if (child_mode()) return()
  render_sweave()
  opts_chunk$set(prompt = FALSE)
  test_latex_pkg('Sweavel', system.file('misc', 'Sweavel.sty', package = 'knitr'))
  set_header(framed = '', highlight = '\\usepackage{Sweavel}')
  invisible(NULL)
}
## may add textile, and many other markup languages
| /R/hooks-latex.R | no_license | messert/knitr | R | false | false | 10,070 | r | #' Default plot hooks for different output formats
#'
#' These hook functions define how to mark up graphics output in different
#' output formats.
#'
#' Depending on the options passed over, \code{hook_plot_tex} may return the
#' normal \samp{\\includegraphics{}} command, or \samp{\\input{}} (for tikz
#' files), or \samp{\\animategraphics{}} (for animations); it also takes many
#' other options into consideration to align plots and set figure sizes, etc.
#' Similarly, \code{hook_plot_html}, \code{hook_plot_md} and
#' \code{hook_plot_rst} return character strings which are HTML, Markdown, reST
#' code.
#'
#' In most cases we do not need to call these hooks explicitly, and they were
#' designed to be used internally. Sometimes we may not be able to record R
#' plots using \code{\link[grDevices]{recordPlot}}, and we can make use of these
#' hooks to insert graphics output in the output document; see
#' \code{\link{hook_plot_custom}} for details.
#' @param x a character vector of length 2 ; \code{x[1]} is the plot base
#' filename, and \code{x[2]} is the file extension
#' @param options a list of the current chunk options
#' @rdname hook_plot
#' @return A character string (code with plot filenames wrapped)
#' @references \url{http://yihui.name/knitr/hooks}
#' @seealso \code{\link{hook_plot_custom}}
#' @export
#' @examples ## this is what happens for a chunk like this
#'
#' ## <<foo-bar-plot, dev='pdf', fig.align='right'>>=
#' hook_plot_tex(c('foo-bar-plot', 'pdf'), opts_chunk$merge(list(fig.align='right')))
#'
#' ## <<bar, dev='tikz'>>=
#' hook_plot_tex(c('bar', 'tikz'), opts_chunk$merge(list(dev='tikz')))
#'
#' ## <<foo, dev='pdf', fig.show='animate', interval=.1>>=
#'
#' ## 5 plots are generated in this chunk
#' hook_plot_tex(c('foo5', 'pdf'), opts_chunk$merge(list(fig.show='animate',interval=.1,fig.cur=5, fig.num=5)))
hook_plot_tex = function(x, options) {
if (!options$include) return('')
rw = options$resize.width; rh = options$resize.height
resize1 = resize2 = ''
if (!is.null(rw) || !is.null(rh)) {
resize1 = sprintf('\\resizebox{%s}{%s}{', rw %n% '!', rh %n% '!')
resize2 = '} '
}
tikz = is_tikz_dev(options)
a = options$fig.align
fig.cur = options$fig.cur %n% 0L; fig.num = options$fig.num %n% 1L
animate = options$fig.show == 'animate'
if (!tikz && animate && fig.cur < fig.num) return('')
align1 = align2 = ''
## multiple plots: begin at 1, end at fig.num
ai = options$fig.show != 'hold'
plot1 = ai || fig.cur <= 1L; plot2 = ai || fig.cur == 0L || fig.cur == fig.num
if (plot1) align1 = switch(a, left = '\n\n', center = '\n\n{\\centering ',
right = '\n\n\\hfill{}', '')
if (plot2) align2 = switch(a, left = '\\hfill{}\n\n', center = '\n\n}\n\n',
right = '\n\n', '')
## figure environment: caption, short caption, label
cap = options$fig.cap; scap = options$fig.scap; fig1 = fig2 = ''
mcap = fig.num > 1L && options$fig.show == 'asis'
if (mcap) {
cap = rep(cap, length.out = fig.num)[fig.cur] # multiple captions
scap = rep(scap, length.out = fig.num)[fig.cur]
} else {
cap = cap[1L]; scap = scap[1L]
}
if(length(cap) && !is.na(cap)) {
if (plot1) {
fig1 = sprintf('\\begin{figure}[%s]\n', options$fig.pos)
}
if (plot2) {
lab = str_c(options$fig.lp, options$label, ifelse(mcap, fig.cur, ''))
if (is.null(scap)) scap = str_split(cap, '\\.|;|:')[[1L]][1L]
scap = if(is.na(scap)) '' else str_c('[', scap, ']')
fig2 = sprintf('\\caption%s{%s\\label{%s}}\n\\end{figure}\n', scap, cap, lab)
}
}
# maxwidth does not work with animations
if (animate && identical(options$out.width, '\\maxwidth')) options$out.width = NULL
size = paste(c(sprintf('width=%s', options$out.width),
sprintf('height=%s', options$out.height),
options$out.extra), collapse = ',')
paste(fig1, align1, resize1,
if (tikz) {
sprintf('\\input{%s.tikz}', x[1])
} else if (animate) {
## \animategraphics{} should be inserted only *once*!
aniopts = options$aniopts
aniopts = if (is.na(aniopts)) NULL else gsub(';', ',', aniopts)
size = paste(c(size, sprintf('%s', aniopts)), collapse = ',')
if (nzchar(size)) size = sprintf('[%s]', size)
sprintf('\\animategraphics%s{%s}{%s}{%s}{%s}', size, 1/options$interval,
sub(str_c(fig.num, '$'), '', x[1]), 1L, fig.num)
} else {
if (nzchar(size)) size = sprintf('[%s]', size)
sprintf('\\includegraphics%s{%s} ', size, x[1])
},
resize2, align2, fig2, sep = '')
}
.chunk.hook.tex = function(x, options) {
col = if (ai <- output_asis(x, options)) '' else
str_c(color_def(options$background), ifelse(is_tikz_dev(options), '', '\\color{fgcolor}'))
k1 = str_c(col, '\\begin{kframe}\n')
k2 = '\\end{kframe}'
x = str_c(k1, x, k2)
## rm empty kframe and verbatim environments
x = gsub('\\\\begin\\{(kframe)\\}\\s*\\\\end\\{\\1\\}', '', x)
x = gsub('\\\\end\\{(verbatim)\\}\\s*\\\\begin\\{\\1\\}[\n]?', '\n', x)
size = if (options$size == 'normalsize') '' else str_c('\\', options$size)
if (!ai) x = str_c('\\begin{knitrout}', size, '\n', x, '\n\\end{knitrout}')
if (options$split) {
name = fig_path('.tex', options)
if (!file.exists(dirname(name)))
dir.create(dirname(name))
cat(x, file = name)
sprintf('\\input{%s}', name)
} else x
}
## inline hook for tex
.inline.hook.tex = function(x) {
  ## numbers are rendered in LaTeX scientific notation before the generic
  ## inline hook formats the final output
  .inline.hook(if (is.numeric(x)) format_sci(x, 'latex') else x)
}
## single param hook: a function of one argument
.param.hook = function(before, options, envir) {
  ## invoked twice per chunk: once before and once after evaluation
  if (!before) return('do something after the code chunk')
  'do something before the code chunk'
}
.verb.hook = function(x, options) {
  ## wrap chunk text in a LaTeX verbatim environment
  str_c('\\begin{verbatim}\n', x, '\\end{verbatim}\n')
}
#' Set output hooks for different output formats
#'
#' These functions set built-in output hooks for LaTeX, HTML, Markdown and
#' reStructuredText.
#'
#' There are three variants of markdown documents: ordinary markdown
#' (\code{render_markdown(strict = TRUE)}), extended markdown (e.g. GitHub
#' Flavored Markdown and pandoc; \code{render_markdown(strict = FALSE)}), and
#' Jekyll (a blogging system on GitHub; \code{render_jekyll()}). For LaTeX
#' output, there are three variants as well: \pkg{knitr}'s default style
#' (\code{render_latex()}; use the LaTeX \pkg{framed} package), Sweave style
#' (\code{render_sweave()}; use \file{Sweave.sty}) and listings style
#' (\code{render_listings()}; use LaTeX \pkg{listings} package). Default HTML
#' output hooks are set by \code{render_html()}, and reStructuredText uses
#' \code{render_rst()}.
#'
#' These functions can be used before \code{knit()} or in the first chunk of the
#' input document (ideally this chunk has options \code{include = FALSE} and
#' \code{cache = FALSE}) so that all the following chunks will be formatted as
#' expected.
#'
#' You can use \code{\link{knit_hooks}} to further customize output hooks; see
#' references.
#' @rdname output_hooks
#' @return \code{NULL}; corresponding hooks are set as a side effect
#' @export
#' @references See output hooks in \url{http://yihui.name/knitr/hooks}
# Install knitr's default LaTeX output hooks (framed-package style).
render_latex = function() {
  # Child documents inherit the parent's hooks; do nothing in child mode.
  if (child_mode()) return()
  # Ensure the LaTeX 'framed' package is available (a copy ships with knitr).
  test_latex_pkg('framed', system.file('misc', 'framed.sty', package = 'knitr'))
  opts_chunk$set(out.width = '\\maxwidth')
  h = opts_knit$get('header')
  # Only fill in the default headers the user has not already set.
  if (!nzchar(h['framed'])) set_header(framed = .header.framed)
  if (!nzchar(h['highlight'])) {
    # Without the highlight package, the alltt environment is needed for output.
    if (!has_package('highlight') && !str_detect(.header.hi.tex, fixed('\\usepackage{alltt}')))
      .header.hi.tex = str_c(.header.hi.tex, '\\usepackage{alltt}', sep = '\n')
    set_header(highlight = .header.hi.tex)
  }
  knit_hooks$restore()
  knit_hooks$set(source = function(x, options) {
    # Non-R engines or highlight=FALSE fall back to a plain verbatim block.
    if (options$engine != 'R' || !options$highlight)
      return(.verb.hook(x, options))
    if (!has_package('highlight')) return(x)
    ## gsub() makes sure " will not produce an umlaut
    str_c('\\begin{flushleft}\n', gsub('"', '"{}', x, fixed = TRUE),
          '\\end{flushleft}\n')
  }, output = function(x, options) {
    if (output_asis(x, options)) {
      # results='asis': escape the kframe so raw LaTeX output is not boxed.
      str_c('\\end{kframe}\n', x, '\n\\begin{kframe}')
    } else .verb.hook(x, options)
  }, warning = .verb.hook, message = .verb.hook, error = .verb.hook,
  inline = .inline.hook.tex, chunk = .chunk.hook.tex,
  plot = function(x, options) {
    ## escape plot environments from kframe
    str_c('\\end{kframe}', hook_plot_tex(x, options), '\\begin{kframe}')
  })
}
#' @rdname output_hooks
#' @export
# Install Sweave-compatible output hooks (Sinput/Soutput/Schunk environments).
render_sweave = function() {
  # Child documents inherit the parent's hooks; do nothing in child mode.
  if (child_mode()) return()
  opts_chunk$set(highlight = FALSE, comment = NA, prompt = TRUE) # mimic Sweave settings
  test_latex_pkg('Sweave', file.path(R.home("share"), "texmf", "tex", "latex", "Sweave.sty"))
  set_header(framed = '', highlight = '\\usepackage{Sweave}')
  knit_hooks$restore()
  ## wrap source code in the Sinput environment, output in Soutput
  hook.i = function(x, options) str_c('\\begin{Sinput}\n', x, '\\end{Sinput}\n')
  hook.s = function(x, options) str_c('\\begin{Soutput}\n', x, '\\end{Soutput}\n')
  # results='asis' output passes through untouched; otherwise use Soutput.
  hook.o = function(x, options) if (output_asis(x, options)) x else hook.s(x, options)
  # Whole chunk: skip the Schunk wrapper for asis output as well.
  hook.c = function(x, options) {
    if (output_asis(x, options)) return(x)
    str_c('\\begin{Schunk}\n', x, '\\end{Schunk}\n')
  }
  knit_hooks$set(source = hook.i, output = hook.o, warning = hook.s,
                 message = hook.s, error = hook.s, inline = .inline.hook.tex,
                 plot = hook_plot_tex, chunk = hook.c)
}
#' @rdname output_hooks
#' @export
# Install listings-style output hooks: reuse the Sweave hooks but swap in the
# Sweavel.sty (listings-based) style file and disable prompts.
render_listings = function() {
  # Child documents inherit the parent's hooks; do nothing in child mode.
  if (child_mode()) return()
  render_sweave()
  opts_chunk$set(prompt = FALSE)
  test_latex_pkg('Sweavel', system.file('misc', 'Sweavel.sty', package = 'knitr'))
  set_header(framed = '', highlight = '\\usepackage{Sweavel}')
  invisible(NULL)
}
## may add textile, and many other markup languages
|
#' Count Features
#'
#' @description
#' Count reads associated with annotated features
#'
#' @import tibble
#' @importFrom Rsubread featureCounts
#' @importFrom purrr map imap reduce
#' @importFrom dplyr pull left_join
#' @importFrom stringr str_replace
#'
#' @param go_obj gostripes object
#' @param genome_annotation Genome annotation in GTF file format
#' @param cores Number of CPU cores available
#'
#' @details
#' Genome annotations can be found in repositories such as NCBI, UCSC, and ensembl.
#' The 'genome_annotation' file should be the same GTF used in read alignment for consistency.
#'
#' This function uses the featureCounts function from Rsubread to summarize counts to annotated features.
#' First, sequenced fragments are assigned to the nearest exon if there is at least 10 overlapping bases.
#' If the fragment overlaps more than one feature, it is assigned to the feature with the largest overlap.
#' Finally, counts for all exons from the same gene are aggregated into a sum score for that gene.
#'
#' @return gostripes object with feature counts matrix
#'
#' @examples
#' R1_fastq <- system.file("extdata", "S288C_R1.fastq", package = "gostripes")
#' R2_fastq <- system.file("extdata", "S288C_R2.fastq", package = "gostripes")
#' rRNA <- system.file("extdata", "Sc_rRNA.fasta", package = "gostripes")
#' assembly <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.dna_sm.toplevel.fa", package = "gostripes")
#' annotation <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.99.gtf", package = "gostripes")
#'
#' sample_sheet <- tibble::tibble(
#' "sample_name" = "stripeseq", "replicate_ID" = 1,
#' "R1_read" = R1_fastq, "R2_read" = R2_fastq
#' )
#'
#' go_object <- gostripes(sample_sheet) %>%
#' process_reads("./scratch/cleaned_fastq", rRNA) %>%
#' fastq_quality("./scratch/fastqc_reports") %>%
#' genome_index(assembly, annotation, "./scratch/genome_index") %>%
#' align_reads("./scratch/aligned") %>%
#' process_bams("./scratch/cleaned_bams") %>%
#' count_features(annotation)
#'
#' @rdname count_features-function
#'
#' @export
count_features <- function(go_obj, genome_annotation, cores = 1) {
  ## Check validity of input.
  if (!is(go_obj, "gostripes")) stop("go_obj should be a gostripes object")
  if (!is(genome_annotation, "character")) stop("genome_annotation must be a character string")
  if (!file.exists(genome_annotation)) stop("genome_annotation file could not be found")
  if (!is(cores, "numeric")) stop("cores should be a positive integer")
  ## Scalar checks use short-circuit `||`; the original elementwise `|`
  ## only worked here because `cores` is length 1.
  if (cores < 1 || cores %% 1 != 0) stop("cores should be a positive integer")
  ## Print out some information on feature counting.
  message(
    "\n## Feature Counting\n",
    "##\n",
    "## Annotation: ", genome_annotation, "\n",
    "## Cores: ", cores, "\n",
    "##\n",
    "## Feature Count Settings:\n",
    "## - Assign fragments to exon \n",
    "## - Summarize fragment counts by gene \n",
    "## - Fragments must overlap feature at least 10 bases \n",
    "## - Fragments are assigned to feature with largest overlap \n",
    "...Started counting features"
  )
  ## Separate paired-end and single-end reads: split the sample sheet by
  ## seq_mode and map each sample to its soft-clipped BAM path.
  seq_status <- go_obj@sample_sheet %>%
    split(.$seq_mode) %>%
    map(function(x) {
      samp_names <- pull(x, "sample_name")
      samp_names <- file.path(go_obj@settings$bam_dir, paste0("soft_", samp_names, ".bam"))
      return(samp_names)
    })
  ## Run featureCounts once per sequencing mode. The two modes share all
  ## settings except pairedness (and 3' read extension for single-end), so
  ## build a common argument list instead of duplicating the whole call.
  counts <- imap(seq_status, function(bams, seq_mode) {
    common_args <- list(
      files = bams,
      annot.ext = genome_annotation,
      isGTFAnnotationFile = TRUE,
      GTF.featureType = "exon",
      GTF.attrType = "gene_id",
      useMetaFeatures = TRUE,
      allowMultiOverlap = FALSE,
      minOverlap = 10,
      largestOverlap = TRUE,
      strandSpecific = 1,
      nthreads = cores
    )
    # Single-end reads are extended 200 bases at the 3' end (readExtension3),
    # as in the original single-end branch.
    mode_args <- if (seq_mode == "paired") {
      list(isPairedEnd = TRUE)
    } else {
      list(isPairedEnd = FALSE, readExtension3 = 200)
    }
    # featureCounts prints progress to the console; capture it to keep logs clean.
    capture.output(
      feature_counts <- do.call(featureCounts, c(common_args, mode_args))
    )
    # Extract feature counts and remove .bam from sample names.
    feature_counts <- feature_counts$counts %>%
      as_tibble(.name_repair = "unique", rownames = "gene_id")
    colnames(feature_counts) <- str_replace(colnames(feature_counts), "\\.bam$", "")
    return(feature_counts)
  })
  ## Merge counts from the paired-end and single-end runs on gene_id.
  counts <- reduce(counts, left_join, by = "gene_id")
  message("...Finished counting features!")
  ## Add counts back to gostripes object.
  go_obj@feature_counts <- counts
  return(go_obj)
}
#' Export Feature Counts
#'
#' @description
#' Export feature counts as a table
#'
#' @import tibble
#' @importFrom dplyr pull
#'
#' @param go_obj gostripes object
#' @param outdir Output directory for table
#'
#' @return gostripes object and tab separated table of feature counts.
#'
#' @examples
#' R1_fastq <- system.file("extdata", "S288C_R1.fastq", package = "gostripes")
#' R2_fastq <- system.file("extdata", "S288C_R2.fastq", package = "gostripes")
#' rRNA <- system.file("extdata", "Sc_rRNA.fasta", package = "gostripes")
#' assembly <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.dna_sm.toplevel.fa", package = "gostripes")
#' annotation <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.99.gtf", package = "gostripes")
#'
#' sample_sheet <- tibble::tibble(
#' "sample_name" = "stripeseq", "replicate_ID" = 1,
#' "R1_read" = R1_fastq, "R2_read" = R2_fastq
#' )
#'
#' go_object <- gostripes(sample_sheet) %>%
#' process_reads("./scratch/cleaned_fastq", rRNA) %>%
#' fastq_quality("./scratch/fastqc_reports") %>%
#' genome_index(assembly, annotation, "./scratch/genome_index") %>%
#' align_reads("./scratch/aligned") %>%
#' process_bams("./scratch/cleaned_bams") %>%
#' count_features(annotation) %>%
#' export_counts("./scratch/counts")
#'
#' @rdname export_counts-function
#'
#' @export
export_counts <- function(go_obj, outdir) {
  ## Validate inputs before touching the file system.
  if (!is(go_obj, "gostripes")) stop("go_obj should be a gostripes object")
  if (!is(outdir, "character")) stop("outdir should be a character string")
  ## Create the output directory if needed.
  if (!dir.exists(outdir)) dir.create(outdir, recursive = TRUE)
  ## Report what is about to happen.
  message(
    "\n## Exporting Feature Counts\n", "##\n",
    "## Output Directory: ", outdir, "\n"
  )
  ## Write the feature counts as a tab-separated table.
  message("...Exporting feature counts table")
  outfile <- file.path(outdir, "feature_counts.tsv")
  write.table(
    go_obj@feature_counts, outfile,
    col.names = TRUE, row.names = FALSE, sep = "\t", quote = FALSE
  )
  message("...Finished exporting feature counts table")
  return(go_obj)
}
| /R/feature_count.R | no_license | rpolicastro/gostripes | R | false | false | 6,923 | r |
#' Count Features
#'
#' @description
#' Count reads associated with annotated features
#'
#' @import tibble
#' @importFrom Rsubread featureCounts
#' @importFrom purrr map imap reduce
#' @importFrom dplyr pull left_join
#' @importFrom stringr str_replace
#'
#' @param go_obj gostripes object
#' @param genome_annotation Genome annotation in GTF file format
#' @param cores Number of CPU cores available
#'
#' @details
#' Genome annotations can be found in repositories such as NCBI, UCSC, and ensembl.
#' The 'genome_annotation' file should be the same GTF used in read alignment for consistency.
#'
#' This function uses the featureCounts function from Rsubread to summarize counts to annotated features.
#' First, sequenced fragments are assigned to the nearest exon if there is at least 10 overlapping bases.
#' If the fragment overlaps more than one feature, it is assigned to the feature with the largest overlap.
#' Finally, counts for all exons from the same gene are aggregated into a sum score for that gene.
#'
#' @return gostripes object with feature counts matrix
#'
#' @examples
#' R1_fastq <- system.file("extdata", "S288C_R1.fastq", package = "gostripes")
#' R2_fastq <- system.file("extdata", "S288C_R2.fastq", package = "gostripes")
#' rRNA <- system.file("extdata", "Sc_rRNA.fasta", package = "gostripes")
#' assembly <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.dna_sm.toplevel.fa", package = "gostripes")
#' annotation <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.99.gtf", package = "gostripes")
#'
#' sample_sheet <- tibble::tibble(
#' "sample_name" = "stripeseq", "replicate_ID" = 1,
#' "R1_read" = R1_fastq, "R2_read" = R2_fastq
#' )
#'
#' go_object <- gostripes(sample_sheet) %>%
#' process_reads("./scratch/cleaned_fastq", rRNA) %>%
#' fastq_quality("./scratch/fastqc_reports") %>%
#' genome_index(assembly, annotation, "./scratch/genome_index") %>%
#' align_reads("./scratch/aligned") %>%
#' process_bams("./scratch/cleaned_bams") %>%
#' count_features(annotation)
#'
#' @rdname count_features-function
#'
#' @export
count_features <- function(go_obj, genome_annotation, cores = 1) {
  ## Check validity of input.
  if (!is(go_obj, "gostripes")) stop("go_obj should be a gostripes object")
  if (!is(genome_annotation, "character")) stop("genome_annotation must be a character string")
  if (!file.exists(genome_annotation)) stop("genome_annotation file could not be found")
  if (!is(cores, "numeric")) stop("cores should be a positive integer")
  ## Scalar checks use short-circuit `||`; the original elementwise `|`
  ## only worked here because `cores` is length 1.
  if (cores < 1 || cores %% 1 != 0) stop("cores should be a positive integer")
  ## Print out some information on feature counting.
  message(
    "\n## Feature Counting\n",
    "##\n",
    "## Annotation: ", genome_annotation, "\n",
    "## Cores: ", cores, "\n",
    "##\n",
    "## Feature Count Settings:\n",
    "## - Assign fragments to exon \n",
    "## - Summarize fragment counts by gene \n",
    "## - Fragments must overlap feature at least 10 bases \n",
    "## - Fragments are assigned to feature with largest overlap \n",
    "...Started counting features"
  )
  ## Separate paired-end and single-end reads: split the sample sheet by
  ## seq_mode and map each sample to its soft-clipped BAM path.
  seq_status <- go_obj@sample_sheet %>%
    split(.$seq_mode) %>%
    map(function(x) {
      samp_names <- pull(x, "sample_name")
      samp_names <- file.path(go_obj@settings$bam_dir, paste0("soft_", samp_names, ".bam"))
      return(samp_names)
    })
  ## Run featureCounts once per sequencing mode. The two modes share all
  ## settings except pairedness (and 3' read extension for single-end), so
  ## build a common argument list instead of duplicating the whole call.
  counts <- imap(seq_status, function(bams, seq_mode) {
    common_args <- list(
      files = bams,
      annot.ext = genome_annotation,
      isGTFAnnotationFile = TRUE,
      GTF.featureType = "exon",
      GTF.attrType = "gene_id",
      useMetaFeatures = TRUE,
      allowMultiOverlap = FALSE,
      minOverlap = 10,
      largestOverlap = TRUE,
      strandSpecific = 1,
      nthreads = cores
    )
    # Single-end reads are extended 200 bases at the 3' end (readExtension3),
    # as in the original single-end branch.
    mode_args <- if (seq_mode == "paired") {
      list(isPairedEnd = TRUE)
    } else {
      list(isPairedEnd = FALSE, readExtension3 = 200)
    }
    # featureCounts prints progress to the console; capture it to keep logs clean.
    capture.output(
      feature_counts <- do.call(featureCounts, c(common_args, mode_args))
    )
    # Extract feature counts and remove .bam from sample names.
    feature_counts <- feature_counts$counts %>%
      as_tibble(.name_repair = "unique", rownames = "gene_id")
    colnames(feature_counts) <- str_replace(colnames(feature_counts), "\\.bam$", "")
    return(feature_counts)
  })
  ## Merge counts from the paired-end and single-end runs on gene_id.
  counts <- reduce(counts, left_join, by = "gene_id")
  message("...Finished counting features!")
  ## Add counts back to gostripes object.
  go_obj@feature_counts <- counts
  return(go_obj)
}
#' Export Feature Counts
#'
#' @description
#' Export feature counts as a table
#'
#' @import tibble
#' @importFrom dplyr pull
#'
#' @param go_obj gostripes object
#' @param outdir Output directory for table
#'
#' @return gostripes object and tab separated table of feature counts.
#'
#' @examples
#' R1_fastq <- system.file("extdata", "S288C_R1.fastq", package = "gostripes")
#' R2_fastq <- system.file("extdata", "S288C_R2.fastq", package = "gostripes")
#' rRNA <- system.file("extdata", "Sc_rRNA.fasta", package = "gostripes")
#' assembly <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.dna_sm.toplevel.fa", package = "gostripes")
#' annotation <- system.file("extdata", "Saccharomyces_cerevisiae.R64-1-1.99.gtf", package = "gostripes")
#'
#' sample_sheet <- tibble::tibble(
#' "sample_name" = "stripeseq", "replicate_ID" = 1,
#' "R1_read" = R1_fastq, "R2_read" = R2_fastq
#' )
#'
#' go_object <- gostripes(sample_sheet) %>%
#' process_reads("./scratch/cleaned_fastq", rRNA) %>%
#' fastq_quality("./scratch/fastqc_reports") %>%
#' genome_index(assembly, annotation, "./scratch/genome_index") %>%
#' align_reads("./scratch/aligned") %>%
#' process_bams("./scratch/cleaned_bams") %>%
#' count_features(annotation) %>%
#' export_counts("./scratch/counts")
#'
#' @rdname export_counts-function
#'
#' @export
export_counts <- function(go_obj, outdir) {
  ## Validate inputs before touching the file system.
  if (!is(go_obj, "gostripes")) stop("go_obj should be a gostripes object")
  if (!is(outdir, "character")) stop("outdir should be a character string")
  ## Create the output directory if needed.
  if (!dir.exists(outdir)) dir.create(outdir, recursive = TRUE)
  ## Report what is about to happen.
  message(
    "\n## Exporting Feature Counts\n", "##\n",
    "## Output Directory: ", outdir, "\n"
  )
  ## Write the feature counts as a tab-separated table.
  message("...Exporting feature counts table")
  outfile <- file.path(outdir, "feature_counts.tsv")
  write.table(
    go_obj@feature_counts, outfile,
    col.names = TRUE, row.names = FALSE, sep = "\t", quote = FALSE
  )
  message("...Finished exporting feature counts table")
  return(go_obj)
}
|
#' Calculate summary time-series of master dataset for variable of interest
#'
#' @param master data.frame, provided in package data
#' @param column unquoted column name, c(capacity, generation, lcoe)
#' @param weights optional vector of weights (currently unused; lcoe is weighted by plant capacity internally)
#' @return data.frame using keys (oc, fg, yr), reporting yearly averages for column-variable and ultimate averages for missing (oc, fg, yr) rows
#' @export
summaryCalc <- function(master, column, weights=NULL) {
  # NOTE(review): `weights` is accepted but never used; weighting is hard-coded
  # below (lcoe is capacity-weighted, every other column is unweighted).
  column <- enquo(column) # convert unquoted column name to quosure
  # Average value for each (overnightcategory, fuel.general, yr) cell.
  # "Unweighted" means a weight vector of 1s the same length as the group.
  summary.avg.yr <- master %>%
    group_by(yr, overnightcategory, fuel.general) %>%
    summarise(column.avg.yr = ifelse(quo_name(column) != "lcoe",
                                     stats::weighted.mean((!!column), rep(1, n())),
                                     stats::weighted.mean((!!column), capacity))) %>%
    ungroup()
  # Grab the (oc, fg) combos we have a mapping for.
  # NOTE(review): `mapping` is looked up in the enclosing environment,
  # presumably package data — confirm.
  mapping <- mapping %>%
    select(overnightcategory, fuel.general) %>%
    distinct() %>%
    mutate_all(as.character)
  # Force a row for every possible (oc, fg, yr) combo — missing cells get NA
  # and are filled with the (oc, fg)-average below — then keep only combos
  # that appear in the mapping.
  summary.complete.yr <- summary.avg.yr %>%
    complete(overnightcategory, fuel.general, yr) %>%
    inner_join(mapping, by=c("overnightcategory", "fuel.general"))
  # Build two series: the actual yearly values, and (in `missing`) the
  # (oc, fg)-period mean wherever the yearly value is NA.
  oc.fg.avg <- summary.complete.yr %>%
    group_by(fuel.general, overnightcategory) %>%
    summarise(column.avg = mean(column.avg.yr, na.rm=TRUE)) %>%
    ungroup()
  summary.complete.yr.na <- summary.complete.yr %>%
    filter(is.na(column.avg.yr)) %>%
    select(-column.avg.yr) %>%
    left_join(oc.fg.avg, by=c("fuel.general", "overnightcategory")) %>%
    rename(missing=column.avg)
  # Return the merged result visibly. The original ended in an assignment,
  # which returned the same value but invisibly.
  summary.complete.yr %>%
    left_join(summary.complete.yr.na, by=c("yr", "fuel.general", "overnightcategory")) %>%
    rename(!!quo_name(column) := column.avg.yr) # report average values under colname indicated as input
}
#' Plot the summary time-series for variable of interest
#'
#' @param df data.frame, summary data produced by summaryCalc()
#' @param column unquoted column name
#' @param units string, Units of variable
#' @return time-series plot faceted by oc & fg
#' @export
summaryPlot <- function(df, column, units) {
  column <- enquo(column)
  # Line chart of the yearly series, with points marking the period average
  # used to fill missing data (the `missing` column from summaryCalc()).
  # aes_string()/quo_text() are deprecated since ggplot2 3.0; inject the
  # captured column quosure directly with !! instead.
  ggplot(df, aes(x=yr)) + ylab(units) + ggtitle(paste0("average plant ", quo_name(column))) +
    geom_line(aes(y = !!column)) +
    geom_point(aes(y = missing)) +
    facet_wrap(~fuel.general + overnightcategory, scales="free") +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
}
| /R/summary.R | no_license | JGCRI/electricity_hindcasting_data | R | false | false | 3,100 | r | #' Calculate summary time-series of master dataset for variable of interest
#'
#' @param master data.frame, provided in package data
#' @param column unquoted column name, c(capacity, generation, lcoe)
#' @param weights optional vector of weights (currently unused; lcoe is weighted by plant capacity internally)
#' @return data.frame using keys (oc, fg, yr), reporting yearly averages for column-variable and ultimate averages for missing (oc, fg, yr) rows
#' @export
summaryCalc <- function(master, column, weights=NULL) {
  # NOTE(review): `weights` is accepted but never used; weighting is hard-coded
  # below (lcoe is capacity-weighted, every other column is unweighted).
  column <- enquo(column) # convert unquoted column name to quosure
  # Average value for each (overnightcategory, fuel.general, yr) cell.
  # "Unweighted" means a weight vector of 1s the same length as the group.
  summary.avg.yr <- master %>%
    group_by(yr, overnightcategory, fuel.general) %>%
    summarise(column.avg.yr = ifelse(quo_name(column) != "lcoe",
                                     stats::weighted.mean((!!column), rep(1, n())),
                                     stats::weighted.mean((!!column), capacity))) %>%
    ungroup()
  # Grab the (oc, fg) combos we have a mapping for.
  # NOTE(review): `mapping` is looked up in the enclosing environment,
  # presumably package data — confirm.
  mapping <- mapping %>%
    select(overnightcategory, fuel.general) %>%
    distinct() %>%
    mutate_all(as.character)
  # Force a row for every possible (oc, fg, yr) combo — missing cells get NA
  # and are filled with the (oc, fg)-average below — then keep only combos
  # that appear in the mapping.
  summary.complete.yr <- summary.avg.yr %>%
    complete(overnightcategory, fuel.general, yr) %>%
    inner_join(mapping, by=c("overnightcategory", "fuel.general"))
  # Build two series: the actual yearly values, and (in `missing`) the
  # (oc, fg)-period mean wherever the yearly value is NA.
  oc.fg.avg <- summary.complete.yr %>%
    group_by(fuel.general, overnightcategory) %>%
    summarise(column.avg = mean(column.avg.yr, na.rm=TRUE)) %>%
    ungroup()
  summary.complete.yr.na <- summary.complete.yr %>%
    filter(is.na(column.avg.yr)) %>%
    select(-column.avg.yr) %>%
    left_join(oc.fg.avg, by=c("fuel.general", "overnightcategory")) %>%
    rename(missing=column.avg)
  # Return the merged result visibly. The original ended in an assignment,
  # which returned the same value but invisibly.
  summary.complete.yr %>%
    left_join(summary.complete.yr.na, by=c("yr", "fuel.general", "overnightcategory")) %>%
    rename(!!quo_name(column) := column.avg.yr) # report average values under colname indicated as input
}
#' Plot the summary time-series for variable of interest
#'
#' @param df data.frame, summary data produced by summaryCalc()
#' @param column unquoted column name
#' @param units string, Units of variable
#' @return time-series plot faceted by oc & fg
#' @export
summaryPlot <- function(df, column, units) {
  column <- enquo(column)
  # Line chart of the yearly series, with points marking the period average
  # used to fill missing data (the `missing` column from summaryCalc()).
  # aes_string()/quo_text() are deprecated since ggplot2 3.0; inject the
  # captured column quosure directly with !! instead.
  ggplot(df, aes(x=yr)) + ylab(units) + ggtitle(paste0("average plant ", quo_name(column))) +
    geom_line(aes(y = !!column)) +
    geom_point(aes(y = missing)) +
    facet_wrap(~fuel.general + overnightcategory, scales="free") +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
}
|
# Create an EMM for a single variable and experiment.
#
# Parameters
# ----------
# lmem_for_one_experiment: lmerModLmerTest
#   LMEM for a single variable and experiment.
#
# Returns
# -------
# emmGrid
#   EMM for the LMEM
emm <- function(lmem_for_one_experiment) {
  # Direct call, equivalent to piping the model into emmeans().
  emmeans::emmeans(lmem_for_one_experiment, ~ density * food)
}
# Create a named list of EMMs for LMEMs.
#
# Parameters
# ----------
# named_lmems: list
#   A named list of lmerModLmerTest objects for a single dependent variable,
#   with experiment names as element names.
#
# Returns
# -------
# list
#   List of emmGrid objects with same names as named_lmems, holding contrast data.
make_named_emms <- function(named_lmems) {
  # The four experiments, in the same order as the original hand-written code
  # (dend1, dend2, lyt1, lyt2); looping removes the fourfold duplication.
  experiment_names <- c(
    settings("experiment_level_dend1"),
    settings("experiment_level_dend2"),
    settings("experiment_level_lyt1"),
    settings("experiment_level_lyt2")
  )
  emms <- list()
  for (experiment_name in experiment_names) {
    emms[[experiment_name]] <- emm(named_lmems[[experiment_name]])
  }
  emms
}
# Create a data.frame with EMM information for a single experiment,
# tagged with its species and experiment name as the leading columns.
emm_df <- function(emm_for_one_experiment, species_name, experiment_name) {
  out <- as.data.frame(emm_for_one_experiment)
  out <- dplyr::mutate(out, species = species_name, experiment = experiment_name)
  dplyr::relocate(out, .data$species, .data$experiment)
}
# Create a named list of data.frame objects holding EMM info.
#
# Parameters
# ----------
# named_emms: list
#   A named list of emmGrid objects for a single dependent variable, with
#   experiment names as element names.
#
# Returns
# -------
# list
#   List of data.frame objects with same names as named_emms.
make_named_emm_dfs <- function(named_emms) {
  # Map each experiment to its species; element order matches the original
  # hand-written sequence (dend1, dend2, lyt1, lyt2). Looping removes the
  # fourfold duplication.
  species_for_experiment <- c(
    settings("species_level_dend"), settings("species_level_dend"),
    settings("species_level_lyt"), settings("species_level_lyt")
  )
  names(species_for_experiment) <- c(
    settings("experiment_level_dend1"), settings("experiment_level_dend2"),
    settings("experiment_level_lyt1"), settings("experiment_level_lyt2")
  )
  emm_dfs <- list()
  for (experiment_name in names(species_for_experiment)) {
    emm_dfs[[experiment_name]] <- emm_df(
      named_emms[[experiment_name]],
      species_for_experiment[[experiment_name]],
      experiment_name
    )
  }
  emm_dfs
}
# Combine a list of data.frames with EMM info into one big data.frame.
join_emm_dfs <- function(emm_df_list) {
  # NOTE(review): join_all is not namespaced here — presumably plyr::join_all,
  # attached elsewhere; confirm. Every column appears in `by`, so rows are
  # matched on their full contents.
  join_all(emm_df_list,
    by = c(
      "species", "experiment", "density", "food", "emmean", "SE",
      "df", "lower.CL", "upper.CL"
    )
  )
}
# Combine a data.frame of PORL EMM info with one of SL EMM info.
# Overlapping measurement columns get ".po" / ".sl" suffixes.
combine_po_and_sl_emm_df <- function(po_emm_df, sl_emm_df) {
  key_cols <- c("species", "experiment", "density", "food")
  dplyr::full_join(po_emm_df, sl_emm_df, by = key_cols, suffix = c(".po", ".sl"))
}
| /code/analysis/estimates/emms.R | permissive | PeterNilssonBio/NilssonPernet2022 | R | false | false | 3,207 | r | # Create an EMM for a single variable and experiment.
#
# Parameters
# ----------
# lmem_for_one_experiment: lmerModLmerTest
# LMEM for a single variable and experiment.
#
# Returns
# -------
# emmGrid
# EMM for the LMEM
emm <- function(lmem_for_one_experiment) {
  # Direct call, equivalent to piping the model into emmeans().
  emmeans::emmeans(lmem_for_one_experiment, ~ density * food)
}
# Create a named list of EMMs for LMEMs.
#
# Parameters
# ----------
# named_lmems: list
#   A named list of lmerModLmerTest objects for a single dependent variable,
#   with experiment names as element names.
#
# Returns
# -------
# list
#   List of emmGrid objects with same names as named_lmems, holding contrast data.
make_named_emms <- function(named_lmems) {
  # The four experiments, in the same order as the original hand-written code
  # (dend1, dend2, lyt1, lyt2); looping removes the fourfold duplication.
  experiment_names <- c(
    settings("experiment_level_dend1"),
    settings("experiment_level_dend2"),
    settings("experiment_level_lyt1"),
    settings("experiment_level_lyt2")
  )
  emms <- list()
  for (experiment_name in experiment_names) {
    emms[[experiment_name]] <- emm(named_lmems[[experiment_name]])
  }
  emms
}
# Create a data.frame with EMM information for a single experiment,
# tagged with its species and experiment name as the leading columns.
emm_df <- function(emm_for_one_experiment, species_name, experiment_name) {
  out <- as.data.frame(emm_for_one_experiment)
  out <- dplyr::mutate(out, species = species_name, experiment = experiment_name)
  dplyr::relocate(out, .data$species, .data$experiment)
}
# Create a named list of data.frame objects holding EMM info.
#
# Parameters
# ----------
# named_emms: list
#   A named list of emmGrid objects for a single dependent variable, with
#   experiment names as element names.
#
# Returns
# -------
# list
#   List of data.frame objects with same names as named_emms.
make_named_emm_dfs <- function(named_emms) {
  # Map each experiment to its species; element order matches the original
  # hand-written sequence (dend1, dend2, lyt1, lyt2). Looping removes the
  # fourfold duplication.
  species_for_experiment <- c(
    settings("species_level_dend"), settings("species_level_dend"),
    settings("species_level_lyt"), settings("species_level_lyt")
  )
  names(species_for_experiment) <- c(
    settings("experiment_level_dend1"), settings("experiment_level_dend2"),
    settings("experiment_level_lyt1"), settings("experiment_level_lyt2")
  )
  emm_dfs <- list()
  for (experiment_name in names(species_for_experiment)) {
    emm_dfs[[experiment_name]] <- emm_df(
      named_emms[[experiment_name]],
      species_for_experiment[[experiment_name]],
      experiment_name
    )
  }
  emm_dfs
}
# Combine a list of data.frames with EMM info into one big data.frame.
join_emm_dfs <- function(emm_df_list) {
  # NOTE(review): join_all is not namespaced here — presumably plyr::join_all,
  # attached elsewhere; confirm. Every column appears in `by`, so rows are
  # matched on their full contents.
  join_all(emm_df_list,
    by = c(
      "species", "experiment", "density", "food", "emmean", "SE",
      "df", "lower.CL", "upper.CL"
    )
  )
}
# Combine a data.frame of PORL EMM info with one of SL EMM info.
# Overlapping measurement columns get ".po" / ".sl" suffixes.
combine_po_and_sl_emm_df <- function(po_emm_df, sl_emm_df) {
  key_cols <- c("species", "experiment", "density", "food")
  dplyr::full_join(po_emm_df, sl_emm_df, by = key_cols, suffix = c(".po", ".sl"))
}
|
# Demo script for the TreatmentSelection code: build treatment-selection
# objects for two markers, then plot, evaluate, compare, and calibrate them.
source("main.R")
load("../simData_example.Rdata") #loads example data set called simData
# Outcome, treatment assignment, and the two candidate markers.
D <- tsdata$event
T <- tsdata$trt
Y1 <- tsdata$Y1
Y2 <- tsdata$Y2
#trtsel objects
# NOTE(review): one call uses `cohort.type=` and the other `study.design=` —
# presumably the same argument across package versions; confirm.
trtsel.Y1 <- TrtSel(disease = D, trt = T, marker = Y1, cohort.type="randomized cohort")
trtsel.Y1
trtsel.Y2 <- TrtSel(disease = D, trt = T, marker = Y2, study.design="randomized cohort")
trtsel.Y2
#plot: exercise the different plot.type / ci options
tmp <- plot(trtsel.Y1, plot.type = "cdf", bootstraps = 50)
head(tmp)
plot(trtsel.Y1, bootstraps = 200, ci = "vertical", plot.type = "treatment effect")
plot(trtsel.Y1, plot.type = "cdf", conf.bands = FALSE)#, fixed.values = seq(from=0.01, to=.4, by=.01))
plot(trtsel.Y1, plot.type = "risk", ylim = c(0, .8), main = "NEW MAIN HERE", bootstraps = 100 )
plot(trtsel.Y1, plot.type = "risk", ci = "horizontal" , fixed.values = c(.2, .25, .3, .35, .4))
plot(trtsel.Y2, bootstraps = 500)
plot(trtsel.Y2)
#eval: performance measures with and without bootstrap CIs
eval.Y1 <- evalTrtSel(trtsel.Y1, bootstraps= 100)
eval.Y1
eval.Y2 <- evalTrtSel(trtsel.Y2, bootstraps = 0)
eval.Y2
#compare the two markers head-to-head
mycompare <- compare(trtsel1 = trtsel.Y1, trtsel2 = trtsel.Y2, bootstraps = 100)
mycompare
tmp <- plot(mycompare, bootstraps = 100)
#calibrate
cali.coh.Y1 <- calibrate(trtsel.Y1, plot.type = "risk.t0")
cali.coh.Y2 <- calibrate(trtsel.Y2)
##### BELOW is not functional anymore, I have been using it to check the code
### different sample designs: cohort vs (stratified) nested case-control
source("../trtsel_Aug2013/sim_functions.R")
load("../trtsel_Aug2013/my_sim_FY.Rdata")
alpha.strong <- c( -1.2402598, -0.6910426, 0.6, -2.25) #need to provide this for the bounded marker
#y.strong <- seq( -15, 15, by = .01)
n = 50000
# Simulate a cohort of n subjects from the saved marker distributions.
simData <- sim.data(n=n, d.vec = d.vec,
                    grid.y = grid.y, FY.11 = FY.11, FY.10 = FY.10, FY.01 = FY.01, FY.00 = FY.00)
nmatch = 1
D <- simData$D
T <- simData$T
Y1 <- simData$Y1
Y2 <- simData$Y2
# generate case-control subset (sample based on D only)
S <- NULL
S[D==1] <- 1 #select all cases
# sample nmatch controls per case
numcontrols <- length(D[D==1])*nmatch
S[D==0] <- sample(c(rep(1,numcontrols),rep(0,length(D[D==0])-numcontrols)))
myD<-D[S==1]; myT<-T[S==1]; myY<-Y2[S==1]
# full-cohort object for comparison against the subsampled designs
my.trtsel<-trtsel(event="D",trt="T",marker="Y2", data = simData,
                  default.trt = "trt none")
cc.trtsel<-trtsel(event="D",trt="T",marker="Y2", data = simData[S==1,],
                  cohort.attributes = c(n, mean(T), mean(D), 1),
                  study.design="nested case control",
                  default.trt = "trt none")
#rho = c(mean(D), 1000000, mean(D[T==0]), mean(D[T==1]), nmatch, sum(T==1),0)
plot(cc.trtsel, bootstraps=500, plot.type = "risk", trt.names = c("marshall", "brownsworth"))
mean(1-myT[myD==1])
##STRATIFIED CASE CONTROL
nmatch = 1
# generate case-control subset (sample based on R and T)
S <- NULL
S[D==1] <- 1 #select all cases
# sample controls separately within each treatment arm
numcontrols <- length(D[D==1 & T==0])*nmatch
#numcontrols <- sum(myconts.t0)*nmatch
S[D==0 & T==0] <- sample(c(rep(1,numcontrols),rep(0,length(D[D==0 & T==0])-numcontrols)))
#numcontrols <- sum(myconts.t0)*nmatch
numcontrols <- length(D[D==1 & T==1])*nmatch
S[D==0 & T==1] <- sample(c(rep(1,numcontrols),rep(0,length(D[D==0 & T==1])-numcontrols)))
# fit risk model
myD<-D[S==1]; myT<-T[S==1]; myY<-Y2[S==1]
#rho[1] = Pr(D = 1 | T = 0)
#rho[2] = Pr(D = 1 | T = 1)
# N.t0.r0 <- rho[3]
# N.t1.r0 <- rho[4]
# N.t1 <- rho[5]
# N <- rho[6]
scc.trtsel<-trtsel(event="D",trt="T",marker="Y2", data = simData[S==1,],
                   cohort.attributes = c(n, mean(D==0 & T==0), mean(D==1 & T==0), mean(D==0 & T==1), 1,1),
                   study.design="stratified nested case control",
                   default.trt = "trt none")
# Compare performance estimates across the three sampling designs.
coh <- eval.trtsel(my.trtsel, bootstraps = 0)#500)
cc <- eval.trtsel(cc.trtsel, bootstraps = 0)#500)
scc <- eval.trtsel(scc.trtsel, bootstraps = 0)#500)
rbind(coh$estimates,
      cc$estimates,
      scc$estimates)
| /inst/example/example.R | no_license | mdbrown/TreatmentSelection | R | false | false | 3,827 | r |
# Interactive example / smoke-test script for the TreatmentSelection functions.
# Requires main.R and the example .Rdata files to be present at the paths below.
source("main.R")
load("../simData_example.Rdata") #loads example data set called simData
D <- tsdata$event
T <- tsdata$trt
Y1 <- tsdata$Y1
Y2 <- tsdata$Y2
#trtsel objects
trtsel.Y1 <- TrtSel(disease = D, trt = T, marker = Y1, cohort.type="randomized cohort")
trtsel.Y1
trtsel.Y2 <- TrtSel(disease = D, trt = T, marker = Y2, study.design="randomized cohort")
trtsel.Y2
#plot: exercise each plot.type / ci option
tmp <- plot(trtsel.Y1, plot.type = "cdf", bootstraps = 50)
head(tmp)
plot(trtsel.Y1, bootstraps = 200, ci = "vertical", plot.type = "treatment effect")
plot(trtsel.Y1, plot.type = "cdf", conf.bands = FALSE)#, fixed.values = seq(from=0.01, to=.4, by=.01))
plot(trtsel.Y1, plot.type = "risk", ylim = c(0, .8), main = "NEW MAIN HERE", bootstraps = 100 )
plot(trtsel.Y1, plot.type = "risk", ci = "horizontal" , fixed.values = c(.2, .25, .3, .35, .4))
plot(trtsel.Y2, bootstraps = 500)
plot(trtsel.Y2)
#eval
eval.Y1 <- evalTrtSel(trtsel.Y1, bootstraps= 100)
eval.Y1
eval.Y2 <- evalTrtSel(trtsel.Y2, bootstraps = 0)
eval.Y2
#compare the two markers
mycompare <- compare(trtsel1 = trtsel.Y1, trtsel2 = trtsel.Y2, bootstraps = 100)
mycompare
tmp <- plot(mycompare, bootstraps = 100)
#calibrate
cali.coh.Y1 <- calibrate(trtsel.Y1, plot.type = "risk.t0")
cali.coh.Y2 <- calibrate(trtsel.Y2)
##### BELOW is not functional anymore, I have been using it to check the code
### different sample designs
source("../trtsel_Aug2013/sim_functions.R")
load("../trtsel_Aug2013/my_sim_FY.Rdata")
alpha.strong <- c( -1.2402598, -0.6910426, 0.6, -2.25) #need to provide this for the bounded marker
#y.strong <- seq( -15, 15, by = .01)
n = 50000
# sim.data and the FY.* / d.vec / grid.y objects come from the sourced/loaded files above
simData <- sim.data(n=n, d.vec = d.vec,
grid.y = grid.y, FY.11 = FY.11, FY.10 = FY.10, FY.01 = FY.01, FY.00 = FY.00)
nmatch = 1
D <- simData$D
T <- simData$T
Y1 <- simData$Y1
Y2 <- simData$Y2
# generate case-control subset (sample based on D only)
S <- NULL
S[D==1] <- 1 #select all cases
numcontrols <- length(D[D==1])*nmatch  # nmatch controls per case
S[D==0] <- sample(c(rep(1,numcontrols),rep(0,length(D[D==0])-numcontrols)))
myD<-D[S==1]; myT<-T[S==1]; myY<-Y2[S==1]
# Full-cohort fit (benchmark) vs nested case-control fit
my.trtsel<-trtsel(event="D",trt="T",marker="Y2", data = simData,
default.trt = "trt none")
cc.trtsel<-trtsel(event="D",trt="T",marker="Y2", data = simData[S==1,],
cohort.attributes = c(n, mean(T), mean(D), 1),
study.design="nested case control",
default.trt = "trt none")
#rho = c(mean(D), 1000000, mean(D[T==0]), mean(D[T==1]), nmatch, sum(T==1),0)
plot(cc.trtsel, bootstraps=500, plot.type = "risk", trt.names = c("marshall", "brownsworth"))
mean(1-myT[myD==1])  # proportion of sampled cases with T==0
##STRATIFIED CASE CONTROL
nmatch = 1
# generate case-control subset (sample based on R and T)
S <- NULL
S[D==1] <- 1 #select all cases
numcontrols <- length(D[D==1 & T==0])*nmatch
#numcontrols <- sum(myconts.t0)*nmatch
S[D==0 & T==0] <- sample(c(rep(1,numcontrols),rep(0,length(D[D==0 & T==0])-numcontrols)))
#numcontrols <- sum(myconts.t0)*nmatch
numcontrols <- length(D[D==1 & T==1])*nmatch
S[D==0 & T==1] <- sample(c(rep(1,numcontrols),rep(0,length(D[D==0 & T==1])-numcontrols)))
# fit risk model
myD<-D[S==1]; myT<-T[S==1]; myY<-Y2[S==1]
#rho[1] = Pr(D = 1 | T = 0)
#rho[2] = Pr(D = 1 | T = 1)
# N.t0.r0 <- rho[3]
# N.t1.r0 <- rho[4]
# N.t1 <- rho[5]
# N <- rho[6]
scc.trtsel<-trtsel(event="D",trt="T",marker="Y2", data = simData[S==1,],
cohort.attributes = c(n, mean(D==0 & T==0), mean(D==1 & T==0), mean(D==0 & T==1), 1,1),
study.design="stratified nested case control",
default.trt = "trt none")
# Compare estimates across the three designs (bootstraps disabled for speed)
coh <- eval.trtsel(my.trtsel, bootstraps = 0)#500)
cc <- eval.trtsel(cc.trtsel, bootstraps = 0)#500)
scc <- eval.trtsel(scc.trtsel, bootstraps = 0)#500)
rbind(coh$estimates,
cc$estimates,
scc$estimates)
|
# Unit tests for readtext's internal utility helpers (testthat style).
# mktemp() must create files/dirs that exist on disk and are unique per call.
test_that("Test readtext:::mktemp function for test dirs",{
filename <- readtext:::mktemp()
expect_true(file.exists(filename))
filename2 <- readtext:::mktemp()
expect_true(file.exists(filename2))
expect_false(filename == filename2)
# test directory parameter
# FIX: use TRUE rather than the reassignable shorthand T
dirname <- readtext:::mktemp(directory=TRUE)
expect_true(dir.exists(dirname))
# test prefix parameter
filename <- readtext:::mktemp(prefix='testprefix')
expect_equal(
substr(basename(filename), 1, 10),
'testprefix'
)
# test that a new filename will be given if the original already exists
set.seed(0)
original_filename <- readtext:::mktemp()
set.seed(0)
new_filename <- readtext:::mktemp()
expect_false(original_filename == new_filename)
expect_true(file.exists(original_filename))
expect_true(file.exists(new_filename))
})
# Heuristic classifier: strings with path separators look like XPath expressions.
test_that("Test is_probably_xpath",{
expect_false(is_probably_xpath('A'))
expect_false(is_probably_xpath('a:what'))
expect_true(is_probably_xpath('/A/B/C'))
expect_true(is_probably_xpath('A/B/C'))
})
# Underscore-delimited filename parts become document variables, in order.
test_that("Test readtext:::getdocvarsFromFilenames for parsing filenames", {
filenames <- c("~/tmp/documents/USA_blue_horse.txt",
"~/tmp/documents/France_green_dog.txt",
"~/tmp/documents/China_red_dragon.txt",
"~/tmp/spaced words/Ireland_black_bear.txt")
df <- readtext:::getdocvarsFromFilenames(filenames,
docvarnames = c("country", "color", "animal"))
expect_equal(df$animal,
c("horse", "dog", "dragon", "bear"))
expect_equal(names(df), c("country", "color", "animal"))
expect_s3_class(df, "data.frame")
})
# file_ext returns only the last extension (so "tar.gz" -> "gz").
test_that("file_ext returns expected extensions", {
filenames <- c("~/tmp/documents/USA_blue_horse.txt",
"~/tmp/documents/France_green_dog.csv",
"~/tmp/documents/China_red_dragon.json",
"~/tmp/spaced words/Ireland_black_bear.tar.gz")
expect_equal(readtext:::file_ext(filenames),
c("txt", "csv", "json", "gz"))
})
# A missing remote file must raise an error when ignoreMissing is FALSE.
test_that("Test downloadRemote",{
expect_error(
# FIX: use FALSE rather than the reassignable shorthand F
downloadRemote('http://www.google.com/404.txt', ignoreMissing=FALSE)
)
})
| /tests/testthat/test-utils.R | no_license | leeper/readtext | R | false | false | 2,321 | r | test_that("Test readtext:::mktemp function for test dirs",{
filename <- readtext:::mktemp()
expect_true(file.exists(filename))
filename2 <- readtext:::mktemp()
expect_true(file.exists(filename2))
expect_false(filename == filename2)
# test directory parameter
dirname <- readtext:::mktemp(directory=T)
expect_true(dir.exists(dirname))
# test prefix parameter
filename <- readtext:::mktemp(prefix='testprefix')
expect_equal(
substr(basename(filename), 1, 10),
'testprefix'
)
# test that a new filename will be given if the original already exists
set.seed(0)
original_filename <- readtext:::mktemp()
set.seed(0)
new_filename <- readtext:::mktemp()
expect_false(original_filename == new_filename)
expect_true(file.exists(original_filename))
expect_true(file.exists(new_filename))
})
test_that("Test is_probably_xpath",{
expect_false(is_probably_xpath('A'))
expect_false(is_probably_xpath('a:what'))
expect_true(is_probably_xpath('/A/B/C'))
expect_true(is_probably_xpath('A/B/C'))
})
test_that("Test readtext:::getdocvarsFromFilenames for parsing filenames", {
filenames <- c("~/tmp/documents/USA_blue_horse.txt",
"~/tmp/documents/France_green_dog.txt",
"~/tmp/documents/China_red_dragon.txt",
"~/tmp/spaced words/Ireland_black_bear.txt")
df <- readtext:::getdocvarsFromFilenames(filenames,
docvarnames = c("country", "color", "animal"))
expect_equal(df$animal,
c("horse", "dog", "dragon", "bear"))
expect_equal(names(df), c("country", "color", "animal"))
expect_s3_class(df, "data.frame")
})
test_that("file_ext returns expected extensions", {
filenames <- c("~/tmp/documents/USA_blue_horse.txt",
"~/tmp/documents/France_green_dog.csv",
"~/tmp/documents/China_red_dragon.json",
"~/tmp/spaced words/Ireland_black_bear.tar.gz")
expect_equal(readtext:::file_ext(filenames),
c("txt", "csv", "json", "gz"))
})
test_that("Test downloadRemote",{
expect_error(
downloadRemote('http://www.google.com/404.txt', ignoreMissing=F)
)
})
|
# excerpts from the book: Data Visualization for Social Science A practical introduction with R and ggplot2 by Kieran Healy accessible at http://socviz.co/
# required packages
my_packages <- c("tidyverse", "broom", "coefplot", "cowplot",
"gapminder", "GGally", "ggjoy", "ggrepel", "gridExtra",
"interplot", "margins", "maps", "mapproj", "mapdata",
"MASS", "quantreg", "scales", "survey", "srvyr",
"viridis", "viridisLite", "devtools")
# install the required packages
# NOTE(review): despite the comment below, this reinstalls every run; consider
# filtering against installed.packages() first
install.packages(my_packages, repos = "http://cran.rstudio.com")
#devtools::install_github("kjhealy/socviz")
# Load the libraries
library(ggplot2)
# scatterplot: engine displacement vs highway mpg (mpg data ships with ggplot2)
ggplot(data = mpg, aes(x=displ, y=hwy))+
geom_point()
library(gapminder)
head(gapminder)
# scatterplot: GDP per capita vs life expectancy
p<-ggplot(data = gapminder, aes(x=gdpPercap, y=lifeExp))
p+geom_point()
# using the ggpubr package
library(ggpubr)
# boxplot of life expectancy by year, drawn horizontally
dat<- as.data.frame(gapminder)
str(dat)
ggboxplot(data = dat, x= "year", y="lifeExp", palette = "simpsons",
orientation="horizontal", color = "peachpuff")
| /scripts/uncategorized/book-1.R | permissive | duttashi/visualizer | R | false | false | 1,095 | r | # excerpts from the book: Data Visualization for Social Science A practical introduction with R and ggplot2 by Kieran Healy accessible at http://socviz.co/
# required packages
my_packages <- c("tidyverse", "broom", "coefplot", "cowplot",
"gapminder", "GGally", "ggjoy", "ggrepel", "gridExtra",
"interplot", "margins", "maps", "mapproj", "mapdata",
"MASS", "quantreg", "scales", "survey", "srvyr",
"viridis", "viridisLite", "devtools")
# install the required packages
# NOTE(review): this reinstalls every run; consider filtering against
# installed.packages() first
install.packages(my_packages, repos = "http://cran.rstudio.com")
#devtools::install_github("kjhealy/socviz")
# Load the libraries
library(ggplot2)
# scatterplot: engine displacement vs highway mpg (mpg data ships with ggplot2)
ggplot(data = mpg, aes(x=displ, y=hwy))+
geom_point()
library(gapminder)
head(gapminder)
# scatterplot: GDP per capita vs life expectancy
p<-ggplot(data = gapminder, aes(x=gdpPercap, y=lifeExp))
p+geom_point()
# using the ggpubr package
library(ggpubr)
# boxplot of life expectancy by year, drawn horizontally
dat<- as.data.frame(gapminder)
str(dat)
ggboxplot(data = dat, x= "year", y="lifeExp", palette = "simpsons",
orientation="horizontal", color = "peachpuff")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/body.R
\name{html}
\alias{html}
\title{Add an HTML body to a message object.}
\usage{
html(
msg,
content,
disposition = "inline",
charset = "utf-8",
encoding = "quoted-printable"
)
}
\arguments{
\item{msg}{A message object.}
\item{content}{A string of message content.}
\item{disposition}{How content is presented (Content-Disposition).}
\item{charset}{How content is encoded.}
\item{encoding}{How content is transformed to ASCII (Content-Transfer-Encoding).}
}
\value{
A message object.
}
\description{
Add an HTML body to a message object.
}
\examples{
library(magrittr)
msg <- envelope() \%>\% html("<b>Hello!</b>")
}
\seealso{
\code{\link{text}}
}
| /man/html.Rd | no_license | minghao2016/emayili | R | false | true | 745 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/body.R
\name{html}
\alias{html}
\title{Add an HTML body to a message object.}
\usage{
html(
msg,
content,
disposition = "inline",
charset = "utf-8",
encoding = "quoted-printable"
)
}
\arguments{
\item{msg}{A message object.}
\item{content}{A string of message content.}
\item{disposition}{How content is presented (Content-Disposition).}
\item{charset}{How content is encoded.}
\item{encoding}{How content is transformed to ASCII (Content-Transfer-Encoding).}
}
\value{
A message object.
}
\description{
Add an HTML body to a message object.
}
\examples{
library(magrittr)
msg <- envelope() \%>\% html("<b>Hello!</b>")
}
\seealso{
\code{\link{text}}
}
|
# Tiffanie Stone - Data Simulation - 10/24/2019
library(tidyverse)
library(ggplot2)
library(ggthemes)
#Simulating a data set using means and 95% confidence intervals for the vegetable portion of the dataset.
#Will account for annual variations and high and low income data.
#took low and high data for each food type throughout years and averaged them to create approximate mean, used range/2 as standard dev.
# 500 draws per income group per survey period (normal, mean/sd as noted above)
vegmean94 <- rnorm(n= 500, mean=c(109.87), sd = c(18.204)) #simulate avg veg consumption in 94-98
veglowinc94 <- rnorm(n=500, mean=c(105.69), sd = c(16.5994)) #simulate avg low income veg consumption in 94-98
veghighinc94 <- rnorm(n= 500, mean=c(111.76), sd = c(18.9233)) #simulate avg high income veg consumption in 94-98
vegmean03 <- rnorm(n= 500, mean=c(105.72), sd = c(23.7582)) #simulate avg veg consumption in 03-04
veglowinc03 <- rnorm(n=500, mean=c(103.77), sd = c(22.3607)) #simulate avg low income veg consumption in 03-04
veghighinc03 <- rnorm(n= 500, mean=c(106.94), sd = c(24.6526)) #simulate avg high income veg consumption in 03-04
vegmean05 <- rnorm(n= 500, mean=c(103.03), sd = c(24.1495)) #simulate avg veg consumption in 05-06
veglowinc05 <- rnorm(n=500, mean=c(104.21), sd = c(22.3607)) #simulate avg low income veg consumption in 05-06
veghighinc05 <- rnorm(n= 500, mean=c(102.48), sd = c(25.044)) #simulate avg high income veg consumption in 05-06
vegmean07 <- rnorm(n= 500, mean=c(102.76), sd = c(22.9197)) #simulate avg veg consumption in 07-08
veglowinc07 <- rnorm(n=500, mean=c(99.62), sd = c(21.2426)) #simulate avg low income veg consumption in 07-08
veghighinc07 <- rnorm(n= 500, mean=c(104.83), sd = c(24.0377)) #simulate avg high income veg consumption in 07-08
# Stack the 12 groups; order must match the year/incomelevel factors built below
simveg <- c(vegmean94, vegmean03, vegmean05, vegmean07, veglowinc94,veglowinc03,veglowinc05,
veglowinc07, veghighinc94, veghighinc03, veghighinc05,veghighinc07)
#Together the dataset simulated has 2000 participants for low income, high income and average income per year
#simulate predictors
year <- factor(rep (c("94-98", "03-04", "05-06", "07-08"), each = 500, times = 3))
incomelevel <- factor(rep (c("mean", "low", "high"), each = 2000, times = 1))
#combine all into a dataframe
vegsim <- data.frame(simveg, year, incomelevel)
# NOTE(review): both columns were already created with factor(); these two
# as.factor() conversions appear redundant -- confirm before removing
vegsim$incomelevel <- as.factor (vegsim$incomelevel)
vegsim$year <- as.factor (vegsim$year)
write.csv(vegsim,"data/tidydata/vegsim.csv")
# Make sure dataset looks correct
summary(vegsim)
ggplot(vegsim, aes(year, simveg))+
geom_boxplot()
ggplot(vegsim, aes(incomelevel, simveg))+
geom_boxplot()
| /data_wrangling/stone_simdatadevelopment_final proj_11-11-19.R | no_license | EEOB590A-Fall-2019/TiffanieRepository | R | false | false | 3,024 | r | # Tiffanie Stone - Data Simulation - 10/24/2019
library(tidyverse)
library(ggplot2)
library(ggthemes)
#Simulating a data set using means and 95% confidence intervals for the vegetable portion of the dataset.
#Will account for annual variations and high and low income data.
#took low and high data for each food type throughout years and averaged them to create approximate mean, used range/2 as standard dev.
# 500 draws per income group per survey period (normal, mean/sd as noted above)
vegmean94 <- rnorm(n= 500, mean=c(109.87), sd = c(18.204)) #simulate avg veg consumption in 94-98
veglowinc94 <- rnorm(n=500, mean=c(105.69), sd = c(16.5994)) #simulate avg low income veg consumption in 94-98
veghighinc94 <- rnorm(n= 500, mean=c(111.76), sd = c(18.9233)) #simulate avg high income veg consumption in 94-98
vegmean03 <- rnorm(n= 500, mean=c(105.72), sd = c(23.7582)) #simulate avg veg consumption in 03-04
veglowinc03 <- rnorm(n=500, mean=c(103.77), sd = c(22.3607)) #simulate avg low income veg consumption in 03-04
veghighinc03 <- rnorm(n= 500, mean=c(106.94), sd = c(24.6526)) #simulate avg high income veg consumption in 03-04
vegmean05 <- rnorm(n= 500, mean=c(103.03), sd = c(24.1495)) #simulate avg veg consumption in 05-06
veglowinc05 <- rnorm(n=500, mean=c(104.21), sd = c(22.3607)) #simulate avg low income veg consumption in 05-06
veghighinc05 <- rnorm(n= 500, mean=c(102.48), sd = c(25.044)) #simulate avg high income veg consumption in 05-06
vegmean07 <- rnorm(n= 500, mean=c(102.76), sd = c(22.9197)) #simulate avg veg consumption in 07-08
veglowinc07 <- rnorm(n=500, mean=c(99.62), sd = c(21.2426)) #simulate avg low income veg consumption in 07-08
veghighinc07 <- rnorm(n= 500, mean=c(104.83), sd = c(24.0377)) #simulate avg high income veg consumption in 07-08
# Stack the 12 groups; order must match the year/incomelevel factors built below
simveg <- c(vegmean94, vegmean03, vegmean05, vegmean07, veglowinc94,veglowinc03,veglowinc05,
veglowinc07, veghighinc94, veghighinc03, veghighinc05,veghighinc07)
#Together the dataset simulated has 2000 participants for low income, high income and average income per year
#simulate predictors
year <- factor(rep (c("94-98", "03-04", "05-06", "07-08"), each = 500, times = 3))
incomelevel <- factor(rep (c("mean", "low", "high"), each = 2000, times = 1))
#combine all into a dataframe
vegsim <- data.frame(simveg, year, incomelevel)
# NOTE(review): both columns were already created with factor(); these two
# as.factor() conversions appear redundant -- confirm before removing
vegsim$incomelevel <- as.factor (vegsim$incomelevel)
vegsim$year <- as.factor (vegsim$year)
write.csv(vegsim,"data/tidydata/vegsim.csv")
# Make sure dataset looks correct
summary(vegsim)
ggplot(vegsim, aes(year, simveg))+
geom_boxplot()
ggplot(vegsim, aes(incomelevel, simveg))+
geom_boxplot()
|
# AGREE.COEFF3.RAW.R
# (September 2, 2016)
#Description: This script file contains a series of R functions for computing various agreement coefficients
# for multiple raters (2 or more) when the input data file is in the form of nxr matrix or data frame showing
# the actual ratings each rater (column) assigned to each subject (in row). That is n = number of subjects, and r = number of raters.
# A typical table entry (i,g) represents the rating associated with subject i and rater g.
#Author: Kilem L. Gwet, Ph.D. (gwet@agreestat.com)
#-----------------------------------------------------------------
# EXAMPLES OF SIMPLE CALLS OF THE MAIN FUNCTIONS:
# > gwet.ac1.raw(YourRatings) # to obtain gwet's AC1 coefficient
# > fleiss.kappa.raw(YourRatings) # to obtain fleiss' unweighted generalized kappa coefficient
# > krippen.alpha.raw(YourRatings) # to obtain krippendorff's unweighted alpha coefficient
# > conger.kappa.raw(YourRatings) # to obtain conger's unweighted generalized kappa coefficient
# > bp.coeff.raw(YourRatings) # to obtain Brennan-Prediger unweighted coefficient
#
#===========================================================================================
#gwet.ac1.raw: Gwet's AC1/Ac2 coefficient (Gwet(2008)) and its standard error for multiple raters when input
# dataset is a nxr matrix of alphanumeric ratings from n subjects and r raters
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Gwet, K. L. (2008). ``Computing inter-rater reliability and its variance in the presence of high
# agreement." British Journal of Mathematical and Statistical Psychology, 61, 29-48.
#============================================================================================
# Gwet's AC1 (unweighted) / AC2 (weighted) agreement coefficient and its standard
# error for multiple raters (Gwet, 2008).
#
# Arguments:
#   ratings - nxr matrix/data frame of raw ratings (subjects in rows, raters in
#             columns); subjects rated by no rater must be excluded beforehand.
#   weights - "unweighted", one of the named weighting schemes, or a custom qxq
#             numeric weight matrix.
#   conflev - confidence level for the confidence interval (default 0.95).
#   N       - population size for the finite-population correction (default Inf).
#   print   - if TRUE, print the coefficient, standard error, CI and p-value.
#
# Returns (invisibly): c(pa, pe, coefficient, stderr, p.value)
gwet.ac1.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N # finite-population correction
  # vector of all categories used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat<-trim(ratings.mat)
    categ.init <- trim(categ.init) #trim vector elements to remove leading and trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # TRUE only for the default unweighted analysis (AC1); a named scheme or a custom
  # numeric weight matrix yields the weighted coefficient AC2
  unweighted <- is.character(weights) && length(weights)==1 && weights=="unweighted"
  # creating the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat<-quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat<-ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat<-linear.weights(categ)
    else if (weights=="radical")
      weights.mat<-radical.weights(categ)
    else if (weights=="ratio")
      weights.mat<-ratio.weights(categ)
    else if (weights=="circular")
      weights.mat<-circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat<-bipolar.weights(categ)
    else weights.mat<-identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    if (is.numeric(ratings.mat)){
      k.mis <-(ratings.mat==categ[k])
      in.categ.k <- replace(k.mis,is.na(k.mis),FALSE) # missing ratings do not count
      agree.mat[,k] <- in.categ.k%*%rep(1,r)
    }else
      agree.mat[,k] <- (trim(ratings.mat)==categ[k])%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # point estimate of the coefficient
  ri.vec <- agree.mat%*%rep(1,q)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  n2more <- sum(ri.vec>=2) # subjects rated by at least 2 raters
  pa <- sum(sum.q[ri.vec>=2]/((ri.vec*(ri.vec-1))[ri.vec>=2]))/n2more
  pi.vec <- t(t(rep(1/n,n))%*%(agree.mat/(ri.vec%*%t(rep(1,q)))))
  pe <- sum(weights.mat) * sum(pi.vec*(1-pi.vec)) / (q*(q-1))
  gwet.ac1 <- (pa-pe)/(1-pe)
  # variance, standard error & p-value (linearization)
  den.ivec <- ri.vec*(ri.vec-1)
  den.ivec <- den.ivec - (den.ivec==0) # replaces each 0 with -1 so the next ratio is always defined
  pa.ivec <- sum.q/den.ivec
  pe.r2 <- pe*(ri.vec>=2)
  ac1.ivec <- (n/n2more)*(pa.ivec-pe.r2)/(1-pe)
  pe.ivec <- (sum(weights.mat)/(q*(q-1))) * (agree.mat%*%(1-pi.vec))/ri.vec
  ac1.ivec.x <- ac1.ivec - 2*(1-gwet.ac1) * (pe.ivec-pe)/(1-pe)
  var.ac1 <- ((1-f)/(n*(n-1))) * sum((ac1.ivec.x - gwet.ac1)^2)
  stderr <- sqrt(var.ac1)# ac1's standard error
  p.value <- 2*(1-pt(abs(gwet.ac1/stderr),n-1))
  lcb <- gwet.ac1 - stderr*qt(1-(1-conflev)/2,n-1) # lower confidence bound
  ucb <- min(1,gwet.ac1 + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE) {
    # BUG FIX: test the precomputed flag rather than weights=="unweighted"; the
    # original comparison raised a condition-length error when a custom numeric
    # weight matrix was supplied.
    if (unweighted) {
      cat("Gwet's AC1 Coefficient\n")
      cat('======================\n')
      cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
      cat('AC1 coefficient:',gwet.ac1,'Standard error:',stderr,'\n')
      cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
      cat('P-value: ',p.value,'\n')
    }
    else {
      cat("Gwet's AC2 Coefficient\n")
      cat('==========================\n')
      cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
      cat('AC2 coefficient:',gwet.ac1,'Standard error:',stderr,'\n')
      cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
      cat('P-value: ',p.value,'\n')
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,gwet.ac1,stderr,p.value))
}
#=====================================================================================
#fleiss.kappa.raw: This function computes Fleiss' generalized kappa coefficient (see Fleiss(1971)) and
# its standard error for 3 raters or more when input dataset is a nxr matrix of alphanumeric
# ratings from n subjects and r raters.
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Fleiss, J. L. (1981). Statistical Methods for Rates and Proportions. John Wiley & Sons.
#======================================================================================
# Fleiss' generalized kappa coefficient and its standard error for multiple raters
# (Fleiss, 1971/1981).
#
# Arguments:
#   ratings - nxr matrix/data frame of raw ratings (subjects in rows, raters in
#             columns); subjects rated by no rater must be excluded beforehand.
#   weights - "unweighted", one of the named weighting schemes, or a custom qxq
#             numeric weight matrix.
#   conflev - confidence level for the confidence interval (default 0.95).
#   N       - population size for the finite-population correction (default Inf).
#   print   - if TRUE, print the coefficient, standard error, CI and p-value.
#
# Returns (invisibly): c(pa, pe, coefficient, stderr, p.value)
fleiss.kappa.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N # finite-population correction
  # vector of all categories used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat<-trim(ratings.mat)
    categ.init <- trim(categ.init) #trim vector elements to remove leading and trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # TRUE only for the default unweighted analysis; a named scheme or a custom
  # numeric weight matrix yields the weighted coefficient
  unweighted <- is.character(weights) && length(weights)==1 && weights=="unweighted"
  # creating the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat<-quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat<-ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat<-linear.weights(categ)
    else if (weights=="radical")
      weights.mat<-radical.weights(categ)
    else if (weights=="ratio")
      weights.mat<-ratio.weights(categ)
    else if (weights=="circular")
      weights.mat<-circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat<-bipolar.weights(categ)
    else weights.mat<-identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    if (is.numeric(ratings.mat)){
      k.mis <-(ratings.mat==categ[k])
      in.categ.k <- replace(k.mis,is.na(k.mis),FALSE) # missing ratings do not count
      agree.mat[,k] <- in.categ.k%*%rep(1,r)
    }else
      agree.mat[,k] <- (trim(ratings.mat)==categ[k])%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # point estimate of Fleiss' generalized kappa
  ri.vec <- agree.mat%*%rep(1,q)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  n2more <- sum(ri.vec>=2) # subjects rated by at least 2 raters
  pa <- sum(sum.q[ri.vec>=2]/((ri.vec*(ri.vec-1))[ri.vec>=2]))/n2more
  pi.vec <- t(t(rep(1/n,n))%*%(agree.mat/(ri.vec%*%t(rep(1,q)))))
  pe <- sum(weights.mat * (pi.vec%*%t(pi.vec)))
  fleiss.kappa <- (pa-pe)/(1-pe)
  # variance, standard error & p-value (linearization)
  den.ivec <- ri.vec*(ri.vec-1)
  den.ivec <- den.ivec - (den.ivec==0) # replaces each 0 with -1 so the next ratio is always defined
  pa.ivec <- sum.q/den.ivec
  pe.r2 <- pe*(ri.vec>=2)
  kappa.ivec <- (n/n2more)*(pa.ivec-pe.r2)/(1-pe)
  pi.vec.wk. <- weights.mat%*%pi.vec
  pi.vec.w.k <- t(weights.mat)%*%pi.vec
  pi.vec.w <- (pi.vec.wk. + pi.vec.w.k)/2
  pe.ivec <- (agree.mat%*%pi.vec.w)/ri.vec
  kappa.ivec.x <- kappa.ivec - 2*(1-fleiss.kappa) * (pe.ivec-pe)/(1-pe)
  var.fleiss <- ((1-f)/(n*(n-1))) * sum((kappa.ivec.x - fleiss.kappa)^2)
  stderr <- sqrt(var.fleiss)# kappa's standard error
  p.value <- 2*(1-pt(abs(fleiss.kappa/stderr),n-1))
  lcb <- fleiss.kappa - stderr*qt(1-(1-conflev)/2,n-1) # lower confidence bound
  ucb <- min(1,fleiss.kappa + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE){
    cat("Fleiss' Kappa Coefficient\n")
    cat('==========================\n')
    cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
    cat('Fleiss kappa coefficient:',fleiss.kappa,'Standard error:',stderr,'\n')
    cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
    cat('P-value: ',p.value,'\n')
    # BUG FIX: test the precomputed flag rather than weights!="unweighted"; the
    # original comparison raised a condition-length error when a custom numeric
    # weight matrix was supplied.
    if (!unweighted) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,fleiss.kappa,stderr,p.value))
}
#=====================================================================================
#krippen.alpha.raw: This function computes Krippendorff's alpha coefficient (see Krippendorff(1970, 1980)) and
# its standard error for 3 raters or more when input dataset is a nxr matrix of alphanumeric
# ratings from n subjects and r raters.
#-------------
#The algorithm used to compute krippendorff's alpha is very different from anything that was published on this topic. Instead,
#it follows the equations presented by K. Gwet (2012)
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Gwet, K. (2012). Handbook of Inter-Rater Reliability: The Definitive Guide to Measuring the Extent of Agreement Among
# Multiple Raters, 3rd Edition. Advanced Analytics, LLC; 3rd edition (March 2, 2012)
#Krippendorff (1970). "Bivariate agreement coefficients for reliability of data." Sociological Methodology,2,139-150
#Krippendorff (1980). Content analysis: An introduction to its methodology (2nd ed.), New-bury Park, CA: Sage.
#======================================================================================
# Krippendorff's alpha coefficient and its standard error for multiple raters,
# following the linearization in Gwet (2012).
#
# Arguments:
#   ratings - nxr matrix/data frame of raw ratings (subjects in rows, raters in
#             columns); subjects rated by no rater must be excluded beforehand.
#   weights - "unweighted", one of the named weighting schemes, or a custom qxq
#             numeric weight matrix.
#   conflev - confidence level for the confidence interval (default 0.95).
#   N       - population size for the finite-population correction (default Inf).
#   print   - if TRUE, print the coefficient, standard error, CI and p-value.
#
# Returns (invisibly): c(pa, pe, coefficient, stderr, p.value)
krippen.alpha.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N # finite-population correction
  # vector of all categories used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat<-trim(ratings.mat)
    categ.init <- trim(categ.init) #trim vector elements to remove leading and trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # TRUE only for the default unweighted analysis; a named scheme or a custom
  # numeric weight matrix yields the weighted coefficient
  unweighted <- is.character(weights) && length(weights)==1 && weights=="unweighted"
  # creating the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat<-quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat<-ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat<-linear.weights(categ)
    else if (weights=="radical")
      weights.mat<-radical.weights(categ)
    else if (weights=="ratio")
      weights.mat<-ratio.weights(categ)
    else if (weights=="circular")
      weights.mat<-circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat<-bipolar.weights(categ)
    else weights.mat<-identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    if (is.numeric(ratings.mat)){
      k.mis <-(ratings.mat==categ[k])
      in.categ.k <- replace(k.mis,is.na(k.mis),FALSE) # missing ratings do not count
      agree.mat[,k] <- in.categ.k%*%rep(1,r)
    }else
      agree.mat[,k] <- (trim(ratings.mat)==categ[k])%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # point estimate: subjects rated by fewer than 2 raters are dropped
  ri.vec <- agree.mat%*%rep(1,q)
  agree.mat<-agree.mat[(ri.vec>=2),]
  agree.mat.w <- agree.mat.w[(ri.vec>=2),]
  ri.vec <- ri.vec[(ri.vec>=2)]
  ri.mean <- mean(ri.vec)
  n <- nrow(agree.mat) # number of retained subjects
  epsi <- 1/sum(ri.vec)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  # BUG FIX: removed leftover debug output -- print(n) and print(paprime) -- that
  # polluted the console on every call; `paprime` existed only to be printed.
  pa <- (1-epsi)*sum(sum.q/(ri.mean*(ri.vec-1)))/n + epsi
  pi.vec <- t(t(rep(1/n,n))%*%(agree.mat/ri.mean))
  pe <- sum(weights.mat * (pi.vec%*%t(pi.vec)))
  krippen.alpha <- (pa-pe)/(1-pe)
  # variance, standard error & p-value (linearization)
  den.ivec <- ri.mean*(ri.vec-1)
  pa.ivec <- sum.q/den.ivec
  pa.v <- mean(pa.ivec)
  pa.ivec <- pa.ivec-pa.v*(ri.vec-ri.mean)/ri.mean
  krippen.ivec <- (pa.ivec-pe)/(1-pe)
  pi.vec.wk. <- weights.mat%*%pi.vec
  pi.vec.w.k <- t(weights.mat)%*%pi.vec
  pi.vec.w <- (pi.vec.wk. + pi.vec.w.k)/2
  pe.ivec <- (agree.mat%*%pi.vec.w)/ri.mean - sum(pi.vec) * (ri.vec-ri.mean)/ri.mean
  krippen.ivec.x <- krippen.ivec - 2*(1-krippen.alpha) * (pe.ivec-pe)/(1-pe)
  var.krippen <- ((1-f)/(n*(n-1))) * sum((krippen.ivec.x - krippen.alpha)^2)
  stderr <- sqrt(var.krippen)# alpha's standard error
  p.value <- 2*(1-pt(abs(krippen.alpha/stderr),n-1))
  lcb <- krippen.alpha - stderr*qt(1-(1-conflev)/2,n-1) # lower confidence bound
  ucb <- min(1,krippen.alpha + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE){
    cat("Krippendorff's Alpha Coefficient\n")
    cat('==========================\n')
    cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
    cat('Krippendorff alpha coefficient:',krippen.alpha,'Standard error:',stderr,'\n')
    cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
    cat('P-value: ',p.value,'\n')
    # BUG FIX: test the precomputed flag rather than weights!="unweighted"; the
    # original comparison raised a condition-length error when a custom numeric
    # weight matrix was supplied.
    if (!unweighted) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,krippen.alpha,stderr,p.value))
}
#===========================================================================================
#conger.kappa.raw: Conger's kappa coefficient (see Conger(1980)) and its standard error for multiple raters when input
# dataset is a nxr matrix of alphanumeric ratings from n subjects and r raters
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Conger, A. J. (1980), ``Integration and Generalization of Kappas for Multiple Raters,"
# Psychological Bulletin, 88, 322-328.
#======================================================================================
conger.kappa.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  # Conger's generalized kappa (Conger, 1980) for multiple raters.
  # ratings: nxr matrix/data frame of raw alphanumeric ratings (n subjects, r raters);
  #          exclude subjects that no rater rated.
  # weights: a weighting-scheme name ("unweighted","quadratic","ordinal","linear",
  #          "radical","ratio","circular","bipolar") or a custom qxq numeric matrix.
  # conflev: confidence level of the reported confidence interval.
  # N      : population size, used in the finite-population correction f = n/N.
  # Returns invisibly c(pa, pe, conger.kappa, stderr, p.value).
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N               # finite-population correction
  # vector of all categories actually used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat <- trim(ratings.mat)
    categ.init <- trim(categ.init) # remove leading/trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # build the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat <- quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat <- ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat <- linear.weights(categ)
    else if (weights=="radical")
      weights.mat <- radical.weights(categ)
    else if (weights=="ratio")
      weights.mat <- ratio.weights(categ)
    else if (weights=="circular")
      weights.mat <- circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat <- bipolar.weights(categ)
    else weights.mat <- identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    k.mis <- (ratings.mat==categ[k])
    in.categ.k <- replace(k.mis,is.na(k.mis),FALSE) # NA = subject not rated by that rater
    agree.mat[,k] <- in.categ.k%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # rxq classification matrix: number of subjects per rater and category
  classif.mat <- matrix(0,nrow=r,ncol=q)
  for(k in 1:q){
    with.mis <- (t(ratings.mat)==categ[k])
    without.mis <- replace(with.mis,is.na(with.mis),FALSE)
    classif.mat[,k] <- without.mis%*%rep(1,n)
  }
  # Conger's kappa: percent agreement and percent chance agreement
  ri.vec <- agree.mat%*%rep(1,q)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  n2more <- sum(ri.vec>=2) # subjects rated by at least 2 raters
  pa <- sum(sum.q[ri.vec>=2]/((ri.vec*(ri.vec-1))[ri.vec>=2]))/n2more
  ng.vec <- classif.mat%*%rep(1,q)
  pgk.mat <- classif.mat/(ng.vec%*%rep(1,q))
  p.mean.k <- (t(pgk.mat)%*%rep(1,r))/r
  s2kl.mat <- (t(pgk.mat)%*%pgk.mat - r * p.mean.k%*%t(p.mean.k))/(r-1)
  pe <- sum(weights.mat * (p.mean.k%*%t(p.mean.k) - s2kl.mat/r))
  conger.kappa <- (pa-pe)/(1-pe)
  # variance, standard error & p-value of Conger's kappa
  # (unused intermediates bkl.mat / pe.ivec1 / pe.ivec2 from the original were removed)
  lamda.ig.mat <- matrix(0,n,r)
  if (is.numeric(ratings.mat)){
    epsi.ig.mat <- 1-is.na(ratings.mat)
    epsi.ig.mat <- replace(epsi.ig.mat,is.na(epsi.ig.mat),FALSE)
  }else{
    epsi.ig.mat <- 1-(ratings.mat=="")
    epsi.ig.mat <- replace(epsi.ig.mat,is.na(epsi.ig.mat),FALSE)
  }
  for(k in 1:q){
    lamda.ig.kmat <- matrix(0,n,r)
    for(l in 1:q){
      delta.ig.mat <- (ratings.mat==categ[l])
      delta.ig.mat <- replace(delta.ig.mat,is.na(delta.ig.mat),FALSE)
      lamda.ig.kmat <- lamda.ig.kmat + weights.mat[k,l] * (delta.ig.mat - (epsi.ig.mat - rep(1,n)%*%t(ng.vec/n)) * (rep(1,n)%*%t(pgk.mat[,l])))
    }
    lamda.ig.kmat <- lamda.ig.kmat*(rep(1,n)%*%t(n/ng.vec))
    lamda.ig.mat <- lamda.ig.mat + lamda.ig.kmat*(r*mean(pgk.mat[,k]) - rep(1,n)%*%t(pgk.mat[,k]))
  }
  pe.ivec <- (lamda.ig.mat%*%rep(1,r)) / (r*(r-1))
  den.ivec <- ri.vec*(ri.vec-1)
  den.ivec <- den.ivec - (den.ivec==0) # replace 0 with -1 so the ratio below is always defined
  pa.ivec <- sum.q/den.ivec
  pe.r2 <- pe*(ri.vec>=2)
  conger.ivec <- (n/n2more)*(pa.ivec-pe.r2)/(1-pe)
  conger.ivec.x <- conger.ivec - 2*(1-conger.kappa) * (pe.ivec-pe)/(1-pe)
  var.conger <- ((1-f)/(n*(n-1))) * sum((conger.ivec.x - conger.kappa)^2)
  stderr <- sqrt(var.conger) # Conger's kappa standard error
  p.value <- 2*(1-pt(abs(conger.kappa/stderr),n-1))
  lcb <- conger.kappa - stderr*qt(1-(1-conflev)/2,n-1)        # lower confidence bound
  ucb <- min(1,conger.kappa + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE) {
    cat("Conger's Kappa Coefficient\n")
    cat('==========================\n')
    cat('Percent agreement: ',pa,'Percent chance agreement: ',pe,'\n')
    cat("Conger's kappa coefficient: ",conger.kappa,'Standard error:',stderr,'\n')
    cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
    cat('P-value: ',p.value,'\n')
    # identical() keeps the condition scalar even when a custom qxq weight
    # matrix was supplied (weights!="unweighted" errors on a matrix in R >= 4.2)
    if (!identical(weights,"unweighted")) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,conger.kappa,stderr,p.value))
}
#===========================================================================================
#bp.coeff.raw: Brennan-Prediger coefficient (see Brennan & Prediger(1981)) and its standard error for multiple raters when input
# dataset is a nxr matrix of alphanumeric ratings from n subjects and r raters
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Brennan, R.L., and Prediger, D. J. (1981). ``Coefficient Kappa: some uses, misuses, and alternatives."
# Educational and Psychological Measurement, 41, 687-699.
#======================================================================================
bp.coeff.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  # Brennan-Prediger coefficient (Brennan & Prediger, 1981) for multiple raters.
  # ratings: nxr matrix/data frame of raw alphanumeric ratings (n subjects, r raters);
  #          exclude subjects that no rater rated.
  # weights: a weighting-scheme name or a custom qxq numeric weight matrix.
  # conflev: confidence level of the reported confidence interval.
  # N      : population size, used in the finite-population correction f = n/N.
  # Returns invisibly c(pa, pe, bp.coeff, stderr, p.value).
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N               # finite-population correction
  # vector of all categories actually used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat <- trim(ratings.mat)
    categ.init <- trim(categ.init) # remove leading/trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # build the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat <- quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat <- ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat <- linear.weights(categ)
    else if (weights=="radical")
      weights.mat <- radical.weights(categ)
    else if (weights=="ratio")
      weights.mat <- ratio.weights(categ)
    else if (weights=="circular")
      weights.mat <- circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat <- bipolar.weights(categ)
    else weights.mat <- identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    if (is.numeric(ratings.mat)){
      k.mis <- (ratings.mat==categ[k])
      in.categ.k <- replace(k.mis,is.na(k.mis),FALSE)
      agree.mat[,k] <- in.categ.k%*%rep(1,r)
    }else
      agree.mat[,k] <- (trim(ratings.mat)==categ[k])%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # Brennan-Prediger coefficient: percent agreement and chance agreement
  # (the original's comments here wrongly referred to Gwet's AC1; the unused
  # pi.vec intermediate was also removed - B-P chance agreement is uniform)
  ri.vec <- agree.mat%*%rep(1,q)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  n2more <- sum(ri.vec>=2) # subjects rated by at least 2 raters
  pa <- sum(sum.q[ri.vec>=2]/((ri.vec*(ri.vec-1))[ri.vec>=2]))/n2more
  pe <- sum(weights.mat) / (q^2) # uniform chance agreement over the q^2 cells
  bp.coeff <- (pa-pe)/(1-pe)
  # variance, standard error & p-value of the B-P coefficient
  den.ivec <- ri.vec*(ri.vec-1)
  den.ivec <- den.ivec - (den.ivec==0) # replace 0 with -1 so the ratio below is always defined
  pa.ivec <- sum.q/den.ivec
  pe.r2 <- pe*(ri.vec>=2)
  bp.ivec <- (n/n2more)*(pa.ivec-pe.r2)/(1-pe)
  var.bp <- ((1-f)/(n*(n-1))) * sum((bp.ivec - bp.coeff)^2)
  stderr <- sqrt(var.bp) # B-P's standard error
  p.value <- 2*(1-pt(abs(bp.coeff/stderr),n-1))
  lcb <- bp.coeff - stderr*qt(1-(1-conflev)/2,n-1)        # lower confidence bound
  ucb <- min(1,bp.coeff + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE) {
    cat("Brennan-Prediger Coefficient\n")
    cat('============================\n')
    cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
    cat('B-P coefficient:',bp.coeff,'Standard error:',stderr,'\n')
    cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
    cat('P-value: ',p.value,'\n')
    # identical() keeps the condition scalar even for a custom weight matrix;
    # the weight matrix is now printed exactly once (the original printed it
    # twice on the named-scheme branch)
    if (!identical(weights,"unweighted")) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,bp.coeff,stderr,p.value))
}
#
#----- Additional functions needed to run the main functions. If the main functions must be included in another R script, then
# the user will need to add these additional functions to the new script file.
#
# ==============================================================
# trim(x): This is an R function for trimming leading and trailing blanks
# ==============================================================
trim <- function(x) {
  # Strip leading and trailing whitespace from every element of x.
  # sub() preserves attributes (including dim), so matrices stay matrices.
  sub("[[:space:]]+$", "", sub("^[[:space:]]+", "", x))
}
# ==============================================================
# The following functions generate various weight matrices used
# in the weighted or unweighted analyses.
# ==============================================================
identity.weights <- function(categ) {
  # Unweighted analysis: 1 on the diagonal, 0 everywhere else.
  diag(length(categ))
}
quadratic.weights <- function(categ) {
  # Quadratic weights: w[k,l] = 1 - (x_k - x_l)^2 / (x_max - x_min)^2.
  # Numeric categories are used as scores; otherwise rank order is used.
  if (is.numeric(categ)) {
    scores <- sort(categ)
  } else {
    scores <- seq_along(categ)
  }
  span <- max(scores) - min(scores)
  diffs <- outer(scores, scores, "-")
  1 - diffs^2 / span^2
}
linear.weights <- function(categ) {
  # Linear weights: w[k,l] = 1 - |x_k - x_l| / |x_max - x_min|.
  # Numeric categories are used as scores; otherwise rank order is used.
  if (is.numeric(categ)) {
    scores <- sort(categ)
  } else {
    scores <- seq_along(categ)
  }
  span <- max(scores) - min(scores)
  diffs <- outer(scores, scores, "-")
  1 - abs(diffs) / abs(span)
}
#--------------------------------
radical.weights <- function(categ) {
  # Radical (square-root) weights:
  # w[k,l] = 1 - sqrt(|x_k - x_l|) / sqrt(|x_max - x_min|).
  if (is.numeric(categ)) {
    scores <- sort(categ)
  } else {
    scores <- seq_along(categ)
  }
  span <- max(scores) - min(scores)
  diffs <- outer(scores, scores, "-")
  1 - sqrt(abs(diffs)) / sqrt(abs(span))
}
#--------------------------------
ratio.weights <- function(categ) {
  # Ratio weights:
  # w[k,l] = 1 - ((x_k-x_l)/(x_k+x_l))^2 / ((x_max-x_min)/(x_max+x_min))^2.
  if (is.numeric(categ)) {
    scores <- sort(categ)
  } else {
    scores <- seq_along(categ)
  }
  lo <- min(scores)
  hi <- max(scores)
  ratios.sq <- (outer(scores, scores, "-") / outer(scores, scores, "+"))^2
  1 - ratios.sq / ((hi - lo) / (hi + lo))^2
}
#--------------------------------
circular.weights <- function(categ) {
  # Circular weights: raw penalty sin(pi*(x_k-x_l)/U)^2 with
  # U = x_max - x_min + 1, then rescaled so the largest penalty maps to 0.
  if (is.numeric(categ)) {
    scores <- sort(categ)
  } else {
    scores <- seq_along(categ)
  }
  period <- max(scores) - min(scores) + 1  # U
  penalty <- (sin(pi * outer(scores, scores, "-") / period))^2
  1 - penalty / max(penalty)
}
#--------------------------------
bipolar.weights <- function(categ) {
  # Bipolar weights: off-diagonal raw penalty
  # (x_k-x_l)^2 / (((x_k+x_l)-2*x_min) * (2*x_max-(x_k+x_l))),
  # with a penalty of 0 on the diagonal, then rescaled so the
  # largest penalty maps to a weight of 0.
  if (is.numeric(categ)) {
    scores <- sort(categ)
  } else {
    scores <- seq_along(categ)
  }
  lo <- min(scores)
  hi <- max(scores)
  sums <- outer(scores, scores, "+")
  penalty <- outer(scores, scores, "-")^2 / ((sums - 2 * lo) * (2 * hi - sums))
  diag(penalty) <- 0  # penalty is defined as 0 when k == l (avoids 0/0 at the extremes)
  1 - penalty / max(penalty)
}
#--------------------------------
ordinal.weights <- function(categ) {
  # Ordinal weights based on rank positions only: with
  # nkl = max(k,l) - min(k,l) + 1 = |k - l| + 1, the raw penalty is
  # nkl*(nkl-1)/2, rescaled so the largest penalty maps to a weight of 0.
  q <- length(categ)
  steps <- abs(outer(seq_len(q), seq_len(q), "-"))
  penalty <- (steps + 1) * steps / 2
  1 - penalty / max(penalty)
}
| /agreement/agree.coeff3.raw.r | no_license | MAgojam/inter-rater-reliability-cac | R | false | false | 30,352 | r | # AGREE.COEFF3.RAW.R
# (September 2, 2016)
#Description: This script file contains a series of R functions for computing various agreement coefficients
# for multiple raters (2 or more) when the input data file is in the form of nxr matrix or data frame showing
# the actual ratings each rater (column) assigned to each subject (in row). That is n = number of subjects, and r = number of raters.
# A typical table entry (i,g) represents the rating associated with subject i and rater g.
#Author: Kilem L. Gwet, Ph.D. (gwet@agreestat.com)
#-----------------------------------------------------------------
# EXAMPLES OF SIMPLE CALLS OF THE MAIN FUNCTIONS:
# > gwet.ac1.raw(YourRatings) # to obtain gwet's AC1 coefficient
# > fleiss.kappa.raw(YourRatings) # to obtain fleiss' unweighted generalized kappa coefficient
# > krippen.alpha.raw(YourRatings) # to obtain krippendorff's unweighted alpha coefficient
# > conger.kappa.raw(YourRatings) # to obtain conger's unweighted generalized kappa coefficient
# > bp.coeff.raw(YourRatings) # to obtain Brennan-Prediger unweighted coefficient
#
#===========================================================================================
#gwet.ac1.raw: Gwet's AC1/Ac2 coefficient (Gwet(2008)) and its standard error for multiple raters when input
# dataset is a nxr matrix of alphanumeric ratings from n subjects and r raters
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Gwet, K. L. (2008). ``Computing inter-rater reliability and its variance in the presence of high
# agreement." British Journal of Mathematical and Statistical Psychology, 61, 29-48.
#============================================================================================
gwet.ac1.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  # Gwet's AC1 (unweighted) / AC2 (weighted) agreement coefficient (Gwet, 2008).
  # ratings: nxr matrix/data frame of raw alphanumeric ratings (n subjects, r raters);
  #          exclude subjects that no rater rated.
  # weights: a weighting-scheme name ("unweighted","quadratic","ordinal","linear",
  #          "radical","ratio","circular","bipolar") or a custom qxq numeric matrix.
  # conflev: confidence level of the reported confidence interval.
  # N      : population size, used in the finite-population correction f = n/N.
  # Returns invisibly c(pa, pe, gwet.ac1, stderr, p.value).
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N               # finite-population correction
  # vector of all categories actually used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat <- trim(ratings.mat)
    categ.init <- trim(categ.init) # remove leading/trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # build the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat <- quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat <- ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat <- linear.weights(categ)
    else if (weights=="radical")
      weights.mat <- radical.weights(categ)
    else if (weights=="ratio")
      weights.mat <- ratio.weights(categ)
    else if (weights=="circular")
      weights.mat <- circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat <- bipolar.weights(categ)
    else weights.mat <- identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    if (is.numeric(ratings.mat)){
      k.mis <- (ratings.mat==categ[k])
      in.categ.k <- replace(k.mis,is.na(k.mis),FALSE)
      agree.mat[,k] <- in.categ.k%*%rep(1,r)
    }else
      agree.mat[,k] <- (trim(ratings.mat)==categ[k])%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # Gwet's AC1/AC2: percent agreement and percent chance agreement
  ri.vec <- agree.mat%*%rep(1,q)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  n2more <- sum(ri.vec>=2) # subjects rated by at least 2 raters
  pa <- sum(sum.q[ri.vec>=2]/((ri.vec*(ri.vec-1))[ri.vec>=2]))/n2more
  pi.vec <- t(t(rep(1/n,n))%*%(agree.mat/(ri.vec%*%t(rep(1,q)))))
  pe <- sum(weights.mat) * sum(pi.vec*(1-pi.vec)) / (q*(q-1))
  gwet.ac1 <- (pa-pe)/(1-pe)
  # variance, standard error & p-value of Gwet's AC1/AC2
  den.ivec <- ri.vec*(ri.vec-1)
  den.ivec <- den.ivec - (den.ivec==0) # replace 0 with -1 so the ratio below is always defined
  pa.ivec <- sum.q/den.ivec
  pe.r2 <- pe*(ri.vec>=2)
  ac1.ivec <- (n/n2more)*(pa.ivec-pe.r2)/(1-pe)
  pe.ivec <- (sum(weights.mat)/(q*(q-1))) * (agree.mat%*%(1-pi.vec))/ri.vec
  ac1.ivec.x <- ac1.ivec - 2*(1-gwet.ac1) * (pe.ivec-pe)/(1-pe)
  var.ac1 <- ((1-f)/(n*(n-1))) * sum((ac1.ivec.x - gwet.ac1)^2)
  stderr <- sqrt(var.ac1) # AC1's standard error
  p.value <- 2*(1-pt(abs(gwet.ac1/stderr),n-1))
  lcb <- gwet.ac1 - stderr*qt(1-(1-conflev)/2,n-1)        # lower confidence bound
  ucb <- min(1,gwet.ac1 + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE) {
    # identical() keeps the condition scalar even when a custom qxq weight
    # matrix was supplied (weights=="unweighted" errors on a matrix in R >= 4.2)
    if (identical(weights,"unweighted")) {
      cat("Gwet's AC1 Coefficient\n")
      cat('======================\n')
      cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
      cat('AC1 coefficient:',gwet.ac1,'Standard error:',stderr,'\n')
      cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
      cat('P-value: ',p.value,'\n')
    }
    else {
      cat("Gwet's AC2 Coefficient\n")
      cat('==========================\n')
      cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
      cat('AC2 coefficient:',gwet.ac1,'Standard error:',stderr,'\n')
      cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
      cat('P-value: ',p.value,'\n')
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,gwet.ac1,stderr,p.value))
}
#=====================================================================================
#fleiss.kappa.raw: This function computes Fleiss' generalized kappa coefficient (see Fleiss(1971)) and
# its standard error for 3 raters or more when input dataset is a nxr matrix of alphanumeric
# ratings from n subjects and r raters.
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Fleiss, J. L. (1981). Statistical Methods for Rates and Proportions. John Wiley & Sons.
#======================================================================================
fleiss.kappa.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  # Fleiss' generalized kappa (Fleiss, 1971/1981) for 3 raters or more.
  # ratings: nxr matrix/data frame of raw alphanumeric ratings (n subjects, r raters);
  #          exclude subjects that no rater rated.
  # weights: a weighting-scheme name or a custom qxq numeric weight matrix.
  # conflev: confidence level of the reported confidence interval.
  # N      : population size, used in the finite-population correction f = n/N.
  # Returns invisibly c(pa, pe, fleiss.kappa, stderr, p.value).
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N               # finite-population correction
  # vector of all categories actually used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat <- trim(ratings.mat)
    categ.init <- trim(categ.init) # remove leading/trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # build the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat <- quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat <- ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat <- linear.weights(categ)
    else if (weights=="radical")
      weights.mat <- radical.weights(categ)
    else if (weights=="ratio")
      weights.mat <- ratio.weights(categ)
    else if (weights=="circular")
      weights.mat <- circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat <- bipolar.weights(categ)
    else weights.mat <- identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    if (is.numeric(ratings.mat)){
      k.mis <- (ratings.mat==categ[k])
      in.categ.k <- replace(k.mis,is.na(k.mis),FALSE)
      agree.mat[,k] <- in.categ.k%*%rep(1,r)
    }else
      agree.mat[,k] <- (trim(ratings.mat)==categ[k])%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # Fleiss' kappa: percent agreement and percent chance agreement
  ri.vec <- agree.mat%*%rep(1,q)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  n2more <- sum(ri.vec>=2) # subjects rated by at least 2 raters
  pa <- sum(sum.q[ri.vec>=2]/((ri.vec*(ri.vec-1))[ri.vec>=2]))/n2more
  pi.vec <- t(t(rep(1/n,n))%*%(agree.mat/(ri.vec%*%t(rep(1,q)))))
  pe <- sum(weights.mat * (pi.vec%*%t(pi.vec)))
  fleiss.kappa <- (pa-pe)/(1-pe)
  # variance, standard error & p-value of Fleiss' kappa
  den.ivec <- ri.vec*(ri.vec-1)
  den.ivec <- den.ivec - (den.ivec==0) # replace 0 with -1 so the ratio below is always defined
  pa.ivec <- sum.q/den.ivec
  pe.r2 <- pe*(ri.vec>=2)
  kappa.ivec <- (n/n2more)*(pa.ivec-pe.r2)/(1-pe)
  pi.vec.wk. <- weights.mat%*%pi.vec
  pi.vec.w.k <- t(weights.mat)%*%pi.vec
  pi.vec.w <- (pi.vec.wk. + pi.vec.w.k)/2 # symmetrized weighted category probabilities
  pe.ivec <- (agree.mat%*%pi.vec.w)/ri.vec
  kappa.ivec.x <- kappa.ivec - 2*(1-fleiss.kappa) * (pe.ivec-pe)/(1-pe)
  var.fleiss <- ((1-f)/(n*(n-1))) * sum((kappa.ivec.x - fleiss.kappa)^2)
  stderr <- sqrt(var.fleiss) # kappa's standard error
  p.value <- 2*(1-pt(abs(fleiss.kappa/stderr),n-1))
  lcb <- fleiss.kappa - stderr*qt(1-(1-conflev)/2,n-1)        # lower confidence bound
  ucb <- min(1,fleiss.kappa + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE){
    cat("Fleiss' Kappa Coefficient\n")
    cat('==========================\n')
    cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
    cat('Fleiss kappa coefficient:',fleiss.kappa,'Standard error:',stderr,'\n')
    cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
    cat('P-value: ',p.value,'\n')
    # identical() keeps the condition scalar even when a custom qxq weight
    # matrix was supplied (weights!="unweighted" errors on a matrix in R >= 4.2)
    if (!identical(weights,"unweighted")) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,fleiss.kappa,stderr,p.value))
}
#=====================================================================================
#krippen.alpha.raw: This function computes Krippendorff's alpha coefficient (see Krippendorff(1970, 1980)) and
# its standard error for 3 raters or more when input dataset is a nxr matrix of alphanumeric
# ratings from n subjects and r raters.
#-------------
#The algorithm used to compute krippendorff's alpha is very different from anything that was published on this topic. Instead,
#it follows the equations presented by K. Gwet (2012)
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Gwet, K. (2012). Handbook of Inter-Rater Reliability: The Definitive Guide to Measuring the Extent of Agreement Among
# Multiple Raters, 3rd Edition. Advanced Analytics, LLC; 3rd edition (March 2, 2012)
#Krippendorff (1970). "Bivariate agreement coefficients for reliability of data." Sociological Methodology,2,139-150
#Krippendorff (1980). Content analysis: An introduction to its methodology (2nd ed.), New-bury Park, CA: Sage.
#======================================================================================
krippen.alpha.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  # Krippendorff's alpha (Krippendorff, 1970/1980) for 3 raters or more,
  # computed with the equations of Gwet (2012).
  # ratings: nxr matrix/data frame of raw alphanumeric ratings (n subjects, r raters);
  #          exclude subjects that no rater rated.
  # weights: a weighting-scheme name or a custom qxq numeric weight matrix.
  # conflev: confidence level of the reported confidence interval.
  # N      : population size, used in the finite-population correction f = n/N.
  # Returns invisibly c(pa, pe, krippen.alpha, stderr, p.value).
  # (stray debugging calls print(n) / print(paprime) from the original were removed)
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N               # finite-population correction
  # vector of all categories actually used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat <- trim(ratings.mat)
    categ.init <- trim(categ.init) # remove leading/trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # build the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat <- quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat <- ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat <- linear.weights(categ)
    else if (weights=="radical")
      weights.mat <- radical.weights(categ)
    else if (weights=="ratio")
      weights.mat <- ratio.weights(categ)
    else if (weights=="circular")
      weights.mat <- circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat <- bipolar.weights(categ)
    else weights.mat <- identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    if (is.numeric(ratings.mat)){
      k.mis <- (ratings.mat==categ[k])
      in.categ.k <- replace(k.mis,is.na(k.mis),FALSE)
      agree.mat[,k] <- in.categ.k%*%rep(1,r)
    }else
      agree.mat[,k] <- (trim(ratings.mat)==categ[k])%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # Krippendorff's alpha: keep only subjects rated by at least 2 raters
  # (drop=FALSE guards against the matrix collapsing to a vector when a
  # single subject remains)
  ri.vec <- agree.mat%*%rep(1,q)
  agree.mat <- agree.mat[(ri.vec>=2), , drop=FALSE]
  agree.mat.w <- agree.mat.w[(ri.vec>=2), , drop=FALSE]
  ri.vec <- ri.vec[(ri.vec>=2)]
  ri.mean <- mean(ri.vec)
  n <- nrow(agree.mat)
  epsi <- 1/sum(ri.vec)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  pa <- (1-epsi)*sum(sum.q/(ri.mean*(ri.vec-1)))/n + epsi
  pi.vec <- t(t(rep(1/n,n))%*%(agree.mat/ri.mean))
  pe <- sum(weights.mat * (pi.vec%*%t(pi.vec)))
  krippen.alpha <- (pa-pe)/(1-pe)
  # variance, standard error & p-value of Krippendorff's alpha
  den.ivec <- ri.mean*(ri.vec-1)
  pa.ivec <- sum.q/den.ivec
  pa.v <- mean(pa.ivec)
  pa.ivec <- pa.ivec-pa.v*(ri.vec-ri.mean)/ri.mean
  krippen.ivec <- (pa.ivec-pe)/(1-pe)
  pi.vec.wk. <- weights.mat%*%pi.vec
  pi.vec.w.k <- t(weights.mat)%*%pi.vec
  pi.vec.w <- (pi.vec.wk. + pi.vec.w.k)/2 # symmetrized weighted category probabilities
  pe.ivec <- (agree.mat%*%pi.vec.w)/ri.mean - sum(pi.vec) * (ri.vec-ri.mean)/ri.mean
  krippen.ivec.x <- krippen.ivec - 2*(1-krippen.alpha) * (pe.ivec-pe)/(1-pe)
  var.krippen <- ((1-f)/(n*(n-1))) * sum((krippen.ivec.x - krippen.alpha)^2)
  stderr <- sqrt(var.krippen) # alpha's standard error
  p.value <- 2*(1-pt(abs(krippen.alpha/stderr),n-1))
  lcb <- krippen.alpha - stderr*qt(1-(1-conflev)/2,n-1)        # lower confidence bound
  ucb <- min(1,krippen.alpha + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE){
    cat("Krippendorff's Alpha Coefficient\n")
    cat('==========================\n')
    cat('Percent agreement:',pa,'Percent chance agreement:',pe,'\n')
    cat('Krippendorff alpha coefficient:',krippen.alpha,'Standard error:',stderr,'\n')
    cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
    cat('P-value: ',p.value,'\n')
    # identical() keeps the condition scalar even when a custom qxq weight
    # matrix was supplied (weights!="unweighted" errors on a matrix in R >= 4.2)
    if (!identical(weights,"unweighted")) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,krippen.alpha,stderr,p.value))
}
#===========================================================================================
#conger.kappa.raw: Conger's kappa coefficient (see Conger(1980)) and its standard error for multiple raters when input
# dataset is a nxr matrix of alphanumeric ratings from n subjects and r raters
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Conger, A. J. (1980), ``Integration and Generalization of Kappas for Multiple Raters,"
# Psychological Bulletin, 88, 322-328.
#======================================================================================
conger.kappa.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  # Conger's generalized kappa (Conger, 1980) for multiple raters.
  # ratings: nxr matrix/data frame of raw alphanumeric ratings (n subjects, r raters);
  #          exclude subjects that no rater rated.
  # weights: a weighting-scheme name ("unweighted","quadratic","ordinal","linear",
  #          "radical","ratio","circular","bipolar") or a custom qxq numeric matrix.
  # conflev: confidence level of the reported confidence interval.
  # N      : population size, used in the finite-population correction f = n/N.
  # Returns invisibly c(pa, pe, conger.kappa, stderr, p.value).
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)){ratings.mat <- toupper(ratings.mat)}
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N               # finite-population correction
  # vector of all categories actually used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init))
    categ <- sort(categ.init)
  else {
    ratings.mat <- trim(ratings.mat)
    categ.init <- trim(categ.init) # remove leading/trailing blanks
    categ <- sort(categ.init[nchar(categ.init)>0])
  }
  q <- length(categ)
  # build the qxq weights matrix
  if (is.character(weights)){
    if (weights=="quadratic")
      weights.mat <- quadratic.weights(categ)
    else if (weights=="ordinal")
      weights.mat <- ordinal.weights(categ)
    else if (weights=="linear")
      weights.mat <- linear.weights(categ)
    else if (weights=="radical")
      weights.mat <- radical.weights(categ)
    else if (weights=="ratio")
      weights.mat <- ratio.weights(categ)
    else if (weights=="circular")
      weights.mat <- circular.weights(categ)
    else if (weights=="bipolar")
      weights.mat <- bipolar.weights(categ)
    else weights.mat <- identity.weights(categ)
  } else weights.mat <- as.matrix(weights)
  # nxq agreement matrix: number of raters per subject and category
  agree.mat <- matrix(0,nrow=n,ncol=q)
  for(k in 1:q){
    k.mis <- (ratings.mat==categ[k])
    in.categ.k <- replace(k.mis,is.na(k.mis),FALSE) # NA = subject not rated by that rater
    agree.mat[,k] <- in.categ.k%*%rep(1,r)
  }
  agree.mat.w <- t(weights.mat%*%t(agree.mat))
  # rxq classification matrix: number of subjects per rater and category
  classif.mat <- matrix(0,nrow=r,ncol=q)
  for(k in 1:q){
    with.mis <- (t(ratings.mat)==categ[k])
    without.mis <- replace(with.mis,is.na(with.mis),FALSE)
    classif.mat[,k] <- without.mis%*%rep(1,n)
  }
  # Conger's kappa: percent agreement and percent chance agreement
  ri.vec <- agree.mat%*%rep(1,q)
  sum.q <- (agree.mat*(agree.mat.w-1))%*%rep(1,q)
  n2more <- sum(ri.vec>=2) # subjects rated by at least 2 raters
  pa <- sum(sum.q[ri.vec>=2]/((ri.vec*(ri.vec-1))[ri.vec>=2]))/n2more
  ng.vec <- classif.mat%*%rep(1,q)
  pgk.mat <- classif.mat/(ng.vec%*%rep(1,q))
  p.mean.k <- (t(pgk.mat)%*%rep(1,r))/r
  s2kl.mat <- (t(pgk.mat)%*%pgk.mat - r * p.mean.k%*%t(p.mean.k))/(r-1)
  pe <- sum(weights.mat * (p.mean.k%*%t(p.mean.k) - s2kl.mat/r))
  conger.kappa <- (pa-pe)/(1-pe)
  # variance, standard error & p-value of Conger's kappa
  # (unused intermediates bkl.mat / pe.ivec1 / pe.ivec2 from the original were removed)
  lamda.ig.mat <- matrix(0,n,r)
  if (is.numeric(ratings.mat)){
    epsi.ig.mat <- 1-is.na(ratings.mat)
    epsi.ig.mat <- replace(epsi.ig.mat,is.na(epsi.ig.mat),FALSE)
  }else{
    epsi.ig.mat <- 1-(ratings.mat=="")
    epsi.ig.mat <- replace(epsi.ig.mat,is.na(epsi.ig.mat),FALSE)
  }
  for(k in 1:q){
    lamda.ig.kmat <- matrix(0,n,r)
    for(l in 1:q){
      delta.ig.mat <- (ratings.mat==categ[l])
      delta.ig.mat <- replace(delta.ig.mat,is.na(delta.ig.mat),FALSE)
      lamda.ig.kmat <- lamda.ig.kmat + weights.mat[k,l] * (delta.ig.mat - (epsi.ig.mat - rep(1,n)%*%t(ng.vec/n)) * (rep(1,n)%*%t(pgk.mat[,l])))
    }
    lamda.ig.kmat <- lamda.ig.kmat*(rep(1,n)%*%t(n/ng.vec))
    lamda.ig.mat <- lamda.ig.mat + lamda.ig.kmat*(r*mean(pgk.mat[,k]) - rep(1,n)%*%t(pgk.mat[,k]))
  }
  pe.ivec <- (lamda.ig.mat%*%rep(1,r)) / (r*(r-1))
  den.ivec <- ri.vec*(ri.vec-1)
  den.ivec <- den.ivec - (den.ivec==0) # replace 0 with -1 so the ratio below is always defined
  pa.ivec <- sum.q/den.ivec
  pe.r2 <- pe*(ri.vec>=2)
  conger.ivec <- (n/n2more)*(pa.ivec-pe.r2)/(1-pe)
  conger.ivec.x <- conger.ivec - 2*(1-conger.kappa) * (pe.ivec-pe)/(1-pe)
  var.conger <- ((1-f)/(n*(n-1))) * sum((conger.ivec.x - conger.kappa)^2)
  stderr <- sqrt(var.conger) # Conger's kappa standard error
  p.value <- 2*(1-pt(abs(conger.kappa/stderr),n-1))
  lcb <- conger.kappa - stderr*qt(1-(1-conflev)/2,n-1)        # lower confidence bound
  ucb <- min(1,conger.kappa + stderr*qt(1-(1-conflev)/2,n-1)) # upper confidence bound
  if(print==TRUE) {
    cat("Conger's Kappa Coefficient\n")
    cat('==========================\n')
    cat('Percent agreement: ',pa,'Percent chance agreement: ',pe,'\n')
    cat("Conger's kappa coefficient: ",conger.kappa,'Standard error:',stderr,'\n')
    cat(conflev*100,'% Confidence Interval: (',lcb,',',ucb,')\n')
    cat('P-value: ',p.value,'\n')
    # identical() keeps the condition scalar even when a custom qxq weight
    # matrix was supplied (weights!="unweighted" errors on a matrix in R >= 4.2)
    if (!identical(weights,"unweighted")) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights,'\n')
        cat('---------------------------\n')
      }
      else{
        cat('Weights: Custom Weights\n')
        cat('---------------------------\n')
      }
      print(weights.mat)
    }
  }
  invisible(c(pa,pe,conger.kappa,stderr,p.value))
}
#===========================================================================================
#bp.coeff.raw: Brennan-Prediger coefficient (see Brennan & Prediger(1981)) and its standard error for multiple raters when input
# dataset is a nxr matrix of alphanumeric ratings from n subjects and r raters
#-------------
#The input data "ratings" is a nxr data frame of raw alphanumeric ratings
#from n subjects and r raters. Exclude all subjects that are not rated by any rater.
#Bibliography:
#Brennan, R.L., and Prediger, D. J. (1981). ``Coefficient Kappa: some uses, misuses, and alternatives."
# Educational and Psychological Measurement, 41, 687-699.
#======================================================================================
# Brennan-Prediger agreement coefficient (Brennan & Prediger, 1981) and its
# standard error for multiple raters.
#
# Arguments:
#   ratings - nxr data frame/matrix of raw alphanumeric ratings from n subjects
#             and r raters; subjects rated by no rater must be excluded.
#   weights - "unweighted" (identity), one of the named weighting schemes
#             ("quadratic", "ordinal", "linear", "radical", "ratio",
#             "circular", "bipolar"), or a custom qxq numeric matrix.
#   conflev - confidence level for the confidence interval.
#   N       - population size; finite-population correction is n/N (0 if Inf).
#   print   - if TRUE, a formatted summary is written with cat().
# Returns (invisibly): c(pa, pe, bp.coeff, stderr, p.value).
bp.coeff.raw <- function(ratings,weights="unweighted",conflev=0.95,N=Inf,print=TRUE){
  ratings.mat <- as.matrix(ratings)
  if (is.character(ratings.mat)) { ratings.mat <- toupper(ratings.mat) }
  n <- nrow(ratings.mat) # number of subjects
  r <- ncol(ratings.mat) # number of raters
  f <- n/N               # finite-population correction factor
  # Vector of all categories actually used by the raters
  categ.init <- unique(as.vector(na.omit(ratings.mat)))
  if (is.numeric(categ.init)) {
    categ <- sort(categ.init)
  } else {
    ratings.mat <- trim(ratings.mat)
    categ.init <- trim(categ.init) # remove leading and trailing blanks
    categ <- sort(categ.init[nchar(categ.init) > 0])
  }
  q <- length(categ)
  # Build the qxq weights matrix
  if (is.character(weights)) {
    weights.mat <- switch(weights,
      quadratic = quadratic.weights(categ),
      ordinal   = ordinal.weights(categ),
      linear    = linear.weights(categ),
      radical   = radical.weights(categ),
      ratio     = ratio.weights(categ),
      circular  = circular.weights(categ),
      bipolar   = bipolar.weights(categ),
      identity.weights(categ))  # default: unweighted analysis
  } else {
    weights.mat <- as.matrix(weights)
  }
  # nxq agreement matrix: raters per subject and category
  agree.mat <- matrix(0, nrow = n, ncol = q)
  for (k in 1:q) {
    if (is.numeric(ratings.mat)) {
      k.mis <- (ratings.mat == categ[k])
      in.categ.k <- replace(k.mis, is.na(k.mis), FALSE)  # NA rating = not in category
      agree.mat[, k] <- in.categ.k %*% rep(1, r)
    } else {
      agree.mat[, k] <- (trim(ratings.mat) == categ[k]) %*% rep(1, r)
    }
  }
  agree.mat.w <- t(weights.mat %*% t(agree.mat))
  # Percent agreement, restricted to subjects rated by at least 2 raters
  ri.vec <- agree.mat %*% rep(1, q)
  sum.q <- (agree.mat * (agree.mat.w - 1)) %*% rep(1, q)
  n2more <- sum(ri.vec >= 2)
  pa <- sum(sum.q[ri.vec >= 2] / ((ri.vec * (ri.vec - 1))[ri.vec >= 2])) / n2more
  # Brennan-Prediger chance agreement assumes uniform category use
  pe <- sum(weights.mat) / (q^2)
  bp.coeff <- (pa - pe) / (1 - pe)
  # Variance, standard error & p-value via subject-level linearization
  den.ivec <- ri.vec * (ri.vec - 1)
  den.ivec <- den.ivec - (den.ivec == 0) # replace each 0 value with -1 to make the next ratio calculation always possible
  pa.ivec <- sum.q / den.ivec
  pe.r2 <- pe * (ri.vec >= 2)
  bp.ivec <- (n / n2more) * (pa.ivec - pe.r2) / (1 - pe)
  var.bp <- ((1 - f) / (n * (n - 1))) * sum((bp.ivec - bp.coeff)^2)
  stderr <- sqrt(var.bp) # BP's standard error
  p.value <- 2 * (1 - pt(abs(bp.coeff / stderr), n - 1))
  lcb <- bp.coeff - stderr * qt(1 - (1 - conflev) / 2, n - 1)         # lower confidence bound
  ucb <- min(1, bp.coeff + stderr * qt(1 - (1 - conflev) / 2, n - 1)) # upper confidence bound
  if (isTRUE(print)) {
    cat("Brennan-Prediger Coefficient\n")
    cat('============================\n')
    cat('Percent agreement:', pa, 'Percent chance agreement:', pe, '\n')
    cat('B-P coefficient:', bp.coeff, 'Standard error:', stderr, '\n')
    cat(conflev * 100, '% Confidence Interval: (', lcb, ',', ucb, ')\n')
    cat('P-value: ', p.value, '\n')
    # BUG FIX: the original tested `weights != "unweighted"`, which is an
    # invalid (length > 1) if-condition when `weights` is a custom matrix,
    # and printed `weights.mat` TWICE in the named-weights branch. This now
    # mirrors the sibling Conger-kappa routine: one branch header, one print.
    if (!identical(weights, "unweighted")) {
      cat('\n')
      if (!is.numeric(weights)) {
        cat('Weights: ', weights, '\n')
      } else {
        cat('Weights: Custom Weights\n')
      }
      cat('---------------------------\n')
      print(weights.mat)
    }
  }
  invisible(c(pa, pe, bp.coeff, stderr, p.value))
}
#
#----- Additional functions needed to run the main functions. If the main functions must be included in another R script, then
# the user will need to add these additional functions to the new script file.
#
# ==============================================================
# trim(x): This is an R function for trimming leading and trailing blanks
# ==============================================================
# Strip leading and trailing whitespace from every element of a character vector.
trim <- function(x) {
  x <- gsub("^[[:space:]]+", "", x)  # drop leading blanks
  gsub("[[:space:]]+$", "", x)       # drop trailing blanks
}
# ==============================================================
# The following functions generate various weight matrices used
# in the weighted or unweighted analyses.
# ==============================================================
# Identity (unweighted) weight matrix: qxq identity, q = number of categories.
identity.weights <- function(categ) {
  diag(length(categ))
}
# Quadratic weight matrix: w[k,l] = 1 - (c_k - c_l)^2 / (max - min)^2.
# Numeric categories are sorted and used directly; otherwise ranks 1..q are used.
# IDIOM: the original scalar double loop is replaced by a single vectorized
# outer() call; results (including the NaN produced when q == 1) are identical.
quadratic.weights <- function(categ) {
  q <- length(categ)
  if (is.numeric(categ)) {
    categ.vec <- sort(categ)
  } else {
    categ.vec <- seq_len(q)  # fall back to category ranks
  }
  xmin <- min(categ.vec)
  xmax <- max(categ.vec)
  1 - outer(categ.vec, categ.vec, function(a, b) (a - b)^2) / (xmax - xmin)^2
}
# Linear weight matrix: w[k,l] = 1 - |c_k - c_l| / |max - min|.
# Numeric categories are sorted; non-numeric categories use ranks 1..q.
linear.weights <- function(categ) {
  q <- length(categ)
  categ.vec <- if (is.numeric(categ)) sort(categ) else seq_len(q)
  span <- abs(max(categ.vec) - min(categ.vec))
  weights <- matrix(0, q, q)
  for (row in seq_len(q)) {
    for (col in seq_len(q)) {
      weights[row, col] <- 1 - abs(categ.vec[row] - categ.vec[col]) / span
    }
  }
  weights
}
#--------------------------------
#--------------------------------
# Radical (square-root) weight matrix:
# w[k,l] = 1 - sqrt(|c_k - c_l|) / sqrt(|max - min|).
radical.weights <- function(categ) {
  categ.vec <- if (is.numeric(categ)) sort(categ) else seq_along(categ)
  spread <- sqrt(abs(max(categ.vec) - min(categ.vec)))
  1 - sqrt(abs(outer(categ.vec, categ.vec, "-"))) / spread
}
#--------------------------------
#--------------------------------
# Ratio weight matrix:
# w[k,l] = 1 - ((c_k - c_l)/(c_k + c_l))^2 / ((max - min)/(max + min))^2.
# NOTE: meaningful for strictly positive numeric categories (c_k + c_l != 0).
ratio.weights <- function(categ) {
  categ.vec <- if (is.numeric(categ)) sort(categ) else seq_along(categ)
  xmin <- min(categ.vec)
  xmax <- max(categ.vec)
  rel <- outer(categ.vec, categ.vec, function(a, b) ((a - b) / (a + b))^2)
  1 - rel / ((xmax - xmin) / (xmax + xmin))^2
}
#--------------------------------
#--------------------------------
# Circular weight matrix: squared-sine distance on a circle of circumference
# U = max - min + 1, rescaled so the largest distance gets weight 0 and the
# diagonal gets weight 1.
circular.weights <- function(categ) {
  categ.vec <- if (is.numeric(categ)) sort(categ) else seq_along(categ)
  U <- max(categ.vec) - min(categ.vec) + 1
  raw <- sin(pi * outer(categ.vec, categ.vec, "-") / U)^2
  1 - raw / max(raw)
}
#--------------------------------
#--------------------------------
# Bipolar weight matrix: penalizes disagreement more near the scale midpoint,
# normalized so the largest penalty maps to weight 0 and the diagonal to 1.
bipolar.weights <- function(categ) {
  q <- length(categ)
  categ.vec <- if (is.numeric(categ)) sort(categ) else seq_len(q)
  xmin <- min(categ.vec)
  xmax <- max(categ.vec)
  raw <- matrix(0, q, q)  # diagonal penalties stay 0
  for (k in seq_len(q)) {
    for (l in seq_len(q)) {
      if (k != l) {
        s <- categ.vec[k] + categ.vec[l]
        raw[k, l] <- (categ.vec[k] - categ.vec[l])^2 /
          ((s - 2 * xmin) * (2 * xmax - s))
      }
    }
  }
  1 - raw / max(raw)
}
#--------------------------------
#--------------------------------
# Ordinal weight matrix: the penalty between categories k and l is the number
# of category pairs within [min(k,l), max(k,l)], normalized so the largest
# penalty maps to weight 0 and the diagonal to 1. Only category ranks are used.
ordinal.weights <- function(categ) {
  q <- length(categ)
  raw <- matrix(0, q, q)
  for (k in seq_len(q)) {
    for (l in seq_len(q)) {
      span <- abs(k - l) + 1            # categories between k and l, inclusive
      raw[k, l] <- span * (span - 1) / 2  # = choose(span, 2)
    }
  }
  1 - raw / max(raw)
}
|
# Import one year of NAIS ion summary (.sum) files from
# ./data/NAIS3/NAIS3_sum_20132017 (relative to the current working directory)
# and return one long-format data.table with columns
# startTime, endTime, startSize, endSize, value, ion ("positive"/"negative").
# Requires data.table (fread, rbindlist), reshape2/data.table (melt),
# dplyr (mutate, lead, %>%) and lubridate (force_tz) to be attached.
import_NAIS_data_2013_2018=function(year){
  yearsel=year
  # For each bin start size, return the start of the NEXT bin (= this bin's end
  # size); the last bin gets the fixed upper bound 0.7e-7.
  getEndSize=function(vec,levels){
    out=numeric(length(vec))  # preallocate (original grew `out` with c(): O(n^2))
    for (i in seq_along(vec)){
      # NOTE(review): exact float match — works because vec values are parsed
      # from the same file headers that define `levels`; confirm.
      pos=which(levels==vec[i])
      # BUG FIX: the original tested `pos < length(vec)` (vec is the long data
      # column, not the ~30-entry bin list), so the LAST bin received
      # endSize = levels[pos+1] = NA instead of the intended 0.7e-7 fallback.
      if (pos < length(levels)){
        out[i]=levels[pos+1]
      } else{
        out[i]=0.7e-7
      }
    }
    return(out)
  }#getEndSize
  workDir=getwd()
  on.exit(setwd(workDir), add=TRUE)  # restore the working directory even on error
  setwd(paste0(workDir,"/data/NAIS3/NAIS3_sum_20132017"))
  # List all files with extension .sum and keep only the requested year
  # (characters 7-10 of each file name encode the year)
  tempall = list.files(pattern="*.sum")
  tempyears=substring(tempall,7,10)
  temp=tempall[which(tempyears==yearsel)]
  # Build a list of long-format data frames (one per file) plus a name vector
  NAIS_ion_list_13_17=list()
  NAIS_ion_list_13_17_indices=c()
  for (i in seq_along(temp)){
    filename=temp[i]
    wd=fread(temp[i],header=FALSE,skip=1)
    headers=names(fread(temp[i],header=TRUE,skip=0))
    levels=as.numeric(as.character(headers))[-1]  # size-bin start values (col 1 is time)
    names(wd)=headers
    names(wd)[1]="Time"
    # Time is a fractional day-of-year: split into integer day and hour fraction
    wd$day=wd$Time %/% 1
    wd$hour=wd$Time %%1
    wd$Time=paste(yearsel,wd$day,sep="/")
    wd$Time=as.POSIXct(wd$Time,format="%Y/%j")
    wd$Time=force_tz(wd$Time,tzone="UTC")
    wd$startTime=wd$Time+24*3600*wd$hour
    wd$Time=NULL
    wd$day=NULL
    wd$hour=NULL
    wd$endTime=lead(wd$startTime,1)  # each row ends where the next row starts
    wd2= melt(wd,id.vars = c("startTime","endTime"))
    wd2$startSize=as.numeric(as.character(wd2$variable))
    wd2 = wd2 %>%
      mutate(endSize=getEndSize(startSize,levels))
    # Guard against non-positive sizes (e.g. for log-scale plotting)
    wd2$startSize=ifelse(wd2$startSize > 0, wd2$startSize,1e-10)
    wd2$variable=NULL
    NAIS_ion_list_13_17_indices[i]=filename
    NAIS_ion_list_13_17[[i]]=wd2
  }
  # Tag each file's data with its ion polarity, inferred from the file name
  # (a "p" marks positive ions, an "n" negative ions once the fixed
  # "nds"/"NAIS" substrings are removed).
  NAIS_ion_list_13_17_indices=gsub("nds","",NAIS_ion_list_13_17_indices)
  NAIS_ion_list_13_17_indices=gsub("NAIS","",NAIS_ion_list_13_17_indices)
  posindices=which((data.frame(date=NAIS_ion_list_13_17_indices,flag=FALSE) %>%
                      mutate(flag=grepl("p",date)))$flag)
  negindices=which((data.frame(date=NAIS_ion_list_13_17_indices,flag=FALSE) %>%
                      mutate(flag=grepl("n",date)))$flag)
  helplist=NAIS_ion_list_13_17
  for (i in seq_along(helplist)){
    if (i %in% posindices){
      helplist[[i]]$ion="positive"
    } else if (i %in% negindices){
      helplist[[i]]$ion="negative"
    }
  }
  # Back to the main workDir (also guaranteed by on.exit above)
  setwd(workDir)
  help=rbindlist(helplist, use.names=TRUE, fill=TRUE, idcol=NULL)
  # NOTE(review): tz names are case-sensitive on most platforms — "Etc/GMT+4"
  # is the canonical spelling; confirm "etc/GMT+4" actually resolves here.
  attributes(help$startTime)$tzone="etc/GMT+4"
  attributes(help$endTime)$tzone="etc/GMT+4"
  return(help)
}
| /Rproject/archive/R/2018_03_16/function_import_NAIS_data_2013_2018.R | no_license | daliagachc/GR_chc | R | false | false | 2,647 | r |
# NOTE(review): this is a byte-for-byte duplicate of the function defined just
# above — likely an accidental re-paste; confirm and deduplicate. The second
# definition silently overrides the first at source time.
# Imports one year of NAIS ion summary (.sum) files and returns one long-format
# data.table (startTime, endTime, startSize, endSize, value, ion).
import_NAIS_data_2013_2018=function(year){
yearsel=year
#import all nais data
# Map each bin-start size to the next bin's start (= this bin's end size).
getEndSize=function(vec,levels){
out=c()
for (i in 1:length(vec)){
pos=which(levels==vec[i])
# NOTE(review): this compares pos against length(vec) — the long data column —
# instead of length(levels), so the last size bin gets endSize = NA rather than
# the 0.7e-7 fallback below; confirm and fix. Also: `out` grows via c() (O(n^2))
# and `levels==vec[i]` is an exact float comparison.
if (pos < length(vec)){
out=c(out,levels[pos+1])
} else{
out=c(out,0.7e-7)
}
}
return(out)
}#getEndSize
workDir=getwd()
# setwd("C:/fastFiles/springCourse")
# NOTE(review): if any step below errors, the working directory is not restored.
setwd(paste0(workDir,"/data/NAIS3/NAIS3_sum_20132017"))
#creating a list pointing to all files with extension .sum
tempall = list.files(pattern="*.sum")
tempyears=substring(tempall,7,10)  # characters 7-10 of the file name encode the year
temp=tempall[which(tempyears==yearsel)]
#creating a list of dataframes for the NAIS data.
#Also creating a vector of names
NAIS_ion_list_13_17=list()
NAIS_ion_list_13_17_indices=c()
for (i in 1:length(temp)){
filename=temp[i]
wd=fread(temp[i],header=F,skip=1)
headers=names(fread(temp[i],header=T,skip=0))
levels=as.numeric(as.character(headers))[-1]  # size-bin start values (column 1 is time)
names(wd)=headers
names(wd)[1]="Time"
# Time is a fractional day-of-year: split into integer day and hour fraction
wd$day=wd$Time %/% 1
wd$hour=wd$Time %%1
wd$Time=paste(yearsel,wd$day,sep="/")
wd$Time=as.POSIXct(wd$Time,format="%Y/%j")
wd$Time=force_tz(wd$Time,tzone="UTC")
wd$startTime=wd$Time+24*3600*wd$hour
wd$Time=NULL
wd$day=NULL
wd$hour=NULL
wd$endTime=lead(wd$startTime,1)  # each row ends where the next row starts
wd2= melt(wd,id.vars = c("startTime","endTime"))
wd2$startSize=as.numeric(as.character(wd2$variable))
wd2 = wd2 %>%
mutate(endSize=getEndSize(startSize,levels))
# Guard against non-positive sizes (e.g. for log-scale plotting)
wd2$startSize=ifelse(wd2$startSize > 0, wd2$startSize,1e-10)
wd2$variable=NULL
NAIS_ion_list_13_17_indices[i]=filename
NAIS_ion_list_13_17[[i]]=wd2
}
#now I create a dataframe containing that information. Sometimes easier to work with.
# Ion polarity is inferred from the cleaned file name ("p" = positive, "n" = negative).
NAIS_ion_list_13_17_indices=gsub("nds","",NAIS_ion_list_13_17_indices)
NAIS_ion_list_13_17_indices=gsub("NAIS","",NAIS_ion_list_13_17_indices)
posindices=which((data.frame(date=NAIS_ion_list_13_17_indices,flag=F) %>%
mutate(flag=grepl("p",date)))$flag)
negindices=which((data.frame(date=NAIS_ion_list_13_17_indices,flag=F) %>%
mutate(flag=grepl("n",date)))$flag)
helplist=NAIS_ion_list_13_17
for (i in 1:length(helplist)){
if (i %in% posindices){
helplist[[i]]$ion="positive"
} else if (i %in% negindices){
helplist[[i]]$ion="negative"
}
}
#back to main workDir
setwd(workDir)
help=rbindlist(helplist, use.names=T, fill=T, idcol=NULL)
# NOTE(review): tz names are case-sensitive on most platforms; canonical
# spelling is "Etc/GMT+4" — confirm "etc/GMT+4" resolves here.
attributes(help$startTime)$tzone="etc/GMT+4"
attributes(help$endTime)$tzone="etc/GMT+4"
return(help)
}
|
# https://towardsdatascience.com/twitter-sentiment-analysis-and-visualization-using-r-22e1f70f6967
library(dplyr)
library(tidyr)
library(tidytext)
library(ggplot2)
library(purrr)
library(tidyverse)
library(tibble)
library(twitteR)
library(ROAuth)
library(wordcloud)
library(reshape2)
library(RColorBrewer)
# Loading credentials
# NOTE(review): API credentials are hard-coded placeholders; load them from
# environment variables or a gitignored config file — never commit real keys.
consumer_key <- 'my customer key'
consumer_secret <- 'my secret key'
access_token <- 'my token'
access_secret <- 'my access key'
# Setting up to authenticate
# NOTE(review): twitteR targets the retired v1.1 REST API — confirm the
# endpoints still work with current Twitter/X API access.
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Pull up to 1000 English tweets per candidate hashtag, 2000 for the election tag
tweets_bernie <- searchTwitter("#bernie", n=1000,lang = "en")
tweets_biden <- searchTwitter("#biden", n=1000,lang = "en")
tweets_trump <- searchTwitter("#trump", n=1000,lang = "en")
tweets_election2020 <- searchTwitter("#election2020", n=2000,lang = "en")
# Stripping retweets so each tweet's text is counted only once
no_rt_bernie <- strip_retweets(tweets_bernie)
no_rt_biden <- strip_retweets(tweets_biden)
no_rt_trump <- strip_retweets(tweets_trump)
no_rt_election2020 <- strip_retweets(tweets_election2020)
# Converting extracted tweets without retweet to dataframe
bernie <- twListToDF(no_rt_bernie)
biden <-twListToDF(no_rt_biden)
trump <- twListToDF(no_rt_trump)
election2020 <- twListToDF(no_rt_election2020)
#bernie tweets
# Clean, tokenize and score #bernie tweets with the Bing lexicon, then plot
# the top contributing words and overall polarity.
#remove unnecessary elements include: link, username, emoji, numbers
tweets.bernie = bernie %>% select(screenName, text)
tweets.bernie$clean_text <- gsub("http\\S+", " ", tweets.bernie$text)
tweets.bernie$clean_text <- gsub("@\\w+", " ", tweets.bernie$clean_text)
# Drop every non-ASCII character (emoji, accented letters, etc.)
tweets.bernie$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.bernie$clean_text)
tweets.bernie$clean_text <- gsub("[[:digit:]]", " ", tweets.bernie$clean_text)
# Removing "trump" since trump is considered as positive polarity and emotion during text analysis
tweets.bernie$clean_text <- gsub("Trump", " ", tweets.bernie$clean_text)
tweets.bernie$clean_text <- gsub("trump", " ", tweets.bernie$clean_text)
tweets.bernie$clean_text <- gsub("TRUMP", " ", tweets.bernie$clean_text)
#unnest_tokens() converts to lowercase and strips punctuation while tokenizing
tweets.bernie_stem <- tweets.bernie %>%
select(clean_text) %>%
unnest_tokens(word, clean_text)
#remove stop words
cleaned_tweets.bernie <- tweets.bernie_stem %>%
anti_join(stop_words)
#bing sentiment analysis
bing_bernie = cleaned_tweets.bernie %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
bing_bernie
#plot top 10 negative and positive
# NOTE(review): "contatining" in the title string is a typo ("containing").
bing_bernie %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word=reorder(word,n)) %>%
ggplot(aes(word,n,fill=sentiment))+
geom_col(show.legend=FALSE)+
facet_wrap(~sentiment,scale="free_y")+
labs(title="Tweets contatining '#bernie'", y="Contribution to sentiment", x=NULL)+
coord_flip()+theme_bw()
#Polarity plot
# NOTE(review): fill = c("blue","red") INSIDE aes() maps the literal strings
# as factor levels rather than using them as colors; intended usage is
# scale_fill_manual() or fill outside aes() — confirm.
polar_bar_bernie <- bing_bernie %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = c("blue","red"))) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Bernie Tweets Polarity") +
coord_flip()
polar_bar_bernie
#biden tweets
# Same pipeline as the #bernie section: clean, tokenize, Bing-score, plot.
#remove unnecessary elements include: link, username, emoji, numbers
tweets.biden = biden %>% select(screenName, text)
tweets.biden$clean_text <- gsub("http\\S+", " ", tweets.biden$text)
tweets.biden$clean_text <- gsub("@\\w+", " ", tweets.biden$clean_text)
# Drop every non-ASCII character (emoji, accented letters, etc.)
tweets.biden$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.biden$clean_text)
tweets.biden$clean_text <- gsub("[[:digit:]]", " ", tweets.biden$clean_text)
# Removing "trump" since trump is considered as positive polarity and emotion during text analysis
tweets.biden$clean_text <- gsub("Trump", " ", tweets.biden$clean_text)
tweets.biden$clean_text <- gsub("trump", " ", tweets.biden$clean_text)
tweets.biden$clean_text <- gsub("TRUMP", " ", tweets.biden$clean_text)
#unnest_tokens() converts to lowercase and strips punctuation while tokenizing
tweets.biden_stem <- tweets.biden %>%
select(clean_text) %>%
unnest_tokens(word, clean_text)
#remove stop words
cleaned_tweets.biden <- tweets.biden_stem %>%
anti_join(stop_words)
#bing sentiment analysis
bing_biden = cleaned_tweets.biden %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
bing_biden
#plot top 10 negative and positive
bing_biden %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word=reorder(word,n)) %>%
ggplot(aes(word,n,fill=sentiment))+
geom_col(show.legend=FALSE)+
facet_wrap(~sentiment,scale="free_y")+
labs(title="Tweets contatining '#biden'", y="Contribution to sentiment", x=NULL)+
coord_flip()+theme_bw()
#Polarity plot
# NOTE(review): fill = c("blue","red") inside aes() maps literal strings as
# factor levels, not colors — confirm intent (see #bernie section).
polar_bar_biden <- bing_biden %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = c("blue","red"))) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Biden Tweets Polarity") +
coord_flip()
polar_bar_biden
#trump tweets
# Same pipeline as the #bernie section: clean, tokenize, Bing-score, plot.
#remove unnecessary elements include: link, username, emoji, numbers
tweets.trump = trump %>% select(screenName, text)
tweets.trump$clean_text <- gsub("http\\S+", " ", tweets.trump$text)
tweets.trump$clean_text <- gsub("@\\w+", " ", tweets.trump$clean_text)
# Drop every non-ASCII character (emoji, accented letters, etc.)
tweets.trump$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.trump$clean_text)
tweets.trump$clean_text <- gsub("[[:digit:]]", " ", tweets.trump$clean_text)
# Removing "trump" since trump is considered as positive polarity and emotion during text analysis
tweets.trump$clean_text <- gsub("Trump", " ", tweets.trump$clean_text)
tweets.trump$clean_text <- gsub("trump", " ", tweets.trump$clean_text)
tweets.trump$clean_text <- gsub("TRUMP", " ", tweets.trump$clean_text)
#unnest_tokens() converts to lowercase and strips punctuation while tokenizing
tweets.trump_stem <- tweets.trump %>%
select(clean_text) %>%
unnest_tokens(word, clean_text)
#remove stop words
cleaned_tweets.trump <- tweets.trump_stem %>%
anti_join(stop_words)
#bing sentiment analysis
bing_trump = cleaned_tweets.trump %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
bing_trump
#plot top 10 negative and positive
bing_trump %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word=reorder(word,n)) %>%
ggplot(aes(word,n,fill=sentiment))+
geom_col(show.legend=FALSE)+
facet_wrap(~sentiment,scale="free_y")+
labs(title="Tweets contatining '#trump'", y="Contribution to sentiment", x=NULL)+
coord_flip()+theme_bw()
#Polarity plot
# NOTE(review): fill = c("blue","red") inside aes() maps literal strings as
# factor levels, not colors — confirm intent (see #bernie section).
polar_bar_trump <- bing_trump %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = c("blue","red"))) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Trump Tweets Polarity") +
coord_flip()
polar_bar_trump
#election2020 tweets
# Same pipeline as the #bernie section: clean, tokenize, Bing-score, plot.
#remove unnecessary elements include: link, username, emoji, numbers
tweets.election = election2020 %>% select(screenName, text)
tweets.election$clean_text <- gsub("http\\S+", " ", tweets.election$text)
tweets.election$clean_text <- gsub("@\\w+", " ", tweets.election$clean_text)
# Drop every non-ASCII character (emoji, accented letters, etc.)
tweets.election$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.election$clean_text)
tweets.election$clean_text <- gsub("[[:digit:]]", " ", tweets.election$clean_text)
# Removing "trump" since trump is considered as positive polarity and emotion during text analysis
tweets.election$clean_text <- gsub("Trump", " ", tweets.election$clean_text)
tweets.election$clean_text <- gsub("trump", " ", tweets.election$clean_text)
tweets.election$clean_text <- gsub("TRUMP", " ", tweets.election$clean_text)
#unnest_tokens() converts to lowercase and strips punctuation while tokenizing
tweets.election_stem <- tweets.election %>%
select(clean_text) %>%
unnest_tokens(word, clean_text)
#remove stop words
cleaned_tweets.election <- tweets.election_stem %>%
anti_join(stop_words)
#bing sentiment analysis
bing_election = cleaned_tweets.election %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
bing_election
#plot top 10 negative and positive
bing_election %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word=reorder(word,n)) %>%
ggplot(aes(word,n,fill=sentiment))+
geom_col(show.legend=FALSE)+
facet_wrap(~sentiment,scale="free_y")+
labs(title="Tweets contatining '#election2020'", y="Contribution to sentiment", x=NULL)+
coord_flip()+theme_bw()
#Polarity plot
# NOTE(review): fill = c("blue","red") inside aes() maps literal strings as
# factor levels, not colors — confirm intent (see #bernie section).
polar_bar_election <- bing_election %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = c("blue","red"))) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Election2020 Tweets Polarity") +
coord_flip()
polar_bar_election
#sentiment score
# sentiment_bing(twt): score a single tweet's text with the Bing lexicon.
# Returns a list with:
#   score   - sum of word scores (+n per positive word, -n per negative word)
#   type    - "Type1" when NO lexicon words were found (score trivially 0),
#             "Type2" otherwise (a zero then means positives and negatives cancel)
#   twt_tbl - the per-word sentiment count table used to build the score
# Depends on tibble/dplyr/tidytext being attached and on the stop_words data set.
sentiment_bing = function(twt){
twt_tbl = tibble(text=twt) %>% #text cleaning; "trump" is removed because the Bing lexicon scores it as positive
mutate(
stripped_text=gsub("http\\S+"," ",text),
stripped_text=gsub("TRUMP", " ",stripped_text),
stripped_text=gsub("Trump", " ",stripped_text),
stripped_text=gsub("trump", " ",stripped_text)
) %>%
unnest_tokens(word,stripped_text) %>%
anti_join(stop_words) %>%
inner_join(get_sentiments("bing")) %>%
count(word,sentiment, sort= TRUE) %>%
ungroup() %>%
mutate(
score= case_when( #create a "score" column
sentiment == 'negative'~n*(-1), #assigns -n for negative words
sentiment == 'positive'~n*1) #assigns +n for positive words
)
sent.score=case_when(
nrow(twt_tbl)==0~0, #if there are no lexicon words, score is 0
nrow(twt_tbl)>0~sum(twt_tbl$score) #otherwise, sum the positives and negatives
)
zero.type=case_when(
nrow(twt_tbl)==0~"Type1", #no lexicon words at all, so a zero is trivial
nrow(twt_tbl)>0~"Type2" #a zero here means the word scores sum to 0
)
list(score= sent.score, type=zero.type, twt_tbl=twt_tbl)
}
#apply function: returns a list of all the sentiment scores, types and tables of the tweets
bernie_sent = lapply(bernie$text, function(x){sentiment_bing(x)})
biden_sent = lapply(biden$text, function(x){sentiment_bing(x)})
trump_sent = lapply(trump$text, function(x){sentiment_bing(x)})
election_sent = lapply(election2020$text, function(x){sentiment_bing(x)})
#create a tibble specifying the #keywords, sentiment scores and types
# (one row per tweet; purrr::map(list, 'name') plucks that element per tweet)
tweets_sentiment = bind_rows(
tibble(
keyword='#bernie',
score=unlist(map(bernie_sent, 'score')),
type=unlist(map(bernie_sent, 'type'))
),
tibble(
keyword='#biden',
score=unlist(map(biden_sent, 'score')),
type=unlist(map(biden_sent, 'type'))
),
tibble(
keyword='#trump',
score=unlist(map(trump_sent, 'score')),
type=unlist(map(trump_sent, 'type'))
)
)
# The election tag is kept separate from the candidate comparison
election_sentiment= tibble(
keyword='#election2020',
score=unlist(map(election_sent, 'score')),
type=unlist(map(election_sent, 'type'))
)
#plot histograms of tweets sentiment for the three candidates, one facet each
ggplot(tweets_sentiment,aes(x=score, fill=keyword)) +
geom_histogram(bins=10, alpha=0.6) +
facet_grid(~keyword) + theme_bw()
#plot histogram of tweets sentiment for election 2020
ggplot(election_sentiment,aes(x=score, fill=keyword)) +
geom_histogram(bins=10, alpha=0.6) +
theme_bw()
#https://www.tidytextmining.com/sentiment.html
#https://cran.r-project.org/web/packages/syuzhet/vignettes/syuzhet-vignette.html
#https://www.datacamp.com/community/tutorials/sentiment-analysis-R
# NRC emotion sentiment analysis
# Count words per NRC emotion (anger, joy, fear, ...) for each hashtag,
# dropping the plain positive/negative polarity categories, and plot the
# emotion distribution as a horizontal bar chart.
#bernie
nrc_bernie = cleaned_tweets.bernie %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
nrc_bernie
#plot
bernie_plot <- nrc_bernie %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = -word_count)) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Bernie NRC Sentiment") +
coord_flip()
bernie_plot
#biden
nrc_biden = cleaned_tweets.biden %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
nrc_biden
#plot
biden_plot <- nrc_biden %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = -word_count)) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Biden NRC Sentiment") +
coord_flip()
biden_plot
#trump
nrc_trump = cleaned_tweets.trump %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
nrc_trump
#plot
trump_plot <- nrc_trump %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = -word_count)) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Trump NRC Sentiment") +
coord_flip()
trump_plot
#election2020
# NRC emotion counts for the #election2020 tweets (polarity categories dropped),
# plus a horizontal bar chart of the emotion distribution.
nrc_election = cleaned_tweets.election %>%
  inner_join(get_sentiments("nrc")) %>%
  filter(!sentiment %in% c("positive","negative")) %>%
  count(word, sentiment, sort=TRUE) %>%
  ungroup()
# BUG FIX: the original echoed `nrc_trump` here (copy-paste error from the
# previous section); show the election table that was just computed.
nrc_election
#plot
election_plot <- nrc_election %>%
  group_by(sentiment) %>%
  summarise(word_count = n()) %>%
  ungroup() %>%
  mutate(sentiment = reorder(sentiment, word_count)) %>%
  #Use `fill = -word_count` to make the larger bars darker
  ggplot(aes(sentiment, word_count, fill = -word_count)) +
  geom_col() +
  guides(fill = FALSE) + #Turn off the legend
  labs(x = NULL, y = "Word Count") +
  ggtitle("Election2020 NRC Sentiment") +
  coord_flip()
election_plot
#wordcloud
#https://www.r-bloggers.com/thrice-sentiment-analysis-emotions-in-lyrics/
#common wordcloud: top 100 words per hashtag by raw frequency
cleaned_tweets.bernie %>%
anti_join(stop_words) %>%
count(word) %>%
with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
cleaned_tweets.biden %>%
anti_join(stop_words) %>%
count(word) %>%
with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
cleaned_tweets.trump %>%
anti_join(stop_words) %>%
count(word) %>%
with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
cleaned_tweets.election %>%
anti_join(stop_words) %>%
count(word) %>%
with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
#polarity: comparison clouds split into negative (brown) vs positive (dark green)
cleaned_tweets.bernie %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown", "dark green"),
title.size=1, max.words = 50)
cleaned_tweets.biden %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown", "dark green"),
title.size=1, max.words = 50)
cleaned_tweets.trump %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown", "dark green"),
title.size=1, max.words = 50)
cleaned_tweets.election %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = brewer.pal(8, "Dark2"),
title.size=1, max.words=50, random.order = FALSE)
| /Final_Rscript.R | no_license | nitroamy/DA420_Predictive_Analytics_Final_Project | R | false | false | 18,429 | r | # https://towardsdatascience.com/twitter-sentiment-analysis-and-visualization-using-r-22e1f70f6967
library(dplyr)
library(tidyr)
library(tidytext)
library(ggplot2)
library(purrr)
library(tidyverse)
library(tibble)
library(twitteR)
library(ROAuth)
library(wordcloud)
library(reshape2)
library(RColorBrewer)
# NOTE(review): everything from here down duplicates the script above (setup
# through the Biden analysis) — looks like an accidental re-paste; confirm
# and deduplicate.
# Loading credentials
# NOTE(review): hard-coded placeholder credentials; load from environment
# variables instead of committing them to source.
consumer_key <- 'my customer key'
consumer_secret <- 'my secret key'
access_token <- 'my token'
access_secret <- 'my access key'
# Setting up to authenticate
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Pull up to 1000 English tweets per candidate hashtag, 2000 for the election tag
tweets_bernie <- searchTwitter("#bernie", n=1000,lang = "en")
tweets_biden <- searchTwitter("#biden", n=1000,lang = "en")
tweets_trump <- searchTwitter("#trump", n=1000,lang = "en")
tweets_election2020 <- searchTwitter("#election2020", n=2000,lang = "en")
# Stripping retweets so each tweet's text is counted only once
no_rt_bernie <- strip_retweets(tweets_bernie)
no_rt_biden <- strip_retweets(tweets_biden)
no_rt_trump <- strip_retweets(tweets_trump)
no_rt_election2020 <- strip_retweets(tweets_election2020)
# Converting extracted tweets without retweet to dataframe
bernie <- twListToDF(no_rt_bernie)
biden <-twListToDF(no_rt_biden)
trump <- twListToDF(no_rt_trump)
election2020 <- twListToDF(no_rt_election2020)
#bernie tweets
# NOTE(review): duplicate of the '#bernie' analysis earlier in this file.
#remove unnecessary elements include: link, username, emoji, numbers
tweets.bernie = bernie %>% select(screenName, text)
tweets.bernie$clean_text <- gsub("http\\S+", " ", tweets.bernie$text)
tweets.bernie$clean_text <- gsub("@\\w+", " ", tweets.bernie$clean_text)
# Drop every non-ASCII character (emoji, accented letters, etc.)
tweets.bernie$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.bernie$clean_text)
tweets.bernie$clean_text <- gsub("[[:digit:]]", " ", tweets.bernie$clean_text)
# Removing "trump" since trump is considered as positive polarity and emotion during text analysis
tweets.bernie$clean_text <- gsub("Trump", " ", tweets.bernie$clean_text)
tweets.bernie$clean_text <- gsub("trump", " ", tweets.bernie$clean_text)
tweets.bernie$clean_text <- gsub("TRUMP", " ", tweets.bernie$clean_text)
#unnest_tokens() converts to lowercase and strips punctuation while tokenizing
tweets.bernie_stem <- tweets.bernie %>%
select(clean_text) %>%
unnest_tokens(word, clean_text)
#remove stop words
cleaned_tweets.bernie <- tweets.bernie_stem %>%
anti_join(stop_words)
#bing sentiment analysis
bing_bernie = cleaned_tweets.bernie %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
bing_bernie
#plot top 10 negative and positive
bing_bernie %>%
group_by(sentiment) %>%
top_n(10) %>%
ungroup() %>%
mutate(word=reorder(word,n)) %>%
ggplot(aes(word,n,fill=sentiment))+
geom_col(show.legend=FALSE)+
facet_wrap(~sentiment,scale="free_y")+
labs(title="Tweets contatining '#bernie'", y="Contribution to sentiment", x=NULL)+
coord_flip()+theme_bw()
#Polarity plot
polar_bar_bernie <- bing_bernie %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = c("blue","red"))) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Bernie Tweets Polarity") +
coord_flip()
polar_bar_bernie
# biden tweets ----------------------------------------------------------
# Strip noise from the raw text: URLs, @usernames, non-ASCII characters
# (emoji), and digits.
tweets.biden <- biden %>% select(screenName, text)
tweets.biden$clean_text <- gsub("http\\S+", " ", tweets.biden$text)
tweets.biden$clean_text <- gsub("@\\w+", " ", tweets.biden$clean_text)
tweets.biden$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.biden$clean_text)
tweets.biden$clean_text <- gsub("[[:digit:]]", " ", tweets.biden$clean_text)
# Remove "trump" in any capitalisation: the sentiment lexicons score "trump"
# as a positive word, which would bias the analysis. A single ignore.case
# gsub replaces the previous three case-specific calls.
tweets.biden$clean_text <- gsub("trump", " ", tweets.biden$clean_text, ignore.case = TRUE)
# unnest_tokens() lowercases and strips punctuation while tokenizing.
tweets.biden_stem <- tweets.biden %>%
  select(clean_text) %>%
  unnest_tokens(word, clean_text)
# Drop stop words.
cleaned_tweets.biden <- tweets.biden_stem %>%
  anti_join(stop_words)
# Bing sentiment analysis: per-word counts tagged positive/negative.
bing_biden <- cleaned_tweets.biden %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
bing_biden
# Plot the top 10 negative and positive words.
bing_biden %>%
  group_by(sentiment) %>%
  top_n(10) %>%
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n, fill = sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scale = "free_y") +
  labs(title = "Tweets containing '#biden'", y = "Contribution to sentiment", x = NULL) +
  coord_flip() + theme_bw()
# Polarity plot: total positive vs negative word counts.
# BUG FIX: `aes(fill = c("blue","red"))` mapped the literal strings as factor
# levels, so ggplot used its default palette (not blue/red) and the colour was
# tied to row order rather than to the sentiment. Map fill to sentiment and
# pin the colours with scale_fill_manual().
polar_bar_biden <- bing_biden %>%
  group_by(sentiment) %>%
  summarise(word_count = n()) %>%
  ungroup() %>%
  mutate(sentiment = reorder(sentiment, word_count)) %>%
  ggplot(aes(sentiment, word_count, fill = sentiment)) +
  geom_col() +
  scale_fill_manual(values = c(negative = "red", positive = "blue")) +
  guides(fill = "none") + # turn off the legend ("none" replaces deprecated FALSE)
  labs(x = NULL, y = "Word Count") +
  ggtitle("Biden Tweets Polarity") +
  coord_flip()
polar_bar_biden
# trump tweets ----------------------------------------------------------
# Strip noise from the raw text: URLs, @usernames, non-ASCII characters
# (emoji), and digits.
tweets.trump <- trump %>% select(screenName, text)
tweets.trump$clean_text <- gsub("http\\S+", " ", tweets.trump$text)
tweets.trump$clean_text <- gsub("@\\w+", " ", tweets.trump$clean_text)
tweets.trump$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.trump$clean_text)
tweets.trump$clean_text <- gsub("[[:digit:]]", " ", tweets.trump$clean_text)
# Remove "trump" in any capitalisation: the sentiment lexicons score "trump"
# as a positive word, which would bias the analysis. A single ignore.case
# gsub replaces the previous three case-specific calls.
tweets.trump$clean_text <- gsub("trump", " ", tweets.trump$clean_text, ignore.case = TRUE)
# unnest_tokens() lowercases and strips punctuation while tokenizing.
tweets.trump_stem <- tweets.trump %>%
  select(clean_text) %>%
  unnest_tokens(word, clean_text)
# Drop stop words.
cleaned_tweets.trump <- tweets.trump_stem %>%
  anti_join(stop_words)
# Bing sentiment analysis: per-word counts tagged positive/negative.
bing_trump <- cleaned_tweets.trump %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
bing_trump
# Plot the top 10 negative and positive words.
bing_trump %>%
  group_by(sentiment) %>%
  top_n(10) %>%
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n, fill = sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scale = "free_y") +
  labs(title = "Tweets containing '#trump'", y = "Contribution to sentiment", x = NULL) +
  coord_flip() + theme_bw()
# Polarity plot: total positive vs negative word counts.
# BUG FIX: `aes(fill = c("blue","red"))` mapped the literal strings as factor
# levels, so ggplot used its default palette (not blue/red) and the colour was
# tied to row order rather than to the sentiment. Map fill to sentiment and
# pin the colours with scale_fill_manual().
polar_bar_trump <- bing_trump %>%
  group_by(sentiment) %>%
  summarise(word_count = n()) %>%
  ungroup() %>%
  mutate(sentiment = reorder(sentiment, word_count)) %>%
  ggplot(aes(sentiment, word_count, fill = sentiment)) +
  geom_col() +
  scale_fill_manual(values = c(negative = "red", positive = "blue")) +
  guides(fill = "none") + # turn off the legend ("none" replaces deprecated FALSE)
  labs(x = NULL, y = "Word Count") +
  ggtitle("Trump Tweets Polarity") +
  coord_flip()
polar_bar_trump
# election2020 tweets ---------------------------------------------------
# Strip noise from the raw text: URLs, @usernames, non-ASCII characters
# (emoji), and digits.
tweets.election <- election2020 %>% select(screenName, text)
tweets.election$clean_text <- gsub("http\\S+", " ", tweets.election$text)
tweets.election$clean_text <- gsub("@\\w+", " ", tweets.election$clean_text)
tweets.election$clean_text <- gsub("[^\x01-\x7F]", " ", tweets.election$clean_text)
tweets.election$clean_text <- gsub("[[:digit:]]", " ", tweets.election$clean_text)
# Remove "trump" in any capitalisation: the sentiment lexicons score "trump"
# as a positive word, which would bias the analysis. A single ignore.case
# gsub replaces the previous three case-specific calls.
tweets.election$clean_text <- gsub("trump", " ", tweets.election$clean_text, ignore.case = TRUE)
# unnest_tokens() lowercases and strips punctuation while tokenizing.
tweets.election_stem <- tweets.election %>%
  select(clean_text) %>%
  unnest_tokens(word, clean_text)
# Drop stop words.
cleaned_tweets.election <- tweets.election_stem %>%
  anti_join(stop_words)
# Bing sentiment analysis: per-word counts tagged positive/negative.
bing_election <- cleaned_tweets.election %>%
  inner_join(get_sentiments("bing")) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
bing_election
# Plot the top 10 negative and positive words.
bing_election %>%
  group_by(sentiment) %>%
  top_n(10) %>%
  ungroup() %>%
  mutate(word = reorder(word, n)) %>%
  ggplot(aes(word, n, fill = sentiment)) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~sentiment, scale = "free_y") +
  labs(title = "Tweets containing '#election2020'", y = "Contribution to sentiment", x = NULL) +
  coord_flip() + theme_bw()
# Polarity plot: total positive vs negative word counts.
# BUG FIX: `aes(fill = c("blue","red"))` mapped the literal strings as factor
# levels, so ggplot used its default palette (not blue/red) and the colour was
# tied to row order rather than to the sentiment. Map fill to sentiment and
# pin the colours with scale_fill_manual().
polar_bar_election <- bing_election %>%
  group_by(sentiment) %>%
  summarise(word_count = n()) %>%
  ungroup() %>%
  mutate(sentiment = reorder(sentiment, word_count)) %>%
  ggplot(aes(sentiment, word_count, fill = sentiment)) +
  geom_col() +
  scale_fill_manual(values = c(negative = "red", positive = "blue")) +
  guides(fill = "none") + # turn off the legend ("none" replaces deprecated FALSE)
  labs(x = NULL, y = "Word Count") +
  ggtitle("Election2020 Tweets Polarity") +
  coord_flip()
polar_bar_election
# Sentiment score for one tweet.
# Cleans the text (URLs removed; any capitalisation of "trump" removed because
# the bing lexicon scores it as positive), tokenizes, drops stop words, joins
# the bing lexicon, and scores +1 per positive-word occurrence and -1 per
# negative-word occurrence.
#
# @param twt a single character string (the raw tweet text).
# @return a list with:
#   score   - integer sum of word scores (0 when no lexicon words matched)
#   type    - "Type1" when no lexicon words at all (score 0 by absence),
#             "Type2" when words matched (a 0 score means they cancel out)
#   twt_tbl - the word-level tibble with counts and per-word scores
sentiment_bing <- function(twt){
  twt_tbl <- tibble(text = twt) %>%
    mutate(
      stripped_text = gsub("http\\S+", " ", text),
      # One ignore.case gsub replaces the previous three case-specific calls.
      stripped_text = gsub("trump", " ", stripped_text, ignore.case = TRUE)
    ) %>%
    unnest_tokens(word, stripped_text) %>%
    anti_join(stop_words) %>%
    inner_join(get_sentiments("bing")) %>%
    count(word, sentiment, sort = TRUE) %>%
    ungroup() %>%
    mutate(
      # score column: -1 per occurrence of a negative word, +1 per positive.
      score = case_when(
        sentiment == 'negative' ~ n * (-1),
        sentiment == 'positive' ~ n * 1)
    )
  # IDIOM FIX: case_when() on the scalar nrow() evaluated every branch and is
  # meant for vectors; a plain if/else is the correct scalar conditional.
  if (nrow(twt_tbl) == 0) {
    sent.score <- 0         # no lexicon words at all: score is 0
    zero.type  <- "Type1"   # zero because nothing matched
  } else {
    sent.score <- sum(twt_tbl$score)  # sum of positives and negatives
    zero.type  <- "Type2"   # a zero here means the words cancel out
  }
  list(score = sent.score, type = zero.type, twt_tbl = twt_tbl)
}
# Apply the scoring function: returns, per corpus, a list of per-tweet
# sentiment scores, zero-types and word tables.
# IDIOM FIX: `lapply(x, f)` replaces the redundant anonymous wrapper
# `function(x){sentiment_bing(x)}`.
bernie_sent = lapply(bernie$text, sentiment_bing)
biden_sent = lapply(biden$text, sentiment_bing)
trump_sent = lapply(trump$text, sentiment_bing)
election_sent = lapply(election2020$text, sentiment_bing)
# Create a tibble of #keyword, sentiment score and zero-type for each tweet.
tweets_sentiment = bind_rows(
  tibble(
    keyword='#bernie',
    score=unlist(map(bernie_sent, 'score')),
    type=unlist(map(bernie_sent, 'type'))
  ),
  tibble(
    keyword='#biden',
    score=unlist(map(biden_sent, 'score')),
    type=unlist(map(biden_sent, 'type'))
  ),
  tibble(
    keyword='#trump',
    score=unlist(map(trump_sent, 'score')),
    type=unlist(map(trump_sent, 'type'))
  )
)
election_sentiment= tibble(
  keyword='#election2020',
  score=unlist(map(election_sent, 'score')),
  type=unlist(map(election_sent, 'type'))
)
# Histograms of per-tweet sentiment scores for the three candidates.
ggplot(tweets_sentiment,aes(x=score, fill=keyword)) +
  geom_histogram(bins=10, alpha=0.6) +
  facet_grid(~keyword) + theme_bw()
# Histogram of per-tweet sentiment scores for #election2020.
ggplot(election_sentiment,aes(x=score, fill=keyword)) +
  geom_histogram(bins=10, alpha=0.6) +
  theme_bw()
#https://www.tidytextmining.com/sentiment.html
#https://cran.r-project.org/web/packages/syuzhet/vignettes/syuzhet-vignette.html
#https://www.datacamp.com/community/tutorials/sentiment-analysis-R
# NRC emotion sentiment analysis ----------------------------------------
# The plain positive/negative polarity tags are filtered out so only the
# NRC emotion categories remain in the counts.
# bernie
nrc_bernie = cleaned_tweets.bernie %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
nrc_bernie
# Plot: one bar per emotion, ordered by the number of distinct words that
# contributed to it (n() counts rows, i.e. distinct words, not occurrences).
bernie_plot <- nrc_bernie %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = -word_count)) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Bernie NRC Sentiment") +
coord_flip()
bernie_plot
# biden: NRC emotion counts (polarity tags filtered out, emotions only).
nrc_biden = cleaned_tweets.biden %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
nrc_biden
# Plot: one bar per emotion, ordered by the number of distinct words that
# contributed to it.
biden_plot <- nrc_biden %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = -word_count)) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Biden NRC Sentiment") +
coord_flip()
biden_plot
# trump: NRC emotion counts (drop the plain positive/negative polarity rows
# so only emotion categories remain).
nrc_trump <- cleaned_tweets.trump %>%
  inner_join(get_sentiments("nrc")) %>%
  filter(!sentiment %in% c("positive", "negative")) %>%
  count(word, sentiment, sort = TRUE) %>%
  ungroup()
nrc_trump
# Summarise to one row per emotion (word_count = number of distinct words),
# ordered so the plot shows the most common emotion on top.
trump_emotion_counts <- nrc_trump %>%
  group_by(sentiment) %>%
  summarise(word_count = n()) %>%
  ungroup() %>%
  mutate(sentiment = reorder(sentiment, word_count))
# `fill = -word_count` shades the larger bars darker.
trump_plot <- ggplot(trump_emotion_counts,
                     aes(sentiment, word_count, fill = -word_count)) +
  geom_col() +
  guides(fill = FALSE) + # turn off the legend
  labs(x = NULL, y = "Word Count") +
  ggtitle("Trump NRC Sentiment") +
  coord_flip()
trump_plot
# election2020: NRC emotion counts (polarity tags filtered out, emotions only).
nrc_election = cleaned_tweets.election %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort=TRUE) %>%
ungroup()
# BUG FIX: this previously printed nrc_trump (copy-paste error); print the
# election table that was just computed.
nrc_election
# Plot: one bar per emotion, ordered by the number of distinct words that
# contributed to it.
election_plot <- nrc_election %>%
group_by(sentiment) %>%
summarise(word_count = n()) %>%
ungroup() %>%
mutate(sentiment = reorder(sentiment, word_count)) %>%
#Use `fill = -word_count` to make the larger bars darker
ggplot(aes(sentiment, word_count, fill = -word_count)) +
geom_col() +
guides(fill = FALSE) + #Turn off the legend
labs(x = NULL, y = "Word Count") +
ggtitle("Election2020 NRC Sentiment") +
coord_flip()
election_plot
# wordcloud --------------------------------------------------------------
# https://www.r-bloggers.com/thrice-sentiment-analysis-emotions-in-lyrics/
# Common-word clouds: top 100 words per corpus, coloured by frequency band.
# NOTE: the cleaned_tweets.* tables already had stop words removed via
# anti_join(stop_words) during cleaning above, so the redundant second
# anti_join was dropped (identical output, one join fewer).
cleaned_tweets.bernie %>%
  count(word) %>%
  with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
cleaned_tweets.biden %>%
  count(word) %>%
  with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
cleaned_tweets.trump %>%
  count(word) %>%
  with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
cleaned_tweets.election %>%
  count(word) %>%
  with(wordcloud(word, n, max.words = 100, colors=brewer.pal(8, "Dark2")))
# Polarity comparison clouds (bing lexicon): for each corpus, acast() builds
# a word x sentiment count matrix (missing combinations filled with 0) and
# comparison.cloud() draws the top 50 words, coloured by sentiment column.
cleaned_tweets.bernie %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown", "dark green"),
title.size=1, max.words = 50)
# Same comparison cloud for the #biden corpus.
cleaned_tweets.biden %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown", "dark green"),
title.size=1, max.words = 50)
# Same comparison cloud for the #trump corpus.
cleaned_tweets.trump %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown", "dark green"),
title.size=1, max.words = 50)
# Same comparison cloud for the #election2020 corpus.
cleaned_tweets.election %>%
inner_join(get_sentiments("bing")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = c("brown", "dark green"),
title.size=1, max.words = 50)
# Emotion comparison clouds (NRC lexicon): the polarity rows are filtered out
# so the cloud segments correspond to the NRC emotion categories only.
#https://tabvizexplorer.com/sentiment-analysis-using-r-and-twitter/
#https://rpubs.com/SulmanKhan/437587
cleaned_tweets.bernie %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = brewer.pal(8, "Dark2"),
title.size=1, max.words=50, random.order = FALSE)
# Same emotion cloud for the #biden corpus.
cleaned_tweets.biden %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = brewer.pal(8, "Dark2"),
title.size=1, max.words=50, random.order = FALSE)
# Same emotion cloud for the #trump corpus.
cleaned_tweets.trump %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = brewer.pal(8, "Dark2"),
title.size=1, max.words=50, random.order = FALSE)
# Same emotion cloud for the #election2020 corpus.
cleaned_tweets.election %>%
inner_join(get_sentiments("nrc")) %>%
filter(!sentiment %in% c("positive","negative")) %>%
count(word, sentiment, sort = TRUE) %>%
acast(word ~ sentiment, value.var = "n", fill = 0) %>%
comparison.cloud(colors = brewer.pal(8, "Dark2"),
title.size=1, max.words=50, random.order = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vessel.R
\name{vessel}
\alias{vessel}
\alias{vessel.default}
\alias{vessel.character}
\alias{vessel.scsset}
\alias{vessel.gulf.set}
\title{Survey Vessels}
\usage{
vessel(x, ...)
\method{vessel}{default}(x, ...)
\method{vessel}{character}(x, verbose = FALSE, ...)
\method{vessel}{scsset}(x, ...)
\method{vessel}{gulf.set}(x, ...)
}
\arguments{
\item{x}{Character search string.}
\item{...}{Other arguments (not used).}
}
\description{
Functions to retrieve survey vessel information.
}
\section{Methods (by class)}{
\itemize{
\item \code{vessel(default)}: Default \code{vessel} method. Returns the complete vessel data table.
\item \code{vessel(character)}: Search for vessel name and return vessel specifications.
\item \code{vessel(scsset)}: Vessel names for snow crab survey.
\item \code{vessel(gulf.set)}: Vessel names for various science surveys.
}}
\examples{
vessel() # Survey vessel table.
vessel("prince") # Search for vessel name.
vessel("marco") # Search for vessel name.
vessel("opilio") # Search for vessel name.
# Read snow crab set data:
x <- read.scsset(2000:2021)
vessel(x) # Determine vessel for each data record.
}
| /man/vessel.Rd | no_license | TobieSurette/gulf.data | R | false | true | 1,260 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vessel.R
\name{vessel}
\alias{vessel}
\alias{vessel.default}
\alias{vessel.character}
\alias{vessel.scsset}
\alias{vessel.gulf.set}
\title{Survey Vessels}
\usage{
vessel(x, ...)
\method{vessel}{default}(x, ...)
\method{vessel}{character}(x, verbose = FALSE, ...)
\method{vessel}{scsset}(x, ...)
\method{vessel}{gulf.set}(x, ...)
}
\arguments{
\item{x}{Character search string.}
\item{...}{Other arguments (not used).}
}
\description{
Functions to retrieve survey vessel information.
}
\section{Methods (by class)}{
\itemize{
\item \code{vessel(default)}: Default \code{vessel} method. Returns the complete vessel data table.
\item \code{vessel(character)}: Search for vessel name and return vessel specifications.
\item \code{vessel(scsset)}: Vessel names for snow crab survey.
\item \code{vessel(gulf.set)}: Vessel names for various science surveys.
}}
\examples{
vessel() # Survey vessel table.
vessel("prince") # Search for vessel name.
vessel("marco") # Search for vessel name.
vessel("opilio") # Search for vessel name.
# Read snow crab set data:
x <- read.scsset(2000:2021)
vessel(x) # Determine vessel for each data record.
}
|
## R code to test the FVS api
# Walk up the directory tree until a directory containing "rFVS" is found.
# If we reach a path shorter than 4 characters (i.e. the filesystem root)
# without finding it, restore the original working directory and abort.
cwd = getwd()
while(TRUE)
{
if (length(dir(pattern="rFVS")) > 0) break
setwd("..")
if (nchar(getwd()) < 4) {setwd(cwd);stop("Cannot find R code.")}
}
setwd("rFVS/R")
# Source every R file of the rFVS package, then return to where we started.
for (rf in dir ()) source (rf)
setwd(cwd)
# Load the FVS shared library (the "qFVSie" variant) from ../../bin.
fvsLoad("qFVSie","../../bin")
# Tree attribute names requested from the API throughout the tests below.
treeAttrs = c("id","species","mort","tpa","dbh","dg","ht",
"htg","crwdth","cratio","age","plot",
"tcuft","mcuft","bdft","plotsize","mgmtcd")
# No cycles, plots, or trees exist yet.
fvsGetDims()
# Should return an empty list (no run has started).
fvsGetTreeAttrs(treeAttrs)
# The species codes.
fvsGetSpeciesCodes()
# List supported activity codes.
fvsAddActivity()
## First run: drive a simulation from a keyword file, stopping at checkpoints.
fvsSetCmdLine("--keywordfile=base.key")
fvsRun(2,2030)
fvsGetStandIDs()
# Get and output some event monitor variables.
fvsGetEventMonitorVariables(c("year","atpa","aba"))
# Get and output tree attributes.
fvsGetTreeAttrs(treeAttrs)
# Get, display, and set back some species attributes (round-trip test).
spAttrs <- fvsGetSpeciesAttrs(c("spsdi","spccf","spsiteindx"))
spAttrs
rtn <- fvsSetSpeciesAttrs(spAttrs)
cat("rtn = ",rtn,"\n")
# Run to 2060, stopping just before increments are added.
fvsRun(5,2060)
trees <- fvsGetTreeAttrs(treeAttrs)
# Set mortality and growth to zero.
trees$mort <- 0
trees$htg <- 0
trees$dg <- 0
# Pass the modified columns back by NAME instead of the fragile magic
# indices c(3,6,8), which silently break if treeAttrs is ever reordered.
fvsSetTreeAttrs(trees[, c("mort","dg","htg")])
# Finish the run.
fvsRun(0,0)
# Get and output summary statistics.
fvsGetSummary() # year 2060 and 2070 should be equal (growth/mortality zeroed)
# Run the next stand in the set, no stopping.
fvsRun()
## Next run: reuse the same keyword file.
fvsSetCmdLine("--keywordfile=base.key")
fvsRun(2,1993)
addtrees <- fvsGetTreeAttrs(treeAttrs)
# Keep only the small trees (dbh < 2) and the columns fvsAddTrees() needs.
addtrees <- subset(addtrees,dbh<2)[,c("dbh","species","ht","cratio","plot","tpa")]
# These trees will be added back into the run at 2013.
addtrees
# Schedule a yardloss and a thindbh activity for 1993.
fvsAddActivity(1993,"base_yardloss",c(0.50, 0.70, 0.50))
fvsAddActivity(1993,"base_thindbh",c(0.00,12.00,1.00,0.00,0.00))
# Continue the run.
fvsRun(6,2013)
# Add the trees and output the current tree list.
fvsAddTrees(addtrees)
fvsGetTreeAttrs(treeAttrs)
# Continue the run.
fvsRun(0,0)
# Get and output summary statistics.
fvsGetSummary()
# Continue the run for the next stand.
fvsRun()
| /tests/APIviaR/Rapi.R | no_license | tharen/open-fvs-import | R | false | false | 2,230 | r | ## R code to test the FVS api
# Walk up the directory tree until a directory containing "rFVS" is found.
# If we reach a path shorter than 4 characters (i.e. the filesystem root)
# without finding it, restore the original working directory and abort.
cwd = getwd()
while(TRUE)
{
if (length(dir(pattern="rFVS")) > 0) break
setwd("..")
if (nchar(getwd()) < 4) {setwd(cwd);stop("Cannot find R code.")}
}
setwd("rFVS/R")
# Source every R file of the rFVS package, then return to where we started.
for (rf in dir ()) source (rf)
setwd(cwd)
# Load the FVS shared library (the "qFVSie" variant) from ../../bin.
fvsLoad("qFVSie","../../bin")
# Tree attribute names requested from the API throughout the tests below.
treeAttrs = c("id","species","mort","tpa","dbh","dg","ht",
"htg","crwdth","cratio","age","plot",
"tcuft","mcuft","bdft","plotsize","mgmtcd")
# No cycles, plots, or trees exist yet.
fvsGetDims()
# Should return an empty list (no run has started).
fvsGetTreeAttrs(treeAttrs)
# The species codes.
fvsGetSpeciesCodes()
# List supported activity codes.
fvsAddActivity()
## First run: drive a simulation from a keyword file, stopping at checkpoints.
fvsSetCmdLine("--keywordfile=base.key")
fvsRun(2,2030)
fvsGetStandIDs()
# Get and output some event monitor variables.
fvsGetEventMonitorVariables(c("year","atpa","aba"))
# Get and output tree attributes.
fvsGetTreeAttrs(treeAttrs)
# Get, display, and set back some species attributes (round-trip test).
spAttrs <- fvsGetSpeciesAttrs(c("spsdi","spccf","spsiteindx"))
spAttrs
rtn <- fvsSetSpeciesAttrs(spAttrs)
cat("rtn = ",rtn,"\n")
# Run to 2060, stopping just before increments are added.
fvsRun(5,2060)
trees <- fvsGetTreeAttrs(treeAttrs)
# Set mortality and growth to zero.
trees$mort <- 0
trees$htg <- 0
trees$dg <- 0
# Pass the modified columns back by NAME instead of the fragile magic
# indices c(3,6,8), which silently break if treeAttrs is ever reordered.
fvsSetTreeAttrs(trees[, c("mort","dg","htg")])
# Finish the run.
fvsRun(0,0)
# Get and output summary statistics.
fvsGetSummary() # year 2060 and 2070 should be equal (growth/mortality zeroed)
# Run the next stand in the set, no stopping.
fvsRun()
## Next run: reuse the same keyword file.
fvsSetCmdLine("--keywordfile=base.key")
fvsRun(2,1993)
addtrees <- fvsGetTreeAttrs(treeAttrs)
# Keep only the small trees (dbh < 2) and the columns fvsAddTrees() needs.
addtrees <- subset(addtrees,dbh<2)[,c("dbh","species","ht","cratio","plot","tpa")]
# These trees will be added back into the run at 2013.
addtrees
# Schedule a yardloss and a thindbh activity for 1993.
fvsAddActivity(1993,"base_yardloss",c(0.50, 0.70, 0.50))
fvsAddActivity(1993,"base_thindbh",c(0.00,12.00,1.00,0.00,0.00))
# Continue the run.
fvsRun(6,2013)
# Add the trees and output the current tree list.
fvsAddTrees(addtrees)
fvsGetTreeAttrs(treeAttrs)
# Continue the run.
fvsRun(0,0)
# Get and output summary statistics.
fvsGetSummary()
# Continue the run for the next stand.
fvsRun()
|
# Read the per-category creature configuration and flatten it into a lookup
# table with one row per (creature, star level) and one column per stat.
cllc <- yaml::read_yaml("data/configuration/CreatureConfig.yml")
cllc <- data.table(cllc_category=cllc,names=names(cllc))
cllc <- cllc[names !="groups"]
# Category names are comma-separated creature lists: split them into columns,
# then melt to one row per (category, creature); drop the NA padding rows.
cllc_expand <- cbind(cllc[,.(cllc_category)],cllc[,tstrsplit(names,",")])
cllc_melt <- melt(cllc_expand,id.vars=c("cllc_category"))
# IDIOM FIX: spell out with=FALSE (the shorthand F is a reassignable binding).
cllc_melt <- cllc_melt[!is.na(value),!"variable",with=FALSE]
# Pull each stat's per-star vector out of the nested category list, row by row.
# seq_len(nrow(...)) is the safe per-row grouping idiom (1:nrow() misbehaves
# on empty tables).
cllc_melt[,attack:=list(list(cllc_category[[1]][["attack speed"]])),by=seq_len(nrow(cllc_melt))]
cllc_melt[,movement:=list(list(cllc_category[[1]][["movement speed"]])),by=seq_len(nrow(cllc_melt))]
cllc_melt[,health:=list(list(cllc_category[[1]][["health"]])),by=seq_len(nrow(cllc_melt))]
cllc_melt[,damage:=list(list(cllc_category[[1]][["damage"]])),by=seq_len(nrow(cllc_melt))]
# Unnest one long table per stat; star index starts at 0.
# seq_len(.N)-1L keeps `stars` integer, matching level_base's 0:10 below.
attack <- cllc_melt[,unlist(attack),by=.(value)]
attack %>% setnames(c("V1","value"),c("attack_speed","value"))
attack[,stars:=seq_len(.N)-1L,by=value]
movement <- cllc_melt[,unlist(movement),by=.(value)]
movement %>% setnames(c("V1","value"),c("movement_speed","value"))
movement[,stars:=seq_len(.N)-1L,by=value]
health <- cllc_melt[,unlist(health),by=.(value)]
health %>% setnames(c("V1","value"),c("health","value"))
health[,stars:=seq_len(.N)-1L,by=value]
damage <- cllc_melt[,unlist(damage),by=.(value)]
damage %>% setnames(c("V1","value"),c("damage","value"))
damage[,stars:=seq_len(.N)-1L,by=value]
# Cross every creature with star levels 0..10 and join each stat table on.
level_base <- data.table(stars=0:10)
level_all <- CJ.dt(level_base,attack[,.(value)] %>% unique)
level_all <- attack[level_all,on=c("stars","value")]
level_all <- movement[level_all,on=c("stars","value")]
level_all <- health[level_all,on=c("stars","value")]
level_all <- damage[level_all,on=c("stars","value")]
# Carry the last defined setting forward across star levels (na.locf),
# drop still-missing rows, then reshape back to one column per stat.
level_melt <- melt(level_all,id.vars=c("stars","value"),value.name="setting")
level_melt[,setting:=na.locf(setting),by=.(value,variable)]
level_melt <- level_melt[!is.na(setting)]
level_cast <- dcast(level_melt,value+stars ~ variable,value.var = "setting")
| /R/read_cllc_yaml.R | no_license | danbartl/val_loot_gen | R | false | false | 1,863 | r | cllc <- yaml::read_yaml("data/configuration/CreatureConfig.yml")
# Flatten the per-category creature configuration (read into `cllc` above)
# into a lookup table with one row per (creature, star level) and one column
# per stat.
cllc <- data.table(cllc_category=cllc,names=names(cllc))
cllc <- cllc[names !="groups"]
# Category names are comma-separated creature lists: split them into columns,
# then melt to one row per (category, creature); drop the NA padding rows.
cllc_expand <- cbind(cllc[,.(cllc_category)],cllc[,tstrsplit(names,",")])
cllc_melt <- melt(cllc_expand,id.vars=c("cllc_category"))
# IDIOM FIX: spell out with=FALSE (the shorthand F is a reassignable binding).
cllc_melt <- cllc_melt[!is.na(value),!"variable",with=FALSE]
# Pull each stat's per-star vector out of the nested category list, row by row.
# seq_len(nrow(...)) is the safe per-row grouping idiom (1:nrow() misbehaves
# on empty tables).
cllc_melt[,attack:=list(list(cllc_category[[1]][["attack speed"]])),by=seq_len(nrow(cllc_melt))]
cllc_melt[,movement:=list(list(cllc_category[[1]][["movement speed"]])),by=seq_len(nrow(cllc_melt))]
cllc_melt[,health:=list(list(cllc_category[[1]][["health"]])),by=seq_len(nrow(cllc_melt))]
cllc_melt[,damage:=list(list(cllc_category[[1]][["damage"]])),by=seq_len(nrow(cllc_melt))]
# Unnest one long table per stat; star index starts at 0.
# seq_len(.N)-1L keeps `stars` integer, matching level_base's 0:10 below.
attack <- cllc_melt[,unlist(attack),by=.(value)]
attack %>% setnames(c("V1","value"),c("attack_speed","value"))
attack[,stars:=seq_len(.N)-1L,by=value]
movement <- cllc_melt[,unlist(movement),by=.(value)]
movement %>% setnames(c("V1","value"),c("movement_speed","value"))
movement[,stars:=seq_len(.N)-1L,by=value]
health <- cllc_melt[,unlist(health),by=.(value)]
health %>% setnames(c("V1","value"),c("health","value"))
health[,stars:=seq_len(.N)-1L,by=value]
damage <- cllc_melt[,unlist(damage),by=.(value)]
damage %>% setnames(c("V1","value"),c("damage","value"))
damage[,stars:=seq_len(.N)-1L,by=value]
# Cross every creature with star levels 0..10 and join each stat table on.
level_base <- data.table(stars=0:10)
level_all <- CJ.dt(level_base,attack[,.(value)] %>% unique)
level_all <- attack[level_all,on=c("stars","value")]
level_all <- movement[level_all,on=c("stars","value")]
level_all <- health[level_all,on=c("stars","value")]
level_all <- damage[level_all,on=c("stars","value")]
# Carry the last defined setting forward across star levels (na.locf),
# drop still-missing rows, then reshape back to one column per stat.
level_melt <- melt(level_all,id.vars=c("stars","value"),value.name="setting")
level_melt[,setting:=na.locf(setting),by=.(value,variable)]
level_melt <- level_melt[!is.na(setting)]
level_cast <- dcast(level_melt,value+stars ~ variable,value.var = "setting")
|
#' @title Area Under The Curve of Group-Specific Polynomial Marginal Dynamics
#' @description \loadmathjax This function estimates the area under the curve of marginal dynamics modeled by group-structured polynomials or B-spline curves.
#'
#' @param MEM_Pol_group A list with similar structure than the output provided by the function \link[AUCcomparison]{MEM_Polynomial_Group_structure}.
#'
#' A list containing:
#' \itemize{
#' \item \code{Model_estimation}: a list containing at least 2 elements: \enumerate{
#' \item the vector of the marginal (fixed) parameters estimates (at least for the groups whose AUC is to estimate), labeled _'beta'_.
#' \item the variance-covariance matrix of these parameters, labeled _'varFix'_ (see \link[AUCcomparison]{MEM_Polynomial_Group_structure} for details about the parameter order).
#' }
#' \item \code{Model_features}: a list of at least 2 elements: \enumerate{
#' \item \code{Groups}: a vector indicating the names of the groups whose fixed parameters are given.
#' \item \code{Marginal.dyn.feature}: a list summarizing the features of the marginal dynamics defined in the model:
#' \itemize{
#' \item \code{dynamic.type}: a character scalar indicating the chosen type of marginal dynamics. Options are 'polynomial' or 'spline'.
#' \item \code{intercept}: a logical vector summarizing choices about global and group-specific intercepts (Number of groups + 1) elements whose elements are named as ('global.intercept','group.intercept1', ..., 'group.interceptG') if G Groups are defined in \code{MEM_Pol_group}. For each element of the vector, if TRUE, the considered intercept is considered as included in the model (see \emph{Examples}).
#' }
#' If \code{dynamic.type} is defined as 'polynomial':\itemize{
#' \item \code{polynomial.degree}: an integer vector indicating the degree of polynomial functions, one value for each group.
#' }
#' If \code{dynamic.type} is defined as 'spline':\itemize{
#' \item \code{spline.degree}: an integer vector indicating the degree of B-spline curves, one for each group.
#' \item \code{knots}: a list of group-specific internal knots used to build B-spline basis (one numerical vector for each group) (see \link[splines]{bs} for more details).
#' \item \code{df}: a numerical vector of group-specific degrees of freedom used to build B-spline basis, (one for each group).
#' \item \code{boundary.knots}: a list of group-specific boundary knots used to build B-spline basis (one vector for each group) (see \link[splines]{bs} for more details).
#' }
#' }
#' }
#'
#' @param time a numerical vector of time points (x-axis coordinates) or a list of numerical vectors (with as much elements than the number of groups in \code{Groups}).
#' @param Groups a vector indicating the names of the groups belonging to the set of groups involved in \code{MEM_Pol_group} for which we want to estimate the AUC (a subset or the entire set of groups involved in the model can be considered). If NULL (default), the AUC for all the groups involved in the MEM is calculated.
#' @param method a character scalar indicating the interpolation method to use to estimate the AUC. Options are 'trapezoid' (default), 'lagrange' and 'spline'. In this version, the 'spline' interpolation is implemented with the "not-a-knot" spline boundary conditions.
#' @param Averaged a logical scalar. If TRUE, the function return the normalized AUC (nAUC) computed as the AUC divided by the range of the time calculation. If FALSE (default), the classic AUC is calculated.
#'
#' @details The area under the curve for the group g of interest is calculated as an approximation of the integral of the expected value of the estimated outcome Y specific to the group g. Assuming a time interval \mjteqn{\[0,T_g\]}{\[0,T_g\]}{\[0,T_g\]}, the AUC is then calculated as
#' \mjtdeqn{AUC_g = \int_0^{T_g} E(\hat{Y_g})(t) dt}{AUC_g = \int_0^{T_g} E(\hat{Y_g})(t) dt}{AUC_g = \int_0^{T_g} E(\hat{Y_g})(t) dt}
#' Similarly, the normalized AUC (nAUC) for this same group is then defined as
#' \mjtdeqn{nAUC_g = \frac{1}{T_g}\int_0^{T_g} E(\hat{Y_g})(t) dt}{nAUC_g = \frac{1}{T_g}\int_0^{T_g} E(\hat{Y_g})(t) dt}{nAUC_g = \frac{1}{T_g}\int_0^{T_g} E(\hat{Y_g})(t) dt}
#'
#' @return A numerical vector containing the estimation of the AUC (or nAUC) for each group defined in the \code{Groups} vector.
#' @examples
#' \donttest{# Download of data
#' data("HIV_Simu_Dataset_Delta01_cens")
#' data <- HIV_Simu_Dataset_Delta01_cens
#'
#' # Change factors in character vectors
#' data$id <- as.character(data$id) ; data$Group <- as.character(data$Group)
#'
#' # Example 1: We consider the variable \code{MEM_Pol_Group} as the output
#' # of our function \link[AUCcomparison]{MEM_Polynomial_Group_structure}
#' MEM_estimation <- MEM_Polynomial_Group_structure(y=data$VL,x=data$time,Group=data$Group,
#' Id=data$id,Cens=data$cens)
#'
#' time_group1 <- unique(data$time[which(data$Group == "Group1")])
#' time_group2 <- unique(data$time[which(data$Group == "Group2")])
#'
#' # Estimation of the AUC for the two groups defined in the dataset
#' AUC_estimation <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_estimation,
#' time=list(time_group1,time_group2),
#' Groups=unique(data$Group))
#'
#' # Estimation of the AUC only for the group "Group1"
#' AUC_estimation_G1 <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_estimation,
#' time=time_group1,Groups=c("Group1"))
#'
#' # Example 2: We consider results of MEM estimation from another source.
#' # We have to give build the variable 'MEM_Pol_group' with the good structure
#' # We build the variable 'MEM_Pol_group.1' with the results of MEM estimation obtained
#' # for two groups (even if only "Group1" is called in AUC estimation function)
#'
#' MEM_Pol_group.1 <- list(Model_estimation=c(1.077,0.858,-0.061,0.0013,0.887,-0.066,0.0014),
#' Model_features=list(Groups=c("Group1","Group2"),
#' Marginal.dyn.feature=list(dynamic.type="polynomial",
#' intercept=c(global.intercept=TRUE,
#' group.intercept1=FALSE,group.intercept2=FALSE),
#' polynomial.degree=c(3,3))))
#'
#'# We build the variable 'MEM_Pol_group.2' with the results of MEM estimation obtained only for
#'# the group of interest (extraction)
#' MEM_Pol_group.2 <- list(Model_estimation=c(1.077,0.858,-0.061,0.0013),
#' Model_features=list(Groups=c("Group1"),
#' Marginal.dyn.feature=list(dynamic.type="polynomial",
#' intercept=c(global.intercept=TRUE,group.intercept1=FALSE),
#' polynomial.degree=c(3))))
#'
#'# Estimation of the AUC for the group "Group1"
#' time_group1 <- unique(data$time[which(data$Group == "Group1")])
#' AUC_estimation_G1.1 <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_Pol_group.1,
#' time=time_group1,Groups=c("Group1"))
#' AUC_estimation_G1.2 <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_Pol_group.2,
#' time=time_group1)
#'}
#' @seealso
#' \code{\link[splines]{bs}},
#' \code{\link[AUCcomparison]{MEM_Polynomial_Group_structure}}
#' @rdname Group_specific_AUC_estimation
#' @export
#' @importFrom splines bs
Group_specific_AUC_estimation <- function(MEM_Pol_group,time,Groups=NULL,method="trapezoid",Averaged=FALSE){
  # Estimate the AUC (or normalized AUC) of group-specific marginal dynamics
  # (polynomial or B-spline) obtained from a mixed-effects model fit.
  #
  # Args:
  #   MEM_Pol_group: list with 'Model_estimation' (vector of fixed effects, or
  #     a list carrying them in element 'beta') and 'Model_features'
  #     (group names + marginal dynamics description).
  #   time: numeric vector of time points shared by all requested groups, or a
  #     list with one numeric vector per requested group.
  #   Groups: subset of the model's groups to evaluate; NULL means all of them.
  #   method: interpolation rule used to build the quadrature weights
  #     ("trapezoid", "lagrange" or "spline").
  #   Averaged: if TRUE, return the AUC divided by each group's time range (nAUC).
  #
  # Returns: a named numeric vector of AUC (or nAUC) values, one per group.
  '%notin%' <- Negate('%in%')
  # Step 1: Verification of the type of arguments
  # ----- #
  Check_argument_Group_specific_AUC(MEM_Pol_group,time,Groups,method,Averaged)
  Model_features <- MEM_Pol_group$Model_features
  Marginal_dynamics <- Model_features$Marginal.dyn.feature
  if(is.null(Groups)){
    Groups <- Model_features$Groups
  }
  if(is.numeric(time)){
    # A single time vector is replicated so every requested group uses it
    time <- lapply(seq(1,length(Groups)),function(g) return(time))
  }
  # Extraction of population parameters according to their groups
  if(is.list(MEM_Pol_group$Model_estimation)){
    Population_params <- MEM_Pol_group$Model_estimation$beta
  }else{
    Population_params <- MEM_Pol_group$Model_estimation
  }
  MEM_groups <- as.vector(Model_features$Groups)
  global_intercept <- Marginal_dynamics$intercept["global.intercept"]
  ind_params <- 0
  Group_parameters <- list()
  for(g in 1:length(MEM_groups)){
    params <- NULL
    if(global_intercept){
      # The global intercept (first fixed effect) is shared by every group,
      # but it is consumed from the parameter vector only once
      params <- c(params,Population_params[1])
      if(g == 1){
        ind_params <- ind_params + 1
      }
    }
    # Number of group-specific parameters: optional group intercept + basis size
    if(Marginal_dynamics$dynamic.type == "spline"){
      Nb_group_params <- as.numeric(1*Marginal_dynamics$intercept[paste("group.intercept",g,sep="")] +
                                      length(Marginal_dynamics$knots[[MEM_groups[g]]]) + Marginal_dynamics$spline.degree[g])
    }else if(Marginal_dynamics$dynamic.type == "polynomial"){
      Nb_group_params <- as.numeric(1*Marginal_dynamics$intercept[paste("group.intercept",g,sep="")] +
                                      Marginal_dynamics$polynomial.degree[g])
    }
    params <- c(params,Population_params[(ind_params+1):(ind_params+Nb_group_params)])
    ind_params <- ind_params + Nb_group_params
    Group_parameters[[MEM_groups[g]]] <- params
  }
  # Step 2: Calculation of AUC
  # ----- #
  Estimated_AUC <- NULL
  for(g in 1:length(Groups)){
    # Map the requested group back to its position in the model definition, so
    # that intercept flags, degrees and df are read for the RIGHT group even
    # when 'Groups' is a subset or a reordering of the model's groups.
    # (Indexing these by the loop index 'g', as the previous version did,
    # silently picked the wrong group, e.g. when only "Group2" was requested.)
    g_mem <- match(Groups[g],MEM_groups)
    time_group <- time[[g]]
    beta_group <- Group_parameters[[Groups[g]]]
    Pop_Covariate <- NULL
    if(global_intercept){
      Pop_Covariate <- cbind(Pop_Covariate,rep(1,length(time_group)))
    }
    # Extraction of information about model
    if(Marginal_dynamics$dynamic.type == "polynomial"){
      # Creation of covariate matrix: powers of time, starting at 0 (constant
      # column) when the group has its own intercept, at 1 otherwise
      Covariate_poly_group <- do.call(cbind,lapply(1*isFALSE(Marginal_dynamics$intercept[paste("group.intercept",g_mem,sep="")]):Marginal_dynamics$polynomial.degree[g_mem],function(d) time_group^d))
      Pop_Covariate <- cbind(Pop_Covariate,Covariate_poly_group)
    }else if(Marginal_dynamics$dynamic.type == "spline"){
      # Creation of covariate matrix (B-spline basis)
      if(is.null(Marginal_dynamics$boundary.knots[[Groups[g]]])){
        Covariate_spline_group <- splines::bs(x=time_group,knots=Marginal_dynamics$knots[[Groups[g]]],df=Marginal_dynamics$df[g_mem],degree=Marginal_dynamics$spline.degree[g_mem])
      }else{
        Covariate_spline_group <- splines::bs(x=time_group,knots=Marginal_dynamics$knots[[Groups[g]]],df=Marginal_dynamics$df[g_mem],degree=Marginal_dynamics$spline.degree[g_mem],Boundary.knots=Marginal_dynamics$boundary.knots[[Groups[g]]])
      }
      if(Marginal_dynamics$intercept[paste("group.intercept",g_mem,sep="")]){
        Covariate_spline_group <- cbind(rep(1,length(time_group)),Covariate_spline_group)
      }
      Pop_Covariate <- cbind(Pop_Covariate,Covariate_spline_group)
    }# End spline covariate
    # Estimation of the marginal dynamics
    Group_dynamics <- as.numeric(Pop_Covariate %*% beta_group)
    # Creation of method time weights (W) vector
    time_weights <- AUC_time_weights_estimation(time=time_group,method)
    AUC_group <- as.numeric(Group_dynamics %*% time_weights)
    Estimated_AUC <- c(Estimated_AUC,AUC_group)
  }
  names(Estimated_AUC) <- Groups
  if(Averaged){
    # Normalized AUC: divide by the width of each group's time window
    Estimated_nAUC <- sapply(seq(1,length(Groups)),function(g) Estimated_AUC[g]/diff(range(time[[g]])),simplify=TRUE)
    Results <- Estimated_nAUC
  }else{
    Results <- Estimated_AUC
  }
  return(Results)
}
| /R/Group_specific_AUC_estimation.R | permissive | marie-alexandre/AUCcomparison | R | false | false | 11,773 | r | #' @title Area Under The Curve of Group-Specific Polynomial Marginal Dynamics
#' @description \loadmathjax This function estimates the area under the curve of marginal dynamics modeled by group-structured polynomials or B-spline curves.
#'
#' @param MEM_Pol_group A list with similar structure than the output provided by the function \link[AUCcomparison]{MEM_Polynomial_Group_structure}.
#'
#' A list containing:
#' \itemize{
#' \item \code{Model_estimation}: a list containing at least 2 elements: \enumerate{
#' \item the vector of the marginal (fixed) parameters estimates (at least for the groups whose AUC is to estimate), labeled _'beta'_.
#' \item the variance-covariance matrix of these parameters, labeled _'varFix'_ (see \link[AUCcomparison]{MEM_Polynomial_Group_structure} for details about the parameter order).
#' }
#' \item \code{Model_features}: a list of at least 2 elements: \enumerate{
#' \item \code{Groups}: a vector indicating the names of the groups whose fixed parameters are given.
#' \item \code{Marginal.dyn.feature}: a list summarizing the features of the marginal dynamics defined in the model:
#' \itemize{
#' \item \code{dynamic.type}: a character scalar indicating the chosen type of marginal dynamics. Options are 'polynomial' or 'spline'.
#' \item \code{intercept}: a logical vector summarizing choices about global and group-specific intercepts (Number of groups + 1) elements whose elements are named as ('global.intercept','group.intercept1', ..., 'group.interceptG') if G Groups are defined in \code{MEM_Pol_group}. For each element of the vector, if TRUE, the considered intercept is considered as included in the model (see \emph{Examples}).
#' }
#' If \code{dynamic.type} is defined as 'polynomial':\itemize{
#' \item \code{polynomial.degree}: an integer vector indicating the degree of polynomial functions, one value for each group.
#' }
#' If \code{dynamic.type} is defined as 'spline':\itemize{
#' \item \code{spline.degree}: an integer vector indicating the degree of B-spline curves, one for each group.
#' \item \code{knots}: a list of group-specific internal knots used to build B-spline basis (one numerical vector for each group) (see \link[splines]{bs} for more details).
#' \item \code{df}: a numerical vector of group-specific degrees of freedom used to build B-spline basis, (one for each group).
#' \item \code{boundary.knots}: a list of group-specific boundary knots used to build B-spline basis (one vector for each group) (see \link[splines]{bs} for more details).
#' }
#' }
#' }
#'
#' @param time a numerical vector of time points (x-axis coordinates) or a list of numerical vectors (with as much elements than the number of groups in \code{Groups}).
#' @param Groups a vector indicating the names of the groups belonging to the set of groups involved in \code{MEM_Pol_group} for which we want to estimate the AUC (a subset or the entire set of groups involved in the model can be considered). If NULL (default), the AUC for all the groups involved the MEM is calculated.
#' @param method a character scalar indicating the interpolation method to use to estimate the AUC. Options are 'trapezoid' (default), 'lagrange' and 'spline'. In this version, the 'spline' interpolation is implemented with the "not-a-knot" spline boundary conditions.
#' @param Averaged a logical scalar. If TRUE, the function return the normalized AUC (nAUC) computed as the AUC divided by the range of the time calculation. If FALSE (default), the classic AUC is calculated.
#'
#' @details The area under the curve for the group g of interest is calculated as an approximation of the integral of the expected value of the estimated outcome Y specific to the group g. Assuming a time interval \mjteqn{\[0,T_g\]}{\[0,T_g\]}{\[0,T_g\]}, the AUC is then calculated as
#' \mjtdeqn{AUC_g = \int_0^{T_g} E(\hat{Y_g})(t) dt}{AUC_g = \int_0^{T_g} E(\hat{Y_g})(t) dt}{AUC_g = \int_0^{T_g} E(\hat{Y_g})(t) dt}
#' Similarly, the normalized AUC (nAUC) for this same group is then defined as
#' \mjtdeqn{nAUC_g = \frac{1}{T_g}\int_0^{T_g} E(\hat{Y_g})(t) dt}{nAUC_g = \frac{1}{T_g}\int_0^{T_g} E(\hat{Y_g})(t) dt}{nAUC_g = \frac{1}{T_g}\int_0^{T_g} E(\hat{Y_g})(t) dt}
#'
#' @return A numerical vector containing the estimation of the AUC (or nAUC) for each group defined in the \code{Groups} vector.
#' @examples
#' \donttest{# Download of data
#' data("HIV_Simu_Dataset_Delta01_cens")
#' data <- HIV_Simu_Dataset_Delta01_cens
#'
#' # Change factors in character vectors
#' data$id <- as.character(data$id) ; data$Group <- as.character(data$Group)
#'
#' # Example 1: We consider the variable \code{MEM_Pol_Group} as the output
#' # of our function \link[AUCcomparison]{MEM_Polynomial_Group_structure}
#' MEM_estimation <- MEM_Polynomial_Group_structure(y=data$VL,x=data$time,Group=data$Group,
#' Id=data$id,Cens=data$cens)
#'
#' time_group1 <- unique(data$time[which(data$Group == "Group1")])
#' time_group2 <- unique(data$time[which(data$Group == "Group2")])
#'
#' # Estimation of the AUC for the two groups defined in the dataset
#' AUC_estimation <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_estimation,
#' time=list(time_group1,time_group2),
#' Groups=unique(data$Group))
#'
#' # Estimation of the AUC only for the group "Group1"
#' AUC_estimation_G1 <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_estimation,
#' time=time_group1,Groups=c("Group1"))
#'
#' # Example 2: We consider results of MEM estimation from another source.
#' # We have to build the variable 'MEM_Pol_group' with the proper structure
#' # We build the variable 'MEM_Pol_group.1' with the results of MEM estimation obtained
#' # for two groups (even if only "Group1" is called in AUC estimation function)
#'
#' MEM_Pol_group.1 <- list(Model_estimation=c(1.077,0.858,-0.061,0.0013,0.887,-0.066,0.0014),
#' Model_features=list(Groups=c("Group1","Group2"),
#' Marginal.dyn.feature=list(dynamic.type="polynomial",
#' intercept=c(global.intercept=TRUE,
#' group.intercept1=FALSE,group.intercept2=FALSE),
#' polynomial.degree=c(3,3))))
#'
#'# We build the variable 'MEM_Pol_group.2' with the results of MEM estimation obtained only for
#'# the group of interest (extraction)
#' MEM_Pol_group.2 <- list(Model_estimation=c(1.077,0.858,-0.061,0.0013),
#' Model_features=list(Groups=c("Group1"),
#' Marginal.dyn.feature=list(dynamic.type="polynomial",
#' intercept=c(global.intercept=TRUE,group.intercept1=FALSE),
#' polynomial.degree=c(3))))
#'
#'# Estimation of the AUC for the group "Group1"
#' time_group1 <- unique(data$time[which(data$Group == "Group1")])
#' AUC_estimation_G1.1 <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_Pol_group.1,
#' time=time_group1,Groups=c("Group1"))
#' AUC_estimation_G1.2 <- Group_specific_AUC_estimation(MEM_Pol_group=MEM_Pol_group.2,
#' time=time_group1)
#'}
#' @seealso
#' \code{\link[splines]{bs}},
#' \code{\link[AUCcomparison]{MEM_Polynomial_Group_structure}}
#' @rdname Group_specific_AUC_estimation
#' @export
#' @importFrom splines bs
Group_specific_AUC_estimation <- function(MEM_Pol_group,time,Groups=NULL,method="trapezoid",Averaged=FALSE){
  # Estimate the AUC (or normalized AUC) of group-specific marginal dynamics
  # (polynomial or B-spline) obtained from a mixed-effects model fit.
  #
  # Args:
  #   MEM_Pol_group: list with 'Model_estimation' (vector of fixed effects, or
  #     a list carrying them in element 'beta') and 'Model_features'
  #     (group names + marginal dynamics description).
  #   time: numeric vector of time points shared by all requested groups, or a
  #     list with one numeric vector per requested group.
  #   Groups: subset of the model's groups to evaluate; NULL means all of them.
  #   method: interpolation rule used to build the quadrature weights
  #     ("trapezoid", "lagrange" or "spline").
  #   Averaged: if TRUE, return the AUC divided by each group's time range (nAUC).
  #
  # Returns: a named numeric vector of AUC (or nAUC) values, one per group.
  '%notin%' <- Negate('%in%')
  # Step 1: Verification of the type of arguments
  # ----- #
  Check_argument_Group_specific_AUC(MEM_Pol_group,time,Groups,method,Averaged)
  Model_features <- MEM_Pol_group$Model_features
  Marginal_dynamics <- Model_features$Marginal.dyn.feature
  if(is.null(Groups)){
    Groups <- Model_features$Groups
  }
  if(is.numeric(time)){
    # A single time vector is replicated so every requested group uses it
    time <- lapply(seq(1,length(Groups)),function(g) return(time))
  }
  # Extraction of population parameters according to their groups
  if(is.list(MEM_Pol_group$Model_estimation)){
    Population_params <- MEM_Pol_group$Model_estimation$beta
  }else{
    Population_params <- MEM_Pol_group$Model_estimation
  }
  MEM_groups <- as.vector(Model_features$Groups)
  global_intercept <- Marginal_dynamics$intercept["global.intercept"]
  ind_params <- 0
  Group_parameters <- list()
  for(g in 1:length(MEM_groups)){
    params <- NULL
    if(global_intercept){
      # The global intercept (first fixed effect) is shared by every group,
      # but it is consumed from the parameter vector only once
      params <- c(params,Population_params[1])
      if(g == 1){
        ind_params <- ind_params + 1
      }
    }
    # Number of group-specific parameters: optional group intercept + basis size
    if(Marginal_dynamics$dynamic.type == "spline"){
      Nb_group_params <- as.numeric(1*Marginal_dynamics$intercept[paste("group.intercept",g,sep="")] +
                                      length(Marginal_dynamics$knots[[MEM_groups[g]]]) + Marginal_dynamics$spline.degree[g])
    }else if(Marginal_dynamics$dynamic.type == "polynomial"){
      Nb_group_params <- as.numeric(1*Marginal_dynamics$intercept[paste("group.intercept",g,sep="")] +
                                      Marginal_dynamics$polynomial.degree[g])
    }
    params <- c(params,Population_params[(ind_params+1):(ind_params+Nb_group_params)])
    ind_params <- ind_params + Nb_group_params
    Group_parameters[[MEM_groups[g]]] <- params
  }
  # Step 2: Calculation of AUC
  # ----- #
  Estimated_AUC <- NULL
  for(g in 1:length(Groups)){
    # Map the requested group back to its position in the model definition, so
    # that intercept flags, degrees and df are read for the RIGHT group even
    # when 'Groups' is a subset or a reordering of the model's groups.
    # (Indexing these by the loop index 'g', as the previous version did,
    # silently picked the wrong group, e.g. when only "Group2" was requested.)
    g_mem <- match(Groups[g],MEM_groups)
    time_group <- time[[g]]
    beta_group <- Group_parameters[[Groups[g]]]
    Pop_Covariate <- NULL
    if(global_intercept){
      Pop_Covariate <- cbind(Pop_Covariate,rep(1,length(time_group)))
    }
    # Extraction of information about model
    if(Marginal_dynamics$dynamic.type == "polynomial"){
      # Creation of covariate matrix: powers of time, starting at 0 (constant
      # column) when the group has its own intercept, at 1 otherwise
      Covariate_poly_group <- do.call(cbind,lapply(1*isFALSE(Marginal_dynamics$intercept[paste("group.intercept",g_mem,sep="")]):Marginal_dynamics$polynomial.degree[g_mem],function(d) time_group^d))
      Pop_Covariate <- cbind(Pop_Covariate,Covariate_poly_group)
    }else if(Marginal_dynamics$dynamic.type == "spline"){
      # Creation of covariate matrix (B-spline basis)
      if(is.null(Marginal_dynamics$boundary.knots[[Groups[g]]])){
        Covariate_spline_group <- splines::bs(x=time_group,knots=Marginal_dynamics$knots[[Groups[g]]],df=Marginal_dynamics$df[g_mem],degree=Marginal_dynamics$spline.degree[g_mem])
      }else{
        Covariate_spline_group <- splines::bs(x=time_group,knots=Marginal_dynamics$knots[[Groups[g]]],df=Marginal_dynamics$df[g_mem],degree=Marginal_dynamics$spline.degree[g_mem],Boundary.knots=Marginal_dynamics$boundary.knots[[Groups[g]]])
      }
      if(Marginal_dynamics$intercept[paste("group.intercept",g_mem,sep="")]){
        Covariate_spline_group <- cbind(rep(1,length(time_group)),Covariate_spline_group)
      }
      Pop_Covariate <- cbind(Pop_Covariate,Covariate_spline_group)
    }# End spline covariate
    # Estimation of the marginal dynamics
    Group_dynamics <- as.numeric(Pop_Covariate %*% beta_group)
    # Creation of method time weights (W) vector
    time_weights <- AUC_time_weights_estimation(time=time_group,method)
    AUC_group <- as.numeric(Group_dynamics %*% time_weights)
    Estimated_AUC <- c(Estimated_AUC,AUC_group)
  }
  names(Estimated_AUC) <- Groups
  if(Averaged){
    # Normalized AUC: divide by the width of each group's time window
    Estimated_nAUC <- sapply(seq(1,length(Groups)),function(g) Estimated_AUC[g]/diff(range(time[[g]])),simplify=TRUE)
    Results <- Estimated_nAUC
  }else{
    Results <- Estimated_AUC
  }
  return(Results)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reqParse.R
\name{parse_crawlerror_sample}
\alias{parse_crawlerror_sample}
\title{Parsing function for \code{\link{list_crawl_error_samples}}}
\usage{
parse_crawlerror_sample(x)
}
\arguments{
\item{x}{req$content from API response}
}
\description{
Parsing function for \code{\link{list_crawl_error_samples}}
}
\seealso{
Other parsing functions: \code{\link{parse_crawlerrors}},
\code{\link{parse_errorsample_url}},
\code{\link{parse_search_analytics}},
\code{\link{parse_sitemaps}}
}
\concept{parsing functions}
\keyword{internal}
| /man/parse_crawlerror_sample.Rd | no_license | cran/searchConsoleR | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reqParse.R
\name{parse_crawlerror_sample}
\alias{parse_crawlerror_sample}
\title{Parsing function for \code{\link{list_crawl_error_samples}}}
\usage{
parse_crawlerror_sample(x)
}
\arguments{
\item{x}{req$content from API response}
}
\description{
Parsing function for \code{\link{list_crawl_error_samples}}
}
\seealso{
Other parsing functions: \code{\link{parse_crawlerrors}},
\code{\link{parse_errorsample_url}},
\code{\link{parse_search_analytics}},
\code{\link{parse_sitemaps}}
}
\concept{parsing functions}
\keyword{internal}
|
# Demo: tokenize a tiny four-sentence corpus with RtextminerPkg, filtering a
# small custom stop-word list.
library(data.table)
library(RtextminerPkg)
# The following data example originated from
# https://sci2lab.github.io/ml_tutorial/tfidf/
# Tokens to drop during tokenization; includes "." and "," so punctuation
# tokens are filtered out as well.
stop_words <- data.frame(
  word = c("the", "is", "in", "we", ".", ",")
)
sentence_text <- c(
  "The sky is blue.",
  "The sun is bright today.",
  "The sun in the sky is bright.",
  "We can see the shining sun, the bright sun."
)
# One id per sentence, aligned by position with sentence_text
sentence_id <- c(
  "s1",
  "s2",
  "s3",
  "s4"
)
# create a data frame of sentences and their id
sentences_df <- data.frame(
  text = sentence_text,
  sentence = sentence_id
)
# tokenize sentences to words
# (result is presumably a data.table of word tokens -- confirm against the
# RtextminerPkg::tokenize_text documentation)
datatable_words_dt <- RtextminerPkg::tokenize_text(
  x = sentences_df,
  stopwords = stop_words$word
) | /demos/tokenize_text_demo_2.R | permissive | deandevl/RtextminerPkg | R | false | false | 680 | r | library(data.table)
library(RtextminerPkg)
# Demo: tokenize a tiny four-sentence corpus with RtextminerPkg, filtering a
# small custom stop-word list.
# The following data example originated from
# https://sci2lab.github.io/ml_tutorial/tfidf/
# Tokens to drop during tokenization; includes "." and "," so punctuation
# tokens are filtered out as well.
stop_words <- data.frame(
  word = c("the", "is", "in", "we", ".", ",")
)
sentence_text <- c(
  "The sky is blue.",
  "The sun is bright today.",
  "The sun in the sky is bright.",
  "We can see the shining sun, the bright sun."
)
# One id per sentence, aligned by position with sentence_text
sentence_id <- c(
  "s1",
  "s2",
  "s3",
  "s4"
)
# create a data frame of sentences and their id
sentences_df <- data.frame(
  text = sentence_text,
  sentence = sentence_id
)
# tokenize sentences to words
# (result is presumably a data.table of word tokens -- confirm against the
# RtextminerPkg::tokenize_text documentation)
datatable_words_dt <- RtextminerPkg::tokenize_text(
  x = sentences_df,
  stopwords = stop_words$word
) |
/PPI.R | no_license | labio-unesp/SantanaGG | R | false | false | 4,607 | r | ||
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 186407
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 186407
c
c Input Parameter (command line, file):
c input filename QBFLIB/Cashmore-Fox-Giunchiglia/Planning-CTE/depots07_9.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2403
c no.of clauses 186407
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 186407
c
c QBFLIB/Cashmore-Fox-Giunchiglia/Planning-CTE/depots07_9.qdimacs 2403 186407 E1 [] 0 3 2400 186407 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Cashmore-Fox-Giunchiglia/Planning-CTE/depots07_9/depots07_9.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 668 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 186407
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 186407
c
c Input Parameter (command line, file):
c input filename QBFLIB/Cashmore-Fox-Giunchiglia/Planning-CTE/depots07_9.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 2403
c no.of clauses 186407
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 186407
c
c QBFLIB/Cashmore-Fox-Giunchiglia/Planning-CTE/depots07_9.qdimacs 2403 186407 E1 [] 0 3 2400 186407 NONE
|
# Bootstrap dependency management: install pacman once, then let it
# install/load every package this project needs in a single call.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(lubridate,plyr,dplyr,reshape2,devtools,shiny,shinydashboard,dygraphs,DT,shinyjs,tools,data.table,writexl,zoo,readxl
               ,gmailr,mailR,cronR,miniUI,shinyFiles,ggplot2,stringr,chron,doParallel,foreach,openxlsx,gridExtra,egg,cowplot,corrgram,
               factoextra,scales,htmlwidgets,tidyfast,tidyr,kableExtra,janitor,xlsx,FuzzyNumbers,tibble,openair,npregfast)
# Reserve two cores for the OS/UI, but never register fewer than one worker:
# detectCores()-2 is <= 0 on single/dual-core machines (and detectCores()
# may return NA), which would make registerDoParallel fail.
registerDoParallel(cores = max(1, detectCores() - 2, na.rm = TRUE))
| /r_scripts/load.R | no_license | ricpie/indio | R | false | false | 454 | r | if (!require("pacman")) install.packages("pacman")
# Install/load all project packages in one pacman call (pacman itself is
# bootstrapped on the preceding line).
pacman::p_load(lubridate,plyr,dplyr,reshape2,devtools,shiny,shinydashboard,dygraphs,DT,shinyjs,tools,data.table,writexl,zoo,readxl
               ,gmailr,mailR,cronR,miniUI,shinyFiles,ggplot2,stringr,chron,doParallel,foreach,openxlsx,gridExtra,egg,cowplot,corrgram,
               factoextra,scales,htmlwidgets,tidyfast,tidyr,kableExtra,janitor,xlsx,FuzzyNumbers,tibble,openair,npregfast)
# Reserve two cores for the OS/UI, but never register fewer than one worker:
# detectCores()-2 can be <= 0 (or NA) on small machines, which would make
# registerDoParallel fail.
registerDoParallel(cores = max(1, detectCores() - 2, na.rm = TRUE))
|
# Shiny module UI for plot scale/theme controls.
# Every input is currently commented out, so this returns an empty tagList()
# and the module acts as a placeholder that keeps the UI/server wiring intact.
# NOTE(review): re-enabling the inputs below requires the CONST_DEFAULT_THEME
# and CONST_DEFAULT_THEME_BASE_SIZE constants defined elsewhere in the package.
ScalesInput <- function(id) {
  # Namespace function so input ids stay unique per module instance
  ns <- NS(id)
  tagList(
    # shiny::splitLayout(
    #   cellWidths = c("20%", "80%"),
    #   shiny::helpText("theme:"),
    #   shiny::selectInput(ns("theme"), label = NULL,
    #                      choices = c("theme_gray", "theme_bw", "theme_light",
    #                                  "theme_dark", "theme_minimal", "theme_void"),
    #                      selected = CONST_DEFAULT_THEME)
    # ),
    # shiny::splitLayout(
    #   cellWidths = c("20%", "80%"),
    #   shiny::helpText("base size:"),
    #   shiny::sliderInput(ns("base_size"), label = NULL,
    #                      min = 5, max = 20, step = 1,
    #                      value = CONST_DEFAULT_THEME_BASE_SIZE)
    # )
  )
}
Scales <- function(input, output, session) {
  # Module server for scale settings. The theme observer is currently
  # disabled, so the returned reactiveValues carries no entries yet.
  ns <- session$ns
  scale_state <- reactiveValues()
  observe({
    # scale_state$theme <- input$theme
  })
  scale_state
}
| /R/module-input-scales.R | no_license | wcmbishop/gogoplot | R | false | false | 905 | r |
ScalesInput <- function(id) {
ns <- NS(id)
tagList(
# shiny::splitLayout(
# cellWidths = c("20%", "80%"),
# shiny::helpText("theme:"),
# shiny::selectInput(ns("theme"), label = NULL,
# choices = c("theme_gray", "theme_bw", "theme_light",
# "theme_dark", "theme_minimal", "theme_void"),
# selected = CONST_DEFAULT_THEME)
# ),
# shiny::splitLayout(
# cellWidths = c("20%", "80%"),
# shiny::helpText("base size:"),
# shiny::sliderInput(ns("base_size"), label = NULL,
# min = 5, max = 20, step = 1,
# value = CONST_DEFAULT_THEME_BASE_SIZE)
# )
)
}
Scales <- function(input, output, session) {
ns <- session$ns
values <- reactiveValues()
observe({
# values$theme <- input$theme
})
return(values)
}
|
# Fuzzer-generated regression case: call the internal routine gjam:::tnormRcpp
# (presumably a truncated-normal sampler -- name-based, verify against the
# gjam sources) with degenerate near-zero bounds/moments and print the
# structure of whatever it returns.
testlist <- list(hi = 4.66726145839586e-62, lo = 4.66726145839584e-62, mu = 4.66726145839586e-62, sig = 4.66726145839586e-62)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
# TOOL gatk4-mutect2-call-snv-and-indels.R: "GATK4 -Call somatic SNVs and INDELs with Mutect2" (Call somatic short variants via local assembly of haplotypes. Short variants include single nucleotide (SNV\) and insertion and deletion (indel\) variants. Tool is based on GATK4 Mutect2 tool.)
# INPUT tumor.bam: "Tumor BAM file" TYPE BAM
# INPUT OPTIONAL normal.bam: "Normal BAM file" TYPE BAM
# INPUT OPTIONAL reference: "Reference genome FASTA" TYPE GENERIC
# INPUT OPTIONAL germline_resource.vcf: "Germline resource VCF" TYPE GENERIC
# INPUT OPTIONAL normal_panel.vcf: "Panel of Normals" TYPE GENERIC
# INPUT OPTIONAL gatk_interval.list: "Intervals list" TYPE GENERIC
# OUTPUT OPTIONAL mutect2.vcf
# OUTPUT OPTIONAL gatk_log.txt
# OUTPUT OPTIONAL mutect2.bam
# PARAMETER organism: "Reference sequence" TYPE [other, "FILES genomes/fasta .fa"] DEFAULT other (Reference sequence.)
# PARAMETER chr: "Chromosome names in my BAM file look like" TYPE [chr1, 1] DEFAULT 1 (Chromosome names must match in the BAM file and in the reference sequence. Check your BAM and choose accordingly. This only applies to provided reference genomes.)
# PARAMETER tumor: "Tumor sample name" TYPE STRING (BAM sample name of tumor.)
# PARAMETER OPTIONAL normal: "Normal sample name" TYPE STRING (BAM sample name of normal.)
# PARAMETER OPTIONAL gatk.interval: "Genomic intervals" TYPE STRING (One or more genomic intervals over which to operate. Format chromosome:begin-end, e.g. 20:10,000,000-10,200,000)
# PARAMETER OPTIONAL gatk.padding: "Interval padding" TYPE INTEGER DEFAULT 0 (Amount of padding in bp to add to each interval.)
# PARAMETER OPTIONAL gatk.bamout: "Output assembled haplotypes as BAM" TYPE [yes, no] DEFAULT no (Output assembled haplotypes as BAM.)
## PARAMETER gatk.afofalleles: "Allele fraction of alleles not in germline resource" TYPE DECIMAL DEFAULT -1 (Population allele fraction assigned to alleles not found in germline resource. Only applicable if germline resource file is provided. -1 = use default value. Default for case-only calling is 5e-8 and for matched-control calling 1e-5.)
# Chipster helper libraries; these presumably provide the helpers used below
# (read_input_definitions, fileOk, formatGatkFasta/Vcf, unzipIfGZipFile, ...)
source(file.path(chipster.common.path, "gatk-utils.R"))
source(file.path(chipster.common.path, "tool-utils.R"))
source(file.path(chipster.common.path, "zip-utils.R"))
# read input names
inputnames <- read_input_definitions()
# binaries
# Absolute paths to the bundled GATK4 and samtools executables
gatk.binary <- c(file.path(chipster.tools.path, "GATK4", "gatk"))
samtools.binary <- c(file.path(chipster.tools.path, "samtools", "samtools"))
# If user provided fasta we use it, else use internal fasta
if (organism == "other"){
  # If user has provided a FASTA, we use it
  if (file.exists("reference")){
    unzipIfGZipFile("reference")
    file.rename("reference", "reference.fasta")
  }else{
    # The 'CHIPSTER-NOTE:' prefix is presumably rendered to the user as a
    # friendly error by the platform -- confirm in Chipster tool conventions
    stop(paste('CHIPSTER-NOTE: ', "You need to provide a FASTA file or choose one of the provided reference genomes."))
  }
}else{
  # If not, we use the internal one.
  internal.fa <- file.path(chipster.tools.path, "genomes", "fasta", paste(organism,".fa",sep="",collapse=""))
  # If chromosome names in BAM have chr, we make a temporary copy of fasta with chr names, otherwise we use it as is.
  if(chr == "chr1"){
    source(file.path(chipster.common.path, "seq-utils.R"))
    addChrToFasta(internal.fa, "reference.fasta")
  }else{
    file.copy(internal.fa, "reference.fasta")
  }
}
# Accumulator for the Mutect2 command-line arguments built below
options <- ""
# Pre-process and add input files
#
# FASTA
# formatGatkFasta presumably creates the .fai index and sequence dictionary;
# GATK expects the dictionary as <basename>.dict, hence the rename below.
formatGatkFasta("reference.fasta")
system("mv reference.fasta.dict reference.dict")
options <- paste(options, "-R reference.fasta")
# BAM file(s): index the tumor (and optional matched normal) BAM and add them
# to the Mutect2 argument list. samtools index writes the .bai file itself,
# so redirecting its (empty) stdout into the index file -- as the previous
# version did with "> tumor.bam.bai" -- was a misleading no-op; dropped.
system(paste(samtools.binary, "index tumor.bam"))
options <- paste(options, "-I tumor.bam", "--tumor", tumor)
if (fileOk("normal.bam")){
  system(paste(samtools.binary, "index normal.bam"))
  options <- paste(options, "-I normal.bam", "--normal", normal)
}
# VCF files. These need to bgzip compressed and tabix indexed
# (formatGatkVcf presumably performs the compression/indexing, producing the
# .vcf.gz files referenced below -- confirm in gatk-utils.R)
if (fileOk("germline_resource.vcf")){
  formatGatkVcf("germline_resource.vcf")
  options <- paste(options, "--germline-resource germline_resource.vcf.gz")
}
if (fileOk("normal_panel.vcf")){
  formatGatkVcf("normal_panel.vcf")
  options <- paste(options, "--panel-of-normals normal_panel.vcf.gz")
}
if (fileOk("gatk_interval.list")){
  # Interval list file handling is based on file name, so we need to use the original name
  interval_list_name <- inputnames$gatk_interval.list
  system(paste("mv gatk_interval.list", interval_list_name))
  #unzipIfGZipFile("gatk.interval_list")
  options <- paste(options, "-L", interval_list_name)
}
# Add other options
if (nchar(gatk.interval) > 0 ){
  options <- paste(options, "-L", gatk.interval)
  # NOTE(review): interval padding is only applied when a manual interval
  # string is given, not when an intervals list file is used -- confirm
  # whether that asymmetry is intended.
  if (gatk.padding > 0){
    options <- paste(options, "-ip", gatk.padding)
  }
}
if (gatk.bamout == "yes"){
  # Also emit the assembled haplotypes as a BAM for inspection
  options <- paste(options, "-bamout mutect2.bam")
}
# Assemble the Mutect2 call; stderr is appended to error.txt so it can be
# returned as a log when the run produces no VCF.
mutect2_cmd <- paste(gatk.binary, "Mutect2", "-O mutect2.vcf", options, "2>> error.txt")
system(mutect2_cmd)
# Surface the captured log (plus a directory listing) when no VCF was produced.
if (fileNotOk("mutect2.vcf")){
  system("ls -l >> error.txt")
  system("mv error.txt gatk_log.txt")
}
# Output names
# NOTE: 'basename' shadows base::basename() for the remainder of the script.
basename <- strip_name(inputnames$tumor.bam)
# Make a matrix of output names (internal file name -> user-visible name)
outputnames <- matrix(NA, nrow=2, ncol=2)
outputnames[1,] <- c("mutect2.bam", paste(basename, "_mutect2.bam", sep=""))
outputnames[2,] <- c("mutect2.vcf", paste(basename, "_mutect2.vcf", sep=""))
# Write output definitions file
write_output_definitions(outputnames)
| /tools/ngs/R/gatk4-mutect2-call-snv-and-indels.R | permissive | edwardtao/chipster-tools | R | false | false | 5,537 | r | # TOOL gatk4-mutect2-call-snv-and-indels.R: "GATK4 -Call somatic SNVs and INDELs with Mutect2" (Call somatic short variants via local assembly of haplotypes. Short variants include single nucleotide (SNV\) and insertion and deletion (indel\) variants. Tool is based on GATK4 Mutect2 tool.)
# INPUT tumor.bam: "Tumor BAM file" TYPE BAM
# INPUT OPTIONAL normal.bam: "Normal BAM file" TYPE BAM
# INPUT OPTIONAL reference: "Reference genome FASTA" TYPE GENERIC
# INPUT OPTIONAL germline_resource.vcf: "Germline resource VCF" TYPE GENERIC
# INPUT OPTIONAL normal_panel.vcf: "Panel of Normals" TYPE GENERIC
# INPUT OPTIONAL gatk_interval.list: "Intervals list" TYPE GENERIC
# OUTPUT OPTIONAL mutect2.vcf
# OUTPUT OPTIONAL gatk_log.txt
# OUTPUT OPTIONAL mutect2.bam
# PARAMETER organism: "Reference sequence" TYPE [other, "FILES genomes/fasta .fa"] DEFAULT other (Reference sequence.)
# PARAMETER chr: "Chromosome names in my BAM file look like" TYPE [chr1, 1] DEFAULT 1 (Chromosome names must match in the BAM file and in the reference sequence. Check your BAM and choose accordingly. This only applies to provided reference genomes.)
# PARAMETER tumor: "Tumor sample name" TYPE STRING (BAM sample name of tumor.)
# PARAMETER OPTIONAL normal: "Normal sample name" TYPE STRING (BAM sample name of normal.)
# PARAMETER OPTIONAL gatk.interval: "Genomic intervals" TYPE STRING (One or more genomic intervals over which to operate. Format chromosome:begin-end, e.g. 20:10,000,000-10,200,000)
# PARAMETER OPTIONAL gatk.padding: "Interval padding" TYPE INTEGER DEFAULT 0 (Amount of padding in bp to add to each interval.)
# PARAMETER OPTIONAL gatk.bamout: "Output assembled haplotypes as BAM" TYPE [yes, no] DEFAULT no (Output assembled haplotypes as BAM.)
## PARAMETER gatk.afofalleles: "Allele fraction of alleles not in germline resource" TYPE DECIMAL DEFAULT -1 (Population allele fraction assigned to alleles not found in germline resource. Only applicable if germline resource file is provided. -1 = use default value. Default for case-only calling is 5e-8 and for matched-control calling 1e-5.)
source(file.path(chipster.common.path, "gatk-utils.R"))
source(file.path(chipster.common.path, "tool-utils.R"))
source(file.path(chipster.common.path, "zip-utils.R"))
# read input names
inputnames <- read_input_definitions()
# binaries
gatk.binary <- c(file.path(chipster.tools.path, "GATK4", "gatk"))
samtools.binary <- c(file.path(chipster.tools.path, "samtools", "samtools"))
# If user provided fasta we use it, else use internal fasta
if (organism == "other"){
# If user has provided a FASTA, we use it
if (file.exists("reference")){
unzipIfGZipFile("reference")
file.rename("reference", "reference.fasta")
}else{
stop(paste('CHIPSTER-NOTE: ', "You need to provide a FASTA file or choose one of the provided reference genomes."))
}
}else{
# If not, we use the internal one.
internal.fa <- file.path(chipster.tools.path, "genomes", "fasta", paste(organism,".fa",sep="",collapse=""))
# If chromosome names in BAM have chr, we make a temporary copy of fasta with chr names, otherwise we use it as is.
if(chr == "chr1"){
source(file.path(chipster.common.path, "seq-utils.R"))
addChrToFasta(internal.fa, "reference.fasta")
}else{
file.copy(internal.fa, "reference.fasta")
}
}
options <- ""
# Pre-process and add input files
#
# FASTA
formatGatkFasta("reference.fasta")
system("mv reference.fasta.dict reference.dict")
options <- paste(options, "-R reference.fasta")
# BAM file(s): index the tumor (and optional matched normal) BAM and add them
# to the Mutect2 argument list. samtools index writes the .bai file itself,
# so redirecting its (empty) stdout into the index file -- as the previous
# version did with "> tumor.bam.bai" -- was a misleading no-op; dropped.
system(paste(samtools.binary, "index tumor.bam"))
options <- paste(options, "-I tumor.bam", "--tumor", tumor)
if (fileOk("normal.bam")){
  system(paste(samtools.binary, "index normal.bam"))
  options <- paste(options, "-I normal.bam", "--normal", normal)
}
# VCF files. These need to bgzip compressed and tabix indexed
if (fileOk("germline_resource.vcf")){
formatGatkVcf("germline_resource.vcf")
options <- paste(options, "--germline-resource germline_resource.vcf.gz")
}
if (fileOk("normal_panel.vcf")){
formatGatkVcf("normal_panel.vcf")
options <- paste(options, "--panel-of-normals normal_panel.vcf.gz")
}
if (fileOk("gatk_interval.list")){
# Interval list file handling is based on file name, so we need to use the original name
interval_list_name <- inputnames$gatk_interval.list
system(paste("mv gatk_interval.list", interval_list_name))
#unzipIfGZipFile("gatk.interval_list")
options <- paste(options, "-L", interval_list_name)
}
# Add other options
if (nchar(gatk.interval) > 0 ){
options <- paste(options, "-L", gatk.interval)
if (gatk.padding > 0){
options <- paste(options, "-ip", gatk.padding)
}
}
if (gatk.bamout == "yes"){
options <- paste(options, "-bamout mutect2.bam")
}
# Command
command <- paste(gatk.binary, "Mutect2", "-O mutect2.vcf", options)
# Capture stderr
command <- paste(command, "2>> error.txt")
# Run command
system(command)
# Return error message if no result
if (fileNotOk("mutect2.vcf")){
system("ls -l >> error.txt")
system("mv error.txt gatk_log.txt")
}
# Output names
basename <- strip_name(inputnames$tumor.bam)
# Make a matrix of output names
outputnames <- matrix(NA, nrow=2, ncol=2)
outputnames[1,] <- c("mutect2.bam", paste(basename, "_mutect2.bam", sep=""))
outputnames[2,] <- c("mutect2.vcf", paste(basename, "_mutect2.vcf", sep=""))
# Write output definitions file
write_output_definitions(outputnames)
|
ggplot(data=as.data.frame(df), aes(x=date, y=sec, color=phonenumber)) +
geom_tile(aes(fill = phonenumber)) +
geom_text(aes(label = sec), color = "white") | /src/analyse_phone/plot_speed_of_answer.R | no_license | enyuka/rlang_mokumoku | R | false | false | 157 | r | ggplot(data=as.data.frame(df), aes(x=date, y=sec, color=phonenumber)) +
geom_tile(aes(fill = phonenumber)) +
geom_text(aes(label = sec), color = "white") |
#### LIBS ####
library(here)
library(tidyverse)
source(here("postprocessing/charting_functions.R"))

#### DATA ####
# DNN predictions are stored as a csv with "," field separator and "."
# decimal mark, so read.csv2's defaults (";" and ",") are overridden.
predictions_dnn_1 <-
  read.csv2(
    here(
      "postprocessing/performance_metrics/predictions/test_predictions_dnn_1.csv"
    ),
    sep = ",",
    dec = "."
  )

# Random-forest and gravity-model predictions were saved as RDS objects.
predictions_rf_2 <-
  readRDS(
    here(
      "postprocessing/performance_metrics/predictions/test_predictions_rf_2.RDS"
    )
  )

predictions_gravity_2 <-
  readRDS(
    here(
      "postprocessing/performance_metrics/predictions/test_predictions_gravity_2.RDS"
    )
  )

# Reference series: the observed values, shaped like a model's predictions so
# plot_three_model_predictions() can overlay them as a fourth "model".
predictions_actual <-
  data.frame(
    prediction = predictions_dnn_1$actual,
    actual = predictions_dnn_1$actual,
    Model = "Actual"
  )

#### PLOT ####
# Fixed color mapping shared by the chart.
color_scale <-
  c(
    "Gravity-2" = "#FD635E",
    "RF-2" = "#02B8AA",
    "DNN-1" = "#303636",
    "Actual" = "#9999CC"
  )

p <-
  plot_three_model_predictions(
    predictions_gravity_2,
    predictions_rf_2,
    predictions_dnn_1,
    predictions_actual,
    color_scale
  )
p

# Fix: resolve the output path with here() as well, so saving the figure does
# not depend on the current working directory (all input paths already do).
ggsave(here("postprocessing/charts/best_models_plot.pdf"), p)
| /postprocessing/overall/plot_best_models.R | no_license | coelhosilva/it213_research | R | false | false | 1,025 | r | #### LIBS ####
#### LIBS ####
library(here)
library(tidyverse)
source(here("postprocessing/charting_functions.R"))

#### DATA ####
# DNN predictions are stored as a csv with "," field separator and "."
# decimal mark, so read.csv2's defaults (";" and ",") are overridden.
predictions_dnn_1 <-
  read.csv2(
    here(
      "postprocessing/performance_metrics/predictions/test_predictions_dnn_1.csv"
    ),
    sep = ",",
    dec = "."
  )

# Random-forest and gravity-model predictions were saved as RDS objects.
predictions_rf_2 <-
  readRDS(
    here(
      "postprocessing/performance_metrics/predictions/test_predictions_rf_2.RDS"
    )
  )

predictions_gravity_2 <-
  readRDS(
    here(
      "postprocessing/performance_metrics/predictions/test_predictions_gravity_2.RDS"
    )
  )

# Reference series: the observed values, shaped like a model's predictions so
# plot_three_model_predictions() can overlay them as a fourth "model".
predictions_actual <-
  data.frame(
    prediction = predictions_dnn_1$actual,
    actual = predictions_dnn_1$actual,
    Model = "Actual"
  )

#### PLOT ####
# Fixed color mapping shared by the chart.
color_scale <-
  c(
    "Gravity-2" = "#FD635E",
    "RF-2" = "#02B8AA",
    "DNN-1" = "#303636",
    "Actual" = "#9999CC"
  )

p <-
  plot_three_model_predictions(
    predictions_gravity_2,
    predictions_rf_2,
    predictions_dnn_1,
    predictions_actual,
    color_scale
  )
p

# Fix: resolve the output path with here() as well, so saving the figure does
# not depend on the current working directory (all input paths already do).
ggsave(here("postprocessing/charts/best_models_plot.pdf"), p)
|
# Tests for optimize_bigM(), which summarizes SNP/locus counts across runs of
# the Stacks pipeline made with different values of the "big M" parameter.
context("optimize_bigM")
library(RADstackshelpR)

test_that("optimize_bigM generates output of the appropriate class (list)", {
  # find data in local directory
  # opt.m <- optimize_bigM(M1 = "~/Desktop/RADstackshelpR/inst/extdata/bigM1.vcf.gz")
  # find data in package using CRAN friendly syntax
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  # expect_is() is deprecated in testthat 3e; a bare list is not an S3 object,
  # so expect_type() is the correct modern replacement here
  expect_type(opt.m, "list")
})

test_that("optimize_bigM generates a list with length of 4", {
  # description fixed: the assertion below (and the names test) establish that
  # the list has four components, not five
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  expect_equal(length(opt.m), 4)
})

test_that("optimize_bigM generates an error if run with a non-vcf file", {
  # expect an error when trying to read a shell script as a vcf file
  expect_error(
    optimize_bigM(M1 = system.file("extdata", "denovo.stacks.pipeline.sh", package = "RADstackshelpR"))
  )
})

test_that("optimize_bigM generates a list with the appropriate names", {
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  # components must be named in this fixed order
  expect_equal(names(opt.m)[1], "snp")
  expect_equal(names(opt.m)[2], "loci")
  expect_equal(names(opt.m)[3], "snp.R80")
  expect_equal(names(opt.m)[4], "loci.R80")
})

test_that("optimize_bigM generates a list with each object inside being a dataframe", {
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  # bug fix: the original looped over `length(opt.m)` (a single value) and
  # therefore only checked the last element; seq_along() checks every element
  for (i in seq_along(opt.m)) {
    expect_s3_class(opt.m[[i]], "data.frame")
  }
})

test_that("optimize_bigM generates dataframes with appropriate dimensions when all slots are filled", {
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"),
                         M2 = system.file("extdata", "bigM2.vcf.gz", package = "RADstackshelpR"),
                         M3 = system.file("extdata", "bigM3.vcf.gz", package = "RADstackshelpR"),
                         M4 = system.file("extdata", "bigM4.vcf.gz", package = "RADstackshelpR"),
                         M5 = system.file("extdata", "bigM5.vcf.gz", package = "RADstackshelpR"),
                         M6 = system.file("extdata", "bigM6.vcf.gz", package = "RADstackshelpR"),
                         M7 = system.file("extdata", "bigM7.vcf.gz", package = "RADstackshelpR"),
                         M8 = system.file("extdata", "bigM8.vcf.gz", package = "RADstackshelpR"))
  # comment fixed: the first component is a 152-row data.frame (not 25 rows)
  # when all eight slots are filled
  expect_equal(nrow(opt.m[[1]]), 152)
})
| /tests/testthat/test-optimize_bigM.R | no_license | cran/RADstackshelpR | R | false | false | 3,056 | r | context("optimize_bigM")
# Tests for optimize_bigM(), which summarizes SNP/locus counts across runs of
# the Stacks pipeline made with different values of the "big M" parameter.
library(RADstackshelpR)

test_that("optimize_bigM generates output of the appropriate class (list)", {
  # find data in local directory
  # opt.m <- optimize_bigM(M1 = "~/Desktop/RADstackshelpR/inst/extdata/bigM1.vcf.gz")
  # find data in package using CRAN friendly syntax
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  # expect_is() is deprecated in testthat 3e; a bare list is not an S3 object,
  # so expect_type() is the correct modern replacement here
  expect_type(opt.m, "list")
})

test_that("optimize_bigM generates a list with length of 4", {
  # description fixed: the assertion below (and the names test) establish that
  # the list has four components, not five
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  expect_equal(length(opt.m), 4)
})

test_that("optimize_bigM generates an error if run with a non-vcf file", {
  # expect an error when trying to read a shell script as a vcf file
  expect_error(
    optimize_bigM(M1 = system.file("extdata", "denovo.stacks.pipeline.sh", package = "RADstackshelpR"))
  )
})

test_that("optimize_bigM generates a list with the appropriate names", {
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  # components must be named in this fixed order
  expect_equal(names(opt.m)[1], "snp")
  expect_equal(names(opt.m)[2], "loci")
  expect_equal(names(opt.m)[3], "snp.R80")
  expect_equal(names(opt.m)[4], "loci.R80")
})

test_that("optimize_bigM generates a list with each object inside being a dataframe", {
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"))
  # bug fix: the original looped over `length(opt.m)` (a single value) and
  # therefore only checked the last element; seq_along() checks every element
  for (i in seq_along(opt.m)) {
    expect_s3_class(opt.m[[i]], "data.frame")
  }
})

test_that("optimize_bigM generates dataframes with appropriate dimensions when all slots are filled", {
  opt.m <- optimize_bigM(M1 = system.file("extdata", "bigM1.vcf.gz", package = "RADstackshelpR"),
                         M2 = system.file("extdata", "bigM2.vcf.gz", package = "RADstackshelpR"),
                         M3 = system.file("extdata", "bigM3.vcf.gz", package = "RADstackshelpR"),
                         M4 = system.file("extdata", "bigM4.vcf.gz", package = "RADstackshelpR"),
                         M5 = system.file("extdata", "bigM5.vcf.gz", package = "RADstackshelpR"),
                         M6 = system.file("extdata", "bigM6.vcf.gz", package = "RADstackshelpR"),
                         M7 = system.file("extdata", "bigM7.vcf.gz", package = "RADstackshelpR"),
                         M8 = system.file("extdata", "bigM8.vcf.gz", package = "RADstackshelpR"))
  # comment fixed: the first component is a 152-row data.frame (not 25 rows)
  # when all eight slots are filled
  expect_equal(nrow(opt.m[[1]]), 152)
})
|
# Build the dataset for analyzing responses to a Case 2 (profile case)
# best-worst scaling study with the marginal (or marginal sequential) model.
#
# data             respondent data set (data.frame), or NULL to return only
#                  the design matrix
# id               name of the respondent id variable in `data`
# response         names of the response variables in `data`
# choice.sets      choice sets: one row per BWS question, one column per
#                  attribute; entries are level indices
# attribute.levels named list; each element holds the level labels of one
#                  attribute
# type             "sequential" deletes the row chosen as best from the
#                  worst-choice set (see delete.best below)
# base.attribute   reference attribute for effect-coded attribute variables,
#                  or NULL
# base.level       named list of reference levels (effect coding), or NULL
#                  (then dummy coding is used)
# reverse          if TRUE, attribute variables are multiplied by -1 in the
#                  worst rows
#
# Returns a long-format data.frame of class c("bws2dataset", "data.frame")
# with one best block and one worst block per question per respondent; the
# design matrix and the settings above are attached as attributes.
bws2.dataset.marginal <-
function(
data,
id,
response,
choice.sets,
attribute.levels,
type,
base.attribute,
base.level,
reverse)
{
# set variables
### added v 0.2-0 below ------------------------------------------------------
## delete.best
if (type == "sequential") {
delete.best <- TRUE
} else {
delete.best <- FALSE
}
## effect ver
effect <- base.level
## attribute.variables
if (isTRUE(reverse)) {
attribute.variables <- "reverse"
} else {
attribute.variables <- "constant"
}
### added v 0.2-0 above ------------------------------------------------------
## respondent dataset
if (!is.null(data)) {
resp.data <- data ## modified ver 0.2-0
colnames(resp.data)[which(colnames(resp.data) == id)] <- "ID"
}
## attributes and their levels
attr.lev <- attribute.levels
## number of attributes
num.attr <- length(attr.lev)
## number of levels in each attribute
num.lev <- sapply(attr.lev, length)
## number of questions (scenarios)
num.ques <- nrow(choice.sets)
## attribute.variables
attr.var <- names(attr.lev)
## level variables
lev.var <- unlist(attr.lev)
## change level values in choice sets (serial number starting from 1)
temp <- matrix(data = c(0, cumsum(num.lev)[-num.attr]),
nrow = num.ques, ncol = num.attr, byrow = TRUE)
choice.set.serial <- choice.sets + temp
## level variables without the reference level in each attribute
# When effect coding is requested, each attribute's reference level is moved
# to the end of its level vector so that dropping the last serial index
# (cumsum(num.lev)) removes exactly the reference levels.
original.attr.lev <- attr.lev
if (!is.null(effect)){
for (i in attr.var) {
attr.lev[[i]] <- attr.lev[[i]][attr.lev[[i]] != effect[[i]]]
attr.lev[[i]] <- c(attr.lev[[i]], effect[[i]])
}
lev.var.wo.ref <- unlist(attr.lev)[-cumsum(num.lev)]
} else {
lev.var.wo.ref <- unlist(attr.lev)
}
# create a design matrix
# Rows: 2 blocks (best, worst) x num.attr alternatives x num.ques questions.
# Columns: 7 bookkeeping variables plus attribute and level variables.
des.mat <- matrix(0L, nrow = 2 * num.attr * num.ques,
ncol = 7 + num.attr + length(lev.var.wo.ref))
des.mat <- data.frame(des.mat)
colnames(des.mat) <-
c("Q", # question number
"ALT", # attribute number in each question
"BW", # best and worst indicator (1 = best, -1 = worst)
"ATT.cha", "ATT", # attribute variables (AT.cha: charactor, AT: integer)
"LEV.cha", "LEV", # level variables (LV.cha: charactors, LV: integer)
attr.var, # attribute variables
lev.var.wo.ref) # level variables
## create "Q" variable: serial number starting from 1
des.mat[, 1] <- rep(1:num.ques, each = 2 * num.attr)
## create "ALT" variable: serial number starting from 1
des.mat[, 2] <- rep(1:num.attr, times = 2 * num.ques)
## create "BW" variable
des.mat[, 3] <- rep(c(rep(1, times = num.attr), rep(-1, times = num.attr)),
times = num.ques)
## create ATT.cha and ATT variables
des.mat[, 4] <- rep(attr.var, times = 2 * num.ques)
des.mat[, 5] <- rep(1:num.attr, times = 2 * num.ques)
## create LEV.cha and LEV variables
choice.sets.cha <- choice.sets
for (i in 1:num.attr){
choice.sets.cha[, i] <- original.attr.lev[[i]][choice.sets[, i]]
# Using attr.lev[[i]] is not appropriate because base.level may be changed
}
des.mat[, 6] <- as.vector(t(cbind(choice.sets.cha, choice.sets.cha)))
des.mat[, 7] <- as.vector(t(cbind(choice.sets, choice.sets)))
## create attribute variables
### added v 0.2-0 below ------------------------------------------------------
# One indicator column per attribute (no intercept); the "ATTR" prefix that
# model.matrix() prepends to column names is stripped with substring().
ATTR <- factor(des.mat[, 4], levels = attr.var)
temp <- model.matrix(~ ATTR - 1)
colnames(temp) <- substring(text = colnames(temp), first = 5)
### effect coding
if (!is.null(base.attribute)) {
rows2ref <- temp[, base.attribute] == 1
temp[rows2ref, ] <- -1
}
### added v 0.2-0 above ------------------------------------------------------
if (isTRUE(attribute.variables == "reverse")) {
temp <- temp * des.mat[, "BW"]
}
storage.mode(temp) <- "integer"
des.mat[, 8:(7 + num.attr)] <- temp
## create level variables
if (!is.null(effect)) {
### effect coding
# contr.sum() supplies the sum-to-zero contrasts; rows are labelled with the
# (reordered) level names so they can be looked up by LEV.cha below.
best.lev <- mapply(contr.sum, num.lev, SIMPLIFY = FALSE)
for (i in attr.var) {
rownames(best.lev[[i]]) <- attr.lev[[i]]
}
for (i in 1:nrow(des.mat)) {
y <- attr.lev[[des.mat[i, "ATT.cha"]]][-num.lev[des.mat[i, "ATT.cha"]]]
des.mat[i, y] <-
as.integer(best.lev[[des.mat[i, "ATT.cha"]]][des.mat[i, "LEV.cha"], ] *
des.mat[i, "BW"])
# des.mat[i, "BW"] is used to multiply level variables by -1
}
} else {
### dummy coding
temp <- model.matrix(~ factor(des.mat[, 6], levels = lev.var.wo.ref) - 1)
temp <- temp * des.mat[, "BW"]
storage.mode(temp) <- "integer"
des.mat[, (8 + num.attr):(7 + num.attr + length(lev.var.wo.ref))] <- temp
}
## calculate frequency of each level
# How often each level appears across the best blocks of the design;
# reordered to follow the original level order in attribute.levels.
temp <- table(subset(des.mat, des.mat$BW == 1, select = "LEV.cha"))
freq.levels <- as.vector(temp)
names(freq.levels) <- names(temp)
freq.levels <- freq.levels[lev.var]
### added ver 0.2-0 below -----------------------------------------------------
if (!is.null(base.attribute)) {
delete.column.ref <- colnames(des.mat) != base.attribute
des.mat <- des.mat[, delete.column.ref]
}
### added ver 0.2-0 above -----------------------------------------------------
## store design matrix
design.matrix <- des.mat
# return design matrix
# Early exit: without respondent data only the design matrix is returned.
if (is.null(data)) {
return(des.mat)
}
# create respondent dataset
## extract the names of respondents' characteristic variables
respondent.characteristics <-
colnames(resp.data)[!(colnames(resp.data) %in% c("ID", response))]
## reshape the dataset into long format
# Wide-to-long: one row per (respondent, question); the auto-generated "time"
# column and the two following columns become Q, RES.B (best), RES.W (worst).
resp.data.long <- reshape(resp.data,
idvar = "ID",
varying = response,
sep = "",
direction = "long")
temp <- which(colnames(resp.data.long) == "time")
storage.mode(resp.data.long$time) <- "integer"
colnames(resp.data.long)[temp:(temp + 2)] <- c("Q", "RES.B", "RES.W")
## expand respondent dataset according to possible pairs in each BWS question
temp <- data.frame(
ID = rep(resp.data$ID, each = 2 * num.attr * num.ques),
Q = rep(1:num.ques, each = 2 * num.attr),
ALT = rep(1:num.attr, times = 2 * num.ques),
BW = rep(c(rep(1, times = num.attr), rep(-1, times = num.attr)),
times = num.ques))
exp.resp.data <- merge(temp, resp.data.long, by = c("ID", "Q"))
exp.resp.data <- exp.resp.data[order(exp.resp.data$ID,
exp.resp.data$Q,
-1 * exp.resp.data$BW,
exp.resp.data$ALT), ]
# create dataset for discrete choice models
# RES = 1 on the row actually chosen (best row in the best block, worst row
# in the worst block), 0 otherwise.
dataset <- merge(exp.resp.data, des.mat, by = c("Q", "BW", "ALT"))
dataset$RES <- (dataset$RES.B == dataset$ALT) * (dataset$BW == 1) +
(dataset$RES.W == dataset$ALT) * (dataset$BW == -1)
# STR: stratification id encoding (respondent, question, best/worst set).
# NOTE(review): the ID * 1000 + Q * 10 encoding assumes at most 99 questions
# per respondent -- TODO confirm this limit is documented/enforced upstream.
dataset$STR <- dataset$ID * 1000 + dataset$Q * 10 + (dataset$BW == 1) +
(dataset$BW == -1) * 2
dataset <- dataset[order(dataset$STR, dataset$ALT), ]
if (delete.best == TRUE) {
select <- !(dataset$BW == -1 & dataset$ALT == dataset$RES.B)
dataset <- dataset[select,]
}
row.names(dataset) <- NULL
# change order of variables
### added ver 0.2-0 below -----------------------------------------------------
if (!is.null(base.attribute)) {
attr.var <- attr.var[attr.var != base.attribute]
}
### added ver 0.2-0 above -----------------------------------------------------
covariate.names <- colnames(resp.data)
covariate.names <-
covariate.names[!covariate.names %in% c("ID", response)]
dataset <- dataset[, c("ID", "Q", "ALT", "BW",
"ATT.cha" ,"ATT", "LEV.cha", "LEV",
attr.var, lev.var.wo.ref,
"RES.B", "RES.W", "RES", "STR", covariate.names)]
# change name of id variable
colnames(dataset)[which(colnames(dataset) == "ID")] <- id
# set attributes
# The construction settings are attached so downstream model-fitting and
# summary methods can recover them from the dataset alone.
attributes(dataset)$id <- id
attributes(dataset)$response <- response
attributes(dataset)$choice.sets <- choice.sets
attributes(dataset)$attribute.levels <- attribute.levels
attributes(dataset)$reverse <- reverse
attributes(dataset)$base.attribute <- base.attribute
attributes(dataset)$base.level <- base.level
attributes(dataset)$attribute.variables <- attribute.variables
attributes(dataset)$effect <- effect
attributes(dataset)$delete.best <- delete.best
attributes(dataset)$type <- type
attributes(dataset)$design.matrix <- design.matrix
attributes(dataset)$lev.var.wo.ref <- lev.var.wo.ref
attributes(dataset)$freq.levels <- freq.levels
attributes(dataset)$respondent.characteristics <- respondent.characteristics
# set S3 class bws2dataset
class(dataset) <- c("bws2dataset", "data.frame")
# return dataset
return(data = dataset)
}
| /R/bws2.dataset.marginal.R | no_license | cran/support.BWS2 | R | false | false | 9,019 | r | bws2.dataset.marginal <-
function(
data,
id,
response,
choice.sets,
attribute.levels,
type,
base.attribute,
base.level,
reverse)
{
# set variables
### added v 0.2-0 below ------------------------------------------------------
## delete.best
if (type == "sequential") {
delete.best <- TRUE
} else {
delete.best <- FALSE
}
## effect ver
effect <- base.level
## attribute.variables
if (isTRUE(reverse)) {
attribute.variables <- "reverse"
} else {
attribute.variables <- "constant"
}
### added v 0.2-0 above ------------------------------------------------------
## respondent dataset
if (!is.null(data)) {
resp.data <- data ## modified ver 0.2-0
colnames(resp.data)[which(colnames(resp.data) == id)] <- "ID"
}
## attributes and their levels
attr.lev <- attribute.levels
## number of attributes
num.attr <- length(attr.lev)
## number of levels in each attribute
num.lev <- sapply(attr.lev, length)
## number of questions (scenarios)
num.ques <- nrow(choice.sets)
## attribute.variables
attr.var <- names(attr.lev)
## level variables
lev.var <- unlist(attr.lev)
## change level values in choice sets (serial number starting from 1)
temp <- matrix(data = c(0, cumsum(num.lev)[-num.attr]),
nrow = num.ques, ncol = num.attr, byrow = TRUE)
choice.set.serial <- choice.sets + temp
## level variables without the reference level in each attribute
original.attr.lev <- attr.lev
if (!is.null(effect)){
for (i in attr.var) {
attr.lev[[i]] <- attr.lev[[i]][attr.lev[[i]] != effect[[i]]]
attr.lev[[i]] <- c(attr.lev[[i]], effect[[i]])
}
lev.var.wo.ref <- unlist(attr.lev)[-cumsum(num.lev)]
} else {
lev.var.wo.ref <- unlist(attr.lev)
}
# creat a design matrix
des.mat <- matrix(0L, nrow = 2 * num.attr * num.ques,
ncol = 7 + num.attr + length(lev.var.wo.ref))
des.mat <- data.frame(des.mat)
colnames(des.mat) <-
c("Q", # question number
"ALT", # attribute number in each question
"BW", # best and worst indicator (1 = best, -1 = worst)
"ATT.cha", "ATT", # attribute variables (AT.cha: charactor, AT: integer)
"LEV.cha", "LEV", # level variables (LV.cha: charactors, LV: integer)
attr.var, # attribute variables
lev.var.wo.ref) # level variables
## create "Q" variable: serial number starting from 1
des.mat[, 1] <- rep(1:num.ques, each = 2 * num.attr)
## create "ALT" variable: serial number starting from 1
des.mat[, 2] <- rep(1:num.attr, times = 2 * num.ques)
## create "BW" variable
des.mat[, 3] <- rep(c(rep(1, times = num.attr), rep(-1, times = num.attr)),
times = num.ques)
## create ATT.cha and ATT variables
des.mat[, 4] <- rep(attr.var, times = 2 * num.ques)
des.mat[, 5] <- rep(1:num.attr, times = 2 * num.ques)
## create LEV.cha and LEV variables
choice.sets.cha <- choice.sets
for (i in 1:num.attr){
choice.sets.cha[, i] <- original.attr.lev[[i]][choice.sets[, i]]
# Using attr.lev[[i]] is not appropriate because bese.level may be changed
}
des.mat[, 6] <- as.vector(t(cbind(choice.sets.cha, choice.sets.cha)))
des.mat[, 7] <- as.vector(t(cbind(choice.sets, choice.sets)))
## create attribute variables
### added v 0.2-0 below ------------------------------------------------------
ATTR <- factor(des.mat[, 4], levels = attr.var)
temp <- model.matrix(~ ATTR - 1)
colnames(temp) <- substring(text = colnames(temp), first = 5)
### effect coding
if (!is.null(base.attribute)) {
rows2ref <- temp[, base.attribute] == 1
temp[rows2ref, ] <- -1
}
### added v 0.2-0 above ------------------------------------------------------
if (isTRUE(attribute.variables == "reverse")) {
temp <- temp * des.mat[, "BW"]
}
storage.mode(temp) <- "integer"
des.mat[, 8:(7 + num.attr)] <- temp
## create level variables
if (!is.null(effect)) {
### effect coding
best.lev <- mapply(contr.sum, num.lev, SIMPLIFY = FALSE)
for (i in attr.var) {
rownames(best.lev[[i]]) <- attr.lev[[i]]
}
for (i in 1:nrow(des.mat)) {
y <- attr.lev[[des.mat[i, "ATT.cha"]]][-num.lev[des.mat[i, "ATT.cha"]]]
des.mat[i, y] <-
as.integer(best.lev[[des.mat[i, "ATT.cha"]]][des.mat[i, "LEV.cha"], ] *
des.mat[i, "BW"])
# des.mat[i, "BW"] is used to multiply level variables by -1
}
} else {
### dummy coding
temp <- model.matrix(~ factor(des.mat[, 6], levels = lev.var.wo.ref) - 1)
temp <- temp * des.mat[, "BW"]
storage.mode(temp) <- "integer"
des.mat[, (8 + num.attr):(7 + num.attr + length(lev.var.wo.ref))] <- temp
}
## calculate frequency of each level
temp <- table(subset(des.mat, des.mat$BW == 1, select = "LEV.cha"))
freq.levels <- as.vector(temp)
names(freq.levels) <- names(temp)
freq.levels <- freq.levels[lev.var]
### added ver 0.2-0 below -----------------------------------------------------
if (!is.null(base.attribute)) {
delete.column.ref <- colnames(des.mat) != base.attribute
des.mat <- des.mat[, delete.column.ref]
}
### added ver 0.2-0 above -----------------------------------------------------
## store design matrix
design.matrix <- des.mat
# return design matrix
if (is.null(data)) {
return(des.mat)
}
# create respondent dataset
## extract the names of respondents' characteristic variables
respondent.characteristics <-
colnames(resp.data)[!(colnames(resp.data) %in% c("ID", response))]
## reshape the dataset into long format
resp.data.long <- reshape(resp.data,
idvar = "ID",
varying = response,
sep = "",
direction = "long")
temp <- which(colnames(resp.data.long) == "time")
storage.mode(resp.data.long$time) <- "integer"
colnames(resp.data.long)[temp:(temp + 2)] <- c("Q", "RES.B", "RES.W")
## expand respondent dataset according to possible pairs in each BWS question
temp <- data.frame(
ID = rep(resp.data$ID, each = 2 * num.attr * num.ques),
Q = rep(1:num.ques, each = 2 * num.attr),
ALT = rep(1:num.attr, times = 2 * num.ques),
BW = rep(c(rep(1, times = num.attr), rep(-1, times = num.attr)),
times = num.ques))
exp.resp.data <- merge(temp, resp.data.long, by = c("ID", "Q"))
exp.resp.data <- exp.resp.data[order(exp.resp.data$ID,
exp.resp.data$Q,
-1 * exp.resp.data$BW,
exp.resp.data$ALT), ]
# create dataset for discrete choice models
dataset <- merge(exp.resp.data, des.mat, by = c("Q", "BW", "ALT"))
dataset$RES <- (dataset$RES.B == dataset$ALT) * (dataset$BW == 1) +
(dataset$RES.W == dataset$ALT) * (dataset$BW == -1)
dataset$STR <- dataset$ID * 1000 + dataset$Q * 10 + (dataset$BW == 1) +
(dataset$BW == -1) * 2
dataset <- dataset[order(dataset$STR, dataset$ALT), ]
if (delete.best == TRUE) {
select <- !(dataset$BW == -1 & dataset$ALT == dataset$RES.B)
dataset <- dataset[select,]
}
row.names(dataset) <- NULL
# change order of variables
### added ver 0.2-0 below -----------------------------------------------------
if (!is.null(base.attribute)) {
attr.var <- attr.var[attr.var != base.attribute]
}
### added ver 0.2-0 above -----------------------------------------------------
covariate.names <- colnames(resp.data)
covariate.names <-
covariate.names[!covariate.names %in% c("ID", response)]
dataset <- dataset[, c("ID", "Q", "ALT", "BW",
"ATT.cha" ,"ATT", "LEV.cha", "LEV",
attr.var, lev.var.wo.ref,
"RES.B", "RES.W", "RES", "STR", covariate.names)]
# change name of id variable
colnames(dataset)[which(colnames(dataset) == "ID")] <- id
# set attributes
attributes(dataset)$id <- id
attributes(dataset)$response <- response
attributes(dataset)$choice.sets <- choice.sets
attributes(dataset)$attribute.levels <- attribute.levels
attributes(dataset)$reverse <- reverse
attributes(dataset)$base.attribute <- base.attribute
attributes(dataset)$base.level <- base.level
attributes(dataset)$attribute.variables <- attribute.variables
attributes(dataset)$effect <- effect
attributes(dataset)$delete.best <- delete.best
attributes(dataset)$type <- type
attributes(dataset)$design.matrix <- design.matrix
attributes(dataset)$lev.var.wo.ref <- lev.var.wo.ref
attributes(dataset)$freq.levels <- freq.levels
attributes(dataset)$respondent.characteristics <- respondent.characteristics
# set S3 class bws2dataset
class(dataset) <- c("bws2dataset", "data.frame")
# return dataset
return(data = dataset)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{setup_key}
\alias{setup_key}
\title{Set up Carto user name and API key with environment}
\usage{
setup_key()
}
\value{
setup status message
}
\description{
All functions need a Carto user name and API key.
}
\details{
\enumerate{
\item run \code{file.edit("~/.Renviron")} to edit the environment
variable file
\item add two lines
\itemize{ \item \code{carto_acc = "your
user name"}
\item \code{carto_api_key = "your api key"}
}
\item run \code{setup_key()}.
}
Note that if you want to remove the key and have deleted the lines from
\code{~/.Renviron}, the key could still exist in the environment. Restart the
R session to make sure it was removed.
To add a key or change a key value, editing \code{~/.Renviron} and running
\code{setup_key()} again is enough.
}
| /man/setup_key.Rd | permissive | jjmata/rCartoAPI | R | false | true | 828 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper.R
\name{setup_key}
\alias{setup_key}
\title{Set up Carto user name and API key with environment}
\usage{
setup_key()
}
\value{
setup status message
}
\description{
All functions need a Carto user name and API key.
}
\details{
\enumerate{
\item run \code{file.edit("~/.Renviron")} to edit the environment
variable file
\item add two lines
\itemize{ \item \code{carto_acc = "your
user name"}
\item \code{carto_api_key = "your api key"}
}
\item run \code{setup_key()}.
}
Note that if you want to remove the key and have deleted the lines from
\code{~/.Renviron}, the key could still exist in the environment. Restart the
R session to make sure it was removed.
To add a key or change a key value, editing \code{~/.Renviron} and running
\code{setup_key()} again is enough.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3_operations.R
\name{s3_put_object_retention}
\alias{s3_put_object_retention}
\title{Places an Object Retention configuration on an object}
\usage{
s3_put_object_retention(
Bucket,
Key,
Retention = NULL,
RequestPayer = NULL,
VersionId = NULL,
BypassGovernanceRetention = NULL,
ContentMD5 = NULL,
ChecksumAlgorithm = NULL,
ExpectedBucketOwner = NULL
)
}
\arguments{
\item{Bucket}{[required] The bucket name that contains the object you want to apply this Object
Retention configuration to.
When using this action with an access point, you must direct requests to
the access point hostname. The access point hostname takes the form
\emph{AccessPointName}-\emph{AccountId}.s3-accesspoint.\emph{Region}.amazonaws.com.
When using this action with an access point through the Amazon Web
Services SDKs, you provide the access point ARN in place of the bucket
name. For more information about access point ARNs, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html}{Using access points}
in the \emph{Amazon S3 User Guide}.}
\item{Key}{[required] The key name for the object that you want to apply this Object Retention
configuration to.}
\item{Retention}{The container element for the Object Retention configuration.}
\item{RequestPayer}{}
\item{VersionId}{The version ID for the object that you want to apply this Object
Retention configuration to.}
\item{BypassGovernanceRetention}{Indicates whether this action should bypass Governance-mode
restrictions.}
\item{ContentMD5}{The MD5 hash for the request body.
For requests made using the Amazon Web Services Command Line Interface
(CLI) or Amazon Web Services SDKs, this field is calculated
automatically.}
\item{ChecksumAlgorithm}{Indicates the algorithm used to create the checksum for the object when
using the SDK. This header will not provide any additional functionality
if not using the SDK. When sending this header, there must be a
corresponding \code{x-amz-checksum} or \code{x-amz-trailer} header sent.
Otherwise, Amazon S3 fails the request with the HTTP status code
\verb{400 Bad Request}. For more information, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html}{Checking object integrity}
in the \emph{Amazon S3 User Guide}.
If you provide an individual checksum, Amazon S3 ignores any provided
\code{ChecksumAlgorithm} parameter.}
\item{ExpectedBucketOwner}{The account ID of the expected bucket owner. If the bucket is owned by a
different account, the request fails with the HTTP status code
\verb{403 Forbidden} (access denied).}
}
\description{
Places an Object Retention configuration on an object. For more information, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html}{Locking Objects}. Users or accounts require the \code{s3:PutObjectRetention} permission in order to place an Object Retention configuration on objects. Bypassing a Governance Retention configuration requires the \code{s3:BypassGovernanceRetention} permission.
See \url{https://www.paws-r-sdk.com/docs/s3_put_object_retention/} for full documentation.
}
\keyword{internal}
| /cran/paws.storage/man/s3_put_object_retention.Rd | permissive | paws-r/paws | R | false | true | 3,229 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s3_operations.R
\name{s3_put_object_retention}
\alias{s3_put_object_retention}
\title{Places an Object Retention configuration on an object}
\usage{
s3_put_object_retention(
Bucket,
Key,
Retention = NULL,
RequestPayer = NULL,
VersionId = NULL,
BypassGovernanceRetention = NULL,
ContentMD5 = NULL,
ChecksumAlgorithm = NULL,
ExpectedBucketOwner = NULL
)
}
\arguments{
\item{Bucket}{[required] The bucket name that contains the object you want to apply this Object
Retention configuration to.
When using this action with an access point, you must direct requests to
the access point hostname. The access point hostname takes the form
\emph{AccessPointName}-\emph{AccountId}.s3-accesspoint.\emph{Region}.amazonaws.com.
When using this action with an access point through the Amazon Web
Services SDKs, you provide the access point ARN in place of the bucket
name. For more information about access point ARNs, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html}{Using access points}
in the \emph{Amazon S3 User Guide}.}
\item{Key}{[required] The key name for the object that you want to apply this Object Retention
configuration to.}
\item{Retention}{The container element for the Object Retention configuration.}
\item{RequestPayer}{}
\item{VersionId}{The version ID for the object that you want to apply this Object
Retention configuration to.}
\item{BypassGovernanceRetention}{Indicates whether this action should bypass Governance-mode
restrictions.}
\item{ContentMD5}{The MD5 hash for the request body.
For requests made using the Amazon Web Services Command Line Interface
(CLI) or Amazon Web Services SDKs, this field is calculated
automatically.}
\item{ChecksumAlgorithm}{Indicates the algorithm used to create the checksum for the object when
using the SDK. This header will not provide any additional functionality
if not using the SDK. When sending this header, there must be a
corresponding \code{x-amz-checksum} or \code{x-amz-trailer} header sent.
Otherwise, Amazon S3 fails the request with the HTTP status code
\verb{400 Bad Request}. For more information, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html}{Checking object integrity}
in the \emph{Amazon S3 User Guide}.
If you provide an individual checksum, Amazon S3 ignores any provided
\code{ChecksumAlgorithm} parameter.}
\item{ExpectedBucketOwner}{The account ID of the expected bucket owner. If the bucket is owned by a
different account, the request fails with the HTTP status code
\verb{403 Forbidden} (access denied).}
}
\description{
Places an Object Retention configuration on an object. For more information, see \href{https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html}{Locking Objects}. Users or accounts require the \code{s3:PutObjectRetention} permission in order to place an Object Retention configuration on objects. Bypassing a Governance Retention configuration requires the \code{s3:BypassGovernanceRetention} permission.
See \url{https://www.paws-r-sdk.com/docs/s3_put_object_retention/} for full documentation.
}
\keyword{internal}
|
# Open the PDF device that will collect every plot produced by this script;
# the matching dev.off() is expected at the end of the full script.
pdf(file = 'Day6_test.pdf', width = 4.5, height = 4.5)
# MAGeCK gene summary table: one row per gene, with negative/positive
# selection RRA scores and p values in successive columns.
gstable <- read.table('Day6_test.gene_summary.txt', header = TRUE)
#
#
# Parameters ----
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Column of the gene summary table holding the negative-selection RRA score.
startindex <- 3
# outputfile='__OUTPUT_FILE__'
# Genes to highlight on the ranked-score plots (negative selection hits).
targetgenelist <- c(
  "YALI1_F04590t", "YALI1_F27904g", "YALI1_F07344g", "YALI1_B07652g",
  "YALI1_D22768g", "YALI1_F04110g", "YALI1_F00821g", "YALI1_E34028g",
  "YALI1_C01301t", "YALI1_E18539g"
)
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel <- 'PO1f_Cas9_1,PO1f_Cas9_2,PO1f_Cas9_3_vs_PO1f_1,PO1f_2,PO1f_3 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# Fixed 28-color palette (Set1/Set2/Set3 hues) so RColorBrewer is not needed.
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors <- c(
  "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
  "#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F",
  "#E5C494", "#B3B3B3", "#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3",
  "#FDB462", "#B3DE69", "#FCCDE5", "#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F"
)
######
# function definition
# Plot a named numeric vector (RRA scores or p values) in ranked order on a
# log-scaled, reversed y axis (smallest/best values on top), then highlight
# each gene in `tglist` as a colored point with a legend.
# `val`: named numeric vector (names are gene ids), assumed pre-sorted.
# `tglist`: character vector of gene ids to highlight.
# Relies on the script-level `colors` palette. Called for side effects only.
plotrankedvalues<-function(val, tglist, ...){
  plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
  if(length(tglist)>0){
    for(i in 1:length(tglist)){
      targetgene=tglist[i];
      tx=which(names(val)==targetgene);ty=val[targetgene];
      # ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original
      # `i %% n` produced the invalid index 0 once i reached length(colors).
      points(tx,ty,col=colors[((i - 1) %% length(colors)) + 1],cex=2,pch=20)
      # text(tx+50,ty,targetgene,col=colors[i])
    }
    legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
  }
}
# Scatter-plot `val` in a shuffled order (log y axis, reversed) and label the
# genes in `targetgenelist`. Out of 20 random shuffles, keep the one whose
# highlighted genes are most evenly spread along x (largest minimum gap),
# skipping shuffles that put a highlighted gene in the rightmost 10%.
# `val`: named numeric vector. Uses the script-level `colors` palette.
# Called for side effects only.
plotrandvalues<-function(val,targetgenelist, ...){
  # choose the one with the best distance distribution
  mindiffvalue=0;
  randval=val;
  for(i in 1:20){
    randval0=sample(val)
    vindex=sort(which(names(randval0) %in% targetgenelist))
    if(max(vindex)>0.9*length(val)){
      # print('pass...')
      next;
    }
    mindiffind=min(diff(vindex));
    if (mindiffind > mindiffvalue){
      mindiffvalue=mindiffind;
      randval=randval0;
      # print(paste('Diff: ',mindiffvalue))
    }
  }
  plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
  if(length(targetgenelist)>0){
    for(i in 1:length(targetgenelist)){
      targetgene=targetgenelist[i];
      tx=which(names(randval)==targetgene);ty=randval[targetgene];
      # ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original used
      # `i %% n` (invalid index 0 at multiples of n) and an unwrapped
      # `colors[i]` in text(), which is NA past the palette length.
      points(tx,ty,col=colors[((i - 1) %% length(colors)) + 1],cex=2,pch=20)
      text(tx+50,ty,targetgene,col=colors[((i - 1) %% length(colors)) + 1])
    }
  }
}
# set.seed(1235)
# Rank the negative-selection RRA scores (column `startindex`) and draw the
# ranked curve with the target genes highlighted.
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
# '\n' (a real newline) instead of the original '\\n', which rendered as the
# literal two characters "\n" in the plot title.
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \n',samplelabel))
# Repeat for the p-value column (startindex + 1).
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(48.879863051257374,77.22899649781886,189.065947587737,9.933475198475739,0.0,1.8612111712811965),c(69.16500621752918,165.99795798956467,94.31665119937453,9.933475198475739,5.222255807493615,28.848773154858545),c(122.44405694339972,64.35749708151572,69.22323023807306,14.900212797713607,0.0,0.0),c(77.47458293624294,49.26677362791892,93.88400601038657,4.966737599237869,0.0,0.0),c(169.1243261573505,118.95040839893939,96.91252233330226,9.933475198475739,0.0,2.791816756921795),c(39.59268907151847,42.165256708579264,97.34516752229023,19.866950396951477,0.0,0.0))
targetgene="YALI1_F04590t"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(133.68642544518892,86.9935822619109,137.5811700981702,69.53432638933018,20.88902322997446,24.195745226655553),c(144.19559600120925,95.87047841108547,138.01381528715814,4.966737599237869,0.0,7.444844685124786),c(254.41968718179461,382.5942240294244,190.79652834368883,49.667375992378695,0.0,2.791816756921795),c(59.63343292253399,60.80673862184588,57.97445532438618,19.866950396951477,0.0,2.791816756921795),c(164.48073916748106,71.45901400085538,122.87123367257966,4.966737599237869,0.0,0.0),c(49.12426236651366,70.1274795784792,50.61948711159092,0.0,0.0,0.0))
targetgene="YALI1_F27904g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(40.32588701728733,23.079929987853912,54.945939001470485,9.933475198475739,0.0,0.0),c(29.816716461266996,48.37908401300147,30.28516322915696,0.0,0.0,0.0),c(73.80859320739863,36.83911901907451,72.6843917499767,0.0,0.0,0.0),c(54.0122486716394,50.154463242836385,45.42774484373544,39.733900793902954,0.0,0.0),c(32.74950824434244,87.88127187682835,40.66864776486792,19.866950396951477,0.0,0.0),c(63.54382196663458,113.18042590197591,128.49562112942309,34.76716319466509,0.0,0.0))
targetgene="YALI1_F07344g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(85.53976033970041,124.72039089590287,125.03445961751945,14.900212797713607,26.111279037468073,13.028478198968376),c(76.00818704470521,88.76896149174581,67.92529467110919,0.0,0.0,0.0),c(38.859491125749614,90.54434072158072,60.13768126932596,19.866950396951477,0.0,0.0),c(123.42165420442487,119.39425320639812,134.12000858626652,19.866950396951477,0.0,4.653027928202992),c(39.83708838677476,82.5551341873236,73.11703693896466,0.0,0.0,0.0),c(4.154788359356877,46.60370478316655,29.419872851181047,0.0,0.0,5.58363351384359))
targetgene="YALI1_B07652g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(236.0897385375731,107.41044340501243,166.56839776036327,39.733900793902954,10.44451161498723,37.22422342562393),c(284.7252022735742,423.42794631562754,374.6707336635704,34.76716319466509,20.88902322997446,17.681506127171367),c(270.061243358197,222.36624853682326,64.89677834819348,0.0,15.666767422480845,7.444844685124786),c(230.22415497142222,215.2647316174836,178.24981786303812,9.933475198475739,5.222255807493615,22.33453405537436),c(31.03871303754843,26.630688447523745,82.20258590771175,4.966737599237869,0.0,6.514239099484188),c(48.3910644207448,23.967619602771368,26.824001717253307,0.0,0.0,1.8612111712811965))
targetgene="YALI1_D22768g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(99.95931993982133,58.58751458455224,97.77781271127819,19.866950396951477,0.0,7.444844685124786),c(130.75363366211346,131.82190781524253,142.77291236602568,29.800425595427214,0.0,6.514239099484188),c(110.46849049584166,44.82832555333164,81.76994071872379,9.933475198475739,0.0,0.0),c(116.82287269250511,124.72039089590287,93.45136082139862,89.40127678628164,0.0,0.9306055856405983),c(125.37684872647516,86.10589264699344,109.45923281395301,14.900212797713607,0.0,0.0),c(19.307545905246663,17.75379229834916,28.121937284217175,34.76716319466509,20.88902322997446,1.8612111712811965))
targetgene="YALI1_F04110g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(99.22612199405246,113.62427070943464,107.29600686901323,24.833687996189347,10.44451161498723,7.444844685124786),c(53.52345004112682,21.748395565477725,37.207486252964266,4.966737599237869,0.0,0.0),c(94.09373637367044,134.4849766599949,137.14852490918224,14.900212797713607,0.0,0.0),c(195.5194522050295,118.50656359148066,195.98827061154432,24.833687996189347,20.88902322997446,40.01604018254572),c(25.66192810191012,9.32074095663331,27.68929209522922,0.0,0.0,0.0),c(45.70267195292564,67.90825554118554,48.45626116665113,4.966737599237869,0.0,0.0))
targetgene="YALI1_F00821g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(196.25265015079836,112.73658109451718,169.59691408327896,9.933475198475739,0.0,0.0),c(33.97150482062387,94.09509918125056,73.11703693896466,34.76716319466509,5.222255807493615,0.0),c(30.549914407035857,13.7591890312206,24.660775772313524,0.0,0.0,0.0),c(114.62327885519854,34.176050174322135,107.29600686901323,9.933475198475739,0.0,0.0),c(40.08148770203105,23.079929987853912,30.28516322915696,4.966737599237869,0.0,0.9306055856405983),c(40.32588701728733,62.5821178516808,16.873162370530306,9.933475198475739,0.0,0.0))
targetgene="YALI1_E34028g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(121.95525831288714,75.89746207544268,67.06000429313326,0.0,0.0,0.0),c(64.52141922765973,37.28296382653324,72.25174656098875,14.900212797713607,0.0,0.0),c(31.283112352804718,46.159859975707825,36.34219587498835,9.933475198475739,0.0,0.0),c(51.07945688856395,19.085326720725348,32.44838917409674,0.0,0.0,0.0),c(17.596750698452652,35.951429404157054,25.526066150289438,0.0,0.0,0.9306055856405983),c(10.753569871276621,53.705221702506215,24.660775772313524,0.0,0.0,0.0))
targetgene="YALI1_C01301t"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Per-sgRNA normalized read counts for the gene below: one numeric vector per
# sgRNA, ordered to match the sample labels in `collabel`.
targetmat=list(c(282.7700077515239,245.8900233321359,247.0404029121232,84.43453918704378,26.111279037468073,8.375450270765384),c(140.77400558762122,87.88127187682835,128.92826631841106,19.866950396951477,0.0,9.306055856405983),c(62.810624020865724,62.13827304422207,28.98722766219309,0.0,0.0,2.791816756921795),c(77.23018362098665,46.159859975707825,89.12490893151906,9.933475198475739,0.0,2.791816756921795),c(48.879863051257374,23.967619602771368,85.23110223062744,0.0,0.0,2.791816756921795),c(13.441962339095777,9.32074095663331,13.412000858626653,4.966737599237869,0.0,0.0))
targetgene="YALI1_E18539g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Pseudocount of 1 so zero counts remain plottable on the log axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
# One line per sgRNA; the first iteration sets up the axes, later ones overlay.
# ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original `i %% n`
# yields the invalid index 0 when i is a multiple of length(colors).
for(i in 1:length(targetmat)){
  vali=targetmat[[i]]+1;
  if(i==1){
    plot(1:length(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=colors[((i - 1) %% length(colors)) + 1],xaxt='n',log='y')
    axis(1,at=1:length(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(1:length(vali),vali,type='b',pch=20,col=colors[((i - 1) %% length(colors)) + 1])
  }
}
#
#
# Parameters ----
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Column of the gene summary table holding the positive-selection RRA score.
startindex <- 9
# outputfile='__OUTPUT_FILE__'
# Genes to highlight on the ranked-score plots (positive selection hits).
targetgenelist <- c(
  "Nontargeting", "YALI1_C00098g", "YALI1_E00019g", "YALI1_D00040g",
  "YALI1_A00032g", "YALI1_B30333g", "YALI1_C33545g", "YALI1_D00062g",
  "YALI1_A22415g", "YALI1_A22496g"
)
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel <- 'PO1f_Cas9_1,PO1f_Cas9_2,PO1f_Cas9_3_vs_PO1f_1,PO1f_2,PO1f_3 pos.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# Fixed 28-color palette (Set1/Set2/Set3 hues) so RColorBrewer is not needed.
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors <- c(
  "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
  "#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F",
  "#E5C494", "#B3B3B3", "#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3",
  "#FDB462", "#B3DE69", "#FCCDE5", "#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F"
)
######
# function definition
# Plot a named numeric vector (RRA scores or p values) in ranked order on a
# log-scaled, reversed y axis (smallest/best values on top), then highlight
# each gene in `tglist` as a colored point with a legend.
# `val`: named numeric vector (names are gene ids), assumed pre-sorted.
# `tglist`: character vector of gene ids to highlight.
# Relies on the script-level `colors` palette. Called for side effects only.
plotrankedvalues<-function(val, tglist, ...){
  plot(val,log='y',ylim=c(max(val),min(val)),type='l',lwd=2, ...)
  if(length(tglist)>0){
    for(i in 1:length(tglist)){
      targetgene=tglist[i];
      tx=which(names(val)==targetgene);ty=val[targetgene];
      # ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original
      # `i %% n` produced the invalid index 0 once i reached length(colors).
      points(tx,ty,col=colors[((i - 1) %% length(colors)) + 1],cex=2,pch=20)
      # text(tx+50,ty,targetgene,col=colors[i])
    }
    legend('topright',tglist,pch=20,pt.cex = 2,cex=1,col=colors)
  }
}
# Scatter-plot `val` in a shuffled order (log y axis, reversed) and label the
# genes in `targetgenelist`. Out of 20 random shuffles, keep the one whose
# highlighted genes are most evenly spread along x (largest minimum gap),
# skipping shuffles that put a highlighted gene in the rightmost 10%.
# `val`: named numeric vector. Uses the script-level `colors` palette.
# Called for side effects only.
plotrandvalues<-function(val,targetgenelist, ...){
  # choose the one with the best distance distribution
  mindiffvalue=0;
  randval=val;
  for(i in 1:20){
    randval0=sample(val)
    vindex=sort(which(names(randval0) %in% targetgenelist))
    if(max(vindex)>0.9*length(val)){
      # print('pass...')
      next;
    }
    mindiffind=min(diff(vindex));
    if (mindiffind > mindiffvalue){
      mindiffvalue=mindiffind;
      randval=randval0;
      # print(paste('Diff: ',mindiffvalue))
    }
  }
  plot(randval,log='y',ylim=c(max(randval),min(randval)),pch=20,col='grey', ...)
  if(length(targetgenelist)>0){
    for(i in 1:length(targetgenelist)){
      targetgene=targetgenelist[i];
      tx=which(names(randval)==targetgene);ty=randval[targetgene];
      # ((i - 1) %% n) + 1 keeps the palette index in 1..n; the original used
      # `i %% n` (invalid index 0 at multiples of n) and an unwrapped
      # `colors[i]` in text(), which is NA past the palette length.
      points(tx,ty,col=colors[((i - 1) %% length(colors)) + 1],cex=2,pch=20)
      text(tx+50,ty,targetgene,col=colors[((i - 1) %% length(colors)) + 1])
    }
  }
}
# set.seed(1235)
# Rank the positive-selection RRA scores (column `startindex`) and draw the
# ranked curve with the target genes highlighted.
pvec=gstable[,startindex]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
# '\n' (a real newline) instead of the original '\\n', which rendered as the
# literal two characters "\n" in the plot title.
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \n',samplelabel))
# Repeat for the p-value column (startindex + 1).
pvec=gstable[,startindex+1]
names(pvec)=gstable[,'id']
pvec=sort(pvec);
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(177.67830219132054,163.3348891448123,157.91549398060414,2051.26262848524,1180.229812493557,890.5895454580525),c(153.48276998094815,105.19121936771879,110.75716838091688,735.0771646872047,376.0024181395403,773.3332416673371),c(303.7883488635646,280.9537631213755,285.54582473205136,3188.645538710712,1947.9014161951184,2312.5548803168867),c(62.810624020865724,42.60910151603799,18.603743126482133,337.73815674817513,412.5582087919956,576.975463097171),c(101.67011514661533,173.09947490890434,102.96955497913366,814.5449662750106,788.5606269315358,589.0733357104987),c(200.65183782541152,185.52712951774876,157.05020360262824,973.4805694506224,940.0060453488506,945.4952750108479),c(290.5907858397251,271.1891773572835,323.18595617400354,2433.701423626556,2172.4584159173437,2319.069119416371),c(74.78619046842378,5.769982496963478,42.8318737098077,342.70489434741296,663.226487551689,322.9201382172876),c(197.47464672707977,44.38448074587291,31.58309879612083,536.4076607176899,762.4493478940677,506.2494385884855),c(99.22612199405246,118.95040839893939,63.598842781229614,615.8754623054958,725.8935572416125,834.7532103196166),c(21.01834111204067,52.81753208758876,11.681420102674828,198.66950396951478,334.22437167959134,338.7404331731778),c(122.44405694339972,82.99897899478233,80.03935996277197,1067.8485838361419,955.6728127713315,852.434716446788),c(452.6275318546433,303.5898483017707,328.810343630847,24.833687996189347,0.0,0.0),c(93.84933705841415,93.2074095663331,141.4749767990618,625.8089375039715,537.8923481718423,585.3509133679363),c(427.698801698502,360.40198365648797,408.417058404631,2279.732558050182,1895.6788581201822,2307.901852388684),c(113.15688296366082,122.05732205115049,98.6431030892541,993.3475198475738,762.4493478940677,688.6481333740427),c(131.24243229262603,102.52815052296641,118.97742697168806,1246.6511374087052,814.6719059690039,866.393800231397),c(82.36256924136867,70.1274795784792,89.99019930949497,809.5782286757727,616.2261852842465,565.80819
60694838),c(53.76784935638311,68.35210034864427,86.96168298657926,943.6801438551952,464.7807668669317,657.9381490479029),c(222.8921755137336,216.15242123240105,175.2213015401224,1157.2498606224235,1697.2331374354249,1272.137835570698),c(436.00837841721574,222.36624853682326,215.45730411600238,1698.6242589393512,1608.4547887080334,2364.6687931127603),c(123.42165420442487,156.67721703293137,71.38645618301284,1529.7551805652638,924.3392779263698,870.1162225739594),c(50.59065825805138,100.30892648567277,109.45923281395301,516.5407103207384,381.22467394703386,436.45401966544057),c(132.95322749942005,131.3780630077838,78.7414243958081,1172.1500734201372,600.5594178617657,574.1836463402491),c(151.03877682838527,94.98278879616802,76.14555326188035,591.0417743093064,600.5594178617657,638.3954317494504),c(64.27701991240345,35.50758459669832,86.09639260860335,650.6426255001609,699.7822782041444,350.83830578650554),c(49.61306099702623,44.82832555333164,40.23600257587996,630.7756751032094,349.8911391020722,301.51620974755383),c(69.16500621752918,137.591890312206,68.35793986009713,526.4741855192142,762.4493478940677,641.1872485063722),c(24.1955322103724,39.05834305636816,10.816129724698914,322.8379439504615,344.66888329457856,257.7777472224457),c(166.68033300478763,152.68261376580278,35.47690549701244,481.7735471260733,391.6691855620211,905.4792348283021),c(109.49089323481651,64.35749708151572,79.606714773784,531.440923118452,793.7828827390294,407.60524651058205),c(16.619153437427507,49.26677362791892,44.995099654747484,243.3701423626556,365.557906524553,430.870386151597),c(65.98781511919745,130.49037339286633,177.3845274850622,342.70489434741296,564.0036272093104,790.0841422088679),c(162.76994396068704,186.4148191326662,141.4749767990618,1390.6865277866034,710.2267898191316,655.1463322909811),c(86.27295828546926,84.33051341715853,64.89677834819348,759.910852683394,720.6713014341188,593.7263636387017),c(256.3748817038449,230.35545507108037,212.86143298207463,1405.586740584317,124
2.8968821834803,1283.305102598385),c(229.49095702565336,335.5466744387992,214.1593685490385,1485.0545421721229,1117.5627428036335,938.050430325723),c(69.16500621752918,80.77975495748869,64.89677834819348,1216.850711813278,699.7822782041444,529.5145782295004),c(198.45224398810493,234.35005833820895,146.23407387792932,1961.8613516989583,1383.897788985808,1619.2537190146409),c(104.35850761443449,110.51735705722353,110.32452319192892,596.0085119085443,835.5609291989783,543.4736620141094),c(137.3524151740332,165.55411318210594,118.97742697168806,948.646881454433,1373.4532773708206,920.3689241985517),c(788.9209896472939,648.4572636972032,875.2412173226361,7157.06888050177,6350.263061912236,6796.212591933289),c(118.7780672145554,67.90825554118554,93.88400601038657,1033.0814206414768,470.0030226744253,512.7636776879697),c(180.3666946591397,185.52712951774876,149.69523538983296,1062.881846236904,1284.6749286434292,885.005911944209),c(325.7842872366304,302.70215868685324,255.26066150289438,2523.1027004128377,2329.126090142152,2239.0370390512794),c(217.02659194758274,122.94501166606796,97.34516752229023,486.7402847253112,689.3377665891571,546.2654787710312),c(440.8963647223415,231.24314468599783,151.85846133477276,1430.4204285805063,1603.2325329005398,2020.3447264257388),c(211.16100838143186,183.75175028791384,160.51136511453188,695.3432638933017,1274.230417028442,912.9240795134269),c(54.989845932664544,21.304550758018994,32.8810343630847,307.9377311527479,428.2249762144764,386.20131804084826),c(44.969474007156784,117.61887397656321,122.4385884835917,804.6114910765348,464.7807668669317,534.1676061577034),c(106.31370213648478,70.57132438593793,67.49264948212122,586.0750367100686,584.8926504392848,629.0893758930445),c(57.92263771573999,23.52377479531264,68.35793986009713,730.1104270879667,548.3368597868296,485.7761157043923),c(81.8737706108561,46.60370478316655,21.63225944939783,163.90234077484968,287.2240694121488,331.295588488053),c(105.33610487545964,60.36289381438715,42.3992
2852081974,516.5407103207384,522.2255807493615,398.2991906541761),c(230.7129536019348,28.40606767735866,61.86826202527779,283.10404315655853,297.66858102713604,213.108679111697),c(299.87795981946397,229.0239206487042,215.45730411600238,1822.792698920298,1624.1215561305141,1389.3941393614132),c(159.59275286235533,162.89104433735358,117.67949140472419,859.2456046681514,788.5606269315358,620.713925622279),c(150.79437751312898,105.63506417517752,245.74246734515933,1485.0545421721229,2099.346834612433,1160.465165293826),c(55.47864456317712,16.866102683431706,65.7620687261694,615.8754623054958,402.11369717700836,323.8507438029282),c(15.885955491658645,43.05294632349672,37.207486252964266,198.66950396951478,339.446627487085,197.28838415580682),c(71.85339868534834,89.65665110666328,64.89677834819348,600.9752495077822,637.115208514221,391.7849515546919),c(141.99600216390266,107.85428821247116,148.82994501185706,655.6093630993987,689.3377665891571,723.0805400427448),c(92.38294116687644,3.9946032671285616,41.966583331831785,134.10191517942246,219.33474391473183,276.3898589352577),c(11.486767817045482,8.433051341715853,23.362840205349656,39.733900793902954,198.44572068475736,191.70475064196324),c(132.46442886890748,175.31869894619797,317.5615687171601,1311.2187261987974,877.3389756589273,951.0789085246914),c(113.64568159417338,109.62966744230607,193.82504466660453,1142.34964782471,673.6709991666763,751.9293131976034),c(38.37069249523704,35.951429404157054,109.02658762496506,461.9065967291218,407.33595298450194,481.1230877761893),c(36.41549797318674,67.02056592626809,77.44348882884422,789.7112782788212,386.4469297545275,687.7175277884021),c(152.99397135043557,112.29273628705845,157.05020360262824,1196.9837614163264,966.1173243863187,727.7335679709479),c(31.03871303754843,51.48599766521257,40.23600257587996,233.43666716417985,276.7795577971616,389.9237403834107),c(160.3259508081242,167.32949241194086,156.61755841364027,1465.1875917751715,981.7840918087995,1045.070072674392),c(92.
62734048213272,114.95580513181083,145.80142868894137,1033.0814206414768,1007.8953708462676,671.8972328325119),c(284.7252022735742,177.0940781760329,89.99019930949497,700.3100014925395,835.5609291989783,674.6890495894337),c(99.47052130930875,113.18042590197591,161.3766554925078,615.8754623054958,799.0051385465231,482.9842989474705),c(33.4827061901113,57.25598016217605,39.37071219790405,253.30361756113135,188.00120906977014,161.9253719014641),c(71.60899937009205,38.61449824890943,86.96168298657926,665.5428382978745,459.5585110594381,309.8916600183192),c(160.3259508081242,126.05192531827906,54.08064862349457,561.2413487138792,678.89325497417,403.8828241680196),c(74.5417911531675,41.2775670936618,65.7620687261694,357.60510714512657,203.66797649225097,239.16563550963374),c(123.91045283493743,115.84349474672828,97.34516752229023,695.3432638933017,819.8941617764975,481.1230877761893),c(250.26489882243774,187.7463535550424,216.75523968296625,1251.617875007943,1613.6770445155269,1693.702165865889),c(152.2607734046667,82.5551341873236,291.60285737788274,998.3142574468118,1488.3429051356802,1029.2497777185017),c(133.68642544518892,119.83809801385685,62.30090721426575,754.9441150841561,940.0060453488506,981.7888928508312),c(75.03058978368007,125.1642357033616,116.81420102674828,928.7799310574816,840.783185006472,1206.0648389902153),c(76.49698567521779,22.192240372936453,84.36581185265153,188.73602877103903,203.66797649225097,189.84353947068203),c(201.62943508643667,103.85968494534261,64.46413315920553,357.60510714512657,511.78106913437426,372.2422342562393),c(59.14463429202142,182.42021586553764,111.18981356990484,730.1104270879667,981.7840918087995,867.3244058170376),c(139.06321038082723,125.1642357033616,158.78078435858006,779.7778030803455,1316.0084634883908,1121.379730696921),c(320.89630093150464,359.51429404157057,487.59112798942704,3114.144474722144,3175.131530956118,2321.8609361732924),c(153.48276998094815,143.80571761662821,195.55562542255637,1524.788442966026,1859.1230
674677267,1162.3263764651072),c(75.76378772944892,19.973016335642807,17.73845274850622,104.30148958399525,88.77834872739145,218.69231262554058),c(55.96744319368969,33.73220536686341,37.207486252964266,451.9731215306461,417.78046459948916,536.0288173289846),c(311.60912695176575,146.9126312688393,260.45240377074987,2652.2378779930223,2214.236462377293,2192.5067597692496),c(47.65786647497594,8.87689614917458,37.64013144195222,228.469929564942,88.77834872739145,136.79902108916795),c(67.94300964124774,57.699824969634776,61.00297164730188,625.8089375039715,376.0024181395403,469.9558207485021),c(136.13041859775177,138.03573511966474,125.46710480650741,809.5782286757727,652.7819759367019,823.5859432919294),c(110.71288981109795,142.47418319425202,185.60478607583337,908.91298066053,1185.4520683010505,1515.9564990085346),c(117.0672720077614,131.3780630077838,149.262590200845,819.5117038742484,605.7816736692594,598.3793915669047),c(57.92263771573999,82.11128937986487,48.888906355639094,551.3078735154035,475.22527848191896,371.3116286705987),c(153.97156861146073,73.67823803814902,146.66671906691727,824.4784414734863,987.0063476162932,952.009514110332),c(200.65183782541152,263.1999708230263,180.84568899696583,933.7466686567194,1650.2328351679823,1195.8281775481687),c(61.099828814071714,46.159859975707825,70.52116580503692,432.1061711336946,767.6716037015614,339.6710387588184),c(99.95931993982133,98.08970244837913,92.15342525443475,392.37227033979167,370.78016233204664,375.0340510131611),c(434.0531838951655,405.2303092098196,311.07189088234077,2135.6971676722837,2799.1291128165776,2327.444569687136),c(31.03871303754843,44.82832555333164,41.966583331831785,273.1705679580828,245.44602295219988,162.8559774871047),c(105.58050419071593,87.88127187682835,113.35303951484462,1331.085676595749,574.4481388242976,675.6196551750743),c(102.4033130923842,71.01516919339664,139.74439604310996,536.4076607176899,501.33655751938704,569.5306184120461),c(107.53569871276622,70.1274795784792,109.8918780
0294097,1241.6843998094673,626.6706968992338,671.8972328325119),c(223.38097414424618,102.52815052296641,124.6018144285315,854.2788670689135,1295.1194402584165,855.2265332037098),c(88.22815280751955,38.170653441450696,102.5369097901457,372.5053199428402,642.3374643217146,320.1283214603658),c(38.12629317998075,11.983809801385684,66.62735910414531,253.30361756113135,313.3353484496169,375.0340510131611),c(639.3486087104465,635.5857642809,625.1722980875973,2513.1692252143616,2553.6830898643775,2452.145718162976),c(94.58253500418301,88.76896149174581,51.48477748956683,317.87120635122363,475.22527848191896,383.4095012839265),c(108.26889665853508,77.67284130527759,83.50052147467562,456.939859129884,1034.0066498837357,483.9149045331111),c(12.219965762814343,13.315344223761873,3.0285163229156957,19.866950396951477,41.77804645994892,11.16726702768718),c(198.6966433033612,142.0303383867933,84.79845704163948,1246.6511374087052,1143.6740218411016,1013.4294827626115),c(197.71904604233606,143.36187280916948,226.70607902968925,1038.0481582407147,924.3392779263698,1061.8209732159225),c(116.33407406199254,49.710618435377654,112.05510394788075,442.0396463321704,705.004534011638,671.8972328325119),c(177.43390287606425,107.85428821247116,145.80142868894137,809.5782286757727,642.3374643217146,566.7388016551243),c(161.30354806914931,63.469807466598255,60.57032645831392,749.9773774849183,532.6700923643486,591.8651524674204),c(180.611093974396,269.41379812744856,189.93123796571294,1485.0545421721229,1394.3423006007952,1240.4972456589176),c(58.16703703099627,19.085326720725348,48.02361597766318,188.73602877103903,381.22467394703386,463.4415816490179),c(60.855429498815425,137.591890312206,85.6637474196154,501.6404975230248,297.66858102713604,598.3793915669047),c(72.5865966311172,120.7257876287743,179.115108241014,1201.9504990155644,1122.784998611127,905.4792348283021),c(47.16906784446336,47.49139439808401,89.557554120507,451.9731215306461,673.6709991666763,396.43797948289483),c(101.18131651610
275,53.705221702506215,12.979355669638696,387.4055327405538,250.66827875969352,265.2225919075705),c(162.76994396068704,149.5757001135917,205.0738195802914,1360.8861021911762,1598.0102770930462,1508.5116543234099),c(34.94910208164902,37.72680863399197,42.39922852081974,332.77141914893724,287.2240694121488,209.3862567691346),c(167.4135309505565,89.65665110666328,124.16916923954354,854.2788670689135,746.7825804715869,462.5109760633773),c(326.273085867143,378.5996207622959,401.06209019183575,1907.2272381073417,2642.461438591769,2893.25276575662),c(127.82084187903803,84.77435822461725,59.705036080338004,908.91298066053,819.8941617764975,559.2939569699995),c(225.58056798155278,35.0637397892396,40.23600257587996,432.1061711336946,553.5591155943232,436.45401966544057),c(79.9185760888058,68.795945156103,58.40710051337414,541.3743983169278,224.55699972222544,211.24746794041582),c(18.329948644221513,33.73220536686341,42.39922852081974,412.23922073674316,470.0030226744253,470.8864263341427),c(166.19153437427505,58.14366977709351,124.6018144285315,918.8464558590058,1028.784394076242,731.4559903135103),c(129.77603640108833,154.90183780309644,138.01381528715814,610.9087247062579,825.1164175839912,537.8900285002658),c(56.456241824202266,62.5821178516808,22.064904638385784,442.0396463321704,799.0051385465231,559.2939569699995),c(185.74347959477802,217.92780046223598,196.85356098952025,1147.3163854239478,767.6716037015614,875.6998560878029),c(301.3443557110017,219.25933488461214,307.6107293704371,2011.528727691337,1974.0126952325863,1946.8268851601315),c(178.16710082183312,298.2637106122659,324.9165369299554,1713.524471737065,1598.0102770930462,1501.066809638285),c(73.80859320739863,59.475204199469694,39.803357386892,635.7424127024473,564.0036272093104,390.8543459690513),c(62.566224705609436,120.28194282131558,75.7129080728924,983.4140446490982,710.2267898191316,523.9309447156568),c(28.10592125447299,6.657672111880936,46.72568041069931,243.3701423626556,282.0018136046552,181.46808919
991665),c(43.74747743087535,17.75379229834916,38.50542181992813,288.0707807557964,235.00151133721266,258.7083528080863),c(9.042774664482614,21.748395565477725,22.064904638385784,153.96886557637396,104.44511614987229,93.06055856405983),c(24.1955322103724,25.299154025147555,60.57032645831392,233.43666716417985,329.0021158720977,167.50900541530768),c(160.57035012338048,150.46338972850916,127.19768556245923,1683.7240461416377,1164.563045071076,1151.15910943742),c(136.61921722826435,168.66102683431706,160.07871992554394,1216.850711813278,1206.341091531025,1296.3335807973533),c(420.3668222408134,424.75948073800373,371.64221734065467,2910.5082331533913,2381.3486482170883,1986.8429253426773),c(88.47255212277584,108.29813301992989,132.82207301930268,675.4763134963503,1086.2292079586719,689.5787389596833),c(210.1834111204067,154.01414818817898,143.20555755501363,1266.5180878056567,987.0063476162932,984.580709607753),c(48.879863051257374,37.28296382653324,28.98722766219309,198.66950396951478,485.6697900969062,328.50377173113117),c(66.47661374971003,65.6890315038919,109.89187800294097,715.2102142902531,856.4499524289528,626.2975591361226),c(92.87173979738901,106.52275379009498,48.45626116665113,481.7735471260733,543.1146039793359,466.23339840593974),c(95.56013226520817,83.8866686096998,89.557554120507,471.8400719275976,621.4484410917402,519.2779167874538),c(76.00818704470521,70.1274795784792,62.30090721426575,456.939859129884,569.225883016804,482.0536933618299),c(104.84730624494706,90.10049591412199,180.84568899696583,531.440923118452,699.7822782041444,491.3597492182359),c(68.43180827176032,105.63506417517752,115.94891064877235,1539.6886557637395,1509.2319283656548,964.1073867236598),c(217.75978989335158,295.60064176751354,214.59201373802645,1966.8280892981963,1624.1215561305141,1619.2537190146409),c(69.16500621752918,44.82832555333164,24.660775772313524,412.23922073674316,255.89053456718713,358.28315047163034),c(42.03668222408134,23.52377479531264,22.930195016361697,193.702766
3702769,188.00120906977014,179.60687802863546),c(72.34219731586091,204.6124562384741,57.10916494641027,705.2767390917775,553.5591155943232,893.3813622149743),c(48.879863051257374,46.159859975707825,74.41497250592853,397.33900793902956,741.5603246640933,463.4415816490179),c(99.22612199405246,106.52275379009498,65.7620687261694,223.5031919657041,376.0024181395403,365.7279951567551),c(111.69048707212309,86.10589264699344,109.45923281395301,834.411916671962,626.6706968992338,630.9505870643256),c(117.55607063827398,128.71499416303143,131.95678264132675,1698.6242589393512,1096.6737195736591,997.6091878067214),c(120.24446310609314,84.77435822461725,82.20258590771175,809.5782286757727,731.115813049106,469.9558207485021),c(62.07742607509686,77.22899649781886,64.46413315920553,427.13943353445677,255.89053456718713,294.07136506242904),c(86.27295828546926,83.44282380224107,94.74929638836248,958.5803566529088,506.5588133268806,589.0733357104987),c(55.47864456317712,94.53894398870929,67.49264948212122,516.5407103207384,464.7807668669317,763.0965802252906),c(86.27295828546926,56.36829054725859,107.72865205800119,923.8131934582436,772.893859509055,704.4684283299329),c(128.79843914006318,168.21718202685832,178.24981786303812,1306.2519885995596,715.4490456266252,1338.2108321511803),c(31.527511668061006,52.37368728013003,48.02361597766318,541.3743983169278,360.3356507170594,303.377420918835),c(132.95322749942005,55.48060093234113,92.5860704434227,715.2102142902531,543.1146039793359,648.632093191497),c(46.435869898694506,27.962222869899932,14.277291236602567,491.7070223245491,161.88993003230206,239.16563550963374),c(53.27905072587053,65.24518669643317,54.945939001470485,377.47205754207806,276.7795577971616,259.6389583937269),c(24.1955322103724,47.04754959062528,13.84464604761461,198.66950396951478,292.4463252196424,370.3810230849581),c(51.07945688856395,25.299154025147555,66.62735910414531,531.440923118452,684.1155107816635,883.1447007729278),c(9.2871739797389,55.924445739799864,21.632
25944939783,422.1726959352189,396.8914413695147,318.2671102890846),c(70.14260347855434,53.705221702506215,85.6637474196154,422.1726959352189,537.8923481718423,322.9201382172876),c(105.58050419071593,151.35107934342662,233.19575686450858,749.9773774849183,887.7834872739145,754.7211299545252),c(116.08967474673626,80.77975495748869,136.28323453120632,422.1726959352189,579.6703946317913,561.1551681412808),c(77.47458293624294,79.89206534257123,71.81910137200079,630.7756751032094,396.8914413695147,527.6533670582193),c(21.99593837306582,17.75379229834916,12.114065291662783,144.0353903778982,88.77834872739145,119.11751496199658),c(214.0938001645073,110.51735705722353,17.73845274850622,392.37227033979167,365.557906524553,355.49133371470856),c(51.07945688856395,50.154463242836385,76.14555326188035,168.86907837408756,631.8929527067273,297.79378740499146),c(21.01834111204067,25.742998832606286,15.14258161457848,253.30361756113135,219.33474391473183,158.2029495589017),c(45.94707126818193,52.81753208758876,45.42774484373544,526.4741855192142,329.0021158720977,201.9414120840098),c(37.88189386472446,23.52377479531264,24.228130583325566,139.06865277866035,224.55699972222544,222.41473496810298),c(18.329948644221513,3.5507584596698325,5.6243874568434356,9.933475198475739,167.11218583979567,78.17086919381025),c(116.57847337724883,100.7527712931315,85.23110223062744,680.4430510955881,637.115208514221,682.1338942745585),c(37.63749454946818,92.31971995141565,62.30090721426575,496.6737599237869,396.8914413695147,308.9610544326786),c(150.30557888261643,97.20201283346167,32.8810343630847,625.8089375039715,329.0021158720977,229.85957965322777),c(13.197563023839491,10.652275379009497,35.044260308024484,153.96886557637396,203.66797649225097,154.4805272163393),c(204.3178275542558,258.31767794098033,373.8054432855945,1976.761564496672,2527.5718108269093,1769.0812183027772),c(130.50923434685717,116.28733955418701,247.0404029121232,1261.5513502064189,1138.451766033608,1325.182353952212),c(149.08358
2306335,86.54973745445217,63.598842781229614,546.3411359161656,464.7807668669317,587.2121245392175),c(94.09373637367044,114.5119603243521,80.90465034074788,591.0417743093064,381.22467394703386,325.7119549742094),c(37.39309523421189,50.154463242836385,51.052132300578876,670.5095758971123,626.6706968992338,642.1178540920127),c(90.67214596008242,39.50218786382688,83.50052147467562,620.8421999047337,511.78106913437426,618.8527144509978),c(162.03674601491818,201.94938739372174,176.51923710708627,2021.4622028898127,1733.78892808788,1145.5754759235765),c(183.54388575747143,95.42663360362675,189.49859277672496,1127.4494350269963,908.6725105038889,788.2229310375867),c(58.655835661508846,102.97199533042514,154.88697765768845,705.2767390917775,971.3395801938124,732.3865958991508),c(78.45218019726808,84.33051341715853,77.01084363985626,973.4805694506224,621.4484410917402,536.9594229146252),c(20.77394179678438,16.422257875972974,35.909550686000394,188.73602877103903,135.77865099483398,108.88085351995),c(35.68230002741788,16.866102683431706,46.72568041069931,218.53645436646624,308.11309264212326,321.989532631647),c(45.45827263766935,26.186843640065014,55.37858419045844,526.4741855192142,454.33625525194446,490.4291436325953),c(92.87173979738901,94.98278879616802,70.52116580503692,998.3142574468118,1086.2292079586719,693.3011613022456),c(151.52757545889784,146.02494165392187,136.71587972019427,1281.4183006033702,1185.4520683010505,872.9080393308811),c(70.87580142432319,73.67823803814902,41.966583331831785,372.5053199428402,334.22437167959134,389.9237403834107),c(79.9185760888058,44.38448074587291,24.660775772313524,307.9377311527479,235.00151133721266,293.14075947678845),c(31.283112352804718,25.299154025147555,10.816129724698914,79.46780158780591,31.33353484496169,19.542717298452562),c(10.99796918653291,7.545361726798394,16.007871992554392,153.96886557637396,292.4463252196424,135.86841550352736),c(126.35444598750031,153.12645857326152,145.3687834999534,993.3475198475738,1075.784696
3436846,835.6838159052572),c(19.063146589990374,17.75379229834916,34.61161511903653,223.5031919657041,208.89023229974458,124.70114847584017),c(114.13448022468596,99.86508167821404,202.47794844636368,918.8464558590058,1263.7859054134547,1107.420646912312),c(16.37475412217122,62.5821178516808,115.5162654597844,1544.6553933629773,402.11369717700836,230.79018523886836),c(59.87783223779028,65.6890315038919,64.89677834819348,377.47205754207806,579.6703946317913,406.67464092494146),c(160.0815514928679,82.5551341873236,122.4385884835917,983.4140446490982,1091.4514637661655,1039.4864391605483),c(75.27498909893636,87.88127187682835,33.74632474106061,442.0396463321704,449.1139994444509,497.8739883177201),c(167.90232958106907,146.9126312688393,222.81227232879763,1703.5909965385893,1060.1179289212039,1088.8085351995),c(183.29948644221514,255.65460909622794,294.1987285118105,1415.5202157827928,1937.456904580131,1551.3195112628773),c(114.86767817045482,120.7257876287743,80.03935996277197,1057.9151086376662,731.115813049106,516.486100030532),c(7.087580142432319,43.05294632349672,35.044260308024484,54.63411359161656,67.88932549741699,86.54631946457563),c(24.68433084088497,27.5183780624412,62.733552403253704,94.36801438551952,41.77804645994892,88.40753063585683),c(87.98375349226328,90.98818552903946,166.13575257137532,839.3786542711999,673.6709991666763,716.5663009432607),c(59.63343292253399,12.871499416303143,42.39922852081974,387.4055327405538,245.44602295219988,233.58200199579016),c(77.47458293624294,159.34028587768373,157.91549398060414,581.1082991108307,866.89446404394,789.1535366232273),c(32.26070961382987,51.48599766521257,45.860390032723394,531.440923118452,396.8914413695147,428.0785693946752),c(169.85752410311937,94.98278879616802,144.5034931219775,1162.2165982216613,1786.0114861628163,1037.6252279892672),c(78.69657951252437,139.36726954204093,184.73949569785745,943.6801438551952,1117.5627428036335,635.6036149925286),c(48.635463736001086,97.6458576409204,37.64013144195222,17
8.80255357256328,193.22346487726375,404.8134297536602),c(24.439931525628687,8.87689614917458,12.114065291662783,134.10191517942246,235.00151133721266,93.06055856405983),c(113.64568159417338,42.165256708579264,95.18194157735044,903.9462430612922,323.7798600646041,539.751239671547),c(14.419559600120925,66.57672111880936,68.7905850490851,511.5739727215005,438.66948782946366,310.8222656039598),c(167.65793026581278,134.92882146745364,232.33046648653269,1107.5824846300447,1331.6752309108717,965.0379923093004),c(50.835057573307665,27.5183780624412,19.901678693446,213.56971676722839,67.88932549741699,140.52144343173035),c(203.09583097797437,154.90183780309644,131.09149226335083,998.3142574468118,1013.1176266537612,907.3404459995833),c(195.76385152028578,164.22257875972974,202.47794844636368,1415.5202157827928,778.1161153165486,662.591176976106),c(204.8066261847684,184.6394399028313,200.74736769041183,1187.0502862178507,1718.1221606653992,1721.6203334351067),c(77.71898225149923,51.04215285775384,105.99807130204935,610.9087247062579,548.3368597868296,576.975463097171),c(14.663958915377211,21.304550758018994,42.39922852081974,392.37227033979167,287.2240694121488,149.82749928813632),c(52.545852780101676,92.76356475887438,134.98529896424245,397.33900793902956,417.78046459948916,555.5715346274371),c(32.505108929086155,2.219224037293645,33.31367955207266,243.3701423626556,135.77865099483398,141.45204901737094),c(42.03668222408134,19.973016335642807,11.248774913686871,39.733900793902954,135.77865099483398,108.88085351995),c(53.76784935638311,58.14366977709351,49.32155154462705,526.4741855192142,433.44723202197,308.03044884703803),c(128.3096405095506,90.54434072158072,96.47987714431432,571.174823912355,1065.3401847286973,503.45762183156364),c(136.86361654352064,245.44617852467718,214.1593685490385,1946.9611389012448,1373.4532773708206,1204.2036278189341),c(45.94707126818193,138.9234247345822,41.101292953855875,665.5428382978745,887.7834872739145,638.3954317494504),c(32.9939075595987
25,35.0637397892396,68.7905850490851,293.0375183550343,470.0030226744253,364.79738957111454),c(28.83911920024185,37.28296382653324,59.27239089135005,382.43879514131595,496.1143017118934,267.0838030788517),c(61.344228129328,32.400670944487224,50.61948711159092,481.7735471260733,255.89053456718713,315.4752935321628),c(154.7047665572296,219.25933488461214,88.25961855354313,1112.5492222292828,2068.0132997674714,957.5931476241756),c(59.63343292253399,125.60808051082033,88.25961855354313,908.91298066053,663.226487551689,444.82946993620595),c(36.904296603699315,15.090723453596787,11.681420102674828,521.5074479199762,292.4463252196424,541.6124508428281),c(42.525480854593916,15.090723453596787,15.14258161457848,278.1373055573207,292.4463252196424,139.59083784608973),c(83.5845658176501,85.66204783953471,89.99019930949497,496.6737599237869,553.5591155943232,574.1836463402491),c(159.10395423184275,148.2441656912155,167.86633332732714,2100.9300044776187,1519.6764399806418,1386.6023226044913),c(42.525480854593916,13.7591890312206,23.362840205349656,144.0353903778982,203.66797649225097,73.51784126560726),c(20.28514316627181,41.72141190112053,52.7827130565307,288.0707807557964,198.44572068475736,162.8559774871047),c(68.9206069022729,15.978413068514246,20.33432388243396,163.90234077484968,261.11279037468074,26.05695639793675),c(84.31776376341897,31.069136522111034,64.89677834819348,625.8089375039715,308.11309264212326,396.43797948289483),c(96.29333021097702,49.710618435377654,134.5526537752545,645.675887900923,788.5606269315358,726.8029623853072),c(113.64568159417338,97.20201283346167,75.28026288390444,774.8110654811076,564.0036272093104,502.52701624592305),c(73.56419389214234,75.45361726798394,97.77781271127819,859.2456046681514,804.2273943540167,497.8739883177201),c(169.85752410311937,87.88127187682835,138.01381528715814,814.5449662750106,631.8929527067273,730.5253847278697),c(182.32188918119,110.0735122497648,68.7905850490851,372.5053199428402,893.0057430814081,494.15156597515767
),c(66.72101306496631,69.68363477102046,53.21535824551866,471.8400719275976,637.115208514221,473.67824309106453),c(19.551945220502947,24.4114644102301,56.67651975742231,655.6093630993987,198.44572068475736,220.5535237968218),c(30.305515091779572,22.63608518039518,80.90465034074788,451.9731215306461,407.33595298450194,382.4788956982859),c(30.305515091779572,25.742998832606286,67.06000429313326,402.3057455382674,464.7807668669317,270.80622542141407),c(120.977661051862,76.34130688290139,89.99019930949497,1475.1210669736472,981.7840918087995,615.1302921084355),c(45.70267195292564,27.074533254982473,54.945939001470485,437.0729087329325,658.0042317441954,446.69068110748714),c(60.611030183559144,99.42123687075531,97.34516752229023,804.6114910765348,579.6703946317913,584.4203077822957),c(31.03871303754843,21.748395565477725,8.652903779759132,129.1351775801846,99.22286034237868,314.54468794652223),c(101.67011514661533,189.52173278487732,103.83484535710957,486.7402847253112,940.0060453488506,723.0805400427448),c(5.132385620382024,31.512981329569765,64.89677834819348,188.73602877103903,156.66767422480845,50.252701624592305),c(36.904296603699315,54.59291131742368,53.21535824551866,298.00425595427214,198.44572068475736,227.06776289630596),c(21.507139742553242,26.630688447523745,46.72568041069931,407.2724831375053,193.22346487726375,294.07136506242904),c(68.43180827176032,67.46441073372682,37.207486252964266,531.440923118452,720.6713014341188,600.2406027381859),c(74.78619046842378,89.21280629920454,99.94103865621797,1087.7155342330934,511.78106913437426,355.49133371470856),c(54.745446617408255,43.05294632349672,25.958711339277393,253.30361756113135,349.8911391020722,337.80982758753714),c(137.3524151740332,93.2074095663331,108.5939424359771,794.6780158780591,485.6697900969062,586.2815189535769),c(36.65989728844303,64.35749708151572,67.92529467110919,109.26822718323312,391.6691855620211,132.14599316096496),c(160.81474943863677,180.64483663570272,93.01871563241066,963.5470942521466,
908.6725105038889,772.4026360816965),c(43.99187674613164,114.06811551689337,13.84464604761461,357.60510714512657,266.3350461821743,124.70114847584017),c(80.89617334983095,128.2711493555727,100.37368384520592,854.2788670689135,783.3383711240422,768.6802137391342),c(52.30145346484539,93.65125437379183,71.81910137200079,412.23922073674316,245.44602295219988,283.83470362038247),c(28.350320569729277,47.935239205542736,14.277291236602567,367.53858234360234,266.3350461821743,157.27234397326112),c(26.395126047678982,79.4482205351125,55.811229379446395,307.9377311527479,595.3371620542721,390.8543459690513),c(4.887986305125737,0.0,0.0,9.933475198475739,57.44481388242976,29.779378740499144),c(59.87783223779028,93.65125437379183,60.13768126932596,397.33900793902956,517.0033249418678,545.3348731853906),c(68.9206069022729,60.36289381438715,164.83781700441145,720.1769518894911,736.3380688565996,376.8952621844423),c(40.814685647799905,19.52917152818408,59.705036080338004,407.2724831375053,344.66888329457856,313.61408236088164),c(214.0938001645073,216.15242123240105,200.3147225014239,1077.7820590346175,1336.8974867183654,1174.424249078435),c(23.706733579859826,3.9946032671285616,7.354968212795262,183.76929117180117,78.33383711240423,42.80785693946752),c(27.617122623960416,51.04215285775384,40.23600257587996,476.8068095268354,292.4463252196424,494.15156597515767),c(61.099828814071714,49.26677362791892,68.7905850490851,551.3078735154035,323.7798600646041,763.0965802252906),c(55.96744319368969,47.935239205542736,38.50542181992813,586.0750367100686,564.0036272093104,229.85957965322777),c(79.67417677354952,76.78515169036012,76.14555326188035,586.0750367100686,569.225883016804,547.1960843566718),c(15.885955491658645,21.748395565477725,20.33432388243396,144.0353903778982,130.55639518734037,71.65663009432606),c(8.309576718713753,20.860705950560266,6.922323023807305,178.80255357256328,78.33383711240423,30.709984326139743),c(23.706733579859826,11.096120186468227,48.45626116665113,332.77141914
893724,412.5582087919956,157.27234397326112),c(50.10185962753881,89.21280629920454,64.46413315920553,700.3100014925395,621.4484410917402,680.2726831032774),c(20.28514316627181,29.29375729227612,15.14258161457848,129.1351775801846,250.66827875969352,221.48412938246238),c(43.74747743087535,29.29375729227612,33.31367955207266,203.63624156875264,229.77925552971905,271.7368310070547),c(92.38294116687644,53.26137689504749,71.38645618301284,556.2746111146414,689.3377665891571,482.0536933618299),c(32.505108929086155,43.940635938414175,24.660775772313524,188.73602877103903,490.89204590439977,281.0428868634607),c(50.346258942795096,138.9234247345822,70.08852061604897,740.0439022864425,1039.2289056912293,907.3404459995833),c(24.439931525628687,0.0,8.652903779759132,213.56971676722839,135.77865099483398,112.60327586251239),c(63.29942265137829,103.41584013788388,98.21045790026614,625.8089375039715,684.1155107816635,308.9610544326786),c(22.729136318834676,14.20303383867933,21.63225944939783,268.2038303588449,308.11309264212326,158.2029495589017),c(10.753569871276621,116.73118436164575,56.67651975742231,451.9731215306461,344.66888329457856,330.3649829024124),c(33.72710550536759,83.44282380224107,125.89974999549536,730.1104270879667,731.115813049106,777.0556640098995),c(51.56825551907653,27.962222869899932,0.0,14.900212797713607,57.44481388242976,55.83633513843589),c(73.31979457688605,106.9665985975537,99.50839346723001,908.91298066053,757.2270920865741,524.8615503012974),c(44.48067537664421,98.53354725583785,75.7129080728924,258.2703551603692,214.1124881072382,314.54468794652223),c(43.014279485106485,80.33591015002996,15.14258161457848,377.47205754207806,553.5591155943232,203.80262325529102),c(58.900234976765134,30.181446907193575,72.6843917499767,307.9377311527479,266.3350461821743,327.5731661454906),c(29.57231714601071,54.59291131742368,44.995099654747484,769.8443278818697,548.3368597868296,222.41473496810298),c(38.37069249523704,35.951429404157054,41.101292953855875,476.8068095
268354,344.66888329457856,331.295588488053),c(52.30145346484539,22.192240372936453,21.19961426040987,129.1351775801846,167.11218583979567,218.69231262554058),c(29.57231714601071,26.186843640065014,64.03148797021757,258.2703551603692,329.0021158720977,274.5286477639765),c(90.9165452753387,67.02056592626809,74.84761769491648,650.6426255001609,480.44753428941254,332.2261940736936),c(13.441962339095777,41.72141190112053,23.79548539433761,298.00425595427214,114.88962776485953,164.71718865838588),c(66.72101306496631,40.83372228620308,83.50052147467562,730.1104270879667,198.44572068475736,357.35254488598974),c(30.549914407035857,30.625291714652306,30.28516322915696,104.30148958399525,224.55699972222544,117.25630379071538),c(22.484737003578392,20.41686114310154,92.15342525443475,481.7735471260733,151.44541841731484,121.90933171891837),c(28.350320569729277,14.20303383867933,22.497549827373742,89.40127678628164,167.11218583979567,191.70475064196324),c(26.883924678191555,85.66204783953471,95.6145867663384,511.5739727215005,381.22467394703386,474.6088486767051),c(14.663958915377211,19.52917152818408,20.766969071421915,34.76716319466509,62.66706968992338,25.126350812296153),c(40.814685647799905,27.5183780624412,25.958711339277393,849.3121294696756,313.3353484496169,214.0392846973376),c(75.27498909893636,157.56490664784883,127.19768556245923,923.8131934582436,1096.6737195736591,645.8402764345752),c(72.34219731586091,50.59830805029511,54.945939001470485,307.9377311527479,261.11279037468074,227.99836848194659),c(80.89617334983095,110.96120186468227,88.25961855354313,640.7091503016851,329.0021158720977,509.04125534540725),c(22.484737003578392,59.919049006928425,41.101292953855875,466.8733343283597,381.22467394703386,214.0392846973376),c(65.74341580394116,15.534568261055517,75.28026288390444,208.6029791679905,214.1124881072382,286.62652037730425),c(36.41549797318674,90.98818552903946,40.66864776486792,412.23922073674316,725.8935572416125,404.8134297536602),c(30.549914407035857,50.598
30805029511,47.59097078867522,198.66950396951478,365.557906524553,365.7279951567551),c(70.6314021090669,44.82832555333164,64.89677834819348,407.2724831375053,229.77925552971905,171.23142775787008),c(0.7331979457688605,15.978413068514246,0.43264518898795656,9.933475198475739,36.5557906524553,33.50180108306154),c(76.74138499047407,96.3143232185442,50.61948711159092,600.9752495077822,1274.230417028442,434.5928084941594),c(22.240337688322104,39.05834305636816,58.839745702362094,367.53858234360234,261.11279037468074,357.35254488598974),c(8.55397603397004,15.090723453596787,16.44051718154235,129.1351775801846,120.11188357235314,79.10147477945085),c(23.95113289511611,36.83911901907451,45.860390032723394,427.13943353445677,527.4478365568551,385.2707124552077),c(52.790252095357964,78.56053092019505,97.34516752229023,740.0439022864425,339.446627487085,368.5198119136769),c(104.60290692969077,16.866102683431706,16.44051718154235,119.20170238170886,156.66767422480845,180.53748361427606),c(63.05502333612201,19.52917152818408,42.8318737098077,332.77141914893724,428.2249762144764,284.76530920602306),c(52.545852780101676,119.83809801385685,123.73652405055557,620.8421999047337,464.7807668669317,408.53585209622264),c(26.150726732422694,89.65665110666328,66.19471391515735,456.939859129884,631.8929527067273,533.2370005720628),c(129.28723777057576,93.65125437379183,97.77781271127819,630.7756751032094,631.8929527067273,560.2245625556402),c(76.49698567521779,19.085326720725348,53.64800343450661,228.469929564942,517.0033249418678,308.03044884703803),c(45.70267195292564,62.13827304422207,89.557554120507,243.3701423626556,407.33595298450194,270.80622542141407),c(92.87173979738901,79.89206534257123,54.08064862349457,705.2767390917775,668.4487433591827,464.37218723465855),c(119.75566447558056,96.75816802600293,83.93316666366357,516.5407103207384,814.6719059690039,563.016379312562),c(15.885955491658645,18.64148191326662,77.87613401783219,317.87120635122363,459.5585110594381,278.25107010653886),c
(51.56825551907653,79.4482205351125,85.23110223062744,526.4741855192142,830.3386733914847,597.4487859812641),c(49.61306099702623,32.84451575194595,10.816129724698914,203.63624156875264,235.00151133721266,257.7777472224457),c(24.1955322103724,44.38448074587291,25.958711339277393,288.0707807557964,156.66767422480845,233.58200199579016),c(43.99187674613164,10.652275379009497,51.052132300578876,173.83581597332542,355.1133949095658,130.28478198968375),c(21.262740427296958,12.871499416303143,36.34219587498835,173.83581597332542,219.33474391473183,371.3116286705987),c(38.37069249523704,80.77975495748869,62.30090721426575,352.63836954588874,522.2255807493615,248.47169136603972),c(2.4439931525628684,0.8876896149174581,12.114065291662783,248.33687996189346,172.33444164728928,61.419968652279486),c(11.731167132301769,55.0367561248824,5.191742267855479,144.0353903778982,360.3356507170594,507.18004417412607),c(10.753569871276621,7.101516919339665,5.191742267855479,139.06865277866035,235.00151133721266,101.43600883482522),c(20.28514316627181,17.309947490890433,44.995099654747484,208.6029791679905,146.2231626098212,222.41473496810298),c(37.88189386472446,4.88229288204602,9.085548968747087,268.2038303588449,120.11188357235314,87.47692505021624),c(14.908358230633498,4.43844807458729,5.191742267855479,49.667375992378695,41.77804645994892,59.55875748099829),c(65.25461717342858,81.22359976494742,51.48477748956683,551.3078735154035,464.7807668669317,310.8222656039598),c(18.81874727473409,60.36289381438715,31.58309879612083,387.4055327405538,412.5582087919956,283.83470362038247),c(20.28514316627181,26.186843640065014,3.0285163229156957,178.80255357256328,172.33444164728928,65.14239099484188),c(13.93076096960835,13.7591890312206,44.995099654747484,84.43453918704378,36.5557906524553,229.85957965322777),c(95.07133363469559,38.61449824890943,30.717808418144916,243.3701423626556,470.0030226744253,359.2137560572709),c(38.615091810493325,9.32074095663331,35.47690549701244,54.63411359161656,297.6
6858102713604,62.350574237920085),c(28.350320569729277,27.962222869899932,0.0,104.30148958399525,78.33383711240423,54.9057295527953),c(49.85746031228252,56.36829054725859,40.66864776486792,491.7070223245491,339.446627487085,421.564330295191),c(26.150726732422694,7.545361726798394,71.38645618301284,233.43666716417985,668.4487433591827,464.37218723465855),c(21.262740427296958,11.539964993926956,17.73845274850622,129.1351775801846,73.1115813049106,189.84353947068203),c(40.32588701728733,34.176050174322135,70.08852061604897,412.23922073674316,412.5582087919956,188.91293388504144),c(5.621184250894598,1.3315344223761871,11.248774913686871,228.469929564942,141.0009068023276,117.25630379071538),c(45.94707126818193,43.940635938414175,72.6843917499767,322.8379439504615,428.2249762144764,329.43437731677176),c(103.13651103815306,57.699824969634776,96.47987714431432,680.4430510955881,699.7822782041444,662.591176976106),c(69.40940553278547,125.60808051082033,89.99019930949497,630.7756751032094,412.5582087919956,447.6212866931278),c(24.92873015614126,20.41686114310154,25.526066150289438,198.66950396951478,135.77865099483398,151.6887104594175),c(30.549914407035857,47.04754959062528,105.99807130204935,1008.2477326452874,621.4484410917402,639.326037335091),c(12.953163708583203,8.87689614917458,50.61948711159092,317.87120635122363,235.00151133721266,155.4111328019799),c(26.883924678191555,72.79054842323157,36.34219587498835,64.5675887900923,302.8908368346297,137.72962667480854),c(29.083518515498135,45.27217036079036,28.121937284217175,218.53645436646624,177.5566974547829,98.64419207790341),c(22.973535634090965,61.694428236763336,50.61948711159092,432.1061711336946,255.89053456718713,390.8543459690513),c(42.03668222408134,59.03135939201096,76.14555326188035,625.8089375039715,772.893859509055,727.7335679709479),c(25.66192810191012,27.5183780624412,18.603743126482133,193.7027663702769,88.77834872739145,92.12995297841923),c(37.39309523421189,52.37368728013003,87.39432817556722,382.4387951
4131595,318.5576042571105,288.4877315485855),c(29.816716461266996,12.427654608844414,22.064904638385784,84.43453918704378,109.66737195736592,119.11751496199658),c(63.29942265137829,50.154463242836385,87.82697336455519,625.8089375039715,762.4493478940677,548.1266899423124),c(60.12223155304657,44.38448074587291,50.18684192260296,779.7778030803455,412.5582087919956,403.8828241680196),c(17.84115001370894,24.4114644102301,10.383484535710958,139.06865277866035,99.22286034237868,53.0445183815141),c(17.596750698452652,35.50758459669832,48.02361597766318,198.66950396951478,167.11218583979567,174.02324451479188),c(60.366630868302856,116.28733955418701,83.50052147467562,645.675887900923,600.5594178617657,316.4058991178034),c(30.305515091779572,55.48060093234113,32.8810343630847,208.6029791679905,423.0027204069828,310.8222656039598),c(21.75153905780953,66.13287631135063,88.25961855354313,298.00425595427214,537.8923481718423,589.0733357104987),c(21.99593837306582,15.534568261055517,124.16916923954354,283.10404315655853,276.7795577971616,350.83830578650554),c(34.704702766392735,8.433051341715853,16.873162370530306,109.26822718323312,276.7795577971616,227.99836848194659),c(18.81874727473409,31.069136522111034,3.0285163229156957,168.86907837408756,219.33474391473183,43.73846252510812),c(70.14260347855434,60.36289381438715,68.7905850490851,352.63836954588874,344.66888329457856,242.88805785219614),c(25.66192810191012,35.0637397892396,32.44838917409674,332.77141914893724,548.3368597868296,289.4183371342261),c(24.1955322103724,34.176050174322135,4.326451889879566,104.30148958399525,151.44541841731484,102.36661442046581),c(27.617122623960416,10.652275379009497,2.163225944939783,14.900212797713607,73.1115813049106,67.93420775176368),c(19.551945220502947,68.35210034864427,83.06787628568766,317.87120635122363,438.66948782946366,271.7368310070547),c(50.59065825805138,23.52377479531264,18.171097937494174,238.4034047634177,208.89023229974458,118.18690937635598),c(38.37069249523704,37.28296382
653324,44.56245446575953,451.9731215306461,475.22527848191896,282.9040980347419),c(10.264771240764048,7.101516919339665,28.98722766219309,119.20170238170886,88.77834872739145,205.6638344265722),c(33.4827061901113,13.315344223761873,29.852518040169002,317.87120635122363,412.5582087919956,511.833072102329),c(9.042774664482614,16.422257875972974,3.4611615119036525,119.20170238170886,208.89023229974458,55.83633513843589),c(1.9551945220502949,6.657672111880936,6.0570326458313914,203.63624156875264,287.2240694121488,72.58723567996667),c(46.68026921395079,29.29375729227612,99.50839346723001,213.56971676722839,339.446627487085,313.61408236088164),c(14.175160284864639,11.096120186468227,50.61948711159092,432.1061711336946,391.6691855620211,304.3080265044756),c(3.177191098331729,3.9946032671285616,3.8938067008916093,94.36801438551952,120.11188357235314,93.06055856405983),c(37.1486959189556,70.57132438593793,70.08852061604897,620.8421999047337,381.22467394703386,174.95385010043248),c(31.283112352804718,21.748395565477725,23.362840205349656,322.8379439504615,177.5566974547829,151.6887104594175),c(34.704702766392735,10.652275379009497,19.036388315470088,268.2038303588449,318.5576042571105,178.67627244299487),c(28.350320569729277,79.89206534257123,6.922323023807305,173.83581597332542,485.6697900969062,201.9414120840098),c(32.26070961382987,94.98278879616802,33.31367955207266,263.2370927596071,323.7798600646041,199.14959532708804),c(15.152757545889786,21.304550758018994,42.39922852081974,94.36801438551952,167.11218583979567,220.5535237968218),c(13.197563023839491,4.88229288204602,5.6243874568434356,213.56971676722839,125.33413937984676,80.03208036509145),c(58.16703703099627,72.34670361577284,30.28516322915696,412.23922073674316,151.44541841731484,127.49296523276196),c(12.953163708583203,8.433051341715853,25.958711339277393,248.33687996189346,125.33413937984676,160.9947663158235),c(48.635463736001086,90.54434072158072,54.08064862349457,322.8379439504615,757.2270920865741,498.804593
9033607),c(25.173129471397548,27.962222869899932,18.603743126482133,203.63624156875264,151.44541841731484,159.1335551445423),c(59.3890336072777,115.39964993926955,81.33729552973584,725.1436894887289,475.22527848191896,416.911302366988),c(14.908358230633498,31.956826137028493,66.19471391515735,630.7756751032094,631.8929527067273,174.95385010043248),c(20.28514316627181,7.101516919339665,0.0,89.40127678628164,156.66767422480845,78.17086919381025),c(42.03668222408134,80.77975495748869,58.40710051337414,302.97099355351,412.5582087919956,567.6694072407649),c(11.975566447558055,0.8876896149174581,5.191742267855479,59.60085119085443,156.66767422480845,57.69754630971709),c(48.3910644207448,24.855309217688827,33.31367955207266,243.3701423626556,125.33413937984676,90.26874180713803),c(19.307545905246663,63.469807466598255,16.44051718154235,417.205958335981,475.22527848191896,342.46285551574016),c(61.58862744458429,118.50656359148066,92.5860704434227,1077.7820590346175,746.7825804715869,710.0520618437764),c(16.863552752683795,31.956826137028493,35.909550686000394,312.90446875198575,109.66737195736592,291.27954830550726),c(17.352351383196368,21.304550758018994,40.66864776486792,213.56971676722839,339.446627487085,135.86841550352736),c(3.665989728844303,10.20843057155077,1.7305807559518263,34.76716319466509,146.2231626098212,140.52144343173035),c(16.619153437427507,68.795945156103,67.49264948212122,456.939859129884,266.3350461821743,225.20655172502478),c(8.55397603397004,5.769982496963478,1.7305807559518263,104.30148958399525,5.222255807493615,42.80785693946752),c(39.83708838677476,51.04215285775384,26.39135652826535,163.90234077484968,224.55699972222544,103.2972200061064),c(13.93076096960835,42.60910151603799,22.064904638385784,208.6029791679905,198.44572068475736,246.61048019475854),c(12.953163708583203,0.0,6.922323023807305,153.96886557637396,130.55639518734037,185.19051154247904),c(41.79228290882505,44.82832555333164,45.42774484373544,208.6029791679905,141.0009068023276,244.7
4926902347735),c(19.063146589990374,116.28733955418701,57.54181013539822,963.5470942521466,595.3371620542721,630.9505870643256),c(16.863552752683795,39.50218786382688,65.7620687261694,342.70489434741296,250.66827875969352,230.79018523886836),c(7.331979457688606,20.860705950560266,16.44051718154235,89.40127678628164,177.5566974547829,92.12995297841923),c(21.507139742553242,1.7753792298349163,21.19961426040987,54.63411359161656,130.55639518734037,131.21538757532434),c(7.087580142432319,15.090723453596787,6.922323023807305,89.40127678628164,10.44451161498723,30.709984326139743),c(26.639525362935267,8.433051341715853,19.036388315470088,238.4034047634177,125.33413937984676,459.71915930645554),c(9.775972610251474,39.50218786382688,58.40710051337414,526.4741855192142,224.55699972222544,147.9662881168551),c(10.509170556020335,40.83372228620308,65.32942353718144,144.0353903778982,558.7813714018167,290.34894271986667),c(11.731167132301769,1.3315344223761871,4.326451889879566,94.36801438551952,52.222558074936146,91.19934739277863),c(12.953163708583203,60.36289381438715,34.17896993004857,293.0375183550343,214.1124881072382,311.7528711896004),c(10.020371925507762,23.967619602771368,12.54671048065074,158.93560317561182,255.89053456718713,152.61931604505813),c(9.042774664482614,20.41686114310154,38.93806700891609,158.93560317561182,339.446627487085,192.63535622760384),c(0.4887986305125737,15.978413068514246,3.8938067008916093,79.46780158780591,125.33413937984676,134.00720433224615),c(22.973535634090965,47.04754959062528,25.526066150289438,139.06865277866035,94.00060453488507,93.99116414970042),c(2.6883924678191553,2.6630688447523743,0.0,29.800425595427214,0.0,7.444844685124786),c(36.41549797318674,38.61449824890943,37.64013144195222,273.1705679580828,517.0033249418678,360.1443616429115),c(1.466395891537721,0.0,0.0,0.0,5.222255807493615,1.8612111712811965),c(5.621184250894598,3.9946032671285616,2.163225944939783,44.70063839314082,73.1115813049106,118.18690937635598),c(9.28717397973
89,5.3261376895047485,0.0,19.866950396951477,26.111279037468073,21.40392846973376),c(4.154788359356877,8.87689614917458,15.575226803566437,84.43453918704378,36.5557906524553,86.54631946457563),c(1.9551945220502949,11.096120186468227,27.68929209522922,94.36801438551952,146.2231626098212,170.3008221722295),c(0.4887986305125737,46.159859975707825,43.26451889879566,124.16843998094673,376.0024181395403,163.7865830727453),c(4.887986305125737,41.2775670936618,16.007871992554392,233.43666716417985,423.0027204069828,169.3702165865889),c(3.177191098331729,1.3315344223761871,0.0,74.50106398856803,114.88962776485953,153.54992163069872),c(1.2219965762814342,8.87689614917458,3.8938067008916093,69.53432638933018,57.44481388242976,67.93420775176368),c(0.4887986305125737,3.1069136522111034,5.191742267855479,0.0,5.222255807493615,0.9306055856405983),c(0.24439931525628686,0.0,1.2979355669638697,39.733900793902954,52.222558074936146,32.57119549742094),c(0.24439931525628686,9.76458576409204,7.354968212795262,139.06865277866035,297.66858102713604,186.12111712811966),c(0.0,18.197637105807893,3.4611615119036525,0.0,99.22286034237868,58.62815189535769),c(0.0,4.43844807458729,3.8938067008916093,89.40127678628164,208.89023229974458,111.67267027687178),c(0.0,3.5507584596698325,3.0285163229156957,29.800425595427214,78.33383711240423,40.94664576818632),c(0.0,3.1069136522111034,4.326451889879566,44.70063839314082,5.222255807493615,76.30965802252906),c(0.0,0.0,0.0,9.933475198475739,31.33353484496169,20.47332288409316),c(0.0,0.0,0.0,0.0,0.0,0.0),c(0.0,0.0,0.0,0.0,0.0,0.0),c(0.0,0.0,0.0,0.0,0.0,0.0))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="Nontargeting"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(118.28926858404284,70.1274795784792,116.38155583776032,1599.289506954594,1895.6788581201822,1138.1306312384518),c(155.19356518774217,217.92780046223598,213.29407817106258,1653.9236205462105,2015.7907416925352,1695.56337703717),c(55.7230438784334,183.75175028791384,94.74929638836248,710.2434766910153,710.2267898191316,835.6838159052572),c(33.72710550536759,34.176050174322135,102.5369097901457,337.73815674817513,449.1139994444509,246.61048019475854),c(57.6782384004837,43.940635938414175,131.5241374523388,650.6426255001609,840.783185006472,521.139127958735),c(1.2219965762814342,2.219224037293645,18.171097937494174,188.73602877103903,208.89023229974458,115.39509261943418))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_C00098g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(63.29942265137829,93.2074095663331,97.34516752229023,486.7402847253112,621.4484410917402,775.1944528386183),c(93.36053842790157,90.54434072158072,99.07574827824206,625.8089375039715,934.783789541357,504.38822741720423),c(53.76784935638311,57.699824969634776,142.3402671770377,963.5470942521466,673.6709991666763,987.3725263646747),c(65.98781511919745,42.60910151603799,24.228130583325566,600.9752495077822,360.3356507170594,475.5394542623457),c(50.59065825805138,40.83372228620308,139.311750854122,536.4076607176899,710.2267898191316,395.50737389725424),c(7.576378772944893,17.75379229834916,21.63225944939783,258.2703551603692,485.6697900969062,370.3810230849581))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_E00019g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(21.507139742553242,45.27217036079036,65.32942353718144,635.7424127024473,355.1133949095658,509.04125534540725),c(153.97156861146073,66.57672111880936,150.56052576780888,933.7466686567194,856.4499524289528,764.9577913965718),c(73.07539526162977,207.71936989068521,154.4543324687005,1038.0481582407147,1462.231626098212,1117.6573083543585),c(53.034651410614245,38.170653441450696,41.53393814284383,461.9065967291218,731.115813049106,248.47169136603972),c(8.309576718713753,25.742998832606286,17.73845274850622,248.33687996189346,214.1124881072382,124.70114847584017),c(78.45218019726808,168.66102683431706,86.52903779759131,1107.5824846300447,872.1167198514337,965.0379923093004))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_D00040g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(214.33819947976357,245.00233371721845,125.46710480650741,1023.147945443001,976.561836001306,721.2193288714636),c(74.5417911531675,120.7257876287743,77.44348882884422,725.1436894887289,830.3386733914847,787.2923254519461),c(294.74557419908194,207.27552508322648,268.672662361521,1738.3581597332543,1770.3447187403353,1743.954867490481),c(32.01631029857358,9.76458576409204,42.8318737098077,506.6072351222627,376.0024181395403,324.7813493885688),c(20.28514316627181,6.213827304422207,16.007871992554392,193.7027663702769,52.222558074936146,209.3862567691346),c(24.68433084088497,122.94501166606796,35.47690549701244,332.77141914893724,349.8911391020722,349.90770020086495))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_A00032g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(27.128323993447843,80.33591015002996,48.02361597766318,571.174823912355,569.225883016804,401.09100741109785),c(50.10185962753881,122.50116685860922,86.09639260860335,541.3743983169278,1007.8953708462676,547.1960843566718),c(14.908358230633498,65.6890315038919,32.44838917409674,178.80255357256328,282.0018136046552,415.98069678134743),c(10.264771240764048,10.652275379009497,31.58309879612083,129.1351775801846,182.7789532622765,101.43600883482522),c(90.9165452753387,61.694428236763336,113.35303951484462,769.8443278818697,501.33655751938704,802.1820148221957),c(47.41346715971965,68.795945156103,67.49264948212122,546.3411359161656,506.5588133268806,651.4239099484188))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_B30333g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(74.2973918379112,27.5183780624412,48.888906355639094,402.3057455382674,355.1133949095658,303.377420918835),c(63.78822128189087,12.871499416303143,26.39135652826535,302.97099355351,214.1124881072382,442.0376531792842),c(70.14260347855434,75.45361726798394,57.10916494641027,1082.7487966338556,898.2279988889018,790.0841422088679),c(24.439931525628687,49.26677362791892,30.717808418144916,451.9731215306461,396.8914413695147,577.9060686828116),c(49.85746031228252,40.389877478744346,76.14555326188035,571.174823912355,339.446627487085,370.3810230849581),c(9.531573294995187,35.0637397892396,40.23600257587996,620.8421999047337,266.3350461821743,267.0838030788517))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_C33545g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(15.152757545889786,7.545361726798394,45.42774484373544,248.33687996189346,229.77925552971905,126.56235964712137),c(29.816716461266996,7.101516919339665,54.945939001470485,183.76929117180117,355.1133949095658,177.74566685735428),c(122.688456258656,155.34568261055517,107.72865205800119,829.4451790727242,1060.1179289212039,698.8847948160893),c(92.13854185162015,231.68698949345657,157.05020360262824,1410.553478183555,1378.6755331783143,1245.1502735871204),c(98.4929240482836,132.26575262270126,221.0816915728458,1599.289506954594,1838.2340442377524,1248.8726959296828),c(9.531573294995187,5.3261376895047485,3.0285163229156957,193.7027663702769,94.00060453488507,129.35417640404316))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_D00062g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(99.95931993982133,133.59728704507745,112.05510394788075,1286.3850382026083,699.7822782041444,741.6926517555568),c(127.33204324852545,147.80032088375677,159.64607473655596,1152.2831230231857,1237.6746263759867,1325.182353952212),c(174.7455104082451,164.22257875972974,98.21045790026614,943.6801438551952,976.561836001306,1216.301500432262),c(120.24446310609314,251.21616102164066,109.45923281395301,576.1415615115928,1081.0069521511782,840.3368438334602),c(84.31776376341897,35.0637397892396,56.24387456843435,382.43879514131595,778.1161153165486,621.6445312079196),c(40.32588701728733,19.973016335642807,39.803357386892,322.8379439504615,214.1124881072382,267.0838030788517))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_A22415g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(171.32391999465707,85.66204783953471,141.90762198804975,1410.553478183555,1608.4547887080334,928.7443744693171),c(90.42774664482614,75.89746207544268,77.44348882884422,451.9731215306461,558.7813714018167,435.5234140798),c(26.150726732422694,175.7625437536567,82.6352310966997,342.70489434741296,485.6697900969062,567.6694072407649),c(174.01231246247625,48.37908401300147,65.7620687261694,551.3078735154035,370.78016233204664,358.28315047163034),c(18.81874727473409,57.25598016217605,54.51329381248253,402.3057455382674,511.78106913437426,305.23863209011625),c(15.885955491658645,13.315344223761873,38.072776630940176,288.0707807557964,156.66767422480845,221.48412938246238))
# Gene and sample labels for this panel (emitted by the MAGeCK plot template).
targetgene="YALI1_A22496g"
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero read counts stay plottable on the log y axis;
# yrange is the shared ylim across every sgRNA line of this gene.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # 1-based cyclic palette lookup. The original `i %% length(colors)` returns
  # index 0 (an invalid, empty colour) whenever i is a multiple of the palette size.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA establishes the frame: log-scale y, custom sample labels on x.
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=(collabel),las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# Close the PDF graphics device that collected all the sgRNA panels above.
dev.off()
# Weave the summary report: execute the R chunks in the .Rnw file to emit LaTeX.
Sweave("Day6_test_summary.Rnw");
library(tools);
# Compile the woven LaTeX source into the final PDF summary.
texi2dvi("Day6_test_summary.tex",pdf=TRUE);
| /Day6_FS/Day6_test.R | no_license | oronoc1210/MAGeCK_Pipeline | R | false | false | 96,904 | r | pdf(file='Day6_test.pdf',width=4.5,height=4.5);
gstable=read.table('Day6_test.gene_summary.txt',header=T)
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
startindex=3
# outputfile='__OUTPUT_FILE__'
targetgenelist=c("YALI1_F04590t","YALI1_F27904g","YALI1_F07344g","YALI1_B07652g","YALI1_D22768g","YALI1_F04110g","YALI1_F00821g","YALI1_E34028g","YALI1_C01301t","YALI1_E18539g")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel='PO1f_Cas9_1,PO1f_Cas9_2,PO1f_Cas9_3_vs_PO1f_1,PO1f_2,PO1f_3 neg.'
# You need to write some codes in front of this code:
# gstable=read.table(gstablename,header=T)
# pdf(file=outputfile,width=6,height=6)
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot values in their given (ranked) order on a reversed log-scale y axis and
# highlight the requested genes with colored points plus a legend.
#
# val    : named numeric vector (names are gene ids), assumed pre-sorted.
# tglist : character vector of gene names to mark.
# ...    : further arguments passed on to plot() (xlab, ylab, main, ...).
# NOTE: relies on the script-level `colors` palette vector.
plotrankedvalues <- function(val, tglist, ...) {
  plot(val, log = 'y', ylim = c(max(val), min(val)), type = 'l', lwd = 2, ...)
  if (length(tglist) > 0) {
    for (i in seq_along(tglist)) {
      targetgene <- tglist[i]
      tx <- which(names(val) == targetgene)
      ty <- val[targetgene]
      # 1-based cyclic palette index; (i %% n) yields colors[0] when i == n.
      points(tx, ty, col = colors[((i - 1) %% length(colors)) + 1],
             cex = 2, pch = 20)
    }
    # Same cyclic indexing as the points so legend colors always match.
    legend('topright', tglist, pch = 20, pt.cex = 2, cex = 1,
           col = colors[((seq_along(tglist) - 1) %% length(colors)) + 1])
  }
}
# Randomly shuffle val, keeping (over 20 tries) the permutation that spreads
# the target genes most evenly, then plot it with the targets highlighted.
#
# val            : named numeric vector (names are gene ids).
# targetgenelist : character vector of gene names to mark and label.
# ...            : further arguments passed on to plot().
# NOTE: relies on the script-level `colors` palette vector.
plotrandvalues <- function(val, targetgenelist, ...) {
  mindiffvalue <- 0
  randval <- val
  for (i in 1:20) {
    randval0 <- sample(val)
    vindex <- sort(which(names(randval0) %in% targetgenelist))
    # Guard length(vindex) == 0: max(integer(0)) warns and returns -Inf.
    # Also skip shuffles with a target too close to the right edge.
    if (length(vindex) == 0 || max(vindex) > 0.9 * length(val)) {
      next
    }
    mindiffind <- min(diff(vindex))
    if (mindiffind > mindiffvalue) {
      # Best spread so far: keep this permutation.
      mindiffvalue <- mindiffind
      randval <- randval0
    }
  }
  plot(randval, log = 'y', ylim = c(max(randval), min(randval)),
       pch = 20, col = 'grey', ...)
  if (length(targetgenelist) > 0) {
    for (i in seq_along(targetgenelist)) {
      targetgene <- targetgenelist[i]
      tx <- which(names(randval) == targetgene)
      ty <- randval[targetgene]
      # 1-based cyclic palette index; (i %% n) yields colors[0] when i == n,
      # and the original colors[i] is NA past the end of the palette.
      coli <- colors[((i - 1) %% length(colors)) + 1]
      points(tx, ty, col = coli, cex = 2, pch = 20)
      text(tx + 50, ty, targetgene, col = coli)
    }
  }
}
# set.seed(1235)
# Ranked RRA-score plot (negative selection) with the target genes marked.
pvec <- sort(setNames(gstable[[startindex]], gstable[["id"]]))
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# Ranked p-value plot (the column immediately after the RRA score column).
pvec <- sort(setNames(gstable[[startindex + 1]], gstable[["id"]]))
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_F04590t: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(48.879863051257374,77.22899649781886,189.065947587737,9.933475198475739,0.0,1.8612111712811965),c(69.16500621752918,165.99795798956467,94.31665119937453,9.933475198475739,5.222255807493615,28.848773154858545),c(122.44405694339972,64.35749708151572,69.22323023807306,14.900212797713607,0.0,0.0),c(77.47458293624294,49.26677362791892,93.88400601038657,4.966737599237869,0.0,0.0),c(169.1243261573505,118.95040839893939,96.91252233330226,9.933475198475739,0.0,2.791816756921795),c(39.59268907151847,42.165256708579264,97.34516752229023,19.866950396951477,0.0,0.0))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_F04590t"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_F27904g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(133.68642544518892,86.9935822619109,137.5811700981702,69.53432638933018,20.88902322997446,24.195745226655553),c(144.19559600120925,95.87047841108547,138.01381528715814,4.966737599237869,0.0,7.444844685124786),c(254.41968718179461,382.5942240294244,190.79652834368883,49.667375992378695,0.0,2.791816756921795),c(59.63343292253399,60.80673862184588,57.97445532438618,19.866950396951477,0.0,2.791816756921795),c(164.48073916748106,71.45901400085538,122.87123367257966,4.966737599237869,0.0,0.0),c(49.12426236651366,70.1274795784792,50.61948711159092,0.0,0.0,0.0))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_F27904g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_F07344g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(40.32588701728733,23.079929987853912,54.945939001470485,9.933475198475739,0.0,0.0),c(29.816716461266996,48.37908401300147,30.28516322915696,0.0,0.0,0.0),c(73.80859320739863,36.83911901907451,72.6843917499767,0.0,0.0,0.0),c(54.0122486716394,50.154463242836385,45.42774484373544,39.733900793902954,0.0,0.0),c(32.74950824434244,87.88127187682835,40.66864776486792,19.866950396951477,0.0,0.0),c(63.54382196663458,113.18042590197591,128.49562112942309,34.76716319466509,0.0,0.0))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_F07344g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_B07652g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(85.53976033970041,124.72039089590287,125.03445961751945,14.900212797713607,26.111279037468073,13.028478198968376),c(76.00818704470521,88.76896149174581,67.92529467110919,0.0,0.0,0.0),c(38.859491125749614,90.54434072158072,60.13768126932596,19.866950396951477,0.0,0.0),c(123.42165420442487,119.39425320639812,134.12000858626652,19.866950396951477,0.0,4.653027928202992),c(39.83708838677476,82.5551341873236,73.11703693896466,0.0,0.0,0.0),c(4.154788359356877,46.60370478316655,29.419872851181047,0.0,0.0,5.58363351384359))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_B07652g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_D22768g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(236.0897385375731,107.41044340501243,166.56839776036327,39.733900793902954,10.44451161498723,37.22422342562393),c(284.7252022735742,423.42794631562754,374.6707336635704,34.76716319466509,20.88902322997446,17.681506127171367),c(270.061243358197,222.36624853682326,64.89677834819348,0.0,15.666767422480845,7.444844685124786),c(230.22415497142222,215.2647316174836,178.24981786303812,9.933475198475739,5.222255807493615,22.33453405537436),c(31.03871303754843,26.630688447523745,82.20258590771175,4.966737599237869,0.0,6.514239099484188),c(48.3910644207448,23.967619602771368,26.824001717253307,0.0,0.0,1.8612111712811965))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_D22768g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_F04110g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(99.95931993982133,58.58751458455224,97.77781271127819,19.866950396951477,0.0,7.444844685124786),c(130.75363366211346,131.82190781524253,142.77291236602568,29.800425595427214,0.0,6.514239099484188),c(110.46849049584166,44.82832555333164,81.76994071872379,9.933475198475739,0.0,0.0),c(116.82287269250511,124.72039089590287,93.45136082139862,89.40127678628164,0.0,0.9306055856405983),c(125.37684872647516,86.10589264699344,109.45923281395301,14.900212797713607,0.0,0.0),c(19.307545905246663,17.75379229834916,28.121937284217175,34.76716319466509,20.88902322997446,1.8612111712811965))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_F04110g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_F00821g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(99.22612199405246,113.62427070943464,107.29600686901323,24.833687996189347,10.44451161498723,7.444844685124786),c(53.52345004112682,21.748395565477725,37.207486252964266,4.966737599237869,0.0,0.0),c(94.09373637367044,134.4849766599949,137.14852490918224,14.900212797713607,0.0,0.0),c(195.5194522050295,118.50656359148066,195.98827061154432,24.833687996189347,20.88902322997446,40.01604018254572),c(25.66192810191012,9.32074095663331,27.68929209522922,0.0,0.0,0.0),c(45.70267195292564,67.90825554118554,48.45626116665113,4.966737599237869,0.0,0.0))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_F00821g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_E34028g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(196.25265015079836,112.73658109451718,169.59691408327896,9.933475198475739,0.0,0.0),c(33.97150482062387,94.09509918125056,73.11703693896466,34.76716319466509,5.222255807493615,0.0),c(30.549914407035857,13.7591890312206,24.660775772313524,0.0,0.0,0.0),c(114.62327885519854,34.176050174322135,107.29600686901323,9.933475198475739,0.0,0.0),c(40.08148770203105,23.079929987853912,30.28516322915696,4.966737599237869,0.0,0.9306055856405983),c(40.32588701728733,62.5821178516808,16.873162370530306,9.933475198475739,0.0,0.0))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_E34028g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_C01301t: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(121.95525831288714,75.89746207544268,67.06000429313326,0.0,0.0,0.0),c(64.52141922765973,37.28296382653324,72.25174656098875,14.900212797713607,0.0,0.0),c(31.283112352804718,46.159859975707825,36.34219587498835,9.933475198475739,0.0,0.0),c(51.07945688856395,19.085326720725348,32.44838917409674,0.0,0.0,0.0),c(17.596750698452652,35.951429404157054,25.526066150289438,0.0,0.0,0.9306055856405983),c(10.753569871276621,53.705221702506215,24.660775772313524,0.0,0.0,0.0))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_C01301t"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
# Normalized sgRNA read counts for YALI1_E18539g: one c(...) vector per sgRNA,
# values in the sample order given by `collabel` below.
targetmat=list(c(282.7700077515239,245.8900233321359,247.0404029121232,84.43453918704378,26.111279037468073,8.375450270765384),c(140.77400558762122,87.88127187682835,128.92826631841106,19.866950396951477,0.0,9.306055856405983),c(62.810624020865724,62.13827304422207,28.98722766219309,0.0,0.0,2.791816756921795),c(77.23018362098665,46.159859975707825,89.12490893151906,9.933475198475739,0.0,2.791816756921795),c(48.879863051257374,23.967619602771368,85.23110223062744,0.0,0.0,2.791816756921795),c(13.441962339095777,9.32074095663331,13.412000858626653,4.966737599237869,0.0,0.0))
# Gene whose sgRNAs are plotted in the block below.
targetgene="YALI1_E18539g"
# Sample labels for the x axis, matching the order inside each sgRNA vector.
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed 28-color palette (cf. the commented brewer.pal call above).
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift counts by 1 so zero counts remain plottable on the log-scale y axis.
targetmatvec <- unlist(targetmat) + 1
# y-axis range over all (shifted) positive counts.
yrange <- range(targetmatvec[targetmatvec > 0])
# One line per sgRNA: the first iteration draws the frame, later ones overlay.
for (i in seq_along(targetmat)) {  # seq_along avoids 1:0 when the list is empty
  vali <- targetmat[[i]] + 1
  # 1-based cyclic palette index; (i %% n) would select colors[0] when i == n.
  coli <- colors[((i - 1) %% length(colors)) + 1]
  if (i == 1) {
    plot(seq_along(vali), vali, type = 'b', las = 1, pch = 20,
         main = paste('sgRNAs in', targetgene),
         ylab = 'Read counts', xlab = 'Samples',
         xlim = c(0.7, length(vali) + 0.3), ylim = yrange,
         col = coli, xaxt = 'n', log = 'y')
    axis(1, at = seq_along(vali), labels = collabel, las = 2)
  } else {
    lines(seq_along(vali), vali, type = 'b', pch = 20, col = coli)
  }
}
#
#
# parameters
# Do not modify the variables beginning with "__"
# gstablename='__GENE_SUMMARY_FILE__'
# Parameters for the positive-selection ranked plots.
# Column index of the positive-selection RRA score in gstable.
startindex <- 9
# outputfile='__OUTPUT_FILE__'
# Top positively selected genes (plus the non-targeting control) to highlight.
targetgenelist <- c("Nontargeting","YALI1_C00098g","YALI1_E00019g","YALI1_D00040g","YALI1_A00032g","YALI1_B30333g","YALI1_C33545g","YALI1_D00062g","YALI1_A22415g","YALI1_A22496g")
# samplelabel=sub('.\\w+.\\w+$','',colnames(gstable)[startindex]);
samplelabel <- 'PO1f_Cas9_1,PO1f_Cas9_2,PO1f_Cas9_3_vs_PO1f_1,PO1f_2,PO1f_3 pos.'
# Fixed 28-color palette (cf. the commented brewer.pal call below); avoids an
# RColorBrewer dependency.
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
colors <- c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
            "#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
            "#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
            "#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
######
# function definition
# Plot values in their given (ranked) order on a reversed log-scale y axis and
# highlight the requested genes with colored points plus a legend.
#
# val    : named numeric vector (names are gene ids), assumed pre-sorted.
# tglist : character vector of gene names to mark.
# ...    : further arguments passed on to plot() (xlab, ylab, main, ...).
# NOTE: relies on the script-level `colors` palette vector.
plotrankedvalues <- function(val, tglist, ...) {
  plot(val, log = 'y', ylim = c(max(val), min(val)), type = 'l', lwd = 2, ...)
  if (length(tglist) > 0) {
    for (i in seq_along(tglist)) {
      targetgene <- tglist[i]
      tx <- which(names(val) == targetgene)
      ty <- val[targetgene]
      # 1-based cyclic palette index; (i %% n) yields colors[0] when i == n.
      points(tx, ty, col = colors[((i - 1) %% length(colors)) + 1],
             cex = 2, pch = 20)
    }
    # Same cyclic indexing as the points so legend colors always match.
    legend('topright', tglist, pch = 20, pt.cex = 2, cex = 1,
           col = colors[((seq_along(tglist) - 1) %% length(colors)) + 1])
  }
}
# Randomly shuffle val, keeping (over 20 tries) the permutation that spreads
# the target genes most evenly, then plot it with the targets highlighted.
#
# val            : named numeric vector (names are gene ids).
# targetgenelist : character vector of gene names to mark and label.
# ...            : further arguments passed on to plot().
# NOTE: relies on the script-level `colors` palette vector.
plotrandvalues <- function(val, targetgenelist, ...) {
  mindiffvalue <- 0
  randval <- val
  for (i in 1:20) {
    randval0 <- sample(val)
    vindex <- sort(which(names(randval0) %in% targetgenelist))
    # Guard length(vindex) == 0: max(integer(0)) warns and returns -Inf.
    # Also skip shuffles with a target too close to the right edge.
    if (length(vindex) == 0 || max(vindex) > 0.9 * length(val)) {
      next
    }
    mindiffind <- min(diff(vindex))
    if (mindiffind > mindiffvalue) {
      # Best spread so far: keep this permutation.
      mindiffvalue <- mindiffind
      randval <- randval0
    }
  }
  plot(randval, log = 'y', ylim = c(max(randval), min(randval)),
       pch = 20, col = 'grey', ...)
  if (length(targetgenelist) > 0) {
    for (i in seq_along(targetgenelist)) {
      targetgene <- targetgenelist[i]
      tx <- which(names(randval) == targetgene)
      ty <- randval[targetgene]
      # 1-based cyclic palette index; (i %% n) yields colors[0] when i == n,
      # and the original colors[i] is NA past the end of the palette.
      coli <- colors[((i - 1) %% length(colors)) + 1]
      points(tx, ty, col = coli, cex = 2, pch = 20)
      text(tx + 50, ty, targetgene, col = coli)
    }
  }
}
# set.seed(1235)
# Ranked RRA-score plot (positive selection) with the target genes marked.
pvec <- sort(setNames(gstable[[startindex]], gstable[["id"]]))
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='RRA score',main=paste('Distribution of RRA scores in \\n',samplelabel))
# Ranked p-value plot (the column immediately after the RRA score column).
pvec <- sort(setNames(gstable[[startindex + 1]], gstable[["id"]]))
plotrankedvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# plotrandvalues(pvec,targetgenelist,xlab='Genes',ylab='p value',main=paste('Distribution of p values in \\n',samplelabel))
# you need to write after this code:
# dev.off()
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(177.67830219132054,163.3348891448123,157.91549398060414,2051.26262848524,1180.229812493557,890.5895454580525),c(153.48276998094815,105.19121936771879,110.75716838091688,735.0771646872047,376.0024181395403,773.3332416673371),c(303.7883488635646,280.9537631213755,285.54582473205136,3188.645538710712,1947.9014161951184,2312.5548803168867),c(62.810624020865724,42.60910151603799,18.603743126482133,337.73815674817513,412.5582087919956,576.975463097171),c(101.67011514661533,173.09947490890434,102.96955497913366,814.5449662750106,788.5606269315358,589.0733357104987),c(200.65183782541152,185.52712951774876,157.05020360262824,973.4805694506224,940.0060453488506,945.4952750108479),c(290.5907858397251,271.1891773572835,323.18595617400354,2433.701423626556,2172.4584159173437,2319.069119416371),c(74.78619046842378,5.769982496963478,42.8318737098077,342.70489434741296,663.226487551689,322.9201382172876),c(197.47464672707977,44.38448074587291,31.58309879612083,536.4076607176899,762.4493478940677,506.2494385884855),c(99.22612199405246,118.95040839893939,63.598842781229614,615.8754623054958,725.8935572416125,834.7532103196166),c(21.01834111204067,52.81753208758876,11.681420102674828,198.66950396951478,334.22437167959134,338.7404331731778),c(122.44405694339972,82.99897899478233,80.03935996277197,1067.8485838361419,955.6728127713315,852.434716446788),c(452.6275318546433,303.5898483017707,328.810343630847,24.833687996189347,0.0,0.0),c(93.84933705841415,93.2074095663331,141.4749767990618,625.8089375039715,537.8923481718423,585.3509133679363),c(427.698801698502,360.40198365648797,408.417058404631,2279.732558050182,1895.6788581201822,2307.901852388684),c(113.15688296366082,122.05732205115049,98.6431030892541,993.3475198475738,762.4493478940677,688.6481333740427),c(131.24243229262603,102.52815052296641,118.97742697168806,1246.6511374087052,814.6719059690039,866.393800231397),c(82.36256924136867,70.1274795784792,89.99019930949497,809.5782286757727,616.2261852842465,565.80819
60694838),c(53.76784935638311,68.35210034864427,86.96168298657926,943.6801438551952,464.7807668669317,657.9381490479029),c(222.8921755137336,216.15242123240105,175.2213015401224,1157.2498606224235,1697.2331374354249,1272.137835570698),c(436.00837841721574,222.36624853682326,215.45730411600238,1698.6242589393512,1608.4547887080334,2364.6687931127603),c(123.42165420442487,156.67721703293137,71.38645618301284,1529.7551805652638,924.3392779263698,870.1162225739594),c(50.59065825805138,100.30892648567277,109.45923281395301,516.5407103207384,381.22467394703386,436.45401966544057),c(132.95322749942005,131.3780630077838,78.7414243958081,1172.1500734201372,600.5594178617657,574.1836463402491),c(151.03877682838527,94.98278879616802,76.14555326188035,591.0417743093064,600.5594178617657,638.3954317494504),c(64.27701991240345,35.50758459669832,86.09639260860335,650.6426255001609,699.7822782041444,350.83830578650554),c(49.61306099702623,44.82832555333164,40.23600257587996,630.7756751032094,349.8911391020722,301.51620974755383),c(69.16500621752918,137.591890312206,68.35793986009713,526.4741855192142,762.4493478940677,641.1872485063722),c(24.1955322103724,39.05834305636816,10.816129724698914,322.8379439504615,344.66888329457856,257.7777472224457),c(166.68033300478763,152.68261376580278,35.47690549701244,481.7735471260733,391.6691855620211,905.4792348283021),c(109.49089323481651,64.35749708151572,79.606714773784,531.440923118452,793.7828827390294,407.60524651058205),c(16.619153437427507,49.26677362791892,44.995099654747484,243.3701423626556,365.557906524553,430.870386151597),c(65.98781511919745,130.49037339286633,177.3845274850622,342.70489434741296,564.0036272093104,790.0841422088679),c(162.76994396068704,186.4148191326662,141.4749767990618,1390.6865277866034,710.2267898191316,655.1463322909811),c(86.27295828546926,84.33051341715853,64.89677834819348,759.910852683394,720.6713014341188,593.7263636387017),c(256.3748817038449,230.35545507108037,212.86143298207463,1405.586740584317,124
2.8968821834803,1283.305102598385),c(229.49095702565336,335.5466744387992,214.1593685490385,1485.0545421721229,1117.5627428036335,938.050430325723),c(69.16500621752918,80.77975495748869,64.89677834819348,1216.850711813278,699.7822782041444,529.5145782295004),c(198.45224398810493,234.35005833820895,146.23407387792932,1961.8613516989583,1383.897788985808,1619.2537190146409),c(104.35850761443449,110.51735705722353,110.32452319192892,596.0085119085443,835.5609291989783,543.4736620141094),c(137.3524151740332,165.55411318210594,118.97742697168806,948.646881454433,1373.4532773708206,920.3689241985517),c(788.9209896472939,648.4572636972032,875.2412173226361,7157.06888050177,6350.263061912236,6796.212591933289),c(118.7780672145554,67.90825554118554,93.88400601038657,1033.0814206414768,470.0030226744253,512.7636776879697),c(180.3666946591397,185.52712951774876,149.69523538983296,1062.881846236904,1284.6749286434292,885.005911944209),c(325.7842872366304,302.70215868685324,255.26066150289438,2523.1027004128377,2329.126090142152,2239.0370390512794),c(217.02659194758274,122.94501166606796,97.34516752229023,486.7402847253112,689.3377665891571,546.2654787710312),c(440.8963647223415,231.24314468599783,151.85846133477276,1430.4204285805063,1603.2325329005398,2020.3447264257388),c(211.16100838143186,183.75175028791384,160.51136511453188,695.3432638933017,1274.230417028442,912.9240795134269),c(54.989845932664544,21.304550758018994,32.8810343630847,307.9377311527479,428.2249762144764,386.20131804084826),c(44.969474007156784,117.61887397656321,122.4385884835917,804.6114910765348,464.7807668669317,534.1676061577034),c(106.31370213648478,70.57132438593793,67.49264948212122,586.0750367100686,584.8926504392848,629.0893758930445),c(57.92263771573999,23.52377479531264,68.35793986009713,730.1104270879667,548.3368597868296,485.7761157043923),c(81.8737706108561,46.60370478316655,21.63225944939783,163.90234077484968,287.2240694121488,331.295588488053),c(105.33610487545964,60.36289381438715,42.3992
2852081974,516.5407103207384,522.2255807493615,398.2991906541761),c(230.7129536019348,28.40606767735866,61.86826202527779,283.10404315655853,297.66858102713604,213.108679111697),c(299.87795981946397,229.0239206487042,215.45730411600238,1822.792698920298,1624.1215561305141,1389.3941393614132),c(159.59275286235533,162.89104433735358,117.67949140472419,859.2456046681514,788.5606269315358,620.713925622279),c(150.79437751312898,105.63506417517752,245.74246734515933,1485.0545421721229,2099.346834612433,1160.465165293826),c(55.47864456317712,16.866102683431706,65.7620687261694,615.8754623054958,402.11369717700836,323.8507438029282),c(15.885955491658645,43.05294632349672,37.207486252964266,198.66950396951478,339.446627487085,197.28838415580682),c(71.85339868534834,89.65665110666328,64.89677834819348,600.9752495077822,637.115208514221,391.7849515546919),c(141.99600216390266,107.85428821247116,148.82994501185706,655.6093630993987,689.3377665891571,723.0805400427448),c(92.38294116687644,3.9946032671285616,41.966583331831785,134.10191517942246,219.33474391473183,276.3898589352577),c(11.486767817045482,8.433051341715853,23.362840205349656,39.733900793902954,198.44572068475736,191.70475064196324),c(132.46442886890748,175.31869894619797,317.5615687171601,1311.2187261987974,877.3389756589273,951.0789085246914),c(113.64568159417338,109.62966744230607,193.82504466660453,1142.34964782471,673.6709991666763,751.9293131976034),c(38.37069249523704,35.951429404157054,109.02658762496506,461.9065967291218,407.33595298450194,481.1230877761893),c(36.41549797318674,67.02056592626809,77.44348882884422,789.7112782788212,386.4469297545275,687.7175277884021),c(152.99397135043557,112.29273628705845,157.05020360262824,1196.9837614163264,966.1173243863187,727.7335679709479),c(31.03871303754843,51.48599766521257,40.23600257587996,233.43666716417985,276.7795577971616,389.9237403834107),c(160.3259508081242,167.32949241194086,156.61755841364027,1465.1875917751715,981.7840918087995,1045.070072674392),c(92.
62734048213272,114.95580513181083,145.80142868894137,1033.0814206414768,1007.8953708462676,671.8972328325119),c(284.7252022735742,177.0940781760329,89.99019930949497,700.3100014925395,835.5609291989783,674.6890495894337),c(99.47052130930875,113.18042590197591,161.3766554925078,615.8754623054958,799.0051385465231,482.9842989474705),c(33.4827061901113,57.25598016217605,39.37071219790405,253.30361756113135,188.00120906977014,161.9253719014641),c(71.60899937009205,38.61449824890943,86.96168298657926,665.5428382978745,459.5585110594381,309.8916600183192),c(160.3259508081242,126.05192531827906,54.08064862349457,561.2413487138792,678.89325497417,403.8828241680196),c(74.5417911531675,41.2775670936618,65.7620687261694,357.60510714512657,203.66797649225097,239.16563550963374),c(123.91045283493743,115.84349474672828,97.34516752229023,695.3432638933017,819.8941617764975,481.1230877761893),c(250.26489882243774,187.7463535550424,216.75523968296625,1251.617875007943,1613.6770445155269,1693.702165865889),c(152.2607734046667,82.5551341873236,291.60285737788274,998.3142574468118,1488.3429051356802,1029.2497777185017),c(133.68642544518892,119.83809801385685,62.30090721426575,754.9441150841561,940.0060453488506,981.7888928508312),c(75.03058978368007,125.1642357033616,116.81420102674828,928.7799310574816,840.783185006472,1206.0648389902153),c(76.49698567521779,22.192240372936453,84.36581185265153,188.73602877103903,203.66797649225097,189.84353947068203),c(201.62943508643667,103.85968494534261,64.46413315920553,357.60510714512657,511.78106913437426,372.2422342562393),c(59.14463429202142,182.42021586553764,111.18981356990484,730.1104270879667,981.7840918087995,867.3244058170376),c(139.06321038082723,125.1642357033616,158.78078435858006,779.7778030803455,1316.0084634883908,1121.379730696921),c(320.89630093150464,359.51429404157057,487.59112798942704,3114.144474722144,3175.131530956118,2321.8609361732924),c(153.48276998094815,143.80571761662821,195.55562542255637,1524.788442966026,1859.1230
674677267,1162.3263764651072),c(75.76378772944892,19.973016335642807,17.73845274850622,104.30148958399525,88.77834872739145,218.69231262554058),c(55.96744319368969,33.73220536686341,37.207486252964266,451.9731215306461,417.78046459948916,536.0288173289846),c(311.60912695176575,146.9126312688393,260.45240377074987,2652.2378779930223,2214.236462377293,2192.5067597692496),c(47.65786647497594,8.87689614917458,37.64013144195222,228.469929564942,88.77834872739145,136.79902108916795),c(67.94300964124774,57.699824969634776,61.00297164730188,625.8089375039715,376.0024181395403,469.9558207485021),c(136.13041859775177,138.03573511966474,125.46710480650741,809.5782286757727,652.7819759367019,823.5859432919294),c(110.71288981109795,142.47418319425202,185.60478607583337,908.91298066053,1185.4520683010505,1515.9564990085346),c(117.0672720077614,131.3780630077838,149.262590200845,819.5117038742484,605.7816736692594,598.3793915669047),c(57.92263771573999,82.11128937986487,48.888906355639094,551.3078735154035,475.22527848191896,371.3116286705987),c(153.97156861146073,73.67823803814902,146.66671906691727,824.4784414734863,987.0063476162932,952.009514110332),c(200.65183782541152,263.1999708230263,180.84568899696583,933.7466686567194,1650.2328351679823,1195.8281775481687),c(61.099828814071714,46.159859975707825,70.52116580503692,432.1061711336946,767.6716037015614,339.6710387588184),c(99.95931993982133,98.08970244837913,92.15342525443475,392.37227033979167,370.78016233204664,375.0340510131611),c(434.0531838951655,405.2303092098196,311.07189088234077,2135.6971676722837,2799.1291128165776,2327.444569687136),c(31.03871303754843,44.82832555333164,41.966583331831785,273.1705679580828,245.44602295219988,162.8559774871047),c(105.58050419071593,87.88127187682835,113.35303951484462,1331.085676595749,574.4481388242976,675.6196551750743),c(102.4033130923842,71.01516919339664,139.74439604310996,536.4076607176899,501.33655751938704,569.5306184120461),c(107.53569871276622,70.1274795784792,109.8918780
0294097,1241.6843998094673,626.6706968992338,671.8972328325119),c(223.38097414424618,102.52815052296641,124.6018144285315,854.2788670689135,1295.1194402584165,855.2265332037098),c(88.22815280751955,38.170653441450696,102.5369097901457,372.5053199428402,642.3374643217146,320.1283214603658),c(38.12629317998075,11.983809801385684,66.62735910414531,253.30361756113135,313.3353484496169,375.0340510131611),c(639.3486087104465,635.5857642809,625.1722980875973,2513.1692252143616,2553.6830898643775,2452.145718162976),c(94.58253500418301,88.76896149174581,51.48477748956683,317.87120635122363,475.22527848191896,383.4095012839265),c(108.26889665853508,77.67284130527759,83.50052147467562,456.939859129884,1034.0066498837357,483.9149045331111),c(12.219965762814343,13.315344223761873,3.0285163229156957,19.866950396951477,41.77804645994892,11.16726702768718),c(198.6966433033612,142.0303383867933,84.79845704163948,1246.6511374087052,1143.6740218411016,1013.4294827626115),c(197.71904604233606,143.36187280916948,226.70607902968925,1038.0481582407147,924.3392779263698,1061.8209732159225),c(116.33407406199254,49.710618435377654,112.05510394788075,442.0396463321704,705.004534011638,671.8972328325119),c(177.43390287606425,107.85428821247116,145.80142868894137,809.5782286757727,642.3374643217146,566.7388016551243),c(161.30354806914931,63.469807466598255,60.57032645831392,749.9773774849183,532.6700923643486,591.8651524674204),c(180.611093974396,269.41379812744856,189.93123796571294,1485.0545421721229,1394.3423006007952,1240.4972456589176),c(58.16703703099627,19.085326720725348,48.02361597766318,188.73602877103903,381.22467394703386,463.4415816490179),c(60.855429498815425,137.591890312206,85.6637474196154,501.6404975230248,297.66858102713604,598.3793915669047),c(72.5865966311172,120.7257876287743,179.115108241014,1201.9504990155644,1122.784998611127,905.4792348283021),c(47.16906784446336,47.49139439808401,89.557554120507,451.9731215306461,673.6709991666763,396.43797948289483),c(101.18131651610
275,53.705221702506215,12.979355669638696,387.4055327405538,250.66827875969352,265.2225919075705),c(162.76994396068704,149.5757001135917,205.0738195802914,1360.8861021911762,1598.0102770930462,1508.5116543234099),c(34.94910208164902,37.72680863399197,42.39922852081974,332.77141914893724,287.2240694121488,209.3862567691346),c(167.4135309505565,89.65665110666328,124.16916923954354,854.2788670689135,746.7825804715869,462.5109760633773),c(326.273085867143,378.5996207622959,401.06209019183575,1907.2272381073417,2642.461438591769,2893.25276575662),c(127.82084187903803,84.77435822461725,59.705036080338004,908.91298066053,819.8941617764975,559.2939569699995),c(225.58056798155278,35.0637397892396,40.23600257587996,432.1061711336946,553.5591155943232,436.45401966544057),c(79.9185760888058,68.795945156103,58.40710051337414,541.3743983169278,224.55699972222544,211.24746794041582),c(18.329948644221513,33.73220536686341,42.39922852081974,412.23922073674316,470.0030226744253,470.8864263341427),c(166.19153437427505,58.14366977709351,124.6018144285315,918.8464558590058,1028.784394076242,731.4559903135103),c(129.77603640108833,154.90183780309644,138.01381528715814,610.9087247062579,825.1164175839912,537.8900285002658),c(56.456241824202266,62.5821178516808,22.064904638385784,442.0396463321704,799.0051385465231,559.2939569699995),c(185.74347959477802,217.92780046223598,196.85356098952025,1147.3163854239478,767.6716037015614,875.6998560878029),c(301.3443557110017,219.25933488461214,307.6107293704371,2011.528727691337,1974.0126952325863,1946.8268851601315),c(178.16710082183312,298.2637106122659,324.9165369299554,1713.524471737065,1598.0102770930462,1501.066809638285),c(73.80859320739863,59.475204199469694,39.803357386892,635.7424127024473,564.0036272093104,390.8543459690513),c(62.566224705609436,120.28194282131558,75.7129080728924,983.4140446490982,710.2267898191316,523.9309447156568),c(28.10592125447299,6.657672111880936,46.72568041069931,243.3701423626556,282.0018136046552,181.46808919
991665),c(43.74747743087535,17.75379229834916,38.50542181992813,288.0707807557964,235.00151133721266,258.7083528080863),c(9.042774664482614,21.748395565477725,22.064904638385784,153.96886557637396,104.44511614987229,93.06055856405983),c(24.1955322103724,25.299154025147555,60.57032645831392,233.43666716417985,329.0021158720977,167.50900541530768),c(160.57035012338048,150.46338972850916,127.19768556245923,1683.7240461416377,1164.563045071076,1151.15910943742),c(136.61921722826435,168.66102683431706,160.07871992554394,1216.850711813278,1206.341091531025,1296.3335807973533),c(420.3668222408134,424.75948073800373,371.64221734065467,2910.5082331533913,2381.3486482170883,1986.8429253426773),c(88.47255212277584,108.29813301992989,132.82207301930268,675.4763134963503,1086.2292079586719,689.5787389596833),c(210.1834111204067,154.01414818817898,143.20555755501363,1266.5180878056567,987.0063476162932,984.580709607753),c(48.879863051257374,37.28296382653324,28.98722766219309,198.66950396951478,485.6697900969062,328.50377173113117),c(66.47661374971003,65.6890315038919,109.89187800294097,715.2102142902531,856.4499524289528,626.2975591361226),c(92.87173979738901,106.52275379009498,48.45626116665113,481.7735471260733,543.1146039793359,466.23339840593974),c(95.56013226520817,83.8866686096998,89.557554120507,471.8400719275976,621.4484410917402,519.2779167874538),c(76.00818704470521,70.1274795784792,62.30090721426575,456.939859129884,569.225883016804,482.0536933618299),c(104.84730624494706,90.10049591412199,180.84568899696583,531.440923118452,699.7822782041444,491.3597492182359),c(68.43180827176032,105.63506417517752,115.94891064877235,1539.6886557637395,1509.2319283656548,964.1073867236598),c(217.75978989335158,295.60064176751354,214.59201373802645,1966.8280892981963,1624.1215561305141,1619.2537190146409),c(69.16500621752918,44.82832555333164,24.660775772313524,412.23922073674316,255.89053456718713,358.28315047163034),c(42.03668222408134,23.52377479531264,22.930195016361697,193.702766
3702769,188.00120906977014,179.60687802863546),c(72.34219731586091,204.6124562384741,57.10916494641027,705.2767390917775,553.5591155943232,893.3813622149743),c(48.879863051257374,46.159859975707825,74.41497250592853,397.33900793902956,741.5603246640933,463.4415816490179),c(99.22612199405246,106.52275379009498,65.7620687261694,223.5031919657041,376.0024181395403,365.7279951567551),c(111.69048707212309,86.10589264699344,109.45923281395301,834.411916671962,626.6706968992338,630.9505870643256),c(117.55607063827398,128.71499416303143,131.95678264132675,1698.6242589393512,1096.6737195736591,997.6091878067214),c(120.24446310609314,84.77435822461725,82.20258590771175,809.5782286757727,731.115813049106,469.9558207485021),c(62.07742607509686,77.22899649781886,64.46413315920553,427.13943353445677,255.89053456718713,294.07136506242904),c(86.27295828546926,83.44282380224107,94.74929638836248,958.5803566529088,506.5588133268806,589.0733357104987),c(55.47864456317712,94.53894398870929,67.49264948212122,516.5407103207384,464.7807668669317,763.0965802252906),c(86.27295828546926,56.36829054725859,107.72865205800119,923.8131934582436,772.893859509055,704.4684283299329),c(128.79843914006318,168.21718202685832,178.24981786303812,1306.2519885995596,715.4490456266252,1338.2108321511803),c(31.527511668061006,52.37368728013003,48.02361597766318,541.3743983169278,360.3356507170594,303.377420918835),c(132.95322749942005,55.48060093234113,92.5860704434227,715.2102142902531,543.1146039793359,648.632093191497),c(46.435869898694506,27.962222869899932,14.277291236602567,491.7070223245491,161.88993003230206,239.16563550963374),c(53.27905072587053,65.24518669643317,54.945939001470485,377.47205754207806,276.7795577971616,259.6389583937269),c(24.1955322103724,47.04754959062528,13.84464604761461,198.66950396951478,292.4463252196424,370.3810230849581),c(51.07945688856395,25.299154025147555,66.62735910414531,531.440923118452,684.1155107816635,883.1447007729278),c(9.2871739797389,55.924445739799864,21.632
25944939783,422.1726959352189,396.8914413695147,318.2671102890846),c(70.14260347855434,53.705221702506215,85.6637474196154,422.1726959352189,537.8923481718423,322.9201382172876),c(105.58050419071593,151.35107934342662,233.19575686450858,749.9773774849183,887.7834872739145,754.7211299545252),c(116.08967474673626,80.77975495748869,136.28323453120632,422.1726959352189,579.6703946317913,561.1551681412808),c(77.47458293624294,79.89206534257123,71.81910137200079,630.7756751032094,396.8914413695147,527.6533670582193),c(21.99593837306582,17.75379229834916,12.114065291662783,144.0353903778982,88.77834872739145,119.11751496199658),c(214.0938001645073,110.51735705722353,17.73845274850622,392.37227033979167,365.557906524553,355.49133371470856),c(51.07945688856395,50.154463242836385,76.14555326188035,168.86907837408756,631.8929527067273,297.79378740499146),c(21.01834111204067,25.742998832606286,15.14258161457848,253.30361756113135,219.33474391473183,158.2029495589017),c(45.94707126818193,52.81753208758876,45.42774484373544,526.4741855192142,329.0021158720977,201.9414120840098),c(37.88189386472446,23.52377479531264,24.228130583325566,139.06865277866035,224.55699972222544,222.41473496810298),c(18.329948644221513,3.5507584596698325,5.6243874568434356,9.933475198475739,167.11218583979567,78.17086919381025),c(116.57847337724883,100.7527712931315,85.23110223062744,680.4430510955881,637.115208514221,682.1338942745585),c(37.63749454946818,92.31971995141565,62.30090721426575,496.6737599237869,396.8914413695147,308.9610544326786),c(150.30557888261643,97.20201283346167,32.8810343630847,625.8089375039715,329.0021158720977,229.85957965322777),c(13.197563023839491,10.652275379009497,35.044260308024484,153.96886557637396,203.66797649225097,154.4805272163393),c(204.3178275542558,258.31767794098033,373.8054432855945,1976.761564496672,2527.5718108269093,1769.0812183027772),c(130.50923434685717,116.28733955418701,247.0404029121232,1261.5513502064189,1138.451766033608,1325.182353952212),c(149.08358
2306335,86.54973745445217,63.598842781229614,546.3411359161656,464.7807668669317,587.2121245392175),c(94.09373637367044,114.5119603243521,80.90465034074788,591.0417743093064,381.22467394703386,325.7119549742094),c(37.39309523421189,50.154463242836385,51.052132300578876,670.5095758971123,626.6706968992338,642.1178540920127),c(90.67214596008242,39.50218786382688,83.50052147467562,620.8421999047337,511.78106913437426,618.8527144509978),c(162.03674601491818,201.94938739372174,176.51923710708627,2021.4622028898127,1733.78892808788,1145.5754759235765),c(183.54388575747143,95.42663360362675,189.49859277672496,1127.4494350269963,908.6725105038889,788.2229310375867),c(58.655835661508846,102.97199533042514,154.88697765768845,705.2767390917775,971.3395801938124,732.3865958991508),c(78.45218019726808,84.33051341715853,77.01084363985626,973.4805694506224,621.4484410917402,536.9594229146252),c(20.77394179678438,16.422257875972974,35.909550686000394,188.73602877103903,135.77865099483398,108.88085351995),c(35.68230002741788,16.866102683431706,46.72568041069931,218.53645436646624,308.11309264212326,321.989532631647),c(45.45827263766935,26.186843640065014,55.37858419045844,526.4741855192142,454.33625525194446,490.4291436325953),c(92.87173979738901,94.98278879616802,70.52116580503692,998.3142574468118,1086.2292079586719,693.3011613022456),c(151.52757545889784,146.02494165392187,136.71587972019427,1281.4183006033702,1185.4520683010505,872.9080393308811),c(70.87580142432319,73.67823803814902,41.966583331831785,372.5053199428402,334.22437167959134,389.9237403834107),c(79.9185760888058,44.38448074587291,24.660775772313524,307.9377311527479,235.00151133721266,293.14075947678845),c(31.283112352804718,25.299154025147555,10.816129724698914,79.46780158780591,31.33353484496169,19.542717298452562),c(10.99796918653291,7.545361726798394,16.007871992554392,153.96886557637396,292.4463252196424,135.86841550352736),c(126.35444598750031,153.12645857326152,145.3687834999534,993.3475198475738,1075.784696
3436846,835.6838159052572),c(19.063146589990374,17.75379229834916,34.61161511903653,223.5031919657041,208.89023229974458,124.70114847584017),c(114.13448022468596,99.86508167821404,202.47794844636368,918.8464558590058,1263.7859054134547,1107.420646912312),c(16.37475412217122,62.5821178516808,115.5162654597844,1544.6553933629773,402.11369717700836,230.79018523886836),c(59.87783223779028,65.6890315038919,64.89677834819348,377.47205754207806,579.6703946317913,406.67464092494146),c(160.0815514928679,82.5551341873236,122.4385884835917,983.4140446490982,1091.4514637661655,1039.4864391605483),c(75.27498909893636,87.88127187682835,33.74632474106061,442.0396463321704,449.1139994444509,497.8739883177201),c(167.90232958106907,146.9126312688393,222.81227232879763,1703.5909965385893,1060.1179289212039,1088.8085351995),c(183.29948644221514,255.65460909622794,294.1987285118105,1415.5202157827928,1937.456904580131,1551.3195112628773),c(114.86767817045482,120.7257876287743,80.03935996277197,1057.9151086376662,731.115813049106,516.486100030532),c(7.087580142432319,43.05294632349672,35.044260308024484,54.63411359161656,67.88932549741699,86.54631946457563),c(24.68433084088497,27.5183780624412,62.733552403253704,94.36801438551952,41.77804645994892,88.40753063585683),c(87.98375349226328,90.98818552903946,166.13575257137532,839.3786542711999,673.6709991666763,716.5663009432607),c(59.63343292253399,12.871499416303143,42.39922852081974,387.4055327405538,245.44602295219988,233.58200199579016),c(77.47458293624294,159.34028587768373,157.91549398060414,581.1082991108307,866.89446404394,789.1535366232273),c(32.26070961382987,51.48599766521257,45.860390032723394,531.440923118452,396.8914413695147,428.0785693946752),c(169.85752410311937,94.98278879616802,144.5034931219775,1162.2165982216613,1786.0114861628163,1037.6252279892672),c(78.69657951252437,139.36726954204093,184.73949569785745,943.6801438551952,1117.5627428036335,635.6036149925286),c(48.635463736001086,97.6458576409204,37.64013144195222,17
8.80255357256328,193.22346487726375,404.8134297536602),c(24.439931525628687,8.87689614917458,12.114065291662783,134.10191517942246,235.00151133721266,93.06055856405983),c(113.64568159417338,42.165256708579264,95.18194157735044,903.9462430612922,323.7798600646041,539.751239671547),c(14.419559600120925,66.57672111880936,68.7905850490851,511.5739727215005,438.66948782946366,310.8222656039598),c(167.65793026581278,134.92882146745364,232.33046648653269,1107.5824846300447,1331.6752309108717,965.0379923093004),c(50.835057573307665,27.5183780624412,19.901678693446,213.56971676722839,67.88932549741699,140.52144343173035),c(203.09583097797437,154.90183780309644,131.09149226335083,998.3142574468118,1013.1176266537612,907.3404459995833),c(195.76385152028578,164.22257875972974,202.47794844636368,1415.5202157827928,778.1161153165486,662.591176976106),c(204.8066261847684,184.6394399028313,200.74736769041183,1187.0502862178507,1718.1221606653992,1721.6203334351067),c(77.71898225149923,51.04215285775384,105.99807130204935,610.9087247062579,548.3368597868296,576.975463097171),c(14.663958915377211,21.304550758018994,42.39922852081974,392.37227033979167,287.2240694121488,149.82749928813632),c(52.545852780101676,92.76356475887438,134.98529896424245,397.33900793902956,417.78046459948916,555.5715346274371),c(32.505108929086155,2.219224037293645,33.31367955207266,243.3701423626556,135.77865099483398,141.45204901737094),c(42.03668222408134,19.973016335642807,11.248774913686871,39.733900793902954,135.77865099483398,108.88085351995),c(53.76784935638311,58.14366977709351,49.32155154462705,526.4741855192142,433.44723202197,308.03044884703803),c(128.3096405095506,90.54434072158072,96.47987714431432,571.174823912355,1065.3401847286973,503.45762183156364),c(136.86361654352064,245.44617852467718,214.1593685490385,1946.9611389012448,1373.4532773708206,1204.2036278189341),c(45.94707126818193,138.9234247345822,41.101292953855875,665.5428382978745,887.7834872739145,638.3954317494504),c(32.9939075595987
25,35.0637397892396,68.7905850490851,293.0375183550343,470.0030226744253,364.79738957111454),c(28.83911920024185,37.28296382653324,59.27239089135005,382.43879514131595,496.1143017118934,267.0838030788517),c(61.344228129328,32.400670944487224,50.61948711159092,481.7735471260733,255.89053456718713,315.4752935321628),c(154.7047665572296,219.25933488461214,88.25961855354313,1112.5492222292828,2068.0132997674714,957.5931476241756),c(59.63343292253399,125.60808051082033,88.25961855354313,908.91298066053,663.226487551689,444.82946993620595),c(36.904296603699315,15.090723453596787,11.681420102674828,521.5074479199762,292.4463252196424,541.6124508428281),c(42.525480854593916,15.090723453596787,15.14258161457848,278.1373055573207,292.4463252196424,139.59083784608973),c(83.5845658176501,85.66204783953471,89.99019930949497,496.6737599237869,553.5591155943232,574.1836463402491),c(159.10395423184275,148.2441656912155,167.86633332732714,2100.9300044776187,1519.6764399806418,1386.6023226044913),c(42.525480854593916,13.7591890312206,23.362840205349656,144.0353903778982,203.66797649225097,73.51784126560726),c(20.28514316627181,41.72141190112053,52.7827130565307,288.0707807557964,198.44572068475736,162.8559774871047),c(68.9206069022729,15.978413068514246,20.33432388243396,163.90234077484968,261.11279037468074,26.05695639793675),c(84.31776376341897,31.069136522111034,64.89677834819348,625.8089375039715,308.11309264212326,396.43797948289483),c(96.29333021097702,49.710618435377654,134.5526537752545,645.675887900923,788.5606269315358,726.8029623853072),c(113.64568159417338,97.20201283346167,75.28026288390444,774.8110654811076,564.0036272093104,502.52701624592305),c(73.56419389214234,75.45361726798394,97.77781271127819,859.2456046681514,804.2273943540167,497.8739883177201),c(169.85752410311937,87.88127187682835,138.01381528715814,814.5449662750106,631.8929527067273,730.5253847278697),c(182.32188918119,110.0735122497648,68.7905850490851,372.5053199428402,893.0057430814081,494.15156597515767
),c(66.72101306496631,69.68363477102046,53.21535824551866,471.8400719275976,637.115208514221,473.67824309106453),c(19.551945220502947,24.4114644102301,56.67651975742231,655.6093630993987,198.44572068475736,220.5535237968218),c(30.305515091779572,22.63608518039518,80.90465034074788,451.9731215306461,407.33595298450194,382.4788956982859),c(30.305515091779572,25.742998832606286,67.06000429313326,402.3057455382674,464.7807668669317,270.80622542141407),c(120.977661051862,76.34130688290139,89.99019930949497,1475.1210669736472,981.7840918087995,615.1302921084355),c(45.70267195292564,27.074533254982473,54.945939001470485,437.0729087329325,658.0042317441954,446.69068110748714),c(60.611030183559144,99.42123687075531,97.34516752229023,804.6114910765348,579.6703946317913,584.4203077822957),c(31.03871303754843,21.748395565477725,8.652903779759132,129.1351775801846,99.22286034237868,314.54468794652223),c(101.67011514661533,189.52173278487732,103.83484535710957,486.7402847253112,940.0060453488506,723.0805400427448),c(5.132385620382024,31.512981329569765,64.89677834819348,188.73602877103903,156.66767422480845,50.252701624592305),c(36.904296603699315,54.59291131742368,53.21535824551866,298.00425595427214,198.44572068475736,227.06776289630596),c(21.507139742553242,26.630688447523745,46.72568041069931,407.2724831375053,193.22346487726375,294.07136506242904),c(68.43180827176032,67.46441073372682,37.207486252964266,531.440923118452,720.6713014341188,600.2406027381859),c(74.78619046842378,89.21280629920454,99.94103865621797,1087.7155342330934,511.78106913437426,355.49133371470856),c(54.745446617408255,43.05294632349672,25.958711339277393,253.30361756113135,349.8911391020722,337.80982758753714),c(137.3524151740332,93.2074095663331,108.5939424359771,794.6780158780591,485.6697900969062,586.2815189535769),c(36.65989728844303,64.35749708151572,67.92529467110919,109.26822718323312,391.6691855620211,132.14599316096496),c(160.81474943863677,180.64483663570272,93.01871563241066,963.5470942521466,
908.6725105038889,772.4026360816965),c(43.99187674613164,114.06811551689337,13.84464604761461,357.60510714512657,266.3350461821743,124.70114847584017),c(80.89617334983095,128.2711493555727,100.37368384520592,854.2788670689135,783.3383711240422,768.6802137391342),c(52.30145346484539,93.65125437379183,71.81910137200079,412.23922073674316,245.44602295219988,283.83470362038247),c(28.350320569729277,47.935239205542736,14.277291236602567,367.53858234360234,266.3350461821743,157.27234397326112),c(26.395126047678982,79.4482205351125,55.811229379446395,307.9377311527479,595.3371620542721,390.8543459690513),c(4.887986305125737,0.0,0.0,9.933475198475739,57.44481388242976,29.779378740499144),c(59.87783223779028,93.65125437379183,60.13768126932596,397.33900793902956,517.0033249418678,545.3348731853906),c(68.9206069022729,60.36289381438715,164.83781700441145,720.1769518894911,736.3380688565996,376.8952621844423),c(40.814685647799905,19.52917152818408,59.705036080338004,407.2724831375053,344.66888329457856,313.61408236088164),c(214.0938001645073,216.15242123240105,200.3147225014239,1077.7820590346175,1336.8974867183654,1174.424249078435),c(23.706733579859826,3.9946032671285616,7.354968212795262,183.76929117180117,78.33383711240423,42.80785693946752),c(27.617122623960416,51.04215285775384,40.23600257587996,476.8068095268354,292.4463252196424,494.15156597515767),c(61.099828814071714,49.26677362791892,68.7905850490851,551.3078735154035,323.7798600646041,763.0965802252906),c(55.96744319368969,47.935239205542736,38.50542181992813,586.0750367100686,564.0036272093104,229.85957965322777),c(79.67417677354952,76.78515169036012,76.14555326188035,586.0750367100686,569.225883016804,547.1960843566718),c(15.885955491658645,21.748395565477725,20.33432388243396,144.0353903778982,130.55639518734037,71.65663009432606),c(8.309576718713753,20.860705950560266,6.922323023807305,178.80255357256328,78.33383711240423,30.709984326139743),c(23.706733579859826,11.096120186468227,48.45626116665113,332.77141914
893724,412.5582087919956,157.27234397326112),c(50.10185962753881,89.21280629920454,64.46413315920553,700.3100014925395,621.4484410917402,680.2726831032774),c(20.28514316627181,29.29375729227612,15.14258161457848,129.1351775801846,250.66827875969352,221.48412938246238),c(43.74747743087535,29.29375729227612,33.31367955207266,203.63624156875264,229.77925552971905,271.7368310070547),c(92.38294116687644,53.26137689504749,71.38645618301284,556.2746111146414,689.3377665891571,482.0536933618299),c(32.505108929086155,43.940635938414175,24.660775772313524,188.73602877103903,490.89204590439977,281.0428868634607),c(50.346258942795096,138.9234247345822,70.08852061604897,740.0439022864425,1039.2289056912293,907.3404459995833),c(24.439931525628687,0.0,8.652903779759132,213.56971676722839,135.77865099483398,112.60327586251239),c(63.29942265137829,103.41584013788388,98.21045790026614,625.8089375039715,684.1155107816635,308.9610544326786),c(22.729136318834676,14.20303383867933,21.63225944939783,268.2038303588449,308.11309264212326,158.2029495589017),c(10.753569871276621,116.73118436164575,56.67651975742231,451.9731215306461,344.66888329457856,330.3649829024124),c(33.72710550536759,83.44282380224107,125.89974999549536,730.1104270879667,731.115813049106,777.0556640098995),c(51.56825551907653,27.962222869899932,0.0,14.900212797713607,57.44481388242976,55.83633513843589),c(73.31979457688605,106.9665985975537,99.50839346723001,908.91298066053,757.2270920865741,524.8615503012974),c(44.48067537664421,98.53354725583785,75.7129080728924,258.2703551603692,214.1124881072382,314.54468794652223),c(43.014279485106485,80.33591015002996,15.14258161457848,377.47205754207806,553.5591155943232,203.80262325529102),c(58.900234976765134,30.181446907193575,72.6843917499767,307.9377311527479,266.3350461821743,327.5731661454906),c(29.57231714601071,54.59291131742368,44.995099654747484,769.8443278818697,548.3368597868296,222.41473496810298),c(38.37069249523704,35.951429404157054,41.101292953855875,476.8068095
268354,344.66888329457856,331.295588488053),c(52.30145346484539,22.192240372936453,21.19961426040987,129.1351775801846,167.11218583979567,218.69231262554058),c(29.57231714601071,26.186843640065014,64.03148797021757,258.2703551603692,329.0021158720977,274.5286477639765),c(90.9165452753387,67.02056592626809,74.84761769491648,650.6426255001609,480.44753428941254,332.2261940736936),c(13.441962339095777,41.72141190112053,23.79548539433761,298.00425595427214,114.88962776485953,164.71718865838588),c(66.72101306496631,40.83372228620308,83.50052147467562,730.1104270879667,198.44572068475736,357.35254488598974),c(30.549914407035857,30.625291714652306,30.28516322915696,104.30148958399525,224.55699972222544,117.25630379071538),c(22.484737003578392,20.41686114310154,92.15342525443475,481.7735471260733,151.44541841731484,121.90933171891837),c(28.350320569729277,14.20303383867933,22.497549827373742,89.40127678628164,167.11218583979567,191.70475064196324),c(26.883924678191555,85.66204783953471,95.6145867663384,511.5739727215005,381.22467394703386,474.6088486767051),c(14.663958915377211,19.52917152818408,20.766969071421915,34.76716319466509,62.66706968992338,25.126350812296153),c(40.814685647799905,27.5183780624412,25.958711339277393,849.3121294696756,313.3353484496169,214.0392846973376),c(75.27498909893636,157.56490664784883,127.19768556245923,923.8131934582436,1096.6737195736591,645.8402764345752),c(72.34219731586091,50.59830805029511,54.945939001470485,307.9377311527479,261.11279037468074,227.99836848194659),c(80.89617334983095,110.96120186468227,88.25961855354313,640.7091503016851,329.0021158720977,509.04125534540725),c(22.484737003578392,59.919049006928425,41.101292953855875,466.8733343283597,381.22467394703386,214.0392846973376),c(65.74341580394116,15.534568261055517,75.28026288390444,208.6029791679905,214.1124881072382,286.62652037730425),c(36.41549797318674,90.98818552903946,40.66864776486792,412.23922073674316,725.8935572416125,404.8134297536602),c(30.549914407035857,50.598
30805029511,47.59097078867522,198.66950396951478,365.557906524553,365.7279951567551),c(70.6314021090669,44.82832555333164,64.89677834819348,407.2724831375053,229.77925552971905,171.23142775787008),c(0.7331979457688605,15.978413068514246,0.43264518898795656,9.933475198475739,36.5557906524553,33.50180108306154),c(76.74138499047407,96.3143232185442,50.61948711159092,600.9752495077822,1274.230417028442,434.5928084941594),c(22.240337688322104,39.05834305636816,58.839745702362094,367.53858234360234,261.11279037468074,357.35254488598974),c(8.55397603397004,15.090723453596787,16.44051718154235,129.1351775801846,120.11188357235314,79.10147477945085),c(23.95113289511611,36.83911901907451,45.860390032723394,427.13943353445677,527.4478365568551,385.2707124552077),c(52.790252095357964,78.56053092019505,97.34516752229023,740.0439022864425,339.446627487085,368.5198119136769),c(104.60290692969077,16.866102683431706,16.44051718154235,119.20170238170886,156.66767422480845,180.53748361427606),c(63.05502333612201,19.52917152818408,42.8318737098077,332.77141914893724,428.2249762144764,284.76530920602306),c(52.545852780101676,119.83809801385685,123.73652405055557,620.8421999047337,464.7807668669317,408.53585209622264),c(26.150726732422694,89.65665110666328,66.19471391515735,456.939859129884,631.8929527067273,533.2370005720628),c(129.28723777057576,93.65125437379183,97.77781271127819,630.7756751032094,631.8929527067273,560.2245625556402),c(76.49698567521779,19.085326720725348,53.64800343450661,228.469929564942,517.0033249418678,308.03044884703803),c(45.70267195292564,62.13827304422207,89.557554120507,243.3701423626556,407.33595298450194,270.80622542141407),c(92.87173979738901,79.89206534257123,54.08064862349457,705.2767390917775,668.4487433591827,464.37218723465855),c(119.75566447558056,96.75816802600293,83.93316666366357,516.5407103207384,814.6719059690039,563.016379312562),c(15.885955491658645,18.64148191326662,77.87613401783219,317.87120635122363,459.5585110594381,278.25107010653886),c
(51.56825551907653,79.4482205351125,85.23110223062744,526.4741855192142,830.3386733914847,597.4487859812641),c(49.61306099702623,32.84451575194595,10.816129724698914,203.63624156875264,235.00151133721266,257.7777472224457),c(24.1955322103724,44.38448074587291,25.958711339277393,288.0707807557964,156.66767422480845,233.58200199579016),c(43.99187674613164,10.652275379009497,51.052132300578876,173.83581597332542,355.1133949095658,130.28478198968375),c(21.262740427296958,12.871499416303143,36.34219587498835,173.83581597332542,219.33474391473183,371.3116286705987),c(38.37069249523704,80.77975495748869,62.30090721426575,352.63836954588874,522.2255807493615,248.47169136603972),c(2.4439931525628684,0.8876896149174581,12.114065291662783,248.33687996189346,172.33444164728928,61.419968652279486),c(11.731167132301769,55.0367561248824,5.191742267855479,144.0353903778982,360.3356507170594,507.18004417412607),c(10.753569871276621,7.101516919339665,5.191742267855479,139.06865277866035,235.00151133721266,101.43600883482522),c(20.28514316627181,17.309947490890433,44.995099654747484,208.6029791679905,146.2231626098212,222.41473496810298),c(37.88189386472446,4.88229288204602,9.085548968747087,268.2038303588449,120.11188357235314,87.47692505021624),c(14.908358230633498,4.43844807458729,5.191742267855479,49.667375992378695,41.77804645994892,59.55875748099829),c(65.25461717342858,81.22359976494742,51.48477748956683,551.3078735154035,464.7807668669317,310.8222656039598),c(18.81874727473409,60.36289381438715,31.58309879612083,387.4055327405538,412.5582087919956,283.83470362038247),c(20.28514316627181,26.186843640065014,3.0285163229156957,178.80255357256328,172.33444164728928,65.14239099484188),c(13.93076096960835,13.7591890312206,44.995099654747484,84.43453918704378,36.5557906524553,229.85957965322777),c(95.07133363469559,38.61449824890943,30.717808418144916,243.3701423626556,470.0030226744253,359.2137560572709),c(38.615091810493325,9.32074095663331,35.47690549701244,54.63411359161656,297.6
6858102713604,62.350574237920085),c(28.350320569729277,27.962222869899932,0.0,104.30148958399525,78.33383711240423,54.9057295527953),c(49.85746031228252,56.36829054725859,40.66864776486792,491.7070223245491,339.446627487085,421.564330295191),c(26.150726732422694,7.545361726798394,71.38645618301284,233.43666716417985,668.4487433591827,464.37218723465855),c(21.262740427296958,11.539964993926956,17.73845274850622,129.1351775801846,73.1115813049106,189.84353947068203),c(40.32588701728733,34.176050174322135,70.08852061604897,412.23922073674316,412.5582087919956,188.91293388504144),c(5.621184250894598,1.3315344223761871,11.248774913686871,228.469929564942,141.0009068023276,117.25630379071538),c(45.94707126818193,43.940635938414175,72.6843917499767,322.8379439504615,428.2249762144764,329.43437731677176),c(103.13651103815306,57.699824969634776,96.47987714431432,680.4430510955881,699.7822782041444,662.591176976106),c(69.40940553278547,125.60808051082033,89.99019930949497,630.7756751032094,412.5582087919956,447.6212866931278),c(24.92873015614126,20.41686114310154,25.526066150289438,198.66950396951478,135.77865099483398,151.6887104594175),c(30.549914407035857,47.04754959062528,105.99807130204935,1008.2477326452874,621.4484410917402,639.326037335091),c(12.953163708583203,8.87689614917458,50.61948711159092,317.87120635122363,235.00151133721266,155.4111328019799),c(26.883924678191555,72.79054842323157,36.34219587498835,64.5675887900923,302.8908368346297,137.72962667480854),c(29.083518515498135,45.27217036079036,28.121937284217175,218.53645436646624,177.5566974547829,98.64419207790341),c(22.973535634090965,61.694428236763336,50.61948711159092,432.1061711336946,255.89053456718713,390.8543459690513),c(42.03668222408134,59.03135939201096,76.14555326188035,625.8089375039715,772.893859509055,727.7335679709479),c(25.66192810191012,27.5183780624412,18.603743126482133,193.7027663702769,88.77834872739145,92.12995297841923),c(37.39309523421189,52.37368728013003,87.39432817556722,382.4387951
4131595,318.5576042571105,288.4877315485855),c(29.816716461266996,12.427654608844414,22.064904638385784,84.43453918704378,109.66737195736592,119.11751496199658),c(63.29942265137829,50.154463242836385,87.82697336455519,625.8089375039715,762.4493478940677,548.1266899423124),c(60.12223155304657,44.38448074587291,50.18684192260296,779.7778030803455,412.5582087919956,403.8828241680196),c(17.84115001370894,24.4114644102301,10.383484535710958,139.06865277866035,99.22286034237868,53.0445183815141),c(17.596750698452652,35.50758459669832,48.02361597766318,198.66950396951478,167.11218583979567,174.02324451479188),c(60.366630868302856,116.28733955418701,83.50052147467562,645.675887900923,600.5594178617657,316.4058991178034),c(30.305515091779572,55.48060093234113,32.8810343630847,208.6029791679905,423.0027204069828,310.8222656039598),c(21.75153905780953,66.13287631135063,88.25961855354313,298.00425595427214,537.8923481718423,589.0733357104987),c(21.99593837306582,15.534568261055517,124.16916923954354,283.10404315655853,276.7795577971616,350.83830578650554),c(34.704702766392735,8.433051341715853,16.873162370530306,109.26822718323312,276.7795577971616,227.99836848194659),c(18.81874727473409,31.069136522111034,3.0285163229156957,168.86907837408756,219.33474391473183,43.73846252510812),c(70.14260347855434,60.36289381438715,68.7905850490851,352.63836954588874,344.66888329457856,242.88805785219614),c(25.66192810191012,35.0637397892396,32.44838917409674,332.77141914893724,548.3368597868296,289.4183371342261),c(24.1955322103724,34.176050174322135,4.326451889879566,104.30148958399525,151.44541841731484,102.36661442046581),c(27.617122623960416,10.652275379009497,2.163225944939783,14.900212797713607,73.1115813049106,67.93420775176368),c(19.551945220502947,68.35210034864427,83.06787628568766,317.87120635122363,438.66948782946366,271.7368310070547),c(50.59065825805138,23.52377479531264,18.171097937494174,238.4034047634177,208.89023229974458,118.18690937635598),c(38.37069249523704,37.28296382
653324,44.56245446575953,451.9731215306461,475.22527848191896,282.9040980347419),c(10.264771240764048,7.101516919339665,28.98722766219309,119.20170238170886,88.77834872739145,205.6638344265722),c(33.4827061901113,13.315344223761873,29.852518040169002,317.87120635122363,412.5582087919956,511.833072102329),c(9.042774664482614,16.422257875972974,3.4611615119036525,119.20170238170886,208.89023229974458,55.83633513843589),c(1.9551945220502949,6.657672111880936,6.0570326458313914,203.63624156875264,287.2240694121488,72.58723567996667),c(46.68026921395079,29.29375729227612,99.50839346723001,213.56971676722839,339.446627487085,313.61408236088164),c(14.175160284864639,11.096120186468227,50.61948711159092,432.1061711336946,391.6691855620211,304.3080265044756),c(3.177191098331729,3.9946032671285616,3.8938067008916093,94.36801438551952,120.11188357235314,93.06055856405983),c(37.1486959189556,70.57132438593793,70.08852061604897,620.8421999047337,381.22467394703386,174.95385010043248),c(31.283112352804718,21.748395565477725,23.362840205349656,322.8379439504615,177.5566974547829,151.6887104594175),c(34.704702766392735,10.652275379009497,19.036388315470088,268.2038303588449,318.5576042571105,178.67627244299487),c(28.350320569729277,79.89206534257123,6.922323023807305,173.83581597332542,485.6697900969062,201.9414120840098),c(32.26070961382987,94.98278879616802,33.31367955207266,263.2370927596071,323.7798600646041,199.14959532708804),c(15.152757545889786,21.304550758018994,42.39922852081974,94.36801438551952,167.11218583979567,220.5535237968218),c(13.197563023839491,4.88229288204602,5.6243874568434356,213.56971676722839,125.33413937984676,80.03208036509145),c(58.16703703099627,72.34670361577284,30.28516322915696,412.23922073674316,151.44541841731484,127.49296523276196),c(12.953163708583203,8.433051341715853,25.958711339277393,248.33687996189346,125.33413937984676,160.9947663158235),c(48.635463736001086,90.54434072158072,54.08064862349457,322.8379439504615,757.2270920865741,498.804593
9033607),c(25.173129471397548,27.962222869899932,18.603743126482133,203.63624156875264,151.44541841731484,159.1335551445423),c(59.3890336072777,115.39964993926955,81.33729552973584,725.1436894887289,475.22527848191896,416.911302366988),c(14.908358230633498,31.956826137028493,66.19471391515735,630.7756751032094,631.8929527067273,174.95385010043248),c(20.28514316627181,7.101516919339665,0.0,89.40127678628164,156.66767422480845,78.17086919381025),c(42.03668222408134,80.77975495748869,58.40710051337414,302.97099355351,412.5582087919956,567.6694072407649),c(11.975566447558055,0.8876896149174581,5.191742267855479,59.60085119085443,156.66767422480845,57.69754630971709),c(48.3910644207448,24.855309217688827,33.31367955207266,243.3701423626556,125.33413937984676,90.26874180713803),c(19.307545905246663,63.469807466598255,16.44051718154235,417.205958335981,475.22527848191896,342.46285551574016),c(61.58862744458429,118.50656359148066,92.5860704434227,1077.7820590346175,746.7825804715869,710.0520618437764),c(16.863552752683795,31.956826137028493,35.909550686000394,312.90446875198575,109.66737195736592,291.27954830550726),c(17.352351383196368,21.304550758018994,40.66864776486792,213.56971676722839,339.446627487085,135.86841550352736),c(3.665989728844303,10.20843057155077,1.7305807559518263,34.76716319466509,146.2231626098212,140.52144343173035),c(16.619153437427507,68.795945156103,67.49264948212122,456.939859129884,266.3350461821743,225.20655172502478),c(8.55397603397004,5.769982496963478,1.7305807559518263,104.30148958399525,5.222255807493615,42.80785693946752),c(39.83708838677476,51.04215285775384,26.39135652826535,163.90234077484968,224.55699972222544,103.2972200061064),c(13.93076096960835,42.60910151603799,22.064904638385784,208.6029791679905,198.44572068475736,246.61048019475854),c(12.953163708583203,0.0,6.922323023807305,153.96886557637396,130.55639518734037,185.19051154247904),c(41.79228290882505,44.82832555333164,45.42774484373544,208.6029791679905,141.0009068023276,244.7
4926902347735),c(19.063146589990374,116.28733955418701,57.54181013539822,963.5470942521466,595.3371620542721,630.9505870643256),c(16.863552752683795,39.50218786382688,65.7620687261694,342.70489434741296,250.66827875969352,230.79018523886836),c(7.331979457688606,20.860705950560266,16.44051718154235,89.40127678628164,177.5566974547829,92.12995297841923),c(21.507139742553242,1.7753792298349163,21.19961426040987,54.63411359161656,130.55639518734037,131.21538757532434),c(7.087580142432319,15.090723453596787,6.922323023807305,89.40127678628164,10.44451161498723,30.709984326139743),c(26.639525362935267,8.433051341715853,19.036388315470088,238.4034047634177,125.33413937984676,459.71915930645554),c(9.775972610251474,39.50218786382688,58.40710051337414,526.4741855192142,224.55699972222544,147.9662881168551),c(10.509170556020335,40.83372228620308,65.32942353718144,144.0353903778982,558.7813714018167,290.34894271986667),c(11.731167132301769,1.3315344223761871,4.326451889879566,94.36801438551952,52.222558074936146,91.19934739277863),c(12.953163708583203,60.36289381438715,34.17896993004857,293.0375183550343,214.1124881072382,311.7528711896004),c(10.020371925507762,23.967619602771368,12.54671048065074,158.93560317561182,255.89053456718713,152.61931604505813),c(9.042774664482614,20.41686114310154,38.93806700891609,158.93560317561182,339.446627487085,192.63535622760384),c(0.4887986305125737,15.978413068514246,3.8938067008916093,79.46780158780591,125.33413937984676,134.00720433224615),c(22.973535634090965,47.04754959062528,25.526066150289438,139.06865277866035,94.00060453488507,93.99116414970042),c(2.6883924678191553,2.6630688447523743,0.0,29.800425595427214,0.0,7.444844685124786),c(36.41549797318674,38.61449824890943,37.64013144195222,273.1705679580828,517.0033249418678,360.1443616429115),c(1.466395891537721,0.0,0.0,0.0,5.222255807493615,1.8612111712811965),c(5.621184250894598,3.9946032671285616,2.163225944939783,44.70063839314082,73.1115813049106,118.18690937635598),c(9.28717397973
89,5.3261376895047485,0.0,19.866950396951477,26.111279037468073,21.40392846973376),c(4.154788359356877,8.87689614917458,15.575226803566437,84.43453918704378,36.5557906524553,86.54631946457563),c(1.9551945220502949,11.096120186468227,27.68929209522922,94.36801438551952,146.2231626098212,170.3008221722295),c(0.4887986305125737,46.159859975707825,43.26451889879566,124.16843998094673,376.0024181395403,163.7865830727453),c(4.887986305125737,41.2775670936618,16.007871992554392,233.43666716417985,423.0027204069828,169.3702165865889),c(3.177191098331729,1.3315344223761871,0.0,74.50106398856803,114.88962776485953,153.54992163069872),c(1.2219965762814342,8.87689614917458,3.8938067008916093,69.53432638933018,57.44481388242976,67.93420775176368),c(0.4887986305125737,3.1069136522111034,5.191742267855479,0.0,5.222255807493615,0.9306055856405983),c(0.24439931525628686,0.0,1.2979355669638697,39.733900793902954,52.222558074936146,32.57119549742094),c(0.24439931525628686,9.76458576409204,7.354968212795262,139.06865277866035,297.66858102713604,186.12111712811966),c(0.0,18.197637105807893,3.4611615119036525,0.0,99.22286034237868,58.62815189535769),c(0.0,4.43844807458729,3.8938067008916093,89.40127678628164,208.89023229974458,111.67267027687178),c(0.0,3.5507584596698325,3.0285163229156957,29.800425595427214,78.33383711240423,40.94664576818632),c(0.0,3.1069136522111034,4.326451889879566,44.70063839314082,5.222255807493615,76.30965802252906),c(0.0,0.0,0.0,9.933475198475739,31.33353484496169,20.47332288409316),c(0.0,0.0,0.0,0.0,0.0,0.0),c(0.0,0.0,0.0,0.0,0.0,0.0),c(0.0,0.0,0.0,0.0,0.0,0.0))
targetgene="Nontargeting"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n. The original
  # `i %% n` produced index 0 (zero-length colour, invalid `col`)
  # whenever i was a multiple of length(colors) -- this gene has
  # more sgRNAs than colours, so that bug actually triggered here.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(118.28926858404284,70.1274795784792,116.38155583776032,1599.289506954594,1895.6788581201822,1138.1306312384518),c(155.19356518774217,217.92780046223598,213.29407817106258,1653.9236205462105,2015.7907416925352,1695.56337703717),c(55.7230438784334,183.75175028791384,94.74929638836248,710.2434766910153,710.2267898191316,835.6838159052572),c(33.72710550536759,34.176050174322135,102.5369097901457,337.73815674817513,449.1139994444509,246.61048019475854),c(57.6782384004837,43.940635938414175,131.5241374523388,650.6426255001609,840.783185006472,521.139127958735),c(1.2219965762814342,2.219224037293645,18.171097937494174,188.73602877103903,208.89023229974458,115.39509261943418))
targetgene="YALI1_C00098g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(63.29942265137829,93.2074095663331,97.34516752229023,486.7402847253112,621.4484410917402,775.1944528386183),c(93.36053842790157,90.54434072158072,99.07574827824206,625.8089375039715,934.783789541357,504.38822741720423),c(53.76784935638311,57.699824969634776,142.3402671770377,963.5470942521466,673.6709991666763,987.3725263646747),c(65.98781511919745,42.60910151603799,24.228130583325566,600.9752495077822,360.3356507170594,475.5394542623457),c(50.59065825805138,40.83372228620308,139.311750854122,536.4076607176899,710.2267898191316,395.50737389725424),c(7.576378772944893,17.75379229834916,21.63225944939783,258.2703551603692,485.6697900969062,370.3810230849581))
targetgene="YALI1_E00019g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(21.507139742553242,45.27217036079036,65.32942353718144,635.7424127024473,355.1133949095658,509.04125534540725),c(153.97156861146073,66.57672111880936,150.56052576780888,933.7466686567194,856.4499524289528,764.9577913965718),c(73.07539526162977,207.71936989068521,154.4543324687005,1038.0481582407147,1462.231626098212,1117.6573083543585),c(53.034651410614245,38.170653441450696,41.53393814284383,461.9065967291218,731.115813049106,248.47169136603972),c(8.309576718713753,25.742998832606286,17.73845274850622,248.33687996189346,214.1124881072382,124.70114847584017),c(78.45218019726808,168.66102683431706,86.52903779759131,1107.5824846300447,872.1167198514337,965.0379923093004))
targetgene="YALI1_D00040g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(214.33819947976357,245.00233371721845,125.46710480650741,1023.147945443001,976.561836001306,721.2193288714636),c(74.5417911531675,120.7257876287743,77.44348882884422,725.1436894887289,830.3386733914847,787.2923254519461),c(294.74557419908194,207.27552508322648,268.672662361521,1738.3581597332543,1770.3447187403353,1743.954867490481),c(32.01631029857358,9.76458576409204,42.8318737098077,506.6072351222627,376.0024181395403,324.7813493885688),c(20.28514316627181,6.213827304422207,16.007871992554392,193.7027663702769,52.222558074936146,209.3862567691346),c(24.68433084088497,122.94501166606796,35.47690549701244,332.77141914893724,349.8911391020722,349.90770020086495))
targetgene="YALI1_A00032g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(27.128323993447843,80.33591015002996,48.02361597766318,571.174823912355,569.225883016804,401.09100741109785),c(50.10185962753881,122.50116685860922,86.09639260860335,541.3743983169278,1007.8953708462676,547.1960843566718),c(14.908358230633498,65.6890315038919,32.44838917409674,178.80255357256328,282.0018136046552,415.98069678134743),c(10.264771240764048,10.652275379009497,31.58309879612083,129.1351775801846,182.7789532622765,101.43600883482522),c(90.9165452753387,61.694428236763336,113.35303951484462,769.8443278818697,501.33655751938704,802.1820148221957),c(47.41346715971965,68.795945156103,67.49264948212122,546.3411359161656,506.5588133268806,651.4239099484188))
targetgene="YALI1_B30333g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(74.2973918379112,27.5183780624412,48.888906355639094,402.3057455382674,355.1133949095658,303.377420918835),c(63.78822128189087,12.871499416303143,26.39135652826535,302.97099355351,214.1124881072382,442.0376531792842),c(70.14260347855434,75.45361726798394,57.10916494641027,1082.7487966338556,898.2279988889018,790.0841422088679),c(24.439931525628687,49.26677362791892,30.717808418144916,451.9731215306461,396.8914413695147,577.9060686828116),c(49.85746031228252,40.389877478744346,76.14555326188035,571.174823912355,339.446627487085,370.3810230849581),c(9.531573294995187,35.0637397892396,40.23600257587996,620.8421999047337,266.3350461821743,267.0838030788517))
targetgene="YALI1_C33545g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(15.152757545889786,7.545361726798394,45.42774484373544,248.33687996189346,229.77925552971905,126.56235964712137),c(29.816716461266996,7.101516919339665,54.945939001470485,183.76929117180117,355.1133949095658,177.74566685735428),c(122.688456258656,155.34568261055517,107.72865205800119,829.4451790727242,1060.1179289212039,698.8847948160893),c(92.13854185162015,231.68698949345657,157.05020360262824,1410.553478183555,1378.6755331783143,1245.1502735871204),c(98.4929240482836,132.26575262270126,221.0816915728458,1599.289506954594,1838.2340442377524,1248.8726959296828),c(9.531573294995187,5.3261376895047485,3.0285163229156957,193.7027663702769,94.00060453488507,129.35417640404316))
targetgene="YALI1_D00062g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(99.95931993982133,133.59728704507745,112.05510394788075,1286.3850382026083,699.7822782041444,741.6926517555568),c(127.33204324852545,147.80032088375677,159.64607473655596,1152.2831230231857,1237.6746263759867,1325.182353952212),c(174.7455104082451,164.22257875972974,98.21045790026614,943.6801438551952,976.561836001306,1216.301500432262),c(120.24446310609314,251.21616102164066,109.45923281395301,576.1415615115928,1081.0069521511782,840.3368438334602),c(84.31776376341897,35.0637397892396,56.24387456843435,382.43879514131595,778.1161153165486,621.6445312079196),c(40.32588701728733,19.973016335642807,39.803357386892,322.8379439504615,214.1124881072382,267.0838030788517))
targetgene="YALI1_A22415g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
# parameters
# Do not modify the variables beginning with "__"
targetmat=list(c(171.32391999465707,85.66204783953471,141.90762198804975,1410.553478183555,1608.4547887080334,928.7443744693171),c(90.42774664482614,75.89746207544268,77.44348882884422,451.9731215306461,558.7813714018167,435.5234140798),c(26.150726732422694,175.7625437536567,82.6352310966997,342.70489434741296,485.6697900969062,567.6694072407649),c(174.01231246247625,48.37908401300147,65.7620687261694,551.3078735154035,370.78016233204664,358.28315047163034),c(18.81874727473409,57.25598016217605,54.51329381248253,402.3057455382674,511.78106913437426,305.23863209011625),c(15.885955491658645,13.315344223761873,38.072776630940176,288.0707807557964,156.66767422480845,221.48412938246238))
targetgene="YALI1_A22496g"
# Sample labels for the x axis (one per column of the count matrix).
collabel=c("PO1f_1","PO1f_2","PO1f_3","PO1f_Cas9_1","PO1f_Cas9_2","PO1f_Cas9_3")
# set up color using RColorBrewer
#library(RColorBrewer)
#colors <- brewer.pal(length(targetgenelist), "Set1")
# Fixed hex palette (Set1 + Set2 + Set3) so RColorBrewer is not required.
colors=c( "#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#A65628", "#F781BF",
"#999999", "#66C2A5", "#FC8D62", "#8DA0CB", "#E78AC3", "#A6D854", "#FFD92F", "#E5C494", "#B3B3B3",
"#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69", "#FCCDE5",
"#D9D9D9", "#BC80BD", "#CCEBC5", "#FFED6F")
## code
# Shift all counts by 1 so zero counts are displayable on the log-scaled y axis.
targetmatvec=unlist(targetmat)+1
yrange=range(targetmatvec[targetmatvec>0]);
# yrange[1]=1; # set the minimum value to 1
for(i in seq_along(targetmat)){
  vali=targetmat[[i]]+1;
  # ((i-1) %% n) + 1 keeps the palette index in 1..n; the original
  # `i %% n` yields index 0 (invalid colour) when i is a multiple of n.
  # For i <= length(colors) this selects the same colour as before.
  coli=colors[((i-1) %% length(colors))+1]
  if(i==1){
    # First sgRNA: draw the plot frame (log-scaled y, custom x axis).
    plot(seq_along(vali),vali,type='b',las=1,pch=20,main=paste('sgRNAs in',targetgene),ylab='Read counts',xlab='Samples',xlim=c(0.7,length(vali)+0.3),ylim = yrange,col=coli,xaxt='n',log='y')
    axis(1,at=seq_along(vali),labels=collabel,las=2)
    # lines(0:100,rep(1,101),col='black');
  }else{
    # Remaining sgRNAs: overlay as extra coloured lines.
    lines(seq_along(vali),vali,type='b',pch=20,col=coli)
  }
}
dev.off()
Sweave("Day6_test_summary.Rnw");
library(tools);
texi2dvi("Day6_test_summary.tex",pdf=TRUE);
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{makembindex}
\alias{makembindex}
\title{makembindex}
\usage{
makembindex(arglist = arglist_get(...))
}
\arguments{
\item{arglist}{Arguments}
}
\description{
Run makembindex
}
\examples{
library(outsider)
makembindex <- module_import('makembindex',
repo = 'dombennett/om..blast')
makembindex('-help')
}
| /man/makembindex.Rd | no_license | DomBennett/om..blast | R | false | true | 427 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{makembindex}
\alias{makembindex}
\title{makembindex}
\usage{
makembindex(arglist = arglist_get(...))
}
\arguments{
\item{arglist}{Arguments}
}
\description{
Run makembindex
}
\examples{
library(outsider)
makembindex <- module_import('makembindex',
repo = 'dombennett/om..blast')
makembindex('-help')
}
|
# Print the value of `x` prefixed with "x =".
# `r` and `y` are accepted for signature compatibility but are not used.
# Invisibly returns the formatted string (the value of print()).
printer <- function(r, x, y) {
  label <- paste("x =", x, sep = "")
  print(label)
}
| /financeR/R/printer.R | no_license | oliealex/Stock-analysis | R | false | false | 57 | r | printer <- function(r,x,y) {
print(paste0("x =", x))
}
|
# load the data and rename some fields
# Each input file is a two-column tab-separated table: language name and a
# phonetic form.

# Read one family file into a (Language, Phon) data frame.
read_family <- function(path) {
  read.table(path, sep = "\t", quote = "", header = FALSE,
             stringsAsFactors = FALSE, col.names = c("Language", "Phon"))
}

# Drop a fixed trailing qualifier (e.g. " derivates", " form") from each
# string that ends with it; other strings are returned unchanged.
strip_suffix <- function(x, suffix) {
  ifelse(endsWith(x, suffix), substr(x, 1L, nchar(x) - nchar(suffix)), x)
}

nostratic <- read_family("nostratic.txt")
nostratic$Language <- strip_suffix(nostratic$Language, " derivates")
afro.asiatic <- read_family("afro_asiatic.txt")
sino.caucasian <- read_family("sino_caucasian.txt")
sino.caucasian$Language <- strip_suffix(sino.caucasian$Language, " form")
austric <- read_family("austric.txt")
austric$Language <- strip_suffix(austric$Language, " form")
austric <- austric[austric$Phon != "Old", ]
macro.khoisan <- read_family("macro_khoisan.txt")
# Stack all families into one table.
starling <- rbind(nostratic, afro.asiatic, sino.caucasian, austric, macro.khoisan,
                  stringsAsFactors = FALSE)
# get rid of reconstructed languages that have slipped through:
# "Meaning" header rows, "Proto-*" languages, starred reconstructions and
# placeholder forms ("~", "?").
starling$Language <- trimws(starling$Language)
starling <- starling[starling$Language != "Meaning", ]
starling <- starling[substr(starling$Language, 1, 5) != "Proto", ]
starling <- starling[!grepl("[*]", starling$Phon), ]
starling <- starling[starling$Phon != "~", ]
starling <- starling[starling$Phon != "?", ]
# Drop everything after "<" -- presumably etymological notes; TODO confirm.
starling$Phon <- gsub("<.*", "", starling$Phon)
# strip unwanted characters from IPA representations
starling.strip.chars <- "[-;,()<>+=]"
starling$PhonStrip <- gsub(starling.strip.chars, "", starling$Phon)
starling$PhonStrip <- gsub("/.*", "", starling$PhonStrip)
starling$PhonStrip <- gsub("\\[.*\\]", "", starling$PhonStrip)
starling$PhonStrip <- gsub("\\[", "", starling$PhonStrip)
starling$PhonStrip <- gsub("\\]", "", starling$PhonStrip)
starling$PhonStrip <- gsub("\\{", "", starling$PhonStrip)
starling$PhonStrip <- gsub("\\}", "", starling$PhonStrip)
# Normalize ASCII colon to the IPA length mark.
starling$PhonStrip <- gsub(":", "ː", starling$PhonStrip)
| /load_starling.R | no_license | kaplanas/Minimal-Pair-Counts | R | false | false | 2,492 | r | # load the data and rename some fields
# Read each family's two-column (Language, phonetic form) tab-separated
# table and strip trailing " derivates"/" form" qualifiers from names.
nostratic = read.table("nostratic.txt", sep = '\t', quote = "", header = F, stringsAsFactors = F, col.names = c("Language", "Phon"))
# Drop a trailing " derivates" (10 characters) from language names, if present.
nostratic$Language = ifelse(substr(nostratic$Language, nchar(nostratic$Language) - 9, nchar(nostratic$Language)) == " derivates", substr(nostratic$Language, 1, nchar(nostratic$Language) - 10), nostratic$Language)
afro.asiatic = read.table("afro_asiatic.txt", sep = '\t', quote = "", header = F, stringsAsFactors = F, col.names = c("Language", "Phon"))
sino.caucasian = read.table("sino_caucasian.txt", sep = '\t', quote = "", header = F, stringsAsFactors = F, col.names = c("Language", "Phon"))
# Drop a trailing " form" (5 characters) from language names, if present.
sino.caucasian$Language = ifelse(substr(sino.caucasian$Language, nchar(sino.caucasian$Language) - 4, nchar(sino.caucasian$Language)) == " form", substr(sino.caucasian$Language, 1, nchar(sino.caucasian$Language) - 5), sino.caucasian$Language)
austric = read.table("austric.txt", sep = '\t', quote = "", header = F, stringsAsFactors = F, col.names = c("Language", "Phon"))
austric$Language = ifelse(substr(austric$Language, nchar(austric$Language) - 4, nchar(austric$Language)) == " form", substr(austric$Language, 1, nchar(austric$Language) - 5), austric$Language)
austric = austric[austric$Phon != "Old",]
macro.khoisan = read.table("macro_khoisan.txt", sep = '\t', quote = "", header = F, stringsAsFactors = F, col.names = c("Language", "Phon"))
# Stack all families into one table.
starling = rbind(nostratic, afro.asiatic, sino.caucasian, austric, macro.khoisan, stringsAsFactors = F)
# get rid of reconstructed languages that have slipped through:
# "Meaning" header rows, "Proto-*" languages, starred reconstructions and
# placeholder forms ("~", "?").
starling$Language = trimws(starling$Language)
starling = starling[starling$Language != "Meaning",]
starling = starling[substr(starling$Language, 1, 5) != "Proto",]
starling = starling[!grepl("[*]", starling$Phon),]
starling = starling[starling$Phon != "~",]
starling = starling[starling$Phon != "?",]
# Drop everything after "<" -- presumably etymological notes; TODO confirm.
starling$Phon = gsub("<.*", "", starling$Phon)
# strip unwanted characters from IPA representations
starling.strip.chars = "[-;,()<>+=]"
starling$PhonStrip = gsub(starling.strip.chars, "", starling$Phon)
starling$PhonStrip = gsub("/.*", "", starling$PhonStrip)
starling$PhonStrip = gsub("\\[.*\\]", "", starling$PhonStrip)
starling$PhonStrip = gsub("\\[", "", starling$PhonStrip)
starling$PhonStrip = gsub("\\]", "", starling$PhonStrip)
starling$PhonStrip = gsub("\\{", "", starling$PhonStrip)
starling$PhonStrip = gsub("\\}", "", starling$PhonStrip)
# Normalize ASCII colon to the IPA length mark.
starling$PhonStrip = gsub(":", "ː", starling$PhonStrip)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loocv.R
\name{loocv}
\alias{loocv}
\title{Leave one group out cross-validation for \code{baggr} models}
\usage{
loocv(data, return_models = FALSE, ...)
}
\arguments{
\item{data}{Input data frame - same as for \link{baggr} function.}
\item{return_models}{logical; if FALSE, summary statistics will be returned and the
models discarded;
if TRUE, a list of models will be returned alongside summaries}
\item{...}{Additional arguments passed to \link{baggr}.}
}
\value{
log predictive density value, an object of class \code{baggr_cv};
full model, prior values and \emph{lpd} of each model are also returned.
These can be examined by using \code{attributes()} function.
}
\description{
Performs exact leave-one-group-out cross-validation on a baggr model.
}
\details{
The values returned by \code{loocv()} can be used to understand how excluding
any one group affects the overall result, as well as how well the model
predicts the omitted group. LOO-CV approaches are a good general practice
for comparing Bayesian models, not only in meta-analysis.
This function automatically runs \emph{K} baggr models, where \emph{K} is number of groups (e.g. studies),
leaving out one group at a time. For each run, it calculates
\emph{expected log predictive density} (ELPD) for that group (see Gelman et al 2013).
(In the logistic model, where the proportion in control group is unknown, each of
the groups is divided into data for controls, which is kept for estimation, and data for
treated units, which is not used for estimation but only for calculating predictive density.
This is akin to fixing the baseline risk and only trying to infer the odds ratio.)
The main output is the cross-validation
information criterion, or -2 times the ELPD summed over \emph{K} models.
(We sum the terms as we are working with logarithms.)
This is related to, and often approximated by, the Watanabe-Akaike
Information Criterion. When comparing models, smaller values mean
a better fit. For more information on cross-validation see
\href{http://www.stat.columbia.edu/~gelman/research/published/waic_understand3.pdf}{this overview article}.
For running more computation-intensive models, consider setting the
\code{mc.cores} option before running loocv, e.g. \code{options(mc.cores = 4)}
(by default baggr runs 4 MCMC chains in parallel).
As a default, rstan runs "silently" (\code{refresh=0}).
To see sampling progress, please set e.g. \code{loocv(data, refresh = 500)}.
}
\examples{
\dontrun{
# even simple examples may take a while
cv <- loocv(schools, pooling = "partial")
print(cv) # returns the lpd value
attributes(cv) # more information is included in the object
}
}
\references{
Gelman, Andrew, Jessica Hwang, and Aki Vehtari.
'Understanding Predictive Information Criteria for Bayesian Models.'
Statistics and Computing 24, no. 6 (November 2014): 997–1016.
}
\seealso{
\link{loo_compare} for comparison of many LOO CV results; you can print and plot
output via \link{plot.baggr_cv} and \link{print.baggr_cv}
}
\author{
Witold Wiecek
}
| /man/loocv.Rd | no_license | cran/baggr | R | false | true | 3,182 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loocv.R
\name{loocv}
\alias{loocv}
\title{Leave one group out cross-validation for \code{baggr} models}
\usage{
loocv(data, return_models = FALSE, ...)
}
\arguments{
\item{data}{Input data frame - same as for \link{baggr} function.}
\item{return_models}{logical; if FALSE, summary statistics will be returned and the
models discarded;
if TRUE, a list of models will be returned alongside summaries}
\item{...}{Additional arguments passed to \link{baggr}.}
}
\value{
log predictive density value, an object of class \code{baggr_cv};
full model, prior values and \emph{lpd} of each model are also returned.
These can be examined by using \code{attributes()} function.
}
\description{
Performs exact leave-one-group-out cross-validation on a baggr model.
}
\details{
The values returned by \code{loocv()} can be used to understand how excluding
any one group affects the overall result, as well as how well the model
predicts the omitted group. LOO-CV approaches are a good general practice
for comparing Bayesian models, not only in meta-analysis.
This function automatically runs \emph{K} baggr models, where \emph{K} is number of groups (e.g. studies),
leaving out one group at a time. For each run, it calculates
\emph{expected log predictive density} (ELPD) for that group (see Gelman et al 2013).
(In the logistic model, where the proportion in control group is unknown, each of
the groups is divided into data for controls, which is kept for estimation, and data for
treated units, which is not used for estimation but only for calculating predictive density.
This is akin to fixing the baseline risk and only trying to infer the odds ratio.)
The main output is the cross-validation
information criterion, or -2 times the ELPD summed over \emph{K} models.
(We sum the terms as we are working with logarithms.)
This is related to, and often approximated by, the Watanabe-Akaike
Information Criterion. When comparing models, smaller values mean
a better fit. For more information on cross-validation see
\href{http://www.stat.columbia.edu/~gelman/research/published/waic_understand3.pdf}{this overview article}.
For running more computation-intensive models, consider setting the
\code{mc.cores} option before running loocv, e.g. \code{options(mc.cores = 4)}
(by default baggr runs 4 MCMC chains in parallel).
As a default, rstan runs "silently" (\code{refresh=0}).
To see sampling progress, please set e.g. \code{loocv(data, refresh = 500)}.
}
\examples{
\dontrun{
# even simple examples may take a while
cv <- loocv(schools, pooling = "partial")
print(cv) # returns the lpd value
attributes(cv) # more information is included in the object
}
}
\references{
Gelman, Andrew, Jessica Hwang, and Aki Vehtari.
'Understanding Predictive Information Criteria for Bayesian Models.'
Statistics and Computing 24, no. 6 (November 2014): 997–1016.
}
\seealso{
\link{loo_compare} for comparison of many LOO CV results; you can print and plot
output via \link{plot.baggr_cv} and \link{print.baggr_cv}
}
\author{
Witold Wiecek
}
|
#PCL implementation of SEIR for CDC@SZ
#Authors: WH Li, XY Wei
#Jan 27, 2020
# NOTE(review): the original file contained a second, complete copy of this
# script pasted into the middle of the final plot() call, which made the file
# unparseable.  This version keeps a single copy and restores the intended
# final observed-data plot plus the five-entry legend.
#remove (list = objects() )
library (deSolve)

# SEIR derivative function in the form required by deSolve::lsoda():
# state_values = c(S, E, I, R) as population fractions; parameters must
# contain beta (transmission rate), delta (1/latent period) and gamma
# (1/infectious period).  Returns list(c(dS, dE, dI, dR)).
seir_model = function (current_timepoint, state_values, parameters)
{
  # create state variables (local variables)
  S = state_values [1]        # susceptibles
  E = state_values [2]        # exposed
  I = state_values [3]        # infectious
  R = state_values [4]        # recovered
  with (
    as.list (parameters),     # variable names within parameters can be used
    {
      # compute derivatives
      dS = (-beta * S * I)
      dE = (beta * S * I) - (delta * E)
      dI = (delta * E) - (gamma * I)
      dR = (gamma * I)
      # combine results
      results = c (dS, dE, dI, dR)
      list (results)
    }
  )
}

# ---- epidemiological parameters ----
contact_rate = 5                     # number of contacts per day
transmission_probability = 0.035     # transmission probability per contact
infectious_period = 14               # infectious period (days)
latent_period = 10                   # latent period (days)
beta_value = contact_rate * transmission_probability
#beta_value = 0.8
gamma_value = 1 / infectious_period
delta_value = 1 / latent_period
Ro = beta_value / gamma_value        # basic reproduction number
parameter_list = c (beta = beta_value, gamma = gamma_value, delta = delta_value)

# ---- initial S, E, I, R values (host counts, converted to fractions) ----
W = 50000    # susceptible hosts
X = 120      # infectious hosts
Y = 5        # recovered hosts
Z = 300      # exposed hosts
N = W + X + Y + Z
initial_values = c (S = W/N, E = X/N, I = Y/N, R = Z/N)
timepoints = seq (0, 1, by=1)
output <- c(time=0, initial_values)

# Piecewise-constant beta for four intervention phases
# (days 1-14, 15-35, 36-95, 96+).
#model = c(0.25, 1, 0.75, 1)
model = c(0.25, 0.95, 0.2, 1)
#model = c(0.25, 1.05, 1.2, 1)
for (i in 1:200) {
  if (i <= 14 ) {
    beta_value = model[1]
  } else if (i <= 35) {
    beta_value = model[2]
  } else if (i <= 95) {
    beta_value = model[3]
  } else {
    beta_value = model[4]
  }
  #beta_value = beta_value * 0.99
  parameter_list = c (beta = beta_value, gamma = gamma_value, delta = delta_value)
  # Integrate one day forward; the day-end state seeds the next iteration.
  stage = lsoda (initial_values, timepoints, seir_model, parameter_list)
  initial_values = stage[c(4,6,8,10)]   # S, E, I, R at t = 1 from the lsoda output matrix
  output <- rbind(output, c(time=i, initial_values))
}

scale = 1.2
# plot (I * N ~ time, data = output, type='b', col = 'blue', ylab = 'Infectious', main = 'Wuhan infectious')
# susceptible hosts over time
plot (S * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'blue', ylab = 'S, E, I, R', main = '武汉管制变化预测(总量减少)')
# remain on same frame
par (new = TRUE)
# exposed hosts over time
plot (E * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'pink', ylab = '', axes = FALSE)
# remain on same frame
par (new = TRUE)
# infectious hosts over time
plot (I * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'red', ylab = '', axes = FALSE)
# remain on same frame
par (new = TRUE)
# recovered hosts over time
plot (R * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'green', ylab = '', axes = FALSE)
par (new = TRUE)
# Observed case counts; `city` is expected to be loaded before this script
# runs (a data frame with "Wuhan" and "Time" columns) -- TODO confirm.
plot (city[,c("Wuhan")] ~ city[,c("Time")], type='o', xlim = c(0,200), ylim = c(0,W * scale))
# remain on same frame
par (new = TRUE)
legend(x=150,y=W,legend=c("Susceptible","Exposed","Infectious","Recovered","Actual"),col=c("blue","pink","red","green","black"), lty=1, cex=0.8)
| /SEIR-CDCWH-RF-adjust.R | no_license | whupro2017/NSARS | R | false | false | 7,046 | r | #PCL implementation of SEIR for CDC@SZ
#PCL implementation of SEIR for CDC@SZ
#Authors: WH Li, XY Wei
#Jan 27, 2020
# NOTE(review): the original file contained a second, complete copy of this
# script pasted into the middle of the final plot() call, which made the file
# unparseable.  This version keeps a single copy and restores the intended
# final observed-data plot plus the five-entry legend.
#remove (list = objects() )
library (deSolve)

# SEIR derivative function in the form required by deSolve::lsoda():
# state_values = c(S, E, I, R) as population fractions; parameters must
# contain beta (transmission rate), delta (1/latent period) and gamma
# (1/infectious period).  Returns list(c(dS, dE, dI, dR)).
seir_model = function (current_timepoint, state_values, parameters)
{
  # create state variables (local variables)
  S = state_values [1]        # susceptibles
  E = state_values [2]        # exposed
  I = state_values [3]        # infectious
  R = state_values [4]        # recovered
  with (
    as.list (parameters),     # variable names within parameters can be used
    {
      # compute derivatives
      dS = (-beta * S * I)
      dE = (beta * S * I) - (delta * E)
      dI = (delta * E) - (gamma * I)
      dR = (gamma * I)
      # combine results
      results = c (dS, dE, dI, dR)
      list (results)
    }
  )
}

# ---- epidemiological parameters ----
contact_rate = 5                     # number of contacts per day
transmission_probability = 0.035     # transmission probability per contact
infectious_period = 14               # infectious period (days)
latent_period = 10                   # latent period (days)
beta_value = contact_rate * transmission_probability
#beta_value = 0.8
gamma_value = 1 / infectious_period
delta_value = 1 / latent_period
Ro = beta_value / gamma_value        # basic reproduction number
parameter_list = c (beta = beta_value, gamma = gamma_value, delta = delta_value)

# ---- initial S, E, I, R values (host counts, converted to fractions) ----
W = 50000    # susceptible hosts
X = 120      # infectious hosts
Y = 5        # recovered hosts
Z = 300      # exposed hosts
N = W + X + Y + Z
initial_values = c (S = W/N, E = X/N, I = Y/N, R = Z/N)
timepoints = seq (0, 1, by=1)
output <- c(time=0, initial_values)

# Piecewise-constant beta for four intervention phases
# (days 1-14, 15-35, 36-95, 96+).
#model = c(0.25, 1, 0.75, 1)
model = c(0.25, 0.95, 0.2, 1)
#model = c(0.25, 1.05, 1.2, 1)
for (i in 1:200) {
  if (i <= 14 ) {
    beta_value = model[1]
  } else if (i <= 35) {
    beta_value = model[2]
  } else if (i <= 95) {
    beta_value = model[3]
  } else {
    beta_value = model[4]
  }
  #beta_value = beta_value * 0.99
  parameter_list = c (beta = beta_value, gamma = gamma_value, delta = delta_value)
  # Integrate one day forward; the day-end state seeds the next iteration.
  stage = lsoda (initial_values, timepoints, seir_model, parameter_list)
  initial_values = stage[c(4,6,8,10)]   # S, E, I, R at t = 1 from the lsoda output matrix
  output <- rbind(output, c(time=i, initial_values))
}

scale = 1.2
# plot (I * N ~ time, data = output, type='b', col = 'blue', ylab = 'Infectious', main = 'Wuhan infectious')
# susceptible hosts over time
plot (S * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'blue', ylab = 'S, E, I, R', main = '武汉管制变化预测(总量减少)')
# remain on same frame
par (new = TRUE)
# exposed hosts over time
plot (E * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'pink', ylab = '', axes = FALSE)
# remain on same frame
par (new = TRUE)
# infectious hosts over time
plot (I * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'red', ylab = '', axes = FALSE)
# remain on same frame
par (new = TRUE)
# recovered hosts over time
plot (R * N ~ time, data = output, type='l', ylim = c(0,W * scale), col = 'green', ylab = '', axes = FALSE)
par (new = TRUE)
# Observed case counts; `city` is expected to be loaded before this script
# runs (a data frame with "Wuhan" and "Time" columns) -- TODO confirm.
plot (city[,c("Wuhan")] ~ city[,c("Time")], type='o', xlim = c(0,200), ylim = c(0,W * scale))
# remain on same frame
par (new = TRUE)
legend(x=150,y=W,legend=c("Susceptible","Exposed","Infectious","Recovered","Actual"),col=c("blue","pink","red","green","black"), lty=1, cex=0.8)
|
# Render the annual MABM report for a single station.
#
# Builds the output file name from the station and year, then knits the
# package's Rmd template to PDF via rmarkdown::render() and reports where
# the file was written.
#
# Args:
#   out_dir:     directory the rendered PDF is written to.
#   year:        survey year of the report.
#   station:     MABM station name (required).
#   stn_start_yr: first year the station was surveyed (passed to template).
#   route_path, survey_path, bat_path, spp_path: input-data paths passed
#                through to the report template parameters.
#   key:         Google API key (template parameter `goog_API_key`).
#
# Returns: invisibly, the value of rmarkdown::render() (the output path).
render_MABM <- function(out_dir, year, station, stn_start_yr,
                        route_path, survey_path, bat_path, spp_path, key) {
  # Validate required inputs up front so failures are clear.
  if (is.null(station)) stop("You must provide a MABM station.", call. = FALSE)
  rmd_document <- system.file("extdata", "MABM_report_template.Rmd", package = "MABMreportr")
  # system.file() returns "" when the file is missing (e.g. broken install).
  if (identical(rmd_document, "")) {
    stop("MABM report template not found in the MABMreportr package.", call. = FALSE)
  }
  station_short <- shorten_station(station)
  fn <- paste("MABM", station_short, year, sep = "_")
  out_file <- paste(fn, "pdf", sep = ".")
  rendered <- rmarkdown::render(rmd_document, output_dir = out_dir,
                                output_file = out_file,
                                params = list(year = year,
                                              station = station,
                                              stn_start_yr = stn_start_yr,
                                              route_path = route_path,
                                              survey_path = survey_path,
                                              bat_path = bat_path,
                                              spp_path = spp_path,
                                              goog_API_key = key),
                                quiet = TRUE)
  message("Created ", year, " MABM annual report for ", station, ":\n ",
          tools::file_path_as_absolute(file.path(out_dir, out_file)))
  invisible(rendered)
}
| /R/render_MABM.R | no_license | adamdsmith/MABMreportr | R | false | false | 1,232 | r | render_MABM <- function(out_dir, year, station, stn_start_yr,
route_path, survey_path, bat_path, spp_path, key) {
# Need better error catching, but this will do for now...
if (is.null(station)) stop("You must provide a MABM station.")
rmd_document <- system.file("extdata", "MABM_report_template.Rmd", package = "MABMreportr")
station_short <- shorten_station(station)
fn <- paste("MABM", station_short, year, sep = "_")
out_file <- paste(fn, "pdf", sep = ".")
rmarkdown::render(rmd_document, output_dir = out_dir,
output_file = out_file,
params = list(year = year,
station = station,
stn_start_yr = stn_start_yr,
route_path = route_path,
survey_path = survey_path,
bat_path = bat_path,
spp_path = spp_path,
goog_API_key = key),
quiet = TRUE)
message("Created ", year, " MABM annual report for ", station, ":\n ",
tools::file_path_as_absolute(file.path(out_dir, out_file)))
}
|
# dplyr added: select() and %>% below require it, but it was never loaded.
pacman::p_load(sf, dplyr, here)
# This script converts a shapefile into the format required to run the visualizer.
# There are no requirements regarding the coordinate system.
# The shapefile MUST contain the following fields with exactly the following names:
#   shp_area - the area in sq meters of the zone (can be calculated via sf::st_area())
#   shp_id   - the id of the zone
#   shp_muni - the municipality/region/county/etc. used for aggregating zones
#              for visualization purposes
#
# (A fourth variable is carried along in the data frame: the geometry of each feature.)
#
# Some files showed "topology errors" while running the visualizer; this has been
# solved by simplifying the shapefile with sf::st_simplify(), although the
# consequences of this have not been analyzed yet.
#
# Please adapt the code below to the specific case.
original_file <- paste(here(), "/map/mstm/zones_mstm_100026.shp", sep = "")
data <- st_read(original_file)
# Square miles -> square metres using 1 mile = 1609.344 m.  The original
# factor (1.6 km/mile) understated areas by roughly 1.2%.
data$shp_area <- data$AREA_SQMI * 1609.344^2
data$shp_id <- data$SMZRMZ
data$shp_muni <- data$STCOFIPS
data <- data %>% select(shp_id, shp_muni, shp_area)
# For some reason the shapefile must be simplified, with unknown consequences
# (maybe islands are removed).
data <- data %>% sf::st_simplify()
final_file <- paste(here(), "/map/mstm/zones_mstm_100026_clean.shp", sep = "")
sf::write_sf(data, final_file)
| /map/mstm/shp_converter.R | no_license | rafleo2008/msm-visualizer | R | false | false | 1,415 | r | pacman::p_load(sf, here)
#This script converts a shapefile into the format required to run the visualizer.
#There are no requirements regarding the coordinate system
#The shapefile MUST contain the following fields with exact the following names
# shp_area is the are in sq meters of the zone (can be calculated using the function sf::st_area())
# shp_id is the id of the zone
# shp_muni is the municipality/region/county/etc. use for the aggregation of zones for visualization purposes
#
# (A fourth variable is added in the R dataframe called data in this script - it is the geometry of each feature)
#
# Some files did not work properly showing "topology errors" while running the visualizer. This has been solved simplyfing the shapefile
# using the sf::st_simplify(), although the consequences of this have not been analyzed yet.
#
# Please adapt the code below to the specific case
# Convert the raw MSTM zone shapefile into the visualizer's expected schema.
original_file = paste(here(), "/map/mstm/zones_mstm_100026.shp", sep="")
data = st_read(original_file)
# NOTE(review): 1.6^2 approximates 1.609344^2 (exact sq-mi -> sq-m factor);
# areas come out ~1.2% low — confirm whether this precision matters downstream.
data$shp_area = data$AREA_SQMI * 1.6^2 * 1e6 # approximate conversion to sq_m
data$shp_id = data$SMZRMZ
data$shp_muni = data$STCOFIPS
# Keep only the three fields the visualizer expects (sf keeps the geometry column).
data= data %>% select(shp_id, shp_muni, shp_area)
# Simplification works around visualizer "topology errors"; side effects are
# unknown (small islands may be removed).
data = data %>% sf::st_simplify()
final_file = paste(here(), "/map/mstm/zones_mstm_100026_clean.shp", sep="")
sf::write_sf(data, final_file)
|
# Load Packages
# Ran into memory issues with XLConnect, so the similar package openxlsx is used instead.
library(rJava)
# Cap the JVM heap used by rJava at ~1 GB.
options(java.parameters = "-Xmx1024m")
library(dplyr)
library(openxlsx)
setwd("~/SynopticSignals/04.08.18 - Automating Excel Processes")
# Countries whose budget workbooks are imported below.
Countries <- c("Botswana", "Burundi", "Cameroon")
# For each country: read the "Input" sheet, drop rows without a PrimePartner,
# tag rows with the country name, and bind the data frame to a variable named
# after the country. Formulas are imported as values; filters are ignored.
for (Country in Countries) {
  filename <- paste0(Country, " Budget.xlsx")
  CountryDF <- read.xlsx(filename, sheet = "Input", startRow = 1, colNames = TRUE, skipEmptyRows = TRUE)
  CountryDF <- CountryDF[!is.na(CountryDF$PrimePartner), ]
  CountryDF$OU <- Country
  # FIX: assign(paste(Country), ...) was redundant — Country is already a string.
  assign(Country, CountryDF)
  gc()
}
# Append all of the country data frames together.
Budget_Full <- bind_rows(Botswana, Burundi, Cameroon)
# Export the combined dataset as CSV (blank cells instead of "NA").
write.csv(Budget_Full, "Budget_Full.csv", na = "")
# Write each country's data into a copy of the "Budget Tool.xlsx" template.
for (Country in Countries) {
  WB <- loadWorkbook("Budget Tool.xlsx")
  CountryDF <- get(Country)
  writeData(WB, sheet = "Output", x = CountryDF)
  filenameXLSX <- paste0(Country, " Budget Tool populated.xlsx")
  # FIX: overwrite = TRUE so re-runs don't fail when the populated file exists.
  saveWorkbook(WB, filenameXLSX, overwrite = TRUE)
}
| /04.08.18 - Automating Excel Processes/Excel script.R | no_license | Ogweno/SynopticSignals | R | false | false | 1,465 | r | # Load Packages
# Ran into some memory issues with XLConnect so I eneded up using a similar package openxlsx
library(rJava)
# Defines how much memory can be used for this product. 1024 is 1.024 GB
options(java.parameters = "-Xmx1024m")
library(dplyr)
library(openxlsx)
setwd("~/SynopticSignals/04.08.18 - Automating Excel Processes")
# Creates list of countries which will be used in the for loop
Countries <- c("Botswana", "Burundi", "Cameroon")
# For Loop
# Cycles through each country, finds the file, pulls in the data, gets rid of rows with blanks,
# assigns dataset as that country name, clears import from memory
# Will import data as values from formulas and ignores filters
for (Country in Countries) {
filename <- paste0(Country, " Budget.xlsx")
CountryDF <- read.xlsx(filename, sheet = "Input", startRow = 1, colNames = TRUE, skipEmptyRows = TRUE)
CountryDF <- CountryDF[!(is.na(CountryDF$PrimePartner)),]
CountryDF$OU <- Country
assign(paste(Country),CountryDF)
gc()
}
# Appending all of the country files together
Budget_Full <- bind_rows(Botswana, Burundi, Cameroon)
# Exports the dataset as a CSV
write.csv(Budget_Full,"Budget_Full.csv", na = "")
# Export each dataset into Excel
for (Country in Countries) {
WB <- loadWorkbook("Budget Tool.xlsx")
CountryDF <- get(Country)
writeData(WB, sheet = "Output", x = CountryDF)
filenameXLSX <- paste0(Country, " Budget Tool populated.xlsx")
saveWorkbook(WB, filenameXLSX)
}
|
#-- This script trains all the available models in mlr (48) and stores their aucs in a table for the pairs defined
#-- Usage:
# In the config_file, change the instruments to the ones you want to train. Choose BUY and SELL of the same model so that the indicators selected are relevant
# In the config_file, choose the indicator types and the pairfilter to limit, and the indicator_pair_filter. Those define the kind of features that should be
# included in the model training process
#Output:
# The script generates the average auc per model and the cross correlation of the predictions of the models per pair
rm(list=ls())
set.seed(123)
library(data.table)
library(lubridate)
library(mlr)
library(ggplot2)
library(xgboost)
library(crayon)
library(plotly)
library(caret)
library(parallelMap)
#--- Directories
data_output_dir<-"02_data/output/"
data_input_dir<-"02_data/input/"
data_intermediate_dir<-"02_data/intermediate/"
#------------------------------------------------------------#
################## DEFINE THE CONFIGURATIONS #################
#------------------------------------------------------------#
config_file <- data.table(
instruments = c("BUY_RES_AUDUSD","SELL_RES_AUDUSD"), # Which models are to be trained in this script
SL = 15, # Stop loss
PF = 1, # Profit factor
SPREAD = 3, # Spread, make sure there is a file with the specified spread
#indicator_filter = c("EMA","TMS","SMA","atr","dist","RSI","williams"),
indicator_filter = c("EMA","TMS","SMA","atr","RSI","williams"),
indicator_pair_filter = c("AND"),
pair_filter = c("AUD","XAU"),
preprocess_steps = c("center","scale"),
test_portion = 0.3, # Out of sample test part for final evaluation
window_type = "FixedWindowCV", #"GrowingWindowCV",
initial.window = 1e4, # Window size for training
horizon = 1e4, # Future window for testing
initial.window_stack = 5e3, # Window size for training
horizon_stack = 1e4, # Future window for testing
REMOVE_FAST_WINS = T, # Flag to remove the positive trades which are finished in less than MIN_TRADE_TIME
MIN_TRADE_TIME = 15,
CORRELATION_FEATURE_SELECTION = T, # Flag whether to filter out the highly correlated features
CORRELATION_THRESHOLD = 0.9, # Filter to indicate the maximum correlation between indicators to be included in the training
READ_SELECTED_FEATURES = F,
returns_period = "week", #"month","day" defines the period of aggregating the returns
WRITE_FLAG = F
)
all_pairs <- c("EURUSD","GBPUSD","AUDUSD","USDJPY","USDCHF","NZDUSD","XAUUSD","USDCAD")
instruments <- data.table(currs = unique(config_file$instruments))
indicator_filter = unique(config_file[!is.na(indicator_filter),indicator_filter])
indicator_pair_filter = unique(config_file[,indicator_pair_filter])
pair_filter = unique(config_file[,pair_filter])
MIN_TRADE_TIME = config_file$MIN_TRADE_TIME[1]
preprocess_steps <- unique(config_file[!is.na(preprocess_steps),preprocess_steps])
CORRELATION_FEATURE_SELECTION <- config_file$CORRELATION_FEATURE_SELECTION[1] # use correlations to select the features
#-- Read the configurations
returns_period = config_file$returns_period[1] #"month","day" defines the period of aggregating the returns
READ_SELECTED_FEATURES <- config_file$READ_SELECTED_FEATURES[1]
WRITE_FLAG <- config_file$WRITE_FLAG[1]
SPREAD <- config_file$SPREAD[1] # Spread, make sure there is a file with the specified spread
SL <- config_file$SL[1]
PF <- config_file$PF[1]
test_ratio <- config_file$test_portion[1]
initial.window<-config_file$initial.window[1]
horizon <- config_file$horizon[1]
initial.window_stack<-config_file$initial.window_stack[1]
horizon_stack <- config_file$horizon_stack[1]
wind <- config_file$window_type[1]
REMOVE_FAST_WINS<-config_file$REMOVE_FAST_WINS[1]
CORRELATION_THRESHOLD <- config_file$CORRELATION_THRESHOLD[1]
#------------------------------------------------------------#
############### DEFINE THE FUNCTIONS #########################
#------------------------------------------------------------#
#-- Build an equity curve from accepted trades and aggregate returns per period.
#-- dt_curr: accepted trades (columns: index, TARGET, decision).
#-- dt_time_lut_prediction_period: index -> ret_per lookup for the prediction window.
#-- PF: units credited per winning trade; each losing trade costs 1 unit.
#-- Returns list(per-period return table, mean period return,
#--              variance of period returns, maximum drawdown).
get_sharpe=function(dt_curr,dt_time_lut_prediction_period,PF)
{
# Left join keeps periods without any accepted trade (their returns become 0).
dt_portfolio <- merge(dt_time_lut_prediction_period,dt_curr,all.x = T,by="index")
#-- Add equity, returns and drawdown
# Chained by-reference updates: win -> +PF, loss -> -1, no trade -> 0;
# drawdown is running-max(equity) minus equity, so it is always >= 0.
dt_portfolio[TARGET==1 & decision==1,returns:=PF][TARGET==0 & decision==1,returns:=-1][is.na(returns),returns:=0][,equity:=cumsum(returns)][,drawdown:=cummax(equity)][,drawdown:=(drawdown-equity) ]
mean_returns <- dt_portfolio[,.(mean_returns=sum(returns)),by="ret_per"]
return(list(mean_returns,mean(mean_returns$mean_returns),var(mean_returns$mean_returns),max(dt_portfolio$drawdown)))
}
#-- Sweep decision thresholds in steps of 0.01 (up to ~0.94) and pick the one
#-- maximizing P(period return > 0) under a normal approximation.
#-- dt_res: predictions with columns prediction and index (fed to get_sharpe).
#-- dt_time_lut_prediction_period: index -> ret_per lookup.
#-- PF: profit factor per winning trade.
#-- Returns list(best mean period return, best std-dev of period returns,
#--              best threshold).
#-- NOTE(review): if var() in get_sharpe yields NA (e.g. a single period),
#-- curr_sharpe becomes NA and the comparison below errors — confirm inputs
#-- always span multiple periods.
get_mean_returns_and_variance=function(dt_res,dt_time_lut_prediction_period,PF)
{
step <-0.01
th<-step
bst_sharpe <- -999
bst_thr <- 0.01
#--DEBUG
#th<-0.54
# Floating-point accumulation of th means the final iteration is th ~= 0.94.
while(th<0.95)
{
# Copy so the by-reference := below does not mutate the caller's table.
dt_curr<-copy(dt_res)
dt_curr[,decision:=as.numeric(prediction>th)]
dt_curr <- dt_curr[decision>0.5]
ret_varg<-get_sharpe(dt_curr,dt_time_lut_prediction_period,PF)
# Zero mean AND zero variance means no trades were taken at this threshold.
if((ret_varg[[2]]==0 & ret_varg[[3]]==0))
{
curr_sharpe<-0
}else{
#-- SHARPE RATIO CALCULATION
# (Earlier variants kept for reference.)
#curr_sharpe <- ret_varg[[2]]/sqrt(1+ret_varg[[3]]+ret_varg[[4]])
#curr_sharpe <- ret_varg[[2]]/(1+sqrt(ret_varg[[3]]))
#curr_sharpe <- ret_varg[[2]]/(0.01+sqrt(ret_varg[[3]]))
# Probability that a N(mean, sd) period return is positive.
curr_sharpe <-1-pnorm(0,ret_varg[[2]],sqrt(ret_varg[[3]]))
}
# Track the best threshold seen so far (first iteration always wins vs -999).
if(curr_sharpe>bst_sharpe)
{
bst_sharpe<-curr_sharpe
bst_thr <- th
bst_mean_ret <- ret_varg[[2]]
#bst_var_ret <- sqrt(1+ret_varg[[3]]+ret_varg[[4]])
#bst_var_ret <- (1+sqrt(ret_varg[[3]]))
#bst_var_ret <- (0.01+sqrt(ret_varg[[3]]))
bst_var_ret <- (sqrt(ret_varg[[3]]))
}
th<-th+step
}
return(list(bst_mean_ret,bst_var_ret,bst_thr))
}
#-- Walk-forward (rolling window) training and prediction with xgboost.
#
# dt             : data.table with feature columns, a binary target column and
#                  (optionally) a row-index column.
# nrounds/eta/max_depth : xgboost hyper-parameters.
# initial.window : number of rows each model is trained on.
# horizon        : number of following rows predicted before re-training.
# target_name    : name of the 0/1 target column (default "TARGET").
# index_name     : name of the row-index column (default "index").
#
# Returns a data.table with one row per predicted observation:
# index, prediction (probability of class 1) and the true TARGET.
train_and_predict = function(dt,nrounds,eta,max_depth,initial.window,horizon,target_name="TARGET",index_name="index")
{
  #-- Get feature columns and target column
  feat_cols <- setdiff(names(dt), target_name)
  target_col <- target_name
  #-- Check whether the index column is present
  index_col_available <- index_name %in% names(dt)
  #-- Exclude the index from the feature columns
  if (index_col_available)
  {
    feat_cols <- setdiff(feat_cols, index_name)
  }
  # NOTE(review): when no index column exists, dt_index is never created and
  # the cbind() below fails — callers in this file always supply one.
  #-- Initialize the result table
  dt_res <- data.table(prediction = numeric(0), index = numeric(0), TARGET = numeric(0))
  i <- 1 + initial.window
  while (i < (nrow(dt) - horizon - 1))
  {
    #-- BUG FIX: the original "(i-initial.window):i-1" parses as
    #-- "((i-initial.window):i) - 1" because ":" binds tighter than binary "-",
    #-- shifting the training window one row back and making it one row longer
    #-- than initial.window. Parenthesize the upper bound.
    dt_train <- copy(dt[(i - initial.window):(i - 1), ])
    dt_predict <- copy(dt[i:(i + horizon - 1), ])
    if (index_col_available)
    {
      dt_index <- copy(dt_predict[, ..index_name])
    }
    dt_vars_cols_train <- dt_train[, ..feat_cols]
    dt_target_train <- dt_train[, ..target_col]
    xgb <- xgboost(data = as.matrix(dt_vars_cols_train),
                   label = as.matrix(dt_target_train),
                   eta = eta,
                   max_depth = max_depth,
                   nround = nrounds,
                   objective = "binary:logistic",
                   colsample_bytree = 0.5,
                   subsample = 0.8,
                   verbose = F
    )
    #-- Predict on the out-of-sample window
    y_pred <- predict(xgb, newdata = as.matrix(dt_predict[, ..feat_cols]))
    #-- Attach predictions and ground truth to the index
    dt_index <- cbind(dt_index, data.table(prediction = y_pred))
    dt_index <- cbind(dt_index, dt_predict[, ..target_col])
    dt_res <- rbind(dt_res, dt_index)
    rm(dt_index)
    #-- Progress indicator; relies on this file's string-concatenating "+".
    cat("\r", round(100.0 * i / (nrow(dt) - horizon - 1)) + "%")
    i <- i + horizon
  }
  cat("\n\n")
  return(dt_res)
}
#-- String-concatenating "+": when either operand is a character vector the
#-- operands are pasted together with no separator; otherwise the primitive
#-- numeric "+" applies. NOTE: this shadows base "+" for the whole session.
"+" <- function(x, y) {
  if (!is.character(x) && !is.character(y)) {
    return(.Primitive("+")(x, y))
  }
  paste0(x, y)
}
#-- Sharpe ratio function
# Only the pred is used
# TODO: Use Sortino ratio instead and modify the way the sharpe_ratio is calculated
#-- Custom mlr performance function: final equity of the positively-classified
#-- trades divided by (1 + maximum drawdown). Only `pred` is used; the other
#-- arguments are required by mlr's measure-function signature.
#-- NOTE(review): reads PF from the enclosing/global environment — confirm it
#-- is defined before resampling runs.
sharpe_ratio = function(task, model, pred, feats, extra.args) {
predTable <- as.data.table(pred)
#-- Select only the trades we label as true because they build up the portfolio
predTable <- predTable[response==T]
# Fewer than 6 accepted trades scores 0 (not enough data for the statistic).
if(nrow(predTable)>5)
{
#-- truth is a factor: 2*(as.numeric(truth)-1.5) maps its two levels to -1/+1,
#-- then winners are credited PF instead of +1 before the cumulative sum.
predTable[,equity:=2*(as.numeric(truth)-1.5)][equity>0,equity:=PF][,equity:=cumsum(equity)][,drawdown:=cummax(equity)][,drawdown:=(drawdown-equity) ]
#-- Calculate the modified sharpe ratio by including the drawdown
(predTable[nrow(predTable), equity])/((1+max(predTable$drawdown)))
}else{
(0)
}
}
#-- Wrap the function as an mlr Measure (higher is better). This rebinds the
#-- name `sharpe_ratio` to the Measure object; the function defined above is
#-- captured in `fun` before the name is overwritten.
sharpe_ratio = makeMeasure(
id = "sharpe_ratio", name = "sharpe_ratio",
properties = c("classif", "classif.multi", "req.pred",
"req.truth"),
minimize = FALSE, fun = sharpe_ratio
)
#-- Get the optimal threshold to maximize the portfolio
#-- Sweep decision thresholds 0.01..0.99 over benchmark predictions and pick
#-- the one maximizing a drawdown-penalized total return.
#
# dt: prediction table coercible to data.table with columns prob.TRUE,
#     truth (factor) and iter (resampling iteration number).
# NOTE(review): reads PF from the enclosing/global environment.
# Returns list(drawdown at the best threshold, best total return,
#              number of rows with negative running equity,
#              best sharpe value, best threshold,
#              the accepted-trade table at the best threshold).
getBestThresh <- function(dt)
{
  res_orig <- as.data.table(dt)
  thresh_vec <- seq(0.01, 0.99, 0.01)
  bst_thresh <- 0
  max_sharpe_ratio <- -991
  bst_drawdown <- -9999
  max_avg_ret <- -999
  bst_dt <- NULL  # remains NULL when no threshold accepts enough trades
  iters <- max(dt$iter)
  for (th in thresh_vec)
  {
    res_sel <- copy(res_orig)
    res_sel[, response := prob.TRUE > th]
    res_sel <- res_sel[response == T]
    #-- Require a minimal number of accepted trades for meaningful statistics
    if (nrow(res_sel) > 10)
    {
      #-- Running equity: truth's two factor levels map to -1/+1, winners are
      #-- credited PF; drawdown = running max(equity) - equity.
      res_sel[,equity:=2*(as.numeric(truth)-1.5)][equity>0,equity:=PF][,equity:=cumsum(equity)][,drawdown:=cummax(equity)][,drawdown:=(drawdown-equity) ]
      total_ret <- res_sel[nrow(res_sel), equity]
      min_drawdown <- max(res_sel$drawdown)
      # (An unused std-dev-of-equity computation was removed here.)
      sharpe_ratio <- total_ret / ((1 + min_drawdown) * iters)
      if (sharpe_ratio > max_sharpe_ratio)
      {
        max_sharpe_ratio <- sharpe_ratio
        max_avg_ret <- total_ret
        bst_thresh <- th
        bst_drawdown <- min_drawdown
        bst_dt <- res_sel
      }
    }
  }
  #-- BUG FIX: bst_dt used to be referenced unconditionally in the return,
  #-- producing an obscure "object 'bst_dt' not found" error whenever every
  #-- threshold had 10 or fewer accepted trades. Fail fast with a clear message.
  if (is.null(bst_dt))
  {
    stop("getBestThresh: no threshold produced more than 10 accepted trades", call. = FALSE)
  }
  return(list(max(bst_dt$drawdown), max_avg_ret, nrow(bst_dt[equity < 0]), max_sharpe_ratio, bst_thresh, bst_dt))
}
#------------------------------------------------------------#
############### READ THE DATA AND FORMAT THE COLUMNS ########
#------------------------------------------------------------#
#-- Read the main ML file (built elsewhere for the configured SL/PF/SPREAD).
dt<-fread(paste0(data_intermediate_dir,"ML_SL_",SL,"_PF_",PF,"_SPREAD_",SPREAD,"_ALL.csv"))
#-- Attach a sequential row index used as the join key throughout this script.
dt[,index:= seq(1,nrow(dt))]
#-- Create the index -> Time lookup table
dt_time_lut <- dt[,.(index,Time)]
#-- Zero out wins reached faster than MIN_TRADE_TIME: they are probably
#-- news-driven spikes rather than repeatable signals.
#-- The "+" here is this file's string-concatenating override of base "+".
if(REMOVE_FAST_WINS)
{
for(pr in all_pairs)
{
dt[ get("buy_profit_"+pr)<MIN_TRADE_TIME ,("BUY_RES_"+pr):=0]
dt[ get("sell_profit_"+pr)<MIN_TRADE_TIME ,("SELL_RES_"+pr):=0]
}
}
#-- Compute the returns-aggregation column (ret_per) at the configured
#-- granularity; year is always included so periods don't collide across years.
if(returns_period=="week")
{
dt_time_lut[,ret_per:=year(Time)+"_"+week(Time)]
}else
{
if(returns_period=="day")
{
dt_time_lut[,ret_per:=year(Time)+"_"+month(Time)+"_"+day(Time)]
}else{
if(returns_period=="month")
{
dt_time_lut[,ret_per:=year(Time)+"_"+month(Time)]
}else{
stop("Incorrect aggregation period")
}
}
}
#-- Group the column types
results_cols <- names(dt)[grepl("BUY_RES",names(dt)) | grepl("SELL_RES",names(dt))]
#-- Remove near zero variance features
#inds_nearZero_var <- caret::nearZeroVar(x=dt[sample(1e4)])
#nonZeroVarCols <- setdiff(names(dt),names(dt)[inds_nearZero_var])
profit_loss_cols <- names(dt)[grepl("profit",names(dt)) | grepl("loss",names(dt))]
bs_cols <- names(dt)[grepl("^bs",names(dt))]
ohlc_cols <- names(dt)[grepl("Low$",names(dt)) | grepl("Close$",names(dt)) | grepl("Open$",names(dt)) | grepl("High$",names(dt))]
full_features <- setdiff(names(dt),c("index","Time",results_cols,profit_loss_cols,ohlc_cols,bs_cols))
##########################################################################################
########## PREPROCESSING: HARD FILTER OF INDICATOR TYPES ###########################
##########################################################################################
#-- Hard filter of indicator types
if(length(indicator_filter)>0)
{
regex_cmd <- "("+paste0(indicator_filter,collapse = "|")+")"
indicators_filtered<-full_features[grepl(regex_cmd,full_features)]
}else{
indicators_filtered<-full_features
}
#-- Hard filter of pair types
if(length(pair_filter)>0)
{
regex_cmd <- "("+paste0(pair_filter,collapse = "|")+")"
pairs_filtered<-full_features[grepl(regex_cmd,full_features)]
}else{
pairs_filtered<-full_features
}
#-- Combine indicator and pair filter
if(indicator_pair_filter=="AND")
{
full_features <- intersect(indicators_filtered,pairs_filtered)
}else{
full_features <- unique(c(indicators_filtered,pairs_filtered))
}
#-- Feature selection: optionally drop near-zero-variance and highly
#-- correlated features (both estimated on 5k-row samples), then build dt_sel
#-- with index + selected features + result columns.
if(CORRELATION_FEATURE_SELECTION)
{
  dt_features <- copy(dt[,..full_features])
  #-- Remove near-zero-variance features
  inds_nearZero_var <- caret::nearZeroVar(x=dt_features[sample(seq(1e4,nrow(dt_features)),5e3)])
  if(length(inds_nearZero_var)>0)
  {
    nonZeroVarCols <- setdiff(names(dt_features),names(dt_features)[inds_nearZero_var])
  }else{
    nonZeroVarCols<-names(dt_features)
  }
  # BUG FIX: was dt_features[,,..nonZeroVarCols] — the extra comma passes the
  # column selection as `by` instead of `j`. Select the columns in `j`.
  dt_features <- dt_features[,..nonZeroVarCols]
  cols <- names(dt_features)
  dt_features[,(cols):=lapply(.SD,as.numeric),.SDcols = cols]
  #-- Drop one feature out of every highly correlated pair
  cr <- cor(dt_features[sample(seq(2e4,nrow(dt_features)),5e3),], use="complete.obs")
  highly_correlated_features<-caret::findCorrelation(x=cr,cutoff=CORRELATION_THRESHOLD)
  correlated_features_to_be_excluded<-names(dt_features)[highly_correlated_features]
  feat_cols<-setdiff(full_features,correlated_features_to_be_excluded)
  unique_relevant_cols <- c("index",feat_cols,results_cols)
  dt_sel<-dt[,..unique_relevant_cols]
}else{
  #-- No correlation filtering: keep every pre-filtered feature.
  feat_cols <- full_features
  index_cols <- "index"
  relevant_cols <- c(index_cols,feat_cols,results_cols)
  dt_sel <- dt[,..relevant_cols]
}
#-- Center/scale the feature columns in place.
#-- NOTE(review): the method is hard-coded to c("center", "scale") —
#-- preprocess_steps only gates whether this runs, its values are ignored.
if(length(preprocess_steps)>0)
{
dt_feature_part <- dt_sel[,..feat_cols]
preProcValues <- preProcess(dt_feature_part, method = c("center", "scale"))
tmp <- predict(preProcValues,dt_feature_part)
dt_sel[,(feat_cols):=tmp]
}
#------------------------------------------------------------#
############## SPLIT TRAIN AND TEST ##########################
#------------------------------------------------------------#
# Time-ordered split: the last `test_ratio` fraction of rows is held out.
split_row <- floor(nrow(dt_sel) * (1 - test_ratio))
# BUG FIX: the upper bound previously used nrow(dt); that equals nrow(dt_sel)
# here only because dt_sel was derived row-for-row from dt — reference dt_sel
# explicitly so the split stays correct if that ever changes.
dt_test <- dt_sel[split_row:nrow(dt_sel),]
dt_sel <- dt_sel[1:(split_row-1),]
#-- Free memory: the held-out part is not used in this benchmarking script.
rm(dt_test)
# Guard the rm() calls: tmp/dt_feature_part only exist when preprocessing ran,
# and dt_features only when correlation-based feature selection ran.
if (exists("tmp")) rm(tmp)
rm(dt)
if (exists("dt_feature_part")) rm(dt_feature_part)
if (exists("dt_features")) rm(dt_features)
#------------------------------------------------------------#
################## CREATE MLR TASK ##########################
#------------------------------------------------------------#
models_with_performance_issues <- c("classif.neuralnet","classif.ksvm","classif.extraTrees","classif.fdausc.glm","classif.fdausc.kernel","classif.fdausc.knn","classif.fdausc.np","classif.randomForestSRC","classif.featureless","classif.bartMachine","classif.blackboost","classif.cforest","classif.evtree","classif.gausspr","classif.rda")
#already_tested <- c("classif.ctree","classif.h2o.randomForest","classif.naiveBayes","classif.C50","classif.IBk","classif.nnTrain","classif.ada","classif.J48","classif.JRip","classif.ksvm","classif.lqa","classif.binomial","classif.earth","classif.LiblineaRL2LogReg","classif.pamr","classif.extraTrees","classif.mda","classif.plsdaCaret","classif.h2o.glm","classif.mlp","classif.probit","classif.h2o.gbm","classif.nnet","classif.h2o.deeplearning","classif.neuralnet","classif.randomForest","classif.glmnet","classif.cvglmnet","classif.OneR","classif.ranger","classif.gamboost","classif.plr","classif.rotationForest","classif.gbm","classif.logreg","classif.multinom","classif.LiblineaRL1LogReg","classif.adaboostm1","classif.nodeHarvest","classif.PART","classif.saeDNN","classif.rpart","classif.dbnDNN","classif.svm","classif.qda","classif.xgboost","classif.glmboost")
#classif_learners = all_learners[grepl("^classif",class) & installed==T & prob==T & !(class %in% already_tested) &!(class %in% models_with_performance_issues) &!(class %in% c("classif.rFerns","classif.rknn","classif.RRF","classif.rrlda","classif.sda",
# "classif.clusterSVM","classif.dcSVM","classif.fnn","classif.gaterSVM","classif.geoDA",
# "classif.knn","classif.LiblineaRL1L2SVC")) ,class]
#-- Choose the classifiers
all_learners<-as.data.table(listLearners())
classif_learners = all_learners[grepl("^classif",class) & installed==T & prob==T &
!(class %in% models_with_performance_issues) &
!(class %in% c("classif.rFerns","classif.rknn","classif.RRF","classif.rrlda","classif.sda","classif.knn","classif.LiblineaRL1L2SVC")) ,class]
#-- classif_learners<-c("classif.neuralnet")
#fwrite(data.table(classifiers=classif_learners),data_output_dir+"valid_classifiers.csv")
for (curr_model in unique(config_file$instruments))
{
cat("\n######## "+curr_model+" ############\n")
dt_curr<- copy(dt_sel)
lrnrs = lapply(classif_learners,makeLearner,predict.type="prob")
print(length(classif_learners))
# curr_model = "SELL_RES_USDJPY"
setnames(dt_curr,curr_model,"TARGET")
feats_and_target <- c(feat_cols,"TARGET")
dt_train <- dt_curr[,..feats_and_target]
rm(dt_curr)
#-- Get only non NA rows
dt_train <- na.omit(dt_train)
tsk <- makeClassifTask(id=curr_model,data=as.data.frame(dt_train), target="TARGET")
#-- TO check what are the available measures
#listMeasures(tsk)
#-- Make the resampling strategy
rsmpl_desc = makeResampleDesc(method=wind,initial.window=initial.window,horizon=horizon, skip =horizon)
#-- Benchmark
bmr<-benchmark(lrnrs,tsk,rsmpl_desc,measures = auc)
print(bmr)
#-- Get the iteration results and store them
res <- as.data.table(bmr)
fwrite(res,data_output_dir+curr_model+"/performance_iterations_"+Sys.Date()+".csv")
#-- Get the mean and variance of the auc
eta_val = 0.0001
#res[,.(sharpe=mean(auc),eta_val=std(auc)),by="learner.id"]
res_sharpe<-merge(res[,.(stdev=sqrt(var(auc))),by="learner.id"],res[,.(mean_v=mean(auc)),by="learner.id"])
res_sharpe[,sharpe:=mean_v/stdev][order(-sharpe)]
res_sharpe<-res_sharpe[order(-sharpe)]
fwrite(res_sharpe,data_output_dir+curr_model+"/res_sharpe_"+Sys.Date()+".csv")
predictions_str <- as.data.table(getBMRPredictions(bmr,as.df = T))
data_baselearners<-merge(dcast(data=predictions_str, id ~ learner.id, value.var = "prob.1"),unique(predictions_str[,.(id,truth)],by=c("id","truth")))
rm(predictions_str)
data_baselearners<- data_baselearners[order(id)]
data_baselearners[,id:=NULL]
dt_train_cor <- data_baselearners[,truth:=NULL]
cr<-as.data.table(cor(dt_train_cor))
cr$learner.id <- names(cr)
fwrite(cr,data_output_dir+curr_model+"/correlation_matrix_"+Sys.Date()+".csv")
fwrite(config_file,data_output_dir+curr_model+"/config_file_"+Sys.Date()+".csv")
cat("\n######################################\n")
#res_sharpe[,.(learner.id,sharpe)]
#-- Get performance matrix for easy matrix multiplication with the correlation matrix
##perf_mat <- res_sharpe$sharpe %*% t(res_sharpe$sharpe)
#perf_mat <-as.data.table(perf_mat)
#names(perf_mat)<-as.character(res_sharpe$learner.id)
}
#parallelStop()
if("STACK"!="STACK")
{
bst_learners_stack <- unique(c("truth",as.vector(res_sharpe[order(-sharpe)][,learner.id])[seq(1,2)],as.vector(res_sharpe[order(-mean_v)][,learner.id])[seq(1,2)]))
dt_train <- data_baselearners[,..bst_learners_stack]
#-- Classifier task
tsk_stack <- makeClassifTask(id=curr_model+"_stack",data=as.data.frame(dt_train), target="truth")
classif_learners<-c("classif.glmnet")
#classif_learners<-unique(c("classif.glmnet",as.vector(res_sharpe[order(-sharpe)][,learner.id])[seq(1,5)],as.vector(res_sharpe[order(-mean_v)][,learner.id])[seq(1,5)]))
lrnrs_stack = lapply(classif_learners,makeLearner,predict.type="prob")
rsmpl_desc_stack = makeResampleDesc(method=wind,initial.window=initial.window_stack,horizon=horizon_stack, skip =horizon)
bmr_stack<-benchmark(lrnrs_stack,tsk_stack,rsmpl_desc,measures = auc)
}
| /01_code/prod/3_MAIN_MULTIPLE.R | no_license | mohamedabolfadl/main_fx | R | false | false | 20,948 | r | #-- This script trains all the available models in mlr (48) and stores their aucs in a table for the pairs defined
#-- Usage:
# In the config_file, change the instruments to the ones you want to train. Choose BUY and SELL of the same model so that the indicators selected are relevant
# In the config_file, choose the indicator types and the pairfilter to limit, and the indicator_pair_filter. Those define the kind of features that should be
# included in the model training process
#Output:
# The script generates the average auc per model and the cross correlation of the predictions of the models per pair
rm(list=ls())
set.seed(123)
library(data.table)
library(lubridate)
library(mlr)
library(ggplot2)
library(xgboost)
library(crayon)
library(plotly)
library(caret)
library(parallelMap)
#--- Directories
data_output_dir<-"02_data/output/"
data_input_dir<-"02_data/input/"
data_intermediate_dir<-"02_data/intermediate/"
#------------------------------------------------------------#
################## DEFINE THE CONFIGURATIONS #################
#------------------------------------------------------------#
config_file <- data.table(
instruments = c("BUY_RES_AUDUSD","SELL_RES_AUDUSD"), # Which models are to be trained in this script
SL = 15, # Stop loss
PF = 1, # Profit factor
SPREAD = 3, # Spread, make sure there is a file with the specified spread
#indicator_filter = c("EMA","TMS","SMA","atr","dist","RSI","williams"),
indicator_filter = c("EMA","TMS","SMA","atr","RSI","williams"),
indicator_pair_filter = c("AND"),
pair_filter = c("AUD","XAU"),
preprocess_steps = c("center","scale"),
test_portion = 0.3, # Out of sample test part for final evaluation
window_type = "FixedWindowCV", #"GrowingWindowCV",
initial.window = 1e4, # Window size for training
horizon = 1e4, # Future window for testing
initial.window_stack = 5e3, # Window size for training
horizon_stack = 1e4, # Future window for testing
REMOVE_FAST_WINS = T, # Flag to remove the positive trades which are finished in less than MIN_TRADE_TIME
MIN_TRADE_TIME = 15,
CORRELATION_FEATURE_SELECTION = T, # Flag whether to filter out the highly correlated features
CORRELATION_THRESHOLD = 0.9, # Filter to indicate the maximum correlation between indicators to be included in the training
READ_SELECTED_FEATURES = F,
returns_period = "week", #"month","day" defines the period of aggregating the returns
WRITE_FLAG = F
)
all_pairs <- c("EURUSD","GBPUSD","AUDUSD","USDJPY","USDCHF","NZDUSD","XAUUSD","USDCAD")
instruments <- data.table(currs = unique(config_file$instruments))
indicator_filter = unique(config_file[!is.na(indicator_filter),indicator_filter])
indicator_pair_filter = unique(config_file[,indicator_pair_filter])
pair_filter = unique(config_file[,pair_filter])
MIN_TRADE_TIME = config_file$MIN_TRADE_TIME[1]
preprocess_steps <- unique(config_file[!is.na(preprocess_steps),preprocess_steps])
CORRELATION_FEATURE_SELECTION <- config_file$CORRELATION_FEATURE_SELECTION[1] # use correlations to select the features
#-- Read the configurations
returns_period = config_file$returns_period[1] #"month","day" defines the period of aggregating the returns
READ_SELECTED_FEATURES <- config_file$READ_SELECTED_FEATURES[1]
WRITE_FLAG <- config_file$WRITE_FLAG[1]
SPREAD <- config_file$SPREAD[1] # Spread, make sure there is a file with the specified spread
SL <- config_file$SL[1]
PF <- config_file$PF[1]
test_ratio <- config_file$test_portion[1]
initial.window<-config_file$initial.window[1]
horizon <- config_file$horizon[1]
initial.window_stack<-config_file$initial.window_stack[1]
horizon_stack <- config_file$horizon_stack[1]
wind <- config_file$window_type[1]
REMOVE_FAST_WINS<-config_file$REMOVE_FAST_WINS[1]
CORRELATION_THRESHOLD <- config_file$CORRELATION_THRESHOLD[1]
#------------------------------------------------------------#
############### DEFINE THE FUNCTIONS #########################
#------------------------------------------------------------#
get_sharpe=function(dt_curr,dt_time_lut_prediction_period,PF)
{
dt_portfolio <- merge(dt_time_lut_prediction_period,dt_curr,all.x = T,by="index")
#-- Add equity, returns and drawdown
dt_portfolio[TARGET==1 & decision==1,returns:=PF][TARGET==0 & decision==1,returns:=-1][is.na(returns),returns:=0][,equity:=cumsum(returns)][,drawdown:=cummax(equity)][,drawdown:=(drawdown-equity) ]
mean_returns <- dt_portfolio[,.(mean_returns=sum(returns)),by="ret_per"]
return(list(mean_returns,mean(mean_returns$mean_returns),var(mean_returns$mean_returns),max(dt_portfolio$drawdown)))
}
get_mean_returns_and_variance=function(dt_res,dt_time_lut_prediction_period,PF)
{
step <-0.01
th<-step
bst_sharpe <- -999
bst_thr <- 0.01
#--DEBUG
#th<-0.54
while(th<0.95)
{
dt_curr<-copy(dt_res)
dt_curr[,decision:=as.numeric(prediction>th)]
dt_curr <- dt_curr[decision>0.5]
ret_varg<-get_sharpe(dt_curr,dt_time_lut_prediction_period,PF)
if((ret_varg[[2]]==0 & ret_varg[[3]]==0))
{
curr_sharpe<-0
}else{
#-- SHARPE RATIO CALCULATION
#curr_sharpe <- ret_varg[[2]]/sqrt(1+ret_varg[[3]]+ret_varg[[4]])
#curr_sharpe <- ret_varg[[2]]/(1+sqrt(ret_varg[[3]]))
#curr_sharpe <- ret_varg[[2]]/(0.01+sqrt(ret_varg[[3]]))
curr_sharpe <-1-pnorm(0,ret_varg[[2]],sqrt(ret_varg[[3]]))
}
if(curr_sharpe>bst_sharpe)
{
bst_sharpe<-curr_sharpe
bst_thr <- th
bst_mean_ret <- ret_varg[[2]]
#bst_var_ret <- sqrt(1+ret_varg[[3]]+ret_varg[[4]])
#bst_var_ret <- (1+sqrt(ret_varg[[3]]))
#bst_var_ret <- (0.01+sqrt(ret_varg[[3]]))
bst_var_ret <- (sqrt(ret_varg[[3]]))
}
th<-th+step
}
return(list(bst_mean_ret,bst_var_ret,bst_thr))
}
#-- Walk-forward (rolling window) training and prediction with xgboost.
#
# dt             : data.table with feature columns, a binary target column and
#                  (optionally) a row-index column.
# nrounds/eta/max_depth : xgboost hyper-parameters.
# initial.window : number of rows each model is trained on.
# horizon        : number of following rows predicted before re-training.
# target_name    : name of the 0/1 target column (default "TARGET").
# index_name     : name of the row-index column (default "index").
#
# Returns a data.table with one row per predicted observation:
# index, prediction (probability of class 1) and the true TARGET.
train_and_predict = function(dt,nrounds,eta,max_depth,initial.window,horizon,target_name="TARGET",index_name="index")
{
  #-- Get feature columns and target column
  feat_cols <- setdiff(names(dt), target_name)
  target_col <- target_name
  #-- Check whether the index column is present
  index_col_available <- index_name %in% names(dt)
  #-- Exclude the index from the feature columns
  if (index_col_available)
  {
    feat_cols <- setdiff(feat_cols, index_name)
  }
  # NOTE(review): when no index column exists, dt_index is never created and
  # the cbind() below fails — callers in this file always supply one.
  #-- Initialize the result table
  dt_res <- data.table(prediction = numeric(0), index = numeric(0), TARGET = numeric(0))
  i <- 1 + initial.window
  while (i < (nrow(dt) - horizon - 1))
  {
    #-- BUG FIX: the original "(i-initial.window):i-1" parses as
    #-- "((i-initial.window):i) - 1" because ":" binds tighter than binary "-",
    #-- shifting the training window one row back and making it one row longer
    #-- than initial.window. Parenthesize the upper bound.
    dt_train <- copy(dt[(i - initial.window):(i - 1), ])
    dt_predict <- copy(dt[i:(i + horizon - 1), ])
    if (index_col_available)
    {
      dt_index <- copy(dt_predict[, ..index_name])
    }
    dt_vars_cols_train <- dt_train[, ..feat_cols]
    dt_target_train <- dt_train[, ..target_col]
    xgb <- xgboost(data = as.matrix(dt_vars_cols_train),
                   label = as.matrix(dt_target_train),
                   eta = eta,
                   max_depth = max_depth,
                   nround = nrounds,
                   objective = "binary:logistic",
                   colsample_bytree = 0.5,
                   subsample = 0.8,
                   verbose = F
    )
    #-- Predict on the out-of-sample window
    y_pred <- predict(xgb, newdata = as.matrix(dt_predict[, ..feat_cols]))
    #-- Attach predictions and ground truth to the index
    dt_index <- cbind(dt_index, data.table(prediction = y_pred))
    dt_index <- cbind(dt_index, dt_predict[, ..target_col])
    dt_res <- rbind(dt_res, dt_index)
    rm(dt_index)
    #-- Progress indicator; relies on this file's string-concatenating "+".
    cat("\r", round(100.0 * i / (nrow(dt) - horizon - 1)) + "%")
    i <- i + horizon
  }
  cat("\n\n")
  return(dt_res)
}
#-- Overload "+" so that if either operand is character the operands are
#-- concatenated with no separator; otherwise the primitive numeric "+" is
#-- applied. NOTE: this shadows base "+" for the whole session.
"+" = function(x,y) {
if(is.character(x) || is.character(y)) {
return(paste(x , y, sep=""))
} else {
.Primitive("+")(x,y)
}
}
#-- Sharpe ratio function
# Only the pred is used
# TODO: Use Sortino ratio instead and modify the way the sharpe_ratio is calculated
sharpe_ratio = function(task, model, pred, feats, extra.args) {
predTable <- as.data.table(pred)
#-- Select only the trades we label as true because they build up the portfolio
predTable <- predTable[response==T]
if(nrow(predTable)>5)
{
#-- Get the equity and drawdown
predTable[,equity:=2*(as.numeric(truth)-1.5)][equity>0,equity:=PF][,equity:=cumsum(equity)][,drawdown:=cummax(equity)][,drawdown:=(drawdown-equity) ]
#-- Calculate the modified sharpe ratio by including the drawdown
(predTable[nrow(predTable), equity])/((1+max(predTable$drawdown)))
}else{
(0)
}
}
#-- Set the sharpe ratio as a custom function for optimizing the models
sharpe_ratio = makeMeasure(
id = "sharpe_ratio", name = "sharpe_ratio",
properties = c("classif", "classif.multi", "req.pred",
"req.truth"),
minimize = FALSE, fun = sharpe_ratio
)
#-- Get the optimal threshold to maximize the portfolio
getBestThresh <- function(dt)
{
res_orig <- as.data.table(dt)
thresh_vec <- seq(0.01,0.99,0.01)
bst_thresh <-0
max_sharpe_ratio <- -991
bst_drawdown <- -9999
max_avg_ret <- -999
iters <- max(dt$iter)
for (th in thresh_vec)
{
res_sel <- copy(res_orig)
res_sel[,response:=prob.TRUE>th]
res_sel <- res_sel[response==T]
if(nrow(res_sel)>10)
{
#-- Compute the sharpe ratio as average ret per tra over the variance
#-- Net equity
res_sel[,equity:=2*(as.numeric(truth)-1.5)][equity>0,equity:=PF][,equity:=cumsum(equity)][,drawdown:=cummax(equity)][,drawdown:=(drawdown-equity) ]
total_ret <- res_sel[nrow(res_sel), equity]
std_ret <- sqrt(var(res_sel$equity))
min_drawdown <- max(res_sel$drawdown)
sharpe_ratio <- total_ret/((1+min_drawdown)*iters)
if(sharpe_ratio>max_sharpe_ratio)
{
max_sharpe_ratio <- sharpe_ratio
max_avg_ret <- total_ret
bst_thresh <- th
bst_drawdown <- min_drawdown
bst_dt <- res_sel
}
}
}
return( list(max(bst_dt$drawdown),max_avg_ret, nrow(bst_dt[equity<0]) , max_sharpe_ratio, bst_thresh ,bst_dt))
}
#------------------------------------------------------------#
############### READ THE DATA AND FORMAT THE COLUMNS ########
#------------------------------------------------------------#
#-- Read the main ML file
dt<-fread(paste0(data_intermediate_dir,"ML_SL_",SL,"_PF_",PF,"_SPREAD_",SPREAD,"_ALL.csv"))
#-- Attach the index column
dt[,index:= seq(1,nrow(dt))]
#-- Create the index, time lookup table
dt_time_lut <- dt[,.(index,Time)]
#- Setting the trades with quick wins to zeros, because they are probably resulting from news
if(REMOVE_FAST_WINS)
{
for(pr in all_pairs)
{
dt[ get("buy_profit_"+pr)<MIN_TRADE_TIME ,("BUY_RES_"+pr):=0]
dt[ get("sell_profit_"+pr)<MIN_TRADE_TIME ,("SELL_RES_"+pr):=0]
}
}
#-- COmputing the aggregation column
if(returns_period=="week")
{
dt_time_lut[,ret_per:=year(Time)+"_"+week(Time)]
}else
{
if(returns_period=="day")
{
dt_time_lut[,ret_per:=year(Time)+"_"+month(Time)+"_"+day(Time)]
}else{
if(returns_period=="month")
{
dt_time_lut[,ret_per:=year(Time)+"_"+month(Time)]
}else{
stop("Incorrect aggregation period")
}
}
}
#-- Group the column types: label columns, P&L bookkeeping, bid/ask ('bs')
#-- and raw OHLC prices -- everything that must NOT leak into the feature set.
all_cols <- names(dt)
results_cols <- all_cols[grepl("BUY_RES|SELL_RES", all_cols)]
#-- Remove near zero variance features (kept disabled here; done later in the
#-- CORRELATION_FEATURE_SELECTION branch)
#inds_nearZero_var <- caret::nearZeroVar(x=dt[sample(1e4)])
#nonZeroVarCols <- setdiff(names(dt),names(dt)[inds_nearZero_var])
profit_loss_cols <- all_cols[grepl("profit|loss", all_cols)]
bs_cols <- all_cols[grepl("^bs", all_cols)]
ohlc_cols <- all_cols[grepl("(Low|Close|Open|High)$", all_cols)]
#-- The features are whatever remains after removing all of the above
full_features <- setdiff(all_cols, c("index","Time",results_cols,profit_loss_cols,ohlc_cols,bs_cols))
##########################################################################################
##########    PREPROCESSING: HARD FILTER OF INDICATOR TYPES      #########################
##########################################################################################
#-- Keep the features whose names match at least one of 'patterns' (regex
#-- alternation); with no patterns, the full feature set passes through.
#-- FIX: replaces two copy-pasted filter blocks that also relied on a
#-- non-base "+"-on-strings operator; plain paste0() is used instead.
.match_features <- function(patterns, features)
{
  if (length(patterns) == 0) return(features)
  regex_cmd <- paste0("(", paste0(patterns, collapse = "|"), ")")
  features[grepl(regex_cmd, features)]
}
#-- Hard filter of indicator types
indicators_filtered <- .match_features(indicator_filter, full_features)
#-- Hard filter of pair types
pairs_filtered <- .match_features(pair_filter, full_features)
#-- Combine indicator and pair filter ("AND" = intersection, otherwise union)
if(indicator_pair_filter=="AND")
{
  full_features <- intersect(indicators_filtered, pairs_filtered)
}else{
  full_features <- unique(c(indicators_filtered, pairs_filtered))
}
#-- Feature selection: either drop near-zero-variance and highly-correlated
#-- features (CORRELATION_FEATURE_SELECTION) or keep the full filtered set.
#-- Both branches define 'feat_cols' and the working table 'dt_sel'.
if(CORRELATION_FEATURE_SELECTION)
{
  dt_features <- copy(dt[,..full_features])
  #-- Remove near zero variance features, estimated on a 5e3-row sample drawn
  #-- from rows 1e4..nrow (the first 1e4 rows are skipped -- presumably
  #-- warm-up; TODO confirm. Note this errors if the table has < 1e4 rows.)
  inds_nearZero_var <- caret::nearZeroVar(x=dt_features[sample(seq(1e4,nrow(dt_features)),5e3)])
  if(length(inds_nearZero_var)>0)
  {
    nonZeroVarCols <- setdiff(names(dt_features),names(dt_features)[inds_nearZero_var])
  }else{
    nonZeroVarCols<-names(dt_features)
  }
  #-- FIX: was dt_features[,,..nonZeroVarCols] -- the stray extra comma makes
  #-- data.table treat ..nonZeroVarCols as the 'by' argument instead of 'j'.
  dt_features <- dt_features[,..nonZeroVarCols]
  #-- Coerce every remaining feature to numeric so cor() can run
  cols <- names(dt_features)
  dt_features[,(cols):=lapply(.SD,as.numeric),.SDcols = cols]
  #dt_features <- sapply( dt_features, as.numeric )
  #-- Pairwise correlations on a 5e3-row sample drawn from rows 2e4..nrow
  cr <- cor(dt_features[sample(seq(2e4,nrow(dt_features)),5e3),], use="complete.obs")
  highly_correlated_features<-caret::findCorrelation(x=cr,cutoff=CORRELATION_THRESHOLD)
  correlated_features_to_be_excluded<-names(dt_features)[highly_correlated_features]
  feat_cols<-setdiff(full_features,correlated_features_to_be_excluded)
  unique_relevant_cols <- c("index",feat_cols,results_cols)
  dt_sel<-dt[,..unique_relevant_cols]
}else{
  #-- Previous implementation
  #feat_cols <- names(dt)[grepl("TMS",names(dt)) | grepl("RSI$",names(dt))]
  feat_cols <- full_features
  index_cols <- "index"
  relevant_cols <- c(index_cols,feat_cols,results_cols)
  dt_sel <- dt[,..relevant_cols]
}
#-- Optionally center/scale the feature columns in place (caret::preProcess).
#-- NOTE(review): 'preprocess_steps' only gates whether this runs; the method
#-- is hard-coded to c("center","scale") regardless of its contents -- confirm
#-- that is intended.
if(length(preprocess_steps)>0)
{
dt_feature_part <- dt_sel[,..feat_cols]
preProcValues <- preProcess(dt_feature_part, method = c("center", "scale"))
tmp <- predict(preProcValues,dt_feature_part)
dt_sel[,(feat_cols):=tmp]
}
#------------------------------------------------------------#
############## SPLIT TRAIN AND TEST ##########################
#------------------------------------------------------------#
#-- Chronological split: the last 'test_ratio' share of rows becomes the test
#-- set. FIX: the upper bound previously used nrow(dt) (same value here since
#-- dt_sel is a column subset of dt, but the wrong table and fragile if dt is
#-- ever row-filtered); everything is now computed from dt_sel.
split_row <- floor(nrow(dt_sel) * (1 - test_ratio))
dt_test <- dt_sel[split_row:nrow(dt_sel),]
dt_sel <- dt_sel[1:(split_row - 1),]
#-- Remove tests since we are not validating at this point; free the big
#-- intermediates. FIX: only drop objects that actually exist ('tmp' and
#-- 'dt_feature_part' are created only when preprocessing ran, 'dt_features'
#-- only under CORRELATION_FEATURE_SELECTION), so rm() no longer warns.
rm(dt_test)
rm(list = intersect(c("tmp", "dt", "dt_feature_part", "dt_features"), ls()))
#------------------------------------------------------------#
################## CREATE MLR TASK ##########################
#------------------------------------------------------------#
#-- Learners known to be too slow or unstable to benchmark; excluded below.
models_with_performance_issues <- c("classif.neuralnet","classif.ksvm","classif.extraTrees","classif.fdausc.glm","classif.fdausc.kernel","classif.fdausc.knn","classif.fdausc.np","classif.randomForestSRC","classif.featureless","classif.bartMachine","classif.blackboost","classif.cforest","classif.evtree","classif.gausspr","classif.rda")
#already_tested <- c("classif.ctree","classif.h2o.randomForest","classif.naiveBayes","classif.C50","classif.IBk","classif.nnTrain","classif.ada","classif.J48","classif.JRip","classif.ksvm","classif.lqa","classif.binomial","classif.earth","classif.LiblineaRL2LogReg","classif.pamr","classif.extraTrees","classif.mda","classif.plsdaCaret","classif.h2o.glm","classif.mlp","classif.probit","classif.h2o.gbm","classif.nnet","classif.h2o.deeplearning","classif.neuralnet","classif.randomForest","classif.glmnet","classif.cvglmnet","classif.OneR","classif.ranger","classif.gamboost","classif.plr","classif.rotationForest","classif.gbm","classif.logreg","classif.multinom","classif.LiblineaRL1LogReg","classif.adaboostm1","classif.nodeHarvest","classif.PART","classif.saeDNN","classif.rpart","classif.dbnDNN","classif.svm","classif.qda","classif.xgboost","classif.glmboost")
#classif_learners = all_learners[grepl("^classif",class) & installed==T & prob==T & !(class %in% already_tested) &!(class %in% models_with_performance_issues) &!(class %in% c("classif.rFerns","classif.rknn","classif.RRF","classif.rrlda","classif.sda",
#                                                                                                          "classif.clusterSVM","classif.dcSVM","classif.fnn","classif.gaterSVM","classif.geoDA",
#                                                                                                          "classif.knn","classif.LiblineaRL1L2SVC")) ,class]
#-- Choose the classifiers: every installed mlr classifier that can output
#-- probabilities (needed for AUC), minus the exclusion lists above.
all_learners<-as.data.table(listLearners())
#-- FIX: use TRUE instead of T (T/F are ordinary variables and can be rebound)
classif_learners <- all_learners[grepl("^classif",class) & installed==TRUE & prob==TRUE &
                                 !(class %in% models_with_performance_issues) &
                                 !(class %in% c("classif.rFerns","classif.rknn","classif.RRF","classif.rrlda","classif.sda","classif.knn","classif.LiblineaRL1L2SVC")) ,class]
#-- classif_learners<-c("classif.neuralnet")
#fwrite(data.table(classifiers=classif_learners),data_output_dir+"valid_classifiers.csv")
#-- Main benchmarking loop: one pass per target instrument/column. For each
#-- target: build an mlr classification task on the selected features, run a
#-- walk-forward resampling benchmark over every candidate learner, and write
#-- the per-iteration AUCs, a "sharpe"-style mean/sd ranking, and the
#-- correlation matrix of the base-learner predictions under
#-- data_output_dir/<target>/.
#-- NOTE(review): '+' on strings below relies on a custom operator defined
#-- elsewhere in the project; base R would error.
for (curr_model in unique(config_file$instruments))
{
cat("\n######## "+curr_model+" ############\n")
dt_curr<- copy(dt_sel)
#-- Fresh learner objects each iteration (probability output needed for AUC)
lrnrs = lapply(classif_learners,makeLearner,predict.type="prob")
print(length(classif_learners))
# curr_model = "SELL_RES_USDJPY"
#-- Rename the current target column to the generic name "TARGET"
setnames(dt_curr,curr_model,"TARGET")
feats_and_target <- c(feat_cols,"TARGET")
dt_train <- dt_curr[,..feats_and_target]
rm(dt_curr)
#-- Get only non NA rows
dt_train <- na.omit(dt_train)
tsk <- makeClassifTask(id=curr_model,data=as.data.frame(dt_train), target="TARGET")
#-- TO check what are the available measures
#listMeasures(tsk)
#-- Make the resampling strategy ('wind', 'initial.window' and 'horizon' come
#-- from the surrounding configuration; skip=horizon gives non-overlapping
#-- test windows)
rsmpl_desc = makeResampleDesc(method=wind,initial.window=initial.window,horizon=horizon, skip =horizon)
#-- Benchmark
bmr<-benchmark(lrnrs,tsk,rsmpl_desc,measures = auc)
print(bmr)
#-- Get the iteration results and store them
res <- as.data.table(bmr)
fwrite(res,data_output_dir+curr_model+"/performance_iterations_"+Sys.Date()+".csv")
#-- Get the mean and variance of the auc
#-- NOTE(review): 'eta_val' is assigned but never used afterwards
eta_val = 0.0001
#res[,.(sharpe=mean(auc),eta_val=std(auc)),by="learner.id"]
res_sharpe<-merge(res[,.(stdev=sqrt(var(auc))),by="learner.id"],res[,.(mean_v=mean(auc)),by="learner.id"])
#-- NOTE(review): the trailing [order(-sharpe)] on the next line is computed
#-- and discarded; the reordering that sticks is the assignment just below
res_sharpe[,sharpe:=mean_v/stdev][order(-sharpe)]
res_sharpe<-res_sharpe[order(-sharpe)]
fwrite(res_sharpe,data_output_dir+curr_model+"/res_sharpe_"+Sys.Date()+".csv")
#-- Wide table of per-id predicted probabilities, one column per learner,
#-- merged with the (id, truth) pairs
predictions_str <- as.data.table(getBMRPredictions(bmr,as.df = T))
data_baselearners<-merge(dcast(data=predictions_str, id ~ learner.id, value.var = "prob.1"),unique(predictions_str[,.(id,truth)],by=c("id","truth")))
rm(predictions_str)
data_baselearners<- data_baselearners[order(id)]
data_baselearners[,id:=NULL]
#-- NOTE(review): ':=' removes 'truth' by REFERENCE, so 'dt_train_cor' and
#-- 'data_baselearners' are the same table without the truth column; the
#-- (currently disabled) STACK section later still expects "truth" to exist.
dt_train_cor <- data_baselearners[,truth:=NULL]
cr<-as.data.table(cor(dt_train_cor))
cr$learner.id <- names(cr)
fwrite(cr,data_output_dir+curr_model+"/correlation_matrix_"+Sys.Date()+".csv")
fwrite(config_file,data_output_dir+curr_model+"/config_file_"+Sys.Date()+".csv")
cat("\n######################################\n")
#res_sharpe[,.(learner.id,sharpe)]
#-- Get performance matrix for easy matrix multiplication with the correlation matrix
##perf_mat <- res_sharpe$sharpe %*% t(res_sharpe$sharpe)
#perf_mat <-as.data.table(perf_mat)
#names(perf_mat)<-as.character(res_sharpe$learner.id)
}
#parallelStop()
#-- Stacking stage: permanently disabled by the constant-FALSE guard below.
#-- NOTE(review): if this branch is ever re-enabled, 'data_baselearners' no
#-- longer contains the "truth" column (it was removed by reference in the
#-- loop above via data_baselearners[,truth:=NULL]), so both the column
#-- selection and the task target will fail; re-add "truth" first.
if("STACK"!="STACK")
{
#-- Stack on the two best learners by sharpe plus the two best by mean AUC
bst_learners_stack <- unique(c("truth",as.vector(res_sharpe[order(-sharpe)][,learner.id])[seq(1,2)],as.vector(res_sharpe[order(-mean_v)][,learner.id])[seq(1,2)]))
dt_train <- data_baselearners[,..bst_learners_stack]
#-- Classifier task
tsk_stack <- makeClassifTask(id=curr_model+"_stack",data=as.data.frame(dt_train), target="truth")
classif_learners<-c("classif.glmnet")
#classif_learners<-unique(c("classif.glmnet",as.vector(res_sharpe[order(-sharpe)][,learner.id])[seq(1,5)],as.vector(res_sharpe[order(-mean_v)][,learner.id])[seq(1,5)]))
lrnrs_stack = lapply(classif_learners,makeLearner,predict.type="prob")
rsmpl_desc_stack = makeResampleDesc(method=wind,initial.window=initial.window_stack,horizon=horizon_stack, skip =horizon)
#-- FIX: benchmark with the stack's own resampling description (was
#-- 'rsmpl_desc', leaving 'rsmpl_desc_stack' built but unused).
bmr_stack<-benchmark(lrnrs_stack,tsk_stack,rsmpl_desc_stack,measures = auc)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/max_standardise.R
\name{max_standardise}
\alias{max_standardise}
\title{Standardise a variable where higher values are more 'valuable'}
\usage{
max_standardise(x)
}
\arguments{
\item{x}{The variable to be standardised.}
}
\description{
Default method of standardisation. The transformed value represents the number of
standard deviations the variable lies from the mean.
}
\details{
The mean is subtracted from the variable, then it is divided by the standard deviation.
}
| /max_standardise.Rd | permissive | oisin-hodgins/AutoMonthlyReport | R | false | true | 551 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/max_standardise.R
\name{max_standardise}
\alias{max_standardise}
\title{Standardise a variable where higher values are more 'valuable'}
\usage{
max_standardise(x)
}
\arguments{
\item{x}{The variable to be standardised.}
}
\description{
Default method of standardisation. The transformed value represents the number of
standard deviations the variable lies from the mean.
}
\details{
The mean is subtracted from the variable, then it is divided by the standard deviation.
}
|
## Developer notes kept inside if (FALSE): never executed, documentation only.
if (FALSE) { ## DOC:
## It seems difficult to keep per-element information on lambda or ranCoefs,
# in particular because NULL elements of ranCoefs would break relist();
# names must be used instead.
essai <- list(a=1,b=NULL,c=2)
relist(c(3,5),skeleton=essai) ## poorly-documented error
## idiom for merging parameters
varNames <- setdiff(names(init.HLfit),c("fixef","v_h")) ## maybe also corrPars ? empty list in init.HLfit...
HLCor.args$fixed <- structure(.modify_list(fixed,init.HLfit[varNames]),
type=.modify_list(.relist_rep("fix",fixed),
attr(init.HLfit,"type")[varNames]))
## idiom for splitting parameters
rPtype <- attr(ranPars,"type")
if (is.null(rPtype) && length(ranPars)) { ## direct HLCor call
HL.info$ranFix <- structure(ranPars,
type=.relist_rep("fix",ranPars))
#HL.info$init.HLfit previously filled by dotlist[good_dotnames]
} else { ## through corrHLfit or fitme call: ranPars inherits values from <'corrfitme'> (...,init.HLfit(...))
u_rPtype <- unlist(rPtype)
varNames <- names(which(u_rPtype=="var"))
fix_outer_Names <- setdiff(names(u_rPtype),varNames)
## init.HLfit must recover elements from ranPars! (bug detected by AIC( <SEM fit> ) in test-CAR.R where it must get rho...
if (is.null(fix_outer_Names)) { ## can be NULL for corrMatrix case => not $ranFix
HL.info$init.HLfit <- .modify_list(HL.info$init.HLfit,ranPars)
} else { # builds a ranFix with types from rPtype (typically "fix" as outer" is set at the end of <'corrfitme'>, but see refit...)
HL.info$ranFix <- structure(.remove_from_cP(ranPars,u_names=varNames), ## loses attributes
type=.remove_from_cP(rPtype,u_rPtype, u_names=varNames) )
HL.info$init.HLfit <- .modify_list(HL.info$init.HLfit,
.remove_from_cP(ranPars,u_names=fix_outer_Names)) ## loses attributes
}
}
}
## Derived from utils::modifyList(); unlike modifyList() it also works on
## named vectors. Entries of 'val' overwrite (or are merged by name into) the
## corresponding entries of 'x'; nested lists are merged recursively.
##
## x          : base list / named vector (may be NULL).
## val        : modifier list / named vector (may be NULL).
## obey_NULLs : if FALSE, NULL elements of 'val' are ignored, as if
##              inexistent; if TRUE (default), a named NULL in 'val' deletes
##              the corresponding element of 'x'.
## NOTE(review): 'obey_NULLs' is not propagated into the recursive call for
## nested lists -- confirm whether that is intended.
.modify_list <- function (x, val, obey_NULLs=TRUE) {
  if (is.null(x)) {
    if (is.null(val)) {
      return(NULL)
    } else return(val)
  } else if (is.null(val)) return(x) # but if val is a named list with explicit NULLs, those explicit NULLs will replace the corresponding LHS elements
  #stopifnot(is.list(x), is.list(val)) # inefficient
  xnames <- names(x)
  vnames <- names(val)
  if ( ! obey_NULLs ) {
    ## FIX: vapply() instead of sapply() -- sapply() returns list() for an
    ## empty 'val', and '!list()' then errors; vapply() yields logical(0).
    is_null_vec <- vapply(val, is.null, logical(1))
    vnames <- vnames[which( ! is_null_vec)]
  }
  vnames <- vnames[nzchar(vnames)]
  for (v in vnames) {
    if (v %in% xnames) {
      if ( is.list(x[[v]]) && is.list(val[[v]])) {
        x[[v]] <- .modify_list(x[[v]], val[[v]])
      } else if ( ! is.null(dim(val[[v]]))) { # if val[[v]] is a matrix names(val[[v]]) is not what we need here
        x[[v]] <- val[[v]]
      } else if ( is.environment(x[[v]]) && is.environment(val[[v]])) {
        ## environments must be replaced wholesale: the x[[v]][nam] syntax
        ## below does not work on them (case reached from .get_inits_by_xLM()).
        x[[v]] <- val[[v]]
      } else if ( ! is.null(nam <- names(val[[v]]))) { # val[[v]] is a named list or vector: merge by name
        x[[v]][nam] <- val[[v]]
      } else x[[v]] <- val[[v]]
    } else x[[v]] <- val[[v]]
  }
  x
}
## Replace NULL entries of 'x' by the corresponding entries of 'modifier'.
## Without 'vec_nobs', a wholly-NULL 'x' is swapped for 'modifier'; with
## 'vec_nobs' (multivariate case), each NULL per-submodel element is filled in.
.denullify <- function(x, modifier, vec_nobs=NULL) {
  if (is.null(vec_nobs)) {
    if (is.null(x)) return(modifier)
    return(x)
  }
  if (.anyNULL(x)) {
    for (sub_it in seq_along(modifier)) {
      if (is.null(x[[sub_it]])) {
        ## single-bracket + list() keeps a possibly-NULL unlist() result
        ## from silently dropping the slot (handles missing data properly)
        x[sub_it] <- list(unlist(modifier[as.character(sub_it)]))
      }
    }
  }
  x
}
## Extract element 'name' from a (possibly nested) parameter list, optionally
## after first descending into parlist[[which]]. Useful for merging several
## sub-lists: a name found in more than one sub-list is an error unless
## 'which' disambiguates. With count=TRUE the number of matches is returned
## instead of the value. See .get_cP_stuff() for the corrPars-aware wrapper.
.getPar <- function(parlist, name, which=NULL, count=FALSE) {
  if (!is.null(which)) parlist <- parlist[[which]]
  hit <- parlist[[name]]
  if (!is.null(hit)) {
    ## found at the top level (or inside [[which]])
    if (count) return(1L)
    return(hit)
  }
  ## Not at the top level: scan the sub-lists, one level deep only.
  n_hits <- 0L
  for (sub in parlist) {
    if (is.list(sub) && length(found <- sub[[name]])) {
      hit <- found
      n_hits <- n_hits + 1L
    }
  }
  if (count) return(n_hits)
  if (n_hits > 1L) {
    stop(paste0("Found several instances of element '",name,"' in nested list: use 'which' to resolve this."))
  }
  hit
}
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"b") ## 2
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"c") ## 4
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"a") ## error
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"a",which=1) ## 1
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"d") ## NULL
## Extract parameter 'name' either from the top level of 'typelist' or, when a
## 'corrPars' element is present, from inside that element (via .getPar()).
## NOTE(review): when there is no 'corrPars' element, the caller-supplied
## 'which' argument is deliberately dropped (which=NULL is passed) -- confirm
## this is intended, since 'which' is meant to index into corrPars.
.get_cP_stuff <- function(typelist,name,which=NULL,count=FALSE) {
if (is.null(cP_types <- typelist$corrPars)) {
.getPar(typelist,name,which=NULL,count=count)
} else .getPar(cP_types,name,which=which,count=count)
}
## Pull the 'corrPars' element out of an init.HLfit list. Failing that, wrap a
## bare 'rho' value into the 'template' skeleton (which should be provided by
## preprocess); with neither element present, return NULL.
.process_HLfit_corrPars <- function(init.HLfit, template) {
  corrPars <- init.HLfit$corrPars
  if (!is.null(corrPars)) return(corrPars)
  rho <- init.HLfit$rho
  if (is.null(rho)) return(NULL)
  relist(rho, template)
}
## Overwrite, inside the (possibly nested) list 'lhs_list', the positions
## named by 'names_from' (after unlisting both sides) with 'value', then
## restore the original list structure via relist().
.set_pars_stuff <- function(lhs_list, value, names_from) {
  flat <- unlist(lhs_list)                # unlist() generates the automatic names
  target_names <- names(unlist(names_from))
  flat[target_names] <- value
  relist(flat, lhs_list)
}
## Clean one element: recurse into sub-lists; drop NaN entries from numeric
## vectors and "NaN" strings from character vectors.
.rmNaN_fn <- function(x) {
  if (is.list(x)) return(.rmNaN(x))
  if (is.character(x)) x[x != "NaN"] else x[!is.nan(x)]
}
## Recursively remove all NaN elements from vectors inside a list, then drop
## any element that became empty. Only names survive -- other attributes of
## the input are lost.
.rmNaN <- function(x) {
  cleaned <- lapply(x, .rmNaN_fn)
  names(cleaned) <- names(x)   # crucial: keep names even though other attributes are gone
  cleaned[lengths(cleaned) > 0L]
}
## Remove from 'parlist' the elements whose unlisted names appear in
## 'u_names': mark them NaN in the flat representation 'u_list', relist, and
## prune via .rmNaN() (which loses attributes). An empty 'u_names' returns
## 'parlist' untouched (DHGLM case where all parameters are fixed).
.remove_from_cP <- function(parlist, u_list=unlist(parlist), u_names) {
  if (!length(u_names)) return(parlist)
  u_list[u_names] <- NaN              # recycling marks every target position
  .rmNaN(relist(u_list, parlist))
}
## Public wrapper around .remove_from_cP(): drop the elements named by
## 'removand' (or given explicitly via 'rm_names') from 'parlist' AND from its
## parallel "type" attribute, keeping the two structures in sync.
remove_from_parlist <- function(parlist, removand=NULL, rm_names=names(unlist(removand))) {
  type_attr <- attr(parlist, "type")
  if (!is.null(type_attr)) {
    type_attr <- .remove_from_cP(type_attr, u_names=rm_names)
  }
  pruned <- .remove_from_cP(parlist, u_names=rm_names)
  structure(pruned, type=type_attr)
}
## Extract a sublist from a (structured) list 'x' according to a 'skeleton':
## for every named slot of the skeleton, keep the matching part of x (recursing
## into nested lists, sub-setting named vectors by shared names). Used by the
## mv (multivariate) code.
.subPars <- function (x, skeleton) {
  have <- names(x)
  keys <- names(skeleton)
  keys <- keys[nzchar(keys)]
  for (key in keys) {
    if (!(key %in% have)) {
      skeleton[[key]] <- x[[key]]   # x[[key]] is NULL here: drops the slot
      next
    }
    part <- x[[key]]
    if ((is.list(part) || inherits(part, "R6")) && is.list(skeleton[[key]])) {
      skeleton[[key]] <- .subPars(part, skeleton[[key]])
    } else {
      wanted <- names(skeleton[[key]])
      if (is.null(wanted)) {        # unnamed skeleton slot: take x's value wholesale
        skeleton[[key]] <- part
      } else {
        shared <- intersect(wanted, names(part))
        if (length(shared)) {
          skeleton[[key]] <- part[shared]   # sub-vector by shared names
        } else {
          skeleton[key] <- NULL             # nothing shared: remove the slot
        }
      }
    }
  }
  skeleton
}
| /R/corrPars.R | no_license | cran/spaMM | R | false | false | 8,361 | r | if (FALSE) { ## DOC:
## Il semble difficile de garder une info sur chaque element de lambda ou ranCoefs, particulierement parce que
# les elements NULL de ranCoefs poseraient probleme pour relist(). Il faut plutôt utiliser les noms.
essai <- list(a=1,b=NULL,c=2)
relist(c(3,5),skeleton=essai) ## error mal documentee
## idiom for merging parameters
varNames <- setdiff(names(init.HLfit),c("fixef","v_h")) ## maybe also corrPars ? empty list in init.HLfit...
HLCor.args$fixed <- structure(.modify_list(fixed,init.HLfit[varNames]),
type=.modify_list(.relist_rep("fix",fixed),
attr(init.HLfit,"type")[varNames]))
## idiom for splitting parameters
rPtype <- attr(ranPars,"type")
if (is.null(rPtype) && length(ranPars)) { ## direct HLCor call
HL.info$ranFix <- structure(ranPars,
type=.relist_rep("fix",ranPars))
#HL.info$init.HLfit previously filled by dotlist[good_dotnames]
} else { ## through corrHLfit or fitme call: ranPars inherits values from <'corrfitme'> (...,init.HLfit(...))
u_rPtype <- unlist(rPtype)
varNames <- names(which(u_rPtype=="var"))
fix_outer_Names <- setdiff(names(u_rPtype),varNames)
## init.HLfit must recover elements from ranPars! (bug detected by AIC( <SEM fit> ) in test-CAR.R where it must get rho...
if (is.null(fix_outer_Names)) { ## can be NULL for corrMatrix case => not $ranFix
HL.info$init.HLfit <- .modify_list(HL.info$init.HLfit,ranPars)
} else { # builds a ranFix with types from rPtype (typically "fix" as outer" is set at the end of <'corrfitme'>, but see refit...)
HL.info$ranFix <- structure(.remove_from_cP(ranPars,u_names=varNames), ## loses attributes
type=.remove_from_cP(rPtype,u_rPtype, u_names=varNames) )
HL.info$init.HLfit <- .modify_list(HL.info$init.HLfit,
.remove_from_cP(ranPars,u_names=fix_outer_Names)) ## loses attributes
}
}
}
## derived from utils::modifyList ... works on named vectors!
.modify_list <- function (x, val, obey_NULLs=TRUE) { # obey_NULLs = FALSE => NULL elements in val are ignored, as if inexistent
if( is.null(x)) {
if (is.null(val)) {
return(NULL)
} else return(val)
} else if (is.null(val)) return(x) # but if val is a named list with explicit NULLs, those explicit NULLs will replace the corresponding LHS elements
#stopifnot(is.list(x), is.list(val)) # inefficient
xnames <- names(x)
vnames <- names(val)
if ( ! obey_NULLs ) {
is_null_vec <- sapply(val, is.null)
vnames <- vnames[which( ! is_null_vec)]
}
vnames <- vnames[nzchar(vnames)]
for (v in vnames) {
if (v %in% xnames) {
if ( is.list(x[[v]]) && is.list(val[[v]])) {
x[[v]] <- .modify_list(x[[v]], val[[v]])
} else if ( ! is.null(dim(val[[v]]))) { # if val[[v]] is a matrix names(val[[v]]) is not what we need here
x[[v]] <- val[[v]]
} else if ( is.environment(x[[v]]) && is.environment(val[[v]])) { # before next alternative, bc
# syntax x[[v]][nam] does not work on environments
# we could use another syntax to copy from one envir to the other, but currently copying envirs may be sufficient.
# This case occur in .get_inits_by_xLM() -> .modify_list(inits_by_xLM$mvlist,new_mvlist) in test-mv-extra (was missed by routine tests).
x[[v]] <- val[[v]]
} else if ( ! is.null(nam <- names(val[[v]]))) { # handles val[[v]] being list, or vector
x[[v]][nam] <- val[[v]]
} else x[[v]] <- val[[v]]
} else x[[v]] <- val[[v]]
}
x
}
.denullify <- function(x, modifier, vec_nobs=NULL) { # changes NULL's and not to NULLs
if (is.null(vec_nobs)) {
if (is.null(x)) x <- modifier
} else if (.anyNULL(x) ) {
for (mv_it in seq_along(modifier)) if ( is.null(x[[mv_it]])) x[mv_it] <- list(unlist(modifier[as.character(mv_it)])) # handling missing data properly
}
x
}
# getPar extract values from a list of lists, controlling that there is no redundancies between the lists => useful to *merge* lists
# but in fact I do not seem to use this facility. .getPar() is applied to 'ranFix' (once to 'fixed')
# Argument 'which' can be any way of indexing a list
.getPar <- function(parlist,name,which=NULL, count=FALSE) { ## see .get_cP_stuff() to extract from first level or from an optional corrPars element !
if ( ! is.null(which)) parlist <- parlist[[which]]
val <- parlist[[name]]
if (is.null(val)) { ## ie name not found a topmost level; scan sublists: NOT RECURSIVELY
nmatch <- 0L
val <- NULL
for (it in seq_along(parlist)) { ## the sublists are typically lists that we wish to merge (see examples below)
if (is.list(parlist[[it]]) && length(vv <- parlist[[it]][[name]])) {
val <- vv
nmatch <- nmatch+1L
}
}
if (count) return(nmatch)
## ELSE
if (nmatch>1L) {
stop(paste0("Found several instances of element '",name,"' in nested list: use 'which' to resolve this."))
}
return(val)
} else if (count) {return(1L)} else return(val) ## single first-level or [[which]] value
}
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"b") ## 2
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"c") ## 4
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"a") ## error
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"a",which=1) ## 1
# .getPar(list("1"=list(a=1,b=2),"2"=list(a=3,c=4)),"d") ## NULL
.get_cP_stuff <- function(typelist,name,which=NULL,count=FALSE) {
if (is.null(cP_types <- typelist$corrPars)) {
.getPar(typelist,name,which=NULL,count=count)
} else .getPar(cP_types,name,which=which,count=count)
}
.process_HLfit_corrPars <- function(init.HLfit, template) { ## the template should be provided by preprocess
if (is.null(corrPars <- init.HLfit$corrPars)) {
if (!is.null(rho <- init.HLfit$rho)) {
return(relist(rho,template))
} else return(NULL)
} else return(corrPars)
}
.set_pars_stuff <- function(lhs_list, value, names_from) {
u_lhs <- unlist(lhs_list) ## generates automatic names
u_lhs[names(unlist(names_from))] <- value
relist(u_lhs,lhs_list)
}
.rmNaN_fn <- function(x) if (is.list(x)) .rmNaN(x) else {if (is.character(x)) x[x!= "NaN"] else {x[!is.nan(x)]}}
## Recursively step down into list, removing all NaN elements from vectors and vectors of NaN from lists
.rmNaN <- function(x) {
res <- vector("list",length(x))
for(it in seq_along(x)) res[[it]] <- .rmNaN_fn(x[[it]])
names(res) <- names(x) ## crucial (other attributes are lost !)
len <- integer(length(res))
for(it in seq_along(res)) len[it] <- length(res[[it]])
res[len>0L]
}
.remove_from_cP <- function(parlist, u_list=unlist(parlist), u_names) { ## not simply corrPars...
if (length(u_names)) { ## if something to subtract
u_list[u_names] <- rep(NaN,length(u_names))
u_list <- relist(u_list,parlist)
return(.rmNaN(u_list)) ## removes attributes
} else return(parlist) ## DHGLM where all parameters are fixed.
}
remove_from_parlist <- function(parlist, removand=NULL, rm_names=names(unlist(removand))) {
type <- attr(parlist,"type")
if ( ! is.null(type)) type <- .remove_from_cP(type, u_names=rm_names)
structure(.remove_from_cP(parlist,u_names=rm_names),
type=type )
}
#extract a sublist from a (structured) list x according to a skeleton; used for mv code
.subPars <- function (x, skeleton) {
xnames <- names(x)
sknames <- names(skeleton)
sknames <- sknames[nzchar(sknames)]
for (v in sknames) {
if (v %in% xnames) {
if (( is.list(x[[v]]) || inherits(x[[v]],"R6")) && is.list(skeleton[[v]])) {
skeleton[[v]] <- .subPars(x[[v]], skeleton[[v]])
} else if ( ! is.null(nam <- names(skeleton[[v]]))) { # ideally this test is always TRUE when it is reached
if (length(subnames <- intersect(nam, names(x[[v]])))) {
skeleton[[v]] <- x[[v]][subnames] # sub-vector here
} else skeleton[v] <- NULL # remove element from list
} else skeleton[[v]] <- x[[v]]
} else skeleton[[v]] <- x[[v]]
}
skeleton
}
|
# -------------------------------------------------------------------------
# Converting Strings to Dates #
# -------------------------------------------------------------------------
# When date and time data are imported into R they will often default
# to a character string . This requires us to convert strings to dates.
# We may also have multiple strings that we want to merge to create a date
# variable.
# -------------------------------------------------------------------------
# To convert a string that is already in a date format (YYYY-MM-DD) into a date
# object use as.Date () :
# Example vector of strings already in the ISO date format (YYYY-MM-DD)
x <- c("2015-07-01", "2015-08-01", "2015-09-01")
as.Date(x)
# -------------------------------------------------------------------------
# There are multiple formats that dates can be in; for a complete list of
# formatting code options in R type ?strftime in your console .
# Example vector of US-style date strings with a FOUR-digit year
y <- c("07/01/2015", "07/01/2015", "07/01/2015")
# FIX: the year here is four digits, so the format code must be %Y --
# %y parses two-digit years and silently yields the wrong date.
as.Date(y, format = "%m/%d/%Y")
# -------------------------------------------------------------------------
# using the lubridate package: #
# -------------------------------------------------------------------------
library(lubridate)
# ymd(): parse year-month-day strings, whatever the separator
ymd(x)
# mdy(): parse month-day-year strings
mdy(y)
# -------------------------------------------------------------------------
# One of the many benefits of the lubridate package is that
# it automatically recognizes the common separators used when
# recording dates ("-", "/", ".", and ""). As a result, you only
# need to focus on specifying the order of the date elements to
# determine the parsing function applied
# -------------------------------------------------------------------------
# Some of the parse functions:
# ymd()
# ydm()
# mdy()
# dmy()
# hm()
# hms()
# ymd_hms()
# -------------------------------------------------------------------------
| /dates/lubridate_convert_string_date.R | no_license | ttedla/data_analysis | R | false | false | 1,932 | r |
# -------------------------------------------------------------------------
# Converting Strings to Dates #
# -------------------------------------------------------------------------
# When date and time data are imported into R they will often default
# to a character string . This requires us to convert strings to dates.
# We may also have multiple strings that we want to merge to create a date
# variable.
# -------------------------------------------------------------------------
# To convert a string that is already in a date format (YYYY-MM-DD) into a date
# object use as.Date () :
x <- c ("2015-07-01", "2015-08-01", "2015-09-01")
as.Date(x)
# -------------------------------------------------------------------------
# There are multiple formats that dates can be in; for a complete list of
# formatting code options in R type ?strftime in your console .
# Example vector of US-style date strings with a FOUR-digit year
y <- c("07/01/2015", "07/01/2015", "07/01/2015")
# FIX: the year here is four digits, so the format code must be %Y --
# %y parses two-digit years and silently yields the wrong date.
as.Date(y, format = "%m/%d/%Y")
# -------------------------------------------------------------------------
# using the lubridate package: #
# -------------------------------------------------------------------------
library(lubridate)
ymd(x)
mdy(y)
# -------------------------------------------------------------------------
# One of the many benefits of the lubridate package is that
# it automatically recognizes the common separators used when
# recording dates ("-", "/", ".", and ""). As a result, you only
# need to focus on specifying the order of the date elements to
# determine the parsing function applied
# -------------------------------------------------------------------------
# Some of the parse functions:
# ymd()
# ydm()
# mdy()
# dmy()
# hm()
# hms()
# ymd_hms()
# -------------------------------------------------------------------------
|
##############################################################################
##
## description: An altered version of DoSCENT. This function deletes the logarithmic step of signaling entropy to do a Gaussian fitting and keeps other steps unchanged.
## usage: DoSCENT(exp.m, sr.v, pheno.v = NULL, mixmod = NULL, maxPS = 5, pctG = 0.01, kmax = 9, pctLM = 0.05, pcorTH = 0.1)
## arguments:
## exp.m: Normalized single-cell RNA-Seq data matrix, with rows labeling genes and columns labeling single cells.
## sr.v: Signaling entropy values for the single cells, one per column of exp.m (the function signature takes sr.v; the NCG argument described in older versions of this header no longer exists).
## pheno.v: A phenotype vector for the single cells, of same length and order as the columns of exp.m.
## mixmod: Specifies whether the Gaussian mixture model to be fit assumes components to have different (default) or equal variance. In the latter case, use mixmod=c("E").
## maxPS: Maximum number of potency states to allow, when inferring discrete potency states of single cells. Default value is 5.
## pctG: Percentage of all genes in \code{exp.m} to select from each principal component in an SVD/PCA of \code{exp.m}. The union set of all selected genes is then used for clustering. Default value is 0.01.
## kmax: Maximum number of co-expression clusters to allow when performing clustering. Default value is 9. Larger values are not allowed.
## pctLM: Percentage of total number of single cells to allow as a minimum size for selecting interesting landmarks i.e. potency-coexpression clusters of single cells. Default value is 0.05.
## pcorTH: Threshold for calling significant partial correlations. Default value is 0.1. Usually, single-cell experiments profile large number of cells, so 0.1 is a sensible threshold.
## value:
## potS: Inferred discrete potency states for each single cell. It is indexed so that the index increases as the NCG of the state decreases.
## distPSPH: Table giving the distribution of single-cells across potency states and phenotypes.
## prob: Table giving the probabilities of each potency state per phenotype value.
## hetPS: The normalised NCG of potency per phenotype value.
## cl: The co-expression clustering index for each single cell.
## pscl: The potency coexpression clustering label for each single cell.
## distPSCL: The distribution of single cell numbers per potency state and coexpression cluster.
## medLM: A matrix of medoids of gene expression for the selected landmarks.
## srPSCL: The average NCG of single cells in each potency coexpression cluster.
## srLM: The average NCG of single cells in each landmark.
## distPHLM: Table giving the distribution of single cell numbers per phenotype and landmark.
## cellLM: Nearest landmark for each single cell.
## cellLM2: A vector specifying the nearest and next-nearest landmark for each single cell.
## adj: Weighted adjacency matrix between landmarks with entries giving the number of single cells mapping closest to the two landmarks.
## pcorLM: Partial correlation matrix of landmarks as estimated from the expression medoids.
## netLM: Adjacency matrix of landmarks specifying which partial correlations are significant.
##
##############################################################################
## DoSCENT: infer discrete potency states from per-cell signaling entropies,
## co-expression clusters of single cells, "landmark" potency-cluster
## combinations, and a landmark transition network.
## Arguments:
##   exp.m   - normalized scRNA-Seq matrix, genes in rows, cells in columns.
##   sr.v    - signaling entropy per cell (same order as columns of exp.m).
##   pheno.v - optional phenotype label per cell.
##   mixmod  - NULL => mixture components may have unequal variances;
##             any non-NULL value forces the equal-variance model "E".
##   maxPS   - maximum number of potency states (mixture components) to try.
##   pctG    - fraction of genes selected from each significant SVD component.
##   kmax    - maximum number of PAM clusters to try.
##   pctLM   - minimum landmark size as a fraction of the number of cells.
##   pcorTH  - threshold for calling a partial correlation significant.
## Returns a list (potS, distPSPH, prob, hetPS, cl, pscl, distPSCL, medLM,
## srPSCL, srLM, distPHLM, cellLM, cellLM2, adj, pcorLM, netLM).
DoSCENT <-
function(exp.m,sr.v,pheno.v=NULL,mixmod=NULL,maxPS=5,pctG=0.01,kmax=9,pctLM=0.05,pcorTH=0.1){
## NOTE(review): library() is preferred over require() for hard dependencies;
## require() only warns (returns FALSE) when a package is missing.
require(mclust);
require(igraph);
require(isva);
require(cluster);
require(corpcor);
## Number of top-loading genes to take from each significant SVD component.
ntop <- floor(pctG*nrow(exp.m));
print("Fit Gaussian Mixture Model to Signaling Entropies");
if(is.null(mixmod)){ ## default assumes different variance for clusters
mcl.o <- Mclust(sr.v,G=1:maxPS);
}
else {
mcl.o <- Mclust(sr.v,G=1:maxPS,modelNames=c("E"));
}
## One mixture-component label per cell; the number of distinct labels is
## the number of inferred potency states.
potS.v <- mcl.o$class;
nPS <- length(levels(as.factor(potS.v)));
print(paste("Identified ",nPS," potency states",sep=""));
## NOTE(review): potS.v has one entry per cell but only nPS names are
## assigned here, so all remaining names become NA -- confirm intent.
names(potS.v) <- paste("PS",1:nPS,sep="");
mu.v <- mcl.o$param$mean;
sd.v <- sqrt(mcl.o$param$variance$sigmasq);
## Map component means from a log2-odds-like scale back to (0,1).
avSRps.v <- (2^mu.v)/(1+2^mu.v);
## Re-index states so state 1 corresponds to the highest mean entropy.
savSRps.s <- sort(avSRps.v,decreasing=TRUE,index.return=TRUE);
spsSid.v <- savSRps.s$ix;
ordpotS.v <- match(potS.v,spsSid.v);
if(!is.null(pheno.v)){
nPH <- length(levels(as.factor(pheno.v)));
## Cell counts per (phenotype, potency state), then row-normalised to
## per-phenotype potency-state probabilities.
distPSph.m <- table(pheno.v,ordpotS.v)
print("Compute Shannon (Heterogeneity) Index for each Phenotype class");
probPSph.m <- distPSph.m/apply(distPSph.m,1,sum);
hetPS.v <- vector();
for(ph in 1:nPH){
prob.v <- probPSph.m[ph,];
sel.idx <- which(prob.v >0);
## Normalised Shannon entropy over potency states (value in [0,1]).
hetPS.v[ph] <- - sum(prob.v[sel.idx]*log(prob.v[sel.idx]))/log(nPS);
}
names(hetPS.v) <- rownames(probPSph.m);
print("Done");
}
else {
distPSph.m=NULL; probPSph.m=NULL; hetPS.v=NULL;
}
### now cluster cells independently of SR
### select genes over which to cluster
print("Using RMT to estimate number of significant components of variation in scRNA-Seq data");
## Row-center the expression matrix before RMT dimension estimation / SVD.
tmp.m <- exp.m - rowMeans(exp.m);
rmt.o <- EstDimRMT(tmp.m); svd.o <- svd(tmp.m);
tmpG2.v <- vector();
print(paste("Number of significant components=",rmt.o$dim,sep=""));
## Union of the top `ntop` genes (by |loading|) of each significant component.
for(cp in 1:rmt.o$dim){
tmp.s <- sort(abs(svd.o$u[,cp]),decreasing=TRUE,index.return=TRUE);
tmpG2.v <- union(tmpG2.v,rownames(exp.m)[tmp.s$ix[1:ntop]]);
}
selGcl.v <- tmpG2.v;
### now perform clustering of all cells over the selected genes
print("Identifying co-expression clusters");
map.idx <- match(selGcl.v,rownames(exp.m));
## Correlation-based cell-cell distance over the selected genes.
distP.o <- as.dist( 0.5*(1-cor(exp.m[map.idx,])) );
asw.v <- vector();
## Choose k by maximising the average silhouette width over k = 2..kmax.
for(k in 2:kmax){
pam.o <- pam(distP.o,k,stand=FALSE);
asw.v[k-1] <- pam.o$silinfo$avg.width;
}
k.opt <- which.max(asw.v)+1;
pam.o <- pam(distP.o,k=k.opt,stand=FALSE);
clust.idx <- pam.o$cluster;
print(paste("Inferred ",k.opt," clusters",sep=""));
## Per-cell label combining potency state and co-expression cluster.
psclID.v <- paste("PS",ordpotS.v,"-CL",clust.idx,sep="");
### identify landmark clusters
print("Now identifying landmarks (potency co-expression clusters)");
distPSCL.m <- table(paste("CL",clust.idx,sep=""),paste("PS",ordpotS.v,sep=""));
sizePSCL.v <- as.vector(distPSCL.m);
namePSCL.v <- vector();
ci <- 1;
## Names follow the column-major order of as.vector(distPSCL.m): the
## cluster index (table rows) varies fastest within each potency state.
for(ps in 1:nPS){
for(cl in 1:k.opt){
namePSCL.v[ci] <- paste("PS",ps,"-CL",cl,sep="");
ci <- ci+1;
}
}
names(sizePSCL.v) <- namePSCL.v;
## Landmarks = potency-cluster combinations holding more than a fraction
## pctLM of all cells.
ldmkCL.idx <- which(sizePSCL.v > pctLM*ncol(exp.m));
print(paste("Identified ",length(ldmkCL.idx)," Landmarks",sep=""));
### distribution of phenotypes among LMs
if(!is.null(pheno.v)){
tab.m <- table(pheno.v,psclID.v);
tmp.idx <- match(names(sizePSCL.v)[ldmkCL.idx],colnames(tab.m));
distPHLM.m <- tab.m[,tmp.idx];
}
else {
distPHLM.m <- NULL;
}
### medoids
print("Constructing expression medoids of landmarks");
med.m <- matrix(0,nrow=length(selGcl.v),ncol=nPS*k.opt);
srPSCL.v <- vector();
ci <- 1;
for(ps in 1:nPS){
for(cl in 1:k.opt){
## Cells belonging to this potency-state / cluster combination.
tmpS.idx <- intersect(which(ordpotS.v==ps),which(clust.idx==cl));
## NOTE(review): matrix()/unlist()/matrix() round-trip flattens the
## sub-matrix and reshapes it back to genes x cells.
m<-matrix(exp.m[map.idx,tmpS.idx]);
e<-unlist(m);
## Gene-wise median expression across the combination's cells.
med.m[,ci] <- apply(matrix(e,nrow=length(map.idx)),1,median);
## Average entropy of the combination (NaN when the combination is empty).
srPSCL.v[ci] <- mean(sr.v[tmpS.idx]);
ci <- ci+1;
}
}
names(srPSCL.v) <- namePSCL.v;
srLM.v <- srPSCL.v[ldmkCL.idx];
medLM.m <- med.m[,ldmkCL.idx];
colnames(medLM.m) <- namePSCL.v[ldmkCL.idx];
rownames(medLM.m) <- selGcl.v;
### now project each cell onto two nearest landmarks
print("Inferring dependencies/trajectories/transitions between landmarks");
cellLM2.v <- vector(); cellLM.v <- vector();
## NOTE(review): the loop index `c` shadows base::c inside this loop.
for(c in 1:ncol(exp.m)){
## Correlation distance from this cell to every landmark medoid.
distCellLM.v <- 0.5*(1-as.vector(cor(exp.m[map.idx,c],medLM.m)));
tmp.s <- sort(distCellLM.v,decreasing=FALSE,index.return=TRUE);
cellLM2.v[c] <- paste("LM",tmp.s$ix[1],"-LM",tmp.s$ix[2],sep="");
cellLM.v[c] <- colnames(medLM.m)[tmp.s$ix[1]];
}
## Directed counts: entry (i,j) = number of cells whose nearest and
## next-nearest landmarks are i and j respectively; symmetrised below.
adjLM.m <- matrix(0,nrow=ncol(medLM.m),ncol=ncol(medLM.m));
rownames(adjLM.m) <- colnames(medLM.m);
colnames(adjLM.m) <- colnames(medLM.m);
for(lm1 in 1:ncol(medLM.m)){
for(lm2 in 1:ncol(medLM.m)){
adjLM.m[lm1,lm2] <- length(which(cellLM2.v==paste("LM",lm1,"-LM",lm2,sep="")));
}
}
sadjLM.m <- adjLM.m + t(adjLM.m);
## Partial correlations between landmark medoid profiles; threshold to
## obtain the binary landmark network (diagonal forced to zero).
corLM.m <- cor(medLM.m);
pcorLM.m <- cor2pcor(corLM.m);
rownames(pcorLM.m) <- rownames(corLM.m);
colnames(pcorLM.m) <- rownames(corLM.m);
netLM.m <- pcorLM.m;diag(netLM.m) <- 0;
netLM.m[pcorLM.m < pcorTH] <- 0;
netLM.m[pcorLM.m > pcorTH] <- 1;
return(list(potS=ordpotS.v,distPSPH=distPSph.m,prob=probPSph.m,hetPS=hetPS.v,cl=clust.idx,pscl=psclID.v,distPSCL=distPSCL.m,medLM=medLM.m,srPSCL=srPSCL.v,srLM=srLM.v,distPHLM=distPHLM.m,cellLM=cellLM.v,cellLM2=cellLM2.v,adj=sadjLM.m,pcorLM=pcorLM.m,netLM=netLM.m));
}
| /Function/DoSCENTalt.R | no_license | Xinzhe-Ni/NCG | R | false | false | 8,938 | r | ##############################################################################
##
## description: An altered version of DoSCENT. This function deletes the logarithmic step of signaling entropy to do a Gaussian fitting and keeps other steps unchanged.
## usage: DoSCENTalt(exp.m, NCG, pheno.v = NULL, mixmod = NULL, maxPS = 5, pctG = 0.01, kmax = 9, pctLM = 0.05, pcorTH = 0.1)
## arguments:
## exp.m: Normalized single-cell RNA-Seq data matrix, with rows labeling genes and columns labeling single cells.
## NCG: The output value of function CompNCG.
## pheno.v: A phenotype vector for the single cells, of same length and order as the columns of exp.m.
## mixmod: Specifies whether the Gaussian mixture model to be fit assumes components to have different (default) or equal variance. In the latter case, use mixmod=c("E").
## maxPS: Maximum number of potency states to allow, when inferring discrete potency states of single cells. Default value is 5.
## pctG: Percentage of all genes in \code{exp.m} to select from each principal component in an SVD/PCA of \code{exp.m}. The union set of all selected genes is then used for clustering. Default value is 0.01.
## kmax: Maximum number of co-expression clusters to allow when performing clustering. Default value is 9. Larger values are not allowed.
## pctLM: Percentage of total number of single cells to allow as a minimum size for selecting interesting landmarks i.e. potency-coexpression clusters of single cells. Default value is 0.05.
## pcorTH: Threshold for calling significant partial correlations. Default value is 0.1. Usually, single-cell experiments profile large number of cells, so 0.1 is a sensible threshold.
## value:
## potS: Inferred discrete potency states for each single cell. It is indexed so that the index increases as the NCG of the state decreases.
## distPSPH: Table giving the distribution of single-cells across potency states and phenotypes.
## prob: Table giving the probabilities of each potency state per phenotype value.
## hetPS: The normalised NCG of potency per phenotype value.
## cl: The co-expression clustering index for each single cell.
## pscl: The potency coexpression clustering label for each single cell.
## distPSCL: The distribution of single cell numbers per potency state and coexpression cluster.
## medLM: A matrix of medoids of gene expression for the selected landmarks.
## srPSCL: The average NCG of single cells in each potency coexpression cluster.
## srLM: The average NCG of single cells in each landmark.
## distPHLM: Table giving the distribution of single cell numbers per phenotype and landmark.
## cellLM: Nearest landmark for each single cell.
## cellLM2: A vector specifying the nearest and next-nearest landmark for each single cell.
## adj: Weighted adjacency matrix between landmarks with entries giving the number of single cells mapping closest to the two landmarks.
## pcorLM: Partial correlation matrix of landmarks as estimated from the expression medoids.
## netLM: Adjacency matrix of landmarks specifying which partial correlations are significant.
##
##############################################################################
## DoSCENT (second, duplicated copy): infer discrete potency states from
## per-cell signaling entropies, co-expression clusters of single cells,
## "landmark" potency-cluster combinations, and a landmark transition network.
## Arguments:
##   exp.m   - normalized scRNA-Seq matrix, genes in rows, cells in columns.
##   sr.v    - signaling entropy per cell (same order as columns of exp.m).
##   pheno.v - optional phenotype label per cell.
##   mixmod  - NULL => mixture components may have unequal variances;
##             any non-NULL value forces the equal-variance model "E".
##   maxPS   - maximum number of potency states (mixture components) to try.
##   pctG    - fraction of genes selected from each significant SVD component.
##   kmax    - maximum number of PAM clusters to try.
##   pctLM   - minimum landmark size as a fraction of the number of cells.
##   pcorTH  - threshold for calling a partial correlation significant.
## Returns a list (potS, distPSPH, prob, hetPS, cl, pscl, distPSCL, medLM,
## srPSCL, srLM, distPHLM, cellLM, cellLM2, adj, pcorLM, netLM).
DoSCENT <-
function(exp.m,sr.v,pheno.v=NULL,mixmod=NULL,maxPS=5,pctG=0.01,kmax=9,pctLM=0.05,pcorTH=0.1){
## NOTE(review): library() is preferred over require() for hard dependencies;
## require() only warns (returns FALSE) when a package is missing.
require(mclust);
require(igraph);
require(isva);
require(cluster);
require(corpcor);
## Number of top-loading genes to take from each significant SVD component.
ntop <- floor(pctG*nrow(exp.m));
print("Fit Gaussian Mixture Model to Signaling Entropies");
if(is.null(mixmod)){ ## default assumes different variance for clusters
mcl.o <- Mclust(sr.v,G=1:maxPS);
}
else {
mcl.o <- Mclust(sr.v,G=1:maxPS,modelNames=c("E"));
}
## One mixture-component label per cell; the number of distinct labels is
## the number of inferred potency states.
potS.v <- mcl.o$class;
nPS <- length(levels(as.factor(potS.v)));
print(paste("Identified ",nPS," potency states",sep=""));
## NOTE(review): potS.v has one entry per cell but only nPS names are
## assigned here, so all remaining names become NA -- confirm intent.
names(potS.v) <- paste("PS",1:nPS,sep="");
mu.v <- mcl.o$param$mean;
sd.v <- sqrt(mcl.o$param$variance$sigmasq);
## Map component means from a log2-odds-like scale back to (0,1).
## NOTE(review): the file header says this variant "deletes the logarithmic
## step", yet this transform is identical to the original -- verify.
avSRps.v <- (2^mu.v)/(1+2^mu.v);
## Re-index states so state 1 corresponds to the highest mean entropy.
savSRps.s <- sort(avSRps.v,decreasing=TRUE,index.return=TRUE);
spsSid.v <- savSRps.s$ix;
ordpotS.v <- match(potS.v,spsSid.v);
if(!is.null(pheno.v)){
nPH <- length(levels(as.factor(pheno.v)));
## Cell counts per (phenotype, potency state), then row-normalised to
## per-phenotype potency-state probabilities.
distPSph.m <- table(pheno.v,ordpotS.v)
print("Compute Shannon (Heterogeneity) Index for each Phenotype class");
probPSph.m <- distPSph.m/apply(distPSph.m,1,sum);
hetPS.v <- vector();
for(ph in 1:nPH){
prob.v <- probPSph.m[ph,];
sel.idx <- which(prob.v >0);
## Normalised Shannon entropy over potency states (value in [0,1]).
hetPS.v[ph] <- - sum(prob.v[sel.idx]*log(prob.v[sel.idx]))/log(nPS);
}
names(hetPS.v) <- rownames(probPSph.m);
print("Done");
}
else {
distPSph.m=NULL; probPSph.m=NULL; hetPS.v=NULL;
}
### now cluster cells independently of SR
### select genes over which to cluster
print("Using RMT to estimate number of significant components of variation in scRNA-Seq data");
## Row-center the expression matrix before RMT dimension estimation / SVD.
tmp.m <- exp.m - rowMeans(exp.m);
rmt.o <- EstDimRMT(tmp.m); svd.o <- svd(tmp.m);
tmpG2.v <- vector();
print(paste("Number of significant components=",rmt.o$dim,sep=""));
## Union of the top `ntop` genes (by |loading|) of each significant component.
for(cp in 1:rmt.o$dim){
tmp.s <- sort(abs(svd.o$u[,cp]),decreasing=TRUE,index.return=TRUE);
tmpG2.v <- union(tmpG2.v,rownames(exp.m)[tmp.s$ix[1:ntop]]);
}
selGcl.v <- tmpG2.v;
### now perform clustering of all cells over the selected genes
print("Identifying co-expression clusters");
map.idx <- match(selGcl.v,rownames(exp.m));
## Correlation-based cell-cell distance over the selected genes.
distP.o <- as.dist( 0.5*(1-cor(exp.m[map.idx,])) );
asw.v <- vector();
## Choose k by maximising the average silhouette width over k = 2..kmax.
for(k in 2:kmax){
pam.o <- pam(distP.o,k,stand=FALSE);
asw.v[k-1] <- pam.o$silinfo$avg.width;
}
k.opt <- which.max(asw.v)+1;
pam.o <- pam(distP.o,k=k.opt,stand=FALSE);
clust.idx <- pam.o$cluster;
print(paste("Inferred ",k.opt," clusters",sep=""));
## Per-cell label combining potency state and co-expression cluster.
psclID.v <- paste("PS",ordpotS.v,"-CL",clust.idx,sep="");
### identify landmark clusters
print("Now identifying landmarks (potency co-expression clusters)");
distPSCL.m <- table(paste("CL",clust.idx,sep=""),paste("PS",ordpotS.v,sep=""));
sizePSCL.v <- as.vector(distPSCL.m);
namePSCL.v <- vector();
ci <- 1;
## Names follow the column-major order of as.vector(distPSCL.m): the
## cluster index (table rows) varies fastest within each potency state.
for(ps in 1:nPS){
for(cl in 1:k.opt){
namePSCL.v[ci] <- paste("PS",ps,"-CL",cl,sep="");
ci <- ci+1;
}
}
names(sizePSCL.v) <- namePSCL.v;
## Landmarks = potency-cluster combinations holding more than a fraction
## pctLM of all cells.
ldmkCL.idx <- which(sizePSCL.v > pctLM*ncol(exp.m));
print(paste("Identified ",length(ldmkCL.idx)," Landmarks",sep=""));
### distribution of phenotypes among LMs
if(!is.null(pheno.v)){
tab.m <- table(pheno.v,psclID.v);
tmp.idx <- match(names(sizePSCL.v)[ldmkCL.idx],colnames(tab.m));
distPHLM.m <- tab.m[,tmp.idx];
}
else {
distPHLM.m <- NULL;
}
### medoids
print("Constructing expression medoids of landmarks");
med.m <- matrix(0,nrow=length(selGcl.v),ncol=nPS*k.opt);
srPSCL.v <- vector();
ci <- 1;
for(ps in 1:nPS){
for(cl in 1:k.opt){
## Cells belonging to this potency-state / cluster combination.
tmpS.idx <- intersect(which(ordpotS.v==ps),which(clust.idx==cl));
## NOTE(review): matrix()/unlist()/matrix() round-trip flattens the
## sub-matrix and reshapes it back to genes x cells.
m<-matrix(exp.m[map.idx,tmpS.idx]);
e<-unlist(m);
## Gene-wise median expression across the combination's cells.
med.m[,ci] <- apply(matrix(e,nrow=length(map.idx)),1,median);
## Average entropy of the combination (NaN when the combination is empty).
srPSCL.v[ci] <- mean(sr.v[tmpS.idx]);
ci <- ci+1;
}
}
names(srPSCL.v) <- namePSCL.v;
srLM.v <- srPSCL.v[ldmkCL.idx];
medLM.m <- med.m[,ldmkCL.idx];
colnames(medLM.m) <- namePSCL.v[ldmkCL.idx];
rownames(medLM.m) <- selGcl.v;
### now project each cell onto two nearest landmarks
print("Inferring dependencies/trajectories/transitions between landmarks");
cellLM2.v <- vector(); cellLM.v <- vector();
## NOTE(review): the loop index `c` shadows base::c inside this loop.
for(c in 1:ncol(exp.m)){
## Correlation distance from this cell to every landmark medoid.
distCellLM.v <- 0.5*(1-as.vector(cor(exp.m[map.idx,c],medLM.m)));
tmp.s <- sort(distCellLM.v,decreasing=FALSE,index.return=TRUE);
cellLM2.v[c] <- paste("LM",tmp.s$ix[1],"-LM",tmp.s$ix[2],sep="");
cellLM.v[c] <- colnames(medLM.m)[tmp.s$ix[1]];
}
## Directed counts: entry (i,j) = number of cells whose nearest and
## next-nearest landmarks are i and j respectively; symmetrised below.
adjLM.m <- matrix(0,nrow=ncol(medLM.m),ncol=ncol(medLM.m));
rownames(adjLM.m) <- colnames(medLM.m);
colnames(adjLM.m) <- colnames(medLM.m);
for(lm1 in 1:ncol(medLM.m)){
for(lm2 in 1:ncol(medLM.m)){
adjLM.m[lm1,lm2] <- length(which(cellLM2.v==paste("LM",lm1,"-LM",lm2,sep="")));
}
}
sadjLM.m <- adjLM.m + t(adjLM.m);
## Partial correlations between landmark medoid profiles; threshold to
## obtain the binary landmark network (diagonal forced to zero).
corLM.m <- cor(medLM.m);
pcorLM.m <- cor2pcor(corLM.m);
rownames(pcorLM.m) <- rownames(corLM.m);
colnames(pcorLM.m) <- rownames(corLM.m);
netLM.m <- pcorLM.m;diag(netLM.m) <- 0;
netLM.m[pcorLM.m < pcorTH] <- 0;
netLM.m[pcorLM.m > pcorTH] <- 1;
return(list(potS=ordpotS.v,distPSPH=distPSph.m,prob=probPSph.m,hetPS=hetPS.v,cl=clust.idx,pscl=psclID.v,distPSCL=distPSCL.m,medLM=medLM.m,srPSCL=srPSCL.v,srLM=srLM.v,distPHLM=distPHLM.m,cellLM=cellLM.v,cellLM2=cellLM2.v,adj=sadjLM.m,pcorLM=pcorLM.m,netLM=netLM.m));
}
|
## Knitr-extracted vignette code for the mstDIF package: demonstrates the
## logistic-regression, mstSIB, analytical score-based, bootstrap and
## permutation DIF tests on the bundled toy data set.
## Fix: `decorrelate = F` replaced by `decorrelate = FALSE` -- `F` is an
## ordinary (reassignable) binding, not a reserved word.
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## -----------------------------------------------------------------------------
library(mstDIF)
## -----------------------------------------------------------------------------
data("toydata")
## -----------------------------------------------------------------------------
# Unpack the toy data: responses, DIF covariates, item parameters and
# ability estimates with their standard errors.
resp <- toydata$resp
group_categ <- toydata$group_categ
group_cont <- toydata$group_cont
it <- toydata$it
theta_est <- toydata$theta_est
see_est <- toydata$see_est
## -----------------------------------------------------------------------------
log_reg_DIF <- mstDIF(resp, DIF_covariate = factor(group_categ), method = "logreg",
                      theta = theta_est)
## -----------------------------------------------------------------------------
log_reg_DIF
## -----------------------------------------------------------------------------
summary(log_reg_DIF, DIF_type = "all")
## -----------------------------------------------------------------------------
mstSIB_DIF <- mstDIF(resp, DIF_covariate = factor(group_categ), method = "mstsib",
                     theta = theta_est, see = see_est)
mstSIB_DIF
## -----------------------------------------------------------------------------
summary(mstSIB_DIF)
## -----------------------------------------------------------------------------
library(mirt)
mirt_model <- mirt(as.data.frame(resp), model = 1, verbose = FALSE)
## -----------------------------------------------------------------------------
sc_DIF <- mstDIF(mirt_model, DIF_covariate = factor(group_categ), method = "analytical")
sc_DIF
## -----------------------------------------------------------------------------
summary(sc_DIF)
## -----------------------------------------------------------------------------
sc_DIF_2 <- mstDIF(mirt_model, DIF_covariate = group_cont, method = "analytical")
sc_DIF_2
## -----------------------------------------------------------------------------
summary(sc_DIF_2)
## -----------------------------------------------------------------------------
# Item discrimination (column 1) and difficulty (column 2) parameters.
discr <- it[, 1]
diff <- it[, 2]
## -----------------------------------------------------------------------------
bootstrap_DIF <- mstDIF(resp = resp, DIF_covariate = group_categ, method = "bootstrap",
                        a = discr, b = diff, decorrelate = FALSE)
## -----------------------------------------------------------------------------
bootstrap_DIF
## -----------------------------------------------------------------------------
summary(bootstrap_DIF)
## -----------------------------------------------------------------------------
bootstrap_DIF_2 <- mstDIF(resp = resp, DIF_covariate = group_cont, method = "bootstrap",
                          a = discr, b = diff, decorrelate = FALSE)
bootstrap_DIF_2
## -----------------------------------------------------------------------------
summary(bootstrap_DIF_2)
## -----------------------------------------------------------------------------
permutation_DIF <- mstDIF(resp = resp, DIF_covariate = group_categ, method = "permutation",
                          a = discr, b = diff, decorrelate = FALSE)
permutation_DIF_2 <- mstDIF(resp = resp, DIF_covariate = group_cont, method = "permutation",
                            a = discr, b = diff, decorrelate = FALSE)
## -----------------------------------------------------------------------------
summary(permutation_DIF)
## -----------------------------------------------------------------------------
summary(permutation_DIF_2)
| /inst/doc/mstDIF.R | no_license | cran/mstDIF | R | false | false | 3,609 | r | ## ---- include = FALSE---------------------------------------------------------
## Knitr-extracted vignette code for the mstDIF package (duplicated copy).
## Fix: `decorrelate = F` replaced by `decorrelate = FALSE` -- `F` is an
## ordinary (reassignable) binding, not a reserved word.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## -----------------------------------------------------------------------------
library(mstDIF)
## -----------------------------------------------------------------------------
data("toydata")
## -----------------------------------------------------------------------------
# Unpack the toy data: responses, DIF covariates, item parameters and
# ability estimates with their standard errors.
resp <- toydata$resp
group_categ <- toydata$group_categ
group_cont <- toydata$group_cont
it <- toydata$it
theta_est <- toydata$theta_est
see_est <- toydata$see_est
## -----------------------------------------------------------------------------
log_reg_DIF <- mstDIF(resp, DIF_covariate = factor(group_categ), method = "logreg",
                      theta = theta_est)
## -----------------------------------------------------------------------------
log_reg_DIF
## -----------------------------------------------------------------------------
summary(log_reg_DIF, DIF_type = "all")
## -----------------------------------------------------------------------------
mstSIB_DIF <- mstDIF(resp, DIF_covariate = factor(group_categ), method = "mstsib",
                     theta = theta_est, see = see_est)
mstSIB_DIF
## -----------------------------------------------------------------------------
summary(mstSIB_DIF)
## -----------------------------------------------------------------------------
library(mirt)
mirt_model <- mirt(as.data.frame(resp), model = 1, verbose = FALSE)
## -----------------------------------------------------------------------------
sc_DIF <- mstDIF(mirt_model, DIF_covariate = factor(group_categ), method = "analytical")
sc_DIF
## -----------------------------------------------------------------------------
summary(sc_DIF)
## -----------------------------------------------------------------------------
sc_DIF_2 <- mstDIF(mirt_model, DIF_covariate = group_cont, method = "analytical")
sc_DIF_2
## -----------------------------------------------------------------------------
summary(sc_DIF_2)
## -----------------------------------------------------------------------------
# Item discrimination (column 1) and difficulty (column 2) parameters.
discr <- it[, 1]
diff <- it[, 2]
## -----------------------------------------------------------------------------
bootstrap_DIF <- mstDIF(resp = resp, DIF_covariate = group_categ, method = "bootstrap",
                        a = discr, b = diff, decorrelate = FALSE)
## -----------------------------------------------------------------------------
bootstrap_DIF
## -----------------------------------------------------------------------------
summary(bootstrap_DIF)
## -----------------------------------------------------------------------------
bootstrap_DIF_2 <- mstDIF(resp = resp, DIF_covariate = group_cont, method = "bootstrap",
                          a = discr, b = diff, decorrelate = FALSE)
bootstrap_DIF_2
## -----------------------------------------------------------------------------
summary(bootstrap_DIF_2)
## -----------------------------------------------------------------------------
permutation_DIF <- mstDIF(resp = resp, DIF_covariate = group_categ, method = "permutation",
                          a = discr, b = diff, decorrelate = FALSE)
permutation_DIF_2 <- mstDIF(resp = resp, DIF_covariate = group_cont, method = "permutation",
                            a = discr, b = diff, decorrelate = FALSE)
## -----------------------------------------------------------------------------
summary(permutation_DIF)
## -----------------------------------------------------------------------------
summary(permutation_DIF_2)
|
# class provides the knn() classifier
library(class)
# bring the iris data set into the workspace
data("iris")
# fixed seed => reproducible train/test partition
set.seed(4948493)
# draw 70% of the row indices for training; the remainder is the test set
ir_sample <- sample(seq_len(nrow(iris)), size = nrow(iris) * 0.7)
ir_train <- iris[ir_sample, ]  # training rows (70%)
ir_test <- iris[-ir_sample, ]  # held-out rows (30%)
# Compute classification accuracy: the proportion of entries where
# `predicted` equals `actual`. Both vectors must have the same length;
# an NA in either input propagates to an NA result.
# Fix: use `<-` for assignment instead of `=` (R style convention).
accuracy <- function(actual, predicted) {
  mean(actual == predicted)
}
#test for single k: fit kNN with k = 40 on the scaled numeric predictors
#(column 5 is Species, the class label, so it is excluded and passed as cl)
# Fixes below: `=` assignments replaced by `<-`.
pred <- knn(train = scale(ir_train[, -5]),
            test = scale(ir_test[, -5]),
            cl = ir_train$Species,
            k = 40)
accuracy(ir_test$Species, pred)
#LOOP FOR MULTIPLE K's: sweep k over 1..100 and record test-set accuracy
k_to_try <- 1:100                              # was: k_to_try = 1:100
acc_k <- rep(x = 0, times = length(k_to_try))  # preallocated result vector
for (i in seq_along(k_to_try)) {
  pred <- knn(train = scale(ir_train[, -5]),
              test = scale(ir_test[, -5]),
              cl = ir_train$Species,
              k = k_to_try[i])
  acc_k[i] <- accuracy(ir_test$Species, pred)
}
# plot accuracy as a function of k
plot(acc_k, type = "b", col = "dodgerblue", cex = 1, pch = 20,
     xlab = "k, number of neighbors", ylab = "classification accuracy",
     main = "Accuracy vs Neighbors")
# add lines indicating k with best accuracy
abline(v = which(acc_k == max(acc_k)), col = "darkorange", lwd = 1.5)
# add line for max accuracy seen
abline(h = max(acc_k), col = "grey", lty = 2) | /knn-exercise/exercise.R | no_license | DylanThornsberry/mini-demos | R | false | false | 1,328 | r | #load library
library(class)  # class provides the knn() classifier
# bring the iris data set into the workspace
data("iris")
# fixed seed => reproducible train/test partition
set.seed(4948493)
# draw 70% of the row indices for training; the remainder is the test set
ir_sample <- sample(seq_len(nrow(iris)), size = nrow(iris) * 0.7)
ir_train <- iris[ir_sample, ]  # training rows (70%)
ir_test <- iris[-ir_sample, ]  # held-out rows (30%)
# Compute classification accuracy: the proportion of entries where
# `predicted` equals `actual`. Both vectors must have the same length;
# an NA in either input propagates to an NA result.
# Fix: use `<-` for assignment instead of `=` (R style convention).
accuracy <- function(actual, predicted) {
  mean(actual == predicted)
}
#test for single k: fit kNN with k = 40 on the scaled numeric predictors
#(column 5 is Species, the class label, so it is excluded and passed as cl)
# Fixes below: `=` assignments replaced by `<-`.
pred <- knn(train = scale(ir_train[, -5]),
            test = scale(ir_test[, -5]),
            cl = ir_train$Species,
            k = 40)
accuracy(ir_test$Species, pred)
#LOOP FOR MULTIPLE K's: sweep k over 1..100 and record test-set accuracy
k_to_try <- 1:100                              # was: k_to_try = 1:100
acc_k <- rep(x = 0, times = length(k_to_try))  # preallocated result vector
for (i in seq_along(k_to_try)) {
  pred <- knn(train = scale(ir_train[, -5]),
              test = scale(ir_test[, -5]),
              cl = ir_train$Species,
              k = k_to_try[i])
  acc_k[i] <- accuracy(ir_test$Species, pred)
}
# plot accuracy as a function of k
plot(acc_k, type = "b", col = "dodgerblue", cex = 1, pch = 20,
     xlab = "k, number of neighbors", ylab = "classification accuracy",
     main = "Accuracy vs Neighbors")
# add lines indicating k with best accuracy
abline(v = which(acc_k == max(acc_k)), col = "darkorange", lwd = 1.5)
# add line for max accuracy seen
abline(h = max(acc_k), col = "grey", lty = 2)
require(slam)
## makeGPS: build Gene-Pair Signatures from a pathway-gene annotation table.
## A GPS is a pair of genes co-annotated to exactly one pathway; the search
## is repeated on up to five successively reduced repositories (L1..L5),
## each time removing the pathways already covered by a signature.
## Arguments:
##   pathwayTable       - data frame with columns pathwayId, pathwayName, gene.
##   fn                 - alternatively, a tab-delimited file with those columns.
##   maxLevels          - kept for interface compatibility; the recursion
##                        depth is currently fixed at 5 levels (L1..L5).
##   saveFile           - optional .rda path to save the result under repoName.
##   repoName           - name of the repository object, e.g. 'KEGG2016'.
##   maxFunperGene      - drop genes annotated to more than this many pathways.
##   maxGenesperPathway - drop pathways with more than this many genes.
##   minGenesperPathway - drop pathways with fewer than this many genes.
## Returns (invisibly) a list with the encoded repository, the per-level
## signature sets L1..L5, the repository name, pathway descriptions and the
## call. Fixes vs. original: TRUE instead of T, save(list=) instead of
## eval(parse()), and robust row removal when no pathway matches.
makeGPS<-function(pathwayTable=NULL,fn=NULL,maxLevels=5,saveFile=NULL,
                  repoName='userrepo', maxFunperGene=100,maxGenesperPathway=500,
                  minGenesperPathway=10){
  if(is.null(pathwayTable)){
    fG <- read.table(fn, header = TRUE, sep = '\t', quote = '@')  # was header=T
  } else {
    fG <- pathwayTable
  }
  colnames(fG) <- c('pwys','nms','gns')
  ## Keep genes/pathways inside the configured size limits.
  valGenes <- names(table(fG$gns))[which(table(fG$gns) < (1+maxFunperGene))]
  valPathways <- names(table(fG$pwys))[which(table(fG$pwys) < (1+maxGenesperPathway) &
                                             table(fG$pwys) > (minGenesperPathway-1))]
  fG <- fG[which(fG$gns %in% valGenes & fG$pwys %in% valPathways), ]
  L1 <- NULL; L2 <- NULL; L3 <- NULL; L4 <- NULL; L5 <- NULL
  t1 <- Sys.time()
  ## Remove rows whose pathway is in `pws`. Robust to the empty case: the
  ## original `f1[-which(...), ]` dropped ALL rows when which() returned
  ## integer(0), because -integer(0) is still integer(0).
  dropPathways <- function(f1, pws){
    sel <- which(f1$pwys %in% pws)
    if(length(sel) > 0) f1[-sel, ] else f1
  }
  ## Encode pathways and genes as integer indices plus lookup vectors.
  makedictionary <- function(y1){
    upw <- unique(y1$pwys)
    ugn <- unique(y1$gns)
    dy1 <- cbind(match(y1$pwys, upw), match(y1$gns, ugn))
    colnames(dy1) <- c('pwys','gns')
    invisible(list(upw, ugn, dy1))
  }
  ## Find all gene pairs (and single pathway-unique genes) that identify
  ## exactly one pathway in the repository f1.
  makesigs <- function(f1){
    gs <- as.character(unique(f1$gns))
    ps <- as.character(unique(f1$pwys))
    si <- match(f1$gns, gs)
    sj <- match(f1$pwys, ps)
    sv <- rep(1, nrow(f1))
    s <- slam::simple_triplet_matrix(i=si, j=sj, v=sv,
                                     dimnames=list('rownames'=gs, 'colnames'=ps))
    ## M[g1,g2] = number of pathways annotating both genes.
    M <- slam::tcrossprod_simple_triplet_matrix(s)
    rownames(M) <- gs
    colnames(M) <- gs
    ## Pathway-unique genes: annotated to exactly one pathway.
    PU <- which(diag(M) == 1)
    PUG <- cbind(si[PU], sj[PU])
    ## Gene pairs co-annotated to exactly one pathway (upper triangle only).
    GP <- which(M == 1, arr.ind = TRUE)            # was arr.ind=T
    GP <- GP[GP[,1] < GP[,2], , drop = FALSE]      # keep matrix shape for a single pair
    ## If either gene of a pair is pathway-unique, that gene's pathway is
    ## the pair's signature pathway.
    S1 <- PUG[match(GP[,1], PUG[,1]), 2]
    S2 <- PUG[match(GP[,2], PUG[,1]), 2]
    S <- ifelse(is.na(S1), S2, S1)
    rm(M)
    gc(TRUE)                                       # was gc(T)
    m <- as.matrix(s)
    ## Otherwise locate the single pathway containing both genes of the pair.
    for(h1 in which(is.na(S))){
      S[h1] <- which(m[GP[h1,1], ] + m[GP[h1,2], ] == 2)
    }
    GPS <- cbind(GP, S)
    print(Sys.time() - t1)
    rm(m)
    degs <- table(as.character(f1$gns))
    pwyszs <- table(as.character(f1$pwys))
    rownames(GPS) <- NULL
    rownames(PUG) <- NULL
    invisible(list('GPS'=GPS, 'PUG'=PUG, 'gs'=gs, 'ps'=ps, 'degs'=degs, 'pwyszs'=pwyszs))
  }
  ## Level 1..5: extract signatures, then drop the pathways they cover and
  ## repeat on the reduced repository while at least two rows remain.
  L1 <- makesigs(fG)
  fG2 <- dropPathways(fG, L1$ps[unique(L1$GPS[,'S'])])
  if(nrow(fG2) > 1){
    L2 <- makesigs(fG2)
    fG3 <- dropPathways(fG2, L2$ps[unique(L2$GPS[,'S'])])
    if(nrow(fG3) > 1){
      L3 <- makesigs(fG3)
      fG4 <- dropPathways(fG3, L3$ps[unique(L3$GPS[,'S'])])
      if(nrow(fG4) > 1){
        L4 <- makesigs(fG4)
        fG5 <- dropPathways(fG4, L4$ps[unique(L4$GPS[,'S'])])
        if(nrow(fG5) > 1){
          L5 <- makesigs(fG5)
        }
      }
    }
  }
  res <- list()
  res[['origRepo']] <- makedictionary(fG)
  res[['L1']] <- L1
  res[['L2']] <- L2
  res[['L3']] <- L3
  res[['L4']] <- L4
  res[['L5']] <- L5
  res[['repoName']] <- repoName
  res[['pathwaydescriptions']] <- unique(fG[,1:2])
  res[["call"]] <- as.character(match.call())
  x1 <- as.character(repoName)
  if(!is.null(saveFile)){
    ## Save `res` under the repository name. save(list=) replaces the
    ## original eval(parse(text=...)) string construction.
    assign(x1, res)
    save(list = x1, file = saveFile)
  }
  invisible(res)
}
| /sigora/R/makeGPS.R | no_license | ingted/R-Examples | R | false | false | 4,301 | r | require(slam)
## makeGPS (second, duplicated copy): build Gene-Pair Signatures from a
## pathway-gene annotation table. A GPS is a pair of genes co-annotated to
## exactly one pathway; the search is repeated on up to five successively
## reduced repositories (L1..L5).
## Arguments:
##   pathwayTable       - data frame with columns pathwayId, pathwayName, gene.
##   fn                 - alternatively, a tab-delimited file with those columns.
##   maxLevels          - kept for interface compatibility; the recursion
##                        depth is currently fixed at 5 levels (L1..L5).
##   saveFile           - optional .rda path to save the result under repoName.
##   repoName           - name of the repository object, e.g. 'KEGG2016'.
##   maxFunperGene      - drop genes annotated to more than this many pathways.
##   maxGenesperPathway - drop pathways with more than this many genes.
##   minGenesperPathway - drop pathways with fewer than this many genes.
## Returns (invisibly) a list with the encoded repository, the per-level
## signature sets L1..L5, the repository name, pathway descriptions and the
## call. Fixes vs. original: TRUE instead of T, save(list=) instead of
## eval(parse()), and robust row removal when no pathway matches.
makeGPS<-function(pathwayTable=NULL,fn=NULL,maxLevels=5,saveFile=NULL,
                  repoName='userrepo', maxFunperGene=100,maxGenesperPathway=500,
                  minGenesperPathway=10){
  if(is.null(pathwayTable)){
    fG <- read.table(fn, header = TRUE, sep = '\t', quote = '@')  # was header=T
  } else {
    fG <- pathwayTable
  }
  colnames(fG) <- c('pwys','nms','gns')
  ## Keep genes/pathways inside the configured size limits.
  valGenes <- names(table(fG$gns))[which(table(fG$gns) < (1+maxFunperGene))]
  valPathways <- names(table(fG$pwys))[which(table(fG$pwys) < (1+maxGenesperPathway) &
                                             table(fG$pwys) > (minGenesperPathway-1))]
  fG <- fG[which(fG$gns %in% valGenes & fG$pwys %in% valPathways), ]
  L1 <- NULL; L2 <- NULL; L3 <- NULL; L4 <- NULL; L5 <- NULL
  t1 <- Sys.time()
  ## Remove rows whose pathway is in `pws`. Robust to the empty case: the
  ## original `f1[-which(...), ]` dropped ALL rows when which() returned
  ## integer(0), because -integer(0) is still integer(0).
  dropPathways <- function(f1, pws){
    sel <- which(f1$pwys %in% pws)
    if(length(sel) > 0) f1[-sel, ] else f1
  }
  ## Encode pathways and genes as integer indices plus lookup vectors.
  makedictionary <- function(y1){
    upw <- unique(y1$pwys)
    ugn <- unique(y1$gns)
    dy1 <- cbind(match(y1$pwys, upw), match(y1$gns, ugn))
    colnames(dy1) <- c('pwys','gns')
    invisible(list(upw, ugn, dy1))
  }
  ## Find all gene pairs (and single pathway-unique genes) that identify
  ## exactly one pathway in the repository f1.
  makesigs <- function(f1){
    gs <- as.character(unique(f1$gns))
    ps <- as.character(unique(f1$pwys))
    si <- match(f1$gns, gs)
    sj <- match(f1$pwys, ps)
    sv <- rep(1, nrow(f1))
    s <- slam::simple_triplet_matrix(i=si, j=sj, v=sv,
                                     dimnames=list('rownames'=gs, 'colnames'=ps))
    ## M[g1,g2] = number of pathways annotating both genes.
    M <- slam::tcrossprod_simple_triplet_matrix(s)
    rownames(M) <- gs
    colnames(M) <- gs
    ## Pathway-unique genes: annotated to exactly one pathway.
    PU <- which(diag(M) == 1)
    PUG <- cbind(si[PU], sj[PU])
    ## Gene pairs co-annotated to exactly one pathway (upper triangle only).
    GP <- which(M == 1, arr.ind = TRUE)            # was arr.ind=T
    GP <- GP[GP[,1] < GP[,2], , drop = FALSE]      # keep matrix shape for a single pair
    ## If either gene of a pair is pathway-unique, that gene's pathway is
    ## the pair's signature pathway.
    S1 <- PUG[match(GP[,1], PUG[,1]), 2]
    S2 <- PUG[match(GP[,2], PUG[,1]), 2]
    S <- ifelse(is.na(S1), S2, S1)
    rm(M)
    gc(TRUE)                                       # was gc(T)
    m <- as.matrix(s)
    ## Otherwise locate the single pathway containing both genes of the pair.
    for(h1 in which(is.na(S))){
      S[h1] <- which(m[GP[h1,1], ] + m[GP[h1,2], ] == 2)
    }
    GPS <- cbind(GP, S)
    print(Sys.time() - t1)
    rm(m)
    degs <- table(as.character(f1$gns))
    pwyszs <- table(as.character(f1$pwys))
    rownames(GPS) <- NULL
    rownames(PUG) <- NULL
    invisible(list('GPS'=GPS, 'PUG'=PUG, 'gs'=gs, 'ps'=ps, 'degs'=degs, 'pwyszs'=pwyszs))
  }
  ## Level 1..5: extract signatures, then drop the pathways they cover and
  ## repeat on the reduced repository while at least two rows remain.
  L1 <- makesigs(fG)
  fG2 <- dropPathways(fG, L1$ps[unique(L1$GPS[,'S'])])
  if(nrow(fG2) > 1){
    L2 <- makesigs(fG2)
    fG3 <- dropPathways(fG2, L2$ps[unique(L2$GPS[,'S'])])
    if(nrow(fG3) > 1){
      L3 <- makesigs(fG3)
      fG4 <- dropPathways(fG3, L3$ps[unique(L3$GPS[,'S'])])
      if(nrow(fG4) > 1){
        L4 <- makesigs(fG4)
        fG5 <- dropPathways(fG4, L4$ps[unique(L4$GPS[,'S'])])
        if(nrow(fG5) > 1){
          L5 <- makesigs(fG5)
        }
      }
    }
  }
  res <- list()
  res[['origRepo']] <- makedictionary(fG)
  res[['L1']] <- L1
  res[['L2']] <- L2
  res[['L3']] <- L3
  res[['L4']] <- L4
  res[['L5']] <- L5
  res[['repoName']] <- repoName
  res[['pathwaydescriptions']] <- unique(fG[,1:2])
  res[["call"]] <- as.character(match.call())
  x1 <- as.character(repoName)
  if(!is.null(saveFile)){
    ## Save `res` under the repository name. save(list=) replaces the
    ## original eval(parse(text=...)) string construction.
    assign(x1, res)
    save(list = x1, file = saveFile)
  }
  invisible(res)
}
|
\name{dist.Multivariate.Laplace}
\alias{dmvl}
\alias{rmvl}
\title{Multivariate Laplace Distribution}
\description{
These functions provide the density and random number generation
for the multivariate Laplace distribution.
}
\usage{
dmvl(x, mu, Sigma, log=FALSE)
rmvl(n, mu, Sigma)
}
\arguments{
\item{x}{This is data or parameters in the form of a vector of length
\eqn{k} or a matrix with \eqn{k} columns.}
\item{n}{This is the number of random draws.}
\item{mu}{This is mean vector \eqn{\mu}{mu} with length \eqn{k} or
matrix with \eqn{k} columns.}
\item{Sigma}{This is the \eqn{k \times k}{k x k} covariance matrix
\eqn{\Sigma}{Sigma}.}
\item{log}{Logical. If \code{log=TRUE}, then the logarithm of the
density is returned.}
}
\details{
\itemize{
\item Application: Continuous Multivariate
\item Density: \deqn{p(\theta) = \frac{2}{(2\pi)^{k/2}
|\Sigma|^{1/2}} \frac{(\pi/(2\sqrt{2(\theta - \mu)^T \Sigma^{-1}
(\theta - \mu)}))^{1/2} \exp(-\sqrt{2(\theta - \mu)^T \Sigma^{-1}
(\theta - \mu)})}{\sqrt{((\theta - \mu)^T \Sigma^{-1} (\theta -
\mu) / 2)}^{k/2-1}}}{p(theta) = (2 / ((2*pi)^(k/2) *
|Sigma|^(1/2)))
((sqrt(pi/(2*sqrt(2*(theta-mu)^TSigma^(-1)(theta-mu)))) *
exp(-sqrt(2*(theta-mu)^TSigma^(-1)(theta-mu)))) /
sqrt((theta-mu)^TSigma^(-1)(theta-mu)/2)^(k/2-1))}
\item Inventor: Fang et al. (1990)
\item Notation 1: \eqn{\theta \sim \mathcal{MVL}(\mu, \Sigma)}{theta ~
MVL(mu, Sigma)}
\item Notation 2: \eqn{\theta \sim \mathcal{L}_k(\mu, \Sigma)}{theta ~
L[k](mu, Sigma)}
\item Notation 3: \eqn{p(\theta) = \mathcal{MVL}(\theta | \mu,
\Sigma)}{p(theta) = MVL(theta | mu, Sigma)}
\item Notation 4: \eqn{p(\theta) = \mathcal{L}_k(\theta | \mu,
\Sigma)}{p(theta) = L[k](theta | mu, Sigma)}
\item Parameter 1: location vector \eqn{\mu}{mu}
\item Parameter 2: positive-definite \eqn{k \times k}{k x k}
covariance matrix \eqn{\Sigma}{Sigma}
\item Mean: \eqn{E(\theta) = \mu}{E(theta) = mu}
\item Variance: \eqn{var(\theta) = \Sigma}{var(theta) = Sigma}
\item Mode: \eqn{mode(\theta) = \mu}{mode(theta) = mu}
}
The multivariate Laplace distribution is a multidimensional extension of
the one-dimensional or univariate symmetric Laplace distribution. There
are multiple forms of the multivariate Laplace distribution.
The bivariate case was introduced by Ulrich and Chen (1987), and the
first form in larger dimensions may have been Fang et al. (1990), which
requires a Bessel function. Alternatively, multivariate Laplace was soon
introduced as a special case of a multivariate Linnik distribution
(Anderson, 1992), and later as a special case of the multivariate power
exponential distribution (Fernandez et al., 1995; Ernst, 1998). Bayesian
considerations appear in Haro-Lopez and Smith (1999). Wainwright and
Simoncelli (2000) presented multivariate Laplace as a Gaussian scale
mixture. Kotz et al. (2001) present the distribution formally. Here, the
density is calculated with the asymptotic formula for the Bessel
function as presented in Wang et al. (2008).
The multivariate Laplace distribution is an attractive alternative to
the multivariate normal distribution due to its wider tails, and remains
a two-parameter distribution (though alternative three-parameter forms
have been introduced as well), unlike the three-parameter multivariate t
distribution, which is often used as a robust alternative to the
multivariate normal distribution.
}
\value{
\code{dmvl} gives the density, and
\code{rmvl} generates random deviates.
}
\references{
Anderson, D.N. (1992). "A Multivariate Linnik Distribution".
\emph{Statistical Probability Letters}, 14, p. 333--336.
Eltoft, T., Kim, T., and Lee, T. (2006). "On the Multivariate Laplace
Distribution". \emph{IEEE Signal Processing Letters}, 13(5),
p. 300--303.
Ernst, M. D. (1998). "A Multivariate Generalized Laplace
Distribution". \emph{Computational Statistics}, 13, p. 227--232.
Fang, K.T., Kotz, S., and Ng, K.W. (1990). "Symmetric Multivariate and
Related Distributions". Monographs on Statistics and Probability, 36,
Chapman-Hall, London.
Fernandez, C., Osiewalski, J. and Steel, M.F.J. (1995). "Modeling and
Inference with v-spherical Distributions". \emph{Journal of the
American Statistical Association}, 90, p. 1331--1340.
Gomez, E., Gomez-Villegas, M.A., and Marin, J.M. (1998). "A
Multivariate Generalization of the Power Exponential Family of
Distributions". \emph{Communications in Statistics-Theory and
Methods}, 27(3), p. 589--600.
Haro-Lopez, R.A. and Smith, A.F.M. (1999). "On Robust Bayesian
Analysis for Location and Scale Parameters". \emph{Journal of
Multivariate Analysis}, 70, p. 30--56.
Kotz, S., Kozubowski, T.J., and Podgorski, K. (2001). "The Laplace
Distribution and Generalizations: A Revisit with Applications to
Communications, Economics, Engineering, and Finance". Birkhauser:
Boston, MA.
Ulrich, G. and Chen, C.C. (1987). "A Bivariate Double Exponential
Distribution and its Generalization". \emph{ASA Proceedings on
Statistical Computing}, p. 127--129.
Wang, D., Zhang, C., and Zhao, X. (2008). "Multivariate Laplace
Filter: A Heavy-Tailed Model for Target Tracking". \emph{Proceedings
of the 19th International Conference on Pattern Recognition}: FL.
Wainwright, M.J. and Simoncelli, E.P. (2000). "Scale Mixtures of
Gaussians and the Statistics of Natural Images". \emph{Advances in
Neural Information Processing Systems}, 12, p. 855--861.
}
\author{Statisticat, LLC. \email{software@bayesian-inference.com}}
\seealso{
\code{\link{dlaplace}},
\code{\link{dmvn}},
\code{\link{dmvnp}},
\code{\link{dmvpe}},
\code{\link{dmvt}},
\code{\link{dnorm}},
\code{\link{dnormp}}, and
\code{\link{dnormv}}.
}
\examples{
library(LaplacesDemonCpp)
x <- dmvl(c(1,2,3), c(0,1,2), diag(3))
X <- rmvl(1000, c(0,1,2), diag(3))
joint.density.plot(X[,1], X[,2], color=TRUE)
}
\keyword{Distribution} | /man/dist.Multivariate.Laplace.Rd | permissive | sakex/LaplacesDemonCpp | R | false | false | 6,137 | rd | \name{dist.Multivariate.Laplace}
\alias{dmvl}
\alias{rmvl}
\title{Multivariate Laplace Distribution}
\description{
These functions provide the density and random number generation
for the multivariate Laplace distribution.
}
\usage{
dmvl(x, mu, Sigma, log=FALSE)
rmvl(n, mu, Sigma)
}
\arguments{
\item{x}{This is data or parameters in the form of a vector of length
\eqn{k} or a matrix with \eqn{k} columns.}
\item{n}{This is the number of random draws.}
\item{mu}{This is mean vector \eqn{\mu}{mu} with length \eqn{k} or
matrix with \eqn{k} columns.}
\item{Sigma}{This is the \eqn{k \times k}{k x k} covariance matrix
\eqn{\Sigma}{Sigma}.}
\item{log}{Logical. If \code{log=TRUE}, then the logarithm of the
density is returned.}
}
\details{
\itemize{
\item Application: Continuous Multivariate
\item Density: \deqn{p(\theta) = \frac{2}{(2\pi)^{k/2}
|\Sigma|^{1/2}} \frac{(\pi/(2\sqrt{2(\theta - \mu)^T \Sigma^{-1}
(\theta - \mu)}))^{1/2} \exp(-\sqrt{2(\theta - \mu)^T \Sigma^{-1}
(\theta - \mu)})}{\sqrt{((\theta - \mu)^T \Sigma^{-1} (\theta -
\mu) / 2)}^{k/2-1}}}{p(theta) = (2 / ((2*pi)^(k/2) *
|Sigma|^(1/2)))
((sqrt(pi/(2*sqrt(2*(theta-mu)^TSigma^(-1)(theta-mu)))) *
exp(-sqrt(2*(theta-mu)^TSigma^(-1)(theta-mu)))) /
sqrt((theta-mu)^TSigma^(-1)(theta-mu)/2)^(k/2-1))}
\item Inventor: Fang et al. (1990)
\item Notation 1: \eqn{\theta \sim \mathcal{MVL}(\mu, \Sigma)}{theta ~
MVL(mu, Sigma)}
\item Notation 2: \eqn{\theta \sim \mathcal{L}_k(\mu, \Sigma)}{theta ~
L[k](mu, Sigma)}
\item Notation 3: \eqn{p(\theta) = \mathcal{MVL}(\theta | \mu,
\Sigma)}{p(theta) = MVL(theta | mu, Sigma)}
\item Notation 4: \eqn{p(\theta) = \mathcal{L}_k(\theta | \mu,
\Sigma)}{p(theta) = L[k](theta | mu, Sigma)}
\item Parameter 1: location vector \eqn{\mu}{mu}
\item Parameter 2: positive-definite \eqn{k \times k}{k x k}
covariance matrix \eqn{\Sigma}{Sigma}
\item Mean: \eqn{E(\theta) = \mu}{E(theta) = mu}
\item Variance: \eqn{var(\theta) = \Sigma}{var(theta) = Sigma}
\item Mode: \eqn{mode(\theta) = \mu}{mode(theta) = mu}
}
The multivariate Laplace distribution is a multidimensional extension of
the one-dimensional or univariate symmetric Laplace distribution. There
are multiple forms of the multivariate Laplace distribution.
The bivariate case was introduced by Ulrich and Chen (1987), and the
first form in larger dimensions may have been Fang et al. (1990), which
requires a Bessel function. Alternatively, multivariate Laplace was soon
introduced as a special case of a multivariate Linnik distribution
(Anderson, 1992), and later as a special case of the multivariate power
exponential distribution (Fernandez et al., 1995; Ernst, 1998). Bayesian
considerations appear in Haro-Lopez and Smith (1999). Wainwright and
Simoncelli (2000) presented multivariate Laplace as a Gaussian scale
mixture. Kotz et al. (2001) present the distribution formally. Here, the
density is calculated with the asymptotic formula for the Bessel
function as presented in Wang et al. (2008).
The multivariate Laplace distribution is an attractive alternative to
the multivariate normal distribution due to its wider tails, and remains
a two-parameter distribution (though alternative three-parameter forms
have been introduced as well), unlike the three-parameter multivariate t
distribution, which is often used as a robust alternative to the
multivariate normal distribution.
}
\value{
\code{dmvl} gives the density, and
\code{rmvl} generates random deviates.
}
\references{
Anderson, D.N. (1992). "A Multivariate Linnik Distribution".
\emph{Statistical Probability Letters}, 14, p. 333--336.
Eltoft, T., Kim, T., and Lee, T. (2006). "On the Multivariate Laplace
Distribution". \emph{IEEE Signal Processing Letters}, 13(5),
p. 300--303.
Ernst, M. D. (1998). "A Multivariate Generalized Laplace
Distribution". \emph{Computational Statistics}, 13, p. 227--232.
Fang, K.T., Kotz, S., and Ng, K.W. (1990). "Symmetric Multivariate and
Related Distributions". Monographs on Statistics and Probability, 36,
Chapman-Hall, London.
Fernandez, C., Osiewalski, J. and Steel, M.F.J. (1995). "Modeling and
Inference with v-spherical Distributions". \emph{Journal of the
American Statistical Association}, 90, p. 1331--1340.
Gomez, E., Gomez-Villegas, M.A., and Marin, J.M. (1998). "A
Multivariate Generalization of the Power Exponential Family of
Distributions". \emph{Communications in Statistics-Theory and
Methods}, 27(3), p. 589--600.
Haro-Lopez, R.A. and Smith, A.F.M. (1999). "On Robust Bayesian
Analysis for Location and Scale Parameters". \emph{Journal of
Multivariate Analysis}, 70, p. 30--56.
Kotz, S., Kozubowski, T.J., and Podgorski, K. (2001). "The Laplace
Distribution and Generalizations: A Revisit with Applications to
Communications, Economics, Engineering, and Finance". Birkhauser:
Boston, MA.
Ulrich, G. and Chen, C.C. (1987). "A Bivariate Double Exponential
Distribution and its Generalization". \emph{ASA Proceedings on
Statistical Computing}, p. 127--129.
Wang, D., Zhang, C., and Zhao, X. (2008). "Multivariate Laplace
Filter: A Heavy-Tailed Model for Target Tracking". \emph{Proceedings
of the 19th International Conference on Pattern Recognition}: FL.
Wainwright, M.J. and Simoncelli, E.P. (2000). "Scale Mixtures of
Gaussians and the Statistics of Natural Images". \emph{Advances in
Neural Information Processing Systems}, 12, p. 855--861.
}
\author{Statisticat, LLC. \email{software@bayesian-inference.com}}
\seealso{
\code{\link{dlaplace}},
\code{\link{dmvn}},
\code{\link{dmvnp}},
\code{\link{dmvpe}},
\code{\link{dmvt}},
\code{\link{dnorm}},
\code{\link{dnormp}}, and
\code{\link{dnormv}}.
}
\examples{
library(LaplacesDemonCpp)
x <- dmvl(c(1,2,3), c(0,1,2), diag(3))
X <- rmvl(1000, c(0,1,2), diag(3))
joint.density.plot(X[,1], X[,2], color=TRUE)
}
\keyword{Distribution} |
#
# Plotting functions
#
plot.tree.breakpoints <- function(gtree, breakpoints.v, breakpoints.h, labels=c('name', 'id')){
  # Plots a tree graph, highlighting two sets of breakpoint vertices.
  # Arguments:
  #   gtree: an igraph tree (no cycles); vertices must carry a 'date'
  #     attribute -- the oldest vertex is used as the layout root.
  #   breakpoints.v: vertex ids/names highlighted in green.
  #   breakpoints.h: vertex ids/names highlighted in blue.
  #   labels: vertex labelling mode, 'name' (vertex names) or 'id'
  #     (numeric vertex ids).
  # Fix: the original evaluated if(labels=='name') against the length-2
  # default c('name', 'id'), which errors in R >= 4.2; match.arg()
  # collapses the default to 'name' and validates user-supplied values.
  labels <- match.arg(labels)
  par(mfrow=c(1,1))
  gtree.un <- as.undirected(gtree)
  # Ordinary vertices are small and white; breakpoints are enlarged and
  # coloured by set.
  V(gtree.un)$color <- 'white'
  V(gtree.un)[breakpoints.v]$color <- 'green'
  V(gtree.un)[breakpoints.h]$color <- 'blue'
  V(gtree.un)$size <- 3
  V(gtree.un)[breakpoints.v]$size <- 10
  V(gtree.un)[breakpoints.h]$size <- 10
  # Lay the tree out top-down from the earliest vertex.
  la <- layout_as_tree(gtree.un, mode='out', root=which.min(V(gtree.un)$date))
  vertex.labels <- if (labels == 'name') V(gtree)$name else as.numeric(V(gtree))
  plot(gtree.un,
       layout = la,
       vertex.label = vertex.labels,
       edge.arrow.size=0.6)
}
plot.tree <- function(gtree, labels=c('name', 'id')){
  # Plots a tree graph.
  # Arguments:
  #   gtree: an igraph object with no cycles (a tree); vertices carry a
  #     'date' attribute used to pick the root (oldest vertex).
  #   labels: labelling mode, 'name' or 'id'; when the argument is not
  #     supplied, vertices are drawn unlabelled.
  vertex.labels <- NA
  if (!missing(labels)) {
    vertex.labels <- switch(labels,
                            'name' = V(gtree)$name,
                            'id' = as.numeric(V(gtree)))
  }
  par(mfrow=c(1,1))
  undirected <- as.undirected(gtree)
  # Top-down layout rooted at the earliest vertex.
  tree.layout <- layout_as_tree(undirected, mode='out',
                                root=which.min(V(undirected)$date))
  plot(undirected,
       layout = tree.layout,
       vertex.label = vertex.labels,
       vertex.size=3,
       edge.arrow.size=0.6)
}
plot.trees <- function(trees, labels){
  # Plots a set of trees in a grid
  # Arguments:
  #   trees: a list of igraph tree objects
  #   labels: a label for each tree.
  # NOTE(review): vertex 'color' attributes are assumed to be integer
  # indices into this 5-colour palette -- confirm against the code that
  # builds the trees.
  mypalette <- c("black", "yellow", "orange", "red", "white")
  # Fixed 3x5 grid: at most 15 trees are displayed per device.
  par(mfrow=c(3,5))
  # NOTE(review): 1:length(trees) iterates over c(1, 0) when 'trees' is
  # empty; seq_along(trees) would be the safe idiom.
  for(i in 1:length(trees)){
    gmotif <- as.undirected(trees[[i]])
    # Lay each tree out top-down from its earliest ('date') vertex.
    la = layout_as_tree(gmotif, mode='out', root=which.min(V(gmotif)$date))
    plot(gmotif,
         layout = la,
         vertex.color=mypalette[V(gmotif)$color],
         vertex.label = "",
         edge.arrow.size=0.6)
    title(labels[i])
  }
} | /R/plotting.r | permissive | alumbreras/neighborhood_motifs | R | false | false | 1,907 | r | #
# Plotting functions
#
plot.tree.breakpoints <- function(gtree, breakpoints.v, breakpoints.h, labels=c('name', 'id')){
  # Plots a tree graph, highlighting two sets of breakpoint vertices.
  # Arguments:
  #   gtree: an igraph tree (no cycles); vertices must carry a 'date'
  #     attribute -- the oldest vertex is used as the layout root.
  #   breakpoints.v: vertex ids/names highlighted in green.
  #   breakpoints.h: vertex ids/names highlighted in blue.
  #   labels: vertex labelling mode, 'name' (vertex names) or 'id'
  #     (numeric vertex ids).
  # Fix: the original evaluated if(labels=='name') against the length-2
  # default c('name', 'id'), which errors in R >= 4.2; match.arg()
  # collapses the default to 'name' and validates user-supplied values.
  labels <- match.arg(labels)
  par(mfrow=c(1,1))
  gtree.un <- as.undirected(gtree)
  # Ordinary vertices are small and white; breakpoints are enlarged and
  # coloured by set.
  V(gtree.un)$color <- 'white'
  V(gtree.un)[breakpoints.v]$color <- 'green'
  V(gtree.un)[breakpoints.h]$color <- 'blue'
  V(gtree.un)$size <- 3
  V(gtree.un)[breakpoints.v]$size <- 10
  V(gtree.un)[breakpoints.h]$size <- 10
  # Lay the tree out top-down from the earliest vertex.
  la <- layout_as_tree(gtree.un, mode='out', root=which.min(V(gtree.un)$date))
  vertex.labels <- if (labels == 'name') V(gtree)$name else as.numeric(V(gtree))
  plot(gtree.un,
       layout = la,
       vertex.label = vertex.labels,
       edge.arrow.size=0.6)
}
plot.tree <- function(gtree, labels=c('name', 'id')){
  # Plots a tree graph.
  # Arguments:
  #   gtree: an igraph object with no cycles (a tree); vertices carry a
  #     'date' attribute used to pick the root (oldest vertex).
  #   labels: labelling mode, 'name' or 'id'; when the argument is not
  #     supplied, vertices are drawn unlabelled.
  vertex.labels <- NA
  if (!missing(labels)) {
    vertex.labels <- switch(labels,
                            'name' = V(gtree)$name,
                            'id' = as.numeric(V(gtree)))
  }
  par(mfrow=c(1,1))
  undirected <- as.undirected(gtree)
  # Top-down layout rooted at the earliest vertex.
  tree.layout <- layout_as_tree(undirected, mode='out',
                                root=which.min(V(undirected)$date))
  plot(undirected,
       layout = tree.layout,
       vertex.label = vertex.labels,
       vertex.size=3,
       edge.arrow.size=0.6)
}
plot.trees <- function(trees, labels){
  # Plots a set of trees in a 3x5 grid (at most 15 trees per device).
  # Arguments:
  #   trees: a list of igraph tree objects; vertex 'color' attributes
  #     are used as indices into the palette below -- TODO confirm
  #     against the code that builds the trees.
  #   labels: a label (title) for each tree.
  mypalette <- c("black", "yellow", "orange", "red", "white")
  par(mfrow=c(3,5))
  # Fix: seq_along() instead of 1:length(trees), which iterates over
  # c(1, 0) and fails when 'trees' is empty.
  for(i in seq_along(trees)){
    gmotif <- as.undirected(trees[[i]])
    # Lay each tree out top-down from its earliest ('date') vertex.
    la <- layout_as_tree(gmotif, mode='out', root=which.min(V(gmotif)$date))
    plot(gmotif,
         layout = la,
         vertex.color=mypalette[V(gmotif)$color],
         vertex.label = "",
         edge.arrow.size=0.6)
    title(labels[i])
  }
}
#-- Input calculation ----
# Computes the SUA "input" element (code 5302) for non-primary ICS
# groups: existing input figures in 'data' are kept, missing ones are
# derived as production (element 5510) divided by the extraction
# rate (element 5423).
# Argument:
#   data: a data.table in long SUA format with columns
#     geographicAreaM49_fi, timePointYears, measuredItemFaostat_L2,
#     availability, measuredElementSuaFbs, Value,
#     flagObservationStatus and flagMethod.
# Returns 'data' with its input (5302) rows replaced by the merged
# official/computed series.
# NOTE(review): relies on 'primary' (vector of primary item codes)
# being defined in the calling environment -- confirm.
inputComputation <- function(data){ #, treeNewER){
  # Isolate needed elements
  # Previous input available
  input <- data[ !measuredItemFaostat_L2 %in% primary & measuredElementSuaFbs == '5302', ]
  #Er and production needed to compute input if no previous data
  Er <- data[ !measuredItemFaostat_L2 %in% primary & measuredElementSuaFbs == '5423', ]
  Prod <- data[ !measuredItemFaostat_L2 %in% primary & measuredElementSuaFbs == '5510', ]
  # Calculate Input -- inner join, so only cells with both a production
  # and an extraction-rate record survive.
  InputCalc <- merge(Prod, Er, by = c("geographicAreaM49_fi",
                                      "timePointYears", "measuredItemFaostat_L2",
                                      "availability"), suffixes = c("_prod", "_Er"))
  InputCalc <- InputCalc[!is.na(Value_Er)]
  # NOTE(review): this check runs AFTER the NA extraction rates were
  # dropped on the line above, so the message can never fire -- the two
  # statements are probably meant to be in the opposite order.
  if(nrow(InputCalc[is.na(Value_Er)]) > 0 ){
    message('Missing extraction rates for some Ics groups')
  }
  # input = production / extraction rate
  InputCalc[ , input := Value_prod / Value_Er]
  # Reshape the computed input back into long SUA format.
  data_compute31 <- melt(InputCalc,
                         id.vars = c("geographicAreaM49_fi", "timePointYears", "measuredItemFaostat_L2", "availability"),
                         measure.vars = "input",
                         value.name = "Value" ,
                         variable.name = "measuredElementSuaFbs", variable.factor = FALSE)
  data_compute31[measuredElementSuaFbs=="input",measuredElementSuaFbs:="5302"]
  # Computed figures get flags (I, i).
  data_compute31[ , ':='(flagObservationStatus = 'I', flagMethod = 'i', FBSsign = 0)]
  # See if any official input (full outer join with the existing series).
  # NOTE(review): 'suff' relies on partial argument matching of
  # 'suffixes'; spelling it out would be safer.
  comp31 <- merge(data_compute31, input, by = c("geographicAreaM49_fi",
                                                "timePointYears",
                                                "measuredItemFaostat_L2",
                                                "availability",
                                                "measuredElementSuaFbs"), all = TRUE,
                  suff = c('', 'Official'))
  # If previous data is not NA then it is assigned as input
  # Note: the Er should have been computed as ratio between Production and Input
  comp31[!is.na(ValueOfficial), c('Value','flagObservationStatus','flagMethod'):= list(ValueOfficial,
                                                                                       flagObservationStatusOfficial,
                                                                                       flagMethodOfficial)]
  comp31 <- comp31[ , c('ValueOfficial', 'flagObservationStatusOfficial',
                        'flagMethodOfficial') := NULL]
  # Remove all input data from the original data and add the only data31 part
  # existing data are included as computed with computed extraction rates
  # other input data are computed starting from given extraction rates
  dataNo31 <- data[measuredElementSuaFbs!="5302"]
  SUAinput <- rbind(dataNo31, comp31[,.(geographicAreaM49_fi, timePointYears, measuredItemFaostat_L2, availability, measuredElementSuaFbs, Value, flagObservationStatus, flagMethod)]) #rbind(data, data_compute31) #
  return(SUAinput)
} | /module/fi_SUAFBS_plugin/R/InputCalc.R | no_license | SWS-Methodology/faoswsFisheryStandardization | R | false | false | 2,938 | r | #-- Input calculation ----
inputComputation <- function(data){ #, treeNewER){
  # Computes the SUA "input" element (code 5302) for non-primary ICS
  # groups: existing input figures in 'data' are kept, missing ones are
  # derived as production (element 5510) divided by the extraction
  # rate (element 5423).
  # Argument:
  #   data: a data.table in long SUA format with columns
  #     geographicAreaM49_fi, timePointYears, measuredItemFaostat_L2,
  #     availability, measuredElementSuaFbs, Value,
  #     flagObservationStatus and flagMethod.
  # Returns 'data' with its input (5302) rows replaced by the merged
  # official/computed series.
  # NOTE(review): relies on 'primary' (vector of primary item codes)
  # being defined in the calling environment -- confirm.
  # Isolate needed elements
  # Previous input available
  input <- data[ !measuredItemFaostat_L2 %in% primary & measuredElementSuaFbs == '5302', ]
  #Er and production needed to compute input if no previous data
  Er <- data[ !measuredItemFaostat_L2 %in% primary & measuredElementSuaFbs == '5423', ]
  Prod <- data[ !measuredItemFaostat_L2 %in% primary & measuredElementSuaFbs == '5510', ]
  # Calculate Input -- inner join, so only cells with both a production
  # and an extraction-rate record survive.
  InputCalc <- merge(Prod, Er, by = c("geographicAreaM49_fi",
                                      "timePointYears", "measuredItemFaostat_L2",
                                      "availability"), suffixes = c("_prod", "_Er"))
  # Fix: warn about missing extraction rates BEFORE dropping those rows;
  # the original performed this check after the filter, so the message
  # could never be emitted.
  if(nrow(InputCalc[is.na(Value_Er)]) > 0 ){
    message('Missing extraction rates for some Ics groups')
  }
  InputCalc <- InputCalc[!is.na(Value_Er)]
  # input = production / extraction rate
  InputCalc[ , input := Value_prod / Value_Er]
  # Reshape the computed input back into long SUA format.
  data_compute31 <- melt(InputCalc,
                         id.vars = c("geographicAreaM49_fi", "timePointYears", "measuredItemFaostat_L2", "availability"),
                         measure.vars = "input",
                         value.name = "Value" ,
                         variable.name = "measuredElementSuaFbs", variable.factor = FALSE)
  data_compute31[measuredElementSuaFbs=="input",measuredElementSuaFbs:="5302"]
  # Computed figures get flags (I, i).
  data_compute31[ , ':='(flagObservationStatus = 'I', flagMethod = 'i', FBSsign = 0)]
  # See if any official input (full outer join with the existing series).
  # Fix: 'suffixes' spelled out instead of relying on partial argument
  # matching of 'suff'.
  comp31 <- merge(data_compute31, input, by = c("geographicAreaM49_fi",
                                                "timePointYears",
                                                "measuredItemFaostat_L2",
                                                "availability",
                                                "measuredElementSuaFbs"), all = TRUE,
                  suffixes = c('', 'Official'))
  # If previous data is not NA then it is assigned as input
  # Note: the Er should have been computed as ratio between Production and Input
  comp31[!is.na(ValueOfficial), c('Value','flagObservationStatus','flagMethod'):= list(ValueOfficial,
                                                                                       flagObservationStatusOfficial,
                                                                                       flagMethodOfficial)]
  comp31 <- comp31[ , c('ValueOfficial', 'flagObservationStatusOfficial',
                        'flagMethodOfficial') := NULL]
  # Remove all input data from the original data and add back the
  # merged official/computed input rows.
  dataNo31 <- data[measuredElementSuaFbs!="5302"]
  SUAinput <- rbind(dataNo31, comp31[,.(geographicAreaM49_fi, timePointYears, measuredItemFaostat_L2, availability, measuredElementSuaFbs, Value, flagObservationStatus, flagMethod)])
  return(SUAinput)
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rm_non_words.R
\name{rm_non_words}
\alias{rm_non_words}
\title{Remove/Replace/Extract Non-Words}
\usage{
rm_non_words(text.var, trim = !extract, clean = TRUE,
pattern = "@rm_non_words", replacement = " ", extract = FALSE,
dictionary = getOption("regex.library"), ...)
}
\arguments{
\item{text.var}{The text variable.}
\item{trim}{logical. If \code{TRUE} removes leading and trailing white
spaces.}
\item{clean}{logical. If \code{TRUE} extra white spaces and escaped
character will be removed.}
\item{pattern}{A character string containing a regular expression (or
character string for \code{fixed = TRUE}) to be matched in the given
character vector. Default, \code{@rm_non_words} uses the
\code{rm_non_words} regex from the regular expression dictionary from
the \code{dictionary} argument.}
\item{replacement}{Replacement for matched \code{pattern} (\bold{\emph{Note:}} default is
" ", whereas most \pkg{qdapRegex} functions replace with "").}
\item{extract}{logical. If \code{TRUE} the non-words are extracted into a
list of vectors.}
\item{dictionary}{A dictionary of canned regular expressions to search within
if \code{pattern} begins with \code{"@rm_"}.}
\item{\dots}{Other arguments passed to \code{\link[base]{gsub}}.}
}
\value{
Returns a character string with non-words removed.
}
\description{
\code{rm_non_words} - Remove/replace/extract non-words (Anything that's not a
letter or apostrophe; also removes multiple white spaces) from a string.
}
\note{
Setting the argument \code{extract = TRUE} is not very useful. Use the
following setup instead (see \bold{Examples} for a demonstration).\cr
\code{rm_default(x, pattern = "[^A-Za-z' ]", extract=TRUE)}
}
\examples{
x <- c(
"I like 56 dogs!",
"It's seventy-two feet from the px290.",
NA,
"What",
"that1is2a3way4to5go6.",
"What do you*\% want? For real\%; I think you'll see.",
"Oh some <html>code</html> to remove"
)
rm_non_words(x)
rm_non_words(x, extract=TRUE)
## For extraction purposes the following setup is more useful:
rm_default(x, pattern = "[^A-Za-z' ]", extract=TRUE)
}
\seealso{
\code{\link[base]{gsub}},
\code{\link[stringi]{stri_extract_all_regex}}
Other rm_.functions: \code{\link{as_numeric}},
\code{\link{as_numeric2}}, \code{\link{rm_number}};
\code{\link{as_time}}, \code{\link{as_time2}},
\code{\link{rm_time}}, \code{\link{rm_transcript_time}};
\code{\link{rm_abbreviation}}; \code{\link{rm_angle}},
\code{\link{rm_bracket}},
\code{\link{rm_bracket_multiple}},
\code{\link{rm_curly}}, \code{\link{rm_round}},
\code{\link{rm_square}}; \code{\link{rm_between}},
\code{\link{rm_between_multiple}};
\code{\link{rm_caps_phrase}}; \code{\link{rm_caps}};
\code{\link{rm_citation_tex}}; \code{\link{rm_citation}};
\code{\link{rm_city_state_zip}};
\code{\link{rm_city_state}}; \code{\link{rm_date}};
\code{\link{rm_default}}; \code{\link{rm_dollar}};
\code{\link{rm_email}}; \code{\link{rm_emoticon}};
\code{\link{rm_endmark}}; \code{\link{rm_hash}};
\code{\link{rm_nchar_words}}; \code{\link{rm_non_ascii}};
\code{\link{rm_percent}}; \code{\link{rm_phone}};
\code{\link{rm_postal_code}};
\code{\link{rm_repeated_characters}};
\code{\link{rm_repeated_phrases}};
\code{\link{rm_repeated_words}}; \code{\link{rm_tag}};
\code{\link{rm_title_name}};
\code{\link{rm_twitter_url}}, \code{\link{rm_url}};
\code{\link{rm_white}}, \code{\link{rm_white_bracket}},
\code{\link{rm_white_colon}},
\code{\link{rm_white_comma}},
\code{\link{rm_white_endmark}},
\code{\link{rm_white_lead}},
\code{\link{rm_white_lead_trail}},
\code{\link{rm_white_multiple}},
\code{\link{rm_white_punctuation}},
\code{\link{rm_white_trail}}; \code{\link{rm_zip}}
}
\keyword{non-words}
| /man/rm_non_words.Rd | no_license | Avinash-Raj/qdapRegex | R | false | false | 3,834 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rm_non_words.R
\name{rm_non_words}
\alias{rm_non_words}
\title{Remove/Replace/Extract Non-Words}
\usage{
rm_non_words(text.var, trim = !extract, clean = TRUE,
pattern = "@rm_non_words", replacement = " ", extract = FALSE,
dictionary = getOption("regex.library"), ...)
}
\arguments{
\item{text.var}{The text variable.}
\item{trim}{logical. If \code{TRUE} removes leading and trailing white
spaces.}
\item{clean}{logical. If \code{TRUE} extra white spaces and escaped
character will be removed.}
\item{pattern}{A character string containing a regular expression (or
character string for \code{fixed = TRUE}) to be matched in the given
character vector. Default, \code{@rm_non_words} uses the
\code{rm_non_words} regex from the regular expression dictionary from
the \code{dictionary} argument.}
\item{replacement}{Replacement for matched \code{pattern} (\bold{\emph{Note:}} default is
" ", whereas most \pkg{qdapRegex} functions replace with "").}
\item{extract}{logical. If \code{TRUE} the non-words are extracted into a
list of vectors.}
\item{dictionary}{A dictionary of canned regular expressions to search within
if \code{pattern} begins with \code{"@rm_"}.}
\item{\dots}{Other arguments passed to \code{\link[base]{gsub}}.}
}
\value{
Returns a character string with non-words removed.
}
\description{
\code{rm_non_words} - Remove/replace/extract non-words (Anything that's not a
letter or apostrophe; also removes multiple white spaces) from a string.
}
\note{
Setting the argument \code{extract = TRUE} is not very useful. Use the
following setup instead (see \bold{Examples} for a demonstration).\cr
\code{rm_default(x, pattern = "[^A-Za-z' ]", extract=TRUE)}
}
\examples{
x <- c(
"I like 56 dogs!",
"It's seventy-two feet from the px290.",
NA,
"What",
"that1is2a3way4to5go6.",
"What do you*\% want? For real\%; I think you'll see.",
"Oh some <html>code</html> to remove"
)
rm_non_words(x)
rm_non_words(x, extract=TRUE)
## For extraction purposes the following setup is more useful:
rm_default(x, pattern = "[^A-Za-z' ]", extract=TRUE)
}
\seealso{
\code{\link[base]{gsub}},
\code{\link[stringi]{stri_extract_all_regex}}
Other rm_.functions: \code{\link{as_numeric}},
\code{\link{as_numeric2}}, \code{\link{rm_number}};
\code{\link{as_time}}, \code{\link{as_time2}},
\code{\link{rm_time}}, \code{\link{rm_transcript_time}};
\code{\link{rm_abbreviation}}; \code{\link{rm_angle}},
\code{\link{rm_bracket}},
\code{\link{rm_bracket_multiple}},
\code{\link{rm_curly}}, \code{\link{rm_round}},
\code{\link{rm_square}}; \code{\link{rm_between}},
\code{\link{rm_between_multiple}};
\code{\link{rm_caps_phrase}}; \code{\link{rm_caps}};
\code{\link{rm_citation_tex}}; \code{\link{rm_citation}};
\code{\link{rm_city_state_zip}};
\code{\link{rm_city_state}}; \code{\link{rm_date}};
\code{\link{rm_default}}; \code{\link{rm_dollar}};
\code{\link{rm_email}}; \code{\link{rm_emoticon}};
\code{\link{rm_endmark}}; \code{\link{rm_hash}};
\code{\link{rm_nchar_words}}; \code{\link{rm_non_ascii}};
\code{\link{rm_percent}}; \code{\link{rm_phone}};
\code{\link{rm_postal_code}};
\code{\link{rm_repeated_characters}};
\code{\link{rm_repeated_phrases}};
\code{\link{rm_repeated_words}}; \code{\link{rm_tag}};
\code{\link{rm_title_name}};
\code{\link{rm_twitter_url}}, \code{\link{rm_url}};
\code{\link{rm_white}}, \code{\link{rm_white_bracket}},
\code{\link{rm_white_colon}},
\code{\link{rm_white_comma}},
\code{\link{rm_white_endmark}},
\code{\link{rm_white_lead}},
\code{\link{rm_white_lead_trail}},
\code{\link{rm_white_multiple}},
\code{\link{rm_white_punctuation}},
\code{\link{rm_white_trail}}; \code{\link{rm_zip}}
}
\keyword{non-words}
|
"quagep" <-
function(f, para, paracheck=TRUE) {
    # Quantile function of the Generalized Exponential Poisson (GEP)
    # distribution.
    # Arguments:
    #   f: vector of nonexceedance probabilities.
    #   para: parameter object; para$para holds the three GEP
    #     parameters (B, K, H) -- presumably as produced by pargep(),
    #     TODO confirm against the parameter-estimation code.
    #   paracheck: should the parameters be validated first?
    # Returns the quantiles for 'f', or NULL (via bare return()) when
    # the probabilities or parameters fail validation.
    if(! check.fs(f)) return()
    if(paracheck == TRUE) {
      if(! are.pargep.valid(para)) return()
    }
    attributes(para$para) <- NULL
    B <- para$para[1]
    K <- para$para[2]
    H <- para$para[3]
    # Fix: seq(1:length(f)) misbehaves for zero-length 'f'
    # (1:0 is c(1, 0)); seq_along() is the safe idiom.
    ix <- seq_along(f)
    # Silence warnings from log() on out-of-domain arguments (they
    # produce NaN, repaired below); on.exit() guarantees the previous
    # setting is restored even if an error occurs.
    ops <- options(warn=-1)
    on.exit(options(ops))
    x <- -B * log(1 + (1/H) * log(1 - f^(1/K) * (1-exp(-H)) ) )
    for(i in ix[is.nan(x)]) {
       warning("The ",i,"(th) value of 'f' results in NaN (assuming then f == 1), ",
               "decrementing from the Machine's small to an f that just hits non NaN")
       # Step f back from 1 by machine-epsilon roots until the quantile
       # becomes computable.
       j <- 0
       repeat {
          j <- j + 1
          aF <- 1 - .Machine$double.eps^(1/j)
          aX <- -B * log(1 + (1/H) * log(1 - aF^(1/K) * (1-exp(-H)) ) )
          if(! is.nan(aX)) {
             x[i] <- aX
             break
          }
       }
    }
    return(x)
}
| /lmomco/R/quagep.R | no_license | ingted/R-Examples | R | false | false | 884 | r | "quagep" <-
function(f, para, paracheck=TRUE) {
    # Quantile function of the Generalized Exponential Poisson (GEP)
    # distribution for nonexceedance probabilities 'f' and a parameter
    # object 'para' (para$para = c(B, K, H)). Parameter validation can
    # be skipped with paracheck=FALSE; bare return() (NULL) signals
    # invalid probabilities or parameters.
    if(! check.fs(f)) return()
    if(paracheck == TRUE) {
      if(! are.pargep.valid(para)) return()
    }
    attributes(para$para) <- NULL
    B <- para$para[1]; K <- para$para[2]; H <- para$para[3]
    # Warnings are muted while evaluating the closed form: invalid
    # log() arguments yield NaN entries, which are repaired afterwards.
    ops <- options(warn=-1)
    x <- -B * log(1 + (1/H) * log(1 - f^(1/K) * (1-exp(-H)) ) )
    # Repair NaN entries (effectively f == 1) by stepping f just below
    # one until the quantile is finite.
    for(i in which(is.nan(x))) {
       warning("The ",i,"(th) value of 'f' results in NaN (assuming then f == 1), ",
               "decrementing from the Machine's small to an f that just hits non NaN")
       tries <- 0
       found <- FALSE
       while(! found) {
          tries <- tries + 1
          f.try <- 1 - .Machine$double.eps^(1/tries)
          x.try <- -B * log(1 + (1/H) * log(1 - f.try^(1/K) * (1-exp(-H)) ) )
          if(! is.nan(x.try)) {
             x[i] <- x.try
             found <- TRUE
          }
       }
    }
    options(ops)
    return(x)
}
# Plots the topology of a basin object ('createBasin' or 'sim') as a
# network diagram via ggnet2: reservoirs, reaches, junctions,
# sub-basins and diversions become nodes (coloured/shaped by type)
# connected along their downstream links.
plot.createBasin<-
function(x,...)
{
   # 'x' is required and must be a 'sim' or 'createBasin' object.
   if(missing(x))
   {
      stop("missing object!")
   }
   # NOTE(review): class(x)==c('sim','createBasin') compares
   # elementwise; inherits(x, c('sim','createBasin')) would be the
   # idiomatic test.
   if(!any(class(x)==c('sim','createBasin')))
   {
      stop("bad class type!")
   }
   # Work on the element lists stored under $operation.
   x <-x$operation
   nRes<-length(x$reservoirs)
   nRec<-length(x$reachs)
   nJun<-length(x$junctions)
   nSub<-length(x$subbasins)
   nDiv<-length(x$diversions)
   # labelMat: row 1 = element code (label), row 2 = downstream code.
   labelMat<-matrix(NA,2,nRes+nRec+nJun+nSub+nDiv)
   if(ncol(labelMat)<1){stop("At least one element is needed for simulation !")}
   name<-c()
   # The loop indices double as per-type element counts afterwards.
   i<-0;j<-0;k<-0;l<-0;m<-0
   # Fill code/downstream/name for every component type.
   if(nRes>0){for(i in 1:nRes){labelMat[1,i] <-x$reservoirs[[i]]$label;labelMat[2,i] <-x$reservoirs[[i]]$downstream; name<-c(name,x$reservoirs[[i]]$name)}}
   if(nRec>0){for(j in 1:nRec){labelMat[1,j+nRes] <-x$reachs [[j]]$label;labelMat[2,j+nRes] <-x$reachs [[j]]$downstream; name<-c(name,x$reachs [[j]]$name)}}
   if(nJun>0){for(k in 1:nJun){labelMat[1,k+nRec+nRes] <-x$junctions [[k]]$label;labelMat[2,k+nRec+nRes] <-x$junctions [[k]]$downstream; name<-c(name,x$junctions [[k]]$name)}}
   if(nSub>0){for(l in 1:nSub){labelMat[1,l+nRec+nRes+nJun] <-x$subbasins [[l]]$label;labelMat[2,l+nRec+nRes+nJun] <-x$subbasins [[l]]$downstream; name<-c(name,x$subbasins [[l]]$name)}}
   # Each diversion is registered twice: its main downstream link, plus
   # an extra column below for its divert-to link (name appended twice).
   if(nDiv>0){for(m in 1:nDiv){labelMat[1,m+nRec+nRes+nJun+nSub]<-x$diversions[[m]]$label;labelMat[2,m+nRec+nRes+nJun+nSub]<-x$diversions[[m]]$downstream; name<-c(name,x$diversions[[m]]$name,x$diversions[[m]]$name)}}
   if(nDiv>0){for(m in 1:nDiv){labelMat<-cbind(labelMat,c(x$diversions[[m]]$label,x$diversions[[m]]$divertTo))}}
   colnames(labelMat)<-name
   rownames(labelMat)<-c("code","downstream")
   # NOTE(review): the condition below (x > 1 AND x < 1) can never be
   # TRUE, so the outlet-count check is dead code; '!= 1' was probably
   # intended.
   if(sum(is.na(labelMat[2,]))>1 & sum(is.na(labelMat[2,]))<1){stop("wrong number of outlet!")}
   # Elements whose code never appears as a downstream are the most
   # upstream nodes (currently unused).
   idUpstream<-which(is.na(match(labelMat[1,],labelMat[2,]))==TRUE)
   type<-c('Reservoir','Reach','Junction','Sub-basin','Diversion')
   # Keep only the element types actually present; the colour/shape/
   # size palettes below are subset in step with 'type'.
   availableTypes<-c(ifelse(i>0,1,NA),ifelse(j>0,1,NA),ifelse(k>0,1,NA),ifelse(l>0,1,NA),ifelse(m>0,1,NA))
   type<-type[which(!is.na(availableTypes))]
   types<-rep(type,c(i,j,k,l,2*m)[which(!is.na(availableTypes))])
   color.palette<-c(5,1,2,3,4)[which(!is.na(availableTypes))]
   shape.palette <-c(17,1,3,15,10)[which(!is.na(availableTypes))]
   size.palette<-c(10,0.01,10,10,10)[which(!is.na(availableTypes))]
   names(size.palette)<-type
   names(shape.palette)<-type
   names(color.palette)<-type
   # Adjacency matrix: net[n, con] == 1 when element n drains into con.
   net<-matrix(0,nRes+nRec+nJun+nSub+nDiv*2,nRes+nRec+nJun+nSub+nDiv*2)
   for(n in 1:ncol(net))
   {
      con<-which(labelMat[2,n]==labelMat[1,])
      if(length(con)>0) {net[n,con]<-1}
   }
   colnames(net)<-colnames(labelMat)
   rownames(net)<-colnames(labelMat)
   # Collapse each diversion's two rows (main + divert-to) into one and
   # drop the duplicated columns introduced by the double registration.
   Net<-net[1:(nRes+nRec+nJun+nSub),]
   if(nDiv>0)
   {
      for(i in 1:nDiv)
      {
         Net<-rbind(Net,net[nRes+nRec+nJun+nSub+(i-1)*2+1,,drop=FALSE]+net[nRes+nRec+nJun+nSub+(i)*2,,drop=FALSE])
      }
      Net<-Net[,-which(duplicated(labelMat[1,]))]
   }
   net<-network(Net)
   set.vertex.attribute(net,"type",types)
   # NOTE(review): stray empty argument (double comma) after
   # color='type'; it is passed to ggnet2 as a missing positional
   # argument.
   ggnet2(net,color='type',,size='type',shape='type',
          color.palette=color.palette,shape.palette=shape.palette,size.palette=size.palette,
          label=TRUE,arrow.size = 9, arrow.gap = 0.025)+guides(size = FALSE)
} | /R/plot.createBasin.R | no_license | cran/RHMS | R | false | false | 3,321 | r | plot.createBasin<-
function(x,...)
{
   # Plots the topology of a basin object ('createBasin' or 'sim') as a
   # network diagram via ggnet2: reservoirs, reaches, junctions,
   # sub-basins and diversions become nodes (coloured/shaped by type)
   # connected along their downstream links.
   # 'x' is required and must be a 'sim' or 'createBasin' object.
   if(missing(x))
   {
      stop("missing object!")
   }
   # NOTE(review): class(x)==c('sim','createBasin') compares
   # elementwise; inherits(x, c('sim','createBasin')) would be the
   # idiomatic test.
   if(!any(class(x)==c('sim','createBasin')))
   {
      stop("bad class type!")
   }
   # Work on the element lists stored under $operation.
   x <-x$operation
   nRes<-length(x$reservoirs)
   nRec<-length(x$reachs)
   nJun<-length(x$junctions)
   nSub<-length(x$subbasins)
   nDiv<-length(x$diversions)
   # labelMat: row 1 = element code (label), row 2 = downstream code.
   labelMat<-matrix(NA,2,nRes+nRec+nJun+nSub+nDiv)
   if(ncol(labelMat)<1){stop("At least one element is needed for simulation !")}
   name<-c()
   # The loop indices double as per-type element counts afterwards.
   i<-0;j<-0;k<-0;l<-0;m<-0
   # Fill code/downstream/name for every component type.
   if(nRes>0){for(i in 1:nRes){labelMat[1,i] <-x$reservoirs[[i]]$label;labelMat[2,i] <-x$reservoirs[[i]]$downstream; name<-c(name,x$reservoirs[[i]]$name)}}
   if(nRec>0){for(j in 1:nRec){labelMat[1,j+nRes] <-x$reachs [[j]]$label;labelMat[2,j+nRes] <-x$reachs [[j]]$downstream; name<-c(name,x$reachs [[j]]$name)}}
   if(nJun>0){for(k in 1:nJun){labelMat[1,k+nRec+nRes] <-x$junctions [[k]]$label;labelMat[2,k+nRec+nRes] <-x$junctions [[k]]$downstream; name<-c(name,x$junctions [[k]]$name)}}
   if(nSub>0){for(l in 1:nSub){labelMat[1,l+nRec+nRes+nJun] <-x$subbasins [[l]]$label;labelMat[2,l+nRec+nRes+nJun] <-x$subbasins [[l]]$downstream; name<-c(name,x$subbasins [[l]]$name)}}
   # Each diversion is registered twice: its main downstream link, plus
   # an extra column below for its divert-to link (name appended twice).
   if(nDiv>0){for(m in 1:nDiv){labelMat[1,m+nRec+nRes+nJun+nSub]<-x$diversions[[m]]$label;labelMat[2,m+nRec+nRes+nJun+nSub]<-x$diversions[[m]]$downstream; name<-c(name,x$diversions[[m]]$name,x$diversions[[m]]$name)}}
   if(nDiv>0){for(m in 1:nDiv){labelMat<-cbind(labelMat,c(x$diversions[[m]]$label,x$diversions[[m]]$divertTo))}}
   colnames(labelMat)<-name
   rownames(labelMat)<-c("code","downstream")
   # NOTE(review): the condition below (x > 1 AND x < 1) can never be
   # TRUE, so the outlet-count check is dead code; '!= 1' was probably
   # intended.
   if(sum(is.na(labelMat[2,]))>1 & sum(is.na(labelMat[2,]))<1){stop("wrong number of outlet!")}
   # Elements whose code never appears as a downstream are the most
   # upstream nodes (currently unused).
   idUpstream<-which(is.na(match(labelMat[1,],labelMat[2,]))==TRUE)
   type<-c('Reservoir','Reach','Junction','Sub-basin','Diversion')
   # Keep only the element types actually present; the colour/shape/
   # size palettes below are subset in step with 'type'.
   availableTypes<-c(ifelse(i>0,1,NA),ifelse(j>0,1,NA),ifelse(k>0,1,NA),ifelse(l>0,1,NA),ifelse(m>0,1,NA))
   type<-type[which(!is.na(availableTypes))]
   types<-rep(type,c(i,j,k,l,2*m)[which(!is.na(availableTypes))])
   color.palette<-c(5,1,2,3,4)[which(!is.na(availableTypes))]
   shape.palette <-c(17,1,3,15,10)[which(!is.na(availableTypes))]
   size.palette<-c(10,0.01,10,10,10)[which(!is.na(availableTypes))]
   names(size.palette)<-type
   names(shape.palette)<-type
   names(color.palette)<-type
   # Adjacency matrix: net[n, con] == 1 when element n drains into con.
   net<-matrix(0,nRes+nRec+nJun+nSub+nDiv*2,nRes+nRec+nJun+nSub+nDiv*2)
   for(n in 1:ncol(net))
   {
      con<-which(labelMat[2,n]==labelMat[1,])
      if(length(con)>0) {net[n,con]<-1}
   }
   colnames(net)<-colnames(labelMat)
   rownames(net)<-colnames(labelMat)
   # Collapse each diversion's two rows (main + divert-to) into one and
   # drop the duplicated columns introduced by the double registration.
   Net<-net[1:(nRes+nRec+nJun+nSub),]
   if(nDiv>0)
   {
      for(i in 1:nDiv)
      {
         Net<-rbind(Net,net[nRes+nRec+nJun+nSub+(i-1)*2+1,,drop=FALSE]+net[nRes+nRec+nJun+nSub+(i)*2,,drop=FALSE])
      }
      Net<-Net[,-which(duplicated(labelMat[1,]))]
   }
   net<-network(Net)
   set.vertex.attribute(net,"type",types)
   # NOTE(review): stray empty argument (double comma) after
   # color='type'; it is passed to ggnet2 as a missing positional
   # argument.
   ggnet2(net,color='type',,size='type',shape='type',
          color.palette=color.palette,shape.palette=shape.palette,size.palette=size.palette,
          label=TRUE,arrow.size = 9, arrow.gap = 0.025)+guides(size = FALSE)
}
#--------------------------------------------------------------------
# simage.R (npsp package)
#--------------------------------------------------------------------
# simage S3 generic
# simage.default
# simage.data.grid
# plot.np.den
#
# Based on image.plot and drape.plot functions from package fields:
# fields, Tools for spatial data
# Copyright 2004-2013, Institute for Mathematics Applied Geosciences
# University Corporation for Atmospheric Research
# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
#
# (c) Ruben Fernandez-Casal
# Created: Mar 2014 Last changed: Aug 2014
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# simage
#--------------------------------------------------------------------
#' Image plot with a color scale
#'
#' \code{simage} (generic function) draws an image (a grid of colored rectangles)
#' and (optionally) adds a legend strip with the color scale
#' (calls \code{\link{splot}} and \code{\link{image}}).
#'
#' @seealso \code{\link{splot}}, \code{\link{spoints}}, \code{\link{spersp}},
#' \code{\link{image}}, \code{\link[fields]{image.plot}}, \code{\link{data.grid}}.
#' @section Side Effects: After exiting, the plotting region may be changed
#' (\code{\link{par}("plt")}) to make it possible to add more features to the plot
#' (set \code{graphics.reset = FALSE} to avoid this).
#' @author
#' Based on \code{\link[fields]{image.plot}} function from package \pkg{fields}:
#' fields, Tools for spatial data.
#' Copyright 2004-2013, Institute for Mathematics Applied Geosciences.
#' University Corporation for Atmospheric Research.
#'
#' Modified by Ruben Fernandez-Casal <rubenfcasal@@gmail.com>.
#' @keywords hplot
#' @export
#--------------------------------------------------------------------
simage <- function(x, ...) UseMethod("simage")
# S3 generic function simage: dispatches on class(x) to simage.default
# or simage.data.grid (defined below in this file).
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# simage.default
#--------------------------------------------------------------------
#' @rdname simage
#' @method simage default
#' @param x grid values for \code{x} coordinate. If \code{x} is a list,
#' its components \code{x$x} and \code{x$y} are used for \code{x}
#' and \code{y}, respectively. For compatibility with \code{\link{image}}, if the
#' list has component \code{z} this is used for \code{s}.
#' @param y grid values for \code{y} coordinate.
#' @param s matrix containing the values to be used for coloring the rectangles (NAs are allowed).
#' Note that \code{x} can be used instead of \code{s} for convenience.
#' @param legend logical; if \code{TRUE} (default), the plotting region is splitted into two parts,
#' drawing the image plot in one and the legend with the color scale in the other.
#' If \code{FALSE} only the image plot is drawn and the arguments related
#' to the legend are ignored (\code{\link{splot}} is not called).
#' @param ... additional graphical parameters (to be passed to \code{\link{image}}
#' or \code{simage.default}; e.g. \code{xlim, ylim,} ...). NOTE:
#' graphical arguments passed here will only have impact on the main plot.
#' To change the graphical defaults for the legend use the \code{\link{par}}
#' function beforehand (e.g. \code{par(cex.lab = 2)} to increase colorbar labels).
#' @return Invisibly returns a list with the following 3 components:
#' \item{bigplot}{plot coordinates of the main plot. These values may be useful for
#' drawing a plot without the legend that is the same size as the plots with legends.}
#' \item{smallplot}{plot coordinates of the secondary plot (legend strip).}
#' \item{old.par}{previous graphical parameters (\code{par(old.par)}
#' will reset plot parameters to the values before entering the function).}
#' @inheritParams splot
#' @inheritParams spoints
#' @examples
#'
#' #
#' # Regularly spaced 2D data
#' nx <- c(40, 40) # ndata = prod(nx)
#' x1 <- seq(-1, 1, length.out = nx[1])
#' x2 <- seq(-1, 1, length.out = nx[2])
#' trend <- outer(x1, x2, function(x,y) x^2 - y^2)
#' simage( x1, x2, trend, main = 'Trend')
#'
#' #
#' # Multiple plots
#' set.seed(1)
#' y <- trend + rnorm(prod(nx), 0, 0.1)
#' x <- as.matrix(expand.grid(x1 = x1, x2 = x2)) # two-dimensional grid
#' # local polynomial kernel regression
#' lp <- locpol(x, y, nbin = nx, h = diag(c(0.3, 0.3)))
#' # 1x2 plot
#' old.par <- par(mfrow = c(1,2))
#' simage( x1, x2, y, main = 'Data')
#' simage(lp, main = 'Estimated trend')
#' par(old.par)
#' @export
#--------------------------------------------------------------------
simage.default <- function(x = seq(0, 1, len = nrow(s)), y = seq(0, 1,
    len = ncol(s)), s, slim = range(s, finite = TRUE), col = jet.colors(128),
    breaks = NULL, legend = TRUE, horizontal = FALSE, legend.shrink = 1.0,
    legend.width = 1.2, legend.mar = ifelse(horizontal, 3.1, 5.1), legend.lab = NULL,
    bigplot = NULL, smallplot = NULL, lab.breaks = NULL, axis.args = NULL,
    legend.args = NULL, graphics.reset = FALSE, xlab = NULL, ylab = NULL,
    ...) {
#--------------------------------------------------------------------
    # Image plot with an optional color-scale legend (see roxygen block above).
    # First resolve the image()-style argument conventions: 's' may be supplied
    # directly, through 'x' as a matrix, or through 'x' as a list with
    # components x, y and z.
    if (missing(s)) {
        if (!missing(x)) {
            if (is.list(x)) {
                s <- x$z
                y <- x$y
                x <- x$x
            }
            else {
                s <- x
                if (!is.matrix(s))
                    stop("argument 's' must be a matrix")
                x <- seq.int(0, 1, length.out = nrow(s))
            }
        }
        else stop("no 's' matrix specified")
    }
    else if (is.list(x)) {
        # 's' given and 'x' is a list: take coordinates (and default axis
        # labels of the form "<name>$x" / "<name>$y") from its components.
        xn <- deparse(substitute(x))
        if (missing(xlab)) xlab <- paste(xn, "x", sep = "$")
        if (missing(ylab)) ylab <- paste(xn, "y", sep = "$")
        y <- x$y
        x <- x$x
    }
    if (!is.matrix(s)) {
        # FIX: '||' (scalar, short-circuiting) instead of '|' -- the condition
        # is scalar here.  Braces also disambiguate the dangling 'else'.
        if (missing(x) || missing(y)) {
            stop("argument 's' must be a matrix")
        } else {
            dim(s) <- c(length(x), length(y))
        }
    }
    # Default axis labels from the caller's expressions.
    if (is.null(xlab))
        xlab <- if (!missing(x))
            deparse(substitute(x))
        else "X"
    if (is.null(ylab))
        ylab <- if (!missing(y))
            deparse(substitute(y))
        else "Y"
    if (legend)
        # image in splot checks breaks and other parameters...
        res <- splot(slim = slim, col = col, breaks = breaks, horizontal = horizontal,
            legend.shrink = legend.shrink, legend.width = legend.width,
            legend.mar = legend.mar, legend.lab = legend.lab,
            bigplot = bigplot, smallplot = smallplot, lab.breaks = lab.breaks,
            axis.args = axis.args, legend.args = legend.args)
    else {
        # No legend: keep the whole plotting region for the image.
        old.par <- par(no.readonly = TRUE)
        # par(xpd = FALSE)
        res <- list(bigplot = old.par$plt, smallplot = NA, old.par = old.par)
    }
    if (is.null(breaks)) {
        # Compute breaks (in 'cut.default' style...): widen the range by
        # ds/1000 on each side so the extreme values fall inside the bins.
        ds <- diff(slim)
        if (ds == 0) ds <- abs(slim[1L])
        breaks <- seq.int(slim[1L] - ds/1000, slim[2L] + ds/1000, length.out = length(col) + 1)
    }
    image(x, y, s, xlab = xlab, ylab = ylab, col = col, breaks = breaks, ...)
    box()
    if (graphics.reset) par(res$old.par)
    return(invisible(res))
#--------------------------------------------------------------------
} # simage.default
#--------------------------------------------------------------------
#' @rdname simage
#' @method simage data.grid
#' @param data.ind integer (or character) with the index (or name) of the component
#' containing the values to be used for coloring the rectangles.
#' @export
simage.data.grid <- function(x, data.ind = 1, xlab = NULL, ylab = NULL, ...) {
#--------------------------------------------------------------------
    # Image plot of one data component of a two-dimensional 'data.grid'
    # object; 'data.ind' selects the component (index or name).
    # FIX: '||' instead of '|' -- with '|' both sides are always evaluated,
    # so a non-'data.grid' x makes 'x$grid$nd' fail (or yield a zero-length
    # condition) before the informative error message below is reached.
    if (!inherits(x, "data.grid") || x$grid$nd != 2L)
        stop("function only works for two-dimensional gridded data ('data.grid'-class objects)")
    coorvs <- coordvalues(x)            # coordinate values of the grid nodes
    ns <- names(coorvs)
    if (is.null(xlab)) xlab <- ns[1]    # default labels: coordinate names
    if (is.null(ylab)) ylab <- ns[2]
    res <- simage.default(coorvs[[1]], coorvs[[2]], s = x[[data.ind]],
                xlab = xlab, ylab = ylab, ...)
    return(invisible(res))
#--------------------------------------------------------------------
} # simage.data.grid
#--------------------------------------------------------------------
#' @rdname simage
#' @method plot np.den
#' @description \code{plot.np.den} calls \code{simage.data.grid}
#' (\code{\link{contour}} and \code{\link{points}} also by default).
#' @param log logical; if \code{TRUE} (default), \code{log(x$est)} is plotted.
#' @param contour logical; if \code{TRUE} (default), contour lines are added.
#' @param points logical; if \code{TRUE} (default), points at \code{x$data$x} are drawn.
#' @param tolerance tolerance value (lower values are masked).
#' @export
plot.np.den <- function(x, y = NULL, log = TRUE, contour = TRUE, points = TRUE,
    col = hot.colors(128), tolerance = npsp.tolerance(), ...){
    # Image plot of a nonparametric density estimate ('np.den' object):
    # estimates below 'tolerance' are masked, logs are taken by default, and
    # contour lines plus the data locations are overlaid on the simage() plot.
    # 'y' is accepted for plot()-generic compatibility but is not used here.
# if (!inherits(x, "data.grid") | x$grid$nd != 2L)
#     stop("function only works for two-dimensional gridded data ('data.grid'-class objects)")
    is.na(x$est) <- x$est < tolerance   # mask: set estimates below tolerance to NA
    if (log) x$est <- log(x$est)        # plot log(density) by default
    ret <- simage(x, col = col, ...) # checks x$grid$nd != 2L
    if (contour) contour(x, add = TRUE)   # overlay contour lines
    if (points) points(x$data$x, pch = 21, bg = 'black', col = 'darkgray' )  # data locations
    return(invisible(ret))
#--------------------------------------------------------------------
} # plot.np.den | /R/simage.R | no_license | R4GIS/npsp | R | false | false | 9,644 | r |
# simage.R (npsp package)
#--------------------------------------------------------------------
# simage S3 generic
# simage.default
# simage.data.grid
# plot.np.den
#
# Based on image.plot and drape.plot functions from package fields:
# fields, Tools for spatial data
# Copyright 2004-2013, Institute for Mathematics Applied Geosciences
# University Corporation for Atmospheric Research
# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
#
# (c) Ruben Fernandez-Casal
# Created: Mar 2014 Last changed: Aug 2014
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# simage
#--------------------------------------------------------------------
#' Image plot with a color scale
#'
#' \code{simage} (generic function) draws an image (a grid of colored rectangles)
#' and (optionally) adds a legend strip with the color scale
#' (calls \code{\link{splot}} and \code{\link{image}}).
#'
#' @seealso \code{\link{splot}}, \code{\link{spoints}}, \code{\link{spersp}},
#' \code{\link{image}}, \code{\link[fields]{image.plot}}, \code{\link{data.grid}}.
#' @section Side Effects: After exiting, the plotting region may be changed
#' (\code{\link{par}("plt")}) to make it possible to add more features to the plot
#' (set \code{graphics.reset = FALSE} to avoid this).
#' @author
#' Based on \code{\link[fields]{image.plot}} function from package \pkg{fields}:
#' fields, Tools for spatial data.
#' Copyright 2004-2013, Institute for Mathematics Applied Geosciences.
#' University Corporation for Atmospheric Research.
#'
#' Modified by Ruben Fernandez-Casal <rubenfcasal@@gmail.com>.
#' @keywords hplot
#' @export
#--------------------------------------------------------------------
simage <- function(x, ...) UseMethod("simage")
# S3 generic function simage: dispatches on class(x) to simage.default
# or simage.data.grid (defined below in this file).
#--------------------------------------------------------------------
#--------------------------------------------------------------------
# simage.default
#--------------------------------------------------------------------
#' @rdname simage
#' @method simage default
#' @param x grid values for \code{x} coordinate. If \code{x} is a list,
#' its components \code{x$x} and \code{x$y} are used for \code{x}
#' and \code{y}, respectively. For compatibility with \code{\link{image}}, if the
#' list has component \code{z} this is used for \code{s}.
#' @param y grid values for \code{y} coordinate.
#' @param s matrix containing the values to be used for coloring the rectangles (NAs are allowed).
#' Note that \code{x} can be used instead of \code{s} for convenience.
#' @param legend logical; if \code{TRUE} (default), the plotting region is splitted into two parts,
#' drawing the image plot in one and the legend with the color scale in the other.
#' If \code{FALSE} only the image plot is drawn and the arguments related
#' to the legend are ignored (\code{\link{splot}} is not called).
#' @param ... additional graphical parameters (to be passed to \code{\link{image}}
#' or \code{simage.default}; e.g. \code{xlim, ylim,} ...). NOTE:
#' graphical arguments passed here will only have impact on the main plot.
#' To change the graphical defaults for the legend use the \code{\link{par}}
#' function beforehand (e.g. \code{par(cex.lab = 2)} to increase colorbar labels).
#' @return Invisibly returns a list with the following 3 components:
#' \item{bigplot}{plot coordinates of the main plot. These values may be useful for
#' drawing a plot without the legend that is the same size as the plots with legends.}
#' \item{smallplot}{plot coordinates of the secondary plot (legend strip).}
#' \item{old.par}{previous graphical parameters (\code{par(old.par)}
#' will reset plot parameters to the values before entering the function).}
#' @inheritParams splot
#' @inheritParams spoints
#' @examples
#'
#' #
#' # Regularly spaced 2D data
#' nx <- c(40, 40) # ndata = prod(nx)
#' x1 <- seq(-1, 1, length.out = nx[1])
#' x2 <- seq(-1, 1, length.out = nx[2])
#' trend <- outer(x1, x2, function(x,y) x^2 - y^2)
#' simage( x1, x2, trend, main = 'Trend')
#'
#' #
#' # Multiple plots
#' set.seed(1)
#' y <- trend + rnorm(prod(nx), 0, 0.1)
#' x <- as.matrix(expand.grid(x1 = x1, x2 = x2)) # two-dimensional grid
#' # local polynomial kernel regression
#' lp <- locpol(x, y, nbin = nx, h = diag(c(0.3, 0.3)))
#' # 1x2 plot
#' old.par <- par(mfrow = c(1,2))
#' simage( x1, x2, y, main = 'Data')
#' simage(lp, main = 'Estimated trend')
#' par(old.par)
#' @export
#--------------------------------------------------------------------
simage.default <- function(x = seq(0, 1, len = nrow(s)), y = seq(0, 1,
    len = ncol(s)), s, slim = range(s, finite = TRUE), col = jet.colors(128),
    breaks = NULL, legend = TRUE, horizontal = FALSE, legend.shrink = 1.0,
    legend.width = 1.2, legend.mar = ifelse(horizontal, 3.1, 5.1), legend.lab = NULL,
    bigplot = NULL, smallplot = NULL, lab.breaks = NULL, axis.args = NULL,
    legend.args = NULL, graphics.reset = FALSE, xlab = NULL, ylab = NULL,
    ...) {
#--------------------------------------------------------------------
    # Image plot with an optional color-scale legend (see roxygen block above).
    # First resolve the image()-style argument conventions: 's' may be supplied
    # directly, through 'x' as a matrix, or through 'x' as a list with
    # components x, y and z.
    if (missing(s)) {
        if (!missing(x)) {
            if (is.list(x)) {
                s <- x$z
                y <- x$y
                x <- x$x
            }
            else {
                s <- x
                if (!is.matrix(s))
                    stop("argument 's' must be a matrix")
                x <- seq.int(0, 1, length.out = nrow(s))
            }
        }
        else stop("no 's' matrix specified")
    }
    else if (is.list(x)) {
        # 's' given and 'x' is a list: take coordinates (and default axis
        # labels of the form "<name>$x" / "<name>$y") from its components.
        xn <- deparse(substitute(x))
        if (missing(xlab)) xlab <- paste(xn, "x", sep = "$")
        if (missing(ylab)) ylab <- paste(xn, "y", sep = "$")
        y <- x$y
        x <- x$x
    }
    if (!is.matrix(s)) {
        # FIX: '||' (scalar, short-circuiting) instead of '|' -- the condition
        # is scalar here.  Braces also disambiguate the dangling 'else'.
        if (missing(x) || missing(y)) {
            stop("argument 's' must be a matrix")
        } else {
            dim(s) <- c(length(x), length(y))
        }
    }
    # Default axis labels from the caller's expressions.
    if (is.null(xlab))
        xlab <- if (!missing(x))
            deparse(substitute(x))
        else "X"
    if (is.null(ylab))
        ylab <- if (!missing(y))
            deparse(substitute(y))
        else "Y"
    if (legend)
        # image in splot checks breaks and other parameters...
        res <- splot(slim = slim, col = col, breaks = breaks, horizontal = horizontal,
            legend.shrink = legend.shrink, legend.width = legend.width,
            legend.mar = legend.mar, legend.lab = legend.lab,
            bigplot = bigplot, smallplot = smallplot, lab.breaks = lab.breaks,
            axis.args = axis.args, legend.args = legend.args)
    else {
        # No legend: keep the whole plotting region for the image.
        old.par <- par(no.readonly = TRUE)
        # par(xpd = FALSE)
        res <- list(bigplot = old.par$plt, smallplot = NA, old.par = old.par)
    }
    if (is.null(breaks)) {
        # Compute breaks (in 'cut.default' style...): widen the range by
        # ds/1000 on each side so the extreme values fall inside the bins.
        ds <- diff(slim)
        if (ds == 0) ds <- abs(slim[1L])
        breaks <- seq.int(slim[1L] - ds/1000, slim[2L] + ds/1000, length.out = length(col) + 1)
    }
    image(x, y, s, xlab = xlab, ylab = ylab, col = col, breaks = breaks, ...)
    box()
    if (graphics.reset) par(res$old.par)
    return(invisible(res))
#--------------------------------------------------------------------
} # simage.default
#--------------------------------------------------------------------
#' @rdname simage
#' @method simage data.grid
#' @param data.ind integer (or character) with the index (or name) of the component
#' containing the values to be used for coloring the rectangles.
#' @export
simage.data.grid <- function(x, data.ind = 1, xlab = NULL, ylab = NULL, ...) {
#--------------------------------------------------------------------
    # Image plot of one data component of a two-dimensional 'data.grid'
    # object; 'data.ind' selects the component (index or name).
    # FIX: '||' instead of '|' -- with '|' both sides are always evaluated,
    # so a non-'data.grid' x makes 'x$grid$nd' fail (or yield a zero-length
    # condition) before the informative error message below is reached.
    if (!inherits(x, "data.grid") || x$grid$nd != 2L)
        stop("function only works for two-dimensional gridded data ('data.grid'-class objects)")
    coorvs <- coordvalues(x)            # coordinate values of the grid nodes
    ns <- names(coorvs)
    if (is.null(xlab)) xlab <- ns[1]    # default labels: coordinate names
    if (is.null(ylab)) ylab <- ns[2]
    res <- simage.default(coorvs[[1]], coorvs[[2]], s = x[[data.ind]],
                xlab = xlab, ylab = ylab, ...)
    return(invisible(res))
#--------------------------------------------------------------------
} # simage.data.grid
#--------------------------------------------------------------------
#' @rdname simage
#' @method plot np.den
#' @description \code{plot.np.den} calls \code{simage.data.grid}
#' (\code{\link{contour}} and \code{\link{points}} also by default).
#' @param log logical; if \code{TRUE} (default), \code{log(x$est)} is plotted.
#' @param contour logical; if \code{TRUE} (default), contour lines are added.
#' @param points logical; if \code{TRUE} (default), points at \code{x$data$x} are drawn.
#' @param tolerance tolerance value (lower values are masked).
#' @export
plot.np.den <- function(x, y = NULL, log = TRUE, contour = TRUE, points = TRUE,
    col = hot.colors(128), tolerance = npsp.tolerance(), ...){
    # Image plot of a nonparametric density estimate ('np.den' object):
    # estimates below 'tolerance' are masked, logs are taken by default, and
    # contour lines plus the data locations are overlaid on the simage() plot.
    # 'y' is accepted for plot()-generic compatibility but is not used here.
# if (!inherits(x, "data.grid") | x$grid$nd != 2L)
#     stop("function only works for two-dimensional gridded data ('data.grid'-class objects)")
    is.na(x$est) <- x$est < tolerance   # mask: set estimates below tolerance to NA
    if (log) x$est <- log(x$est)        # plot log(density) by default
    ret <- simage(x, col = col, ...) # checks x$grid$nd != 2L
    if (contour) contour(x, add = TRUE)   # overlay contour lines
    if (points) points(x$data$x, pch = 21, bg = 'black', col = 'darkgray' )  # data locations
    return(invisible(ret))
#--------------------------------------------------------------------
} # plot.np.den |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nwos_estimates_add_minority.R
\name{nwos_estimates_add_minority}
\alias{nwos_estimates_add_minority}
\title{Add MINORITY Variable to an NWOS Data Set}
\usage{
nwos_estimates_add_minority(x = NA, data = QUEST)
}
\arguments{
\item{x}{list number. Only applicable if \code{data} is a list of data frames instead of a single data frame. This is used mainly for apply functions.}
\item{data}{data frame or list of data frames}
}
\description{
Add variables to an NWOS data frame
}
\details{
The default values create the variables used in the NWOS tables.
}
\examples{
nwos_estimates_add_minority()
}
\keyword{nwos}
| /man/nwos_estimates_add_minority.Rd | no_license | jfontestad/nwos | R | false | true | 684 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nwos_estimates_add_minority.R
\name{nwos_estimates_add_minority}
\alias{nwos_estimates_add_minority}
\title{Add MINORITY Variable to an NWOS Data Set}
\usage{
nwos_estimates_add_minority(x = NA, data = QUEST)
}
\arguments{
\item{x}{list number. Only applicable if \code{data} is a list of data frames instead of a single data frame. This is used mainly for apply functions.}
\item{data}{data frame or list of data frames}
}
\description{
Add variables to an NWOS data frame
}
\details{
The default values create the variables used in the NWOS tables.
}
\examples{
nwos_estimates_add_minority()
}
\keyword{nwos}
|
\name{getPlayerData}
\alias{getPlayerData}
\title{
Get the player data from ESPN Cricinfo based on specific inputs and store in a file in a given directory
}
\description{
Get the player data given the profile of the batsman. The allowed inputs are home,away or both and won,lost or draw of matches. The data is stored in a <player>.csv file in a directory specified. This function also returns a data frame of the player
}
\usage{
getPlayerData(profile,dir="./data",file="player001.csv",type="batting",
homeOrAway=c(1,2),result=c(1,2,4))
}
\arguments{
\item{profile}{
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For Sachin Tendulkar this turns out to be
http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
}
\item{dir}{
Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./data"
}
\item{file}{
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
}
\item{type}{
type of data required. This can be "batting" or "bowling"
}
\item{homeOrAway}{
This is a vector containing 1, 2, or both. 1 is for home; 2 is for away
}
\item{result}{
This is a vector that can take values 1,2,4. 1 - won match 2- lost match 4- draw
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
Returns the player's dataframe
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{getPlayerDataSp}}
}
\examples{
\donttest{
# Both home and away. Result = won,lost and drawn
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar1.csv",
type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# Only away. Get data only for won and lost innings
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar2.csv",
type="batting",homeOrAway=c(2),result=c(1,2))
# Get bowling data and store in file for future
kumble <- getPlayerData(30176,dir="../cricketr/data",file="kumble1.csv",
type="bowling",homeOrAway=c(1),result=c(1,2))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/getPlayerData.Rd | no_license | sidaga/cricketr | R | false | false | 2,728 | rd | \name{getPlayerData}
\alias{getPlayerData}
\title{
Get the player data from ESPN Cricinfo based on specific inputs and store in a file in a given directory
}
\description{
Get the player data given the profile of the batsman. The allowed inputs are home,away or both and won,lost or draw of matches. The data is stored in a <player>.csv file in a directory specified. This function also returns a data frame of the player
}
\usage{
getPlayerData(profile,dir="./data",file="player001.csv",type="batting",
homeOrAway=c(1,2),result=c(1,2,4))
}
\arguments{
\item{profile}{
This is the profile number of the player to get data. This can be obtained from http://www.espncricinfo.com/ci/content/player/index.html. Type the name of the player and click search. This will display the details of the player. Make a note of the profile ID. For e.g For Sachin Tendulkar this turns out to be
http://www.espncricinfo.com/india/content/player/35320.html. Hence the profile for Sachin is 35320
}
\item{dir}{
Name of the directory to store the player data into. If not specified the data is stored in a default directory "./data". Default="./data"
}
\item{file}{
Name of the file to store the data into for e.g. tendulkar.csv. This can be used for subsequent functions. Default="player001.csv"
}
\item{type}{
type of data required. This can be "batting" or "bowling"
}
\item{homeOrAway}{
This is a vector containing 1, 2, or both. 1 is for home; 2 is for away
}
\item{result}{
This is a vector that can take values 1,2,4. 1 - won match 2- lost match 4- draw
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
Returns the player's dataframe
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{getPlayerDataSp}}
}
\examples{
\donttest{
# Both home and away. Result = won,lost and drawn
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar1.csv",
type="batting", homeOrAway=c(1,2),result=c(1,2,4))
# Only away. Get data only for won and lost innings
tendulkar <-getPlayerData(35320,dir="../cricketr/data", file="tendulkar2.csv",
type="batting",homeOrAway=c(2),result=c(1,2))
# Get bowling data and store in file for future
kumble <- getPlayerData(30176,dir="../cricketr/data",file="kumble1.csv",
type="bowling",homeOrAway=c(1),result=c(1,2))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Script setup: working directory, saved workspace and required packages
# (plyr::join, forecast and hydroGOF::rmse are used later in this script).
setwd("~/downloads/csv")
load("x.RData")
library(plyr)
library(forecast)
library(hydroGOF)
#----- clean the data ----
# Keep columns 1, 2 and 4 of the hourly records ('date' and 'TEMP' among them).
x <- read.csv("grand.csv")
x <- x[, c(1, 2, 4)]
x$date <- as.Date(x$date)
tmp <- unique(x[, 1:2])
# Daily mean temperature: average every consecutive block of 24 hourly rows.
tmp2 <- vapply(seq_len(dim(tmp)[1]),
               function(i) mean(x$TEMP[((i - 1) * 24 + 1):(i * 24)]),
               numeric(1))
temp <- cbind(tmp, Temp = tmp2)
temp <- temp[-48695, ]  # drop row 48695 -- presumably a bad/trailing row; verify
tag <- read.csv("result.csv")  # cluster result
data <- read.csv("data_2.csv")
data <- na.omit(data)
#spt=split(data,data$city)
data2 <- cbind(data, temp)[, -c(4, 5)]
#----- cities index with cluster tag ----
# cbind() of character vectors yields a character matrix, so "No" ends up
# as character "1".."48" (kept intentionally to preserve behaviour).
cities <- as.data.frame(cbind(as.character(unique(data$city)), c(1:48)))
colnames(cities) <- c("city", "No")
city.tag <- join(cities, tag)
#---- cities in cluster 1 ----
#2013-2014
# Eugene: build the daily demand series (data column 2), decompose it,
# forecast 365 days ahead and save both the decomposition and the forecast.
Eugene=ts(data[which(data$city=="Eugene"),][,2],frequency=365,start=c(2006,1,1))
part1=ts.decompose(Eugene)                 # trend / random / seasonal components
part2=ts.forecast(Eugene,365)              # 365-day Holt-Winters forecast
Eugene2=cbind(data2[which(data$city=="Eugene"),][,c(1,2,4)],part1)
start=Eugene2[length(Eugene),1]            # last observed date
Eugene3=cbind(Date=times(start,365),part2) # forecast with future dates attached
#---- add training and test ----
# Hold-out evaluation: train on all but the last 365 days, forecast that year.
len=length(Eugene)
Eugene_tra=ts(Eugene[1:(len-365)],frequency=365,start=c(2006,1,1))
Eugene_ori=ts(Eugene[(len-365+1):len],frequency=365,start=c(2013,1,1))
Eugene_test=ts.forecast(Eugene_tra,365)
# Observed vs predicted scatter.  NOTE(review): abline(1,1) draws y = 1 + x,
# not the exact identity line (abline(0,1)) -- confirm intent.
# 'p' appears unused afterwards.
p=plot(Eugene_ori,Eugene_test[,1],xlim=c(200,500),ylim=c(200,500),xlab="observed",ylab="predict",main="Eugene")
abline(1,1,col="red")
sub=Eugene_ori-Eugene_test[,1]             # test-set residuals
hist(sub,xlim=c(-150,150),prob=TRUE,xlab="diff",main="distribution of residuals in test data")
curve(dnorm(x,mean=mean(sub), sd=sd(sub)), col="red", lwd=2, add=TRUE, yaxt="n")
x=times("2012-12-31",365)                  # dates of the held-out year
plot(x,Eugene_ori,type="l",main="Observe/Predict of Eugene in 2013",xlab="time",ylab="Energy Demand")
lines(x,Eugene_test[,1],col="red")
rmse.eu=rmse(Eugene_ori,Eugene_test[,1])   # test RMSE (hydroGOF::rmse)
#-----------------------------
# NOTE(review): "origianl" in the filename looks like a typo -- kept as-is
# since downstream consumers may expect this exact name.
write.csv(Eugene2,"Eugene-origianl.csv")
write.csv(Eugene3,"Eugene-predict-365.csv")
#---- cities in cluster 2 ----
#2014-2015
# Dakar: same pipeline as the other city sections (decompose, 365-day
# forecast, hold-out evaluation on the final year).
Dakar <- ts(data[which(data$city=="Dakar"),][,2],frequency=365,start=c(2011,1,1))
part1 <- ts.decompose(Dakar)               # trend / random / seasonal components
part2 <- ts.forecast(Dakar,365)            # 365-day Holt-Winters forecast
Dakar2 <- cbind(data2[which(data$city=="Dakar"),][,c(1,2,4)],part1)
# BUG FIX: the original used length(Abidjan), but 'Abidjan' is never defined
# in this script (copy-paste slip).  Every parallel section
# (Eugene/Louisville/Sacramento) indexes with the city's own series length.
start <- Dakar2[length(Dakar),1]           # last observed date
Dakar3 <- cbind(Date=times(start,365),part2)
#---- add training and test ----
len <- length(Dakar)
Dakar_tra <- ts(Dakar[1:(len-365)],frequency=365,start=c(2011,1,1))
Dakar_ori <- ts(Dakar[(len-365+1):len],frequency=365,start=c(2014,1,1))
Dakar_test <- ts.forecast(Dakar_tra,365)
# Observed vs predicted scatter.  NOTE(review): abline(1,1) draws y = 1 + x,
# not the exact identity line (abline(0,1)) -- confirm intent.
plot(Dakar_ori,Dakar_test[,1],xlim=c(250,450),ylim=c(250,450),xlab="observed",ylab="predict",main="Dakar")
abline(1,1,col="red")
sub <- Dakar_ori-Dakar_test[,1]            # test-set residuals
hist(sub,xlim=c(-50,100),prob=TRUE,xlab="diff",main="distribution of residuals in test data")
curve(dnorm(x,mean=mean(sub), sd=sd(sub)), col="red", lwd=2, add=TRUE, yaxt="n")
x <- times("2013-12-31",365)               # dates of the held-out year
plot(x,Dakar_ori,type="l",main="Observe/Predict of Dakar in 2014",xlab="time",ylab="Energy Demand")
lines(x,Dakar_test[,1],col="red")
rmse.da <- rmse(Dakar_ori,Dakar_test[,1])  # test RMSE (hydroGOF::rmse)
#-----------------------------
# NOTE(review): "origianl" in the filename looks like a typo -- kept as-is
# since downstream consumers may expect this exact name.
write.csv(Dakar2,"Dakar-origianl.csv")
write.csv(Dakar3,"Dakar-predict-365.csv")
#---- cities in cluster 3 ----
#2013-2014
# Louisville: same pipeline as the other city sections (decompose, 365-day
# forecast, hold-out evaluation on the final year).
Louisville=ts(data[which(data$city=="Louisville"),][,2],frequency=365,start=c(2006,1,1))
part1=ts.decompose(Louisville)             # trend / random / seasonal components
part2=ts.forecast(Louisville,365)          # 365-day Holt-Winters forecast
Louisville2=cbind(data2[which(data$city=="Louisville"),][,c(1,2,4)],part1)
start=Louisville2[length(Louisville),1]    # last observed date
Louisville3=cbind(Date=times(start,365),part2)
#---- add training and test ----
len=length(Louisville)
Louisville_tra=ts(Louisville[1:(len-365)],frequency=365,start=c(2006,1,1))
Louisville_ori=ts(Louisville[(len-365+1):len],frequency=365,start=c(2013,1,1))
Louisville_test=ts.forecast(Louisville_tra,365)
# Observed vs predicted scatter.  NOTE(review): abline(1,1) draws y = 1 + x,
# not the exact identity line (abline(0,1)) -- confirm intent.
plot(Louisville_ori,Louisville_test[,1],xlim=c(3000,7000),ylim=c(3000,7000),xlab="observed",ylab="predict",main="Louisville")
abline(1,1,col="red")
x=times("2012-12-31",365)                  # dates of the held-out year
plot(x,Louisville_ori,type="l",ylim=c(3000,7000),main="Observe/Predict of Louisville in 2013",xlab="time",ylab="Energy Demand")
lines(x,Louisville_test[,1],col="red")
rmse.lo=rmse(Louisville_ori,Louisville_test[,1])   # test RMSE (hydroGOF::rmse)
#-----------------------------
# save the document
write.csv(Louisville2,"Louisville-origianl.csv")
write.csv(Louisville3,"Louisville-predict-365.csv")
#---- cities in cluster 4 ----
#2013-2014
# Sacramento: same pipeline as the other city sections (decompose, 365-day
# forecast, hold-out evaluation on the final year).
Sacramento=ts(data[which(data$city=="Sacramento"),][,2],frequency=365,start=c(2006,1,1))
part1=ts.decompose(Sacramento)             # trend / random / seasonal components
part2=ts.forecast(Sacramento,365)          # 365-day Holt-Winters forecast
Sacramento2=cbind(data2[which(data$city=="Sacramento"),][,c(1,2,4)],part1)
start=Sacramento2[length(Sacramento),1]    # last observed date
Sacramento3=cbind(Date=times(start,365),part2)
#---- add training and test ----
len=length(Sacramento)
Sacramento_tra=ts(Sacramento[1:(len-365)],frequency=365,start=c(2006,1,1))
Sacramento_ori=ts(Sacramento[(len-365+1):len],frequency=365,start=c(2013,1,1))
Sacramento_test=ts.forecast(Sacramento_tra,365)
# Observed vs predicted scatter.  NOTE(review): abline(1,1) draws y = 1 + x,
# not the exact identity line (abline(0,1)) -- confirm intent.
plot(Sacramento_ori,Sacramento_test[,1],xlim=c(1000,2500),ylim=c(1000,2500),xlab="observed",ylab="predict",main="Sacramento")
abline(1,1,col="red")
x=times("2012-12-31",365)                  # dates of the held-out year
plot(x,Sacramento_ori,type="l",ylim=c(1000,2500),main="Observe/Predict of Sacramento in 2013",xlab="time",ylab="Energy Demand")
lines(x,Sacramento_test[,1],col="red")
rmse.sa=rmse(Sacramento_ori,Sacramento_test[,1])   # test RMSE (hydroGOF::rmse)
#-----------------------------
write.csv(Sacramento2,"Sacramento-original.csv")
write.csv(Sacramento3,"Sacramento-predict-365.csv")
#---- decompose function ------
# Classical decomposition of a seasonal time series: plots the decomposition
# and returns the components as a data frame with columns
# "Trend", "Random" and "Seasonal" (NA where the moving average is undefined).
ts.decompose <- function(ts) {
  comps <- decompose(ts)
  plot(comps)
  data.frame(Trend = comps$trend,
             Random = comps$random,
             Seasonal = comps$seasonal)
}
#---- forecast function ----
# Holt-Winters forecast: fits an exponential-smoothing model without trend
# (beta = FALSE) but with a seasonal component, plots the fit and the
# forecast, prints a Ljung-Box test of the residuals, and returns the
# 'day'-step-ahead forecast as a data frame.
ts.forecast <- function(ts, day) {
  model <- HoltWinters(ts, beta = FALSE, gamma = TRUE)
  plot(model)
  fore <- forecast.HoltWinters(model, h = day)
  plot.forecast(fore)
  # plotForecastErrors(fore$residuals)
  print(Box.test(fore$residuals, lag = 20, type = "Ljung-Box"))
  # FIX: reuse 'fore' instead of recomputing forecast.HoltWinters() a second
  # time (the call is deterministic, so the result is identical).
  result <- data.frame(fore)
  return(result)
}
#---- time format ----
# Return the 'd' consecutive dates that follow 'day_before_start'.
# FIX: uses seq.Date() instead of the original as.Date(st:end).  The ':'
# operator drops the Date class (yielding plain numbers) and
# as.Date(<numeric>) requires an explicit 'origin' on R < 4.3, so the old
# form errored there; output is identical on R >= 4.3.
times <- function(day_before_start, d) {
  start <- as.Date(day_before_start) + 1
  seq(start, by = "day", length.out = d)
}
#---- arima diff test ----
# Difference a series 'd' times (for ARIMA order exploration), plot the
# differenced series, and return it.
arima.diff <- function(ts, d) {
  differenced <- diff(ts, differences = d)
  plot.ts(differenced)
  differenced
}
###############
## Appendix ###
###############
#--------------- functions for error forecast --------
# Plot a histogram of forecast errors (red) with an overlaid normal density
# of the same standard deviation (blue line), to eyeball residual normality.
plotForecastErrors <- function(forecasterrors) {
  # make a histogram of the forecast errors:
  mybinsize <- IQR(forecasterrors)/4
  mysd <- sd(forecasterrors)
  # FIX (robustness): many identical errors give IQR == 0, which would make
  # the seq() below fail; fall back to a quarter standard deviation.
  if (mybinsize == 0) mybinsize <- mysd/4
  mymin <- min(forecasterrors) - mysd*5
  mymax <- max(forecasterrors) + mysd*3
  # generate normally distributed data with mean 0 and standard deviation mysd
  mynorm <- rnorm(10000, mean=0, sd=mysd)
  mymin2 <- min(mynorm)
  mymax2 <- max(mynorm)
  if (mymin2 < mymin) { mymin <- mymin2 }
  if (mymax2 > mymax) { mymax <- mymax2 }
  # make a red histogram of the forecast errors, with the normally distributed data overlaid.
  # FIX: extend the break sequence one bin past 'mymax'.  seq(mymin, mymax, by)
  # stops at the last multiple <= mymax, so the original breaks could fail to
  # span the data and hist() would error ("breaks do not span range of 'x'").
  mybins <- seq(mymin, mymax + mybinsize, mybinsize)
  hist(forecasterrors, col="red", freq=FALSE, breaks=mybins)
  # freq=FALSE ensures the area under the histogram = 1
  # bin the normal sample on the same breaks so the densities are comparable
  myhist <- hist(mynorm, plot=FALSE, breaks=mybins)
  # plot the normal curve as a blue line on top of the histogram of forecast errors:
  points(myhist$mids, myhist$density, type="l", col="blue", lwd=2)
} | /data/Forecasting.R | no_license | denistanwh/Energy | R | false | false | 7,489 | r |
load("x.RData")
library(plyr)
library(forecast)
library(hydroGOF)
#----- clean the data ----
read.csv("grand.csv")->x
x=x[,c(1,2,4)]
x$date=as.Date(x$date)
tmp=unique(x[,1:2])
tmp2=vector()
for (i in 1:dim(tmp)[1]){
tmp2[i]=mean(x$TEMP[((i-1)*24+1):(i*24)])
}
temp=cbind(tmp,Temp=tmp2)
temp=temp[-48695,]
read.csv("result.csv")->tag # cluster result
read.csv("data_2.csv")->data
data=na.omit(data)
#spt=split(data,data$city)
data2=cbind(data,temp)[,-c(4,5)]
#----- cities index with cluster tag ----
cities=as.data.frame(cbind(as.character(unique(data$city)),c(1:48)))
colnames(cities)=c("city","No")
city.tag=join(cities,tag)
#---- cities in cluster 1 ----
#2013-2014
# Daily demand series for Eugene (starts 2006), classical decomposition,
# and a 365-day-ahead Holt-Winters forecast.
Eugene=ts(data[which(data$city=="Eugene"),][,2],frequency=365,start=c(2006,1,1))
part1=ts.decompose(Eugene)
part2=ts.forecast(Eugene,365)
# Original observations + decomposition components; dated forecast table
# starting the day after the last observed date.
Eugene2=cbind(data2[which(data$city=="Eugene"),][,c(1,2,4)],part1)
start=Eugene2[length(Eugene),1]
Eugene3=cbind(Date=times(start,365),part2)
#---- add training and test ----
# Hold out the final year as a test set and forecast it from the rest.
len=length(Eugene)
Eugene_tra=ts(Eugene[1:(len-365)],frequency=365,start=c(2006,1,1))
Eugene_ori=ts(Eugene[(len-365+1):len],frequency=365,start=c(2013,1,1))
Eugene_test=ts.forecast(Eugene_tra,365)
# Observed vs predicted scatter.
# NOTE(review): abline(1,1) draws intercept 1 / slope 1; the exact y=x
# reference line would be abline(0,1).
p=plot(Eugene_ori,Eugene_test[,1],xlim=c(200,500),ylim=c(200,500),xlab="observed",ylab="predict",main="Eugene")
abline(1,1,col="red")
# Test-year residual distribution with a fitted normal overlay.
sub=Eugene_ori-Eugene_test[,1]
hist(sub,xlim=c(-150,150),prob=TRUE,xlab="diff",main="distribution of residuals in test data")
curve(dnorm(x,mean=mean(sub), sd=sd(sub)), col="red", lwd=2, add=TRUE, yaxt="n")
x=times("2012-12-31",365)
plot(x,Eugene_ori,type="l",main="Observe/Predict of Eugene in 2013",xlab="time",ylab="Energy Demand")
lines(x,Eugene_test[,1],col="red")
rmse.eu=rmse(Eugene_ori,Eugene_test[,1])
#-----------------------------
# NOTE(review): "origianl" is a typo in the output filename; kept as-is
# because downstream steps may expect it.
write.csv(Eugene2,"Eugene-origianl.csv")
write.csv(Eugene3,"Eugene-predict-365.csv")
#---- cities in cluster 2 ----
#2014-2015
# Daily demand series for Dakar (starts 2011), classical decomposition,
# and a 365-day-ahead Holt-Winters forecast.
Dakar=ts(data[which(data$city=="Dakar"),][,2],frequency=365,start=c(2011,1,1))
part1=ts.decompose(Dakar)
part2=ts.forecast(Dakar,365)
Dakar2=cbind(data2[which(data$city=="Dakar"),][,c(1,2,4)],part1)
# FIX: the last observed date must be looked up with the length of the
# *Dakar* series; the original indexed with length(Abidjan), a leftover
# from a copied city section (Abidjan is never defined in this script).
start=Dakar2[length(Dakar),1]
Dakar3=cbind(Date=times(start,365),part2)
#---- add training and test ----
# Hold out the final year as a test set and forecast it from the rest.
len=length(Dakar)
Dakar_tra=ts(Dakar[1:(len-365)],frequency=365,start=c(2011,1,1))
Dakar_ori=ts(Dakar[(len-365+1):len],frequency=365,start=c(2014,1,1))
Dakar_test=ts.forecast(Dakar_tra,365)
# Observed vs predicted scatter; abline(1,1) is an intercept-1/slope-1
# reference (the exact y=x line would be abline(0,1)).
plot(Dakar_ori,Dakar_test[,1],xlim=c(250,450),ylim=c(250,450),xlab="observed",ylab="predict",main="Dakar")
abline(1,1,col="red")
# Test-year residual distribution with a fitted normal overlay.
sub=Dakar_ori-Dakar_test[,1]
hist(sub,xlim=c(-50,100),prob=TRUE,xlab="diff",main="distribution of residuals in test data")
curve(dnorm(x,mean=mean(sub), sd=sd(sub)), col="red", lwd=2, add=TRUE, yaxt="n")
x=times("2013-12-31",365)
plot(x,Dakar_ori,type="l",main="Observe/Predict of Dakar in 2014",xlab="time",ylab="Energy Demand")
lines(x,Dakar_test[,1],col="red")
rmse.da=rmse(Dakar_ori,Dakar_test[,1])
#-----------------------------
# NOTE(review): "origianl" filename typo kept for consistency with the
# other city sections.
write.csv(Dakar2,"Dakar-origianl.csv")
write.csv(Dakar3,"Dakar-predict-365.csv")
#---- cities in cluster 3 ----
#2013-2014
# Same pipeline as the other city sections: decompose, forecast 365 days,
# then evaluate on a held-out final year.
Louisville=ts(data[which(data$city=="Louisville"),][,2],frequency=365,start=c(2006,1,1))
part1=ts.decompose(Louisville)
part2=ts.forecast(Louisville,365)
Louisville2=cbind(data2[which(data$city=="Louisville"),][,c(1,2,4)],part1)
start=Louisville2[length(Louisville),1]
Louisville3=cbind(Date=times(start,365),part2)
#---- add training and test ----
len=length(Louisville)
Louisville_tra=ts(Louisville[1:(len-365)],frequency=365,start=c(2006,1,1))
Louisville_ori=ts(Louisville[(len-365+1):len],frequency=365,start=c(2013,1,1))
Louisville_test=ts.forecast(Louisville_tra,365)
# Observed vs predicted scatter (abline(1,1): intercept 1, slope 1).
plot(Louisville_ori,Louisville_test[,1],xlim=c(3000,7000),ylim=c(3000,7000),xlab="observed",ylab="predict",main="Louisville")
abline(1,1,col="red")
x=times("2012-12-31",365)
plot(x,Louisville_ori,type="l",ylim=c(3000,7000),main="Observe/Predict of Louisville in 2013",xlab="time",ylab="Energy Demand")
lines(x,Louisville_test[,1],col="red")
rmse.lo=rmse(Louisville_ori,Louisville_test[,1])
#-----------------------------
# save the document
# NOTE(review): "origianl" filename typo, as in the other sections.
write.csv(Louisville2,"Louisville-origianl.csv")
write.csv(Louisville3,"Louisville-predict-365.csv")
#---- cities in cluster 4 ----
#2013-2014
Sacramento=ts(data[which(data$city=="Sacramento"),][,2],frequency=365,start=c(2006,1,1))
part1=ts.decompose(Sacramento)
part2=ts.forecast(Sacramento,365)
Sacramento2=cbind(data2[which(data$city=="Sacramento"),][,c(1,2,4)],part1)
start=Sacramento2[length(Sacramento),1]
Sacramento3=cbind(Date=times(start,365),part2)
#---- add training and test ----
len=length(Sacramento)
Sacramento_tra=ts(Sacramento[1:(len-365)],frequency=365,start=c(2006,1,1))
Sacramento_ori=ts(Sacramento[(len-365+1):len],frequency=365,start=c(2013,1,1))
Sacramento_test=ts.forecast(Sacramento_tra,365)
plot(Sacramento_ori,Sacramento_test[,1],xlim=c(1000,2500),ylim=c(1000,2500),xlab="observed",ylab="predict",main="Sacramento")
abline(1,1,col="red")
x=times("2012-12-31",365)
plot(x,Sacramento_ori,type="l",ylim=c(1000,2500),main="Observe/Predict of Sacramento in 2013",xlab="time",ylab="Energy Demand")
lines(x,Sacramento_test[,1],col="red")
rmse.sa=rmse(Sacramento_ori,Sacramento_test[,1])
#-----------------------------
# NOTE(review): this section spells "original" correctly, unlike the
# other three -- downstream readers must handle both spellings.
write.csv(Sacramento2,"Sacramento-original.csv")
write.csv(Sacramento3,"Sacramento-predict-365.csv")
#---- decompose function ------
# Run a classical seasonal decomposition on `ts`, plot it, and return the
# components as a data frame with columns Trend / Random / Seasonal (one
# row per observation; Trend and Random contain NA at the series ends,
# as decompose() produces).
ts.decompose=function(ts){
parts <- decompose(ts)
plot(parts)
out <- data.frame(Trend = parts$trend,
                  Random = parts$random,
                  Seasonal = parts$seasonal)
out
}
#---- forecast function ----
# Fit a Holt-Winters model to `ts` (no trend term: beta=FALSE; with a
# seasonal term: gamma=TRUE), plot the fit and the `day`-step-ahead
# forecast, print a Ljung-Box test on the residuals, and return the
# forecast (point forecasts + prediction intervals) as a data.frame.
# Relies on the `forecast` package (forecast.HoltWinters / plot.forecast).
ts.forecast=function(ts,day){
model=HoltWinters(ts,beta=FALSE,gamma=TRUE)
plot(model)
fore=forecast.HoltWinters(model,h=day)
plot.forecast(fore)
# plotForecastErrors(fore$residuals)
print(Box.test(fore$residuals,lag=20,type="Ljung-Box"))
# FIX: reuse the forecast computed above; the original called
# forecast.HoltWinters() a second time, recomputing the same result.
result=data.frame(fore)
return(result)
}
#---- time format ----
# Build the sequence of `d` consecutive calendar dates that follows
# `day_before_start`.
#
# Args:
#   day_before_start: Date (or string coercible by as.Date) of the day
#     immediately before the first wanted date.
#   d: number of days in the returned sequence (must be >= 1).
# Returns: Date vector of length `d`.
times=function(day_before_start,d){
st=as.Date(day_before_start)+1
end=st+d-1
# FIX: seq() keeps the Date class; the original as.Date(st:end) coerced
# the dates to bare day counts, and as.Date(<numeric>) without `origin`
# errors on R < 4.3.
return(seq(st,end,by="day"))
}
#---- arima diff test ----
# Difference the series `ts` a total of `d` times (useful when choosing
# the d order of an ARIMA model), plot the differenced series, and
# return it.
arima.diff=function(ts,d){
differenced <- diff(ts, differences = d)
plot.ts(differenced)
differenced
}
###############
## Appendix ###
###############
#--------------- functions for error forecast --------
# Overlay a histogram of forecast errors (red) with the binned density of
# a simulated N(0, sd(errors)) sample (blue line), to eyeball whether the
# residuals look normally distributed.
plotForecastErrors <- function(forecasterrors) {
# make a histogram of the forecast errors:
# bin-width heuristic: a quarter of the interquartile range.
# NOTE(review): if IQR(forecasterrors) is 0 (constant errors) the bin
# size is 0 and seq() below fails -- worth guarding.
mybinsize <- IQR(forecasterrors)/4
mysd <- sd(forecasterrors)
mymin <- min(forecasterrors) - mysd*5
mymax <- max(forecasterrors) + mysd*3
# generate normally distributed data with mean 0 and standard deviation mysd
mynorm <- rnorm(10000, mean=0, sd=mysd)
mymin2 <- min(mynorm)
mymax2 <- max(mynorm)
# widen the range so the simulated normal sample is fully covered too
if (mymin2 < mymin) { mymin <- mymin2 }
if (mymax2 > mymax) { mymax <- mymax2 }
# make a red histogram of the forecast errors, with the normally distributed data overl
# NOTE(review): seq(mymin, mymax, mybinsize) stops short of mymax when
# the bin size does not divide the range exactly, and hist() then errors
# with "some 'x' not counted"; consider seq(mymin, mymax + mybinsize,
# mybinsize).
mybins <- seq(mymin, mymax, mybinsize)
hist(forecasterrors, col="red", freq=FALSE, breaks=mybins)
# freq=FALSE ensures the area under the histogram = 1
# generate normally distributed data with mean 0 and standard deviation mysd
myhist <- hist(mynorm, plot=FALSE, breaks=mybins)
# plot the normal curve as a blue line on top of the histogram of forecast errors:
points(myhist$mids, myhist$density, type="l", col="blue", lwd=2)
} |
library(tidyverse)
library(igraph)
# Hyperlink graph of California-related web pages: the edge file holds
# integer node indices; the node file holds the URLs.
caedges = read.csv("../data/CaliforniaEdges.csv")
casites = scan("../data/CaliforniaNodes.txt", "character")
# caedges has two columns: from and to
# these integers correspond to the entries in casites
head(casites, 20)
# create an edge matrix with the right names
edgemat = cbind(casites[caedges$from], casites[caedges$to])
edgemat[1,]
# create a graph from the edge list
# this data structure encodes all the node and edge information
# also has some nice plotting and summary methods
# NOTE(review): graph.edgelist / graph.neighborhood / page.rank are the
# old igraph names; current releases prefer graph_from_edgelist /
# make_ego_graph / page_rank.
calink = graph.edgelist(edgemat)
# one link away: ego network of the LA Times home page
latimes = graph.neighborhood(calink, 1, V(calink)["http://www.latimes.com/HOME/"])[[1]]
plot(latimes, vertex.label=NA)
## two links away
latimes2 = graph.neighborhood(calink, 2, V(calink)["http://www.latimes.com/HOME/"])[[1]]
plot(latimes2, vertex.label=NA)
# a little prettier
# these graphics options from igraph get pretty complex
# need to spend some time with the docs to get the hang of it
# see the docs using ?plot.igraph
V(latimes2)$color <- "lightblue"
V(latimes2)[V(latimes)$name]$color <- "gold" # these are the level-one links
V(latimes2)["http://www.latimes.com/HOME/"]$color <- "navy"
# NOTE(review): vertex.label=NA is passed twice in this call; the
# duplicate should be removed.
plot(latimes2, vertex.label=NA, edge.arrow.width=0, edge.curved=FALSE,
vertex.label=NA, vertex.frame.color=0, vertex.size=6)
# top 10 sites in the network by betweenness
# (the bare order() call below prints the full ranking to the console)
order(betweenness(calink), decreasing=TRUE)
# NOTE(review): head(10) is redundant after [1:10].
top10_ind = order(betweenness(calink), decreasing=TRUE)[1:10] %>% head(10)
V(calink)$name[top10_ind]
# run page rank and show the 20 highest-scoring URLs
search = page.rank(calink)$vector
casites[order(search, decreasing=TRUE)[1:20]]
| /r/calsites.R | no_license | Eliza1494/ECO395M | R | false | false | 1,602 | r | library(tidyverse)
library(igraph)
# Hyperlink graph of California-related web pages: the edge file holds
# integer node indices; the node file holds the URLs.
caedges = read.csv("../data/CaliforniaEdges.csv")
casites = scan("../data/CaliforniaNodes.txt", "character")
# caedges has two columns: from and to
# these integers correspond to the entries in casites
head(casites, 20)
# create an edge matrix with the right names
edgemat = cbind(casites[caedges$from], casites[caedges$to])
edgemat[1,]
# create a graph from the edge list
# this data structure encodes all the node and edge information
# also has some nice plotting and summary methods
# NOTE(review): graph.edgelist / graph.neighborhood / page.rank are the
# old igraph names; current releases prefer graph_from_edgelist /
# make_ego_graph / page_rank.
calink = graph.edgelist(edgemat)
# one link away: ego network of the LA Times home page
latimes = graph.neighborhood(calink, 1, V(calink)["http://www.latimes.com/HOME/"])[[1]]
plot(latimes, vertex.label=NA)
## two links away
latimes2 = graph.neighborhood(calink, 2, V(calink)["http://www.latimes.com/HOME/"])[[1]]
plot(latimes2, vertex.label=NA)
# a little prettier
# these graphics options from igraph get pretty complex
# need to spend some time with the docs to get the hang of it
# see the docs using ?plot.igraph
V(latimes2)$color <- "lightblue"
V(latimes2)[V(latimes)$name]$color <- "gold" # these are the level-one links
V(latimes2)["http://www.latimes.com/HOME/"]$color <- "navy"
# NOTE(review): vertex.label=NA is passed twice in this call; the
# duplicate should be removed.
plot(latimes2, vertex.label=NA, edge.arrow.width=0, edge.curved=FALSE,
vertex.label=NA, vertex.frame.color=0, vertex.size=6)
# top 10 sites in the network by betweenness
# (the bare order() call below prints the full ranking to the console)
order(betweenness(calink), decreasing=TRUE)
# NOTE(review): head(10) is redundant after [1:10].
top10_ind = order(betweenness(calink), decreasing=TRUE)[1:10] %>% head(10)
V(calink)$name[top10_ind]
# run page rank and show the 20 highest-scoring URLs
search = page.rank(calink)$vector
casites[order(search, decreasing=TRUE)[1:20]]
|
# Plot3.R
# This script creates the plot as it appears on
# https://github.com/rdpeng/ExData_Plotting1/blob/master/figure/unnamed-chunk-4.png
# The plot is created as part of Course Project 1 for the 'Exploratory Data Analysis' Coursera MOOC.
# Execute common code for all plots in file 'prepare_data.R'
# (it is expected to define the `pcdata` data frame used below)
if (file.exists("prepare_data.R")) {
source("prepare_data.R")
} else {
stop("File 'prepare_data.R' is missing. This file is in charge of downloading, cleaning and preparing the data for plotting, so it´s critical.")
}
# Backup current base graphics defaults except for RO properties
.pardefault <- par(no.readonly = TRUE)
# Create 1st Plot on PNG graphic device
# FIX: open the PNG device *before* setting the margins -- opening a new
# device resets par(), so the original par(mar=...) call (made before
# png()) never affected the saved plot.
png("plot3.png", width = 480, height = 480, bg = "transparent")
par(mar=c(4,4,2,2))
with(pcdata, {
plot(Sub_metering_1 ~ timestamp, type = "l", xlab="", ylab="Energy sub metering")
lines(Sub_metering_2 ~ timestamp, col = "red")
lines(Sub_metering_3 ~ timestamp, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = c(1,1))
})
# Close the graphics device
dev.off()
# Restore previous base graphic system defaults
# NOTE(review): after dev.off() this applies to whichever device becomes
# current (opening one if none is active) -- harmless but worth knowing.
par(.pardefault)
| /plot3.R | no_license | xuxoramos/ExData_Plotting1 | R | false | false | 1,240 | r | # Plot3.R
# This script creates the plot as it appears on
# https://github.com/rdpeng/ExData_Plotting1/blob/master/figure/unnamed-chunk-4.png
# The plot is created as part of Course Project 1 for the 'Exploratory Data Analysis' Coursera MOOC.
# Execute common code for all plots in file 'prepare_data.R'
# (it is expected to define the `pcdata` data frame used below)
if (file.exists("prepare_data.R")) {
source("prepare_data.R")
} else {
stop("File 'prepare_data.R' is missing. This file is in charge of downloading, cleaning and preparing the data for plotting, so it´s critical.")
}
# Backup current base graphics defaults except for RO properties
.pardefault <- par(no.readonly = TRUE)
# Create 1st Plot on PNG graphic device
# FIX: open the PNG device *before* setting the margins -- opening a new
# device resets par(), so the original par(mar=...) call (made before
# png()) never affected the saved plot.
png("plot3.png", width = 480, height = 480, bg = "transparent")
par(mar=c(4,4,2,2))
with(pcdata, {
plot(Sub_metering_1 ~ timestamp, type = "l", xlab="", ylab="Energy sub metering")
lines(Sub_metering_2 ~ timestamp, col = "red")
lines(Sub_metering_3 ~ timestamp, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty = c(1,1))
})
# Close the graphics device
dev.off()
# Restore previous base graphic system defaults
# NOTE(review): after dev.off() this applies to whichever device becomes
# current (opening one if none is active) -- harmless but worth knowing.
par(.pardefault)
|
### Which S4 generic has the most methods defined for it? Which S4 class has the most methods associated with it?
# Names of every S4 generic / class currently registered in this session.
all_generics <- as.character(getGenerics())
all_classes <- as.character(getClasses())
# Count the S4 methods listed by showMethods() for `fname`.
# `mode` says whether `fname` names a generic or a class; any other mode
# yields 0. The count is the number of printed lines minus the two
# header/footer lines showMethods() always emits.
get_num_methods <- function(fname, mode) {
if (!identical(mode, 'generic') && !identical(mode, 'class')) {
return(0)
}
shown <- if (identical(mode, 'generic')) {
capture.output(showMethods(fname))
} else {
capture.output(showMethods(class = fname))
}
length(shown) - 2
}
# Count methods per generic and per class, then report the maximum.
# FIX: vapply() pins the result to one double per element; sapply() can
# silently change its return type on degenerate input.
number_methods <- vapply(all_generics, get_num_methods, numeric(1), 'generic')
number_methods[which.max(number_methods)]
number_class_methods <- vapply(all_classes, get_num_methods, numeric(1), 'class')
number_class_methods[which.max(number_class_methods)]
| /05_oo_field_guide/02_S4/exercise1.r | no_license | Nabie/adv-r-book-solutions | R | false | false | 700 | r | ### Which S4 generic has the most methods defined for it? Which S4 class has the most methods associated with it?
# Names of every S4 generic / class currently registered in this session.
all_generics <- as.character(getGenerics())
all_classes <- as.character(getClasses())
# Count the S4 methods listed by showMethods() for `fname`.
# `mode` says whether `fname` names a generic or a class; any other mode
# yields 0. The count is the number of printed lines minus the two
# header/footer lines showMethods() always emits.
get_num_methods <- function(fname, mode) {
if (!identical(mode, 'generic') && !identical(mode, 'class')) {
return(0)
}
shown <- if (identical(mode, 'generic')) {
capture.output(showMethods(fname))
} else {
capture.output(showMethods(class = fname))
}
length(shown) - 2
}
# Count methods per generic and per class, then report the maximum.
# FIX: vapply() pins the result to one double per element; sapply() can
# silently change its return type on degenerate input.
number_methods <- vapply(all_generics, get_num_methods, numeric(1), 'generic')
number_methods[which.max(number_methods)]
number_class_methods <- vapply(all_classes, get_num_methods, numeric(1), 'class')
number_class_methods[which.max(number_class_methods)]
|
# Reading, naming and subsetting power consumption data
# skip=1 drops the header row; column names are then assigned manually.
# NOTE(review): without na.strings="?" the numeric columns are read as
# text, which is why as.numeric(as.character(...)) is needed below.
power <- read.table("C:/Users/bkershner/Documents/household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two analysis days (Date is still a d/m/Y string here).
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
# strptime() without a date part attaches today's date, so the next two
# lines overwrite it with the correct hard-coded days (1440 minutes each).
# NOTE(review): this assumes the rows are complete and in time order.
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# saving the results as a png file
png("plot3.png", width=480, height=480)
# calling the basic plot functions
# type="n" draws the empty frame; each sub-meter is then added as a line.
plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# annotating graph
title(main="Energy sub-metering")
dev.off() | /Plot 3.R | no_license | bkershner/ExData_Plotting1 | R | false | false | 1,413 | r | # Reading, naming and subsetting power consumption data
# skip=1 drops the header row; column names are then assigned manually.
# NOTE(review): without na.strings="?" the numeric columns are read as
# text, which is why as.numeric(as.character(...)) is needed below.
power <- read.table("C:/Users/bkershner/Documents/household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two analysis days (Date is still a d/m/Y string here).
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
# strptime() without a date part attaches today's date, so the next two
# lines overwrite it with the correct hard-coded days (1440 minutes each).
# NOTE(review): this assumes the rows are complete and in time order.
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# saving the results as a png file
png("plot3.png", width=480, height=480)
# calling the basic plot functions
# type="n" draws the empty frame; each sub-meter is then added as a line.
plot(subpower$Time,subpower$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering")
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_1))))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_2)),col="red"))
with(subpower,lines(Time,as.numeric(as.character(Sub_metering_3)),col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# annotating graph
title(main="Energy sub-metering")
rm(list=ls())
library(ggplot2)
library(MASS)
#install.packages('ggfortify')
library(ggfortify)
library(scatterplot3d)
library(dplyr)
library(forecast)
library(grid)
library(scales)
library(combinat)
setwd("C:/Users/bruce/Google Drive/765 Project/765_project")
setwd("~/Google Drive/765 Project/765_project")
data <- as.data.frame(read.csv('data-wenyue.csv'))
data<- data[,!(colnames(data) %in% c('F2.F1','F3.F2'))]
data[data == '--undefined--'] <- NA
unique((data%>%filter(type =='glide'))$sound)
sum(is.na(data$F4))
###############################Histograms#############################
'Histogram visualization part'
'Before any transformation'
######################################################################
# Raw distribution of every acoustic feature, to judge which need a
# normalizing transform before modelling.
# NOTE(review): F4/F5/AvePitch are wrapped in as.numeric() -- presumably
# they were read as text/factor because of "--undefined--" entries;
# confirm the wrap yields values, not factor level codes.
head(data)
hist(data$duration,xlab = 'Phone Duration' , breaks = 20, main = 'Histogram of feature Duration')
hist(data$intensity, xlab = 'Intensity', breaks = 20, main = 'Histogram of feature Intensity')
hist(as.numeric(data$AvePitch), xlab = 'Average Pitch',breaks = 20, main = 'Histogram of feature Average Pitch')
hist(data$AveHarmonicity, xlab = 'Average Harmonicity',breaks = 20, main = 'Histogram of feature Average Harmonicity')
hist(data$F1, xlab = 'F1' ,breaks = 20, main ='Histogram of feature F1')
hist(data$F2, xlab = 'F2' , breaks = 20, main ='Histogram of feature F2')
hist(data$F3, xlab = 'F3' , breaks = 20,main ='Histogram of feature F3')
hist(as.numeric(data$F4), xlab = 'F4' ,breaks = 20, main ='Histogram of feature F4')
hist(as.numeric(data$F5), xlab = 'F5' ,breaks = 20, main ='Histogram of feature F5')
hist(data$F1_bandwidth, xlab = 'F1_bandwidth',breaks = 20, main = 'Histogram of feature F1 bandwidth')
hist(data$F2_bandwidth, xlab = 'F2_bandwidth', breaks = 20, main = 'Histogram of feature F2 bandwidth')
hist(data$F3_bandwidth, xlab = 'F3_bandwidth', breaks = 20,main = 'Histogram of feature F3 bandwidth')
hist(as.numeric(data$F4_bandwidth), xlab = 'F4_bandwidth', breaks = 20,main = 'Histogram of feature F4 bandwidth')
hist(as.numeric(data$F5_bandwidth), xlab = 'F5_bandwidth',breaks = 20, main = 'Histogram of feature F5 bandwidth')
######################################################################
'find out features with NA values'
#####################################################################
# Collect the names of columns that still contain missing values.
na_row = c()
for (item in colnames(data)){
# NOTE(review): after the '--undefined--' -> NA recode above, the second
# condition evaluates to NA for columns that contain NAs; this only
# works because the first condition is TRUE for exactly those columns
# (TRUE | NA is TRUE).
if (sum(is.na(data[item])) > 0| sum(data[item] == '--undefined--') > 0 ){
na_row <- c(na_row,item)
}
}
na_row
# Drop rows with missing F4 or AvePitch.
# NOTE(review): `!= T` is a fragile spelling of !is.na(...).
data <- data[is.na(data[,'F4']) != T,]
data <- data[is.na(data[,'AvePitch']) != T,]
# NOTE(review): hard-coded column positions -- breaks if the schema moves.
data <- data[,c(-17,-22)] # delete F5 and F5_bandwidth because of ba
####################################################################
'Use a function to plot multiple ggplots in same plot'
###################################################################
# Plot multiple ggplot objects on one page in a grid (classic helper
# from the R Graphics Cookbook).
#
# ...: ggplot objects, and/or
# plotlist: a list of ggplot objects (combined with those in ...)
# file: unused; kept so existing callers are not broken
# cols: number of columns in the page layout
# layout: optional matrix of plot indices; cell (i, j) holds the index
#   of the plot drawn there (overrides `cols` when supplied)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
# Gather every plot passed positionally or via plotlist.
plots <- c(list(...), plotlist)
numPlots <- length(plots)
# If layout is NULL, then use 'cols' to determine layout:
# e.g. 3 plots over 2 cols -> matrix(1:4, ncol = 2), filled column-wise.
if (is.null(layout)) {
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots == 1) {
# A single plot needs no grid machinery.
print(plots[[1]])
} else {
# Set up a fresh grid page with one viewport cell per layout cell.
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Draw each plot in the cell(s) whose layout entry matches its index.
# FIX: seq_len(numPlots) instead of 1:numPlots, so an empty plot list
# iterates zero times rather than over c(1, 0).
for (i in seq_len(numPlots)) {
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
####################################################################
'data type conversion'
####################################################################
data$intensity <- as.double(data$intensity) # convert from integer to double
data$F4 <- as.double(as.character(data$F4)) # convert from integer to double
#data$F5 <- as.double(data$F5) # convert from integer to double
data$F4_bandwidth <- as.double(data$F4_bandwidth) # convert from integer to double
#data$F5_bandwidth <- as.double(data$F5_bandwidth) # convert from integer to double
# NOTE(review): if AvePitch was read as a factor, as.double() on it
# returns the level codes, not the pitch values -- F4 above uses the
# safe as.double(as.character(...)) pattern; confirm AvePitch is numeric
# at this point.
data$AvePitch <- as.double(data$AvePitch)
#maybe we need give it a log function
hist(data$duration, xlab = 'Phoneme Duration' ,breaks = 20, main = 'Histogram of feature Duration')
p <- ggplot(data, aes(sample = duration))
p + stat_qq() + ggtitle('QQ-plot of Duration before transformation')
# for duration, we need to give it a transformation, log 10
# NOTE(review): destructive in-place transform -- re-running this line
# applies log10 twice.
data$duration = log10(data$duration)
hist(data$duration,xlab = 'Phoneme Duration' ,breaks = 20, main = 'Histogram of feature Duration after log10 transformation')
# much better after log 10
p <- ggplot(data, aes(sample = duration))
p + stat_qq() + ggtitle('QQ-plot of Duration after log10 transformation')
##########################################################################
# Per-feature normalization: inspect histogram + QQ-plot, estimate a
# Box-Cox lambda, transform, then re-check.
# NOTE(review): the BoxCox.lambda() calls only print an estimate; the
# lambdas actually used below are hard-coded -- presumably copied from
# those printouts. Confirm they still match if the data changes.
hist(data$intensity, xlab = 'intensity', main = 'Histogram of feature intensity',breaks = 20)
p <- ggplot(data, aes(sample = intensity))
p + stat_qq() + ggtitle('QQ-plot of intensity before transformation')
BoxCox.lambda(data$intensity, method = "loglik", lower = -5, upper = 5)
data$intensity <- BoxCox(data$intensity, lambda = 3.5)
hist(data$intensity,xlab = 'intensity', main = 'Histogram of feature intensity after \n lambda 3.5 boxcox transformation', breaks = 20)
p <- ggplot(data, aes(sample = intensity))
p + stat_qq() + ggtitle('QQ-plot of intensity after boxcox transformation')
########################################################################
# Average pitch: lambda 0.5 Box-Cox (i.e. a square-root-style transform).
hist(as.numeric(data$AvePitch), xlab = 'Average Pitch',breaks = 20, main = 'Histogram of feature Average Pitch')
p <- ggplot(data, aes(sample = AvePitch))
p + stat_qq() +ggtitle('QQ-plot of average pitch before transformation')
BoxCox.lambda(data$AvePitch, method = 'loglik')
data$AvePitch <- BoxCox(data$AvePitch, lambda = 0.5)
hist(data$AvePitch, breaks = 20, xlab = 'Average Pitch', main = 'Histogram of feature Average Pitch after \n lambda 0.5 boxcox transformation')
p <- ggplot(data, aes(sample = AvePitch))
p + stat_qq() +ggtitle('QQ-plot of average pitch after square root transformation')
#######################################################################
# Harmonicity already looks acceptable: left untransformed.
hist(data$AveHarmonicity, xlab = 'Average Harmonicity',breaks = 20, main = 'Histogram of feature Average Harmonicity')
hist(data$AveHarmonicity, xlab = 'Average Harmonicity',breaks = 20, main = 'Histogram of feature Average Harmonicity \n without transformation')
p <- ggplot(data, aes(sample = AveHarmonicity))
p + stat_qq() +ggtitle('QQ-plot of average harmonicity without transformation')
########################################################################
# F1: log10 transform.
hist(data$F1, xlab = 'F1' , main ='Histogram of feature F1', breaks =20)
p <- ggplot(data, aes(sample = F1))
p + stat_qq() + ggtitle('QQ-plot of F1 before transformation')
data$F1 <- log10(data$F1)
hist(data$F1, xlab = 'F1' , main ='Histogram of feature F1 after log10 transformation', breaks = 20)
p <- ggplot(data, aes(sample = F1))
p + stat_qq() + ggtitle('QQ-plot of F1 after log10 transformation')
##########################################################################
# F2: lambda 2.15 Box-Cox.
hist(data$F2, xlab = 'F2' , main ='Histogram of feature F2', breaks = 20)
p <- ggplot(data, aes(sample = F2))
p + stat_qq() + ggtitle('QQ-plot of F2 before transformation')
BoxCox.lambda(data$F2, method = 'loglik',lower = -5, upper =5 )
data$F2 <- BoxCox(data$F2, lambda = 2.15)
hist(data$F2, xlab = 'F2' , main ='Histogram of feature F2 after \n lambda 2.15 boxcox transformation', breaks =20)
p <- ggplot(data, aes(sample = F2))
p + stat_qq() + ggtitle('QQ-plot of F2 after boxcox transformation')
###########################################################################
# F3: lambda 0.4 Box-Cox.
hist(data$F3, xlab = 'F3' , main ='Histogram of feature F3', breaks = 20)
p <- ggplot(data, aes(sample = F3))
p + stat_qq() + ggtitle('QQ-plot of F3 before transformation')
BoxCox.lambda(data$F3, lower = -5, upper =5 ,method = 'loglik' )
data$F3 = BoxCox(data$F3, lambda = 0.4)
hist(data$F3, xlab = 'F3' , main ='Histogram of feature F3 after \n lambda 0.4 boxcox transformation', breaks =20)
p <- ggplot(data, aes(sample = F3))
p + stat_qq() + ggtitle('QQ-plot of F3 after boxcox transformation')
##########################################################################
# F4: lambda 0.4 Box-Cox (lambda hard-coded from the printed estimate,
# as in the sections above).
hist(data$F4, xlab = 'F4' , main ='Histogram of feature F4', breaks = 20)
p <- ggplot(data, aes(sample = F4))
p + stat_qq() + ggtitle('QQ-plot of F4 before transformation')
BoxCox.lambda(data$F4, lower = -5, upper =5 , method = 'loglik')
data$F4 = BoxCox(data$F4, lambda = 0.4)
hist(data$F4, xlab = 'F4' , main ='Histogram of feature F4 after \n lambda 0.4 boxcox transformation', breaks =20)
p <- ggplot(data, aes(sample = F4))
p + stat_qq() + ggtitle('QQ-plot of F4 after boxcox transformation')
##########################################################################
# F1 bandwidth: log10 transform.
hist(data$F1_bandwidth, xlab = 'F1_bandwidth', main = 'Histogram of feature F1 bandwidth', breaks = 20)
p <- ggplot(data, aes(sample = F1_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F1_bandwidth before transformation')
data$F1_bandwidth <- log10(data$F1_bandwidth)
hist(data$F1_bandwidth, xlab = 'F1_bandwidth', main = 'Histogram of feature F1 bandwidth after \n log10 transformation', breaks = 20)
p <- ggplot(data, aes(sample = F1_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F1_bandwidth after log10 transformation')
##########################################################################
# F2 bandwidth: log10 transform.
hist(data$F2_bandwidth, xlab = 'F2_bandwidth', main = 'Histogram of feature F2 bandwidth', breaks = 20)
p <- ggplot(data, aes(sample = F2_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F2_bandwidth before transformation')
data$F2_bandwidth <- log10(data$F2_bandwidth)
hist(data$F2_bandwidth, xlab = 'F2_bandwidth', main = 'Histogram of feature F2 bandwidth after \n log10 transformation', breaks = 20)
p <- ggplot(data, aes(sample = F2_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F2_bandwidth after log10 transformation')
##########################################################################
# F3 bandwidth: log10 transform.
hist(data$F3_bandwidth, xlab = 'F3_bandwidth', main = 'Histogram of feature F3 bandwidth', breaks = 20)
p <- ggplot(data, aes(sample = F3_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F3_bandwidth before transformation')
data$F3_bandwidth <- log10(data$F3_bandwidth)
hist(data$F3_bandwidth, xlab = 'F3_bandwidth', main = 'Histogram of feature F3 bandwidth after \n log10 transformation', breaks =20)
p <- ggplot(data, aes(sample = F3_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F3_bandwidth after log10 transformation')
########################################################################
# F4 bandwidth: lambda 0.65 Box-Cox.
hist(data$F4_bandwidth, xlab = 'F4_bandwidth', main = 'Histogram of feature F4 bandwidth', breaks =20)
p <- ggplot(data, aes(sample = F4_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F4_bandwidth before transformation')
BoxCox.lambda(data$F4_bandwidth, lower = -5, upper = 5, method = 'loglik')
data$F4_bandwidth<- BoxCox(data$F4_bandwidth, lambda = 0.65)
hist(data$F4_bandwidth, xlab = 'F4_bandwidth', main = 'Histogram of feature F4 bandwidth after \n lambda 0.65 boxcox transformation ', breaks = 20)
p <- ggplot(data, aes(sample = F4_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F4_bandwidth after boxcox transformation')
########################################################################
head(data)
# Keep the feature columns for hypothesis testing and split tokens into
# vowels vs glides.
# NOTE(review): hard-coded column positions 5:20 -- fragile if the input
# schema changes.
test_data <- data[,c(5:20)]
test_data$type
vowel <- test_data%>%filter(type == 'vowel')
glide <- test_data%>%filter(type == 'glide')
# Sanity check: every row is either vowel or glide.
nrow(vowel) + nrow(glide) == nrow(test_data)
# Welch two-sample t-tests per feature; results recorded inline.
t.test(vowel$F1, glide$F1) #significant e-16
t.test(vowel$F2, glide$F2) #significant e-6
t.test(vowel$F3, glide$F3) #not significant
t.test(vowel$F4, glide$F4) # not significant
t.test(vowel$F1_bandwidth, glide$F1_bandwidth) # not significant
t.test(vowel$F2_bandwidth, glide$F2_bandwidth) # significant 0.002355
t.test(vowel$F3_bandwidth, glide$F3_bandwidth) # not significant
t.test(vowel$F4_bandwidth, glide$F4_bandwidth) # not significant
t.test(vowel$duration, glide$duration) # significant e-16
t.test(vowel$intensity, glide$intensity) # significant e-7
# NOTE(review): p = 0.15 is conventionally NOT significant; the inline
# note on the next line looks like a typo.
t.test(vowel$AvePitch, glide$AvePitch) # significant 0.15
t.test(vowel$AveHarmonicity,glide$AveHarmonicity) # not significant
# Collapse each vowel/glide pair to a shared label (i/j -> ji,
# y/h -> hy, u/w -> wu) for a two-way sound x type ANOVA.
two_way_data <- test_data[,c(-3,-4)]
two_way_data$sound <- as.character(two_way_data$sound)
two_way_data$sound[two_way_data$sound == 'i'| two_way_data$sound == 'j'] <- 'ji'
two_way_data$sound[two_way_data$sound == 'y'| two_way_data$sound == 'h'] <- 'hy'
two_way_data$sound[two_way_data$sound == 'u'| two_way_data$sound == 'w'] <- 'wu'
head(two_way_data)
# Two-way ANOVA (sound, type, and their interaction) for every feature.
aov_duration <- aov(duration ~ sound * type,data = two_way_data)
summary(aov_duration)
aov_intensity <- aov(intensity ~ sound * type,data = two_way_data)
summary(aov_intensity)
aov_AvePitch <- aov(AvePitch ~ sound * type,data = two_way_data)
summary(aov_AvePitch)
aov_AveHarmonicity <- aov(AveHarmonicity ~ sound * type,data = two_way_data)
summary(aov_AveHarmonicity)
aov_F1 <- aov(F1 ~ sound * type,data = two_way_data)
summary(aov_F1)
aov_F2 <- aov(F2 ~ sound * type,data = two_way_data)
summary(aov_F2)
aov_F3 <- aov(F3 ~ sound * type,data = two_way_data)
summary(aov_F3)
aov_F4 <- aov(F4 ~ sound * type,data = two_way_data)
summary(aov_F4)
aov_F1_bandwidth <- aov(F1_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F1_bandwidth)
aov_F2_bandwidth <- aov(F2_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F2_bandwidth)
aov_F3_bandwidth <- aov(F3_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F3_bandwidth)
aov_F4_bandwidth <- aov(F4_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F4_bandwidth)
# Linear discriminant analysis (MASS::lda) predicting the six-way `sound`
# label from all acoustic features (type column dropped).
sound_data = test_data[,-2]
head(sound_data)
sound.lda <- lda(sound ~ ., data = sound_data)
sound.lda.values <- predict(sound.lda)
sound.lda.values$x[,1]
# Histogram of the first discriminant, grouped by sound.
ldahist(data = sound.lda.values$x[,1],g=sound_data[,1])
# The interesting thing is: u and w look like two different sounds!
# Same analysis for the two-way `type` label (sound column dropped).
type_data = test_data[,-1]
head(type_data)
type.lda <- lda(type ~ ., data = type_data)
type.lda.values <- predict(type.lda)
ldahist(data = type.lda.values$x[,1],g=type_data[,1], col = 1 )
# NOTE(review): data.pca is only defined further down in this file view, so
# this line relies on an earlier interactive run -- confirm execution order.
ldahist(data = data.pca$x[,3],g=type_data[,1], col = 1)
##############################################
# LDA for the three collapsed vowel/glide pairs (ji / hy / wu)
##############################################
# Uses two_way_data (type column dropped) and plots the first two
# discriminants, grouped by pair label.
pairs_lda_data <- two_way_data[,-2]
pair.lda <-lda(sound ~., data = pairs_lda_data)
pair.lda.values <- predict(pair.lda)
ldahist(data = pair.lda.values$x[,1],g=pairs_lda_data[,1], col = 1 )
ldahist(data = pair.lda.values$x[,2],g=pairs_lda_data[,1], col = 1 )
##############################################
# PCA on the acoustic feature columns 9:20.
##############################################
# Fix: the original first computed na.omit(data[, 9:20]) and then immediately
# overwrote it with the unfiltered columns; the dead assignment is dropped
# (na.omit is applied inside prcomp, as before).
pca_data <- data[, colnames(data)[9:20]]
data.pca <- prcomp(na.omit(pca_data), center = TRUE, scale. = TRUE)
str(data)
plot(data.pca, type = 'lines')   # scree plot
summary(data.pca)
# Attach sound/type labels to the first six PC scores for plotting.
# (as.character() replaces the per-element sapply(.., as.character) -- same result.)
# NOTE(review): this cbind assumes data.pca$x has one row per row of `data`,
# i.e. that columns 9:20 contain no NA at this point -- confirm upstream filtering.
plot_data <- as.data.frame(data.pca$x[, 1:6])
plot_data <- cbind(as.character(data$sound), as.character(data$type), plot_data)
colnames(plot_data) <- c('sound', 'type', 'PC1', 'PC2', 'PC3', 'PC4', 'PC5', 'PC6')
plot_data <- as.data.frame(plot_data)
# Shuffle rows so scatter-plot overplotting is not ordered by class.
# NOTE(review): no set.seed(), so the shuffle is not reproducible.
plot_data <- plot_data[sample(nrow(plot_data)), ]
str(plot_data)
#######################
# visualization of glide
#######################
glide_data_pca <- plot_data %>% filter(type == 'glide')
# Local helper: scatter of two PC-score columns with fixed [-5, 5] axes,
# colored by sound; legend shown only on the bottom row of the grid.
pc_plot_glide <- function(xv, yv, legend_pos) {
  ggplot(glide_data_pca, aes(x = .data[[xv]], y = .data[[yv]], color = sound)) +
    geom_point(alpha = 0.5) + xlim(-5, 5) + ylim(-5, 5) +
    theme(legend.position = legend_pos)
}
p1  <- pc_plot_glide("PC1", "PC2", "none")
p2  <- pc_plot_glide("PC1", "PC3", "none")
p3  <- pc_plot_glide("PC1", "PC4", "bottom")
p4  <- pc_plot_glide("PC2", "PC1", "none")
p5  <- pc_plot_glide("PC2", "PC3", "none")
p6  <- pc_plot_glide("PC2", "PC4", "bottom")
p7  <- pc_plot_glide("PC3", "PC1", "none")
p8  <- pc_plot_glide("PC3", "PC2", "none")
p9  <- pc_plot_glide("PC3", "PC4", "bottom")
p10 <- pc_plot_glide("PC4", "PC1", "none")
p11 <- pc_plot_glide("PC4", "PC2", "none")
p12 <- pc_plot_glide("PC4", "PC3", "bottom")
multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, cols = 4)
#######################
# visualization of vowel
#######################
# NOTE(review): the name glide_data_pca is reused although it now holds the
# *vowel* rows; kept as-is so any later reads see the same object state the
# original produced.
glide_data_pca <- plot_data %>% filter(type == 'vowel')
# Local helper: scatter of two PC-score columns, fixed [-5, 5] axes.
pc_plot_vowel <- function(xv, yv, legend_pos) {
  ggplot(glide_data_pca, aes(x = .data[[xv]], y = .data[[yv]], color = sound)) +
    geom_point(alpha = 0.5) + xlim(-5, 5) + ylim(-5, 5) +
    theme(legend.position = legend_pos)
}
p1  <- pc_plot_vowel("PC1", "PC2", "none")
p2  <- pc_plot_vowel("PC1", "PC3", "none")
p3  <- pc_plot_vowel("PC1", "PC4", "bottom")
p4  <- pc_plot_vowel("PC2", "PC1", "none")
p5  <- pc_plot_vowel("PC2", "PC3", "none")
p6  <- pc_plot_vowel("PC2", "PC4", "bottom")
p7  <- pc_plot_vowel("PC3", "PC1", "none")
p8  <- pc_plot_vowel("PC3", "PC2", "none")
p9  <- pc_plot_vowel("PC3", "PC4", "bottom")
p10 <- pc_plot_vowel("PC4", "PC1", "none")
p11 <- pc_plot_vowel("PC4", "PC2", "none")
p12 <- pc_plot_vowel("PC4", "PC3", "bottom")
multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, cols = 4)
# Same PC-pair grid on the full data set, colored by type (vowel vs. glide);
# axes left free this time.
pc_plot_type <- function(xv, yv, legend_pos) {
  ggplot(plot_data, aes(x = .data[[xv]], y = .data[[yv]], color = type)) +
    geom_point(alpha = 0.5) + theme(legend.position = legend_pos)
}
q1  <- pc_plot_type("PC1", "PC2", "none")
q2  <- pc_plot_type("PC1", "PC3", "none")
q3  <- pc_plot_type("PC1", "PC4", "bottom")
q4  <- pc_plot_type("PC2", "PC1", "none")
q5  <- pc_plot_type("PC2", "PC3", "none")
q6  <- pc_plot_type("PC2", "PC4", "bottom")
q7  <- pc_plot_type("PC3", "PC1", "none")
q8  <- pc_plot_type("PC3", "PC2", "none")
q9  <- pc_plot_type("PC3", "PC4", "bottom")
q10 <- pc_plot_type("PC4", "PC1", "none")
q11 <- pc_plot_type("PC4", "PC2", "none")
q12 <- pc_plot_type("PC4", "PC3", "bottom")
multiplot(q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, cols = 4)
head(data)
# Working subset: columns 5:20 of data -- presumably sound, type and the
# acoustic features used throughout (confirm against the csv header).
test_data <- data[,c(5:20)]
test_data$type
head(test_data)
vowel <- test_data%>%filter(type == 'vowel')
glide <- test_data%>%filter(type == 'glide')
# Sanity check: every row is either a vowel or a glide (should print TRUE).
nrow(vowel) + nrow(glide) == nrow(test_data)
# Prepare a numeric matrix (features x samples) plus an integer group vector
# for export to MATLAB.
# Fix: the original recoded the type column to "1"/"2" inside matlab_data and
# then dropped that column before transposing, so the recode never reached the
# exported matrix; group_data is now derived directly with match() (vowel = 1,
# glide = 2, same codes as before).
matlab_cols <- test_data[, c(-1, -3, -4)]
group_data <- match(as.character(matlab_cols$type), c('vowel', 'glide'))
matlab_data <- t(matlab_cols[, -1])
###############################################################################
# Build the integer group labels + feature matrices consumed by the SWISS
# score below.  The repeated "as.character -> recode -> as.integer" pattern is
# replaced by match(), which maps each label to its 1-based position in the
# level vector and yields exactly the same integer codes (unmatched labels
# become NA in both versions).  filter(sound %in% ...) replaces the chained
# `== .. | == ..` conditions (identical result).
###############################################################################
# --- glides: h = 1, j = 2, w = 3 ---
glide_group      <- match(as.character(glide[, 1]), c('h', 'j', 'w'))
glide_swiss_data <- glide[, c(-1, -2)]
glide_hj       <- glide %>% filter(sound %in% c('h', 'j'))
glide_hj_group <- match(as.character(glide_hj[, 1]), c('h', 'j'))
glide_hj_data  <- glide_hj[, c(-1, -2)]
glide_hw       <- glide %>% filter(sound %in% c('h', 'w'))
glide_hw_group <- match(as.character(glide_hw[, 1]), c('h', 'w'))
glide_hw_data  <- glide_hw[, c(-1, -2)]
glide_jw       <- glide %>% filter(sound %in% c('j', 'w'))
glide_jw_group <- match(as.character(glide_jw[, 1]), c('j', 'w'))
glide_jw_data  <- glide_jw[, c(-1, -2)]
# --- vowels: i = 1, u = 2, y = 3 ---
vowel_group      <- match(as.character(vowel[, 1]), c('i', 'u', 'y'))
vowel_swiss_data <- vowel[, c(-1, -2)]
vowel_iu       <- vowel %>% filter(sound %in% c('i', 'u'))
vowel_iu_group <- match(as.character(vowel_iu[, 1]), c('i', 'u'))
vowel_iu_data  <- vowel_iu[, c(-1, -2)]
vowel_iy       <- vowel %>% filter(sound %in% c('i', 'y'))
vowel_iy_group <- match(as.character(vowel_iy[, 1]), c('i', 'y'))
vowel_iy_data  <- vowel_iy[, c(-1, -2)]
vowel_uy       <- vowel %>% filter(sound %in% c('u', 'y'))
vowel_uy_group <- match(as.character(vowel_uy[, 1]), c('u', 'y'))
vowel_uy_data  <- vowel_uy[, c(-1, -2)]
# --- pooled vowel/glide pairs from two_way_data: ji = 1, wu = 2, hy = 3 ---
head(two_way_data)
swiss_total       <- two_way_data[, -2]
swiss_total_group <- match(as.character(swiss_total[, 1]), c('ji', 'wu', 'hy'))
swiss_total_data  <- swiss_total[, -1]
ji_hy       <- swiss_total %>% filter(sound %in% c('ji', 'hy'))
ji_hy_group <- match(as.character(ji_hy[, 1]), c('ji', 'hy'))
ji_hy_data  <- ji_hy[, -1]
ji_uw       <- swiss_total %>% filter(sound %in% c('ji', 'wu'))
ji_uw_group <- match(as.character(ji_uw[, 1]), c('ji', 'wu'))
ji_uw_data  <- ji_uw[, -1]
hy_uw       <- swiss_total %>% filter(sound %in% c('hy', 'wu'))
hy_uw_group <- match(as.character(hy_uw[, 1]), c('hy', 'wu'))
hy_uw_data  <- hy_uw[, -1]
##############################################################################
## SWISS.R
## Compute the standardized within-class sum-of-squares (SWISS) score:
## for every pair of classes, the ratio of the within-class sums of squares
## to the total sum of squares of the pooled pair, averaged over all pairs.
## Lower values indicate better-separated classes.
## Author: Meilei
##############################################################################
swiss <- function(dat, class) {
  # @ dat:   data matrix / data frame; rows are samples, columns are features
  # @ class: class label of samples (length == nrow(dat))
  dat <- as.matrix(dat)
  # combn() always returns a 2 x n matrix for m = 2, so the original special
  # case guarded by is.null(dim(gpairs)[2]) was dead code; a single loop with
  # a preallocated result covers any number of class pairs.
  gpairs <- combn(unique(class), 2)
  sw <- numeric(ncol(gpairs))
  for (i in seq_len(ncol(gpairs))) {
    c1 <- dat[which(class == gpairs[1, i]), , drop = FALSE]
    c2 <- dat[which(class == gpairs[2, i]), , drop = FALSE]
    pooled <- rbind(c1, c2)
    # Frobenius norm of the column-centered block == sqrt(within-class SS).
    within_ss <- norm(scale(c1, center = TRUE, scale = FALSE), "F")^2 +
      norm(scale(c2, center = TRUE, scale = FALSE), "F")^2
    total_ss <- norm(scale(pooled, center = TRUE, scale = FALSE), "F")^2
    sw[i] <- within_ss / total_ss
  }
  mean(sw)
}
# SWISS scores for every grouping (lower = better separated): first within
# glides, then within vowels, then the pooled ji/wu/hy pairs, then the
# cross-type vowel-vs-glide pairs.
swiss(glide_swiss_data,glide_group)
swiss(glide_hj_data,glide_hj_group)
swiss(glide_hw_data,glide_hw_group)
swiss(glide_jw_data,glide_jw_group)
swiss(vowel_swiss_data,vowel_group)
swiss(vowel_iu_data,vowel_iu_group)
swiss(vowel_iy_data,vowel_iy_group)
swiss(vowel_uy_data,vowel_uy_group)
swiss(swiss_total_data, swiss_total_group)
swiss(ji_hy_data, ji_hy_group)
swiss(ji_uw_data, ji_uw_group)
swiss(hy_uw_data, hy_uw_group)
swiss(type_pair_data,type_pair_group)
swiss(ji_pair_data,ji_pair_group)
swiss(hy_pair_data,hy_pair_group)
swiss(wu_pair_data,wu_pair_group)
###################################################
# Use fewer features for separation: exhaustive search over all feature
# subsets of size 2-4 for the subset with the lowest SWISS score.
# Fix: the original recomputed combn(names, n) twice inside the innermost
# loop; it is now computed once per subset size (`combos`).  The global
# result variables (swiss_score_vector, n_vector, sub_n_vector, names,
# best_n, best_sub_n) are preserved for any downstream use.
##################################################
library(utils)
# -- vowel vs glide (type) --
swiss_score_vector <- n_vector <- sub_n_vector <- c()
names <- colnames(type_pair_data)  # NOTE(review): shadows base::names() at top level
for (n in c(2, 3, 4)) {
  combos <- combn(names, n)
  for (item in seq_len(ncol(combos))) {
    swiss_score_vector <- c(swiss_score_vector,
                            swiss(type_pair_data[, combos[, item]], type_pair_group))
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
###################################################################
# -- i vs j --
swiss_score_vector <- n_vector <- sub_n_vector <- c()
names <- colnames(ji_pair_data)
for (n in c(2, 3, 4)) {
  combos <- combn(names, n)
  for (item in seq_len(ncol(combos))) {
    swiss_score_vector <- c(swiss_score_vector,
                            swiss(ji_pair_data[, combos[, item]], ji_pair_group))
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
###############################
# -- h vs y --
swiss_score_vector <- n_vector <- sub_n_vector <- c()
names <- colnames(hy_pair_data)
for (n in c(2, 3, 4)) {
  combos <- combn(names, n)
  for (item in seq_len(ncol(combos))) {
    swiss_score_vector <- c(swiss_score_vector,
                            swiss(hy_pair_data[, combos[, item]], hy_pair_group))
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
##################
# -- w vs u --
swiss_score_vector <- n_vector <- sub_n_vector <- c()
names <- colnames(wu_pair_data)
for (n in c(2, 3, 4)) {
  combos <- combn(names, n)
  for (item in seq_len(ncol(combos))) {
    swiss_score_vector <- c(swiss_score_vector,
                            swiss(wu_pair_data[, combos[, item]], wu_pair_group))
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
##################
################################
# Same subset search, run over lists of groupings (glides, vowels, pooled
# pairs).  Fixes vs. original: combn() is hoisted out of the inner loop, the
# current data/group are extracted once with [[ ]] instead of
# as.data.frame(list[i]) on every access, and seq_len() replaces 1:n.
#############################
# NOTE(review): these lists drop the first two *remaining* feature columns
# (e.g. glide_swiss_data[, c(-1, -2)]) on top of the sound/type columns
# already removed upstream -- confirm that excluding those two features is
# intentional.
glide_data_list <- list(glide_swiss_data[, c(-1, -2)], glide_hj_data[, c(-1, -2)],
                        glide_hw_data[, c(-1, -2)], glide_jw_data[, c(-1, -2)])
glide_group_list <- list(glide_group, glide_hj_group, glide_hw_group, glide_jw_group)
names_new <- c('swiss', 'hj', 'hw', 'jw')
for (list_num in c(1:4)) {
  print(paste('this is ', names_new[list_num], 'group'))
  current_data <- as.data.frame(glide_data_list[[list_num]])
  current_group <- glide_group_list[[list_num]]
  swiss_score_vector <- n_vector <- sub_n_vector <- c()
  names <- colnames(current_data)
  for (n in c(2, 3, 4)) {
    combos <- combn(names, n)
    for (item in seq_len(ncol(combos))) {
      swiss_score_vector <- c(swiss_score_vector,
                              swiss(current_data[, combos[, item]], current_group))
      n_vector <- c(n_vector, n)
      sub_n_vector <- c(sub_n_vector, item)
    }
  }
  print(min(swiss_score_vector))
  best_n <- n_vector[which.min(swiss_score_vector)]
  best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
  print(combn(names, best_n)[, best_sub_n])
}
############################################
# Reference results from single calls:
#swiss(vowel_swiss_data,vowel_group)
#swiss(vowel_iu_data,vowel_iu_group)
#swiss(vowel_iy_data,vowel_iy_group)
#swiss(vowel_uy_data,vowel_uy_group)
#############################################
vowel_data_list <- list(vowel_swiss_data[, c(-1, -2)], vowel_iu_data[, c(-1, -2)],
                        vowel_iy_data[, c(-1, -2)], vowel_uy_data[, c(-1, -2)])
vowel_group_list <- list(vowel_group, vowel_iu_group, vowel_iy_group, vowel_uy_group)
names_new <- c('swiss', 'iu', 'iy', 'uy')
for (list_num in c(1:4)) {
  print(paste('this is ', names_new[list_num], 'group'))
  current_data <- as.data.frame(vowel_data_list[[list_num]])
  current_group <- vowel_group_list[[list_num]]
  swiss_score_vector <- n_vector <- sub_n_vector <- c()
  names <- colnames(current_data)
  for (n in c(2, 3, 4)) {
    combos <- combn(names, n)
    for (item in seq_len(ncol(combos))) {
      swiss_score_vector <- c(swiss_score_vector,
                              swiss(current_data[, combos[, item]], current_group))
      n_vector <- c(n_vector, n)
      sub_n_vector <- c(sub_n_vector, item)
    }
  }
  print(min(swiss_score_vector))
  best_n <- n_vector[which.min(swiss_score_vector)]
  best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
  print(combn(names, best_n)[, best_sub_n])
}
#################################
# Reference results from single calls:
#swiss(swiss_total_data, swiss_total_group)
#swiss(ji_hy_data, ji_hy_group)
#swiss(ji_uw_data, ji_uw_group)
#swiss(hy_uw_data, hy_uw_group)
total_data_list <- list(swiss_total_data, ji_hy_data, ji_uw_data, hy_uw_data)
total_group_list <- list(swiss_total_group, ji_hy_group, ji_uw_group, hy_uw_group)
names_new <- c('swiss', 'ji_hy', 'ji_uw', 'hy_uw')
for (list_num in c(1:4)) {
  print(paste('this is ', names_new[list_num], 'group'))
  current_data <- as.data.frame(total_data_list[[list_num]])
  current_group <- total_group_list[[list_num]]
  swiss_score_vector <- n_vector <- sub_n_vector <- c()
  names <- colnames(current_data)
  for (n in c(2, 3, 4)) {
    combos <- combn(names, n)
    for (item in seq_len(ncol(combos))) {
      swiss_score_vector <- c(swiss_score_vector,
                              swiss(current_data[, combos[, item]], current_group))
      n_vector <- c(n_vector, n)
      sub_n_vector <- c(sub_n_vector, item)
    }
  }
  print(min(swiss_score_vector))
  best_n <- n_vector[which.min(swiss_score_vector)]
  best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
  print(combn(names, best_n)[, best_sub_n])
}
# Axis-label formatter: rewrites "1e+05"-style output of
# scales::scientific_format() into plotmath expressions ("1 %*% 10^5").
# NOTE(review): defined but apparently unused in this part of the script --
# the plots below pass scientific_format() directly.
scientific_10 <- function(x) {
parse(text=gsub("e", " %*% 10^", scientific_format()(x)))
}
# Pairwise scatter plots of F1, F2, intensity and duration, colored by type.
# Rows are shuffled first so neither class systematically overplots the other.
test_data <- test_data[sample(nrow(test_data)), ]
# Local helper: one panel.  Axes carrying F2 or intensity (large values) get
# scientific-notation labels, and such x axes additionally get rotated tick
# text -- exactly the combinations the unrolled original used.
panel_scatter <- function(xv, yv, legend_pos) {
  sci_vars <- c("F2", "intensity")
  g <- ggplot(test_data, aes(x = .data[[xv]], y = .data[[yv]], color = type)) +
    geom_point(alpha = 0.5)
  if (xv %in% sci_vars) {
    g <- g + theme(legend.position = legend_pos,
                   axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_x_continuous(label = scientific_format())
  } else {
    g <- g + theme(legend.position = legend_pos)
  }
  if (yv %in% sci_vars) {
    g <- g + scale_y_continuous(label = scientific_format())
  }
  g
}
c1  <- panel_scatter("F1", "F2", "none")
c2  <- panel_scatter("F1", "intensity", "none")
c3  <- panel_scatter("F1", "duration", "bottom")
c4  <- panel_scatter("F2", "F1", "none")
c5  <- panel_scatter("F2", "intensity", "none")
c6  <- panel_scatter("F2", "duration", "bottom")
c7  <- panel_scatter("intensity", "F1", "none")
c8  <- panel_scatter("intensity", "F2", "none")
c9  <- panel_scatter("intensity", "duration", "bottom")
c10 <- panel_scatter("duration", "F1", "none")
c11 <- panel_scatter("duration", "F2", "none")
c12 <- panel_scatter("duration", "intensity", "bottom")
multiplot(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, cols = 4)
######################################################################
# One-way ANOVA and box plots to detect differences between phonemes of the
# same type (glides and vowels analysed separately).
######################################################################
glide_data <- test_data %>% filter(type == 'glide')
vowel_data <- test_data %>% filter(type == 'vowel')
# Duration by glide sound.
ggplot(glide_data, aes(x = sound, y = duration)) +
  geom_boxplot(fill = "grey80", colour = "blue") +
  scale_x_discrete() + xlab("Sound") +
  ylab("Duration of each glide sound")
# F4 bandwidth by glide sound (original note: F1-F4 all show differences).
# Label fix: the original reused "Treatment Group" / "Duration of each glide
# sound" labels that did not match the plotted variables.
ggplot(glide_data, aes(x = sound, y = F4_bandwidth)) + # F1 works # F2 significant! # F3 also works # F4 works
  geom_boxplot(fill = "grey80", colour = "blue") +
  scale_x_discrete() + xlab("Sound") +
  ylab("F4 bandwidth of each glide sound")
# F4 bandwidth by vowel sound.
ggplot(vowel_data, aes(x = sound, y = F4_bandwidth)) + # F2 works # F3 probably # F4 important
  geom_boxplot(fill = "grey80", colour = "blue") +
  scale_x_discrete() + xlab("Sound") +
  ylab("F4 bandwidth of each vowel sound")
#################################################################
# Per-type linear models: does `sound` predict each formant?
# NOTE(review): the second element of options(contrasts=) normally names the
# contrast for *ordered* factors (default "contr.poly"); setting both to
# "contr.treatment" is unusual -- confirm it is intentional.
options(contrasts=c("contr.treatment", "contr.treatment"))
lm_glide_1 <- lm(F1 ~ sound, data = glide_data) # F1
summary(lm_glide_1)
lm_glide_2 <- lm(F2 ~ sound, data = glide_data) # F2
summary(lm_glide_2)
lm_glide_3 <- lm(F3 ~ sound, data = glide_data) # F3
summary(lm_glide_3)
lm_glide_4 <- lm(F4 ~ sound, data = glide_data) # F4
summary(lm_glide_4)
# all these four are significant!
##################################################
# Same per-sound models within vowels; inline comments record the observed
# significance from a previous interactive run.
options(contrasts=c("contr.treatment", "contr.treatment"))
lm_vowel_2 <- lm(F2 ~ sound, data = vowel_data) # F2 less than e-16
summary(lm_vowel_2)
lm_vowel_3 <- lm(F3 ~ sound, data = vowel_data) # F3 less than e-16
summary(lm_vowel_3)
lm_vowel_4 <- lm(F4 ~ sound, data = vowel_data) # F4 less than e-16
summary(lm_vowel_4)
lm_vowel_5 <- lm(duration ~ sound, data = vowel_data) # duration works!
summary(lm_vowel_5)
lm_vowel_6 <- lm(AvePitch ~ sound, data = vowel_data) # AvePitch works!
summary(lm_vowel_6)
lm_vowel_7 <- lm(F1_bandwidth ~ sound, data = vowel_data) # F1_bandwidth
summary(lm_vowel_7)
lm_vowel_8 <- lm(F2_bandwidth ~ sound, data = vowel_data) # F2_bandwidth
summary(lm_vowel_8)
lm_vowel_9 <- lm(F3_bandwidth ~ sound, data = vowel_data) # F3_bandwidth
summary(lm_vowel_9)
lm_vowel_10 <- lm(F4_bandwidth ~ sound, data = vowel_data) # F4_bandwidth really significant
summary(lm_vowel_10)
'F1, F2, F3, F4, duration, AvePitch, F1_bandwidth, F2_bandwidth, F3_bandwidth, F4_bandwidth'
###################################################
# Pooled model over all six sounds.
lm_all_1 <- lm(F1 ~ sound, data = test_data) # Only F1 significant in all six sounds
summary(lm_all_1)
################################################
# One-way ANOVA of every acoustic feature against sound, run separately for
# vowels and glides; prints exactly the same summaries, in the same order,
# as the unrolled original.
responses <- c("duration", "intensity", "AvePitch", "AveHarmonicity",
               "F1", "F2", "F3", "F4",
               "F1_bandwidth", "F2_bandwidth", "F3_bandwidth", "F4_bandwidth")
head(vowel_data)
for (resp in responses) {
  print(summary(aov(as.formula(paste(resp, "~ sound")), data = vowel_data)))
}
head(glide_data)
for (resp in responses) {
  print(summary(aov(as.formula(paste(resp, "~ sound")), data = glide_data)))
}
# ---- end of first script; the lines below originate from final_R_2.r ----
# (A stray dataset-metadata row, plus that script's leading rm(list=ls()),
# previously sat here and broke parsing; both are removed.)
# ---- Setup: libraries, working directory, raw data ----
library(ggplot2)
library(MASS)
# install.packages('ggfortify')  # run once if missing
library(ggfortify)
library(scatterplot3d)
library(dplyr)
library(forecast)
library(grid)
library(scales)
library(combinat)
# Fix: the original called setwd() twice unconditionally (Windows then Mac
# path); the call for the path that does not exist errors and halts the
# script.  Use whichever directory actually exists on this machine.
for (wd in c("C:/Users/bruce/Google Drive/765 Project/765_project",
             "~/Google Drive/765 Project/765_project")) {
  if (dir.exists(wd)) {
    setwd(wd)
    break
  }
}
data <- as.data.frame(read.csv('data-wenyue.csv'))
# Drop the derived ratio columns, keeping only raw measurements.
data <- data[, !(colnames(data) %in% c('F2.F1', 'F3.F2'))]
# Missing measurements are exported as the literal string '--undefined--'.
data[data == '--undefined--'] <- NA
# Quick sanity checks: which sounds are glides, and how many F4 values are NA.
unique((data %>% filter(type == 'glide'))$sound)
sum(is.na(data$F4))
######################################################################
'Histogram visualization part'
'Before any transformation'
######################################################################
# One histogram per feature on the raw (untransformed) data, to eyeball
# skewness before deciding on transformations below.
# NOTE(review): the first xlab reads 'Phone Duration' while later plots say
# 'Phoneme Duration' -- likely a typo, but axis text is runtime output so it
# is left unchanged here.
head(data)
hist(data$duration,xlab = 'Phone Duration' , breaks = 20, main = 'Histogram of feature Duration')
hist(data$intensity, xlab = 'Intensity', breaks = 20, main = 'Histogram of feature Intensity')
hist(as.numeric(data$AvePitch), xlab = 'Average Pitch',breaks = 20, main = 'Histogram of feature Average Pitch')
hist(data$AveHarmonicity, xlab = 'Average Harmonicity',breaks = 20, main = 'Histogram of feature Average Harmonicity')
hist(data$F1, xlab = 'F1' ,breaks = 20, main ='Histogram of feature F1')
hist(data$F2, xlab = 'F2' , breaks = 20, main ='Histogram of feature F2')
hist(data$F3, xlab = 'F3' , breaks = 20,main ='Histogram of feature F3')
hist(as.numeric(data$F4), xlab = 'F4' ,breaks = 20, main ='Histogram of feature F4')
hist(as.numeric(data$F5), xlab = 'F5' ,breaks = 20, main ='Histogram of feature F5')
hist(data$F1_bandwidth, xlab = 'F1_bandwidth',breaks = 20, main = 'Histogram of feature F1 bandwidth')
hist(data$F2_bandwidth, xlab = 'F2_bandwidth', breaks = 20, main = 'Histogram of feature F2 bandwidth')
hist(data$F3_bandwidth, xlab = 'F3_bandwidth', breaks = 20,main = 'Histogram of feature F3 bandwidth')
hist(as.numeric(data$F4_bandwidth), xlab = 'F4_bandwidth', breaks = 20,main = 'Histogram of feature F4 bandwidth')
hist(as.numeric(data$F5_bandwidth), xlab = 'F5_bandwidth',breaks = 20, main = 'Histogram of feature F5 bandwidth')
######################################################################
# Identify features containing missing values.
# Fix: '--undefined--' was already converted to NA above, so checking NA
# alone suffices.  The original loop also compared each column against the
# literal string -- after the conversion that comparison can only yield NA,
# and the scalar if() relied on `|` short-circuit luck to avoid an
# if(NA) error.  The vapply form below returns the same column names.
#####################################################################
na_row <- names(data)[vapply(data, anyNA, logical(1))]
na_row
# Keep only rows where the two affected features are observed
# (!is.na() replaces the original `is.na(...) != T`).
data <- data[!is.na(data$F4), ]
data <- data[!is.na(data$AvePitch), ]
# Drop columns 17 and 22 -- F5 and F5_bandwidth per the original (truncated)
# comment "because of ba"; confirm the indices against the current layout.
data <- data[, c(-17, -22)] # delete F5 and F5_bandwidth because of ba
####################################################################
'Use a function to plot multiple ggplots in same plot'
###################################################################
# Arrange several ggplot objects on one page using the grid package
# (this is the widely circulated "Cookbook for R" multiplot helper).
# @ ...:      individual plot objects
# @ plotlist: alternatively, a list of plots (combined with ...)
# @ file:     NOTE(review): declared but never used in the body
# @ cols:     number of columns when no explicit layout is given
# @ layout:   optional matrix; cell value i places plot i at that position
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)), ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page: one viewport cell per layout cell
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
####################################################################
'data type conversion'
####################################################################
# Coerce measurement columns to double.
# NOTE(review): F4 goes through as.character() first, which is the safe route
# if read.csv produced a factor; F4_bandwidth does not -- if that column were
# a factor, as.double() would return level codes, not the values.  Confirm
# the column classes after read.csv.
data$intensity <- as.double(data$intensity) # convert from integer to double
data$F4 <- as.double(as.character(data$F4)) # convert from integer to double
#data$F5 <- as.double(data$F5) # convert from integer to double
data$F4_bandwidth <- as.double(data$F4_bandwidth) # convert from integer to double
#data$F5_bandwidth <- as.double(data$F5_bandwidth) # convert from integer to double
data$AvePitch <- as.double(data$AvePitch)
# maybe we need to give duration a log transformation (done just below)
hist(data$duration, xlab = 'Phoneme Duration' ,breaks = 20, main = 'Histogram of feature Duration')
p <- ggplot(data, aes(sample = duration))
p + stat_qq() + ggtitle('QQ-plot of Duration before transformation')
# for duration, we need to give it a transformation, log 10
data$duration = log10(data$duration)
hist(data$duration,xlab = 'Phoneme Duration' ,breaks = 20, main = 'Histogram of feature Duration after log10 transformation')
# much better after log 10
p <- ggplot(data, aes(sample = duration))
p + stat_qq() + ggtitle('QQ-plot of Duration after log10 transformation')
##########################################################################
hist(data$intensity, xlab = 'intensity', main = 'Histogram of feature intensity',breaks = 20)
p <- ggplot(data, aes(sample = intensity))
p + stat_qq() + ggtitle('QQ-plot of intensity before transformation')
BoxCox.lambda(data$intensity, method = "loglik", lower = -5, upper = 5)
data$intensity <- BoxCox(data$intensity, lambda = 3.5)
hist(data$intensity,xlab = 'intensity', main = 'Histogram of feature intensity after \n lambda 3.5 boxcox transformation', breaks = 20)
p <- ggplot(data, aes(sample = intensity))
p + stat_qq() + ggtitle('QQ-plot of intensity after boxcox transformation')
########################################################################
hist(as.numeric(data$AvePitch), xlab = 'Average Pitch',breaks = 20, main = 'Histogram of feature Average Pitch')
p <- ggplot(data, aes(sample = AvePitch))
p + stat_qq() +ggtitle('QQ-plot of average pitch before transformation')
BoxCox.lambda(data$AvePitch, method = 'loglik')
data$AvePitch <- BoxCox(data$AvePitch, lambda = 0.5)
hist(data$AvePitch, breaks = 20, xlab = 'Average Pitch', main = 'Histogram of feature Average Pitch after \n lambda 0.5 boxcox transformation')
p <- ggplot(data, aes(sample = AvePitch))
p + stat_qq() +ggtitle('QQ-plot of average pitch after square root transformation')
#######################################################################
hist(data$AveHarmonicity, xlab = 'Average Harmonicity',breaks = 20, main = 'Histogram of feature Average Harmonicity')
hist(data$AveHarmonicity, xlab = 'Average Harmonicity',breaks = 20, main = 'Histogram of feature Average Harmonicity \n without transformation')
p <- ggplot(data, aes(sample = AveHarmonicity))
p + stat_qq() +ggtitle('QQ-plot of average harmonicity without transformation')
########################################################################
# --- F1: right-skewed; log10 normalizes it -----------------------------
hist(data$F1, xlab = 'F1' , main ='Histogram of feature F1', breaks =20)
p <- ggplot(data, aes(sample = F1))
p + stat_qq() + ggtitle('QQ-plot of F1 before transformation')
data$F1 <- log10(data$F1)
hist(data$F1, xlab = 'F1' , main ='Histogram of feature F1 after log10 transformation', breaks = 20)
p <- ggplot(data, aes(sample = F1))
p + stat_qq() + ggtitle('QQ-plot of F1 after log10 transformation')
##########################################################################
# --- F2: Box-Cox, lambda 2.15 hard-coded from the BoxCox.lambda run
# above it -- NOTE(review): re-verify if the data changes.
hist(data$F2, xlab = 'F2' , main ='Histogram of feature F2', breaks = 20)
p <- ggplot(data, aes(sample = F2))
p + stat_qq() + ggtitle('QQ-plot of F2 before transformation')
BoxCox.lambda(data$F2, method = 'loglik',lower = -5, upper =5 )
data$F2 <- BoxCox(data$F2, lambda = 2.15)
hist(data$F2, xlab = 'F2' , main ='Histogram of feature F2 after \n lambda 2.15 boxcox transformation', breaks =20)
p <- ggplot(data, aes(sample = F2))
p + stat_qq() + ggtitle('QQ-plot of F2 after boxcox transformation')
###########################################################################
# --- F3: Box-Cox, lambda 0.4 (same hard-coded-from-printout pattern).
hist(data$F3, xlab = 'F3' , main ='Histogram of feature F3', breaks = 20)
p <- ggplot(data, aes(sample = F3))
p + stat_qq() + ggtitle('QQ-plot of F3 before transformation')
BoxCox.lambda(data$F3, lower = -5, upper =5 ,method = 'loglik' )
data$F3 = BoxCox(data$F3, lambda = 0.4)
hist(data$F3, xlab = 'F3' , main ='Histogram of feature F3 after \n lambda 0.4 boxcox transformation', breaks =20)
p <- ggplot(data, aes(sample = F3))
p + stat_qq() + ggtitle('QQ-plot of F3 after boxcox transformation')
##########################################################################
# --- F4: Box-Cox, lambda 0.4.
hist(data$F4, xlab = 'F4' , main ='Histogram of feature F4', breaks = 20)
p <- ggplot(data, aes(sample = F4))
p + stat_qq() + ggtitle('QQ-plot of F4 before transformation')
BoxCox.lambda(data$F4, lower = -5, upper =5 , method = 'loglik')
data$F4 = BoxCox(data$F4, lambda = 0.4)
hist(data$F4, xlab = 'F4' , main ='Histogram of feature F4 after \n lambda 0.4 boxcox transformation', breaks =20)
p <- ggplot(data, aes(sample = F4))
p + stat_qq() + ggtitle('QQ-plot of F4 after boxcox transformation')
##########################################################################
# --- F1-F3 bandwidths: log10 works for all three -----------------------
hist(data$F1_bandwidth, xlab = 'F1_bandwidth', main = 'Histogram of feature F1 bandwidth', breaks = 20)
p <- ggplot(data, aes(sample = F1_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F1_bandwidth before transformation')
data$F1_bandwidth <- log10(data$F1_bandwidth)
hist(data$F1_bandwidth, xlab = 'F1_bandwidth', main = 'Histogram of feature F1 bandwidth after \n log10 transformation', breaks = 20)
p <- ggplot(data, aes(sample = F1_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F1_bandwidth after log10 transformation')
##########################################################################
hist(data$F2_bandwidth, xlab = 'F2_bandwidth', main = 'Histogram of feature F2 bandwidth', breaks = 20)
p <- ggplot(data, aes(sample = F2_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F2_bandwidth before transformation')
data$F2_bandwidth <- log10(data$F2_bandwidth)
hist(data$F2_bandwidth, xlab = 'F2_bandwidth', main = 'Histogram of feature F2 bandwidth after \n log10 transformation', breaks = 20)
p <- ggplot(data, aes(sample = F2_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F2_bandwidth after log10 transformation')
##########################################################################
hist(data$F3_bandwidth, xlab = 'F3_bandwidth', main = 'Histogram of feature F3 bandwidth', breaks = 20)
p <- ggplot(data, aes(sample = F3_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F3_bandwidth before transformation')
data$F3_bandwidth <- log10(data$F3_bandwidth)
hist(data$F3_bandwidth, xlab = 'F3_bandwidth', main = 'Histogram of feature F3 bandwidth after \n log10 transformation', breaks =20)
p <- ggplot(data, aes(sample = F3_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F3_bandwidth after log10 transformation')
########################################################################
# --- F4 bandwidth: Box-Cox, lambda 0.65.
hist(data$F4_bandwidth, xlab = 'F4_bandwidth', main = 'Histogram of feature F4 bandwidth', breaks =20)
p <- ggplot(data, aes(sample = F4_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F4_bandwidth before transformation')
BoxCox.lambda(data$F4_bandwidth, lower = -5, upper = 5, method = 'loglik')
data$F4_bandwidth<- BoxCox(data$F4_bandwidth, lambda = 0.65)
hist(data$F4_bandwidth, xlab = 'F4_bandwidth', main = 'Histogram of feature F4 bandwidth after \n lambda 0.65 boxcox transformation ', breaks = 20)
p <- ggplot(data, aes(sample = F4_bandwidth))
p + stat_qq() + ggtitle('QQ-plot of F4_bandwidth after boxcox transformation')
########################################################################
# Keep label columns + transformed acoustic features for hypothesis tests.
head(data)
test_data <- data[,c(5:20)]
test_data$type
# Split by phoneme type; the equality check confirms the two subsets
# partition test_data exactly (should print TRUE).
vowel <- test_data%>%filter(type == 'vowel')
glide <- test_data%>%filter(type == 'glide')
nrow(vowel) + nrow(glide) == nrow(test_data)
# Per-feature Welch two-sample t-tests: vowel vs glide. p-values noted
# inline are from the original run -- TODO confirm on re-run.
t.test(vowel$F1, glide$F1) #significant e-16
t.test(vowel$F2, glide$F2) #significant e-6
t.test(vowel$F3, glide$F3) #not significant
t.test(vowel$F4, glide$F4) # not significant
t.test(vowel$F1_bandwidth, glide$F1_bandwidth) # not significant
t.test(vowel$F2_bandwidth, glide$F2_bandwidth) # significant 0.002355
t.test(vowel$F3_bandwidth, glide$F3_bandwidth) # not significant
t.test(vowel$F4_bandwidth, glide$F4_bandwidth) # not significant
t.test(vowel$duration, glide$duration) # significant e-16
t.test(vowel$intensity, glide$intensity) # significant e-7
t.test(vowel$AvePitch, glide$AvePitch) # p = 0.15 -- NOTE(review): above 0.05, so NOT significant (original comment mislabeled this)
t.test(vowel$AveHarmonicity,glide$AveHarmonicity) # not significant
# Merge each vowel with its articulatorily paired glide into one label
# (i/j -> 'ji', y/h -> 'hy', u/w -> 'wu') for two-way analyses below.
two_way_data <- test_data[,c(-3,-4)]
two_way_data$sound <- as.character(two_way_data$sound)
two_way_data$sound[two_way_data$sound == 'i'| two_way_data$sound == 'j'] <- 'ji'
two_way_data$sound[two_way_data$sound == 'y'| two_way_data$sound == 'h'] <- 'hy'
two_way_data$sound[two_way_data$sound == 'u'| two_way_data$sound == 'w'] <- 'wu'
head(two_way_data)
# Two-way ANOVA (merged sound pair x phoneme type, with interaction)
# for each acoustic feature; summaries auto-print at top level.
aov_duration <- aov(duration ~ sound * type,data = two_way_data)
summary(aov_duration)
aov_intensity <- aov(intensity ~ sound * type,data = two_way_data)
summary(aov_intensity)
aov_AvePitch <- aov(AvePitch ~ sound * type,data = two_way_data)
summary(aov_AvePitch)
aov_AveHarmonicity <- aov(AveHarmonicity ~ sound * type,data = two_way_data)
summary(aov_AveHarmonicity)
aov_F1 <- aov(F1 ~ sound * type,data = two_way_data)
summary(aov_F1)
aov_F2 <- aov(F2 ~ sound * type,data = two_way_data)
summary(aov_F2)
aov_F3 <- aov(F3 ~ sound * type,data = two_way_data)
summary(aov_F3)
aov_F4 <- aov(F4 ~ sound * type,data = two_way_data)
summary(aov_F4)
aov_F1_bandwidth <- aov(F1_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F1_bandwidth)
aov_F2_bandwidth <- aov(F2_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F2_bandwidth)
aov_F3_bandwidth <- aov(F3_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F3_bandwidth)
aov_F4_bandwidth <- aov(F4_bandwidth ~ sound * type,data = two_way_data)
summary(aov_F4_bandwidth)
# LDA (MASS package) on the six individual sounds; histogram of the
# first discriminant scores grouped by sound.
sound_data = test_data[,-2]
head(sound_data)
sound.lda <- lda(sound ~ ., data = sound_data)
sound.lda.values <- predict(sound.lda)
sound.lda.values$x[,1]
ldahist(data = sound.lda.values$x[,1],g=sound_data[,1])
#The interesting thing is, u and w are two different sound!
# LDA on type (vowel vs glide) -- a binary problem yields one discriminant.
type_data = test_data[,-1]
head(type_data)
type.lda <- lda(type ~ ., data = type_data)
type.lda.values <- predict(type.lda)
ldahist(data = type.lda.values$x[,1],g=type_data[,1], col = 1 )
# NOTE(review): data.pca is not defined until the PCA section below, so
# this line errors if the script is sourced top-to-bottom; it should be
# moved after the prcomp() call.
ldahist(data = data.pca$x[,3],g=type_data[,1], col = 1)
##############################################
# LDA for pairs
###############################3##############
# LDA on the three merged sound pairs (ji / hy / wu); two discriminants.
pairs_lda_data <- two_way_data[,-2]
pair.lda <-lda(sound ~., data = pairs_lda_data)
pair.lda.values <- predict(pair.lda)
ldahist(data = pair.lda.values$x[,1],g=pairs_lda_data[,1], col = 1 )
ldahist(data = pair.lda.values$x[,2],g=pairs_lda_data[,1], col = 1 )
# PCA over the 12 acoustic features (columns 9:20), centered and scaled.
# NOTE(review): the first assignment is dead code -- it is immediately
# overwritten by the next line; na.omit is re-applied inside prcomp anyway.
pca_data <- na.omit(data[,colnames(data)[9:20]])
pca_data <- data[,colnames(data)[9:20]]
data.pca<- prcomp(na.omit(pca_data), center=T, scale. = T)
str(data)
plot(data.pca, type = 'lines')
summary(data.pca)
# Attach sound/type labels to the first six PC scores for plotting.
# NOTE(review): the cbind assumes na.omit dropped no rows (F4/AvePitch NA
# rows were removed earlier) so scores align with data -- confirm.
plot_data <- as.data.frame(data.pca$x[,1:6])
plot_data<-cbind(sapply(data$sound, as.character), sapply(data$type, as.character),plot_data)
colnames(plot_data) <- c('sound','type','PC1','PC2','PC3','PC4','PC5','PC6')
plot_data<-as.data.frame(plot_data)
plot_data <- plot_data[sample(nrow(plot_data)),] # shuffle data
str(plot_data)
#######################
# visualization of glide
#######################
# All ordered pairs of PC1..PC4 plotted against each other, colored by
# sound, arranged in a 4-column grid via multiplot(). Axes clipped to
# [-5, 5]; only the bottom row keeps a legend.
glide_data_pca <- plot_data %>% filter(type == 'glide')
p1 <- ggplot(glide_data_pca, aes(x=PC1, y =PC2, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p2 <- ggplot(glide_data_pca, aes(x=PC1, y =PC3, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p3 <- ggplot(glide_data_pca, aes(x=PC1, y =PC4, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
p4 <- ggplot(glide_data_pca, aes(x=PC2, y =PC1, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p5 <- ggplot(glide_data_pca, aes(x=PC2, y =PC3, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p6 <- ggplot(glide_data_pca, aes(x=PC2, y =PC4, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
p7 <- ggplot(glide_data_pca, aes(x=PC3, y =PC1, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p8 <- ggplot(glide_data_pca, aes(x=PC3, y =PC2, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p9 <- ggplot(glide_data_pca, aes(x=PC3, y =PC4, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
p10 <- ggplot(glide_data_pca, aes(x=PC4, y =PC1, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p11 <- ggplot(glide_data_pca, aes(x=PC4, y =PC2, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p12 <- ggplot(glide_data_pca, aes(x=PC4, y =PC3, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, cols=4)
#######################
# visualization of vowel
#######################
# Same grid for vowels. NOTE(review): the variable is still called
# glide_data_pca despite holding vowel rows -- misleading name.
glide_data_pca <- plot_data %>% filter(type == 'vowel')
p1 <- ggplot(glide_data_pca, aes(x=PC1, y =PC2, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p2 <- ggplot(glide_data_pca, aes(x=PC1, y =PC3, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p3 <- ggplot(glide_data_pca, aes(x=PC1, y =PC4, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
p4 <- ggplot(glide_data_pca, aes(x=PC2, y =PC1, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p5 <- ggplot(glide_data_pca, aes(x=PC2, y =PC3, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p6 <- ggplot(glide_data_pca, aes(x=PC2, y =PC4, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
p7 <- ggplot(glide_data_pca, aes(x=PC3, y =PC1, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p8 <- ggplot(glide_data_pca, aes(x=PC3, y =PC2, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p9 <- ggplot(glide_data_pca, aes(x=PC3, y =PC4, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
p10 <- ggplot(glide_data_pca, aes(x=PC4, y =PC1, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p11 <- ggplot(glide_data_pca, aes(x=PC4, y =PC2, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5)+ theme(legend.position = "none")
p12 <- ggplot(glide_data_pca, aes(x=PC4, y =PC3, color = sound)) + geom_point(alpha= 0.5) + xlim(-5, 5) + ylim(-5, 5) + theme(legend.position = "bottom")
multiplot(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, cols=4)
# Same PC-pair grid over ALL rows, colored by type (vowel vs glide).
q1 <- ggplot(plot_data, aes(x=PC1, y =PC2, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q2 <- ggplot(plot_data, aes(x=PC1, y =PC3, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q3 <- ggplot(plot_data, aes(x=PC1, y =PC4, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "bottom")
q4 <- ggplot(plot_data, aes(x=PC2, y =PC1, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q5 <- ggplot(plot_data, aes(x=PC2, y =PC3, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q6 <- ggplot(plot_data, aes(x=PC2, y =PC4, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "bottom")
q7 <- ggplot(plot_data, aes(x=PC3, y =PC1, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q8 <- ggplot(plot_data, aes(x=PC3, y =PC2, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q9 <- ggplot(plot_data, aes(x=PC3, y =PC4, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "bottom")
q10 <- ggplot(plot_data, aes(x=PC4, y =PC1, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q11 <- ggplot(plot_data, aes(x=PC4, y =PC2, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "none")
q12 <- ggplot(plot_data, aes(x=PC4, y =PC3, color = type)) + geom_point(alpha= 0.5) + theme(legend.position = "bottom")
multiplot(q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11, q12, cols=4)
# ---------------------------------------------------------------------
# Build data/group pairs for the swiss() separation scores below and an
# export matrix for MATLAB. Pattern throughout: take a subset, pull the
# label column, recode labels to small integers, drop label columns.
# ---------------------------------------------------------------------
head(data)
test_data <- data[,c(5:20)]
test_data$type
head(test_data)
vowel <- test_data%>%filter(type == 'vowel')
glide <- test_data%>%filter(type == 'glide')
nrow(vowel) + nrow(glide) == nrow(test_data)
# MATLAB export: type coded 1 = vowel, 2 = glide; features transposed so
# rows become features (presumably the MATLAB-side convention -- verify).
matlab_data <- test_data[,c(-1,-3,-4)]
matlab_data$type <- as.character(matlab_data$type)
matlab_data$type[matlab_data$type == 'vowel'] <- 1
matlab_data$type[matlab_data$type == 'glide'] <- 2
group_data <- as.integer(matlab_data[,1])
matlab_data <- matlab_data[,-1]
matlab_data <- t(matlab_data)
# Glides: all three together (h=1, j=2, w=3) ...
glide_group <- glide[,1]
glide_swiss_data <- glide[,c(-1,-2)]
glide_group <- as.character(glide_group)
glide_group[glide_group == 'h'] <- 1
glide_group[glide_group == 'j'] <- 2
glide_group[glide_group == 'w'] <- 3
glide_group <- as.integer(glide_group)
# ... and each pairwise glide contrast.
glide_hj <- glide%>% filter(sound == 'h'| sound =='j')
glide_hj_group <-glide_hj[,1]
glide_hj_data <- glide_hj[,c(-1,-2)]
glide_hj_group <- as.character(glide_hj_group)
glide_hj_group[glide_hj_group == 'h'] <- 1
glide_hj_group[glide_hj_group == 'j'] <- 2
glide_hj_group <- as.integer(glide_hj_group)
glide_hw <- glide%>% filter(sound == 'h'| sound =='w')
glide_hw_group <-glide_hw[,1]
glide_hw_data <- glide_hw[,c(-1,-2)]
glide_hw_group <- as.character(glide_hw_group)
glide_hw_group[glide_hw_group == 'h'] <- 1
glide_hw_group[glide_hw_group == 'w'] <- 2
glide_hw_group <- as.integer(glide_hw_group)
glide_jw <- glide%>% filter(sound == 'j'| sound =='w')
glide_jw_group <-glide_jw[,1]
glide_jw_data <- glide_jw[,c(-1,-2)]
glide_jw_group <- as.character(glide_jw_group)
glide_jw_group[glide_jw_group == 'j'] <- 1
glide_jw_group[glide_jw_group == 'w'] <- 2
glide_jw_group <- as.integer(glide_jw_group)
# Vowels: all three together (i=1, u=2, y=3) and each pairwise contrast.
vowel_group <- vowel[,1]
vowel_swiss_data <- vowel[,c(-1,-2)]
vowel_group <- as.character(vowel_group)
vowel_group[vowel_group == 'i'] <- 1
vowel_group[vowel_group == 'u'] <- 2
vowel_group[vowel_group == 'y'] <- 3
vowel_group <- as.integer(vowel_group)
vowel_iu <- vowel%>% filter(sound == 'i'| sound =='u')
vowel_iu_group <- vowel_iu[,1]
vowel_iu_data <- vowel_iu[,c(-1,-2)]
vowel_iu_group <- as.character(vowel_iu_group)
vowel_iu_group[vowel_iu_group == 'i'] <- 1
vowel_iu_group[vowel_iu_group == 'u'] <- 2
vowel_iu_group <- as.integer(vowel_iu_group)
vowel_iy <- vowel%>% filter(sound == 'i'| sound =='y')
vowel_iy_group <- vowel_iy[,1]
vowel_iy_data <- vowel_iy[,c(-1,-2)]
vowel_iy_group <- as.character(vowel_iy_group)
vowel_iy_group[vowel_iy_group == 'i'] <- 1
vowel_iy_group[vowel_iy_group == 'y'] <- 2
vowel_iy_group <- as.integer(vowel_iy_group)
vowel_uy <- vowel%>% filter(sound == 'u'| sound =='y')
vowel_uy_group <- vowel_uy[,1]
vowel_uy_data <- vowel_uy[,c(-1,-2)]
vowel_uy_group <- as.character(vowel_uy_group)
vowel_uy_group[vowel_uy_group == 'u'] <- 1
vowel_uy_group[vowel_uy_group == 'y'] <- 2
vowel_uy_group <- as.integer(vowel_uy_group)
# Merged pairs (ji / wu / hy): all three together and each contrast.
head(two_way_data)
swiss_total <- two_way_data[,-2]
swiss_total_group <- swiss_total[,1]
swiss_total_data <- swiss_total[,-1]
swiss_total_group <- as.character(swiss_total_group)
swiss_total_group[swiss_total_group == 'ji'] <- 1
swiss_total_group[swiss_total_group == 'wu'] <- 2
swiss_total_group[swiss_total_group == 'hy'] <- 3
swiss_total_group <- as.integer(swiss_total_group)
ji_hy <- swiss_total%>%filter(sound =='ji'|sound =='hy')
ji_hy_group <- ji_hy[,1]
ji_hy_data <- ji_hy[,-1]
ji_hy_group <- as.character(ji_hy_group)
ji_hy_group[ji_hy_group == 'ji'] <-1
ji_hy_group[ji_hy_group == 'hy'] <-2
ji_hy_group <- as.integer(ji_hy_group)
ji_uw <- swiss_total%>%filter(sound =='ji'|sound =='wu')
ji_uw_group <- ji_uw[,1]
ji_uw_data <- ji_uw[,-1]
ji_uw_group <- as.character(ji_uw_group)
ji_uw_group[ji_uw_group == 'ji'] <-1
ji_uw_group[ji_uw_group == 'wu'] <-2
ji_uw_group <- as.integer(ji_uw_group)
hy_uw <- swiss_total%>%filter(sound =='hy'|sound =='wu')
hy_uw_group <- hy_uw[,1]
hy_uw_data <- hy_uw[,-1]
hy_uw_group <- as.character(hy_uw_group)
hy_uw_group[hy_uw_group == 'hy'] <-1
hy_uw_group[hy_uw_group == 'wu'] <-2
hy_uw_group <- as.integer(hy_uw_group)
# Individual vowel/glide pairs (ji, hy, wu) and the vowel-vs-glide split.
head(test_data)
ji_pair <- test_data[,c(-2,-3,-4)] %>% filter(sound == 'i'| sound == 'j')
ji_pair_group <- as.character(ji_pair$sound)
ji_pair_group[ji_pair_group == 'i'] <- 1
ji_pair_group[ji_pair_group == 'j'] <- 2
ji_pair_group <- as.integer(ji_pair_group)
ji_pair_data <- ji_pair[,-1]
hy_pair <- test_data[,c(-2,-3,-4)] %>% filter(sound == 'h'| sound == 'y')
hy_pair_group <- as.character(hy_pair$sound)
hy_pair_group[hy_pair_group == 'h'] <- 1
hy_pair_group[hy_pair_group == 'y'] <- 2
hy_pair_group <- as.integer(hy_pair_group)
hy_pair_data <- hy_pair[,-1]
wu_pair <- test_data[,c(-2,-3,-4)] %>% filter(sound == 'w'| sound == 'u')
wu_pair_group <- as.character(wu_pair$sound)
wu_pair_group[wu_pair_group == 'w'] <- 1
wu_pair_group[wu_pair_group == 'u'] <- 2
wu_pair_group <- as.integer(wu_pair_group)
wu_pair_data <- wu_pair[,-1]
type_pair <- test_data[,c(-1,-3,-4)]
type_pair_group <- as.character(type_pair[,1])
type_pair_group[type_pair_group =='vowel']<-1
type_pair_group[type_pair_group == 'glide']<-2
type_pair_group <- as.integer(type_pair_group)
type_pair_data <- type_pair[,-1]
##############################################################################
## SWISS.R
## Compute the standardized within class sum of square score.
## Author: Meilei
##############################################################################
swiss <- function(dat, class){
  # @ dat: data matrix, rows are samples and columns are features
  # @ class: class label of samples
  # Returns the mean, over all unordered pairs of class labels, of
  #   (within-class SS of the pair) / (total SS of the pooled pair),
  # where SS is the squared Frobenius norm of the column-centered matrix.
  # Lower scores indicate better separation between classes.
  group <- unique(class)
  # combn() always returns a 2 x n matrix here (n = choose(#classes, 2)),
  # so a single class pair is simply the n == 1 case. The original code
  # had a separate branch guarded by is.null(ncol(gpairs)) that could
  # never trigger; that dead duplicate of the loop body is removed.
  gpairs <- combn(group, 2)
  n <- ncol(gpairs)
  sw <- numeric(n)  # preallocate instead of growing inside the loop
  for (i in seq_len(n)) {
    g1 <- gpairs[1, i]
    g2 <- gpairs[2, i]
    c1 <- as.matrix(dat[which(class == g1), ])
    c2 <- as.matrix(dat[which(class == g2), ])
    pooled <- rbind(c1, c2)  # renamed from 'c', which shadowed base::c
    sc1 <- scale(c1, center = TRUE, scale = FALSE)      # center class g1
    sc2 <- scale(c2, center = TRUE, scale = FALSE)      # center class g2
    sc <- scale(pooled, center = TRUE, scale = FALSE)   # center pooled pair
    sw[i] <- (norm(sc1, "F")^2 + norm(sc2, "F")^2) / norm(sc, "F")^2
  }
  return(mean(sw))
}
# Swiss separation scores (lower = better separated), auto-printed:
# within-glide contrasts, within-vowel contrasts, merged-pair contrasts,
# then the type split and the individual vowel/glide pairs.
swiss(glide_swiss_data,glide_group)
swiss(glide_hj_data,glide_hj_group)
swiss(glide_hw_data,glide_hw_group)
swiss(glide_jw_data,glide_jw_group)
swiss(vowel_swiss_data,vowel_group)
swiss(vowel_iu_data,vowel_iu_group)
swiss(vowel_iy_data,vowel_iy_group)
swiss(vowel_uy_data,vowel_uy_group)
swiss(swiss_total_data, swiss_total_group)
swiss(ji_hy_data, ji_hy_group)
swiss(ji_uw_data, ji_uw_group)
swiss(hy_uw_data, hy_uw_group)
swiss(type_pair_data,type_pair_group)
swiss(ji_pair_data,ji_pair_group)
swiss(hy_pair_data,hy_pair_group)
swiss(wu_pair_data,wu_pair_group)
###################################################
# Use fewer features for separation.
##################################################
####
# less than 4 features, how to get lowest swiss score
####
library(utils)
# Exhaustive search: score every 2-, 3- and 4-feature subset of
# type_pair_data with swiss() and record which subset separates vowel
# vs. glide best (lowest score).
swiss_score_vector <- n_vector <- sub_n_vector <- c()
for (n in c(2,3,4)){
  names <- colnames(type_pair_data)
  # Hoisted: the original recomputed combn(names, n) twice per inner
  # iteration; compute the subset table once per subset size instead.
  combos <- combn(names, n)
  for (item in seq_len(ncol(combos))){
    swiss_score <- swiss(type_pair_data[, combos[, item]], type_pair_group)
    swiss_score_vector <- c(swiss_score_vector, swiss_score)
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
# Recover the winning subset size and combination index, then print
# the winning feature names.
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
###################################################################
# Same exhaustive 2-4 feature subset search for the i vs j pair.
swiss_score_vector <- n_vector <- sub_n_vector <- c()
for (n in c(2,3,4)){
  names <- colnames(ji_pair_data)
  combos <- combn(names, n)  # hoisted out of the inner loop
  for (item in seq_len(ncol(combos))){
    swiss_score <- swiss(ji_pair_data[, combos[, item]], ji_pair_group)
    swiss_score_vector <- c(swiss_score_vector, swiss_score)
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
###############################
# Same exhaustive 2-4 feature subset search for the h vs y pair.
swiss_score_vector <- n_vector <- sub_n_vector <- c()
names <- colnames(hy_pair_data)
for (n in c(2,3,4)){
  combos <- combn(names, n)  # hoisted out of the inner loop
  for (item in seq_len(ncol(combos))){
    swiss_score <- swiss(hy_pair_data[, combos[, item]], hy_pair_group)
    swiss_score_vector <- c(swiss_score_vector, swiss_score)
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
##################
# Same exhaustive 2-4 feature subset search for the w vs u pair.
swiss_score_vector <- n_vector <- sub_n_vector <- c()
names <- colnames(wu_pair_data)
for (n in c(2,3,4)){
  combos <- combn(names, n)  # hoisted out of the inner loop
  for (item in seq_len(ncol(combos))){
    swiss_score <- swiss(wu_pair_data[, combos[, item]], wu_pair_group)
    swiss_score_vector <- c(swiss_score_vector, swiss_score)
    n_vector <- c(n_vector, n)
    sub_n_vector <- c(sub_n_vector, item)
  }
}
min(swiss_score_vector)
best_n <- n_vector[which.min(swiss_score_vector)]
best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
combn(names, best_n)[, best_sub_n]
##################
################################
# in glide!!!!
#############################
# Run the subset search within the glide data: once over all three
# glides and once for each pairwise glide contrast (h/j, h/w, j/w).
# NOTE(review): glide_swiss_data etc. already had their first two
# columns dropped when built; the extra [,c(-1,-2)] here removes two
# more leading feature columns -- presumably intentional, verify.
glide_data_list <- list(glide_swiss_data[,c(-1,-2)], glide_hj_data[,c(-1,-2)], glide_hw_data[,c(-1,-2)], glide_jw_data[,c(-1,-2)])
glide_group_list <- list(glide_group, glide_hj_group, glide_hw_group, glide_jw_group)
names_new <- c('swiss', 'hj','hw','jw')
for (list_num in c(1:4)){
  print(paste('this is ', names_new[list_num] ,'group'))
  swiss_score_vector <- n_vector <- sub_n_vector <- c()
  # [[ ]] extracts the element directly -- replaces the original
  # as.data.frame(list[i]) / unlist(list[i]) round-trips.
  cur_data <- glide_data_list[[list_num]]
  cur_group <- glide_group_list[[list_num]]
  names <- colnames(cur_data)
  for (n in c(2,3,4)){
    combos <- combn(names, n)  # hoisted out of the inner loop
    for (item in seq_len(ncol(combos))){
      swiss_score <- swiss(cur_data[, combos[, item]], cur_group)
      swiss_score_vector <- c(swiss_score_vector, swiss_score)
      n_vector <- c(n_vector, n)
      sub_n_vector <- c(sub_n_vector, item)
    }
  }
  print(min(swiss_score_vector))
  best_n <- n_vector[which.min(swiss_score_vector)]
  best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
  print(combn(names, best_n)[, best_sub_n])
}
############################################
#swiss(vowel_swiss_data,vowel_group)
#swiss(vowel_iu_data,vowel_iu_group)
#swiss(vowel_iy_data,vowel_iy_group)
#swiss(vowel_uy_data,vowel_uy_group)
#############################################
# Same subset search within the vowel data: all three vowels together,
# then each pairwise contrast (i/u, i/y, u/y). The extra [,c(-1,-2)]
# mirrors the glide section above.
vowel_data_list <- list(vowel_swiss_data[,c(-1,-2)], vowel_iu_data[,c(-1,-2)], vowel_iy_data[,c(-1,-2)], vowel_uy_data[,c(-1,-2)])
vowel_group_list <- list(vowel_group, vowel_iu_group, vowel_iy_group, vowel_uy_group)
names_new <- c('swiss', 'iu','iy','uy')
for (list_num in c(1:4)){
  print(paste('this is ', names_new[list_num] ,'group'))
  swiss_score_vector <- n_vector <- sub_n_vector <- c()
  cur_data <- vowel_data_list[[list_num]]    # direct [[ ]] extraction
  cur_group <- vowel_group_list[[list_num]]
  names <- colnames(cur_data)
  for (n in c(2,3,4)){
    combos <- combn(names, n)  # hoisted out of the inner loop
    for (item in seq_len(ncol(combos))){
      swiss_score <- swiss(cur_data[, combos[, item]], cur_group)
      swiss_score_vector <- c(swiss_score_vector, swiss_score)
      n_vector <- c(n_vector, n)
      sub_n_vector <- c(sub_n_vector, item)
    }
  }
  print(min(swiss_score_vector))
  best_n <- n_vector[which.min(swiss_score_vector)]
  best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
  print(combn(names, best_n)[, best_sub_n])
}
#################################
#swiss(swiss_total_data, swiss_total_group)
#swiss(ji_hy_data, ji_hy_group)
#swiss(ji_uw_data, ji_uw_group)
#swiss(hy_uw_data, hy_uw_group)
# Same subset search over the merged pairs: all three (ji/wu/hy)
# together, then each pairwise contrast.
total_data_list <- list(swiss_total_data, ji_hy_data, ji_uw_data, hy_uw_data)
total_group_list <- list(swiss_total_group, ji_hy_group, ji_uw_group, hy_uw_group)
names_new <- c('swiss', 'ji_hy','ji_uw','hy_uw')
for (list_num in c(1:4)){
  print(paste('this is ', names_new[list_num] ,'group'))
  swiss_score_vector <- n_vector <- sub_n_vector <- c()
  cur_data <- total_data_list[[list_num]]    # direct [[ ]] extraction
  cur_group <- total_group_list[[list_num]]
  names <- colnames(cur_data)
  for (n in c(2,3,4)){
    combos <- combn(names, n)  # hoisted out of the inner loop
    for (item in seq_len(ncol(combos))){
      swiss_score <- swiss(cur_data[, combos[, item]], cur_group)
      swiss_score_vector <- c(swiss_score_vector, swiss_score)
      n_vector <- c(n_vector, n)
      sub_n_vector <- c(sub_n_vector, item)
    }
  }
  print(min(swiss_score_vector))
  best_n <- n_vector[which.min(swiss_score_vector)]
  best_sub_n <- sub_n_vector[which.min(swiss_score_vector)]
  print(combn(names, best_n)[, best_sub_n])
}
# Turn numbers into plotmath expressions of the form "m %*% 10^e"
# (e.g. 1.5e+03 renders as 1.5 x 10^3) for use as a ggplot labeller.
scientific_10 <- function(x) {
  sci_text <- scientific_format()(x)
  # "e" is a literal here, so fixed matching is sufficient
  parse(text = gsub("e", " %*% 10^", sci_text, fixed = TRUE))
}
# Shuffle the row order so overplotted points from different types interleave
# instead of one type being drawn entirely on top of the other.
# (Was assigned with "="; "<-" is the conventional assignment operator.)
test_data <- test_data[sample(nrow(test_data)), ]
# Pairwise scatter plots of F1, F2, intensity and duration, coloured by type.
# "labels" is spelled out in every scale_* call: the original "label ="
# worked only through R's partial argument matching.
c1 <- ggplot(test_data, aes(x = F1, y = F2, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none") + scale_y_continuous(labels = scientific_format())
c2 <- ggplot(test_data, aes(x = F1, y = intensity, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none") + scale_y_continuous(labels = scientific_format())
c3 <- ggplot(test_data, aes(x = F1, y = duration, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "bottom")
c4 <- ggplot(test_data, aes(x = F2, y = F1, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none", axis.text.x = element_text(angle = 90, hjust = 1)) + scale_x_continuous(labels = scientific_format())
c5 <- ggplot(test_data, aes(x = F2, y = intensity, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none", axis.text.x = element_text(angle = 90, hjust = 1)) + scale_y_continuous(labels = scientific_format()) + scale_x_continuous(labels = scientific_format())
c6 <- ggplot(test_data, aes(x = F2, y = duration, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "bottom", axis.text.x = element_text(angle = 90, hjust = 1)) + scale_x_continuous(labels = scientific_format())
c7 <- ggplot(test_data, aes(x = intensity, y = F1, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none", axis.text.x = element_text(angle = 90, hjust = 1)) + scale_x_continuous(labels = scientific_format())
c8 <- ggplot(test_data, aes(x = intensity, y = F2, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none", axis.text.x = element_text(angle = 90, hjust = 1)) + scale_y_continuous(labels = scientific_format()) + scale_x_continuous(labels = scientific_format())
c9 <- ggplot(test_data, aes(x = intensity, y = duration, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "bottom", axis.text.x = element_text(angle = 90, hjust = 1)) + scale_x_continuous(labels = scientific_format())
c10 <- ggplot(test_data, aes(x = duration, y = F1, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none")
c11 <- ggplot(test_data, aes(x = duration, y = F2, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "none") + scale_y_continuous(labels = scientific_format())
c12 <- ggplot(test_data, aes(x = duration, y = intensity, color = type)) + geom_point(alpha = 0.5) + theme(legend.position = "bottom") + scale_y_continuous(labels = scientific_format())
# Arrange the 12 panels in a 4-column grid (multiplot is a project helper)
multiplot(c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, cols = 4)
######################################################################
# Use one-way ANOVA and box plots to detect differences between phonemes
# of the same type (glides vs. vowels analysed separately).
######################################################################
# Split the pooled data by phoneme type
glide_data <- test_data %>% filter(type == 'glide')
vowel_data <- test_data %>% filter(type == 'vowel')
# Duration distribution per glide sound
ggplot(glide_data, aes(x = sound, y = duration)) +
  geom_boxplot(fill = "grey80", colour = "blue") +
  scale_x_discrete() + xlab("Treatment Group") +
  ylab("Duration of each glide sound")
# F4 bandwidth per glide sound (F1 works # F2 significant! # F3 also works # F4 works)
ggplot(glide_data, aes(x = sound, y = F4_bandwidth)) +
  geom_boxplot(fill = "grey80", colour = "blue") +
  scale_x_discrete() + xlab("Treatment Group") +
  ylab("F4 bandwidth of each glide sound")  # was a copy-pasted "Duration" label
# F4 bandwidth per vowel sound (F2 works # F3 probably # F4 important)
ggplot(vowel_data, aes(x = sound, y = F4_bandwidth)) +
  geom_boxplot(fill = "grey80", colour = "blue") +
  scale_x_discrete() + xlab("Treatment Group") +
  ylab("F4 bandwidth of each vowel sound")  # was a copy-pasted "Duration" label
#################################################################
# Per-type linear models: does each acoustic measure differ by sound?
# Treatment contrasts are forced for BOTH unordered and ordered factors.
# NOTE(review): the second element of the contrasts option is normally
# "contr.poly" for ordered factors -- confirm this override is intentional.
options(contrasts=c("contr.treatment", "contr.treatment"))
# Glide sounds: regress each formant frequency (F1-F4) on sound identity
lm_glide_1 <- lm(F1 ~ sound, data = glide_data) # F1
summary(lm_glide_1)
lm_glide_2 <- lm(F2 ~ sound, data = glide_data) # F2
summary(lm_glide_2)
lm_glide_3 <- lm(F3 ~ sound, data = glide_data) # F3
summary(lm_glide_3)
lm_glide_4 <- lm(F4 ~ sound, data = glide_data) # F4
summary(lm_glide_4)
# all these four are significant!
##################################################
# Vowel sounds: the same single-predictor models over a wider feature set
# (contrasts option re-set here so this section also runs standalone)
options(contrasts=c("contr.treatment", "contr.treatment"))
lm_vowel_2 <- lm(F2 ~ sound, data = vowel_data) # F2 less than e-16
summary(lm_vowel_2)
lm_vowel_3 <- lm(F3 ~ sound, data = vowel_data) # F3 less than e-16
summary(lm_vowel_3)
lm_vowel_4 <- lm(F4 ~ sound, data = vowel_data) # F4 less than e-16
summary(lm_vowel_4)
lm_vowel_5 <- lm(duration ~ sound, data = vowel_data) # duration works!
summary(lm_vowel_5)
lm_vowel_6 <- lm(AvePitch ~ sound, data = vowel_data) # AvePitch works!
summary(lm_vowel_6)
lm_vowel_7 <- lm(F1_bandwidth ~ sound, data = vowel_data) # F1_bandwidth
summary(lm_vowel_7)
lm_vowel_8 <- lm(F2_bandwidth ~ sound, data = vowel_data) # F2_bandwidth
summary(lm_vowel_8)
lm_vowel_9 <- lm(F3_bandwidth ~ sound, data = vowel_data) # F3_bandwidth
summary(lm_vowel_9)
lm_vowel_10 <- lm(F4_bandwidth ~ sound, data = vowel_data) # F4_bandwidth really significant
summary(lm_vowel_10)
# Bare string auto-prints when run interactively: the list of features
# found discriminative for vowels in the models above.
'F1, F2, F3, F4, duration, AvePitch, F1_bandwidth, F2_bandwidth, F3_bandwidth, F4_bandwidth'
###################################################
# Pooled model over all six sounds (glides + vowels together)
lm_all_1 <- lm(F1 ~ sound, data = test_data) # Only F1 significant in all six sounds
summary(lm_all_1)
################################################
# One-way ANOVA of every acoustic measure against sound identity, run
# separately for the vowel and glide subsets. The loop replaces 24
# copy-pasted summary(aov(...)) calls; the printed tables are unchanged
# (explicit print() also makes the output appear under source()).
aov_responses <- c("duration", "intensity", "AvePitch", "AveHarmonicity",
                   "F1", "F2", "F3", "F4",
                   "F1_bandwidth", "F2_bandwidth", "F3_bandwidth", "F4_bandwidth")
head(vowel_data)
for (resp in aov_responses) {
  # reformulate() builds e.g. duration ~ sound from the response name
  print(summary(aov(reformulate("sound", response = resp), data = vowel_data)))
}
head(glide_data)
for (resp in aov_responses) {
  print(summary(aov(reformulate("sound", response = resp), data = glide_data)))
}
# ---------------------------------------------------------------- plot2.R ----
## Course project 1 uses data from the Electric power consumption data set.
# Load data from the working directory. na.strings = "?" is required: the raw
# file marks missing power readings with "?" (see the download/parse code at
# the top of this file), which would otherwise coerce the measurement columns
# to character and break the plot below.
dat <- read.table(file = "household_power_consumption.txt", sep = ";", header = TRUE,
                  stringsAsFactors = FALSE, na.strings = "?")
# Subset to only include dates from Feb. 1st and 2nd, 2007
power <- subset(dat, dat$Date %in% c("1/2/2007", "2/2/2007"))
# Combine date and time into a single timestamp. POSIXct (not strptime's
# POSIXlt) is the appropriate date-time class to store inside a data frame.
power$Time <- as.POSIXct(paste(power$Date, power$Time, sep = ", "),
                         format = "%d/%m/%Y, %H:%M:%S")
power <- power[, -1] # Remove the now-redundant Date column
colnames(power)[1] <- "Date_and_time" # Rename time column to a date and time column
# Plot 2 (480 x 480 px, the png() default): global active power over time
plot(x = power$Date_and_time,
     y = power$Global_active_power,
     type = "l",
     ylab = "Global Active Power (kilowatts)",
     xlab = "")
dev.copy(png,'plot2.png')
dev.off()
## Course project 1 uses data from the Electric power consumption data set.
# Read the raw power consumption data from the working directory
dat <- read.table(file = "household_power_consumption.txt", sep = ";", header = TRUE, stringsAsFactors = FALSE)
# Restrict to the two days of interest: 1 and 2 February 2007
power <- dat[dat$Date %in% c("1/2/2007", "2/2/2007"), ]
# Fold the Date and Time columns into one timestamp, replacing Time
power$Time <- strptime(paste(power$Date, power$Time, sep = ", "),
                       format = "%d/%m/%Y, %H:%M:%S")
power <- power[, -1]                   # the Date column is now redundant
colnames(power)[1] <- "Date_and_time"  # first column now holds the full timestamp
# Plot 2 (480 x 480): line plot of global active power over time
with(power, plot(Date_and_time, Global_active_power,
                 type = "l",
                 ylab = "Global Active Power (kilowatts)",
                 xlab = ""))
dev.copy(png,'plot2.png')
dev.off()
# (dataset-viewer residue, commented out so the file parses)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.