content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relabel.R
\name{fct_relabel}
\alias{fct_relabel}
\title{Automatically relabel factor levels, collapse as necessary}
\usage{
fct_relabel(.f, .fun, ...)
}
\arguments{
\item{.f}{A factor.}
\item{.fun}{A bare or character function name or an actual function in
formula, quosure, or ordinary notation to be applied to each level. Must
accept one character argument and return a character vector of the same
length as its input.}
\item{...}{Additional arguments to \code{.fun}.}
}
\description{
Automatically relabel factor levels, collapse as necessary
}
\examples{
gss_cat$partyid \%>\% fct_count()
gss_cat$partyid \%>\% fct_relabel(~gsub(",", ", ", .x)) \%>\% fct_count()
convert_income <- function(x) {
regex <- "^(?:Lt |)[$]([0-9]+).*$"
is_range <- grepl(regex, x)
num_income <- as.numeric(gsub(regex, "\\\\1", x[is_range]))
num_income <- trunc(num_income / 5000) * 5000
x[is_range] <- paste0("Gt $", num_income)
x
}
fct_count(gss_cat$rincome)
convert_income(levels(gss_cat$rincome))
rincome2 <- fct_relabel(gss_cat$rincome, convert_income)
fct_count(rincome2)
}
|
/man/fct_relabel.Rd
|
no_license
|
alistaire47/forcats
|
R
| false
| true
| 1,155
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relabel.R
\name{fct_relabel}
\alias{fct_relabel}
\title{Automatically relabel factor levels, collapse as necessary}
\usage{
fct_relabel(.f, .fun, ...)
}
\arguments{
\item{.f}{A factor.}
\item{.fun}{A bare or character function name or an actual function in
formula, quosure, or ordinary notation to be applied to each level. Must
accept one character argument and return a character vector of the same
length as its input.}
\item{...}{Additional arguments to \code{.fun}.}
}
\description{
Automatically relabel factor levels, collapse as necessary
}
\examples{
gss_cat$partyid \%>\% fct_count()
gss_cat$partyid \%>\% fct_relabel(~gsub(",", ", ", .x)) \%>\% fct_count()
convert_income <- function(x) {
regex <- "^(?:Lt |)[$]([0-9]+).*$"
is_range <- grepl(regex, x)
num_income <- as.numeric(gsub(regex, "\\\\1", x[is_range]))
num_income <- trunc(num_income / 5000) * 5000
x[is_range] <- paste0("Gt $", num_income)
x
}
fct_count(gss_cat$rincome)
convert_income(levels(gss_cat$rincome))
rincome2 <- fct_relabel(gss_cat$rincome, convert_income)
fct_count(rincome2)
}
|
## Download the Data
# NOTE(review): machine-specific absolute path; kept for parity with the
# author's environment, but relative paths would be more portable.
filesPath <- "/Users/taejinjo/OneDrive/Data_Analytics/04.Exploratory Data Analysis/Week1/"
setwd(filesPath)
if (!file.exists("./data")) {
  dir.create("./data")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Download and unzip only once instead of on every run
if (!file.exists("./data/Dataset.zip")) {
  download.file(fileUrl, destfile = "./data/Dataset.zip", method = "curl")
  ### Unzip DataSet to /data directory
  unzip(zipfile = "./data/Dataset.zip", exdir = "./data")
}
## Load required packages
library(ggplot2)
library(lattice)
library(data.table)
library(dplyr)
## Files in folder ./data that will be used are:
##   household_power_consumption.txt
## Read the file: "?" marks missing values; Date and Time are read as
## character, the seven measurement columns as numeric.
## (The deprecated tbl_df() wrapper was dropped; base graphics only need a
## plain data.frame.)
filesPath <- "/Users/taejinjo/OneDrive/Data_Analytics/04.Exploratory Data Analysis/Week1/data/"
dataPlot <- read.table(file.path(filesPath, "household_power_consumption.txt"),
                       header = TRUE, sep = ";",
                       colClasses = c("character", "character", rep("numeric", 7)),
                       na = "?")
# Build a full POSIXct timestamp from Date + Time. This must happen before
# Date is converted to class Date below.
dataPlot$Time <- as.POSIXct(strptime(paste(dataPlot$Date, dataPlot$Time), "%d/%m/%Y %H:%M:%S"))
dataPlot$Date <- as.Date(dataPlot$Date, "%d/%m/%Y")
# Keep only the two days required for the plots
subDateData <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
subDataPlot <- subset(dataPlot, Date %in% subDateData)
# Plot 1: histogram of global active power
png(filename = "Plot1.png", width = 480, height = 480)
hist(subDataPlot$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
# Plot 2: global active power over time
png(filename = "Plot2.png", width = 480, height = 480)
plot(subDataPlot$Time, subDataPlot$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
# Plot 3: the three sub-metering series overlaid
png(filename = "Plot3.png", width = 480, height = 480)
plot(subDataPlot$Time, subDataPlot$Sub_metering_1, type = "l", ylab = "Energy sub metering", col = "black")
lines(subDataPlot$Time, subDataPlot$Sub_metering_2, col = "red")
lines(subDataPlot$Time, subDataPlot$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 2, col = c("black", "red", "blue"))
dev.off()
# Plot 4: 2x2 panel of the four series
png(filename = "Plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
plot(subDataPlot$Time, subDataPlot$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# Fixed: y label previously said "Global Active Power" for the voltage panel
plot(subDataPlot$Time, subDataPlot$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
plot(subDataPlot$Time, subDataPlot$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(subDataPlot$Time, subDataPlot$Sub_metering_2, col = "red")
lines(subDataPlot$Time, subDataPlot$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd = 2, col = c("black", "red", "blue"))
# Fixed: y label previously said "Global Active Power" for the reactive-power panel
plot(subDataPlot$Time, subDataPlot$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off()
|
/Plot1_4.R
|
no_license
|
taejinjo/ExData_Plotting1
|
R
| false
| false
| 2,947
|
r
|
## Download the Data
# NOTE(review): hard-coded, machine-specific path + setwd() makes this script
# non-portable; it is kept unchanged here.
filesPath <- "/Users/taejinjo/OneDrive/Data_Analytics/04.Exploratory Data Analysis/Week1/"
setwd(filesPath)
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# NOTE(review): the archive is re-downloaded on every run; a file.exists()
# guard (as used for ./data above) would avoid repeated downloads.
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
###Unzip DataSet to /data directory
unzip(zipfile="./data/Dataset.zip",exdir="./data")
##Load required packages
library(ggplot2)
library(lattice)
library(data.table)
library(dplyr)
##Files in folder ./data that will be used are:
## household_power_consumption.txt
## Read the above files and create data tables.
filesPath <- "/Users/taejinjo/OneDrive/Data_Analytics/04.Exploratory Data Analysis/Week1/data/"
# Read subject files
# "?" marks missing values; Date and Time are read as character, the seven
# measurement columns as numeric.
dataPlot <- tbl_df(read.table(file.path(filesPath, "household_power_consumption.txt"), header = TRUE, sep = ";", colClasses = c("character", "character", rep("numeric", 7)),na = "?"))
# Build a full timestamp from Date + Time (must happen before Date is
# converted to class Date below); strptime() returns POSIXlt.
dataPlot$Time <- strptime(paste(dataPlot$Date, dataPlot$Time), "%d/%m/%Y %H:%M:%S")
dataPlot$Date <- as.Date(dataPlot$Date, "%d/%m/%Y")
# Keep only the two days required for the plots
subDateData <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
subDataPlot <- subset(dataPlot, Date %in% subDateData)
# Plot 1: histogram of Global_active_power
png(filename = "Plot1.png", width = 480, height = 480)
hist(subDataPlot$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red")
dev.off()
# Plot 2: Global_active_power over time
png(filename = "Plot2.png", width = 480, height = 480)
plot(subDataPlot$Time, subDataPlot$Global_active_power, type = "l", xlab ="", ylab = "Global Active Power (kilowatts)")
dev.off()
# Plot 3: three sub-metering series overlaid with a legend
png(filename = "Plot3.png", width = 480, height = 480)
plot(subDataPlot$Time, subDataPlot$Sub_metering_1, type = "l", ylab = "Energy sub metering", col = "black")
lines(subDataPlot$Time, subDataPlot$Sub_metering_2, type = "l", ylab = "Energy sub metering", col = "red")
lines(subDataPlot$Time, subDataPlot$Sub_metering_3, type = "l", ylab = "Energy sub metering", col = "blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2, col=c("black", "red", "blue"))
dev.off()
# Plot 4: 2x2 panel of the four series
png(filename = "Plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
plot(subDataPlot$Time, subDataPlot$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# NOTE(review): ylab says "Global Active Power" but Voltage is plotted —
# likely a copy-paste slip; "Voltage" is the expected label.
plot(subDataPlot$Time, subDataPlot$Voltage, type = "l", xlab = "datetime", ylab = "Global Active Power")
plot(subDataPlot$Time, subDataPlot$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering")
lines(subDataPlot$Time, subDataPlot$Sub_metering_2, type="l", col="red")
lines(subDataPlot$Time, subDataPlot$Sub_metering_3, type="l", col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=2, col=c("black", "red", "blue"))
# NOTE(review): same label slip here — Global_reactive_power is plotted.
plot(subDataPlot$Time, subDataPlot$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global Active Power")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write.xlsx.R, R/write.xlsx2.R
\name{write.xlsx}
\alias{write.xlsx}
\alias{write.xlsx2}
\title{Write a data.frame to an Excel workbook.}
\usage{
write.xlsx(
x,
file,
sheetName = "Sheet1",
col.names = TRUE,
row.names = TRUE,
append = FALSE,
showNA = TRUE,
password = NULL
)
write.xlsx2(
x,
file,
sheetName = "Sheet1",
col.names = TRUE,
row.names = TRUE,
append = FALSE,
password = NULL,
...
)
}
\arguments{
\item{x}{a \code{data.frame} to write to the workbook.}
\item{file}{the path to the output file.}
\item{sheetName}{a character string with the sheet name.}
\item{col.names}{a logical value indicating if the column names of \code{x}
are to be written along with \code{x} to the file.}
\item{row.names}{a logical value indicating whether the row names of
\code{x} are to be written along with \code{x} to the file.}
\item{append}{a logical value indicating if \code{x} should be appended to
an existing file. If \code{TRUE} the file is read from disk.}
\item{showNA}{a logical value. If set to \code{FALSE}, NA values will be
left as empty cells.}
\item{password}{a String with the password.}
\item{...}{other arguments to \code{addDataFrame} in the case of
\code{write.xlsx2}.}
}
\description{
Write a \code{data.frame} to an Excel workbook.
}
\details{
This function provides a high level API for writing a \code{data.frame} to
an Excel 2007 worksheet. It calls several low level functions in the
process. Its goal is to provide the convenience of
\code{\link[utils]{write.csv}} by borrowing from its signature.
Internally, \code{write.xlsx} uses a double loop over all the elements of
the \code{data.frame} so performance for very large \code{data.frame} may be
an issue. Please report if you experience slow performance. Dates and
POSIXct classes are formatted separately after the insertion. This also
adds to processing time.
If \code{x} is not a \code{data.frame} it will be converted to one.
Function \code{write.xlsx2} uses \code{addDataFrame} which speeds up the
execution compared to \code{write.xlsx} by an order of magnitude for large
spreadsheets (with more than 100,000 cells).
The default formats for Date and DateTime columns can be changed via the two
package options \code{xlsx.date.format} and \code{xlsx.datetime.format}.
They need to be specified in Java date format
\url{http://docs.oracle.com/javase/7/docs/api/java/text/SimpleDateFormat.html}.
Writing of password protected workbooks is supported for Excel 2007 OOXML
format only. Note that in Linux, LibreOffice is not able to read password
protected spreadsheets.
}
\examples{
\dontrun{
file <- paste(tempdir(), "/usarrests.xlsx", sep="")
res <- write.xlsx(USArrests, file)
# to change the default date format
oldOpt <- options()
options(xlsx.date.format="dd MMM, yyyy")
write.xlsx(x, sheet) # where x is a data.frame with a Date column.
options(oldOpt) # revert back to defaults
}
}
\seealso{
\code{\link{read.xlsx}} for reading \code{xlsx} documents. See
also \code{\link{addDataFrame}} for writing a \code{data.frame} to a sheet.
}
\author{
Adrian Dragulescu
}
|
/man/write.xlsx.Rd
|
no_license
|
bdaubney/xlsx
|
R
| false
| true
| 3,189
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write.xlsx.R, R/write.xlsx2.R
\name{write.xlsx}
\alias{write.xlsx}
\alias{write.xlsx2}
\title{Write a data.frame to an Excel workbook.}
\usage{
write.xlsx(
x,
file,
sheetName = "Sheet1",
col.names = TRUE,
row.names = TRUE,
append = FALSE,
showNA = TRUE,
password = NULL
)
write.xlsx2(
x,
file,
sheetName = "Sheet1",
col.names = TRUE,
row.names = TRUE,
append = FALSE,
password = NULL,
...
)
}
\arguments{
\item{x}{a \code{data.frame} to write to the workbook.}
\item{file}{the path to the output file.}
\item{sheetName}{a character string with the sheet name.}
\item{col.names}{a logical value indicating if the column names of \code{x}
are to be written along with \code{x} to the file.}
\item{row.names}{a logical value indicating whether the row names of
\code{x} are to be written along with \code{x} to the file.}
\item{append}{a logical value indicating if \code{x} should be appended to
an existing file. If \code{TRUE} the file is read from disk.}
\item{showNA}{a logical value. If set to \code{FALSE}, NA values will be
left as empty cells.}
\item{password}{a String with the password.}
\item{...}{other arguments to \code{addDataFrame} in the case of
\code{write.xlsx2}.}
}
\description{
Write a \code{data.frame} to an Excel workbook.
}
\details{
This function provides a high level API for writing a \code{data.frame} to
an Excel 2007 worksheet. It calls several low level functions in the
process. Its goal is to provide the convenience of
\code{\link[utils]{write.csv}} by borrowing from its signature.
Internally, \code{write.xlsx} uses a double loop over all the elements of
the \code{data.frame} so performance for very large \code{data.frame} may be
an issue. Please report if you experience slow performance. Dates and
POSIXct classes are formatted separately after the insertion. This also
adds to processing time.
If \code{x} is not a \code{data.frame} it will be converted to one.
Function \code{write.xlsx2} uses \code{addDataFrame} which speeds up the
execution compared to \code{write.xlsx} by an order of magnitude for large
spreadsheets (with more than 100,000 cells).
The default formats for Date and DateTime columns can be changed via the two
package options \code{xlsx.date.format} and \code{xlsx.datetime.format}.
They need to be specified in Java date format
\url{http://docs.oracle.com/javase/7/docs/api/java/text/SimpleDateFormat.html}.
Writing of password protected workbooks is supported for Excel 2007 OOXML
format only. Note that in Linux, LibreOffice is not able to read password
protected spreadsheets.
}
\examples{
\dontrun{
file <- paste(tempdir(), "/usarrests.xlsx", sep="")
res <- write.xlsx(USArrests, file)
# to change the default date format
oldOpt <- options()
options(xlsx.date.format="dd MMM, yyyy")
write.xlsx(x, sheet) # where x is a data.frame with a Date column.
options(oldOpt) # revert back to defaults
}
}
\seealso{
\code{\link{read.xlsx}} for reading \code{xlsx} documents. See
also \code{\link{addDataFrame}} for writing a \code{data.frame} to a sheet.
}
\author{
Adrian Dragulescu
}
|
#' BGM to map script
#' original code from Alexander Keth
#'
#' Read an Atlantis .bgm geometry file and build a data frame of box
#' vertices for mapping: projected x/y coordinates plus the same points
#' back-projected to longitude/latitude.
#'
#' @param bgm_file Path to a .bgm file.
#' @param bgm_string Suffix identifying the vertex entries, e.g. ".vert".
#' @return data.frame with columns x, y, polygon (0-based box id), long, lat.
bgm_to_map <- function(bgm_file, bgm_string){
  bgm <- readLines(con = bgm_file)
  # Projection string: second whitespace-separated token of the "projection" line
  proj_in <- stringr::str_split(string = bgm[grep(pattern = "projection", x = bgm)], pattern = " ", n = 2)[[1]][2]
  # Number of boxes from the "nbox" line (str_split_twice is a package helper)
  n_boxes <- str_split_twice(char = bgm[grep(pattern = "nbox", x = bgm)], min_only = TRUE)
  box_strings <- paste0("box", 0:(n_boxes - 1), bgm_string)

  # Preallocate the per-box list instead of creating it inside the loop
  bgm_data <- vector("list", length(box_strings))
  for (i in seq_along(box_strings)) {
    box_lines <- bgm[grep(pattern = box_strings[i], x = bgm)]
    # remove entries with "vertmix" (also found with box_strings);
    # guard against grep() returning integer(0), in which case the
    # original x[-integer(0)] would have dropped every line
    if (bgm_string == ".vert") {
      vertmix_idx <- grep(pattern = "vertmix", x = box_lines)
      if (length(vertmix_idx) > 0) box_lines <- box_lines[-vertmix_idx]
    }
    # Tokens 2 and 3 of each matched line are the x/y coordinates
    tokens <- lapply(box_lines, str_split_twice, min_only = FALSE)
    bgm_data[[i]] <- data.frame(x = as.numeric(vapply(tokens, function(t) t[2], numeric(1))),
                                y = as.numeric(vapply(tokens, function(t) t[3], numeric(1))),
                                polygon = i - 1)
  }

  bgm_data <- do.call(rbind, bgm_data)
  # Back-project the planar coordinates to long/lat using the file's projection
  lat_long <- proj4::project(bgm_data[, 1:2], proj = proj_in, inverse = TRUE)
  bgm_data$long <- lat_long$x
  bgm_data$lat <- lat_long$y
  return(bgm_data)
}
|
/NOBA/BGM to map.R
|
permissive
|
erikjsolsen/AtlantisNEUS_R
|
R
| false
| false
| 1,235
|
r
|
#' BGM to map script
#' original code from Alexander Keth
#'
#' Reads an Atlantis .bgm geometry file and builds a data frame of box
#' vertices for mapping: projected x/y plus back-projected long/lat.
bgm_to_map <- function(bgm_file, bgm_string){
bgm <- readLines(con = bgm_file)
# Projection string: second whitespace-separated token of the "projection" line
proj_in <- stringr::str_split(string = bgm[grep(pattern = "projection", x = bgm)], pattern = " ", n = 2)[[1]][2]
# Number of boxes from the "nbox" line; str_split_twice() is defined elsewhere in the package
n_boxes <- str_split_twice(char = bgm[grep(pattern = "nbox", x = bgm)], min_only = T)
box_strings <- paste0("box", 0:(n_boxes - 1), bgm_string)
for (i in seq_along(box_strings)) {
if (i == 1) bgm_data <- list()
bgm_data[[i]] <- bgm[grep(pattern = box_strings[i], x = bgm)]
# remove entries with "vertmix" (also found with box_strings)
# NOTE(review): if grep() matched nothing, x[-integer(0)] would drop every
# line — presumably vertmix lines are always present when bgm_string == ".vert".
if (bgm_string == ".vert") bgm_data[[i]] <- bgm_data[[i]][-grep(pattern = "vertmix", x = bgm_data[[i]])]
# Tokens 2 and 3 of each matched line are the x/y coordinates
bgm_data[[i]] <- lapply(bgm_data[[i]], str_split_twice, min_only = F)
bgm_data[[i]] <- data.frame(x = as.numeric(sapply(bgm_data[[i]], function(x) x[2])),
y = as.numeric(sapply(bgm_data[[i]], function(x) x[3])),
polygon = i - 1)
}
bgm_data <- do.call(rbind, bgm_data)
# Back-project planar coordinates to long/lat using the file's projection
lat_long <- proj4::project(bgm_data[, 1:2], proj = proj_in, inverse = T)
bgm_data$long <- lat_long$x
bgm_data$lat <- lat_long$y
return(bgm_data)
}
|
# load necessary packages
library(lubridate)
library(dplyr)

# create the directory if it doesn't exist
if (!file.exists("./data")) {
  dir.create("./data")
}

# download and unzip the 'DataSet.zip' file if it doesn't exist
if (!file.exists("./data/DataSet.zip")) {
  download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
    "./data/DataSet.zip"
  )
  unzip(zipfile = "./data/DataSet.zip", exdir = "./data")
}

# Read the entire file. "?" marks missing values; declaring na.strings here
# lets read.table parse the seven measurement columns as numeric directly.
# (Previously they were read as character and converted one by one, and
# Sub_metering_3 was never converted at all.)
data <- read.table("data/household_power_consumption.txt",
                   header = TRUE, sep = ";", na.strings = "?")

# change Date column data type to Date
data <- mutate(data, Date = dmy(Date))

# filter data based on required dates (2007-02-01 and 2007-02-02)
plotData <- data %>% filter(Date >= ymd("2007-02-01") & Date <= ymd("2007-02-02"))

# add a combined date-time column for the x axes (named directly instead of
# renaming a hard-coded column index)
plotData$dateTime <- ymd_hms(paste(plotData$Date, plotData$Time))

# open png graphics device: 480x480 px, 2x2 grid of panels
png(filename = "plot4.png", height = 480, width = 480)
par(mfrow = c(2, 2))

# plot 1: global active power over time
with(plotData, plot(dateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power"))

# plot 2: voltage over time
with(plotData, plot(dateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))

# plot 3: the three sub-metering series overlaid
plot(plotData$dateTime, plotData$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
points(plotData$dateTime, plotData$Sub_metering_2, type = "l", col = "red")
points(plotData$dateTime, plotData$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.50)

# plot 4: global reactive power over time
with(plotData, plot(dateTime, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power"))

# close the graphics device
dev.off()
|
/plot4.R
|
no_license
|
sonofposeidonnnn/EDAPROJECT1.1
|
R
| false
| false
| 2,323
|
r
|
# load necessary packages
library(lubridate)
library(dplyr)
# create the directory if it doesn't exist
if(!file.exists("./data")){
dir.create("./data")
}
# download and unzip the 'DataSet.zip' file if it doesn't exist
if(!file.exists("./data/DataSet.zip")){
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip","./data/DataSet.zip")
unzip(zipfile = "./data/DataSet.zip", exdir = "./data")
}
# read entire file
# NOTE(review): na.strings = "?" is not set, so the "?" missing-value marker
# forces the measurement columns to be read as character. They are converted
# back below — except Sub_metering_3, which never is; confirm plotting it
# behaves as intended.
data <- read.table("data/household_power_consumption.txt", header = TRUE, sep = ";")
# change Date column data type to Date
data <- mutate(data, Date = dmy(Date))
# filter data based on required dates
plotData <- data %>% filter(Date>=ymd("2007-02-01") & Date<=ymd("2007-02-02"))
# add DateTime column
plotData <- cbind(plotData, ymd_hms(paste(plotData$Date, plotData$Time)))
# the appended column is the 10th; give it a proper name
names(plotData)[10] = "dateTime"
# change Global_active_power column data type to numeric
plotData <- mutate(plotData, Global_active_power = as.numeric(Global_active_power))
# change Voltage column data type to numeric
plotData <- mutate(plotData, Voltage = as.numeric(Voltage))
# change Global_reactive_power column data type to numeric
plotData <- mutate(plotData, Global_reactive_power = as.numeric(Global_reactive_power))
# open png graphics device (480x480 px, 2x2 panel grid)
png(filename = "plot4.png", height = 480, width = 480)
par(mfrow = c(2,2))
# plot 1: global active power over time
with(plotData, plot(dateTime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power"))
# plot 2: voltage over time
with(plotData, plot(dateTime, Voltage, type = "l", xlab = "datetime", ylab = "Voltage"))
# plot 3: the three sub-metering series overlaid
plotData <- mutate(plotData, Sub_metering_1 = as.numeric(Sub_metering_1), Sub_metering_2 = as.numeric(Sub_metering_2))
plot(plotData$dateTime, plotData$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
points(plotData$dateTime, plotData$Sub_metering_2, type = "l", col = "red")
points(plotData$dateTime, plotData$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty=1, col = c("black", "red", "blue"), bty = "n", legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), cex = 0.50)
# plot 4: global reactive power over time
with(plotData, plot(dateTime, Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power"))
# close the graphics device
dev.off()
|
# Benchmark DESeq2's FDR and power on simulated soil-microbiome count data.
# Abundance templates come from the GlobalPatterns soil samples; count tables
# are generated by abn.tab.gen() (from sim_data_soil.R) over a grid of sample
# sizes, differential-abundance proportions, and seeds.
library(microbiome)
library(tidyverse)
source("sim_data_soil.R")
data("GlobalPatterns")
pseq = GlobalPatterns
# Simulations were evaluated for soil environments
meta.data = meta(pseq)
pseq.subset = subset_samples(pseq, SampleType == "Soil")
# Prune taxa
pseq.prune = prune_taxa(taxa_sums(pseq.subset) > 50, pseq.subset)
template = taxa_sums(pseq.prune)
# The number of taxa, sampling depth, and sample size
n.taxa=1000; n.samp=c("20_30", "50_50")
# The proportion of differentially abundant taxa
prop.diff=c(0.05, 0.15, 0.25)
# Set seeds
iterNum=100
abn.seed=seq(iterNum)
# Define the simulation parameters combinations
simparams=expand.grid(n.taxa, n.samp, prop.diff, abn.seed)
colnames(simparams)=c("n.taxa", "n.samp", "prop.diff", "abn.seed")
simparams=simparams%>%mutate(obs.seed=abn.seed+1)
simparams=simparams%>%separate(col = n.samp, into = c("n.samp.grp1", "n.samp.grp2"), sep = "_")
simparams=simparams%>%arrange(n.taxa, n.samp.grp1, prop.diff, abn.seed, obs.seed)
# Each parameter combination is encoded as one underscore-joined string
simparams.list=apply(simparams, 1, paste0, collapse="_")
simparamslabels=c("n.taxa", "n.samp.grp1", "n.samp.grp2", "prop.diff", "abn.seed", "obs.seed")
library(DESeq2)
library(doParallel)
library(foreach)
start_time <- Sys.time()
# NOTE(review): %do% executes sequentially; doParallel is loaded but no
# cluster is registered, so %dopar% would be required for actual parallelism.
# Each iteration contributes a (FDR, power) column via .combine = 'cbind'.
simlist=foreach(i = simparams.list, .combine = 'cbind') %do% {
# i=simparams.list[[1]]
print(i)
# Decode the parameter string back into named values
params = strsplit(i, "_")[[1]]
names(params) <- simparamslabels
# Paras for data generation
n.taxa=as.numeric(params["n.taxa"])
n.samp.grp1=as.numeric(params["n.samp.grp1"])
n.samp.grp2=as.numeric(params["n.samp.grp2"])
prop.diff=as.numeric(params["prop.diff"])
abn.seed=as.numeric(params["abn.seed"])
obs.seed=as.numeric(params["obs.seed"])
# Data generation
test.dat=abn.tab.gen(template, n.taxa, n.samp.grp1, n.samp.grp2, prop.diff, abn.seed, obs.seed,
struc.zero.prop=0.2)
# Prepare data for DESeq2
countdata=test.dat$obs.abn # Format for DESeq2: taxa are rows
coldata=data.frame(group=as.factor(rep(c(1, 2), c(n.samp.grp1, n.samp.grp2))))
rownames(coldata)=paste0("sub", seq(n.samp.grp1+n.samp.grp2))
# Drop taxa that are zero in >= 90% of samples, then add a +1 pseudocount
zero.threshold=0.90
taxa.info.ind=apply(countdata, 1, function(x) sum(x==0)/(n.samp.grp1+n.samp.grp2))
feature_table=round(countdata[which(taxa.info.ind<zero.threshold), ])+1L
count.table=DESeqDataSetFromMatrix(
countData = feature_table, colData = coldata, design = ~ group)
# Run DESeq2, falling back through dispersion fit types on failure
suppressWarnings(dds <- try(DESeq(count.table, quiet = TRUE), silent = TRUE))
if (inherits(dds, "try-error")) {
# If the parametric fit failed, try the local.
suppressWarnings(dds <- try(DESeq(count.table, fitType = "local", quiet = TRUE), silent = TRUE))
if (inherits(dds, "try-error")) {
# If local fails, try the mean
suppressWarnings(dds <- try(DESeq(count.table, fitType = "mean", quiet = TRUE), silent = TRUE))
}
}
if (inherits(dds, "try-error")) {
# If still bad, record missing results for this combination
FDR=NA; power=NA
}else{
res = results(dds)
res$id = rownames(res)
# Some DESeq2 results (for example) had NA adjusted p-values, so replace them with 1
res[is.na(res[, "padj"]), "padj"] = 1
# diff.test: DESeq2 call at padj < 0.05; diff.ind/effec.size: simulation truth
res=data.frame(taxa=res$id,
diff.test=ifelse(res$padj<0.05, 1, 0),
diff.ind=test.dat$diff.taxa[which(taxa.info.ind<zero.threshold)],
effec.size=test.dat$effect.size[which(taxa.info.ind<zero.threshold)])
# FDR = false discoveries / all discoveries (0 when nothing is called);
# power = true discoveries / truly differential taxa
FDR=ifelse(sum(res$diff.test==1, na.rm = T)==0, 0,
sum(ifelse(res$diff.ind==0&res$diff.test==1, 1, 0), na.rm = T)/
sum(res$diff.test==1, na.rm = T))
power=sum(ifelse(res$diff.ind!=0&res$diff.test==1, 1, 0), na.rm = T)/
sum(res$diff.ind!=0, na.rm = T)
}
c(FDR, power)
}
end_time <- Sys.time()
end_time - start_time
# Persist the FDR/power matrix (2 rows x one column per parameter combination)
write_csv(data.frame(simlist), "fdr_power_deseq2_soil.csv")
|
/simulations/sim_soil/sim_deseq2.R
|
permissive
|
wnq13579/ANCOM-BC-Code-Archive
|
R
| false
| false
| 3,816
|
r
|
# Benchmark DESeq2's FDR and power on simulated soil-microbiome count data.
# Abundance templates come from the GlobalPatterns soil samples; count tables
# are generated by abn.tab.gen() (from sim_data_soil.R) over a grid of sample
# sizes, differential-abundance proportions, and seeds.
library(microbiome)
library(tidyverse)
source("sim_data_soil.R")
data("GlobalPatterns")
pseq = GlobalPatterns
# Simulations were evaluated for soil environments
meta.data = meta(pseq)
pseq.subset = subset_samples(pseq, SampleType == "Soil")
# Prune taxa
pseq.prune = prune_taxa(taxa_sums(pseq.subset) > 50, pseq.subset)
template = taxa_sums(pseq.prune)
# The number of taxa, sampling depth, and sample size
n.taxa=1000; n.samp=c("20_30", "50_50")
# The proportion of differentially abundant taxa
prop.diff=c(0.05, 0.15, 0.25)
# Set seeds
iterNum=100
abn.seed=seq(iterNum)
# Define the simulation parameters combinations
simparams=expand.grid(n.taxa, n.samp, prop.diff, abn.seed)
colnames(simparams)=c("n.taxa", "n.samp", "prop.diff", "abn.seed")
simparams=simparams%>%mutate(obs.seed=abn.seed+1)
simparams=simparams%>%separate(col = n.samp, into = c("n.samp.grp1", "n.samp.grp2"), sep = "_")
simparams=simparams%>%arrange(n.taxa, n.samp.grp1, prop.diff, abn.seed, obs.seed)
# Each parameter combination is encoded as one underscore-joined string
simparams.list=apply(simparams, 1, paste0, collapse="_")
simparamslabels=c("n.taxa", "n.samp.grp1", "n.samp.grp2", "prop.diff", "abn.seed", "obs.seed")
library(DESeq2)
library(doParallel)
library(foreach)
start_time <- Sys.time()
# NOTE(review): %do% executes sequentially; doParallel is loaded but no
# cluster is registered, so %dopar% would be required for actual parallelism.
# Each iteration contributes a (FDR, power) column via .combine = 'cbind'.
simlist=foreach(i = simparams.list, .combine = 'cbind') %do% {
# i=simparams.list[[1]]
print(i)
# Decode the parameter string back into named values
params = strsplit(i, "_")[[1]]
names(params) <- simparamslabels
# Paras for data generation
n.taxa=as.numeric(params["n.taxa"])
n.samp.grp1=as.numeric(params["n.samp.grp1"])
n.samp.grp2=as.numeric(params["n.samp.grp2"])
prop.diff=as.numeric(params["prop.diff"])
abn.seed=as.numeric(params["abn.seed"])
obs.seed=as.numeric(params["obs.seed"])
# Data generation
test.dat=abn.tab.gen(template, n.taxa, n.samp.grp1, n.samp.grp2, prop.diff, abn.seed, obs.seed,
struc.zero.prop=0.2)
# Prepare data for DESeq2
countdata=test.dat$obs.abn # Format for DESeq2: taxa are rows
coldata=data.frame(group=as.factor(rep(c(1, 2), c(n.samp.grp1, n.samp.grp2))))
rownames(coldata)=paste0("sub", seq(n.samp.grp1+n.samp.grp2))
# Drop taxa that are zero in >= 90% of samples, then add a +1 pseudocount
zero.threshold=0.90
taxa.info.ind=apply(countdata, 1, function(x) sum(x==0)/(n.samp.grp1+n.samp.grp2))
feature_table=round(countdata[which(taxa.info.ind<zero.threshold), ])+1L
count.table=DESeqDataSetFromMatrix(
countData = feature_table, colData = coldata, design = ~ group)
# Run DESeq2, falling back through dispersion fit types on failure
suppressWarnings(dds <- try(DESeq(count.table, quiet = TRUE), silent = TRUE))
if (inherits(dds, "try-error")) {
# If the parametric fit failed, try the local.
suppressWarnings(dds <- try(DESeq(count.table, fitType = "local", quiet = TRUE), silent = TRUE))
if (inherits(dds, "try-error")) {
# If local fails, try the mean
suppressWarnings(dds <- try(DESeq(count.table, fitType = "mean", quiet = TRUE), silent = TRUE))
}
}
if (inherits(dds, "try-error")) {
# If still bad, record missing results for this combination
FDR=NA; power=NA
}else{
res = results(dds)
res$id = rownames(res)
# Some DESeq2 results (for example) had NA adjusted p-values, so replace them with 1
res[is.na(res[, "padj"]), "padj"] = 1
# diff.test: DESeq2 call at padj < 0.05; diff.ind/effec.size: simulation truth
res=data.frame(taxa=res$id,
diff.test=ifelse(res$padj<0.05, 1, 0),
diff.ind=test.dat$diff.taxa[which(taxa.info.ind<zero.threshold)],
effec.size=test.dat$effect.size[which(taxa.info.ind<zero.threshold)])
# FDR = false discoveries / all discoveries (0 when nothing is called);
# power = true discoveries / truly differential taxa
FDR=ifelse(sum(res$diff.test==1, na.rm = T)==0, 0,
sum(ifelse(res$diff.ind==0&res$diff.test==1, 1, 0), na.rm = T)/
sum(res$diff.test==1, na.rm = T))
power=sum(ifelse(res$diff.ind!=0&res$diff.test==1, 1, 0), na.rm = T)/
sum(res$diff.ind!=0, na.rm = T)
}
c(FDR, power)
}
end_time <- Sys.time()
end_time - start_time
# Persist the FDR/power matrix (2 rows x one column per parameter combination)
write_csv(data.frame(simlist), "fdr_power_deseq2_soil.csv")
|
# Unroot the phylogenetic tree stored in 11090_0.txt and write the result
# to 11090_0_unrooted.txt in Newick format.
# NOTE(review): presumably prepared as codeml input — confirm downstream use.
library(ape)
testtree <- read.tree("11090_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11090_0_unrooted.txt")
|
/codeml_files/newick_trees_processed_and_cleaned/11090_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 137
|
r
|
# Unroot the phylogenetic tree stored in 11090_0.txt and write the result
# to 11090_0_unrooted.txt in Newick format.
# NOTE(review): presumably prepared as codeml input — confirm downstream use.
library(ape)
testtree <- read.tree("11090_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11090_0_unrooted.txt")
|
# Differences between tibbles and data frames
library(tidyverse)
library(datos)
# tibbles print to the console without exceeding its width and with a
# maximum number of rows shown
tibble(
a = lubridate::now() + runif(1e3) * 86400,
b = lubridate::today() + runif(1e3) * 30,
c = 1:1e3,
d = runif(1e3),
e = sample(letters, 1e3, replace = TRUE)
)
# Here we ask the console to show 15 rows (and all columns via width = Inf)
datos::vuelos %>%
print(n=15, width= Inf)
# Show the tibble package help
package?tibble
# Show 'vuelos' from the 'datos' package as a dataset in an RStudio tab,
# not in the console
datos::vuelos %>%
view()
df <- tibble(
x = runif(5),
y = rnorm(5)
)
# Using the name with the dollar sign shows the variable's values
df$x
# Double brackets give the same result, using the name in quotes
df[["x"]]
# Or by position (R is 1-indexed); here the values of y are shown
df[[2]]
# Also possible with the pipe, using the dot and the dollar sign
df %>% .$x
# tibbles are stricter and complain if you try to access
# a column that does not exist
df[[3]] # There is no 3rd column
|
/Tema 03 - Data Analysis y Tibbles/T03_09_diferencias tibbles y dataframes.R
|
no_license
|
carlos4Dev/CursoDeRparaBigDatayDataScience
|
R
| false
| false
| 1,139
|
r
|
# Differences between tibbles and data frames
library(tidyverse)
library(datos)
# tibbles print to the console without exceeding its width and with a
# maximum number of rows shown
tibble(
a = lubridate::now() + runif(1e3) * 86400,
b = lubridate::today() + runif(1e3) * 30,
c = 1:1e3,
d = runif(1e3),
e = sample(letters, 1e3, replace = TRUE)
)
# Here we ask the console to show 15 rows (and all columns via width = Inf)
datos::vuelos %>%
print(n=15, width= Inf)
# Show the tibble package help
package?tibble
# Show 'vuelos' from the 'datos' package as a dataset in an RStudio tab,
# not in the console
datos::vuelos %>%
view()
df <- tibble(
x = runif(5),
y = rnorm(5)
)
# Using the name with the dollar sign shows the variable's values
df$x
# Double brackets give the same result, using the name in quotes
df[["x"]]
# Or by position (R is 1-indexed); here the values of y are shown
df[[2]]
# Also possible with the pipe, using the dot and the dollar sign
df %>% .$x
# tibbles are stricter and complain if you try to access
# a column that does not exist
df[[3]] # There is no 3rd column
|
##' @title Get thesis dependencies
##'
##' For use in `render_with_deps()`
##' @return A named list (`lst()`) with elements `rmd` (chapter files from
##'   `_bookdown.yml` plus front-matter files), `templates` (files under
##'   `templates/`), and `output` (the `_output.yml` path).
##' @author Shir Dekel
##' @export
get_thesis_deps <- function() {
  # Front-matter Rmd files shipped alongside the chapters
  front_matter <- list.files(file.path("rmd", "front_matter"), full.names = TRUE)
  # Chapter files declared under "rmd_files" in _bookdown.yml, then front matter
  bookdown_config <- yaml::read_yaml(file.path("_bookdown.yml"))
  rmd <- c(pluck(bookdown_config, "rmd_files"), front_matter)
  # Everything in the templates directory
  templates <- list.files(file.path("templates"), full.names = TRUE)
  output <- "_output.yml"
  lst(rmd, templates, output)
}
|
/R/get_thesis_deps.R
|
no_license
|
shirdekel/phd_thesis
|
R
| false
| false
| 507
|
r
|
##' @title Get thesis dependencies
##'
##' For use in `render_with_deps()`
##' @return A named list with elements `rmd`, `templates`, and `output`.
##' @author Shir Dekel
##' @export
get_thesis_deps <- function() {
  # Front-matter Rmd files live in rmd/front_matter, outside _bookdown.yml.
  front_matter <- list.files(file.path("rmd", "front_matter"),
                             full.names = TRUE)
  # Chapter files are listed under the `rmd_files` key of _bookdown.yml.
  config <- yaml::read_yaml(file.path("_bookdown.yml"))
  rmd <- c(pluck(config, "rmd_files"), front_matter)
  templates <- list.files(file.path("templates"), full.names = TRUE)
  output <- "_output.yml"
  # lst() keeps the object names as list element names.
  lst(rmd, templates, output)
}
|
# NB: Several objects used in testing are defined in
# tests/testthat/helper-make-test-data.R
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity
###
context("MethPat validity methods")

# The 'rowRanges' slot of a MethPat must be an MTuples object; a plain
# GRanges (as produced by granges()) must be rejected, whatever the tuple
# size of the object it was derived from.
test_that(".valid.MethPat.rowRanges works for empty MTuples", {
  expect_error(MethPat(rowRanges = granges(gt0)),
               paste0("'rowRanges' slot of a 'MethPat' object must be a ",
                      "'MTuples' object."))
})
test_that(".valid.MethPat.rowRanges works for 1-tuples", {
  expect_error(MethPat(rowRanges = granges(mt1)),
               paste0("'rowRanges' slot of a 'MethPat' object must be a ",
                      "'MTuples' object."))
})
test_that(".valid.MethPat.rowRanges works for 2-tuples", {
  expect_error(MethPat(rowRanges = granges(mt2)),
               paste0("'rowRanges' slot of a 'MethPat' object must be a ",
                      "'MTuples' object."))
})
test_that(".valid.MethPat.rowRanges works for 3-tuples", {
  expect_error(MethPat(rowRanges = granges(mt3)),
               paste0("'rowRanges' slot of a 'MethPat' object must be a ",
                      "'MTuples' object."))
})
test_that(".valid.MethPat.assays works for 1-tuples", {
expect_error(MethPat(assays = list(), rowRanges = mt1),
paste0("Assay names must include all of: M, U"))
# Extra assays are allowed
ea <- c(assays(mp1), list('extraAssay' =
matrix(1:20, ncol = 2,
dimnames = list(NULL, c('A', 'B')))))
expect_is(MethPat(assays = ea, rowRanges = rowRanges(mp1)), "MethPat")
# Assays must be non-negative (except extraAssays)
a <- endoapply(ea, `-`, 10)
expect_error(MethPat(assays = a, rowRanges = rowRanges(mp1)),
paste0("All counts of methylation patterns \\(stored in assays ",
"slot\\) must be non-negative integers."))
a <- ea
a[['extraAssay']] <- a[['extraAssay']] - 100L
expect_is(MethPat(assays = a, rowRanges = rowRanges(mp1)), "MethPat")
})
test_that(".valid.MethPat.assays works for 2-tuples", {
expect_error(MethPat(assays = list(), rowRanges = mt2),
paste0("Assay names must include all of: MM, MU, UM, UU"))
# Extra assays are allowed
ea <- c(assays(mp2), list('extraAssay' =
matrix(1:20, ncol = 2,
dimnames = list(NULL, c('A', 'B')))))
expect_is(MethPat(assays = ea, rowRanges = rowRanges(mp2)), "MethPat")
# Assays must be non-negative (except extraAssays)
a <- endoapply(ea, `-`, 10)
expect_error(MethPat(assays = a, rowRanges = rowRanges(mp2)),
paste0("All counts of methylation patterns \\(stored in assays ",
"slot\\) must be non-negative integers."))
a <- ea
a[['extraAssay']] <- a[['extraAssay']] - 100L
expect_is(MethPat(assays = a, rowRanges = rowRanges(mp2)), "MethPat")
})
test_that(".valid.MethPat.assays works for 3-tuples", {
expect_error(MethPat(assays = list(), rowRanges = mt3),
paste0("Assay names must include all of: MMM, MMU, MUM, MUU, ",
"UMM, UMU, UUM, UUU"))
# Extra assays are allowed
ea <- c(assays(mp3), list('extraAssay' =
matrix(1:20, ncol = 2,
dimnames = list(NULL, c('A', 'B')))))
expect_is(MethPat(assays = ea, rowRanges = rowRanges(mp3)), "MethPat")
# Assays must be non-negative (except extraAssays)
a <- endoapply(ea, `-`, 10)
expect_error(MethPat(assays = a, rowRanges = rowRanges(mp3)),
paste0("All counts of methylation patterns \\(stored in assays ",
"slot\\) must be non-negative integers."))
a <- ea
a[['extraAssay']] <- a[['extraAssay']] - 100L
expect_is(MethPat(assays = a, rowRanges = rowRanges(mp3)), "MethPat")
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###
context("MethPat constructor")
test_that("MethPat constructor returns a valid MethPat object when m = 0", {
expect_true(validObject(mp0))
})
test_that("MethPat constructor returns a valid object when m = 1", {
expect_true(validObject(mp1))
})
test_that("MethPat constructor returns a valid object when m = 2", {
expect_true(validObject(mp2))
})
test_that("MethPat constructor returns a valid object when m = 3", {
expect_true(validObject(mp3))
})
test_that("MethPat constructor returns errors on bad input", {
# TODO: None yet since the constructor doesn't check the input but relies on
# the validity methods.
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combining
###
context("Combining MethPat objects")
test_that("cbind,MethPat-method works on good input", {
expect_is(z <- cbind(mp1, mp1), "MethPat")
expect_identical(dim(z), c(10L, 4L))
expect_is(z <- cbind(mp1, mp1, mp1), "MethPat")
expect_identical(dim(z), c(10L, 6L))
expect_is(z <- cbind(mp2, mp2), "MethPat")
expect_identical(dim(z), c(10L, 4L))
expect_is(z <- cbind(mp2, mp2, mp2), "MethPat")
expect_identical(dim(z), c(10L, 6L))
expect_is(z <- cbind(mp3, mp3), "MethPat")
expect_identical(dim(z), c(10L, 4L))
expect_is(z <- cbind(mp3, mp3, mp3), "MethPat")
expect_identical(dim(z), c(10L, 6L))
})
test_that("cbind,MethPat-method returns error on bad input", {
# TODO: Write a more informative error message.
expect_error(cbind(mp1, mp2),
"Cannot compare 'MTuples' objects of different 'size'.")
# TODO: Write a more informative error message.
expect_error(cbind(mp1, mp1[1:3]),
"'...' object ranges \\(rows\\) are not compatible")
# TODO: Write a more informative error message.
expect_error(cbind(mp1, mp1[10:1]),
"'...' object ranges \\(rows\\) are not compatible")
})
test_that("rbind,MethPat-method works on good input", {
expect_is(z <- rbind(mp1, mp1), "MethPat")
expect_identical(dim(z), c(20L, 2L))
expect_is(z <- rbind(mp1, mp1, mp1), "MethPat")
expect_identical(dim(z), c(30L, 2L))
expect_is(z <- rbind(mp2, mp2), "MethPat")
expect_identical(dim(z), c(20L, 2L))
expect_is(z <- rbind(mp2, mp2, mp2), "MethPat")
expect_identical(dim(z), c(30L, 2L))
expect_is(z <- rbind(mp3, mp3), "MethPat")
expect_identical(dim(z), c(20L, 2L))
expect_is(z <- rbind(mp3, mp3, mp3), "MethPat")
expect_identical(dim(z), c(30L, 2L))
})
test_that("rbind,MethPat-method returns error on bad input", {
# TODO: Check error message is improved in new version of GenomicTuples
expect_error(rbind(mp1, mp2),
"Cannot combine MTuples containing tuples of different 'size'.")
mp1_ <- mp1
colnames(mp1_) <- c('A', 'b')
expect_error(rbind(mp1, mp1_), "'...' objects must have the same colnames")
})
test_that("combine,MethPat-method works for two MethPat objects", {
# 1-tuples
x <- mp1[1:2]
y <- mp1[2:3]
colnames(y) <- c('C', 'D')
# Can't expect_identical because identical doesn't work on assay slot,
# because it is a refernece class (I think).
expect_equal(combine(x, y),
MethPat(
assays = list(
M = matrix(as.integer(c(10, 9, NA, 1, 2, NA, NA, 9, 8, NA,
2, 3)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
U = matrix(as.integer(c(11, 12, NA, 20, 19, NA, NA, 12, 13,
NA, 19, 18)), ncol = 4,
dimnames = list(NULL,
c('A', 'B', 'C', 'D')))),
rowRanges = rowRanges(mp1)[1:3]
)
)
# 2-tuples
x <- mp2[1:2]
y <- mp2[2:3]
colnames(y) <- c('C', 'D')
# Can't expect_identical because identical() returns false on assays slot,
# (I think this is because it is a reference class).
expect_equal(combine(x, y),
MethPat(
assays = list(
MM = matrix(as.integer(c(10, 9, NA, 1, 2, NA, NA, 9, 8, NA,
2, 3)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MU = matrix(as.integer(c(11, 12, NA, 20, 19, NA, NA, 12, 13,
NA, 19, 18)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UM = matrix(as.integer(c(30, 29, NA, 21, 22, NA, NA, 29, 28,
NA, 22, 23)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UU = matrix(as.integer(c(40, 39, NA, 31, 32, NA, NA, 39, 38,
NA, 32, 33)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D')))),
rowRanges = rowRanges(mp2)[1:3]
)
)
# 3-tuples
x <- mp3[1:2]
y <- mp3[2:3]
colnames(y) <- c('C', 'D')
# Can't expect_identical because identical() returns false on assays slot,
# (I think this is because it is a reference class).
expect_equal(combine(x, y),
MethPat(
assays = list(
MMM = matrix(as.integer(c(10, 9, NA, 1, 2, NA, NA, 9, 8, NA,
2, 3)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MMU = matrix(as.integer(c(11, 12, NA, 20, 19, NA, NA, 12,
13, NA, 19, 18)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MUM = matrix(as.integer(c(30, 29, NA, 21, 22, NA, NA, 29,
28, NA, 22, 23)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MUU = matrix(as.integer(c(40, 39, NA, 31, 32, NA, NA, 39,
38, NA, 32, 33)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UMM = matrix(as.integer(c(50, 49, NA, 41, 42, NA, NA, 49,
48, NA, 42, 43)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UMU = matrix(as.integer(c(60, 59, NA, 51, 52, NA, NA, 59,
58, NA, 52, 53)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UUM = matrix(as.integer(c(70, 69, NA, 61, 62, NA, NA, 69,
68, NA, 62, 63)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UUU = matrix(as.integer(c(80, 79, NA, 71, 72, NA, NA, 79,
78, NA, 72, 73)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D')))),
rowRanges = rowRanges(mp3)[1:3]
)
)
})
test_that("combine,MethPat-method returns error on bad input", {
x <- mp1[1:2]
y <- mp1[2:3]
expect_error(combine(x, y),
"Cannot combine 'MethPat' objects with duplicate colnames.")
# TODO: Check error message is improved in new version of GenomicTuples
expect_error(combine(mp1, mp2),
"Cannot combine MTuples containing tuples of different 'size'.")
x <- mp1[1:2]
y <- mp1[2:3]
colnames(y) <- c('C', 'D')
mcols(y) <- NULL
# TODO: Write a more informative error message - might need to be specified
# for SummarizedExperiment.
expect_error(combine(x, y),
"number of columns for arg 2 do not match those of first arg")
x <- mp1[1:2]
y <- mp1[2:3]
colnames(y) <- c('C', 'D')
assays(y) <- c(assays(y), list('extraAssay' =
matrix(1:4, ncol = 2,
dimnames = list(NULL, c('C', 'D')))))
expect_error(combine(x, y),
"'MethPat' objects must all contain the same assays.")
x <- mp3
y <- mp3
genome(y) <- 'mock2'
expect_error(combine(x, y),
"sequences chr1, chr2, chr3 have incompatible genomes")
y <- mp3
colnames(y) <- c('C', 'D')
seqlevelsStyle(y) <- 'NCBI'
expect_warning(combine(x, y),
"The 2 combined objects have no sequence levels in common")
y <- renameSeqlevels(y, c('chr1', '2', '3'))
expect_identical(seqlevels(combine(x, y)),
c('chr1', 'chr2', 'chr3', '2', '3'))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters
###
context("MethPat getters")
test_that("SummarizedExperiment inherited getters work", {
expect_identical(nrow(mp0), 0L)
expect_identical(ncol(mp0), 0L)
expect_identical(nrow(mp1), 10L)
expect_identical(ncol(mp1), 2L)
expect_identical(nrow(mp2), 10L)
expect_identical(ncol(mp2), 2L)
expect_identical(nrow(mp3), 10L)
expect_identical(ncol(mp3), 2L)
expect_identical(seqnames(mp1), mp1@rowData@seqnames)
expect_identical(ranges(mp2), mp2@rowData@ranges)
expect_identical(strand(mp3), mp3@rowData@strand)
expect_identical(mcols(mp3), mp3@rowData@elementMetadata)
expect_identical(elementMetadata(mp3), mp3@rowData@elementMetadata)
expect_identical(seqinfo(mp3), mp3@rowData@seqinfo)
expect_identical(seqlevels(mp3), seqlevels(mp3@rowData@seqinfo))
expect_identical(seqlengths(mp3), seqlengths(mp3@rowData@seqinfo))
expect_identical(isCircular(mp3), isCircular(mp3@rowData@seqinfo))
expect_identical(genome(mp3), genome(mp3@rowData@seqinfo))
expect_identical(seqlevelsStyle(mp3), seqlevelsStyle(mp3@rowData@seqinfo))
# TODO: Notifiy Bioc-Devel that granges,SummarizedExperiment-method should
# return granges(rowRanges(x)) rather than rowRanges(x) since rowRanges may
# not be a GRanges object (e.g. might be a GTuples object)?
# expect_identical(granges(mp3), granges(mp3@rowData))
expect_error(granges(mp3), "Not yet implemented")
})
test_that("methinfo getters work", {
expect_identical(methinfo(mp1), methinfo(rowRanges(mp1)))
expect_identical(methtype(mp1), methtype(rowRanges(mp1)))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Splitting
###
context("MethPat splitting")
test_that("inherited split works", {
# Split by integer
mp3_s <- split(mp3, 1:10)
expect_identical(length(mp3_s), 10L)
expect_is(mp3_s, "SimpleList")
expect_true(all(sapply(mp3_s, is, class = "MethPat")))
# Split by Rle
expect_message(mp3_s <- split(mp3, seqnames(mp3)),
paste0("Note: method with signature ",
sQuote("SummarizedExperiment#ANY"), " chosen for ",
"function ", sQuote("split"), "."))
expect_identical(length(mp3_s), 3L)
expect_is(mp3_s, "SimpleList")
expect_true(all(sapply(mp3_s, is, class = "MethPat")))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Setters
###
context("MethPat setters")
test_that("SummarizedExperiment inherited setters work", {
mp3_ <- mp3
# TODO: Why isn't there a seqnames<-,SummarizedExperiment-method?
expect_error(seqnames(mp3_) <- rev(seqnames(mp3)),
paste0("unable to find an inherited method for function ",
sQuote("seqnames<-"), " for signature ",
sQuote("\"MethPat\"")))
mp3_ <- mp3
ranges(mp3_) <- rev(ranges(mp3))
expect_identical(ranges(mp3_), rev(ranges(mp3)))
mp3_ <- mp3
strand(mp3_) <- rev(strand(mp3))
expect_identical(strand(mp3_), rev(strand(mp3)))
mp3_ <- mp3
mcols(mp3_) <- DataFrame(score = rev(mcols(mp3)$score))
expect_identical(mcols(mp3_), DataFrame(score = rev(mcols(mp3)$score)))
mp3_ <- mp3
seqinfo(mp3_) <- Seqinfo(seqnames = c("chr1", "chr2", "chr3"),
seqlengths = c(10000L, 20000L, 15000L),
isCircular = c(NA, NA, NA),
genome = c("mock1", "mock1", "mock1"))
expect_identical(seqinfo(mp3_), Seqinfo(seqnames = c("chr1", "chr2", "chr3"),
seqlengths = c(10000L, 20000L,
15000L),
isCircular = c(NA, NA, NA),
genome = c("mock1", "mock1",
"mock1")))
mp3_ <- mp3
seqlevels(mp3_) <- c('chrI', 'chrII', 'chrIII')
expect_identical(seqlevels(mp3_), c('chrI', 'chrII', 'chrIII'))
mp3_ <- mp3
seqlengths(mp3_) <- c(10000L, 20000L, 15000L)
expect_identical(seqlengths(mp3_), c('chr1' = 10000L, 'chr2' = 20000L,
'chr3' = 15000L))
mp3_ <- mp3
isCircular(mp3_) <- c('chr1' = TRUE, 'chr2' = FALSE, 'chr3' = FALSE)
expect_identical(isCircular(mp3_), c('chr1' = TRUE, 'chr2' = FALSE,
'chr3' = FALSE))
mp3_ <- mp3
genome(mp3_) <- 'foo'
expect_identical(genome(mp3_), c('chr1' = 'foo', 'chr2' = 'foo',
'chr3' = 'foo'))
})
test_that("methinfo setters work", {
methinfo(mp1) <- MethInfo(c('CG', 'CHG'))
expect_identical(methinfo(mp1), MethInfo(c('CG', 'CHG')))
methtype(mp1) <- c("CG", "CHG", "CHH")
expect_identical(methtype(mp1), c("CG", "CHG", "CHH"))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Tuples methods
###
test_that("IPD,Methpat-method works", {
  # IPD (inter-pair distances) is undefined for empty objects and 1-tuples.
  expect_error(IPD(mp0), "Cannot compute IPD from an empty 'MTuples'.")
  expect_error(IPD(mp1),
               "It does not make sense to compute IPD when 'size' = 1.")
  # For size >= 2 the MethPat method must agree with the underlying MTuples.
  expect_identical(IPD(mp2), IPD(mt2))
  expect_identical(IPD(mp3), IPD(mt3))
})
test_that("size,MethPat-method works", {
  # An empty MethPat has no defined tuple size.
  expect_identical(size(mp0), NA_integer_)
  expect_identical(size(mp1), 1L)
  expect_identical(size(mp2), 2L)
  expect_identical(size(mp3), 3L)
})
test_that("tuples,MethPat-method works", {
expect_identical(tuples(mp0), tuples(gt0))
expect_identical(tuples(mp1), tuples(mt1))
expect_identical(tuples(mp2), tuples(mt2))
expect_identical(tuples(mp3), tuples(mt3))
})
test_that("tuples<-,MethPat-method works", {
tuples(mp1) <- matrix(101:110, ncol = 1)
expect_identical(tuples(mp1),
matrix(101:110, ncol = 1, dimnames = list(NULL, 'pos1')))
tuples(mp2) <- matrix(c(101:110, 102:111), ncol = 2)
expect_identical(tuples(mp2),
matrix(c(101:110, 102:111), ncol = 2,
dimnames = list(NULL, c('pos1', 'pos2'))))
tuples(mp3) <- matrix(c(101:110, 102:111, 103:112), ncol = 3)
expect_identical(tuples(mp3),
matrix(c(101:110, 102:111, 103:112), ncol = 3,
dimnames = list(NULL, c('pos1', 'pos2', 'pos3'))))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting
###
context("MethPat subsetting")
# No tests yet.
# Subsetting behaviour is entirely inherited via SummarizedExperiment.
|
/tests/testthat/test-MethPat-class.R
|
no_license
|
PeteHaitch/MethylationTuples
|
R
| false
| false
| 19,290
|
r
|
# NB: Several objects used in testing are defined in
# tests/testthat/helper-make-test-data.R
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity
###
context("MethPat validity methods")
test_that(".valid.MethPat.rowRanges works for empty MTuples", {
expect_error(MethPat(rowRanges = granges(gt0)),
paste0("'rowRanges' slot of a 'MethPat' object must be a ",
"'MTuples' object."))
})
test_that(".valid.MethPat.rowRanges works for 1-tuples", {
expect_error(MethPat(rowRanges = granges(mt1)),
paste0("'rowRanges' slot of a 'MethPat' object must be a ",
"'MTuples' object."))
})
test_that(".valid.MethPat.rowRanges works for 2-tuples", {
expect_error(MethPat(rowRanges = granges(mt2)),
paste0("'rowRanges' slot of a 'MethPat' object must be a ",
"'MTuples' object."))
})
test_that(".valid.MethPat.rowRanges works for 3-tuples", {
expect_error(MethPat(rowRanges = granges(mt3)),
paste0("'rowRanges' slot of a 'MethPat' object must be a ",
"'MTuples' object."))
})
test_that(".valid.MethPat.assays works for 1-tuples", {
expect_error(MethPat(assays = list(), rowRanges = mt1),
paste0("Assay names must include all of: M, U"))
# Extra assays are allowed
ea <- c(assays(mp1), list('extraAssay' =
matrix(1:20, ncol = 2,
dimnames = list(NULL, c('A', 'B')))))
expect_is(MethPat(assays = ea, rowRanges = rowRanges(mp1)), "MethPat")
# Assays must be non-negative (except extraAssays)
a <- endoapply(ea, `-`, 10)
expect_error(MethPat(assays = a, rowRanges = rowRanges(mp1)),
paste0("All counts of methylation patterns \\(stored in assays ",
"slot\\) must be non-negative integers."))
a <- ea
a[['extraAssay']] <- a[['extraAssay']] - 100L
expect_is(MethPat(assays = a, rowRanges = rowRanges(mp1)), "MethPat")
})
test_that(".valid.MethPat.assays works for 2-tuples", {
expect_error(MethPat(assays = list(), rowRanges = mt2),
paste0("Assay names must include all of: MM, MU, UM, UU"))
# Extra assays are allowed
ea <- c(assays(mp2), list('extraAssay' =
matrix(1:20, ncol = 2,
dimnames = list(NULL, c('A', 'B')))))
expect_is(MethPat(assays = ea, rowRanges = rowRanges(mp2)), "MethPat")
# Assays must be non-negative (except extraAssays)
a <- endoapply(ea, `-`, 10)
expect_error(MethPat(assays = a, rowRanges = rowRanges(mp2)),
paste0("All counts of methylation patterns \\(stored in assays ",
"slot\\) must be non-negative integers."))
a <- ea
a[['extraAssay']] <- a[['extraAssay']] - 100L
expect_is(MethPat(assays = a, rowRanges = rowRanges(mp2)), "MethPat")
})
test_that(".valid.MethPat.assays works for 3-tuples", {
expect_error(MethPat(assays = list(), rowRanges = mt3),
paste0("Assay names must include all of: MMM, MMU, MUM, MUU, ",
"UMM, UMU, UUM, UUU"))
# Extra assays are allowed
ea <- c(assays(mp3), list('extraAssay' =
matrix(1:20, ncol = 2,
dimnames = list(NULL, c('A', 'B')))))
expect_is(MethPat(assays = ea, rowRanges = rowRanges(mp3)), "MethPat")
# Assays must be non-negative (except extraAssays)
a <- endoapply(ea, `-`, 10)
expect_error(MethPat(assays = a, rowRanges = rowRanges(mp3)),
paste0("All counts of methylation patterns \\(stored in assays ",
"slot\\) must be non-negative integers."))
a <- ea
a[['extraAssay']] <- a[['extraAssay']] - 100L
expect_is(MethPat(assays = a, rowRanges = rowRanges(mp3)), "MethPat")
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###
context("MethPat constructor")
test_that("MethPat constructor returns a valid MethPat object when m = 0", {
expect_true(validObject(mp0))
})
test_that("MethPat constructor returns a valid object when m = 1", {
expect_true(validObject(mp1))
})
test_that("MethPat constructor returns a valid object when m = 2", {
expect_true(validObject(mp2))
})
test_that("MethPat constructor returns a valid object when m = 3", {
expect_true(validObject(mp3))
})
test_that("MethPat constructor returns errors on bad input", {
# TODO: None yet since the constructor doesn't check the input but relies on
# the validity methods.
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combining
###
context("Combining MethPat objects")
test_that("cbind,MethPat-method works on good input", {
expect_is(z <- cbind(mp1, mp1), "MethPat")
expect_identical(dim(z), c(10L, 4L))
expect_is(z <- cbind(mp1, mp1, mp1), "MethPat")
expect_identical(dim(z), c(10L, 6L))
expect_is(z <- cbind(mp2, mp2), "MethPat")
expect_identical(dim(z), c(10L, 4L))
expect_is(z <- cbind(mp2, mp2, mp2), "MethPat")
expect_identical(dim(z), c(10L, 6L))
expect_is(z <- cbind(mp3, mp3), "MethPat")
expect_identical(dim(z), c(10L, 4L))
expect_is(z <- cbind(mp3, mp3, mp3), "MethPat")
expect_identical(dim(z), c(10L, 6L))
})
test_that("cbind,MethPat-method returns error on bad input", {
# TODO: Write a more informative error message.
expect_error(cbind(mp1, mp2),
"Cannot compare 'MTuples' objects of different 'size'.")
# TODO: Write a more informative error message.
expect_error(cbind(mp1, mp1[1:3]),
"'...' object ranges \\(rows\\) are not compatible")
# TODO: Write a more informative error message.
expect_error(cbind(mp1, mp1[10:1]),
"'...' object ranges \\(rows\\) are not compatible")
})
test_that("rbind,MethPat-method works on good input", {
expect_is(z <- rbind(mp1, mp1), "MethPat")
expect_identical(dim(z), c(20L, 2L))
expect_is(z <- rbind(mp1, mp1, mp1), "MethPat")
expect_identical(dim(z), c(30L, 2L))
expect_is(z <- rbind(mp2, mp2), "MethPat")
expect_identical(dim(z), c(20L, 2L))
expect_is(z <- rbind(mp2, mp2, mp2), "MethPat")
expect_identical(dim(z), c(30L, 2L))
expect_is(z <- rbind(mp3, mp3), "MethPat")
expect_identical(dim(z), c(20L, 2L))
expect_is(z <- rbind(mp3, mp3, mp3), "MethPat")
expect_identical(dim(z), c(30L, 2L))
})
test_that("rbind,MethPat-method returns error on bad input", {
# TODO: Check error message is improved in new version of GenomicTuples
expect_error(rbind(mp1, mp2),
"Cannot combine MTuples containing tuples of different 'size'.")
mp1_ <- mp1
colnames(mp1_) <- c('A', 'b')
expect_error(rbind(mp1, mp1_), "'...' objects must have the same colnames")
})
test_that("combine,MethPat-method works for two MethPat objects", {
# 1-tuples
x <- mp1[1:2]
y <- mp1[2:3]
colnames(y) <- c('C', 'D')
# Can't expect_identical because identical doesn't work on assay slot,
# because it is a refernece class (I think).
expect_equal(combine(x, y),
MethPat(
assays = list(
M = matrix(as.integer(c(10, 9, NA, 1, 2, NA, NA, 9, 8, NA,
2, 3)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
U = matrix(as.integer(c(11, 12, NA, 20, 19, NA, NA, 12, 13,
NA, 19, 18)), ncol = 4,
dimnames = list(NULL,
c('A', 'B', 'C', 'D')))),
rowRanges = rowRanges(mp1)[1:3]
)
)
# 2-tuples
x <- mp2[1:2]
y <- mp2[2:3]
colnames(y) <- c('C', 'D')
# Can't expect_identical because identical() returns false on assays slot,
# (I think this is because it is a reference class).
expect_equal(combine(x, y),
MethPat(
assays = list(
MM = matrix(as.integer(c(10, 9, NA, 1, 2, NA, NA, 9, 8, NA,
2, 3)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MU = matrix(as.integer(c(11, 12, NA, 20, 19, NA, NA, 12, 13,
NA, 19, 18)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UM = matrix(as.integer(c(30, 29, NA, 21, 22, NA, NA, 29, 28,
NA, 22, 23)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UU = matrix(as.integer(c(40, 39, NA, 31, 32, NA, NA, 39, 38,
NA, 32, 33)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D')))),
rowRanges = rowRanges(mp2)[1:3]
)
)
# 3-tuples
x <- mp3[1:2]
y <- mp3[2:3]
colnames(y) <- c('C', 'D')
# Can't expect_identical because identical() returns false on assays slot,
# (I think this is because it is a reference class).
expect_equal(combine(x, y),
MethPat(
assays = list(
MMM = matrix(as.integer(c(10, 9, NA, 1, 2, NA, NA, 9, 8, NA,
2, 3)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MMU = matrix(as.integer(c(11, 12, NA, 20, 19, NA, NA, 12,
13, NA, 19, 18)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MUM = matrix(as.integer(c(30, 29, NA, 21, 22, NA, NA, 29,
28, NA, 22, 23)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
MUU = matrix(as.integer(c(40, 39, NA, 31, 32, NA, NA, 39,
38, NA, 32, 33)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UMM = matrix(as.integer(c(50, 49, NA, 41, 42, NA, NA, 49,
48, NA, 42, 43)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UMU = matrix(as.integer(c(60, 59, NA, 51, 52, NA, NA, 59,
58, NA, 52, 53)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UUM = matrix(as.integer(c(70, 69, NA, 61, 62, NA, NA, 69,
68, NA, 62, 63)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D'))),
UUU = matrix(as.integer(c(80, 79, NA, 71, 72, NA, NA, 79,
78, NA, 72, 73)), ncol = 4,
dimnames = list(NULL, c('A', 'B', 'C', 'D')))),
rowRanges = rowRanges(mp3)[1:3]
)
)
})
test_that("combine,MethPat-method returns error on bad input", {
x <- mp1[1:2]
y <- mp1[2:3]
expect_error(combine(x, y),
"Cannot combine 'MethPat' objects with duplicate colnames.")
# TODO: Check error message is improved in new version of GenomicTuples
expect_error(combine(mp1, mp2),
"Cannot combine MTuples containing tuples of different 'size'.")
x <- mp1[1:2]
y <- mp1[2:3]
colnames(y) <- c('C', 'D')
mcols(y) <- NULL
# TODO: Write a more informative error message - might need to be specified
# for SummarizedExperiment.
expect_error(combine(x, y),
"number of columns for arg 2 do not match those of first arg")
x <- mp1[1:2]
y <- mp1[2:3]
colnames(y) <- c('C', 'D')
assays(y) <- c(assays(y), list('extraAssay' =
matrix(1:4, ncol = 2,
dimnames = list(NULL, c('C', 'D')))))
expect_error(combine(x, y),
"'MethPat' objects must all contain the same assays.")
x <- mp3
y <- mp3
genome(y) <- 'mock2'
expect_error(combine(x, y),
"sequences chr1, chr2, chr3 have incompatible genomes")
y <- mp3
colnames(y) <- c('C', 'D')
seqlevelsStyle(y) <- 'NCBI'
expect_warning(combine(x, y),
"The 2 combined objects have no sequence levels in common")
y <- renameSeqlevels(y, c('chr1', '2', '3'))
expect_identical(seqlevels(combine(x, y)),
c('chr1', 'chr2', 'chr3', '2', '3'))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Getters
###
context("MethPat getters")
test_that("SummarizedExperiment inherited getters work", {
expect_identical(nrow(mp0), 0L)
expect_identical(ncol(mp0), 0L)
expect_identical(nrow(mp1), 10L)
expect_identical(ncol(mp1), 2L)
expect_identical(nrow(mp2), 10L)
expect_identical(ncol(mp2), 2L)
expect_identical(nrow(mp3), 10L)
expect_identical(ncol(mp3), 2L)
expect_identical(seqnames(mp1), mp1@rowData@seqnames)
expect_identical(ranges(mp2), mp2@rowData@ranges)
expect_identical(strand(mp3), mp3@rowData@strand)
expect_identical(mcols(mp3), mp3@rowData@elementMetadata)
expect_identical(elementMetadata(mp3), mp3@rowData@elementMetadata)
expect_identical(seqinfo(mp3), mp3@rowData@seqinfo)
expect_identical(seqlevels(mp3), seqlevels(mp3@rowData@seqinfo))
expect_identical(seqlengths(mp3), seqlengths(mp3@rowData@seqinfo))
expect_identical(isCircular(mp3), isCircular(mp3@rowData@seqinfo))
expect_identical(genome(mp3), genome(mp3@rowData@seqinfo))
expect_identical(seqlevelsStyle(mp3), seqlevelsStyle(mp3@rowData@seqinfo))
# TODO: Notifiy Bioc-Devel that granges,SummarizedExperiment-method should
# return granges(rowRanges(x)) rather than rowRanges(x) since rowRanges may
# not be a GRanges object (e.g. might be a GTuples object)?
# expect_identical(granges(mp3), granges(mp3@rowData))
expect_error(granges(mp3), "Not yet implemented")
})
test_that("methinfo getters work", {
expect_identical(methinfo(mp1), methinfo(rowRanges(mp1)))
expect_identical(methtype(mp1), methtype(rowRanges(mp1)))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Splitting
###
context("MethPat splitting")
test_that("inherited split works", {
# Split by integer
mp3_s <- split(mp3, 1:10)
expect_identical(length(mp3_s), 10L)
expect_is(mp3_s, "SimpleList")
expect_true(all(sapply(mp3_s, is, class = "MethPat")))
# Split by Rle
expect_message(mp3_s <- split(mp3, seqnames(mp3)),
paste0("Note: method with signature ",
sQuote("SummarizedExperiment#ANY"), " chosen for ",
"function ", sQuote("split"), "."))
expect_identical(length(mp3_s), 3L)
expect_is(mp3_s, "SimpleList")
expect_true(all(sapply(mp3_s, is, class = "MethPat")))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Setters
###
context("MethPat setters")
test_that("SummarizedExperiment inherited setters work", {
mp3_ <- mp3
# TODO: Why isn't there a seqnames<-,SummarizedExperiment-method?
expect_error(seqnames(mp3_) <- rev(seqnames(mp3)),
paste0("unable to find an inherited method for function ",
sQuote("seqnames<-"), " for signature ",
sQuote("\"MethPat\"")))
mp3_ <- mp3
ranges(mp3_) <- rev(ranges(mp3))
expect_identical(ranges(mp3_), rev(ranges(mp3)))
mp3_ <- mp3
strand(mp3_) <- rev(strand(mp3))
expect_identical(strand(mp3_), rev(strand(mp3)))
mp3_ <- mp3
mcols(mp3_) <- DataFrame(score = rev(mcols(mp3)$score))
expect_identical(mcols(mp3_), DataFrame(score = rev(mcols(mp3)$score)))
mp3_ <- mp3
seqinfo(mp3_) <- Seqinfo(seqnames = c("chr1", "chr2", "chr3"),
seqlengths = c(10000L, 20000L, 15000L),
isCircular = c(NA, NA, NA),
genome = c("mock1", "mock1", "mock1"))
expect_identical(seqinfo(mp3_), Seqinfo(seqnames = c("chr1", "chr2", "chr3"),
seqlengths = c(10000L, 20000L,
15000L),
isCircular = c(NA, NA, NA),
genome = c("mock1", "mock1",
"mock1")))
mp3_ <- mp3
seqlevels(mp3_) <- c('chrI', 'chrII', 'chrIII')
expect_identical(seqlevels(mp3_), c('chrI', 'chrII', 'chrIII'))
mp3_ <- mp3
seqlengths(mp3_) <- c(10000L, 20000L, 15000L)
expect_identical(seqlengths(mp3_), c('chr1' = 10000L, 'chr2' = 20000L,
'chr3' = 15000L))
mp3_ <- mp3
isCircular(mp3_) <- c('chr1' = TRUE, 'chr2' = FALSE, 'chr3' = FALSE)
expect_identical(isCircular(mp3_), c('chr1' = TRUE, 'chr2' = FALSE,
'chr3' = FALSE))
mp3_ <- mp3
genome(mp3_) <- 'foo'
expect_identical(genome(mp3_), c('chr1' = 'foo', 'chr2' = 'foo',
'chr3' = 'foo'))
})
test_that("methinfo setters work", {
methinfo(mp1) <- MethInfo(c('CG', 'CHG'))
expect_identical(methinfo(mp1), MethInfo(c('CG', 'CHG')))
methtype(mp1) <- c("CG", "CHG", "CHH")
expect_identical(methtype(mp1), c("CG", "CHG", "CHH"))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Tuples methods
###
test_that("IPD,Methpat-method works", {
expect_error(IPD(mp0), "Cannot compute IPD from an empty 'MTuples'.")
expect_error(IPD(mp1),
"It does not make sense to compute IPD when 'size' = 1.")
expect_identical(IPD(mp2), IPD(mt2))
expect_identical(IPD(mp3), IPD(mt3))
})
test_that("size,MethPat-method works", {
expect_identical(size(mp0), NA_integer_)
expect_identical(size(mp1), 1L)
expect_identical(size(mp2), 2L)
expect_identical(size(mp3), 3L)
})
test_that("tuples,MethPat-method works", {
expect_identical(tuples(mp0), tuples(gt0))
expect_identical(tuples(mp1), tuples(mt1))
expect_identical(tuples(mp2), tuples(mt2))
expect_identical(tuples(mp3), tuples(mt3))
})
test_that("tuples<-,MethPat-method works", {
tuples(mp1) <- matrix(101:110, ncol = 1)
expect_identical(tuples(mp1),
matrix(101:110, ncol = 1, dimnames = list(NULL, 'pos1')))
tuples(mp2) <- matrix(c(101:110, 102:111), ncol = 2)
expect_identical(tuples(mp2),
matrix(c(101:110, 102:111), ncol = 2,
dimnames = list(NULL, c('pos1', 'pos2'))))
tuples(mp3) <- matrix(c(101:110, 102:111, 103:112), ncol = 3)
expect_identical(tuples(mp3),
matrix(c(101:110, 102:111, 103:112), ncol = 3,
dimnames = list(NULL, c('pos1', 'pos2', 'pos3'))))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting
###
context("MethPat subsetting")
# No tests yet.
# Subsetting behaviour is entirely inherited via SummarizedExperiment.
|
calculateVariance <- function(X, Q, R, U, V, K) {
  # Cumulative proportion of (Q, R)-weighted variance of X explained by the
  # first k sparse GPCA components, for k = 1..K.
  #
  # Args:
  #   X: n x p data matrix.
  #   Q: n x n row quadratic-operator (weighting) matrix.
  #   R: p x p column quadratic-operator (weighting) matrix.
  #   U: n x K matrix of left factors (one column per component); a plain
  #      vector is accepted when K == 1.
  #   V: p x K matrix of right factors; a plain vector is accepted when
  #      K == 1.
  #   K: number of components.
  #
  # Returns:
  #   Numeric vector of length K; element k is the proportion of variance
  #   explained by components 1..k. A component whose factors are all zero
  #   carries the previous proportion forward (0 for the first component).
  #
  # Bug fix: the original K == 1 branch referenced the undefined loop index
  # `i` when U or V was all-zero, raising "object 'i' not found". Both the
  # K == 1 and K > 1 cases are now handled by a single loop.
  U <- as.matrix(U)
  V <- as.matrix(V)
  QXR <- Q %*% X %*% R
  # Total weighted variance: tr(X' Q X R).
  denominator <- sum(diag(t(X) %*% QXR))
  prop_var <- rep(0, K)
  for (i in seq_len(K)) {
    if (sum(abs(U[, i])) != 0 && sum(abs(V[, i])) != 0) {
      Ui <- U[, 1:i, drop = FALSE]
      Vi <- V[, 1:i, drop = FALSE]
      # Q- (resp. R-) orthogonal projections onto the span of the first
      # i left (resp. right) factors.
      Pu <- Ui %*% solve(t(Ui) %*% Q %*% Ui) %*% t(Ui)
      Pv <- Vi %*% solve(t(Vi) %*% R %*% Vi) %*% t(Vi)
      Xk <- Pu %*% QXR %*% Pv
      numerator <- sum(diag(t(Xk) %*% Q %*% Xk %*% R))
      prop_var[i] <- numerator / denominator
    } else if (i > 1) {
      # A zero component explains nothing extra: carry the total forward.
      prop_var[i] <- prop_var[i - 1]
    }
  }
  prop_var
}
|
/sGPCA/R/calculateVariance.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 879
|
r
|
calculateVariance <- function(X, Q, R, U, V, K) {
  # Cumulative proportion of (Q, R)-weighted variance of X explained by
  # the first k = 1..K components (columns of U and V).
  #
  # Args:
  #   X: n x p data matrix.
  #   Q: n x n row weight (quadratic-operator) matrix.
  #   R: p x p column weight (quadratic-operator) matrix.
  #   U: n x K matrix of left factors.
  #   V: p x K matrix of right factors.
  #   K: number of components.
  # Returns:
  #   Numeric vector of length K of cumulative variance explained
  #   (zero entries for components whose factors are all zero).
  QXR <- Q %*% X %*% R
  # Total weighted variance: tr(t(X) Q X R).
  denominator <- sum(diag(t(X) %*% QXR))
  var <- rep(0, K)
  if (K > 1) {
    for (i in 1:K) {
      if (sum(abs(U[, i])) != 0 && sum(abs(V[, i])) != 0) {
        # Q-/R-orthogonal projections onto the span of the first i factors.
        Pu <- U[, 1:i] %*% solve(t(U[, 1:i]) %*% Q %*% U[, 1:i]) %*% t(U[, 1:i])
        Pv <- V[, 1:i] %*% solve(t(V[, 1:i]) %*% R %*% V[, 1:i]) %*% t(V[, 1:i])
        Xk <- Pu %*% QXR %*% Pv
        numerator <- sum(diag(t(Xk) %*% Q %*% Xk %*% R))
        var[i] <- numerator / denominator
      } else if (i > 1) {
        # A zero component adds nothing; carry the previous cumulative value.
        var[i] <- var[i - 1]
      }
    }
  }
  if (K == 1) {
    if (sum(abs(U)) != 0 && sum(abs(V)) != 0) {
      Pu <- U %*% solve(t(U) %*% Q %*% U) %*% t(U)
      Pv <- V %*% solve(t(V) %*% R %*% V) %*% t(V)
      Xk <- Pu %*% QXR %*% Pv
      numerator <- sum(diag(t(Xk) %*% Q %*% Xk %*% R))
      var[1] <- numerator / denominator
    }
    # BUG FIX: the original had `else if (i > 1)` here, referencing the
    # undefined loop index `i` — it errored whenever U or V was all zero
    # with K == 1. A zero single component explains no variance, so
    # var[1] correctly remains 0.
  }
  var
}
|
# Run the PIUS benchmark: build a treatment data frame from the supplied
# parameter grids, then evaluate every treatment with piusDFTest.
#
# Arguments mirror piusBenchmarkDFCreator (MVec..univVec) and piusDFTest
# (iStrength..numReplicates); see those scripts for details.
# Returns the data frame of test results produced by piusDFTest.
piusBenchmark = function(MVec = NULL, NVec = NULL, numSubgroupsVec = NULL, lambdaVec = NULL,
                         univVec = NULL, iStrength = 1, sigmaMax = 0.1, thresholdMult = 10 ^ (-1),
                         maxSteps = 10 ^ 4, tStep = 10 ^ (-2), intTime = 1, interSmplMult = 0.01,
                         returnParams = FALSE, conGraph = NULL, rollRange = 0.025, numClust = NULL,
                         numReplicates = 2){
  # Load the helper scripts (source() default puts them in the global env).
  source("piusBenchmarkDFCreator.r")
  source("piusDFTest.r")
  treatments = piusBenchmarkDFCreator(MVec = MVec, NVec = NVec,
                                      numSubgroupsVec = numSubgroupsVec,
                                      lambdaVec = lambdaVec, univVec = univVec)
  # Evaluate every treatment row and return the combined results.
  piusDFTest(treatmentDF = treatments, iStrength = iStrength, sigmaMax = sigmaMax,
             thresholdMult = thresholdMult, maxSteps = maxSteps, tStep = tStep,
             intTime = intTime, interSmplMult = interSmplMult,
             returnParams = returnParams, conGraph = conGraph,
             rollRange = rollRange, numClust = numClust,
             numReplicates = numReplicates)
} # end piusBenchmark function
|
/piusBenchmark.r
|
permissive
|
JamesRekow/Canine_Cohort_GLV_Model
|
R
| false
| false
| 1,273
|
r
|
piusBenchmark = function(MVec = NULL, NVec = NULL, numSubgroupsVec = NULL, lambdaVec = NULL,
univVec = NULL, iStrength = 1, sigmaMax = 0.1, thresholdMult = 10 ^ (-1),
maxSteps = 10 ^ 4, tStep = 10 ^ (-2), intTime = 1, interSmplMult = 0.01,
returnParams = FALSE, conGraph = NULL, rollRange = 0.025, numClust = NULL,
numReplicates = 2){
# ARGS:
#
# RETURNS:
source("piusBenchmarkDFCreator.r")
source("piusDFTest.r")
treatmentDF = piusBenchmarkDFCreator(MVec = MVec, NVec = NVec, numSubgroupsVec = numSubgroupsVec,
lambdaVec = lambdaVec, univVec = univVec)
testResultsDF = piusDFTest(treatmentDF = treatmentDF, iStrength = iStrength, sigmaMax = sigmaMax,
thresholdMult = thresholdMult, maxSteps = maxSteps, tStep = tStep,
intTime = intTime, interSmplMult = interSmplMult, returnParams = returnParams,
conGraph = conGraph, rollRange = rollRange, numClust = numClust,
numReplicates = numReplicates)
return(testResultsDF)
} # end piusBenchmark function
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objective-dispersion.R
\name{dispersion_objective}
\alias{dispersion_objective}
\title{Cluster dispersion}
\usage{
dispersion_objective(x, clusters)
}
\arguments{
\item{x}{The data input. Can be one of two structures: (1) A
feature matrix where rows correspond to elements and columns
correspond to variables (a single numeric variable can be
passed as a vector). (2) An N x N matrix dissimilarity matrix;
can be an object of class \code{dist} (e.g., returned by
\code{\link{dist}} or \code{\link{as.dist}}) or a \code{matrix}
where the entries of the upper and lower triangular matrix
represent pairwise dissimilarities.}
\item{clusters}{A vector representing (anti)clusters (e.g.,
returned by \code{\link{anticlustering}}).}
}
\description{
Compute the dispersion objective for a given clustering (i.e., the
minimum distance between two elements within the same cluster).
}
\details{
The dispersion is the minimum distance between two elements within
the same cluster. When the input \code{x} is a feature matrix, the
Euclidean distance is used as the distance unit. Maximizing the
dispersion maximizes the minimum heterogeneity within clusters and
is an anticlustering task.
}
\examples{
N <- 50 # number of elements
M <- 2 # number of variables per element
K <- 5 # number of clusters
random_data <- matrix(rnorm(N * M), ncol = M)
random_clusters <- sample(rep_len(1:K, N))
dispersion_objective(random_data, random_clusters)
# Maximize the dispersion
optimized_clusters <- anticlustering(
random_data,
K = random_clusters,
objective = dispersion_objective
)
dispersion_objective(random_data, optimized_clusters)
}
\references{
Brusco, M. J., Cradit, J. D., & Steinley, D. (in press). Combining
diversity and dispersion criteria for anticlustering: A bicriterion
approach. British Journal of Mathematical and Statistical
Psychology. https://doi.org/10.1111/bmsp.12186
}
|
/man/dispersion_objective.Rd
|
permissive
|
ManaLama/anticlust
|
R
| false
| true
| 1,965
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objective-dispersion.R
\name{dispersion_objective}
\alias{dispersion_objective}
\title{Cluster dispersion}
\usage{
dispersion_objective(x, clusters)
}
\arguments{
\item{x}{The data input. Can be one of two structures: (1) A
feature matrix where rows correspond to elements and columns
correspond to variables (a single numeric variable can be
passed as a vector). (2) An N x N matrix dissimilarity matrix;
can be an object of class \code{dist} (e.g., returned by
\code{\link{dist}} or \code{\link{as.dist}}) or a \code{matrix}
where the entries of the upper and lower triangular matrix
represent pairwise dissimilarities.}
\item{clusters}{A vector representing (anti)clusters (e.g.,
returned by \code{\link{anticlustering}}).}
}
\description{
Compute the dispersion objective for a given clustering (i.e., the
minimum distance between two elements within the same cluster).
}
\details{
The dispersion is the minimum distance between two elements within
the same cluster. When the input \code{x} is a feature matrix, the
Euclidean distance is used as the distance unit. Maximizing the
dispersion maximizes the minimum heterogeneity within clusters and
is an anticlustering task.
}
\examples{
N <- 50 # number of elements
M <- 2 # number of variables per element
K <- 5 # number of clusters
random_data <- matrix(rnorm(N * M), ncol = M)
random_clusters <- sample(rep_len(1:K, N))
dispersion_objective(random_data, random_clusters)
# Maximize the dispersion
optimized_clusters <- anticlustering(
random_data,
K = random_clusters,
objective = dispersion_objective
)
dispersion_objective(random_data, optimized_clusters)
}
\references{
Brusco, M. J., Cradit, J. D., & Steinley, D. (in press). Combining
diversity and dispersion criteria for anticlustering: A bicriterion
approach. British Journal of Mathematical and Statistical
Psychology. https://doi.org/10.1111/bmsp.12186
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/xml_structure.R
\name{xml_structure}
\alias{html_structure}
\alias{xml_structure}
\title{Show the structure of an html/xml document.}
\usage{
xml_structure(x, indent = 2)
html_structure(x, indent = 2)
}
\arguments{
\item{x}{HTML/XML document (or part there of)}
\item{indent}{Number of spaces to indent}
}
\description{
Show the structure of an html/xml document without displaying any of
the values. This is useful if you want to get a high level view of the
way a document is organised. Compared to \code{xml_structure},
\code{html_structure} prints the id and class attributes.
}
\examples{
xml_structure(read_xml("<a><b><c/><c/></b><d/></a>"))
rproj <- read_html(system.file("extdata","r-project.html", package = "xml2"))
xml_structure(rproj)
xml_structure(xml_find_all(rproj, ".//p"))
h <- read_html("<body><p id = 'a'></p><p class = 'c d'></p></body>")
html_structure(h)
}
|
/man/xml_structure.Rd
|
no_license
|
darrkj/xml2
|
R
| false
| false
| 969
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/xml_structure.R
\name{xml_structure}
\alias{html_structure}
\alias{xml_structure}
\title{Show the structure of an html/xml document.}
\usage{
xml_structure(x, indent = 2)
html_structure(x, indent = 2)
}
\arguments{
\item{x}{HTML/XML document (or part there of)}
\item{indent}{Number of spaces to indent}
}
\description{
Show the structure of an html/xml document without displaying any of
the values. This is useful if you want to get a high level view of the
way a document is organised. Compared to \code{xml_structure},
\code{html_structure} prints the id and class attributes.
}
\examples{
xml_structure(read_xml("<a><b><c/><c/></b><d/></a>"))
rproj <- read_html(system.file("extdata","r-project.html", package = "xml2"))
xml_structure(rproj)
xml_structure(xml_find_all(rproj, ".//p"))
h <- read_html("<body><p id = 'a'></p><p class = 'c d'></p></body>")
html_structure(h)
}
|
# ANY UNIT RECTANGLE CAN BE DIVIDED INTO A FINITE NUMBER OF SMALLER RECTANGLES
# FOR EXAMPLE, A 3x2 RECTANGLE CAN BE DIVIDED INTO 18 RECTANGLES
# NO RECTANGLE IS DIVIDED INTO EXACTLY 2,000,000 BUT FIND THE CLOSEST SOLUTION
# SOLVES IN ~5 SECONDS
subdivide <- function(x, y) {
  # Count the axis-aligned sub-rectangles contained in an x-by-y grid of
  # unit squares.
  #
  # A sub-rectangle is fixed by choosing 2 of the (x + 1) vertical and
  # 2 of the (y + 1) horizontal grid lines, so the count is
  #   C(x + 1, 2) * C(y + 1, 2) = [x(x + 1) / 2] * [y(y + 1) / 2].
  # This closed form replaces the original O(x * y) triple loop and
  # returns identical values for all non-negative integer inputs.
  #
  # Args:
  #   x, y: integer side lengths of the grid.
  # Returns:
  #   Number of sub-rectangles (numeric scalar).
  (x * (x + 1) / 2) * (y * (y + 1) / 2)
}
# Search for the grid whose sub-rectangle count is closest to 2,000,000
# (Project Euler problem 85).
target <- 2000000
distance <- target
# Start with a 1-wide grid and grow y until the count overshoots the
# target, tracking the closest count seen so far.
x <- 1
y <- x
s <- subdivide(x,y)
while(s < target){
  y <- y + 1
  s <- subdivide(x,y)
  if(abs(s-target) < distance){
    distance <- abs(s - target)
    best_x <- x
    best_y <- y
  }
}
# For wider grids the crossover y can only shrink, so reuse the previous
# y and walk it downward while the count still exceeds the target.
# NOTE(review): if subdivide(x, y) is already <= target on entry, the
# while body never runs and no candidate is recorded for that x —
# presumably safe because y starts from the previous overshoot, but
# worth confirming.
for(x in 2:100){
  s <- subdivide(x,y)
  while(s > target){
    y <- y - 1
    s <- subdivide(x,y)
    if(abs(s-target) < distance){
      distance <- abs(s-target)
      best_x <- x
      best_y <- y
    }
  }
}
# Report the best grid dimensions and their product (the puzzle answer).
print(c(best_x,best_y,best_x*best_y))
|
/Problems_76_to_100/Euler085.R
|
permissive
|
lawphill/ProjectEuler
|
R
| false
| false
| 1,099
|
r
|
# ANY UNIT RECTANGLE CAN BE DIVIDED INTO A FINITE NUMBER OF SMALLER RECTANGLES
# FOR EXAMPLE, A 3x2 RECTANGLE CAN BE DIVIDED INTO 18 RECTANGLES
# NO RECTANGLE IS DIVIDED INTO EXACTLY 2,000,000 BUT FIND THE CLOSEST SOLUTION
# SOLVES IN ~5 SECONDS
subdivide <- function(x, y) {
  # Count the axis-aligned sub-rectangles contained in an x-by-y grid of
  # unit squares.
  #
  # A sub-rectangle is fixed by choosing 2 of the (x + 1) vertical and
  # 2 of the (y + 1) horizontal grid lines, so the count is
  #   C(x + 1, 2) * C(y + 1, 2) = [x(x + 1) / 2] * [y(y + 1) / 2].
  # This closed form replaces the original O(x * y) triple loop and
  # returns identical values for all non-negative integer inputs.
  #
  # Args:
  #   x, y: integer side lengths of the grid.
  # Returns:
  #   Number of sub-rectangles (numeric scalar).
  (x * (x + 1) / 2) * (y * (y + 1) / 2)
}
target <- 2000000
distance <- target
x <- 1
y <- x
s <- subdivide(x,y)
while(s < target){
y <- y + 1
s <- subdivide(x,y)
if(abs(s-target) < distance){
distance <- abs(s - target)
best_x <- x
best_y <- y
}
}
for(x in 2:100){
s <- subdivide(x,y)
while(s > target){
y <- y - 1
s <- subdivide(x,y)
if(abs(s-target) < distance){
distance <- abs(s-target)
best_x <- x
best_y <- y
}
}
}
print(c(best_x,best_y,best_x*best_y))
|
#' @importFrom rcmdcheck compare_checks
try_compare_checks <- function(old, new, package, version, maintainer) {
  # Compare two rcmdcheck results, degrading gracefully to an
  # "rcmdcheck_error" object when either check run failed or the
  # comparison itself errors.
  #
  # Args:
  #   old, new: rcmdcheck results, or error conditions from running them.
  #   package, version, maintainer: metadata carried on the result.
  # Returns:
  #   The comparison object from compare_checks() (with package/version/
  #   maintainer fields attached) on success, otherwise an object of
  #   class "rcmdcheck_error" carrying the inputs and metadata.

  # Single constructor for the failure object; the original duplicated
  # this structure() call verbatim in two places.
  as_error <- function() {
    structure(
      list(
        old = old,
        new = new,
        cmp = NULL,
        package = package,
        version = version,
        maintainer = maintainer
      ),
      class = "rcmdcheck_error"
    )
  }

  if (inherits(old, "error") || inherits(new, "error")) {
    return(as_error())
  }

  tryCatch(
    {
      check <- compare_checks(old, new)
      check$package <- package
      check$version <- version
      check$maintainer <- maintainer
      check
    },
    error = function(e) as_error()
  )
}
#' @importFrom clisymbols symbol
#' @importFrom crayon make_style red
# One-line console summary for an "rcmdcheck_error" object (produced
# when a check run or the comparison failed). Prints a package header
# followed by error/warning/note counts, substituting "?" for whichever
# side (old/new) errored and therefore has no counts.
summary.rcmdcheck_error <- function(object, ...) {
  pale <- make_style("darkgrey")
  header <- paste(red(symbol$cross), object$package, object$version)
  if (inherits(object$old, "error") && inherits(object$new, "error")) {
    # Both runs failed: no counts are known on either side.
    # NOTE(review): this branch has no trailing " | " after the notes
    # field, unlike the two branches below — presumably unintentional.
    cat(
      pale(paste0(
        col_align(header, width = 40),
        " ", symbol$line, symbol$line, " ",
        "E: ", red("?-?+?"), " | ",
        "W: ", red("?-?+?"), " | ",
        "N: ", red("?-?+?")
      )),
      "\n",
      sep = ""
    )
  } else if (inherits(object$old, "error")) {
    # Only the old run failed: new-side counts are available.
    cat(
      pale(paste0(
        col_align(header, width = 40),
        " ", symbol$line, symbol$line, " ",
        "E: ", red(paste0("? +", length(object$new$errors))), " | ",
        "W: ", red(paste0("? +", length(object$new$warnings))), " | ",
        "N: ", red(paste0("? +", length(object$new$notes))), " | "
      )),
      "\n",
      sep = ""
    )
  } else {
    # The new run failed (or the comparison errored): old-side counts only.
    cat(
      pale(paste0(
        col_align(header, width = 40),
        " ", symbol$line, symbol$line, " ",
        "E: ", red(paste0(length(object$old$errors), "-?+?")), " | ",
        "W: ", red(paste0(length(object$old$warnings), "-?+?")), " | ",
        "N: ", red(paste0(length(object$old$notes), "-?+?")), " | "
      )),
      "\n",
      sep = ""
    )
  }
}
|
/R/compare.R
|
permissive
|
bbolker/revdepcheck-1
|
R
| false
| false
| 2,251
|
r
|
#' @importFrom rcmdcheck compare_checks
try_compare_checks <- function(old, new, package, version, maintainer) {
  # Compare two rcmdcheck results, degrading gracefully to an
  # "rcmdcheck_error" object when either check run failed or the
  # comparison itself errors.
  #
  # Args:
  #   old, new: rcmdcheck results, or error conditions from running them.
  #   package, version, maintainer: metadata carried on the result.
  # Returns:
  #   The comparison object from compare_checks() (with package/version/
  #   maintainer fields attached) on success, otherwise an object of
  #   class "rcmdcheck_error" carrying the inputs and metadata.

  # Single constructor for the failure object; the original duplicated
  # this structure() call verbatim in two places.
  as_error <- function() {
    structure(
      list(
        old = old,
        new = new,
        cmp = NULL,
        package = package,
        version = version,
        maintainer = maintainer
      ),
      class = "rcmdcheck_error"
    )
  }

  if (inherits(old, "error") || inherits(new, "error")) {
    return(as_error())
  }

  tryCatch(
    {
      check <- compare_checks(old, new)
      check$package <- package
      check$version <- version
      check$maintainer <- maintainer
      check
    },
    error = function(e) as_error()
  )
}
#' @importFrom clisymbols symbol
#' @importFrom crayon make_style red
summary.rcmdcheck_error <- function(object, ...) {
pale <- make_style("darkgrey")
header <- paste(red(symbol$cross), object$package, object$version)
if (inherits(object$old, "error") && inherits(object$new, "error")) {
cat(
pale(paste0(
col_align(header, width = 40),
" ", symbol$line, symbol$line, " ",
"E: ", red("?-?+?"), " | ",
"W: ", red("?-?+?"), " | ",
"N: ", red("?-?+?")
)),
"\n",
sep = ""
)
} else if (inherits(object$old, "error")) {
cat(
pale(paste0(
col_align(header, width = 40),
" ", symbol$line, symbol$line, " ",
"E: ", red(paste0("? +", length(object$new$errors))), " | ",
"W: ", red(paste0("? +", length(object$new$warnings))), " | ",
"N: ", red(paste0("? +", length(object$new$notes))), " | "
)),
"\n",
sep = ""
)
} else {
cat(
pale(paste0(
col_align(header, width = 40),
" ", symbol$line, symbol$line, " ",
"E: ", red(paste0(length(object$old$errors), "-?+?")), " | ",
"W: ", red(paste0(length(object$old$warnings), "-?+?")), " | ",
"N: ", red(paste0(length(object$old$notes), "-?+?")), " | "
)),
"\n",
sep = ""
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcl.R
\name{mcl}
\alias{mcl}
\title{Run mcl.}
\usage{
mcl(mcl_input, mcl_output, i_value, e_value, other_args = NULL, ...)
}
\arguments{
\item{mcl_input}{Character vector of length one; the path to the input file
for mcl clustering.}
\item{mcl_output}{Character vector of length one; the path to the output
file produced by the mcl algorithm.}
\item{i_value}{Numeric or character vector of length one; the inflation value.}
\item{e_value}{Numeric or character vector of length one; the minimal -log
transformed evalue to be considered by the algorithm.}
\item{other_args}{Character vector; other arguments to pass to mcl. Each should
be an element of the vector. For example, to pass "--abc" to specify the input
file format and "-te" to specify number of threads, use \code{c("--abc", "-te", "2")}.}
\item{...}{Other arguments. Not used by this function, but meant to be used
by \code{\link[drake]{drake_plan}} for tracking during workflows.}
}
\value{
A plain text file of tab-separated values, where each value on a line
belongs to the same cluster.
}
\description{
This is a wrapper for the Markov Cluster Algorithm (mcl), a clustering algorithm
for graphs. Here, it is meant to be used on genetic distances from BLAST.
}
\examples{
\dontrun{mcl(mcl_input = "some/folder/distance.file", mcl_output = "some/folder/mcl_output.txt", i_value = 1.4, e_value = 5)}
}
\references{
Stijn van Dongen, A cluster algorithm for graphs. Technical Report INS-R0010, National Research Institute for Mathematics and Computer Science in the Netherlands, Amsterdam, May 2000. \url{https://micans.org/mcl/}
}
\author{
Joel H Nitta, \email{joelnitta@gmail.com}
}
|
/man/mcl.Rd
|
permissive
|
joelnitta/baitfindR
|
R
| false
| true
| 1,729
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcl.R
\name{mcl}
\alias{mcl}
\title{Run mcl.}
\usage{
mcl(mcl_input, mcl_output, i_value, e_value, other_args = NULL, ...)
}
\arguments{
\item{mcl_input}{Character vector of length one; the path to the input file
for mcl clustering.}
\item{mcl_output}{Character vector of length one; the path to the output
file produced by the mcl algorithm.}
\item{i_value}{Numeric or character vector of length one; the inflation value.}
\item{e_value}{Numeric or character vector of length one; the minimal -log
transformed evalue to be considered by the algorithm.}
\item{other_args}{Character vector; other arguments to pass to mcl. Each should
be an element of the vector. For example, to pass "--abc" to specify the input
file format and "-te" to specify number of threads, use \code{c("--abc", "-te", "2")}.}
\item{...}{Other arguments. Not used by this function, but meant to be used
by \code{\link[drake]{drake_plan}} for tracking during workflows.}
}
\value{
A plain text file of tab-separated values, where each value on a line
belongs to the same cluster.
}
\description{
This is a wrapper for the Markov Cluster Algorithm (mcl), a clustering algorithm
for graphs. Here, it is meant to be used on genetic distances from BLAST.
}
\examples{
\dontrun{mcl(mcl_input = "some/folder/distance.file", mcl_output = "some/folder/mcl_output.txt", i_value = 1.4, e_value = 5)}
}
\references{
Stijn van Dongen, A cluster algorithm for graphs. Technical Report INS-R0010, National Research Institute for Mathematics and Computer Science in the Netherlands, Amsterdam, May 2000. \url{https://micans.org/mcl/}
}
\author{
Joel H Nitta, \email{joelnitta@gmail.com}
}
|
#' hospitals.
#'
#' @name hospitals
#' @docType package
NULL
|
/R/hospitals-package.r
|
no_license
|
jjchern/hospitals
|
R
| false
| false
| 61
|
r
|
#' hospitals.
#'
#' @name hospitals
#' @docType package
NULL
|
########################################
# Gaussian process
########################################
# Kernel value for one pair of rows of the matrix d.
# `rows` holds the two row indices; `k` is a kernel spec list with the
# kernel function in $k and its parameters in $params.
cov.m <- function(rows, k, d) {
  kern_fun <- k$k
  kern_params <- k$params
  kern_fun(d[rows[1], ], d[rows[2], ], kern_params)
}
# Matrix autocorrelation: kernel (Gram) matrix over the rows of X.
#
# Args:
#   X: data matrix, one observation per row.
#   k: kernel spec list with the kernel function in $k and its
#      parameters in $params.
#   symmetric: if TRUE, mirror the upper triangle into the lower so the
#     result is fully symmetric; otherwise the lower triangle stays 0.
#   diagonal: value placed on the diagonal (1 for a correlation-style
#     matrix; 0 when accumulating kernel partial derivatives).
# Returns:
#   A single matrix, or — when the kernel returns one value per
#   parameter component (e.g. an ARD length-scale vector) — a list of
#   matrices, one per component.
#
# NOTE(review): with nrow(X) == 2 there is only one row pair, so a
# vector-valued kernel also yields ncol(covs) == 1 and falls into the
# scalar branch — looks like an unhandled edge case; confirm.
K.m <- function(X, k, symmetric = F, diagonal=1) {
  # Get row index pairs in the order that fills the lower triangle
  # of the covariance matrix column by column.
  rows <- combn(1:nrow(X), 2)
  # Kernel value for every pair of rows (one column per pair).
  covs <- as.matrix(apply(rows, 2, cov.m, k = k, d = X))
  Ks <- list()
  # If the parameter is a vector (e.g. the lengthscale parameter in an
  # ARD kernel), each row of covs corresponds to one component and we
  # build one matrix per component.
  if(ncol(covs) > 1) {
    for(i in 1:nrow(covs)) {
      K <- diag(diagonal, nrow(X))
      # Fill the lower triangle, then transpose so the values land in
      # the upper triangle.
      K[lower.tri(K)] <- covs[i,]
      K <- t(K)
      if (symmetric)
        K[lower.tri(K)] <- t(K)[lower.tri(K)]
      Ks[[i]] <- K
    }
    Ks
  } else { # if the parameter is a scalar
    K <- diag(diagonal, nrow(X))
    K[lower.tri(K)] <- covs
    K <- t(K)
    if (symmetric)
      K[lower.tri(K)] <- t(K)[lower.tri(K)]
    K
  }
}
# Kernel value between row rows[1] of A and row rows[2] of B.
# rows - pair of row indices (first into A, second into B).
cov.mm <- function(rows, k, A, B) {
  kern <- k$k
  kern(A[rows[1], ], B[rows[2], ], k$params)
}
# Cross-covariance matrix between the rows of X and the rows of Y:
# out[i, j] = k(X[i, ], Y[j, ]).
K.mm <- function(X, Y, k) {
  kern <- k$k
  params <- k$params
  out <- matrix(0, nrow = nrow(X), ncol = nrow(Y))
  for (j in seq_len(nrow(Y))) {
    for (i in seq_len(nrow(X))) {
      out[i, j] <- kern(X[i, ], Y[j, ], params)
    }
  }
  out
}
# Trace (sum of the diagonal elements) of a square matrix.
# Note: this shadows base::trace() within this file's namespace.
trace <- function(M) sum(diag(M))
# Partial derivative of the log marginal likelihood with respect to one
# kernel parameter, via the standard identity
#   d/dtheta log p(y|X) = 1/2 tr((alpha alpha^T - K^-1) dK/dtheta),
# with alpha = K^-1 y (Rasmussen & Williams, eq. 5.9).
#
# Args:
#   gp:   fitted gp list (uses gp$X, gp$alpha, gp$L, gp$kernel$params).
#   k.pd: element-wise partial-derivative function of the kernel.
# Returns:
#   A scalar, or a numeric vector when the parameter is vector-valued
#   (one entry per component, e.g. ARD length scales).
gp.marginal.pd <- function(gp, k.pd) {
  # Matrix (or list of matrices) of element-wise kernel derivatives.
  # diagonal = 0: the fixed diagonal does not depend on the parameter.
  d_K <- K.m(gp$X,
             k = list(k=k.pd, params=gp$kernel$params),
             symmetric = T,
             diagonal = 0)
  pds <- c()
  # K.m returns a list when the parameter is vector-valued.
  if(is.list(d_K)) {
    for (i in 1:length(d_K)) {
      pd_i <- (1 / 2) * trace((gp$alpha %*% t(gp$alpha) - chol2inv(gp$L)) %*% d_K[[i]])
      pds <- c(pds, pd_i)
    }
    pds
  } else {
    (1 / 2) * trace((gp$alpha %*% t(gp$alpha) - chol2inv(gp$L)) %*% d_K)
  }
}
# Both the marginal likelihood and gradient functions have the form
# function(param_values, gp) because optim() requires this format.
# param_values is a flat numeric vector that is converted back to
# kernel parameters by gp.kernel.reset.

# Log marginal likelihood of the GP at the given kernel parameters.
gp.marginal <- function(param_values, gp) {
  # TODO: check if params are different (skip the refit when unchanged)
  gp <- gp.kernel.reset(param_values, gp)
  gp <- gp.refit(gp)
  # Marginal likelihood: -1/2 y^T K^-1 y - D - n/2 log(2*pi).
  # NOTE(review): the standard form subtracts (1/2) log|K_y|, but D here
  # is gp$det_Ky = det(K.m(y, kernel)) — a raw determinant of a kernel
  # matrix built from y rather than a log-determinant of K. Looks
  # mathematically suspect; confirm intent before relying on the values.
  D <- gp$det_Ky
  n <- nrow(gp$X)
  ml <- -1 / 2 * t(gp$y) %*% gp$alpha - D - n/2 * log(2 * pi)
  if(gp$verbose){
    print('--------optimizer params------------')
    print(param_values)
    print('--------marginal likelihood---------')
    print(ml)
  }
  ml
}
# Gradient of the log marginal likelihood with respect to the flat
# vector of kernel parameters (same layout the optimizer uses).
# Signature is function(param_values, gp) because optim() requires it.
gp.gradient <- function(param_values, gp) {
  gp <- gp.kernel.reset(param_values, gp)
  gp <- gp.refit(gp)
  gradient <- c()
  # Append each partial derivative (pd) to the gradient, one named
  # parameter at a time (vector parameters contribute several entries).
  for (param in names(gp$kernel$params)) {
    pd <- gp$kernel$pds[[param]]
    g <- gp.marginal.pd(gp, pd)
    gradient <- c(gradient, g)
  }
  if(gp$verbose){
    print('-----------gradient-----------')
    print(gradient)
    print(length(gradient))
    print('------------------------------')
  }
  gradient
}
# Maximize the log marginal likelihood over the kernel parameters with
# L-BFGS-B, then refit the GP at the optimum.
#
# Args:
#   gp:           fitted gp list (see gp.fit).
#   lower, upper: box constraints for the flattened parameter vector.
# Returns:
#   The gp refitted with the optimized kernel parameters.
gp.optimize <- function(gp, lower, upper) {
  init_params = unlist(gp$kernel$params)
  optimized <- optim(
    par = init_params,
    fn = gp.marginal,
    gr = gp.gradient,
    method = 'L-BFGS-B',
    lower=lower, # lower bound of parameters for optimizer
    upper=upper, # upper bound
    control = list(fnscale = -1), #maximize marginal likelihood, optim minimizes by default
    gp=gp
  )
  best_params <- optimized$par
  if(gp$verbose) {
    print('-------optimized---------')
    print(optimized)
  }
  # Install the optimal parameters and refit before returning.
  gp <- gp.kernel.reset(best_params, gp)
  gp <- gp.refit(gp)
  gp
}
# Resets the kernel parameters before re-fitting the model.
#
# `params` is the flat numeric vector the optimizer works with; it is
# unpacked back into gp$kernel$params, with each parameter consuming as
# many slots as its length (vector parameters such as ARD length scales
# take several).
#
# BUG FIX: the original indexed with a cumulative `offset` that was only
# correct for at most one vector-valued parameter; with two or more
# vector parameters, later ones received overlapping, over-long slices.
# A running cursor consumes exactly length(parameter) slots each time.
gp.kernel.reset <- function(params, gp) {
  pos <- 1
  for (i in seq_along(gp$kernel$params)) {
    len <- length(gp$kernel$params[[i]])
    if (len > 1) {
      gp$kernel$params[[i]] <- params[pos:(pos + len - 1)]
    } else {
      gp$kernel$params[[i]] <- params[pos]
    }
    pos <- pos + len
  }
  gp
}
# Fit a gaussian process using the supplied kernel.
#
# Args:
#   X:       training inputs, one observation per row.
#   y:       training targets (column matrix).
#   kernel:  kernel spec list (function in $k, derivatives in $pds,
#            parameters in $params); see rbf.k / rbf.ard.k.
#   sigma_n: observation-noise variance added to the diagonal.
#   verbose: print optimizer/likelihood diagnostics when TRUE.
# Returns:
#   A gp list with the training data, kernel, Cholesky factor L,
#   alpha = K^-1 y, and cached det_Ky.
gp.fit <- function(X, y, kernel, sigma_n, verbose=F) {
  # Covariance matrix with noise variance on the diagonal
  K <- K.m(X, kernel) + diag(sigma_n, nrow(X))
  # Cholesky factor of the covariance matrix
  L <- chol(K)
  gp <- list()
  gp$X <- X
  gp$y <- y
  gp$kernel <- kernel
  # alpha = K^-1 y, via the Cholesky factor
  gp$alpha <- chol2inv(L) %*% y
  gp$L <- L
  # NOTE(review): this is det() of a kernel matrix built from y (not X),
  # and a raw determinant rather than a log-determinant, yet gp.marginal
  # subtracts it directly — both look suspect; confirm intent.
  gp$det_Ky <- det(K.m(gp$y, kernel))
  gp$sigma_n <- sigma_n
  gp$verbose = verbose
  gp
}
# Simplified refit: recompute the derived quantities (Cholesky factor,
# alpha, det_Ky) after gp$kernel$params changed, without re-storing
# X/y/kernel/sigma_n. Same math as gp.fit.
gp.refit <- function(gp) {
  K <- K.m(gp$X, gp$kernel) + diag(gp$sigma_n, nrow(gp$X))
  L <- chol(K)
  gp$L <- L
  gp$alpha <- chol2inv(L) %*% gp$y
  # NOTE(review): as in gp.fit, raw det() of a kernel matrix built from
  # y rather than a log-determinant of K — confirm intent.
  gp$det_Ky <- det(K.m(gp$y, gp$kernel))
  gp
}
# Posterior mean and covariance of the fitted GP at the test inputs.
# Returns list(means = posterior mean, vars = posterior covariance).
gp.predict <- function(gp, x_test) {
  k_star <- K.mm(gp$X, x_test, gp$kernel)
  post_mean <- t(k_star) %*% gp$alpha
  post_cov <- K.m(x_test, gp$kernel) - t(k_star) %*% chol2inv(gp$L) %*% k_star
  list(means = post_mean, vars = post_cov)
}
# Kernel functions.
# Each function has the form function(x, y, params), where params is a
# list of parameters for the function.

# Radial Basis Function (RBF) kernel:
#   exp(-1/2 * sum(((x - y) / sigma)^2))
# (sigma may be a vector, giving one scale per dimension).
rbf <- function(x, y, params) {
  diff_sq <- (x - y)^2
  exp(-0.5 * sum(params$sigma^-2 * diff_sq))
}
# Partial derivatives of the RBF kernel, one entry per parameter name.
rbf.pds <- list(
  sigma = function(x, y, params) {
    s <- params$sigma
    diff_sq <- (x - y)^2
    exp(-0.5 * sum(s^-2 * diff_sq)) * (s^(-3) * diff_sq)
  }
)
# Bundle the RBF kernel with its partial derivatives and parameters
# into the kernel-spec list the gp.* functions expect.
rbf.k <- function(sigma) {
  list(k = rbf, pds = rbf.pds, params = list(sigma = sigma))
}
# RBF kernel with Automatic Relevance Determination: each input
# dimension has its own length scale l[d]; sigma scales the amplitude.
rbf.ard <- function(x, y, params) {
  len_scale <- unlist(params$l)
  amp <- params$sigma
  diff_sq <- (x - y)^2
  amp^2 * exp(-0.5 * sum(len_scale^-2 * diff_sq))
}
# Partial derivatives of the ARD RBF kernel with respect to l (vector,
# one component per dimension) and sigma (scalar).
rbf.ard.pds <- list(
  l = function(x, y, params) {
    len_scale <- unlist(params$l)
    amp <- params$sigma
    diff_sq <- (x - y)^2
    amp^2 * exp(-0.5 * sum(len_scale^-2 * diff_sq)) * (len_scale^-3 * diff_sq)
  },
  sigma = function(x, y, params) {
    len_scale <- unlist(params$l)
    diff_sq <- (x - y)^2
    2 * params$sigma * exp(-0.5 * sum(len_scale^-2 * diff_sq))
  }
)
# Bundle the ARD RBF kernel with its partial derivatives and parameters
# into the kernel-spec list the gp.* functions expect.
rbf.ard.k <- function(sigma, l) {
  list(k = rbf.ard, pds = rbf.ard.pds, params = list(sigma = sigma, l = l))
}
# Example of fitting a complex 1-dimensional function.
# Fits a GP to a wiggly target on a dense grid, optimizes the kernel
# hyperparameters by marginal likelihood, and plots mean +/- pointwise
# variance.
gp.example.1d <- function() {
  # Training inputs: a dense grid on (0, 10].
  # (Removed the unused `missing`/`sparse` grids from the original.)
  full <- seq(.1, 10, .1)
  X <- matrix(full)
  # Deliberately wiggly 1-d target.
  y <- matrix(log(abs(cos(X))) ^ 2 + cos(X) ^ 2 + sin(log(X)) + sin(X) + log(abs(cos(X) - cos(X) * log(abs(cosh(X))))))
  # Fit, then optimize the kernel hyperparameters.
  # FIX: the original passed `k =`, relying on partial matching of
  # gp.fit's `kernel` argument — spelled out here.
  gp <- gp.fit(X, y, sigma_n = 0.2, kernel = rbf.k(sigma = .05))
  gp <- gp.optimize(gp,
                    lower = c(-Inf),
                    upper = c(Inf))
  print(gp$kernel$params)
  # Predict on a dense grid and plot mean +/- pointwise variance.
  x_test <- matrix(seq(.1, max(X), .1))
  p <- gp.predict(gp, x_test)
  plot(X, y)
  lines(x_test, p$means, pch = 'x', col = 'red')
  points(x_test,
         p$means + diag(p$vars),
         pch = '-',
         col = 'blue')
  points(x_test,
         p$means - diag(p$vars),
         pch = '-',
         col = 'green')
}
# 2-d example.
# Requires the rgl library.
# This takes a long time to run; runtime can be reduced by coarsening
# the `full` grid.
gp.example.2d <- function() {
  library('rgl')
  # Jittered grid over (0, 10] in each dimension.
  full <- seq(.1, 10, 1)+runif(10,-.01,.01)
  X <- data.matrix(expand.grid(full, full), rownames = F)
  # Noisy 2-d target surface.
  y <- sin(X[, 1]*pi^2) + cos(-X[,2])*log(X[,1]^2)
  y <- matrix(y + runif(nrow(X),-.5, .5))
  persp3d(full, full, y, color = "green", alpha = .5)
  # ARD kernel: one length scale per input dimension.
  # FIX: the original passed `k =`, relying on partial matching of
  # gp.fit's `kernel` argument — spelled out here.
  gp <- gp.fit(X, y,
               sigma_n = 0.2,
               kernel = rbf.ard.k(sigma = .5, l = c(1,1)))
  gp <- gp.optimize(gp,
                    lower=c(0,-Inf,-Inf),
                    upper=c(1,Inf,Inf))
  print('------final params-----------')
  print(gp$kernel$params)
  x_test <- X
  p <- gp.predict(gp, x_test)
  points3d(X[, 1], X[, 2], p$means, col = 'red')
}
gp.example.1d()
# gp.example.2d()
# TODO: simplifying code for handling scalar vs vector partial derivatives
# TODO: matern 3/2, matern 5/2, periodic kernel, etc.
# TODO:
|
/rgp.R
|
no_license
|
bglazer/rgp
|
R
| false
| false
| 8,691
|
r
|
########################################
# Gaussian process
########################################
# Element-wise matrix autocorrelation function
cov.m <- function(rows, k, d) {
params <- k$params
k <- k$k
# Covariance of the elements from matrix d
k(d[rows[1],], d[rows[2],], params)
}
# Matrix autocorrelation
K.m <- function(X, k, symmetric = F, diagonal=1) {
# Get row indexes in the right order to fill the lower triangle
# of the covariance matrix
rows <- combn(1:nrow(X), 2)
# get the covariances of the rows
covs <- as.matrix(apply(rows, 2, cov.m, k = k, d = X))
Ks <- list()
# If the parameter is a vector (e.g. the lengthscale parameter in an ARD kernel)
if(ncol(covs) > 1) {
for(i in 1:nrow(covs)) {
K <- diag(diagonal, nrow(X))
K[lower.tri(K)] <- covs[i,]
K <- t(K)
if (symmetric)
K[lower.tri(K)] <- t(K)[lower.tri(K)]
Ks[[i]] <- K
}
Ks
} else { #if the parameter is a scalar
K <- diag(diagonal, nrow(X))
K[lower.tri(K)] <- covs
K <- t(K)
if (symmetric)
K[lower.tri(K)] <- t(K)[lower.tri(K)]
K
}
}
# Element-wise Matrix-Matrix covariance
# rows - set of indexes
cov.mm <- function(rows, k, A, B) {
params <- k$params
k <- k$k
k(A[rows[1],],
B[rows[2],], params)
}
# Matrix-Matrix covariance
K.mm <- function(X, Y, k) {
rows <- expand.grid(1:nrow(X), 1:nrow(Y))
covs <- apply(
X = rows,
MARGIN = 1,
FUN = cov.mm,
A = X,
B = Y,
k = k
)
matrix(covs, nrow = nrow(X), ncol = nrow(Y))
}
# trace of a matrix
trace <- function(M) {
sum(diag(M))
}
# Partial derivative of the marginal likelihood
gp.marginal.pd <- function(gp, k.pd) {
# Compute the matrix of element-wise partial derivatives of the kernel function
d_K <- K.m(gp$X,
k = list(k=k.pd, params=gp$kernel$params),
symmetric = T,
diagonal = 0)
pds <- c()
if(is.list(d_K)) {
for (i in 1:length(d_K)) {
pd_i <- (1 / 2) * trace((gp$alpha %*% t(gp$alpha) - chol2inv(gp$L)) %*% d_K[[i]])
pds <- c(pds, pd_i)
}
pds
} else {
(1 / 2) * trace((gp$alpha %*% t(gp$alpha) - chol2inv(gp$L)) %*% d_K)
}
}
# Both the marginal likelihood and gradient functions have the form: function(param_values, gp)
# This is because the optim function requires this format. param_values is a vector of kernel that is converted
# back to kernel parameters by the gp.kernel.refit function
# Marginal Likelihood
gp.marginal <- function(param_values, gp) {
# TODO: check if params are different
gp <- gp.kernel.reset(param_values, gp)
gp <- gp.refit(gp)
# marginal likelihood
D <- gp$det_Ky
n <- nrow(gp$X)
ml <- -1 / 2 * t(gp$y) %*% gp$alpha - D - n/2 * log(2 * pi)
if(gp$verbose){
print('--------optimizer params------------')
print(param_values)
print('--------marginal likelihood---------')
print(ml)
}
ml
}
gp.gradient <- function(param_values, gp) {
gp <- gp.kernel.reset(param_values, gp)
gp <- gp.refit(gp)
gradient <- c()
# Append each partial derivative (pd) to the gradient
for (param in names(gp$kernel$params)) {
pd <- gp$kernel$pds[[param]]
g <- gp.marginal.pd(gp, pd)
gradient <- c(gradient, g)
}
if(gp$verbose){
print('-----------gradient-----------')
print(gradient)
print(length(gradient))
print('------------------------------')
}
gradient
}
gp.optimize <- function(gp, lower, upper) {
init_params = unlist(gp$kernel$params)
optimized <- optim(
par = init_params,
fn = gp.marginal,
gr = gp.gradient,
method = 'L-BFGS-B',
lower=lower, # upper bound of parameters for optimizer
upper=upper, # lower bound
control = list(fnscale = -1), #maximize marginal likelihood, optim minimizes by default
gp=gp
)
best_params <- optimized$par
if(gp$verbose) {
print('-------optimized---------')
print(optimized)
}
gp <- gp.kernel.reset(best_params, gp)
gp <- gp.refit(gp)
gp
}
# Resets the kernel parameters before re-fitting the model.
#
# `params` is the flat numeric vector the optimizer works with; it is
# unpacked back into gp$kernel$params, with each parameter consuming as
# many slots as its length (vector parameters such as ARD length scales
# take several).
#
# BUG FIX: the original indexed with a cumulative `offset` that was only
# correct for at most one vector-valued parameter; with two or more
# vector parameters, later ones received overlapping, over-long slices.
# A running cursor consumes exactly length(parameter) slots each time.
gp.kernel.reset <- function(params, gp) {
  pos <- 1
  for (i in seq_along(gp$kernel$params)) {
    len <- length(gp$kernel$params[[i]])
    if (len > 1) {
      gp$kernel$params[[i]] <- params[pos:(pos + len - 1)]
    } else {
      gp$kernel$params[[i]] <- params[pos]
    }
    pos <- pos + len
  }
  gp
}
# Fit a gaussian process using the supplied kernel
gp.fit <- function(X, y, kernel, sigma_n, verbose=F) {
# Covariance matrix
K <- K.m(X, kernel) + diag(sigma_n, nrow(X))
# Cholesky factors of covariance matrix
L <- chol(K)
gp <- list()
gp$X <- X
gp$y <- y
gp$kernel <- kernel
gp$alpha <- chol2inv(L) %*% y
gp$L <- L
gp$det_Ky <- det(K.m(gp$y, kernel))
gp$sigma_n <- sigma_n
gp$verbose = verbose
gp
}
# Simplified
gp.refit <- function(gp) {
K <- K.m(gp$X, gp$kernel) + diag(gp$sigma_n, nrow(gp$X))
L <- chol(K)
gp$L <- L
gp$alpha <- chol2inv(L) %*% gp$y
gp$det_Ky <- det(K.m(gp$y, gp$kernel))
gp
}
gp.predict <- function(gp, x_test) {
covs <- K.mm(gp$X, x_test, gp$kernel)
f_mean <- t(covs) %*% gp$alpha
vars <- K.m(x_test, gp$kernel) - t(covs) %*% chol2inv(gp$L) %*% covs
list(means = f_mean, vars = vars)
}
#Kernel functions
# each function should be of the form function(x, y, params)
# params is a list of parameters for the function
# Radial Basis Function (squared-exponential) kernel.
# params$sigma holds the length scale(s); a vector gives per-dimension
# scaling. Signature: function(x, y, params) -> scalar covariance.
rbf <- function(x, y, params) {
  scaled_sq_dist <- sum((x - y)^2 / params$sigma^2)
  exp(-0.5 * scaled_sq_dist)
}
# Partial derivatives of the RBF kernel with respect to each
# hyperparameter, keyed by parameter name (used when assembling the
# marginal-likelihood gradient). Each entry shares the kernel signature.
rbf.pds <- list(
  sigma = function(x, y, params) {
    diff_sq <- (x - y)^2
    kernel_value <- exp(-0.5 * sum(diff_sq / params$sigma^2))
    # d/dsigma of exp(-1/2 sum(d^2/sigma^2)) = k * d^2 / sigma^3 per dimension
    kernel_value * (diff_sq / params$sigma^3)
  }
)
# Bundle the RBF kernel into the structure the GP code expects:
# the covariance function, its partial derivatives, and parameter values.
rbf.k <- function(sigma) {
  list(k = rbf, pds = rbf.pds, params = list(sigma = sigma))
}
# RBF kernel with Automatic Relevance Determination (ARD).
# params$l is a vector of per-dimension length scales and params$sigma
# is the signal standard deviation.
rbf.ard <- function(x, y, params) {
  lengthscales <- unlist(params$l)
  weighted_sq_dist <- sum((x - y)^2 / lengthscales^2)
  params$sigma^2 * exp(-0.5 * weighted_sq_dist)
}
# Partial derivatives of the ARD-RBF kernel, keyed by hyperparameter name.
rbf.ard.pds <- list(
  # d k / d l: one component per length scale.
  l = function(x, y, params) {
    lengthscales <- unlist(params$l)
    diff_sq <- (x - y)^2
    k_val <- params$sigma^2 * exp(-0.5 * sum(diff_sq / lengthscales^2))
    k_val * (diff_sq / lengthscales^3)
  },
  # d k / d sigma.
  sigma = function(x, y, params) {
    lengthscales <- unlist(params$l)
    2 * params$sigma * exp(-0.5 * sum((x - y)^2 / lengthscales^2))
  }
)
# Bundle the ARD-RBF kernel, its derivatives, and parameter values into
# the structure the GP code expects.
rbf.ard.k <- function(sigma, l) {
  list(k = rbf.ard, pds = rbf.ard.pds, params = list(sigma = sigma, l = l))
}
# Example: fit a complex 1-dimensional function and plot the GP fit with
# +/- one posterior-variance error bars. Side effects only (plots/prints).
gp.example.1d <- function() {
  # Alternative input grids kept from the original for experimentation:
  #   missing <- c(seq(.1, 5, .1), seq(8.5, 10, .1))   # grid with a gap
  #   sparse  <- seq(.1, 20, 3)                        # very coarse grid
  full <- seq(.1, 10, .1)
  # 1-d example
  X <- matrix(full)
  y <- matrix(log(abs(cos(X))) ^ 2 + cos(X) ^ 2 + sin(log(X)) + sin(X) + log(abs(cos(X) - cos(X) * log(abs(cosh(X))))))
  # y <- matrix(log(cos(X)^4) ^ 2 + cos(X) ^ 3 + runif(nrow(X), -.1, .1))
  # FIX: spell the argument out -- the original relied on partial matching
  # of `k` to gp.fit's `kernel` parameter, which is fragile.
  gp <- gp.fit(X, y, sigma_n = 0.2, kernel = rbf.k(sigma = .05))
  gp <- gp.optimize(gp, lower = c(-Inf), upper = c(Inf))
  print(gp$kernel$params)
  x_test <- matrix(seq(.1, max(X), .1))
  p <- gp.predict(gp, x_test)
  plot(X, y)
  lines(x_test, p$means, pch = 'x', col = 'red')
  # Mean plus/minus the pointwise posterior variance (diagonal of vars).
  points(x_test, p$means + diag(p$vars), pch = '-', col = 'blue')
  points(x_test, p$means - diag(p$vars), pch = '-', col = 'green')
}
# 2-d example.
# Requires the rgl library. This can take a long time to run; runtime can
# be reduced by coarsening the `full` grid. Side effects only (3-D plots).
gp.example.2d <- function() {
  library('rgl')
  # Jitter the grid so no two inputs coincide exactly (which would make
  # the covariance matrix singular).
  full <- seq(.1, 10, 1)+runif(10,-.01,.01)
  X <- data.matrix(expand.grid(full, full), rownames = F)
  y <- sin(X[, 1]*pi^2) + cos(-X[,2])*log(X[,1]^2)
  y <- matrix(y + runif(nrow(X),-.5, .5))
  persp3d(full, full, y, color = "green", alpha = .5)
  # FIX: spell the argument out -- the original relied on partial matching
  # of `k` to gp.fit's `kernel` parameter, which is fragile.
  gp <- gp.fit(X, y, sigma_n = 0.2, kernel = rbf.ard.k(sigma = .5, l = c(1, 1)))
  gp <- gp.optimize(gp, lower = c(0, -Inf, -Inf), upper = c(1, Inf, Inf))
  print('------final params-----------')
  print(gp$kernel$params)
  x_test <- X
  p <- gp.predict(gp, x_test)
  points3d(X[, 1], X[, 2], p$means, col = 'red')
}
# Run the 1-d demo on load; the 2-d demo is slow and left commented out.
gp.example.1d()
# gp.example.2d()
# TODO: simplifying code for handling scalar vs vector partial derivatives
# TODO: matern 3/2, matern 5/2, periodic kernel, etc.
|
# Compute pairwise distances between phylogenetic trees.
#
# x      - a multiPhylo object, a list of phylo objects, a list of Newick
#          strings, or a single character string holding several trees.
# method - "geodesic" (BHV geodesic distance, via the distory C code) or
#          "edgeset"; partial matching is accepted.
# force.multi2di         - resolve ALL trees with ape::multi2di before use.
# outgroup               - tip used to root any unrooted trees.
# convert.multifurcating - resolve only non-binary trees (not at the root).
# use.random.resolution  - passed to multi2di's `random` argument.
# scale  - a phylo (scale every tree to its total branch length) or a
#          numeric target total branch length.
# verbose- passed through to the geodesic C routine.
#
# Returns a "dist" object of pairwise tree distances.
#
# Fixes relative to the original:
#  * the list-of-strings and single-string branches parsed the trees into
#    `k` but then recursed on the unchanged `x`, looping forever; they now
#    recurse on `k`;
#  * an unmatched `method` made pmatch() return NA, so `if (method.id == 1)`
#    failed with "missing value where TRUE/FALSE needed" instead of the
#    intended error message -- the NA case is now checked first;
#  * inherits()/is.numeric()/is.character() replace `class(.) == ...`
#    comparisons (class() can return a vector), and the variable `T`
#    (which shadowed TRUE) was renamed.
dist.multiPhylo <- function(x, method="geodesic", force.multi2di = FALSE,
    outgroup = NULL, convert.multifurcating = FALSE, use.random.resolution =
    FALSE, scale = NULL, verbose = FALSE)
{
    # Fewer than two trees: nothing to compare.
    if(length(x) < 2)
        return(matrix())
    if(inherits(x, "multiPhylo")) # ideally, we will have this
    {
        # Root any unrooted trees on the requested outgroup.
        if(!is.null(outgroup))
        {
            x <- lapply(x, function(k)
            {
                if(inherits(k, "phylo"))
                {
                    if(!is.rooted(k))
                        root(k, outgroup, resolve.root = TRUE)
                    else k
                }
                else NA
            })
        }
        if(force.multi2di == TRUE)
        {
            # Unconditionally resolve multifurcations in every tree.
            x <- lapply(x, function(k)
            {
                if(inherits(k, "phylo"))
                {
                    multi2di(k, random=use.random.resolution)
                }
                else NA
            })
        }
        else if(convert.multifurcating == TRUE) # won't resolve multifurcations at the root
        {
            x <- lapply(x, function(k)
            {
                if(inherits(k, "phylo"))
                {
                    if(!is.binary.tree(k))
                        multi2di(k, random=use.random.resolution)
                    else k
                }
                else NA
            })
        }
        # Rescale each tree so its total branch length matches the target
        # (the total length of a reference phylo, or a plain number).
        if(!is.null(scale))
        {
            if(inherits(scale, "phylo"))
            {
                target.length <- sum(scale$edge.length)
            }
            else if(is.numeric(scale))
            {
                target.length <- scale
            }
            else
            {
                stop("Scale parameter not understood.\n")
            }
            x <- lapply(x, function(k)
            {
                if(inherits(k, "phylo"))
                {
                    k$edge.length = k$edge.length * (target.length /
                        sum(k$edge.length))
                    k
                }
                else NA
            })
        }
        # do some sanity checks before we start out
        r <- lapply(x, function(k) { if(inherits(k, "phylo")) is.rooted(k) else NA })
        if(!all(as.logical(r), na.rm=TRUE))
        {
            stop("Some trees are not rooted. Specify an outgroup to fix this problem. All trees must be rooted.\n")
        }
        r <- lapply(x, function(k) { if(inherits(k, "phylo")) is.binary.tree(k) else NA })
        if(!all(as.logical(r), na.rm=TRUE))
        {
            stop("Some trees are not binary. All input trees must be strictly binary.\n")
        }
        # check to make sure all trees have the same tip labels
        tips = x[[1]]$tip.label
        r <- lapply(x, function(k) { if(inherits(k, "phylo")) setequal(k$tip.label,
            tips) else NA})
        if(!all(as.logical(r), na.rm=TRUE))
        {
            stop("Not all trees have the same tips.\n")
        }
        # convert our list of class phylo to a list of Newick strings
        treestrs <- lapply(x, function(k) { if(inherits(k, "phylo"))
            write.tree(k) else "" })
        method = tolower(method)
        method.id = pmatch(method, c("edgeset", "geodesic"))
        # Check the NA case FIRST: pmatch() returns NA for an unmatched
        # method, and NA == 1 is not a usable condition.
        if(is.na(method.id))
        {
            stop("Specified method is not valid")
        }
        else if(method.id == 1)
        {
            rv <- .Call("phycpp_bin_trees", treestrs, PACKAGE="distory")
        }
        else # geodesic
        {
            rv <- .Call("phycpp_compute_tree_distance_set", treestrs, as.logical(verbose), PACKAGE="distory")
        }
        as.dist(rv)
    }
    else if(typeof(x) == "list")
    {
        if(inherits(x[[1]], "phylo")) # a list of phylo's that for some reason is not classed as multiPhylo
        {
            class(x) <- "multiPhylo" # it already is basically a multiPhylo anyways - we'll mark it as such
            dist.multiPhylo(x, method=method, force.multi2di=force.multi2di, outgroup=outgroup,
                convert.multifurcating=convert.multifurcating,
                use.random.resolution=use.random.resolution,
                scale=scale, verbose=verbose)
        }
        else if(is.character(x[[1]])) # a list of strings, presuming one tree each, properly terminated
        {
            # read with /ape/, run checks, dump back
            t <- paste(x, sep="", collapse="")
            k <- read.tree(text=t)
            # BUG FIX: recurse on the parsed trees `k`, not on `x`
            # (recursing on `x` re-entered this branch forever).
            dist.multiPhylo(k, method=method, force.multi2di=force.multi2di, outgroup=outgroup,
                convert.multifurcating=convert.multifurcating,
                use.random.resolution=use.random.resolution,
                scale=scale, verbose=verbose)
        }
    }
    else if(is.character(x)) # this is for one string containing multiple trees
    {
        # read with ape and dump back to a vector of strings
        k <- read.tree(text=x)
        # BUG FIX: recurse on the parsed trees `k`, not on `x`
        # (recursing on `x` re-entered this branch forever).
        dist.multiPhylo(k, method=method, force.multi2di=force.multi2di, outgroup=outgroup,
            convert.multifurcating=convert.multifurcating,
            use.random.resolution=use.random.resolution,
            scale=scale, verbose=verbose)
    }
    else
    {
        stop("Cannot coerce the argument into a usable type.")
    }
}
|
/distory/R/dist.multiPhylo.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,239
|
r
|
dist.multiPhylo <- function(x, method="geodesic", force.multi2di = FALSE,
outgroup = NULL, convert.multifurcating = FALSE, use.random.resolution =
FALSE, scale = NULL, verbose = FALSE)
{
if(length(x) < 2)
return(matrix())
if(class(x) == "multiPhylo") # ideally, we will have this
{
# run checks if appropriate
# separate it out into a vector of strings
if(!is.null(outgroup))
{
x <- lapply(x, function(k)
{
if(class(k) == "phylo")
{
if(!is.rooted(k))
root(k, outgroup, resolve.root = TRUE)
else k
}
else NA
})
}
if(force.multi2di == TRUE)
{
x <- lapply(x, function(k)
{
if(class(k) == "phylo")
{
multi2di(k,random=use.random.resolution)
}
else NA
})
}
else if(convert.multifurcating == TRUE) # won't resolve multifurcations at the root
{
x <- lapply(x, function(k)
{
if(class(k) == "phylo")
{
if(!is.binary.tree(k))
multi2di(k,random=use.random.resolution)
else k
}
else NA
})
}
if(!is.null(scale))
{
if(class(scale) == "phylo")
{
T <- sum(scale$edge.length)
x <- lapply(x, function(k)
{
if(class(k) == "phylo")
{
k$edge.length = k$edge.length * (T /
sum(k$edge.length))
k
}
else NA
})
}
else if(class(scale) == "numeric")
{
x <- lapply(x, function(k)
{
if(class(k) == "phylo")
{
k$edge.length = k$edge.length * (scale /
sum(k$edge.length))
k
}
else NA
})
}
else
{
stop("Scale parameter not understood.\n")
}
}
# do some sanity checks before we start out
r <- lapply(x, function(k)
{
if(class(k) == "phylo")
{
is.rooted(k)
}
else NA
})
if(!all(as.logical(r), na.rm=TRUE))
{
stop("Some trees are not rooted. Specify an outgroup to fix this problem. All trees must be rooted.\n")
}
r <- lapply(x, function(k) { if(class(k) == "phylo") is.binary.tree(k) else NA })
if(!all(as.logical(r), na.rm=TRUE))
{
stop("Some trees are not binary. All input trees must be strictly binary.\n")
}
# check to make sure all trees have the same tip labels
tips = x[[1]]$tip.label
r <- lapply(x, function(k) { if(class(k) == "phylo") setequal(k$tip.label,
tips) else NA})
if(!all(as.logical(r), na.rm=TRUE))
{
stop("Not all trees have the same tips.\n")
}
# convert our list of class phylo to a list of strings
treestrs <- lapply(x, function(k) { if(class(k) == "phylo")
write.tree(k) else "" })
method=tolower(method)
method.id = pmatch(method, c("edgeset", "geodesic"))
# call the C interface function and return the value automatically
if(method.id == 1)
{
rv <- .Call("phycpp_bin_trees", treestrs, PACKAGE="distory")
}
else if(method.id == 2)
{
rv <- .Call("phycpp_compute_tree_distance_set", treestrs, as.logical(verbose), PACKAGE="distory")
}
else
{
stop("Specified method is not valid")
}
as.dist(rv)
}
else if(typeof(x) == "list")
{
if(class(x[[1]]) == "phylo") # a list of phylo's that for some reason is not classed as multiPhylo
{
class(x) <- "multiPhylo" # it already is basically a multiPhylo anyways - we'll mark it as such
dist.multiPhylo(x, method=method, force.multi2di=force.multi2di, outgroup=outgroup,
convert.multifurcating=convert.multifurcating,
use.random.resolution=use.random.resolution,
scale=scale, verbose=verbose)
}
else if(class(x[[1]]) == "character") # a list of strings, presuming one tree each, properly terminated
{
# read with /ape/, run checks, dump back
t <- paste(x, sep="", collapse="")
k <- read.tree(text=t)
dist.multiPhylo(x, method=method, force.multi2di=force.multi2di, outgroup=outgroup,
convert.multifurcating=convert.multifurcating,
use.random.resolution=use.random.resolution,
scale=scale, verbose=verbose)
}
}
else if(class(x) == "character") # this is for one string containing multiple trees
{
# read with ape and dump back to a vector of strings
k <- read.tree(text=x)
# call this to process it properly
dist.multiPhylo(x, method=method, force.multi2di=force.multi2di, outgroup=outgroup,
convert.multifurcating=convert.multifurcating,
use.random.resolution=use.random.resolution,
scale=scale, verbose=verbose)
}
else
{
stop("Cannot coerce the argument into a usable type.")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coersion-tk_xts.R
\name{tk_xts}
\alias{tk_xts}
\alias{tk_xts_}
\title{Coerce time series objects and tibbles with date/date-time columns to xts.}
\usage{
tk_xts(data, select = NULL, date_var = NULL, silent = FALSE, ...)
tk_xts_(data, select = NULL, date_var = NULL, silent = FALSE, ...)
}
\arguments{
\item{data}{A time-based tibble or time-series object.}
\item{select}{\strong{Applicable to tibbles and data frames only}.
The column or set of columns to be coerced to \code{ts} class.}
\item{date_var}{\strong{Applicable to tibbles and data frames only}.
Column name to be used to \code{order.by}.
\code{NULL} by default. If \code{NULL}, function will find the date or date-time column.}
\item{silent}{Used to toggle printing of messages and warnings.}
\item{...}{Additional parameters to be passed to \code{xts::xts()}. Refer to \code{xts::xts()}.}
}
\value{
Returns a \code{xts} object.
}
\description{
Coerce time series objects and tibbles with date/date-time columns to xts.
}
\details{
\code{tk_xts} is a wrapper for \code{xts::xts()} that is designed
to coerce \code{tibble} objects that have a "time-base" (meaning the values vary with time)
to \code{xts} class objects. There are three main advantages:
\enumerate{
\item Non-numeric columns that are not removed via \code{select} are dropped and the user is warned.
This prevents an error or coercion issue from occurring.
\item The date column is auto-detected if not specified by \code{date_var}. This takes
the effort off the user to assign a date vector during coercion.
\item \code{ts} objects are automatically coerced if a "timetk index" is present. Refer to \code{\link[=tk_ts]{tk_ts()}}.
}
The \code{select} argument can be used to select subsets
of columns from the incoming data.frame.
Only columns containing numeric data are coerced.
The \code{date_var} can be used to specify the column with the date index.
If \code{date_var = NULL}, the date / date-time column is interpreted.
Optionally, the \code{order.by} argument from the underlying \code{xts::xts()} function can be used.
The user must pass a vector of dates or date-times if \code{order.by} is used.
For non-data.frame object classes (e.g. \code{xts}, \code{zoo}, \code{timeSeries}, etc) the objects are coerced
using \code{xts::xts()}.
\code{tk_xts_} is a nonstandard evaluation method.
}
\examples{
library(tibble)
library(dplyr)
library(timetk)
### tibble to xts: Comparison between tk_xts() and xts::xts()
data_tbl <- tibble::tibble(
date = seq.Date(as.Date("2016-01-01"), by = 1, length.out = 5),
x = rep("chr values", 5),
y = cumsum(1:5),
z = cumsum(11:15) * rnorm(1))
# xts: Character columns cause coercion issues; order.by must be passed a vector of dates
xts::xts(data_tbl[,-1], order.by = data_tbl$date)
# tk_xts: Non-numeric columns automatically dropped; No need to specify date column
tk_xts(data_tbl)
# ts can be coerced back to xts
data_tbl \%>\%
tk_ts(start = 2016, freq = 365) \%>\%
tk_xts()
### Using select and date_var
tk_xts(data_tbl, select = y, date_var = date)
### NSE: Enables programming
date_var <- "date"
select <- "y"
tk_xts_(data_tbl, select = select, date_var = date_var)
}
\seealso{
\code{\link[=tk_tbl]{tk_tbl()}}, \code{\link[=tk_zoo]{tk_zoo()}}, \code{\link[=tk_zooreg]{tk_zooreg()}}, \code{\link[=tk_ts]{tk_ts()}}
}
|
/man/tk_xts.Rd
|
no_license
|
business-science/timetk
|
R
| false
| true
| 3,416
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coersion-tk_xts.R
\name{tk_xts}
\alias{tk_xts}
\alias{tk_xts_}
\title{Coerce time series objects and tibbles with date/date-time columns to xts.}
\usage{
tk_xts(data, select = NULL, date_var = NULL, silent = FALSE, ...)
tk_xts_(data, select = NULL, date_var = NULL, silent = FALSE, ...)
}
\arguments{
\item{data}{A time-based tibble or time-series object.}
\item{select}{\strong{Applicable to tibbles and data frames only}.
The column or set of columns to be coerced to \code{ts} class.}
\item{date_var}{\strong{Applicable to tibbles and data frames only}.
Column name to be used to \code{order.by}.
\code{NULL} by default. If \code{NULL}, function will find the date or date-time column.}
\item{silent}{Used to toggle printing of messages and warnings.}
\item{...}{Additional parameters to be passed to \code{xts::xts()}. Refer to \code{xts::xts()}.}
}
\value{
Returns a \code{xts} object.
}
\description{
Coerce time series objects and tibbles with date/date-time columns to xts.
}
\details{
\code{tk_xts} is a wrapper for \code{xts::xts()} that is designed
to coerce \code{tibble} objects that have a "time-base" (meaning the values vary with time)
to \code{xts} class objects. There are three main advantages:
\enumerate{
\item Non-numeric columns that are not removed via \code{select} are dropped and the user is warned.
This prevents an error or coercion issue from occurring.
\item The date column is auto-detected if not specified by \code{date_var}. This takes
the effort off the user to assign a date vector during coercion.
\item \code{ts} objects are automatically coerced if a "timetk index" is present. Refer to \code{\link[=tk_ts]{tk_ts()}}.
}
The \code{select} argument can be used to select subsets
of columns from the incoming data.frame.
Only columns containing numeric data are coerced.
The \code{date_var} can be used to specify the column with the date index.
If \code{date_var = NULL}, the date / date-time column is interpreted.
Optionally, the \code{order.by} argument from the underlying \code{xts::xts()} function can be used.
The user must pass a vector of dates or date-times if \code{order.by} is used.
For non-data.frame object classes (e.g. \code{xts}, \code{zoo}, \code{timeSeries}, etc) the objects are coerced
using \code{xts::xts()}.
\code{tk_xts_} is a nonstandard evaluation method.
}
\examples{
library(tibble)
library(dplyr)
library(timetk)
### tibble to xts: Comparison between tk_xts() and xts::xts()
data_tbl <- tibble::tibble(
date = seq.Date(as.Date("2016-01-01"), by = 1, length.out = 5),
x = rep("chr values", 5),
y = cumsum(1:5),
z = cumsum(11:15) * rnorm(1))
# xts: Character columns cause coercion issues; order.by must be passed a vector of dates
xts::xts(data_tbl[,-1], order.by = data_tbl$date)
# tk_xts: Non-numeric columns automatically dropped; No need to specify date column
tk_xts(data_tbl)
# ts can be coerced back to xts
data_tbl \%>\%
tk_ts(start = 2016, freq = 365) \%>\%
tk_xts()
### Using select and date_var
tk_xts(data_tbl, select = y, date_var = date)
### NSE: Enables programming
date_var <- "date"
select <- "y"
tk_xts_(data_tbl, select = select, date_var = date_var)
}
\seealso{
\code{\link[=tk_tbl]{tk_tbl()}}, \code{\link[=tk_zoo]{tk_zoo()}}, \code{\link[=tk_zooreg]{tk_zooreg()}}, \code{\link[=tk_ts]{tk_ts()}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{as.omega.model}
\alias{as.omega.model}
\title{Extract Omegas from Model}
\usage{
\method{as.omega}{model}(x, ...)
}
\arguments{
\item{x}{model}
\item{...}{passed arguments}
}
\value{
omega (subset of model)
}
\description{
Extracts omegas from model.
}
\seealso{
Other as.omega:
\code{\link{as.omega}()}
}
\concept{as.omega}
\keyword{internal}
|
/man/as.omega.model.Rd
|
no_license
|
cran/nonmemica
|
R
| false
| true
| 463
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{as.omega.model}
\alias{as.omega.model}
\title{Extract Omegas from Model}
\usage{
\method{as.omega}{model}(x, ...)
}
\arguments{
\item{x}{model}
\item{...}{passed arguments}
}
\value{
omega (subset of model)
}
\description{
Extracts omegas from model.
}
\seealso{
Other as.omega:
\code{\link{as.omega}()}
}
\concept{as.omega}
\keyword{internal}
|
\name{maxPWMScore-methods}
\docType{methods}
\alias{maxPWMScore-methods}
\title{ ~~ Methods for Function \code{maxPWMScore} ~~}
\description{
Accessor method for \code{maxPWMScore}
}
\section{Methods}{
\describe{
\item{\code{maxPWMScore(object)}}{
%% ~~describe this method here~~
}
}}
\keyword{methods}
\keyword{ ~~ other possible keyword(s) ~~ }
|
/man/maxPWMScore-methods.Rd
|
no_license
|
patrickCNMartin/ChIPanalyser
|
R
| false
| false
| 353
|
rd
|
\name{maxPWMScore-methods}
\docType{methods}
\alias{maxPWMScore-methods}
\title{ ~~ Methods for Function \code{maxPWMScore} ~~}
\description{
Accessor method for \code{maxPWMScore}
}
\section{Methods}{
\describe{
\item{\code{maxPWMScore(object)}}{
%% ~~describe this method here~~
}
}}
\keyword{methods}
\keyword{ ~~ other possible keyword(s) ~~ }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EOO_functions.R
\name{getAreaEOO}
\alias{getAreaEOO}
\title{Calculates area of the created EOO polygon.}
\usage{
getAreaEOO(EOO.polygon)
}
\arguments{
\item{EOO.polygon}{An object of class SpatialPolygons, usually the output
from \code{makeEOO}.}
}
\value{
The area of the \code{EOO.polygon} in km2
}
\description{
\code{getAreaEOO} calculates the area of the EOO polygon generated from
\code{makeEOO} the provided data
}
\examples{
crs.UTM55S <- '+proj=utm +zone=55 +south +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
r1 <- raster(ifelse((volcano<130), NA, 1), crs = crs.UTM55S)
extent(r1) <- extent(0, 6100, 0, 8700)
EOO.polygon <- makeEOO(r1)
EOO.area <- getAreaEOO(EOO.polygon)
}
\seealso{
Other EOO functions: \code{\link{makeEOO}}
}
\author{
Nicholas Murray \email{murr.nick@gmail.com}, Calvin Lee
\email{calvinkflee@gmail.com}
}
\concept{EOO functions}
|
/man/getAreaEOO.Rd
|
no_license
|
xtbgtraining/redlistr
|
R
| false
| true
| 940
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EOO_functions.R
\name{getAreaEOO}
\alias{getAreaEOO}
\title{Calculates area of the created EOO polygon.}
\usage{
getAreaEOO(EOO.polygon)
}
\arguments{
\item{EOO.polygon}{An object of class SpatialPolygons, usually the output
from \code{makeEOO}.}
}
\value{
The area of the \code{EOO.polygon} in km2
}
\description{
\code{getAreaEOO} calculates the area of the EOO polygon generated from
\code{makeEOO} the provided data
}
\examples{
crs.UTM55S <- '+proj=utm +zone=55 +south +ellps=WGS84 +datum=WGS84 +units=m +no_defs'
r1 <- raster(ifelse((volcano<130), NA, 1), crs = crs.UTM55S)
extent(r1) <- extent(0, 6100, 0, 8700)
EOO.polygon <- makeEOO(r1)
EOO.area <- getAreaEOO(EOO.polygon)
}
\seealso{
Other EOO functions: \code{\link{makeEOO}}
}
\author{
Nicholas Murray \email{murr.nick@gmail.com}, Calvin Lee
\email{calvinkflee@gmail.com}
}
\concept{EOO functions}
|
# Fit a random forest classifier and draw a ROC curve for the test set.
#
# dataFrameTrain / dataFrameTest - feature data; both are transposed below,
#   so samples are presumably columns on input -- TODO confirm with callers.
# targetVecTrain / targetVecTest - class labels for the train / test samples.
# featureSelected - names of the features to include in the model formula.
# nTree - number of trees grown by randomForest.
# title - main title for the ROC plot.
#
# Side effects: plots the ROC curve with a diagonal reference line and
# annotates the AUC; returns the value of text() invisibly.
RF_output <- function(dataFrameTrain, dataFrameTest, targetVecTrain, targetVecTest, featureSelected, nTree = 100, title = "Random Forest ROC Curve"){
  #package
  # NOTE(review): require() returns FALSE instead of erroring when a package
  # is missing; library() would fail fast here.
  require(randomForest)
  require(ROCR)
  require(R.utils)
  # NOTE(review): hardcoded absolute path to one user's machine; this sources
  # hyphenToUnderscore() used below and is not portable.
  sourceDirectory("/Users/yuezhao/Desktop/projects/R_lib_jason/", modifiedOnly=TRUE)
  # Transpose so that samples become rows and features become columns.
  data_rf_train <- as.data.frame(t(cbind(dataFrameTrain)))
  data_rf_test <- as.data.frame(t(cbind(dataFrameTest)))
  data_rf_train$progress <- as.factor(c(targetVecTrain))
  data_rf_test$progress <- as.factor(c(targetVecTest))
  # Sanitize names: the model formula built below cannot contain hyphens.
  colnames(data_rf_train) <- hyphenToUnderscore(colnames(data_rf_train))
  colnames(data_rf_test) <- hyphenToUnderscore(colnames(data_rf_test))
  featureSelected <- hyphenToUnderscore(featureSelected)
  #print(featureSelected)
  #body
  # Build the model formula: as.factor(progress) ~ f1 + f2 + ...
  varNames <- paste(featureSelected, collapse = "+")
  rf.form <- as.formula(paste("as.factor(progress)", varNames, sep = " ~ "))
  my_forest <- randomForest(rf.form, data=data_rf_train, importance=TRUE, ntree=nTree, keep.forest=TRUE)
  # NOTE(review): `rank` and `my_prediction` are computed but never used.
  rank <- round(importance(my_forest), 2)
  # Make your prediction using the test set
  my_prediction <- predict(my_forest, data_rf_test)
  # generate probabilities instead of class labels type="prob" ensures that
  #randomForest generates probabilities for both the class labels,
  #we are selecting one of the value [2] at the end does that
  my_forest.pr = predict(my_forest,type="prob",newdata=data_rf_test)[,2]
  #prediction is ROCR function
  my_forest.pred = prediction(my_forest.pr, data_rf_test$progress)
  #performance in terms of true and false positive rates
  my_forest.perf = performance(my_forest.pred,"tpr","fpr")
  #Not sure if this part is necessary.
  par(mar=c(3,3,3,3))
  #plot the curve
  plot_handle <- plot(my_forest.perf,main=title,col=2,lwd=3, cex=4)
  # Diagonal chance line for reference.
  abline(a=0,b=1,lwd=2,lty=2,col="gray")
  #compute area under curve
  auc <- performance(my_forest.pred,"auc")
  auc <- unlist(slot(auc, "y.values"))
  # NOTE(review): this bare `auc` has no effect inside a function body.
  auc
  # Annotate the plot with the AUC value.
  text(0.6, 0.02, paste("AUC = ", round(auc,3)), cex=1, pos=4, col="red")
}
|
/RF_output.R
|
no_license
|
jasonzhao0307/R_lib_jason
|
R
| false
| false
| 1,951
|
r
|
RF_output <- function(dataFrameTrain, dataFrameTest, targetVecTrain, targetVecTest, featureSelected, nTree = 100, title = "Random Forest ROC Curve"){
#package
require(randomForest)
require(ROCR)
require(R.utils)
sourceDirectory("/Users/yuezhao/Desktop/projects/R_lib_jason/", modifiedOnly=TRUE)
data_rf_train <- as.data.frame(t(cbind(dataFrameTrain)))
data_rf_test <- as.data.frame(t(cbind(dataFrameTest)))
data_rf_train$progress <- as.factor(c(targetVecTrain))
data_rf_test$progress <- as.factor(c(targetVecTest))
colnames(data_rf_train) <- hyphenToUnderscore(colnames(data_rf_train))
colnames(data_rf_test) <- hyphenToUnderscore(colnames(data_rf_test))
featureSelected <- hyphenToUnderscore(featureSelected)
#print(featureSelected)
#body
varNames <- paste(featureSelected, collapse = "+")
rf.form <- as.formula(paste("as.factor(progress)", varNames, sep = " ~ "))
my_forest <- randomForest(rf.form, data=data_rf_train, importance=TRUE, ntree=nTree, keep.forest=TRUE)
rank <- round(importance(my_forest), 2)
# Make your prediction using the test set
my_prediction <- predict(my_forest, data_rf_test)
# generate probabilities instead of class labels type="prob" ensures that
#randomForest generates probabilities for both the class labels,
#we are selecting one of the value [2] at the end does that
my_forest.pr = predict(my_forest,type="prob",newdata=data_rf_test)[,2]
#prediction is ROCR function
my_forest.pred = prediction(my_forest.pr, data_rf_test$progress)
#performance in terms of true and false positive rates
my_forest.perf = performance(my_forest.pred,"tpr","fpr")
#Not sure if this part is necessary.
par(mar=c(3,3,3,3))
#plot the curve
plot_handle <- plot(my_forest.perf,main=title,col=2,lwd=3, cex=4)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
#compute area under curve
auc <- performance(my_forest.pred,"auc")
auc <- unlist(slot(auc, "y.values"))
auc
text(0.6, 0.02, paste("AUC = ", round(auc,3)), cex=1, pos=4, col="red")
}
|
# Load each named package, installing it first when it is not already
# present locally. Accepts one or more package names as strings.
install_load <- function (package1, ...) {
  # Gather all requested package names into a single character vector.
  pkgs <- c(package1, ...)
  for (pkg in pkgs) {
    # Attach directly when already installed; otherwise install first.
    if (pkg %in% rownames(installed.packages())) {
      do.call('library', list(pkg))
    } else {
      install.packages(pkg)
      do.call("library", list(pkg))
    }
  }
}
|
/install_load.R
|
no_license
|
wdewithub/Getting-cleaning-Data_Wk4_Assignment
|
R
| false
| false
| 523
|
r
|
# Load each named package, installing it first when it is not already
# present locally. Accepts one or more package names as strings.
install_load <- function (package1, ...) {
  # Gather all requested package names into a single character vector.
  pkgs <- c(package1, ...)
  for (pkg in pkgs) {
    # Attach directly when already installed; otherwise install first.
    if (pkg %in% rownames(installed.packages())) {
      do.call('library', list(pkg))
    } else {
      install.packages(pkg)
      do.call("library", list(pkg))
    }
  }
}
|
#QUESTION 1
#Neither of the hypothesis are true
#exp(𝑈𝑖) = 2 or 3 - could see a state with triple the smoking
#
smokeFile = Pmisc::downloadIfOld("http://pbrown.ca/teaching/appliedstats/data/smoke.RData")
#Loading required namespace: R.utils
load(smokeFile)
smoke = smoke[smoke$Age > 9, ] #getting rid of 9 year olds since data is suspicious
forInla = smoke[, c("Age", "Age_first_tried_cigt_smkg",
"Sex", "Race", "state", "school", "RuralUrban")]
forInla = na.omit(forInla)
forInla$school = factor(forInla$school) #censoring problem since we do not know when they will try smoking
library("INLA")
forSurv = data.frame(time = (pmin(forInla$Age_first_tried_cigt_smkg,
forInla$Age) - 4)/10, event = forInla$Age_first_tried_cigt_smkg <=
forInla$Age) #recode the variables, impossible to start smoking before 4- put time columns in range from 0 to 1
#if you havent smoked yet, you get your age. If you started smoking, its the age when you first started smoking
# left censoring
forSurv[forInla$Age_first_tried_cigt_smkg == 8, "event"] = 2
cbind(forInla$Age, forInla$Age_first_tried_cigt_smkg, forSurv)[1:10,]
smokeResponse = inla.surv(forSurv$time, forSurv$event) #event 0,1,2 -> 2 means left censoring, means you could have tried smoking anytime between 4 and 8, event 0 means havent smoked yet, 1 means they did smoke
fitS2 = inla(smokeResponse ~ RuralUrban + Sex * Race +
f(school, model = "iid", hyper = list(prec = list(prior = "pc.prec",
param = c(4*log(1.5), 0.05)))) + f(state, model = "iid", #within four standard deviations, within the worst and best school we expect to see at most a 50% difference in smoking
hyper = list(prec = list(prior = "pc.prec", param = c(log(10), #exponential dist with median 2.5 - exp(2.5), P(x>10) =.02
0.02)))), control.family = list(variant = 1,
hyper = list(alpha = list(prior = "normal", param = c(log(1),
.7^(-2))))), control.mode = list(theta = c(8,
2, 5), restart = TRUE), data = forInla, family = "weibullsurv",
verbose = TRUE, control.compute=list(config = TRUE))
exp(qnorm(c(0.025, 0.5, 0.975), mean = log(1), sd = .7)) #shape parameter - doesnt allow for more than four and median is 1 - allows alpha to be one and does not allow alpha to be 4
rbind(exp(-fitS2$summary.fixed[, c("mean", "0.025quant",
"0.975quant")]), Pmisc::priorPostSd(fitS2)$summary[,
c("mean", "0.025quant", "0.975quant")])
fitS2$summary.hyper[1,c(1,2,3,5)] #since the mean of the posterior distribution of the alpha paramter is 3, and 1 is not in the credible interval, we do not have a flat hazard, the hazard is increasing. There is no evidence that as age increases, the propesity to smoke is constant
#we can see that the mean of the posterior distribution for the standard deviation for the log relative rate of school is .144 and the mean for the posterior distribution of the standard deviation of for the log relative rate of state is .0584. Notice the C.I do not overlap
#therefore there is higher deviation within schools than states. We have a wide CI interval for state, the the std could be as low as .05 or as high as .10, but its definitley lower than school, whose lowest possible std is .11
#the rate at which rural children start smoking is 1-.8957% faster than non rural children
#positive B -> higher risk
#schools that are 1 std. apart have a clock that runs 14.39% faster, rate at which children start smoking is 14.39% faster than other schools
#the rate at which children start smoking is between 2.6% and 10.23% faster in some states when compared to other states
#so there is more variation among schools than states
#If the model parameter is greater than 1, then the clock runs slower for that group. If the model
#parameter is less than one, then the rate at which children start smoking in faster
#prior and posterior plots
sdState = Pmisc::priorPostSd(fitS2)
do.call(matplot, sdState$school$matplot) # prior and posterior density for school
sdState = Pmisc::priorPostSd(fitS2)
do.call(matplot, sdState$state$matplot)# prior and posterior density for state
#creating a chart to see the deviation between schools
library(dplyr)
library(ggplot2)
library(magrittr)
school_sum <- forInla[!is.infinite(forInla$Age_first_tried_cigt_smkg),]
school_sum <- school_sum %>%
group_by(school) %>%
summarize(avg = mean(Age_first_tried_cigt_smkg))
school_sum <- school_sum[order(school_sum$avg),]
ggplot(data=school_sum, aes(x= reorder(school, avg), y= avg))+ geom_point()+ylab("Average Age First Try Smoking")+
xlab("School")+ggtitle("Average Age First Start Smoking Per School")+ theme(axis.text.x = element_blank(), axis.ticks = element_blank())
#creating a map of mean age trying cigarettes in each state
library(openintro)
new_inla <- forInla[!is.infinite(forInla$Age_first_tried_cigt_smkg),]
new_inla[,"state"] <- tolower(abbr2state(new_inla$state))
colnames(new_inla)[colnames(new_inla)=="state"] <- "region"
new_inla <- new_inla %>%
group_by(region) %>%
summarize(avg = mean(Age_first_tried_cigt_smkg))
states <- map_data("state")
dt2 <- left_join(states, new_inla, by = "region")
ggplot(data = dt2, aes(x=long, y=lat, group=group))+
geom_polygon(aes(fill=avg))+
geom_path()+
scale_fill_gradientn(colours = rev(terrain.colors(5)),na.value = "grey",
guide = guide_colourbar(barwidth = 25, barheight = 0.4,
#put legend title on top of legend
title.position = "top")) +
# map scale
ggsn::scalebar(data = dt2, dist = 500, dist_unit = "km",
border.size = 0.4, st.size = 4,
box.fill = c('black','white'),
transform = TRUE, model = "WGS84") +
# put legend at the bottom, adjust legend title and text font sizes
theme(legend.position = "bottom",
legend.title=element_text(size=12), # font size of the legend
legend.text=element_text(size=10),
axis.title.x=element_blank(), # remove axis, title, ticks
axis.text.x=element_blank(),
axis.ticks.x=element_blank(),
axis.text.y = element_blank(),
axis.line=element_blank())+ggtitle("Map of US States Avg Age Start Smoking")
#QUESTION 2
pedestrainFile = Pmisc::downloadIfOld("http://pbrown.ca/teaching/appliedstats/data/pedestrians.rds")
pedestrians = readRDS(pedestrainFile)
# drop records with a missing accident time (needed for the time-based strata below)
pedestrians = pedestrians[!is.na(pedestrians$time),
    ]
pedestrians$y = pedestrians$Casualty_Severity == "Fatal" # outcome: fatal-casualty indicator
pedestrians$timeCat = format(pedestrians$time, "%Y_%b_%a_h%H") #create a strata from time and weather and date -> same weather, same month, same day of week and same hour
pedestrians$strata = paste(pedestrians$Light_Conditions,
    pedestrians$Weather_Conditions, pedestrians$timeCat)
length(unique(pedestrians$strata)) #number of distinct strata (176391 when this was run)
#remove strata with no cases or no controls - otherwise the stratum is uninformative
theTable = table(pedestrians$strata, pedestrians$y) #this table tells us whether each stratum has a fatal case and a non-fatal control - we need at least one of each per stratum
#since we match on weather, time, etc, we do not have to worry about the fact that more men are out at night
onlyOne = rownames(theTable)[which(theTable[, 1] ==
    0 | theTable[, 2] == 0)]
x = pedestrians[!pedestrians$strata %in% onlyOne, ] #males aged 26-35 are the baseline group
library("survival")
# conditional logistic regression: age and age-by-sex effects estimated within strata
theClogit = clogit(y ~ age + age:sex + strata(strata),
    data = x)
#summary(glm(y ~ sex + age:sex + strata(strata)+ Light_Conditions + Weather_Conditions,
#    data = x, family = "binomial"))$coef[1:4, ]
#IMPORTANT - given that you're in an accident, an odds ratio of 2 means double the odds that it is fatal compared to age 26-35; this conditions on being in an accident (so e.g. a 50 year old is twice as likely to be in a fatal one)
#theCoef[,c(1,2,6,7)]
#female effects are relative to males of the same age
#male effects are relative to the baseline group
#we avoid assumptions by stratifying rather than modeling; that's okay since it is a big data set
#by saying that 2am cloudy and 2am partly cloudy are different strata we are getting better estimates
#conditioning on the number of controls - shouldn't affect the results; the more controls the better
# append the reference level (age 26-35) as a zero-coefficient row so it shows up in the plots
theCoef = rbind(as.data.frame(summary(theClogit)$coef),
    `age 26 - 35` = c(0, 1, 0, NA, NA))
theCoef$sex = c("Male", "Female")[1 + grepl("Female",
    rownames(theCoef))]
# extract the numeric lower age bound from row names like "age 36 - 45" or "ageOver75"
theCoef$age = as.numeric(gsub("age|Over| - [[:digit:]].*|[:].*",
    "", rownames(theCoef)))
theCoef = theCoef[order(theCoef$sex, theCoef$age),
    ]
# males: odds of a fatal accident by age with 99% CI, relative to the 26-35 baseline
matplot(theCoef[theCoef$sex == "Male", "age"], exp(as.matrix(theCoef[theCoef$sex ==
    "Male", c("coef", "se(coef)")]) %*% Pmisc::ciMat(0.99)),
    log = "y", type = "l", col = "black", lty = c(1,2, 2), xaxs = "i", yaxs = "i", xlab = "Age", ylab="Odds of Fatal Accident Compared to Baseline")
# females: odds relative to males of the same age, with 99% CI
matplot(theCoef[theCoef$sex == "Female", "age"], exp(as.matrix(theCoef[theCoef$sex ==
    "Female", c("coef", "se(coef)")]) %*% Pmisc::ciMat(0.99)),
    log = "y", type = "l", col = "black", lty = c(1,
        2, 2), xaxs = "i", xlab="Age", ylab= "Odds of Fatal Accident Compared to Male of Same Age")
#the plot for women is relative to men, so this suggests 70 year old women and men are about the same
# given that a man is in an accident, at age x the odds of the accident being fatal are y times those of the 26-35 baseline
#the odds of a woman being in a fatal accident, given she is in an accident, is the plotted ratio times the odds of a man at the given age.
|
/Assignment 4.R
|
no_license
|
FionaMcLean/UK_road_fatalities_analysis
|
R
| false
| false
| 10,242
|
r
|
#QUESTION 1
#Neither of the hypotheses is true
#exp(U_i) = 2 or 3 - could plausibly see a state with double or triple the smoking rate
#
smokeFile = Pmisc::downloadIfOld("http://pbrown.ca/teaching/appliedstats/data/smoke.RData")
#Loading required namespace: R.utils
load(smokeFile)
smoke = smoke[smoke$Age > 9, ] #getting rid of 9 year olds since data is suspicious
forInla = smoke[, c("Age", "Age_first_tried_cigt_smkg",
    "Sex", "Race", "state", "school", "RuralUrban")]
forInla = na.omit(forInla)
forInla$school = factor(forInla$school) #censoring problem since we do not know when non-smokers will try smoking
library("INLA")
# survival-format response: shift so age 4 maps to time 0 and rescale to decades
forSurv = data.frame(time = (pmin(forInla$Age_first_tried_cigt_smkg,
    forInla$Age) - 4)/10, event = forInla$Age_first_tried_cigt_smkg <=
    forInla$Age) #recode the variables; it is impossible to start smoking before 4 - puts the time column roughly in the range 0 to 1
#if you haven't smoked yet, you get your current age (right censored). If you started smoking, it's the age when you first started smoking
# left censoring: a recorded first age of 8 means "8 or younger"
forSurv[forInla$Age_first_tried_cigt_smkg == 8, "event"] = 2
cbind(forInla$Age, forInla$Age_first_tried_cigt_smkg, forSurv)[1:10,] # sanity check of the recoding
smokeResponse = inla.surv(forSurv$time, forSurv$event) #event 0,1,2 -> 2 means left censoring (could have tried smoking anytime between 4 and 8), 0 means hasn't smoked yet, 1 means they did smoke
# Weibull survival model with iid school and state random effects, PC priors on both precisions
fitS2 = inla(smokeResponse ~ RuralUrban + Sex * Race +
    f(school, model = "iid", hyper = list(prec = list(prior = "pc.prec",
        param = c(4*log(1.5), 0.05)))) + f(state, model = "iid", #within four standard deviations, between the worst and best school we expect at most a 50% difference in smoking
    hyper = list(prec = list(prior = "pc.prec", param = c(log(10), #P(sd > log(10)) = 0.02 under the PC prior
        0.02)))), control.family = list(variant = 1,
    hyper = list(alpha = list(prior = "normal", param = c(log(1),
        .7^(-2))))), control.mode = list(theta = c(8,
    2, 5), restart = TRUE), data = forInla, family = "weibullsurv",
    verbose = TRUE, control.compute=list(config = TRUE))
exp(qnorm(c(0.025, 0.5, 0.975), mean = log(1), sd = .7)) #shape parameter prior check - median 1; allows alpha near 1 but effectively rules out alpha as large as 4
rbind(exp(-fitS2$summary.fixed[, c("mean", "0.025quant",
    "0.975quant")]), Pmisc::priorPostSd(fitS2)$summary[,
    c("mean", "0.025quant", "0.975quant")])
fitS2$summary.hyper[1,c(1,2,3,5)] #since the mean of the posterior distribution of the alpha parameter is about 3 and 1 is not in the credible interval, the hazard is not flat but increasing. There is no evidence that the propensity to smoke is constant as age increases
#the mean of the posterior std dev of the log relative rate is .144 for school and .0584 for state; notice the credible intervals do not overlap
#therefore there is higher deviation within schools than states. The state CI is wide (std could be as low as .05 or as high as .10), but it is definitely lower than school, whose lowest plausible std is .11
#the rate at which rural children start smoking is roughly 1/.8957 times faster than for non-rural children (from the fitted table above)
#positive B -> higher risk
#schools that are 1 std. apart have a clock that runs 14.39% faster - the rate at which children start smoking is 14.39% faster than in other schools
#the rate at which children start smoking is between 2.6% and 10.23% faster in some states when compared to other states
#so there is more variation among schools than states
#If the model parameter is greater than 1, then the clock runs slower for that group. If the model
#parameter is less than one, then the rate at which children start smoking is faster
#prior and posterior plots
sdState = Pmisc::priorPostSd(fitS2)
do.call(matplot, sdState$school$matplot) # prior and posterior density for school
sdState = Pmisc::priorPostSd(fitS2)
do.call(matplot, sdState$state$matplot)# prior and posterior density for state
#creating a chart to see the deviation in average starting age between schools
library(dplyr)
library(ggplot2)
library(magrittr)
# drop respondents who never tried smoking (coded Inf) before averaging
school_sum <- forInla[!is.infinite(forInla$Age_first_tried_cigt_smkg),]
school_sum <- school_sum %>%
  group_by(school) %>%
  summarize(avg = mean(Age_first_tried_cigt_smkg))
# sort by average age; reorder() in the plot below also sorts the x-axis
school_sum <- school_sum[order(school_sum$avg),]
ggplot(data=school_sum, aes(x= reorder(school, avg), y= avg))+ geom_point()+ylab("Average Age First Try Smoking")+
  xlab("School")+ggtitle("Average Age First Start Smoking Per School")+ theme(axis.text.x = element_blank(), axis.ticks = element_blank())
#creating a map of mean age trying cigarettes in each state
library(openintro)
new_inla <- forInla[!is.infinite(forInla$Age_first_tried_cigt_smkg),]
# abbr2state() expands abbreviations; lower case to match map_data("state") region names
new_inla[,"state"] <- tolower(abbr2state(new_inla$state))
colnames(new_inla)[colnames(new_inla)=="state"] <- "region"
new_inla <- new_inla %>%
  group_by(region) %>%
  summarize(avg = mean(Age_first_tried_cigt_smkg))
states <- map_data("state")
# join state polygons with per-state averages; states without data get NA (grey on the map)
dt2 <- left_join(states, new_inla, by = "region")
ggplot(data = dt2, aes(x=long, y=lat, group=group))+
  geom_polygon(aes(fill=avg))+
  geom_path()+
  scale_fill_gradientn(colours = rev(terrain.colors(5)),na.value = "grey",
                       guide = guide_colourbar(barwidth = 25, barheight = 0.4,
                                               #put legend title on top of legend
                                               title.position = "top")) +
  # map scale
  ggsn::scalebar(data = dt2, dist = 500, dist_unit = "km",
                 border.size = 0.4, st.size = 4,
                 box.fill = c('black','white'),
                 transform = TRUE, model = "WGS84") +
  # put legend at the bottom, adjust legend title and text font sizes
  theme(legend.position = "bottom",
        legend.title=element_text(size=12), # font size of the legend
        legend.text=element_text(size=10),
        axis.title.x=element_blank(), # remove axis, title, ticks
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank(),
        axis.text.y = element_blank(),
        axis.line=element_blank())+ggtitle("Map of US States Avg Age Start Smoking")
#QUESTION 2
pedestrainFile = Pmisc::downloadIfOld("http://pbrown.ca/teaching/appliedstats/data/pedestrians.rds")
pedestrians = readRDS(pedestrainFile)
# drop records with a missing accident time (needed for the time-based strata below)
pedestrians = pedestrians[!is.na(pedestrians$time),
    ]
pedestrians$y = pedestrians$Casualty_Severity == "Fatal" # outcome: fatal-casualty indicator
pedestrians$timeCat = format(pedestrians$time, "%Y_%b_%a_h%H") #create a strata from time and weather and date -> same weather, same month, same day of week and same hour
pedestrians$strata = paste(pedestrians$Light_Conditions,
    pedestrians$Weather_Conditions, pedestrians$timeCat)
length(unique(pedestrians$strata)) #number of distinct strata (176391 when this was run)
#remove strata with no cases or no controls - otherwise the stratum is uninformative
theTable = table(pedestrians$strata, pedestrians$y) #this table tells us whether each stratum has a fatal case and a non-fatal control - we need at least one of each per stratum
#since we match on weather, time, etc, we do not have to worry about the fact that more men are out at night
onlyOne = rownames(theTable)[which(theTable[, 1] ==
    0 | theTable[, 2] == 0)]
x = pedestrians[!pedestrians$strata %in% onlyOne, ] #males aged 26-35 are the baseline group
library("survival")
# conditional logistic regression: age and age-by-sex effects estimated within strata
theClogit = clogit(y ~ age + age:sex + strata(strata),
    data = x)
#summary(glm(y ~ sex + age:sex + strata(strata)+ Light_Conditions + Weather_Conditions,
#    data = x, family = "binomial"))$coef[1:4, ]
#IMPORTANT - given that you're in an accident, an odds ratio of 2 means double the odds that it is fatal compared to age 26-35; this conditions on being in an accident (so e.g. a 50 year old is twice as likely to be in a fatal one)
#theCoef[,c(1,2,6,7)]
#female effects are relative to males of the same age
#male effects are relative to the baseline group
#we avoid assumptions by stratifying rather than modeling; that's okay since it is a big data set
#by saying that 2am cloudy and 2am partly cloudy are different strata we are getting better estimates
#conditioning on the number of controls - shouldn't affect the results; the more controls the better
# append the reference level (age 26-35) as a zero-coefficient row so it shows up in the plots
theCoef = rbind(as.data.frame(summary(theClogit)$coef),
    `age 26 - 35` = c(0, 1, 0, NA, NA))
theCoef$sex = c("Male", "Female")[1 + grepl("Female",
    rownames(theCoef))]
# extract the numeric lower age bound from row names like "age 36 - 45" or "ageOver75"
theCoef$age = as.numeric(gsub("age|Over| - [[:digit:]].*|[:].*",
    "", rownames(theCoef)))
theCoef = theCoef[order(theCoef$sex, theCoef$age),
    ]
# males: odds of a fatal accident by age with 99% CI, relative to the 26-35 baseline
matplot(theCoef[theCoef$sex == "Male", "age"], exp(as.matrix(theCoef[theCoef$sex ==
    "Male", c("coef", "se(coef)")]) %*% Pmisc::ciMat(0.99)),
    log = "y", type = "l", col = "black", lty = c(1,2, 2), xaxs = "i", yaxs = "i", xlab = "Age", ylab="Odds of Fatal Accident Compared to Baseline")
# females: odds relative to males of the same age, with 99% CI
matplot(theCoef[theCoef$sex == "Female", "age"], exp(as.matrix(theCoef[theCoef$sex ==
    "Female", c("coef", "se(coef)")]) %*% Pmisc::ciMat(0.99)),
    log = "y", type = "l", col = "black", lty = c(1,
        2, 2), xaxs = "i", xlab="Age", ylab= "Odds of Fatal Accident Compared to Male of Same Age")
#the plot for women is relative to men, so this suggests 70 year old women and men are about the same
# given that a man is in an accident, at age x the odds of the accident being fatal are y times those of the 26-35 baseline
#the odds of a woman being in a fatal accident, given she is in an accident, is the plotted ratio times the odds of a man at the given age.
|
# Regression test for append.countrypair.dataframe():
# country-level Temp_val values should be appended to each country pair
# row as one column per pair member, suffixed _A (first country in the
# pair) and _B (second country), matched on Date.
countrypair_df = data.frame(Date = c(as.Date("2000-01-01"),
                                     as.Date("2001-01-01")),
                            CountryPair = c("A-B", "B-C"),
                            Val = c(1,2))
# per-country panel: two dates for each of countries A, B and C
country_df = data.frame(Date = rep(c(as.Date("2000-01-01"),
                                     as.Date("2001-01-01")),3),
                        Country = rep(c("A","B","C"), each = 2),
                        Temp_val = c(1,2,4,8,10,15))
test_df = append.countrypair.dataframe(countrypair_df,country_df)
# expected: A-B on 2000 picks up A=1, B=4; B-C on 2001 picks up B=8, C=15
expected_df = data.frame(Date = c(as.Date("2000-01-01"),
                                  as.Date("2001-01-01")),
                         CountryPair = c("A-B", "B-C"),
                         Val = c(1,2),
                         Temp_val_A = c(1,8),
                         Temp_val_B = c(4,15))
testthat::expect_equivalent(test_df, expected_df)
|
/tests/append_countrypair_test.R
|
no_license
|
MichaelGurkov/GlobalFinancialCycleSynch
|
R
| false
| false
| 904
|
r
|
# Test that append.countrypair.dataframe() attaches country-level values
# to each country pair as suffixed columns (_A and _B), matched on Date.
countrypair_df = data.frame(Date = as.Date(c("2000-01-01", "2001-01-01")),
                            CountryPair = c("A-B", "B-C"),
                            Val = c(1, 2))
country_df = data.frame(Date = rep(as.Date(c("2000-01-01", "2001-01-01")), 3),
                        Country = rep(c("A", "B", "C"), each = 2),
                        Temp_val = c(1, 2, 4, 8, 10, 15))
test_df = append.countrypair.dataframe(countrypair_df, country_df)
expected_df = data.frame(Date = as.Date(c("2000-01-01", "2001-01-01")),
                         CountryPair = c("A-B", "B-C"),
                         Val = c(1, 2),
                         Temp_val_A = c(1, 8),
                         Temp_val_B = c(4, 15))
testthat::expect_equivalent(test_df, expected_df)
|
\name{CausalImpact}
\alias{CausalImpact}
\title{Inferring causal impact using structural time-series models}
\description{
\code{CausalImpact()} performs causal inference through
counterfactual predictions using a Bayesian structural
time-series model.
See the package documentation
(http://google.github.io/CausalImpact/) to understand the
underlying assumptions. In particular, the model assumes
that the time series of the treated unit can be explained
in terms of a set of covariates which were themselves not
affected by the intervention whose causal effect we are
interested in.
The easiest way of running a causal analysis is to call
\code{CausalImpact()} with \code{data}, \code{pre.period},
\code{post.period}, \code{model.args} (optional), and
\code{alpha} (optional). In this case, a time-series model
is automatically constructed and estimated. The argument
\code{model.args} offers some control over the model. See
Example 1 below.
An alternative is to supply a custom model. In this case,
the function is called with \code{bsts.model},
\code{post.period.response}, and \code{alpha} (optional).
See Example 3 below.
}
\usage{
CausalImpact(data = NULL, pre.period = NULL,
post.period = NULL, model.args = NULL,
bsts.model = NULL, post.period.response = NULL,
alpha = 0.05)
}
\arguments{
\item{data}{Time series of response variable and any
covariates. This can be a \code{zoo} object, a
\code{vector}, a \code{matrix}, or a \code{data.frame}.
In any of these cases, the response variable must be in
the first column, and any covariates in subsequent
columns. A \code{zoo} object is recommended, as its time
indices will be used to format the x-axis in
\code{plot()}.}
\item{pre.period}{A vector specifying the first and the last time point of the
pre-intervention period in the response vector \code{y}. This
period can be thought of as a training period, used to
determine the relationship between the response variable and
the covariates. If \code{data} is a \code{zoo} object with
a \code{time} attribute, \code{pre.period} must be indicated
using the same time scale (i.e. using the same class as
\code{time(data)}, see Example 2 below). If \code{data} doesn't have
a \code{time} attribute, \code{pre.period} is indicated with
indices.}
\item{post.period}{A vector specifying the first and the last day of the
post-intervention period we wish to study. This is the period
after the intervention has begun whose effect we are
interested in. The relationship between response variable and
covariates, as determined during the pre-period, will be used
to predict how the response variable should have evolved
during the post-period had no intervention taken place. If
\code{data} is a \code{zoo} object with a \code{time}
attribute, \code{post.period} must be indicated using the
same time scale. If \code{data} doesn't have a \code{time}
attribute, \code{post.period} is indicated with indices.}
\item{model.args}{Further arguments to adjust the default
construction of the state-space model used for inference.
One particularly important parameter is
\code{prior.level.sd}, which specifies our a priori
knowledge about the volatility of the data. For even more
control over the model, you can construct your own model
using the \code{bsts} package and feed the fitted model
into \code{CausalImpact()}, as shown in Example 3.}
\item{bsts.model}{Instead of passing in \code{data} and
having \code{CausalImpact()} construct a model, it is
possible to create a custom model using the
\code{bsts} package. In this case, omit \code{data},
\code{pre.period}, and \code{post.period}. Instead only
pass in \code{bsts.model}, \code{post.period.response},
and \code{alpha} (optional). The model must have been
fitted on data where the response variable was set to
\code{NA} during the post-treatment period. The actual
observed data during this period must then be passed to
the function in \code{post.period.response}.}
\item{post.period.response}{Actual observed data during
the post-intervention period. This is required if and
only if a fitted \code{bsts.model} is provided instead
of \code{data}.}
\item{alpha}{Desired tail-area probability for posterior
intervals. Defaults to 0.05, which will produce central
95\% intervals.}
}
\value{
\code{CausalImpact()} returns a \code{CausalImpact}
object containing the original observed response, its
counterfactual predictions, as well as pointwise and
cumulative impact estimates along with posterior credible
  intervals. Results can be summarised using \code{summary()}
and visualized using \code{plot()}. The object is a list
with the following fields:
\itemize{
\item \code{series}. Time-series object (\code{zoo})
containing the original response \code{response} as
well as the computed inferences. The added columns are
listed in the table below.
\item \code{summary}. Summary statistics for the
post-intervention period. This includes the posterior
expectation of the overall effect, the corresponding
posterior credible interval, and the posterior
probability that the intervention had any effect,
expressed in terms of a one-sided p-value. Note that
checking whether the posterior interval includes zero
corresponds to a two-sided hypothesis test. In contrast,
checking whether the p-value is below \code{alpha}
corresponds to a one-sided hypothesis test.
\item \code{report}. A suggested verbal
interpretation of the results.
\item \code{model}. A list with four elements \code{pre.period},
\code{post.period}, \code{bsts.model} and \code{alpha}. \code{pre.period}
and \code{post.period} indicate the first and last time point of
the time series in the respective period, \code{bsts.model} is
the fitted model returned by \code{bsts()}, and \code{alpha}
is the user-specified tail-area probability.
}
The field \code{series} is a
\code{zoo} time-series object with the following columns:
\tabular{ll}{
\code{response} \tab
Observed response as supplied to \code{CausalImpact()}. \cr
\code{cum.response} \tab
Cumulative response during the modeling period. \cr
\code{point.pred} \tab
Posterior mean of counterfactual predictions. \cr
\code{point.pred.lower} \tab
Lower limit of a (\code{1 - alpha}) posterior interval. \cr
\code{point.pred.upper} \tab
Upper limit of a (\code{1 - alpha}) posterior interval. \cr
\code{cum.pred} \tab
Posterior cumulative counterfactual predictions. \cr
\code{cum.pred.lower} \tab
Lower limit of a (\code{1 - alpha}) posterior interval.
\cr
\code{cum.pred.upper} \tab
Upper limit of a (\code{1 - alpha}) posterior interval.
\cr
\code{point.effect} \tab
Point-wise posterior causal effect. \cr
\code{point.effect.lower} \tab
Lower limit of the posterior interval (as above). \cr
    \code{point.effect.upper} \tab Upper limit of the
    posterior interval (as above). \cr
\code{cum.effect} \tab
Posterior cumulative effect. \cr
\code{cum.effect.lower} \tab
Lower limit of the posterior interval (as above). \cr
    \code{cum.effect.upper} \tab
Upper limit of the posterior interval (as above). \cr
}
}
\note{
Optional arguments can be passed as a list in \code{model.args},
providing additional control over model construction:
\itemize{
\item \code{niter}. Number of MCMC samples to draw. Higher numbers
yield more accurate inferences. Defaults to 1000.
\item \code{standardize.data}. Whether to standardize all columns of
the data before fitting the model. This is equivalent to an empirical Bayes
approach to setting the priors. It ensures that results are invariant to
linear transformations of the data. Defaults to \code{TRUE}.
\item \code{prior.level.sd}. Prior standard deviation of the Gaussian random
walk of the local level, expressed in terms of data standard deviations.
Defaults to 0.01, a typical choice for well-behaved and stable datasets
with low residual volatility after regressing out known predictors (e.g.,
web searches or sales in high quantities). When in doubt, a safer option is
to use 0.1, as validated on synthetic data, although this may sometimes give
rise to unrealistically wide prediction intervals.
\item \code{nseasons}. Period of the seasonal components. In order to
include a seasonal component, set this to a whole number greater than 1. For
example, if the data represent daily observations, use 7 for a day-of-week
component. This interface currently only supports up to one seasonal
component. To specify multiple seasonal components, use \code{bsts} to
specify the model directly, then pass the fitted model in as
\code{bsts.model}. Defaults to 1, which means no seasonal component is used.
\item \code{season.duration}. Duration of each season, i.e., number of data
points each season spans. For example, to add a day-of-week component to
data with daily granularity, supply the arguments
\code{model.args = list(nseasons = 7, season.duration = 1)}.
Alternatively, use
\code{model.args = list(nseasons = 7, season.duration = 24)}
to add a day-of-week component to data with hourly granularity.
Defaults to 1.
\item \code{dynamic.regression}. Whether to include time-varying regression
coefficients. In combination with a time-varying local trend or even a
time-varying local level, this often leads to overspecification, in which
case a static regression is safer. Defaults to \code{FALSE}.
}
}
\author{
Kay H. Brodersen \email{kbrodersen@google.com}
}
\examples{
# Example 1
#
# Example analysis on a simple artificial dataset
# consisting of a response variable y and a
# single covariate x1.
set.seed(1)
x1 <- 100 + arima.sim(model = list(ar = 0.999), n = 52)
y <- 1.2 * x1 + rnorm(52)
y[41:52] <- y[41:52] + 10
data <- cbind(y, x1)
pre.period <- c(1, 40)
post.period <- c(41, 52)
impact <- CausalImpact(data, pre.period, post.period)
# Print and plot results
summary(impact)
summary(impact, "report")
plot(impact)
# For further output, type:
names(impact)
# Example 2
#
# Weekly time series: same data as in example 1, annotated
# with dates.
times <- seq.Date(as.Date("2016-01-03"), by = 7, length.out = 52)
data <- zoo(cbind(y, x1), times)
impact <- CausalImpact(data, times[pre.period], times[post.period])
summary(impact) # Same as in example 1.
plot(impact) # Plot now has dates on the x axis.
# Example 3
#
# For full flexibility, specify a custom model and pass the
# fitted model to CausalImpact(). To run this example, run
# the code for Example 1 first.
post.period.response <- y[post.period[1] : post.period[2]]
y[post.period[1] : post.period[2]] <- NA
ss <- AddLocalLevel(list(), y)
bsts.model <- bsts(y ~ x1, ss, niter = 100)
impact <- CausalImpact(bsts.model = bsts.model,
post.period.response = post.period.response)
plot(impact)
}
|
/man/CausalImpact.Rd
|
permissive
|
dexterpante/CausalImpact
|
R
| false
| false
| 11,225
|
rd
|
\name{CausalImpact}
\alias{CausalImpact}
\title{Inferring causal impact using structural time-series models}
\description{
\code{CausalImpact()} performs causal inference through
counterfactual predictions using a Bayesian structural
time-series model.
See the package documentation
(http://google.github.io/CausalImpact/) to understand the
underlying assumptions. In particular, the model assumes
that the time series of the treated unit can be explained
in terms of a set of covariates which were themselves not
affected by the intervention whose causal effect we are
interested in.
The easiest way of running a causal analysis is to call
\code{CausalImpact()} with \code{data}, \code{pre.period},
\code{post.period}, \code{model.args} (optional), and
\code{alpha} (optional). In this case, a time-series model
is automatically constructed and estimated. The argument
\code{model.args} offers some control over the model. See
Example 1 below.
An alternative is to supply a custom model. In this case,
the function is called with \code{bsts.model},
\code{post.period.response}, and \code{alpha} (optional).
See Example 3 below.
}
\usage{
CausalImpact(data = NULL, pre.period = NULL,
post.period = NULL, model.args = NULL,
bsts.model = NULL, post.period.response = NULL,
alpha = 0.05)
}
\arguments{
\item{data}{Time series of response variable and any
covariates. This can be a \code{zoo} object, a
\code{vector}, a \code{matrix}, or a \code{data.frame}.
In any of these cases, the response variable must be in
the first column, and any covariates in subsequent
columns. A \code{zoo} object is recommended, as its time
indices will be used to format the x-axis in
\code{plot()}.}
\item{pre.period}{A vector specifying the first and the last time point of the
pre-intervention period in the response vector \code{y}. This
period can be thought of as a training period, used to
determine the relationship between the response variable and
the covariates. If \code{data} is a \code{zoo} object with
a \code{time} attribute, \code{pre.period} must be indicated
using the same time scale (i.e. using the same class as
\code{time(data)}, see Example 2 below). If \code{data} doesn't have
a \code{time} attribute, \code{pre.period} is indicated with
indices.}
\item{post.period}{A vector specifying the first and the last day of the
post-intervention period we wish to study. This is the period
after the intervention has begun whose effect we are
interested in. The relationship between response variable and
covariates, as determined during the pre-period, will be used
to predict how the response variable should have evolved
during the post-period had no intervention taken place. If
\code{data} is a \code{zoo} object with a \code{time}
attribute, \code{post.period} must be indicated using the
same time scale. If \code{data} doesn't have a \code{time}
attribute, \code{post.period} is indicated with indices.}
\item{model.args}{Further arguments to adjust the default
construction of the state-space model used for inference.
One particularly important parameter is
\code{prior.level.sd}, which specifies our a priori
knowledge about the volatility of the data. For even more
control over the model, you can construct your own model
using the \code{bsts} package and feed the fitted model
into \code{CausalImpact()}, as shown in Example 3.}
\item{bsts.model}{Instead of passing in \code{data} and
having \code{CausalImpact()} construct a model, it is
possible to create a custom model using the
\code{bsts} package. In this case, omit \code{data},
\code{pre.period}, and \code{post.period}. Instead only
pass in \code{bsts.model}, \code{post.period.response},
and \code{alpha} (optional). The model must have been
fitted on data where the response variable was set to
\code{NA} during the post-treatment period. The actual
observed data during this period must then be passed to
the function in \code{post.period.response}.}
\item{post.period.response}{Actual observed data during
the post-intervention period. This is required if and
only if a fitted \code{bsts.model} is provided instead
of \code{data}.}
\item{alpha}{Desired tail-area probability for posterior
intervals. Defaults to 0.05, which will produce central
95\% intervals.}
}
\value{
\code{CausalImpact()} returns a \code{CausalImpact}
object containing the original observed response, its
counterfactual predictions, as well as pointwise and
cumulative impact estimates along with posterior credible
intervals. Results can be summarised using \code{summary()}
and visualized using \code{plot()}. The object is a list
with the following fields:
\itemize{
\item \code{series}. Time-series object (\code{zoo})
containing the original response \code{response} as
well as the computed inferences. The added columns are
listed in the table below.
\item \code{summary}. Summary statistics for the
post-intervention period. This includes the posterior
expectation of the overall effect, the corresponding
posterior credible interval, and the posterior
probability that the intervention had any effect,
expressed in terms of a one-sided p-value. Note that
checking whether the posterior interval includes zero
corresponds to a two-sided hypothesis test. In contrast,
checking whether the p-value is below \code{alpha}
corresponds to a one-sided hypothesis test.
\item \code{report}. A suggested verbal
interpretation of the results.
\item \code{model}. A list with four elements \code{pre.period},
\code{post.period}, \code{bsts.model} and \code{alpha}. \code{pre.period}
and \code{post.period} indicate the first and last time point of
the time series in the respective period, \code{bsts.model} is
the fitted model returned by \code{bsts()}, and \code{alpha}
is the user-specified tail-area probability.
}
The field \code{series} is a
\code{zoo} time-series object with the following columns:
\tabular{ll}{
\code{response} \tab
Observed response as supplied to \code{CausalImpact()}. \cr
\code{cum.response} \tab
Cumulative response during the modeling period. \cr
\code{point.pred} \tab
Posterior mean of counterfactual predictions. \cr
\code{point.pred.lower} \tab
Lower limit of a (\code{1 - alpha}) posterior interval. \cr
\code{point.pred.upper} \tab
Upper limit of a (\code{1 - alpha}) posterior interval. \cr
\code{cum.pred} \tab
Posterior cumulative counterfactual predictions. \cr
\code{cum.pred.lower} \tab
Lower limit of a (\code{1 - alpha}) posterior interval.
\cr
\code{cum.pred.upper} \tab
Upper limit of a (\code{1 - alpha}) posterior interval.
\cr
\code{point.effect} \tab
Point-wise posterior causal effect. \cr
\code{point.effect.lower} \tab
Lower limit of the posterior interval (as above). \cr
\code{point.effect.upper} \tab Upper limit of the
posterior interval (as above). \cr
\code{cum.effect} \tab
Posterior cumulative effect. \cr
\code{cum.effect.lower} \tab
Lower limit of the posterior interval (as above). \cr
\code{cum.effect.upper} \tab
Upper limit of the posterior interval (as above). \cr
}
}
\note{
Optional arguments can be passed as a list in \code{model.args},
providing additional control over model construction:
\itemize{
\item \code{niter}. Number of MCMC samples to draw. Higher numbers
yield more accurate inferences. Defaults to 1000.
\item \code{standardize.data}. Whether to standardize all columns of
the data before fitting the model. This is equivalent to an empirical Bayes
approach to setting the priors. It ensures that results are invariant to
linear transformations of the data. Defaults to \code{TRUE}.
\item \code{prior.level.sd}. Prior standard deviation of the Gaussian random
walk of the local level, expressed in terms of data standard deviations.
Defaults to 0.01, a typical choice for well-behaved and stable datasets
with low residual volatility after regressing out known predictors (e.g.,
web searches or sales in high quantities). When in doubt, a safer option is
to use 0.1, as validated on synthetic data, although this may sometimes give
rise to unrealistically wide prediction intervals.
\item \code{nseasons}. Period of the seasonal components. In order to
include a seasonal component, set this to a whole number greater than 1. For
example, if the data represent daily observations, use 7 for a day-of-week
component. This interface currently only supports up to one seasonal
component. To specify multiple seasonal components, use \code{bsts} to
specify the model directly, then pass the fitted model in as
\code{bsts.model}. Defaults to 1, which means no seasonal component is used.
\item \code{season.duration}. Duration of each season, i.e., number of data
points each season spans. For example, to add a day-of-week component to
data with daily granularity, supply the arguments
\code{model.args = list(nseasons = 7, season.duration = 1)}.
Alternatively, use
\code{model.args = list(nseasons = 7, season.duration = 24)}
to add a day-of-week component to data with hourly granularity.
Defaults to 1.
\item \code{dynamic.regression}. Whether to include time-varying regression
coefficients. In combination with a time-varying local trend or even a
time-varying local level, this often leads to overspecification, in which
case a static regression is safer. Defaults to \code{FALSE}.
}
}
\author{
Kay H. Brodersen \email{kbrodersen@google.com}
}
\examples{
# Example 1
#
# Example analysis on a simple artificial dataset
# consisting of a response variable y and a
# single covariate x1.
set.seed(1)
x1 <- 100 + arima.sim(model = list(ar = 0.999), n = 52)
y <- 1.2 * x1 + rnorm(52)
y[41:52] <- y[41:52] + 10
data <- cbind(y, x1)
pre.period <- c(1, 40)
post.period <- c(41, 52)
impact <- CausalImpact(data, pre.period, post.period)
# Print and plot results
summary(impact)
summary(impact, "report")
plot(impact)
# For further output, type:
names(impact)
# Example 2
#
# Weekly time series: same data as in example 1, annotated
# with dates.
times <- seq.Date(as.Date("2016-01-03"), by = 7, length.out = 52)
data <- zoo(cbind(y, x1), times)
impact <- CausalImpact(data, times[pre.period], times[post.period])
summary(impact) # Same as in example 1.
plot(impact) # Plot now has dates on the x axis.
# Example 3
#
# For full flexibility, specify a custom model and pass the
# fitted model to CausalImpact(). To run this example, run
# the code for Example 1 first.
post.period.response <- y[post.period[1] : post.period[2]]
y[post.period[1] : post.period[2]] <- NA
ss <- AddLocalLevel(list(), y)
bsts.model <- bsts(y ~ x1, ss, niter = 100)
impact <- CausalImpact(bsts.model = bsts.model,
post.period.response = post.period.response)
plot(impact)
}
|
# theme_psychro {{{
#' Custom theme for psychrometric chart.
#'
#' Builds on \code{ggplot2::theme_bw()}: recolors the x grid lines (dark
#' red) and y grid lines (dark blue), colors axis lines/text/ticks,
#' removes the panel border, and sets the panel background fill.
#'
#' @param background Fill color of the panel background. Defaults to
#'   \code{"white"}.
#' @param base_size Base font size, passed to \code{theme_bw()}.
#'   Defaults to 11.
#' @param base_family Base font family, passed to \code{theme_bw()}.
#'   Defaults to \code{""}.
#' @param base_line_size Base size for line elements. Defaults to
#'   \code{base_size/22}.
#' @param base_rect_size Base size for rect elements. Defaults to
#'   \code{base_size/22}.
#'
#' @return A ggplot2 theme.
#'
#' @keywords internal
#' @author Hongyuan Jia
#' @importFrom ggplot2 element_blank element_line element_rect element_text theme theme_bw
#' @examples
#' theme_psychro()
theme_psychro <- function(background = "white", base_size = 11, base_family = "", base_line_size = base_size/22, base_rect_size = base_size/22) {
    # use theme_bw as the base
    th <- theme_bw(base_size = base_size, base_family = base_family,
        base_line_size = base_line_size, base_rect_size = base_rect_size
    )
    th <- th + theme(
        # color for grids: warm red for vertical, cool blue for horizontal
        panel.grid.major.x = element_line(color = "#870021", size = 0.15),
        panel.grid.minor.x = element_line(color = "#870021", size = 0.15),
        panel.grid.major.y = element_line(color = "#4E6390", size = 0.15),
        panel.grid.minor.y = element_line(color = "#4E6390", size = 0.15),
        axis.line.x = element_line(color = "#313329"),
        axis.line.y = element_line(color = "#313329"),
        axis.text.x = element_text(color = "#313329"),
        axis.text.y = element_text(color = "#313329"),
        axis.ticks.x = element_line(color = "#313329"),
        axis.ticks.y = element_line(color = "#313329"),
        # remove panel border (painted white rather than element_blank())
        panel.border = element_rect(color = "white"),
        # set chart background
        panel.background = element_rect(fill = background, color = "white", size = 0.8)
    )
    th
}
# }}}
# theme_psychro_ashrae {{{
#' ASHRAE-style theme for psychrometric chart.
#'
#' Based on \code{ggplot2::theme_bw()}, with solid black axis lines, small
#' black axis text, thin tick marks, and no panel border.
#'
#' @param base_size Base font size, passed to \code{theme_bw()}.
#'   Defaults to 11.
#' @param base_family Base font family, passed to \code{theme_bw()}.
#'   Defaults to \code{""}.
#' @param base_line_size Base size for line elements. Defaults to
#'   \code{base_size/22}.
#' @param base_rect_size Base size for rect elements. Defaults to
#'   \code{base_size/22}.
#'
#' @return A ggplot2 theme.
#'
#' @keywords internal
theme_psychro_ashrae <- function (base_size = 11, base_family = "",
                                  base_line_size = base_size/22,
                                  base_rect_size = base_rect_size) {
    # Fix: forward the sizing arguments to theme_bw(); previously they
    # were accepted but silently ignored (theme_bw() was called bare).
    theme_bw(base_size = base_size, base_family = base_family,
             base_line_size = base_line_size,
             base_rect_size = base_rect_size) +
        ggplot2::theme(
            axis.line = element_line(color = "black", size = 0.8, linetype = 1),
            axis.text = element_text(color = "black", size = 8),
            axis.ticks = element_line(size = 0.2),
            axis.ticks.length.x = unit(0.3, "lines"),
            axis.ticks.length.y = unit(0.3, "lines"),
            axis.text.x.bottom = element_text(margin = margin(t = .5, unit = "lines")),
            panel.border = element_blank()
        )
}
# }}}
|
/R/theme.R
|
permissive
|
ariel32/ggpsychro
|
R
| false
| false
| 2,259
|
r
|
# theme_psychro {{{
#' Custom theme for psychrometric chart.
#'
#' Builds on \code{ggplot2::theme_bw()}: recolors the x grid lines (dark
#' red) and y grid lines (dark blue), colors axis lines/text/ticks,
#' removes the panel border, and sets the panel background fill.
#'
#' @param background Fill color of the panel background. Defaults to
#'   \code{"white"}.
#' @param base_size Base font size, passed to \code{theme_bw()}.
#'   Defaults to 11.
#' @param base_family Base font family, passed to \code{theme_bw()}.
#'   Defaults to \code{""}.
#' @param base_line_size Base size for line elements. Defaults to
#'   \code{base_size/22}.
#' @param base_rect_size Base size for rect elements. Defaults to
#'   \code{base_size/22}.
#'
#' @return A ggplot2 theme.
#'
#' @keywords internal
#' @author Hongyuan Jia
#' @importFrom ggplot2 element_blank element_line element_rect element_text theme theme_bw
#' @examples
#' theme_psychro()
theme_psychro <- function(background = "white", base_size = 11, base_family = "", base_line_size = base_size/22, base_rect_size = base_size/22) {
    # use theme_bw as the base
    th <- theme_bw(base_size = base_size, base_family = base_family,
        base_line_size = base_line_size, base_rect_size = base_rect_size
    )
    th <- th + theme(
        # color for grids: warm red for vertical, cool blue for horizontal
        panel.grid.major.x = element_line(color = "#870021", size = 0.15),
        panel.grid.minor.x = element_line(color = "#870021", size = 0.15),
        panel.grid.major.y = element_line(color = "#4E6390", size = 0.15),
        panel.grid.minor.y = element_line(color = "#4E6390", size = 0.15),
        axis.line.x = element_line(color = "#313329"),
        axis.line.y = element_line(color = "#313329"),
        axis.text.x = element_text(color = "#313329"),
        axis.text.y = element_text(color = "#313329"),
        axis.ticks.x = element_line(color = "#313329"),
        axis.ticks.y = element_line(color = "#313329"),
        # remove panel border (painted white rather than element_blank())
        panel.border = element_rect(color = "white"),
        # set chart background
        panel.background = element_rect(fill = background, color = "white", size = 0.8)
    )
    th
}
# }}}
# theme_psychro_ashrae {{{
#' ASHRAE-style theme for psychrometric chart.
#'
#' Based on \code{ggplot2::theme_bw()}, with solid black axis lines, small
#' black axis text, thin tick marks, and no panel border.
#'
#' @param base_size Base font size, passed to \code{theme_bw()}.
#'   Defaults to 11.
#' @param base_family Base font family, passed to \code{theme_bw()}.
#'   Defaults to \code{""}.
#' @param base_line_size Base size for line elements. Defaults to
#'   \code{base_size/22}.
#' @param base_rect_size Base size for rect elements. Defaults to
#'   \code{base_size/22}.
#'
#' @return A ggplot2 theme.
#'
#' @keywords internal
theme_psychro_ashrae <- function (base_size = 11, base_family = "",
                                  base_line_size = base_size/22,
                                  base_rect_size = base_rect_size) {
    # Fix: forward the sizing arguments to theme_bw(); previously they
    # were accepted but silently ignored (theme_bw() was called bare).
    theme_bw(base_size = base_size, base_family = base_family,
             base_line_size = base_line_size,
             base_rect_size = base_rect_size) +
        ggplot2::theme(
            axis.line = element_line(color = "black", size = 0.8, linetype = 1),
            axis.text = element_text(color = "black", size = 8),
            axis.ticks = element_line(size = 0.2),
            axis.ticks.length.x = unit(0.3, "lines"),
            axis.ticks.length.y = unit(0.3, "lines"),
            axis.text.x.bottom = element_text(margin = margin(t = .5, unit = "lines")),
            panel.border = element_blank()
        )
}
# }}}
|
#' Get an aggregate value from the indegree of nodes
#' @description Get a single,
#' aggregate value from the
#' indegree values for all nodes
#' in a graph, or, a subset of
#' graph nodes.
#' @param graph a graph object of
#' class \code{dgr_graph}.
#' @param agg the aggregation
#' function to use for summarizing
#' indegree values from graph nodes.
#' The following aggregation functions
#' can be used: \code{sum}, \code{min},
#' \code{max}, \code{mean}, or
#' \code{median}.
#' @param conditions an option to use
#' filtering conditions for the nodes
#' to consider.
#' @return a vector with an aggregate
#' indegree value.
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 20,
#' m = 35,
#' set_seed = 23) %>%
#' set_node_attrs(
#' node_attr = value,
#' values = rnorm(
#' n = count_nodes(.),
#' mean = 5,
#' sd = 1) %>% round(1))
#'
#' # Get the mean indegree value
#' # from all nodes in the graph
#' graph %>%
#' get_agg_degree_in(
#' agg = "mean")
#'
#' # Other aggregation functions
#' # can be used (`min`, `max`,
#' # `median`, `sum`); let's get
#' # the median in this example
#' graph %>%
#' get_agg_degree_in(
#' agg = "median")
#'
#' # The aggregation of indegree
#' # can occur for a subset of the
#' # graph nodes and this is made
#' # possible by specifying
#' # `conditions` for the nodes
#' graph %>%
#' get_agg_degree_in(
#' agg = "mean",
#' conditions = value > 5.0)
#' @importFrom dplyr group_by summarize_ select filter ungroup pull
#' @importFrom stats as.formula
#' @importFrom purrr flatten_dbl
#' @importFrom rlang enquo UQ
#' @export get_agg_degree_in
get_agg_degree_in <- function(graph,
                              agg,
                              conditions = NULL) {

  conditions <- rlang::enquo(conditions)

  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    stop(
      "The graph object is not valid.",
      call. = FALSE)
  }

  # Verify that the value provided for `agg` is one of the accepted
  # aggregation types; do this up front, before any filtering work.
  if (!(agg %in% c("sum", "min", "max", "mean", "median"))) {
    stop(
      "The aggregation method must be either `min`, `max`, `mean`, `median`, or `sum`.",
      call. = FALSE)
  }

  # Create binding for variable
  id <- NULL

  # Use a NULL sentinel rather than the previous `exists("node_ids")`
  # check: exists() also searches enclosing environments, so an unrelated
  # `node_ids` in the caller's workspace could silently trigger filtering.
  node_ids <- NULL

  # If filtering conditions are provided then
  # pass in those conditions and filter the ndf
  if (!((rlang::UQ(conditions) %>% paste())[2] == "NULL")) {

    # Extract the node data frame from the graph
    ndf <- get_node_df(graph)

    ndf <-
      filter(
        .data = ndf,
        rlang::UQ(conditions))

    # Get a vector of node ID values
    node_ids <-
      ndf %>%
      dplyr::pull(id)
  }

  # Get a data frame with indegree values for
  # all nodes in the graph
  indegree_df <- get_degree_in(graph)

  if (!is.null(node_ids)) {
    indegree_df <-
      indegree_df %>%
      dplyr::filter(id %in% node_ids)
  }

  # Apply the requested aggregation function directly to the indegree
  # column. This replaces the deprecated dplyr::summarize_() + formula
  # interface while producing the same scalar result.
  agg_fun <- match.fun(agg)
  agg_fun(indegree_df$indegree, na.rm = TRUE)
}
|
/R/get_agg_degree_in.R
|
permissive
|
OleksiyAnokhin/DiagrammeR
|
R
| false
| false
| 3,343
|
r
|
#' Get an aggregate value from the indegree of nodes
#' @description Get a single,
#' aggregate value from the
#' indegree values for all nodes
#' in a graph, or, a subset of
#' graph nodes.
#' @param graph a graph object of
#' class \code{dgr_graph}.
#' @param agg the aggregation
#' function to use for summarizing
#' indegree values from graph nodes.
#' The following aggregation functions
#' can be used: \code{sum}, \code{min},
#' \code{max}, \code{mean}, or
#' \code{median}.
#' @param conditions an option to use
#' filtering conditions for the nodes
#' to consider.
#' @return a vector with an aggregate
#' indegree value.
#' @examples
#' # Create a random graph using the
#' # `add_gnm_graph()` function
#' graph <-
#' create_graph() %>%
#' add_gnm_graph(
#' n = 20,
#' m = 35,
#' set_seed = 23) %>%
#' set_node_attrs(
#' node_attr = value,
#' values = rnorm(
#' n = count_nodes(.),
#' mean = 5,
#' sd = 1) %>% round(1))
#'
#' # Get the mean indegree value
#' # from all nodes in the graph
#' graph %>%
#' get_agg_degree_in(
#' agg = "mean")
#'
#' # Other aggregation functions
#' # can be used (`min`, `max`,
#' # `median`, `sum`); let's get
#' # the median in this example
#' graph %>%
#' get_agg_degree_in(
#' agg = "median")
#'
#' # The aggregation of indegree
#' # can occur for a subset of the
#' # graph nodes and this is made
#' # possible by specifying
#' # `conditions` for the nodes
#' graph %>%
#' get_agg_degree_in(
#' agg = "mean",
#' conditions = value > 5.0)
#' @importFrom dplyr group_by summarize_ select filter ungroup pull
#' @importFrom stats as.formula
#' @importFrom purrr flatten_dbl
#' @importFrom rlang enquo UQ
#' @export get_agg_degree_in
get_agg_degree_in <- function(graph,
                              agg,
                              conditions = NULL) {

  conditions <- rlang::enquo(conditions)

  # Validation: Graph object is valid
  if (graph_object_valid(graph) == FALSE) {
    stop(
      "The graph object is not valid.",
      call. = FALSE)
  }

  # Verify that the value provided for `agg` is one of the accepted
  # aggregation types; do this up front, before any filtering work.
  if (!(agg %in% c("sum", "min", "max", "mean", "median"))) {
    stop(
      "The aggregation method must be either `min`, `max`, `mean`, `median`, or `sum`.",
      call. = FALSE)
  }

  # Create binding for variable
  id <- NULL

  # Use a NULL sentinel rather than the previous `exists("node_ids")`
  # check: exists() also searches enclosing environments, so an unrelated
  # `node_ids` in the caller's workspace could silently trigger filtering.
  node_ids <- NULL

  # If filtering conditions are provided then
  # pass in those conditions and filter the ndf
  if (!((rlang::UQ(conditions) %>% paste())[2] == "NULL")) {

    # Extract the node data frame from the graph
    ndf <- get_node_df(graph)

    ndf <-
      filter(
        .data = ndf,
        rlang::UQ(conditions))

    # Get a vector of node ID values
    node_ids <-
      ndf %>%
      dplyr::pull(id)
  }

  # Get a data frame with indegree values for
  # all nodes in the graph
  indegree_df <- get_degree_in(graph)

  if (!is.null(node_ids)) {
    indegree_df <-
      indegree_df %>%
      dplyr::filter(id %in% node_ids)
  }

  # Apply the requested aggregation function directly to the indegree
  # column. This replaces the deprecated dplyr::summarize_() + formula
  # interface while producing the same scalar result.
  agg_fun <- match.fun(agg)
  agg_fun(indegree_df$indegree, na.rm = TRUE)
}
|
#' Rational Number
#'
#' Data type and functions to work with rational numbers.
#'
#' @param num (integer) numerator
#' @param denom (integer) denominator
#'
#' @details Each rational number is stored as a length-2 vector
#'   \code{c(num, denom)}; the returned object is a list of such pairs.
#'
#' @value A list of numerator/denominator pairs with class "rational".
#'
#' @export
#' @examples
#' rat <- rational(1:3, 3:5)
#' rat + 1
rational <- function(num, denom) {
  # Pair up numerators and denominators element-wise, then tag the
  # resulting list with the "rational" class.
  structure(Map(c, num, denom), class = "rational")
}
#' @export
print.rational <- function(x, ...) {
  # Format each element as "num/denom" and print them space-separated.
  # Fix: the previous implementation used purrr::map / flatten_chr and
  # magrittr's %>% without importing either package; vapply/cat are
  # base-R equivalents with identical output.
  cat(vapply(x, paste, character(1), collapse = "/"))
  # Return the input invisibly, as print methods should.
  invisible(x)
}
#' @export
"+.rational" <- function(x, y) {
  # `+` dispatches here whenever either operand is rational, so make
  # sure `x` is the rational one. This fixes e.g. `1 + rat`, which
  # previously treated the numeric scalar as the rational object.
  if (!inherits(x, "rational")) {
    tmp <- x
    x <- y
    y <- tmp
  }
  num <- vapply(x, function(p) p[1], numeric(1))
  denom <- vapply(x, function(p) p[2], numeric(1))
  if (inherits(y, "rational")) {
    # rational + rational: cross-multiply over a common denominator.
    num2 <- vapply(y, function(p) p[1], numeric(1))
    denom2 <- vapply(y, function(p) p[2], numeric(1))
    rational(num * denom2 + num2 * denom, denom * denom2)
  } else {
    # rational + numeric (original behavior, unchanged).
    rational(num + y * denom, denom)
  }
}
|
/package/R/rational.R
|
no_license
|
compStat/programming-course
|
R
| false
| false
| 689
|
r
|
#' Rational Number
#'
#' Data type and functions to work with rational numbers.
#'
#' @param num (integer) numerator
#' @param denom (integer) denominator
#'
#' @details Each rational number is stored as a length-2 vector
#'   \code{c(num, denom)}; the returned object is a list of such pairs.
#'
#' @value A list of numerator/denominator pairs with class "rational".
#'
#' @export
#' @examples
#' rat <- rational(1:3, 3:5)
#' rat + 1
rational <- function(num, denom) {
  # Pair up numerators and denominators element-wise, then tag the
  # resulting list with the "rational" class.
  structure(Map(c, num, denom), class = "rational")
}
#' @export
print.rational <- function(x, ...) {
  # Format each element as "num/denom" and print them space-separated.
  # Fix: the previous implementation used purrr::map / flatten_chr and
  # magrittr's %>% without importing either package; vapply/cat are
  # base-R equivalents with identical output.
  cat(vapply(x, paste, character(1), collapse = "/"))
  # Return the input invisibly, as print methods should.
  invisible(x)
}
#' @export
"+.rational" <- function(x, y) {
  # `+` dispatches here whenever either operand is rational, so make
  # sure `x` is the rational one. This fixes e.g. `1 + rat`, which
  # previously treated the numeric scalar as the rational object.
  if (!inherits(x, "rational")) {
    tmp <- x
    x <- y
    y <- tmp
  }
  num <- vapply(x, function(p) p[1], numeric(1))
  denom <- vapply(x, function(p) p[2], numeric(1))
  if (inherits(y, "rational")) {
    # rational + rational: cross-multiply over a common denominator.
    num2 <- vapply(y, function(p) p[1], numeric(1))
    denom2 <- vapply(y, function(p) p[2], numeric(1))
    rational(num * denom2 + num2 * denom, denom * denom2)
  } else {
    # rational + numeric (original behavior, unchanged).
    rational(num + y * denom, denom)
  }
}
|
# ---------------------------------------------------------------------------
# Single-cell RNA-seq analysis of young vs. old mouse ovary (10x Genomics):
# Seurat for QC/normalization/clustering, miloR for differential abundance.
# NOTE(review): every data path is hard-coded to a local machine; rerunning
# requires editing the '/Users/eliel/...' paths below.
# ---------------------------------------------------------------------------
library(dplyr)
library(Seurat)
library(SeuratData)
library(patchwork)
library(edgeR)
library(limma)
library(scater)
library(miloR)
library(statmod)
library(MultinomialCI)
# Loading the data
data_dir_old <- '/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/10XData/01_Ovary_Old_2_Oct_19'
data_dir_young <- '/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/10XData/02_Ovary_Young_26_Nov_19/filtered'
# Read each 10x count matrix and wrap it in a Seurat object; `project`
# tags the origin ("Old"/"young") and becomes the age factor later.
expression_matrix_old <- Read10X(data.dir = data_dir_old)
old = CreateSeuratObject(counts = expression_matrix_old, project = "Old")
expression_matrix_young <- Read10X(data.dir = data_dir_young)
young = CreateSeuratObject(counts = expression_matrix_young, project = "young")
# Merge old and young
# Cell barcodes get "Young_"/"Old_" prefixes to stay unique.
all_data <- merge(young, y = old, add.cell.ids = c("Young", "Old"), project = "All10X")
### All data analysis ###
# QC metric: percent mitochondrial reads per cell (mouse genes use "mt-").
all_data[["percent.mt"]] <- PercentageFeatureSet(all_data, pattern = "^mt-")
VlnPlot(all_data, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
# Filter low-quality cells and likely doublets; thresholds chosen from
# the violin plots above.
all_data <- subset(all_data, subset = nFeature_RNA > 200 & nFeature_RNA < 2500 & percent.mt < 10)
# Standard Seurat pipeline: normalize -> variable features -> scale ->
# PCA -> neighbor graph -> clusters -> 2D embeddings.
all_data <- NormalizeData(all_data, normalization.method = "LogNormalize", scale.factor = 10000)
all_data <- FindVariableFeatures(all_data, selection.method = "vst", nfeatures = 2000)
all.genes <- rownames(all_data)
all_data <- ScaleData(all_data, features = all.genes)
all_data <- RunPCA(all_data, features = VariableFeatures(object = all_data))
all_data <- FindNeighbors(all_data, dims = 1:15)
all_data <- FindClusters(all_data, resolution = 0.55)
all_data <- RunTSNE(all_data, dims = 1:15)
all_data <- RunUMAP(all_data, dims = 1:15)
DimPlot(all_data, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
DimPlot(all_data, reduction = "umap")
###############################################
# Write to csv
# NOTE(review): fwrite() comes from data.table, which is never attached
# above — confirm it is loaded elsewhere or add library(data.table).
data_to_write_out <- as.data.frame(as.matrix(all_data@reductions$tsne))
fwrite(x = data_to_write_out, file = "outfile.csv")
write.csv(x=all_data@reductions$tsne@cell.embeddings,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_wo_batch.csv')
write.csv(x=all_data@meta.data$seurat_clusters,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_wo_batch_clusters.csv')
# only good clusters
# clustred_data <- subset(all_data,idents=c(13,14) , invert = TRUE)
# DimPlot(clustred_data, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
######################### Batch correction #########################
# Integrate the young/old datasets with Seurat's anchor-based workflow to
# remove the batch effect, then redo scaling/PCA/clustering on the
# integrated object.
age.list <- SplitObject(all_data, split.by = "orig.ident")
features <- SelectIntegrationFeatures(object.list = age.list)
immune.anchors <- FindIntegrationAnchors(object.list = age.list, anchor.features = features)
immune.combined <- IntegrateData(anchorset = immune.anchors)
immune.combined <- ScaleData(immune.combined, features = all.genes)
# NOTE(review): variable features are taken from the uncorrected
# `all_data` object rather than `immune.combined` — confirm intentional.
immune.combined <- RunPCA(immune.combined, features = VariableFeatures(object = all_data))
immune.combined <- FindNeighbors(immune.combined, dims = 1:15)
immune.combined <- FindClusters(immune.combined, resolution = 0.55)
immune.combined <- RunTSNE(immune.combined, dims = 1:15)
DimPlot(immune.combined, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
# write.csv(x=immune.combined@reductions$tsne@cell.embeddings,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_w_batch.csv')
# write.csv(x=immune.combined@meta.data$seurat_clusters,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_w_batch_clusters.csv')
# write.csv(x=immune.combined@meta.data$orig.ident,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/age_factor.csv')
# write.csv(x=immune.combined@meta.data$seurat_clusters,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_w_batch_clusters.csv')
immune.combined_new_cluster <- immune.combined
# load new clustering idx file and update values in immune seurat object
# Cluster labels recomputed outside R are pasted back cell-by-cell.
# Assumes the CSV row order matches the object's cell order — TODO confirm.
new_clustering_value <- read.csv('/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/new_clusters_idx_with_cord.csv')
for (idx in 1:length(new_clustering_value$idx_aligned)) {
  immune.combined_new_cluster@meta.data$seurat_clusters[idx] = new_clustering_value$idx_aligned[idx]
  immune.combined_new_cluster@active.ident[idx] = new_clustering_value$idx_aligned[idx]
}
# Drop cluster 15, then visualize the curated clustering.
immune.combined_new_cluster <- subset(immune.combined_new_cluster, ident=c(15) , invert = TRUE)
DimPlot(immune.combined_new_cluster, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
DimPlot(immune.combined_new_cluster, reduction = "tsne",label = "TRUE")
###########################################################################
# DotPlot for all clusters (equivalent to Fig. 1C)
# NOTE(review): this section copies `all_data` into
# `immune.combined_new_cluster_tby`, but the loop below mutates `all_data`
# (not the copy), and then `all_data` is overwritten FROM the copy —
# verify which object is meant to carry the new labels.
immune.combined_new_cluster_tby <- all_data
# NOTE(review): Windows path here vs. macOS paths elsewhere in the script.
new_clustering_value <- read.csv('C:/Users/SavirLab/Technion/Yoni Savir - TalBenYakov/R Code/EA/new_clusters_idx_with_cord.csv')
for (idx in 1:length(new_clustering_value$idx_aligned)) {
  all_data@meta.data$seurat_clusters[idx] = new_clustering_value$idx_aligned[idx]
  all_data@active.ident[idx] = new_clustering_value$idx_aligned[idx]
}
all_data <- subset(immune.combined_new_cluster_tby, ident=c(14) , invert = TRUE)
DimPlot(all_data, reduction = "tsne",split.by = "orig.ident",label = "FALSE")
DimPlot(all_data, reduction = "tsne",label = "FALSE")
# Human-readable cell-type labels, one per cluster id.
new.cluster.ids <- c("ILC1", "DNT", "CD8+CD4 T", "NKT", "Neutrophils", "NK",
                     "Macrophages-1", "B cells", "Dendritic cells-1","Macrophages-2",
                     "Dendritic cells-2","ILC2","ILC3","X","X")
names(new.cluster.ids) <- levels(immune.combined_new_cluster_tby)
immune.combined_new_cluster_tby <- RenameIdents(immune.combined_new_cluster_tby, new.cluster.ids)
# Marker genes used for the annotation dot plot.
cd_genes <- c("S100a8","Itgam","Adgre1","Itgax","Klrb1c",
              "Ccl5","Itga1","Gata3","Il17rb","Ly6c2","Cd3e",
              "Trbc2","Cd8b1","Cd4","Cd28","Tmem176b","Il7r",
              "Tcrg-C2","Il2ra","Cd19")
DotPlot(object = immune.combined_new_cluster_tby, features = cd_genes)
###########################################################################
# Perform Milo on the immune.combined_new_cluster (after batch correction) - EA
# Differential-abundance testing of cell neighborhoods between ages with
# miloR, run on the batch-corrected, re-clustered object.
immune.combined_bc_sce <- as.SingleCellExperiment(immune.combined_new_cluster) # make a sc object
immune.combined_bc_milo <- Milo(immune.combined_bc_sce)
# KNN graph parameters: k neighbors, d reduced dimensions.
k = 13
d = 30
immune.combined_bc_milo <- buildGraph(immune.combined_bc_milo, k = k, d = d) #k=13, d=30: n=330
immune.combined_bc_milo <- makeNhoods(immune.combined_bc_milo, prop = 0.1, k = k, d = d, refined = TRUE)
plotNhoodSizeHist(immune.combined_bc_milo)
# Assign each cell to one of four pseudo-replicate samples.
# NOTE(review): the counts (1657/1658/2738/2738) are hard-coded and
# assume a fixed cell order — confirm against the actual object.
immune.combined_bc_milo@colData$Samples <- c(rep(1, 1657), rep(2, 1658), rep(3, 2738), rep(4, 2738))
immune.combined_bc_milo <- countCells(immune.combined_bc_milo, meta.data = as.data.frame(colData(immune.combined_bc_milo)), sample="Samples")
# NOTE(review): d=60 here vs. d=30 used when building the graph — verify.
immune.combined_bc_milo <- calcNhoodDistance(immune.combined_bc_milo, d=60)
immune.combined_bc_milo <- buildNhoodGraph(immune.combined_bc_milo)
head(nhoodCounts(immune.combined_bc_milo))
# Design table: one row per sample, age (orig.ident) as the covariate.
age_design <- data.frame(colData(immune.combined_bc_milo))[c("Samples", "orig.ident")]
age_design <- distinct(age_design)
rownames(age_design) <- age_design$Samples
da_results <- testNhoods(immune.combined_bc_milo, design = ~ orig.ident, design.df = age_design)
head(da_results)
# plot milo da results
tsne_plot <-plotReducedDim(immune.combined_bc_sce, colour_by="orig.ident", dimred = "TSNE")
ggplot(da_results, aes(logFC, -log10(SpatialFDR))) +
  geom_point() +
  geom_hline(yintercept = 1) ## Mark significance threshold (10% FDR)
nh_graph_pl <- plotNhoodGraphDA(immune.combined_bc_milo, da_results, layout="TSNE",alpha=0.1)
tsne_plot + nh_graph_pl +
  plot_layout(guides="collect")
# add cluster data
da_results <- annotateNhoods(immune.combined_bc_milo, da_results, coldata_col = "ident")
head(da_results)
da_results <- groupNhoods(immune.combined_bc_milo, da_results, max.lfc.delta = 5)
plotNhoodGroups(immune.combined_bc_milo, da_results, layout="TSNE")
plotDAbeeswarm(da_results, group.by = "ident")
# Flip the sign of logFC and subtract the log2 ratio of total cell
# counts (presumably 5476 vs 3315 cells per condition — TODO confirm)
# to correct for the overall abundance difference.
da_results$logFC <- (da_results$logFC * (-1) - log(5476/3315, 2))
# Map numeric cluster ids to cell-type names for plotting.
ident_name <- da_results$ident
ident_name <- plyr::mapvalues(ident_name, from=c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"),
                              to=c("Neutrophils", "Macrophages", "cDC2", "cDC1", "NK cells", "ILC1", "ILC2", "ILC3", "NKT cells",
                                   "CD8+ T", "CD4+ T", "CD8- CD4- cells", "B cells"))
da_results$ident_name <- ident_name
plotDAbeeswarm(da_results, group.by = "ident_name")
#####################
write.csv(x=da_results,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/da_results.csv')
write.csv(x=immune.combined_bc_milo@nhoods, file='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/bc_milo.csv')
# do some changes in da_results
# first we want to inverse the LFC, for examples ident '12' should be with positive LFC
# second we want to normalize the number
da_results_fc_correction <- da_results
# NOTE(review): logFC was already negated above, so this flips it back —
# double-check which orientation is intended.
da_results_fc_correction$logFC <- (da_results_fc_correction$logFC * (-1) )
plotDAbeeswarm(da_results_fc_correction, group.by = "ident")
#load new da after normalize data
da_results_norm <- read.csv('/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/da_results_cell_591nhoods_after_correction_v2.csv')
da_results_norm <- annotateNhoods(immune.combined_bc_milo, da_results_norm, coldata_col = "ident")
plotDAbeeswarm(da_results_norm, group.by = "ident")
ident_name <- da_results_norm$ident
ident_name <- plyr::mapvalues(ident_name, from=c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"),
                              to=c("Neutrophils", "Macrophages", "cDC2", "cDC1", "NK cells", "ILC1", "ILC2", "ILC3",
                                   "NKT cells", "CD8+ T", "CD4+ T", "CD8- CD4- cells", "B cells"))
da_results_norm$ident_name <- ident_name
plotDAbeeswarm(da_results_norm, group.by = "ident_name")
################################################################################
# multiomial error
# 95% simultaneous confidence intervals for cell-type proportions.
m = multinomialCI(c(1308,1990,462,124,134,181,258,227,146,152,47,71), 0.05)
# Perform Milo on the clustred data - OLD
# NOTE(review): legacy section — `clustred_data` is never created in this
# script (its creation is commented out earlier), so this will fail
# if run as-is.
clustred_data_sce <- as.SingleCellExperiment(clustred_data) # make a sc object
clustred_data_milo <- Milo(clustred_data_sce) # make a milo object
clustred_data_milo <- buildGraph(clustred_data_milo, k = 13, d = 30)
clustred_data_milo <- makeNhoods(clustred_data_milo, prop = 0.1, k = 13, d=30, refined = TRUE)
plotNhoodSizeHist(clustred_data_milo)
clustred_data_milo@colData$Samples <- c(rep(1, 1657), rep(2, 1658), rep(3, 2738), rep(4, 2738))
clustred_data_milo <- countCells(clustred_data_milo, meta.data = as.data.frame(colData(clustred_data_milo)), sample="Samples")
clustred_data_milo <- calcNhoodDistance(clustred_data_milo, d=30, reduced.dim = "pca.corrected")
head(nhoodCounts(clustred_data_milo))
###############
age_design <- data.frame(colData(clustred_data_milo))[c("Samples", "orig.ident")]
age_design <- distinct(age_design)
rownames(age_design) <- age_design$Samples
da_results <- testNhoods(clustred_data_milo, design = ~ orig.ident, design.df = age_design)
clustred_data_milo <- buildNhoodGraph(clustred_data_milo)
head(da_results)
# plot milo da results
tsne_plot <-plotReducedDim(clustred_data_sce, colour_by="orig.ident", dimred = "TSNE")
ggplot(da_results, aes(logFC, -log10(SpatialFDR))) +
geom_point() +
geom_hline(yintercept = 1) ## Mark significance threshold (10% FDR)
nh_graph_pl <- plotNhoodGraphDA(clustred_data_milo, da_results, layout="TSNE",alpha=0.1)
tsne_plot + nh_graph_pl +
plot_layout(guides="collect")
# add cluster data
da_results <- annotateNhoods(clustred_data_milo, da_results, coldata_col = "ident")
head(da_results)
da_results <- groupNhoods(clustred_data_milo, da_results, max.lfc.delta = 5)
plotNhoodGroups(clustred_data_milo, da_results, layout="TSNE")
plotDAbeeswarm(da_results, group.by = "ident")
write.csv(x=da_results,file ='C:/Users/SavirLab/OneDrive - Technion/TalBenYakov/R Code/da_results.csv')
# multiomial error
m = multinomialCI(c(1308,1990,462,124,134,181,258,227,146,152,47,71), 0.05)
|
/BatchCorrection_MILO.R
|
no_license
|
SavirLab/AgingOvarianImmuneMilieu
|
R
| false
| false
| 12,372
|
r
|
library(dplyr)
library(Seurat)
library(SeuratData)
library(patchwork)
library(edgeR)
library(limma)
library(scater)
library(miloR)
library(statmod)
library(MultinomialCI)
# Loading the data
data_dir_old <- '/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/10XData/01_Ovary_Old_2_Oct_19'
data_dir_young <- '/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/10XData/02_Ovary_Young_26_Nov_19/filtered'
expression_matrix_old <- Read10X(data.dir = data_dir_old)
old = CreateSeuratObject(counts = expression_matrix_old, project = "Old")
expression_matrix_young <- Read10X(data.dir = data_dir_young)
young = CreateSeuratObject(counts = expression_matrix_young, project = "young")
# Merge old and young
all_data <- merge(young, y = old, add.cell.ids = c("Young", "Old"), project = "All10X")
### All data analysis ###
all_data[["percent.mt"]] <- PercentageFeatureSet(all_data, pattern = "^mt-")
VlnPlot(all_data, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3)
all_data <- subset(all_data, subset = nFeature_RNA > 200 & nFeature_RNA < 2500 & percent.mt < 10)
all_data <- NormalizeData(all_data, normalization.method = "LogNormalize", scale.factor = 10000)
all_data <- FindVariableFeatures(all_data, selection.method = "vst", nfeatures = 2000)
all.genes <- rownames(all_data)
all_data <- ScaleData(all_data, features = all.genes)
all_data <- RunPCA(all_data, features = VariableFeatures(object = all_data))
all_data <- FindNeighbors(all_data, dims = 1:15)
all_data <- FindClusters(all_data, resolution = 0.55)
all_data <- RunTSNE(all_data, dims = 1:15)
all_data <- RunUMAP(all_data, dims = 1:15)
DimPlot(all_data, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
DimPlot(all_data, reduction = "umap")
###############################################
# Write to csv
data_to_write_out <- as.data.frame(as.matrix(all_data@reductions$tsne))
fwrite(x = data_to_write_out, file = "outfile.csv")
write.csv(x=all_data@reductions$tsne@cell.embeddings,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_wo_batch.csv')
write.csv(x=all_data@meta.data$seurat_clusters,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_wo_batch_clusters.csv')
# only good clusters
# clustred_data <- subset(all_data,idents=c(13,14) , invert = TRUE)
# DimPlot(clustred_data, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
######################### Batch correction #########################
age.list <- SplitObject(all_data, split.by = "orig.ident")
features <- SelectIntegrationFeatures(object.list = age.list)
immune.anchors <- FindIntegrationAnchors(object.list = age.list, anchor.features = features)
immune.combined <- IntegrateData(anchorset = immune.anchors)
immune.combined <- ScaleData(immune.combined, features = all.genes)
immune.combined <- RunPCA(immune.combined, features = VariableFeatures(object = all_data))
immune.combined <- FindNeighbors(immune.combined, dims = 1:15)
immune.combined <- FindClusters(immune.combined, resolution = 0.55)
immune.combined <- RunTSNE(immune.combined, dims = 1:15)
DimPlot(immune.combined, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
# write.csv(x=immune.combined@reductions$tsne@cell.embeddings,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_w_batch.csv')
# write.csv(x=immune.combined@meta.data$seurat_clusters,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_w_batch_clusters.csv')
# write.csv(x=immune.combined@meta.data$orig.ident,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/age_factor.csv')
# write.csv(x=immune.combined@meta.data$seurat_clusters,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/tsnecorr_w_batch_clusters.csv')
immune.combined_new_cluster <- immune.combined
# load new clustering idx file and update values in immune seurat object
new_clustering_value <- read.csv('/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/new_clusters_idx_with_cord.csv')
# Overwrite the Seurat cluster assignments (metadata and active identities)
# with the externally computed cluster indices, cell by cell.
# seq_along() instead of 1:length() so an empty index file is a no-op
# rather than iterating over c(1, 0).
for (idx in seq_along(new_clustering_value$idx_aligned)) {
  immune.combined_new_cluster@meta.data$seurat_clusters[idx] <- new_clustering_value$idx_aligned[idx]
  immune.combined_new_cluster@active.ident[idx] <- new_clustering_value$idx_aligned[idx]
}
immune.combined_new_cluster <- subset(immune.combined_new_cluster, ident=c(15) , invert = TRUE)
DimPlot(immune.combined_new_cluster, reduction = "tsne",split.by = "orig.ident",label = "TRUE")
DimPlot(immune.combined_new_cluster, reduction = "tsne",label = "TRUE")
###########################################################################
# DotPlot for all clusters (equivalent to Fig. 1C)
immune.combined_new_cluster_tby <- all_data
new_clustering_value <- read.csv('C:/Users/SavirLab/Technion/Yoni Savir - TalBenYakov/R Code/EA/new_clusters_idx_with_cord.csv')
# NOTE(review): this loop writes the new cluster ids into all_data, but two
# lines below all_data is overwritten from the pre-loop copy
# (immune.combined_new_cluster_tby), so the reassignment here is discarded.
# The loop probably should modify immune.combined_new_cluster_tby instead —
# confirm against the intended figure.
for (idx in 1:length(new_clustering_value$idx_aligned)) {
all_data@meta.data$seurat_clusters[idx] = new_clustering_value$idx_aligned[idx]
all_data@active.ident[idx] = new_clustering_value$idx_aligned[idx]
}
# Drop cluster 14, then show the t-SNE embedding (split by age and combined).
all_data <- subset(immune.combined_new_cluster_tby, ident=c(14) , invert = TRUE)
DimPlot(all_data, reduction = "tsne",split.by = "orig.ident",label = "FALSE")
DimPlot(all_data, reduction = "tsne",label = "FALSE")
new.cluster.ids <- c("ILC1", "DNT", "CD8+CD4 T", "NKT", "Neutrophils", "NK",
"Macrophages-1", "B cells", "Dendritic cells-1","Macrophages-2",
"Dendritic cells-2","ILC2","ILC3","X","X")
names(new.cluster.ids) <- levels(immune.combined_new_cluster_tby)
immune.combined_new_cluster_tby <- RenameIdents(immune.combined_new_cluster_tby, new.cluster.ids)
cd_genes <- c("S100a8","Itgam","Adgre1","Itgax","Klrb1c",
"Ccl5","Itga1","Gata3","Il17rb","Ly6c2","Cd3e",
"Trbc2","Cd8b1","Cd4","Cd28","Tmem176b","Il7r",
"Tcrg-C2","Il2ra","Cd19")
DotPlot(object = immune.combined_new_cluster_tby, features = cd_genes)
###########################################################################
# Perform Milo on the immune.combined_new_cluster (after batch correction) - EA
immune.combined_bc_sce <- as.SingleCellExperiment(immune.combined_new_cluster) # make a sc object
immune.combined_bc_milo <- Milo(immune.combined_bc_sce)
k = 13
d = 30
immune.combined_bc_milo <- buildGraph(immune.combined_bc_milo, k = k, d = d) #k=13, d=30: n=330
immune.combined_bc_milo <- makeNhoods(immune.combined_bc_milo, prop = 0.1, k = k, d = d, refined = TRUE)
plotNhoodSizeHist(immune.combined_bc_milo)
immune.combined_bc_milo@colData$Samples <- c(rep(1, 1657), rep(2, 1658), rep(3, 2738), rep(4, 2738))
immune.combined_bc_milo <- countCells(immune.combined_bc_milo, meta.data = as.data.frame(colData(immune.combined_bc_milo)), sample="Samples")
immune.combined_bc_milo <- calcNhoodDistance(immune.combined_bc_milo, d=60)
immune.combined_bc_milo <- buildNhoodGraph(immune.combined_bc_milo)
head(nhoodCounts(immune.combined_bc_milo))
age_design <- data.frame(colData(immune.combined_bc_milo))[c("Samples", "orig.ident")]
age_design <- distinct(age_design)
rownames(age_design) <- age_design$Samples
da_results <- testNhoods(immune.combined_bc_milo, design = ~ orig.ident, design.df = age_design)
head(da_results)
# plot milo da results
tsne_plot <-plotReducedDim(immune.combined_bc_sce, colour_by="orig.ident", dimred = "TSNE")
ggplot(da_results, aes(logFC, -log10(SpatialFDR))) +
geom_point() +
geom_hline(yintercept = 1) ## Mark significance threshold (10% FDR)
nh_graph_pl <- plotNhoodGraphDA(immune.combined_bc_milo, da_results, layout="TSNE",alpha=0.1)
tsne_plot + nh_graph_pl +
plot_layout(guides="collect")
# add cluster data
da_results <- annotateNhoods(immune.combined_bc_milo, da_results, coldata_col = "ident")
head(da_results)
da_results <- groupNhoods(immune.combined_bc_milo, da_results, max.lfc.delta = 5)
plotNhoodGroups(immune.combined_bc_milo, da_results, layout="TSNE")
plotDAbeeswarm(da_results, group.by = "ident")
da_results$logFC <- (da_results$logFC * (-1) - log(5476/3315, 2))
ident_name <- da_results$ident
ident_name <- plyr::mapvalues(ident_name, from=c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"),
to=c("Neutrophils", "Macrophages", "cDC2", "cDC1", "NK cells", "ILC1", "ILC2", "ILC3", "NKT cells",
"CD8+ T", "CD4+ T", "CD8- CD4- cells", "B cells"))
da_results$ident_name <- ident_name
plotDAbeeswarm(da_results, group.by = "ident_name")
#####################
write.csv(x=da_results,file ='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/da_results.csv')
write.csv(x=immune.combined_bc_milo@nhoods, file='/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/bc_milo.csv')
# do some changes in da_results
# first we want to inverse the LFC, for examples ident '12' should be with positive LFC
# second we want to normalize the number
da_results_fc_correction <- da_results
da_results_fc_correction$logFC <- (da_results_fc_correction$logFC * (-1) )
plotDAbeeswarm(da_results_fc_correction, group.by = "ident")
#load new da after normalize data
da_results_norm <- read.csv('/Users/eliel/Library/CloudStorage/OneDrive-Technion/R Code/EA/da_results_cell_591nhoods_after_correction_v2.csv')
da_results_norm <- annotateNhoods(immune.combined_bc_milo, da_results_norm, coldata_col = "ident")
plotDAbeeswarm(da_results_norm, group.by = "ident")
ident_name <- da_results_norm$ident
ident_name <- plyr::mapvalues(ident_name, from=c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"),
to=c("Neutrophils", "Macrophages", "cDC2", "cDC1", "NK cells", "ILC1", "ILC2", "ILC3",
"NKT cells", "CD8+ T", "CD4+ T", "CD8- CD4- cells", "B cells"))
da_results_norm$ident_name <- ident_name
plotDAbeeswarm(da_results_norm, group.by = "ident_name")
################################################################################
# multiomial error
m = multinomialCI(c(1308,1990,462,124,134,181,258,227,146,152,47,71), 0.05)
# Perform Milo on the clustred data - OLD
clustred_data_sce <- as.SingleCellExperiment(clustred_data) # make a sc object
clustred_data_milo <- Milo(clustred_data_sce) # make a milo obkect
clustred_data_milo <- buildGraph(clustred_data_milo, k = 13, d = 30)
clustred_data_milo <- makeNhoods(clustred_data_milo, prop = 0.1, k = 13, d=30, refined = TRUE)
plotNhoodSizeHist(clustred_data_milo)
clustred_data_milo@colData$Samples <- c(rep(1, 1657), rep(2, 1658), rep(3, 2738), rep(4, 2738))
clustred_data_milo <- countCells(clustred_data_milo, meta.data = as.data.frame(colData(clustred_data_milo)), sample="Samples")
clustred_data_milo <- calcNhoodDistance(clustred_data_milo, d=30, reduced.dim = "pca.corrected")
head(nhoodCounts(clustred_data_milo))
###############
age_design <- data.frame(colData(clustred_data_milo))[c("Samples", "orig.ident")]
age_design <- distinct(age_design)
rownames(age_design) <- age_design$Samples
da_results <- testNhoods(clustred_data_milo, design = ~ orig.ident, design.df = age_design)
clustred_data_milo <- buildNhoodGraph(clustred_data_milo)
head(da_results)
# plot milo da results
tsne_plot <-plotReducedDim(clustred_data_sce, colour_by="orig.ident", dimred = "TSNE")
ggplot(da_results, aes(logFC, -log10(SpatialFDR))) +
geom_point() +
geom_hline(yintercept = 1) ## Mark significance threshold (10% FDR)
nh_graph_pl <- plotNhoodGraphDA(clustred_data_milo, da_results, layout="TSNE",alpha=0.1)
tsne_plot + nh_graph_pl +
plot_layout(guides="collect")
# add cluster data
da_results <- annotateNhoods(clustred_data_milo, da_results, coldata_col = "ident")
head(da_results)
da_results <- groupNhoods(clustred_data_milo, da_results, max.lfc.delta = 5)
plotNhoodGroups(clustred_data_milo, da_results, layout="TSNE")
plotDAbeeswarm(da_results, group.by = "ident")
write.csv(x=da_results,file ='C:/Users/SavirLab/OneDrive - Technion/TalBenYakov/R Code/da_results.csv')
# multiomial error
m = multinomialCI(c(1308,1990,462,124,134,181,258,227,146,152,47,71), 0.05)
|
cat( #Dale-Madsen model for lee with 2 stages
"model{
#data inputs:
# nYears
# nSites
# nAges
# y dim=c(nSites,nYears,nAges,nPasses)
# siteWidth dim=c(nSites,nYears) standardized to have mean zero and sd 1
#Priors
#initial abundance parameters
for(a in 1:nAges){
for(b in 1:3){
betaInit[b,a]~dnorm(0,0.01)
}
lambdaSigma[a]~dunif(0,5) #variance for random site effect for starting abundance
lambdaTau[a]<-1/pow(lambdaSigma[a],2)
for(s in 1:nSites){
lambdaEps[s,a]~dnorm(0,lambdaTau[a]) #random site effect for initial abundance
logLambda[s,a]<-betaInit[1,a] + betaInit[2,a]*covariates[s,1] +
betaInit[3,a]*covariates[s,2] + lambdaEps[s,a]
lambda[s,a]<-exp(logLambda[s,a])
}
}
#survival parameters
for(a in 1:nAges){
phiSigma[a]~dunif(0,10)#variance for random site effect on surival
phiTau[a]<-1/pow(phiSigma[a],2)
}
for(t in 1:(nYears-1)){
for(a in 1:nAges){
phiMu[t,a]~dnorm(0,0.37) #average phi for each time/stage
for(s in 1:nSites){
logitPhi[s,t,a]~dnorm(phiMu[t,a],phiTau[a]) #random site effect on survival constant across time
#logitPhi[s,t,a]<-phiMu[t,a]
phi[s,t,a]<-1/(1+exp(logitPhi[s,t,a]))
}
}
}
#arrivals, a=1 is reproduction (in place or somewhere else), could have a=2 for adult immigration
alphaSigma~dunif(0,10) #variation on random site effect
alphaTau<-1/pow(alphaSigma,2)
for(t in 1:(nYears-1)){
for(a in 1){
for(b in 1:3){
betaRecruit[b,t,a]~dnorm(0,0.01) #fixed effect for year
}
for(s in 1:nSites){
alphaEps[s,t,a]~dnorm(0,alphaTau) #random site effect
logAlpha[s,t,a]<-betaRecruit[1,t,a] + betaRecruit[2,t,a]*covariates[s,1] +
betaRecruit[3,t,a]*covariates[s,2] +alphaEps[s,t,a]
alpha[s,t,a]<-exp(logAlpha[s,t,a])
}
}
}
#detection, depends only on siteWidth with random site effect
for(b in 1:2){ #2 betas intercept and slope for siteWidth
for(a in 1:nAges){
beta[b,a]~dnorm(0,0.37) #Jeffrey's prior, uninformative on logit scale
}
}
for(s in 1:nSites){
for(t in 1:nYears){
for(a in 1:nAges){
logitP[s,t,a]<-beta[1,a]+beta[2,a]*siteWidthOverall[s,t]
p[s,t,a]<-1/(1+exp(logitP[s,t,a]))
}
}
}
#Likelihood
#State process
#establish initial abundance for each stage
for(s in 1:nSites){
for(a in 1:nAges){
N[s,1,a]~dpois(lambda[s,a])
}
}
#loop through subsequent years
for(s in 1:nSites){
for(t in 1:(nYears-1)){
#stages: 1=yoy, 2=1+
#yoy abundance is only reproduction (or arrival of reproduced inviduals)
N[s,t+1,1]~dpois(alpha[s,t,1])
#1+ in t+1 split into maturing yoy, surviving 1+, and arriving 1+
S[s,t,1]~dbin(phi[s,t,1],N[s,t,1]) #apparent survival of yoy
S[s,t,2]~dbin(phi[s,t,2],N[s,t,2]) #apparent survival of 1+ year olds
N[s,t+1,2]<-S[s,t,1]+S[s,t,2] #summed adults from all processes
}
}
#Observation process
for(s in 1:nSites){
for(t in 1:nYears){
for(a in 1:nAges){ #stages
y[s,t,a,1]~dbin(p[s,t,a],N[s,t,a]) #pass 1
y[s,t,a,2]~dbin(p[s,t,a],N[s,t,a]-y[s,t,a,1]) #pass 2
y[s,t,a,3]~dbin(p[s,t,a],N[s,t,a]-y[s,t,a,1]-y[s,t,a,2]) #pass 3
}
}
}
}",file="~/lee/model.txt")
|
/analyze/constructDailMadsenCovariates.R
|
no_license
|
evanchildress/lee
|
R
| false
| false
| 3,418
|
r
|
cat( #Dale-Madsen model for lee with 2 stages
"model{
#data inputs:
# nYears
# nSites
# nAges
# y dim=c(nSites,nYears,nAges,nPasses)
# siteWidth dim=c(nSites,nYears) standardized to have mean zero and sd 1
#Priors
#initial abundance parameters
for(a in 1:nAges){
for(b in 1:3){
betaInit[b,a]~dnorm(0,0.01)
}
lambdaSigma[a]~dunif(0,5) #variance for random site effect for starting abundance
lambdaTau[a]<-1/pow(lambdaSigma[a],2)
for(s in 1:nSites){
lambdaEps[s,a]~dnorm(0,lambdaTau[a]) #random site effect for initial abundance
logLambda[s,a]<-betaInit[1,a] + betaInit[2,a]*covariates[s,1] +
betaInit[3,a]*covariates[s,2] + lambdaEps[s,a]
lambda[s,a]<-exp(logLambda[s,a])
}
}
#survival parameters
for(a in 1:nAges){
phiSigma[a]~dunif(0,10)#variance for random site effect on surival
phiTau[a]<-1/pow(phiSigma[a],2)
}
for(t in 1:(nYears-1)){
for(a in 1:nAges){
phiMu[t,a]~dnorm(0,0.37) #average phi for each time/stage
for(s in 1:nSites){
logitPhi[s,t,a]~dnorm(phiMu[t,a],phiTau[a]) #random site effect on survival constant across time
#logitPhi[s,t,a]<-phiMu[t,a]
phi[s,t,a]<-1/(1+exp(logitPhi[s,t,a]))
}
}
}
#arrivals, a=1 is reproduction (in place or somewhere else), could have a=2 for adult immigration
alphaSigma~dunif(0,10) #variation on random site effect
alphaTau<-1/pow(alphaSigma,2)
for(t in 1:(nYears-1)){
for(a in 1){
for(b in 1:3){
betaRecruit[b,t,a]~dnorm(0,0.01) #fixed effect for year
}
for(s in 1:nSites){
alphaEps[s,t,a]~dnorm(0,alphaTau) #random site effect
logAlpha[s,t,a]<-betaRecruit[1,t,a] + betaRecruit[2,t,a]*covariates[s,1] +
betaRecruit[3,t,a]*covariates[s,2] +alphaEps[s,t,a]
alpha[s,t,a]<-exp(logAlpha[s,t,a])
}
}
}
#detection, depends only on siteWidth with random site effect
for(b in 1:2){ #2 betas intercept and slope for siteWidth
for(a in 1:nAges){
beta[b,a]~dnorm(0,0.37) #Jeffrey's prior, uninformative on logit scale
}
}
for(s in 1:nSites){
for(t in 1:nYears){
for(a in 1:nAges){
logitP[s,t,a]<-beta[1,a]+beta[2,a]*siteWidthOverall[s,t]
p[s,t,a]<-1/(1+exp(logitP[s,t,a]))
}
}
}
#Likelihood
#State process
#establish initial abundance for each stage
for(s in 1:nSites){
for(a in 1:nAges){
N[s,1,a]~dpois(lambda[s,a])
}
}
#loop through subsequent years
for(s in 1:nSites){
for(t in 1:(nYears-1)){
#stages: 1=yoy, 2=1+
#yoy abundance is only reproduction (or arrival of reproduced inviduals)
N[s,t+1,1]~dpois(alpha[s,t,1])
#1+ in t+1 split into maturing yoy, surviving 1+, and arriving 1+
S[s,t,1]~dbin(phi[s,t,1],N[s,t,1]) #apparent survival of yoy
S[s,t,2]~dbin(phi[s,t,2],N[s,t,2]) #apparent survival of 1+ year olds
N[s,t+1,2]<-S[s,t,1]+S[s,t,2] #summed adults from all processes
}
}
#Observation process
for(s in 1:nSites){
for(t in 1:nYears){
for(a in 1:nAges){ #stages
y[s,t,a,1]~dbin(p[s,t,a],N[s,t,a]) #pass 1
y[s,t,a,2]~dbin(p[s,t,a],N[s,t,a]-y[s,t,a,1]) #pass 2
y[s,t,a,3]~dbin(p[s,t,a],N[s,t,a]-y[s,t,a,1]-y[s,t,a,2]) #pass 3
}
}
}
}",file="~/lee/model.txt")
|
# plot4.R — four-panel summary of household power consumption for
# 2007-02-01 and 2007-02-02.
# Read the raw data; "?" marks missing values, Date/Time come in as text.
d<-read.csv2("household_power_consumption.txt",dec=".",sep=";",na.strings = "?",colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
# Combine Date + Time into a single POSIXlt timestamp stored back in Date.
d$Date<-strptime(paste(d$Date,d$Time,sep=" "),format = "%d/%m/%Y %H:%M:%S")
# Drop the now-redundant Time column (column 2).
d<-d[,-2]
# Keep only the two target days.
d<-d[as.Date(d$Date) %in% c(as.Date("2007-02-01"),as.Date("2007-02-02")),]
# English locale so the weekday labels on the x axis are in English.
Sys.setlocale("LC_ALL","English")
png("plot4.png", height = 480, width = 480)
# 2x2 grid of panels, filled row by row.
par(mfrow=c(2,2))
# Top-left: global active power over time.
plot(d$Date,d$Global_active_power,pch="",ylab="Global Active Power",xlab="", type = "l")
# Top-right: voltage over time.
plot(d$Date,d$Voltage,pch="",ylab="Voltage",xlab="datetime", type = "l")
# Bottom-left: the three sub-metering series overlaid with a legend.
plot(d$Date,d$Sub_metering_1,pch="",ylab="Energy sub metering",xlab="", type = "l")
lines(d$Date,d$Sub_metering_2,col="red")
lines(d$Date,d$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd="3", bty="n")
# Bottom-right: global reactive power over time.
plot(d$Date,d$Global_reactive_power,pch="",ylab="Global_reactive_power",xlab="datetime", type = "l")
dev.off()
|
/plot4.R
|
no_license
|
hafdraven/CourseraDataPlotting
|
R
| false
| false
| 1,024
|
r
|
d<-read.csv2("household_power_consumption.txt",dec=".",sep=";",na.strings = "?",colClasses = c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
d$Date<-strptime(paste(d$Date,d$Time,sep=" "),format = "%d/%m/%Y %H:%M:%S")
d<-d[,-2]
d<-d[as.Date(d$Date) %in% c(as.Date("2007-02-01"),as.Date("2007-02-02")),]
Sys.setlocale("LC_ALL","English")
png("plot4.png", height = 480, width = 480)
par(mfrow=c(2,2))
plot(d$Date,d$Global_active_power,pch="",ylab="Global Active Power",xlab="", type = "l")
plot(d$Date,d$Voltage,pch="",ylab="Voltage",xlab="datetime", type = "l")
plot(d$Date,d$Sub_metering_1,pch="",ylab="Energy sub metering",xlab="", type = "l")
lines(d$Date,d$Sub_metering_2,col="red")
lines(d$Date,d$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd="3", bty="n")
plot(d$Date,d$Global_reactive_power,pch="",ylab="Global_reactive_power",xlab="datetime", type = "l")
dev.off()
|
# plot3.R — energy sub-metering plot for one day of household power data.
#rm(list=ls())
data1<-read.delim("household.txt",sep = ";")
# Split the rows by calendar date (dates are stored as d/m/yyyy strings).
s<-split(data1,data1$Date)
a1<-s$`1/2/2007`
# NOTE(review): as.Date() is called without a format string; on "1/2/2007"
# style input the default parsing ("%Y/%m/%d") is almost certainly wrong —
# confirm against the actual file contents.
a1<-transform(a1, Date=as.Date(Date))
a2<-s$`2/2/2007`
a2<-transform(a2, Date=as.Date(Date))
# NOTE(review): only the 1/2/2007 subset (a1) is plotted below; a2 is
# computed but never used — verify whether both days were intended.
data2<-a1
dates<-as.character(data2$Date)
times<-as.character(data2$Time)
# Rebuild a full timestamp string and parse it for the x axis.
data2$Date.time<-paste(dates,times,sep = " ")
timetoplot<-strptime(data2$Date.time, format = "%Y-%m-%d %H:%M:%S")
png("plot3.png", width=480, height = 480)
# Three overlaid sub-metering series with a legend.
plot(timetoplot,as.numeric(data2$Sub_metering_1), type="l",xlab="",ylab="Energy sub metering")
lines(timetoplot,as.numeric(data2$Sub_metering_2), col="red")
lines(timetoplot,as.numeric(data2$Sub_metering_3), col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1, bty="n")
dev.off()
|
/figure/plot3.R
|
no_license
|
andbrand22/ExData_Plotting1
|
R
| false
| false
| 788
|
r
|
#rm(list=ls())
data1<-read.delim("household.txt",sep = ";")
s<-split(data1,data1$Date)
a1<-s$`1/2/2007`
a1<-transform(a1, Date=as.Date(Date))
a2<-s$`2/2/2007`
a2<-transform(a2, Date=as.Date(Date))
data2<-a1
dates<-as.character(data2$Date)
times<-as.character(data2$Time)
data2$Date.time<-paste(dates,times,sep = " ")
timetoplot<-strptime(data2$Date.time, format = "%Y-%m-%d %H:%M:%S")
png("plot3.png", width=480, height = 480)
plot(timetoplot,as.numeric(data2$Sub_metering_1), type="l",xlab="",ylab="Energy sub metering")
lines(timetoplot,as.numeric(data2$Sub_metering_2), col="red")
lines(timetoplot,as.numeric(data2$Sub_metering_3), col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"), lty=1, bty="n")
dev.off()
|
# loading packages
library(shiny)
library(haven)
library(tidyverse)
library(shinythemes)
library(maps)
library(mapproj)
# server
# Shiny server: load the Eingaben (petitions) data set, collapse it to one
# observation per county and year, and derive a petition density per day and
# 1000 residents.
server <- function(input, output) {
  # data set
  pgsf <- read_dta("PGSFV2.dta")
  # Relevant variables.
  # Fix: midyear, myear, district_id and district_lab are used by the
  # group_by()/summarise() calls below but were dropped by the original
  # select(), which made the pipeline fail on unknown columns.
  pgsf_app <- pgsf %>%
    select("county_id", "county_lab", "start",
           "end", "rperiod", "eingtotal", "pop",
           "midyear", "myear", "district_id", "district_lab") %>%
    # Keep only observations with eingtotal non-missing
    filter(!is.na(eingtotal)) %>%
    # Group observations by county_id and midyear.
    # county_lab is not necessary, but we need it later on.
    group_by(county_lab, county_id, midyear) %>%
    # Collapse observations:
    # If there are multiple observations for one county
    # for a specific year (e.g. 1st & 2nd quarter),
    # combine them into one observation
    summarise(eingtotal = sum(eingtotal, na.rm = TRUE),
              rperiod = sum(rperiod, na.rm = TRUE),
              myear = max(myear, na.rm = TRUE),
              district_id = first(district_id),
              district_lab = first(district_lab),
              pop = first(pop)) %>%
    # Create variable eingdens (Eingaben density):
    # Eingaben per day and 1000 people
    mutate(eingdens = (eingtotal / rperiod) / (pop / 1000)) %>%
    ungroup()
  # Load server files for different tabPanels
  source('tab_kreisauswahl.R', local = TRUE, encoding = "utf-8")
}
|
/server.R
|
no_license
|
faclass/App
|
R
| false
| false
| 1,368
|
r
|
# loading packages
library(shiny)
library(haven)
library(tidyverse)
library(shinythemes)
library(maps)
library(mapproj)
# server
# Shiny server: load the Eingaben (petitions) data set, collapse it to one
# observation per county and year, and derive a per-capita petition density.
server <- function(input, output) {
# data set
pgsf <- read_dta("PGSFV2.dta")
# relevant variables
# NOTE(review): midyear, myear, district_id and district_lab are used by
# group_by()/summarise() below but are not in this select(), so the
# pipeline should fail on unknown columns — confirm and extend the select.
pgsf_app <- pgsf %>%
select("county_id", "county_lab", "start",
"end", "rperiod", "eingtotal", "pop") %>%
# Keep only observations with eingtotal non-missing
filter(!is.na(eingtotal)) %>%
# Group observations by county_id and midyear.
# county_lab is not necessary, but we need it later on.
group_by(county_lab, county_id, midyear) %>%
# Collapse observations:
# If there are multiple obersvations for one county
# for a specific year (e.g. 1st & 2nd quarter),
# combine them into one observation
summarise(eingtotal = sum(eingtotal, na.rm = TRUE),
rperiod = sum(rperiod, na.rm = TRUE),
myear = max(myear, na.rm = TRUE),
district_id = first(district_id),
district_lab = first(district_lab),
pop = first(pop)) %>%
# Create variable eingdens (Eingaben density).
# Eingaben per day and 1000 people
mutate(eingdens = (eingtotal / rperiod) / (pop/1000 ) ) %>%
ungroup()
# Load server files for different tabPanels
source('tab_kreisauswahl.R', local = TRUE, encoding = "utf-8")
}
|
wine_color = wine %>%
group_by(color)%>%
select(-ID)%>%
summarize_all(mean)%>%
column_to_rownames(var = "color")
wine_qual = wine %>%
group_by(quality)%>%
select(-ID)%>%
summarize_all(mean)%>%
column_to_rownames(var = "quality")
pca_color = prcomp(wine_color, rank=5, scale=TRUE)
loadings_color = pca_color$rotation
scores_color = pca_color$x
summary(pca_color)
# PCA on the per-quality-level mean wine attributes.
# Fix: loadings_qual/scores_qual were read off the raw data frame
# (wine_qual) instead of the fitted prcomp object; the rotation and x
# components only exist on the prcomp result, so both were NULL.
pca_qual = prcomp(wine_qual, rank=5, scale=TRUE)
loadings_qual = pca_qual$rotation
scores_qual = pca_qual$x
summary(pca_qual)
dat = readLines("https://raw.githubusercontent.com/jgscott/ECO395M/master/data/groceries.txt")
dat = as.data.frame(do.call(rbind,strsplit(dat, split = ",")), stringsAsFactors=FALSE)
groceries = lapply(dat, unique)
grocery_df<- data.frame(cart = rep(groceries$cart, sapply(lists, length)), V2 = unlist(lists))
authors = dir('/Users/hannahjones/Documents/GitHub/ECO395M/data/ReutersC50/C50train')
authors_df = as.data.frame(authors)
#joeys
# Reader for tm: wrap the lines of a plain-text file in a PlainTextDocument,
# tagging it with the file path as id and English as the language.
readerPlain <- function(fname) {
  file_lines <- readLines(fname)
  readPlain(elem = list(content = file_lines), id = fname, language = "en")
}
## Rolling two directories together into a single training corpus
train_dirs = Sys.glob('/Users/josephherrera/Desktop/ECO395M/data/ReutersC50/C50train/*')
train_dirs = train_dirs[c(1:50)]
file_list = NULL
labels_train = NULL
for(author in train_dirs) {
author_name = substring(author, first=1)
files_to_add = Sys.glob(paste0(author, '/*.txt'))
file_list = append(file_list, files_to_add)
labels_train = append(labels_train, rep(author_name, length(files_to_add)))
}
train_dirs
corpus_train = Corpus(DirSource(train_dirs))
corpus_train = corpus_train %>% tm_map(., content_transformer(tolower)) %>%
tm_map(., content_transformer(removeNumbers)) %>%
tm_map(., content_transformer(removeNumbers)) %>%
tm_map(., content_transformer(removePunctuation)) %>%
tm_map(., content_transformer(stripWhitespace)) %>%
tm_map(., content_transformer(removeWords), stopwords("SMART"))
DTM_train = DocumentTermMatrix(corpus_train)
DTM_train # some basic summary statistics
# Parse out words in the bottom five percent of all terms
DTM_train2 = removeSparseTerms(DTM_train, 0.95)
DTM_train2
# Data frame of 2500 variables and 641 variables (The first matrix is completed)
DF_train <- data.frame(as.matrix(DTM_train2), stringsAsFactors=FALSE)
# I need a vector of labels
labels_train = append(labels_train, rep(author_name, length(files_to_add)))
#Clean the label names
author_names = labels_train %>%
{ strsplit(., '/', fixed=TRUE) } %>%
{ lapply(., tail, n=2) } %>%
{ lapply(., paste0, collapse = '') } %>%
unlist
author_names = as.data.frame(author_names)
author_names = gsub("C([0-9]+)train", "\\1", author_names$author_names)
author_names = gsub("([0-9]+)", "", author_names)
author_names = as.data.frame(author_names)
split_names <- colsplit(mynames, "(?<=\\p{L})(?=[\\d+$])", c("Author", "File"))
# Hierarchical clustering helper: build a distance matrix with the given
# distance metric (dmeth) and cluster it with the given linkage (method).
hclustfunc <- function(x, method = "single", dmeth = "euclidean") {
  dist_mat <- dist(x, method = dmeth)
  hclust(dist_mat, method = method)
}
fit <- hclustfunc(X)
tweet_dist = dist(X)
hier_tweet = hclust(tweet_dist, method = 'average')
plot(hier_tweet, cex=0.8)
cluster1 = cutree(fit, k=5)
summary(factor(cluster1))
|
/hw4/scratch_4.R
|
no_license
|
hannahjonesut/HannahHW
|
R
| false
| false
| 3,211
|
r
|
wine_color = wine %>%
group_by(color)%>%
select(-ID)%>%
summarize_all(mean)%>%
column_to_rownames(var = "color")
wine_qual = wine %>%
group_by(quality)%>%
select(-ID)%>%
summarize_all(mean)%>%
column_to_rownames(var = "quality")
pca_color = prcomp(wine_color, rank=5, scale=TRUE)
loadings_color = pca_color$rotation
scores_color = pca_color$x
summary(pca_color)
# PCA on the per-quality-level mean wine attributes.
# Fix: loadings_qual/scores_qual were read off the raw data frame
# (wine_qual) instead of the fitted prcomp object; the rotation and x
# components only exist on the prcomp result, so both were NULL.
pca_qual = prcomp(wine_qual, rank=5, scale=TRUE)
loadings_qual = pca_qual$rotation
scores_qual = pca_qual$x
summary(pca_qual)
dat = readLines("https://raw.githubusercontent.com/jgscott/ECO395M/master/data/groceries.txt")
dat = as.data.frame(do.call(rbind,strsplit(dat, split = ",")), stringsAsFactors=FALSE)
groceries = lapply(dat, unique)
grocery_df<- data.frame(cart = rep(groceries$cart, sapply(lists, length)), V2 = unlist(lists))
authors = dir('/Users/hannahjones/Documents/GitHub/ECO395M/data/ReutersC50/C50train')
authors_df = as.data.frame(authors)
#joeys
readerPlain = function(fname){
readPlain(elem=list(content=readLines(fname)),
id=fname, language='en') }
## Rolling two directories together into a single training corpus
train_dirs = Sys.glob('/Users/josephherrera/Desktop/ECO395M/data/ReutersC50/C50train/*')
train_dirs = train_dirs[c(1:50)]
file_list = NULL
labels_train = NULL
for(author in train_dirs) {
author_name = substring(author, first=1)
files_to_add = Sys.glob(paste0(author, '/*.txt'))
file_list = append(file_list, files_to_add)
labels_train = append(labels_train, rep(author_name, length(files_to_add)))
}
train_dirs
corpus_train = Corpus(DirSource(train_dirs))
corpus_train = corpus_train %>% tm_map(., content_transformer(tolower)) %>%
tm_map(., content_transformer(removeNumbers)) %>%
tm_map(., content_transformer(removeNumbers)) %>%
tm_map(., content_transformer(removePunctuation)) %>%
tm_map(., content_transformer(stripWhitespace)) %>%
tm_map(., content_transformer(removeWords), stopwords("SMART"))
DTM_train = DocumentTermMatrix(corpus_train)
DTM_train # some basic summary statistics
# Parse out words in the bottom five percent of all terms
DTM_train2 = removeSparseTerms(DTM_train, 0.95)
DTM_train2
# Data frame of 2500 variables and 641 variables (The first matrix is completed)
# Dense document-term data frame (documents x retained terms).
DF_train <- data.frame(as.matrix(DTM_train2), stringsAsFactors=FALSE)
# I need a vector of labels
# NOTE(review): this re-appends the labels of the *last* author processed by
# the loop above, duplicating entries -- looks like an accidental leftover.
labels_train = append(labels_train, rep(author_name, length(files_to_add)))
# Clean the label names: keep the last two path components, join them, then
# strip the "C<nn>train" prefix and any remaining digits.
author_names = labels_train %>%
  { strsplit(., '/', fixed=TRUE) } %>%
  { lapply(., tail, n=2) } %>%
  { lapply(., paste0, collapse = '') } %>%
  unlist
author_names = as.data.frame(author_names)
author_names = gsub("C([0-9]+)train", "\\1", author_names$author_names)
author_names = gsub("([0-9]+)", "", author_names)
author_names = as.data.frame(author_names)
# NOTE(review): 'mynames' is not defined anywhere in this script (colsplit is
# from reshape/reshape2) -- presumably author_names was meant here; confirm.
split_names <- colsplit(mynames, "(?<=\\p{L})(?=[\\d+$])", c("Author", "File"))
# Convenience wrapper: build a distance matrix over the rows of x (default
# euclidean) and hierarchically cluster it (default single linkage).
# Returns an hclust tree.
hclustfunc <- function(x, method = "single", dmeth = "euclidean") {
  dists <- dist(x, method = dmeth)
  hclust(dists, method = method)
}
# Single-linkage tree over the rows of X (X is defined earlier in the full
# script, outside this excerpt).
fit <- hclustfunc(X)
# Average-linkage dendrogram of the same distances, for visual comparison.
tweet_dist = dist(X)
hier_tweet = hclust(tweet_dist, method = 'average')
plot(hier_tweet, cex=0.8)
# Cut the single-linkage tree into 5 clusters and inspect cluster sizes.
cluster1 = cutree(fit, k=5)
summary(factor(cluster1))
|
#
# (c) 2012 -- 2014 Georgios Gousios <gousiosg@gmail.com>
#
# BSD licensed, see LICENSE in top level dir
#
# Predicting merge_time of pull requests
# Project helpers: package loading, command-line parsing, shared utilities and
# classifier training wrappers (rf.train / multinom.train / bayes.train used
# below -- presumably defined in R/classification.R).
source(file = "R/packages.R")
source(file = "R/cmdline.R")
source(file = "R/utils.R")
source(file = "R/classification.R")
library(pROC)   # multiclass.roc / AUC
library(sqldf)  # SQL over data frames for per-level accuracy
# Formula: predict binned merge time from project, pull-request and developer
# features.
merge.time.model = merge_time ~ team_size + files_changed +
  perc_external_contribs + sloc + src_churn + test_churn +
  commits_on_files_touched + test_lines_per_kloc + prev_pullreqs +
  requester_succ_rate + main_team_member + conflict + forward_links
# Build train/test splits for merge-time prediction.
# df is enriched via prepare.project.df(), restricted to merged pull requests,
# merge time is discretized into the ordered bins/labels given, num_samples
# rows are drawn at random, and the result is split 90/10 into train and test.
prepare.data.mergetime <- function(df, num_samples = nrow(df),
                                   bins = c(0, mean(df$mergetime_minutes), max(df$mergetime_minutes)),
                                   labels = c('FAST', 'SLOW')) {
  prepared <- prepare.project.df(df)
  # Keep only pull requests that were actually merged.
  prepared <- subset(prepared, merged == TRUE)
  # Cap the sample size strictly below the number of available rows.
  if (num_samples >= nrow(prepared)) {
    num_samples <- nrow(prepared) - 1
  }
  # Discretize the continuous merge time into ordered categories.
  prepared$merge_time <- cut(prepared$mergetime_minutes, breaks = bins,
                             labels = labels, ordered_result = TRUE)
  prepared <- prepared[sample(nrow(prepared), size = num_samples), ]
  # 90% training / 10% test split.
  cutoff <- floor(nrow(prepared) * .90)
  list(train = prepared[1:cutoff, ],
       test = prepared[(cutoff + 1):nrow(prepared), ])
}
# Two bins (FAST/SLOW), split at the mean merge time (the default binning of
# prepare.data.mergetime; the original comment said "median").
prepare.data.mergetime.2bins <- function(df, num_samples = nrow(df)) {
  merged_only <- subset(df, merged == T)
  prepare.data.mergetime(merged_only, num_samples)
}
# Three bins: merged within an hour, within a day, or later.
prepare.data.mergetime.3bins <- function(df, num_samples = nrow(df)) {
  merged_only <- subset(df, merged == T)
  edges <- c(-1, 60, 1440, max(merged_only$mergetime_minutes) + 1)
  prepare.data.mergetime(merged_only, num_samples, edges,
                         c('HOUR', 'DAY', 'REST'))
}
# Four bins: merged within an hour, a day, a week, or later.
prepare.data.mergetime.4bins <- function(df, num_samples = nrow(df)) {
  merged_only <- subset(df, merged == T)
  edges <- c(-1, 60, 1440, 10080, max(merged_only$mergetime_minutes) + 1)
  prepare.data.mergetime(merged_only, num_samples, edges,
                         c('HOUR', 'DAY', 'WEEK', 'REST'))
}
# Evaluate predictions against the true merge_time labels.
# Returns c(name, auc, mean per-level accuracy); returns c(name, 0, 0) when
# the classifier did not predict at least one item for every level (callers
# filter these rows out via auc > 0).
format.results <- function(name, test, predictions) {
  metrics = data.frame(actual = test$merge_time,
                       predicted = as.ordered(predictions))
  if (length(levels(metrics$actual)) == length(levels(metrics$predicted))) {
    metrics$correct <- metrics$actual == metrics$predicted
    # Per-level accuracy = correct / (correct + incorrect), joined on level.
    # NOTE(review): the inner join drops any level that has zero correct or
    # zero incorrect predictions, so the mean below can silently skip levels.
    metric.stats <- sqldf("select a.actual, a.incor, b.cor, b.cor * 1.0/(a.incor + b.cor) as accuracy from (select actual, count(*) as incor from metrics m where correct = 0 group by actual) a, (select actual, count(*) as cor from metrics m where correct = 1 group by actual) b where a.actual = b.actual")
    roc <- multiclass.roc(predictions, test$merge_time)
    auc <- as.numeric(roc$auc)
    printf("%s auc: %f, acc: %f", name, auc, mean(metric.stats$accuracy))
    c(name, auc, mean(metric.stats$accuracy))
  } else {
    printf("%s failed to classify all levels", name)
    # Classifier failed to predict at least one item to some level
    c(name, 0, 0)
  }
}
# Train and evaluate three classifiers (random forest, multinomial regression,
# naive Bayes) for merge-time prediction with the given model formula.
# Returns a data frame with one row per classifier (name, AUC, mean per-level
# accuracy), keeping only classifiers that classified every level (auc > 0).
run.classifiers.mergetime <- function(model, train, test) {
  # (Removed unused 'sample_size' local -- it was computed but never read.)
  results = data.frame(classifier = rep(NA, 3), auc = rep(0, 3), acc = rep(0, 3),
                       stringsAsFactors=FALSE)
  ### Random Forest
  rfmodel <- rf.train(model, train)
  predictions <- predict(rfmodel, test, type="response")
  results[1,] <- format.results("randomforest", test, predictions)
  ### Multinomial regression
  multinommodel <- multinom.train(model, train)
  predictions <- predict(multinommodel, test, type="class")
  results[2,] <- format.results("multinomregr", test, predictions)
  ### Naive Bayes
  bayesModel <- bayes.train(model, train)
  predictions <- predict(bayesModel, test)
  results[3,] <- format.results("naivebayes", test, predictions)
  # Drop classifiers that failed to classify all levels (auc == 0).
  subset(results, auc > 0)
}
|
/R/merge-time.R
|
permissive
|
igorsteinmacher/pullreqs
|
R
| false
| false
| 4,103
|
r
|
#
# (c) 2012 -- 2014 Georgios Gousios <gousiosg@gmail.com>
#
# BSD licensed, see LICENSE in top level dir
#
# Predicting merge_time of pull requests
# Project helpers: package loading, command-line parsing, shared utilities and
# classifier training wrappers (rf.train / multinom.train / bayes.train used
# below -- presumably defined in R/classification.R).
source(file = "R/packages.R")
source(file = "R/cmdline.R")
source(file = "R/utils.R")
source(file = "R/classification.R")
library(pROC)   # multiclass.roc / AUC
library(sqldf)  # SQL over data frames for per-level accuracy
# Formula: predict binned merge time from project, pull-request and developer
# features.
merge.time.model = merge_time ~ team_size + files_changed +
  perc_external_contribs + sloc + src_churn + test_churn +
  commits_on_files_touched + test_lines_per_kloc + prev_pullreqs +
  requester_succ_rate + main_team_member + conflict + forward_links
# Build train/test splits for merge-time prediction: enrich df via
# prepare.project.df(), keep merged pull requests only, discretize merge time
# into the given ordered bins, draw num_samples rows at random, then split
# 90/10 into train and test sets.
prepare.data.mergetime <- function(df, num_samples = nrow(df),
                                   bins = c(0, mean(df$mergetime_minutes), max(df$mergetime_minutes)),
                                   labels = c('FAST', 'SLOW')) {
  # Prepare the data for prediction
  a <- prepare.project.df(df)
  # Keep only pull requests that were actually merged.
  a <- subset(a, merged == TRUE)
  # Cap the sample size strictly below the number of available rows.
  if (num_samples >= nrow(a)) {
    num_samples = nrow(a) - 1
  }
  # Discretize the continuous merge time into ordered categories.
  a$merge_time <- cut(a$mergetime_minutes, breaks = bins, labels = labels,
                      ordered_result = T)
  a <- a[sample(nrow(a), size=num_samples), ]
  # split data into training and test data (90% / 10%)
  a.train <- a[1:floor(nrow(a)*.90), ]
  a.test <- a[(floor(nrow(a)*.90)+1):nrow(a), ]
  list(train=a.train, test=a.test)
}
# Bining to fast and slow using the default bins (split at the *mean* merge
# time -- the original comment said "median", but prepare.data.mergetime
# defaults to mean(df$mergetime_minutes)).
prepare.data.mergetime.2bins <- function(df, num_samples = nrow(df)) {
  merged <- subset(df, merged == T)
  prepare.data.mergetime(merged, num_samples)
}
# Bining to 1 hour, 1 day and rest
prepare.data.mergetime.3bins <- function(df, num_samples = nrow(df)) {
  merged <- subset(df, merged == T)
  prepare.data.mergetime(merged, num_samples,
                         c(-1,60,1440,max(merged$mergetime_minutes) + 1),
                         c('HOUR', 'DAY', 'REST'))
}
# Bining to 1 hour, 1 day, 1 week and rest
prepare.data.mergetime.4bins <- function(df, num_samples = nrow(df)) {
  merged <- subset(df, merged == T)
  prepare.data.mergetime(merged, num_samples,
                         c(-1,60,1440,10080,max(merged$mergetime_minutes) + 1),
                         c('HOUR', 'DAY', 'WEEK', 'REST'))
}
# Evaluate predictions against the true merge_time labels.
# Returns c(name, auc, mean per-level accuracy); returns c(name, 0, 0) when
# the classifier did not predict at least one item for every level (callers
# filter these rows out via auc > 0).
format.results <- function(name, test, predictions) {
  metrics = data.frame(actual = test$merge_time,
                       predicted = as.ordered(predictions))
  if (length(levels(metrics$actual)) == length(levels(metrics$predicted))) {
    metrics$correct <- metrics$actual == metrics$predicted
    # Per-level accuracy = correct / (correct + incorrect), joined on level.
    # NOTE(review): the inner join drops any level that has zero correct or
    # zero incorrect predictions, so the mean below can silently skip levels.
    metric.stats <- sqldf("select a.actual, a.incor, b.cor, b.cor * 1.0/(a.incor + b.cor) as accuracy from (select actual, count(*) as incor from metrics m where correct = 0 group by actual) a, (select actual, count(*) as cor from metrics m where correct = 1 group by actual) b where a.actual = b.actual")
    roc <- multiclass.roc(predictions, test$merge_time)
    auc <- as.numeric(roc$auc)
    printf("%s auc: %f, acc: %f", name, auc, mean(metric.stats$accuracy))
    c(name, auc, mean(metric.stats$accuracy))
  } else {
    printf("%s failed to classify all levels", name)
    # Classifier failed to predict at least one item to some level
    c(name, 0, 0)
  }
}
# Returns a dataframe with one row per classifier (name, AUC, mean per-level
# accuracy); classifiers that failed to classify every level are dropped.
# NOTE(review): despite the original header comment, no ROC curves are
# actually plotted here.
run.classifiers.mergetime <- function(model, train, test) {
  sample_size = nrow(train) + nrow(test)  # NOTE(review): computed but unused
  results = data.frame(classifier = rep(NA, 3), auc = rep(0, 3), acc = rep(0, 3),
                       stringsAsFactors=FALSE)
  #
  ### Random Forest
  rfmodel <- rf.train(model, train)
  predictions <- predict(rfmodel, test, type="response")
  results[1,] <- format.results("randomforest", test, predictions)
  #
  ### Multinomial regression
  multinommodel <- multinom.train(model, train)
  predictions <- predict(multinommodel, test, type="class")
  results[2,] <- format.results("multinomregr", test, predictions)
  #
  ### Naive Bayes
  bayesModel <- bayes.train(model, train)
  predictions <- predict(bayesModel, test)
  results[3,] <- format.results("naivebayes", test, predictions)
  # Keep only classifiers with a valid AUC.
  subset(results, auc > 0)
}
|
\name{as_conc}
\alias{as_conc}
\title{
Coerce data frame to a concordance object
}
\description{
Coerces a data frame to an object of the class \code{conc}.
}
\usage{
as_conc(x,
left = NA,
match = NA,
right = NA,
keep_original = FALSE,
...)
}
\arguments{
\item{x}{
a data frame.
}
\item{left}{
the name of the column in \code{x} that contains the left co-text of
the concordance. If \code{is.na(left)}, then this column is assumed
to have the name \code{"left"}.
}
\item{match}{
the name of the column in \code{x} that contains the match of
the concordance. If \code{is.na(match)}, then this column is assumed
to have the name \code{"match"}.
}
\item{right}{
the name of the column in \code{x} that contains the right co-text of
the concordance. If \code{is.na(right)}, then this column is assumed
to have the name \code{"right"}.
}
\item{keep_original}{
if any of the arguments \code{left}, \code{match}, or \code{right} has
a non-\code{NA} value different from resp. \code{"left"}, \code{"match"},
or \code{"right"}, and moreover \code{keep_original} is \code{FALSE},
then columns with those names are not kept in the output of
\code{as_conc}.
}
\item{...}{
any additional arguments.
}
}
\value{
This function returns an object of the class \code{"conc"}.
}
\examples{
(conc_data <- conc_re('\\\\w+', 'A very small corpus.', as_text = TRUE))
df <- as.data.frame(conc_data)
as_conc(df)
}
|
/man/as_conc.Rd
|
no_license
|
wai-wong-reimagine/mclm
|
R
| false
| false
| 1,499
|
rd
|
\name{as_conc}
\alias{as_conc}
\title{
Coerce data frame to a concordance object
}
\description{
Coerces a data frame to an object of the class \code{conc}.
}
\usage{
as_conc(x,
left = NA,
match = NA,
right = NA,
keep_original = FALSE,
...)
}
\arguments{
\item{x}{
a data frame.
}
\item{left}{
the name of the column in \code{x} that contains the left co-text of
the concordance. If \code{is.na(left)}, then this column is assumed
to have the name \code{"left"}.
}
\item{match}{
the name of the column in \code{x} that contains the match of
the concordance. If \code{is.na(match)}, then this column is assumed
to have the name \code{"match"}.
}
\item{right}{
the name of the column in \code{x} that contains the right co-text of
the concordance. If \code{is.na(right)}, then this column is assumed
to have the name \code{"right"}.
}
\item{keep_original}{
if any of the arguments \code{left}, \code{match}, or \code{right} has
a non-\code{NA} value different from resp. \code{"left"}, \code{"match"},
or \code{"right"}, and moreover \code{keep_original} is \code{FALSE},
then columns with those names are not kept in the output of
\code{as_conc}.
}
\item{...}{
any additional arguments.
}
}
\value{
This function returns an object of the class \code{"conc"}.
}
\examples{
(conc_data <- conc_re('\\\\w+', 'A very small corpus.', as_text = TRUE))
df <- as.data.frame(conc_data)
as_conc(df)
}
|
#####################################################################################
### PLSR models AISA ################################################################
### Anna K Schweiger July 31 2020 ###################################################
### with lines of code and the VIPjh function from Shawn P. Serbin's awesome GitHub repo #
### https://github.com/serbinsh/Spectra_PLSR_models #################################
library(pls)
library(reshape)
library(agricolae)
library(caret)
### Variable Importance in Projection (VIP) score of predictor (band) j using
### the first h components of a single-response PLSR model fitted with the
### orthogonal scores algorithm (pls::plsr(..., method = "oscorespls")).
###   object: fitted plsr model (needs $method, $Yloadings, $scores,
###           $loading.weights)
###   j:      index of the predictor to score
###   h:      number of components to use
### Returns the scalar VIP score. Stops for other algorithms or multi-response
### models.
VIPjh <- function(object, j, h) {
  if (object$method != "oscorespls") stop("Only implemented for orthogonal scores algorithm. Refit with 'method = \"oscorespls\"'")
  if (nrow(object$Yloadings) > 1) stop("Only implemented for single-response models")
  yload <- c(object$Yloadings)[1:h]               # Y-loadings b_1..b_h
  # FIX: avoid naming a local 'T' (masks the TRUE shorthand).
  scoremat <- object$scores[, 1:h, drop = FALSE]  # X-scores matrix
  ss <- yload^2 * colSums(scoremat^2)             # variance explained per component
  wts <- object$loading.weights[, 1:h, drop = FALSE]
  wnorm2 <- colSums(wts^2)
  sqrt(nrow(wts) * sum(ss * wts[j, ]^2 / wnorm2) / sum(ss))
}
### Read data
# allData <- read.csv("./plot_chem_spec.csv")
allData <- read.csv("./subplot_chem_spec.csv")
# allData <- read.csv("./transect_chem_spec.csv")
inBands <- names(allData)[10:187] # spectral bands (predictor columns) to work with
inVars <- names(allData[c(3:9)]) # traits (response columns) to work with
###############################################
### Find number of components
### For each trait: repeatedly fit cross-validated PLSR models with up to 15
### components, record PRESS and RMSEP per component count, then run Tukey HSD
### tests and write annotated boxplots to guide the ncomp choice.
for (i in 1:length(inVars)){
  inVar <- inVars[i]
  # Formula: trait ~ band1 + band2 + ...
  iForm <- paste(inBands,collapse="+")
  iForm <- as.formula(paste(inVar,"~",iForm))
  ### Separate data into model CAL/VAL and IVAL (independent or external validation) data
  set.seed(19841002+i)  # trait-specific but reproducible partition
  rand <- createDataPartition(allData[, inVars[i]], p=0.75, list=F) # here: 75% data for model CAL/VAL
  subData <- allData[rand,] ## CAL
  tstData <- allData[-rand,] ## IVAL
  ### Calibrate the model
  nComps <- 15 # max no of components to be evaluated
  nsims <- 50 # no of simulations
  outMat <- matrix(data=NA,nrow=nsims,ncol=nComps)    # PRESS per simulation x ncomp
  outMatRM <- matrix(data=NA,nrow=nsims,ncol=nComps)  # RMSEP per simulation x ncomp
  for (nsim in seq(nsims)){
    print(nsim)
    flush.console()
    segs <- cvsegments(N = nrow(subData), k = 5, type="random") # k fold CV, 5 folds = 80:20 split
    resNCOMP <- plsr(iForm,data=subData,ncomp=nComps,
                     validation="CV", segments=segs,
                     method="oscorespls")
    resPRESS <- as.vector(resNCOMP$validation$PRESS)
    outMat[nsim,seq(resNCOMP$validation$ncomp)] <-resPRESS
    resRMSEP <- as.numeric(RMSEP(resNCOMP,estimate="CV",intercept=F)$val)
    outMatRM[nsim,seq(resNCOMP$validation$ncomp)] <-resRMSEP
  }
  ### PRESS statistic: Tukey test - sign diff among no of components?
  pressDF <- as.data.frame(outMat)
  names(pressDF) <- as.character(seq(nComps))
  pressDFres <- melt(pressDF)
  modi <- lm (value~variable, pressDFres)
  tuk <- HSD.test (modi,"variable")
  tuk_dat <- as.data.frame(tuk$groups)
  tuk_dat$var <- as.numeric(row.names(tuk_dat))
  tuk_dat <- tuk_dat[order(tuk_dat$var,decreasing = F),]
  letters <- as.character(tuk_dat$groups)
  # Boxplot of PRESS by number of components, annotated with Tukey groups.
  jpeg(paste("./", inVar, "_PRESS.jpg",sep=""),width=6,height=4.5,units="in",res=200)
  par(bty="l")
  boxplot(pressDFres$value~pressDFres$variable, xlab="n Components",ylab="PRESS",main=inVar)
  text(x=1:max(as.numeric(pressDFres$variable)), y=rep(max(pressDFres$value),15),letters)
  dev.off()
  ### RMSEP: Tukey test
  RMDF <- as.data.frame(outMatRM)
  names(RMDF) <- as.character(seq(nComps))
  RMDFres <- melt(RMDF)
  modi <- lm (value~variable, RMDFres)
  tuk <- HSD.test (modi,"variable")
  tuk_dat <- as.data.frame(tuk$groups)
  tuk_dat$var <- as.numeric(row.names(tuk_dat))
  tuk_dat <- tuk_dat[order(tuk_dat$var,decreasing = F),]
  letters <- as.character(tuk_dat$groups)
  # Boxplot of RMSEP by number of components, annotated with Tukey groups.
  jpeg(paste("./", inVar, "_RMSEP.jpg",sep=""),width=6,height=4.5,units="in",res=200)
  par(bty="l")
  boxplot(RMDFres$value~RMDFres$variable, xlab="n Components",ylab="RMSEP",main=inVar)
  text(x=1:max(as.numeric(RMDFres$variable)), y=rep(max(RMDFres$value),15),letters)
  dev.off()
}
#######################################
###### Final models ###################
### Make dataframe containing selected no of components (=data) per trait
### (one value per trait in inVars, chosen from the diagnostics above).
compis <- as.data.frame(matrix(nrow=1, ncol=length(inVars),dimnames = list(NULL, c(inVars)),
                               data=c(3,4,2,4,2,3,2)))
### One trait at a time ...
k <- 1
inVar <- inVars[k]
nCompss <- compis[, inVar] # nCompss could also be a vector of a number of options for ncomps
### Separate data into CAL and IVAL data, use same seed same as above
set.seed(19841002+k)
rand <- createDataPartition(allData[, inVars[k]], p=0.75, list=F) ## 75% data for calibration
subData <- allData[rand,] ## CAL
tstData <- allData[-rand,] ## IVAL
iForm <- paste(inBands,collapse="+")
iForm <- as.formula(paste(inVar,"~",iForm))
set.seed(1840)
### Final model for the selected trait: 500 resamples of the 80/20
### calibration/validation split; coefficients, VIPs, fit statistics,
### predictions and plots are written to disk for each ncomp choice.
for (i in 1:length(nCompss)){
  nsims <- 500
  nComps <- nCompss[i]
  nCutmod <- floor(0.8*nrow(subData)) ## 80% data for calibration, 20% for CV
  coefMat <- matrix(data=NA,nrow=nsims,ncol=length(inBands)+1)   # raw coefficients (+ intercept)
  coefStd <- matrix(data=NA,nrow=nsims,ncol=length(inBands)+1)   # standardized coefficients
  vipMat <- matrix(data=NA,ncol=nsims,nrow=length(inBands))      # VIP per band per simulation
  statMat <- matrix(data=NA,nrow=nsims,ncol=10)                  # fit/validation statistics
  for (nsim in seq(nsims)){
    print(nsim)
    flush.console()
    set.seed (19818411+nsim)
    randx <-createDataPartition(subData[, inVars[k]], p=0.8, list=F)
    # NOTE(review): the partition indices come from subData but rows are taken
    # from allData -- 'subData[randx,]' was probably intended; confirm.
    calData <- allData[randx,] ## CAL
    valData <- allData[-randx,] ## VAL (internal)
    resX <- plsr(iForm,data=calData,ncomp=nComps,method="oscorespls") ###
    resS <- plsr(iForm,data=calData,ncomp=nComps, method="oscorespls",scale=T) ### scaled by SD
    ### Coefficients (raw and standardized)
    coefs <- as.vector(coef(resX,ncomp=nComps,intercept=T))
    zcoef <- as.vector(coef(resS,ncomp=nComps,intercept=T))
    coefMat[nsim,] <- coefs
    coefStd[nsim,] <- zcoef ### standardized coeffis for importance of wvls
    ### VIP
    vip <- c()
    for (j in seq(length(inBands))){
      vip <- c(vip,VIPjh(resS,j,nComps))
    }
    vipMat[,nsim] <- vip
    ### Model stats
    fitX <- as.vector(unlist(resX$fitted.values[,1,nComps])) ### fitted values
    preX <- as.vector(unlist(predict(resX,ncomp=nComps,newdata=valData))) ### internal val
    fitBias <- mean(calData[,inVar]-fitX)
    valBias <- mean(valData[,inVar]-preX)
    fitR2 <- summary(lm(calData[,inVar]~fitX))$r.squared
    valR2 <- summary(lm(valData[,inVar]~preX))$r.squared
    fitP <- summary(lm(calData[,inVar]~fitX))[[4]][[8]]
    valP <- summary(lm(valData[,inVar]~preX))[[4]][[8]]
    fitRMSE <- sqrt(mean((calData[,inVar]-fitX)^2))
    valRMSE <- sqrt(mean((valData[,inVar]-preX)^2))
    fitRMSEperc <- fitRMSE/(max(calData[,inVar])-min(subData[,inVar]))*100
    valRMSEperc <- valRMSE/(max(valData[,inVar])-min(valData[,inVar]))*100
    outVec <- c(fitR2,fitP, fitRMSE,fitBias,fitRMSEperc,
                valR2,valP, valRMSE,valBias,valRMSEperc)
    statMat[nsim,] <- outVec
  }
  # Per-simulation statistics and their means, written to disk.
  statMat <- as.data.frame(statMat)
  names(statMat) <- c("fitR2", "fitP","fitRMSE","fitBias","fitRMSEperc",
                      "valR2","valP","valRMSE","valBias","valRMSEperc")
  meanStat <- as.data.frame(t(colMeans(statMat)))
  write.csv(statMat,paste("./",inVar,"_", nComps,"comps_stats.csv", sep=""),row.names=FALSE)
  write.csv(meanStat, paste("./",inVar,"_", nComps, "_mean_stats.csv", sep=""))
  ### Coefficients
  coeffis <- data.frame(matrix(nrow = length(inBands)+1, ncol = 3))
  names(coeffis) <- c("bands", "mean","stdv")
  coeffis$bands <- c("Intercept",inBands)
  coeffis$mean <- apply(coefMat,MAR=2,FUN=mean)
  coeffis$stdv <- apply(coefMat,MAR=2,FUN=sd)
  write.table(coeffis, paste("./", inVar, "_",nComps,"comps_coeffMEAN.csv", sep=""),
              sep=",",col.names=T,row.names=F)
  ### Predictions: apply every simulated coefficient vector to the CAL data.
  specMat <- subData[,inBands]
  specMat <- cbind(rep(1,nrow(specMat)),specMat)   # prepend intercept column
  specMat <- as.matrix(specMat)
  predMat <- specMat%*%t(coefMat)
  predMean <- apply(predMat,FUN=mean,MAR=1)
  predStdv <- apply(predMat,FUN=sd,MAR=1)
  preds <- subData[, !(names(subData) %in% inBands|names(subData)=="RAND")]
  preds[,paste("predMean_",inVar,sep="")] <- predMean
  preds[,paste("predStdv_",inVar,sep="")] <- predStdv
  write.csv(preds,paste("./", inVar, "_", nComps,"comps_preds_mod.csv", sep=""), row.names=FALSE)
  ### Plot predictions (measured vs predicted, +/- 1 SD error bars; legend
  ### shows mean statistics with 90% quantile intervals).
  modCI <- quantile(statMat$fitR2, probs=c(0.05,0.95))
  modCIval <- quantile(statMat$valR2, probs=c(0.05,0.95))
  RMCI <- quantile(statMat$fitRMSE, probs=c(0.05,0.95))
  RMCIperc <- quantile(statMat$fitRMSEperc, probs=c(0.05,0.95))
  RMCIval <- quantile(statMat$valRMSE, probs=c(0.05,0.95))
  RMCIvalperc <- quantile(statMat$valRMSEperc, probs=c(0.05,0.95))
  formi <- as.formula(paste(paste(inVar," ~ predMean_",inVar, sep="")))
  lmformi <- as.formula(paste(paste("predMean_",inVar," ~ ", inVar, sep="")))
  jpeg(paste("./",inVar,"_", nComps, "comp_predplot.jpg",sep=""),
       width=6,height=5,units="in",res=300)
  plot(formi, data= preds, pch=16,cex=0.8,ylab="measured",xlab="predicted",
       main=inVar,xlim=c(min(preds[,grepl(paste0("predMean_", inVar),names(preds))] -
                               preds[,grepl(paste0("predStdv_", inVar),names(preds))]),
                         max(preds[,grepl(paste0("predMean_", inVar),names(preds))] +
                               preds[,grepl(paste0("predStdv_", inVar),names(preds))])))
  abline(lm(lmformi, data= preds))
  abline(a = 0,b = 1, lty=2)
  arrows(preds[,grepl(paste0("predMean_", inVar),names(preds))],
         preds[,match(inVar,names(preds))],
         preds[,grepl(paste0("predMean_", inVar),names(preds))]+preds[,grepl(paste0("predStdv_", inVar),names(preds))],
         preds[,match(inVar,names(preds))],angle=90,length=0.05, lwd=0.8)
  arrows(preds[,grepl(paste0("predMean_", inVar),names(preds))],
         preds[,match(inVar,names(preds))],
         preds[,grepl(paste0("predMean_", inVar),names(preds))]-preds[,grepl(paste0("predStdv_", inVar),names(preds))],
         preds[,match(inVar,names(preds))],angle=90,length=0.05, lwd=0.8)
  legend("topleft", bty="n", cex=0.8,
         c(paste("R² cal= ", sprintf("%.2f",signif (mean(statMat$fitR2),3))," [",signif(modCI[1],2),",",signif(modCI[2],2),"]", sep = ""),
           paste("R² val= ", sprintf("%.2f",signif (mean(statMat$valR2),3))," [",signif(modCIval[1],2),",",signif(modCIval[2],2),"]", sep = ""),
           paste("RMSEP % cal= ", sprintf("%.2f",signif (mean(statMat$fitRMSEperc),3)), " [",signif(RMCIperc[1],3),",",signif(RMCIperc[2],3),"]", sep=""),
           paste("RMSEP % val= ", sprintf("%.2f",signif (mean(statMat$valRMSEperc),3)), " [",signif(RMCIvalperc[1],3),",",signif(RMCIvalperc[2],3),"]", sep=""),
           paste("ncomps =", nComps, sep=" ")))
  dev.off()
  ### VIP for plotting
  vipAggr <- as.data.frame(t(apply(vipMat,MAR=1,FUN=quantile,probs=c(0.05,0.5,0.95))))
  vipAggr$mean_VIP <- apply(vipMat,MAR=1,FUN=mean)
  vipAggr$stdv <- apply(vipMat,MAR=1,FUN=sd)
  serr <- function(x) sqrt(var(x,na.rm=TRUE)/length(na.omit(x))) ## standard error of mean
  vipAggr$se <- apply(vipMat,MAR=1,FUN=serr)
  vipAggr$band <- inBands
  ### Standardized coefficients for plotting
  coeff_std <- data.frame(matrix(nrow = length(inBands)+1, ncol = 3))
  names(coeff_std) <- c("bands", "mean","stdv")
  coeff_std$bands <- c("Intercept",inBands)
  coeff_std$mean <- apply(coefStd,MAR=2,FUN=mean)
  coeff_std$stdv <- apply(coefStd,MAR=2,FUN=sd)
  ### Plot VIP and standardized coefficients. substr(...,2,...) strips the
  ### leading letter from band names (e.g. "X450") to recover the wavelength.
  jpeg(paste("./", inVar, "_", nComps, "comp_varimp.jpg",sep=""),
       width=6,height=7,units="in",res=300)
  par(mfrow=c(2,1), mar=c(1.5,4,2.5,1.5), oma=c(3,0,0,0))
  plot(coeff_std$mean[-1]~as.numeric(substr(coeff_std$bands[-1],2,nchar(coeff_std$bands[-1]))),
       type="p",pch=19, xlab="",ylab="coeff_stdmean",main=paste(inVar,nComps,"comps",sep = "_"),
       ylim=c(-max(abs(coeff_std$mean[-1])),max(abs(coeff_std$mean[-1]))), bty="l")
  abline(h=0)
  points(abs(coeff_std$mean)[-1]~as.numeric(substr(coeff_std$bands[-1],2,nchar(coeff_std$bands[-1]))),
         xlab="wvl",ylab="coeff_stdmean", col=2, pch=16, cex=0.8)
  lines(abs(coeff_std$mean)[-1]~as.numeric(substr(coeff_std$band[-1],2,nchar(coeff_std$band[-1]))), col=2)
  plot(as.numeric(substr(vipAggr$band,2,nchar(vipAggr$band))),vipAggr$mean_VIP, type="l",
       xlab = "wvl",ylab = "VIP", bty="l")
  polygon(x=c(as.numeric(substr(vipAggr$band,2,nchar(vipAggr$band))),
              rev(as.numeric(substr(vipAggr$band,2,nchar(vipAggr$band))))),
          y=c(vipAggr$mean_VIP+vipAggr$stdv*1.96, rev(vipAggr$mean_VIP-vipAggr$stdv*1.96)),
          col = adjustcolor("red", alpha.f = 0.2), border = NA)
  mtext("wavelength(nm)",1,outer = T,line = 1)
  dev.off()
  ######################################
  ### Independent (external) validation: apply the simulated coefficient
  ### vectors to the held-out IVAL rows (tstData).
  specMatIVAL <- tstData[,inBands]
  specMatIVAL <- cbind(rep(1,nrow(specMatIVAL)),specMatIVAL)
  specMatIVAL <- as.matrix(specMatIVAL)
  predMatIVAL <- specMatIVAL%*%t(coefMat)
  predMeanIVAL <- apply(predMatIVAL,FUN=mean,MAR=1)
  predStdvIVAL <- apply(predMatIVAL,FUN=sd,MAR=1)
  predsIVAL <- tstData[, !(names(tstData) %in% inBands|names(tstData)=="RAND")]
  predsIVAL[,paste("predMeanIVAL_",inVar,sep="")] <- predMeanIVAL
  predsIVAL[,paste("predStdvIVAL_",inVar,sep="")] <- predStdvIVAL
  write.csv(predsIVAL,paste("./", inVar, "_", nComps, "comps_preds_IVAL.csv", sep=""),
            row.names=FALSE)
  ### Model stats
  statMatIVAL <- matrix(data=NA,nrow=nsims,ncol=5)
  for (nsim in seq(nsims)){
    valBias <- mean(tstData[,inVar]-predMatIVAL[,nsim])
    valR2 <- summary(lm(tstData[,inVar]~predMatIVAL[,nsim]))$r.squared
    valRMSE <- sqrt(mean((tstData[,inVar]-predMatIVAL[,nsim])^2))
    valRMSEperc <- valRMSE/(max(tstData[,inVar])-min(tstData[,inVar]))*100
    valP <- summary(lm(tstData[,inVar]~predMatIVAL[,nsim]))[[4]][[8]]
    outVec <- c(valR2,valP,valRMSE,valBias,valRMSEperc)
    statMatIVAL[nsim,] <- outVec
  }
  statMatIVAL <- as.data.frame(statMatIVAL)
  names(statMatIVAL) <- c("IvalR2","IvalP","IvalRMSE","IvalBias","IvalRMSEperc")
  meanIVAL <- as.data.frame(t(colMeans(statMatIVAL)))
  write.csv(statMatIVAL,paste("./",inVar,"_", nComps, "comps_stats_IVAL.csv", sep=""),
            row.names=FALSE)
  write.csv(meanIVAL, paste("./",inVar,"_", nComps, "_mean_stats_IVAL.csv", sep=""))
  ### Plot
  modCIval <- quantile(statMatIVAL$IvalR2, probs=c(0.05,0.95))
  RMCIval <- quantile(statMatIVAL$IvalRMSE, probs=c(0.05,0.95))
  RMCIvalperc <- quantile(statMatIVAL$IvalRMSEperc, probs=c(0.05,0.95))
  formi <- as.formula(paste(paste(inVar," ~ predMeanIVAL_",inVar, sep="")))
  lmformi <- as.formula(paste(paste("predMeanIVAL_",inVar," ~ ", inVar, sep="")))
  jpeg(paste("./", inVar,"_", nComps, "comp_predplot_IVAL.jpg",sep=""),
       width=6,height=5,units="in",res=300)
  plot(formi, data= predsIVAL, pch=16,cex=0.8,ylab="measured",xlab="predicted",
       main=inVar,xlim=c(min(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))] -
                               predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))]),
                         max(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))] +
                               predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))])))
  abline(lm(lmformi, data= predsIVAL))
  abline(a = 0,b = 1, lty=2)
  arrows(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))],
         predsIVAL[,match(inVar,names(predsIVAL))],
         predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))]+predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))],
         predsIVAL[,match(inVar,names(predsIVAL))],angle=90,length=0.05, lwd=0.8)
  arrows(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))],
         predsIVAL[,match(inVar,names(predsIVAL))],
         predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))]-predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))],
         predsIVAL[,match(inVar,names(predsIVAL))],angle=90,length=0.05, lwd=0.8)
  legend("topleft", bty="n", cex=0.8,
         c(paste("R² = ", sprintf("%.2f",signif (mean(statMatIVAL$IvalR2),3))," [",signif(modCIval[1],2),",",signif(modCIval[2],2),"]", sep = ""),
           paste("RMSEP % = ", sprintf("%.2f",signif (mean(statMatIVAL$IvalRMSEperc),3)), " [",signif(RMCIvalperc[1],2),",",signif(RMCIvalperc[2],2),"]", sep=""),
           paste("ncomps =", nComps, sep=" ")))
  dev.off()
}
###### END #########
|
/PLSR_WoodRiver.R
|
permissive
|
annakat/PLSR_WoodRiver
|
R
| false
| false
| 16,513
|
r
|
#####################################################################################
### PLSR models AISA ################################################################
### Anna K Schweiger July 31 2020 ###################################################
### with lines of code and the VIPjh function from Shawn P. Serbin's awesome GitHub repo #
### https://github.com/serbinsh/Spectra_PLSR_models #################################
library(pls)
library(reshape)
library(agricolae)
library(caret)
### Variable Importance in Projection (VIP) score of predictor (band) j using
### the first h components of a single-response PLSR model fitted with the
### orthogonal scores algorithm (pls::plsr(..., method = "oscorespls")).
###   object: fitted plsr model (needs $method, $Yloadings, $scores,
###           $loading.weights)
###   j:      index of the predictor to score
###   h:      number of components to use
### Returns the scalar VIP score. Stops for other algorithms or multi-response
### models.
VIPjh <- function(object, j, h) {
  if (object$method != "oscorespls") stop("Only implemented for orthogonal scores algorithm. Refit with 'method = \"oscorespls\"'")
  if (nrow(object$Yloadings) > 1) stop("Only implemented for single-response models")
  yload <- c(object$Yloadings)[1:h]               # Y-loadings b_1..b_h
  # FIX: avoid naming a local 'T' (masks the TRUE shorthand).
  scoremat <- object$scores[, 1:h, drop = FALSE]  # X-scores matrix
  ss <- yload^2 * colSums(scoremat^2)             # variance explained per component
  wts <- object$loading.weights[, 1:h, drop = FALSE]
  wnorm2 <- colSums(wts^2)
  sqrt(nrow(wts) * sum(ss * wts[j, ]^2 / wnorm2) / sum(ss))
}
### Read data
# allData <- read.csv("./plot_chem_spec.csv")
allData <- read.csv("./subplot_chem_spec.csv")
# allData <- read.csv("./transect_chem_spec.csv")
inBands <- names(allData)[10:187] # spectral bands (predictor columns) to work with
inVars <- names(allData[c(3:9)]) # traits (response columns) to work with
###############################################
### Find number of components
### For each trait: repeatedly fit cross-validated PLSR models with up to 15
### components, record PRESS and RMSEP per component count, then run Tukey HSD
### tests and write annotated boxplots to guide the ncomp choice.
for (i in 1:length(inVars)){
  inVar <- inVars[i]
  # Formula: trait ~ band1 + band2 + ...
  iForm <- paste(inBands,collapse="+")
  iForm <- as.formula(paste(inVar,"~",iForm))
  ### Separate data into model CAL/VAL and IVAL (independent or external validation) data
  set.seed(19841002+i)  # trait-specific but reproducible partition
  rand <- createDataPartition(allData[, inVars[i]], p=0.75, list=F) # here: 75% data for model CAL/VAL
  subData <- allData[rand,] ## CAL
  tstData <- allData[-rand,] ## IVAL
  ### Calibrate the model
  nComps <- 15 # max no of components to be evaluated
  nsims <- 50 # no of simulations
  outMat <- matrix(data=NA,nrow=nsims,ncol=nComps)    # PRESS per simulation x ncomp
  outMatRM <- matrix(data=NA,nrow=nsims,ncol=nComps)  # RMSEP per simulation x ncomp
  for (nsim in seq(nsims)){
    print(nsim)
    flush.console()
    segs <- cvsegments(N = nrow(subData), k = 5, type="random") # k fold CV, 5 folds = 80:20 split
    resNCOMP <- plsr(iForm,data=subData,ncomp=nComps,
                     validation="CV", segments=segs,
                     method="oscorespls")
    resPRESS <- as.vector(resNCOMP$validation$PRESS)
    outMat[nsim,seq(resNCOMP$validation$ncomp)] <-resPRESS
    resRMSEP <- as.numeric(RMSEP(resNCOMP,estimate="CV",intercept=F)$val)
    outMatRM[nsim,seq(resNCOMP$validation$ncomp)] <-resRMSEP
  }
  ### PRESS statistic: Tukey test - sign diff among no of components?
  pressDF <- as.data.frame(outMat)
  names(pressDF) <- as.character(seq(nComps))
  pressDFres <- melt(pressDF)
  modi <- lm (value~variable, pressDFres)
  tuk <- HSD.test (modi,"variable")
  tuk_dat <- as.data.frame(tuk$groups)
  tuk_dat$var <- as.numeric(row.names(tuk_dat))
  tuk_dat <- tuk_dat[order(tuk_dat$var,decreasing = F),]
  letters <- as.character(tuk_dat$groups)
  # Boxplot of PRESS by number of components, annotated with Tukey groups.
  jpeg(paste("./", inVar, "_PRESS.jpg",sep=""),width=6,height=4.5,units="in",res=200)
  par(bty="l")
  boxplot(pressDFres$value~pressDFres$variable, xlab="n Components",ylab="PRESS",main=inVar)
  text(x=1:max(as.numeric(pressDFres$variable)), y=rep(max(pressDFres$value),15),letters)
  dev.off()
  ### RMSEP: Tukey test
  RMDF <- as.data.frame(outMatRM)
  names(RMDF) <- as.character(seq(nComps))
  RMDFres <- melt(RMDF)
  modi <- lm (value~variable, RMDFres)
  tuk <- HSD.test (modi,"variable")
  tuk_dat <- as.data.frame(tuk$groups)
  tuk_dat$var <- as.numeric(row.names(tuk_dat))
  tuk_dat <- tuk_dat[order(tuk_dat$var,decreasing = F),]
  letters <- as.character(tuk_dat$groups)
  # Boxplot of RMSEP by number of components, annotated with Tukey groups.
  jpeg(paste("./", inVar, "_RMSEP.jpg",sep=""),width=6,height=4.5,units="in",res=200)
  par(bty="l")
  boxplot(RMDFres$value~RMDFres$variable, xlab="n Components",ylab="RMSEP",main=inVar)
  text(x=1:max(as.numeric(RMDFres$variable)), y=rep(max(RMDFres$value),15),letters)
  dev.off()
}
#######################################
###### Final models ###################
### Make dataframe containing selected no of components (=data) per trait
### (one value per trait in inVars, chosen from the diagnostics above).
compis <- as.data.frame(matrix(nrow=1, ncol=length(inVars),dimnames = list(NULL, c(inVars)),
                               data=c(3,4,2,4,2,3,2)))
### One trait at a time ...
k <- 1
inVar <- inVars[k]
nCompss <- compis[, inVar] # nCompss could also be a vector of a number of options for ncomps
### Separate data into CAL and IVAL data, use same seed same as above
set.seed(19841002+k)
rand <- createDataPartition(allData[, inVars[k]], p=0.75, list=F) ## 75% data for calibration
subData <- allData[rand,] ## CAL
tstData <- allData[-rand,] ## IVAL
iForm <- paste(inBands,collapse="+")
iForm <- as.formula(paste(inVar,"~",iForm))
set.seed(1840)
for (i in 1:length(nCompss)){
nsims <- 500
nComps <- nCompss[i]
nCutmod <- floor(0.8*nrow(subData)) ## 80% data for calibration, 20% for CV
coefMat <- matrix(data=NA,nrow=nsims,ncol=length(inBands)+1)
coefStd <- matrix(data=NA,nrow=nsims,ncol=length(inBands)+1)
vipMat <- matrix(data=NA,ncol=nsims,nrow=length(inBands))
statMat <- matrix(data=NA,nrow=nsims,ncol=10)
for (nsim in seq(nsims)){
print(nsim)
flush.console()
set.seed (19818411+nsim)
randx <-createDataPartition(subData[, inVars[k]], p=0.8, list=F)
calData <- allData[randx,] ## CAL
valData <- allData[-randx,] ## VAL (internal)
resX <- plsr(iForm,data=calData,ncomp=nComps,method="oscorespls") ###
resS <- plsr(iForm,data=calData,ncomp=nComps, method="oscorespls",scale=T) ### scaled by SD
### Coefficients (raw and standardized)
coefs <- as.vector(coef(resX,ncomp=nComps,intercept=T))
zcoef <- as.vector(coef(resS,ncomp=nComps,intercept=T))
coefMat[nsim,] <- coefs
coefStd[nsim,] <- zcoef ### standardized coeffis for importance of wvls
### VIP
vip <- c()
for (j in seq(length(inBands))){
vip <- c(vip,VIPjh(resS,j,nComps))
}
vipMat[,nsim] <- vip
### Model stats
fitX <- as.vector(unlist(resX$fitted.values[,1,nComps])) ### fitted values
preX <- as.vector(unlist(predict(resX,ncomp=nComps,newdata=valData))) ### internal val
fitBias <- mean(calData[,inVar]-fitX)
valBias <- mean(valData[,inVar]-preX)
fitR2 <- summary(lm(calData[,inVar]~fitX))$r.squared
valR2 <- summary(lm(valData[,inVar]~preX))$r.squared
fitP <- summary(lm(calData[,inVar]~fitX))[[4]][[8]]
valP <- summary(lm(valData[,inVar]~preX))[[4]][[8]]
fitRMSE <- sqrt(mean((calData[,inVar]-fitX)^2))
valRMSE <- sqrt(mean((valData[,inVar]-preX)^2))
fitRMSEperc <- fitRMSE/(max(calData[,inVar])-min(subData[,inVar]))*100
valRMSEperc <- valRMSE/(max(valData[,inVar])-min(valData[,inVar]))*100
outVec <- c(fitR2,fitP, fitRMSE,fitBias,fitRMSEperc,
valR2,valP, valRMSE,valBias,valRMSEperc)
statMat[nsim,] <- outVec
}
statMat <- as.data.frame(statMat)
names(statMat) <- c("fitR2", "fitP","fitRMSE","fitBias","fitRMSEperc",
"valR2","valP","valRMSE","valBias","valRMSEperc")
meanStat <- as.data.frame(t(colMeans(statMat)))
write.csv(statMat,paste("./",inVar,"_", nComps,"comps_stats.csv", sep=""),row.names=FALSE)
write.csv(meanStat, paste("./",inVar,"_", nComps, "_mean_stats.csv", sep=""))
### Coefficients
coeffis <- data.frame(matrix(nrow = length(inBands)+1, ncol = 3))
names(coeffis) <- c("bands", "mean","stdv")
coeffis$bands <- c("Intercept",inBands)
coeffis$mean <- apply(coefMat,MAR=2,FUN=mean)
coeffis$stdv <- apply(coefMat,MAR=2,FUN=sd)
write.table(coeffis, paste("./", inVar, "_",nComps,"comps_coeffMEAN.csv", sep=""),
sep=",",col.names=T,row.names=F)
### Predictions
specMat <- subData[,inBands]
specMat <- cbind(rep(1,nrow(specMat)),specMat)
specMat <- as.matrix(specMat)
predMat <- specMat%*%t(coefMat)
predMean <- apply(predMat,FUN=mean,MAR=1)
predStdv <- apply(predMat,FUN=sd,MAR=1)
preds <- subData[, !(names(subData) %in% inBands|names(subData)=="RAND")]
preds[,paste("predMean_",inVar,sep="")] <- predMean
preds[,paste("predStdv_",inVar,sep="")] <- predStdv
write.csv(preds,paste("./", inVar, "_", nComps,"comps_preds_mod.csv", sep=""), row.names=FALSE)
### Plot predictions
modCI <- quantile(statMat$fitR2, probs=c(0.05,0.95))
modCIval <- quantile(statMat$valR2, probs=c(0.05,0.95))
RMCI <- quantile(statMat$fitRMSE, probs=c(0.05,0.95))
RMCIperc <- quantile(statMat$fitRMSEperc, probs=c(0.05,0.95))
RMCIval <- quantile(statMat$valRMSE, probs=c(0.05,0.95))
RMCIvalperc <- quantile(statMat$valRMSEperc, probs=c(0.05,0.95))
formi <- as.formula(paste(paste(inVar," ~ predMean_",inVar, sep="")))
lmformi <- as.formula(paste(paste("predMean_",inVar," ~ ", inVar, sep="")))
jpeg(paste("./",inVar,"_", nComps, "comp_predplot.jpg",sep=""),
width=6,height=5,units="in",res=300)
plot(formi, data= preds, pch=16,cex=0.8,ylab="measured",xlab="predicted",
main=inVar,xlim=c(min(preds[,grepl(paste0("predMean_", inVar),names(preds))] -
preds[,grepl(paste0("predStdv_", inVar),names(preds))]),
max(preds[,grepl(paste0("predMean_", inVar),names(preds))] +
preds[,grepl(paste0("predStdv_", inVar),names(preds))])))
abline(lm(lmformi, data= preds))
abline(a = 0,b = 1, lty=2)
arrows(preds[,grepl(paste0("predMean_", inVar),names(preds))],
preds[,match(inVar,names(preds))],
preds[,grepl(paste0("predMean_", inVar),names(preds))]+preds[,grepl(paste0("predStdv_", inVar),names(preds))],
preds[,match(inVar,names(preds))],angle=90,length=0.05, lwd=0.8)
arrows(preds[,grepl(paste0("predMean_", inVar),names(preds))],
preds[,match(inVar,names(preds))],
preds[,grepl(paste0("predMean_", inVar),names(preds))]-preds[,grepl(paste0("predStdv_", inVar),names(preds))],
preds[,match(inVar,names(preds))],angle=90,length=0.05, lwd=0.8)
legend("topleft", bty="n", cex=0.8,
c(paste("R² cal= ", sprintf("%.2f",signif (mean(statMat$fitR2),3))," [",signif(modCI[1],2),",",signif(modCI[2],2),"]", sep = ""),
paste("R² val= ", sprintf("%.2f",signif (mean(statMat$valR2),3))," [",signif(modCIval[1],2),",",signif(modCIval[2],2),"]", sep = ""),
paste("RMSEP % cal= ", sprintf("%.2f",signif (mean(statMat$fitRMSEperc),3)), " [",signif(RMCIperc[1],3),",",signif(RMCIperc[2],3),"]", sep=""),
paste("RMSEP % val= ", sprintf("%.2f",signif (mean(statMat$valRMSEperc),3)), " [",signif(RMCIvalperc[1],3),",",signif(RMCIvalperc[2],3),"]", sep=""),
paste("ncomps =", nComps, sep=" ")))
dev.off()
### VIP for plotting
vipAggr <- as.data.frame(t(apply(vipMat,MAR=1,FUN=quantile,probs=c(0.05,0.5,0.95))))
vipAggr$mean_VIP <- apply(vipMat,MAR=1,FUN=mean)
vipAggr$stdv <- apply(vipMat,MAR=1,FUN=sd)
serr <- function(x) sqrt(var(x,na.rm=TRUE)/length(na.omit(x))) ## standard error of mean
vipAggr$se <- apply(vipMat,MAR=1,FUN=serr)
vipAggr$band <- inBands
### Standardized coefficients for plotting
coeff_std <- data.frame(matrix(nrow = length(inBands)+1, ncol = 3))
names(coeff_std) <- c("bands", "mean","stdv")
coeff_std$bands <- c("Intercept",inBands)
coeff_std$mean <- apply(coefStd,MAR=2,FUN=mean)
coeff_std$stdv <- apply(coefStd,MAR=2,FUN=sd)
### Plot VIP and standardized coefficients
jpeg(paste("./", inVar, "_", nComps, "comp_varimp.jpg",sep=""),
width=6,height=7,units="in",res=300)
par(mfrow=c(2,1), mar=c(1.5,4,2.5,1.5), oma=c(3,0,0,0))
plot(coeff_std$mean[-1]~as.numeric(substr(coeff_std$bands[-1],2,nchar(coeff_std$bands[-1]))),
type="p",pch=19, xlab="",ylab="coeff_stdmean",main=paste(inVar,nComps,"comps",sep = "_"),
ylim=c(-max(abs(coeff_std$mean[-1])),max(abs(coeff_std$mean[-1]))), bty="l")
abline(h=0)
points(abs(coeff_std$mean)[-1]~as.numeric(substr(coeff_std$bands[-1],2,nchar(coeff_std$bands[-1]))),
xlab="wvl",ylab="coeff_stdmean", col=2, pch=16, cex=0.8)
lines(abs(coeff_std$mean)[-1]~as.numeric(substr(coeff_std$band[-1],2,nchar(coeff_std$band[-1]))), col=2)
plot(as.numeric(substr(vipAggr$band,2,nchar(vipAggr$band))),vipAggr$mean_VIP, type="l",
xlab = "wvl",ylab = "VIP", bty="l")
polygon(x=c(as.numeric(substr(vipAggr$band,2,nchar(vipAggr$band))),
rev(as.numeric(substr(vipAggr$band,2,nchar(vipAggr$band))))),
y=c(vipAggr$mean_VIP+vipAggr$stdv*1.96, rev(vipAggr$mean_VIP-vipAggr$stdv*1.96)),
col = adjustcolor("red", alpha.f = 0.2), border = NA)
mtext("wavelength(nm)",1,outer = T,line = 1)
dev.off()
######################################
### Independent (external) validation
specMatIVAL <- tstData[,inBands]
specMatIVAL <- cbind(rep(1,nrow(specMatIVAL)),specMatIVAL)
specMatIVAL <- as.matrix(specMatIVAL)
predMatIVAL <- specMatIVAL%*%t(coefMat)
predMeanIVAL <- apply(predMatIVAL,FUN=mean,MAR=1)
predStdvIVAL <- apply(predMatIVAL,FUN=sd,MAR=1)
predsIVAL <- tstData[, !(names(tstData) %in% inBands|names(tstData)=="RAND")]
predsIVAL[,paste("predMeanIVAL_",inVar,sep="")] <- predMeanIVAL
predsIVAL[,paste("predStdvIVAL_",inVar,sep="")] <- predStdvIVAL
write.csv(predsIVAL,paste("./", inVar, "_", nComps, "comps_preds_IVAL.csv", sep=""),
row.names=FALSE)
### Model stats
statMatIVAL <- matrix(data=NA,nrow=nsims,ncol=5)
for (nsim in seq(nsims)){
valBias <- mean(tstData[,inVar]-predMatIVAL[,nsim])
valR2 <- summary(lm(tstData[,inVar]~predMatIVAL[,nsim]))$r.squared
valRMSE <- sqrt(mean((tstData[,inVar]-predMatIVAL[,nsim])^2))
valRMSEperc <- valRMSE/(max(tstData[,inVar])-min(tstData[,inVar]))*100
valP <- summary(lm(tstData[,inVar]~predMatIVAL[,nsim]))[[4]][[8]]
outVec <- c(valR2,valP,valRMSE,valBias,valRMSEperc)
statMatIVAL[nsim,] <- outVec
}
statMatIVAL <- as.data.frame(statMatIVAL)
names(statMatIVAL) <- c("IvalR2","IvalP","IvalRMSE","IvalBias","IvalRMSEperc")
meanIVAL <- as.data.frame(t(colMeans(statMatIVAL)))
write.csv(statMatIVAL,paste("./",inVar,"_", nComps, "comps_stats_IVAL.csv", sep=""),
row.names=FALSE)
write.csv(meanIVAL, paste("./",inVar,"_", nComps, "_mean_stats_IVAL.csv", sep=""))
### Plot
modCIval <- quantile(statMatIVAL$IvalR2, probs=c(0.05,0.95))
RMCIval <- quantile(statMatIVAL$IvalRMSE, probs=c(0.05,0.95))
RMCIvalperc <- quantile(statMatIVAL$IvalRMSEperc, probs=c(0.05,0.95))
formi <- as.formula(paste(paste(inVar," ~ predMeanIVAL_",inVar, sep="")))
lmformi <- as.formula(paste(paste("predMeanIVAL_",inVar," ~ ", inVar, sep="")))
jpeg(paste("./", inVar,"_", nComps, "comp_predplot_IVAL.jpg",sep=""),
width=6,height=5,units="in",res=300)
plot(formi, data= predsIVAL, pch=16,cex=0.8,ylab="measured",xlab="predicted",
main=inVar,xlim=c(min(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))] -
predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))]),
max(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))] +
predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))])))
abline(lm(lmformi, data= predsIVAL))
abline(a = 0,b = 1, lty=2)
arrows(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))],
predsIVAL[,match(inVar,names(predsIVAL))],
predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))]+predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))],
predsIVAL[,match(inVar,names(predsIVAL))],angle=90,length=0.05, lwd=0.8)
arrows(predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))],
predsIVAL[,match(inVar,names(predsIVAL))],
predsIVAL[,grepl(paste0("predMeanIVAL_", inVar),names(predsIVAL))]-predsIVAL[,grepl(paste0("predStdvIVAL_", inVar),names(predsIVAL))],
predsIVAL[,match(inVar,names(predsIVAL))],angle=90,length=0.05, lwd=0.8)
legend("topleft", bty="n", cex=0.8,
c(paste("R² = ", sprintf("%.2f",signif (mean(statMatIVAL$IvalR2),3))," [",signif(modCIval[1],2),",",signif(modCIval[2],2),"]", sep = ""),
paste("RMSEP % = ", sprintf("%.2f",signif (mean(statMatIVAL$IvalRMSEperc),3)), " [",signif(RMCIvalperc[1],2),",",signif(RMCIvalperc[2],2),"]", sep=""),
paste("ncomps =", nComps, sep=" ")))
dev.off()
}
###### END #########
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/books.R
\name{books}
\alias{books}
\title{Data frame of Ming and Qing novels}
\usage{
books()
}
\value{
A data frame with two columns: \code{text} and \code{book}
}
\description{
Returns a data frame of Ming and Qing novels with
only 2 columns: \code{text}, which contains the text,
and \code{book}, which contains the titles.
}
\examples{
library(dplyr)
books() \%>\% group_by(book) \%>\%
summarise(total_lines = n())
}
|
/man/books.Rd
|
permissive
|
boltomli/mingqingxiaoshuor
|
R
| false
| true
| 505
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/books.R
\name{books}
\alias{books}
\title{Data frame of Ming and Qing novels}
\usage{
books()
}
\value{
A data frame with two columns: \code{text} and \code{book}
}
\description{
Returns a data frame of Ming and Qing novels with
only 2 columns: \code{text}, which contains the text,
and \code{book}, which contains the titles.
}
\examples{
library(dplyr)
books() \%>\% group_by(book) \%>\%
summarise(total_lines = n())
}
|
##Peer reviewed programming assignment for the course R programming as part of the
##data science specialisation on Coursera
##Date: 14-09-2020
##Author: Janine
##github: Neurogenbat
##I will now write a function that creates a matrix that can cache its inverse
## Create a special "matrix" object that can cache its inverse.
## Returns a list of accessor closures that share state through the
## enclosing environment:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse (NULL until computed)
## Fix: the original read `fubction(x = matrix())`, a typo that made the
## whole file un-parseable; it must be `function`.
makeChacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    ## <<- writes to the enclosing environment, so all closures see the
    ## new matrix and the cleared cache.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    inv <<- inverse
  }
  getInverse <- function() {
    inv
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
##I have defined the argument as "matrix"; the initialisation as NULL holds the inverse
##I have assigned the values of inv in the parent environment
##I can return the matrix value with get
##I can get the value of inv with getInverse
##now I will write a function that generates the inverse of the matrix from makeChacheMatrix
## Return the inverse of the special "matrix" produced by makeChacheMatrix.
## If an inverse is already cached it is returned immediately (after a
## message); otherwise it is computed with solve(), stored in the cache,
## and returned. Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("get cached data")
    return(cached)
  }
  computed <- solve(x$get(), ...)
  x$setInverse(computed)
  computed
}
##cacheSolve returns a matrix that is the inverse of x
|
/cachematrix.R
|
no_license
|
Neurogenbat/ProgrammingAssignment2
|
R
| false
| false
| 1,140
|
r
|
##Peer reviewed programming assignment for the course R programming as part of the
##data science specialisation on Coursera
##Date: 14-09-2020
##Author: Janine
##github: Neurogenbat
##I will now write a function that creates a matrix that can cache its inverse
makeChacheMatrix <- fubction(x = matrix()){
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() {x}
setInverse <- function(inverse) {inv <<- inverse}
getInverse <- function() {inv}
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
##I have defined the argument as "matrix", the initialisation as NULL holds the inverse
##I have assigned the values of inv in the parent einvironment
##I can return matrix value with get
##I can get the value of inv with getInverse
##now I will write a function that generates the inverse of the matrix makeChacheMaktrix
cacheSolve <- function(x, ...){
inv <- x$getInverse()
if(!is.null(inv)){
message("get cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
}
##cacheSolve returns a matrix as invers of x
|
# Auto-generated fuzz-regression harness (libFuzzer/valgrind workflow):
# replays one stored input list against diffrprojects:::dist_mat_absolute
# and prints the structure of the result. x is an integer vector (one
# nonzero value followed by zeros) and y is zero-length, exercising the
# empty-argument edge case of the native routine.
testlist <- list(x = c(1289093589L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962904-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 362
|
r
|
testlist <- list(x = c(1289093589L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcc.fci.R
\name{vcc.fci}
\alias{vcc.fci}
\title{First-echo Cover Index of Vertical Canopy Cover}
\usage{
vcc.fci(las = NA, thresh.val = 1.25, silent = FALSE)
}
\arguments{
\item{las}{Path or name of LAS file. Defaults to NA.}
\item{thresh.val}{Specifies the value to use for canopy height thresholding. Defaults to 1.25.}
\item{silent}{Boolean switch for the interactive display of plots. Defaults to FALSE.}
}
\value{
The results of \code{vcc.fci}
}
\description{
This function calculates fractional cover per the First-echo Cover Index
}
\examples{
vcc.fci(las='C:/plot.las', thresh.val=1.25, silent=FALSE)
}
\references{
\url{http://geography.swan.ac.uk/silvilaser/papers/oral_papers/Forestry\%20Applications\%20\%26\%20Inventory/Holmgren.pdf}
\url{http://link.springer.com/chapter/10.1007\%2F978-94-017-8663-8_20}
}
\author{
Adam Erickson, \email{adam.erickson@ubc.ca}
}
\keyword{canopy}
\keyword{cover}
\keyword{cover,}
\keyword{fractional}
\keyword{vertical}
|
/man/vcc.fci.Rd
|
permissive
|
fdbesanto2/gapfraction
|
R
| false
| true
| 1,046
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcc.fci.R
\name{vcc.fci}
\alias{vcc.fci}
\title{First-echo Cover Index of Vertical Canopy Cover}
\usage{
vcc.fci(las = NA, thresh.val = 1.25, silent = FALSE)
}
\arguments{
\item{las}{Path or name of LAS file. Defaults to NA.}
\item{thresh.val}{Specifies the value to use for canopy height thresholding. Defaults to 1.25.}
\item{silent}{Boolean switch for the interactive display of plots. Defaults to FALSE.}
}
\value{
The results of \code{vcc.fci}
}
\description{
This function calculates fractional cover per the First-echo Cover Index
}
\examples{
vcc.fci(las='C:/plot.las', thresh.val=1.25, silent=FALSE)
}
\references{
\url{http://geography.swan.ac.uk/silvilaser/papers/oral_papers/Forestry\%20Applications\%20\%26\%20Inventory/Holmgren.pdf}
\url{http://link.springer.com/chapter/10.1007\%2F978-94-017-8663-8_20}
}
\author{
Adam Erickson, \email{adam.erickson@ubc.ca}
}
\keyword{canopy}
\keyword{cover}
\keyword{cover,}
\keyword{fractional}
\keyword{vertical}
|
\name{OrdKrig_optim_krige}
\alias{OrdKrig_optim_krige}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Optimization function local ordinary kriging
}
\description{
Optimization function performing 5-fold validation and returning RMSE of estimates for local ordinary kriging.
}
\usage{
OrdKrig_optim_krige(par = c(cutoff=300, anis_deg=0, anis_ax=.5, nmax=12, omax=3, nugget=1),
wpath = "/home/jbre/R/OrdKrig",
datafile = "raw/Masterfile_Adige.txt",
variable = "Humus____",
model="Sph", kfold=5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{par}{
see \code{\link[stats]{optim}} or \code{\link[hydroPSO]{hydroPSO}}. For names of vector \emph{par} see \code{\link[gstat]{idw}}.
}
\item{wpath}{
see \code{\link{OrdKrig}}
}
\item{datafile}{
path to and name of the data to optimize on
}
\item{variable}{
see \code{\link{OrdKrig}}
}
\item{model}{
see \code{\link[gstat]{fit.variogram}}
}
\item{kfold}{
see \code{\link[caret]{createFolds}}
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
RMSE of local ordinary kriging estimations vs. observed data
}
\references{
Johannes Brenner, \email{johannesbrenner@gmail.com}
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# do not run!
# hydroPSO::hydroPSO(fn = OrdKrig_optim_krige, method="spso2011",
# lower = c(0,0,0.01,8,1,0), upper = c(1000,359,1,100,25,10),
# control=list(drty.out = "/home/jbre/R/OrdKrig/PSO_krige", npart=40,
# parallel="none", par.pkgs = c("gstat","caret","hydroGOF","sp")))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/OrdKrig_optim_krige.Rd
|
no_license
|
andbal/SpatialInterpol
|
R
| false
| false
| 2,085
|
rd
|
\name{OrdKrig_optim_krige}
\alias{OrdKrig_optim_krige}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Optimization function local ordinary kriging
}
\description{
Optimization function performing 5-fold validation and returning RMSE of estimates for local ordinary kriging.
}
\usage{
OrdKrig_optim_krige(par = c(cutoff=300, anis_deg=0, anis_ax=.5, nmax=12, omax=3, nugget=1),
wpath = "/home/jbre/R/OrdKrig",
datafile = "raw/Masterfile_Adige.txt",
variable = "Humus____",
model="Sph", kfold=5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{par}{
see \code{\link[stats]{optim}} or \code{\link[hydroPSO]{hydroPSO}}. For names of vector \emph{par} see \code{\link[gstat]{idw}}.
}
\item{wpath}{
see \code{\link{OrdKrig}}
}
\item{datafile}{
path to and name of the data to optimize on
}
\item{variable}{
see \code{\link{OrdKrig}}
}
\item{model}{
see \code{\link[gstat]{fit.variogram}}
}
\item{kfold}{
see \code{\link[caret]{createFolds}}
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
RMSE of local ordinary kriging estimations vs. observed data
}
\references{
Johannes Brenner, \email{johannesbrenner@gmail.com}
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# do not run!
# hydroPSO::hydroPSO(fn = OrdKrig_optim_krige, method="spso2011",
# lower = c(0,0,0.01,8,1,0), upper = c(1000,359,1,100,25,10),
# control=list(drty.out = "/home/jbre/R/OrdKrig/PSO_krige", npart=40,
# parallel="none", par.pkgs = c("gstat","caret","hydroGOF","sp")))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R-to-C++ bridge for the 'fect' package: every wrapper below simply
# forwards its arguments to a registered native routine via .Call().
# No argument validation happens on the R side.
loglh <- function(Y_fit, Y) {
.Call(`_fect_loglh`, Y_fit, Y)
}
# "_ub" variants: presumably "unbalanced" panel versions, with I an
# indicator of observed cells -- TODO confirm against the C++ sources.
loglh_ub <- function(Y_fit, Y, I) {
.Call(`_fect_loglh_ub`, Y_fit, Y, I)
}
data_ub_adj <- function(I_data, data) {
.Call(`_fect_data_ub_adj`, I_data, data)
}
XXinv <- function(X) {
.Call(`_fect_XXinv`, X)
}
panel_beta <- function(X, xxinv, Y, FE) {
.Call(`_fect_panel_beta`, X, xxinv, Y, FE)
}
panel_est <- function(X, Y, MF) {
.Call(`_fect_panel_est`, X, Y, MF)
}
# Iterative estimators: mniter caps iterations, w is a step weight, and
# tol the convergence tolerance (defaults baked in by the generator).
inter_fe_d_qr <- function(Y, Y_fit0, FE0, factor0, xi0, X, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d_qr`, Y, Y_fit0, FE0, factor0, xi0, X, r, force, mniter, w, tol)
}
inter_fe_d_qr_ub <- function(Y, Y_fit0, FE0, factor0, xi0, X, I, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d_qr_ub`, Y, Y_fit0, FE0, factor0, xi0, X, I, r, force, mniter, w, tol)
}
qr_factor <- function(F, L) {
.Call(`_fect_qr_factor`, F, L)
}
IND <- function(I) {
.Call(`_fect_IND`, I)
}
subfe <- function(Y, X, I, intercept) {
.Call(`_fect_subfe`, Y, X, I, intercept)
}
l_ub <- function(Y, F, I, r, force) {
.Call(`_fect_l_ub`, Y, F, I, r, force)
}
f_ub <- function(Y, L, I, r, force) {
.Call(`_fect_f_ub`, Y, L, I, r, force)
}
fe <- function(E, F_old, xi_old, force, r) {
.Call(`_fect_fe`, E, F_old, xi_old, force, r)
}
fe_ub <- function(E, I, F_old, xi_old, force, r) {
.Call(`_fect_fe_ub`, E, I, F_old, xi_old, force, r)
}
inter_fe_d <- function(Y, Y_fit0, FE0, X, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d`, Y, Y_fit0, FE0, X, r, force, mniter, w, tol)
}
inter_fe_d_ub <- function(Y, Y_fit0, FE0, X, I, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d_ub`, Y, Y_fit0, FE0, X, I, r, force, mniter, w, tol)
}
Y_demean <- function(Y, force) {
.Call(`_fect_Y_demean`, Y, force)
}
fe_add <- function(alpha_Y, xi_Y, mu_Y, T, N, force) {
.Call(`_fect_fe_add`, alpha_Y, xi_Y, mu_Y, T, N, force)
}
panel_factor <- function(E, r) {
.Call(`_fect_panel_factor`, E, r)
}
panel_FE <- function(E, lambda, hard) {
.Call(`_fect_panel_FE`, E, lambda, hard)
}
ife <- function(E, force, mc, r, hard, lambda) {
.Call(`_fect_ife`, E, force, mc, r, hard, lambda)
}
inter_fe <- function(Y, X, r, force, beta0, tol = 1e-5) {
.Call(`_fect_inter_fe`, Y, X, r, force, beta0, tol)
}
inter_fe_ub <- function(Y, Y0, X, I, beta0, r, force, tol = 1e-5) {
.Call(`_fect_inter_fe_ub`, Y, Y0, X, I, beta0, r, force, tol)
}
fe_ad_iter <- function(Y, Y0, I, force, tolerate) {
.Call(`_fect_fe_ad_iter`, Y, Y0, I, force, tolerate)
}
fe_ad_covar_iter <- function(XX, xxinv, Y, Y0, I, beta0, force, tolerate) {
.Call(`_fect_fe_ad_covar_iter`, XX, xxinv, Y, Y0, I, beta0, force, tolerate)
}
fe_ad_inter_iter <- function(Y, Y0, I, force, mc, r, hard, lambda, tolerate) {
.Call(`_fect_fe_ad_inter_iter`, Y, Y0, I, force, mc, r, hard, lambda, tolerate)
}
fe_ad_inter_covar_iter <- function(XX, xxinv, Y, Y0, I, beta0, force, mc, r, hard, lambda, tolerate) {
.Call(`_fect_fe_ad_inter_covar_iter`, XX, xxinv, Y, Y0, I, beta0, force, mc, r, hard, lambda, tolerate)
}
beta_iter <- function(X, xxinv, Y, r, tolerate, beta0) {
.Call(`_fect_beta_iter`, X, xxinv, Y, r, tolerate, beta0)
}
inter_fe_mc <- function(Y, Y0, X, I, beta0, r, lambda, force, tol = 1e-5) {
.Call(`_fect_inter_fe_mc`, Y, Y0, X, I, beta0, r, lambda, force, tol)
}
|
/R/RcppExports.R
|
no_license
|
danrthomas/fect
|
R
| false
| false
| 3,699
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
loglh <- function(Y_fit, Y) {
.Call(`_fect_loglh`, Y_fit, Y)
}
loglh_ub <- function(Y_fit, Y, I) {
.Call(`_fect_loglh_ub`, Y_fit, Y, I)
}
data_ub_adj <- function(I_data, data) {
.Call(`_fect_data_ub_adj`, I_data, data)
}
XXinv <- function(X) {
.Call(`_fect_XXinv`, X)
}
panel_beta <- function(X, xxinv, Y, FE) {
.Call(`_fect_panel_beta`, X, xxinv, Y, FE)
}
panel_est <- function(X, Y, MF) {
.Call(`_fect_panel_est`, X, Y, MF)
}
inter_fe_d_qr <- function(Y, Y_fit0, FE0, factor0, xi0, X, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d_qr`, Y, Y_fit0, FE0, factor0, xi0, X, r, force, mniter, w, tol)
}
inter_fe_d_qr_ub <- function(Y, Y_fit0, FE0, factor0, xi0, X, I, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d_qr_ub`, Y, Y_fit0, FE0, factor0, xi0, X, I, r, force, mniter, w, tol)
}
qr_factor <- function(F, L) {
.Call(`_fect_qr_factor`, F, L)
}
IND <- function(I) {
.Call(`_fect_IND`, I)
}
subfe <- function(Y, X, I, intercept) {
.Call(`_fect_subfe`, Y, X, I, intercept)
}
l_ub <- function(Y, F, I, r, force) {
.Call(`_fect_l_ub`, Y, F, I, r, force)
}
f_ub <- function(Y, L, I, r, force) {
.Call(`_fect_f_ub`, Y, L, I, r, force)
}
fe <- function(E, F_old, xi_old, force, r) {
.Call(`_fect_fe`, E, F_old, xi_old, force, r)
}
fe_ub <- function(E, I, F_old, xi_old, force, r) {
.Call(`_fect_fe_ub`, E, I, F_old, xi_old, force, r)
}
inter_fe_d <- function(Y, Y_fit0, FE0, X, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d`, Y, Y_fit0, FE0, X, r, force, mniter, w, tol)
}
inter_fe_d_ub <- function(Y, Y_fit0, FE0, X, I, r, force, mniter = 5000L, w = 1.0, tol = 1e-5) {
.Call(`_fect_inter_fe_d_ub`, Y, Y_fit0, FE0, X, I, r, force, mniter, w, tol)
}
Y_demean <- function(Y, force) {
.Call(`_fect_Y_demean`, Y, force)
}
fe_add <- function(alpha_Y, xi_Y, mu_Y, T, N, force) {
.Call(`_fect_fe_add`, alpha_Y, xi_Y, mu_Y, T, N, force)
}
panel_factor <- function(E, r) {
.Call(`_fect_panel_factor`, E, r)
}
panel_FE <- function(E, lambda, hard) {
.Call(`_fect_panel_FE`, E, lambda, hard)
}
ife <- function(E, force, mc, r, hard, lambda) {
.Call(`_fect_ife`, E, force, mc, r, hard, lambda)
}
inter_fe <- function(Y, X, r, force, beta0, tol = 1e-5) {
.Call(`_fect_inter_fe`, Y, X, r, force, beta0, tol)
}
inter_fe_ub <- function(Y, Y0, X, I, beta0, r, force, tol = 1e-5) {
.Call(`_fect_inter_fe_ub`, Y, Y0, X, I, beta0, r, force, tol)
}
fe_ad_iter <- function(Y, Y0, I, force, tolerate) {
.Call(`_fect_fe_ad_iter`, Y, Y0, I, force, tolerate)
}
fe_ad_covar_iter <- function(XX, xxinv, Y, Y0, I, beta0, force, tolerate) {
.Call(`_fect_fe_ad_covar_iter`, XX, xxinv, Y, Y0, I, beta0, force, tolerate)
}
fe_ad_inter_iter <- function(Y, Y0, I, force, mc, r, hard, lambda, tolerate) {
.Call(`_fect_fe_ad_inter_iter`, Y, Y0, I, force, mc, r, hard, lambda, tolerate)
}
fe_ad_inter_covar_iter <- function(XX, xxinv, Y, Y0, I, beta0, force, mc, r, hard, lambda, tolerate) {
.Call(`_fect_fe_ad_inter_covar_iter`, XX, xxinv, Y, Y0, I, beta0, force, mc, r, hard, lambda, tolerate)
}
beta_iter <- function(X, xxinv, Y, r, tolerate, beta0) {
.Call(`_fect_beta_iter`, X, xxinv, Y, r, tolerate, beta0)
}
inter_fe_mc <- function(Y, Y0, X, I, beta0, r, lambda, force, tol = 1e-5) {
.Call(`_fect_inter_fe_mc`, Y, Y0, X, I, beta0, r, lambda, force, tol)
}
|
library(e1071)
# Benchmark e1071::svm on one data set via manual 5-fold cross-validation.
# Reads "./<name>.csv", splits it into labels y and features X using
# helpers defined elsewhere in the project (take_lables_from_data_set,
# take_set_from_data_set -- 'lables' is the helper's actual spelling),
# and accumulates three error measures over the folds:
#   ERR -- misclassification rate
#   MAD -- mean absolute deviation of predicted vs. true class index
#   MSE -- mean squared deviation
# Returns c(name, ERR, MAD, MSE); c() coerces the numbers to character
# because `name` is a string.
# NOTE(review): there is no set.seed() before sample(), so folds (and
# results) differ between runs.
get_csv_and_use_it_for_testing<- function(name){
data_set <- read.csv(sprintf("./%s.csv",name))
y <- take_lables_from_data_set(data_set)
X <- take_set_from_data_set(data_set)
# Shuffle the row order, then deal rows into 5 folds by recycling 1:5.
spliting <- sample(1:nrow(data_set))
X_ <- split.data.frame(as.data.frame(X[spliting,]),rep(1:5))
y_ <- split.data.frame(as.data.frame(y[spliting]),rep(1:5))
ERR <- 0
MAD <- 0
MSE <- 0
for(i in 1:5){
# Fold i is the test set; the training set is everything else, selected
# by dropping the fold's original row names from X and y.
X_df <- data.frame(X_[[i]])
subX <- data.frame(X[-as.integer(rownames(X_df)),],stringsAsFactors = FALSE)
suby <- y[-as.integer(rownames(y_[[i]]))]
# 'etykieta' is Polish for "label": the class column as a factor over
# levels 1..max so svm() performs classification.
subX$etykieta <- factor(suby, levels = 1:max(suby))
# Move the label column to the front so formula(subX) reads
# etykieta ~ <all other columns>.
subX <- subX[,c(ncol(subX),1:(ncol(subX)-1))]
classifier <- e1071::svm(formula = formula(subX),data = subX)
res_agg <- predict(classifier, newdata = X_df)
# Factor predictions -> numeric class indices for the error measures.
res_agg <- as.numeric(res_agg)
ERR <- ERR+sum(ifelse((res_agg - y_[[i]])!=0,1,0))/length(res_agg)
MAD <- MAD+sum(abs(res_agg - y_[[i]]))/length(res_agg)
MSE <- MSE+sum(abs(res_agg - y_[[i]])^2)/length(res_agg)
}
# Average the accumulated per-fold measures.
ERR <- ERR/5
MAD <- MAD/5
MSE <- MSE/5
print(ERR)
print(MAD)
print(MSE)
return(c(name,ERR,MAD,MSE))
}
# Driver: run the benchmark on every data set and collect one result row
# per name into bench_results.
data_sets <- c("abalone","abalone_ord","affairs","ailerons","auto_ord","auto_riskness","bostonhousing","bostonhousing_ord","californiahousing","cement_strength","fireman_example","glass","kinematics","machine_ord","skill","stock_ord","winequality-red","winequality-white","wisconsin_breast_ord")
bench_results <- data.frame(name=character(),ERR=numeric(),MAD=numeric(),MSE=numeric(),stringsAsFactors = FALSE)
for(name in data_sets){
print(name)
bench_results[nrow(bench_results)+1,] <- get_csv_and_use_it_for_testing(name)
}
|
/projekt2/svm.R
|
no_license
|
michalMilewski-8/knnAlgorithImplementationAndBenchmarking
|
R
| false
| false
| 1,697
|
r
|
library(e1071)
get_csv_and_use_it_for_testing<- function(name){
data_set <- read.csv(sprintf("./%s.csv",name))
y <- take_lables_from_data_set(data_set)
X <- take_set_from_data_set(data_set)
spliting <- sample(1:nrow(data_set))
X_ <- split.data.frame(as.data.frame(X[spliting,]),rep(1:5))
y_ <- split.data.frame(as.data.frame(y[spliting]),rep(1:5))
ERR <- 0
MAD <- 0
MSE <- 0
for(i in 1:5){
X_df <- data.frame(X_[[i]])
subX <- data.frame(X[-as.integer(rownames(X_df)),],stringsAsFactors = FALSE)
suby <- y[-as.integer(rownames(y_[[i]]))]
subX$etykieta <- factor(suby, levels = 1:max(suby))
subX <- subX[,c(ncol(subX),1:(ncol(subX)-1))]
classifier <- e1071::svm(formula = formula(subX),data = subX)
res_agg <- predict(classifier, newdata = X_df)
res_agg <- as.numeric(res_agg)
ERR <- ERR+sum(ifelse((res_agg - y_[[i]])!=0,1,0))/length(res_agg)
MAD <- MAD+sum(abs(res_agg - y_[[i]]))/length(res_agg)
MSE <- MSE+sum(abs(res_agg - y_[[i]])^2)/length(res_agg)
}
ERR <- ERR/5
MAD <- MAD/5
MSE <- MSE/5
print(ERR)
print(MAD)
print(MSE)
return(c(name,ERR,MAD,MSE))
}
data_sets <- c("abalone","abalone_ord","affairs","ailerons","auto_ord","auto_riskness","bostonhousing","bostonhousing_ord","californiahousing","cement_strength","fireman_example","glass","kinematics","machine_ord","skill","stock_ord","winequality-red","winequality-white","wisconsin_breast_ord")
bench_results <- data.frame(name=character(),ERR=numeric(),MAD=numeric(),MSE=numeric(),stringsAsFactors = FALSE)
for(name in data_sets){
print(name)
bench_results[nrow(bench_results)+1,] <- get_csv_and_use_it_for_testing(name)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamodb_operations.R
\name{dynamodb_describe_table}
\alias{dynamodb_describe_table}
\title{Returns information about the table, including the current status of the
table, when it was created, the primary key schema, and any indexes on
the table}
\usage{
dynamodb_describe_table(TableName)
}
\arguments{
\item{TableName}{[required] The name of the table to describe.}
}
\description{
Returns information about the table, including the current status of the
table, when it was created, the primary key schema, and any indexes on
the table.
If you issue a \code{DescribeTable} request immediately after a \code{CreateTable}
request, DynamoDB might return a \code{ResourceNotFoundException}. This is
because \code{DescribeTable} uses an eventually consistent query, and the
metadata for your table might not be available at that moment. Wait for
a few seconds, and then try the \code{DescribeTable} request again.
}
\section{Request syntax}{
\preformatted{svc$describe_table(
TableName = "string"
)
}
}
\examples{
\dontrun{
# This example describes the Music table.
svc$describe_table(
TableName = "Music"
)
}
}
\keyword{internal}
|
/cran/paws.database/man/dynamodb_describe_table.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 1,213
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dynamodb_operations.R
\name{dynamodb_describe_table}
\alias{dynamodb_describe_table}
\title{Returns information about the table, including the current status of the
table, when it was created, the primary key schema, and any indexes on
the table}
\usage{
dynamodb_describe_table(TableName)
}
\arguments{
\item{TableName}{[required] The name of the table to describe.}
}
\description{
Returns information about the table, including the current status of the
table, when it was created, the primary key schema, and any indexes on
the table.
If you issue a \code{DescribeTable} request immediately after a \code{CreateTable}
request, DynamoDB might return a \code{ResourceNotFoundException}. This is
because \code{DescribeTable} uses an eventually consistent query, and the
metadata for your table might not be available at that moment. Wait for
a few seconds, and then try the \code{DescribeTable} request again.
}
\section{Request syntax}{
\preformatted{svc$describe_table(
TableName = "string"
)
}
}
\examples{
\dontrun{
# This example describes the Music table.
svc$describe_table(
TableName = "Music"
)
}
}
\keyword{internal}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 80928
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 80928
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-133.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 30687
c no.of clauses 80928
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 80928
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-133.qdimacs 30687 80928 E1 [] 0 134 30153 80928 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-133/tlc02-nonuniform-depth-133.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 697
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 80928
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 80928
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-133.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 30687
c no.of clauses 80928
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 80928
c
c QBFLIB/Miller-Marin/trafficlight-controller/tlc02-nonuniform-depth-133.qdimacs 30687 80928 E1 [] 0 134 30153 80928 NONE
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/regression.R
\name{BinaryLogit}
\alias{BinaryLogit}
\title{\code{BinaryLogit} Binary Logit Regression.}
\usage{
BinaryLogit(formula, data, subset = NULL, weights = NULL, ...)
}
\arguments{
\item{formula}{An object of class \code{\link{formula}} (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ‘Details’.}
\item{data}{A \code{\link{data.frame}}.}
\item{subset}{An optional vector specifying a subset of observations to be used in the fitting process.}
\item{weights}{An optional vector of sampling weights.}
\item{...}{Additional arguments to be passed to \code{\link{lm}} or, if the data
is weighted, \code{\link[survey]{svyglm}}.}
}
\description{
\code{BinaryLogit} Binary Logit Regression.
}
|
/man/BinaryLogit.Rd
|
no_license
|
xtmwang/flipMultivariates
|
R
| false
| false
| 885
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/regression.R
\name{BinaryLogit}
\alias{BinaryLogit}
\title{\code{BinaryLogit} Binary Logit Regression.}
\usage{
BinaryLogit(formula, data, subset = NULL, weights = NULL, ...)
}
\arguments{
\item{formula}{An object of class \code{\link{formula}} (or one that can be coerced to that class): a symbolic description of the model to be fitted. The details of model specification are given under ‘Details’.}
\item{data}{A \code{\link{data.frame}}.}
\item{subset}{An optional vector specifying a subset of observations to be used in the fitting process.}
\item{weights}{An optional vector of sampling weights.}
\item{...}{Additional arguments to be passed to \code{\link{lm}} or, if the data
is weighted, \code{\link[survey]{svyglm}}.}
}
\description{
\code{BinaryLogit} Binary Logit Regression.
}
|
###########################
## Liss panel data study
## predicting attrition
## raw data file: Peter Lugtig 18-2-2014
## clean data
###########################

## Read the SPSS export: one row per panel member, columns 7:56 hold the 50
## monthly questionnaires (value -1 = not invited -- TODO confirm coding).
library("foreign")
liss.dat <- read.spss("Participation_panelmember_fieldworkperiod.sav", use.value.labels = F, to.data.frame = T)

## Count how often each respondent received a questionnaire.
liss.dat$count <- apply(liss.dat[, 7:56], 1, FUN = function(x) sum(x != -1, na.rm = TRUE))

## Select the respondents that got at least one questionnaire.
my.dat.wide <- as.data.frame(liss.dat[which(liss.dat$count != 0), ])

## Response behaviour per respondent: number of completed questionnaires
## (`som`) and the observed response probability (`prob`).
my.dat.wide$som <- apply(my.dat.wide[, 7:56], 1, FUN = function(x) { sum(x == 1) })
my.dat.wide$prob <- my.dat.wide$som / my.dat.wide$count

## Some respondents appear to have dropped out before 11/2007; these have to
## be removed from the data set. Keep everyone who stopped after 200711 or
## never stopped (stoplid == NA). Vectorized replacement of the original
## element-wise loop (NA > 200711 yields NA, but is.na() catches those rows,
## matching the original `x > 200711 | is.na(x)` condition).
my.dat.wide$select <- ifelse(my.dat.wide$stoplid > 200711 | is.na(my.dat.wide$stoplid), 1, 0)
my.dat.wide <- my.dat.wide[my.dat.wide$select == 1, ]

## We only want the respondents that started before the last questionnaire.
my.dat.wide <- my.dat.wide[my.dat.wide$startlid != 201111, ]
## 201107: no one answered this questionnaire (its column is deleted below),
## so respondents recorded as stopping then are shifted to 201108.
my.dat.wide$stoplid[which(my.dat.wide$stoplid == 201107)] <- 201108

## For the analysis we need to know *when* someone dropped out, so map the
## YYYYMM month labels onto questionnaire numbers 1..50.
nummering <- cbind(1:50, c(200711, 200712, 200801:200812, 200901:200912, 201001:201012, 201101:201112))
## match() replaces the original row-wise which() loop: NA stoplid (never
## dropped out) propagates to an NA questionnaire number automatically.
## Note: a non-NA month absent from the table now yields NA instead of an
## error, so unexpected codes are silently kept -- check for NA if needed.
my.dat.wide$stop <- nummering[match(my.dat.wide$stoplid, nummering[, 2]), 1]

my.dat.wide <- my.dat.wide[!is.na(my.dat.wide[, 2]), ] # some people don't have an id number (yet?)
my.dat.wide <- my.dat.wide[, -51] # July 2011 has 0 responses
my.dat.wide <- my.dat.wide[order(my.dat.wide[, 2]), ]
write.table(my.dat.wide, "liss_wide.txt", sep = " ")

## The analysis will be a replay of a data stream; for that purpose we
## change the data file from wide to long format.
my.dat.long <- data.frame(id = numeric(0), response = numeric(0),
                          vragenlijst_nummer = numeric(0), prob = numeric(0), stop = numeric(0))
## 7th variable is November 2007: it initialises the long data set.
## NOTE(review): column indices 58 (prob) and 60 (stop) are position based
## and assume the column layout built above -- confirm if columns change.
my.dat.long <- rbind(my.dat.long, cbind(id = my.dat.wide[my.dat.wide[, 7] != -1, 2],
                                        response = my.dat.wide[my.dat.wide[, 7] != -1, 7],
                                        vragenlijst_nummer = rep(1, length(my.dat.wide[my.dat.wide[, 7] != -1, 7])),
                                        prob = my.dat.wide[my.dat.wide[, 7] != -1, 58], stop = my.dat.wide[my.dat.wide[, 7] != -1, 60]))
## Randomly order the respondents within a questionnaire; the order of the
## questionnaires themselves is maintained.
set.seed(864531)
## 50 questionnaires: the first initialised the data set above, and one was
## dropped because it had no responses at all.
for (i in 1:48) {
  nieuwe_lijst <- my.dat.wide[my.dat.wide[, 7 + i] != -1, c(2, 7 + i, 58, 60)]
  new.order <- sample(nrow(nieuwe_lijst))
  nieuwe_mensen <- nieuwe_lijst[new.order, ]
  my.dat.long <- rbind(my.dat.long, cbind(id = nieuwe_mensen[, 1],
                                          response = nieuwe_mensen[, 2],
                                          vragenlijst_nummer = rep(i + 1, nrow(nieuwe_mensen)),
                                          prob = nieuwe_mensen[, 3], stop = nieuwe_mensen[, 4]))
}
## Drop observations that fall after the respondent's registered drop-out.
## BUG FIX: the original `x[-which(cond), ]` removes *all* rows when which()
## matches nothing (negative-indexing with integer(0) selects zero rows);
## guard with a length check. which() ignores NA, so rows with NA stop
## (never dropped out) are kept, as before.
late.obs <- which(my.dat.long$stop < my.dat.long$vragenlijst_nummer)
if (length(late.obs) > 0) {
  my.dat.long <- my.dat.long[-late.obs, ]
}
write.table(my.dat.long, "liss_long.txt", sep = " ")
|
/liss.R
|
no_license
|
L-Ippel/EstimatingShrinkageFactors_CSDA
|
R
| false
| false
| 3,523
|
r
|
###########################
## Liss panel data study
## predicting attrition
## raw data file: Peter Lugtig 18-2-2014
## clean data
###########################

## Read the SPSS export: one row per panel member, columns 7:56 hold the 50
## monthly questionnaires (value -1 = not invited -- TODO confirm coding).
library("foreign")
liss.dat <- read.spss("Participation_panelmember_fieldworkperiod.sav", use.value.labels = F, to.data.frame = T)

## Count how often each respondent received a questionnaire.
liss.dat$count <- apply(liss.dat[, 7:56], 1, FUN = function(x) sum(x != -1, na.rm = TRUE))

## Select the respondents that got at least one questionnaire.
my.dat.wide <- as.data.frame(liss.dat[which(liss.dat$count != 0), ])

## Response behaviour per respondent: number of completed questionnaires
## (`som`) and the observed response probability (`prob`).
my.dat.wide$som <- apply(my.dat.wide[, 7:56], 1, FUN = function(x) { sum(x == 1) })
my.dat.wide$prob <- my.dat.wide$som / my.dat.wide$count

## Some respondents appear to have dropped out before 11/2007; these have to
## be removed from the data set. Keep everyone who stopped after 200711 or
## never stopped (stoplid == NA). Vectorized replacement of the original
## element-wise loop (NA > 200711 yields NA, but is.na() catches those rows,
## matching the original `x > 200711 | is.na(x)` condition).
my.dat.wide$select <- ifelse(my.dat.wide$stoplid > 200711 | is.na(my.dat.wide$stoplid), 1, 0)
my.dat.wide <- my.dat.wide[my.dat.wide$select == 1, ]

## We only want the respondents that started before the last questionnaire.
my.dat.wide <- my.dat.wide[my.dat.wide$startlid != 201111, ]
## 201107: no one answered this questionnaire (its column is deleted below),
## so respondents recorded as stopping then are shifted to 201108.
my.dat.wide$stoplid[which(my.dat.wide$stoplid == 201107)] <- 201108

## For the analysis we need to know *when* someone dropped out, so map the
## YYYYMM month labels onto questionnaire numbers 1..50.
nummering <- cbind(1:50, c(200711, 200712, 200801:200812, 200901:200912, 201001:201012, 201101:201112))
## match() replaces the original row-wise which() loop: NA stoplid (never
## dropped out) propagates to an NA questionnaire number automatically.
## Note: a non-NA month absent from the table now yields NA instead of an
## error, so unexpected codes are silently kept -- check for NA if needed.
my.dat.wide$stop <- nummering[match(my.dat.wide$stoplid, nummering[, 2]), 1]

my.dat.wide <- my.dat.wide[!is.na(my.dat.wide[, 2]), ] # some people don't have an id number (yet?)
my.dat.wide <- my.dat.wide[, -51] # July 2011 has 0 responses
my.dat.wide <- my.dat.wide[order(my.dat.wide[, 2]), ]
write.table(my.dat.wide, "liss_wide.txt", sep = " ")

## The analysis will be a replay of a data stream; for that purpose we
## change the data file from wide to long format.
my.dat.long <- data.frame(id = numeric(0), response = numeric(0),
                          vragenlijst_nummer = numeric(0), prob = numeric(0), stop = numeric(0))
## 7th variable is November 2007: it initialises the long data set.
## NOTE(review): column indices 58 (prob) and 60 (stop) are position based
## and assume the column layout built above -- confirm if columns change.
my.dat.long <- rbind(my.dat.long, cbind(id = my.dat.wide[my.dat.wide[, 7] != -1, 2],
                                        response = my.dat.wide[my.dat.wide[, 7] != -1, 7],
                                        vragenlijst_nummer = rep(1, length(my.dat.wide[my.dat.wide[, 7] != -1, 7])),
                                        prob = my.dat.wide[my.dat.wide[, 7] != -1, 58], stop = my.dat.wide[my.dat.wide[, 7] != -1, 60]))
## Randomly order the respondents within a questionnaire; the order of the
## questionnaires themselves is maintained.
set.seed(864531)
## 50 questionnaires: the first initialised the data set above, and one was
## dropped because it had no responses at all.
for (i in 1:48) {
  nieuwe_lijst <- my.dat.wide[my.dat.wide[, 7 + i] != -1, c(2, 7 + i, 58, 60)]
  new.order <- sample(nrow(nieuwe_lijst))
  nieuwe_mensen <- nieuwe_lijst[new.order, ]
  my.dat.long <- rbind(my.dat.long, cbind(id = nieuwe_mensen[, 1],
                                          response = nieuwe_mensen[, 2],
                                          vragenlijst_nummer = rep(i + 1, nrow(nieuwe_mensen)),
                                          prob = nieuwe_mensen[, 3], stop = nieuwe_mensen[, 4]))
}
## Drop observations that fall after the respondent's registered drop-out.
## BUG FIX: the original `x[-which(cond), ]` removes *all* rows when which()
## matches nothing (negative-indexing with integer(0) selects zero rows);
## guard with a length check. which() ignores NA, so rows with NA stop
## (never dropped out) are kept, as before.
late.obs <- which(my.dat.long$stop < my.dat.long$vragenlijst_nummer)
if (length(late.obs) > 0) {
  my.dat.long <- my.dat.long[-late.obs, ]
}
write.table(my.dat.long, "liss_long.txt", sep = " ")
|
## Read a Newick tree, remove its root, and write the unrooted tree out.
library(ape)
phylo <- read.tree("10090_0.txt")
write.tree(unroot(phylo), file = "10090_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/10090_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 137
|
r
|
## Read a Newick tree, remove its root, and write the unrooted tree out.
library(ape)
phylo <- read.tree("10090_0.txt")
write.tree(unroot(phylo), file = "10090_0_unrooted.txt")
|
## Extracted usage examples for checkarg::isNonZeroIntegerOrInfVectorOrNull().
## Each call validates its first argument; `stopIfNot = TRUE` switches from
## returning FALSE to throwing, and `default` substitutes a value when the
## argument is NULL (behaviour shown by the inline comments below --
## presumably from the package's generated example file; confirm against
## the checkarg documentation).
library(checkarg)
### Name: isNonZeroIntegerOrInfVectorOrNull
### Title: Wrapper for the checkarg function, using specific parameter
### settings.
### Aliases: isNonZeroIntegerOrInfVectorOrNull
### ** Examples
isNonZeroIntegerOrInfVectorOrNull(2)
# returns TRUE (argument is valid)
isNonZeroIntegerOrInfVectorOrNull("X")
# returns FALSE (argument is invalid)
#isNonZeroIntegerOrInfVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isNonZeroIntegerOrInfVectorOrNull(2, default = 1)
# returns 2 (the argument, rather than the default, since it is not NULL)
#isNonZeroIntegerOrInfVectorOrNull("X", default = 1)
# throws exception with message defined by message and argumentName parameters
isNonZeroIntegerOrInfVectorOrNull(NULL, default = 1)
# returns 1 (the default, rather than the argument, since it is NULL)
|
/data/genthat_extracted_code/checkarg/examples/isNonZeroIntegerOrInfVectorOrNull.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 903
|
r
|
## Extracted usage examples for checkarg::isNonZeroIntegerOrInfVectorOrNull().
## Each call validates its first argument; `stopIfNot = TRUE` switches from
## returning FALSE to throwing, and `default` substitutes a value when the
## argument is NULL (behaviour shown by the inline comments below --
## presumably from the package's generated example file; confirm against
## the checkarg documentation).
library(checkarg)
### Name: isNonZeroIntegerOrInfVectorOrNull
### Title: Wrapper for the checkarg function, using specific parameter
### settings.
### Aliases: isNonZeroIntegerOrInfVectorOrNull
### ** Examples
isNonZeroIntegerOrInfVectorOrNull(2)
# returns TRUE (argument is valid)
isNonZeroIntegerOrInfVectorOrNull("X")
# returns FALSE (argument is invalid)
#isNonZeroIntegerOrInfVectorOrNull("X", stopIfNot = TRUE)
# throws exception with message defined by message and argumentName parameters
isNonZeroIntegerOrInfVectorOrNull(2, default = 1)
# returns 2 (the argument, rather than the default, since it is not NULL)
#isNonZeroIntegerOrInfVectorOrNull("X", default = 1)
# throws exception with message defined by message and argumentName parameters
isNonZeroIntegerOrInfVectorOrNull(NULL, default = 1)
# returns 1 (the default, rather than the argument, since it is NULL)
|
\name{getQuantilesParams}
\alias{getQuantilesParams}
\title{
A function to extract the quantiles and parameters
}
\description{
This extracts the quantiles and parameters.
}
\usage{
getQuantilesParams(fit.i, qFunc = qLOGNO, quantiles = seq(0.006,
0.996, length.out = 1000), linksq = c(identity, exp, NULL,
NULL), freeParams, fixedParams)
}
\arguments{
\item{fit.i}{
a (non-empty) object of class gamlss, which is the fitted distribution.
}
\item{qFunc}{
a (non-empty) quantile generating function from gamlss.
}
\item{quantiles}{
an optional numeric vector of the desired quantiles.
}
\item{linksq}{
a (non-empty) vector of link functions.
}
\item{freeParams}{
a (non-empty) logical vector indicating whether parameters are fixed == FALSE or free == TRUE.
}
\item{fixedParams}{
a (non-empty) numeric vector of fixed parameter values.
}
}
\details{
Extracts the quantile and parameter estimates.
}
\value{
Returns a list with: samps = the quantiles extracted at the locations specified in quantiles and params = the parameter values of the fitted model.
}
\references{
FIXME - references
}
\examples{
#not run, this function is used internally
}
|
/man/getQuantilesParams.Rd
|
no_license
|
scarpino/binequality
|
R
| false
| false
| 1,163
|
rd
|
\name{getQuantilesParams}
\alias{getQuantilesParams}
\title{
A function to extract the quantiles and parameters
}
\description{
This extracts the quantiles and parameters.
}
\usage{
getQuantilesParams(fit.i, qFunc = qLOGNO, quantiles = seq(0.006,
0.996, length.out = 1000), linksq = c(identity, exp, NULL,
NULL), freeParams, fixedParams)
}
\arguments{
\item{fit.i}{
a (non-empty) object of class gamlss, which is the fitted distribution.
}
\item{qFunc}{
a (non-empty) quantile generating function from gamlss.
}
\item{quantiles}{
an optional numeric vector of the desired quantiles.
}
\item{linksq}{
a (non-empty) vector of link functions.
}
\item{freeParams}{
a (non-empty) logical vector indicating whether parameters are fixed == FALSE or free == TRUE.
}
\item{fixedParams}{
a (non-empty) numeric vector of fixed parameter values.
}
}
\details{
Extracts the quantile and parameter estimates.
}
\value{
Returns a list with: samps = the quantiles extracted at the locations specified in quantiles and params = the parameter values of the fitted model.
}
\references{
FIXME - references
}
\examples{
#not run, this function is used internally
}
|
## Extracted usage examples for magclass::mcalc(), which evaluates a formula
## over elements of a magpie object's data dimension (here scenarios A2/B1)
## and either appends the result (`append = TRUE`) or returns it.
library(magclass)
### Name: mcalc
### Title: mcalc
### Aliases: mcalc mcalc<-
### ** Examples
data(population_magpie)
population_magpie
# Append a new scenario X12 computed as the product of A2 and B1.
mcalc(population_magpie,X12 ~ A2*B1,append=TRUE)
population_magpie
# Return (without appending) a weighted combination of A2 and B1.
mcalc(population_magpie,`Nearly B1` ~ 0.5*A2 + 99.5*B1)
|
/data/genthat_extracted_code/magclass/examples/mcalc.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 275
|
r
|
## Extracted usage examples for magclass::mcalc(), which evaluates a formula
## over elements of a magpie object's data dimension (here scenarios A2/B1)
## and either appends the result (`append = TRUE`) or returns it.
library(magclass)
### Name: mcalc
### Title: mcalc
### Aliases: mcalc mcalc<-
### ** Examples
data(population_magpie)
population_magpie
# Append a new scenario X12 computed as the product of A2 and B1.
mcalc(population_magpie,X12 ~ A2*B1,append=TRUE)
population_magpie
# Return (without appending) a weighted combination of A2 and B1.
mcalc(population_magpie,`Nearly B1` ~ 0.5*A2 + 99.5*B1)
|
\name{tracks}
\alias{tracks}
\alias{alignPlots}
\alias{align.plots}
\alias{Tracks-class}
\alias{xlim}
\alias{xlim,Tracks-method}
\alias{xlim,numeric-method}
\alias{xlim,IRanges-method}
\alias{xlim,GRanges-method}
\alias{xlim<-}
\alias{xlim<-,Tracks,numeric-method}
\alias{xlim<-,Tracks,IRanges-method}
\alias{xlim<-,Tracks,GRanges-method}
\alias{summary}
\alias{summary,Tracks-method}
\alias{show}
\alias{show,Tracks-method}
\alias{print}
\alias{print,Tracks-method}
\alias{Arith}
\alias{Arith,Tracks,ANY-method}
\alias{reset}
\alias{reset,Tracks-method}
\alias{backup}
\alias{backup,Tracks-method}
\alias{}
\title{Tracks for genomic graphics}
\usage{
tracks(..., heights, xlim, xlab = NULL, main = NULL,
title = NULL, theme = NULL,
track.plot.color = NULL,
track.bg.color = NULL,
main.height = unit(2, "lines"),
scale.height = unit(2, "lines"),
xlab.height = unit(2, "lines"),
padding = unit(-1, "lines"),
label.bg.color = "white",
label.bg.fill = "gray80",
label.text.color = "black",
label.text.cex = 1,
label.width = unit(2.5, "lines"))
}
\arguments{
\item{...}{plots of class ggplot, generated from ggplot2 or ggbio.}
\item{heights}{numeric vector of the same length of passed graphic
object to indicate the ratio of each track.}
\item{xlim}{limits on x. could be \code{\linkS4class{IRanges}}, \code{\linkS4class{GRanges}}, \code{numeric} value}
\item{xlab}{label for x axis.}
\item{main}{title for the tracks.}
\item{title}{title for the tracks, alias like main.}
\item{theme}{theme object used for building tracks, this will
set to default, which could be reseted later. }
\item{track.plot.color}{
Vector of characters of length 1 or the same length of passed plots,
background color for each track, default is white.
}
\item{track.bg.color}{
background color for the whole tracks.
}
\item{main.height}{
unit. Height to control the title track height.
}
\item{scale.height}{
unit. Height to control the scale track height.
}
\item{xlab.height}{
unit. Height to control the xlab track height.
}
\item{padding}{
single numeric value or unit, if numeric value, the unit would be
"lines" by default.
}
\item{label.bg.color}{
track labeling background rectangle border color.
}
\item{label.bg.fill}{
track labeling background fill color.
}
\item{label.text.color}{
track labeling text color.
}
\item{label.text.cex}{
track labeling text size.
}
\item{label.width}{
track labeling size.
}
}
\section{Track class}{
constructor \code{tracks} will return a Tracks object, which has
following slots.
\describe{
\item{\code{grobs}}{
a \code{ggplotGrobList} object contains a list of \code{ggplot}
object, which is our passed graphics.
}
\item{\code{backup}}{
a backup of all the slots for holding the original tracks, so users
could edit it and reset it back at any time later, and \code{backup}
method will reset the backupped copy.
}
\item{\code{ylim}}{
y limits for each plot.
}
\item{\code{labeled}}{
vector of logical value indicates whether a track is labeled or not,
for \code{labeled} attributes please check utilities section.
}
\item{\code{mutable}}{
vector of logical value indicates whether a track is mutable for
theme editing or not, for \code{mutable} attributes please check utilities section.
}
\item{\code{hasAxis}}{
vector of logical value indicates whether a track has axis or not,
for \code{hasAxis} attributes please check utilities section.
}
\item{\code{heights, xlim, xlab, main, title, theme, fixed, track.plot.color,
track.bg.color, main.height, scale.height, xlab.height, padding,
label.bg.color, label.bg.fill, label.text.color, label.text.cex, label.width}}{
those slots are described in arguments section for constructor.
}
}
}
\section{Utilities}{
Please check examples for usage.
\describe{
\item{\code{summary(object)}}{
summary information about tracks object.
}
\item{\code{fixed(x)}, \code{fixed(x) <- value}}{ \code{x} is the
ggplot object, this controls if a track has a fixed x scale or not, if the \code{fixed}
attributes is \code{TRUE}, then when you pass this plot to a
tracks, this plot won't be re-aligned with other tracks and will
keep the original x-axis, this allow you to pass some plot like
ideogram. \code{fixed} function will return a logical value
}
\item{\code{labeled(x), labeled(x) <- value}}{\code{x} is the
ggplot object, if you pass named graphics into \code{tracks}, it will create the
labels on the left for you. Several ways supported to name it. You can
pass a list of graphics with names. Or you can use
\code{tracks('name1' = p1, 'name 2' = p2, ...)} with quotes for
complicated words or simply \code{tracks(part1 = p1, part = p2, ...)}.
}
\item{\code{mutable(x), mutable(x) <- value}}{
\code{x} is the ggplot object, this controls whether a plot in the
tracks mutable to theme changing or not, when you use \code{+}
method for Tracks object, add-on edit will only be applied to the
the mutable plots.
}
\item{\code{bgColor(x), bgColor(x) <- value}}{
\code{x} is the ggplot object, this change the background color
for single plot shown in the tracks.
}
\item{\code{xlim(x), xlim(x) <- value}}{
when \code{x} is the numeric value, it calls
ggplot2::coord_cartesian(xlim = ...) method, we doesn't use
ggplot2::xlim() for the reason it will cut data outside the range,
and we believe the best behavior would be zoom-in/out like
most browser. when \code{x} is \code{\linkS4class{IRanges}},
\code{\linkS4class{GRanges}}, it get the range and passed to
ggplot2::coord_cartesian function.
when \code{x} is Tracks object, \code{xlim(x)} will
return x limits for that tracks. \code{xlim(x) <- value}
replace method only works for Tracks object. \code{value} could be
\code{numeric}, \code{\linkS4class{IRanges}},
\code{\linkS4class{GRanges}} object. This will change the x
limits associated with tracks.
\code{+ xlim(obj)}:\code{obj} is the numeric range, or \code{\linkS4class{IRanges}},
\code{\linkS4class{GRanges}} object.
\code{+ coord_cartesian()}:
please read manual in ggplot2, this controls both xlim an
ylim, only accept numerical range.
}
\item{\code{+}}{
The most nice features about \code{\linkS4class{Tracks}} object is
the one inherited from ggplot2's components additive features,
with \code{+} method you can use any \code{theme} object and
utilities in ggplot2 package, to add them on a
\code{\linkS4class{Tracks}} object, for example, if x is our
\code{\linkS4class{Tracks}} object, \code{x + theme} would apply
theme to any plots in the tracks except those are immutable.
}
}
}
\section{Backup and reset}{
\describe{
\item{reset(obj)}{
\code{obj} is the Tracks object, this reset the tracks back to
original or backuped version.
}
\item{backup(obj)}{
\code{obj} is the Tracks object, this clear previous backup and
use current setting for a new backup.
}
}
}
\description{
\code{tracks} is a convenient constructor for binding graphics as tracks. You don't have
to worry about adjusting different graphics; \code{tracks} does that for you. It is NOT
limited to binding genomic tracks: you can use this function to bind any
tracks that share the same definition of the x axis, for example, sets of time series
plots you made.
Tracks view is most common way to viewing genome features and annotation data
and widely used by most genome browsers. Our assumption is that, most graphics
you made with ggbio or by yourself using ggplot2, are almost always sitting
on the genomic coordinates or the same x axis. And to compare annotation
information along with genome features, we need to align those plots on exactly
the same x axis in order to form your hypothesis. This function leaves you the
flexibility to construct each tracks separately with worrying your alignments
later.
}
\details{
\code{tracks} did following modification for passed plots.
\itemize{
\item{}{
remove x-axis, ticks, xlab and title for each track and add scales
at bottom. We suppose a new xlab and title would be provided by the
\code{tracks} function for the whole tracks, but we still keep
individual's y axis.
}
\item{}{
align x-scale limits to make sure every plots sitting on exactly
the same x scale.
}
\item{}{
squeezing plots together to some extent.
}
\item{
labeling tracks if names are provided, please check utilities section
about \code{labeled} method.
}
\item{
return a track object. This would allow many features introduced
in this manual.
}
}
}
\seealso{\code{\link{align.plots}}}
\value{
A \code{Tracks} object.
}
\examples{
## make a simulated time series data set
df1 <- data.frame(time = 1:100, score = sin((1:100)/20)*10)
p1 <- qplot(data = df1, x = time, y = score, geom = "line")
df2 <- data.frame(time = 30:120, score = sin((30:120)/20)*10, value = rnorm(120-30 + 1))
p2 <- ggplot(data = df2, aes(x = time, y = score)) +
geom_line() + geom_point(size = 4, aes(color = value))
## check p2
p1
## check p2
p2
## binding
tracks(p1, p2)
## or
tks <- tracks(p1, p2)
tks
## combine
c(tks, tks)
tks + tks
cbind(tks, tks)
rbind(tks, tks) ## different wth c()!
library(grid)
x <- ggbio:::get_gtable(tks)
grid.draw(cbind(x, x))
## labeling: default labeling a named graphic
## simply pass a name with it
tracks(time1 = p1, time2 = p2)
## or pass a named list with it
lst <- list(time1 = p1, time2 = p2)
tracks(lst)
## more complicated case please use quotes
tracks(time1 = p1, "second time" = p2)
## set heights
tracks(time1 = p1, time2 = p2, heights = c(1, 3))
## if you want to disable label arbitrarily
## default label is always TRUE
labeled(p2)
labeled(p2) <- FALSE
## set labeled to FALSE, remove label even the plot has a name
tracks(time1 = p1, time2 = p2)
labeled(p2) <- TRUE
## fix a plot, not synchronize with other plots
p3 <- p1
## default is always FALSE
fixed(p3)
## set to TRUE
fixed(p3) <- TRUE
fixed(p3)
tracks(time1 = p1, time2 = p2, "time3(fixed)" = p3)
fixed(p3) <- FALSE
## otherwise you could run
%% tracks(time1 = p1, time2 = p2, "time3(fixed)" = p3, fixed = c(FALSE, FALSE, TRUE))
## control axis
hasAxis(p1)
hasAxis(p1) <- TRUE
# ready for weird looking
tracks(time1 = p1, time2 = p2)
# set it back
hasAxis(p1) <- FALSE
## mutable
mutable(p1)
tracks(time1 = p1, time2 = p2) + theme_bw()
mutable(p1) <- FALSE
# mutable for "+" method
tracks(time1 = p1, time2 = p2) + theme_bw()
mutable(p1) <- TRUE
## bgColor
bgColor(p1)
tracks(time1 = p1, time2 = p2)
bgColor(p1) <- "brown"
# mutable for "+" method
tracks(time1 = p1, time2 = p2)
# set it back
bgColor(p1) <- "white"
## apply a theme to each track
tks <- tracks(time1 = p1, time2 = p2) + theme_bw()
tks
reset(tks)
## store it with tracks
tks <- tracks(time1 = p1, time2 = p2, theme = theme_bw())
tks
tks <- tks + theme_gray()
tks
## reset will be introduced later
reset(tks)
## apply a pre-defiend theme for tracks!
tracks(time1 = p1, time2 = p2) + theme_tracks_sunset()
tracks(p1, p2) + theme_tracks_sunset()
## change limits
tracks(time1 = p1, time2 = p2) + xlim(c(1, 40))
tracks(time1 = p1, time2 = p2) + xlim(1, 40)
tracks(time1 = p1, time2 = p2) + coord_cartesian(xlim = c(1, 40))
# change y
tracks(time1 = p1, time2 = p2) + xlim(1, 40) + ylim(0, 10)
library(GenomicRanges)
gr <- GRanges("chr", IRanges(1, 40))
# GRanges
tracks(time1 = p1, time2 = p2) + xlim(gr)
# IRanges
tracks(time1 = p1, time2 = p2) + xlim(ranges(gr))
tks <- tracks(time1 = p1, time2 = p2)
xlim(tks)
xlim(tks) <- c(1, 35)
xlim(tks) <- gr
xlim(tks) <- ranges(gr)
## xlab, title
tracks(time1 = p1, time2 = p2, xlab = "time")
tracks(time1 = p1, time2 = p2, main = "title")
tracks(time1 = p1, time2 = p2, title = "title")
tracks(time1 = p1, time2 = p2, xlab = "time", title = "title") + theme_tracks_sunset()
## backup and restore
tks <- tracks(time1 = p1, time2 = p2)
tks
tks <- tks + xlim(1, 40)
tks
reset(tks)
tks <- tks + xlim(1, 40)
tks
tks <- backup(tks)
tks <- tks + theme_bw()
tks
reset(tks)
## padding(need to be fixed for more delicate control)
tracks(time1 = p1, time2 = p2, padding = 2)
## track color
tracks(time1 = p1, time2 = p2, track.bg.color = "yellow")
tracks(time1 = p1, time2 = p2, track.plot.color = c("yellow", "brown"))
}
\author{Tengfei Yin}
|
/man/tracks.Rd
|
no_license
|
dronga/ggbio
|
R
| false
| false
| 12,903
|
rd
|
\name{tracks}
\alias{tracks}
\alias{alignPlots}
\alias{align.plots}
\alias{Tracks-class}
\alias{xlim}
\alias{xlim,Tracks-method}
\alias{xlim,numeric-method}
\alias{xlim,IRanges-method}
\alias{xlim,GRanges-method}
\alias{xlim<-}
\alias{xlim<-,Tracks,numeric-method}
\alias{xlim<-,Tracks,IRanges-method}
\alias{xlim<-,Tracks,GRanges-method}
\alias{summary}
\alias{summary,Tracks-method}
\alias{show}
\alias{show,Tracks-method}
\alias{print}
\alias{print,Tracks-method}
\alias{Arith}
\alias{Arith,Tracks,ANY-method}
\alias{reset}
\alias{reset,Tracks-method}
\alias{backup}
\alias{backup,Tracks-method}
\alias{}
\title{Tracks for genomic graphics}
\usage{
tracks(..., heights, xlim, xlab = NULL, main = NULL,
title = NULL, theme = NULL,
track.plot.color = NULL,
track.bg.color = NULL,
main.height = unit(2, "lines"),
scale.height = unit(2, "lines"),
xlab.height = unit(2, "lines"),
padding = unit(-1, "lines"),
label.bg.color = "white",
label.bg.fill = "gray80",
label.text.color = "black",
label.text.cex = 1,
label.width = unit(2.5, "lines"))
}
\arguments{
\item{...}{plots of class ggplot, generated from ggplot2 or ggbio.}
\item{heights}{numeric vector of the same length of passed graphic
object to indicate the ratio of each track.}
\item{xlim}{limits on x. could be \code{\linkS4class{IRanges}}, \code{\linkS4class{GRanges}}, \code{numeric} value}
\item{xlab}{label for x axis.}
\item{main}{title for the tracks.}
\item{title}{title for the tracks, alias like main.}
\item{theme}{theme object used for building tracks, this will
set to default, which could be reseted later. }
\item{track.plot.color}{
Vector of characters of length 1 or the same length of passed plots,
background color for each track, default is white.
}
\item{track.bg.color}{
background color for the whole tracks.
}
\item{main.height}{
unit. Height to control the title track height.
}
\item{scale.height}{
unit. Height to control the scale track height.
}
\item{xlab.height}{
unit. Height to control the xlab track height.
}
\item{padding}{
single numeric value or unit, if numeric value, the unit would be
"lines" by default.
}
\item{label.bg.color}{
track labeling background rectangle border color.
}
\item{label.bg.fill}{
track labeling background fill color.
}
\item{label.text.color}{
track labeling text color.
}
\item{label.text.cex}{
track labeling text size.
}
\item{label.width}{
track labeling size.
}
}
\section{Track class}{
constructor \code{tracks} will return a Tracks object, which has
following slots.
\describe{
\item{\code{grobs}}{
a \code{ggplotGrobList} object contains a list of \code{ggplot}
object, which is our passed graphics.
}
\item{\code{backup}}{
a backup of all the slots for holding the original tracks, so users
could edit it and reset it back at any time later, and \code{backup}
method will reset the backupped copy.
}
\item{\code{ylim}}{
y limits for each plot.
}
\item{\code{labeled}}{
vector of logical value indicates whether a track is labeled or not,
for \code{labeled} attributes please check utilities section.
}
\item{\code{mutable}}{
vector of logical value indicates whether a track is mutable for
theme editing or not, for \code{mutable} attributes please check utilities section.
}
\item{\code{hasAxis}}{
vector of logical value indicates whether a track has axis or not,
for \code{hasAxis} attributes please check utilities section.
}
\item{\code{heights, xlim, xlab, main, title, theme, fixed, track.plot.color,
track.bg.color, main.height, scale.height, xlab.height, padding,
label.bg.color, label.bg.fill, label.text.color, label.text.cex, label.width}}{
those slots are described in arguments section for constructor.
}
}
}
\section{Utilities}{
Please check examples for usage.
\describe{
\item{\code{summary(object)}}{
summary information about tracks object.
}
\item{\code{fixed(x)}, \code{fixed(x) <- value}}{ \code{x} is the
ggplot object, this controls if a track has a fixed x scale or not, if the \code{fixed}
attributes is \code{TRUE}, then when you pass this plot to a
tracks, this plot won't be re-aligned with other tracks and will
keep the original x-axis, this allow you to pass some plot like
ideogram. \code{fixed} function will return a logical value
}
\item{\code{labeled(x), labeled(x) <- value}}{\code{x} is the
ggplot object, if you pass named graphics into \code{tracks}, it will create the
labels on the left for you. Several ways supported to name it. You can
pass a list of graphics with names. Or you can use
\code{tracks('name1' = p1, 'name 2' = p2, ...)} with quotes for
complicated words or simply \code{tracks(part1 = p1, part = p2, ...)}.
}
\item{\code{mutable(x), mutable(x) <- value}}{
\code{x} is the ggplot object, this controls whether a plot in the
tracks mutable to theme changing or not, when you use \code{+}
method for Tracks object, add-on edit will only be applied to the
the mutable plots.
}
\item{\code{bgColor(x), bgColor(x) <- value}}{
\code{x} is the ggplot object, this change the background color
for single plot shown in the tracks.
}
\item{\code{xlim(x), xlim(x) <- value}}{
when \code{x} is the numeric value, it calls
ggplot2::coord_cartesian(xlim = ...) method, we doesn't use
ggplot2::xlim() for the reason it will cut data outside the range,
and we believe the best behavior would be zoom-in/out like
most browser. when \code{x} is \code{\linkS4class{IRanges}},
\code{\linkS4class{GRanges}}, it get the range and passed to
ggplot2::coord_cartesian function.
when \code{x} is Tracks object, \code{xlim(x)} will
return x limits for that tracks. \code{xlim(x) <- value}
replace method only works for Tracks object. \code{value} could be
\code{numeric}, \code{\linkS4class{IRanges}},
\code{\linkS4class{GRanges}} object. This will change the x
limits associated with tracks.
\code{+ xlim(obj)}:\code{obj} is the numeric range, or \code{\linkS4class{IRanges}},
\code{\linkS4class{GRanges}} object.
\code{+ coord_cartesian()}:
please read manual in ggplot2, this controls both xlim an
ylim, only accept numerical range.
}
\item{\code{+}}{
The most nice features about \code{\linkS4class{Tracks}} object is
the one inherited from ggplot2's components additive features,
with \code{+} method you can use any \code{theme} object and
utilities in ggplot2 package, to add them on a
\code{\linkS4class{Tracks}} object, for example, if x is our
\code{\linkS4class{Tracks}} object, \code{x + theme} would apply
theme to any plots in the tracks except those are immutable.
}
}
}
\section{Backup and reset}{
\describe{
\item{reset(obj)}{
\code{obj} is the Tracks object, this reset the tracks back to
original or backuped version.
}
\item{backup(obj)}{
\code{obj} is the Tracks object, this clear previous backup and
use current setting for a new backup.
}
}
}
\description{
\code{tracks} is a conventient constructor for bindind graphics as trakcs. You dont' have
to worry about adjusting different graphics, \code{tracks} did that for you. It's NOT
just limited to bind genomic tracks, you can use this function to bind any
tracks with the same defination of x axis, for example, sets of time series
plots you made.
Tracks view is most common way to viewing genome features and annotation data
and widely used by most genome browsers. Our assumption is that, most graphics
you made with ggbio or by yourself using ggplot2, are almost always sitting
on the genomic coordinates or the same x axis. And to compare annotation
information along with genome features, we need to align those plots on exactly
the same x axis in order to form your hypothesis. This function leaves you the
flexibility to construct each tracks separately with worrying your alignments
later.
}
\details{
\code{tracks} did following modification for passed plots.
\itemize{
\item{}{
remove x-axis, ticks, xlab and tile for each track and add scales
at bottom. We suppose a new xlab and title would be provided by the
\code{tracks} function for the whole tracks, but we still keep
individual's y axis.
}
\item{}{
align x-scale limits to make sure every plots sitting on exactly
the same x scale.
}
\item{}{
squeezing plots together to some extent.
}
\item{
labeling tracks if names are provided, please check utilities section
about \code{labeled} method.
}
\item{
return a track object. This would allow many features introduced
in this manual.
}
}
}
\seealso{\code{\link{align.plots}}}
\value{
A \code{Tracks} object.
}
\examples{
## make a simulated time series data set
df1 <- data.frame(time = 1:100, score = sin((1:100)/20)*10)
p1 <- qplot(data = df1, x = time, y = score, geom = "line")
df2 <- data.frame(time = 30:120, score = sin((30:120)/20)*10, value = rnorm(120-30 + 1))
p2 <- ggplot(data = df2, aes(x = time, y = score)) +
geom_line() + geom_point(size = 4, aes(color = value))
## check p2
p1
## check p2
p2
## binding
tracks(p1, p2)
## or
tks <- tracks(p1, p2)
tks
## combine
c(tks, tks)
tks + tks
cbind(tks, tks)
rbind(tks, tks) ## different wth c()!
library(grid)
x <- ggbio:::get_gtable(tks)
grid.draw(cbind(x, x))
## labeling: default labeling a named graphic
## simply pass a name with it
tracks(time1 = p1, time2 = p2)
## or pass a named list with it
lst <- list(time1 = p1, time2 = p2)
tracks(lst)
## more complicated case please use quotes
tracks(time1 = p1, "second time" = p2)
## set heights
tracks(time1 = p1, time2 = p2, heights = c(1, 3))
## if you want to disable label arbitrarily
## default label is always TRUE
labeled(p2)
labeled(p2) <- FALSE
## set labeled to FALSE, remove label even the plot has a name
tracks(time1 = p1, time2 = p2)
labeled(p2) <- TRUE
## fix a plot, not synchronize with other plots
p3 <- p1
## default is always FALSE
fixed(p3)
## set to TRUE
fixed(p3) <- TRUE
fixed(p3)
tracks(time1 = p1, time2 = p2, "time3(fixed)" = p3)
fixed(p3) <- FALSE
## otherwise you could run
%% tracks(time1 = p1, time2 = p2, "time3(fixed)" = p3, fixed = c(FALSE, FALSE, TRUE))
## control axis
hasAxis(p1)
hasAxis(p1) <- TRUE
# ready for weird looking
tracks(time1 = p1, time2 = p2)
# set it back
hasAxis(p1) <- FALSE
## mutable
mutable(p1)
tracks(time1 = p1, time2 = p2) + theme_bw()
mutable(p1) <- FALSE
# mutable for "+" method
tracks(time1 = p1, time2 = p2) + theme_bw()
mutable(p1) <- TRUE
## bgColor
bgColor(p1)
tracks(time1 = p1, time2 = p2)
bgColor(p1) <- "brown"
# mutable for "+" method
tracks(time1 = p1, time2 = p2)
# set it back
bgColor(p1) <- "white"
## apply a theme to each track
tks <- tracks(time1 = p1, time2 = p2) + theme_bw()
tks
reset(tks)
## store it with tracks
tks <- tracks(time1 = p1, time2 = p2, theme = theme_bw())
tks
tks <- tks + theme_gray()
tks
## reset will be introduced later
reset(tks)
## apply a pre-defiend theme for tracks!
tracks(time1 = p1, time2 = p2) + theme_tracks_sunset()
tracks(p1, p2) + theme_tracks_sunset()
## change limits
tracks(time1 = p1, time2 = p2) + xlim(c(1, 40))
tracks(time1 = p1, time2 = p2) + xlim(1, 40)
tracks(time1 = p1, time2 = p2) + coord_cartesian(xlim = c(1, 40))
# change y
tracks(time1 = p1, time2 = p2) + xlim(1, 40) + ylim(0, 10)
library(GenomicRanges)
gr <- GRanges("chr", IRanges(1, 40))
# GRanges
tracks(time1 = p1, time2 = p2) + xlim(gr)
# IRanges
tracks(time1 = p1, time2 = p2) + xlim(ranges(gr))
tks <- tracks(time1 = p1, time2 = p2)
xlim(tks)
xlim(tks) <- c(1, 35)
xlim(tks) <- gr
xlim(tks) <- ranges(gr)
## xlab, title
tracks(time1 = p1, time2 = p2, xlab = "time")
tracks(time1 = p1, time2 = p2, main = "title")
tracks(time1 = p1, time2 = p2, title = "title")
tracks(time1 = p1, time2 = p2, xlab = "time", title = "title") + theme_tracks_sunset()
## backup and restore
tks <- tracks(time1 = p1, time2 = p2)
tks
tks <- tks + xlim(1, 40)
tks
reset(tks)
tks <- tks + xlim(1, 40)
tks
tks <- backup(tks)
tks <- tks + theme_bw()
tks
reset(tks)
## padding(need to be fixed for more delicate control)
tracks(time1 = p1, time2 = p2, padding = 2)
## track color
tracks(time1 = p1, time2 = p2, track.bg.color = "yellow")
tracks(time1 = p1, time2 = p2, track.plot.color = c("yellow", "brown"))
}
\author{Tengfei Yin}
|
library('shiny')
library("shinythemes")
# To load some ui elements
source("firearms.R")
ui <- fluidPage(theme = shinytheme("flatly"),
navbarPage("United States Crime Analysis",
fluid = TRUE,
#############
#### Home ###
#############
tabPanel("Home",
fluidRow(
column(8, includeMarkdown("home.md")),
column(4, includeMarkdown("logo.md"))
)
),
##############
### Part 1 ###
##############
# Create tab for population size
tabPanel("Population Size",
h2("Crimes By Population Size"),
p(tags$strong("Introduction:"),
"This section analyzes the effect of a city's
population size on the frequency of crime and whether
or not people should be concerned about crimes
relative to other population sizes. This section
displays the rate of crime per 100,000 people or
the number of crimes that have occured based on a
given year, chosen metric, and specific crime. In
particular, this section answers the question of what
people should be most concerned about in regards
to how population size is or isn't related to crime
prevalence."),
# Organize by sidebars
sidebarLayout(
sidebarPanel(
# User inputs
sliderInput("year.choice", "Select a Year",
min = 2005, max = 2016, step = 1,
value = 2005,
sep = ""),
selectInput("metric.choice", "Select a Metric",
choices = c("Number of Crimes",
"Rate of Crimes Per
100,000"),
selected = "Number of Crimes"),
selectInput("crime.choice", "Select a Crime",
choices = c("Violent crime",
"Murder and nonnegligent manslaughter",
"Robbery",
"Aggravated assault",
"Property crime",
"Burglary",
"Larceny theft",
"Motor vehicle theft",
"Arson"),
selected = "Violent")
),
# Contains plot output with click interactions
mainPanel(
plotOutput('population.plot',
click = "plot_click"),
verbatimTextOutput("x_value"),
p("Based on the ",
textOutput("metric.choice", inline = TRUE),
" of ",
textOutput("pop.crime.choice", inline = TRUE),
" in ", textOutput("population.year",
inline = TRUE),
" the value of concern is ",
textOutput("sig.value", inline = TRUE),
". This crime value comes from cities that
have a population size of ",
textOutput("pop.size", inline = TRUE),
". When the chosen metric is rate per 100,000
people, the relationship between population
size and crime rate for most crimes is not entirely
linear. This shows that population size does not
necessarily correlate with increased crime rate,
and people should be more concerned with cities
that have a population of less than 1,000,000.
A factor that could explain this result is
a lack of strong police force. Without one, cities
outside major metropolitan statistical areas
are neglected and allow crime to flourish to a
greater extent. When the chosen metric is number of
crimes, the relationship between population size
and the number of crimes is only more strongly
pronounced in violent crime. Non-violent crime
is much more prevalent in smaller cities and
indicates that a lack of a strong police force
or socioeconomic factors are at play. In general,
a chosen year did not hold very much weight in
changing any relationship between population size
and the metric.")
)
)
),
##############
### Part 2 ###
##############
# Create tab for Firearms and Murder in the US
tabPanel("Firearms and Murder",
h2("Firearm and Murders By State"),
p(tags$strong("Introduction:"),"A question many
Americans are think about due to recent events is
whether or not our gun laws need to be more strict.
Would this prevent mass shootings, murders, and
overall violence? The data below compares the total
amount of murders in a state, and whether the murder
was committed with a firearm or not."),
sidebarLayout(
sidebarPanel(
# Create drop down menu for choosing a states
selectInput("states", label = "Choose a State:",
choices = join.final$State,
multiple = FALSE,
selected = "Washington")
),
mainPanel(
plotOutput("plot"), width = 8,
br(), # Insert break
tags$i("Alabama had no data reported for 2011. Small
amounts of reported data for years 2012-2016."),
br(),
p("In the plots above it shows how many murders there
were in a given year in a state. Some states, like
Alabama had issues with reporting murders and for
this reason some of the data may be skewed."),
br(), # Insert break
p("Some factors that could skew the data include:"),
tags$ol(
tags$li ("Population Size"),
tags$li ("Gun Laws within the State"),
tags$li ("Citizens that exercise their right to
bear arms"),
tags$li ("Crime within the States")
),
br(), # Insert break
textOutput("analysis"),
br() # Insert break
)
)
),
##############
### Part 3 ###
##############
# Organizes the tab for county crimes by state
tabPanel("County Crimes By State",
h2("County Crimes By State"),
# Introduces the tab section
p(strong("Introduction: "), "This section compares the
report counts for a particular crime type between
counties within a chosen state, during a certain
year. It also encourages the understanding on
what counties may need further crime prevention
and intervention, how crimes have evolved in counties
over the years, and insight on actions regarding
personal safety.", align = "left"),
# Bulleted list of tips for interacting with map
tags$ul(
tags$li("The ", em("darker"), " in color a county is,
the more crimes have been reported there."),
tags$li("Data was unable to be provided for counties
that are", em("grey.")),
tags$li(em("Hover"), " over a county to discover below
its ", strong("county name"), " and exact ",
strong("report count"),
" for the chosen crime.")
),
# Organizes two-sided layout
sidebarLayout(
# Organizes side panel
sidebarPanel(
# Allows user to slide and choose a year between
# 2005 and 2016
sliderInput('slide.year',
label = "Choose a year:", min = 2005,
max = 2016, value = 2010, sep = ""),
# Allows user to select a state using drop
# down menu
selectInput('select.state',
label = "Select a state:",
choices = c("Alabama", "Arizona",
"Arkansas", "California",
"Colorado", "Delaware",
"Florida", "Georgia",
"Idaho", "Illinois",
"Indiana", "Iowa", "Kansas",
"Kentucky", "Louisiana",
"Maine", "Maryland",
"Michigan", "Minnesota",
"Mississippi", "Missouri",
"Montana", "Nebraska",
"Nevada", "New Hampshire",
"New Jersey", "New Mexico",
"New York", "North Carolina",
"North Dakota", "Ohio",
"Oklahoma", "Oregon",
"Pennsylvania",
"South Carolina",
"South Dakota", "Tennessee",
"Texas", "Utah", "Vermont",
"Virginia", "Washington",
"West Virginia", "Wisconsin",
"Wyoming"),
selected = "Washington"),
# Allows user to select a crime using drop down menu
selectInput('select.crime',
label = "Select a crime of interest:",
choices = c("Murder and nonnegligent
manslaughter" =
"Murder.and.Nonnegligent.Manslaughter",
"Forcible rape" =
"Forcible.Rape",
"Robbery" = "Robbery",
"Aggravated assault" =
"Aggravated.Assault",
"Burglary" = "Burglary",
"Larceny theft" =
"Larceny.Theft",
"Motor vehicle theft" =
"Motor.Vehicle.Theft",
"Arson" = "Arson"),
selected = "Burglary")
),
# Main map panel
mainPanel(
# Creates map title
h3(textOutput('map.title'), align = "center"),
# Creates the map itself
plotOutput('county.plot',
hover = "county.plot.hover"),
# Creates the text provided when hovering
verbatimTextOutput("county.plot.info"),
# Writes an analysis statement
textOutput('analysis.statement'),
br(),
# Writes a disclaimer message
em(p(strong("Disclaimer: "), "A significant amount
of states are missing for the 2006 data.
Additionally, the visualization above does not
account for population sizes which may slightly
alter the meaning of the crime report counts to
be more or less severe."))
)
)
),
##############
### Part 4 ###
##############
tabPanel("Hate Crimes By Category",
# Include a title
titlePanel("Hate Crime Statistics"),
# introduce the section
p(strong("Introduction: "), "This section compares the
number of incidents for hate crimes against
different populations from 2005 to 2016. The question we
hope to answer is how do the trends we see over time
correspond to real world events? These trends should give
individuals a better sense of the tensions that exist
toward different groups. They should aslo help law
enforcement initiate preventative programs to defuse
these tensions.",
align = "left"),
# Include a `sidebarLayout()
sidebarLayout(
sidebarPanel(
# A slectInput labeled "Category", with choices
# "Race", "Religion", "Sexual Orientaiton" and
# "Disability"
selectInput(inputId = "category",
label = "Category:",
choices = c("Race", "Religion",
"Sexual Orientation",
"Disability"),
selected = "Race")
),
# The layout's `mainPanel()` should have the following
# reactive outputs:
mainPanel(
# A line graph showing the trend of hate crime
# incidents
plotOutput('hatecrime'),
p(strong("Race: "), "The graph shows a steady trend
for most of the ethnic groups. However, there is
a dramatic drop for hate crimes against African
Americans starting from 2008, which is when president
Obama was elected, until 2016.",
align = "left"),
# Analysis paragraph for Religion graph
p(strong("Religion: "), "The graph shows that
anti-islamic incidents increased dramaticly after
2014, which is when the Islamic State seized large
swathes of territory in Syria and Iraq. On the other
hand, anti-Jewish incidents had dropped since 2008.",
align = "left"),
# Analysis paragraph for Sexual Orientation graph
p(strong("Sexual Orientation: "), "The graph shows
that the number of anti-homosexual incidents dropped
steadily since 2011 when New York became the largest
state to allow same-sex marriage.", align = "left"),
# Analysis paragraph for Disability graph
p(strong("Disability: "), "The graph shows
a huge drop for anti-mental disability incidents in
2010. The year in which The Equality Act was passed
to protect people with disabilities and prevent
disability discrimination. However, the number
bounced back up from 2010 to 2012.", align = "left"),
br(),
# Writes a disclaimer message
em(p(strong("Disclaimer: "), "The categories above
were provided by the FBI publications. It does not
include all the hate crimes against each group.
Additionally, the visualization above does not
account for the population size for each group
which may slightly alter the meaning of the crime
report."))
)
)
),
br(),
hr(),
p("INFO 201 | Winter 2018: Lea Quan, Tammy Ho, Ciarra Hart,
Daniel Lee", align = "center")
)
)
shinyUI(ui)
|
/ui.R
|
no_license
|
daniellee0/usa-crime-analysis
|
R
| false
| false
| 20,937
|
r
|
library('shiny')
library("shinythemes")
# To load some ui elements
source("firearms.R")
ui <- fluidPage(theme = shinytheme("flatly"),
navbarPage("United States Crime Analysis",
fluid = TRUE,
#############
#### Home ###
#############
tabPanel("Home",
fluidRow(
column(8, includeMarkdown("home.md")),
column(4, includeMarkdown("logo.md"))
)
),
##############
### Part 1 ###
##############
# Create tab for population size
tabPanel("Population Size",
h2("Crimes By Population Size"),
p(tags$strong("Introduction:"),
"This section analyzes the effect of a city's
population size on the frequency of crime and whether
or not people should be concerned about crimes
relative to other population sizes. This section
displays the rate of crime per 100,000 people or
the number of crimes that have occured based on a
given year, chosen metric, and specific crime. In
particular, this section answers the question of what
people should be most concerned about in regards
to how population size is or isn't related to crime
prevalence."),
# Organize by sidebars
sidebarLayout(
sidebarPanel(
# User inputs
sliderInput("year.choice", "Select a Year",
min = 2005, max = 2016, step = 1,
value = 2005,
sep = ""),
selectInput("metric.choice", "Select a Metric",
choices = c("Number of Crimes",
"Rate of Crimes Per
100,000"),
selected = "Number of Crimes"),
selectInput("crime.choice", "Select a Crime",
choices = c("Violent crime",
"Murder and nonnegligent manslaughter",
"Robbery",
"Aggravated assault",
"Property crime",
"Burglary",
"Larceny theft",
"Motor vehicle theft",
"Arson"),
selected = "Violent")
),
# Contains plot output with click interactions
mainPanel(
plotOutput('population.plot',
click = "plot_click"),
verbatimTextOutput("x_value"),
p("Based on the ",
textOutput("metric.choice", inline = TRUE),
" of ",
textOutput("pop.crime.choice", inline = TRUE),
" in ", textOutput("population.year",
inline = TRUE),
" the value of concern is ",
textOutput("sig.value", inline = TRUE),
". This crime value comes from cities that
have a population size of ",
textOutput("pop.size", inline = TRUE),
". When the chosen metric is rate per 100,000
people, the relationship between population
size and crime rate for most crimes is not entirely
linear. This shows that population size does not
necessarily correlate with increased crime rate,
and people should be more concerned with cities
that have a population of less than 1,000,000.
A factor that could explain this result is
a lack of strong police force. Without one, cities
outside major metropolitan statistical areas
are neglected and allow crime to flourish to a
greater extent. When the chosen metric is number of
crimes, the relationship between population size
and the number of crimes is only more strongly
pronounced in violent crime. Non-violent crime
is much more prevalent in smaller cities and
indicates that a lack of a strong police force
or socioeconomic factors are at play. In general,
a chosen year did not hold very much weight in
changing any relationship between population size
and the metric.")
)
)
),
##############
### Part 2 ###
##############
# Create tab for Firearms and Murder in the US
tabPanel("Firearms and Murder",
h2("Firearm and Murders By State"),
p(tags$strong("Introduction:"),"A question many
Americans are think about due to recent events is
whether or not our gun laws need to be more strict.
Would this prevent mass shootings, murders, and
overall violence? The data below compares the total
amount of murders in a state, and whether the murder
was committed with a firearm or not."),
sidebarLayout(
sidebarPanel(
# Create drop down menu for choosing a states
selectInput("states", label = "Choose a State:",
choices = join.final$State,
multiple = FALSE,
selected = "Washington")
),
mainPanel(
plotOutput("plot"), width = 8,
br(), # Insert break
tags$i("Alabama had no data reported for 2011. Small
amounts of reported data for years 2012-2016."),
br(),
p("In the plots above it shows how many murders there
were in a given year in a state. Some states, like
Alabama had issues with reporting murders and for
this reason some of the data may be skewed."),
br(), # Insert break
p("Some factors that could skew the data include:"),
tags$ol(
tags$li ("Population Size"),
tags$li ("Gun Laws within the State"),
tags$li ("Citizens that exercise their right to
bear arms"),
tags$li ("Crime within the States")
),
br(), # Insert break
textOutput("analysis"),
br() # Insert break
)
)
),
##############
### Part 3 ###
##############
# Organizes the tab for county crimes by state
tabPanel("County Crimes By State",
h2("County Crimes By State"),
# Introduces the tab section
p(strong("Introduction: "), "This section compares the
report counts for a particular crime type between
counties within a chosen state, during a certain
year. It also encourages the understanding on
what counties may need further crime prevention
and intervention, how crimes have evolved in counties
over the years, and insight on actions regarding
personal safety.", align = "left"),
# Bulleted list of tips for interacting with map
tags$ul(
tags$li("The ", em("darker"), " in color a county is,
the more crimes have been reported there."),
tags$li("Data was unable to be provided for counties
that are", em("grey.")),
tags$li(em("Hover"), " over a county to discover below
its ", strong("county name"), " and exact ",
strong("report count"),
" for the chosen crime.")
),
# Organizes two-sided layout
sidebarLayout(
# Organizes side panel
sidebarPanel(
# Allows user to slide and choose a year between
# 2005 and 2016
sliderInput('slide.year',
label = "Choose a year:", min = 2005,
max = 2016, value = 2010, sep = ""),
# Allows user to select a state using drop
# down menu
selectInput('select.state',
label = "Select a state:",
choices = c("Alabama", "Arizona",
"Arkansas", "California",
"Colorado", "Delaware",
"Florida", "Georgia",
"Idaho", "Illinois",
"Indiana", "Iowa", "Kansas",
"Kentucky", "Louisiana",
"Maine", "Maryland",
"Michigan", "Minnesota",
"Mississippi", "Missouri",
"Montana", "Nebraska",
"Nevada", "New Hampshire",
"New Jersey", "New Mexico",
"New York", "North Carolina",
"North Dakota", "Ohio",
"Oklahoma", "Oregon",
"Pennsylvania",
"South Carolina",
"South Dakota", "Tennessee",
"Texas", "Utah", "Vermont",
"Virginia", "Washington",
"West Virginia", "Wisconsin",
"Wyoming"),
selected = "Washington"),
# Allows user to select a crime using drop down menu
selectInput('select.crime',
label = "Select a crime of interest:",
choices = c("Murder and nonnegligent
manslaughter" =
"Murder.and.Nonnegligent.Manslaughter",
"Forcible rape" =
"Forcible.Rape",
"Robbery" = "Robbery",
"Aggravated assault" =
"Aggravated.Assault",
"Burglary" = "Burglary",
"Larceny theft" =
"Larceny.Theft",
"Motor vehicle theft" =
"Motor.Vehicle.Theft",
"Arson" = "Arson"),
selected = "Burglary")
),
# Main map panel
mainPanel(
# Creates map title
h3(textOutput('map.title'), align = "center"),
# Creates the map itself
plotOutput('county.plot',
hover = "county.plot.hover"),
# Creates the text provided when hovering
verbatimTextOutput("county.plot.info"),
# Writes an analysis statement
textOutput('analysis.statement'),
br(),
# Writes a disclaimer message
em(p(strong("Disclaimer: "), "A significant amount
of states are missing for the 2006 data.
Additionally, the visualization above does not
account for population sizes which may slightly
alter the meaning of the crime report counts to
be more or less severe."))
)
)
),
##############
### Part 4 ###
##############
tabPanel("Hate Crimes By Category",
# Include a title
titlePanel("Hate Crime Statistics"),
# introduce the section
p(strong("Introduction: "), "This section compares the
number of incidents for hate crimes against
different populations from 2005 to 2016. The question we
hope to answer is how do the trends we see over time
correspond to real world events? These trends should give
individuals a better sense of the tensions that exist
toward different groups. They should aslo help law
enforcement initiate preventative programs to defuse
these tensions.",
align = "left"),
# Include a `sidebarLayout()
sidebarLayout(
sidebarPanel(
# A slectInput labeled "Category", with choices
# "Race", "Religion", "Sexual Orientaiton" and
# "Disability"
selectInput(inputId = "category",
label = "Category:",
choices = c("Race", "Religion",
"Sexual Orientation",
"Disability"),
selected = "Race")
),
# The layout's `mainPanel()` should have the following
# reactive outputs:
mainPanel(
# A line graph showing the trend of hate crime
# incidents
plotOutput('hatecrime'),
p(strong("Race: "), "The graph shows a steady trend
for most of the ethnic groups. However, there is
a dramatic drop for hate crimes against African
Americans starting from 2008, which is when president
Obama was elected, until 2016.",
align = "left"),
# Analysis paragraph for Religion graph
p(strong("Religion: "), "The graph shows that
anti-islamic incidents increased dramaticly after
2014, which is when the Islamic State seized large
swathes of territory in Syria and Iraq. On the other
hand, anti-Jewish incidents had dropped since 2008.",
align = "left"),
# Analysis paragraph for Sexual Orientation graph
p(strong("Sexual Orientation: "), "The graph shows
that the number of anti-homosexual incidents dropped
steadily since 2011 when New York became the largest
state to allow same-sex marriage.", align = "left"),
# Analysis paragraph for Disability graph
p(strong("Disability: "), "The graph shows
a huge drop for anti-mental disability incidents in
2010. The year in which The Equality Act was passed
to protect people with disabilities and prevent
disability discrimination. However, the number
bounced back up from 2010 to 2012.", align = "left"),
br(),
# Writes a disclaimer message
em(p(strong("Disclaimer: "), "The categories above
were provided by the FBI publications. It does not
include all the hate crimes against each group.
Additionally, the visualization above does not
account for the population size for each group
which may slightly alter the meaning of the crime
report."))
)
)
),
br(),
hr(),
p("INFO 201 | Winter 2018: Lea Quan, Tammy Ho, Ciarra Hart,
Daniel Lee", align = "center")
)
)
shinyUI(ui)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{generate.results}
\alias{generate.results}
\title{Generates clustering results}
\usage{
generate.results(g_clusters, distance.matrix)
}
\arguments{
\item{g_clusters}{A object of class igraph containing all component connected (cc=1).}
\item{distance.matrix}{A numeric matrix or data.frame with equals names and numbers of rows and columns representing objects to group.}
}
\value{
A list with the elements
\item{cnumber}{A numeric value representing the number of clusters of the solution.}
\item{cluster}{A named vector of integers of size n with values in range \code{1:cnumber}, representing the cluster to which each object is assigned.}
\item{partition}{A partition matrix order by cluster where are shown the objects and the cluster where they are assigned.}
\item{csize}{A vector of size k with the cardinality of each cluster in the solution.}
\item{network}{An object of class "igraph" as a network representing the clustering solution.}
}
\description{
This function performs the union the all component connected (cc) yield in each recursion of the MST-kNN algorithm.
}
\keyword{internal}
|
/man/generate.results.Rd
|
no_license
|
cran/mstknnclust
|
R
| false
| true
| 1,192
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{generate.results}
\alias{generate.results}
\title{Generates clustering results}
\usage{
generate.results(g_clusters, distance.matrix)
}
\arguments{
\item{g_clusters}{A object of class igraph containing all component connected (cc=1).}
\item{distance.matrix}{A numeric matrix or data.frame with equals names and numbers of rows and columns representing objects to group.}
}
\value{
A list with the elements
\item{cnumber}{A numeric value representing the number of clusters of the solution.}
\item{cluster}{A named vector of integers of size n with values in range \code{1:cnumber}, representing the cluster to which each object is assigned.}
\item{partition}{A partition matrix order by cluster where are shown the objects and the cluster where they are assigned.}
\item{csize}{A vector of size k with the cardinality of each cluster in the solution.}
\item{network}{An object of class "igraph" as a network representing the clustering solution.}
}
\description{
This function performs the union the all component connected (cc) yield in each recursion of the MST-kNN algorithm.
}
\keyword{internal}
|
library(plotly)
setwd("/Users/Talal/Tesi/familiarity/R/AllCodeNGram")
Codedata <- read.csv("allCodeNgram.csv")
p <- plot_ly(codeData ,y= ~android, name = 'Android',type = 'box') %>%
add_trace(y = ~swing, name = 'Swing Code') %>%
add_trace(y = ~swift, name = 'Swift Code') %>%
add_trace(y = ~perl, name = 'Perl Code') %>%
add_trace(y = ~matlab, name = 'MatLAb Code') %>%
layout(title = 'All Code N-Gram')
p
chart_link = plotly_POST(p, filename="All-code-nGram")
|
/R/AllCodeNGram/allCodeNgram.R
|
no_license
|
talalelafchal/familiarity
|
R
| false
| false
| 473
|
r
|
library(plotly)
setwd("/Users/Talal/Tesi/familiarity/R/AllCodeNGram")
Codedata <- read.csv("allCodeNgram.csv")
p <- plot_ly(codeData ,y= ~android, name = 'Android',type = 'box') %>%
add_trace(y = ~swing, name = 'Swing Code') %>%
add_trace(y = ~swift, name = 'Swift Code') %>%
add_trace(y = ~perl, name = 'Perl Code') %>%
add_trace(y = ~matlab, name = 'MatLAb Code') %>%
layout(title = 'All Code N-Gram')
p
chart_link = plotly_POST(p, filename="All-code-nGram")
|
############
# A script to process Bills and Bills Details data for analysis
# Project: Texas Bipartisanship by Mark Clayton Hand
# Script Author: Igor Holas
# Date: 06-20-2017
###########
# use sponsors csv
# -- import repub value from people_small
# -- calculate mean repub score per bill_id
# use create bill_facts table
# -- bill id, signed, mean_repub
# libs
library(jsonlite)
library(plyr)
#set wd
setwd('~/tx_lege')
# merge sponsors and people_small
# sponsors.csv and action_dates.csv come from get_bills.R
# people_small.csv comes from get_people.R
sponsors <- read.csv("data/TX_85_sponsors.csv")
action_dates <- read.csv("data/TX_85_action_dates.csv")
people_small <- read.csv("data/TX_85_people_small.csv")
spons_repub <- merge(sponsors, people_small, by='leg_id')
bill_facts <- as.data.frame(as.list(aggregate(spons_repub[, c('repub')], list(spons_repub$bill_id), FUN=function(x) c(mn = mean(x), n = length(x) ) ) ) )
#cleaning up - setting the corret bill_id column name
names(bill_facts)[names(bill_facts)=="Group.1"] <- "bill_id"
names(bill_facts)[names(bill_facts)=="x.mn"] <- "pct_repub"
names(bill_facts)[names(bill_facts)=="x.n"] <- "num_sponsors"
#adding signed flag
bill_facts2 <- merge (bill_facts,action_dates, by='bill_id')
bill_facts2$signed <- ifelse(bill_facts2$signed !=0, 1, 0)
#export csv
write.csv(bill_facts2[c('bill_id','pct_repub','num_sponsors','signed')], file = "data/TX_85_bill_facts.csv")
|
/clean_bill_details.R
|
no_license
|
iholas/tx_lege
|
R
| false
| false
| 1,438
|
r
|
############
# A script to process Bills and Bills Details data for analysis
# Project: Texas Bipartisanship by Mark Clayton Hand
# Script Author: Igor Holas
# Date: 06-20-2017
###########
# Pipeline:
# use sponsors csv
# -- import repub value from people_small
# -- calculate mean repub score per bill_id
# use create bill_facts table
# -- bill id, signed, mean_repub
# libs
# NOTE(review): jsonlite and plyr are loaded but not referenced in this
# script -- presumably leftovers from get_bills.R / get_people.R; confirm
# before removing.
library(jsonlite)
library(plyr)
#set wd
setwd('~/tx_lege')
# merge sponsors and people_small on legislator id
# sponsors.csv and action_dates.csv come from get_bills.R
# people_small.csv comes from get_people.R
sponsors <- read.csv("data/TX_85_sponsors.csv")
action_dates <- read.csv("data/TX_85_action_dates.csv")
people_small <- read.csv("data/TX_85_people_small.csv")
spons_repub <- merge(sponsors, people_small, by='leg_id')
# Per-bill summary: mean of the 0/1 `repub` flag (share of Republican
# sponsors) and the sponsor count.  aggregate() yields matrix columns,
# hence the as.list/as.data.frame flattening.
bill_facts <- as.data.frame(as.list(aggregate(spons_repub[, c('repub')], list(spons_repub$bill_id), FUN=function(x) c(mn = mean(x), n = length(x) ) ) ) )
# cleaning up - rename the columns produced by aggregate() to meaningful names
names(bill_facts)[names(bill_facts)=="Group.1"] <- "bill_id"
names(bill_facts)[names(bill_facts)=="x.mn"] <- "pct_repub"
names(bill_facts)[names(bill_facts)=="x.n"] <- "num_sponsors"
# adding signed flag: collapse any non-zero `signed` value to 1 (binary outcome)
bill_facts2 <- merge (bill_facts,action_dates, by='bill_id')
bill_facts2$signed <- ifelse(bill_facts2$signed !=0, 1, 0)
# export csv of the analysis-ready table
write.csv(bill_facts2[c('bill_id','pct_repub','num_sponsors','signed')], file = "data/TX_85_bill_facts.csv")
|
# Unit tests for showArticles(): exercises both the vector-of-ids call
# (returns one data frame) and the matrix-of-ids call (returns a list of
# data frames, one per column, NA-padded).
context("showArticles")
test_that("showArticles", {
# Minimal corpus fixture: metadata for three articles plus their texts,
# keyed by id.
meta <- data.frame(id=as.character(1:3), date=as.Date(c("1960-01-01","1987-06-25","2014-08-06")), title=c("Title 1", "Title 2", "Title 3"), page=c(24,60,1), stringsAsFactors=FALSE)
text <- list("1"="Text 1", "2"="Text 2", "3"="Text 3")
corpus <- list(meta=meta, text=text)
# Expected result M: id/title/date columns followed by the article text,
# with every column coerced to character.
M <- cbind(meta[,c(1,3,2)],text=unlist(text))
M <- data.frame(apply(M,2,as.character), stringsAsFactors = FALSE)
# M2: the same rows reordered 1,3,2.  M3: row 1 followed by two rows of
# NAs with empty text (what NA ids in the id matrix should produce).
M2 <- M[c(1,3,2),]
M3 <- rbind(M[1,], c(NA,NA,NA,""), c(NA,NA,NA,""))
rownames(M) <- rownames(M2) <- rownames(M3)<- 1:3
# Vector of ids -> a single data frame equal to M.
m <- showArticles(corpus,id=as.character(1:3), file="test.csv")
expect_equal(M,m)
# Matrix of ids (one selection per column, NA-padded) -> a named list of
# data frames, one per column.
m <- showArticles(corpus,id=matrix(as.character(c(1:3,1,3,2,1,NA,NA)),3,3), file="test.csv")
expect_equal(list("1"=M, "2"=M2, "3"=M3),m)
})
|
/tests/testthat/test_showArticles.R
|
no_license
|
mm28ajos/tmT
|
R
| false
| false
| 830
|
r
|
context("showArticles")
test_that("showArticles", {
meta <- data.frame(id=as.character(1:3), date=as.Date(c("1960-01-01","1987-06-25","2014-08-06")), title=c("Title 1", "Title 2", "Title 3"), page=c(24,60,1), stringsAsFactors=FALSE)
text <- list("1"="Text 1", "2"="Text 2", "3"="Text 3")
corpus <- list(meta=meta, text=text)
M <- cbind(meta[,c(1,3,2)],text=unlist(text))
M <- data.frame(apply(M,2,as.character), stringsAsFactors = FALSE)
M2 <- M[c(1,3,2),]
M3 <- rbind(M[1,], c(NA,NA,NA,""), c(NA,NA,NA,""))
rownames(M) <- rownames(M2) <- rownames(M3)<- 1:3
m <- showArticles(corpus,id=as.character(1:3), file="test.csv")
expect_equal(M,m)
m <- showArticles(corpus,id=matrix(as.character(c(1:3,1,3,2,1,NA,NA)),3,3), file="test.csv")
expect_equal(list("1"=M, "2"=M2, "3"=M3),m)
})
|
#' Wiggle style
#'
#' 'Wiggle' is a theme style that adds an amount of cumulative uniform noise to
#' interpolated lines, making them wiggle a bit. The functions are used in the
#' following way: \describe{
#'   \item{\code{wiggle()}}{is a function factory that produces a function that
#'   is subsequently used in elements to make lines wiggle}
#'   \item{\code{element_rect_wiggle()}, \code{element_line_wiggle}}{are
#'   convenience wrappers around \code{element_*_seq()} that pass down the
#'   function generated by \code{wiggle()}.}
#'   \item{\code{wiggling_geoms()}}{is a convenience theme setter for the
#'   \code{elementalist.geom_rect} and \code{elementalist.geom_line} elements.}
#' }
#'
#' @param amount A \code{numeric} of length 1 setting the amount of wiggling to
#'   occur.
#' @param seed An \code{integer} to set the seed for reproducible wiggling.
#'   The caller's RNG state is saved and restored, so using a seed does not
#'   disturb the user's random number stream.
#' @inheritParams element_line_seq
#' @inheritParams element_rect_seq
#' @param fill Fill colour.
#' @param ... Arguments passed to \code{element_*_seq()}.
#'
#' @details The amount of wiggle added to lines and rectangles is absolute. This
#' makes it easier to make more uniform wiggles, but causes relative distortion
#' when resizing the plot window or device.
#'
#' @return For \code{wiggle()}, a \code{function}.
#' @return For \code{element_rect_wiggle()}, an \code{element_rect_seq} type
#'   list.
#' @return For \code{element_line_wiggle()}, an \code{element_line_seq} type
#'   list.
#' @return For \code{wiggling_geoms}, a partial \code{theme} object.
#' @export
#' @family theme styles
#'
#' @examples
#' barplot <- ggplot(mpg, aes(class)) +
#'   geom_bar_theme(aes(colour = class)) +
#'   geom_line_theme(stat = "count", aes(group = -1))
#'
#' # Making geoms wiggle
#' barplot + wiggling_geoms()
#'
#' # Making only line geoms wiggle
#' barplot + theme(elementalist.geom_line = element_line_wiggle(10))
#'
#' # Making only rect geoms wiggle
#' barplot + theme(elementalist.geom_rect = element_rect_wiggle(5))
#'
#' # Let other theme elements wiggle
#' barplot + theme(
#'   axis.line.x = element_line_wiggle(),
#'   axis.line.y = element_line_wiggle(),
#'   legend.background = element_rect_wiggle(colour = "grey20")
#' )
wiggle <- function(amount = 3, seed = NULL) {
  seed <- force(seed)
  # Noise is drawn from runif(-amount/2, amount/2), so halve once up front.
  amount <- amount / 2
  function(x, y, colour, size = NULL, id, n) {
    # Total number of interpolated points: n per segment plus the last endpoint.
    nn <- n * (length(x) - 1) + 1
    id <- rep(id[1], nn)
    if (!is.null(seed)) {
      # Fix the RNG for reproducible wiggles, then restore the caller's RNG
      # state on exit.  (The previous `on.exit(set.seed(NULL))` re-initialised
      # the RNG instead, silently clobbering the user's random stream.)
      old_seed <- if (exists(".Random.seed", envir = globalenv(), inherits = FALSE)) {
        get(".Random.seed", envir = globalenv(), inherits = FALSE)
      } else {
        NULL
      }
      set.seed(seed)
      on.exit(
        if (is.null(old_seed)) {
          set.seed(NULL)
        } else {
          assign(".Random.seed", old_seed, envir = globalenv())
        },
        add = TRUE
      )
    }
    # Cumulative uniform noise: each point drifts from the previous one.
    z <- cumsum(runif(nn, -amount, amount))
    xy <- fit_along(unclass(x), unclass(y), z)
    x <- seq_between(unclass(x), n)
    y <- seq_between(unclass(y), n)
    # Interpolate colours per segment; trailing NA terminates the polyline.
    col <- c(col_interpol(colour, nn - 1), NA)
    if (!is.null(size)) {
      size <- c(rep_len(size, length(x) - 1), NA)
    }
    out <- list(
      x = x,
      y = y,
      # dx/dy are the noise displacements relative to the interpolated path.
      dx = unclass(xy$x) - x,
      dy = unclass(xy$y) - y,
      col = col,
      lwd = size,
      id = id
    )
    # Drop components that were not supplied (e.g. lwd when size is NULL).
    out[vapply(out, is.null, logical(1))] <- NULL
    out
  }
}
#' @rdname wiggle
#' @export
element_line_wiggle <- function(amount = 3, seed = NULL, ...) {
  # Build the wiggling displacement function once, then hand it to the
  # sequence-based line element along with any extra element arguments.
  wiggler <- wiggle(amount = amount, seed = seed)
  element_line_seq(fun = wiggler, ...)
}
#' @rdname wiggle
#' @export
element_rect_wiggle <- function(amount = 3, seed = NULL, ...) {
  # Build the wiggling displacement function once, then hand it to the
  # sequence-based rect element along with any extra element arguments.
  wiggler <- wiggle(amount = amount, seed = seed)
  element_rect_seq(fun = wiggler, ...)
}
#' @rdname wiggle
#' @export
wiggling_geoms <- function(
  amount = 5, fill = NULL,
  colour = NULL, size = NULL,
  linetype = NULL, color = NULL, lineend = NULL,
  sides = "tlbr", seed = NULL,
  n = 50
) {
  # Construct both element styles up front, then register them on the two
  # elementalist geom hooks in a single theme() call.
  rect_elem <- element_rect_wiggle(
    amount = amount, fill = fill, colour = colour, size = size,
    linetype = linetype, color = color, n = n, sides = sides,
    seed = seed
  )
  line_elem <- element_line_wiggle(
    amount = amount, colour = colour, size = size,
    linetype = linetype, color = color, n = n, lineend = lineend,
    seed = seed
  )
  theme(
    elementalist.geom_rect = rect_elem,
    elementalist.geom_line = line_elem
  )
}
|
/R/style_wiggle.R
|
permissive
|
gejielin/elementalist
|
R
| false
| false
| 3,935
|
r
|
#' Wiggle style
#'
#' 'Wiggle' is a theme style that adds an amount of cumulative uniform noise to
#' interpolated lines, making them wiggle a bit. The functions are used in the
#' following way: \describe{
#' \item{\code{wiggle()}}{is a function factory that produces a function that
#' is subsequently used in elements to make lines wiggle}
#' \item{\code{element_rect_wiggle()}, \code{element_line_wiggle}}{are
#' convenience wrappers around \code{element_*_seq()} that pass down the
#' function generated by \code{wiggle()}.}
#' \item{\code{wiggling_geoms()}}{is a convenience theme setter for the
#' \code{elementalist.geom_rect} and \code{elementalist.geom_line} elements.}
#' }
#'
#' @param amount A \code{numeric} of length 1 setting the amount of wiggling to
#' occur.
#' @param seed An \code{integer} to set the seed for reproducible wiggling.
#' @inheritParams element_line_seq
#' @inheritParams element_rect_seq
#' @param fill Fill colour.
#' @param ... Arguments passed to \code{element_*_seq()}.
#'
#' @details The amount of wiggle added to lines and rectangles is absolute. This
#' makes it easier to make more uniform wiggles, but causes relative distortion
#' when resizing the plot window or device.
#'
#' @return For \code{wiggle()}, a \code{function}.
#' @return For \code{element_rect_wiggle()}, an \code{element_rect_seq} type
#' list.
#' @return For \code{element_line_wiggle()}, an \code{element_line_seq} type
#' list.
#' @return For \code{wiggling_geoms}, a partial \code{theme} object.
#' @export
#' @family theme styles
#'
#' @examples
#' barplot <- ggplot(mpg, aes(class)) +
#' geom_bar_theme(aes(colour = class)) +
#' geom_line_theme(stat = "count", aes(group = -1))
#'
#' # Making geoms wiggle
#' barplot + wiggling_geoms()
#'
#' # Making only line geoms wiggle
#' barplot + theme(elementalist.geom_line = element_line_wiggle(10))
#'
#' # Making only rect geoms wiggle
#' barplot + theme(elementalist.geom_rect = element_rect_wiggle(5))
#'
#' # Let other theme elements wiggle
#' barplot + theme(
#' axis.line.x = element_line_wiggle(),
#' axis.line.y = element_line_wiggle(),
#' legend.background = element_rect_wiggle(colour = "grey20")
#' )
wiggle <- function(amount = 3, seed = NULL) {
seed <- force(seed)
amount <- amount / 2
function(x, y, colour, size = NULL, id, n) {
nn <- n * (length(x) - 1) + 1
id <- rep(id[1], nn)
if (!is.null(seed)) {
set.seed(seed)
on.exit(set.seed(NULL)) # Reset on exit
}
z <- cumsum(runif(nn, -amount, amount))
xy <- fit_along(unclass(x), unclass(y), z)
x <- seq_between(unclass(x), n)
y <- seq_between(unclass(y), n)
col <- c(col_interpol(colour, nn - 1), NA)
if (!is.null(size)) {
size <- c(rep_len(size, length(x) - 1), NA)
}
out <- list(
x = x,
y = y,
dx = unclass(xy$x) - x,
dy = unclass(xy$y) - y,
col = col,
lwd = size,
id = id
)
out[vapply(out, is.null, logical(1))] <- NULL
out
}
}
#' @rdname wiggle
#' @export
element_line_wiggle <- function(amount = 3, seed = NULL, ...) {
element_line_seq(fun = wiggle(amount, seed), ...)
}
#' @rdname wiggle
#' @export
element_rect_wiggle <- function(amount = 3, seed = NULL, ...) {
element_rect_seq(fun = wiggle(amount, seed), ...)
}
#' @rdname wiggle
#' @export
wiggling_geoms <- function(
amount = 5, fill = NULL,
colour = NULL, size = NULL,
linetype = NULL, color = NULL, lineend = NULL,
sides = "tlbr", seed = NULL,
n = 50
) {
theme(
elementalist.geom_rect = element_rect_wiggle(
amount = amount, fill = fill, colour = colour, size = size,
linetype = linetype, color = color, n = n, sides = sides,
seed = seed
),
elementalist.geom_line = element_line_wiggle(
amount = amount, colour = colour, size = size,
linetype = linetype, color = color, n = n, lineend = lineend,
seed = seed
)
)
}
|
# Read semicolon-separated data with comma decimal marks (European CSV).
data <- read.csv("data.csv", header=TRUE, sep=";",dec=",")
library(ggplot2)
# Collapse the three language levels to two: assigning duplicated level
# labels merges levels, leaving German (D) vs French (F) cantons.
levels(data$Langue) <- c("D", "F", "D")
# relation bivariée : part de "oui" vs part d'étrangers, taille = population.
# BUG FIX: the original added theme(legend.title=element_text("Population du
# canton")) -- element_text()'s first argument is `family`, so that set the
# legend-title FONT FAMILY to the title string.  The legend title is already
# (correctly) set by scale_size_continuous(name=), so the bogus term is removed.
png(filename="figure1.png", width=500, height=500)
ggplot(data, aes(x=EtrTaux2012, y=VotTaux2014)) +
  geom_point(aes(size=Pop2012)) +
  geom_smooth(aes(group=1), method="lm") +
  theme_bw() +
  xlab("Proportion d'étrangers") +
  ylab('Proportion de "oui"') +
  scale_size_continuous(name="Population du canton") +
  theme(legend.position="bottom")
dev.off()
summary(lm(VotTaux2014 ~ EtrTaux2012, data=data))
# introduisons l'effet de la religion et de la langue
# la religion en tant que telle n'a pas d'effet
ggplot(data, aes(x=Protestants, y=VotTaux2014)) +
  geom_point(aes(size=Pop2012)) +
  geom_smooth(aes(group=1), method="lm") +
  theme_bw()
summary(lm(VotTaux2014 ~ EtrTaux2012 + Langue + Protestants, data=data))
# Partial relationship: residuals of the language+religion model plotted
# against the share of foreigners.
reg2 <- lm(VotTaux2014 ~ Langue + Protestants, data=data)
data$resid <- reg2$residuals
ggplot(data, aes(x=EtrTaux2012, y=resid)) +
  geom_point(aes(size=Pop2012)) +
  geom_smooth(aes(group=1), method="lm") +
  theme_bw() +
  xlab("Proportion d'étrangers") +
  ylab('Résidus') +
  scale_size_continuous(name="Population du canton") +
  theme(legend.position="bottom")
|
/script.R
|
no_license
|
joelgombin/VotationCH
|
R
| false
| false
| 1,323
|
r
|
data <- read.csv("data.csv", header=TRUE, sep=";",dec=",")
library(ggplot2)
levels(data$Langue) <- c("D", "F", "D")
# relation bivariée
png(filename="figure1.png", width=500, height=500)
ggplot(data, aes(x=EtrTaux2012, y=VotTaux2014)) + geom_point(aes(size=Pop2012)) + geom_smooth(aes(group=1),method="lm") + theme_bw() + xlab("Proportion d'étrangers") + ylab('Proportion de "oui"') + theme(legend.title=element_text("Population du canton")) + scale_size_continuous(name="Population du canton") + theme(legend.position="bottom")
dev.off()
summary(lm(VotTaux2014 ~ EtrTaux2012, data=data))
# introduisons l'effet de la religion et de la langue
# la religion en tant que telle n'a pas d'effet
ggplot(data, aes(x=Protestants, y=VotTaux2014)) + geom_point(aes(size=Pop2012)) + geom_smooth(aes(group=1),method="lm") + theme_bw()
summary(lm(VotTaux2014 ~ EtrTaux2012 + Langue + Protestants, data=data))
reg2 <- lm(VotTaux2014 ~ Langue + Protestants, data=data)
data$resid <- reg2$residuals
ggplot(data, aes(x=EtrTaux2012, y=resid)) + geom_point(aes(size=Pop2012)) + geom_smooth(aes(group=1),method="lm") + theme_bw() + xlab("Proportion d'étrangers") + ylab('Résidus') + theme(legend.title=element_text("Population du canton")) + scale_size_continuous(name="Population du canton") + theme(legend.position="bottom")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_values.R
\name{generate_sobol_set}
\alias{generate_sobol_set}
\title{Generate Sobol Set}
\usage{
generate_sobol_set(n, dim, seed = 0)
}
\arguments{
\item{n}{The number of values (per dimension) to extract.}
\item{dim}{The number of dimensions of the sequence.}
\item{seed}{Default `0`. The random seed.}
}
\value{
A set of values drawn from the Sobol sequence: `n` values for each of the
`dim` dimensions (see the examples for reshaping into an `n` x `dim` matrix).
}
\description{
Generate a set of values from a Sobol set.
}
\examples{
#Generate a 2D sample:
points2d = generate_sobol_set(n=1000, dim = 2)
plot(points2d, xlim=c(0,1),ylim=c(0,1))
#Generate a longer sequence of values from that set
points2d = generate_sobol_set(n=1500, dim = 2)
plot(points2d, xlim=c(0,1),ylim=c(0,1))
#Integrate the value of pi by counting the number of randomly generated points
#that fall within the unit circle.
pointset = matrix(generate_sobol_set(10000,dim=2),ncol=2)
pi_estimate = 4*sum(pointset[,1] * pointset[,1] + pointset[,2] * pointset[,2] < 1)/10000
pi_estimate
}
|
/man/generate_sobol_set.Rd
|
permissive
|
KirillShaman/spacefillr
|
R
| false
| true
| 1,082
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_values.R
\name{generate_sobol_set}
\alias{generate_sobol_set}
\title{Generate Sobol Set}
\usage{
generate_sobol_set(n, dim, seed = 0)
}
\arguments{
\item{n}{The number of values (per dimension) to extract.}
\item{dim}{The number of dimensions of the sequence.}
\item{seed}{Default `0`. The random seed.}
}
\value{
A single numeric value representing the `i`th element in the `dim` dimension.
}
\description{
Generate a set of values from a Sobol set.
}
\examples{
#Generate a 2D sample:
points2d = generate_sobol_set(n=1000, dim = 2)
plot(points2d, xlim=c(0,1),ylim=c(0,1))
#Generate a longer sequence of values from that set
points2d = generate_sobol_set(n=1500, dim = 2)
plot(points2d, xlim=c(0,1),ylim=c(0,1))
#'#Integrate the value of pi by counting the number of randomly generated points that fall
#within the unit circle.
pointset = matrix(generate_sobol_set(10000,dim=2),ncol=2)
pi_estimate = 4*sum(pointset[,1] * pointset[,1] + pointset[,2] * pointset[,2] < 1)/10000
pi_estimate
}
|
# Coursera and Johns Hopkins University
# Data Science Specialization
# Course04 : Exploratory Data Analysis
# Project01 : ExData_Plotting1
# 2019-02-27

# BUG FIX: chron() is used for the Time column below, but the package was
# never loaded, so the script failed in a fresh session.
library(chron)

# Read everything as character first: missing values are coded as "?", so
# numeric coercion is deferred until those rows are dropped.
data_raw <- read.table(file="household_power_consumption.txt", head=TRUE, sep=";",
                       colClasses=rep("character", 9))
# Only use data from the dates 2007-02-01 and 2007-02-02 (day/month/year format).
data_part <- subset(data_raw, Date=="1/2/2007" | Date=="2/2/2007")
# Missing values are coded as "?": drop any row that contains one.
data <- data_part[!apply(data_part, 1, function(x) {any(x == "?")}),]
# Convert date/time columns to proper classes.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$Time <- chron(times=data$Time)
# Convert all measurement columns to numeric in one pass (replaces seven
# copy-pasted as.numeric assignments).
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Global_intensity", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3")
data[num_cols] <- lapply(data[num_cols], as.numeric)
# Publish `data` globally so the sourced plot scripts can see it even when
# this file is sourced from a non-global environment.
assign("data", data, envir = .GlobalEnv)
# head(data)
source("plot1.R")
plot1(data)
source("plot2.R")
plot2(data)
source("plot3.R")
plot3(data)
source("plot4.R")
plot4(data)
|
/Cleaning_Data.R
|
no_license
|
NamWoo/ExData_Plotting1
|
R
| false
| false
| 1,243
|
r
|
# Coursera and Johns Hopkins University
# Data Science Specialization
# Course04 : Exploratory Data Analysis
# Project01 : ExData_Plotting1
# 2019-02-27
data_raw <- read.table(file="household_power_consumption.txt",head=TRUE,sep=";",colClasses=c("character","character","character","character","character","character","character","character","character"))
# only be using data from the dates 2007-02-01 and 2007-02-02.
data_part <- subset(data_raw, Date=="1/2/2007" | Date=="2/2/2007")
# missing values are coded as ?.
data <- data_part[!apply(data_part, 1, function(x) {any(x == "?")}),]
data$Date<-as.Date(data$Date,"%d/%m/%Y")
data$Time <- chron(times=data$Time)
data$Global_active_power<-as.numeric(data$Global_active_power)
data$Global_reactive_power<-as.numeric(data$Global_reactive_power)
data$Voltage<-as.numeric(data$Voltage)
data$Global_intensity<-as.numeric(data$Global_intensity)
data$Sub_metering_1<-as.numeric(data$Sub_metering_1)
data$Sub_metering_2<-as.numeric(data$Sub_metering_2)
data$Sub_metering_3<-as.numeric(data$Sub_metering_3)
assign("data", data, envir = .GlobalEnv)
# head(data)
source("plot1.R")
plot1(data)
source("plot2.R")
plot2(data)
source("plot3.R")
plot3(data)
source("plot4.R")
plot4(data)
|
library(tidyverse)
library(effsize)
library(plyr)
library(reshape2)
library(caret)
library(glmnet)
options(scipen=999)
setwd("C:\\Users\\winbase\\MIDS\\w203\\w203_lab3")
crime_data = read.csv("crime_v2.csv")
# Clean data, dropping all NA rows.
# NOTE(review): this compares against the string 'NA'; with genuinely missing
# values the comparison yields NA, which which() also drops -- confirm the
# county column's type if the input file changes.
crime_data = crime_data[which(crime_data$county != 'NA'),]
# prbconv was read as a factor; recover the underlying numeric values via the
# levels (as.numeric on the factor directly would return level codes).
class(crime_data$prbconv[1][1]) #r thinks it's a factor?
crime_data$prbconv = as.numeric(levels(crime_data$prbconv)[as.integer(crime_data$prbconv)])
##########################################################################################
# Compute RMSE and R^2 from true and predicted values.
eval_results = function(true, predicted, df) {
  SSE = sum((predicted - true)^2)
  SST = sum((true - mean(true))^2)
  R_square = 1 - SSE / SST
  RMSE = sqrt(SSE/nrow(df))
  # Model performance metrics
  data.frame(
    RMSE = RMSE,
    Rsquare = R_square
  )
}
##########################################################################################
# FIX: seed the RNG so the 75/25 train/test split is reproducible.
set.seed(203)
sample_size = floor(0.75 * nrow(crime_data))
training_index = sample(seq_len(nrow(crime_data)), size = sample_size)
# Split into a training and testing set.
train = crime_data[training_index, ]
test = crime_data[-training_index, ]
# Build design matrices (drop the intercept column) and response vectors.
x = model.matrix(crmrte~., train)[,-1]
y = train$crmrte
x_test = model.matrix(crmrte~., test)[,-1]
y_test = test$crmrte
# Ridge regression (alpha = 0): lambda chosen by cross-validation.
cv.ridge = cv.glmnet(x, y, alpha = 0)
print(cv.ridge$lambda.min) #what's our L?
model.ridge = glmnet(x, y, alpha = 0, lambda = cv.ridge$lambda.min)
ridgecoefs = coef(model.ridge) #what are our coefficients
# Evaluate on training data.
predictions_train <- predict(cv.ridge, s = cv.ridge$lambda.min, newx = x)
eval_results(y, predictions_train, train)
# Prediction and evaluation on test data.
predictions_test <- predict(cv.ridge, s = cv.ridge$lambda.min, newx = x_test)
eval_results(y_test, predictions_test, test)
# Lasso (alpha = 1): lambda chosen by cross-validation.
cv.lasso = cv.glmnet(x, y, alpha = 1)
print(cv.lasso$lambda.min) #what's our L?
model.lasso = glmnet(x, y, alpha = 1, lambda = cv.lasso$lambda.min)
lassocoefs = coef(model.lasso)
lasso_predictions_train = predict(cv.lasso, s = cv.lasso$lambda.min, newx = x)
eval_results(y, lasso_predictions_train, train)
lasso_predictions_test = predict(cv.lasso, s = cv.lasso$lambda.min, newx = x_test)
eval_results(y_test, lasso_predictions_test, test)
# Elastic net via caret: 10-fold CV tuning both alpha and lambda.
enet_model = train(crmrte ~., data = train, method = "glmnet",trControl = trainControl("cv", number = 10), tuneLength = 10)
print(enet_model$bestTune)
coef(enet_model$finalModel, enet_model$bestTune$lambda)
enet_train = predict(enet_model, s = enet_model$bestTune$lambda, newx = x)
eval_results(y, enet_train, train)
enet_predictions = predict(enet_model, x_test)
eval_results(y_test, enet_predictions, test)
# BUG FIX: the original referenced an undefined `predictions.net`; use the
# elastic-net test predictions computed above.
data.frame( RMSE.net = RMSE(enet_predictions, test$crmrte), Rsquare.net = R2(enet_predictions, test$crmrte))
|
/lasso_ridge_elasticnet_jn.R
|
no_license
|
joshuajnoble/w203_lab3
|
R
| false
| false
| 3,015
|
r
|
library(tidyverse)
library(effsize)
library(plyr)
library(reshape2)
library(caret)
library(glmnet)
options(scipen=999)
setwd("C:\\Users\\winbase\\MIDS\\w203\\w203_lab3")
crime_data = read.csv("crime_v2.csv")
#clean data, dropping all NA rows
crime_data = crime_data[which(crime_data$county != 'NA'),]
# what is wrong with prbconv?
class(crime_data$prbconv[1][1]) #r thinks it's a factor?
# let's clean these up
crime_data$prbconv = as.numeric(levels(crime_data$prbconv)[as.integer(crime_data$prbconv)])
##########################################################################################
# Compute R^2 from true and predicted values
eval_results = function(true, predicted, df) {
SSE = sum((predicted - true)^2)
SST = sum((true - mean(true))^2)
R_square = 1 - SSE / SST
RMSE = sqrt(SSE/nrow(df))
# Model performance metrics
data.frame(
RMSE = RMSE,
Rsquare = R_square
)
}
##########################################################################################
#keep .75 of original
sample_size = floor(0.75 * nrow(crime_data))
training_index = sample(seq_len(nrow(crime_data)), size = sample_size)
#split into a training and testing
train = crime_data[training_index, ]
test = crime_data[-training_index, ]
#now build a matrix of our set
x = model.matrix(crmrte~., train)[,-1]# Response
y = train$crmrte
x_test = model.matrix(crmrte~., test)[,-1]# Response
y_test = test$crmrte
#first create a ridge model
cv.ridge = cv.glmnet(x, y, alpha = 0)
print(cv.ridge$lambda.min) #what's our L?
model.ridge = glmnet(x, y, alpha = 0, lambda = cv.ridge$lambda.min)
ridgecoefs = coef(model.ridge) #what are our coefficients
#try on our training data
predictions_train <- predict(cv.ridge, s = cv.ridge$lambda.min, newx = x)
eval_results(y, predictions_train, train)
# Prediction and evaluation on test data
predictions_test <- predict(cv.ridge, s = cv.ridge$lambda.min, newx = x_test)
eval_results(y_test, predictions_test, test) #awful rsquared, why?
# now work up a lasso
cv.lasso = cv.glmnet(x, y, alpha = 1)
print(cv.lasso$lambda.min) #what's our L?
model.lasso = glmnet(x, y, alpha = 1, lambda = cv.lasso$lambda.min)
lassocoefs = coef(model.lasso)
lasso_predictions_train = predict(cv.lasso, s = cv.lasso$lambda.min, newx = x)
eval_results(y, lasso_predictions_train, train)
lasso_predictions_test = predict(cv.lasso, s = cv.lasso$lambda.min, newx = x_test)
eval_results(y_test, lasso_predictions_test, test) #similarly awful?
enet_model = train(crmrte ~., data = train, method = "glmnet",trControl = trainControl("cv", number = 10), tuneLength = 10)
print(enet_model$bestTune)
coef(enet_model$finalModel, enet_model$bestTune$lambda)
enet_train = predict(enet_model, s = enet_model$bestTune$lambda, newx = x)
eval_results(y, enet_train, train)
enet_predictions = predict(enet_model, x_test)
eval_results(y_test, enet_predictions, test) #also bad. Hmm
data.frame( RMSE.net = RMSE(predictions.net, test$crmrte), Rsquare.net = R2(predictions.net, test$crmrte))
|
# BSD_2_clause

# UI definition for the "Map" tab: injects the Open Sans web font, Font
# Awesome icons and the app's custom stylesheet into <head>, then renders a
# full-height Leaflet map.
map_page <- tabPanel(
  title = "Map",
  tags$head(
    tags$link(
      rel = 'stylesheet',
      type = 'text/css',
      href = 'https://fonts.googleapis.com/css?family=Open+Sans:300,400'
    ),
    tags$link(
      rel = 'stylesheet',
      href = 'http://maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css'
    ),
    includeCSS("custom_styles.css")
  ),
  div(
    class = "mapper",
    leafletOutput("map", height = "1000px")
  )
)
|
/map_page.R
|
no_license
|
jacob-ogre/NMFS_WC_salmon
|
R
| false
| false
| 649
|
r
|
# BSD_2_clause
map_page <- {
tabPanel(
title="Map",
tags$head(
tags$link(
href='https://fonts.googleapis.com/css?family=Open+Sans:300,400',
rel='stylesheet',
type='text/css'
),
tags$link(
href='http://maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css',
rel='stylesheet'
),
includeCSS("custom_styles.css") #,
# includeScript("www/gomap.js"),
# includeScript("www/leaflet.zoomhome.js")
),
# tags$style(type="text/css", "body {padding-top: 80px;}"),
div(class = "mapper",
leafletOutput("map", height="1000px")
)
)
}
|
#! This file was automatically produced by the testextra package.
#! Changes will be overwritten.
context('tests extracted from file `testing_blocks.R`')
#line 103 "/rdtf/parsetools/R/testing_blocks.R"
test_that('extract_test_block', {#!@testing
pd <- get_parse_data(parse(text={'
if(F){#!@testing
# a malplaced testing block
FALSE
}
hello_world <- function(){
print("hello world")
}
if(FALSE){#!@testthat
expect_output(hello_world(), "hello world")
}
ldf <- data.frame(id = 1:26, letters)
if(FALSE){#!@testing
# not a function assignment
}
f2 <- function(){stop("this does nothing")}
if(F){#! @example
hw()
}
if(F){#! @test
expect_error(f2())
}
setClass("A")
if(F){#!@testing
#testing a setClass
}
setMethod("print", "A")
if(F){#!@testing
#testing a setMethod
}
setGeneric("my_generic", function(x){x})
if(F){#!@testing
#testing a setClass
}
rnorm(10)
if(F){#!@testing
# no previous name
}
setAs("class1", "class2", function(from){new(from[[1]], "class2")})
if(F){#!@testing
#testing setAs
}
'}, keep.source=TRUE))
iff.ids <- all_tagged_iff_block_ids(pd, c('testing', 'testthat', 'test'))
expect_error( extract_test_block(iff.ids[[1L]], pd)
, "illformed block at <text>:2:5"
, info = "cannot find name for block"
)
expect_equal( extract_test_block(iff.ids[[2L]], pd)
, structure(c( '#line 9 "<text>"'
, 'test_that(\'hello_world\', {#!@testthat'
, ' expect_output(hello_world(), "hello world")'
, ' })'
), name=structure("hello_world", type = "function_assignment"))
, info="testing after function assignment")
expect_equal( extract_test_block(iff.ids[[3L]], pd)
, structure(c( '#line 14 "<text>"'
, 'test_that(\'ldf\', {#!@testing'
, ' # not a function assignment'
, ' })'
), name = structure("ldf", type = "assignment"))
, info="testing after other assignment")
expect_equal( extract_test_block(iff.ids[[4L]], pd)
, structure(c( '#line 22 "<text>"'
, 'test_that(\'f2\', {#! @test'
, ' expect_error(f2())'
, ' })'
), name=structure("f2", type = "function_assignment"))
, info="testing after other iff")
expect_equal( extract_test_block(iff.ids[[5L]], pd)
, structure(c( '#line 27 "<text>"'
, 'test_that(\'setClass("A", ...)\', {#!@testing'
, ' #testing a setClass'
, ' })'
), name="setClass(\"A\", ...)")
, info="testing after setClass")
expect_equal( extract_test_block(iff.ids[[6L]], pd)
, structure(c( '#line 32 "<text>"'
, 'test_that(\'print,A-method\', {#!@testing'
, ' #testing a setMethod'
, ' })'
), name=structure("print,A-method", type = "setMethod"))
, info="testing after setMethod")
expect_equal( extract_test_block(iff.ids[[7L]], pd)
, structure(c( '#line 37 "<text>"'
, 'test_that(\'setGeneric("my_generic", ...)\', {#!@testing'
, ' #testing a setClass'
, ' })'
), name="setGeneric(\"my_generic\", ...)")
, info="testing after setGeneric")
expect_error( extract_test_block(iff.ids[[8L]], pd)
, info="following call")
expect_equal( extract_test_block(iff.ids[2:3], pd)
, structure(c( '#line 9 "<text>"'
, 'test_that(\'hello_world\', {#!@testthat'
, ' expect_output(hello_world(), "hello world")'
, ' })'
, '#line 14 "<text>"'
, 'test_that(\'ldf\', {#!@testing'
, ' # not a function assignment'
, ' })'
)
, test.names = c("hello_world", "ldf")
, start.locations = c(1, 5)
)
, info = "multiple ids")
expect_equal( extract_test_block(iff.ids[9], pd)
, structure(c( '#line 47 "<text>"'
, 'test_that(\'as(class1, "class2")\', {#!@testing'
, ' #testing setAs'
, ' })'
)
, name = c("as(class1, \"class2\")")
)
, info = "setAs")
})
#line 230 "/rdtf/parsetools/R/testing_blocks.R"
test_that('Extraction with block tag.', {#@testing Extraction with block tag.
pd <- get_parse_data(parse(text={"
if(FALSE){#@testing An info string
expect_true(T)
}
"}, keep.source = TRUE))
expect_equal( extract_test_block(roots(pd), pd)
, structure(c( "#line 2 \"<text>\""
, "test_that('An info string', {#@testing An info string"
, " expect_true(T)"
, " })"
)
, name = "An info string")
, info = "using text string")
})
#line 258 "/rdtf/parsetools/R/testing_blocks.R"
test_that('extract_test_blocks_parse_data', {#@testing
ex.file <- system.file("examples", "example.R", package="parsetools")
exprs <- parse(ex.file, keep.source = TRUE)
pd <- get_parse_data(exprs)
expect_null(extract_test_blocks_parse_data(pd))
})
#line 279 "/rdtf/parsetools/R/testing_blocks.R"
test_that('extract_test_blocks', {#! @testthat
text <- {'hello_world <- function(){
print("hello world")
}
if(FALSE){#!@testthat
expect_output(hello_world(), "hello world")
}
f2 <- function(){stop("this does nothing")}
if(F){#! @test
expect_error(f2())
}
if(F){#! example
hw()
}
'}
tmp <- tempfile(fileext = ".R")
writeLines(text, tmp)
test.blocks <- extract_test_blocks(tmp)
expect_equal( test.blocks
, structure(c( sprintf('#line 4 "%s"', tmp)
, 'test_that(\'hello_world\', {#!@testthat'
, ' expect_output(hello_world(), "hello world")'
, '})'
, sprintf('#line 9 "%s"', tmp)
, 'test_that(\'f2\', {#! @test'
, ' expect_error(f2())'
, '})'
)
, test.names = c("hello_world", "f2")
, start.locations = c(1, 5)
)
, info = "Write to file and read back.")
})
|
/tests/testthat/test-testing_blocks.R
|
no_license
|
RDocTaskForce/parsetools
|
R
| false
| false
| 7,439
|
r
|
#! This file was automatically produced by the testextra package.
#! Changes will be overwritten.
context('tests extracted from file `testing_blocks.R`')
#line 103 "/rdtf/parsetools/R/testing_blocks.R"
test_that('extract_test_block', {#!@testing
pd <- get_parse_data(parse(text={'
if(F){#!@testing
# a malplaced testing block
FALSE
}
hello_world <- function(){
print("hello world")
}
if(FALSE){#!@testthat
expect_output(hello_world(), "hello world")
}
ldf <- data.frame(id = 1:26, letters)
if(FALSE){#!@testing
# not a function assignment
}
f2 <- function(){stop("this does nothing")}
if(F){#! @example
hw()
}
if(F){#! @test
expect_error(f2())
}
setClass("A")
if(F){#!@testing
#testing a setClass
}
setMethod("print", "A")
if(F){#!@testing
#testing a setMethod
}
setGeneric("my_generic", function(x){x})
if(F){#!@testing
#testing a setClass
}
rnorm(10)
if(F){#!@testing
# no previous name
}
setAs("class1", "class2", function(from){new(from[[1]], "class2")})
if(F){#!@testing
#testing setAs
}
'}, keep.source=TRUE))
iff.ids <- all_tagged_iff_block_ids(pd, c('testing', 'testthat', 'test'))
expect_error( extract_test_block(iff.ids[[1L]], pd)
, "illformed block at <text>:2:5"
, info = "cannot find name for block"
)
expect_equal( extract_test_block(iff.ids[[2L]], pd)
, structure(c( '#line 9 "<text>"'
, 'test_that(\'hello_world\', {#!@testthat'
, ' expect_output(hello_world(), "hello world")'
, ' })'
), name=structure("hello_world", type = "function_assignment"))
, info="testing after function assignment")
expect_equal( extract_test_block(iff.ids[[3L]], pd)
, structure(c( '#line 14 "<text>"'
, 'test_that(\'ldf\', {#!@testing'
, ' # not a function assignment'
, ' })'
), name = structure("ldf", type = "assignment"))
, info="testing after other assignment")
expect_equal( extract_test_block(iff.ids[[4L]], pd)
, structure(c( '#line 22 "<text>"'
, 'test_that(\'f2\', {#! @test'
, ' expect_error(f2())'
, ' })'
), name=structure("f2", type = "function_assignment"))
, info="testing after other iff")
expect_equal( extract_test_block(iff.ids[[5L]], pd)
, structure(c( '#line 27 "<text>"'
, 'test_that(\'setClass("A", ...)\', {#!@testing'
, ' #testing a setClass'
, ' })'
), name="setClass(\"A\", ...)")
, info="testing after setClass")
expect_equal( extract_test_block(iff.ids[[6L]], pd)
, structure(c( '#line 32 "<text>"'
, 'test_that(\'print,A-method\', {#!@testing'
, ' #testing a setMethod'
, ' })'
), name=structure("print,A-method", type = "setMethod"))
, info="testing after setMethod")
expect_equal( extract_test_block(iff.ids[[7L]], pd)
, structure(c( '#line 37 "<text>"'
, 'test_that(\'setGeneric("my_generic", ...)\', {#!@testing'
, ' #testing a setClass'
, ' })'
), name="setGeneric(\"my_generic\", ...)")
, info="testing after setGeneric")
expect_error( extract_test_block(iff.ids[[8L]], pd)
, info="following call")
expect_equal( extract_test_block(iff.ids[2:3], pd)
, structure(c( '#line 9 "<text>"'
, 'test_that(\'hello_world\', {#!@testthat'
, ' expect_output(hello_world(), "hello world")'
, ' })'
, '#line 14 "<text>"'
, 'test_that(\'ldf\', {#!@testing'
, ' # not a function assignment'
, ' })'
)
, test.names = c("hello_world", "ldf")
, start.locations = c(1, 5)
)
, info = "multiple ids")
expect_equal( extract_test_block(iff.ids[9], pd)
, structure(c( '#line 47 "<text>"'
, 'test_that(\'as(class1, "class2")\', {#!@testing'
, ' #testing setAs'
, ' })'
)
, name = c("as(class1, \"class2\")")
)
, info = "setAs")
})
#line 230 "/rdtf/parsetools/R/testing_blocks.R"
# Verify that an `if(FALSE){#@testing <info>}` block is extracted as a
# test_that() call whose name is taken from the tag's info string, prefixed
# with a #line directive so failures point back at the original source.
test_that('Extraction with block tag.', {#@testing Extraction with block tag.
    # Parse a tiny source snippet containing one tagged testing block.
    pd <- get_parse_data(parse(text={"
    if(FALSE){#@testing An info string
        expect_true(T)
    }
    "}, keep.source = TRUE))
    # The extracted block keeps the original body lines verbatim but replaces
    # `if(FALSE)` with `test_that('An info string', ...` and carries the info
    # string in the `name` attribute.
    expect_equal( extract_test_block(roots(pd), pd)
                , structure(c( "#line 2 \"<text>\""
                             , "test_that('An info string', {#@testing An info string"
                             , "    expect_true(T)"
                             , "    })"
                             )
                           , name = "An info string")
                , info = "using text string")
})
#line 258 "/rdtf/parsetools/R/testing_blocks.R"
# extract_test_blocks_parse_data() should return NULL for parse data that
# contains no tagged testing blocks; the shipped example file has none.
test_that('extract_test_blocks_parse_data', {#@testing
    ex.file <- system.file("examples", "example.R", package="parsetools")
    exprs <- parse(ex.file, keep.source = TRUE)
    pd <- get_parse_data(exprs)
    expect_null(extract_test_blocks_parse_data(pd))
})
#line 279 "/rdtf/parsetools/R/testing_blocks.R"
# Round-trip check: write a file containing `if(FALSE)` testing blocks, then
# confirm extract_test_blocks() reads it back as test_that() code.  The
# untagged `#! example` block must be ignored; #line directives must point at
# the temp file; `test.names` and `start.locations` attributes must be set.
test_that('extract_test_blocks', {#! @testthat
    text <- {'hello_world <- function(){
        print("hello world")
    }
    if(FALSE){#!@testthat
        expect_output(hello_world(), "hello world")
    }
    f2 <- function(){stop("this does nothing")}
    if(F){#! @test
        expect_error(f2())
    }
    if(F){#! example
        hw()
    }
    '}
    # Write the snippet to a real file so the file-based entry point is used.
    tmp <- tempfile(fileext = ".R")
    writeLines(text, tmp)
    test.blocks <- extract_test_blocks(tmp)
    expect_equal( test.blocks
                , structure(c( sprintf('#line 4 "%s"', tmp)
                             , 'test_that(\'hello_world\', {#!@testthat'
                             , '    expect_output(hello_world(), "hello world")'
                             , '})'
                             , sprintf('#line 9 "%s"', tmp)
                             , 'test_that(\'f2\', {#! @test'
                             , '    expect_error(f2())'
                             , '})'
                             )
                           , test.names = c("hello_world", "f2")
                           , start.locations = c(1, 5)
                           )
                , info = "Write to file and read back.")
})
|
## Four-panel barplot of observed vs expected CpG C>T mutation rates (per Mb)
## in introns and exons for four sample groups; written to a PDF.
## Fixes: `byrow = T` -> `TRUE` (T is reassignable), redundant `paste0(sap[i])`,
## and the repeated per-group scaling vector hoisted into a named constant.

# Average aggregate feature lengths (Mb) used to normalise counts to per-Mb rates.
exon.len <- 10.439090
intron.len <- 317.326271

pdf("intron_exon_cpg_ct.pdf", 8, 2)
layout(matrix(1:4, nrow = 1, byrow = TRUE))
par(mar = c(4, 4, 4, 1), mgp = c(2, .5, 0))
#==========================cpg data==================
# Per-group divisors for the expected counts; group 4 uses a smaller
# multiplier than the others (presumably fewer simulation replicates --
# TODO confirm with the data provenance).
exp.scale <- c(100, 100, 100, 10, 100, 100)

exp.exon <- scan("data/exp_exon.txt", what = "s")
exp.exon <- as.numeric(exp.exon) / exp.scale / exon.len
exp.intron <- scan("data/exp_intron.txt", what = "s")
exp.intron <- as.numeric(exp.intron) / exp.scale / intron.len
obs.exon <- scan("data/obs_exon.txt", what = "s")
obs.exon <- as.numeric(obs.exon) / exon.len
obs.intron <- scan("data/obs_intron.txt", what = "s")
obs.intron <- as.numeric(obs.intron) / intron.len
#----------------------------------------------
# Columns: intron obs, intron exp, exon obs, exon exp; one row per group.
all.data <- matrix(c(obs.intron, exp.intron, obs.exon, exp.exon), ncol = 4)
all.data <- signif(all.data, 3)

# Human-readable group labels (file supplies defaults; 3-6 overridden).
sap <- scan("data/sap_name", what = "s")
sap[3] <- "MBD4 mutants"
sap[4] <- "POLE mutants"
sap[5] <- "MMRd(MSI)"
sap[6] <- "MSS"

cl <- c("#fbb4ae", "#b3cde3")
# Panel order: MSS, POLE, MMRd(MSI), MBD4.
for (i in c(6, 4, 5, 3)) {
  a <- barplot(all.data[i, ], main = sap[i], ylab = "CpG C>T/Mb",
               names.arg = c("Obs", "Exp", "Obs", "Exp"),
               col = cl, border = cl, space = c(0.1, 0.1, 0.4, 0.1))
  # Label each bar with its value, placed just below the bar top.
  tmp <- all.data[i, ] / 8
  text(x = a, y = all.data[i, ] - tmp, labels = all.data[i, ], cex = .8)
  mtext("Intron", 1, cex = .8, line = 2, adj = .2)
  mtext("Exon", 1, cex = .8, line = 2, adj = .8)
}
dev.off()
|
/Fig5_epigenetics/A-D_intron_exon_mut/intron_exon_cpg_ct.R
|
no_license
|
fanghu-hku/MMR
|
R
| false
| false
| 1,247
|
r
|
## Four-panel barplot of observed vs expected CpG C>T rates (per Mb) in
## introns and exons; output written to intron_exon_cpg_ct.pdf.

# Aggregate feature lengths (Mb) used to normalise counts to per-Mb rates.
exon.len <- 10.439090
intron.len <- 317.326271

pdf("intron_exon_cpg_ct.pdf", 8, 2)
layout(matrix(1:4, nrow = 1, byrow = T))
par(mar = c(4, 4, 4, 1), mgp = c(2, .5, 0))
#==========================cpg data==================
# Read a whitespace-separated numeric file and divide by `divisor`.
load.rate <- function(path, divisor) {
  vals <- scan(path, what = "s")
  as.numeric(vals) / divisor
}
exp.exon   <- load.rate("data/exp_exon.txt",   c(100, 100, 100, 10, 100, 100)) / exon.len
exp.intron <- load.rate("data/exp_intron.txt", c(100, 100, 100, 10, 100, 100)) / intron.len
obs.exon   <- load.rate("data/obs_exon.txt",   exon.len)
obs.intron <- load.rate("data/obs_intron.txt", intron.len)
#----------------------------------------------
# Columns: intron obs/exp then exon obs/exp; rounded to 3 significant digits.
all.data <- signif(matrix(c(obs.intron, exp.intron, obs.exon, exp.exon), ncol = 4), 3)

# Group labels; entries 3-6 overridden with display names.
sap <- scan("data/sap_name", what = "s")
sap[c(3, 4, 5, 6)] <- c("MBD4 mutants", "POLE mutants", "MMRd(MSI)", "MSS")

bar.cols <- c("#fbb4ae", "#b3cde3")
# One panel per group, in the order MSS, POLE, MMRd(MSI), MBD4.
for (grp in c(6, 4, 5, 3)) {
  vals <- all.data[grp, ]
  bar.x <- barplot(vals, main = paste0(sap[grp]), ylab = "CpG C>T/Mb",
                   names.arg = c("Obs", "Exp", "Obs", "Exp"),
                   col = bar.cols, border = bar.cols,
                   space = c(0.1, 0.1, 0.4, 0.1))
  # Value labels sit just under each bar's top.
  text(x = bar.x, y = vals - vals / 8, labels = vals, cex = .8)
  mtext("Intron", 1, cex = .8, line = 2, adj = .2)
  mtext("Exon", 1, cex = .8, line = 2, adj = .8)
}
dev.off()
|
############S#################
## TRAIT CHANGE OVER SPACE ##
############S#################
# NOTE(review): clearing the workspace inside a script is discouraged (it does
# not give a fresh session and removes the caller's objects); kept here only
# to preserve the original workflow.
rm(list=ls())
#Detach packages####
# Detach every attached package except the base set, so the library() calls
# below start from a predictable search path.  Packages appear on search()
# with a "package:" prefix; the anchored grepl() replaces the previous fragile
# `ifelse(unlist(gregexpr("package:", search())) == 1, TRUE, FALSE)`
# construction (which breaks if any search entry matches "package:" twice and
# needlessly wraps a logical in ifelse()).  The empty-vector `for` loop is a
# no-op, so the old `if (length(...) > 0)` guard is redundant.
detachAllPackages <- function() {
  basic.packages <- c("package:stats","package:graphics","package:grDevices","package:utils","package:datasets","package:methods","package:base")
  package.list <- setdiff(search()[grepl("^package:", search())], basic.packages)
  for (package in package.list) {
    detach(package, character.only = TRUE)
  }
}
detachAllPackages()
#Question 1 - Differences between teas
####Open packages####
library(raster)
library(rgdal)
library(lme4)
library(nlme)
library(stringr)
library(plyr)
library(dplyr)
library(ggplot2)
require(gridExtra)
#library(brms)
library(rstan)
library(StanHeaders)
library(MuMIn)
library(MCMCglmm)
library(postMCMCglmm)
#### CHOOSE TRAIT NAME AND CLIMATE VARIABLE HERE ----
###Read in tea
# Combined tea-bag decomposition dataset: one row per buried tea bag.
tea<-read.csv("scripts/users/hthomas/tea/combined_tea.csv", stringsAsFactors = F)
#Remove daily tea - too confusing!
tea<-subset(tea,!grepl("CG_DT_HT",tea$Plot))
#Remove sub zero plots
# subset() also drops rows where Loss is NA.
tea<-subset(tea,Loss>0)
# NOTE(review): Rooibos mass loss > 0.5 is treated as implausible and set to
# NA -- presumably a QC threshold; confirm with the data owner.
tea[tea$Tea_Type=="Rooibos" & tea$Loss >0.5,]$Loss<-NA
#Make sure only using control plots
ambient<-subset(tea,Treatment=="None")
#Split into seasons to make things easier
Summer<-subset(ambient,Season=="Summer")
year<-subset(ambient,Season=="Year")
winter<-subset(ambient,Season=="Winter")
## STAN MODEL - soil temperature ----
#soil temperature#
# Candidate response variables; only var.list[1] ("Loss") is used below.
var.list <- c("Loss", "Loss_Day", "k", "TBI_k", "TBI_S")
#Calculate mean burial length
#Get column number
i=1
var.num<-which(colnames(year)==var.list[i])
# Keep year-round rows with finite response and finite soil temperature.
season_narm<-year %>%
  filter(is.finite(year[,var.num]),is.finite(soiltemp_mean))
#Subset for tea types
#season_narm_r<-subset(season_narm,Tea_Type=="Rooibos") #AB NOTE: Keeping both tea types and including as interaction in model
season_narm_r <- season_narm #just so I don't have to rename everything
# AB: MULTIPLE OBSERVATION
# Flag replication at each hierarchy level: MultipleObs = >4 bags per plot
# per tea type; MultipleSites = region (ESA cell) has >1 site; MultiplePlots
# = site has >1 plot.  These flags gate the random effects in the Stan model.
season_narm_r <- ddply(season_narm_r, c("ESA_cell","Site","Plot","Tea_Type"), transform, NObsPlot = length(Loss))
season_narm_r$MultipleObs <- ifelse(season_narm_r$NObsPlot > 4, 1, 0)
# Multiple Sites
count.sites <- ddply(season_narm_r, c("ESA_cell"), summarise, n.sub = length(unique(Site)))
season_narm_r$MultipleSites <- ifelse(season_narm_r$ESA_cell %in% count.sites$ESA_cell[count.sites$n.sub > 1], 1, 0)
# Multiple plots per Site (more than 1)
count.plots <- ddply(season_narm_r, c("ESA_cell", "Site"), summarise, n.plots = length(unique(Plot)))
season_narm_r$MultiplePlots <- ifelse(season_narm_r$Site %in% count.plots$Site[count.plots$n.plots > 1], 1, 0)
#Add env.levels (alternative)
#Add env.levels (new - based on nestedness)
# envlevel_* encodes the lowest hierarchy level at which the environmental
# variable actually varies: 0 = constant, 1 = varies among sites within a
# region, 2 = varies among plots within a site.
env.levels<- season_narm_r %>%
  select(soiltemp_mean,ESA_cell,Site,Plot)
season_narm_r$envlevel_soil<-0
env.levels2<-ddply(env.levels, c("ESA_cell"), summarise, n.plots = length(unique(soiltemp_mean)))
season_narm_r$envlevel_soil <- ifelse(season_narm_r$ESA_cell %in% env.levels2$ESA_cell[env.levels2$n.plots > 1], 1, season_narm_r$envlevel_soil)
env.levels2<-ddply(env.levels, c("ESA_cell","Site"), summarise, n.plots = length(unique(soiltemp_mean)))
season_narm_r$envlevel_soil <- ifelse(season_narm_r$Site %in% env.levels2$Site[env.levels2$n.plots > 1], 2, season_narm_r$envlevel_soil)
#And for moisture
env.levels<- season_narm_r %>%
  select(moisture_mean,ESA_cell,Site,Plot)
season_narm_r$envlevel_moisture<-0
env.levels2<-ddply(env.levels, c("ESA_cell"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel_moisture <- ifelse(season_narm_r$ESA_cell %in% env.levels2$ESA_cell[env.levels2$n.plots > 1], 1, season_narm_r$envlevel_moisture)
env.levels2<-ddply(env.levels, c("ESA_cell","Site"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel_moisture <- ifelse(season_narm_r$Site %in% env.levels2$Site[env.levels2$n.plots > 1], 2, season_narm_r$envlevel_moisture)
#Now take lowest env level as overall one
# NOTE(review): hardcoded column positions 63:64 are assumed to be
# envlevel_soil and envlevel_moisture -- fragile if columns are added
# upstream; safer would be season_narm_r[, c("envlevel_soil","envlevel_moisture")].
season_narm_r$envlevel <- apply(season_narm_r[, 63:64], 1, max)#Add categories
#Subset so only using data I want to check model
#season_narm_r<-subset(season_narm_r, Cat == 1 | Cat == 2 | Cat == 3)
# AB: REMOVE MISSING VALUES OF SOIL soiltemp AND TEMPERATURE FOR THE soiltemp X TEMPERATURE INTERACTION MODEL
season_narm_r <- season_narm_r[!is.na(season_narm_r$soiltemp_mean) & !is.na(season_narm_r$moisture_mean),]
#Add Region numbers
# Integer IDs for region/site/plot used as Stan indices; each nests Tea_Type
# so green and rooibos get separate effects at every level.
# NOTE(review): group_indices_() is deprecated in current dplyr -- confirm
# the installed version still supports it.
season_narm_r<-season_narm_r %>%
  mutate(RegionNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$RegionNum),]
#Add Site numbers
season_narm_r<-season_narm_r %>%
  mutate(SiteNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$SiteNum),]
#Add Plot numbers
season_narm_r<-season_narm_r %>%
  mutate(PlotNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Plot","Tea_Type"))) #AB NOTE: This now includes tea type as well! So there will be a unique plot number for each tea type within a plot
#Reorder by plot number
season_narm_r<-season_narm_r[order(season_narm_r$PlotNum),]
# Moisture summary points, computed on the RAW scale before centring below;
# used later to build prediction grids.
LQ_moist<-quantile(season_narm_r$moisture_mean,0.25)
UQ_moist<-quantile(season_narm_r$moisture_mean,0.75)
mean_moist<-mean(season_narm_r$moisture_mean)
med_moist<-quantile(season_narm_r$moisture_mean,0.5)
#Centre values - AB note: Either don't name this the same thing or save the amount you center by first so we can add it to the xhats later
# Save centring/scaling constants so predictions can be back-transformed.
soiltemp_cent_amount <- attr(scale(season_narm_r$soiltemp_mean, center = TRUE, scale = TRUE), 'scaled:center')
soiltemp_scale_amount <- attr(scale(season_narm_r$soiltemp_mean, center = TRUE, scale = TRUE), 'scaled:scale')
season_narm_r$soiltemp_mean<-scale(season_narm_r$soiltemp_mean, center = TRUE, scale = TRUE)
moist_cent_amount <- attr(scale(season_narm_r$moisture_mean, center = TRUE, scale = TRUE), 'scaled:center')
moist_scale_amount <- attr(scale(season_narm_r$moisture_mean, center = TRUE, scale = TRUE), 'scaled:scale')
season_narm_r$moisture_mean<-scale(season_narm_r$moisture_mean, center = TRUE, scale = TRUE)
days_cent_amount <- attr(scale(season_narm_r$Days, center = TRUE, scale = TRUE), 'scaled:center')
days_scale_amount <- attr(scale(season_narm_r$Days, center = TRUE, scale = TRUE), 'scaled:scale')
season_narm_r$Days<-scale(season_narm_r$Days, center = TRUE, scale = TRUE)
#AB: caluclate mean and sd per site - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
# Per-site means/SDs of the (already centred) predictors; these feed the
# measurement-error terms in the Stan model.
season_narm_r_sites<-season_narm_r %>%
  group_by(SiteNum) %>%
  summarise(soiltemp_mean_site = mean(soiltemp_mean),
            soiltemp_sd_site = sd(soiltemp_mean),
            moist_mean_site = mean(moisture_mean),
            moist_sd_site = sd(moisture_mean))
season_narm_r$soiltemp_mean_site<-season_narm_r_sites$soiltemp_mean_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$soiltemp_sd_site<-season_narm_r_sites$soiltemp_sd_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$moist_mean_site<-season_narm_r_sites$moist_mean_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$moist_sd_site<-season_narm_r_sites$moist_sd_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
# SDs of 0 (single distinct value) are replaced by the mean positive SD, and
# NA SDs (single observation) by a small constant so Stan's normal() scale
# parameter is always positive.
season_narm_r$soiltemp_sd_site[season_narm_r$soiltemp_sd_site==0] <- mean(season_narm_r$soiltemp_sd_site[season_narm_r$soiltemp_sd_site>0],na.rm = T)
season_narm_r$soiltemp_sd_site[is.na(season_narm_r$soiltemp_sd_site)] <- 0.001
season_narm_r$moist_sd_site[season_narm_r$moist_sd_site==0] <- mean(season_narm_r$moist_sd_site[season_narm_r$moist_sd_site>0],na.rm = T)
season_narm_r$moist_sd_site[is.na(season_narm_r$moist_sd_site)] <- 0.001
#AB: caluclate mean and sd per region - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
season_narm_r_regions<-season_narm_r %>%
  group_by(RegionNum) %>%
  summarise(soiltemp_mean_region = mean(soiltemp_mean),
            soiltemp_sd_region = sd(soiltemp_mean),
            moist_mean_region = mean(moisture_mean),
            moist_sd_region = sd(moisture_mean))
season_narm_r$soiltemp_mean_region<-season_narm_r_regions$soiltemp_mean_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$soiltemp_sd_region<-season_narm_r_regions$soiltemp_sd_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moist_mean_region<-season_narm_r_regions$moist_mean_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moist_sd_region<-season_narm_r_regions$moist_sd_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$soiltemp_sd_region[season_narm_r$soiltemp_sd_region==0] <- mean(season_narm_r$soiltemp_sd_region[season_narm_r$soiltemp_sd_region>0],na.rm = T)
season_narm_r$soiltemp_sd_region[is.na(season_narm_r$soiltemp_sd_region)] <- 0.001
season_narm_r$moist_sd_region[season_narm_r$moist_sd_region==0] <- mean(season_narm_r$moist_sd_region[season_narm_r$moist_sd_region>0],na.rm = T)
season_narm_r$moist_sd_region[is.na(season_narm_r$moist_sd_region)] <- 0.001
#Add mean days per region
# Burial-duration mean/SD per site (leaves season_narm_r grouped by SiteNum
# until the next group_by() replaces the grouping).
season_narm_r<-season_narm_r %>%
  group_by(SiteNum) %>%
  mutate(SiteDays = mean(Days),
         SiteDays_sd = sd(Days))
season_narm_r$SiteDays_sd[season_narm_r$SiteDays_sd==0 | is.na(season_narm_r$SiteDays_sd)] <- 0.001
#Add mean days per region
season_narm_r<-season_narm_r %>%
  group_by(RegionNum) %>%
  mutate(RegionDays = mean(Days),
         RegionDays_sd = sd(Days))
season_narm_r$RegionDays_sd[season_narm_r$RegionDays_sd==0 | is.na(season_narm_r$RegionDays_sd)] <- 0.001
mean_burial<-mean(season_narm_r$Days)
min_soil<-min(season_narm_r$soiltemp_mean,na.rm=TRUE)
max_soil<-max(season_narm_r$soiltemp_mean,na.rm=TRUE)
min_soiltemp<-min(season_narm_r$soiltemp_mean,na.rm=TRUE)
max_soiltemp<-max(season_narm_r$soiltemp_mean,na.rm=TRUE)
# Centring constants for the moisture prediction points (quantiles computed
# on the raw scale above), so they live on the same scale as the data.
moist_x_cent_amount <- attr(scale(c(LQ_moist,UQ_moist,mean_moist,med_moist), center = TRUE, scale = TRUE), 'scaled:center')
moist_x_scale_amount <- attr(scale(c(LQ_moist,UQ_moist,mean_moist,med_moist), center = TRUE, scale = TRUE), 'scaled:scale')
xhats <- expand.grid(xhat1=seq(min_soiltemp, max_soiltemp,by=0.01), xhat2=scale(c(LQ_moist,UQ_moist,mean_moist,med_moist), center = TRUE, scale = TRUE),xhat3 = mean_burial) #AB: predicting soil soiltemp at 25% and 75% (assuming you will graph temperature as continuous) but of course you can change this to whatever you want
####Third attempt - adding temperature levels#######
# Data list passed to rstan::stan().  Index vectors are per-observation;
# the *_short / [!duplicated(...)] variants are one entry per plot, site or
# region (the data are sorted by PlotNum above, so !duplicated() picks the
# first row of each group).
jags.dat<-list(
  Nobs=nrow(season_narm_r),
  NSite=length(unique(season_narm_r$SiteNum)),
  NRegion=length(unique(season_narm_r$RegionNum)),
  NPlot=length(unique(season_narm_r$PlotNum)),
  NSiteDays=length(unique(season_narm_r$SiteDays)),
  NRegionDays=length(unique(season_narm_r$RegionDays)),
  NTea=length(unique(season_narm_r$Tea_Type)),
  Region=season_narm_r$RegionNum,
  Site=season_narm_r$SiteNum,
  Plot=season_narm_r$PlotNum,
  SiteDays=season_narm_r$SiteDays[!duplicated(season_narm_r$SiteNum)],
  SiteDays_sd=season_narm_r$SiteDays_sd[!duplicated(season_narm_r$SiteNum)],
  RegionDays=season_narm_r$RegionDays[!duplicated(season_narm_r$RegionNum)],
  RegionDays_sd=season_narm_r$RegionDays_sd[!duplicated(season_narm_r$RegionNum)],
  Site_short=season_narm_r$SiteNum[!duplicated(season_narm_r$PlotNum)],
  Plot_short=unique(season_narm_r$PlotNum),
  # Tea type coded 1 = Green, 2 = Rooibos at site and region level.
  tea_type_site=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$SiteNum)]=="Green", 1, 2),
  tea_type_region=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$RegionNum)]=="Green", 1, 2),
  multobs_lobs=season_narm_r$MultipleObs,
  multobs_lplot=season_narm_r$MultipleObs[!duplicated(season_narm_r$PlotNum)],
  multsites_lobs=season_narm_r$MultipleSites,
  multsites_lplot=season_narm_r$MultipleSites[!duplicated(season_narm_r$PlotNum)],
  multsites_lsite=season_narm_r$MultipleSites[!duplicated(season_narm_r$SiteNum)],
  multsites_lregion=season_narm_r$MultipleSites[!duplicated(season_narm_r$RegionNum)],
  multplots_lobs=season_narm_r$MultiplePlots,
  multplots_lplot=season_narm_r$MultiplePlots[!duplicated(season_narm_r$PlotNum)],
  multplots_lsite=season_narm_r$MultiplePlots[!duplicated(season_narm_r$SiteNum)],
  multplots_lregion=season_narm_r$MultiplePlots[!duplicated(season_narm_r$RegionNum)],
  # NOTE(review): MultiplePlots_Region is never created in this script, so
  # these four entries evaluate to NULL; they are also not declared in the
  # Stan data block below -- confirm they are leftover from an earlier model
  # version and can be removed.
  multplots_region_lobs=season_narm_r$MultiplePlots_Region,
  multplots_region_lplot=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$PlotNum)],
  multplots_region_lsite=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$SiteNum)],
  multplots_region_lregion=season_narm_r$MultiplePlots_Region[!duplicated(season_narm_r$RegionNum)],
  traitobs=season_narm_r$Loss,
  #temp_plot=as.numeric(season_narm_r[!duplicated(season_narm_r$PlotNum),]$soiltemp_mean),
  #temp_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$soiltemp_mean),
  temp_mean_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$soiltemp_mean_region),
  temp_sd_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$soiltemp_sd_region),
  temp_mean_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$soiltemp_mean_site),
  temp_sd_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$soiltemp_sd_site),
  moist_mean_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moist_mean_region),
  moist_sd_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moist_sd_region),
  moist_mean_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moist_mean_site),
  moist_sd_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moist_sd_site),
  obs_envlevel=season_narm_r$envlevel,
  plot_envlevel=season_narm_r[!duplicated(season_narm_r$PlotNum),]$envlevel,
  site_envlevel=season_narm_r[!duplicated(season_narm_r$SiteNum),]$envlevel,
  region_envlevel=season_narm_r[!duplicated(season_narm_r$RegionNum),]$envlevel,
  meanT=mean(as.numeric(season_narm_r$soiltemp_mean[!duplicated(season_narm_r$ESA_cell)])),
  # Prediction grid (standardised soil temp x 4 moisture levels x mean days).
  xhat1=xhats$xhat1,
  xhat2=xhats$xhat2,
  xhat3=xhats$xhat3,
  Nxhat=length(xhats$xhat1)
)
str(jags.dat)
# MODEL - ANNE EDITS####
# Write the Stan model to disk.  Structure: three-level hierarchy (plot in
# site in region) for tea-bag mass loss; site/region effects at the lowest
# replicated level are regressed on soil temperature, moisture, their
# interaction and burial days, with normal measurement-error priors on the
# environmental predictors.  The model code is a runtime string literal and
# is reproduced here byte-for-byte (no edits inside the string).
write("
    data {
      int<lower=0> Nobs; //Number of observations
      int<lower=0> NRegion; //Number of regions
      int<lower=0> NSite; //Number of sites
      int<lower=0> NPlot; //Number of plots
      int<lower=0> Nxhat; //No. predictor variables
      int<lower=0> NTea; //No. of tea types
      int<lower=0> NSiteDays; //No. of days
      int<lower=0> NRegionDays; //No. of days
      int<lower=1,upper=NPlot> Plot[Nobs]; //Plots (all observations)
      int<lower=1,upper=NSite> Site[Nobs]; //Plots (all observations)
      int<lower=1,upper=NRegion> Region[Nobs]; //Plots (all observations)
      int<lower=1,upper=2> tea_type_site[NSite]; //Tea type (1=Green, 2=Rooibos)
      int<lower=1,upper=2> tea_type_region[NRegion]; //Tea type (1=Green, 2=Rooibos)
      int<lower=0,upper=1> multobs_lobs[Nobs]; //Are sites nested in region (all obs)
      int<lower=0,upper=1> multobs_lplot[NPlot]; //Are sites nested in region (all obs)
      int<lower=0,upper=1> multsites_lobs[Nobs]; //Are sites nested in region (all obs)
      int<lower=0,upper=1> multsites_lplot[NPlot]; //Are sites nested in region (no. plots)
      int<lower=0,upper=1> multsites_lsite[NSite]; //Are sites nested in region (no. plots)
      int<lower=0,upper=1> multsites_lregion[NRegion]; //Are sites nested in region (no. plots)
      int<lower=0,upper=1> multplots_lobs[Nobs]; //Are plots nested in site (all obs)
      int<lower=0,upper=1> multplots_lplot[NPlot]; //Are plots nested in site (no plots)
      int<lower=0,upper=2> obs_envlevel[Nobs];
      int<lower=0,upper=2> site_envlevel[NSite];
      int<lower=0,upper=2> region_envlevel[NRegion];
      vector[Nobs] traitobs; //Mass Loss
      vector[NSite] temp_mean_site; //Temperature (unique regions)
      vector[NSite] temp_sd_site; //Temperature SD (unique regions)
      vector[NRegion] temp_mean_region; //Temperature (unique regions)
      vector[NRegion] temp_sd_region; //Temperature SD (unique regions)
      vector[NSite] moist_mean_site; //Temperature (unique regions)
      vector[NSite] moist_sd_site; //Temperature SD (unique regions)
      vector[NRegion] moist_mean_region; //Temperature (unique regions)
      vector[NRegion] moist_sd_region; //Temperature SD (unique regions)
      vector[NSite] SiteDays; //
      vector[NSite] SiteDays_sd; //
      vector[NRegion] RegionDays; //
      vector[NRegion] RegionDays_sd; //
      vector[Nxhat] xhat1; //Predictor variables
      vector[Nxhat] xhat2; //Predictor variables
      vector[Nxhat] xhat3; //Predictor variables
    }
    parameters {
      real<lower=-3,upper=3> aMeanRegion[NRegion]; // Region effect
      real<lower=-5,upper=5> ap[NPlot];
      real<lower=-5,upper=5> as[NSite];
      real<lower=-2,upper=2> gamma0[NTea]; // intercept of relationship between mass loss and temp change
      real<lower=-2,upper=2> gamma1; // slope of temperature - loss relationship
      real<lower=-2,upper=2> gamma2; // slope of soiltemp - loss relationship
      real<lower=-2,upper=2> gamma3; // temperature - soiltemp interaction
      real<lower=-2,upper=2> gamma4; // temperature - soiltemp interaction
      real<lower=0,upper=5> sigma_overall; //Error around loss- temp relationship
      real<lower=0,upper=5> sigma_plot;
      real<lower=0,upper=5> sigma_site;
      real<lower=0,upper=5> sigma_region;
      real<lower=0,upper=5> sigma_resid;
      vector[NSite] temp_pred_site;
      vector[NSite] moist_pred_site;
      vector[NSite] days_pred_site;
      vector[NRegion] temp_pred_region;
      vector[NRegion] moist_pred_region;
      vector[NRegion] days_pred_region;
    }
    transformed parameters {
      vector[Nobs] mu;
      vector[Nobs] app;
      vector[Nobs] ass;
      vector[Nobs] arr;
      for (i in 1:Nobs){
      if((multobs_lobs[i]==1 && multplots_lobs[i]==1))
      app[i] = ap[Plot[i]];
      // set plot effects to 0 for plots that don't have multiple obs or are the only plot within a site
      else app[i] = 0;
      if(multsites_lobs[i] == 1)
      ass[i] = as[Site[i]];
      else ass[i] = 0;
      if(multsites_lobs[i]==1 && obs_envlevel[i] >0)
      arr[i] = 0;
      else arr[i] = aMeanRegion[Region[i]];
      mu[i] = app[i] + ass[i] + arr[i];;
      }
      //print(\"ap=\",ap[1:10],\"as=\",as[1:10],\"aMeanSite=\",aMeanSite[1:8],\"mu=\",mu[1:10])
    }
    model {
      for (i in 1:Nobs){
      traitobs[i] ~ normal(mu[i], sigma_resid);
      }
      //Set up plot and site random effects
      for (i in 1:NPlot){
      if(multobs_lplot[i]==1 && multplots_lplot[i]==1)
      ap[i] ~ normal(0, sigma_plot);
      }
      //Bring in environmental data means and SD per region
      for (i in 1:NRegion){
      temp_pred_region[i] ~ normal(temp_mean_region[i], temp_sd_region[i]); //temp_mean_region and temp_sd are given as data
      moist_pred_region[i] ~ normal(moist_mean_region[i], moist_sd_region[i]); //temp_mean_region and temp_sd are given as data
      days_pred_region[i] ~ normal(RegionDays[i], RegionDays_sd[i]); //temp_mean_region and temp_sd are given as data
      }
      for (i in 1:NSite){
      temp_pred_site[i] ~ normal(temp_mean_site[i], temp_sd_site[i]); //temp_mean_region and temp_sd are given as data
      moist_pred_site[i] ~ normal(moist_mean_site[i], moist_sd_site[i]); //temp_mean_region and temp_sd are given as data
      days_pred_site[i] ~ normal(SiteDays[i], SiteDays_sd[i]); //temp_mean_region and temp_sd are given as data
      }
      //Relationship between mass loss at the region level and temperature and soiltemp, per tea type
      for (i in 1:NSite){
      if(multsites_lsite[i] == 1 && site_envlevel[i] >0)
      as[i] ~ normal(gamma0[tea_type_site[i]] + gamma1*temp_pred_site[i] + gamma2*moist_pred_site[i] + gamma3*temp_pred_site[i]*moist_pred_site[i] + gamma4*days_pred_site[i], sigma_overall);
      else as[i] ~ normal(0, sigma_site);
      }
      for (i in 1:NRegion){
      if(multsites_lregion[i] == 1 && region_envlevel[i] >0)
      aMeanRegion[i] ~ normal(0, sigma_region);
      else aMeanRegion[i] ~ normal(gamma0[tea_type_region[i]] + gamma1*temp_pred_region[i] + gamma2*moist_pred_region[i] + gamma3*temp_pred_region[i]*moist_pred_region[i] + gamma4*days_pred_region[i], sigma_overall);
      }
    } //Close model
    generated quantities{
      matrix[Nxhat,NTea] preds; //matrix of predictions
      real<lower=-5,upper=5> teaDiff;
      real<lower=-5,upper=5> tempDiff;
      real<lower=-5,upper=5> moistDiff;
      real<lower=-5,upper=5> intDiff;
      real<lower=-5,upper=5> daysDiff;
      real<lower=-5,upper=5> teaDiffG;
      //real<lower=-5,upper=5> tempDiffG;
      //real<lower=-5,upper=5> moistDiffG;
      //real<lower=-5,upper=5> intDiffG;
      //real<lower=-5,upper=5> daysDiffG;
      real<lower=-5,upper=5> teaDiffR;
      //real<lower=-5,upper=5> tempDiffR;
      //real<lower=-5,upper=5> moistDiffR;
      //real<lower=-5,upper=5> intDiffR;
      //real<lower=-5,upper=5> daysDiffR;
      for (i in 1:Nxhat){
      for (j in 1:NTea){
      preds[i,j] = (gamma0[j] + gamma1*xhat1[i] + gamma2*xhat2[i] + gamma3*xhat1[i]*xhat2[i] + gamma4*xhat3[i]); //predictions
      }
      }
      teaDiff <- gamma0[1]-gamma0[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      tempDiff <- gamma1; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      moistDiff <- gamma2; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      intDiff <- gamma3; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      daysDiff <- gamma4; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      teaDiffG <- gamma0[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //tempDiffG <- gamma1[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //moistDiffG <- gamma2[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //intDiffG <- gamma3[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //daysDiffG <- gamma4[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      teaDiffR <- gamma0[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //tempDiffR <- gamma1[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //moistDiffR <- gamma2[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //intDiffR <- gamma3[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
      //daysDiffR <- gamma4[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
    }
    ","scripts/users/hthomas/Tea/soiltemp_loss_4.stan")
# Syntax-check, then sample the model (2 chains x 15k iterations, high
# adapt_delta/treedepth for a difficult posterior).
stanc('scripts/users/hthomas/Tea/soiltemp_loss_4.stan') #check model
options(mc.cores = parallel::detectCores())
initsA <- list(ap=rep(0.6,jags.dat$NPlot), aMeanRegion=rep(0.6,jags.dat$NRegion),as=rep(0.6,jags.dat$NSite))
initsB <- list(ap=rep(0.3,jags.dat$NPlot), aMeanRegion=rep(0.3,jags.dat$NRegion),as=rep(0.3,jags.dat$NSite))
inits <- list(initsA, initsB)
fit_space <- stan(file = 'scripts/users/hthomas/Tea/soiltemp_loss_4.stan', data = jags.dat, init=inits, iter = 15000, chains = 2, thin = 1, verbose = TRUE, control=list(adapt_delta=0.99,max_treedepth = 15), algorithm = "NUTS")
# Convergence diagnostics: Rhat and effective sample sizes.
s = summary(fit_space)
rownames(s$summary)
(s$summary)[202]
max(s$summary[,10],na.rm = T) # max Rhat
hist(s$summary[,"Rhat"], breaks=100)
hist(s$summary[,"n_eff"])
print(fit_space)
stan_trace(fit_space, inc_warmup = TRUE, pars = c("gamma0","gamma1"))
# NOTE(review): the model above declares aMeanRegion, not aMeanSite -- this
# trace call (and the aMeanSite references below) appear to be leftovers from
# an earlier model version and will likely error or return nothing; confirm
# and rename to aMeanRegion.
stan_trace(fit_space, inc_warmup = TRUE, pars = c("aMeanSite[1]","aMeanSite[2]"))
# Tidy the summary: split "param[index]" rownames into Param and Number.
cout <- as.data.frame(s$summary)
cout$Param <- unlist(lapply(rownames(cout), function (x) {strsplit(x,split="[",fixed=T)}[[1]][1]))
cout$Number <- as.vector(sapply(strsplit(rownames(cout),"[^0-9]+",fixed=FALSE), "[", 2))
cout[cout$Rhat > 1.1 & !is.na(cout$Rhat),]
hist(cout$mean[cout$Param=="aMeanSite"])
cout[cout$Param %in% c("gamma0","gamma1","gamma2","gamma3"),] #these will tell you about the "significance" of your environmental predictors
#Compare to raw data
# plot.compare <- ddply(season_narm_r[season_narm_r$MultipleObs==1,], c("Site","Plot","PlotNum","Tea_Type"), summarise,
# rawLoss = mean(Loss))
#
# plot.compare$StanEst <- cout$mean[match(plot.compare$PlotNum, cout$Number[cout$Param=="ap"])]
# ggplot(plot.compare)+
# geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
region.compare <- ddply(season_narm_r, c("RegionNum","Tea_Type"), summarise,
rawLoss = mean(Loss))
region.compare$StanEst <- cout$mean[match(region.compare$RegionNum, cout$Number[cout$Param=="aMeanSite"])]
ggplot(region.compare)+
geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
# Graph predictions
predsout.space <- cout[cout$Param %in% c("preds"),]
predsout.space$soiltemp <- rep(jags.dat$xhat1, each=2)
predsout.space$TempBT <- predsout.space$soiltemp * soiltemp_scale_amount+ soiltemp_cent_amount
predsout.space$Moisture <- rep(jags.dat$xhat2, each=2)
predsout.space$MoistureBT <- predsout.space$Moisture * moist_x_scale_amount+ moist_x_cent_amount
predsout.space$Tea_TypeNum <- rep(c(1,2), times = (length(predsout.space$mean)/2))
predsout.space$Tea_Type <- ifelse(predsout.space$Tea_TypeNum==1,"Green","Rooibos")
save(predsout.space, file = "scripts/users/hthomas/Tea/Stan_outputs/soiltemp_moisture_preds_int_scaled_year.Rdata")
save(cout, file = "scripts/users/hthomas/Tea/Stan_outputs/soiltemp_moisture_fits_int_scaled_year.Rdata")
#Anne graph
# Publication figure: predicted mass loss vs soil temperature at the lower and
# upper moisture quartiles, overlaid on the raw observations per tea type.
# NOTE(review): MoistureBT == LQ_moist compares back-transformed doubles for
# exact equality; this only works if the scale/unscale round-trip is exact --
# consider matching on the scaled xhat2 values instead.
pdf("scripts/users/hthomas/Output_Images/Tea/soiltemp_moisture_scaled_year.pdf", width = 3, height = 3)
ggplot()+
geom_ribbon(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp * soiltemp_scale_amount+soiltemp_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type):factor(Moisture)),alpha=0.2)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Green",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss),colour = "#006400",pch =16 ,alpha=0.6)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Rooibos",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss), colour = "#8B2323",pch =16 ,alpha=0.6)+
geom_line(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp* soiltemp_scale_amount+soiltemp_cent_amount,y=mean, colour=factor(Tea_Type):factor(Moisture)), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(y = c(0,1))+
scale_colour_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
scale_fill_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
# No linetype aesthetic is mapped in any layer, so this scale has no effect.
scale_linetype_manual(values = c("dashed","solid"), name = "Moisture", labels = c("low","high"))+
labs(x = "Soil Temperature (°C)", y = "Mass Loss (%)")+
theme(legend.position = "none")
dev.off()
# Same figure rebuilt as a named ggplot object (outer parentheses also print
# it) so it can be saved and re-used in composite figures.
(soiltemp_moist_tea_int_scaled_year<-ggplot()+
geom_ribbon(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp * soiltemp_scale_amount+soiltemp_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type):factor(Moisture)),alpha=0.2)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Green",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss),colour = "#006400",pch =16 ,alpha=0.6)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Rooibos",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss), colour = "#8B2323",pch =16 ,alpha=0.6)+
geom_line(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp* soiltemp_scale_amount+soiltemp_cent_amount,y=mean, colour=factor(Tea_Type):factor(Moisture)), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(y = c(0,1))+
scale_colour_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
scale_fill_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
scale_linetype_manual(values = c("dashed","solid"), name = "Moisture", labels = c("low","high"))+
labs(x = "Soil Temperature (°C)", y = "Mass Loss (%)")+
theme(legend.position = "none"))
save(soiltemp_moist_tea_int_scaled_year, file = "scripts/users/hthomas/Tea/soiltemp_moist_tea_int_scaled_year.Rdata")
|
/7. New_Soil_moisture_interaction_scaled_FINAL_tea_int_septeas_year_2.R
|
no_license
|
gejielin/Tundra_teabag_experiment
|
R
| false
| false
| 31,396
|
r
|
############S#################
## TRAIT CHANGE OVER SPACE ##
############S#################
# Clear the workspace before running the full pipeline below.
# NOTE(review): rm(list = ls()) wipes the caller's session when sourced;
# prefer running this script in a fresh R process instead.
rm(list=ls())
#Detach packages####
# Detach every attached add-on package, leaving the base set intact, so the
# library() calls below start from a clean search path.
detachAllPackages <- function() {
  keep <- c("package:stats", "package:graphics", "package:grDevices",
            "package:utils", "package:datasets", "package:methods",
            "package:base")
  # Attached packages appear on the search path as "package:<name>".
  attached <- grep("^package:", search(), value = TRUE)
  for (pkg in setdiff(attached, keep)) {
    detach(pkg, character.only = TRUE)
  }
}
detachAllPackages()
#Question 1 - Differences between teas
####Open packages####
library(raster)
library(rgdal)
library(lme4)
library(nlme)
library(stringr)
library(plyr)
library(dplyr)
library(ggplot2)
require(gridExtra)
#library(brms)
library(rstan)
library(StanHeaders)
library(MuMIn)
library(MCMCglmm)
library(postMCMCglmm)
#### CHOOSE TRAIT NAME AND CLIMATE VARIABLE HERE ----
###Read in tea
# Combined teabag records; keep strings as character for later matching.
tea<-read.csv("scripts/users/hthomas/tea/combined_tea.csv", stringsAsFactors = F)
#Remove daily tea - too confusing!
tea<-subset(tea,!grepl("CG_DT_HT",tea$Plot))
#Remove sub zero plots
# subset() also silently drops rows where Loss is NA.
tea<-subset(tea,Loss>0)
# Rooibos mass loss above 50% is treated as missing -- presumably a
# plausibility cutoff for rooibos tea; TODO confirm the threshold.
tea[tea$Tea_Type=="Rooibos" & tea$Loss >0.5,]$Loss<-NA
#Make sure only using control plots
ambient<-subset(tea,Treatment=="None")
#Split into seasons to make things easier
Summer<-subset(ambient,Season=="Summer")
year<-subset(ambient,Season=="Year")
winter<-subset(ambient,Season=="Winter")
## STAN MODEL - soil temperature ----
#soil temperature#
# Candidate response variables; i = 1 below selects "Loss".
var.list <- c("Loss", "Loss_Day", "k", "TBI_k", "TBI_S")
#Calculate mean burial length
#Get column number
i=1
var.num<-which(colnames(year)==var.list[i])
# Keep rows with a finite response and a finite mean soil temperature.
# NOTE(review): mixing base indexing (year[,var.num]) inside filter() works
# here but silently breaks if dplyr reorders rows -- fragile.
season_narm<-year %>%
filter(is.finite(year[,var.num]),is.finite(soiltemp_mean))
#Subset for tea types
#season_narm_r<-subset(season_narm,Tea_Type=="Rooibos") #AB NOTE: Keeping both tea types and including as interaction in model
season_narm_r <- season_narm #just so I don't have to rename everything
# AB: MULTIPLE OBSERVATION
# Flag plot/tea combinations with more than 4 observations (enough to
# estimate a plot-level random effect).
season_narm_r <- ddply(season_narm_r, c("ESA_cell","Site","Plot","Tea_Type"), transform, NObsPlot = length(Loss))
season_narm_r$MultipleObs <- ifelse(season_narm_r$NObsPlot > 4, 1, 0)
# Multiple Sites
# Flag ESA cells (regions) containing more than one site.
count.sites <- ddply(season_narm_r, c("ESA_cell"), summarise, n.sub = length(unique(Site)))
season_narm_r$MultipleSites <- ifelse(season_narm_r$ESA_cell %in% count.sites$ESA_cell[count.sites$n.sub > 1], 1, 0)
# Multiple plots per Site (more than 1)
count.plots <- ddply(season_narm_r, c("ESA_cell", "Site"), summarise, n.plots = length(unique(Plot)))
season_narm_r$MultiplePlots <- ifelse(season_narm_r$Site %in% count.plots$Site[count.plots$n.plots > 1], 1, 0)
#Add env.levels (alternative)
#Add env.levels (new - based on nestedness)
# envlevel_soil codes the finest level at which soil temperature varies:
# 0 = no variation within region, 1 = varies among sites within a region,
# 2 = varies among plots within a site.
env.levels<- season_narm_r %>%
select(soiltemp_mean,ESA_cell,Site,Plot)
season_narm_r$envlevel_soil<-0
env.levels2<-ddply(env.levels, c("ESA_cell"), summarise, n.plots = length(unique(soiltemp_mean)))
season_narm_r$envlevel_soil <- ifelse(season_narm_r$ESA_cell %in% env.levels2$ESA_cell[env.levels2$n.plots > 1], 1, season_narm_r$envlevel_soil)
env.levels2<-ddply(env.levels, c("ESA_cell","Site"), summarise, n.plots = length(unique(soiltemp_mean)))
season_narm_r$envlevel_soil <- ifelse(season_narm_r$Site %in% env.levels2$Site[env.levels2$n.plots > 1], 2, season_narm_r$envlevel_soil)
#And for moisture
# Same coding for soil moisture.
env.levels<- season_narm_r %>%
select(moisture_mean,ESA_cell,Site,Plot)
season_narm_r$envlevel_moisture<-0
env.levels2<-ddply(env.levels, c("ESA_cell"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel_moisture <- ifelse(season_narm_r$ESA_cell %in% env.levels2$ESA_cell[env.levels2$n.plots > 1], 1, season_narm_r$envlevel_moisture)
env.levels2<-ddply(env.levels, c("ESA_cell","Site"), summarise, n.plots = length(unique(moisture_mean)))
season_narm_r$envlevel_moisture <- ifelse(season_narm_r$Site %in% env.levels2$Site[env.levels2$n.plots > 1], 2, season_narm_r$envlevel_moisture)
#Now take lowest env level as overall one
# Combine the two environmental-resolution codes via the element-wise maximum
# of the NAMED columns. The original indexed columns 63:64 by position, which
# silently breaks whenever a column is added or removed upstream.
# NOTE(review): the comment above says "lowest" but the code has always taken
# the maximum of the two codes; the maximum is preserved here.
season_narm_r$envlevel <- pmax(season_narm_r$envlevel_soil, season_narm_r$envlevel_moisture)#Add categories
#Subset so only using data I want to check model
#season_narm_r<-subset(season_narm_r, Cat == 1 | Cat == 2 | Cat == 3)
# AB: REMOVE MISSING VALUES OF SOIL soiltemp AND TEMPERATURE FOR THE soiltemp X TEMPERATURE INTERACTION MODEL
season_narm_r <- season_narm_r[!is.na(season_narm_r$soiltemp_mean) & !is.na(season_narm_r$moisture_mean),]
#Add Region numbers
# Integer indices for Stan. Each grouping includes Tea_Type, so every tea type
# gets its own region/site/plot index.
# NOTE(review): group_indices_() is a deprecated dplyr underscore verb --
# works on older dplyr but may need cur_group_id()/group_indices() later.
season_narm_r<-season_narm_r %>%
mutate(RegionNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$RegionNum),]
#Add Site numbers
season_narm_r<-season_narm_r %>%
mutate(SiteNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Tea_Type")))
#Reorder by site number
season_narm_r<-season_narm_r[order(season_narm_r$SiteNum),]
#Add Plot numbers
season_narm_r<-season_narm_r %>%
mutate(PlotNum = group_indices_(season_narm_r, .dots=c("ESA_cell","Site","Plot","Tea_Type"))) #AB NOTE: This now includes tea type as well! So there will be a unique plot number for each tea type within a plot
#Reorder by plot number
season_narm_r<-season_narm_r[order(season_narm_r$PlotNum),]
# Moisture summaries (on the raw scale) used later for prediction lines.
LQ_moist<-quantile(season_narm_r$moisture_mean,0.25)
UQ_moist<-quantile(season_narm_r$moisture_mean,0.75)
mean_moist<-mean(season_narm_r$moisture_mean)
med_moist<-quantile(season_narm_r$moisture_mean,0.5)
#Centre values - AB note: Either don't name this the same thing or save the amount you center by first so we can add it to the xhats later
# Standardise predictors in place, storing the centre/scale amounts so fitted
# values and xhats can be back-transformed to the original units later.
soiltemp_cent_amount <- attr(scale(season_narm_r$soiltemp_mean, center = TRUE, scale = TRUE), 'scaled:center')
soiltemp_scale_amount <- attr(scale(season_narm_r$soiltemp_mean, center = TRUE, scale = TRUE), 'scaled:scale')
season_narm_r$soiltemp_mean<-scale(season_narm_r$soiltemp_mean, center = TRUE, scale = TRUE)
moist_cent_amount <- attr(scale(season_narm_r$moisture_mean, center = TRUE, scale = TRUE), 'scaled:center')
moist_scale_amount <- attr(scale(season_narm_r$moisture_mean, center = TRUE, scale = TRUE), 'scaled:scale')
season_narm_r$moisture_mean<-scale(season_narm_r$moisture_mean, center = TRUE, scale = TRUE)
days_cent_amount <- attr(scale(season_narm_r$Days, center = TRUE, scale = TRUE), 'scaled:center')
days_scale_amount <- attr(scale(season_narm_r$Days, center = TRUE, scale = TRUE), 'scaled:scale')
season_narm_r$Days<-scale(season_narm_r$Days, center = TRUE, scale = TRUE)
#AB: caluclate mean and sd per site - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
# Per-site means/SDs of the (already scaled) predictors; these feed the Stan
# measurement-error terms temp_pred_site ~ normal(mean, sd) etc.
season_narm_r_sites<-season_narm_r %>%
group_by(SiteNum) %>%
summarise(soiltemp_mean_site = mean(soiltemp_mean),
soiltemp_sd_site = sd(soiltemp_mean),
moist_mean_site = mean(moisture_mean),
moist_sd_site = sd(moisture_mean))
season_narm_r$soiltemp_mean_site<-season_narm_r_sites$soiltemp_mean_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$soiltemp_sd_site<-season_narm_r_sites$soiltemp_sd_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$moist_mean_site<-season_narm_r_sites$moist_mean_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
season_narm_r$moist_sd_site<-season_narm_r_sites$moist_sd_site[match(season_narm_r$SiteNum, season_narm_r_sites$SiteNum)]
# Stan's normal() needs a strictly positive SD: replace zero SDs with the mean
# of the positive SDs, and NA SDs (single-observation sites) with a tiny value.
season_narm_r$soiltemp_sd_site[season_narm_r$soiltemp_sd_site==0] <- mean(season_narm_r$soiltemp_sd_site[season_narm_r$soiltemp_sd_site>0],na.rm = T)
season_narm_r$soiltemp_sd_site[is.na(season_narm_r$soiltemp_sd_site)] <- 0.001
season_narm_r$moist_sd_site[season_narm_r$moist_sd_site==0] <- mean(season_narm_r$moist_sd_site[season_narm_r$moist_sd_site>0],na.rm = T)
season_narm_r$moist_sd_site[is.na(season_narm_r$moist_sd_site)] <- 0.001
#AB: caluclate mean and sd per region - YOU CAN THINK ABOUT WHETHER YOU WANT THIS TO BE THE OVERALL MEAN OR THE MEAN OF MEANS - MEAN OF MEANS MIGHT BE BETTER IN THIS CASE
# Same summaries at the region level.
season_narm_r_regions<-season_narm_r %>%
group_by(RegionNum) %>%
summarise(soiltemp_mean_region = mean(soiltemp_mean),
soiltemp_sd_region = sd(soiltemp_mean),
moist_mean_region = mean(moisture_mean),
moist_sd_region = sd(moisture_mean))
season_narm_r$soiltemp_mean_region<-season_narm_r_regions$soiltemp_mean_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$soiltemp_sd_region<-season_narm_r_regions$soiltemp_sd_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moist_mean_region<-season_narm_r_regions$moist_mean_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$moist_sd_region<-season_narm_r_regions$moist_sd_region[match(season_narm_r$RegionNum, season_narm_r_regions$RegionNum)]
season_narm_r$soiltemp_sd_region[season_narm_r$soiltemp_sd_region==0] <- mean(season_narm_r$soiltemp_sd_region[season_narm_r$soiltemp_sd_region>0],na.rm = T)
season_narm_r$soiltemp_sd_region[is.na(season_narm_r$soiltemp_sd_region)] <- 0.001
season_narm_r$moist_sd_region[season_narm_r$moist_sd_region==0] <- mean(season_narm_r$moist_sd_region[season_narm_r$moist_sd_region>0],na.rm = T)
season_narm_r$moist_sd_region[is.na(season_narm_r$moist_sd_region)] <- 0.001
#Add mean days per region
# Burial-length mean/SD per site (scaled Days); tiny SD floor as above.
season_narm_r<-season_narm_r %>%
group_by(SiteNum) %>%
mutate(SiteDays = mean(Days),
SiteDays_sd = sd(Days))
season_narm_r$SiteDays_sd[season_narm_r$SiteDays_sd==0 | is.na(season_narm_r$SiteDays_sd)] <- 0.001
#Add mean days per region
season_narm_r<-season_narm_r %>%
group_by(RegionNum) %>%
mutate(RegionDays = mean(Days),
RegionDays_sd = sd(Days))
season_narm_r$RegionDays_sd[season_narm_r$RegionDays_sd==0 | is.na(season_narm_r$RegionDays_sd)] <- 0.001
mean_burial<-mean(season_narm_r$Days)
# Prediction range on the SCALED soil-temperature axis.
min_soil<-min(season_narm_r$soiltemp_mean,na.rm=TRUE)
max_soil<-max(season_narm_r$soiltemp_mean,na.rm=TRUE)
min_soiltemp<-min(season_narm_r$soiltemp_mean,na.rm=TRUE)
max_soiltemp<-max(season_narm_r$soiltemp_mean,na.rm=TRUE)
# The four moisture prediction values are scaled WITHIN their own set of four,
# so they need their own centre/scale amounts for back-transformation.
moist_x_cent_amount <- attr(scale(c(LQ_moist,UQ_moist,mean_moist,med_moist), center = TRUE, scale = TRUE), 'scaled:center')
moist_x_scale_amount <- attr(scale(c(LQ_moist,UQ_moist,mean_moist,med_moist), center = TRUE, scale = TRUE), 'scaled:scale')
xhats <- expand.grid(xhat1=seq(min_soiltemp, max_soiltemp,by=0.01), xhat2=scale(c(LQ_moist,UQ_moist,mean_moist,med_moist), center = TRUE, scale = TRUE),xhat3 = mean_burial) #AB: predicting soil soiltemp at 25% and 75% (assuming you will graph temperature as continuous) but of course you can change this to whatever you want
####Third attempt - adding temperature levels#######
# Data list for the Stan fit (name kept from an earlier JAGS version).
# Entries not declared in the model's data block are ignored by rstan.
jags.dat<-list(
  Nobs=nrow(season_narm_r),
  NSite=length(unique(season_narm_r$SiteNum)),
  NRegion=length(unique(season_narm_r$RegionNum)),
  NPlot=length(unique(season_narm_r$PlotNum)),
  NSiteDays=length(unique(season_narm_r$SiteDays)),
  NRegionDays=length(unique(season_narm_r$RegionDays)),
  NTea=length(unique(season_narm_r$Tea_Type)),
  # Group index per observation
  Region=season_narm_r$RegionNum,
  Site=season_narm_r$SiteNum,
  Plot=season_narm_r$PlotNum,
  # Burial-length mean/SD, one value per site/region
  SiteDays=season_narm_r$SiteDays[!duplicated(season_narm_r$SiteNum)],
  SiteDays_sd=season_narm_r$SiteDays_sd[!duplicated(season_narm_r$SiteNum)],
  RegionDays=season_narm_r$RegionDays[!duplicated(season_narm_r$RegionNum)],
  RegionDays_sd=season_narm_r$RegionDays_sd[!duplicated(season_narm_r$RegionNum)],
  Site_short=season_narm_r$SiteNum[!duplicated(season_narm_r$PlotNum)],
  Plot_short=unique(season_narm_r$PlotNum),
  # Tea type per site/region: 1 = Green, 2 = Rooibos
  tea_type_site=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$SiteNum)]=="Green", 1, 2),
  tea_type_region=ifelse(season_narm_r$Tea_Type[!duplicated(season_narm_r$RegionNum)]=="Green", 1, 2),
  # Nestedness indicators at each level of the hierarchy
  multobs_lobs=season_narm_r$MultipleObs,
  multobs_lplot=season_narm_r$MultipleObs[!duplicated(season_narm_r$PlotNum)],
  multsites_lobs=season_narm_r$MultipleSites,
  multsites_lplot=season_narm_r$MultipleSites[!duplicated(season_narm_r$PlotNum)],
  multsites_lsite=season_narm_r$MultipleSites[!duplicated(season_narm_r$SiteNum)],
  multsites_lregion=season_narm_r$MultipleSites[!duplicated(season_narm_r$RegionNum)],
  multplots_lobs=season_narm_r$MultiplePlots,
  multplots_lplot=season_narm_r$MultiplePlots[!duplicated(season_narm_r$PlotNum)],
  multplots_lsite=season_narm_r$MultiplePlots[!duplicated(season_narm_r$SiteNum)],
  multplots_lregion=season_narm_r$MultiplePlots[!duplicated(season_narm_r$RegionNum)],
  # NOTE(review): the original list also supplied four multplots_region_*
  # entries built from season_narm_r$MultiplePlots_Region, a column that is
  # never created in this script (so they were all NULL). They are not
  # declared in the Stan data block either, so they are dropped here.
  # Response
  traitobs=season_narm_r$Loss,
  #temp_plot=as.numeric(season_narm_r[!duplicated(season_narm_r$PlotNum),]$soiltemp_mean),
  #temp_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$soiltemp_mean),
  # Predictor means/SDs for the Stan measurement-error terms
  temp_mean_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$soiltemp_mean_region),
  temp_sd_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$soiltemp_sd_region),
  temp_mean_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$soiltemp_mean_site),
  temp_sd_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$soiltemp_sd_site),
  moist_mean_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moist_mean_region),
  moist_sd_region=as.numeric(season_narm_r[!duplicated(season_narm_r$RegionNum),]$moist_sd_region),
  moist_mean_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moist_mean_site),
  moist_sd_site=as.numeric(season_narm_r[!duplicated(season_narm_r$SiteNum),]$moist_sd_site),
  # Environmental-resolution codes per level
  obs_envlevel=season_narm_r$envlevel,
  plot_envlevel=season_narm_r[!duplicated(season_narm_r$PlotNum),]$envlevel,
  site_envlevel=season_narm_r[!duplicated(season_narm_r$SiteNum),]$envlevel,
  region_envlevel=season_narm_r[!duplicated(season_narm_r$RegionNum),]$envlevel,
  meanT=mean(as.numeric(season_narm_r$soiltemp_mean[!duplicated(season_narm_r$ESA_cell)])),
  # Prediction grid (scaled soil temp x scaled moisture x mean burial length)
  xhat1=xhats$xhat1,
  xhat2=xhats$xhat2,
  xhat3=xhats$xhat3,
  Nxhat=length(xhats$xhat1)
)
str(jags.dat)
# MODEL - ANNE EDITS####
# Write the Stan program to disk. Hierarchical measurement-error model:
# mass loss = plot effect + site effect + region effect; effects are only
# estimated at levels with replication (the mult*_ indicators), and the
# site/region intercepts regress on temperature, moisture, their interaction
# and burial days (gamma0[tea] intercept per tea type, gamma1-gamma4 slopes).
# NOTE(review): the generated quantities block uses the old `<-` assignment
# operator, which newer Stan versions reject (only `=`) -- TODO confirm
# against the installed rstan before upgrading. The stray `;;` after mu[i]
# is harmless. Comments below are inside the string and are Stan comments.
write("
data {
int<lower=0> Nobs; //Number of observations
int<lower=0> NRegion; //Number of regions
int<lower=0> NSite; //Number of sites
int<lower=0> NPlot; //Number of plots
int<lower=0> Nxhat; //No. predictor variables
int<lower=0> NTea; //No. of tea types
int<lower=0> NSiteDays; //No. of days
int<lower=0> NRegionDays; //No. of days
int<lower=1,upper=NPlot> Plot[Nobs]; //Plots (all observations)
int<lower=1,upper=NSite> Site[Nobs]; //Plots (all observations)
int<lower=1,upper=NRegion> Region[Nobs]; //Plots (all observations)
int<lower=1,upper=2> tea_type_site[NSite]; //Tea type (1=Green, 2=Rooibos)
int<lower=1,upper=2> tea_type_region[NRegion]; //Tea type (1=Green, 2=Rooibos)
int<lower=0,upper=1> multobs_lobs[Nobs]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multobs_lplot[NPlot]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multsites_lobs[Nobs]; //Are sites nested in region (all obs)
int<lower=0,upper=1> multsites_lplot[NPlot]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multsites_lsite[NSite]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multsites_lregion[NRegion]; //Are sites nested in region (no. plots)
int<lower=0,upper=1> multplots_lobs[Nobs]; //Are plots nested in site (all obs)
int<lower=0,upper=1> multplots_lplot[NPlot]; //Are plots nested in site (no plots)
int<lower=0,upper=2> obs_envlevel[Nobs];
int<lower=0,upper=2> site_envlevel[NSite];
int<lower=0,upper=2> region_envlevel[NRegion];
vector[Nobs] traitobs; //Mass Loss
vector[NSite] temp_mean_site; //Temperature (unique regions)
vector[NSite] temp_sd_site; //Temperature SD (unique regions)
vector[NRegion] temp_mean_region; //Temperature (unique regions)
vector[NRegion] temp_sd_region; //Temperature SD (unique regions)
vector[NSite] moist_mean_site; //Temperature (unique regions)
vector[NSite] moist_sd_site; //Temperature SD (unique regions)
vector[NRegion] moist_mean_region; //Temperature (unique regions)
vector[NRegion] moist_sd_region; //Temperature SD (unique regions)
vector[NSite] SiteDays; //
vector[NSite] SiteDays_sd; //
vector[NRegion] RegionDays; //
vector[NRegion] RegionDays_sd; //
vector[Nxhat] xhat1; //Predictor variables
vector[Nxhat] xhat2; //Predictor variables
vector[Nxhat] xhat3; //Predictor variables
}
parameters {
real<lower=-3,upper=3> aMeanRegion[NRegion]; // Region effect
real<lower=-5,upper=5> ap[NPlot];
real<lower=-5,upper=5> as[NSite];
real<lower=-2,upper=2> gamma0[NTea]; // intercept of relationship between mass loss and temp change
real<lower=-2,upper=2> gamma1; // slope of temperature - loss relationship
real<lower=-2,upper=2> gamma2; // slope of soiltemp - loss relationship
real<lower=-2,upper=2> gamma3; // temperature - soiltemp interaction
real<lower=-2,upper=2> gamma4; // temperature - soiltemp interaction
real<lower=0,upper=5> sigma_overall; //Error around loss- temp relationship
real<lower=0,upper=5> sigma_plot;
real<lower=0,upper=5> sigma_site;
real<lower=0,upper=5> sigma_region;
real<lower=0,upper=5> sigma_resid;
vector[NSite] temp_pred_site;
vector[NSite] moist_pred_site;
vector[NSite] days_pred_site;
vector[NRegion] temp_pred_region;
vector[NRegion] moist_pred_region;
vector[NRegion] days_pred_region;
}
transformed parameters {
vector[Nobs] mu;
vector[Nobs] app;
vector[Nobs] ass;
vector[Nobs] arr;
for (i in 1:Nobs){
if((multobs_lobs[i]==1 && multplots_lobs[i]==1))
app[i] = ap[Plot[i]];
// set plot effects to 0 for plots that don't have multiple obs or are the only plot within a site
else app[i] = 0;
if(multsites_lobs[i] == 1)
ass[i] = as[Site[i]];
else ass[i] = 0;
if(multsites_lobs[i]==1 && obs_envlevel[i] >0)
arr[i] = 0;
else arr[i] = aMeanRegion[Region[i]];
mu[i] = app[i] + ass[i] + arr[i];;
}
//print(\"ap=\",ap[1:10],\"as=\",as[1:10],\"aMeanSite=\",aMeanSite[1:8],\"mu=\",mu[1:10])
}
model {
for (i in 1:Nobs){
traitobs[i] ~ normal(mu[i], sigma_resid);
}
//Set up plot and site random effects
for (i in 1:NPlot){
if(multobs_lplot[i]==1 && multplots_lplot[i]==1)
ap[i] ~ normal(0, sigma_plot);
}
//Bring in environmental data means and SD per region
for (i in 1:NRegion){
temp_pred_region[i] ~ normal(temp_mean_region[i], temp_sd_region[i]); //temp_mean_region and temp_sd are given as data
moist_pred_region[i] ~ normal(moist_mean_region[i], moist_sd_region[i]); //temp_mean_region and temp_sd are given as data
days_pred_region[i] ~ normal(RegionDays[i], RegionDays_sd[i]); //temp_mean_region and temp_sd are given as data
}
for (i in 1:NSite){
temp_pred_site[i] ~ normal(temp_mean_site[i], temp_sd_site[i]); //temp_mean_region and temp_sd are given as data
moist_pred_site[i] ~ normal(moist_mean_site[i], moist_sd_site[i]); //temp_mean_region and temp_sd are given as data
days_pred_site[i] ~ normal(SiteDays[i], SiteDays_sd[i]); //temp_mean_region and temp_sd are given as data
}
//Relationship between mass loss at the region level and temperature and soiltemp, per tea type
for (i in 1:NSite){
if(multsites_lsite[i] == 1 && site_envlevel[i] >0)
as[i] ~ normal(gamma0[tea_type_site[i]] + gamma1*temp_pred_site[i] + gamma2*moist_pred_site[i] + gamma3*temp_pred_site[i]*moist_pred_site[i] + gamma4*days_pred_site[i], sigma_overall);
else as[i] ~ normal(0, sigma_site);
}
for (i in 1:NRegion){
if(multsites_lregion[i] == 1 && region_envlevel[i] >0)
aMeanRegion[i] ~ normal(0, sigma_region);
else aMeanRegion[i] ~ normal(gamma0[tea_type_region[i]] + gamma1*temp_pred_region[i] + gamma2*moist_pred_region[i] + gamma3*temp_pred_region[i]*moist_pred_region[i] + gamma4*days_pred_region[i], sigma_overall);
}
} //Close model
generated quantities{
matrix[Nxhat,NTea] preds; //matrix of predictions
real<lower=-5,upper=5> teaDiff;
real<lower=-5,upper=5> tempDiff;
real<lower=-5,upper=5> moistDiff;
real<lower=-5,upper=5> intDiff;
real<lower=-5,upper=5> daysDiff;
real<lower=-5,upper=5> teaDiffG;
//real<lower=-5,upper=5> tempDiffG;
//real<lower=-5,upper=5> moistDiffG;
//real<lower=-5,upper=5> intDiffG;
//real<lower=-5,upper=5> daysDiffG;
real<lower=-5,upper=5> teaDiffR;
//real<lower=-5,upper=5> tempDiffR;
//real<lower=-5,upper=5> moistDiffR;
//real<lower=-5,upper=5> intDiffR;
//real<lower=-5,upper=5> daysDiffR;
for (i in 1:Nxhat){
for (j in 1:NTea){
preds[i,j] = (gamma0[j] + gamma1*xhat1[i] + gamma2*xhat2[i] + gamma3*xhat1[i]*xhat2[i] + gamma4*xhat3[i]); //predictions
}
}
teaDiff <- gamma0[1]-gamma0[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
tempDiff <- gamma1; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
moistDiff <- gamma2; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
intDiff <- gamma3; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
daysDiff <- gamma4; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
teaDiffG <- gamma0[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//tempDiffG <- gamma1[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//moistDiffG <- gamma2[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//intDiffG <- gamma3[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//daysDiffG <- gamma4[1]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
teaDiffR <- gamma0[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//tempDiffR <- gamma1[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//moistDiffR <- gamma2[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//intDiffR <- gamma3[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
//daysDiffR <- gamma4[2]; //AB: if you want to know whether the intercepts of the tea types are significantly different, can also do this with the slopes or a prediction at a particular xhat if you want
}
","scripts/users/hthomas/Tea/soiltemp_loss_4.stan")
# Syntax-check the model file before the (long) sampling run.
stanc('scripts/users/hthomas/Tea/soiltemp_loss_4.stan') #check model
options(mc.cores = parallel::detectCores())
# One init list per chain (2 chains below); only the intercept-like
# parameters are initialised, the rest use Stan's defaults.
initsA <- list(ap=rep(0.6,jags.dat$NPlot), aMeanRegion=rep(0.6,jags.dat$NRegion),as=rep(0.6,jags.dat$NSite))
initsB <- list(ap=rep(0.3,jags.dat$NPlot), aMeanRegion=rep(0.3,jags.dat$NRegion),as=rep(0.3,jags.dat$NSite))
inits <- list(initsA, initsB)
# High adapt_delta / max_treedepth: sampler settings for a difficult posterior.
fit_space <- stan(file = 'scripts/users/hthomas/Tea/soiltemp_loss_4.stan', data = jags.dat, init=inits, iter = 15000, chains = 2, thin = 1, verbose = TRUE, control=list(adapt_delta=0.99,max_treedepth = 15), algorithm = "NUTS")
s = summary(fit_space)
rownames(s$summary)
# NOTE(review): magic index -- inspects a single summary cell by position.
(s$summary)[202]
# Column 10 of the summary is Rhat; values near 1 indicate convergence.
max(s$summary[,10],na.rm = T) # max Rhat
hist(s$summary[,"Rhat"], breaks=100)
hist(s$summary[,"n_eff"])
print(fit_space)
# Trace plots for the fixed effects and the region-level intercepts. The Stan
# model declares the latter as "aMeanRegion"; the original requested
# "aMeanSite", which does not exist in the fitted model.
stan_trace(fit_space, inc_warmup = TRUE, pars = c("gamma0","gamma1"))
stan_trace(fit_space, inc_warmup = TRUE, pars = c("aMeanRegion[1]","aMeanRegion[2]"))
# Coefficient table: one row per monitored quantity from the Stan summary.
cout <- as.data.frame(s$summary)
# Parameter family = text before "[" in the row name; Number = first numeric
# index (NA for scalar quantities such as lp__).
cout$Param <- sub("\\[.*$", "", rownames(cout))
cout$Number <- as.vector(sapply(strsplit(rownames(cout), "[^0-9]+"), "[", 2))
# Convergence check: rows with Rhat above 1.1 indicate poor mixing.
cout[cout$Rhat > 1.1 & !is.na(cout$Rhat),]
hist(cout$mean[cout$Param=="aMeanRegion"])
cout[cout$Param %in% c("gamma0","gamma1","gamma2","gamma3"),] #these will tell you about the "significance" of your environmental predictors
#gamma1 = temperature, gamma2 = soiltemp, gamma3 = temp X soiltemp interaction (for each tea type)
#Compare to raw data
# plot.compare <- ddply(season_narm_r[season_narm_r$MultipleObs==1,], c("Site","Plot","PlotNum","Tea_Type"), summarise,
# rawLoss = mean(Loss))
#
# plot.compare$StanEst <- cout$mean[match(plot.compare$PlotNum, cout$Number[cout$Param=="ap"])]
# ggplot(plot.compare)+
# geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
region.compare <- ddply(season_narm_r, c("RegionNum","Tea_Type"), summarise,
rawLoss = mean(Loss))
# Fix: subset the summary to the region intercepts FIRST, then match within
# that subset. The original matched into a subset of cout$Number but indexed
# the FULL cout$mean with the resulting positions (and used the non-existent
# "aMeanSite" name, so every match was NA).
region.rows <- cout[cout$Param == "aMeanRegion", ]
region.compare$StanEst <- region.rows$mean[match(region.compare$RegionNum, region.rows$Number)]
ggplot(region.compare)+
geom_point(aes(x=rawLoss,y=StanEst,colour=Tea_Type))
# Graph predictions
# Rows of the generated-quantities "preds" matrix, one per (xhat, tea) pair.
predsout.space <- cout[cout$Param %in% c("preds"),]
# NOTE(review): rep(..., each = 2) / rep(c(1,2), times = ...) assume the
# summary rows alternate tea type within each xhat row (second index varying
# fastest). rstan flattens matrix parameters with the FIRST index varying
# fastest, which would invert this mapping -- verify the row order against
# rownames(s$summary) before trusting these predictions.
predsout.space$soiltemp <- rep(jags.dat$xhat1, each=2)
# Back-transform scaled predictors to their original measurement units.
predsout.space$TempBT <- predsout.space$soiltemp * soiltemp_scale_amount+ soiltemp_cent_amount
predsout.space$Moisture <- rep(jags.dat$xhat2, each=2)
# Moisture xhats were scaled within the 4-value quantile set, so they use the
# moist_x_* amounts rather than the full-data moist_* amounts.
predsout.space$MoistureBT <- predsout.space$Moisture * moist_x_scale_amount+ moist_x_cent_amount
predsout.space$Tea_TypeNum <- rep(c(1,2), times = (length(predsout.space$mean)/2))
predsout.space$Tea_Type <- ifelse(predsout.space$Tea_TypeNum==1,"Green","Rooibos")
# Persist the predictions and the full coefficient table for later plotting.
save(predsout.space, file = "scripts/users/hthomas/Tea/Stan_outputs/soiltemp_moisture_preds_int_scaled_year.Rdata")
save(cout, file = "scripts/users/hthomas/Tea/Stan_outputs/soiltemp_moisture_fits_int_scaled_year.Rdata")
#Anne graph
# Publication figure: predicted mass loss vs soil temperature at the lower and
# upper moisture quartiles, overlaid on the raw observations per tea type.
# NOTE(review): MoistureBT == LQ_moist compares back-transformed doubles for
# exact equality; this only works if the scale/unscale round-trip is exact --
# consider matching on the scaled xhat2 values instead.
pdf("scripts/users/hthomas/Output_Images/Tea/soiltemp_moisture_scaled_year.pdf", width = 3, height = 3)
ggplot()+
geom_ribbon(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp * soiltemp_scale_amount+soiltemp_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type):factor(Moisture)),alpha=0.2)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Green",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss),colour = "#006400",pch =16 ,alpha=0.6)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Rooibos",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss), colour = "#8B2323",pch =16 ,alpha=0.6)+
geom_line(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp* soiltemp_scale_amount+soiltemp_cent_amount,y=mean, colour=factor(Tea_Type):factor(Moisture)), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(y = c(0,1))+
scale_colour_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
scale_fill_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
# No linetype aesthetic is mapped in any layer, so this scale has no effect.
scale_linetype_manual(values = c("dashed","solid"), name = "Moisture", labels = c("low","high"))+
labs(x = "Soil Temperature (°C)", y = "Mass Loss (%)")+
theme(legend.position = "none")
dev.off()
# Same figure rebuilt as a named ggplot object (outer parentheses also print
# it) so it can be saved and re-used in composite figures.
(soiltemp_moist_tea_int_scaled_year<-ggplot()+
geom_ribbon(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp * soiltemp_scale_amount+soiltemp_cent_amount,ymin=(`2.5%`),ymax=(`97.5%`),fill=factor(Tea_Type):factor(Moisture)),alpha=0.2)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Green",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss),colour = "#006400",pch =16 ,alpha=0.6)+
geom_point(data=season_narm_r[season_narm_r$Tea_Type=="Rooibos",],aes(x=soiltemp_mean* soiltemp_scale_amount+soiltemp_cent_amount,y=Loss), colour = "#8B2323",pch =16 ,alpha=0.6)+
geom_line(data=predsout.space[predsout.space$MoistureBT==LQ_moist | predsout.space$MoistureBT==UQ_moist,],aes(x=soiltemp* soiltemp_scale_amount+soiltemp_cent_amount,y=mean, colour=factor(Tea_Type):factor(Moisture)), alpha=0.8, lwd = 1.5)+
theme_classic()+
coord_cartesian(y = c(0,1))+
scale_colour_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
scale_fill_manual(values = c("#00b100","#003100","#c83232","#4e1414"), name = "Tea Type")+
scale_linetype_manual(values = c("dashed","solid"), name = "Moisture", labels = c("low","high"))+
labs(x = "Soil Temperature (°C)", y = "Mass Loss (%)")+
theme(legend.position = "none"))
save(soiltemp_moist_tea_int_scaled_year, file = "scripts/users/hthomas/Tea/soiltemp_moist_tea_int_scaled_year.Rdata")
|
# not a real question,not constructive,off topic,open,too localized
# 38522,20897,20847,89176,8910
# Empirical class priors: each value is the class count above divided by the
# total (178,352 posts); "open" is exactly half the training data.
sample.priors <- c(0.21598860680003587,0.11716717502467032,0.11688683053736432,0.5,0.049957387637929486)
|
/sample-priors.R
|
no_license
|
dsmvwld/Kaggle-StackOverflow
|
R
| false
| false
| 204
|
r
|
# Empirical class priors for the five StackOverflow close reasons
# (class count / 178,352 total training posts).
# counts: 38522, 20897, 20847, 89176, 8910
sample.priors <- c(
  0.21598860680003587,  # not a real question
  0.11716717502467032,  # not constructive
  0.11688683053736432,  # off topic
  0.5,                  # open
  0.049957387637929486  # too localized
)
|
# Cavendish's measurements of the density of the earth (1798).
cavendish <- c(5.5, 5.57, 5.42, 5.61, 5.53, 5.47, 4.88, 5.62, 5.63, 4.07,
               5.29, 5.34, 5.26, 5.44, 5.46, 5.55, 5.34, 5.3, 5.36, 5.79,
               5.75, 5.29, 5.1, 5.86, 5.58, 5.27, 5.85, 5.65, 5.39)
# Empirical cumulative distribution function via ecdf().
plot(ecdf(cavendish), xlab = "density of the earth", ylab = "cummulative curve", main = "")
# Binomial distribution: dbinom(x, n, p) is the pmf, pbinom() the cdf.
barplot(pbinom(0:4, 4, 0.1), names = as.character(0:4), xlab = "x", ylab = "F(x)")
barplot(dbinom(0:4, 4, 0.1), names = as.character(0:4), xlab = "x", ylab = "f(x)")
# Geometric distribution.
fx <- dgeom(0:20, 0.2)
barplot(fx, names = as.character(0:20), xlab = "x", ylab = "f(x)")
table(rgeom(100, 0.1))
# Hypergeometric distribution: dhyper(x, K, N - K, n).
barplot(dhyper(0:5, 6, 14, 5), names = as.character(0:5))
dhyper(5, 6, 14, 5)

# Multinomial pmf with 3 mutually exclusive outcomes from n independent trials.
# p1 is the probability of a type-a outcome, p2 of a type-b outcome; the
# remaining n - a - b trials are type c with probability 1 - p1 - p2.
multi <- function(n, a, b, p1, p2) {
  factorial(n) / (factorial(a) * factorial(b) * factorial(n - a - b)) *
    p1^a * p2^b * (1 - p1 - p2)^(n - a - b)
}
# Example: n = 24, a = 4, b = 0:20, p1 = 0.2, p2 = 0.25.
# multi() is vectorised in b, so no explicit loop or preallocation is needed
# (the original pre-allocated psuc and then overwrote it anyway).
psuc <- multi(24, 4, 0:20, 0.2, 0.25)
barplot(psuc, names = as.character(0:20))

# Negative binomial pmf in the (mean u, dispersion k) parameterisation.
negbin <- function(x, u, k) {
  (1 + u / k)^(-k) * (u / (u + k))^x * gamma(k + x) / (factorial(x) * gamma(k))
}
# negbin() is vectorised in x, so the sapply() wrapper is unnecessary.
xf <- negbin(0:10, 0.8, 0.2)
xf
barplot(xf, names = as.character(0:10))
# Built-in version: dnbinom(x, size = r, p).
plot(5:100, dnbinom(5:100, 5, 0.1), type = "s", xlab = "x", ylab = "f(x)")
count <- rnbinom(100, 1, 0.6)
table(count)
mean(count)
# Method-of-moments estimate of the dispersion parameter k.
k <- mean(count)^2 / (var(count) - mean(count))
k

x <- 0:12
freq <- c(131, 55, 21, 14, 6, 6, 2, 0, 0, 0, 0, 2, 1)
barplot(freq, names = as.character(x))
# Expand the frequency table into raw observations.
y <- rep(x, freq)
sum(freq)
length(y)
|
/Fall2013/programming/empirical11-26-2013.R
|
no_license
|
NanaAkwasiAbayieBoateng/MemphisClasses
|
R
| false
| false
| 1,759
|
r
|
# Cavendish's measurements of the density of the earth (1798).
cavendish <- c(5.5, 5.57, 5.42, 5.61, 5.53, 5.47, 4.88, 5.62, 5.63, 4.07,
               5.29, 5.34, 5.26, 5.44, 5.46, 5.55, 5.34, 5.3, 5.36, 5.79,
               5.75, 5.29, 5.1, 5.86, 5.58, 5.27, 5.85, 5.65, 5.39)
# Empirical cumulative distribution function via ecdf().
plot(ecdf(cavendish), xlab = "density of the earth", ylab = "cummulative curve", main = "")
# Binomial distribution: dbinom(x, n, p) is the pmf, pbinom() the cdf.
barplot(pbinom(0:4, 4, 0.1), names = as.character(0:4), xlab = "x", ylab = "F(x)")
barplot(dbinom(0:4, 4, 0.1), names = as.character(0:4), xlab = "x", ylab = "f(x)")
# Geometric distribution.
fx <- dgeom(0:20, 0.2)
barplot(fx, names = as.character(0:20), xlab = "x", ylab = "f(x)")
table(rgeom(100, 0.1))
# Hypergeometric distribution: dhyper(x, K, N - K, n).
barplot(dhyper(0:5, 6, 14, 5), names = as.character(0:5))
dhyper(5, 6, 14, 5)

# Multinomial pmf with 3 mutually exclusive outcomes from n independent trials.
# p1 is the probability of a type-a outcome, p2 of a type-b outcome; the
# remaining n - a - b trials are type c with probability 1 - p1 - p2.
multi <- function(n, a, b, p1, p2) {
  factorial(n) / (factorial(a) * factorial(b) * factorial(n - a - b)) *
    p1^a * p2^b * (1 - p1 - p2)^(n - a - b)
}
# Example: n = 24, a = 4, b = 0:20, p1 = 0.2, p2 = 0.25.
# multi() is vectorised in b, so no explicit loop or preallocation is needed
# (the original pre-allocated psuc and then overwrote it anyway).
psuc <- multi(24, 4, 0:20, 0.2, 0.25)
barplot(psuc, names = as.character(0:20))

# Negative binomial pmf in the (mean u, dispersion k) parameterisation.
negbin <- function(x, u, k) {
  (1 + u / k)^(-k) * (u / (u + k))^x * gamma(k + x) / (factorial(x) * gamma(k))
}
# negbin() is vectorised in x, so the sapply() wrapper is unnecessary.
xf <- negbin(0:10, 0.8, 0.2)
xf
barplot(xf, names = as.character(0:10))
# Built-in version: dnbinom(x, size = r, p).
plot(5:100, dnbinom(5:100, 5, 0.1), type = "s", xlab = "x", ylab = "f(x)")
count <- rnbinom(100, 1, 0.6)
table(count)
mean(count)
# Method-of-moments estimate of the dispersion parameter k.
k <- mean(count)^2 / (var(count) - mean(count))
k

x <- 0:12
freq <- c(131, 55, 21, 14, 6, 6, 2, 0, 0, 0, 0, 2, 1)
barplot(freq, names = as.character(x))
# Expand the frequency table into raw observations.
y <- rep(x, freq)
sum(freq)
length(y)
|
## While using UCI HAR dataset, I acknowledge the following publication:
##
## Davide Anguita, Alessandro Ghio, Luca Oneto, Xavier Parra
## and Jorge L. Reyes-Ortiz. Human Activity Recognition on Smartphones
## using a Multiclass Hardware-Friendly Support Vector Machine.
## International Workshop of Ambient Assisted Living (IWAAL 2012).
## Vitoria-Gasteiz, Spain. Dec 2012

# Prep Step: Download and unzip the files
# Make sure that the directory where the data is to be stored exists
if (!file.exists("./data")) {
  dir.create("./data")
}
# URL of the zipped dataset
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Download path for the zip archive
dwnld_path <- "./data/UCI_HAR_Dataset.zip"
# Download and unzip the file
download.file(URL, destfile = dwnld_path, method = "curl")
unzip(zipfile = dwnld_path, exdir = "./data")

# 2nd Prep Step: See what files the dataset contains
# Define the path where the new folder has been unzipped
pathdata <- file.path("./data", "UCI HAR Dataset")
# List every file in the extracted dataset (kept for interactive inspection)
files <- list.files(pathdata, recursive = TRUE)
# The dataset contains an "Inertial Signals" folder in both train and test
# that is of no interest to us.
# README.txt contains info about the experiments and features_info.txt
# explains what the feature names and variables mean.
# We can categorize the data files into four segments:
# training set / test set / features / activity
# Those are of our interest!

# 1st Step: Read files and create tables
# Read training data
x_train <- read.table(file.path(pathdata, "train", "X_train.txt"),
                      header = FALSE)
y_train <- read.table(file.path(pathdata, "train", "y_train.txt"),
                      header = FALSE)
subject_train <- read.table(file.path(pathdata, "train", "subject_train.txt"),
                            header = FALSE)
# Read testing data
x_test <- read.table(file.path(pathdata, "test", "X_test.txt"),
                     header = FALSE)
y_test <- read.table(file.path(pathdata, "test", "y_test.txt"),
                     header = FALSE)
subject_test <- read.table(file.path(pathdata, "test", "subject_test.txt"),
                           header = FALSE)
# Read the features data
features <- read.table(file.path(pathdata, "features.txt"),
                       header = FALSE)
# Read activity labels data
activity_labels <- read.table(file.path(pathdata, "activity_labels.txt"),
                              header = FALSE)

# 2nd Step: concatenate rows of data to get feature, activity and subject tables
subject_table <- rbind(subject_train, subject_test)
activity_table <- rbind(y_train, y_test)
features_table <- rbind(x_train, x_test)

# 3rd Step: set the column names of the tables
colnames(subject_table) <- "subjectID"
colnames(activity_table) <- "activityID"
colnames(activity_labels) <- c("activityID", "activityType")
colnames(features_table) <- features[, 2]

# 4th Step: merge the tables
full_data <- cbind(subject_table, activity_table, features_table)

# 5th Step: Extract only the measurements on the mean
# and standard deviation for each measurement
subset_features <- features[, 2][grep("mean\\(\\)|std\\(\\)",
                                      features[, 2])]
selected_names <- c("subjectID", "activityID", as.character(subset_features))
subset_data <- subset(full_data, select = selected_names)

# 6th Step: Use descriptive activity names to name
# the activities in the data set
activityID <- activity_labels[, 1]
activityType <- activity_labels[, 2]
subset_data$activityID <- plyr::mapvalues(subset_data$activityID,
                                          from = activityID, to = activityType)
colnames(subset_data)[colnames(subset_data) == "activityID"] <- "activityType"

# 7th Step: Appropriately label the data set with descriptive variable names.
# Feature names are relabelled as follows:
#   prefix 't' is replaced by 'time'
#   prefix 'f' is replaced by 'frequency'
#   'Acc'      is replaced by 'Accelerometer'
#   'Gyro'     is replaced by 'Gyroscope'
#   'Mag'      is replaced by 'Magnitude'
#   'BodyBody' is replaced by 'Body'
colnames(subset_data) <- gsub("^t", "time", colnames(subset_data))
colnames(subset_data) <- gsub("^f", "frequency", colnames(subset_data))
colnames(subset_data) <- gsub("Acc", "Accelerometer", colnames(subset_data))
colnames(subset_data) <- gsub("Gyro", "Gyroscope", colnames(subset_data))
colnames(subset_data) <- gsub("Mag", "Magnitude", colnames(subset_data))
colnames(subset_data) <- gsub("BodyBody", "Body", colnames(subset_data))

# 8th Step: Create a second, independent tidy data set and output it.
# Average each variable for each activity and each subject.
tidy_data <- aggregate(. ~ subjectID + activityType, subset_data, mean)
# Order the tidy data by subject and then by activity type
tidy_data <- tidy_data[order(tidy_data$subjectID, tidy_data$activityType), ]
# Write the output to a text file. Spell out `row.names`: the original
# `row.name = FALSE` relied on partial argument matching.
write.table(tidy_data, file = "data/tidy_data.txt", row.names = FALSE)
|
/Getting and Cleaning Data - Course Project/run_analysis.R
|
no_license
|
JAMorello/datasciencecoursera
|
R
| false
| false
| 5,290
|
r
|
## While using UCI HAR dataset, I acknowledge the following publication:
##
## Davide Anguita, Alessandro Ghio, Luca Oneto, Xavier Parra
## and Jorge L. Reyes-Ortiz. Human Activity Recognition on Smartphones
## using a Multiclass Hardware-Friendly Support Vector Machine.
## International Workshop of Ambient Assisted Living (IWAAL 2012).
## Vitoria-Gasteiz, Spain. Dec 2012

# Prep Step: Download and unzip the files
# Make sure that the directory where the data is to be stored exists
if (!file.exists("./data")) {
  dir.create("./data")
}
# URL of the zipped dataset
URL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Download path for the zip archive
dwnld_path <- "./data/UCI_HAR_Dataset.zip"
# Download and unzip the file
download.file(URL, destfile = dwnld_path, method = "curl")
unzip(zipfile = dwnld_path, exdir = "./data")

# 2nd Prep Step: See what files the dataset contains
# Define the path where the new folder has been unzipped
pathdata <- file.path("./data", "UCI HAR Dataset")
# List every file in the extracted dataset (kept for interactive inspection)
files <- list.files(pathdata, recursive = TRUE)
# The dataset contains an "Inertial Signals" folder in both train and test
# that is of no interest to us.
# README.txt contains info about the experiments and features_info.txt
# explains what the feature names and variables mean.
# We can categorize the data files into four segments:
# training set / test set / features / activity
# Those are of our interest!

# 1st Step: Read files and create tables
# Read training data
x_train <- read.table(file.path(pathdata, "train", "X_train.txt"),
                      header = FALSE)
y_train <- read.table(file.path(pathdata, "train", "y_train.txt"),
                      header = FALSE)
subject_train <- read.table(file.path(pathdata, "train", "subject_train.txt"),
                            header = FALSE)
# Read testing data
x_test <- read.table(file.path(pathdata, "test", "X_test.txt"),
                     header = FALSE)
y_test <- read.table(file.path(pathdata, "test", "y_test.txt"),
                     header = FALSE)
subject_test <- read.table(file.path(pathdata, "test", "subject_test.txt"),
                           header = FALSE)
# Read the features data
features <- read.table(file.path(pathdata, "features.txt"),
                       header = FALSE)
# Read activity labels data
activity_labels <- read.table(file.path(pathdata, "activity_labels.txt"),
                              header = FALSE)

# 2nd Step: concatenate rows of data to get feature, activity and subject tables
subject_table <- rbind(subject_train, subject_test)
activity_table <- rbind(y_train, y_test)
features_table <- rbind(x_train, x_test)

# 3rd Step: set the column names of the tables
colnames(subject_table) <- "subjectID"
colnames(activity_table) <- "activityID"
colnames(activity_labels) <- c("activityID", "activityType")
colnames(features_table) <- features[, 2]

# 4th Step: merge the tables
full_data <- cbind(subject_table, activity_table, features_table)

# 5th Step: Extract only the measurements on the mean
# and standard deviation for each measurement
subset_features <- features[, 2][grep("mean\\(\\)|std\\(\\)",
                                      features[, 2])]
selected_names <- c("subjectID", "activityID", as.character(subset_features))
subset_data <- subset(full_data, select = selected_names)

# 6th Step: Use descriptive activity names to name
# the activities in the data set
activityID <- activity_labels[, 1]
activityType <- activity_labels[, 2]
subset_data$activityID <- plyr::mapvalues(subset_data$activityID,
                                          from = activityID, to = activityType)
colnames(subset_data)[colnames(subset_data) == "activityID"] <- "activityType"

# 7th Step: Appropriately label the data set with descriptive variable names.
# Feature names are relabelled as follows:
#   prefix 't' is replaced by 'time'
#   prefix 'f' is replaced by 'frequency'
#   'Acc'      is replaced by 'Accelerometer'
#   'Gyro'     is replaced by 'Gyroscope'
#   'Mag'      is replaced by 'Magnitude'
#   'BodyBody' is replaced by 'Body'
colnames(subset_data) <- gsub("^t", "time", colnames(subset_data))
colnames(subset_data) <- gsub("^f", "frequency", colnames(subset_data))
colnames(subset_data) <- gsub("Acc", "Accelerometer", colnames(subset_data))
colnames(subset_data) <- gsub("Gyro", "Gyroscope", colnames(subset_data))
colnames(subset_data) <- gsub("Mag", "Magnitude", colnames(subset_data))
colnames(subset_data) <- gsub("BodyBody", "Body", colnames(subset_data))

# 8th Step: Create a second, independent tidy data set and output it.
# Average each variable for each activity and each subject.
tidy_data <- aggregate(. ~ subjectID + activityType, subset_data, mean)
# Order the tidy data by subject and then by activity type
tidy_data <- tidy_data[order(tidy_data$subjectID, tidy_data$activityType), ]
# Write the output to a text file. Spell out `row.names`: the original
# `row.name = FALSE` relied on partial argument matching.
write.table(tidy_data, file = "data/tidy_data.txt", row.names = FALSE)
|
#. Recognize handwritten digits using Random Forests
#. Author: @ dspk
#. IMPORTANT USAGE INSTRUCTIONS:
#. Get the data for the MNIST database of handwritten digits
#. Arrange the data in a list named Usedat with two fields:
#.   X : a matrix of all the images so that a single row contains a single image
#.   y : the labels of the images
setwd("SET CORRECT WORKING DIRECTORY")

#. A single row of the matrix X is one training example (a 20x20 = 400-pixel image).
#. The digit '0' is labeled as '10'.
#. Construct matrices X and y and separate data into training and test sets.
X.matrix <- as.matrix(Usedat$X)
y.matrix <- as.factor(as.matrix(Usedat$y))
newdat3 <- data.frame(X.matrix, y.matrix)
table(y.matrix)  # shows how the y classes are ordered

#. Divide the data into test and training sets, stratified by class:
#. 2/3 of each digit class goes to training, the remaining 1/3 to test.
set.seed(123)
train <- integer(0)
for (i in seq_len(dim(table(newdat3$y.matrix)))) {
  leveldat <- which(newdat3$y.matrix == names(table(newdat3$y.matrix)[i]))
  traindat <- sample(leveldat, floor(length(leveldat) * 2 / 3))
  train <- c(train, traindat)
}
train.set <- newdat3[train, ]
test.set <- newdat3[-train, ]

library(randomForest)  # library() errors early if missing; require() would not
forest.digit1 <- randomForest(y.matrix ~ ., data = train.set, prox = TRUE)
print(forest.digit1)        #. confusion matrix, mtry, OOB estimate of error
forest.digit1$ntree         #. number of trees grown
forest.digit1$err.rate[50]  #. OOB error rate for all trees up to the 50th tree

#. Obtain individual trees
getTree(forest.digit1, k = 2)  #. second tree

#. Predict model accuracy and print confusion matrix for test data error
predict.digit <- predict(forest.digit1, test.set, type = "class")  #. class label outputs
#. BUG FIX: the default predict type ("response") returns class labels, not
#. probabilities; type = "prob" is required for a class-probability matrix.
predict.digit.prob <- predict(forest.digit1, test.set, type = "prob")
#. Confusion matrix for test data (predicted vs observed)
loss.matrix.test <- table("predicted class" = predict.digit,
                          "actual class" = test.set$y.matrix)
error.test <- 1.0 - sum(diag(loss.matrix.test)) / sum(loss.matrix.test)
print(error.test)     #. ~9% error
accuracy.test <- sum(diag(loss.matrix.test)) / sum(loss.matrix.test)
print(accuracy.test)  #. ~91% accuracy on testing data
library(e1071)
classAgreement(loss.matrix.test)  #. classification agreement coefficients

#. Tune the model parameters - here mtry, the number of variables randomly
#. chosen at each split.
oob.error <- rep(0, 100)
test.error <- rep(0, 100)
for (i in 1:100) {
  fit <- randomForest(y.matrix ~ ., data = train.set, mtry = i, ntree = 50)
  oob.error[i] <- fit$err.rate[50]
  predict.test <- predict(fit, test.set, type = "class")
  test.error[i] <- 1 - mean(predict.test == test.set$y.matrix)
}

#. Print and plot errors
print(oob.error)
print(test.error)
plot(seq(1, 100), oob.error, ylim = c(0.05, 0.3), col = "blue", type = "o",
     pch = 19, xlab = "")
points(test.error, col = "red", type = "o", pch = 19)
legend("topright", c("oob.error", "test.error"), lwd = 2, col = c("blue", "red"))

#. Collect all test-set examples that were classified correctly and
#. misclassified; we're going to plot and compare them. Column 401 holds the label.
example.misclass <- test.set[which(predict.digit != test.set[, 401]), ]
example.right <- test.set[which(predict.digit == test.set[, 401]), ]

#. Print a sample of 10 correctly classified test examples.
#. The rf model was able to correctly classify some rather difficult examples.
color_spec <- colorRampPalette(colors = c("grey", "black"))
jpeg("RF_correctlyclassifieddigits.jpg")
rownum_increment <- 0
row_numbers <- c(10, 200, 350, 525, 650, 825, 955, 1125, 1260, 1400) + rownum_increment
par(mfrow = c(4, 3), pty = "s", mar = c(1, 1, 1, 1), xaxt = "n", yaxt = "n")
for (i in row_numbers) {
  #. Reassemble the 400-pixel row into a 20x20 image, flipped to display upright.
  z <- array(as.vector(as.matrix(example.right[i, -401])), dim = c(20, 20))
  z <- t(z[20:1, ])
  image(1:20, 1:20, z, main = example.right[i, 401], col = color_spec(256))
}
dev.off()

#. Print a sample of those examples which were misclassified
jpeg("RF_Misclassifieddigits.jpg")
rownum_increment <- 0
row_numbers <- c(1, 10, 15, 30, 45, 60, 75, 90, 105, 120, 135)
par(mfrow = c(4, 3), pty = "s", mar = c(1, 1, 1, 1), xaxt = "n", yaxt = "n")
for (i in row_numbers) {
  z <- array(as.vector(as.matrix(example.misclass[i, -401])), dim = c(20, 20))
  z <- t(z[20:1, ])
  image(1:20, 1:20, z, main = example.misclass[i, 401], col = color_spec(256))
}
dev.off()
|
/RandomForestDigitRec.R
|
no_license
|
dspk/Rcode
|
R
| false
| false
| 4,449
|
r
|
#. Recognize handwritten digits using Random Forests
#. Author: @ dspk
#. IMPORTANT USAGE INSTRUCTIONS:
#. Get the data for the MNIST database of handwritten digits
#. Arrange the data in a list named Usedat with two fields:
#.   X : a matrix of all the images so that a single row contains a single image
#.   y : the labels of the images
setwd("SET CORRECT WORKING DIRECTORY")

#. A single row of the matrix X is one training example (a 20x20 = 400-pixel image).
#. The digit '0' is labeled as '10'.
#. Construct matrices X and y and separate data into training and test sets.
X.matrix <- as.matrix(Usedat$X)
y.matrix <- as.factor(as.matrix(Usedat$y))
newdat3 <- data.frame(X.matrix, y.matrix)
table(y.matrix)  # shows how the y classes are ordered

#. Divide the data into test and training sets, stratified by class:
#. 2/3 of each digit class goes to training, the remaining 1/3 to test.
set.seed(123)
train <- integer(0)
for (i in seq_len(dim(table(newdat3$y.matrix)))) {
  leveldat <- which(newdat3$y.matrix == names(table(newdat3$y.matrix)[i]))
  traindat <- sample(leveldat, floor(length(leveldat) * 2 / 3))
  train <- c(train, traindat)
}
train.set <- newdat3[train, ]
test.set <- newdat3[-train, ]

library(randomForest)  # library() errors early if missing; require() would not
forest.digit1 <- randomForest(y.matrix ~ ., data = train.set, prox = TRUE)
print(forest.digit1)        #. confusion matrix, mtry, OOB estimate of error
forest.digit1$ntree         #. number of trees grown
forest.digit1$err.rate[50]  #. OOB error rate for all trees up to the 50th tree

#. Obtain individual trees
getTree(forest.digit1, k = 2)  #. second tree

#. Predict model accuracy and print confusion matrix for test data error
predict.digit <- predict(forest.digit1, test.set, type = "class")  #. class label outputs
#. BUG FIX: the default predict type ("response") returns class labels, not
#. probabilities; type = "prob" is required for a class-probability matrix.
predict.digit.prob <- predict(forest.digit1, test.set, type = "prob")
#. Confusion matrix for test data (predicted vs observed)
loss.matrix.test <- table("predicted class" = predict.digit,
                          "actual class" = test.set$y.matrix)
error.test <- 1.0 - sum(diag(loss.matrix.test)) / sum(loss.matrix.test)
print(error.test)     #. ~9% error
accuracy.test <- sum(diag(loss.matrix.test)) / sum(loss.matrix.test)
print(accuracy.test)  #. ~91% accuracy on testing data
library(e1071)
classAgreement(loss.matrix.test)  #. classification agreement coefficients

#. Tune the model parameters - here mtry, the number of variables randomly
#. chosen at each split.
oob.error <- rep(0, 100)
test.error <- rep(0, 100)
for (i in 1:100) {
  fit <- randomForest(y.matrix ~ ., data = train.set, mtry = i, ntree = 50)
  oob.error[i] <- fit$err.rate[50]
  predict.test <- predict(fit, test.set, type = "class")
  test.error[i] <- 1 - mean(predict.test == test.set$y.matrix)
}

#. Print and plot errors
print(oob.error)
print(test.error)
plot(seq(1, 100), oob.error, ylim = c(0.05, 0.3), col = "blue", type = "o",
     pch = 19, xlab = "")
points(test.error, col = "red", type = "o", pch = 19)
legend("topright", c("oob.error", "test.error"), lwd = 2, col = c("blue", "red"))

#. Collect all test-set examples that were classified correctly and
#. misclassified; we're going to plot and compare them. Column 401 holds the label.
example.misclass <- test.set[which(predict.digit != test.set[, 401]), ]
example.right <- test.set[which(predict.digit == test.set[, 401]), ]

#. Print a sample of 10 correctly classified test examples.
#. The rf model was able to correctly classify some rather difficult examples.
color_spec <- colorRampPalette(colors = c("grey", "black"))
jpeg("RF_correctlyclassifieddigits.jpg")
rownum_increment <- 0
row_numbers <- c(10, 200, 350, 525, 650, 825, 955, 1125, 1260, 1400) + rownum_increment
par(mfrow = c(4, 3), pty = "s", mar = c(1, 1, 1, 1), xaxt = "n", yaxt = "n")
for (i in row_numbers) {
  #. Reassemble the 400-pixel row into a 20x20 image, flipped to display upright.
  z <- array(as.vector(as.matrix(example.right[i, -401])), dim = c(20, 20))
  z <- t(z[20:1, ])
  image(1:20, 1:20, z, main = example.right[i, 401], col = color_spec(256))
}
dev.off()

#. Print a sample of those examples which were misclassified
jpeg("RF_Misclassifieddigits.jpg")
rownum_increment <- 0
row_numbers <- c(1, 10, 15, 30, 45, 60, 75, 90, 105, 120, 135)
par(mfrow = c(4, 3), pty = "s", mar = c(1, 1, 1, 1), xaxt = "n", yaxt = "n")
for (i in row_numbers) {
  z <- array(as.vector(as.matrix(example.misclass[i, -401])), dim = c(20, 20))
  z <- t(z[20:1, ])
  image(1:20, 1:20, z, main = example.misclass[i, 401], col = color_spec(256))
}
dev.off()
|
####Written by: AM Cook
####Purpose: to find the first two moments of a distribution from quantile breaks
####         (defined by the upper and lower percent)
####Date: February 10, 2012
####Note: Gamma distribution returns shape and scale rather than rate (as per
####      winbugs distribution); for use in R, rate = 1/scale
# NOTE(review): this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... >>>>>>> origin/utilitycleanup) whose two branches were
# byte-identical; the conflict is resolved here by keeping a single copy.

#' Find distribution parameters from two quantiles.
#'
#' @param lo     value of the lower quantile
#' @param up     value of the upper quantile
#' @param l.perc probability attached to `lo` (default 2.5%)
#' @param u.perc probability attached to `up` (default 97.5%)
#' @param dist   one of "norm", "lnorm", "weibull", "gamma", "inv.gamma"
#' @param plot   draw a density of 10,000 simulated draws with the two
#'               quantile breaks marked?
#' @return a named list of the fitted parameters (mean/variance for the
#'         normal families, shape/scale otherwise)
find.moments <- function(lo = 0.1, up = 0.8, l.perc = 0.025, u.perc = 0.975,
                         dist = c("norm", "lnorm", "weibull", "gamma", "inv.gamma"),
                         plot = TRUE) {
  # Default to the first choice and validate `dist`; the original compared the
  # length-5 default vector with `==`, which fails when `dist` is not supplied.
  dist <- match.arg(dist)
  if (dist == "norm") {
    # Solve lo = xbar + sig*qnorm(l.perc) and up = xbar + sig*qnorm(u.perc).
    sig2 <- ((up - lo) / (qnorm(u.perc) - qnorm(l.perc)))^2
    xbar <- (lo * qnorm(u.perc) - up * qnorm(l.perc)) / (qnorm(u.perc) - qnorm(l.perc))
    if (plot) {
      plot(density(rnorm(10000, xbar, sqrt(sig2))), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(xbar = xbar, sig2 = sig2))
  }
  if (dist == "lnorm") {
    # Same as the normal case, on the log scale.
    sig2 <- ((log(up) - log(lo)) / (qnorm(u.perc) - qnorm(l.perc)))^2
    xbar <- (log(lo) * qnorm(u.perc) - log(up) * qnorm(l.perc)) / (qnorm(u.perc) - qnorm(l.perc))
    if (plot) {
      plot(density(rlnorm(10000, xbar, sqrt(sig2))), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(xbar = xbar, sig2 = sig2))
  }
  if (dist == "weibull") {
    # Closed-form shape (gam) and scale (beta) from the two quantile equations.
    gam <- (log(-log(1 - u.perc)) - log(-log(1 - l.perc))) / (log(up) - log(lo))
    beta <- lo / (-log(1 - l.perc))^(1 / gam)
    if (plot) {
      plot(density(rweibull(10000, gam, beta)), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(shape = gam, beta = beta))
  }
  if (dist == "gamma") {
    # No closed form: numerically find the shape whose quantile ratio matches up/lo.
    # (The original also assigned a = mean(c(lo, up)) here, but that value was
    # never used - optimize() supplies its own `a` - so the dead code is dropped.)
    alpha <- function(a) {
      b <- qgamma(u.perc, a, 1) / qgamma(l.perc, a, 1)
      o <- (b - (up / lo))^2
      list(a = a, o = o)
    }
    op <- function(a) alpha(a)$o
    ot <- optimize(f = op, interval = c(0, up))
    alp <- ot$minimum
    en <- lo / qgamma(l.perc, alp, 1)  # scale recovered from the lower quantile
    if (plot) {
      plot(density(rgamma(10000, alp, scale = en)), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(shape = alp, scale = en))
  }
  if (dist == "inv.gamma") {
    # Work on the reciprocal scale: if X ~ inverse-gamma then 1/X ~ gamma.
    ilo <- 1 / lo
    iup <- 1 / up
    alpha <- function(a) {
      b <- qgamma(1 - u.perc, a, 1) / qgamma(1 - l.perc, a, 1)
      o <- (b - (iup / ilo))^2
      list(a = a, o = o)
    }
    op <- function(a) alpha(a)$o
    ot <- optimize(f = op, interval = c(0, up))
    alp <- ot$minimum
    en <- ilo / (1 / qgamma(1 - l.perc, alp, 1))
    if (plot) {
      plot(density(1 / rgamma(10000, alp, scale = 1 / en)), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(shape = alp, scale = en))
  }
}
|
/silverhake/src/_Rfunctions/find.moments.r
|
no_license
|
jgmunden/ecomod
|
R
| false
| false
| 4,695
|
r
|
####Written by: AM Cook
####Purpose: to find the first two moments of a distribution from quantile breaks
####         (defined by the upper and lower percent)
####Date: February 10, 2012
####Note: Gamma distribution returns shape and scale rather than rate (as per
####      winbugs distribution); for use in R, rate = 1/scale
# NOTE(review): this file contained an unresolved git merge conflict
# (<<<<<<< HEAD ... >>>>>>> origin/utilitycleanup) whose two branches were
# byte-identical; the conflict is resolved here by keeping a single copy.

#' Find distribution parameters from two quantiles.
#'
#' @param lo     value of the lower quantile
#' @param up     value of the upper quantile
#' @param l.perc probability attached to `lo` (default 2.5%)
#' @param u.perc probability attached to `up` (default 97.5%)
#' @param dist   one of "norm", "lnorm", "weibull", "gamma", "inv.gamma"
#' @param plot   draw a density of 10,000 simulated draws with the two
#'               quantile breaks marked?
#' @return a named list of the fitted parameters (mean/variance for the
#'         normal families, shape/scale otherwise)
find.moments <- function(lo = 0.1, up = 0.8, l.perc = 0.025, u.perc = 0.975,
                         dist = c("norm", "lnorm", "weibull", "gamma", "inv.gamma"),
                         plot = TRUE) {
  # Default to the first choice and validate `dist`; the original compared the
  # length-5 default vector with `==`, which fails when `dist` is not supplied.
  dist <- match.arg(dist)
  if (dist == "norm") {
    # Solve lo = xbar + sig*qnorm(l.perc) and up = xbar + sig*qnorm(u.perc).
    sig2 <- ((up - lo) / (qnorm(u.perc) - qnorm(l.perc)))^2
    xbar <- (lo * qnorm(u.perc) - up * qnorm(l.perc)) / (qnorm(u.perc) - qnorm(l.perc))
    if (plot) {
      plot(density(rnorm(10000, xbar, sqrt(sig2))), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(xbar = xbar, sig2 = sig2))
  }
  if (dist == "lnorm") {
    # Same as the normal case, on the log scale.
    sig2 <- ((log(up) - log(lo)) / (qnorm(u.perc) - qnorm(l.perc)))^2
    xbar <- (log(lo) * qnorm(u.perc) - log(up) * qnorm(l.perc)) / (qnorm(u.perc) - qnorm(l.perc))
    if (plot) {
      plot(density(rlnorm(10000, xbar, sqrt(sig2))), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(xbar = xbar, sig2 = sig2))
  }
  if (dist == "weibull") {
    # Closed-form shape (gam) and scale (beta) from the two quantile equations.
    gam <- (log(-log(1 - u.perc)) - log(-log(1 - l.perc))) / (log(up) - log(lo))
    beta <- lo / (-log(1 - l.perc))^(1 / gam)
    if (plot) {
      plot(density(rweibull(10000, gam, beta)), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(shape = gam, beta = beta))
  }
  if (dist == "gamma") {
    # No closed form: numerically find the shape whose quantile ratio matches up/lo.
    # (The original also assigned a = mean(c(lo, up)) here, but that value was
    # never used - optimize() supplies its own `a` - so the dead code is dropped.)
    alpha <- function(a) {
      b <- qgamma(u.perc, a, 1) / qgamma(l.perc, a, 1)
      o <- (b - (up / lo))^2
      list(a = a, o = o)
    }
    op <- function(a) alpha(a)$o
    ot <- optimize(f = op, interval = c(0, up))
    alp <- ot$minimum
    en <- lo / qgamma(l.perc, alp, 1)  # scale recovered from the lower quantile
    if (plot) {
      plot(density(rgamma(10000, alp, scale = en)), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(shape = alp, scale = en))
  }
  if (dist == "inv.gamma") {
    # Work on the reciprocal scale: if X ~ inverse-gamma then 1/X ~ gamma.
    ilo <- 1 / lo
    iup <- 1 / up
    alpha <- function(a) {
      b <- qgamma(1 - u.perc, a, 1) / qgamma(1 - l.perc, a, 1)
      o <- (b - (iup / ilo))^2
      list(a = a, o = o)
    }
    op <- function(a) alpha(a)$o
    ot <- optimize(f = op, interval = c(0, up))
    alp <- ot$minimum
    en <- ilo / (1 / qgamma(1 - l.perc, alp, 1))
    if (plot) {
      plot(density(1 / rgamma(10000, alp, scale = 1 / en)), main = "")
      abline(v = lo)
      abline(v = up)
    }
    return(list(shape = alp, scale = en))
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/future.batchtools-package.R
\docType{package}
\name{future.batchtools}
\alias{future.batchtools}
\alias{future.batchtools-package}
\title{future.batchtools: A Future for batchtools}
\description{
The \pkg{future.batchtools} package implements the Future API
on top of \pkg{batchtools} such that futures can be resolved
on, for instance, high-performance compute (HPC) clusters via
job schedulers.
The Future API is defined by the \pkg{future} package.
}
\details{
To use batchtools futures, load \pkg{future.batchtools}, and
select the type of future you wish to use via
\code{\link[future:plan]{plan}()}.
}
\examples{
\donttest{
plan(batchtools_local)
demo("mandelbrot", package = "future", ask = FALSE)
}
## Use local batchtools futures
plan(batchtools_local)
## A global variable
a <- 1
v \%<-\% {
b <- 3
c <- 2
a * b * c
}
print(v)
}
|
/man/future.batchtools.Rd
|
no_license
|
jeremyrcoyle/future.batchtools
|
R
| false
| true
| 924
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/future.batchtools-package.R
\docType{package}
\name{future.batchtools}
\alias{future.batchtools}
\alias{future.batchtools-package}
\title{future.batchtools: A Future for batchtools}
\description{
The \pkg{future.batchtools} package implements the Future API
on top of \pkg{batchtools} such that futures can be resolved
on, for instance, high-performance compute (HPC) clusters via
job schedulers.
The Future API is defined by the \pkg{future} package.
}
\details{
To use batchtools futures, load \pkg{future.batchtools}, and
select the type of future you wish to use via
\code{\link[future:plan]{plan}()}.
}
\examples{
\donttest{
plan(batchtools_local)
demo("mandelbrot", package = "future", ask = FALSE)
}
## Use local batchtools futures
plan(batchtools_local)
## A global variable
a <- 1
v \%<-\% {
b <- 3
c <- 2
a * b * c
}
print(v)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{choose_files}
\alias{choose_files}
\title{Cross-platform choose files function}
\usage{
choose_files(
caption = "Select files",
default = "",
multi = TRUE,
filters = NULL
)
}
\arguments{
\item{caption}{Caption for the choose directory dialog}
\item{default}{Starting directory}
\item{multi}{Allow multiple files to be selected}
\item{filters}{A two-column matrix of filename filters, or a two-element
vector containing a single filter.}
}
\value{
The path to the selected file(s), or NA if the user canceled.
}
\description{
Cross-platform choose files function
}
|
/man/choose_files.Rd
|
permissive
|
joshrud/phenoptrReports
|
R
| false
| true
| 671
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{choose_files}
\alias{choose_files}
\title{Cross-platform choose files function}
\usage{
choose_files(
caption = "Select files",
default = "",
multi = TRUE,
filters = NULL
)
}
\arguments{
\item{caption}{Caption for the choose directory dialog}
\item{default}{Starting directory}
\item{multi}{Allow multiple files to be selected}
\item{filters}{A two-column matrix of filename filters, or a two-element
vector containing a single filter.}
}
\value{
The path to the selected file(s), or NA if the user canceled.
}
\description{
Cross-platform choose files function
}
|
# Recode genotype values with Codadd() and return the result as a factor.
# NOTE(review): Codadd() is defined elsewhere in the package; its output is
# assumed to be coercible by as.factor().
Codcodom <- function(x) {
  as.factor(Codadd(x))
}
|
/globalGSA/R/Codcodom.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 57
|
r
|
# Recode genotype values with Codadd() and return the result as a factor.
# NOTE(review): Codadd() is defined elsewhere in the package; its output is
# assumed to be coercible by as.factor().
Codcodom <- function(x) {
  as.factor(Codadd(x))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_examples.R
\name{drive_examples}
\alias{drive_examples}
\alias{drive_examples_local}
\alias{drive_examples_remote}
\alias{drive_example_local}
\alias{drive_example_remote}
\title{Example files}
\usage{
drive_examples_local(matches)
drive_examples_remote(matches)
drive_example_local(matches)
drive_example_remote(matches)
}
\arguments{
\item{matches}{A regular expression that matches the name of the desired
example file(s). This argument is optional for the plural forms
(\code{drive_examples_local()} and \code{drive_examples_remote()}) and, if provided,
multiple matches are allowed. The single forms (\code{drive_example_local()} and
\code{drive_example_remote()}) require this argument and require that there is
exactly one match.}
}
\value{
\itemize{
\item For \code{drive_example_local()} and \code{drive_examples_local()}, one or more local
filepaths.
\item For \code{drive_example_remote()} and \code{drive_examples_remote()}, a \code{dribble}.
}
}
\description{
googledrive makes a variety of example files -- both local and remote --
available for use in examples and reprexes. These functions help you access
the example files. See \code{vignette("example-files", package = "googledrive")}
for more.
}
\examples{
drive_examples_local() \%>\% basename()
drive_examples_local("chicken") \%>\% basename()
drive_example_local("imdb")
\dontshow{if (drive_has_token()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
drive_examples_remote()
drive_examples_remote("chicken")
drive_example_remote("chicken_doc")
\dontshow{\}) # examplesIf}
}
|
/man/drive_examples.Rd
|
permissive
|
tidyverse/googledrive
|
R
| false
| true
| 1,659
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_examples.R
\name{drive_examples}
\alias{drive_examples}
\alias{drive_examples_local}
\alias{drive_examples_remote}
\alias{drive_example_local}
\alias{drive_example_remote}
\title{Example files}
\usage{
drive_examples_local(matches)
drive_examples_remote(matches)
drive_example_local(matches)
drive_example_remote(matches)
}
\arguments{
\item{matches}{A regular expression that matches the name of the desired
example file(s). This argument is optional for the plural forms
(\code{drive_examples_local()} and \code{drive_examples_remote()}) and, if provided,
multiple matches are allowed. The single forms (\code{drive_example_local()} and
\code{drive_example_remote()}) require this argument and require that there is
exactly one match.}
}
\value{
\itemize{
\item For \code{drive_example_local()} and \code{drive_examples_local()}, one or more local
filepaths.
\item For \code{drive_example_remote()} and \code{drive_examples_remote()}, a \code{dribble}.
}
}
\description{
googledrive makes a variety of example files -- both local and remote --
available for use in examples and reprexes. These functions help you access
the example files. See \code{vignette("example-files", package = "googledrive")}
for more.
}
\examples{
drive_examples_local() \%>\% basename()
drive_examples_local("chicken") \%>\% basename()
drive_example_local("imdb")
\dontshow{if (drive_has_token()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
drive_examples_remote()
drive_examples_remote("chicken")
drive_example_remote("chicken_doc")
\dontshow{\}) # examplesIf}
}
|
# Exploratory Data Analysis script.
# Evaluates a single household's electricity usage over two days and draws a
# line graph of the three sub-meter readings, saved to plot3.png.
#
# Assumes "household_power_consumption.txt" is in the working directory.
library(dplyr)

# Build the path to the data file from the current working directory.
file_dir <- getwd()
file_location <- paste0(file_dir, "/household_power_consumption.txt")

# Read the whole data set; missing values are coded as "?".
edata <- read.table(file_location, header = TRUE, sep = ";", na.strings = "?", dec = ".")

# Keep only the two days of interest.
gdata <- filter(edata, Date == "1/2/2007" | Date == "2/2/2007")

# Combine Date and Time into a single timestamp for plotting.
gdata$DateTime <- strptime(paste(gdata$Date, gdata$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")

# Convert the Date field from text to a proper Date.
# NOTE(review): the original also ran as.Date() on the Time column with a
# "%H/%M/%S" format, which cannot work (Time is a time of day, not a date)
# and the column is unused below, so that conversion has been dropped.
gdata$Date <- as.Date(gdata$Date, format = "%d/%m/%Y")

# BUG FIX: par(3, 4, 1, 1) is invalid -- par() takes named parameters.
# The intent was presumably to set the plot margins.
par(mar = c(3, 4, 1, 1))

# Axis ranges for the empty plot frame, then draw the three series.
xrange <- range(gdata$DateTime)
yrange <- range(gdata$Sub_metering_1)
plot(xrange, yrange, xlab = "", ylab = "Energy sub metering", type = "n")
lines(gdata$DateTime, gdata$Sub_metering_1, col = "black", lty = 1)
lines(gdata$DateTime, gdata$Sub_metering_2, col = "red", lty = 1)
lines(gdata$DateTime, gdata$Sub_metering_3, col = "blue", lty = 1)

# Legend identifying the three sub-meter series.
legend("topright",
       col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       cex = 0.7,
       lty = 1
)

# Copy the graph to a png file called plot3.png.
dev.copy(png, file = "plot3.png", height = 480, width = 580)
dev.off()
|
/plot3.R
|
no_license
|
knjenga/ExData_Proj1
|
R
| false
| false
| 1,783
|
r
|
# This srcipt is an Exploratory Data Analysis script
# The script evaluates a single households electricity house usage for two days in a year
# The script creates an Exploratory line graph of the sub meter reading instaled at the house
#
#Load the required library functions
library(dplyr)
# The function assumes that the input are in the working directory
# Get the users working directory
file_dir <- getwd()
# Create the file name
file_location <- paste0(file_dir,"/household_power_consumption.txt")
# Read the whole data set
edata <- read.table(file_location,header = TRUE,sep=";",na.strings = "?",dec = ".")
# filter the data to just the required two days of intrest
gdata <- filter(edata, Date == "1/2/2007"|Date == "2/2/2007")
# create a new field that holds both the data and time
gdata$DateTime <- strptime(paste(gdata$Date,gdata$Time,sep=" "),format="%d/%m/%Y %H:%M:%S")
# format the Date and time fields in from text to date fields
gdata$Date <- as.Date(gdata$Date,format="%d/%m/%Y")
gdata$Time <- as.Date(gdata$Time,format="%H/%M/%S")
#plot the Global active power line graph for diffrent types of the days
par(3,4,1,1)
#get the axis range ranges for the graph
xrange <- range(gdata$DateTime)
yrange <- range(gdata$Sub_metering_1)
plot(xrange,yrange,xlab="",ylab="Energy sub metering",type="n")
lines(gdata$DateTime,gdata$Sub_metering_1, col="black",lty=1)
lines(gdata$DateTime,gdata$Sub_metering_2, col="red",lty=1)
lines(gdata$DateTime,gdata$Sub_metering_3, col="blue",lty=1)
# format the graphs legend
legend("topright",
col=c("black","red","blue"),
c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
cex=0.7,
lty=1
)
# copy the graph to a png file called plot3.png
dev.copy(png,file ="plot3.png",height=480,width=580)
dev.off()
|
# Entry point: read the two target days of data and render the 4-panel plot.
plot4 <- function() {
  plot4_internal(read_data())
}
# Render the four-panel figure for plot 4 into plot4.png.
# `data` is expected to carry a Time column (POSIXlt, built by read_data())
# plus the power/voltage/sub-metering columns as factor or character values.
plot4_internal <- function(data){
# Open the PNG device; it is closed by dev.off() at the end.
png(file ="plot4.png")
# 2 x 2 panel grid, filled row by row.
par(mfrow = c(2, 2))
#plot1: Global active power over time
t = subset(data, select=c(Time,Global_active_power))
# Convert via as.character first so factor values become their printed
# numbers rather than their integer level codes.
t$Global_active_power = as.numeric(as.character(t$Global_active_power))
plot(t,type="l",xlab="",ylab="Global Active Power(kilowatts)")
#plot2: Voltage over time
t = subset(data, select=c(Time,Voltage))
t$Voltage = as.numeric(as.character(t$Voltage))
plot(t,type="l",xlab="datetime",ylab="Voltage")
#plot3: the three sub-metering series on one panel, with a legend
t = subset(data, select=c(Time,Sub_metering_1,Sub_metering_2,Sub_metering_3))
t$Sub_metering_1 = as.numeric(as.character(t$Sub_metering_1))
t$Sub_metering_2 = as.numeric(as.character(t$Sub_metering_2))
t$Sub_metering_3 = as.numeric(as.character(t$Sub_metering_3))
plot( subset(t,select=c(Time,Sub_metering_1)),type="l",xlab="",ylab="Energy sub metering",ylim=c(0,40),col="grey50")
lines(subset(t,select=c(Time,Sub_metering_2)),col="red")
lines(subset(t,select=c(Time,Sub_metering_3)),col="blue")
legend("topright", col = c("grey50", "red","blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lwd = 1,cex = 1, merge = TRUE)
#plot 4: Global reactive power over time
t = subset(data, select=c(Time,Global_reactive_power))
t$Global_reactive_power = as.numeric(as.character(t$Global_reactive_power))
plot(t,type="l",xlab="datetime",ylab="Global_reactive_power")
# Close the PNG device, flushing the file to disk.
dev.off()
}
# Read the rows for 1-2 Feb 2007 from household_power_consumption.txt.
# NOTE(review): this shells out to the Windows `findstr` command via pipe(),
# so it only runs on Windows; the commented grep() call is the Unix variant.
read_data <- function() {
##data <- read.csv2(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'))
# Read zero rows first purely to capture the header / column names.
t <-read.csv2("household_power_consumption.txt",nrows=0)
# Pre-filter the file to lines beginning with the two target dates.
data <- read.csv2(pipe("findstr /B /R ^[1-2]/2/2007 household_power_consumption.txt"),header = FALSE)
names(data) <- names(t)
# Build a POSIXlt timestamp in the Time column from the Date/Time text.
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
data$Time <- paste(data$Date,data$Time)
data$Time <- strptime(data$Time,format="%Y-%m-%d %H:%M:%S")
data
}
|
/plot4.R
|
no_license
|
darily/ExData_Plotting1
|
R
| false
| false
| 1,887
|
r
|
plot4 <- function(){
data = read_data()
plot4_internal(data)
}
plot4_internal <- function(data){
png(file ="plot4.png")
par(mfrow = c(2, 2))
#plot1
t = subset(data, select=c(Time,Global_active_power))
t$Global_active_power = as.numeric(as.character(t$Global_active_power))
plot(t,type="l",xlab="",ylab="Global Active Power(kilowatts)")
#plot2
t = subset(data, select=c(Time,Voltage))
t$Voltage = as.numeric(as.character(t$Voltage))
plot(t,type="l",xlab="datetime",ylab="Voltage")
#plot3
t = subset(data, select=c(Time,Sub_metering_1,Sub_metering_2,Sub_metering_3))
t$Sub_metering_1 = as.numeric(as.character(t$Sub_metering_1))
t$Sub_metering_2 = as.numeric(as.character(t$Sub_metering_2))
t$Sub_metering_3 = as.numeric(as.character(t$Sub_metering_3))
plot( subset(t,select=c(Time,Sub_metering_1)),type="l",xlab="",ylab="Energy sub metering",ylim=c(0,40),col="grey50")
lines(subset(t,select=c(Time,Sub_metering_2)),col="red")
lines(subset(t,select=c(Time,Sub_metering_3)),col="blue")
legend("topright", col = c("grey50", "red","blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),lwd = 1,cex = 1, merge = TRUE)
#plot 4
t = subset(data, select=c(Time,Global_reactive_power))
t$Global_reactive_power = as.numeric(as.character(t$Global_reactive_power))
plot(t,type="l",xlab="datetime",ylab="Global_reactive_power")
dev.off()
}
read_data <- function() {
##data <- read.csv2(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'))
t <-read.csv2("household_power_consumption.txt",nrows=0)
data <- read.csv2(pipe("findstr /B /R ^[1-2]/2/2007 household_power_consumption.txt"),header = FALSE)
names(data) <- names(t)
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
data$Time <- paste(data$Date,data$Time)
data$Time <- strptime(data$Time,format="%Y-%m-%d %H:%M:%S")
data
}
|
# Shiny dashboard exploring FSNAU rainfall / maize-price indicators for
# Somalia.  Two tabs: a histogram of a chosen variable and a choropleth map,
# both filterable by year, month and region.
library(shiny)
library(tidyverse)
library(rlang)
library(sf)
library(tmap)
# prepare data -------------------------------------------------------------
# Tabular indicators plus UNDP admin-2 district boundaries.
data<-read_csv(here::here("data","FSNAU_proc.csv"))
shapes<-sf::st_read("./shapefile", "Som_Admbnda_Adm2_UNDP")
# Rename the shapefile's admin columns to match the CSV's join keys.
shapes1<-shapes%>%
rename(regions=admin1Name,
districts=admin2Name)
# Join indicators to geometry, keep the columns of interest, and coerce the
# measured variables to numeric (they arrive as text).
data1<-data%>%
full_join(.,shapes1)%>%
select(year,month,regions,districts,rainfall,price_maize,geometry)%>%
mutate(rainfall=as.numeric(rainfall),
price_maize=as.numeric(price_maize))%>%
st_as_sf()
# Names of the plottable variables (everything except id/grouping columns);
# used to populate the variable-selection dropdowns.
data_names<-data1%>%
select(-month,-year,-regions,-districts)%>%
st_drop_geometry()
# static ------------------------------------------------------------------
# Prototyping code kept for reference: a fixed-filter histogram (commented)
# and a fixed-filter tmap choropleth that draws once when the script loads.
#data1%>%
#filter(year==2019,
#month==4,
#regions=="Bakool")%>%
#ggplot()+
#geom_histogram(aes(x=as.numeric(rainfall)),fill="grey",colour = "black")
data1%>%
filter(year==2019,
month==4,
regions=="Bakool")%>%
tm_shape() +
tm_borders()+
tm_fill(col="rainfall",legend.show = TRUE,palette="Blues",contrast=c(0.2,1))
# dashboard ---------------------------------------------------------------
# UI: one tab per visualisation, each with its own year/month/region/variable
# selectors so the two views can be filtered independently.
ui <- fluidPage(
tabsetPanel(
tabPanel("Histogram",
titlePanel("Histogram"),
fluidRow(column(9, plotOutput("histogram")),
column(3,selectInput(inputId = "year_hist",label = "select year",choices=2010:2019)),
column(3,selectInput(inputId = "month_hist",label = "select month",choices=1:12)),
column(3,selectInput(inputId = "regions_hist",label = "select region",choices=unique(data1$regions))),
column(3,selectInput(inputId = "variable_hist",label = "select variable",choices=names(data_names)))
)
),
tabPanel("Maps",
titlePanel("Maps"),
fluidRow(column(9, plotOutput("maps")),
column(3,selectInput(inputId = "year_map",label = "select year",choices=2010:2019)),
column(3,selectInput(inputId = "month_map",label = "select month",choices=1:12)),
column(3,selectInput(inputId = "regions_map",label = "select region",choices=unique(data1$regions))),
column(3,selectInput(inputId = "variable_map",label = "select variable",choices=names(data_names)))
)
)
)
)
# Server: thin reactive wrappers around each input feed the two renderPlot
# outputs; the histogram selects its column via .data[[...]] tidy-eval, the
# map passes the column name string straight to tm_fill().
server <- function(input, output, session) {
year_hist <- reactive({
input$year_hist
})
month_hist <- reactive({
input$month_hist
})
regions_hist <- reactive({
input$regions_hist
})
output$histogram<-renderPlot(data1%>%
filter(year==year_hist(),
month==month_hist(),
regions==regions_hist())%>%
ggplot()+
geom_histogram(aes(x=as.numeric(.data[[input$variable_hist]])),fill="grey",colour = "black"))
year_map <- reactive({
input$year_map
})
month_map <- reactive({
input$month_map
})
regions_map <- reactive({
input$regions_map
})
variable_map <- reactive({
input$variable_map
})
output$maps<-renderPlot(data1%>%
filter(year==year_map(),
month==month_map(),
regions==regions_map())%>%
tm_shape() +
tm_borders()+
tm_fill(col=variable_map(),legend.show = TRUE,palette="Blues",contrast=c(0.2,1)))
}
# Launch the application.
shinyApp(ui, server)
|
/app2.R
|
no_license
|
yorokoby/simulation_withgit2
|
R
| false
| false
| 3,514
|
r
|
library(shiny)
library(tidyverse)
library(rlang)
library(sf)
library(tmap)
# prepare data -------------------------------------------------------------
data<-read_csv(here::here("data","FSNAU_proc.csv"))
shapes<-sf::st_read("./shapefile", "Som_Admbnda_Adm2_UNDP")
shapes1<-shapes%>%
rename(regions=admin1Name,
districts=admin2Name)
data1<-data%>%
full_join(.,shapes1)%>%
select(year,month,regions,districts,rainfall,price_maize,geometry)%>%
mutate(rainfall=as.numeric(rainfall),
price_maize=as.numeric(price_maize))%>%
st_as_sf()
data_names<-data1%>%
select(-month,-year,-regions,-districts)%>%
st_drop_geometry()
# static ------------------------------------------------------------------
#data1%>%
#filter(year==2019,
#month==4,
#regions=="Bakool")%>%
#ggplot()+
#geom_histogram(aes(x=as.numeric(rainfall)),fill="grey",colour = "black")
data1%>%
filter(year==2019,
month==4,
regions=="Bakool")%>%
tm_shape() +
tm_borders()+
tm_fill(col="rainfall",legend.show = TRUE,palette="Blues",contrast=c(0.2,1))
# dashboard ---------------------------------------------------------------
ui <- fluidPage(
tabsetPanel(
tabPanel("Histogram",
titlePanel("Histogram"),
fluidRow(column(9, plotOutput("histogram")),
column(3,selectInput(inputId = "year_hist",label = "select year",choices=2010:2019)),
column(3,selectInput(inputId = "month_hist",label = "select month",choices=1:12)),
column(3,selectInput(inputId = "regions_hist",label = "select region",choices=unique(data1$regions))),
column(3,selectInput(inputId = "variable_hist",label = "select variable",choices=names(data_names)))
)
),
tabPanel("Maps",
titlePanel("Maps"),
fluidRow(column(9, plotOutput("maps")),
column(3,selectInput(inputId = "year_map",label = "select year",choices=2010:2019)),
column(3,selectInput(inputId = "month_map",label = "select month",choices=1:12)),
column(3,selectInput(inputId = "regions_map",label = "select region",choices=unique(data1$regions))),
column(3,selectInput(inputId = "variable_map",label = "select variable",choices=names(data_names)))
)
)
)
)
server <- function(input, output, session) {
year_hist <- reactive({
input$year_hist
})
month_hist <- reactive({
input$month_hist
})
regions_hist <- reactive({
input$regions_hist
})
output$histogram<-renderPlot(data1%>%
filter(year==year_hist(),
month==month_hist(),
regions==regions_hist())%>%
ggplot()+
geom_histogram(aes(x=as.numeric(.data[[input$variable_hist]])),fill="grey",colour = "black"))
year_map <- reactive({
input$year_map
})
month_map <- reactive({
input$month_map
})
regions_map <- reactive({
input$regions_map
})
variable_map <- reactive({
input$variable_map
})
output$maps<-renderPlot(data1%>%
filter(year==year_map(),
month==month_map(),
regions==regions_map())%>%
tm_shape() +
tm_borders()+
tm_fill(col=variable_map(),legend.show = TRUE,palette="Blues",contrast=c(0.2,1)))
}
shinyApp(ui, server)
|
# ASSIGNMENT #1
# QUESTION 2A: read the birth-weight data set.
mydata <- read.table("C:/Users/Joann/Documents/R Directory/Weight_birth.csv",
                     header=TRUE)

# QUESTION 2B: mean birth weight (BWT, grams) by maternal smoking status.
# BUG FIX: the original selected SMOKE < 1 (non-smokers) for "smoking
# mothers" and SMOKE > 0 (smokers) for "non-smoking mothers", so the two
# reported means were swapped.  SMOKE is assumed to be a 0/1 indicator with
# 1 = mother smoked -- TODO confirm against the data dictionary.
smokedata <- subset(mydata, SMOKE > 0, select=c(ID:LOW))
mean1 <- mean(smokedata$BWT)
nosmokedata <- subset(mydata, SMOKE < 1, select=c(ID:LOW))
mean2 <- mean(nosmokedata$BWT)
sprintf("The mean weight of children born to smoking mothers is %s grams. The mean weight of children born to non-smoking mothers is %s grams.",
        mean1, mean2)

# QUESTION 3C: standard deviation of birth weight, split on whether the
# mother had any previous premature births (PTL > 0).
prematuredata <- subset(mydata, PTL > 0, select=c(ID:LOW))
stdv1 <- sd(prematuredata$BWT)
nonprematuredata <- subset(mydata, PTL < 1, select=c(ID:LOW))
stdv2 <- sd(nonprematuredata$BWT)
sprintf("The standard deviation of the birth weight of children born to mothers of previous premature births is %s. The standard deviation of the birth weight of children born to mothers of non-previous premature births is %s.",
        stdv1, stdv2)

# QUESTION 3D: fraction of smoking mothers with a previous premature birth.
numerator <- nrow(subset(mydata, SMOKE > 0 & PTL > 0))
denominator <- nrow(subset(mydata, SMOKE > 0))
sprintf("The fraction of smoking mothers which have previously had premature births is %s/%s.",
        numerator, denominator)
|
/birthData.R
|
no_license
|
gpconsiglio/SCINETcourse2
|
R
| false
| false
| 1,436
|
r
|
#ASSIGNMENT#1
#QUESTION2A
mydata <- read.table("C:/Users/Joann/Documents/R Directory/Weight_birth.csv",
header=TRUE)
#QUESTION2B
smokedata <- subset(mydata, SMOKE < 1, select=c(ID:LOW))
#print(smokedata)
mean1 <- mean(smokedata$BWT)
#print(mean1)
#sprintf("The mean weight of children born to smoking mothers is %s grams.", mean1)
nosmokedata <- subset(mydata, SMOKE > 0, select=c(ID:LOW))
#print(nosmokedata)
mean2 <- mean(nosmokedata$BWT)
#print(mean2)
#sprintf("The mean weight of children born to non-smoking mothers is %s grams.", mean2)
sprintf("The mean weight of children born to smoking mothers is %s grams. The mean weight of children born to non-smoking mothers is %s grams.",
mean1, mean2)
#QUESTION3C
prematuredata <- subset(mydata, PTL > 0, select=c(ID:LOW))
stdv1 <- sd(prematuredata$BWT)
nonprematuredata <- subset(mydata, PTL < 1, select=c(ID:LOW))
stdv2 <- sd(nonprematuredata$BWT)
sprintf("The standard deviation of the birth weight of children born to mothers of previous premature births is %s. The standard deviation of the birth weight of children born to mothers of non-previous premature births is %s.",
stdv1, stdv2)
#QUESTION3D
numerator <- nrow(subset(mydata, SMOKE > 0 & PTL > 0))
denominator <- nrow(subset(mydata, SMOKE > 0))
sprintf("The fraction of smoking mothers which have previously had premature births is %s/%s.",
numerator, denominator)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/key-helpers.R
\name{check_set_equality}
\alias{check_set_equality}
\title{Test if the value sets of two different columns in two different tables are the same}
\usage{
check_set_equality(t1, c1, t2, c2)
}
\arguments{
\item{t1}{The data frame that contains column \code{c1}.}
\item{c1}{The column of \code{t1} that should only contain values that are also present in column \code{c2} of data frame \code{t2}.}
\item{t2}{The data frame that contains column \code{c2}.}
\item{c2}{The column of \code{t2} that should only contain values that are also present in column \code{c1} of data frame \code{t1}.}
}
\description{
\code{check_set_equality()} is a wrapper of \code{check_if_subset()}.
It tests if one value set is a subset of another and vice versa, i.e., if both sets are the same.
If not, it throws an error.
}
\examples{
data_1 <- tibble::tibble(a = c(1, 2, 1), b = c(1, 4, 1), c = c(5, 6, 7))
data_2 <- tibble::tibble(a = c(1, 2, 3), b = c(4, 5, 6), c = c(7, 8, 9))
# this is failing:
try(check_set_equality(data_1, a, data_2, a))
data_3 <- tibble::tibble(a = c(2, 1, 2), b = c(4, 5, 6), c = c(7, 8, 9))
# this is passing:
check_set_equality(data_1, a, data_3, a)
}
|
/man/check_set_equality.Rd
|
permissive
|
bbecane/dm
|
R
| false
| true
| 1,254
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/key-helpers.R
\name{check_set_equality}
\alias{check_set_equality}
\title{Test if the value sets of two different columns in two different tables are the same}
\usage{
check_set_equality(t1, c1, t2, c2)
}
\arguments{
\item{t1}{The data frame that contains column \code{c1}.}
\item{c1}{The column of \code{t1} that should only contain values that are also present in column \code{c2} of data frame \code{t2}.}
\item{t2}{The data frame that contains column \code{c2}.}
\item{c2}{The column of \code{t2} that should only contain values that are also present in column \code{c1} of data frame \code{t1}.}
}
\description{
\code{check_set_equality()} is a wrapper of \code{check_if_subset()}.
It tests if one value set is a subset of another and vice versa, i.e., if both sets are the same.
If not, it throws an error.
}
\examples{
data_1 <- tibble::tibble(a = c(1, 2, 1), b = c(1, 4, 1), c = c(5, 6, 7))
data_2 <- tibble::tibble(a = c(1, 2, 3), b = c(4, 5, 6), c = c(7, 8, 9))
# this is failing:
try(check_set_equality(data_1, a, data_2, a))
data_3 <- tibble::tibble(a = c(2, 1, 2), b = c(4, 5, 6), c = c(7, 8, 9))
# this is passing:
check_set_equality(data_1, a, data_3, a)
}
|
## Exploratory Data Analysis Week 1. Course Project 1.

## Step 0. Estimate memory required to read entire file
# memory required = no. of columns * no. of rows * 8 bytes/numeric
# One gigabyte has 10^9 bytes; the dataset has 2,075,259 rows and 9 columns.
giga_memory_req <- (9 * 2075259 * 8) / (10^9)

## Step 1. Stage the directory and get the data
#clear console
cat("\014")
#find out what the current directory is
getwd()
# BUG FIX: the original called setwd() with no argument, which is an error.
# Set a working directory explicitly if needed, e.g. setwd("~/exdata").
#check if folder "data" exists in current directory; create it if absent.
if(!file.exists("./data")){dir.create("./data")}
#create a new variable fileURL to store the URL
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#download to folder "data"; method = "curl" handles the https URL
download.file(fileURL, destfile = "./data/dataset.zip", method = "curl")
#unzip the zip file into the working directory
unzip("./data/dataset.zip")
#list the files in the working directory
list.files()

## Step 2. Read the data
# Only data from 2007-02-01 and 2007-02-02 is used.  Readings are per minute
# and there are 1440 minutes per day, so 2880 rows cover both days.
# Missing values are coded as "?"; skip = 66637 positions the read at the
# first row for 2007-02-01 (determined by previewing the file).
# BUG FIX: header = TRUE combined with skip consumed the first data row as a
# bogus header line; read with header = FALSE and assign names manually.
mydata <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", row.names = NULL, na.strings = "?", skip = 66637, nrows = 2880)
#view the first 6 rows
head(mydata)
#add back column names
x <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
colnames(mydata) <- x
#check classes of each column
str(mydata)

# BUG FIX: the plots below used a "datetime" column that was never created.
# Build it from the Date (dd/mm/yyyy) and Time (HH:MM:SS) text columns.
mydata$datetime <- strptime(paste(mydata$Date, mydata$Time), format = "%d/%m/%Y %H:%M:%S")

## 3. Make the required plots
## Plot 4. Four scatterplots in a 2 x 2 grid filled column-wise (mfcol).
png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfcol = c(2,2))
# (1) datetime versus Global_active_power
with(mydata, plot(datetime, Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l"))
# (2) datetime versus the three sub-metering series, plus a legend
with(mydata, plot(datetime, Sub_metering_1, col = "black", type = "l", pch = "15", xlab = "", ylab = "Energy sub metering"))
with(mydata, lines(datetime, Sub_metering_2, col = "red", type = "l", pch = "15"))
with(mydata, lines(datetime, Sub_metering_3, col = "blue", type = "l", pch = "15"))
legend("topright", lty = 1, lwd = 2, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# (3) datetime versus Voltage
with(mydata, plot(datetime, Voltage, xlab = "datetime", ylab = "Voltage", type = "l"))
# (4) datetime versus Global_reactive_power
with(mydata, plot(datetime, Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l"))
#close off the graphical device
dev.off()
|
/plot4.R
|
no_license
|
conniehon/ExData_Plotting1
|
R
| false
| false
| 3,634
|
r
|
## Exploratory Data Analysis Week 1. Course Project 1.
## Step 0. Estimate memory required to read entire file
# Prior to loading the dataset into R, determine how much memory is required to read the entire file.
# Formula: memory required = no. of column * no. of rows * 8 bytes/numeric
# One gigabyte has 10^9 bytes
# The dataset has 2,075,259 rows and 9 columns.
giga_memory_req <- (9 * 2075259 * 8) / (10^9)
## Step 1. Stage the directory and get the data
#clear console
cat("\014")
#find out what the current directory is
getwd()
#set to the desired working directory using:
setwd()
#check if folder "data" exists in current directory. if it does not, a folder "data" will be created in current directory.
if(!file.exists("./data")){dir.create("./data")}
#create a new variable fileURL to store the URL
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#download file from fileURL and store it in folder "data" under filename "dataset.zip", specify method = "curl" to workaround the https component of the URL
download.file(fileURL, destfile = "./data/dataset.zip", method = "curl")
#unzip the zip file and stores files in the same folder as folder "data"
unzip("./data/dataset.zip")
#lists the files in the working directory
list.files()
#preview the data using a text editor such as Visual Studio Code
## Step 2. Read the data
# From course project question, "We will only be using data from the dates 2007-02-01 and 2007-02-02"
#the first column of the dataset are dates in dd-mm-yyyy format
# Plot 4 of the ExData course project: a 2x2 panel of time-series plots
# built from two days of the UCI household power consumption data.
# Note that in this dataset missing values are coded as "?"
#from preview of data in text editor, obtain the row number that contains data for 2007-02-01
#since these data are obtained per minute, and there are 1440 minutes in a day, we need 2880 rows to cover data from 2007-02-01 to 2007-02-02
# NOTE(review): with skip = 66637 and header = TRUE the first *data* row is
# consumed as the header (the real header was skipped), which is why the
# column names are re-assigned below -- confirm the skip offset accounts for this.
mydata <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", row.names = NULL, na.strings = "?", skip = 66637, nrows = 2880)
#view the first 6 rows
head(mydata)
#add back column names
x <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
colnames(mydata) <- x
#check classes of each column
str(mydata)
## 3. Make the required plots
## Plot 4. Four different scatterplots (two already done)
#open up graphic file png
png(filename = "plot4.png", width = 480, height = 480, units = "px")
#set the required display matrix (plots fill column-wise, 2 rows x 2 columns)
par(mfcol = c(2,2))
# NOTE(review): `datetime` is not created in the code visible here --
# presumably a POSIXct column built earlier from Date + Time; confirm the
# preceding section of this script defines it before these plots run.
#create the first plot of "datetime" versus "Global_active_power"
with(mydata, plot(datetime, Global_active_power, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l"))
#create the second plot of "datetime" versus "Sub_metering_1" followed by each of the other two, followed by the legend
with(mydata, plot(datetime, Sub_metering_1, col = "black", type = "l", pch = "15", xlab = "", ylab = "Energy sub metering"))
with(mydata, lines(datetime, Sub_metering_2, col = "red", type = "l", pch = "15"))
with(mydata, lines(datetime, Sub_metering_3, col = "blue", type = "l", pch = "15"))
legend("topright", lty = 1, lwd = 2, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#create the third plot of "datetime" versus "Voltage"
with(mydata, plot(datetime, Voltage, xlab = "datetime", ylab = "Voltage", type = "l"))
#create the fourth plot of "datetime" versus "Global_reactive_power"
with(mydata, plot(datetime, Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l"))
#close off the graphical device
dev.off()
|
# Extracted example for simTool::evalGrids -- demonstrates running a grid of
# data-generating functions against a grid of analysis functions, then
# summarizing the replicated results with reshape/plyr/ggplot2.
library(simTool)
### Name: evalGrids
### Title: Workhorse for simulation studies
### Aliases: evalGrids
### ** Examples
# rng: named min/max of a data vector; used as the "proc" applied to each dataset.
rng = function(data, ...) {
ret = range(data)
names(ret) = c("min", "max")
ret
}
# call runif(n=1), runif(n=2), runif(n=3)
# and range on the three "datasets"
# generated by runif(n=1), runif(n=2), runif(n=3)
eg = evalGrids(
expandGrid(fun="runif", n=1:3),
expandGrid(proc="rng"),
rep=10
)
eg
# summarizing the results in a data.frame
as.data.frame(eg)
# we now generate data for a regression
# and fit different regression models
# note that we use SD and not sd (the
# reason for this is the cast() call below)
regData = function(n, SD){
data.frame(
x=seq(0,1,length=n),
y=rnorm(n, sd=SD))
}
eg = evalGrids(
expandGrid(fun="regData", n=20, SD=1:2),
expandGrid(proc="lm", formula=c("y~x", "y~I(x^2)")),
replications=2)
# can not be converted to data.frame, because
# an object of class "lm" can not converted to
# a data.frame
try(as.data.frame(eg))
# for the data.frame we just extract the r.squared
# from the fitted model
as.data.frame(eg, convert.result.fun=function(fit) c(rsq=summary(fit)$r.squared))
# for the data.frame we just extract the coefficients
# from the fitted model
df = as.data.frame(eg, convert.result.fun=coef)
# since we have done 2 replication we can calculate
# some summary statistics
library("reshape")
df$replication=NULL
mdf = melt(df, id=1:7, na.rm=TRUE)
cast(mdf, ... ~ ., c(mean, length, sd))
# note if the data.frame would contain the column
# named "sd" instead of "SD" the cast will generate
# an error (the column name would clash with the sd() aggregator)
names(df)[5] = "sd"
mdf = melt(df, id=1:7, na.rm=TRUE)
try(cast(mdf, ... ~ ., c(mean, length, sd)))
# extracting the summary of the fitted.model
as.data.frame(eg, convert.result.fun=function(x) {
ret = coef(summary(x))
data.frame(valueName = rownames(ret), ret, check.names=FALSE)
})
# we now compare two methods for
# calculating quantiles
# the functions and parameters
# that generate the data
N = c(10, 50, 100)
library("plyr")
dg = rbind.fill(
expandGrid(fun="rbeta", n=N, shape1=4, shape2=4),
expandGrid(fun="rnorm", n=N))
# definition of the two quantile methods
# emp.q: empirical sample quantiles; nor.q: normal-theory quantiles
# fitted from the sample mean and sd.
emp.q = function(data, probs) c(quantile(data, probs=probs))
nor.q = function(data, probs) {
ret = qnorm(probs, mean=mean(data), sd=sd(data))
names(ret) = names(quantile(1, probs=probs))
ret
}
# the functions and parameters that are
# applied to the generated data
pg = rbind.fill(expandGrid(proc=c("emp.q", "nor.q"), probs=c(0.01, 0.025, 0.05)))
# generate data and apply quantile methods
set.seed(1234)
eg = evalGrids(dg, pg, replication=50, progress=TRUE)
# convert the results to a data.frame
df = as.data.frame(eg)
df$replication=NULL
mdf = melt(df, id=1:8, na.rm=TRUE)
# calculate, print and plot summary statistics
require("ggplot2")
print(a <- arrange(cast(mdf, ... ~ ., c(mean, sd)), n))
ggplot(a, aes(x=fun, y=mean, color=proc)) + geom_point(size=I(3)) + facet_grid(probs ~ n)
|
/data/genthat_extracted_code/simTool/examples/evalGrids.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,961
|
r
|
# Extracted example for simTool::evalGrids -- demonstrates running a grid of
# data-generating functions against a grid of analysis functions, then
# summarizing the replicated results with reshape/plyr/ggplot2.
library(simTool)
### Name: evalGrids
### Title: Workhorse for simulation studies
### Aliases: evalGrids
### ** Examples
# rng: named min/max of a data vector; used as the "proc" applied to each dataset.
rng = function(data, ...) {
ret = range(data)
names(ret) = c("min", "max")
ret
}
# call runif(n=1), runif(n=2), runif(n=3)
# and range on the three "datasets"
# generated by runif(n=1), runif(n=2), runif(n=3)
eg = evalGrids(
expandGrid(fun="runif", n=1:3),
expandGrid(proc="rng"),
rep=10
)
eg
# summarizing the results in a data.frame
as.data.frame(eg)
# we now generate data for a regression
# and fit different regression models
# note that we use SD and not sd (the
# reason for this is the cast() call below)
regData = function(n, SD){
data.frame(
x=seq(0,1,length=n),
y=rnorm(n, sd=SD))
}
eg = evalGrids(
expandGrid(fun="regData", n=20, SD=1:2),
expandGrid(proc="lm", formula=c("y~x", "y~I(x^2)")),
replications=2)
# can not be converted to data.frame, because
# an object of class "lm" can not converted to
# a data.frame
try(as.data.frame(eg))
# for the data.frame we just extract the r.squared
# from the fitted model
as.data.frame(eg, convert.result.fun=function(fit) c(rsq=summary(fit)$r.squared))
# for the data.frame we just extract the coefficients
# from the fitted model
df = as.data.frame(eg, convert.result.fun=coef)
# since we have done 2 replication we can calculate
# some summary statistics
library("reshape")
df$replication=NULL
mdf = melt(df, id=1:7, na.rm=TRUE)
cast(mdf, ... ~ ., c(mean, length, sd))
# note if the data.frame would contain the column
# named "sd" instead of "SD" the cast will generate
# an error (the column name would clash with the sd() aggregator)
names(df)[5] = "sd"
mdf = melt(df, id=1:7, na.rm=TRUE)
try(cast(mdf, ... ~ ., c(mean, length, sd)))
# extracting the summary of the fitted.model
as.data.frame(eg, convert.result.fun=function(x) {
ret = coef(summary(x))
data.frame(valueName = rownames(ret), ret, check.names=FALSE)
})
# we now compare two methods for
# calculating quantiles
# the functions and parameters
# that generate the data
N = c(10, 50, 100)
library("plyr")
dg = rbind.fill(
expandGrid(fun="rbeta", n=N, shape1=4, shape2=4),
expandGrid(fun="rnorm", n=N))
# definition of the two quantile methods
# emp.q: empirical sample quantiles; nor.q: normal-theory quantiles
# fitted from the sample mean and sd.
emp.q = function(data, probs) c(quantile(data, probs=probs))
nor.q = function(data, probs) {
ret = qnorm(probs, mean=mean(data), sd=sd(data))
names(ret) = names(quantile(1, probs=probs))
ret
}
# the functions and parameters that are
# applied to the generated data
pg = rbind.fill(expandGrid(proc=c("emp.q", "nor.q"), probs=c(0.01, 0.025, 0.05)))
# generate data and apply quantile methods
set.seed(1234)
eg = evalGrids(dg, pg, replication=50, progress=TRUE)
# convert the results to a data.frame
df = as.data.frame(eg)
df$replication=NULL
mdf = melt(df, id=1:8, na.rm=TRUE)
# calculate, print and plot summary statistics
require("ggplot2")
print(a <- arrange(cast(mdf, ... ~ ., c(mean, sd)), n))
ggplot(a, aes(x=fun, y=mean, color=proc)) + geom_point(size=I(3)) + facet_grid(probs ~ n)
|
# Extracted example for marg::rsm -- fits regression-scale models to three
# classic datasets (house prices, Venice sea levels, Darwin's plant data).
library(marg)
### Name: rsm
### Title: Fit a Regression-Scale Model
### Aliases: rsm
### Keywords: models regression
### ** Examples
## House Price Data
data(houses)
houses.rsm <- rsm(price ~ ., family = student(5), data = houses)
## model fit including all covariates
houses.rsm <- rsm(price ~ ., family = student(5), data = houses,
method = "rsm.fit", control = glm.control(trace = TRUE))
## prints information about the iterative procedure at each iteration
update(houses.rsm, ~ . - bdroom + offset(7 * bdroom))
## "bdroom" is included as offset variable with fixed (= 7) coefficient
## Sea Level Data
data(venice)
attach(venice)
## Year is rescaled to (0, 1] to stabilise the polynomial fit below.
Year <- 1:51/51
venice.2.rsm <- rsm(sea ~ Year + I(Year^2), family = extreme)
## quadratic model fitted to sea level data
venice.1.rsm <- update(venice.2.rsm, ~. - I(Year^2))
## linear model fit
##
## Harmonic terms for the 11-year sunspot and 18.62-year tidal cycles.
c11 <- cos(2*pi*1:51/11) ; s11 <- sin(2*pi*1:51/11)
c19 <- cos(2*pi*1:51/18.62) ; s19 <- sin(2*pi*1:51/18.62)
venice.rsm <- rsm(sea ~ Year + I(Year^2) + c11 + s11 + c19 + s19,
family = extreme)
## includes 18.62-year astronomical tidal cycle and 11-year sunspot cycle
venice.11.rsm <- rsm(sea ~ Year + I(Year^2) + c11 + s11, family = extreme)
venice.19.rsm <- rsm(sea ~ Year + I(Year^2) + c19 + s19, family = extreme)
## includes either astronomical cycle
##
## comparison of linear, quadratic and periodic (11-year, 19-year) models
## NOTE(review): lower-case `year` here (vs. the rescaled `Year` above) is
## presumably a column of the attached `venice` data frame -- verify.
plot(year, sea, ylab = "sea level")
lines(year, fitted(venice.1.rsm))
lines(year, fitted(venice.2.rsm), col="red")
lines(year, fitted(venice.11.rsm), col="blue")
lines(year, fitted(venice.19.rsm), col="green")
##
detach()
## Darwin's Data on Growth Rates of Plants
data(darwin)
darwin.rsm <- rsm(cross - self ~ pot - 1, family = student(3),
data = darwin)
## Maximum likelihood estimates
darwin.rsm <- rsm(cross - self ~ pot - 1, family = Huber, data = darwin)
## M-estimates
|
/data/genthat_extracted_code/marg/examples/rsm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,910
|
r
|
# Extracted example for marg::rsm -- fits regression-scale models to three
# classic datasets (house prices, Venice sea levels, Darwin's plant data).
library(marg)
### Name: rsm
### Title: Fit a Regression-Scale Model
### Aliases: rsm
### Keywords: models regression
### ** Examples
## House Price Data
data(houses)
houses.rsm <- rsm(price ~ ., family = student(5), data = houses)
## model fit including all covariates
houses.rsm <- rsm(price ~ ., family = student(5), data = houses,
method = "rsm.fit", control = glm.control(trace = TRUE))
## prints information about the iterative procedure at each iteration
update(houses.rsm, ~ . - bdroom + offset(7 * bdroom))
## "bdroom" is included as offset variable with fixed (= 7) coefficient
## Sea Level Data
data(venice)
attach(venice)
## Year is rescaled to (0, 1] to stabilise the polynomial fit below.
Year <- 1:51/51
venice.2.rsm <- rsm(sea ~ Year + I(Year^2), family = extreme)
## quadratic model fitted to sea level data
venice.1.rsm <- update(venice.2.rsm, ~. - I(Year^2))
## linear model fit
##
## Harmonic terms for the 11-year sunspot and 18.62-year tidal cycles.
c11 <- cos(2*pi*1:51/11) ; s11 <- sin(2*pi*1:51/11)
c19 <- cos(2*pi*1:51/18.62) ; s19 <- sin(2*pi*1:51/18.62)
venice.rsm <- rsm(sea ~ Year + I(Year^2) + c11 + s11 + c19 + s19,
family = extreme)
## includes 18.62-year astronomical tidal cycle and 11-year sunspot cycle
venice.11.rsm <- rsm(sea ~ Year + I(Year^2) + c11 + s11, family = extreme)
venice.19.rsm <- rsm(sea ~ Year + I(Year^2) + c19 + s19, family = extreme)
## includes either astronomical cycle
##
## comparison of linear, quadratic and periodic (11-year, 19-year) models
## NOTE(review): lower-case `year` here (vs. the rescaled `Year` above) is
## presumably a column of the attached `venice` data frame -- verify.
plot(year, sea, ylab = "sea level")
lines(year, fitted(venice.1.rsm))
lines(year, fitted(venice.2.rsm), col="red")
lines(year, fitted(venice.11.rsm), col="blue")
lines(year, fitted(venice.19.rsm), col="green")
##
detach()
## Darwin's Data on Growth Rates of Plants
data(darwin)
darwin.rsm <- rsm(cross - self ~ pot - 1, family = student(3),
data = darwin)
## Maximum likelihood estimates
darwin.rsm <- rsm(cross - self ~ pot - 1, family = Huber, data = darwin)
## M-estimates
|
\name{lambda.pcut.cv1}
\alias{lambda.pcut.cv1}
\title{ Choose the Tuning Parameter of the Ridge Inverse and
Thresholding Level of the Empirical p-Values.}
\description{ Calculate total prediction error for test data after
fitting partial correlations from train data for all
values of lambda and pcut.}
\usage{ lambda.pcut.cv1(train, test, lambda, pcut) }
\arguments{
\item{train }{An n x p data matrix from which the model is fitted.}
\item{test }{An m x p data matrix from which the model is evaluated.}
\item{lambda}{A vector of candidate tuning parameters.}
\item{pcut }{A vector of candidate cutoffs of p-values.}
}
\value{ Total prediction error for all the candidate lambda and
p-value cutoff values.}
\references{
Ha, M. J. and Sun, W.
(2014).
Partial correlation matrix estimation using ridge penalty followed
by thresholding and re-estimation.
Biometrics, 70, 762--770.
}
\author{ Min Jin Ha }
\examples{
p <- 100 # number of variables
n <- 50 # sample size
###############################
# Simulate data
###############################
simulation <- simulateData(G = p, etaA = 0.02, n = n, r = 1)
data <- simulation$data[[1L]]
###############################
# Split into train/test sets
###############################
testindex <- sample(1L:n, 10L)
train <- data[-testindex,,drop = FALSE]
stdTrain <- scale(x = train, center = TRUE, scale = TRUE)
test <- data[testindex,,drop = FALSE]
stdTest <- scale(x = test, center = TRUE, scale = TRUE)
###############################
# Calculate total prediction
# errors for all candidate
# lambda and p-value cutoffs
###############################
lambda.array <- seq(from = 0.1, to = 5, length = 10) * (n - 1.0)
pcut.array <- seq(from = 0.01, to = 0.05, by = 0.01)
tpe <- lambda.pcut.cv1(train = stdTrain,
test = stdTest,
lambda = lambda.array,
pcut = pcut.array)
}
|
/man/lambda.pcut.cv1.Rd
|
no_license
|
cran/GGMridge
|
R
| false
| false
| 2,011
|
rd
|
\name{lambda.pcut.cv1}
\alias{lambda.pcut.cv1}
\title{ Choose the Tuning Parameter of the Ridge Inverse and
Thresholding Level of the Empirical p-Values.}
\description{ Calculate total prediction error for test data after
fitting partial correlations from train data for all
values of lambda and pcut.}
\usage{ lambda.pcut.cv1(train, test, lambda, pcut) }
\arguments{
\item{train }{An n x p data matrix from which the model is fitted.}
\item{test }{An m x p data matrix from which the model is evaluated.}
\item{lambda}{A vector of candidate tuning parameters.}
\item{pcut }{A vector of candidate cutoffs of p-values.}
}
\value{ Total prediction error for all the candidate lambda and
p-value cutoff values.}
\references{
Ha, M. J. and Sun, W.
(2014).
Partial correlation matrix estimation using ridge penalty followed
by thresholding and re-estimation.
Biometrics, 70, 762--770.
}
\author{ Min Jin Ha }
\examples{
p <- 100 # number of variables
n <- 50 # sample size
###############################
# Simulate data
###############################
simulation <- simulateData(G = p, etaA = 0.02, n = n, r = 1)
data <- simulation$data[[1L]]
###############################
# Split into train/test sets
###############################
testindex <- sample(1L:n, 10L)
train <- data[-testindex,,drop = FALSE]
stdTrain <- scale(x = train, center = TRUE, scale = TRUE)
test <- data[testindex,,drop = FALSE]
stdTest <- scale(x = test, center = TRUE, scale = TRUE)
###############################
# Calculate total prediction
# errors for all candidate
# lambda and p-value cutoffs
###############################
lambda.array <- seq(from = 0.1, to = 5, length = 10) * (n - 1.0)
pcut.array <- seq(from = 0.01, to = 0.05, by = 0.01)
tpe <- lambda.pcut.cv1(train = stdTrain,
test = stdTest,
lambda = lambda.array,
pcut = pcut.array)
}
|
# Single-file Shiny app: the user picks a scheduled departure time and the
# app plots a logistic-model estimate of the chance of flight delay
# (point estimate bracketed by +/- one coefficient perturbation).
library(lubridate); library(tidyverse); library(shiny); library(nycflights13); library(ggthemes)
ui <- bootstrapPage( ## Define the UI
sliderInput(inputId = 'hr', label = 'Scheduled departure hour: ', min = 0, max = 23, value = 12),
sliderInput(inputId = 'min', label = 'Scheduled departure minute: ', min = 0, max = 59, value = 30),
plotOutput('plot')
)
server <- function(input, output) { ## Define the server code
output$plot <- renderPlot({
# The three y-values are 100 * logistic(a - b * |minutes from 19:35|)
# with the slope b perturbed by -1.48e-5, 0, and +1.48e-5 -- presumably
# a fitted coefficient and its uncertainty; TODO confirm the source fit.
plot(x = rep(round(input$hr * 100 +input$min, 1), 3),
y = c(100/(1+exp(-(0.279603750 - (0.001999519 - 0.0000148) * abs(input$hr * 60 + input$min - (19 * 60 + 35))))),
100/(1+exp(-(0.279603750 - 0.001999519 * abs(input$hr * 60 + input$min - (19 * 60 + 35))))),
100/(1+exp(-(0.279603750 - (0.001999519 + 0.0000148) * abs(input$hr * 60 + input$min - (19 * 60 + 35)))))),
xlim = range(input$hr * 100 +input$min - 15, input$hr * 100 + input$min + 15),
type = 'b',
lwd = 3,
lty = 2,
# NOTE(review): `width` is not a graphical parameter of plot(); it is
# passed through `...` and will likely be ignored with a warning -- verify.
width = 0,
tck = .02,
xlab = 'Scheduled departure time',
ylab = 'Percent chance of delay',
pch = 3,
main = paste('Plan around a chance of delay of ', round(100/(1+exp(-(0.279603750 - (0.001999519) * abs(input$hr * 60 + input$min - (19 * 60 + 35)))))), '%'))
})
}
shinyApp(ui = ui, server = server) ## Return a Shiny app object
|
/Assignments/Assignment 2/app.r
|
permissive
|
mariobonifacio/R4D
|
R
| false
| false
| 1,406
|
r
|
# Single-file Shiny app: the user picks a scheduled departure time and the
# app plots a logistic-model estimate of the chance of flight delay
# (point estimate bracketed by +/- one coefficient perturbation).
library(lubridate); library(tidyverse); library(shiny); library(nycflights13); library(ggthemes)
ui <- bootstrapPage( ## Define the UI
sliderInput(inputId = 'hr', label = 'Scheduled departure hour: ', min = 0, max = 23, value = 12),
sliderInput(inputId = 'min', label = 'Scheduled departure minute: ', min = 0, max = 59, value = 30),
plotOutput('plot')
)
server <- function(input, output) { ## Define the server code
output$plot <- renderPlot({
# The three y-values are 100 * logistic(a - b * |minutes from 19:35|)
# with the slope b perturbed by -1.48e-5, 0, and +1.48e-5 -- presumably
# a fitted coefficient and its uncertainty; TODO confirm the source fit.
plot(x = rep(round(input$hr * 100 +input$min, 1), 3),
y = c(100/(1+exp(-(0.279603750 - (0.001999519 - 0.0000148) * abs(input$hr * 60 + input$min - (19 * 60 + 35))))),
100/(1+exp(-(0.279603750 - 0.001999519 * abs(input$hr * 60 + input$min - (19 * 60 + 35))))),
100/(1+exp(-(0.279603750 - (0.001999519 + 0.0000148) * abs(input$hr * 60 + input$min - (19 * 60 + 35)))))),
xlim = range(input$hr * 100 +input$min - 15, input$hr * 100 + input$min + 15),
type = 'b',
lwd = 3,
lty = 2,
# NOTE(review): `width` is not a graphical parameter of plot(); it is
# passed through `...` and will likely be ignored with a warning -- verify.
width = 0,
tck = .02,
xlab = 'Scheduled departure time',
ylab = 'Percent chance of delay',
pch = 3,
main = paste('Plan around a chance of delay of ', round(100/(1+exp(-(0.279603750 - (0.001999519) * abs(input$hr * 60 + input$min - (19 * 60 + 35)))))), '%'))
})
}
shinyApp(ui = ui, server = server) ## Return a Shiny app object
|
/Nuova cartella/cluster.r
|
no_license
|
maria-natale/Progetto_SAD
|
R
| false
| false
| 6,995
|
r
| ||
# Fit a cross-validated elastic-net regression (alpha = 0.01, i.e. nearly
# ridge) to the oesophagus training set and append the fitted-path summary
# to a log file.
library(glmnet)

# Read the training data.  The original used `head=T`, which only worked via
# partial argument matching against `header`; spell it out with TRUE.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/oesophagus.csv", header = TRUE, sep = ",")

# Predictors are columns 4..ncol; the response is column 1.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
# Named `fit` rather than `glm`, which would shadow stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.01, family = "gaussian", standardize = TRUE)

# Append the glmnet solution-path summary to the results log.
sink('./oesophagus_007.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
|
/Model/EN/Lasso/oesophagus/oesophagus_007.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 352
|
r
|
# Fit a cross-validated elastic-net regression (alpha = 0.01, i.e. nearly
# ridge) to the oesophagus training set and append the fitted-path summary
# to a log file.
library(glmnet)

# Read the training data.  The original used `head=T`, which only worked via
# partial argument matching against `header`; spell it out with TRUE.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/oesophagus.csv", header = TRUE, sep = ",")

# Predictors are columns 4..ncol; the response is column 1.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
# Named `fit` rather than `glm`, which would shadow stats::glm().
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.01, family = "gaussian", standardize = TRUE)

# Append the glmnet solution-path summary to the results log.
sink('./oesophagus_007.txt', append = TRUE)
print(fit$glmnet.fit)
sink()
|
# Load MAD-Mex pixel-sample estimation outputs and define readers for the
# per-state ("top"/"top3") class tables and the aggregated accuracy tables.
library(tidyverse)
# Sampling weights for the 2018 bits; `edo` is the state column.
bits_2018_w <- read_rds(path = "../../datos_salida/bits_2018_weights.rdata")
# State list with a national aggregate first.
edos <- c("Nacional", sort(unique(bits_2018_w$edo)))
# leer_tabs_clase: for one state, locate and read the per-class "top" and
# "top3" estimate CSVs (filenames matched on "<state>_17").
# Returns a list with elements `top` and `top3` (tibbles).
leer_tabs_clase <- function(mi_edo) {
mi_edo_top_file <- list.files("../../datos_salida/estimaciones/estimaciones_clase_edo_top",
pattern = paste0(mi_edo, "_17"), full.names = TRUE, ignore.case = TRUE)
mi_edo_top <- read_csv(mi_edo_top_file)
mi_edo_top3_file <- list.files("../../datos_salida/estimaciones/estimaciones_clase_edo_top3",
pattern = paste0(mi_edo, "_17"), full.names = TRUE, ignore.case = TRUE)
mi_edo_top3 <- read_csv(mi_edo_top3_file)
list(top = mi_edo_top, top3 = mi_edo_top3)
}
nal <- leer_tabs_clase("Nacional")
# leer_tabs_total: read every estimate CSV for one aggregation level,
# bind them column-wise, and reshape the correcto_* columns into a tidy
# table with one row per (tipo, n_clases) holding the estimate (`est`)
# and its standard error (`ee`).
# NOTE(review): assumes the bound files always contain the columns
# correcto_17_top .. correcto_31_top3_se -- confirm for every aggregation.
leer_tabs_total <- function(agregacion){
agregacion_files <- list.files("../../datos_salida/estimaciones",
pattern = str_c(agregacion, ".*csv$"), full.names = TRUE,
ignore.case = TRUE, include.dirs = FALSE)
map_dfc(agregacion_files, read_csv) %>%
select(ends_with("edo"), contains("correcto")) %>%
gather(var, valor, correcto_17_top:correcto_31_top3_se) %>%
mutate(
tipo = ifelse(str_detect(var, "top3"), "Top3", "Top"),
tipo_valor = ifelse(str_detect(var, "_se"), "ee", "est"),
n_clases = ifelse(str_detect(var, "31"), as.integer(31),
as.integer(17))
) %>%
select(-var) %>%
spread(tipo_valor, valor) %>%
arrange(n_clases) %>%
select(tipo, n_clases, contains("edo"), est, ee)
}
# Class-index lookup tables for the 17- and 31-class legends.
tab_clases_17 <- read_csv("../../datos_salida/tablas_indice/clases_17.csv")
tab_clases_31 <- read_csv("../../datos_salida/tablas_indice/clases_31.csv")
|
/5-resultados/scripts/crear_datos.R
|
no_license
|
tereom/madmex_pixel_sample
|
R
| false
| false
| 1,720
|
r
|
# Load MAD-Mex pixel-sample estimation outputs and define readers for the
# per-state ("top"/"top3") class tables and the aggregated accuracy tables.
library(tidyverse)
# Sampling weights for the 2018 bits; `edo` is the state column.
bits_2018_w <- read_rds(path = "../../datos_salida/bits_2018_weights.rdata")
# State list with a national aggregate first.
edos <- c("Nacional", sort(unique(bits_2018_w$edo)))
# leer_tabs_clase: for one state, locate and read the per-class "top" and
# "top3" estimate CSVs (filenames matched on "<state>_17").
# Returns a list with elements `top` and `top3` (tibbles).
leer_tabs_clase <- function(mi_edo) {
mi_edo_top_file <- list.files("../../datos_salida/estimaciones/estimaciones_clase_edo_top",
pattern = paste0(mi_edo, "_17"), full.names = TRUE, ignore.case = TRUE)
mi_edo_top <- read_csv(mi_edo_top_file)
mi_edo_top3_file <- list.files("../../datos_salida/estimaciones/estimaciones_clase_edo_top3",
pattern = paste0(mi_edo, "_17"), full.names = TRUE, ignore.case = TRUE)
mi_edo_top3 <- read_csv(mi_edo_top3_file)
list(top = mi_edo_top, top3 = mi_edo_top3)
}
nal <- leer_tabs_clase("Nacional")
# leer_tabs_total: read every estimate CSV for one aggregation level,
# bind them column-wise, and reshape the correcto_* columns into a tidy
# table with one row per (tipo, n_clases) holding the estimate (`est`)
# and its standard error (`ee`).
# NOTE(review): assumes the bound files always contain the columns
# correcto_17_top .. correcto_31_top3_se -- confirm for every aggregation.
leer_tabs_total <- function(agregacion){
agregacion_files <- list.files("../../datos_salida/estimaciones",
pattern = str_c(agregacion, ".*csv$"), full.names = TRUE,
ignore.case = TRUE, include.dirs = FALSE)
map_dfc(agregacion_files, read_csv) %>%
select(ends_with("edo"), contains("correcto")) %>%
gather(var, valor, correcto_17_top:correcto_31_top3_se) %>%
mutate(
tipo = ifelse(str_detect(var, "top3"), "Top3", "Top"),
tipo_valor = ifelse(str_detect(var, "_se"), "ee", "est"),
n_clases = ifelse(str_detect(var, "31"), as.integer(31),
as.integer(17))
) %>%
select(-var) %>%
spread(tipo_valor, valor) %>%
arrange(n_clases) %>%
select(tipo, n_clases, contains("edo"), est, ee)
}
# Class-index lookup tables for the 17- and 31-class legends.
tab_clases_17 <- read_csv("../../datos_salida/tablas_indice/clases_17.csv")
tab_clases_31 <- read_csv("../../datos_salida/tablas_indice/clases_31.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_cypher.R
\name{send_cypher}
\alias{send_cypher}
\title{Send a cypher file to be executed}
\usage{
send_cypher(
path,
con,
type = c("row", "graph"),
output = c("r", "json"),
include_stats = TRUE,
meta = FALSE
)
}
\arguments{
\item{path}{the path to the cypher file}
\item{con}{a connection object created with neo4j_api$new()}
\item{type}{the type of the format to query for (row or graph)}
\item{output}{the printing method (r or json)}
\item{include_stats}{whether or not to include stats}
\item{meta}{whether or not to include meta info}
}
\value{
a cypher call
}
\description{
Send a cypher file to be executed
}
\examples{
\dontrun{
send_cypher("random/create.cypher")
path <- "data-raw/constraints.cypher"
}
}
|
/man/send_cypher.Rd
|
permissive
|
gregleleu/neo4r
|
R
| false
| true
| 814
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_cypher.R
\name{send_cypher}
\alias{send_cypher}
\title{Send a cypher file to be executed}
\usage{
send_cypher(
path,
con,
type = c("row", "graph"),
output = c("r", "json"),
include_stats = TRUE,
meta = FALSE
)
}
\arguments{
\item{path}{the path to the cypher file}
\item{con}{a connexion object created with neo4j_api$new()}
\item{type}{the type of the format to query for (row or graph)}
\item{output}{the printing method (r or json)}
\item{include_stats}{whether of not to include stats}
\item{meta}{whether of not to include meta info}
}
\value{
a cypher call
}
\description{
Send a cypher file to be executed
}
\examples{
\dontrun{
send_cypher("random/create.cypher")
path <- "data-raw/constraints.cypher"
}
}
|
## makeCacheMatrix: wrap a matrix in an object that can cache its inverse.
##
## Returns a list of four accessor functions:
##   set(a)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL if not yet computed)
##
## The original returned as.matrix(set = ..., ...), which discards the
## accessors; a named list is required so callers can do A$get() etc.
makeCacheMatrix <- function(A = matrix()) {
  # B caches the inverse; NULL means "not computed yet".
  B <- NULL
  set <- function(a) {
    A <<- a    # replace the matrix in the enclosing environment
    B <<- NULL # invalidate the stale cached inverse
  }
  get <- function() A
  setinverse <- function(invmatrix) B <<- invmatrix
  getinverse <- function() B
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix().
##
## If the inverse is already cached it is returned immediately (with a
## message); otherwise it is computed with solve(), stored in the cache,
## and returned.  Extra arguments are forwarded to solve().  Assumes the
## stored matrix is invertible.
##
## Verifying B %*% A against the identity (as the original pseudo-code
## attempted) is unnecessary: set() clears the cache whenever the matrix
## changes, so a non-NULL cache is always current.
cacheSolve <- function(A, ...) {
  ## Return a matrix that is the inverse of the matrix stored in 'A'
  B <- A$getinverse()
  if (!is.null(B)) {
    message("getting cached data")
    return(B)
  }
  data <- A$get()
  B <- solve(data, ...)
  A$setinverse(B)
  B
}
|
/cachematrix.R
|
no_license
|
Taneesha/ProgrammingAssignment2
|
R
| false
| false
| 1,402
|
r
|
## makeCacheMatrix: wrap a matrix in an object that can cache its inverse.
##
## Returns a list of four accessor functions:
##   set(a)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse (NULL if not yet computed)
##
## The original returned as.matrix(set = ..., ...), which discards the
## accessors; a named list is required so callers can do A$get() etc.
makeCacheMatrix <- function(A = matrix()) {
  # B caches the inverse; NULL means "not computed yet".
  B <- NULL
  set <- function(a) {
    A <<- a    # replace the matrix in the enclosing environment
    B <<- NULL # invalidate the stale cached inverse
  }
  get <- function() A
  setinverse <- function(invmatrix) B <<- invmatrix
  getinverse <- function() B
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## cacheSolve: return the inverse of the matrix wrapped by makeCacheMatrix().
##
## If the inverse is already cached it is returned immediately (with a
## message); otherwise it is computed with solve(), stored in the cache,
## and returned.  Extra arguments are forwarded to solve().  Assumes the
## stored matrix is invertible.
##
## Verifying B %*% A against the identity (as the original pseudo-code
## attempted) is unnecessary: set() clears the cache whenever the matrix
## changes, so a non-NULL cache is always current.
cacheSolve <- function(A, ...) {
  ## Return a matrix that is the inverse of the matrix stored in 'A'
  B <- A$getinverse()
  if (!is.null(B)) {
    message("getting cached data")
    return(B)
  }
  data <- A$get()
  B <- solve(data, ...)
  A$setinverse(B)
  B
}
|
# Append one save-entry (built by qat_save_set_addup_1d) to `savelist` and
# return the extended list.  Most parameters (time/height/lat/lon/vec1..4,
# dim_mv) are accepted for interface uniformity with the other qat_call_*
# functions; only resultlist_part, element, savelist and savelistcounter
# are used here.
qat_call_save_set_addup <-
function(resultlist_part, element = -999, dim_mv=1, time = NULL, height = NULL, lat = NULL, lon = NULL, vec1 = NULL, vec2 = NULL, vec3 = NULL, vec4 = NULL, baseunit = NULL, savelist = list(), savelistcounter = 1) {
## functionality: calling save function for qat_save_set_addup_1d
## author: André Düsterhus
## date: 21.04.2011
## version: A0.1
## input: measurement_vector, workflowlist element, number of actual element, time vector (optional), latitude vector (optional), longitude vector (optional), 4 optional vectors, resultlist (optional), counter of resultlist (optional)
## output: list with the results and parameters of the lim rule analysis
# add informations to savelist
# NOTE(review): the counter is pre-incremented inside the subscript, so with
# the defaults (savelist = list(), savelistcounter = 1) the entry is stored
# at index 2 and slot 1 stays NULL -- confirm callers pass the index of the
# last *used* slot rather than the next free one.
# NOTE(review): the baseunit argument is ignored; a literal "" is always
# forwarded to qat_save_set_addup_1d -- confirm this is intentional.
savelist[[savelistcounter <- savelistcounter+1]] <- list(element=element, tosave = qat_save_set_addup_1d(resultlist_part, baseunit=""))
return(savelist)
}
|
/R/qat_call_save_set_addup.R
|
no_license
|
cran/qat
|
R
| false
| false
| 871
|
r
|
# Append one save-entry (built by qat_save_set_addup_1d) to `savelist` and
# return the extended list.  Most parameters (time/height/lat/lon/vec1..4,
# dim_mv) are accepted for interface uniformity with the other qat_call_*
# functions; only resultlist_part, element, savelist and savelistcounter
# are used here.
qat_call_save_set_addup <-
function(resultlist_part, element = -999, dim_mv=1, time = NULL, height = NULL, lat = NULL, lon = NULL, vec1 = NULL, vec2 = NULL, vec3 = NULL, vec4 = NULL, baseunit = NULL, savelist = list(), savelistcounter = 1) {
## functionality: calling save function for qat_save_set_addup_1d
## author: André Düsterhus
## date: 21.04.2011
## version: A0.1
## input: measurement_vector, workflowlist element, number of actual element, time vector (optional), latitude vector (optional), longitude vector (optional), 4 optional vectors, resultlist (optional), counter of resultlist (optional)
## output: list with the results and parameters of the lim rule analysis
# add informations to savelist
# NOTE(review): the counter is pre-incremented inside the subscript, so with
# the defaults (savelist = list(), savelistcounter = 1) the entry is stored
# at index 2 and slot 1 stays NULL -- confirm callers pass the index of the
# last *used* slot rather than the next free one.
# NOTE(review): the baseunit argument is ignored; a literal "" is always
# forwarded to qat_save_set_addup_1d -- confirm this is intentional.
savelist[[savelistcounter <- savelistcounter+1]] <- list(element=element, tosave = qat_save_set_addup_1d(resultlist_part, baseunit=""))
return(savelist)
}
|
# Extracted example for rqdatatable::ex_data_table.relop_order_expr --
# builds a small rquery pipeline that orders rows by abs(x) and executes
# it via data.table.
library(rqdatatable)
### Name: ex_data_table.relop_order_expr
### Title: Order rows by expression.
### Aliases: ex_data_table.relop_order_expr
### ** Examples
# build_frame: row-wise frame constructor from the wrapr/rquery family.
dL <- build_frame(
"x", "y" |
2L , "b" |
-4L , "a" |
3L , "c" )
# local_td wraps the local frame as a table description; %.>% is the
# wrapr "dot-arrow" pipe.
rquery_pipeline <- local_td(dL) %.>%
order_expr(., abs(x))
# Execute the pipeline with the data.table backend.
ex_data_table(rquery_pipeline)
|
/data/genthat_extracted_code/rqdatatable/examples/ex_data_table.relop_order_expr.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 339
|
r
|
# Extracted example for rqdatatable::ex_data_table.relop_order_expr --
# builds a small rquery pipeline that orders rows by abs(x) and executes
# it via data.table.
library(rqdatatable)
### Name: ex_data_table.relop_order_expr
### Title: Order rows by expression.
### Aliases: ex_data_table.relop_order_expr
### ** Examples
# build_frame: row-wise frame constructor from the wrapr/rquery family.
dL <- build_frame(
"x", "y" |
2L , "b" |
-4L , "a" |
3L , "c" )
# local_td wraps the local frame as a table description; %.>% is the
# wrapr "dot-arrow" pipe.
rquery_pipeline <- local_td(dL) %.>%
order_expr(., abs(x))
# Execute the pipeline with the data.table backend.
ex_data_table(rquery_pipeline)
|
\name{USstate}
\alias{USstate}
\docType{data}
\title{
US state dataset}
\description{
FIPS codes for the US states
}
\usage{data(USstate)}
\format{
A data.frame with 50 rows and 3 variables.
}
\details{
The dataset refers to the FIPS codes for the US states. The variables are \code{fips} (FIPS State Numeric Code), \code{usps} (Official USPS Code) and \code{name} (Name).
}
\source{
https://www.census.gov/
}
\references{
Giordani, P., Ferraro, M.B., Martella, F.: An Introduction to Clustering with R. Springer, Singapore (2020)
}
\author{
Paolo Giordani, Maria Brigida Ferraro, Francesca Martella
}
\examples{
data(USstate)
}
\keyword{data}
\keyword{multivariate}
|
/man/USstate.Rd
|
no_license
|
cran/datasetsICR
|
R
| false
| false
| 665
|
rd
|
\name{USstate}
\alias{USstate}
\docType{data}
\title{
US state dataset}
\description{
FIPS codes for the US states
}
\usage{data(USstate)}
\format{
A data.frame with 50 rows and 3 variables.
}
\details{
The dataset refers to the FIPS codes for the US states. The variables are \code{fips} (FIPS State Numeric Code), \code{usps} (Official USPS Code) and \code{name} (Name).
}
\source{
https://www.census.gov/
}
\references{
Giordani, P., Ferraro, M.B., Martella, F.: An Introduction to Clustering with R. Springer, Singapore (2020)
}
\author{
Paolo Giordani, Maria Brigida Ferraro, Francesca Martella
}
\examples{
data(USstate)
}
\keyword{data}
\keyword{multivariate}
|
# Check that the running R and swirl versions are recent enough.
#
# Emits an advisory message (sprintf template loaded from pattern.Rds via
# the package-internal .get_path helper) when R is older than 3.2.3, and
# the message stored in swirl_msg.Rds when the installed swirl is at or
# below 101.5.9.  Returns TRUE when swirl is new enough, FALSE otherwise;
# the R-version check only warns and does not affect the return value.
check_version <- function() {
  ver <- R.Version()
  running <- package_version(paste(ver$major, ver$minor, sep = "."))
  if (running < package_version("3.2.3")) {
    # NOTE(review): the comparison uses "3.2.3" while "3.3.3" is substituted
    # into the message template -- confirm which version is intended.
    message(sprintf(readRDS(.get_path("pattern.Rds")), running, "3.3.3"))
  }
  swirl_ok <- packageVersion("swirl") > package_version("101.5.9")
  if (!swirl_ok) {
    message(readRDS(.get_path("swirl_msg.Rds")))
  }
  swirl_ok
}
|
/00-Hello-QTLCourse/customTests.R
|
no_license
|
wush978/QTLCourse
|
R
| false
| false
| 370
|
r
|
# Check that the running R and swirl versions are recent enough.
#
# Emits an advisory message (sprintf template loaded from pattern.Rds via
# the package-internal .get_path helper) when R is older than 3.2.3, and
# the message stored in swirl_msg.Rds when the installed swirl is at or
# below 101.5.9.  Returns TRUE when swirl is new enough, FALSE otherwise;
# the R-version check only warns and does not affect the return value.
check_version <- function() {
  ver <- R.Version()
  running <- package_version(paste(ver$major, ver$minor, sep = "."))
  if (running < package_version("3.2.3")) {
    # NOTE(review): the comparison uses "3.2.3" while "3.3.3" is substituted
    # into the message template -- confirm which version is intended.
    message(sprintf(readRDS(.get_path("pattern.Rds")), running, "3.3.3"))
  }
  swirl_ok <- packageVersion("swirl") > package_version("101.5.9")
  if (!swirl_ok) {
    message(readRDS(.get_path("swirl_msg.Rds")))
  }
  swirl_ok
}
|
# Reference-class methods for Chart parameter handling.
# NOTE(review): `%or%` and `%notin%` are operators defined elsewhere in this
# package -- presumably a null-coalescing default and a negated %in%; the
# comments below assume those semantics.
Chart$methods(
# Set the default parameters (fill in any the user did not supply)
get_params = function(){
params$port <<- params$port %or% 8000
params$font <<- params$font %or% "Rockwell, Helvetica, Arial, sans"
params$title <<- params$title %or% params$main %or% internal$file$names$template # alias (main)
params$subtitle <<- params$subtitle %or% ""
params$padding <<- validate_padding(params$padding)
params$palette <<- params$palette %or% params[["col"]] # alias (col); [[ ]] avoids partial matching on "col"
params$rotate_y_title <<- params$rotate_y_title %or% TRUE
params$show_sidebar <<- params$show_sidebar %or% TRUE
},
# Ensure there are four padding values named top, right, bottom and left;
# unknown names raise an error, missing ones get package defaults.
validate_padding = function(padding){
padding <- as.list(padding)
valid_padding_names <- c("top", "right", "bottom", "left")
if (any(names(padding) %notin% valid_padding_names)){
bad_padding_elements <- padding[names(padding) %notin% valid_padding_names]
stop(sprintf("\nWrong padding elements:\n%s", enumerate(bad_padding_elements)), call. = FALSE)
} else {
padding$top <- padding$top %or% 150
padding$right <- padding$right %or% 400
padding$bottom <- padding$bottom %or% 70
padding$left <- padding$left %or% 100
}
padding
}
)
|
/R/Chart_params.R
|
no_license
|
freyreeste/clickme
|
R
| false
| false
| 1,379
|
r
|
# Reference-class methods for Chart parameter handling.
# NOTE(review): `%or%` and `%notin%` are operators defined elsewhere in this
# package -- presumably a null-coalescing default and a negated %in%; the
# comments below assume those semantics.
Chart$methods(
# Set the default parameters (fill in any the user did not supply)
get_params = function(){
params$port <<- params$port %or% 8000
params$font <<- params$font %or% "Rockwell, Helvetica, Arial, sans"
params$title <<- params$title %or% params$main %or% internal$file$names$template # alias (main)
params$subtitle <<- params$subtitle %or% ""
params$padding <<- validate_padding(params$padding)
params$palette <<- params$palette %or% params[["col"]] # alias (col); [[ ]] avoids partial matching on "col"
params$rotate_y_title <<- params$rotate_y_title %or% TRUE
params$show_sidebar <<- params$show_sidebar %or% TRUE
},
# Ensure there are four padding values named top, right, bottom and left;
# unknown names raise an error, missing ones get package defaults.
validate_padding = function(padding){
padding <- as.list(padding)
valid_padding_names <- c("top", "right", "bottom", "left")
if (any(names(padding) %notin% valid_padding_names)){
bad_padding_elements <- padding[names(padding) %notin% valid_padding_names]
stop(sprintf("\nWrong padding elements:\n%s", enumerate(bad_padding_elements)), call. = FALSE)
} else {
padding$top <- padding$top %or% 150
padding$right <- padding$right %or% 400
padding$bottom <- padding$bottom %or% 70
padding$left <- padding$left %or% 100
}
padding
}
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.