content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Big thanks to Alex Skeels (https://alexskeels.com/) for help here!
# Dispersal null model.
# Rebuilds every plot's community by repeatedly drawing species from nearby
# donor plots (chosen by selectNear(), i.e. with probability inversely
# proportional to distance) until the plot's observed richness is recovered.
#
# Arguments:
#   picante.cdm        plot-by-species community data matrix (plots as rows).
#   tree               phylogeny; currently unused (the tip-label check was
#                      disabled in the original code).
#   distances.among    plot-by-plot distance matrix; rows must match the CDM.
#   abundance.matters  if TRUE, species are drawn from a donor plot with
#                      probability proportional to their abundance there.
#   abundance.assigned how abundances are assigned in the null matrix:
#                      "directly" (copy donor abundance), "explore" (jitter
#                      the donor abundance), or "overall" (draw from the pool
#                      of all non-zero abundances).
#
# Returns a null community matrix with the same plot/species order as the
# input. Relies on checkCDM(), lengthNonZeros(), sample2matrix() and
# selectNear() being available.
DNM <- function (picante.cdm, tree, distances.among, abundance.matters = FALSE,
    abundance.assigned = "directly")
{
    if (checkCDM(picante.cdm) == "fail") {
        stop("CDM incompatible with dispersalNull model. See 'checkCDM' for details")
    }
    # Plot names must agree between the CDM and the distance matrix ...
    if (length(setdiff(row.names(picante.cdm), row.names(distances.among))) != 0 &&
        length(setdiff(row.names(distances.among), row.names(picante.cdm))) != 0) {
        stop("Your cdm plot names and distance matrix names do not match")
    }
    # ... and must be in the same order.
    if (any(row.names(picante.cdm) != row.names(distances.among))) {
        stop("Your cdm and distance matrix are not in the same plot order")
    }
    richness <- apply(picante.cdm, 1, lengthNonZeros)
    # Pool of all non-zero abundances, used when abundance.assigned = "overall".
    overallAbundance <- picante.cdm[picante.cdm != 0]
    replacementList <- list()
    for (i in seq_len(nrow(picante.cdm))) {
        thisPlot <- row.names(picante.cdm)[i]
        # Long-format accumulator: one (plot, abund, id) row per drawn species.
        phylocom <- as.data.frame(matrix(0, nrow = richness[i], ncol = 3))
        names(phylocom) <- c("plot", "abund", "id")
        j <- 0
        # Keep drawing until the plot holds its observed number of species.
        while (length(phylocom[phylocom$plot == thisPlot, ]$id) < richness[i]) {
            # Donor plot, weighted towards spatial neighbours of plot i.
            selectedPlot <- selectNear(distances.among[, i])
            if (abundance.matters) {
                # Abundance-weighted draw from the donor plot.
                # NOTE(review): assumes names(temp) yields the species name of
                # the draw -- confirm for the matrix vs data.frame case.
                temp <- sample(x = picante.cdm[selectedPlot, ], size = 1,
                    prob = picante.cdm[selectedPlot, ])
            }
            else {
                # Uniform draw among species present in the donor plot.
                present <- picante.cdm[selectedPlot, ] != 0
                possible <- picante.cdm[selectedPlot, ][present]
                names(possible) <- names(picante.cdm[selectedPlot, ])[present]
                # Guard the length-1 case: sample(x) on a single number would
                # sample from 1:x instead of returning x.
                if (length(possible) == 1) {
                    temp <- possible
                }
                else {
                    temp <- sample(x = possible, size = 1)
                }
            }
            # Only accept species not already placed into this plot.
            if (!(names(temp) %in% phylocom[phylocom$plot == thisPlot, ]$id)) {
                j <- j + 1
                phylocom[j, 1] <- thisPlot
                if (abundance.assigned == "directly") {
                    phylocom[j, 2] <- temp
                }
                else if (abundance.assigned == "explore") {
                    # Jitter the donor abundance: round(N(mean = temp, sd = 1)),
                    # truncating negative draws up to 1.
                    distribution <- round(rnorm(n = 100, mean = as.numeric(temp),
                        sd = 1))
                    distribution[distribution < 0] <- 1
                    phylocom[j, 2] <- sample(distribution, 1)
                }
                else if (abundance.assigned == "overall") {
                    # Draw from the pooled non-zero abundances of the whole CDM.
                    phylocom[j, 2] <- sample(overallAbundance, 1)
                }
                else {
                    stop("abundance.assigned argument set to unrecognized value")
                }
                phylocom[j, 3] <- names(temp)
            }
        }
        replacementList[[i]] <- phylocom
    }
    # Back to a plot-by-species matrix (debug prints kept from the original).
    newCDM <- Reduce(rbind, replacementList)
    newCDM <- sample2matrix(newCDM)
    print(c(ncol(newCDM), nrow(newCDM)))
    # Species of the original CDM that the null draw never picked up.
    notFound <- setdiff(colnames(picante.cdm), names(newCDM))
    print(c(length(colnames(picante.cdm)), length(names(newCDM))))
    if (length(notFound) > 0) {
        toBind <- matrix(0, nrow = nrow(newCDM), ncol = length(notFound))
        # One token occurrence so all-zero taxa do not break dbFD() downstream.
        toBind[1, ] <- 1
        colnames(toBind) <- notFound
        newCDM <- cbind(newCDM, toBind)
    }
    print(c(ncol(newCDM), nrow(newCDM)))
    # Restore the original plot and species ordering before returning.
    newCDM <- as.matrix(newCDM)
    newCDM <- newCDM[row.names(picante.cdm), ]
    newCDM <- newCDM[, colnames(picante.cdm)]
    return(newCDM)
}
# Pick the name of a donor cell for a focal cell.
# `distances.between` is a named vector of distances from the focal cell to
# every cell; candidates are weighted by inverse distance, so nearer cells
# are more likely to be chosen.
selectNear <- function(distances.between) {
  # Drop the focal cell itself (and any other cell at distance zero).
  candidates <- distances.between[distances.between != 0]
  # Inverse-distance weighted draw of a single cell name.
  sample(x = names(candidates), size = 1, prob = 1 / candidates)
}
| /Spatial_Evo/DispersalNullModel.R | no_license | IanGBrennan/MonitorPhylogenomics | R | false | false | 4,207 | r | # Big thanks to Alex Skeels (https://alexskeels.com/) for help here!
# Dispersal null model.
# Rebuilds every plot's community by repeatedly drawing species from nearby
# donor plots (chosen by selectNear(), i.e. with probability inversely
# proportional to distance) until the plot's observed richness is recovered.
#
# Arguments:
#   picante.cdm        plot-by-species community data matrix (plots as rows).
#   tree               phylogeny; currently unused (the tip-label check was
#                      disabled in the original code).
#   distances.among    plot-by-plot distance matrix; rows must match the CDM.
#   abundance.matters  if TRUE, species are drawn from a donor plot with
#                      probability proportional to their abundance there.
#   abundance.assigned how abundances are assigned in the null matrix:
#                      "directly" (copy donor abundance), "explore" (jitter
#                      the donor abundance), or "overall" (draw from the pool
#                      of all non-zero abundances).
#
# Returns a null community matrix with the same plot/species order as the
# input. Relies on checkCDM(), lengthNonZeros(), sample2matrix() and
# selectNear() being available.
DNM <- function (picante.cdm, tree, distances.among, abundance.matters = FALSE,
    abundance.assigned = "directly")
{
    if (checkCDM(picante.cdm) == "fail") {
        stop("CDM incompatible with dispersalNull model. See 'checkCDM' for details")
    }
    # Plot names must agree between the CDM and the distance matrix ...
    if (length(setdiff(row.names(picante.cdm), row.names(distances.among))) != 0 &&
        length(setdiff(row.names(distances.among), row.names(picante.cdm))) != 0) {
        stop("Your cdm plot names and distance matrix names do not match")
    }
    # ... and must be in the same order.
    if (any(row.names(picante.cdm) != row.names(distances.among))) {
        stop("Your cdm and distance matrix are not in the same plot order")
    }
    richness <- apply(picante.cdm, 1, lengthNonZeros)
    # Pool of all non-zero abundances, used when abundance.assigned = "overall".
    overallAbundance <- picante.cdm[picante.cdm != 0]
    replacementList <- list()
    for (i in seq_len(nrow(picante.cdm))) {
        thisPlot <- row.names(picante.cdm)[i]
        # Long-format accumulator: one (plot, abund, id) row per drawn species.
        phylocom <- as.data.frame(matrix(0, nrow = richness[i], ncol = 3))
        names(phylocom) <- c("plot", "abund", "id")
        j <- 0
        # Keep drawing until the plot holds its observed number of species.
        while (length(phylocom[phylocom$plot == thisPlot, ]$id) < richness[i]) {
            # Donor plot, weighted towards spatial neighbours of plot i.
            selectedPlot <- selectNear(distances.among[, i])
            if (abundance.matters) {
                # Abundance-weighted draw from the donor plot.
                # NOTE(review): assumes names(temp) yields the species name of
                # the draw -- confirm for the matrix vs data.frame case.
                temp <- sample(x = picante.cdm[selectedPlot, ], size = 1,
                    prob = picante.cdm[selectedPlot, ])
            }
            else {
                # Uniform draw among species present in the donor plot.
                present <- picante.cdm[selectedPlot, ] != 0
                possible <- picante.cdm[selectedPlot, ][present]
                names(possible) <- names(picante.cdm[selectedPlot, ])[present]
                # Guard the length-1 case: sample(x) on a single number would
                # sample from 1:x instead of returning x.
                if (length(possible) == 1) {
                    temp <- possible
                }
                else {
                    temp <- sample(x = possible, size = 1)
                }
            }
            # Only accept species not already placed into this plot.
            if (!(names(temp) %in% phylocom[phylocom$plot == thisPlot, ]$id)) {
                j <- j + 1
                phylocom[j, 1] <- thisPlot
                if (abundance.assigned == "directly") {
                    phylocom[j, 2] <- temp
                }
                else if (abundance.assigned == "explore") {
                    # Jitter the donor abundance: round(N(mean = temp, sd = 1)),
                    # truncating negative draws up to 1.
                    distribution <- round(rnorm(n = 100, mean = as.numeric(temp),
                        sd = 1))
                    distribution[distribution < 0] <- 1
                    phylocom[j, 2] <- sample(distribution, 1)
                }
                else if (abundance.assigned == "overall") {
                    # Draw from the pooled non-zero abundances of the whole CDM.
                    phylocom[j, 2] <- sample(overallAbundance, 1)
                }
                else {
                    stop("abundance.assigned argument set to unrecognized value")
                }
                phylocom[j, 3] <- names(temp)
            }
        }
        replacementList[[i]] <- phylocom
    }
    # Back to a plot-by-species matrix (debug prints kept from the original).
    newCDM <- Reduce(rbind, replacementList)
    newCDM <- sample2matrix(newCDM)
    print(c(ncol(newCDM), nrow(newCDM)))
    # Species of the original CDM that the null draw never picked up.
    notFound <- setdiff(colnames(picante.cdm), names(newCDM))
    print(c(length(colnames(picante.cdm)), length(names(newCDM))))
    if (length(notFound) > 0) {
        toBind <- matrix(0, nrow = nrow(newCDM), ncol = length(notFound))
        # One token occurrence so all-zero taxa do not break dbFD() downstream.
        toBind[1, ] <- 1
        colnames(toBind) <- notFound
        newCDM <- cbind(newCDM, toBind)
    }
    print(c(ncol(newCDM), nrow(newCDM)))
    # Restore the original plot and species ordering before returning.
    newCDM <- as.matrix(newCDM)
    newCDM <- newCDM[row.names(picante.cdm), ]
    newCDM <- newCDM[, colnames(picante.cdm)]
    return(newCDM)
}
# Pick the name of a donor cell for a focal cell.
# `distances.between` is a named vector of distances from the focal cell to
# every cell; candidates are weighted by inverse distance, so nearer cells
# are more likely to be chosen.
selectNear <- function(distances.between) {
  # Drop the focal cell itself (and any other cell at distance zero).
  candidates <- distances.between[distances.between != 0]
  # Inverse-distance weighted draw of a single cell name.
  sample(x = names(candidates), size = 1, prob = 1 / candidates)
}
|
# --- Preparation --- #
# Set the working directory to the folder that contains hipdata.txt.
setwd("...")
# Read in the paired observer measurements.
# (attach(raw) removed: nothing below relied on attached names.)
raw <- read.table("hipdata.txt", header = TRUE, dec = ".")

# --------- Bland-Altman plot --------- #
# Individual measurement vectors of Observer A and Observer B.
A <- raw[, 1]
B <- raw[, 2]
# Per-subject difference and mean. Vectorized: the original filled the mean
# vector with a hard-coded 38-iteration loop. Renamed from `diff`/`mean`/`sd`
# to avoid shadowing the base functions of the same name, and to work for any
# number of subjects.
diffs <- A - B
means <- (A + B) / 2
# Plot each subject's mean against its difference.
plot(means, diffs)
d <- mean(diffs)   # systematic error (bias)
s <- sd(diffs)     # standard deviation of the differences
abline(h = d, col = "red")  # line of systematic error
# Upper and lower 95% limits of agreement.
upp <- d + 1.96 * s
low <- d - 1.96 * s
abline(h = upp, col = "blue")
abline(h = low, col = "blue")
# Count the observations at each difference level; replaces five separate
# hand-written counting loops.
zero <- sum(diffs == 0)
plusone <- sum(diffs == 1)
minusone <- sum(diffs == -1)
plustwo <- sum(diffs == 2)
minustwo <- sum(diffs == -2)
# Annotate the plot with the computed counts (previously hard-coded strings,
# which would silently go stale if the data changed).
text(0.7, 2, paste0("n=", plustwo), col = "green")
text(0.7, 1, paste0("n=", plusone), col = "green")
text(0.7, -0.1, paste0("n=", zero), col = "green")
text(0.7, -1, paste0("n=", minusone), col = "green")
text(0.7, -2, paste0("n=", minustwo), col = "green")

# --- ICC --- #
icc(raw)
| /Seminar 4/Seminar4_Task2.R | no_license | Dashtid/Statistics | R | false | false | 1,731 | r | # --- Preparation --- #
# Set the working directory to the folder that contains hipdata.txt.
setwd("...")
# Read in the paired observer measurements.
# (attach(raw) removed: nothing below relied on attached names.)
raw <- read.table("hipdata.txt", header = TRUE, dec = ".")

# --------- Bland-Altman plot --------- #
# Individual measurement vectors of Observer A and Observer B.
A <- raw[, 1]
B <- raw[, 2]
# Per-subject difference and mean. Vectorized: the original filled the mean
# vector with a hard-coded 38-iteration loop. Renamed from `diff`/`mean`/`sd`
# to avoid shadowing the base functions of the same name, and to work for any
# number of subjects.
diffs <- A - B
means <- (A + B) / 2
# Plot each subject's mean against its difference.
plot(means, diffs)
d <- mean(diffs)   # systematic error (bias)
s <- sd(diffs)     # standard deviation of the differences
abline(h = d, col = "red")  # line of systematic error
# Upper and lower 95% limits of agreement.
upp <- d + 1.96 * s
low <- d - 1.96 * s
abline(h = upp, col = "blue")
abline(h = low, col = "blue")
# Count the observations at each difference level; replaces five separate
# hand-written counting loops.
zero <- sum(diffs == 0)
plusone <- sum(diffs == 1)
minusone <- sum(diffs == -1)
plustwo <- sum(diffs == 2)
minustwo <- sum(diffs == -2)
# Annotate the plot with the computed counts (previously hard-coded strings,
# which would silently go stale if the data changed).
text(0.7, 2, paste0("n=", plustwo), col = "green")
text(0.7, 1, paste0("n=", plusone), col = "green")
text(0.7, -0.1, paste0("n=", zero), col = "green")
text(0.7, -1, paste0("n=", minusone), col = "green")
text(0.7, -2, paste0("n=", minustwo), col = "green")

# --- ICC --- #
icc(raw)
|
if (!require("pacman")) install.packages("pacman")
pacman::p_install_gh("kahaaga/tstools")
pacman::p_load(dplyr, dtplyr, data.table)
combos <- expand.grid(threshold = seq(0, 500, 25),
latitude = seq(-90, 90, 1))
window.sizes = seq(12, 1, -1)
for (i in 1:nrow(combos)) {
print(i)
path <- "results/crossmap_summerenergy_GSL_speleoice/"
# Proceed only if there are any files corresponding to this
# latitude-threshold configuration (there might not be, because some time
# series have toomany zeros too be analysed).
threshold <- combos[i, ]$threshold
latitude <- combos[i, ]$latitude
key <- paste("_", threshold, "_", latitude, ".RData", sep = "")
files <- list.files(path = path,
full.names = T,
recursive = F,
include.dirs = F,
pattern = key)
if (length(files) > 0) {
start.time <- Sys.time()
dt <- data.table::rbindlist(lapply(files, function(f) readRDS(f)))
summaries <- list()
for (window.size in window.sizes) {
cat("\twindow size: ", window.size, "\n")
# Summarise original analyses and surrogate analyses separately.
summary_orig <- directionalcausaltest(
dt %>% filter(analysis.type == "original",
lag %in% (-window.size):window.size)) %>%
mutate(analysis.type = "original")
summary_surr <- directionalcausaltest(
dt %>%
filter(analysis.type == "surrogate",
lag %in% (-window.size):window.size)) %>%
mutate(analysis.type = "surrogate")
# Change column names for the surrogate dataset, so we can append it
# horisontally to the summary of the original analysis.
rangemax <- length(colnames(summary_surr)) - 1
cols <- paste0(colnames(summary_surr)[1:rangemax], "_surr")
colnames(summary_surr)[1:rangemax] <- cols
# Combine original and surrogate summaries
summary <- cbind(summary_orig, summary_surr[, 2:rangemax]) %>%
mutate(threshold = threshold,
latitude = latitude,
window.size = window.size)
summaries[[toString(window.size)]] = summary
}
# Save the summary as an .RData file
summary_filename <- paste0("SummerEnergyDrivesGSL_speleoice_", threshold,
"_", latitude, ".RData")
path <- "results/crossmap_summerenergy_GSL_speleoice_summaries/"
file <- paste0(path, summary_filename)
print(file)
saveRDS(data.table::rbindlist(summaries), file)
end.time <- Sys.time()
cat("\tElapsed time: ", end.time - start.time, " seconds.\n")
cat("\n")
}
} | /analysis/analysis_summarise_crossmapping_summerenergy_GSL_speleoice.R | no_license | kahaaga/Haaga_et_al_insolation | R | false | false | 2,846 | r | if (!require("pacman")) install.packages("pacman")
# Load/install dependencies (pacman itself is bootstrapped one line above).
pacman::p_install_gh("kahaaga/tstools")
pacman::p_load(dplyr, dtplyr, data.table)

# All (threshold, latitude) configurations that may have been analysed.
combos <- expand.grid(threshold = seq(0, 500, 25),
                      latitude = seq(-90, 90, 1))

# Cross-mapping lag windows, summarised from widest (12) to narrowest (1).
window.sizes <- seq(12, 1, -1)

for (i in seq_len(nrow(combos))) {
  print(i)
  path <- "results/crossmap_summerenergy_GSL_speleoice/"

  # Proceed only if there are any files corresponding to this
  # latitude-threshold configuration (there might not be, because some time
  # series have too many zeros to be analysed).
  threshold <- combos[i, ]$threshold
  latitude <- combos[i, ]$latitude
  key <- paste("_", threshold, "_", latitude, ".RData", sep = "")
  files <- list.files(path = path,
                      full.names = TRUE,
                      recursive = FALSE,
                      include.dirs = FALSE,
                      pattern = key)

  if (length(files) > 0) {
    start.time <- Sys.time()
    # Stack every analysis file for this configuration into one data.table.
    dt <- data.table::rbindlist(lapply(files, function(f) readRDS(f)))

    summaries <- list()
    for (window.size in window.sizes) {
      cat("\twindow size: ", window.size, "\n")

      # Summarise original analyses and surrogate analyses separately.
      summary_orig <- directionalcausaltest(
        dt %>% filter(analysis.type == "original",
                      lag %in% (-window.size):window.size)) %>%
        mutate(analysis.type = "original")
      summary_surr <- directionalcausaltest(
        dt %>% filter(analysis.type == "surrogate",
                      lag %in% (-window.size):window.size)) %>%
        mutate(analysis.type = "surrogate")

      # Suffix the surrogate column names so the two summaries can be bound
      # side by side.
      rangemax <- length(colnames(summary_surr)) - 1
      cols <- paste0(colnames(summary_surr)[1:rangemax], "_surr")
      colnames(summary_surr)[1:rangemax] <- cols

      # Combine original and surrogate summaries.
      # NOTE(review): columns 1 and rangemax+1 of summary_surr are dropped
      # here, exactly as in the original -- confirm column 1 is a shared key.
      summary <- cbind(summary_orig, summary_surr[, 2:rangemax]) %>%
        mutate(threshold = threshold,
               latitude = latitude,
               window.size = window.size)
      summaries[[toString(window.size)]] <- summary
    }

    # Save the combined summary for this configuration.
    summary_filename <- paste0("SummerEnergyDrivesGSL_speleoice_", threshold,
                               "_", latitude, ".RData")
    path <- "results/crossmap_summerenergy_GSL_speleoice_summaries/"
    out.file <- paste0(path, summary_filename)
    print(out.file)
    saveRDS(data.table::rbindlist(summaries), out.file)

    end.time <- Sys.time()
    # Force seconds explicitly: difftime's default unit depends on magnitude,
    # so the original "seconds" label could be wrong for long runs.
    cat("\tElapsed time: ",
        as.numeric(difftime(end.time, start.time, units = "secs")),
        " seconds.\n")
    cat("\n")
  }
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bird_wide.data.r
\name{bird_wide.data}
\alias{bird_wide.data}
\title{Creates wide data frame for Point Count data}
\usage{
bird_wide.data(df, distanceWide,
transect = c(levels(as.factor(df$Transect))),
surveyyear = c(levels(as.factor(df$YEAR))))
}
\arguments{
\item{df}{is a dataframe. Only works with newpc2 created by add.zeros() and it's previous steps.}
}
\value{
Data frame that is the wide version of newpc2, with an added column of Richness
}
\description{
For the output of add.zeros(). This function turns the data frame from long format to wide format and adds a richness column.
}
\examples{
bird_wide.data(newpc2)
}
| /man/bird_wide.data.Rd | no_license | pointblue/RMN.functions | R | false | true | 714 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bird_wide.data.r
\name{bird_wide.data}
\alias{bird_wide.data}
\title{Creates wide data frame for Point Count data}
\usage{
bird_wide.data(df, distanceWide,
transect = c(levels(as.factor(df$Transect))),
surveyyear = c(levels(as.factor(df$YEAR))))
}
\arguments{
\item{df}{is a dataframe. Only works with newpc2 created by add.zeros() and it's previous steps.}
}
\value{
Data frame that is the wide version of newpc2, with an added column of Richness
}
\description{
For the output of add.zeros(). This function turns the data frame from long format to wide format and adds a richness column.
}
\examples{
bird_wide.data(newpc2)
}
|
## Get data: full household power consumption file.
File <- "./data/exDatahhPowerConsum/household_power_consumption.txt"
alldata <- read.table(File, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
## Subset the two days needed for this project.
## Fix: the original filtered on `data2days$Date` before `data2days` existed;
## the filter must use `alldata$Date`.
data2days <- alldata[alldata$Date %in% c("1/2/2007", "2/2/2007"), ]
## Combine date and time into POSIX date-time values.
dtdata <- strptime(paste(data2days$Date, data2days$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
## Global active power as numeric (read in as character).
ActivePower2Days <- as.numeric(data2days$Global_active_power)
## Open the PNG graphics device.
png("plot2.png", width = 480, height = 480)
## Create the plot (the device is closed by the dev.off() call that follows).
plot(dtdata, ActivePower2Days, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
## Close graphic device
dev.off() | /PlotExersize1/plot2.R | no_license | mdrobnisdata/DataScienceCoursera | R | false | false | 715 | r | ## get data
File <- "./data/exDatahhPowerConsum/household_power_consumption.txt"
alldata <- read.table(File, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
## Subset the two days needed for this project.
## Fix: the original filtered on `data2days$Date` before `data2days` existed;
## the filter must use `alldata$Date`.
data2days <- alldata[alldata$Date %in% c("1/2/2007", "2/2/2007"), ]
## Combine date and time into POSIX date-time values.
dtdata <- strptime(paste(data2days$Date, data2days$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
## Global active power as numeric (read in as character).
ActivePower2Days <- as.numeric(data2days$Global_active_power)
## Open the PNG graphics device.
png("plot2.png", width = 480, height = 480)
## Create the plot (the device is closed by the dev.off() call that follows).
plot(dtdata, ActivePower2Days, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
## Close graphic device
dev.off() |
# Download lecture videos 1..50. download.file() returns an integer status
# code (0 on success), NOT a connection, so the original close(file) call
# raised an error -- check the returned status instead.
for (i in 1:50) {
  url <- paste0("https://class.coursera.org/getdata-013/lecture/download.mp4?lecture_id=", i)
  status <- download.file(url, destfile = file.path(getwd(), paste0(i, ".mp4")))
  if (status != 0) warning("Download of lecture ", i, " failed", call. = FALSE)
}
# Retry a single lecture (id 12) explicitly via curl.
i <- 12
url <- paste0("https://class.coursera.org/getdata-013/lecture/download.mp4?lecture_id=", i)
download.file(url, destfile = file.path(getwd(), paste0(i, ".mp4")), method = "curl")
| /SaveCourseraLectures.R | no_license | ImennoOn/Coursera | R | false | false | 380 | r | for(i in 1:50){
file<-download.file(paste("https://class.coursera.org/getdata-013/lecture/download.mp4?lecture_id=", i, sep=""), destfile = paste(getwd(),"/",i,".mp4",sep=""))
close(file)
}
i<-12
url<-paste("https://class.coursera.org/getdata-013/lecture/download.mp4?lecture_id=", i, sep="")
download.file(url, destfile = paste(getwd(),"/",i,".mp4",sep=""), method = "curl")
|
# Project: SmartTab Anonymization
# Author: Tran Quoc Hoan
# Start date: 2013-Nov-13
# File: GetPattern.R
# Do: get pattern from time series data
# Last edited on: 2013-Nov-13

# Read data first: uncomment exactly one of the inputs below so that
# `rawData` holds a data frame with a `value` column.
# rawData <- read.csv("../Data/selab/000D6F0001A44A42_20131012.csv")
# rawData <- read.csv("../Data/selab_compl/000D6F00039D9CDF_20131018_compl.csv")
# rawData <- read.csv("../Data/selab/dk_1105_1112.txt")
sData <- rawData$value
sampleTimeSeriesData <- ts(sData)
plot.ts(sampleTimeSeriesData, type = "l", lwd = 1, col = "blue",
        col.main = "red", main = "Original Time Series Data")

# Find local-minimum points: X[t0] <= X[t0+-1]
# Find local-maximum points: X[t0] >= X[t0+-1]
# Store in vector (t,v,p) with
# t = time, v = value, p = min(-1) max(+1) both(0)
numberDataPoints <- length(sData)
localPoints <- list()
numberLocalPoints <- 0
# Magnitude (in data units) below which a change is treated as noise.
epsilon <- 10
# (A stray no-op expression "-epsilon" was removed here: it computed a value
# and discarded it.)

# Accumulators for the two stable-extrema filtering passes below.
stablePoints <- list()
stablePointsTwo <- list()
numberStablePoints <- 0
# Pass 1: record every local extremum of the raw series.
# Each entry of localPoints is c(time index, value, mode), where mode is
# -1 = local minimum, +1 = local maximum, 0 = flat (both).
for (i in 1:numberDataPoints) {
  # Backward/forward first differences; zero at the series boundaries.
  backwardDiff1 <- if (i == 1) 0 else sData[i] - sData[i - 1]
  forwardDiff1 <- if (i == numberDataPoints) 0 else sData[i + 1] - sData[i]
  # 10 is a sentinel meaning "not an extremum".
  localMode <- 10
  if (backwardDiff1 <= 0 && forwardDiff1 >= 0) localMode <- -1
  if (backwardDiff1 >= 0 && forwardDiff1 <= 0) localMode <- 1
  if (backwardDiff1 == 0 && forwardDiff1 == 0) localMode <- 0
  if (localMode < 10) {
    # Append (time, value, mode) to the running list.
    numberLocalPoints <- numberLocalPoints + 1
    localPoints[[numberLocalPoints]] <- c(i, sData[i], localMode)
  }
}
# Collect the (time, value) pairs of the local points into a plottable list.
localData <- NULL
localData$time <- 1:numberLocalPoints
localData$value <- 1:numberLocalPoints
for (k in 1:numberLocalPoints) {
  pt <- localPoints[[k]]
  localData$time[k] <- pt[1]
  localData$value[k] <- pt[2]
}
# create a new set stable points
numberStablePoints <- 0
# Get local points
for (i in 1:numberLocalPoints) {
# mode for local-min, local-max
localMode <- 10
# get backward and forward difference
if (i == 1) backwardDiff1 <- 0
else backwardDiff1 <- (localPoints[[c(i,2)]] - localPoints[[c(i-1,2)]])
if (i == numberLocalPoints) forwardDiff1 <- 0
else forwardDiff1 <- (localPoints[[c(i+1,2)]] - localPoints[[c(i,2)]])
if (i <= 2) backwardDiff2 <- backwardDiff1
else backwardDiff2 <- (localPoints[[c(i,2)]] - localPoints[[c(i-2,2)]])
if (i >= numberLocalPoints-1) forwardDiff2 <- forwardDiff1
else forwardDiff2 <- (localPoints[[c(i+2,2)]] - localPoints[[c(i,2)]])
sumBackwardDiff <- backwardDiff1 + backwardDiff2
sumForwardDiff <- forwardDiff1 + forwardDiff2
# if ( (backwardDiff1 <= -epsilon) && (forwardDiff1 >= 0) ) localMode <- -1
# if ( (backwardDiff1 <= 0) && (forwardDiff1 >= epsilon) ) localMode <- -1
# if ( (backwardDiff1 >= epsilon) && (forwardDiff1 <= 0) ) localMode <- 1
# if ( (backwardDiff1 >= 0) && (forwardDiff1 <= -epsilon) ) localMode <- 1
# if ( (backwardDiff1 == 0)&&(forwardDiff1 == 0) ) localMode <- 0
if ( (backwardDiff1 <= -epsilon) || (backwardDiff1 >= epsilon) ) localMode <- -1
if ( (forwardDiff1 >= epsilon) || (forwardDiff1 <= -epsilon) ) localMode <- 1
# # vector backwardDiff mode
# backwardDiffMode <- 0
# if ( (backwardDiff1 >= 0) && (backwardDiff2 >= 0) && (sumBackwardDiff >= 0) ) {
# backwardDiffMode <- 1
# }
# if ( (backwardDiff1 <= 0) && (backwardDiff2 <= 0) && (sumBackwardDiff <= 0) ) {
# backwardDiffMode <- -1
# }
#
# # vector forwardDiff mode
# forwardDiffMode <- 0
# if ( (forwardDiff1 >= 0) && (forwardDiff2 >=0) && (sumForwardDiff >= 0) )
# forwardDiffMode <- 1
# if ( (forwardDiff1 <= 0) && (forwardDiff2 <= 0) && (sumForwardDiff <= 0) )
# forwardDiffMode <- -1
#
# stableMode <- 10
#
# # stableMin
# # if ((backwardDiff2 <= 0) && (forwardDiff2 >= 0)) stableMode <- -1
# if ((backwardDiffMode == -1) && (forwardDiffMode == 1)) stableMode <- -1
#
# # stableMax
# # if ((backwardDiff2 >= 0) && (forwardDiff2 <= 0)) stableMode <- 1
# if ((backwardDiffMode == 1) && (forwardDiffMode == -1)) stableMode <- 1
stableMode <- 1
if ((localMode < 10) && (stableMode < 10)) {
# write to stablePoints list
numberStablePoints <- numberStablePoints+1
stablePoints[[numberStablePoints]] <- c(localPoints[[c(i,1)]], localPoints[[c(i,2)]], stableMode)
}
}
# divide again
epsilon <- 20
# create a new set stable points
numberStablePointsTwo <- 0
# Get local points
for (i in 1:numberStablePoints) {
# mode for local-min, local-max
localMode <- 10
# get backward and forward difference
if (i == 1) backwardDiff1 <- 0
else backwardDiff1 <- (stablePoints[[c(i,2)]] - stablePoints[[c(i-1,2)]])
if (i == numberStablePoints) forwardDiff1 <- 0
else forwardDiff1 <- (stablePoints[[c(i+1,2)]] - stablePoints[[c(i,2)]])
if (i <= 2) backwardDiff2 <- backwardDiff1
else backwardDiff2 <- (stablePoints[[c(i,2)]] - stablePoints[[c(i-2,2)]])
if (i >= numberStablePoints-1) forwardDiff2 <- forwardDiff1
else forwardDiff2 <- (stablePoints[[c(i+2,2)]] - stablePoints[[c(i,2)]])
sumBackwardDiff <- backwardDiff1 + backwardDiff2
sumForwardDiff <- forwardDiff1 + forwardDiff2
if ( (backwardDiff1 <= -epsilon) || (backwardDiff1 >= epsilon) ) localMode <- -1
if ( (forwardDiff1 >= epsilon) || (forwardDiff1 <= -epsilon) ) localMode <- 1
# if ( (backwardDiff1 == 0)&&(forwardDiff1 == 0) ) localMode <- 0
# vector backwardDiff mode
backwardDiffMode <- 0
if ( (backwardDiff1 >= 0) && (backwardDiff2 >= 0) && (sumBackwardDiff > 0) ) {
backwardDiffMode <- 1
}
if ( (backwardDiff1 <= 0) && (backwardDiff2 <= 0) && (sumBackwardDiff < 0) ) {
backwardDiffMode <- -1
}
# vector forwardDiff mode
forwardDiffMode <- 0
if ( (forwardDiff1 >= 0) && (forwardDiff2 >=0) && (sumForwardDiff > 0) )
forwardDiffMode <- 1
if ( (forwardDiff1 <= 0) && (forwardDiff2 <= 0) && (sumForwardDiff < 0) )
forwardDiffMode <- -1
stableMode <- 10
# stableMin
if ((backwardDiff2 <= 0) && (forwardDiff2 >= 0)) stableMode <- -1
if ((backwardDiffMode == -1) && (forwardDiffMode == 1)) stableMode <- -1
# stableMax
if ((backwardDiff2 >= 0) && (forwardDiff2 <= 0)) stableMode <- 1
if ((backwardDiffMode == 1) && (forwardDiffMode == -1)) stableMode <- 1
stableMode <- 1
if ((localMode < 10) && (stableMode < 10)) {
# write to stablePoints list
numberStablePointsTwo <- numberStablePointsTwo+1
stablePointsTwo[[numberStablePointsTwo]] <- c(stablePoints[[c(i,1)]], stablePoints[[c(i,2)]], stableMode)
}
}
# draw new data set
# plot(localData$time, localData$value, type="o", col="blue", col.main="red",main="Local Points Graph")
# put stablePointsTwo (time,value) into new data set
nData <- NULL
nData$time <- 1:numberStablePoints
nData$value <- 1:numberStablePoints
for (i in 1:numberStablePoints) {
nData$time[i] <- stablePoints[[c(i,1)]]
nData$value[i] <- stablePoints[[c(i,2)]]
}
# put stablePointsTwo (time,value) into new data set
nDataTwo <- NULL
nDataTwo$time <- 1:numberStablePointsTwo
nDataTwo$value <- 1:numberStablePointsTwo
for (i in 1:numberStablePointsTwo) {
nDataTwo$time[i] <- stablePointsTwo[[c(i,1)]]
nDataTwo$value[i] <- stablePointsTwo[[c(i,2)]]
}
#
# extract convex & concave
# draw new data set
#plot (1, type="n", xlim=c(1,numberDataPoints), ylim=c(0,150), xlab="time", ylab="Value", main="")
lines(localData$time, localData$value, lwd=1, type="p",col="red")
lines(nData$time, nData$value, lwd=3, type="p", col="green")
lines(nDataTwo$time, nDataTwo$value, lwd=3, type="p", col="purple")
#lines(nDataTwo$time, nDataTwo$value, lwd=3, type="l", col="red")
#lines(nDataTwo$time, nDataTwo$value, type="l", col="red") | /GetPattern.R | no_license | OminiaVincit/SmartTabAno | R | false | false | 7,946 | r | # Project: SmartTab Anonymization
# Author: Tran Quoc Hoan
# Start date: 2013-Nov-13
# File: GetPattern.R
# Do: get pattern from time series data
# Last edited on: 2013-Nov-13
# Read data first
# rawData <- read.csv("../Data/selab/000D6F0001A44A42_20131012.csv")
# rawData <- read.csv("../Data/selab_compl/000D6F00039D9CDF_20131018_compl.csv")
# rawData <- read.csv("../Data/selab/dk_1105_1112.txt")
sData <- rawData$value
sampleTimeSeriesData <- ts(sData)
plot.ts(sampleTimeSeriesData, type="l",lwd=1,col="blue", col.main="red", main="Original Time Series Data")
# Find local-minimum points: X[t0] <= X[t0+-1]
# Find local-maximum points: X[t0] >= X[t0+-1]
# Store in vector (t,v,p) with
# t = time, v = value, p = min(-1) max(+1) both(0)
numberDataPoints <- length(sData)
localPoints <- list()
numberLocalPoints <- 0
epsilon <- 10
-epsilon
# get stable points
stablePoints <- list()
stablePointsTwo <- list()
numberStablePoints <- 0
# Get local points
for (i in 1:numberDataPoints) {
# mode for local-min, local-max
localMode <- 10
# get backward and forward difference
backwardDiff1 <- 0
forwardDiff1 <- 0
if (i==1) backwardDiff1 <- 0
else backwardDiff1 <- (sData[i] - sData[i-1])
if (i == numberDataPoints) forwardDiff1 <- 0
else forwardDiff1 <- (sData[i+1] - sData[i])
if ( (backwardDiff1 <= 0) && (forwardDiff1 >= 0) ) localMode <- -1
if ( (backwardDiff1 >= 0) && (forwardDiff1 <= 0) ) localMode <- 1
if ( (backwardDiff1 == 0)&&(forwardDiff1 == 0) ) localMode <- 0
if (localMode < 10) {
# write to local array
numberLocalPoints <- numberLocalPoints+1
localPoints[[numberLocalPoints]] <- c(i, sData[i], localMode)
}
}
# put localPoints (time,value) into new data set
localData <- NULL
localData$time <- 1:numberLocalPoints
localData$value <- 1:numberLocalPoints
for (i in 1:numberLocalPoints) {
localData$time[i] <- localPoints[[c(i,1)]]
localData$value[i] <- localPoints[[c(i,2)]]
}
# create a new set stable points
numberStablePoints <- 0
# Get local points
for (i in 1:numberLocalPoints) {
# mode for local-min, local-max
localMode <- 10
# get backward and forward difference
if (i == 1) backwardDiff1 <- 0
else backwardDiff1 <- (localPoints[[c(i,2)]] - localPoints[[c(i-1,2)]])
if (i == numberLocalPoints) forwardDiff1 <- 0
else forwardDiff1 <- (localPoints[[c(i+1,2)]] - localPoints[[c(i,2)]])
if (i <= 2) backwardDiff2 <- backwardDiff1
else backwardDiff2 <- (localPoints[[c(i,2)]] - localPoints[[c(i-2,2)]])
if (i >= numberLocalPoints-1) forwardDiff2 <- forwardDiff1
else forwardDiff2 <- (localPoints[[c(i+2,2)]] - localPoints[[c(i,2)]])
sumBackwardDiff <- backwardDiff1 + backwardDiff2
sumForwardDiff <- forwardDiff1 + forwardDiff2
# if ( (backwardDiff1 <= -epsilon) && (forwardDiff1 >= 0) ) localMode <- -1
# if ( (backwardDiff1 <= 0) && (forwardDiff1 >= epsilon) ) localMode <- -1
# if ( (backwardDiff1 >= epsilon) && (forwardDiff1 <= 0) ) localMode <- 1
# if ( (backwardDiff1 >= 0) && (forwardDiff1 <= -epsilon) ) localMode <- 1
# if ( (backwardDiff1 == 0)&&(forwardDiff1 == 0) ) localMode <- 0
if ( (backwardDiff1 <= -epsilon) || (backwardDiff1 >= epsilon) ) localMode <- -1
if ( (forwardDiff1 >= epsilon) || (forwardDiff1 <= -epsilon) ) localMode <- 1
# # vector backwardDiff mode
# backwardDiffMode <- 0
# if ( (backwardDiff1 >= 0) && (backwardDiff2 >= 0) && (sumBackwardDiff >= 0) ) {
# backwardDiffMode <- 1
# }
# if ( (backwardDiff1 <= 0) && (backwardDiff2 <= 0) && (sumBackwardDiff <= 0) ) {
# backwardDiffMode <- -1
# }
#
# # vector forwardDiff mode
# forwardDiffMode <- 0
# if ( (forwardDiff1 >= 0) && (forwardDiff2 >=0) && (sumForwardDiff >= 0) )
# forwardDiffMode <- 1
# if ( (forwardDiff1 <= 0) && (forwardDiff2 <= 0) && (sumForwardDiff <= 0) )
# forwardDiffMode <- -1
#
# stableMode <- 10
#
# # stableMin
# # if ((backwardDiff2 <= 0) && (forwardDiff2 >= 0)) stableMode <- -1
# if ((backwardDiffMode == -1) && (forwardDiffMode == 1)) stableMode <- -1
#
# # stableMax
# # if ((backwardDiff2 >= 0) && (forwardDiff2 <= 0)) stableMode <- 1
# if ((backwardDiffMode == 1) && (forwardDiffMode == -1)) stableMode <- 1
stableMode <- 1
if ((localMode < 10) && (stableMode < 10)) {
# write to stablePoints list
numberStablePoints <- numberStablePoints+1
stablePoints[[numberStablePoints]] <- c(localPoints[[c(i,1)]], localPoints[[c(i,2)]], stableMode)
}
}
# divide again
epsilon <- 20
# create a new set stable points
numberStablePointsTwo <- 0
# Get local points
for (i in 1:numberStablePoints) {
# mode for local-min, local-max
localMode <- 10
# get backward and forward difference
if (i == 1) backwardDiff1 <- 0
else backwardDiff1 <- (stablePoints[[c(i,2)]] - stablePoints[[c(i-1,2)]])
if (i == numberStablePoints) forwardDiff1 <- 0
else forwardDiff1 <- (stablePoints[[c(i+1,2)]] - stablePoints[[c(i,2)]])
if (i <= 2) backwardDiff2 <- backwardDiff1
else backwardDiff2 <- (stablePoints[[c(i,2)]] - stablePoints[[c(i-2,2)]])
if (i >= numberStablePoints-1) forwardDiff2 <- forwardDiff1
else forwardDiff2 <- (stablePoints[[c(i+2,2)]] - stablePoints[[c(i,2)]])
sumBackwardDiff <- backwardDiff1 + backwardDiff2
sumForwardDiff <- forwardDiff1 + forwardDiff2
if ( (backwardDiff1 <= -epsilon) || (backwardDiff1 >= epsilon) ) localMode <- -1
if ( (forwardDiff1 >= epsilon) || (forwardDiff1 <= -epsilon) ) localMode <- 1
# if ( (backwardDiff1 == 0)&&(forwardDiff1 == 0) ) localMode <- 0
# vector backwardDiff mode
backwardDiffMode <- 0
if ( (backwardDiff1 >= 0) && (backwardDiff2 >= 0) && (sumBackwardDiff > 0) ) {
backwardDiffMode <- 1
}
if ( (backwardDiff1 <= 0) && (backwardDiff2 <= 0) && (sumBackwardDiff < 0) ) {
backwardDiffMode <- -1
}
# vector forwardDiff mode
forwardDiffMode <- 0
if ( (forwardDiff1 >= 0) && (forwardDiff2 >=0) && (sumForwardDiff > 0) )
forwardDiffMode <- 1
if ( (forwardDiff1 <= 0) && (forwardDiff2 <= 0) && (sumForwardDiff < 0) )
forwardDiffMode <- -1
stableMode <- 10
# stableMin
if ((backwardDiff2 <= 0) && (forwardDiff2 >= 0)) stableMode <- -1
if ((backwardDiffMode == -1) && (forwardDiffMode == 1)) stableMode <- -1
# stableMax
if ((backwardDiff2 >= 0) && (forwardDiff2 <= 0)) stableMode <- 1
if ((backwardDiffMode == 1) && (forwardDiffMode == -1)) stableMode <- 1
stableMode <- 1
if ((localMode < 10) && (stableMode < 10)) {
# write to stablePoints list
numberStablePointsTwo <- numberStablePointsTwo+1
stablePointsTwo[[numberStablePointsTwo]] <- c(stablePoints[[c(i,1)]], stablePoints[[c(i,2)]], stableMode)
}
}
# draw new data set
# plot(localData$time, localData$value, type="o", col="blue", col.main="red",main="Local Points Graph")
# put stablePointsTwo (time,value) into new data set
# Flatten the first-pass stable points (a list of (time, value) pairs)
# into parallel time/value vectors so they can be plotted directly.
nData <- NULL
nData$time <- 1:numberStablePoints
nData$value <- 1:numberStablePoints
for (i in 1:numberStablePoints) {
nData$time[i] <- stablePoints[[c(i,1)]]
nData$value[i] <- stablePoints[[c(i,2)]]
}
# put stablePointsTwo (time,value) into new data set
# Same flattening for the second-pass ("stable of stable") points.
nDataTwo <- NULL
nDataTwo$time <- 1:numberStablePointsTwo
nDataTwo$value <- 1:numberStablePointsTwo
for (i in 1:numberStablePointsTwo) {
nDataTwo$time[i] <- stablePointsTwo[[c(i,1)]]
nDataTwo$value[i] <- stablePointsTwo[[c(i,2)]]
}
#
# extract convex & concave
# draw new data set
#plot (1, type="n", xlim=c(1,numberDataPoints), ylim=c(0,150), xlab="time", ylab="Value", main="")
# Overlay on the current device: raw local points (red), first-pass stable
# points (green) and second-pass stable points (purple).
lines(localData$time, localData$value, lwd=1, type="p",col="red")
lines(nData$time, nData$value, lwd=3, type="p", col="green")
lines(nDataTwo$time, nDataTwo$value, lwd=3, type="p", col="purple")
#lines(nDataTwo$time, nDataTwo$value, lwd=3, type="l", col="red")
#lines(nDataTwo$time, nDataTwo$value, type="l", col="red") |
# Harvey rainfall mapping: load the HCFCD rain-gage shapefile.
# Fix: setwd() was called twice with the same *relative* path; once the
# first call succeeds, the second call fails (the relative path no longer
# exists from the new working directory). Keep a single setwd().
setwd("chronicle/harvey/data/rainfall/")
options(stringsAsFactors = F)
library(rgdal)
library(ggplot2)
library(maptools)
library(maps)
library(mapdata)
# read in shapefile of rain gages
gagemap <- readOGR("shapefiles/HCFCD_RainGages.shp")
| /harvey_rainfall.R | no_license | mihirzaveri/rainfall | R | false | false | 285 | r | setwd("chronicle/harvey/data/rainfall/")
options(stringsAsFactors = F)
library(rgdal)
library(ggplot2)
library(maptools)
library(maps)
library(mapdata)
setwd("chronicle/harvey/data/rainfall/")
#read in shapefile
gagemap <- readOGR("shapefiles/HCFCD_RainGages.shp")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h2otools_models.R
\name{h2o.model.info}
\alias{h2o.model.info}
\title{h2o.model.info}
\usage{
h2o.model.info(model)
}
| /man/h2o.model.info.Rd | no_license | rocalabern/h2otools | R | false | true | 197 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h2otools_models.R
\name{h2o.model.info}
\alias{h2o.model.info}
\title{h2o.model.info}
\usage{
h2o.model.info(model)
}
|
# Example spreadsheets: render the data dictionary as a spreadsheet-style
# grid figure and save it to a PDF.
library(broman)

# data dictionary -----------------------------------------------------------
pdf("../Figs/data_dict.pdf", height = 5, width = 10)

# Columns of the data dictionary, built separately for readability.
dict_name <- c("mouse", "sex", "sac_date", "partial_inflation", "coat_color",
               "crumblers", "diet_days")
dict_plot_name <- c("Mouse", "Sex", "Date of sac", "Partial inflation",
                    "Coat color", "Crumblers", "Days on diet")
dict_group <- c("demographic", "demographic", "demographic", "clinical",
                "demographic", "clinical", "clinical")
dict_description <- c("Animal identifier",
                      "Male (M) or Female (F)",
                      "Date mouse was sacrificed",
                      "Indicates if mouse showed partial pancreatic inflation",
                      "Coat color, by visual inspection",
                      "Indicates if mouse stored food in their bedding",
                      "Number of days on high-fat diet")

mat <- data.frame(name = dict_name,
                  plot_name = dict_plot_name,
                  group = dict_group,
                  description = dict_description,
                  stringsAsFactors = FALSE)

# Wide final column (400 px) holds the free-text descriptions.
excel_fig(mat, fig_width = 690, fig_height = 190,
          cellwidth = c(rep(130, 4), 400), direct2svg = FALSE)
dev.off()
| /R/data_dict.R | permissive | kbroman/Talk_DataCleaning2023 | R | false | false | 1,124 | r | # example spreadsheets
library(broman)
# data dictionary
pdf("../Figs/data_dict.pdf", height=5, width=10)
mat <- data.frame(name=c("mouse", "sex", "sac_date", "partial_inflation", "coat_color",
"crumblers", "diet_days"),
plot_name=c("Mouse", "Sex", "Date of sac", "Partial inflation", "Coat color",
"Crumblers", "Days on diet"),
group=c("demographic", "demographic", "demographic", "clinical", "demographic", "clinical", "clinical"),
description=c("Animal identifier", "Male (M) or Female (F)",
"Date mouse was sacrificed",
"Indicates if mouse showed partial pancreatic inflation",
"Coat color, by visual inspection",
"Indicates if mouse stored food in their bedding",
"Number of days on high-fat diet"),
stringsAsFactors=FALSE)
excel_fig(mat, fig_width=690, fig_height=190, cellwidth=c(rep(130, 4), 400), direct2svg=FALSE)
dev.off()
|
new_rand_eff_nor_nor_jm_weibull_deriv = "
data{
int<lower = 1> ntot;
int<lower = 1> ngroup;
int<lower = 1> p;
int<lower = 1> q;
int id[ntot];
vector[ntot] y;
matrix[ntot, p] x;
//matrix[ntot, q * ngroup] d;
matrix[ntot, q] d;
int<lower = 1> Q;
int<lower = 1> ntot_quad;
int d_ind[ngroup, 2];
int Q_ind[ngroup, 2];
vector<lower = 0.0>[ngroup] S;
int<lower = 1> ncol_c;
matrix[ngroup, ncol_c] c;
matrix[ntot_quad, ncol_c] c_quad;
matrix[ngroup, p] x_T;
matrix[ntot_quad, p] x_quad;
//matrix[ngroup, q * ngroup] d_T;
matrix[ngroup, q] d_T;
//matrix[ntot_quad, q * ngroup] d_quad;
matrix[ntot_quad, q] d_quad;
vector[ntot_quad] t_quad;
vector[ntot_quad] wt_quad;
int<lower = 1> p_deriv;
int<lower = 1> q_deriv;
matrix[ngroup, p_deriv] x_deriv_T;
matrix[ntot_quad, p_deriv] x_deriv_quad;
//matrix[ngroup, q_deriv * ngroup] d_deriv_T;
matrix[ngroup, q_deriv] d_deriv_T;
//matrix[ntot_quad, q_deriv * ngroup] d_deriv_quad;
matrix[ntot_quad, q_deriv] d_deriv_quad;
int deriv_alpha_ind[p_deriv];
int deriv_B_ind[q_deriv];
vector[p] alpha;
matrix[q, q] Sigma;
real sigma_Z;
real log_lambda;
real log_nu;
vector[ncol_c] omega;
real eta1;
real eta2;
}
transformed data{
vector[q] zero_B = rep_vector(0, q);
}
parameters{
//matrix[ngroup, q] B;
vector[q] B[ngroup];
}
transformed parameters{
//vector[ntot] linpred;
matrix[ngroup, q] Bmat;
vector[p_deriv] alpha_deriv;
matrix[ngroup, q_deriv] B_deriv;
//matrix[ngroup * q_deriv, 1] Bmat_deriv;
vector[ntot] d_B;
//vector[ngroup] d_T_B;
vector[ntot_quad] d_quad_B;
//vector[ngroup] d_deriv_T_B;
vector[ntot_quad] d_deriv_quad_B;
//vector[ngroup] lsd_expr1;
//vector[ngroup] lsd_expr1_bh;
//vector[ngroup] lsd_expr1_fix;
//vector[ngroup] lsd_expr1_ystar;
vector[ngroup] lsd_expr2;
vector[ntot_quad] lsd_expr2_quad;
vector[ntot_quad] lsd_expr2_quad_bh;
vector[ntot_quad] lsd_expr2_quad_fix;
vector[ntot_quad] lsd_expr2_quad_ystar;
vector[ntot_quad] lsd_expr2_quad_ystar_deriv;
//vector[ngroup] lsd;
//longitudinal sub-model
for(i in 1:q){
Bmat[, i] = to_vector(B[, i]);
}
for(i in 1:p_deriv) alpha_deriv[i] = alpha[deriv_alpha_ind[i]];
for(i in 1:q_deriv) B_deriv[, i] = Bmat[, deriv_B_ind[i]];
//Bmat_deriv = to_matrix(B_deriv', ngroup * q_deriv, 1);
for(i in 1:ngroup){
d_B[d_ind[i, 1]:d_ind[i, 2]] = d[d_ind[i, 1]:d_ind[i, 2], ] * to_vector(Bmat[i]);
d_quad_B[Q_ind[i, 1]:Q_ind[i, 2]] = d_quad[Q_ind[i, 1]:Q_ind[i, 2], ] * to_vector(Bmat[i]);
d_deriv_quad_B[Q_ind[i, 1]:Q_ind[i, 2]] = d_deriv_quad[Q_ind[i, 1]:Q_ind[i, 2]] * to_vector(B_deriv[i]);
}
//linpred = x * alpha + to_vector(d * Bmat);
//linpred = x * alpha + d_B;
//survival sub-model, lsd: log-survival density
//lsd_expr1_bh = log_lambda + log_nu + (exp(log_nu) - 1) * log(S);
//lsd_expr1_fix = c * omega;
//lsd_expr1_ystar = x_T * alpha + to_vector(d_T * Bmat);
//lsd_expr1 = E .* (lsd_expr1_bh + lsd_expr1_fix + rep_vector(eta, ngroup) .* lsd_expr1_ystar);
lsd_expr2_quad_bh = log_lambda + log_nu + (exp(log_nu) - 1) * log(t_quad);
lsd_expr2_quad_fix = c_quad * omega;
//lsd_expr2_quad_ystar = x_quad * alpha + to_vector(d_quad * Bmat);
lsd_expr2_quad_ystar = x_quad * alpha + d_quad_B;
//lsd_expr2_quad_ystar_deriv = x_deriv_quad * alpha_deriv + to_vector(d_deriv_quad * Bmat_deriv);
lsd_expr2_quad_ystar_deriv = x_deriv_quad * alpha_deriv + d_deriv_quad_B;
lsd_expr2_quad = wt_quad .* exp(lsd_expr2_quad_bh + lsd_expr2_quad_fix +
eta1 * lsd_expr2_quad_ystar +
eta2 * lsd_expr2_quad_ystar_deriv);
for(i in 1:ngroup){
lsd_expr2[i] = 0.5 * S[i] * sum(lsd_expr2_quad[Q_ind[i, 1]:Q_ind[i, 2]]);
}
//lsd_expr2 = -1.0 * lsd_expr2;
//lsd = lsd_expr1 - lsd_expr2;
}
model{
y ~ normal(x * alpha + d_B, sigma_Z);
B ~ multi_normal(zero_B, Sigma);
target += lsd_expr2;
}
" | /R/new_rand_eff_nor_nor_jm_weibull_deriv.R | no_license | ozgurasarstat/robjm | R | false | false | 3,828 | r | new_rand_eff_nor_nor_jm_weibull_deriv = "
data{
int<lower = 1> ntot;
int<lower = 1> ngroup;
int<lower = 1> p;
int<lower = 1> q;
int id[ntot];
vector[ntot] y;
matrix[ntot, p] x;
//matrix[ntot, q * ngroup] d;
matrix[ntot, q] d;
int<lower = 1> Q;
int<lower = 1> ntot_quad;
int d_ind[ngroup, 2];
int Q_ind[ngroup, 2];
vector<lower = 0.0>[ngroup] S;
int<lower = 1> ncol_c;
matrix[ngroup, ncol_c] c;
matrix[ntot_quad, ncol_c] c_quad;
matrix[ngroup, p] x_T;
matrix[ntot_quad, p] x_quad;
//matrix[ngroup, q * ngroup] d_T;
matrix[ngroup, q] d_T;
//matrix[ntot_quad, q * ngroup] d_quad;
matrix[ntot_quad, q] d_quad;
vector[ntot_quad] t_quad;
vector[ntot_quad] wt_quad;
int<lower = 1> p_deriv;
int<lower = 1> q_deriv;
matrix[ngroup, p_deriv] x_deriv_T;
matrix[ntot_quad, p_deriv] x_deriv_quad;
//matrix[ngroup, q_deriv * ngroup] d_deriv_T;
matrix[ngroup, q_deriv] d_deriv_T;
//matrix[ntot_quad, q_deriv * ngroup] d_deriv_quad;
matrix[ntot_quad, q_deriv] d_deriv_quad;
int deriv_alpha_ind[p_deriv];
int deriv_B_ind[q_deriv];
vector[p] alpha;
matrix[q, q] Sigma;
real sigma_Z;
real log_lambda;
real log_nu;
vector[ncol_c] omega;
real eta1;
real eta2;
}
transformed data{
vector[q] zero_B = rep_vector(0, q);
}
parameters{
//matrix[ngroup, q] B;
vector[q] B[ngroup];
}
transformed parameters{
//vector[ntot] linpred;
matrix[ngroup, q] Bmat;
vector[p_deriv] alpha_deriv;
matrix[ngroup, q_deriv] B_deriv;
//matrix[ngroup * q_deriv, 1] Bmat_deriv;
vector[ntot] d_B;
//vector[ngroup] d_T_B;
vector[ntot_quad] d_quad_B;
//vector[ngroup] d_deriv_T_B;
vector[ntot_quad] d_deriv_quad_B;
//vector[ngroup] lsd_expr1;
//vector[ngroup] lsd_expr1_bh;
//vector[ngroup] lsd_expr1_fix;
//vector[ngroup] lsd_expr1_ystar;
vector[ngroup] lsd_expr2;
vector[ntot_quad] lsd_expr2_quad;
vector[ntot_quad] lsd_expr2_quad_bh;
vector[ntot_quad] lsd_expr2_quad_fix;
vector[ntot_quad] lsd_expr2_quad_ystar;
vector[ntot_quad] lsd_expr2_quad_ystar_deriv;
//vector[ngroup] lsd;
//longitudinal sub-model
for(i in 1:q){
Bmat[, i] = to_vector(B[, i]);
}
for(i in 1:p_deriv) alpha_deriv[i] = alpha[deriv_alpha_ind[i]];
for(i in 1:q_deriv) B_deriv[, i] = Bmat[, deriv_B_ind[i]];
//Bmat_deriv = to_matrix(B_deriv', ngroup * q_deriv, 1);
for(i in 1:ngroup){
d_B[d_ind[i, 1]:d_ind[i, 2]] = d[d_ind[i, 1]:d_ind[i, 2], ] * to_vector(Bmat[i]);
d_quad_B[Q_ind[i, 1]:Q_ind[i, 2]] = d_quad[Q_ind[i, 1]:Q_ind[i, 2], ] * to_vector(Bmat[i]);
d_deriv_quad_B[Q_ind[i, 1]:Q_ind[i, 2]] = d_deriv_quad[Q_ind[i, 1]:Q_ind[i, 2]] * to_vector(B_deriv[i]);
}
//linpred = x * alpha + to_vector(d * Bmat);
//linpred = x * alpha + d_B;
//survival sub-model, lsd: log-survival density
//lsd_expr1_bh = log_lambda + log_nu + (exp(log_nu) - 1) * log(S);
//lsd_expr1_fix = c * omega;
//lsd_expr1_ystar = x_T * alpha + to_vector(d_T * Bmat);
//lsd_expr1 = E .* (lsd_expr1_bh + lsd_expr1_fix + rep_vector(eta, ngroup) .* lsd_expr1_ystar);
lsd_expr2_quad_bh = log_lambda + log_nu + (exp(log_nu) - 1) * log(t_quad);
lsd_expr2_quad_fix = c_quad * omega;
//lsd_expr2_quad_ystar = x_quad * alpha + to_vector(d_quad * Bmat);
lsd_expr2_quad_ystar = x_quad * alpha + d_quad_B;
//lsd_expr2_quad_ystar_deriv = x_deriv_quad * alpha_deriv + to_vector(d_deriv_quad * Bmat_deriv);
lsd_expr2_quad_ystar_deriv = x_deriv_quad * alpha_deriv + d_deriv_quad_B;
lsd_expr2_quad = wt_quad .* exp(lsd_expr2_quad_bh + lsd_expr2_quad_fix +
eta1 * lsd_expr2_quad_ystar +
eta2 * lsd_expr2_quad_ystar_deriv);
for(i in 1:ngroup){
lsd_expr2[i] = 0.5 * S[i] * sum(lsd_expr2_quad[Q_ind[i, 1]:Q_ind[i, 2]]);
}
//lsd_expr2 = -1.0 * lsd_expr2;
//lsd = lsd_expr1 - lsd_expr2;
}
model{
y ~ normal(x * alpha + d_B, sigma_Z);
B ~ multi_normal(zero_B, Sigma);
target += lsd_expr2;
}
" |
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(tidyverse)
library(ggpubr)
library(expm)
source("build_TD_Matrix.R")
source("VB.R")
source("strip_probability.R")
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Toeplitz Matrix"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
numericInput(
inputId = "n",
label = "Dimension of Matrix",
value = 5,
min = 3,
max = 100
),
numericInput(
inputId = "a1",
label = "sub-diagonal",
value = 0.5,
min = 0,
max = 1
),
numericInput(
inputId = "c1",
label = "super-diagonal",
value = 0.3,
min = 0,
max = 1
),
numericInput(
inputId = "b",
label = "diagonal",
value = 0.6,
min = 0,
max = 1
),
numericInput(
inputId = "k",
label = "Take power",
value = 1,
min = 1,
max = 10000
),
numericInput(
inputId = "nrow",
label = "row number",
value = 1,
min = 0,
max = 1000
),numericInput(
inputId = "ncol",
label = "column numberl",
value = 1,
min = 0,
max = 1000
),
numericInput(
inputId = "row_normalize",
label = "row to normalize",
value = 1,
min = 0,
max = 1000
)
),
# Show a plot of the generated distribution
mainPanel(
uiOutput('matrix'),
#uiOutput('time_power'),
uiOutput('vb'),
#uiOutput('time_vb')
uiOutput('strip_data')
)
)
)
# Define server logic required to draw a histogram
# Define server logic: recompute the matrix power, the strip probabilities
# and the single-entry algorithmic value (VB) whenever the inputs change.
server <- function(input, output) {

  # Tridiagonal Toeplitz matrix raised to the requested power, with timing.
  matrix_data <- reactive({
    t0 <- Sys.time()
    m <- build_TD_Matrix(input$n,
                         input$a1, input$a1,
                         input$b,
                         input$c1, input$c1)
    m <- m %^% input$k
    elapsed <- format(Sys.time() - t0)
    tibble(matrix = m, time = elapsed)
  })

  # Strip-probability table for the selected row to normalize.
  strip_data <- reactive({
    strip_probability(input$c1, input$a1, input$b,
                      input$n, input$k, input$row_normalize)
  })

  # Single entry of the k-th matrix power computed via the VB algorithm.
  vb_data <- reactive({
    t0 <- Sys.time()
    v <- VB(input$k, input$n,
            input$nrow, input$ncol,
            input$c1, input$a1, input$b)
    elapsed <- format(Sys.time() - t0)
    tibble(vb = v, time = elapsed)
  })

  output$matrix <- renderTable({
    matrix_data()$matrix
  })

  output$vb <- renderText({
    paste("value using algorithm of the", input$k,
          "th power in row", input$nrow,
          "and column", input$ncol, "is",
          vb_data()$vb)
  })

  output$strip_data <- renderTable({
    strip_data()
  })
}
# Run the application
shinyApp(ui = ui, server = server)
| /toeplitz_matrix/app.R | no_license | JeremyJosephLin/markovcpp | R | false | false | 4,677 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinydashboard)
library(tidyverse)
library(ggpubr)
library(expm)
source("build_TD_Matrix.R")
source("VB.R")
source("strip_probability.R")
# Define UI for application that draws a histogram
ui <- fluidPage(
# Application title
titlePanel("Toeplitz Matrix"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
numericInput(
inputId = "n",
label = "Dimension of Matrix",
value = 5,
min = 3,
max = 100
),
numericInput(
inputId = "a1",
label = "sub-diagonal",
value = 0.5,
min = 0,
max = 1
),
numericInput(
inputId = "c1",
label = "super-diagonal",
value = 0.3,
min = 0,
max = 1
),
numericInput(
inputId = "b",
label = "diagonal",
value = 0.6,
min = 0,
max = 1
),
numericInput(
inputId = "k",
label = "Take power",
value = 1,
min = 1,
max = 10000
),
numericInput(
inputId = "nrow",
label = "row number",
value = 1,
min = 0,
max = 1000
),numericInput(
inputId = "ncol",
label = "column numberl",
value = 1,
min = 0,
max = 1000
),
numericInput(
inputId = "row_normalize",
label = "row to normalize",
value = 1,
min = 0,
max = 1000
)
),
# Show a plot of the generated distribution
mainPanel(
uiOutput('matrix'),
#uiOutput('time_power'),
uiOutput('vb'),
#uiOutput('time_vb')
uiOutput('strip_data')
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
matrix_data <- reactive({
start_time <- Sys.time()
matrix <- build_TD_Matrix(input$n,
input$a1,
input$a1,
input$b,
input$c1,
input$c1
)
matrix <- matrix %^% input$k
end_time <- Sys.time()
time_diff <- format(end_time - start_time)
return(tibble(
matrix = matrix,
time = time_diff
))
})
strip_data <- reactive({
strip_probability(input$c1,
input$a1,
input$b,
input$n,
input$k,
input$row_normalize
)
})
vb_data <- reactive({
start_time <- Sys.time()
vb_value <- VB(
input$k,
input$n,
input$nrow,
input$ncol,
input$c1,
input$a1,
input$b
)
end_time <- Sys.time()
time_diff_vb <- format(end_time - start_time)
return(tibble(
vb = vb_value,
time = time_diff_vb
))
})
output$matrix <- renderTable({
matrix_data()$matrix
})
# output$time_power <- renderText({
#
# paste("Time required for calculation",matrix_data()$time[1])
# })
output$vb <- renderText({
paste("value using algorithm of the",input$k,
"th power in row", input$nrow,
"and column", input$ncol, "is",
vb_data()$vb)
})
# output$time_vb <- renderText({
#
# paste("Time required for calculation",vb_data()$time)
# })
output$strip_data <- renderTable({
strip_data()
})
# output$time_power <- renderText({
#
# paste("Time required for calculation",matrix_data()$time[1])
# })
}
# Run the application
shinyApp(ui = ui, server = server)
|
library(dplyr)

# 1. Linear Regression to Predict MPG ---------------------------------------

# Read the vehicle test results as the data frame MechaCar_table.
MechaCar_table <- read.csv('MechaCar_mpg.csv', stringsAsFactors = F)

# Multiple linear regression of mpg on all candidate predictors.
MechaCar_multiple_regression <- lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD, data = MechaCar_table)
summary(MechaCar_multiple_regression)
# Significant predictors (p < 0.05):
#   vehicle_length    2.60e-12 ***
#   ground_clearance  5.21e-08 ***

# 2. Suspension coil summary statistics -------------------------------------
Suspension_coil_table <- read.csv('Suspension_Coil.csv', check.names = F, stringsAsFactors = F)

# Fix: summarize() was previously called with no data and no arguments,
# which errors at run time. Summarise PSI across every coil tested...
total_summary <- Suspension_coil_table %>%
  summarize(Mean = mean(PSI), Median = median(PSI), Variance = var(PSI), SD = sd(PSI))

# ...and per manufacturing lot.
lot_summary <- Suspension_coil_table %>%
  group_by(Manufacturing_Lot) %>%
  summarise(Mean = mean(PSI), Median = median(PSI), Variance = var(PSI), SD = sd(PSI))

total_summary
lot_summary

# 3. T-tests against the population mean of 1500 PSI -------------------------
# Fix: t.test()'s default method has no `subset` argument, so the original
# per-lot calls silently ignored it and tested the full sample each time.
# Subset the PSI vector explicitly instead.
t.test(Suspension_coil_table$PSI, mu = 1500)
t.test(Suspension_coil_table$PSI[Suspension_coil_table$Manufacturing_Lot == 'Lot1'], mu = 1500)
t.test(Suspension_coil_table$PSI[Suspension_coil_table$Manufacturing_Lot == 'Lot2'], mu = 1500)
t.test(Suspension_coil_table$PSI[Suspension_coil_table$Manufacturing_Lot == 'Lot3'], mu = 1500)
| /data/MechaCar_Challenge.R | no_license | DenverSherman/MechaCar_Statistical_Analysis | R | false | false | 1,319 | r | library(dplyr)
# 1. Linear Regression to Predict MPG
# read data file as dataframe MechaCar_table
MechaCar_table <- read.csv('MechaCar_mpg.csv', stringsAsFactors = F)
# linear regression model
# MechaCar_linear_regression_AWD <- lm(mpg ~ AWD, data=MechaCar_table)
# summary(MechaCar_linear_regression_AWD)
# multiple regression model
MechaCar_multiple_regression <- lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD ,data=MechaCar_table)
summary(MechaCar_multiple_regression)
# Significance
#vehicle_length 6.2 .6553 2.60e-12 ***
# ground_clearance 3.5 .5412 5.21e-08 ***
# suspension coil summary statistics
Suspension_coil_table <- read.csv('Suspension_Coil.csv', check.names = F, stringsAsFactors = F)
total_summary <- summarize()
lot_summary <- Suspension_coil_table %>%
group_by(Manufacturing_Lot) %>%
summarise(Mean = mean(PSI), Median = median(PSI), Variance = var(PSI), SD = sd(PSI))
total_summary
lot_summary
t.test(Suspension_coil_table$PSI, mu=1500)
t.test(Suspension_coil_table$PSI, mu=1500, subset=Suspension_coil_table$Manufacturing_Lot == 'Lot1')
t.test(Suspension_coil_table$PSI, mu=1500, subset=Suspension_coil_table$Manufacturing_Lot == 'Lot2')
t.test(Suspension_coil_table$PSI, mu=1500, subset=Suspension_coil_table$Manufacturing_Lot == 'Lot3')
|
# Summarize bugs() MCMC results: used with summary.rube(..., drop > 0).
# arr: the sims.array component of a bugs object, with dimensions
#   (iterations, chains, parameters).
# Returns a matrix with one row per parameter and columns mean, sd and the
# 2.5/25/50/75/97.5 percent quantiles. With multiple chains, the mean/sd
# are precision-weighted pools across chains and an Rhat column is added.
summarize <- function(arr) {
  if (dim(arr)[2] == 1) {
    # Single chain: plain per-parameter summaries of the draws.
    bsummary <- function(x)
      return(c(mean(x), sd(x), quantile(x, c(0.025, 0.25, 0.50, 0.75, 0.975))))
    # Fix: coerce explicitly to a matrix so a single-parameter array
    # (where arr[, 1, ] would drop to a bare vector) still works.
    draws <- matrix(arr[, 1, ], nrow = dim(arr)[1],
                    dimnames = list(NULL, dimnames(arr)[[3]]))
    rtn <- t(apply(draws, 2, bsummary))
    colnames(rtn) <- c("mean", "sd", "2.5%", "25%", "50%", "75%", "97.5%")
  } else {
    rtn <- matrix(NA, dim(arr)[3], 7, dimnames = list(dimnames(arr)[[3]], NULL))
    for (i in seq_len(dim(arr)[3])) {
      means <- apply(arr[, , i], 2, mean)
      vars <- apply(arr[, , i], 2, var)
      wts <- 1 / vars / sum(1 / vars)        # precision weights per chain
      rtn[i, 1] <- sum(means * wts)          # pooled mean
      rtn[i, 2] <- sqrt(sum(vars * wts))     # pooled sd
    }
    # Quantiles are taken over all chains pooled together.
    collapsed <- as.list(as.data.frame(apply(arr, 3, as.numeric)))
    rtn[, 3:7] <- t(sapply(collapsed, quantile,
                           probs = c(0.025, 0.25, 0.50, 0.75, 0.975)))
    # Rhat() is defined elsewhere in this package -- TODO confirm signature.
    rtn <- cbind(rtn, Rhat(arr))
    colnames(rtn) <- c("mean", "sd", "2.5%", "25%", "50%", "75%", "97.5%", "Rhat")
  }
  return(rtn)
}
| /source/summarize.R | no_license | zuojung/rube_research | R | false | false | 1,022 | r | # summarize bugs() results: used with summary.rube(...,drop>0)
# obj is sims.array component of a bugs object
summarize <- function(arr) {
if (dim(arr)[2]==1) {
bsummary <- function(x)
return(c(mean(x), sd(x), quantile(x,c(0.025,0.25,0.50,0.75,0.975))))
rtn <- t(apply(arr[,1,],2,bsummary))
colnames(rtn) <- c("mean", "sd", "2.5%", "25%", "50%", "75%", "97.5%")
} else {
rtn <- matrix(NA, dim(arr)[3], 7, dimnames=list(dimnames(arr)[[3]],NULL))
n <- dim(arr)[1]
for (i in 1:dim(arr)[3]) {
means <- apply(arr[,,i], 2, mean)
vars <- apply(arr[,,i], 2, var)
wts <- 1/vars / sum(1/vars)
rtn[i,1] <- sum(means*wts) # mean
rtn[i,2] <- sqrt(sum(vars*wts)) # sd
}
collapsed <- as.list(as.data.frame(apply(arr, 3, as.numeric)))
rtn[,3:7] <- t(sapply(collapsed, quantile, probs = c(0.025,0.25,0.50,0.75,0.975)))
rtn <- cbind(rtn, Rhat(arr))
colnames(rtn) <- c("mean", "sd", "2.5%", "25%", "50%", "75%", "97.5%", "Rhat")
}
return(rtn)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/datasets.r
\docType{data}
\name{sofronts}
\alias{sofronts}
\title{Southern ocean climatologic fronts}
\format{A data frame of 9212 rows and 3 variables.}
\usage{
data(sofronts)
}
\description{
This dataset contains the coordinates of the the major front of the
southern ocean: SACCF (Southern Antarctic Circumpolar Current Front),
PF (Polar Front), SAF (Sub-Antarctic Front),
SSTF (Southern Sub Tropical Front) and NSTF.
}
\details{
The SSTF position is defined by the intersection
between the 11 deg. C isotherm and the 150 m isobath. It represents the limits between the northern
warm and salty waters (Atlantic, Indian and Pacific oceans) and the colder and less salty
water from the sub-antarctic area. It is considered as the northern limit of the antarctic circumpolar
current (but not of the Southern Ocean which has as been arbitrarily defined to the 40 deg. S
The SAF position is defined by the maximum meridian temperature
gradient between 3 deg. and 8 deg. C at 300 m. It is associated with strong currents. Around
Kerguelen it deviates to the north, thus reducing the extent of the sub-antarctic area and increasing
that of the polar-frontal area.
The PF position is defined as the northern limit of the minimum subsurface
temperature lower than 2 deg. C. It represents the lower trace of the winter-mixed layer, which
the upper part warms up during summer. It is associated with strong currents.
The SACCF front is defined as the southern limit of the Antarctic circumpolar
current. It represents the border between the antarctic
(to the north) and continental (to the south) areas.
Content:
\itemize{
\item Lon Longitude
\item Lat Latitude
\item name The front name.
}
}
\references{
Belkin, I.M. (1988) Main hydrological features of the Central South Pacific, in: Ecosystems of the
Subantarctic Zone of the Pacific Ocean, edited by M.E. Vinogradov and M.V. Flint, Nauka,
Moscow, 21-28 [Translated as "Pacific Subantarctic Ecosystems", pp.12-17, New Zealand Translation Centre Ltd., Wellington, 1996].
Belkin, I.M. (1993) Frontal structure of the South Atlantic, in: Pelagic Ecosystems of the
Southern Ocean, edited by N.M. Voronina, pp. 40-53 (in Russian), Nauka, Moscow.
Belkin, I.M., and A.L. Gordon (1996) Southern Ocean fronts from the Greenwich
meridian to Tasmania, J. Geophys. Res., 101(C2), 3675-3696.
Wessel, P., and W. H. F. Smith (1996) A Global Self-consistent, Hierarchical,
High-resolution Shoreline Database, J. Geophys. Res., 101(B4), 8741-8743.
}
\seealso{
\code{\link{front}}
}
\keyword{datasets}
| /man/sofronts.Rd | permissive | SWotherspoon/rbl | R | false | false | 2,632 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/datasets.r
\docType{data}
\name{sofronts}
\alias{sofronts}
\title{Southern ocean climatologic fronts}
\format{A data frame of 9212 rows and 3 variables.}
\usage{
data(sofronts)
}
\description{
This dataset contains the coordinates of the the major front of the
southern ocean: SACCF (Southern Antarctic Circumpolar Current Front),
PF (Polar Front), SAF (Sub-Antarctic Front),
SSTF (Southern Sub Tropical Front) and NSTF.
}
\details{
The SSTF position is defined by the intersection
between the 11 deg. C isotherm and the 150 m isobath. It represents the limits between the northern
warm and salty waters (Atlantic, Indian and Pacific oceans) and the colder and less salty
water from the sub-antarctic area. It is considered as the northern limit of the antarctic circumpolar
current (but not of the Southern Ocean which has as been arbitrarily defined to the 40 deg. S
The SAF position is defined by the maximum meridian temperature
gradient between 3 deg. and 8 deg. C at 300 m. It is associated with strong currents. Around
Kerguelen it deviates to the north, thus reducing the extent of the sub-antarctic area and increasing
that of the polar-frontal area.
The PF position is defined as the northern limit of the minimum subsurface
temperature lower than 2 deg. C. It represents the lower trace of the winter-mixed layer, which
the upper part warms up during summer. It is associated with strong currents.
The SACCF front is defined as the southern limit of the Antarctic circumpolar
current. It represents the border between the antarctic
(to the north) and continental (to the south) areas.
Content:
\itemize{
\item Lon Longitude
\item Lat Latitude
\item name The front name.
}
}
\references{
Belkin, I.M. (1988) Main hydrological features of the Central South Pacific, in: Ecosystems of the
Subantarctic Zone of the Pacific Ocean, edited by M.E. Vinogradov and M.V. Flint, Nauka,
Moscow, 21 28 [Translated as "Pacific Subantarctic Ecosystems", pp.12-17, New Zealand Translation Centre Ltd., Wellington, 1996].
Belkin, I.M. (1993) Frontal structure of the South Atlantic, in: Pelagic Ecosystems of the
Southern Ocean, edited by N.M. Voronina, pp. 40 53 (in Russian), Nauka, Moscow.
Belkin, I.M., and A.L. Gordon (1996) Southern Ocean fronts from the Greenwich
meridian to Tasmania, J. Geophys. Res., 101(C2), 3675-3696.
Wessel, P., and W. H. F. Smith (1996) A Global Self-consistent, Hierarchical,
High-resolution Shoreline Database, J. Geophys. Res., 101(B4), 8741-8743.
}
\seealso{
\code{\link{front}}
}
\keyword{datasets}
|
#' The class uwmwEstimate
#'
#' This class represents an estimate object resulting from a call to \code{\link{getEstimate}}. It contains all information about the estimate, including standard errors and confidence intervals if requested. For this class a number of methods is foreseen, including the accessors for the slots. The class is sortable and can be indexed, so you can use this for making custom forest plots using the function \code{\link{forestplot}}.
#'
#' @section Slots:
#' \describe{
#' \item{\code{esttype}:}{object of class \code{"character"}, containing the estimate type. This can be logor for log odds ratio, or for odds ratio, odds, logodds for the log odds or p for the probability.}
#' \item{\code{names}:}{object of class \code{"character"}, containing the names of the genes for which the estimates are calculated.}
#' \item{\code{est}:}{object of class \code{"numeric"}, containing the estimates itself.}
#' \item{\code{se}:}{object of class \code{"numeric"}, containing the estimates for the standard error, if applicable.}
#' \item{\code{ll}:}{object of class \code{"numeric"}, containing the lower limit of the confidence interval.}
#' \item{\code{ul}:}{object of class \code{"numeric"}, containing the upper limit of the confidence interval.}
#' \item{\code{refest}:}{object of class \code{"numeric"}, containing the estimate for the reference used in the analysis. Note that this only makes sense for log odds, odds and probabilities.}
#' \item{\code{refse}:}{object of class \code{"numeric"}, containing the se estimate for the reference if applicable.}
#' \item{\code{refll}:}{object of class \code{"numeric"}, containing the lower limit for the reference if applicable.}
#' \item{\code{reful}:}{object of class \code{"numeric"}, containing the upper limit for the reference if applicable.}
#' \item{\code{type}:}{vector of class \code{"character"}, containing the type of reference used in the original analysis. This can be either "O" or "H" for Overall respectively Housekeeping Expression as reference.}
#'   \item{\code{confint}:}{vector of class \code{"numeric"}, indicating the limit used for the confidence interval. 0.95 represents the 95\% confidence interval.}
#' \item{\code{housekeeping}:}{object of class \code{"character"}, containing either NULL or the names of the housekeeping genes used in the H version of \code{\link{uWMW}}.}
#' \item{\code{groupinfo}:}{character vector of length 2, indicating the groups. This slot is mainly used to show how the probabilistic indices are calculated.}
#' }
#'
#' @note For this class, \code{\link{show}} and \code{\link{length}} methods are defined. \code{\link{length}} will give you the number of features.
#'
#' @name uwmwEstimate-class
#' @rdname uwmwEstimate-class
#' @aliases uwmwEstimate
#' @exportClass uwmwEstimate
#' @author Joris Meys
setClass("uwmwEstimate",
representation=list(esttype = 'character',
names = 'character',
est = 'numeric',
se = 'numeric',
ll = 'numeric',
ul = 'numeric',
refest = 'numeric',
refse = 'numeric',
refll = 'numeric',
reful = 'numeric',
type = 'character',
confint = 'numeric',
housekeeping = 'character',
groupinfo = 'character'
),
prototype=list(esttype = character(0),
names = character(0),
est = numeric(0),
se = numeric(0),
ll = numeric(0),
ul = numeric(0),
refest = numeric(0),
refse = numeric(0),
refll = numeric(0),
reful = numeric(0),
type = character(0),
confint = numeric(0),
housekeeping = character(0),
groupinfo = character(0)
),
validity=function(object){
esttype <- object@esttype
type <- object@type
refest <- object@refest
refse <- object@refse
refll <- object@refll
reful <- object@reful
if(!esttype %in% c("or","logor","odds","logodds","p"))
return("esttype should be one of or, logor, odds, logodds or p.")
if(!type %in% c("H","O"))
return("type should be either H or O.")
else if(type =="H" && is.null(object@housekeeping))
return("housekeeping genes have to be provided when type is H.")
if(esttype %in% c("or","logor"))
if(!all(is.na(c(refest,refse,refll,reful))))
return("Reference value makes no sense for odds ratio or logor.")
if(length(object@confint) > 1)
return("invalid specification for confint.")
if(object@confint > 1 | object@confint < 0)
return("confint should be between 0 and 1.")
if(length(object@groupinfo) != 2)
return("uwmwEstimate groupinfo must be of length 2.")
ns <- slotNames(object)[2:6]
lengths <- sapply(ns,function(i)length(slot(object,i)))
if(length(unique(lengths))==1) TRUE else
"The dimensions of the slots are incompatible."
}
)
# Constructor method (internal): assemble a uwmwEstimate object.
# The numeric reference slots default to NA (logical) and are coerced via
# as.numeric(), together with se and confint, before new() runs the class
# validity check.
uwmwEstimate <- function(esttype, names, est, se, ll, ul,
                         refest = NA, refse = NA, refll = NA, reful = NA,
                         type, confint, housekeeping = character(0),
                         groupinfo) {
  # Coerce the four reference slots in one pass.
  ref_slots <- lapply(list(refest = refest, refse = refse,
                           refll = refll, reful = reful), as.numeric)
  do.call(new, c(list("uwmwEstimate",
                      esttype = esttype,
                      names = names,
                      est = est,
                      se = as.numeric(se),
                      ll = ll,
                      ul = ul,
                      type = type,
                      confint = as.numeric(confint),
                      housekeeping = housekeeping,
                      groupinfo = groupinfo),
                 ref_slots))
}
| /R/uwmwEstimate_Class.R | no_license | CenterForStatistics-UGent/unifiedWMWqPCR | R | false | false | 7,008 | r | #' The class uwmwEstimate
#'
#' This class represents an estimate object resulting from a call to \code{\link{getEstimate}}. It contains all information about the estimate, including standard errors and confidence intervals if requested. For this class a number of methods is foreseen, including the accessors for the slots. The class is sortable and can be indexed, so you can use this for making custom forest plots using the function \code{\link{forestplot}}.
#'
#' @section Slots:
#' \describe{
#' \item{\code{esttype}:}{object of class \code{"character"}, containing the estimate type. This can be logor for log odds ratio, or for odds ratio, odds, logodds for the log odds or p for the probability.}
#' \item{\code{names}:}{object of class \code{"character"}, containing the names of the genes for which the estimates are calculated.}
#' \item{\code{est}:}{object of class \code{"numeric"}, containing the estimates itself.}
#' \item{\code{se}:}{object of class \code{"numeric"}, containing the estimates for the standard error, if applicable.}
#' \item{\code{ll}:}{object of class \code{"numeric"}, containing the lower limit of the confidence interval.}
#' \item{\code{ul}:}{object of class \code{"numeric"}, containing the upper limit of the confidence interval.}
#' \item{\code{refest}:}{object of class \code{"numeric"}, containing the estimate for the reference used in the analysis. Note that this only makes sense for log odds, odds and probabilities.}
#' \item{\code{refse}:}{object of class \code{"numeric"}, containing the se estimate for the reference if applicable.}
#' \item{\code{refll}:}{object of class \code{"numeric"}, containing the lower limit for the reference if applicable.}
#' \item{\code{reful}:}{object of class \code{"numeric"}, containing the upper limit for the reference if applicable.}
#' \item{\code{type}:}{vector of class \code{"character"}, containing the type of reference used in the original analysis. This can be either "O" or "H" for Overall respectively Housekeeping Expression as reference.}
#' \item{\code{confint}:}{vector of class \code{"numeric"}, indicating the limit used for the confidence interval. 0.95 represents the 95\% confidence interval.}
#' \item{\code{housekeeping}:}{object of class \code{"character"}, containing either NULL or the names of the housekeeping genes used in the H version of \code{\link{uWMW}}.}
#' \item{\code{groupinfo}:}{character vector of length 2, indicating the groups. This slot is mainly used to show how the probabilistic indices are calculated.}
#' }
#'
#' @note For this class, \code{\link{show}} and \code{\link{length}} methods are defined. \code{\link{length}} will give you the number of features.
#'
#' @name uwmwEstimate-class
#' @rdname uwmwEstimate-class
#' @aliases uwmwEstimate
#' @exportClass uwmwEstimate
#' @author Joris Meys
setClass("uwmwEstimate",
         representation=list(esttype = 'character',
                             names = 'character',
                             est = 'numeric',
                             se = 'numeric',
                             ll = 'numeric',
                             ul = 'numeric',
                             refest = 'numeric',
                             refse = 'numeric',
                             refll = 'numeric',
                             reful = 'numeric',
                             type = 'character',
                             confint = 'numeric',
                             housekeeping = 'character',
                             groupinfo = 'character'
         ),
         prototype=list(esttype = character(0),
                        names = character(0),
                        est = numeric(0),
                        se = numeric(0),
                        ll = numeric(0),
                        ul = numeric(0),
                        refest = numeric(0),
                        refse = numeric(0),
                        refll = numeric(0),
                        reful = numeric(0),
                        type = character(0),
                        confint = numeric(0),
                        housekeeping = character(0),
                        groupinfo = character(0)
         ),
         # Validity function: returns TRUE for a consistent object, otherwise
         # a character message describing the first problem found.
         validity=function(object){
           esttype <- object@esttype
           type <- object@type
           # Scalar slots must hold exactly one value; checking the length
           # first also avoids `if(logical(0))` errors on empty slots.
           if(length(esttype) != 1 || !esttype %in% c("or","logor","odds","logodds","p"))
             return("esttype should be one of or, logor, odds, logodds or p.")
           if(length(type) != 1 || !type %in% c("H","O"))
             return("type should be either H or O.")
           # BUG FIX: a slot typed 'character' is never NULL, so the former
           # is.null() test was always FALSE and the H-type check never
           # fired. An absent housekeeping specification is a length-0 vector.
           if(type == "H" && length(object@housekeeping) == 0)
             return("housekeeping genes have to be provided when type is H.")
           # Reference values are meaningless for (log) odds ratios.
           if(esttype %in% c("or","logor") &&
              !all(is.na(c(object@refest, object@refse, object@refll, object@reful))))
             return("Reference value makes no sense for odds ratio or logor.")
           # confint must be a single proportion (a length-0 value used to
           # crash the range check below instead of reporting a message).
           if(length(object@confint) != 1)
             return("invalid specification for confint.")
           if(object@confint > 1 || object@confint < 0)
             return("confint should be between 0 and 1.")
           if(length(object@groupinfo) != 2)
             return("uwmwEstimate groupinfo must be of length 2.")
           # names/est/se/ll/ul must all have one entry per feature.
           ns <- slotNames(object)[2:6]
           slot_lens <- vapply(ns, function(i) length(slot(object, i)), integer(1))
           if(length(unique(slot_lens)) == 1) TRUE else
             "The dimensions of the slots are incompatible."
         }
)
# Constructor method
#
# User-facing constructor for 'uwmwEstimate' objects: forwards its
# arguments to new(), coercing se, confint and the four reference-value
# arguments to numeric so the logical NA defaults fit the numeric slots.
uwmwEstimate <- function(esttype, names, est, se, ll, ul,
                         refest = NA, refse = NA, refll = NA, reful = NA,
                         type, confint, housekeeping = character(0),
                         groupinfo) {
  # as.numeric() turns the NA (logical) defaults into NA_real_.
  new("uwmwEstimate",
      esttype      = esttype,
      names        = names,
      est          = est,
      se           = as.numeric(se),
      ll           = ll,
      ul           = ul,
      refest       = as.numeric(refest),
      refse        = as.numeric(refse),
      refll        = as.numeric(refll),
      reful        = as.numeric(reful),
      type         = type,
      confint      = as.numeric(confint),
      housekeeping = housekeeping,
      groupinfo    = groupinfo)
}
|
################################################################################
# COVID-19 GRAPHER
#
# This code should
# -- Download the latest data from ECDC
# -- Creates graphs about the development in countries of choice regarding
# - Number of cases since 100 cases
# - Number of deaths since 10 deaths
#
# Author: Viking Waldén
#
# NB: If the ECDC hasn't updated their data today, the code download won't work.
# It's then easiest to manually download the data and run it from there.
#
################################################################################
#-------------------------------------------------------------------------------
# PREAMBLE
#-------------------------------------------------------------------------------
# Start from a clean workspace.
# NOTE(review): rm(list = ls()) silently wipes the session of whoever
# sources this script; generally discouraged outside throwaway scripts.
rm(list = ls())
# Needed packages ("tidyverse" supplies the dplyr/ggplot2 calls used below)
packages <- c("rio",
              "data.table",
              "httr",
              "readxl",
              "tidyverse")
# Attach package `x`, installing it from CRAN first when it is missing.
install_load <- function(x) {
  # require() returns FALSE (with a warning) instead of erroring when the
  # package is absent, which is exactly the signal needed here.
  if (require(x, character.only = TRUE)) {
    return(invisible(NULL))
  }
  install.packages(x, dependencies = TRUE)
  library(x, character.only = TRUE)
}
lapply(packages, install_load)
#-------------------------------------------------------------------------------
# AUTOMATIC DOWNLOAD
#-------------------------------------------------------------------------------
# # URL with daily aupdate
# url <- paste("https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-",format(Sys.time(), "%Y-%m-%d"), ".xlsx", sep = "")
#
# # Download data to local temp file
# GET(url, authenticate(":", ":", type="ntlm"), write_disk(tf <- tempfile(fileext = ".xlsx")))
#
# # Import data
# data <- read_excel(tf) %>%
# arrange(GeoId, DateRep)
#-------------------------------------------------------------------------------
# MANUAL DOWNLOAD
#-------------------------------------------------------------------------------
# NOTE(review): hard-coded local Windows path -- point this at your own copy
# of the ECDC spreadsheet (or use the automatic-download section above)
# before running.
data <- import("C:/Users/vi.3590/Desktop/Corona/data.xlsx") %>%
  arrange(GeoId, DateRep)
#-------------------------------------------------------------------------------
# DEATH AND CASE DATASETS
#-------------------------------------------------------------------------------
# Create cumulative cases and deaths per country (GeoId).
dt <- data.table(data)
dt[, tot_cases := cumsum(Cases), by = list(GeoId)]
dt[, tot_deaths := cumsum(Deaths), by = list(GeoId)]
# Select countries of interest (%in% replaces the chained `==`/`|` tests).
dt <- dt %>%
  filter(GeoId %in% c("SE", "NO", "DK", "FI", "IT"))
# Death dataset: only biweeks at/after the 10th cumulative death.
deaths <- dt %>%
  filter(tot_deaths >= 10) %>%
  data.table
# Days since 10 dead; seq_len(.N) is the safe idiom (the former seq(1:.N)
# only worked via seq()'s seq_along fallback).
deaths[, days_since_10_dead := seq_len(.N), by = list(GeoId)]
# Cases dataset: only days at/after the 100th cumulative case.
cases <- dt %>%
  filter(tot_cases >= 100) %>%
  data.table
# Days since 100 cases.
cases[, days_since_100_cases := seq_len(.N), by = list(GeoId)]
#-------------------------------------------------------------------------------
# GRAPHS
#-------------------------------------------------------------------------------
# Cases
## Total cumulative cases since the 100th case (log10 y scale)
cases %>%
  filter(days_since_100_cases <= 20) %>%
  ggplot(aes(x = days_since_100_cases, y = tot_cases, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "Total cases", trans = "log10") +
  scale_x_continuous(name = "Days since 100 cases") +
  ggtitle("Total Covid-19 cases") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
## Per day (BUG FIX: the y axis was labelled "Total cases" although daily
## new cases are plotted)
cases %>%
  filter(days_since_100_cases <= 20) %>%
  ggplot(aes(x = days_since_100_cases, y = Cases, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "New cases per day", trans = "log10") +
  scale_x_continuous(name = "Days since 100 cases") +
  ggtitle("New Covid-19 cases per day") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
# Deaths
## Total cumulative deaths since the 10th death (log10 y scale)
deaths %>%
  filter(days_since_10_dead <= 14) %>%
  ggplot(aes(x = days_since_10_dead, y = tot_deaths, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "Total deaths", trans = "log10") +
  scale_x_continuous(name = "Days since 10 deaths") +
  ggtitle("Total Covid-19 deaths") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
## Per day (BUG FIX: y-axis label, as above)
deaths %>%
  filter(days_since_10_dead <= 14) %>%
  ggplot(aes(x = days_since_10_dead, y = Deaths, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "New deaths per day", trans = "log10") +
  scale_x_continuous(name = "Days since 10 deaths") +
  ggtitle("New Covid-19 deaths") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
| /do/covid_grapher/covid_grapher.R | no_license | cschroe/covid19 | R | false | false | 4,917 | r | ################################################################################
# COVID-19 GRAPHER
#
# This code should
# -- Download the latest data from ECDC
# -- Creates graphs about the development in countries of choice regarding
# - Number of cases since 100 cases
# - Number of deaths since 10 deaths
#
# Author: Viking Waldén
#
# NB: If the ECDC hasn't updated their data today, the code download won't work.
# It's then easiest to manually download the data and run it from there.
#
################################################################################
#-------------------------------------------------------------------------------
# PREAMBLE
#-------------------------------------------------------------------------------
# Start from a clean workspace.
# NOTE(review): rm(list = ls()) silently wipes the session of whoever
# sources this script; generally discouraged outside throwaway scripts.
rm(list = ls())
# Needed packages ("tidyverse" supplies the dplyr/ggplot2 calls used below)
packages <- c("rio",
              "data.table",
              "httr",
              "readxl",
              "tidyverse")
# Attach package `x`, installing it from CRAN first when it is missing.
install_load <- function(x) {
  # require() returns FALSE (with a warning) instead of erroring when the
  # package is absent, which is exactly the signal needed here.
  if (require(x, character.only = TRUE)) {
    return(invisible(NULL))
  }
  install.packages(x, dependencies = TRUE)
  library(x, character.only = TRUE)
}
lapply(packages, install_load)
#-------------------------------------------------------------------------------
# AUTOMATIC DOWNLOAD
#-------------------------------------------------------------------------------
# # URL with daily aupdate
# url <- paste("https://www.ecdc.europa.eu/sites/default/files/documents/COVID-19-geographic-disbtribution-worldwide-",format(Sys.time(), "%Y-%m-%d"), ".xlsx", sep = "")
#
# # Download data to local temp file
# GET(url, authenticate(":", ":", type="ntlm"), write_disk(tf <- tempfile(fileext = ".xlsx")))
#
# # Import data
# data <- read_excel(tf) %>%
# arrange(GeoId, DateRep)
#-------------------------------------------------------------------------------
# MANUAL DOWNLOAD
#-------------------------------------------------------------------------------
# NOTE(review): hard-coded local Windows path -- point this at your own copy
# of the ECDC spreadsheet (or use the automatic-download section above)
# before running.
data <- import("C:/Users/vi.3590/Desktop/Corona/data.xlsx") %>%
  arrange(GeoId, DateRep)
#-------------------------------------------------------------------------------
# DEATH AND CASE DATASETS
#-------------------------------------------------------------------------------
# Create cumulative cases and deaths per country (GeoId).
dt <- data.table(data)
dt[, tot_cases := cumsum(Cases), by = list(GeoId)]
dt[, tot_deaths := cumsum(Deaths), by = list(GeoId)]
# Select countries of interest (%in% replaces the chained `==`/`|` tests).
dt <- dt %>%
  filter(GeoId %in% c("SE", "NO", "DK", "FI", "IT"))
# Death dataset: only biweeks at/after the 10th cumulative death.
deaths <- dt %>%
  filter(tot_deaths >= 10) %>%
  data.table
# Days since 10 dead; seq_len(.N) is the safe idiom (the former seq(1:.N)
# only worked via seq()'s seq_along fallback).
deaths[, days_since_10_dead := seq_len(.N), by = list(GeoId)]
# Cases dataset: only days at/after the 100th cumulative case.
cases <- dt %>%
  filter(tot_cases >= 100) %>%
  data.table
# Days since 100 cases.
cases[, days_since_100_cases := seq_len(.N), by = list(GeoId)]
#-------------------------------------------------------------------------------
# GRAPHS
#-------------------------------------------------------------------------------
# Cases
## Total cumulative cases since the 100th case (log10 y scale)
cases %>%
  filter(days_since_100_cases <= 20) %>%
  ggplot(aes(x = days_since_100_cases, y = tot_cases, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "Total cases", trans = "log10") +
  scale_x_continuous(name = "Days since 100 cases") +
  ggtitle("Total Covid-19 cases") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
## Per day (BUG FIX: the y axis was labelled "Total cases" although daily
## new cases are plotted)
cases %>%
  filter(days_since_100_cases <= 20) %>%
  ggplot(aes(x = days_since_100_cases, y = Cases, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "New cases per day", trans = "log10") +
  scale_x_continuous(name = "Days since 100 cases") +
  ggtitle("New Covid-19 cases per day") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
# Deaths
## Total cumulative deaths since the 10th death (log10 y scale)
deaths %>%
  filter(days_since_10_dead <= 14) %>%
  ggplot(aes(x = days_since_10_dead, y = tot_deaths, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "Total deaths", trans = "log10") +
  scale_x_continuous(name = "Days since 10 deaths") +
  ggtitle("Total Covid-19 deaths") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
## Per day (BUG FIX: y-axis label, as above)
deaths %>%
  filter(days_since_10_dead <= 14) %>%
  ggplot(aes(x = days_since_10_dead, y = Deaths, colour = GeoId)) +
  geom_line() + scale_y_continuous(name = "New deaths per day", trans = "log10") +
  scale_x_continuous(name = "Days since 10 deaths") +
  ggtitle("New Covid-19 deaths") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank(), axis.line = element_line(colour = "black"))
|
library(otsad)
context("Optimized clasic Processing Sd-Ewma")
# Regression test: run the optimized classic-processing SD-EWMA detector on
# a fixed 500-point series and compare the anomaly flags against the known
# positions.
test_that("OcpSdEwma gives the correct result", {
  ## Hard-coded data, generated once with the recipe below (seed 100) so the
  ## test does not depend on changes to R's random number generator:
  ##   x <- sample(1:100, n, replace = TRUE)
  ##   x[70:90] <- sample(110:115, 21, replace = TRUE)  # elevated plateau
  ##   x[25] <- 200; x[320] <- 170                      # isolated spikes
  set.seed(100)
  n <- 500
  x <- c(
    31,26,56,6,47,49,82,38,55,18,63,89,29,40,77,67,21,36,36,70,54,72,54,75,200,18,78,89,55,28,
    49,93,35,96,70,89,19,63,99,14,34,87,78,83,61,50,79,89,21,31,34,20,24,28,60,26,13,23,60,22,
    47,65,97,68,45,36,46,45,25,111,114,111,112,112,112,114,111,114,113,111,112,110,112,115,110,
    110,112,111,115,113,91,21,36,45,91,39,52,13,4,78,33,39,5,37,58,69,98,71,2,54,84,81,9,24,
    97,4,92,73,21,85,40,40,48,59,36,3,100,96,56,11,24,87,74,50,58,2,48,5,47,63,68,9,15,91,13,
    73,96,5,2,20,51,93,14,17,61,82,85,79,2,70,82,57,49,17,9,17,3,71,77,86,44,42,59,83,80,33,96,
    66,46,61,29,72,93,68,19,35,13,11,30,84,100,44,21,97,67,30,12,60,13,79,37,96,92,83,32,88,81,
    62,8,43,35,76,22,30,36,64,90,75,46,4,57,44,61,96,27,66,8,8,38,30,56,37,85,63,40,30,39,71,
    95,78,22,72,67,71,28,72,67,5,7,28,31,96,39,38,85,85,32,14,62,80,34,91,20,80,76,92,33,9,92,
    96,68,75,45,12,68,74,49,18,68,27,35,22,2,38,57,68,75,96,17,33,14,64,34,65,31,8,67,76,56,
    54,85,66,96,62,50,98,50,66,60,95,37,88,46,50,47,62,61,79,56,170,41,52,53,100,43,100,79,52,
    51,92,27,18,41,54,25,38,59,21,81,64,74,44,58,26,47,17,62,96,48,76,3,18,64,17,36,19,90,24,
    99,3,11,25,73,4,55,69,31,39,74,97,77,59,47,36,39,21,14,39,27,71,41,27,41,20,83,53,40,58,98,
    66,34,34,95,39,57,52,14,24,72,30,52,28,37,44,81,53,70,85,85,40,16,64,29,93,16,97,1,71,64,
    78,90,52,75,93,10,50,20,100,4,22,90,12,37,17,17,98,82,96,40,86,7,31,50,65,52,80,57,36,66,
    24,27,52,74,78,12,62,92,64,28,36,68,91,74,92,39,86,6,26,37,37,29,39,35,28,81,100,76,61,60,
    59,24,46,100,9,6,50,6,96,64,27,100,36,38,34,11,58,34,1,100,37)
  df <- data.frame(timestamp = 1:n, value = x)
  ## Run the detector: n.train warm-up points, threshold/l tune sensitivity.
  result <- OcpSdEwma(data = df$value, n.train = 5, threshold = 0.01, l = 3)
  ## Expected flags: the injected spikes at 25 and 320, plus index 92
  ## (presumably triggered by the level drop right after the plateau).
  expected <- rep(0, n)
  expected[c(25, 92, 320)] <- 1
  expect_equal(as.numeric(result$is.anomaly), expected)
})
| /tests/testthat/test_ocp_sd_ewma.R | no_license | cran/otsad | R | false | false | 2,332 | r | library(otsad)
context("Optimized clasic Processing Sd-Ewma")
# Regression test: run the optimized classic-processing SD-EWMA detector on
# a fixed 500-point series and compare the anomaly flags against the known
# positions.
test_that("OcpSdEwma gives the correct result", {
  ## Hard-coded data, generated once with the recipe below (seed 100) so the
  ## test does not depend on changes to R's random number generator:
  ##   x <- sample(1:100, n, replace = TRUE)
  ##   x[70:90] <- sample(110:115, 21, replace = TRUE)  # elevated plateau
  ##   x[25] <- 200; x[320] <- 170                      # isolated spikes
  set.seed(100)
  n <- 500
  x <- c(
    31,26,56,6,47,49,82,38,55,18,63,89,29,40,77,67,21,36,36,70,54,72,54,75,200,18,78,89,55,28,
    49,93,35,96,70,89,19,63,99,14,34,87,78,83,61,50,79,89,21,31,34,20,24,28,60,26,13,23,60,22,
    47,65,97,68,45,36,46,45,25,111,114,111,112,112,112,114,111,114,113,111,112,110,112,115,110,
    110,112,111,115,113,91,21,36,45,91,39,52,13,4,78,33,39,5,37,58,69,98,71,2,54,84,81,9,24,
    97,4,92,73,21,85,40,40,48,59,36,3,100,96,56,11,24,87,74,50,58,2,48,5,47,63,68,9,15,91,13,
    73,96,5,2,20,51,93,14,17,61,82,85,79,2,70,82,57,49,17,9,17,3,71,77,86,44,42,59,83,80,33,96,
    66,46,61,29,72,93,68,19,35,13,11,30,84,100,44,21,97,67,30,12,60,13,79,37,96,92,83,32,88,81,
    62,8,43,35,76,22,30,36,64,90,75,46,4,57,44,61,96,27,66,8,8,38,30,56,37,85,63,40,30,39,71,
    95,78,22,72,67,71,28,72,67,5,7,28,31,96,39,38,85,85,32,14,62,80,34,91,20,80,76,92,33,9,92,
    96,68,75,45,12,68,74,49,18,68,27,35,22,2,38,57,68,75,96,17,33,14,64,34,65,31,8,67,76,56,
    54,85,66,96,62,50,98,50,66,60,95,37,88,46,50,47,62,61,79,56,170,41,52,53,100,43,100,79,52,
    51,92,27,18,41,54,25,38,59,21,81,64,74,44,58,26,47,17,62,96,48,76,3,18,64,17,36,19,90,24,
    99,3,11,25,73,4,55,69,31,39,74,97,77,59,47,36,39,21,14,39,27,71,41,27,41,20,83,53,40,58,98,
    66,34,34,95,39,57,52,14,24,72,30,52,28,37,44,81,53,70,85,85,40,16,64,29,93,16,97,1,71,64,
    78,90,52,75,93,10,50,20,100,4,22,90,12,37,17,17,98,82,96,40,86,7,31,50,65,52,80,57,36,66,
    24,27,52,74,78,12,62,92,64,28,36,68,91,74,92,39,86,6,26,37,37,29,39,35,28,81,100,76,61,60,
    59,24,46,100,9,6,50,6,96,64,27,100,36,38,34,11,58,34,1,100,37)
  df <- data.frame(timestamp = 1:n, value = x)
  ## Run the detector: n.train warm-up points, threshold/l tune sensitivity.
  result <- OcpSdEwma(data = df$value, n.train = 5, threshold = 0.01, l = 3)
  ## Expected flags: the injected spikes at 25 and 320, plus index 92
  ## (presumably triggered by the level drop right after the plateau).
  expected <- rep(0, n)
  expected[c(25, 92, 320)] <- 1
  expect_equal(as.numeric(result$is.anomaly), expected)
})
|
## Functions that create a matrix wrapper and cache the matrix inverse.
##
## makeCacheMatrix builds a cache-aware wrapper around a matrix. It returns
## a list of four closures that share this call's environment:
##   set(y)        -- store a new matrix and drop any cached inverse
##   get()         -- return the stored matrix
##   setinverse(v) -- cache the inverse
##   getinverse()  -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(orgMatrix = matrix()) {
  if (!is.matrix(orgMatrix)) {
    stop("Create a matrix")
  }
  cached_inverse <- NULL
  store <- function(y) {
    # Replacing the matrix invalidates the cache.
    orgMatrix <<- y
    cached_inverse <<- NULL
  }
  fetch <- function() orgMatrix
  remember_inverse <- function(inverse) cached_inverse <<- inverse
  recall_inverse <- function() cached_inverse
  # The exported (all lower-case) names form the object's public interface.
  list(set = store,
       get = fetch,
       setinverse = remember_inverse,
       getinverse = recall_inverse)
}
## Return the inverse of the matrix wrapped by makeCacheMatrix(), computing
## it at most once: a cached inverse is reused, otherwise it is computed
## with solve() and stored back into the cache.
##
## BUG FIX: the original called cachMatrix$getInverse()/$setInverse(), but
## makeCacheMatrix() exports the closures under the names "getinverse" and
## "setinverse". `$` extraction is case-sensitive, so $getInverse returned
## NULL and $setInverse(...) failed with "attempt to apply non-function".
cacheSolve <- function(cachMatrix, ...) {
  # Reuse the cached inverse when one is available.
  inv <- cachMatrix$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Nothing cached yet: invert the stored matrix and remember the result.
  # Extra arguments (...) are forwarded to solve().
  inv <- solve(cachMatrix$get(), ...)
  cachMatrix$setinverse(inv)
  inv
}
| /cachematrix2.R | no_license | Grrrusti/ProgrammingAssignment2 | R | false | false | 1,404 | r | ##Functions create inversed matrix and also cache the inverse of matrix
## This function stores a matrix and a cached value of its inverse.
## It returns a list of closures that set/get the matrix and set/get the
## cached inverse; the closures share this call's environment, which is
## what makes the cache persist between calls (used with cacheSolve below).
makeCacheMatrix <- function(orgMatrix = matrix()) {
if (!is.matrix(orgMatrix)) {
stop("Create a matrix")
}
# holds the cached inverse, or NULL while nothing has been cached yet
invMatrix <- NULL
# sets a new matrix; the superassignment (<<-) also clears the cached
# inverse, which would otherwise be stale for the new matrix
set <- function(y) {
orgMatrix <<- y
invMatrix <<- NULL
}
get <- function() orgMatrix
# stores a precomputed inverse in the cache
setInverse <- function(inverse) invMatrix <<- inverse
getInverse <- function() invMatrix
# return a list. Each named element of the list is a function; note the
# exported names are all lower-case ("setinverse"/"getinverse")
list(set=set, get=get, setinverse=setInverse, getinverse=getInverse)
}
## Return the inverse of the matrix wrapped by makeCacheMatrix(), computing
## it at most once: a cached inverse is reused, otherwise it is computed
## with solve() and stored back into the cache.
##
## BUG FIX: the original called cachMatrix$getInverse()/$setInverse(), but
## makeCacheMatrix() exports the closures under the names "getinverse" and
## "setinverse". `$` extraction is case-sensitive, so $getInverse returned
## NULL and $setInverse(...) failed with "attempt to apply non-function".
cacheSolve <- function(cachMatrix, ...) {
  # Reuse the cached inverse when one is available.
  inv <- cachMatrix$getinverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Nothing cached yet: invert the stored matrix and remember the result.
  # Extra arguments (...) are forwarded to solve().
  inv <- solve(cachMatrix$get(), ...)
  cachMatrix$setinverse(inv)
  inv
}
|
## make country prediction graph
## Nicholas Reich
## September 2014
#'@param forecasts data frame containing forecasts
#'@param counts data frame containing observed counts
#'
make_country_prediction_line_graph <- function(forecasts, counts, ylim_scale=1, min_plot_date=as.Date("2012-04-01"), show_unused_counts=TRUE) {
  ## Plot country-level observed DHF counts (bars) together with the model
  ## forecasts (line + points) and their prediction interval (ribbon).
  ##   forecasts: province/biweek forecasts with predicted_count, lb, ub
  ##   counts: province/biweek observed case counts
  ##   ylim_scale: multiplier applied to the y-axis upper limit
  ##   min_plot_date: earliest date_sick shown on the plot
  ##   show_unused_counts: draw biweeks not used by the model in gray
  ##     instead of dropping them from the bars
  require(dplyr)
  require(lubridate)
  require(scales)
  forecasts <- tbl_df(forecasts)
  counts <- tbl_df(counts)
  ## aggregate forecasts to country-level
  forecasts_cntry <- forecasts %>% group_by(biweek, year) %>%
    summarize(predicted_cntry_count = sum(predicted_count),
              predicted_ub = sum(ub),
              predicted_lb = sum(lb)) %>%
    mutate(time = year + (biweek-1)/26,
           date_sick = biweek_to_date(biweek, year))
  forecast_times <- (forecasts_cntry$year + (forecasts_cntry$biweek-1)/26)
  ## aggregate observed counts; forecast_biweek marks biweeks that were
  ## left out of the model fit
  counts_cntry <- counts %>%
    group_by(date_sick_year, date_sick_biweek) %>%
    summarize(cntry_count = sum(count)) %>%
    mutate(time = date_sick_year + (date_sick_biweek-1)/26,
           date_sick = biweek_to_date(date_sick_biweek, date_sick_year),
           forecast_biweek = time %in% forecast_times) %>%
    filter(date_sick >= min_plot_date)
  ## make plot
  p <- ggplot() + theme_bw() +
    theme(legend.position="bottom",
          axis.text.x = element_text(angle = 90, hjust = 1, vjust=.5),
          panel.background = element_rect(fill = "transparent",colour = NA), # or element_blank()
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          plot.background = element_rect(fill = "transparent",colour = NA))
  if(show_unused_counts){
    ## using gray bars for unused cases
    p <- p + geom_bar(data=counts_cntry,
                      aes(x=date_sick, y=cntry_count, fill=forecast_biweek),
                      stat="identity") +
      scale_fill_manual(values=c("black", "gray"),
                        name="",
                        labels=c("used by forecast model", "not used by forecast model"))
  } else {
    ## no unused cases
    p <- p + geom_bar(data=filter(counts_cntry, forecast_biweek==FALSE),
                      aes(x=date_sick, y=cntry_count),
                      stat="identity")
  }
  p <- p +
    ## add forecasts
    geom_line(data=forecasts_cntry,
              aes(x=date_sick, y=predicted_cntry_count)) +
    geom_point(data=forecasts_cntry,
               aes(x=date_sick, y=predicted_cntry_count)) +
    geom_ribbon(data=forecasts_cntry,
                aes(x=date_sick, ymin=predicted_lb, ymax=predicted_ub),
                alpha=I(.3)) +
    ## BUG FIX: current ggplot2 rejects a character `breaks`; the period
    ## string belongs in `date_breaks` (as the sibling function
    ## new_make_country_prediction_line_graph already does)
    scale_x_date(date_breaks = "3 months",
                 labels = date_format("%d %b %Y"))+
    xlab(NULL) + ylab(NULL) +
    ylim(0, max(counts_cntry$cntry_count)*ylim_scale) +
    ggtitle("Observed and predicted DHF case counts for all of Thailand")
  p
}
# Variant of make_country_prediction_line_graph keyed on
# date_sick_biweek/date_sick_year, optionally overlaying reporting-adjusted
# "expanded" counts. Observed counts are drawn as bars, forecasts as a line
# with points plus a prediction-interval ribbon.
# NOTE: the closing brace of this function sits on the line that follows.
new_make_country_prediction_line_graph <- function(forecasts,
                                                   counts,
                                                   ylim_scale=1,
                                                   min_plot_date=as.Date("2012-04-01"),
                                                   show_unused_counts=TRUE,
                                                   expanded_counts) {
  require(dplyr)
  require(lubridate)
  require(scales)
  forecasts <- tbl_df(forecasts)
  counts <- tbl_df(counts)
  ## country-level totals of the forecasts, one row per biweek
  fc_country <- forecasts %>%
    group_by(date_sick_biweek, date_sick_year) %>%
    summarize(predicted_cntry_count = sum(predicted_count),
              predicted_ub = sum(ub),
              predicted_lb = sum(lb)) %>%
    mutate(time = date_sick_year + (date_sick_biweek - 1) / 26,
           date_sick = biweek_to_date(date_sick_biweek, date_sick_year)) %>%
    ungroup()
  fc_times <- fc_country$date_sick_year + (fc_country$date_sick_biweek - 1) / 26
  ## country-level totals of the observed counts
  obs_country <- counts %>%
    group_by(date_sick_year, date_sick_biweek) %>%
    summarize(cntry_count = sum(count)) %>%
    mutate(time = date_sick_year + (date_sick_biweek - 1) / 26,
           date_sick = biweek_to_date(date_sick_biweek, date_sick_year),
           forecast_biweek = time %in% fc_times,
           expanded = 0) %>%
    filter(date_sick >= min_plot_date) %>%
    ungroup()
  ## establish a sane upper bound for the y axis
  obs_totals <- counts %>%
    group_by(date_sick_year, date_sick_biweek) %>%
    summarise(count = sum(count))
  max_counts <- max(obs_totals$count * ylim_scale)
  # NOTE(review): fc_country has no column named 'predicted_count' (the sum
  # is stored as predicted_cntry_count); on a tibble that extraction is
  # NULL, so the inner max() effectively reduces to max_counts -- confirm
  # intent before changing.
  chart_ub <- ifelse(max(fc_country$predicted_ub) * ylim_scale > max_counts,
                     max(max_counts,
                         fc_country$predicted_count * ylim_scale),
                     max(c(fc_country$predicted_ub,
                           obs_country$cntry_count)) * ylim_scale)
  ## merge in reporting-adjusted counts when supplied
  if(!missing(expanded_counts)){
    expanded_country <- expanded_counts %>%
      group_by(date_sick_year, date_sick_biweek) %>%
      summarize(cntry_count = sum(expanded_cases)) %>%
      mutate(time = date_sick_year + (date_sick_biweek - 1) / 26,
             date_sick = biweek_to_date(date_sick_biweek, date_sick_year),
             forecast_biweek = time %in% fc_times,
             expanded = 1) %>%
      filter(date_sick >= min_plot_date) %>%
      select(date_sick_year, date_sick_biweek, cntry_count,
             time, date_sick, forecast_biweek, expanded)
    obs_country <- bind_rows(obs_country, expanded_country)
  }
  ## base theme shared by every variant of the figure
  fig <- ggplot() + theme_bw() +
    theme(legend.position="bottom",
          axis.text.x = element_text(angle = 90, hjust = 1, vjust=.5),
          panel.background = element_rect(fill = "transparent", colour = NA),
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          plot.background = element_rect(fill = "transparent", colour = NA))
  if(show_unused_counts){
    if(!missing(expanded_counts)){
      ## bar_color: 0 = observed/used, 1 = expanded, 2/3 = unused biweeks
      obs_country$bar_color <- obs_country$expanded +
        2 * obs_country$forecast_biweek
      fig <- fig + geom_bar(data=obs_country,
                            aes(x=date_sick,
                                y=cntry_count,
                                fill=as.factor(bar_color)),
                            color="black",
                            stat="identity") +
        scale_fill_manual(values=c("black", "gray", "white"),
                          name="",
                          labels=c("observed cases",
                                   "estimated unreported cases",
                                   "observed cases, unused"))
    } else {
      fig <- fig + geom_bar(data=obs_country,
                            aes(x=date_sick,
                                y=cntry_count,
                                fill=forecast_biweek),
                            color="black",
                            stat="identity") +
        scale_fill_manual(values=c("black", "white"),
                          name="",
                          labels=c("used by forecast model",
                                   "not used by forecast model"))
    }
  } else {
    if(!missing(expanded_counts)){
      fig <- fig + geom_bar(data=obs_country,
                            aes(x=date_sick,
                                y=cntry_count,
                                fill=as.factor(expanded)),
                            color="black",
                            stat="identity") +
        scale_fill_manual(values=c("black", "gray"),
                          name="",
                          labels=c("observed cases",
                                   "estimated unreported cases"))
    } else {
      ## unused biweeks dropped entirely
      fig <- fig + geom_bar(data=filter(obs_country, forecast_biweek==FALSE),
                            aes(x=date_sick, y=cntry_count),
                            stat="identity")
    }
  }
  ## forecasts: line, points and prediction-interval ribbon
  fig <- fig +
    geom_line(data=fc_country,
              aes(x=date_sick, y=predicted_cntry_count)) +
    geom_point(data=fc_country,
               aes(x=date_sick, y=predicted_cntry_count)) +
    geom_ribbon(data=fc_country,
                aes(x=date_sick, ymin=predicted_lb, ymax=predicted_ub),
                alpha=I(.3)) +
    scale_x_date(date_breaks = "3 months",
                 labels = date_format("%d %b %Y")) +
    xlab(NULL) + ylab(NULL) +
    coord_cartesian(ylim=c(0, chart_ub)) +
    ggtitle("Observed and predicted DHF case counts for all of Thailand")
  fig
} | /R/make_country_forecast_linegraph.R | no_license | arttioz/dengue_project_sra | R | false | false | 9,721 | r | ## make country prediction graph
## Nicholas Reich
## September 2014
#'@param forecasts data frame containing forecasts
#'@param counts data frame containing observed counts
#'
make_country_prediction_line_graph <- function(forecasts, counts, ylim_scale=1, min_plot_date=as.Date("2012-04-01"), show_unused_counts=TRUE) {
  ## Plot country-level observed DHF counts (bars) together with the model
  ## forecasts (line + points) and their prediction interval (ribbon).
  ##   forecasts: province/biweek forecasts with predicted_count, lb, ub
  ##   counts: province/biweek observed case counts
  ##   ylim_scale: multiplier applied to the y-axis upper limit
  ##   min_plot_date: earliest date_sick shown on the plot
  ##   show_unused_counts: draw biweeks not used by the model in gray
  ##     instead of dropping them from the bars
  require(dplyr)
  require(lubridate)
  require(scales)
  forecasts <- tbl_df(forecasts)
  counts <- tbl_df(counts)
  ## aggregate forecasts to country-level
  forecasts_cntry <- forecasts %>% group_by(biweek, year) %>%
    summarize(predicted_cntry_count = sum(predicted_count),
              predicted_ub = sum(ub),
              predicted_lb = sum(lb)) %>%
    mutate(time = year + (biweek-1)/26,
           date_sick = biweek_to_date(biweek, year))
  forecast_times <- (forecasts_cntry$year + (forecasts_cntry$biweek-1)/26)
  ## aggregate observed counts; forecast_biweek marks biweeks that were
  ## left out of the model fit
  counts_cntry <- counts %>%
    group_by(date_sick_year, date_sick_biweek) %>%
    summarize(cntry_count = sum(count)) %>%
    mutate(time = date_sick_year + (date_sick_biweek-1)/26,
           date_sick = biweek_to_date(date_sick_biweek, date_sick_year),
           forecast_biweek = time %in% forecast_times) %>%
    filter(date_sick >= min_plot_date)
  ## make plot
  p <- ggplot() + theme_bw() +
    theme(legend.position="bottom",
          axis.text.x = element_text(angle = 90, hjust = 1, vjust=.5),
          panel.background = element_rect(fill = "transparent",colour = NA), # or element_blank()
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          plot.background = element_rect(fill = "transparent",colour = NA))
  if(show_unused_counts){
    ## using gray bars for unused cases
    p <- p + geom_bar(data=counts_cntry,
                      aes(x=date_sick, y=cntry_count, fill=forecast_biweek),
                      stat="identity") +
      scale_fill_manual(values=c("black", "gray"),
                        name="",
                        labels=c("used by forecast model", "not used by forecast model"))
  } else {
    ## no unused cases
    p <- p + geom_bar(data=filter(counts_cntry, forecast_biweek==FALSE),
                      aes(x=date_sick, y=cntry_count),
                      stat="identity")
  }
  p <- p +
    ## add forecasts
    geom_line(data=forecasts_cntry,
              aes(x=date_sick, y=predicted_cntry_count)) +
    geom_point(data=forecasts_cntry,
               aes(x=date_sick, y=predicted_cntry_count)) +
    geom_ribbon(data=forecasts_cntry,
                aes(x=date_sick, ymin=predicted_lb, ymax=predicted_ub),
                alpha=I(.3)) +
    ## BUG FIX: current ggplot2 rejects a character `breaks`; the period
    ## string belongs in `date_breaks` (as the sibling function
    ## new_make_country_prediction_line_graph already does)
    scale_x_date(date_breaks = "3 months",
                 labels = date_format("%d %b %Y"))+
    xlab(NULL) + ylab(NULL) +
    ylim(0, max(counts_cntry$cntry_count)*ylim_scale) +
    ggtitle("Observed and predicted DHF case counts for all of Thailand")
  p
}
#' Plot observed and forecasted country-level DHF case counts for Thailand.
#'
#' Aggregates biweekly forecasts and observed counts to the country level,
#' draws observed counts as bars and forecasts as a line/point series with a
#' prediction-interval ribbon.
#'
#' @param forecasts data frame of biweekly forecasts with columns
#'   date_sick_biweek, date_sick_year, predicted_count, lb, ub.
#' @param counts data frame of observed counts with columns
#'   date_sick_year, date_sick_biweek, count.
#' @param ylim_scale multiplier applied when choosing the y-axis upper bound.
#' @param min_plot_date earliest onset date shown on the x-axis.
#' @param show_unused_counts if TRUE, biweeks not used by the forecast model
#'   are shown as differently-filled bars instead of being dropped.
#' @param expanded_counts optional data frame of reporting-adjusted counts
#'   (column expanded_cases); when supplied, these are drawn as extra bars.
#' @return a ggplot object.
new_make_country_prediction_line_graph <- function(forecasts,
                                                   counts,
                                                   ylim_scale=1,
                                                   min_plot_date=as.Date("2012-04-01"),
                                                   show_unused_counts=TRUE,
                                                   expanded_counts) {
    require(dplyr)
    require(lubridate)
    require(scales)
    forecasts <- tbl_df(forecasts)
    counts <- tbl_df(counts)
    ## aggregate forecasts to country-level; summarize() drops the original
    ## predicted_count column, leaving only predicted_cntry_count
    forecasts_cntry <- forecasts %>%
        group_by(date_sick_biweek, date_sick_year) %>%
        summarize(predicted_cntry_count = sum(predicted_count),
                  predicted_ub = sum(ub),
                  predicted_lb = sum(lb)) %>%
        mutate(time = date_sick_year + (date_sick_biweek-1)/26,
               date_sick = biweek_to_date(date_sick_biweek, date_sick_year)) %>%
        ungroup()
    ## fractional-year times of the forecasted biweeks
    forecast_times <- (forecasts_cntry$date_sick_year + (forecasts_cntry$date_sick_biweek-1)/26)
    ## aggregate observed counts to country-level and flag biweeks that fall
    ## in the forecast horizon (i.e. were not used to fit the model)
    counts_cntry <- counts %>%
        group_by(date_sick_year, date_sick_biweek) %>%
        summarize(cntry_count = sum(count)) %>%
        mutate(time = date_sick_year + (date_sick_biweek-1)/26,
               date_sick = biweek_to_date(date_sick_biweek, date_sick_year),
               forecast_biweek = time %in% forecast_times,
               expanded = 0) %>%
        filter(date_sick >= min_plot_date) %>%
        ungroup()
    ## establish a sane y-axis upper bound: when the prediction-interval upper
    ## bound dwarfs the observed counts, cap the axis so the ribbon does not
    ## flatten the rest of the plot
    counts_ub <- counts %>%
        group_by(date_sick_year, date_sick_biweek) %>%
        summarise(count = sum(count))
    max_counts <- max(counts_ub$count * ylim_scale)
    ## BUGFIX: this previously read forecasts_cntry$predicted_count, a column
    ## that does not exist after the summarize() above (it was aggregated into
    ## predicted_cntry_count), so this branch silently reduced to max_counts.
    ## A scalar if/else also replaces the scalar misuse of ifelse().
    if (max(forecasts_cntry$predicted_ub) * ylim_scale > max_counts) {
        chart_ub <- max(max_counts,
                        forecasts_cntry$predicted_cntry_count * ylim_scale)
    } else {
        chart_ub <- max(c(forecasts_cntry$predicted_ub,
                          counts_cntry$cntry_count)) * ylim_scale
    }
    if(!missing(expanded_counts)){
        ## reporting-adjusted counts, marked with expanded = 1 and appended
        ## to the observed country-level counts
        expanded_ctry <- expanded_counts %>%
            group_by(date_sick_year, date_sick_biweek) %>%
            summarize(cntry_count = sum(expanded_cases)) %>%
            mutate(time = date_sick_year + (date_sick_biweek-1)/26,
                   date_sick = biweek_to_date(date_sick_biweek, date_sick_year),
                   forecast_biweek = time %in% forecast_times,
                   expanded=1) %>%
            filter(date_sick >= min_plot_date) %>%
            select(date_sick_year, date_sick_biweek, cntry_count,
                   time, date_sick, forecast_biweek, expanded)
        counts_cntry <- bind_rows(counts_cntry, expanded_ctry)
    }
    ## base plot with transparent background and vertical x labels
    p <- ggplot() + theme_bw() +
        theme(legend.position="bottom",
              axis.text.x = element_text(angle = 90, hjust = 1, vjust=.5),
              panel.background = element_rect(fill = "transparent",
                                              colour = NA), # or element_blank()
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              plot.background = element_rect(fill = "transparent",
                                             colour = NA))
    if(show_unused_counts){
        if(!missing(expanded_counts)){
            ## encode (expanded, forecast_biweek) into one fill code:
            ## 0 = observed, 1 = expanded, 2/3 = within forecast horizon
            counts_cntry$bar_color <- counts_cntry$expanded+
                2*counts_cntry$forecast_biweek
            p <- p + geom_bar(data=counts_cntry,
                              aes(x=date_sick,
                                  y=cntry_count,
                                  fill=as.factor(bar_color)),
                              color="black",
                              stat="identity") +
                scale_fill_manual(values=c("black", "gray", "white"),
                                  name="",
                                  labels=c("observed cases",
                                           "estimated unreported cases",
                                           "observed cases, unused"))
        } else{
            ## black/white bars distinguish fitted vs unused biweeks
            p <- p + geom_bar(data=counts_cntry,
                              aes(x=date_sick,
                                  y=cntry_count,
                                  fill=forecast_biweek),
                              color="black",
                              stat="identity") +
                scale_fill_manual(values=c("black", "white"),
                                  name="",
                                  labels=c("used by forecast model",
                                           "not used by forecast model"))
        }
    } else {
        if(!missing(expanded_counts)){
            ## only distinguish observed vs reporting-adjusted counts
            p <- p + geom_bar(data=counts_cntry,
                              aes(x=date_sick,
                                  y=cntry_count,
                                  fill=as.factor(expanded)),
                              color="black",
                              stat="identity") +
                scale_fill_manual(values=c("black", "gray"),
                                  name="",
                                  labels=c("observed cases",
                                           "estimated unreported cases"))
        } else{
            ## drop the unused (forecast-horizon) biweeks entirely
            p <- p + geom_bar(data=filter(counts_cntry, forecast_biweek==FALSE),
                              aes(x=date_sick, y=cntry_count),
                              stat="identity")
        }
    }
    p <- p +
        ## forecast point estimates plus prediction-interval ribbon
        geom_line(data=forecasts_cntry,
                  aes(x=date_sick, y=predicted_cntry_count)) +
        geom_point(data=forecasts_cntry,
                   aes(x=date_sick, y=predicted_cntry_count)) +
        geom_ribbon(data=forecasts_cntry,
                    aes(x=date_sick, ymin=predicted_lb, ymax=predicted_ub),
                    alpha=I(.3)) +
        # air-brushing
        scale_x_date(date_breaks = "3 months",
                     labels = date_format("%d %b %Y"))+
        xlab(NULL) + ylab(NULL) +
        coord_cartesian(ylim=c(0, chart_ub)) +
        ggtitle("Observed and predicted DHF case counts for all of Thailand")
    p
} |
library(shiny)
# Define UI for miles per gallon application
shinyUI(navbarPage("Motor Trends Cars MPG data analysis",
tabPanel("Plot",
# Sidebar with controls to select the variable to plot against mpg
# and to specify whether outliers should be included
sidebarPanel(
selectInput("variable", "Variable:",
list("Cylinders" = "cyl",
"Transmission" = "am",
"Gears" = "gear")),
checkboxInput("outliers", "Show outliers", FALSE)
),
# Show the caption and plot of the requested variable against mpg
mainPanel(
h3(textOutput("header")),
h3(textOutput("caption")),
plotOutput("mpgPlot")
)
),
tabPanel("Help",
mainPanel(
includeMarkdown("help.md")
)
)
)) | /ui.R | no_license | nitinnaik/devdataprodCoruseProject | R | false | false | 803 | r | library(shiny)
# Define UI for miles per gallon application
## Top-level UI: a navbar with an interactive plot tab and a help tab.
shinyUI(navbarPage("Motor Trends Cars MPG data analysis",
  tabPanel("Plot",
    # Sidebar with controls to select the variable to plot against mpg
    # and to specify whether outliers should be included
    sidebarPanel(
      # grouping variable for the mpg plot; values are column names
      # (presumably from the mtcars dataset -- confirm in server.R)
      selectInput("variable", "Variable:",
                  list("Cylinders" = "cyl",
                       "Transmission" = "am",
                       "Gears" = "gear")),
      checkboxInput("outliers", "Show outliers", FALSE)
    ),
    # Show the caption and plot of the requested variable against mpg;
    # the "header", "caption" and "mpgPlot" outputs are filled by the server
    mainPanel(
      h3(textOutput("header")),
      h3(textOutput("caption")),
      plotOutput("mpgPlot")
    )
  ),
  # Static help content rendered from a markdown file
  tabPanel("Help",
    mainPanel(
      includeMarkdown("help.md")
    )
  )
))
#############################################################
## Internal validation sampling strategies for exposure
## measurement error correction
## Simulation study
##
## Tabularise summaries percentage bias + coverage
## lindanab4@gmail.com - 20201103
#############################################################
##############################
# 0 - Load libraries + source code
##############################
library(xtable)
library(data.table)
library(dplyr)
sum_analysis <-
  readRDS(file = "./results/summaries/summary.Rds")
## Restrict to the scenarios shown in the table: 10% validation sample,
## four R-squared levels, three skewness levels and the two correction
## methods. (%in% replaces the long ==/| comparison chains.)
sum_analysis <- subset(
  sum_analysis,
  size_valdata == 0.1 &
    R_squared %in% c(0.2, 0.4, 0.6, 0.8) &
    skewness %in% c(0.1, 1.5, 3) &
    method %in% c("complete_case", "inadm_reg_cal")
)
## data.table ordering: columns are referenced unquoted
sum_analysis <- sum_analysis[order(sampling_strat, -linear, method, R_squared, skewness),]
##############################
# 1 - Helper functions
##############################
# # Function that creates a string of effect_est% (ci_lower%-ci_upper%)
# effect_est_and_ci <- function(row_of_summary){
#   effect_est <- round(as.numeric(row_of_summary[["effect_est"]]), 0)
#   ci_lower <- round(as.numeric(row_of_summary[["ci_lower"]]), 0)
#   ci_upper <- round(as.numeric(row_of_summary[["ci_upper"]]), 0)
#   paste0(effect_est, "%", " (", ci_lower, "%-", ci_upper, "%)")
# }
##############################
# 2 - Create table
##############################
# Select values needed from summary object
## BUGFIX: "10\%" is an unrecognised escape sequence, which is a parse error
## in R; "\\%" produces the literal backslash-percent that LaTeX expects.
caption <-
  c("Percentage bias and coverage in the estimated association between visceral adipose tissue and insulin resistance with an internal validation sample of 10\\% of the main study's sample size")
## NOTE: `table` shadows base::table() for the remainder of this script
table <-
  sum_analysis[, c("sampling_strat",
                   "linear",
                   "method",
                   "R_squared",
                   "skewness",
                   "perc_bias",
                   "cover")]
table$perc_bias <- round(table$perc_bias, 1)
table$cover <- round(table$cover * 100, 1)  # coverage as a percentage
## wide block for the IVSR (complete case) method
table_ivrs <- subset(table, method == "complete_case")
table_ivrs <- table_ivrs[, -c("method")]
colnames(table_ivrs)[colnames(table_ivrs) == "perc_bias"] <- "perc_bias_ivrs"
colnames(table_ivrs)[colnames(table_ivrs) == "cover"] <- "cover_ivrs"
table_ivrs_wide <- reshape(
  table_ivrs,
  idvar = c("linear", "R_squared", "skewness"),
  timevar = "sampling_strat",
  direction = "wide"
)
## group the three bias columns before the three coverage columns
table_ivrs_wide <- setcolorder(table_ivrs_wide,
                               c(1, 2, 3,
                                 4, 6, 8,
                                 5, 7, 9))
## wide block for the VRC (regression calibration) method
table_vrc <- subset(table, method == "inadm_reg_cal")
table_vrc <- table_vrc[, -c("method")]
colnames(table_vrc)[colnames(table_vrc) == "perc_bias"] <- "perc_bias_vrc"
colnames(table_vrc)[colnames(table_vrc) == "cover"] <- "cover_vrc"
table_vrc_wide <- reshape(
  table_vrc,
  idvar = c("linear", "R_squared", "skewness"),
  timevar = "sampling_strat",
  direction = "wide"
)
table_vrc_wide <- setcolorder(table_vrc_wide,
                              c(1, 2, 3,
                                4, 6, 8,
                                5, 7, 9))
table <- merge(table_ivrs_wide, table_vrc_wide,
               by = c("linear", "R_squared", "skewness"), sort = FALSE)
table$linear <- ifelse(table$linear == 0, "no", "yes")
# Change column names and prepend two header rows
# (rbind of character rows coerces the table body to character)
table <- as.data.frame(table)
colnames(table) <- c("", "Scenario", "",
                     "", "", "IVSR", "", "", "",
                     "", "", "VRC", "", "", "")
table <- rbind(c("", "", "",
                 "R", "SR", "E",
                 "R", "SR", "E",
                 "R", "SR", "E",
                 "R", "SR", "E"), table)
table <- rbind(c("Linear", "R-Squared", "Skewness",
                 "", "Bias", "",
                 "", "Coverage", "",
                 "", "Bias", "",
                 "", "Coverage", ""), table)
# Create TeX table
table_xtable <- print(xtable(table,
                             caption = caption,
                             digits = 0),
                      include.rownames = FALSE)
file_con <- file("./results/tables/table_bias_cover_10.txt")
writeLines(table_xtable, file_con)
close(file_con)
| /rcode/tabular/tabularise_bias_cover_10.R | permissive | LindaNab/me_neo | R | false | false | 4,204 | r | #############################################################
## Internal validation sampling strategies for exposure
## measurement error correction
## Simulation study
##
## Tabularise summaries percentage bias + coverage
## lindanab4@gmail.com - 20201103
#############################################################
##############################
# 0 - Load libraries + source code
##############################
library(xtable)
library(data.table)
library(dplyr)
sum_analysis <-
  readRDS(file = "./results/summaries/summary.Rds")
## keep the scenarios shown in the table: 10% validation sample, four
## R-squared levels, three skewness levels and two correction methods
sum_analysis <- subset(
  sum_analysis,
  size_valdata == 0.1 &
    (
      R_squared == 0.2 |
        R_squared == 0.4 | R_squared == 0.6 | R_squared == 0.8
    ) &
    (skewness == 0.1 | skewness == 1.5 | skewness == 3) &
    (method == "complete_case" | method == "inadm_reg_cal")
)
## data.table ordering (columns referenced unquoted)
sum_analysis <- sum_analysis[order(sampling_strat, -linear, method, R_squared, skewness),]
##############################
# 1 - Helper functions
##############################
# # Function that creates a string of effect_est% (ci_lower%-ci_upper%)
# effect_est_and_ci <- function(row_of_summary){
#   effect_est <- round(as.numeric(row_of_summary[["effect_est"]]), 0)
#   ci_lower <- round(as.numeric(row_of_summary[["ci_lower"]]), 0)
#   ci_upper <- round(as.numeric(row_of_summary[["ci_upper"]]), 0)
#   paste0(effect_est, "%", " (", ci_lower, "%-", ci_upper, "%)")
# }
##############################
# 2 - Create table
##############################
# Select values needed from summary object
## NOTE(review): "\%" is not a recognised escape sequence in R strings and
## makes this file fail to parse; it should be "\\%" (literal \% for LaTeX).
caption <-
  c("Percentage bias and coverage in the estimated association between visceral adipose tissue and insulin resistance with an internal validation sample of 10\% of the main study's sample size")
table <-
  sum_analysis[, c("sampling_strat",
                   "linear",
                   "method",
                   "R_squared",
                   "skewness",
                   "perc_bias",
                   "cover")]
table$perc_bias <- round(table$perc_bias, 1)
table$cover <- round(table$cover * 100, 1)  # coverage as a percentage
## wide block for the IVSR (complete case) method
table_ivrs <- subset(table, method == "complete_case")
table_ivrs <- table_ivrs[, -c("method")]
colnames(table_ivrs)[colnames(table_ivrs) == "perc_bias"] <- "perc_bias_ivrs"
colnames(table_ivrs)[colnames(table_ivrs) == "cover"] <- "cover_ivrs"
table_ivrs_wide <- reshape(
  table_ivrs,
  idvar = c("linear", "R_squared", "skewness"),
  timevar = "sampling_strat",
  direction = "wide"
)
## group the three bias columns before the three coverage columns
table_ivrs_wide <- setcolorder(table_ivrs_wide,
                               c(1, 2, 3,
                                 4, 6, 8,
                                 5, 7, 9))
## wide block for the VRC (regression calibration) method
table_vrc <- subset(table, method == "inadm_reg_cal")
table_vrc <- table_vrc[, -c("method")]
colnames(table_vrc)[colnames(table_vrc) == "perc_bias"] <- "perc_bias_vrc"
colnames(table_vrc)[colnames(table_vrc) == "cover"] <- "cover_vrc"
table_vrc_wide <- reshape(
  table_vrc,
  idvar = c("linear", "R_squared", "skewness"),
  timevar = "sampling_strat",
  direction = "wide"
)
table_vrc_wide <- setcolorder(table_vrc_wide,
                              c(1, 2, 3,
                                4, 6, 8,
                                5, 7, 9))
table <- merge(table_ivrs_wide, table_vrc_wide,
               by = c("linear", "R_squared", "skewness"), sort = F)
table$linear <- ifelse(table$linear == 0, "no", "yes")
# Change column names
table <- as.data.frame(table)
colnames(table) <- c("", "Scenario", "",
                     "", "", "IVSR", "", "", "",
                     "", "", "VRC", "", "", "")
## prepend two header rows (coerces the table body to character)
table <- rbind(c("", "", "",
                 "R", "SR", "E",
                 "R", "SR", "E",
                 "R", "SR", "E",
                 "R", "SR", "E"), table)
table <- rbind(c("Linear", "R-Squared", "Skewness",
                 "", "Bias", "",
                 "", "Coverage", "",
                 "", "Bias", "",
                 "", "Coverage", ""), table)
# Create TeX table
table_xtable <- print(xtable(table,
                             caption = caption,
                             digits = 0),
                      include.rownames = FALSE)
file_con <- file("./results/tables/table_bias_cover_10.txt")
writeLines(table_xtable, file_con)
close(file_con)
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{class}
\name{WebLocation-class}
\alias{WebLocation-class}
\alias{webloc}
\title{Web location}
\usage{
webloc(path, clss = "WebLocation")
}
\arguments{
\item{path}{character, pointing to an existing web directory. Required.}
\item{clss}{character, optional class name. Default is "WebLocation".}
}
\description{
A read-only web folder.
The \code{webloc} function returns a WebLocation object that can be used as a read-only folder.
}
\details{
The queries and meta methods do not work for this class.
}
\examples{
getSlots("WebLocation")
}
\seealso{
\code{\link{webloc}}
}
| /man/WebLocation-class.Rd | no_license | doomhammerhell/test-datamart | R | false | false | 650 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{class}
\name{WebLocation-class}
\alias{WebLocation-class}
\alias{webloc}
\title{Web location}
\usage{
webloc(path, clss = "WebLocation")
}
\arguments{
\item{path}{character, pointing to an existing web directory. Required.}
\item{clss}{character, optional class name. Default is "WebLocation".}
}
\description{
Read-only web folder
This function returns a WebLocation object that can be used as a read-only folder.
}
\details{
The queries and meta methods do not work for this class.
}
\examples{
getSlots("WebLocation")
}
\seealso{
\code{\link{webloc}}
}
|
## Example: fit a doldar model predicting iris Species from the remaining
## columns and report the proportion of correct training-set predictions.
library(doldar)
data(iris)
iterations <- 1000
## run the sampler for `iterations` iterations
lda <- sample_do(iris, Species~., iterations = iterations)
betas <- get_betas(lda)
## design matrix matching the model formula
mm <- model.matrix(Species~.,iris)
fit <- predict(lda,mm)
## training-set accuracy; assumes fit$preds are integer class codes aligned
## with the integer coding of the Species factor -- confirm in ?predict
sum(fit$preds == as.integer(iris$Species)) / length(fit$preds)
| /inst/examples/main.R | no_license | lejon/doldar | R | false | false | 257 | r | library(doldar)
data(iris)
iterations <- 1000
lda <- sample_do(iris, Species~., iterations = iterations)
betas <- get_betas(lda)
mm <- model.matrix(Species~.,iris)
fit <- predict(lda,mm)
sum(fit$preds == as.integer(iris$Species)) / length(fit$preds)
|
## Packages used throughout this script
libs <- c("here", "purrr")
## load quietly; sapply + require yields one logical per package
suppressMessages(
  suppressWarnings(sapply(libs, require, character.only = TRUE))
)
rm(libs)
## NOTE(review): require() returns FALSE instead of erroring when a package
## is missing, so a failed load is silently swallowed here.
sample_data <- read.csv(here("data", "sample_ga_data_binomial_2019-04-26.csv"))
## project helpers: simulate_outcome/ranking_complex, gbm wrapper,
## and run_combined_var_imp (plus Wnames/Wnames_cat)
source(here("R", "sim_variables.R"))
source(here("R", "ga_gbm_function.R"))
source(here("R", "vimshift_ph290_project.R"))
#' Cumulative top-k overlap ("concordance") between two rankings.
#'
#' For every prefix length i, counts how many of the first i elements of
#' `l1` also appear among the first i elements of `l2`, then returns the sum
#' of these overlaps over all prefixes. Identical rankings of length n score
#' n * (n + 1) / 2; zero-length input scores 0.
#'
#' @param l1 A vector giving the first ranking (best first).
#' @param l2 A vector of the same length giving the second ranking.
#' @return A single numeric concordance score.
concordance <- function(l1, l2) {
  ## seq_along() (not 1:length(l1)) is safe for zero-length input, where the
  ## old 1:length() form iterated over c(1, 0) and could score a spurious
  ## match via NA %in% NA; base vapply() also drops the purrr dependency.
  overlaps <- vapply(seq_along(l1), function(i) {
    sum(l1[seq_len(i)] %in% l2[seq_len(i)])
  }, numeric(1))
  sum(overlaps)
}
library("doParallel")
nCores <- 10   # parallel workers
reps <- 10     # simulation replicates per setting
registerDoParallel(nCores)
k <- 13
n <- 2^k       # sample size for the first batch of simulations
print("Running for various seed for n")
## Each replicate simulates data, derives three feature orderings (truth,
## gradient boosting, vimshift) and saves them to its own .rds file.
## NOTE(review): the last expression of each %dopar% block is cat(), which
## returns NULL, so `results` collects NULLs; the per-iteration output is
## only preserved via the saveRDS() calls inside the loop.
## NOTE(review): tryCatch() is called without any error handler, so it does
## not actually catch errors -- presumably intended as best-effort; confirm.
results <- foreach(i = 1:reps) %dopar% {
  suppressMessages(suppressWarnings(tryCatch(
    {sim1 <- simulate_outcome(sample_data = sample_data, n = n,
                              complexity = 1, seed = sample(1:10000, 1))
    trueOrder <- ranking_complex(ranking_info = sim1$ranking_info)
    gbOrder <- gbm(data = data.frame(sim1$covariates, y = sim1$y))
    suppressMessages(vimshiftOrder <- run_combined_var_imp(train = data.frame(sim1$covariates,
                                                                              y = as.integer(sim1$y)),
                                                           Wnames, Wnames_cat))
    a <- list("truth" = trueOrder,
              "gb" = gbOrder,
              "vim" = vimshiftOrder)
    saveRDS(a, file = here("data", paste0("n_", n, "_", i, ".rds")))
    })))
  cat("Iteration", i, "done\n")
}
saveRDS(results, file = here("data", paste0("n_", n, ".rds")))
print("Running for various seed for complexity")
complexity <- 1
## Same pipeline, fixed n = 1000, varying the complexity setting.
results <- foreach(i = 1:reps) %dopar% {
  suppressMessages(suppressWarnings(tryCatch(
    {sim1 <- simulate_outcome(sample_data = sample_data, n = 1000,
                              complexity = complexity, seed = sample(1:10000, 1))
    trueOrder <- ranking_complex(ranking_info = sim1$ranking_info)
    gbOrder <- gbm(data = data.frame(sim1$covariates, y = sim1$y))
    suppressMessages(vimshiftOrder <- run_combined_var_imp(train = data.frame(sim1$covariates,
                                                                              y = as.integer(sim1$y)),
                                                           Wnames, Wnames_cat))
    a <- list("truth" = trueOrder,
              "gb" = gbOrder,
              "vim" = vimshiftOrder)
    saveRDS(a, file = here("data", paste0("complexity_", complexity, "_", i, ".rds")))
    }
  )))
  cat("Iteration", i, "done\n")
}
saveRDS(results, file = here("data", paste0("complexity_", complexity, ".rds")))
| /R/simulation_6.R | no_license | HectorRDB/PH290-Final-Project | R | false | false | 2,410 | r | libs <- c("here", "purrr")
suppressMessages(
suppressWarnings(sapply(libs, require, character.only = TRUE))
)
rm(libs)
sample_data <- read.csv(here("data", "sample_ga_data_binomial_2019-04-26.csv"))
source(here("R", "sim_variables.R"))
source(here("R", "ga_gbm_function.R"))
source(here("R", "vimshift_ph290_project.R"))
#' Cumulative top-k overlap ("concordance") between two rankings.
#'
#' For every prefix length i, counts how many of the first i elements of
#' `l1` also appear among the first i elements of `l2`, then returns the sum
#' of these overlaps over all prefixes. Identical rankings of length n score
#' n * (n + 1) / 2; zero-length input scores 0.
#'
#' @param l1 A vector giving the first ranking (best first).
#' @param l2 A vector of the same length giving the second ranking.
#' @return A single numeric concordance score.
concordance <- function(l1, l2) {
  ## seq_along() (not 1:length(l1)) is safe for zero-length input, where the
  ## old 1:length() form iterated over c(1, 0) and could score a spurious
  ## match via NA %in% NA; base vapply() also drops the purrr dependency.
  overlaps <- vapply(seq_along(l1), function(i) {
    sum(l1[seq_len(i)] %in% l2[seq_len(i)])
  }, numeric(1))
  sum(overlaps)
}
library("doParallel")
nCores <- 10
reps <- 10
registerDoParallel(nCores)
k <- 13
n <- 2^k
print("Running for various seed for n")
results <- foreach(i = 1:reps) %dopar% {
suppressMessages(suppressWarnings(tryCatch(
{sim1 <- simulate_outcome(sample_data = sample_data, n = n,
complexity = 1, seed = sample(1:10000, 1))
trueOrder <- ranking_complex(ranking_info = sim1$ranking_info)
gbOrder <- gbm(data = data.frame(sim1$covariates, y = sim1$y))
suppressMessages(vimshiftOrder <- run_combined_var_imp(train = data.frame(sim1$covariates,
y = as.integer(sim1$y)),
Wnames, Wnames_cat))
a <- list("truth" = trueOrder,
"gb" = gbOrder,
"vim" = vimshiftOrder)
saveRDS(a, file = here("data", paste0("n_", n, "_", i, ".rds")))
})))
cat("Iteration", i, "done\n")
}
saveRDS(results, file = here("data", paste0("n_", n, ".rds")))
print("Running for various seed for complexity")
complexity <- 1
results <- foreach(i = 1:reps) %dopar% {
suppressMessages(suppressWarnings(tryCatch(
{sim1 <- simulate_outcome(sample_data = sample_data, n = 1000,
complexity = complexity, seed = sample(1:10000, 1))
trueOrder <- ranking_complex(ranking_info = sim1$ranking_info)
gbOrder <- gbm(data = data.frame(sim1$covariates, y = sim1$y))
suppressMessages(vimshiftOrder <- run_combined_var_imp(train = data.frame(sim1$covariates,
y = as.integer(sim1$y)),
Wnames, Wnames_cat))
a <- list("truth" = trueOrder,
"gb" = gbOrder,
"vim" = vimshiftOrder)
saveRDS(a, file = here("data", paste0("complexity_", complexity, "_", i, ".rds")))
}
)))
cat("Iteration", i, "done\n")
}
saveRDS(results, file = here("data", paste0("complexity_", complexity, ".rds")))
|
%
% Auto-generated file, do not modify.
% Instead, copy this file to the man/ folder, remove this warning, and edit freely.
% Use Git to identify changes in this file which suggest where to change your edited copy.
%
\name{Link-class}
\alias{Link-class}
\docType{class}
\title{
Link
}
\format{An R6 class object.}
\description{
Represents a link in Synapse.
Links must have a target ID and a parent. When you call synapseclient.Synapse.get on a Link object,
the Link object itself is returned. To retrieve the link's target instead, specify followLink=True in synapseclient.Synapse.get.
}
\section{Methods}{
\itemize{
\item \code{Link(targetId=NULL, targetVersion=NULL, parent=NULL, properties=NULL, annotations=NULL, local_state=NULL)}: Constructor for \code{\link{Link}}
\item \code{local_state(state=NULL)}: Set or get the object's internal state, excluding properties, or annotations.
}
}
| /auto-man/Link-class.Rd | permissive | Sage-Bionetworks/synapser | R | false | false | 876 | rd | %
% Auto-generated file, do not modify.
% Instead, copy this file to the man/ folder, remove this warning, and edit freely.
% Use Git to identify changes in this file which suggest where to change your edited copy.
%
\name{Link-class}
\alias{Link-class}
\docType{class}
\title{
Link
}
\format{An R6 class object.}
\description{
Represents a link in Synapse.
Links must have a target ID and a parent. When you do synapseclient.Synapse.get on a Link object,
the Link object is returned. If the target is desired, specify followLink=True in synapseclient.Synapse.get.
}
\section{Methods}{
\itemize{
\item \code{Link(targetId=NULL, targetVersion=NULL, parent=NULL, properties=NULL, annotations=NULL, local_state=NULL)}: Constructor for \code{\link{Link}}
\item \code{local_state(state=NULL)}: Set or get the object's internal state, excluding properties, or annotations.
}
}
|
seed <- 579
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226221.4970426522
df.resid <- 35402
df <- 165
coefs <- c(6.688714457679813, 5.811463811798606, 5.681163712213066, 5.346312620348099, 5.045968804664241, 4.871251852811924, 4.825471457981159, 4.608143873034347, 4.424414277994358, 4.310690232043166, 4.366622534999515, 4.1889627239795795, 3.9977189753619853, 3.9853356518748826, 3.7704221616282103, 3.5317582887174477, 3.3129041851172443, 2.9769184361528738, 2.538101193886421, 2.093627889406519, 1.6185486142962082, 0.9112517292434672, 0.9516282448439711, 0.20431889353702115, 0.6791560229098378, -1.1225814651432335, -9.875105832958457e-2, 0.9393533185211437, 0.9838707025610218, -1.182330545352761, -2.181767956777174, -2.563015821785121, -0.4792899295761722, 0.7625275877491479, 1.2076579481238199, -0.8618827199800694, 0.13585915974085455, -1.123474240563784, -0.1240608253839268, -0.35738963952374636, 0.8827029502678161, 0.6863663316970873, -0.8418556793594486, -1.7675475373512726, -0.9293824869461967, -0.8119248324618649, -0.6438639562714377, 6.127374025638519e-2, 0.25242583770550586, -0.5682855080399348, -0.15011714266479254, 0.8862509424993632, -2.1957043479091305, 1.653771922898161, 0.8745585950028382, 1.1190650781180365, -1.448354930175235, -5.579910563216151e-2, -0.6243398522671412, 1.2809882532624626, 0.38921557247175315, 0.6615602224654763, -1.4668155540958532, -0.6390113962197228, -0.9267267895601522, 0.15774397271194343, 0.5363179086686375, -0.3166931572839032, -1.0494956925156966, -0.48420142846815684, -1.5671584277150261, -1.9622616994182333e-2, 0.5074719376034629, 0.8610806463786133, 0.7757675853889173, -1.192471191142284, -1.1736857593577459, -1.1997898813307364, -0.1287779269268473, 0.7712144767324682, 1.0496674525530891, 9.942177859513196e-2, 0.1993970655522322, -1.167544126954171, -0.6958247342927041, 0.39725898034242824, 1.2041477863951529, 0.25987477439963863, 0.8373258306622322, -1.838314492798681, 0.4743287953235884, 0.7403885009912179, 0.7552737126386867, 0.1792827466794132, -0.2769465204008186, 1.0043858560746004, -0.12829251064720545, 
0.2970629430540155, -4.433974450549151e-2, -0.17729654640236026, 0.4844741196840296, -0.28905418133721344, 0.6972418869452054, -0.5204197883320439, 0.6592160531959909, 0.7860304872731051, 1.1307375234522197, -0.8765048032892664, -0.16558647478888003, -0.35816512930022515, 0.48196420249947447, 0.5618462438021964, 1.5216501094033925, -0.6770257179183897, -0.20992163922760637, -0.7875418762389416, 0.734906205203496, -0.28375046743562554, 0.4919329376526773, 0.7742880496360015, -0.7213197716461773, -0.5674276880015661, -1.7887727825333752, -0.5257581415441301, 0.3328639284734936, 0.8309168397190565, 1.4521482945266881e-2, 1.1323221982963079, -0.7270028247811151, -0.37503858262781214, 0.39150957063469044, 0.7531063633530586, 0.636469441676921, 0.35329067178268686, 0.156042161344288, 1.1160300523396358, -0.36575907702875116, 1.065572234499647, 0.6973242553846816, 1.0271768080893235, 0.6664859434477038, -0.6926693652300631, -1.1601498714074658, 0.44794803737880134, 0.1800379288287408, 0.4865720132708014, -8.534684167543038e-2, -0.46240452691491973, -1.897667017296497, 1.2804017586974141, 0.1290076692750568, 1.245140526894628, -0.26228291878193516, -3.5130547684842806e-2, -0.10040466219769137, -1.6007549262806542, -1.0696124095621242, 0.6831085537546682, 1.140282213560606, -0.40902388981481885, 1.5166553501002724, -0.48825824148562635, -6.09773912542496e-2, -8.008940813070285e-2, 1.0302594157373175)
| /analysis/boot/boot579.R | no_license | patperry/interaction-proc | R | false | false | 3,763 | r | seed <- 579
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 226221.4970426522
df.resid <- 35402
df <- 165
coefs <- c(6.688714457679813, 5.811463811798606, 5.681163712213066, 5.346312620348099, 5.045968804664241, 4.871251852811924, 4.825471457981159, 4.608143873034347, 4.424414277994358, 4.310690232043166, 4.366622534999515, 4.1889627239795795, 3.9977189753619853, 3.9853356518748826, 3.7704221616282103, 3.5317582887174477, 3.3129041851172443, 2.9769184361528738, 2.538101193886421, 2.093627889406519, 1.6185486142962082, 0.9112517292434672, 0.9516282448439711, 0.20431889353702115, 0.6791560229098378, -1.1225814651432335, -9.875105832958457e-2, 0.9393533185211437, 0.9838707025610218, -1.182330545352761, -2.181767956777174, -2.563015821785121, -0.4792899295761722, 0.7625275877491479, 1.2076579481238199, -0.8618827199800694, 0.13585915974085455, -1.123474240563784, -0.1240608253839268, -0.35738963952374636, 0.8827029502678161, 0.6863663316970873, -0.8418556793594486, -1.7675475373512726, -0.9293824869461967, -0.8119248324618649, -0.6438639562714377, 6.127374025638519e-2, 0.25242583770550586, -0.5682855080399348, -0.15011714266479254, 0.8862509424993632, -2.1957043479091305, 1.653771922898161, 0.8745585950028382, 1.1190650781180365, -1.448354930175235, -5.579910563216151e-2, -0.6243398522671412, 1.2809882532624626, 0.38921557247175315, 0.6615602224654763, -1.4668155540958532, -0.6390113962197228, -0.9267267895601522, 0.15774397271194343, 0.5363179086686375, -0.3166931572839032, -1.0494956925156966, -0.48420142846815684, -1.5671584277150261, -1.9622616994182333e-2, 0.5074719376034629, 0.8610806463786133, 0.7757675853889173, -1.192471191142284, -1.1736857593577459, -1.1997898813307364, -0.1287779269268473, 0.7712144767324682, 1.0496674525530891, 9.942177859513196e-2, 0.1993970655522322, -1.167544126954171, -0.6958247342927041, 0.39725898034242824, 1.2041477863951529, 0.25987477439963863, 0.8373258306622322, -1.838314492798681, 0.4743287953235884, 0.7403885009912179, 0.7552737126386867, 0.1792827466794132, -0.2769465204008186, 1.0043858560746004, -0.12829251064720545, 
0.2970629430540155, -4.433974450549151e-2, -0.17729654640236026, 0.4844741196840296, -0.28905418133721344, 0.6972418869452054, -0.5204197883320439, 0.6592160531959909, 0.7860304872731051, 1.1307375234522197, -0.8765048032892664, -0.16558647478888003, -0.35816512930022515, 0.48196420249947447, 0.5618462438021964, 1.5216501094033925, -0.6770257179183897, -0.20992163922760637, -0.7875418762389416, 0.734906205203496, -0.28375046743562554, 0.4919329376526773, 0.7742880496360015, -0.7213197716461773, -0.5674276880015661, -1.7887727825333752, -0.5257581415441301, 0.3328639284734936, 0.8309168397190565, 1.4521482945266881e-2, 1.1323221982963079, -0.7270028247811151, -0.37503858262781214, 0.39150957063469044, 0.7531063633530586, 0.636469441676921, 0.35329067178268686, 0.156042161344288, 1.1160300523396358, -0.36575907702875116, 1.065572234499647, 0.6973242553846816, 1.0271768080893235, 0.6664859434477038, -0.6926693652300631, -1.1601498714074658, 0.44794803737880134, 0.1800379288287408, 0.4865720132708014, -8.534684167543038e-2, -0.46240452691491973, -1.897667017296497, 1.2804017586974141, 0.1290076692750568, 1.245140526894628, -0.26228291878193516, -3.5130547684842806e-2, -0.10040466219769137, -1.6007549262806542, -1.0696124095621242, 0.6831085537546682, 1.140282213560606, -0.40902388981481885, 1.5166553501002724, -0.48825824148562635, -6.09773912542496e-2, -8.008940813070285e-2, 1.0302594157373175)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitvalidate.R,
% R/splitvalidate.lassosum.pipeline.R
\name{splitvalidate}
\alias{splitvalidate}
\alias{splitvalidate.lassosum.pipeline}
\title{Function to perform split-validation using output from lassosum.pipeline with external phenotype}
\usage{
splitvalidate(...)
\method{splitvalidate}{lassosum.pipeline}(ls.pipeline, test.bfile = NULL,
keep = NULL, remove = NULL, pheno = NULL, covar = NULL, trace = 1,
split = NULL, rematch = !is.null(test.bfile), ...)
}
\arguments{
\item{...}{parameters to pass to \code{\link{validate.lassosum.pipeline}}}
\item{ls.pipeline}{A lassosum.pipeline object}
\item{test.bfile}{The (\href{https://www.cog-genomics.org/plink2/formats#bed}{PLINK bfile} for the test dataset}
\item{keep}{Participants to keep (see \code{\link{lassosum}} for more details)}
\item{remove}{Participants to remove}
\item{pheno}{A vector of phenotype OR a \code{data.frame} with 3 columns, the first 2 columns being headed "FID" and "IID", OR a filename for such a data.frame}
\item{covar}{A matrix of covariates OR a \code{data.frame} with 3 or more columns, the first 2 columns being headed "FID" and "IID", OR a filename for such a data.frame}
\item{trace}{Controls amount of output}
\item{rematch}{Forces a rematching of the ls.pipeline betas with the new .bim file}
}
\details{
Performs split-validation. Randomly split the test data into half for validation
and half for prediction. Standardize the best cross-predicted pgs and stack together.
}
| /man/splitvalidate.Rd | permissive | choishingwan/lassosum | R | false | true | 1,559 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitvalidate.R,
% R/splitvalidate.lassosum.pipeline.R
\name{splitvalidate}
\alias{splitvalidate}
\alias{splitvalidate.lassosum.pipeline}
\title{Function to perform split-validation using output from lassosum.pipeline with external phenotype}
\usage{
splitvalidate(...)
\method{splitvalidate}{lassosum.pipeline}(ls.pipeline, test.bfile = NULL,
keep = NULL, remove = NULL, pheno = NULL, covar = NULL, trace = 1,
split = NULL, rematch = !is.null(test.bfile), ...)
}
\arguments{
\item{...}{parameters to pass to \code{\link{validate.lassosum.pipeline}}}
\item{ls.pipeline}{A lassosum.pipeline object}
\item{test.bfile}{The (\href{https://www.cog-genomics.org/plink2/formats#bed}{PLINK bfile} for the test dataset}
\item{keep}{Participants to keep (see \code{\link{lassosum}} for more details)}
\item{remove}{Participants to remove}
\item{pheno}{A vector of phenotype OR a \code{data.frame} with 3 columns, the first 2 columns being headed "FID" and "IID", OR a filename for such a data.frame}
\item{covar}{A matrix of covariates OR a \code{data.frame} with 3 or more columns, the first 2 columns being headed "FID" and "IID", OR a filename for such a data.frame}
\item{trace}{Controls amount of output}
\item{rematch}{Forces a rematching of the ls.pipeline betas with the new .bim file}
}
\details{
Performs split-validation. Randomly split the test data into half for validation
and half for prediction. Standardize the best cross-predicted pgs and stack together.
}
|
## Demonstration that ordinalRidge fits are invariant to rescaling the
## linear kernel by a constant (here n), provided lambda is rescaled the
## same way: predictions and bias terms match, and the recovered feature
## weights match after the same rescaling. Recorded outputs are kept below
## each expression as "# ..." comments.
library(ordinalRidge)
set.seed(42)
## Generate training and test data
## (toyData arguments assumed to be sizes/feature counts -- see ?toyData)
Tr <- toyData( 100, 3, 2, stdev=0.5 )
Te <- toyData( 5, 3, 2, stdev=0.5 )
## Without normalizing by a constant
K <- Tr$X %*% t(Tr$X)        # train-vs-train linear kernel
Kte <- Te$X %*% t(Tr$X)      # test-vs-train kernel
mdl <- ordinalRidge(K, Tr$y)
## With normalizing by a constant
n <- nrow(Tr$X)
KN <- Tr$X %*% t(Tr$X) / n # <-- Adjust training kernel
KNte <- Te$X %*% t(Tr$X) / n # <-- Adjust test kernel
mdlN <- ordinalRidge(KN, Tr$y, lambda=0.1/n) # <-- Adjust lambda
## Compute predictions
P <- predict(mdl, Kte)
PN <- predict(mdlN, KNte)
## Weights on the original features and bias terms
t(Tr$X) %*% mdl$v
# [,1]
# Feat1 1.0166423
# Feat2 0.9938469
# Feat3 1.0025881
t(Tr$X) %*% mdlN$v / n # <-- Adjust the weights
# [,1]
# Feat1 1.0166423
# Feat2 0.9938469
# Feat3 1.0025881
## No need to adjust the bias terms!
mdl$b
# [1] -1.393522 -4.517129
mdlN$b
# [1] -1.393522 -4.517129
## No need to adjust predictions!
## (differences below are at floating-point noise level)
range( P$score - PN$score )
# [1] -2.819966e-14 1.332268e-14
identical( P$pred, PN$pred )
# [1] TRUE
range( P$prob - PN$prob )
# [1] -5.884182e-15 8.659740e-15
| /02-keradj.R | permissive | clemenshug/ord-work | R | false | false | 1,180 | r | library(ordinalRidge)
set.seed(42)
## Generate training and test data
Tr <- toyData( 100, 3, 2, stdev=0.5 )
Te <- toyData( 5, 3, 2, stdev=0.5 )
## Without normalizing by a constant
K <- Tr$X %*% t(Tr$X)
Kte <- Te$X %*% t(Tr$X)
mdl <- ordinalRidge(K, Tr$y)
## With normalizing by a constant
n <- nrow(Tr$X)
KN <- Tr$X %*% t(Tr$X) / n # <-- Adjust training kernel
KNte <- Te$X %*% t(Tr$X) / n # <-- Adjust test kernel
mdlN <- ordinalRidge(KN, Tr$y, lambda=0.1/n) # <-- Adjust lambda
## Compute predictions
P <- predict(mdl, Kte)
PN <- predict(mdlN, KNte)
## Weights on the original features and bias terms
t(Tr$X) %*% mdl$v
# [,1]
# Feat1 1.0166423
# Feat2 0.9938469
# Feat3 1.0025881
t(Tr$X) %*% mdlN$v / n # <-- Adjust the weights
# [,1]
# Feat1 1.0166423
# Feat2 0.9938469
# Feat3 1.0025881
## No need to adjust the bias terms!
mdl$b
# [1] -1.393522 -4.517129
mdlN$b
# [1] -1.393522 -4.517129
## No need to adjust predictions!
range( P$score - PN$score )
# [1] -2.819966e-14 1.332268e-14
identical( P$pred, PN$pred )
# [1] TRUE
range( P$prob - PN$prob )
# [1] -5.884182e-15 8.659740e-15
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{make.comm}
\alias{make.comm}
\title{construct community (simple)}
\usage{
make.comm(Comm1, Factors)
}
\arguments{
\item{Comm1}{list of species functions}
\item{Factors}{data frame of "environmental" data}
}
\description{
construct community (simple)
}
\examples{
make.comm()
}
\keyword{community}
\keyword{microbiome}
\keyword{model}
\keyword{reference}
| /man/make.comm.Rd | permissive | Djeppschmidt/Model.Microbiome | R | false | true | 451 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{make.comm}
\alias{make.comm}
\title{construct community (simple)}
\usage{
make.comm(Comm1, Factors)
}
\arguments{
\item{Comm1}{list of species functions}
\item{Factors}{data frame of "environmental" data}
}
\description{
construct community (simple)
}
\examples{
make.comm()
}
\keyword{community}
\keyword{microbiome}
\keyword{model}
\keyword{reference}
|
#!/usr/bin/env Rscript
# Merge the per-sample sgRNA count files ("<sample>.screen_A.csv") of the
# requested conditions into one tab-separated count table with columns
# sgRNA, gene, <one count column per sample> (MAGeCK-style input).
args <- commandArgs(T)
## input
dir <- args[1]  # base directory containing the count/ subfolder
cond <- unlist(strsplit(args[2], split = ":"))  # colon-separated condition names
## input (sample sheet) and output (merged count table)
sampleInfo <- args[3]  # CSV sample sheet; read below, so an input despite the original label
tsv_count <- args[4]   # path of the merged count table to write
sample_Info <- read.csv(sampleInfo,header = T,sep=",", stringsAsFactors=F)
# Samples belonging to the requested conditions:
saps <- unique(sample_Info[sample_Info$condition %in% cond,]$samples)
# NOTE(review): mixes file.path() with a literal "count/" prefix in paste();
# works on POSIX, but file.path(dir, "count", ...) would be cleaner.
NameList <- file.path(dir, paste("count/",saps,".screen_A.csv",sep=""))
# Read each count file (key columns V1..V3 plus the per-sample count column):
Lib <- lapply(NameList,function(x){
read.table(x, header = F,sep = "\t")})
# Sample names recovered from the file names.
# NOTE(review): strsplit() treats ".screen_A.csv" as a regex (dots match any
# character); harmless here, but fixed = TRUE would be safer.
name <- unlist(lapply(NameList, function(x) {
unlist(strsplit(basename(x),split=".screen_A.csv"))[1]}))
# Successively merge all per-sample tables on the shared key columns:
Lib_df <- Reduce(function(x, y) merge(x, y, by=c("V1","V2","V3")),Lib)
colnames(Lib_df) <- c("gene", "sgRNA", "seq", name )
Lib_df_final <- Lib_df[,c("sgRNA", "gene", name)]
write.table(Lib_df_final, tsv_count, sep = "\t", row.names = F, quote = F)
| /scripts/pre_Mageck.R | no_license | yuxinghai/CMZ_gecko | R | false | false | 827 | r | #!/usr/bin/env Rscript
args <- commandArgs(T)
## input
dir <- args[1]
cond <- unlist(strsplit(args[2], split = ":"))
## output
sampleInfo <- args[3]
tsv_count <- args[4]
sample_Info <- read.csv(sampleInfo,header = T,sep=",", stringsAsFactors=F)
saps <- unique(sample_Info[sample_Info$condition %in% cond,]$samples)
NameList <- file.path(dir, paste("count/",saps,".screen_A.csv",sep=""))
Lib <- lapply(NameList,function(x){
read.table(x, header = F,sep = "\t")})
name <- unlist(lapply(NameList, function(x) {
unlist(strsplit(basename(x),split=".screen_A.csv"))[1]}))
Lib_df <- Reduce(function(x, y) merge(x, y, by=c("V1","V2","V3")),Lib)
colnames(Lib_df) <- c("gene", "sgRNA", "seq", name )
Lib_df_final <- Lib_df[,c("sgRNA", "gene", name)]
write.table(Lib_df_final, tsv_count, sep = "\t", row.names = F, quote = F)
|
#' Contract template ("Vertrag") for statistical consulting
#'
#' Assembles the full text of a German consulting contract as a single
#' R Markdown string (YAML header plus body), ready for rendering to PDF.
#'
#' @param name,adr,tel,email,knr Client details: name, address, phone,
#'   e-mail and customer number.
#' @param euro Hourly rate in Euro.
#' @param h Estimated effort (free text, e.g. " 5-9 Stunden").
#' @param Betreff Subject of the contract.
#' @param zwischenrechnung Amount in Euro above which interim invoices
#'   are issued.
#' @param BANK,IBAN,BIC Bank details printed under "Bankverbindung".
#'
#' @return A single character string containing the contract in R
#'   Markdown format (German).
#' @export
#'
Vertrag<- function(name="Vorname Nachname",
adr="bitte ergänzen",
tel="bitte ergänzen",
email="bitte ergänzen",
knr="0",
euro="76",
h=" 5-9 Stunden" ,
Betreff="statistische Beratung",
zwischenrechnung=400,
BANK = "BANK",
IBAN = "IBAN",
BIC = "BIC"
){
# Client contact block (markdown paragraph):
Kunde <- paste(
" ", name, " ", adr, "\n\n",
"Telefon/Skype: ", tel, ", E-Mail: ", email, "\n\n"
)
# Conditions block: hourly rate and estimated effort:
Kondition <- paste0("**Stundensatz: ", euro, " Euro**\n\n" ,
"**geschätzter Aufwand: ", h, "**\n")
# Assemble the complete document; the German contract text below is part of
# the returned string and is kept verbatim:
msg<-
paste0(
"---
title: Vertrag über Betreuungstätigkeit
author:
- Statistik-Peter
- Dipl.-Ing. Wolfgang Peter
- Innsbrucker Straße 14, 6176 Völs
date: Völs, `r format(Sys.time(), '%d.%m.%Y')`
geometry: left=3cm, right=3cm, top=2.5cm, bottom=2.3cm
lang: de-DE
linestretch: 1.2
fontsize: 11pt
output:
pdf_document
---
Im Folgenden vereinbaren
",
Kunde,
"
nachfolgend Kunde genannt und
DI Wolfgang Peter
einen Vertrag über eine ", Betreff, ".
",
Kondition,
"
KNr: ", knr,
"
## §1
Die Firma Statistik-Peter hilft bei der eigenständigen Erstellung
wissenschaftlichen Arbeit mit Schwerpunkt Methoden -Teil. Wir bieten
in keiner Form eine Dienstleistung im Sinne von akademischen Ghostwriting an.
D.h. wir erstellen keine Texte oder Text-Vorlagen. Was wir anbieten
ist eine Hilfestellung für zukünftige Akademiker, ihre wissenschaftliche
Arbeit selbständig zu erstellen. Wir beraten welche statistischen Methoden
geeignet sind, eine wissenschaftliche Fragestellung zu prüfen. Wir arbeiten
dabei interaktiv mit dem Diplomanden/in, wobei die Interpretation, Beschreibung
und Verteidigung der Ergebnisse allein vom Kunden erfolgt.
## §2
Die statistische Auswertung und Beratung wird von DI Wolfgang Peter nach
bestem Wissen und Gewissen durchgeführt. Eine Erfolgsgarantie besteht nicht.
Es gelten die DFG-Empfehlungen zur Sicherung guter wissenschaftlicher Praxis (www.dfg.de).
## §3
Die Betreung kann jederzeit von einer der beiden Seiten beendet werden.
In diesem Fall sind vom Kunden nur die geleisteten Stunden zu entgelten.
## §4
Jegliche Inhalte wissenschaftlicher Arbeiten werden vertraulich behandelt
und Dritten nicht zugänglich gemacht.
## §5
Das Honorar für die Auswertung berechnet sich aus der tatsächlich erbrachten
Leistungen zum vereinbarten Stundensatz. Zwischenrechnungen werden erstellt,
wenn ein Betrag von ",zwischenrechnung ," Euro überschritten wird.
## §6
Die Tabellen und Grafiken werden von der Firma Statistik-Peter mit der
Software R im APA-Style Format erstellt.
## §7
Für diese Geschäftsbeziehungen und die gesamten Rechtsbeziehungen der
Vertragspartner gilt das Recht der Bundesrepublik Österreich. Ausschließlicher
Gerichtsstand für alle, sich aus dem Vertragsverhältnis unmittelbar oder mittelbar
ergebenden Streitigkeiten ist Innsbruck.
## Bankverbindung:
", BANK, "
IBAN: ", IBAN, "
BIC: ", BIC, "
# Anmerkung zur Auswertung und den verwendeten Methoden
Die von mir verwendeten statistischen Methoden basieren vor allem auf den
Empfehlungen von Bortz [4] sowie Sachs [7].
Die Darstellung der Ergebnisse entspricht wissenschaftlicher Vorgaben,
insbesondere halte ich mich bei den Tabellen und Grafiken sowie der
Darstellung statistischer Kennzahlen an die Vorgaben von APA-Style[2]. (Der APA-Style
ist im Kontext sozialwissenschaftlicher Forschung quasi der Gold-Standard
hinsichtlich des Berichtens von Ergebnissen.)
Die Ergebnisse kann ich entweder in MS-Word, MS-Excel, Open-Office, LaTex,
HTML sowie die Grafiken als PDF, jpg, gif oder Windows Metafile weitergeben.
Rohdaten kann ich in einem für Statistik-Software lesbaren Format weitergeben
z.B. für R, SPSS, Minitab.
Ich kann keine Ergebnis-Darstellungen in proprietären-Formaten wie z.B.
MATLAB, Minitab, SPSS, SAS oder STATA erstellen. Die Auswertung erstelle
ich mit der Software R [8].
Was ich explizit nicht mache, sind alle Tätigkeiten die das Arbeitsfeld
von Grafikern beinhalten. Darunter fallen, das Einbinden von Schrifttypen,
Einfügen von Hintergrundbildern und Logos, das designen von Grafiken und
Tabellen nach künstlerischen Vorgaben, das Erstellen von Druckvorlagen,
alles was mit dem Begriff corporate identity in Verbindung gebracht werden kann,
sowie der Nachbau von z.B. Excel-Grafiken und Tabellen aus Vorlagen.
## Literatur
[1] Achim Bühl, (2014), SPSS 22 Einführung in die moderne Datenanalyse, 14. aktualisierte Auflage, Pearson
[2] APA, 2009, Publication Manual of the American Psychological Association
[3] Daniel Wollschläger (2012), Grundlagen der Datenanalyse mit R: Eine anwendungsorientierte Einführung 2. Aufl., Heidelberg: Springer
[4] Jürgen Bortz, Nicola Döring, (2006), Forschungsmethoden und Evaluation, Heidelberg: Springer
[5] Jürgen Bortz, Christof Schuster, (2010), Statistik für Human- und Sozialwissenschaftler, 7. Aufl., Heidelberg: Springer
[6] John Fox, Sanford Weisberg, (2011), An R Companion to Applied Regression, Second Edition, Sage
[7] Lothar Sachs, Jürgen Hedderich, (2006), Angewandte Statistik, 12.Aufl. Heidelberg: Springer
[8] R Core Team (2015). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.
# Datenschutzerklärung
Wir verarbeiten Ihre personenbezogenen Daten, die unter folgende Datenkategorien fallen:
- Name/Firma,
- Geschäftsanschrift und sonstige Adressen des Kunden,
- Kontaktdaten (Telefonnummer, Telefaxnummer, E-Mail-Adresse, etc.)
Sie haben uns Daten über sich freiwillig zur Verfügung gestellt und wir
verarbeiten diese Daten auf Grundlage Ihrer Einwilligung zu folgenden Zwecken:
- Betreuung des Kunden
Sie können diese Einwilligung jederzeit widerrufen. Ein Widerruf hat zur Folge,
dass wir Ihre Daten ab diesem Zeitpunkt zu oben genannten Zwecken nicht mehr verarbeiten.
Für einen Widerruf wenden Sie sich bitte an: Wolfgang Peter
Die von Ihnen bereit gestellten Daten sind weiters zur Vertragserfüllung bzw. zur
Durchführung vorvertraglicher Maßnahmen erforderlich. Ohne diese Daten können wir
den Vertrag mit Ihnen nicht abschließen.
Wir speichern Ihre Daten bis zur Beendigung der Auftragsarbeit. Für die Speicherung
verwenden wir die Dienste von a1.net und dropbox.com sowie als Email-Provider gmail.com.
Ihre Daten werden zumindest zum Teil auch außerhalb der EU bzw. des EWR verarbeitet,
und zwar in USA. Das angemessene Schutzniveau ergibt sich aus einem Angemessenheitsbeschluss
der Europäischen Kommission nach Art 45 DSGVO.
## Rechtsbehelfsbelehrung
Ihnen stehen grundsätzlich die Rechte auf Auskunft, Berichtigung, Löschung,
Einschränkung, Datenübertragbarkeit und Widerspruch zu. Dafür wenden Sie sich
an uns. Wenn Sie glauben, dass die Verarbeitung Ihrer Daten gegen das Datenschutzrecht
verstößt oder Ihre datenschutzrechtlichen Ansprüche sonst in einer Weise verletzt
worden sind, können Sie sich bei der Aufsichtsbehörde beschweren. In Österreich ist
die Datenschutzbehörde zuständig.
")
# Return the assembled markdown string:
msg
}
| /R/Vertrag.R | no_license | stp4/stp25Project | R | false | false | 7,516 | r | #' Vertrag Templat tEMPLAT
#'
#' @param name,adr,tel,email,knr,euro,h Kunde
#'
#' @return string
#' @export
#'
Vertrag<- function(name="Vorname Nachname",
adr="bitte ergänzen",
tel="bitte ergänzen",
email="bitte ergänzen",
knr="0",
euro="76",
h=" 5-9 Stunden" ,
Betreff="statistische Beratung",
zwischenrechnung=400,
BANK = "BANK",
IBAN = "IBAN",
BIC = "BIC"
){
Kunde <- paste(
" ", name, " ", adr, "\n\n",
"Telefon/Skype: ", tel, ", E-Mail: ", email, "\n\n"
)
Kondition <- paste0("**Stundensatz: ", euro, " Euro**\n\n" ,
"**geschätzter Aufwand: ", h, "**\n")
msg<-
paste0(
"---
title: Vertrag über Betreuungstätigkeit
author:
- Statistik-Peter
- Dipl.-Ing. Wolfgang Peter
- Innsbrucker Straße 14, 6176 Völs
date: Völs, `r format(Sys.time(), '%d.%m.%Y')`
geometry: left=3cm, right=3cm, top=2.5cm, bottom=2.3cm
lang: de-DE
linestretch: 1.2
fontsize: 11pt
output:
pdf_document
---
Im Folgenden vereinbaren
",
Kunde,
"
nachfolgend Kunde genannt und
DI Wolfgang Peter
einen Vertrag über eine ", Betreff, ".
",
Kondition,
"
KNr: ", knr,
"
## §1
Die Firma Statistik-Peter hilft bei der eigenständigen Erstellung
wissenschaftlichen Arbeit mit Schwerpunkt Methoden -Teil. Wir bieten
in keiner Form eine Dienstleistung im Sinne von akademischen Ghostwriting an.
D.h. wir erstellen keine Texte oder Text-Vorlagen. Was wir anbieten
ist eine Hilfestellung für zukünftige Akademiker, ihre wissenschaftliche
Arbeit selbständig zu erstellen. Wir beraten welche statistischen Methoden
geeignet sind, eine wissenschaftliche Fragestellung zu prüfen. Wir arbeiten
dabei interaktiv mit dem Diplomanden/in, wobei die Interpretation, Beschreibung
und Verteidigung der Ergebnisse allein vom Kunden erfolgt.
## §2
Die statistische Auswertung und Beratung wird von DI Wolfgang Peter nach
bestem Wissen und Gewissen durchgeführt. Eine Erfolgsgarantie besteht nicht.
Es gelten die DFG-Empfehlungen zur Sicherung guter wissenschaftlicher Praxis (www.dfg.de).
## §3
Die Betreung kann jederzeit von einer der beiden Seiten beendet werden.
In diesem Fall sind vom Kunden nur die geleisteten Stunden zu entgelten.
## §4
Jegliche Inhalte wissenschaftlicher Arbeiten werden vertraulich behandelt
und Dritten nicht zugänglich gemacht.
## §5
Das Honorar für die Auswertung berechnet sich aus der tatsächlich erbrachten
Leistungen zum vereinbarten Stundensatz. Zwischenrechnungen werden erstellt,
wenn ein Betrag von ",zwischenrechnung ," Euro überschritten wird.
## §6
Die Tabellen und Grafiken werden von der Firma Statistik-Peter mit der
Software R im APA-Style Format erstellt.
## §7
Für diese Geschäftsbeziehungen und die gesamten Rechtsbeziehungen der
Vertragspartner gilt das Recht der Bundesrepublik Österreich. Ausschließlicher
Gerichtsstand für alle, sich aus dem Vertragsverhältnis unmittelbar oder mittelbar
ergebenden Streitigkeiten ist Innsbruck.
## Bankverbindung:
", BANK, "
IBAN: ", IBAN, "
BIC: ", BIC, "
# Anmerkung zur Auswertung und den verwendeten Methoden
Die von mir verwendeten statistischen Methoden basieren vor allem auf den
Empfehlungen von Bortz [4] sowie Sachs [7].
Die Darstellung der Ergebnisse entspricht wissenschaftlicher Vorgaben,
insbesondere halte ich mich bei den Tabellen und Grafiken sowie der
Darstellung statistischer Kennzahlen an die Vorgaben von APA-Style[2]. (Der APA-Style
ist im Kontext sozialwissenschaftlicher Forschung quasi der Gold-Standard
hinsichtlich des Berichtens von Ergebnissen.)
Die Ergebnisse kann ich entweder in MS-Word, MS-Excel, Open-Office, LaTex,
HTML sowie die Grafiken als PDF, jpg, gif oder Windows Metafile weitergeben.
Rohdaten kann ich in einem für Statistik-Software lesbaren Format weitergeben
z.B. für R, SPSS, Minitab.
Ich kann keine Ergebnis-Darstellungen in proprietären-Formaten wie z.B.
MATLAB, Minitab, SPSS, SAS oder STATA erstellen. Die Auswertung erstelle
ich mit der Software R [8].
Was ich explizit nicht mache, sind alle Tätigkeiten die das Arbeitsfeld
von Grafikern beinhalten. Darunter fallen, das Einbinden von Schrifttypen,
Einfügen von Hintergrundbildern und Logos, das designen von Grafiken und
Tabellen nach künstlerischen Vorgaben, das Erstellen von Druckvorlagen,
alles was mit dem Begriff corporate identity in Verbindung gebracht werden kann,
sowie der Nachbau von z.B. Excel-Grafiken und Tabellen aus Vorlagen.
## Literatur
[1] Achim Bühl, (2014), SPSS 22 Einführung in die moderne Datenanalyse, 14. aktualisierte Auflage, Pearson
[2] APA, 2009, Publication Manual of the American Psychological Association
[3] Daniel Wollschläger (2012), Grundlagen der Datenanalyse mit R: Eine anwendungsorientierte Einführung 2. Aufl., Heidelberg: Springer
[4] Jürgen Bortz, Nicola Döring, (2006), Forschungsmethoden und Evaluation, Heidelberg: Springer
[5] Jürgen Bortz, Christof Schuster, (2010), Statistik für Human- und Sozialwissenschaftler, 7. Aufl., Heidelberg: Springer
[6] John Fox, Sanford Weisberg, (2011), An R Companion to Applied Regression, Second Edition, Sage
[7] Lothar Sachs, Jürgen Hedderich, (2006), Angewandte Statistik, 12.Aufl. Heidelberg: Springer
[8] R Core Team (2015). R: A language and environment for statistical computing. R Foundation for Statistical Computing, Vienna, Austria. URL http://www.R-project.org/.
# Datenschutzerklärung
Wir verarbeiten Ihre personenbezogenen Daten, die unter folgende Datenkategorien fallen:
- Name/Firma,
- Geschäftsanschrift und sonstige Adressen des Kunden,
- Kontaktdaten (Telefonnummer, Telefaxnummer, E-Mail-Adresse, etc.)
Sie haben uns Daten über sich freiwillig zur Verfügung gestellt und wir
verarbeiten diese Daten auf Grundlage Ihrer Einwilligung zu folgenden Zwecken:
- Betreuung des Kunden
Sie können diese Einwilligung jederzeit widerrufen. Ein Widerruf hat zur Folge,
dass wir Ihre Daten ab diesem Zeitpunkt zu oben genannten Zwecken nicht mehr verarbeiten.
Für einen Widerruf wenden Sie sich bitte an: Wolfgang Peter
Die von Ihnen bereit gestellten Daten sind weiters zur Vertragserfüllung bzw. zur
Durchführung vorvertraglicher Maßnahmen erforderlich. Ohne diese Daten können wir
den Vertrag mit Ihnen nicht abschließen.
Wir speichern Ihre Daten bis zur Beendigung der Auftragsarbeit. Für die Speicherung
verwenden wir die Dienste von a1.net und dropbox.com sowie als Email-Provider gmail.com.
Ihre Daten werden zumindest zum Teil auch außerhalb der EU bzw. des EWR verarbeitet,
und zwar in USA. Das angemessene Schutzniveau ergibt sich aus einem Angemessenheitsbeschluss
der Europäischen Kommission nach Art 45 DSGVO.
## Rechtsbehelfsbelehrung
Ihnen stehen grundsätzlich die Rechte auf Auskunft, Berichtigung, Löschung,
Einschränkung, Datenübertragbarkeit und Widerspruch zu. Dafür wenden Sie sich
an uns. Wenn Sie glauben, dass die Verarbeitung Ihrer Daten gegen das Datenschutzrecht
verstößt oder Ihre datenschutzrechtlichen Ansprüche sonst in einer Weise verletzt
worden sind, können Sie sich bei der Aufsichtsbehörde beschweren. In Österreich ist
die Datenschutzbehörde zuständig.
")
msg
}
|
# Exploratory scratch script: trying out the RtutoR package (automated EDA
# PowerPoint reports) on toy data and on an account-info spreadsheet.
# NOTE(review): this is an interactive transcript, not a runnable pipeline --
# it mixes install.packages()/help calls, absolute Windows paths, and depends
# on objects (numeric_features, target_var) and packages (readxl, writexl,
# dplyr, FSA) being available before individual lines work.
install.packages('RtutoR')
library(RtutoR)
# Patched summary helper intended to replace the one inside RtutoR:
univar_num_features_summary <- lapply(numeric_features, function(var_name) {
val <- data.frame(t(Summarize(tt[[var_name]])))
val[] = lapply(val, function(x) round(x,2))
#colnames(val) <- c("min","25th","50th","75th","max")
return(val)
})
environment(univar_num_features_summary) <- asNamespace('RtutoR')
fixInNamespace("univar_num_features_summary", pos = 3)
fixInNamespace("univar_num_features_summary", pos="package:RtutoR")
# NOTE(review): invalid R -- assignment to a function call; presumably
# assignInNamespace() was meant (see the help lookup on the next line).
univar_num_features_summary() <- RtutoR:::univar_num_features_summary()
?assignInNamespace
remove.packages('RtutoR')
# NOTE(review): read_xlsx/write_xlsx need readxl/writexl attached; library()
# calls for them only appear further below.
tt=read_xlsx("df.xlsx")
write_xlsx(tt, 'aa.xlsx')
names(tt)
a='mpg'
res = generate_exploratory_analysis_ppt(tt,target_var = a,
output_file_name = "titanic_exp_report.pptx")
tt
a=as.vector(tt[2])
b=c(t(tt$am))
install.packages('FSA')
library(FSA)
Summarize(b)
?fivenum
# Split column names by (numeric vs categorical) class:
data_type <- sapply(tt,class)
numeric_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("numeric","integer","double")))]
categorical_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("factor","character","logical")))]
# Variant based on Tukey's five-number summary:
univar_num_features_summary1 <- lapply(numeric_features, function(var_name) {
val <- data.frame(t(fivenum(tt[[var_name]])))
val[] = lapply(val, function(x) round(x,2))
#colnames(val) <- c("min","25th","50th","75th","max")
return(val)
})
glimpse(tt)
tt$drat=as.numeric(tt$drat)
tt$wt=as.numeric(tt$wt)
tt$qsec=as.numeric(tt$qsec)
univar_num_features_summary1
summary(tt[numeric_features])
categorical_features
table(tt[categorical_features])
# Count / percentage table per categorical feature:
univar_cat_features_summary <- lapply(categorical_features, function(var_name) {
count <- table(tt[[var_name]])
perct <- paste0(round(prop.table(count)*100,2),"%")
val <- as.data.frame(rbind(count,perct))
labels <- data.frame(Metric = c("Count","Perct."))
val <- cbind(labels,val)
return(val)
})
categorical_features
univar_cat_features_summary
univar_num_features_summary2 <- lapply(numeric_features, function(var_name) {
val <- data.frame(t(Summarize(tt[[var_name]])))
val[] = lapply(val, function(x) round(x,2))
#colnames(val) <- c("min","25th","50th","75th","max")
return(val)
})
univar_num_features_summary2
# Rebuild and install a locally patched RtutoR from a GitHub zip:
unzip("RtutoR-master.zip")
file.rename("RtutoR-master", "RtutoR")
shell("R CMD build RtutoR")
getwd()
setwd("C:/Users/User/Desktop/Mission/R/R_code/Shinydashboard")
install.packages("RtutoR_1.1.tar.gz", repos = NULL, type="source")
install.packages("gdtools")
library(RtutoR)
updateR()
version
library(RtutoR)
library(readxl)
library(dplyr)
tt=read_xlsx("ACCT_INFO.xlsx")
glimpse(tt)
tt=tt%>%select(input_slr_id,am_email,BUH,WinIT,AMH.y)
# NOTE(review): target_var is not defined anywhere in this script.
target_var_type <- data_type[which(names(tt) == target_var)]
data_type <- sapply(tt,class)
data_type
numeric_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("numeric","integer","double")))]
numeric_features
categorical_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("factor","character","logical")))]
categorical_features
res = generate_exploratory_analysis_ppt(tt,target_var = 'WinIT',
output_file_name = "titanic_exp_report.pptx")
length(unique(tt$input_slr_id))
# Quick experiments with unique()/duplicated():
x <- c(3:5, 11:8, 8 + 0:5)
x
unique(x)
x[duplicated(x)]
head(tt[duplicated(tt$mpg),])
a=tt[duplicated(tt$mpg),]
a
tt%>%filter(mpg %in% a$mpg)%>%arrange(mpg)
nrow(a)
names(a)
| /Shinydashboard/dev.R | no_license | jcflyingco/R_code | R | false | false | 3,473 | r |
install.packages('RtutoR')
library(RtutoR)
univar_num_features_summary <- lapply(numeric_features, function(var_name) {
val <- data.frame(t(Summarize(tt[[var_name]])))
val[] = lapply(val, function(x) round(x,2))
#colnames(val) <- c("min","25th","50th","75th","max")
return(val)
})
environment(univar_num_features_summary) <- asNamespace('RtutoR')
fixInNamespace("univar_num_features_summary", pos = 3)
fixInNamespace("univar_num_features_summary", pos="package:RtutoR")
univar_num_features_summary() <- RtutoR:::univar_num_features_summary()
?assignInNamespace
remove.packages('RtutoR')
tt=read_xlsx("df.xlsx")
write_xlsx(tt, 'aa.xlsx')
names(tt)
a='mpg'
res = generate_exploratory_analysis_ppt(tt,target_var = a,
output_file_name = "titanic_exp_report.pptx")
tt
a=as.vector(tt[2])
b=c(t(tt$am))
install.packages('FSA')
library(FSA)
Summarize(b)
?fivenum
data_type <- sapply(tt,class)
numeric_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("numeric","integer","double")))]
categorical_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("factor","character","logical")))]
univar_num_features_summary1 <- lapply(numeric_features, function(var_name) {
val <- data.frame(t(fivenum(tt[[var_name]])))
val[] = lapply(val, function(x) round(x,2))
#colnames(val) <- c("min","25th","50th","75th","max")
return(val)
})
glimpse(tt)
tt$drat=as.numeric(tt$drat)
tt$wt=as.numeric(tt$wt)
tt$qsec=as.numeric(tt$qsec)
univar_num_features_summary1
summary(tt[numeric_features])
categorical_features
table(tt[categorical_features])
univar_cat_features_summary <- lapply(categorical_features, function(var_name) {
count <- table(tt[[var_name]])
perct <- paste0(round(prop.table(count)*100,2),"%")
val <- as.data.frame(rbind(count,perct))
labels <- data.frame(Metric = c("Count","Perct."))
val <- cbind(labels,val)
return(val)
})
categorical_features
univar_cat_features_summary
univar_num_features_summary2 <- lapply(numeric_features, function(var_name) {
val <- data.frame(t(Summarize(tt[[var_name]])))
val[] = lapply(val, function(x) round(x,2))
#colnames(val) <- c("min","25th","50th","75th","max")
return(val)
})
univar_num_features_summary2
unzip("RtutoR-master.zip")
file.rename("RtutoR-master", "RtutoR")
shell("R CMD build RtutoR")
getwd()
setwd("C:/Users/User/Desktop/Mission/R/R_code/Shinydashboard")
install.packages("RtutoR_1.1.tar.gz", repos = NULL, type="source")
install.packages("gdtools")
library(RtutoR)
updateR()
version
library(RtutoR)
library(readxl)
library(dplyr)
tt=read_xlsx("ACCT_INFO.xlsx")
glimpse(tt)
tt=tt%>%select(input_slr_id,am_email,BUH,WinIT,AMH.y)
target_var_type <- data_type[which(names(tt) == target_var)]
data_type <- sapply(tt,class)
data_type
numeric_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("numeric","integer","double")))]
numeric_features
categorical_features <- names(data_type)[sapply(data_type,function(x) any(x %in% c("factor","character","logical")))]
categorical_features
res = generate_exploratory_analysis_ppt(tt,target_var = 'WinIT',
output_file_name = "titanic_exp_report.pptx")
length(unique(tt$input_slr_id))
x <- c(3:5, 11:8, 8 + 0:5)
x
unique(x)
x[duplicated(x)]
head(tt[duplicated(tt$mpg),])
a=tt[duplicated(tt$mpg),]
a
tt%>%filter(mpg %in% a$mpg)%>%arrange(mpg)
nrow(a)
names(a)
|
exportFct_addPlotColumns <- function(tab, path, idVar, trRef){
  ## Append one column per plot type to 'tab', holding the paths of the
  ## per-protein plot PDFs, and validate each path (existence check and
  ## duplicate removal via 'removeInvalidPaths', defined elsewhere).
  ##
  ## Args:
  ##   tab:   data frame of results; receives the new path columns.
  ##   path:  output directory containing the "plots" subdirectory.
  ##   idVar: name of the column in 'tab' that holds the protein ids.
  ##   trRef: path to the TR reference data set, or NULL if none is used.
  ##
  ## Returns: 'tab' with the plot path columns appended.

  ## Sanitize ids for use in file names (dots -> underscores, matching the
  ## naming scheme of the plot generating functions):
  idsProcessed <- gsub("(\\.)", "_", tab[[idVar]])

  # Only include reference data plots if a valid reference data set is defined:
  existsRef <- !is.null(trRef) && file.exists(trRef)

  ## All plot types share an initial prefix consisting of directory + protein id:
  pathBase <- file.path(path, "plots", idsProcessed)

  ## Output column -> file name suffix; avoids repeating the identical
  ## build-then-validate steps once per plot type:
  plotSuffixes <- c(
    plot_all_drcurves   = "_2D_TPP_all_plots.pdf",
    plot_good_drcurve   = "_2D_TPP_good_plots.pdf",
    plot_single_drcurve = "_2D_TPP_single_plots.pdf",
    plot_spline_fits    = "_2D_TPP_spline_plots.pdf"
  )
  for (colName in names(plotSuffixes)){
    ## Check files for existence and remove duplicates:
    tab[[colName]] <- removeInvalidPaths(paste0(pathBase, plotSuffixes[[colName]]))
  }

  ## Special case: reference data boxplots
  if (existsRef){
    ### Potential problem: in Nils original code, the ids were not preprocessed
    ### by special character removal. Test for consistency
    ### with the plot generating function (still to be written).
    dirRefBoxplots <- file.path(dirname(trRef), "fcBoxplots")
    paths_refBoxplots <- file.path(dirRefBoxplots, paste0("fcBoxpl_", idsProcessed))
    tab$plot_tr_reference <- removeInvalidPaths(paths_refBoxplots)
  }
  return(tab)
}
| /R/exportFct_addPlotColumns.R | no_license | SamGG/TPP | R | false | false | 2,107 | r | exportFct_addPlotColumns <- function(tab, path, idVar, trRef){
## Create plots paths to each respective pdf and check for existance.
## Preparation
idsProcessed <- tab %>%
# Extract id column as character vector:
extract2(idVar) %>%
# Replace special characters by '_':
gsub("(\\.)", "_", .)
# Only include reference data plots if a valid reference data set is defined:
existsRef <- !is.null(trRef) && file.exists(trRef)
## All plot types share an initial prefix consisting of directory + protein id:
pathBase <- file.path(path, "plots", idsProcessed)
## Create plot paths:
paths_allCurves <- paste0(pathBase, "_2D_TPP_all_plots.pdf")
paths_goodCurves <- paste0(pathBase, "_2D_TPP_good_plots.pdf")
paths_singleCurves <- paste0(pathBase, "_2D_TPP_single_plots.pdf")
paths_splineCurves <- paste0(pathBase, "_2D_TPP_spline_plots.pdf")
## Check files for existence and remove duplicates:
paths_allCurves_final <- removeInvalidPaths(paths_allCurves)
paths_goodCurves_final <- removeInvalidPaths(paths_goodCurves)
paths_singleCurves_final <- removeInvalidPaths(paths_singleCurves)
paths_splineCurves_final <- removeInvalidPaths(paths_splineCurves)
## Add to output table:
tab$plot_all_drcurves <- paths_allCurves_final
tab$plot_good_drcurve <- paths_goodCurves_final
tab$plot_single_drcurve <- paths_singleCurves_final
tab$plot_spline_fits <- paths_splineCurves_final
## Special case: reference data boxplots
if (existsRef){
### Potential problem: in Nils original code, the ids were not preprocessed
### by special character removal. Test for consistency
### with the plot generating function (still to be written).
dirRefBoxplots <- file.path(dirname(trRef), "fcBoxplots")
filesRefBoxplots <- paste0("fcBoxpl_", idsProcessed)
paths_refBoxplots <- file.path(dirRefBoxplots, filesRefBoxplots)
paths_refBoxplots_final <- removeInvalidPaths(paths_refBoxplots)
tab$plot_tr_reference <- paths_refBoxplots_final
}
return(tab)
}
|
#### Approximate bayesian power ####
# internal function #
# Approximate Bayesian power for a three-arm (placebo P / reference R /
# experimental E) non-inferiority trial with a binary endpoint, using
# normal approximations built from uniform Beta(1,1) priors.
#
# Arguments:
#   r.alloc - length-3 allocation ratios c(P, R, E); arm sizes are r.alloc * n
#   n       - per-unit sample size multiplier
#   parE    - assumed true response rate of the experimental arm
#   theta   - retention fraction defining the non-inferiority margin
#
# NOTE(review): also reads p_star, parR and parP from the global
# environment (set in the script section below) -- they must be defined
# before this function is called.
power_approxbayes<-function(r.alloc=r.alloc,n=i,parE=parE,theta=theta)
{
# Per-arm sample sizes:
nP=r.alloc[1]*n
nR=r.alloc[2]*n
nE=r.alloc[3]*n
# Uniform Beta(1,1) priors on all three response rates:
aE<-1; bE<-1; aR<-1; bR<-1; aP<-1; bP<-1
# Prior means ...
me=aE/(aE+bE)
mr=aR/(aR+bR)
mp=aP/(aP+bP)
# ... and prior variances of the Beta distributions:
sigma2e=(aE*bE)/(((aE+bE)^2)*(aE+bE+1))
sigma2r=(aR*bR)/(((aR+bR)^2)*(aR+bR+1))
sigma2p=(aP*bP)/(((aP+bP)^2)*(aP+bP+1))
# Variance of the test statistic under the assumed true rates:
sigma2_T<-parE*(1-parE)/nE + (theta^2)*parR*(1-parR)/nR + ((1-theta)^2)*parP*(1-parP)/nP
# Prior means and variances of the treatment-vs-placebo contrasts:
m_etaEP=me-mp
m_etaRP=mr-mp
sigma2_etaEP<-sigma2e + sigma2p
sigma2_etaRP<-sigma2r + sigma2p
# Correlation between the contrasts induced by the shared placebo term:
rho<-sigma2p/(sqrt(sigma2_etaRP*sigma2_etaEP))
# Conditional moments given eta_RP > 0: standardized threshold alpha and
# Mills-ratio-style terms phi/c (truncated-normal algebra):
alpha<-(-m_etaRP/sqrt(sigma2_etaRP))
phi=dnorm(alpha,0,1)
c<-1-pnorm(alpha,0,1)
delta_alpha=(phi/c)*((phi/c)-alpha)
E1<-m_etaEP+sqrt(sigma2_etaEP)*rho*phi/c
E2<-m_etaRP+sqrt(sigma2_etaRP)*phi/c
V1<-sigma2_etaEP*(1+(rho^2*alpha*phi/c)-(rho*phi/c)^2)
V2<-sigma2_etaRP*(1-delta_alpha)
E12<-(sqrt(sigma2_etaEP*sigma2_etaRP)*rho*(alpha*phi+c)/c) + (sqrt(sigma2_etaEP)*m_etaRP*rho*phi/c) +
(sqrt(sigma2_etaRP)*m_etaEP*phi/c) + m_etaEP*m_etaRP
cov<-E12-E1*E2
# Mean/variance of the margin-adjusted contrast E1 - theta * E2:
mu_star_th<-E1-theta*E2
sigma2_star_nu<-V1+(theta^2)*V2-2*theta*cov
# True mean of the test statistic under (parE, parR, parP):
mu_T_theta<-parE - theta*parR -(1-theta)*parP
# Success threshold p_star translated to a standard-normal quantile:
z<-qnorm(1-p_star,0,1)
point<-z*sqrt((1/sigma2_T) + (1/sigma2_star_nu))*sqrt(sigma2_T) +
(mu_star_th/sigma2_star_nu)*(sqrt(sigma2_T)) + (mu_T_theta/sqrt(sigma2_T))
power<-pnorm(point,0,1)
return(power)
}
# sample size calculation #
# Smallest per-unit sample size n (searched over 1..n.max) whose approximate
# Bayesian power (see power_approxbayes) reaches 'target.power'.
#
# Arguments:
#   r.alloc      - length-3 allocation ratios, forwarded to power_approxbayes
#   parE         - assumed experimental response rate
#   theta        - non-inferiority retention fraction
#   target.power - required power (default 0.8, matching the original code)
#   n.max        - upper bound of the search range (default 1000, as before)
#
# Returns the first n meeting the target, or NA_integer_ (with a warning)
# if no n in the range does. The original indexed with min(which(...)),
# which warned and silently produced NA on an empty match.
samplesize_fn_approxbayes<-function(r.alloc, parE, theta, target.power = 0.8, n.max = 1000)
{
candidates <- seq_len(n.max)
# Typed apply instead of growing a vector with c() inside a for loop:
power <- vapply(candidates, function(i) {
power_approxbayes(r.alloc = r.alloc, n = i, parE = parE, theta = theta)
}, numeric(1))
power <- round(power, 3)
hit <- which(power >= target.power)
if (length(hit) == 0) {
warning("No sample size up to ", n.max, " reaches the target power.", call. = FALSE)
return(NA_integer_)
}
candidates[hit[1]]
}
#################### Approx Bayes ###################
# Tabulate the required sample size over a grid of allocation ratios,
# retention fractions theta and experimental response rates parE.
# The next three values are read as globals by power_approxbayes():
p_star<-0.975
parR<-.7
parP<-.1
n.table.ab<-NULL
# Filled column-wise, so the rows (allocation scenarios P:R:E) are
# (1,1,1), (1,2,2) and (1,2,3):
r.alloc.list<-matrix(c(1,1,1,1,2,2,1,2,3),3,3)
for (i.r.alloc in c(1:3))
{
r.alloc<-r.alloc.list[i.r.alloc,]
for (theta in c(0.8,0.7))
{
for (parE in c(.9, .85, .8, .75, .7))
{
n<-samplesize_fn_approxbayes(r.alloc=r.alloc,parE=parE,theta=theta)
# Grows by rbind in a loop; acceptable for 3*2*5 = 30 rows.
n.table.ab<-rbind(n.table.ab,c(r.alloc=r.alloc,parE=parE,theta=theta,n.ab=n))
}
}
}
n.table.ab
| /approxbayes_samplesizecode.R | no_license | erina633/Binary3armNI | R | false | false | 2,404 | r | #### Approximate bayesian power ####
# internal function #
power_approxbayes<-function(r.alloc=r.alloc,n=i,parE=parE,theta=theta)
{
nP=r.alloc[1]*n
nR=r.alloc[2]*n
nE=r.alloc[3]*n
aE<-1; bE<-1; aR<-1; bR<-1; aP<-1; bP<-1
me=aE/(aE+bE)
mr=aR/(aR+bR)
mp=aP/(aP+bP)
sigma2e=(aE*bE)/(((aE+bE)^2)*(aE+bE+1))
sigma2r=(aR*bR)/(((aR+bR)^2)*(aR+bR+1))
sigma2p=(aP*bP)/(((aP+bP)^2)*(aP+bP+1))
sigma2_T<-parE*(1-parE)/nE + (theta^2)*parR*(1-parR)/nR + ((1-theta)^2)*parP*(1-parP)/nP
m_etaEP=me-mp
m_etaRP=mr-mp
sigma2_etaEP<-sigma2e + sigma2p
sigma2_etaRP<-sigma2r + sigma2p
rho<-sigma2p/(sqrt(sigma2_etaRP*sigma2_etaEP))
alpha<-(-m_etaRP/sqrt(sigma2_etaRP))
phi=dnorm(alpha,0,1)
c<-1-pnorm(alpha,0,1)
delta_alpha=(phi/c)*((phi/c)-alpha)
E1<-m_etaEP+sqrt(sigma2_etaEP)*rho*phi/c
E2<-m_etaRP+sqrt(sigma2_etaRP)*phi/c
V1<-sigma2_etaEP*(1+(rho^2*alpha*phi/c)-(rho*phi/c)^2)
V2<-sigma2_etaRP*(1-delta_alpha)
E12<-(sqrt(sigma2_etaEP*sigma2_etaRP)*rho*(alpha*phi+c)/c) + (sqrt(sigma2_etaEP)*m_etaRP*rho*phi/c) +
(sqrt(sigma2_etaRP)*m_etaEP*phi/c) + m_etaEP*m_etaRP
cov<-E12-E1*E2
mu_star_th<-E1-theta*E2
sigma2_star_nu<-V1+(theta^2)*V2-2*theta*cov
mu_T_theta<-parE - theta*parR -(1-theta)*parP
z<-qnorm(1-p_star,0,1)
point<-z*sqrt((1/sigma2_T) + (1/sigma2_star_nu))*sqrt(sigma2_T) +
(mu_star_th/sigma2_star_nu)*(sqrt(sigma2_T)) + (mu_T_theta/sqrt(sigma2_T))
power<-pnorm(point,0,1)
return(power)
}
# sample size calculation #
samplesize_fn_approxbayes<-function(r.alloc,parE,theta)
{
a<-1:1000
power<-NULL
for(i in a)
{
power<-c(power,power_approxbayes(r.alloc=r.alloc,n=i,parE=parE,theta=theta))
}
power<-round(power,3)
n<-a[min(which(power>=0.8))]
return(n)
}
#################### Approx Bayes ###################
p_star<-0.975
parR<-.7
parP<-.1
n.table.ab<-NULL
r.alloc.list<-matrix(c(1,1,1,1,2,2,1,2,3),3,3)
for (i.r.alloc in c(1:3))
{
r.alloc<-r.alloc.list[i.r.alloc,]
for (theta in c(0.8,0.7))
{
for (parE in c(.9, .85, .8, .75, .7))
{
n<-samplesize_fn_approxbayes(r.alloc=r.alloc,parE=parE,theta=theta)
n.table.ab<-rbind(n.table.ab,c(r.alloc=r.alloc,parE=parE,theta=theta,n.ab=n))
}
}
}
n.table.ab
|
## Let's make a webscraper!
## Sources:
## https://www.analyticsvidhya.com/blog/2017/03/beginners-guide-on-web-scraping-in-r-using-rvest-with-hands-on-knowledge/
## https://www.rdocumentation.org/packages/rvest/versions/0.3.2/topics/html_nodes
## https://www.rdocumentation.org/packages/rvest/versions/0.3.2/topics/html_text
## Uncomment this to install packages
#install.packages('rvest')
# Load in 'rvest' package
library('rvest')
# Specify the URL endpoint we are using
# (this used to be a bare string literal, which R evaluates -- and at the
# top level of an interactive session, prints -- instead of treating as a
# comment)
url <- 'http://www.imdb.com/search/title?count=100&release_date=2016,2016&title_type=feature'
webpage <- read_html(url)
#html_nodes: More easily extract pieces out of HTML documents using XPath and css selectors
#html_text: Extract attributes, text and tag name from html.
rank_data_html <- html_nodes(webpage,'.text-primary')
rank_data <- html_text(rank_data_html)
head(rank_data)
rank_data<-as.numeric(rank_data)
head(rank_data)
#Using CSS selectors to scrape the title section
title_data_html <- html_nodes(webpage, '.lister-item-header a')
#html to text
title_data <- html_text(title_data_html)
#look at data
head(title_data)
#Using CSS selectors to scrape the description section
description_data_html <- html_nodes(webpage, '.ratings-bar+ .text-muted')
#Converting the description data to text
description_data <- html_text(description_data_html)
#look at data
head(description_data)
#Data-Preprocessing: removing '\n'
description_data <- gsub("\n", "", description_data)
head(description_data)
#Using CSS selectors to scrape the Movie runtime section
runtime_data_html <- html_nodes(webpage, '.text-muted .runtime')
#Converting the movie runtime data to text
runtime_data <- html_text(runtime_data_html)
#Let's have a look at the movie runtime
head(runtime_data)
#Data-Preprocessing: removing mins and converting it to numerical
runtime_data <- gsub(" min", "", runtime_data)
runtime_data <- as.numeric(runtime_data)
#Let's have another look at the runtime data
head(runtime_data)
#Using CSS selectors to scrape the movie genre section
genre_data_html <- html_nodes(webpage, '.genre')
#Converting the genre data to text
genre_data <- html_text(genre_data_html)
#Let's have a look at the genres
head(genre_data)
#Data-Preprocessing: removing \n
genre_data <- gsub("\n", "", genre_data)
#Data-Preprocessing: removing excess spaces
genre_data <- gsub(" ", "", genre_data)
#taking only the first genre of each movie
#Converting each genre from text to factor
#Let's have another look at the genre data
#Using CSS selectors to scrape the IMDB rating section
#Converting the ratings data to text
#Let's have a look at the ratings
#Data-Preprocessing: converting ratings to numerical
#Let's have another look at the ratings data
#Using CSS selectors to scrape the directors section
#Converting the directors data to text
#Let's have a look at the directors data
#Data-Preprocessing: converting directors data into factors
#Using CSS selectors to scrape the actors section
#Converting the gross actors data to text
#Let's have a look at the actors data
#Data-Preprocessing: converting actors data into factors
#Using CSS selectors to scrape the gross revenue section
#Converting the gross revenue data to text
#Let's have a look at the votes data
#Data-Preprocessing: removing '$' and 'M' signs
#Let's check the length of gross data
# NOTE(review): the gross-revenue scraping above is placeholder comments
# only, so `gross_data` was never created and the unguarded call
# `length(gross_data)` stopped the script with an error. Guard it so the
# demo runs end-to-end.
if (exists("gross_data")) {
  length(gross_data)
}
#Filling missing entries with NA
#Data-Preprocessing: converting gross to numerical
#Let's have another look at the length of gross data
#library('ggplot2')
# let's draw some plots!
| /webscraper-demo.R | no_license | AnActualSeb/mini-demos | R | false | false | 3,580 | r | ## Let's make a webscraper!
## Sources:
## https://www.analyticsvidhya.com/blog/2017/03/beginners-guide-on-web-scraping-in-r-using-rvest-with-hands-on-knowledge/
## https://www.rdocumentation.org/packages/rvest/versions/0.3.2/topics/html_nodes
## https://www.rdocumentation.org/packages/rvest/versions/0.3.2/topics/html_text
## Uncomment this to install packages
#install.packages('rvest')
# Load in 'rvest' package
library('rvest')
# Specify the URL endpoint we are using
# (this used to be a bare string literal, which R evaluates -- and at the
# top level of an interactive session, prints -- instead of treating as a
# comment)
url <- 'http://www.imdb.com/search/title?count=100&release_date=2016,2016&title_type=feature'
webpage <- read_html(url)
#html_nodes: More easily extract pieces out of HTML documents using XPath and css selectors
#html_text: Extract attributes, text and tag name from html.
rank_data_html <- html_nodes(webpage,'.text-primary')
rank_data <- html_text(rank_data_html)
head(rank_data)
rank_data<-as.numeric(rank_data)
head(rank_data)
#Using CSS selectors to scrape the title section
title_data_html <- html_nodes(webpage, '.lister-item-header a')
#html to text
title_data <- html_text(title_data_html)
#look at data
head(title_data)
#Using CSS selectors to scrape the description section
description_data_html <- html_nodes(webpage, '.ratings-bar+ .text-muted')
#Converting the description data to text
description_data <- html_text(description_data_html)
#look at data
head(description_data)
#Data-Preprocessing: removing '\n'
description_data <- gsub("\n", "", description_data)
head(description_data)
#Using CSS selectors to scrape the Movie runtime section
runtime_data_html <- html_nodes(webpage, '.text-muted .runtime')
#Converting the movie runtime data to text
runtime_data <- html_text(runtime_data_html)
#Let's have a look at the movie runtime
head(runtime_data)
#Data-Preprocessing: removing mins and converting it to numerical
runtime_data <- gsub(" min", "", runtime_data)
runtime_data <- as.numeric(runtime_data)
#Let's have another look at the runtime data
head(runtime_data)
#Using CSS selectors to scrape the movie genre section
genre_data_html <- html_nodes(webpage, '.genre')
#Converting the genre data to text
genre_data <- html_text(genre_data_html)
#Let's have a look at the genres
head(genre_data)
#Data-Preprocessing: removing \n
genre_data <- gsub("\n", "", genre_data)
#Data-Preprocessing: removing excess spaces
genre_data <- gsub(" ", "", genre_data)
#taking only the first genre of each movie
#Converting each genre from text to factor
#Let's have another look at the genre data
#Using CSS selectors to scrape the IMDB rating section
#Converting the ratings data to text
#Let's have a look at the ratings
#Data-Preprocessing: converting ratings to numerical
#Let's have another look at the ratings data
#Using CSS selectors to scrape the directors section
#Converting the directors data to text
#Let's have a look at the directors data
#Data-Preprocessing: converting directors data into factors
#Using CSS selectors to scrape the actors section
#Converting the gross actors data to text
#Let's have a look at the actors data
#Data-Preprocessing: converting actors data into factors
#Using CSS selectors to scrape the gross revenue section
#Converting the gross revenue data to text
#Let's have a look at the votes data
#Data-Preprocessing: removing '$' and 'M' signs
#Let's check the length of gross data
# NOTE(review): the gross-revenue scraping above is placeholder comments
# only, so `gross_data` was never created and the unguarded call
# `length(gross_data)` stopped the script with an error. Guard it so the
# demo runs end-to-end.
if (exists("gross_data")) {
  length(gross_data)
}
#Filling missing entries with NA
#Data-Preprocessing: converting gross to numerical
#Let's have another look at the length of gross data
#library('ggplot2')
# let's draw some plots!
|
#Make Supplementary Figure 1
# Pipeline: (1) pick Newey-West bandwidths for three covariate regressions
# (depth, NPP, SST), (2) export the data with write_dta() for Stata to
# compute the confidence intervals, (3) read the Stata CIs back in and
# merge them onto the plotting data, (4) draw and save the three panels.
# NOTE(review): rm(list=ls()) plus the absolute setwd() path make this
# script machine-specific; consider a project-relative working directory.
rm(list=ls())
#Set working directory
setwd("C:/Users/englander/Box Sync/VMS/Nature Sustainability/replication_files/")
library(dplyr)
library(haven)
library(ggplot2)
library(sandwich)
library(latex2exp)
library(tidyr)
library(readr)
load("Data/supfig1df.Rdata")
#What should Newey-West bandwidths be?
#depth
depthreg <- lm(depth ~ inner + absdist + dist2 + dist3 + inner:absdist +
               inner:dist2 + inner:dist3, data = supfig1df)
#Bandwidth
bwNeweyWest(depthreg, kernel = "Quadratic Spectral") #8.9
bwNeweyWest(depthreg, kernel = "Bartlett") #17.2
#npp
nppreg <- lm(npp ~ inner + absdist + dist2 + dist3 + inner:absdist +
             inner:dist2 + inner:dist3, data = supfig1df)
#Bandwidth
bwNeweyWest(nppreg, kernel = "Quadratic Spectral") #31.3.
bwNeweyWest(nppreg, kernel = "Bartlett") #139.7
#Use 32 as lag instead of 140 in this case because 140 is too large
#sst
sstreg <- lm(sst ~ inner + absdist + dist2 + dist3 + inner:absdist +
             inner:dist2 + inner:dist3, data = supfig1df)
#Bandwidth
bwNeweyWest(sstreg, kernel = "Quadratic Spectral") #3.7
bwNeweyWest(sstreg, kernel = "Bartlett") #3.6
#Output data for stata to compute confidence intervals
write_dta(mutate(supfig1df, #Make dist an integer so can tsset in Stata. I will recenter dist in Stata after I have declared tsset.
                 dist = if_else(type=="outer",dist-.5,dist+.5)),
          path = "Data/supfig1dat.dta")
rm(depthreg, sstreg, nppreg)
#Drop extra columns
supfig1df <- dplyr::select(supfig1df, -absdist, -dist2, -dist3, -inner)
#Create one row for each variable-dist
supfig1df <- gather(supfig1df, variable, value, -dist, -type)
#Load confidence intervals created in Stata
cidat <- bind_rows(
  read_csv("Data/Confidence_Intervals/supfig1a_ci.csv") %>% mutate(variable='depth'),
  read_csv("Data/Confidence_Intervals/supfig1b_ci.csv") %>% mutate(variable='npp'),
  read_csv("Data/Confidence_Intervals/supfig1c_ci.csv") %>% mutate(variable='sst')
)
#Create dist variable
# `parm` appears to hold Stata factor-coefficient names such as "1.inner";
# strip everything after the first "." to recover the distance index, then
# recenter so 0 sits at the boundary -- TODO confirm against the Stata CSVs.
cidat$dist <- gsub( "\\..*$", "", cidat$parm)
cidat$dist <- as.numeric(cidat$dist) - 50.5
#Create type variable
cidat$type <- "outer"
# NOTE(review): "1.inner" is used as a regular expression here ('.' matches
# any character); confirm it selects exactly the inner-indicator terms.
cidat$type[grep("1.inner",cidat$parm)] <- "inner"
#Drop non-relevant parameter values (e.g. inner with dist = -30 doesn't actually exist)
cidat <- filter(cidat, (type=="outer" & dist <= -1.5) |
                  (type == "inner" & dist >= 1.5))
supfig1df <- left_join(supfig1df,
                       dplyr::select(cidat, estimate, min95, max95, dist, variable),
                       by = c("dist", "variable"))
#If value is missing, set estimate, min95, and max95 equal to NA too
supfig1df$estimate[is.na(supfig1df$value)] <- as.numeric(NA)
supfig1df$min95[is.na(supfig1df$value)] <- as.numeric(NA)
supfig1df$max95[is.na(supfig1df$value)] <- as.numeric(NA)
#Check that point estimates are close to actual values (i.e. Stata regression was correct)
# Should be approximately 0 (up to floating point) if Stata and R agree.
sum(supfig1df$value[!is.na(supfig1df$value)] - supfig1df$estimate[!is.na(supfig1df$value)])
supfig1df$type <- as.factor(supfig1df$type)
#Want high seas to come first on plot
supfig1df$type <- relevel(supfig1df$type, ref = "outer")
myThemeStuff <- theme(panel.background = element_rect(fill = NA),
                      panel.border = element_rect(fill = NA, color = "black"),
                      panel.grid.major = element_blank(),
                      panel.grid.minor = element_blank(),
                      axis.ticks = element_line(color = "gray5",size=.35),
                      axis.text = element_text(color = "black", size = 5.5, family="sans"),
                      axis.title = element_text(color = "black", size = 6.5, family = "sans"),
                      #axis.title.y.right = element_text(angle = 90,hjust=0),
                      axis.title.y = element_text(hjust = .5),
                      legend.key = element_blank(),
                      plot.title = element_text(hjust = 0.5),
                      legend.text=element_text(size=6.5, family = "sans"),
                      legend.title = element_text(size=6.5, family = "sans"),
                      plot.margin = unit(c(.01,.01,.01,.01),"in"),
                      plot.tag = element_text(family = "sans", size = 9, face='bold')
)
#Add high seas and EEZ text
# NOTE(review): tbl_df() and mutate_all() are deprecated/superseded in
# current dplyr (use as_tibble() and across()); left unchanged to preserve
# behavior on the original package versions.
textdf <- rbind(
  cbind(-25,-4.19,"High seas"),
  cbind(25,-4.19,"EEZs")
) %>% as.data.frame() %>% tbl_df() %>% mutate_all(as.character) %>%
  rename(x=V1,y=V2,label=V3) %>% mutate(x=as.numeric(x), y=as.numeric(y))
#Make figure supfig1a
# NOTE(review): guides(shape = FALSE) is deprecated since ggplot2 3.3.4;
# the modern spelling is guides(shape = "none"). Applies to all three
# plots in this script.
supfig1a <- ggplot(data = filter(supfig1df, variable=='depth'),
                   aes(x=dist, y = value)) +
  geom_point(aes(shape=type),color = "lightseagreen", size = .5) +
  geom_smooth(aes(shape=type),formula = y~poly(x,3),
              method = "lm",se = FALSE, color = "lightseagreen", size = .3) +
  scale_y_continuous("Average Depth (km)") +
  geom_vline(xintercept=0, color="red") +
  scale_shape_manual("Location",
                     values = c(1,16),
                     labels = c("High seas","EEZs")) +
  myThemeStuff +
  guides(shape = FALSE) +
  geom_ribbon(aes(ymin=min95,ymax=max95,shape=type),alpha=.3,fill="lightseagreen") +
  scale_x_continuous("Distance to EEZ-high seas boundary (km)") +
  geom_text(data=textdf, aes(x=x,y=y,label=label), size = 2.5, family = "sans") +
  labs(tag = "a")
ggsave("Figures/supfig1a.png",supfig1a,
       width=88,height=54.83077,units="mm",dpi=1200)
#Make supfig1b
#Add high seas and EEZ text
textdf <- rbind(
  cbind(-25,360,"High seas"),
  cbind(25,360,"EEZs")
) %>% as.data.frame() %>% tbl_df() %>% mutate_all(as.character) %>%
  rename(x=V1,y=V2,label=V3) %>% mutate(x=as.numeric(x), y=as.numeric(y))
#Make plot
supfig1b <- ggplot(data = filter(supfig1df, variable=='npp'),
                   aes(x=dist, y = value)) +
  geom_point(aes(shape=type),color = "lightseagreen", size = .5) +
  geom_smooth(aes(shape=type),formula = y~poly(x,3),
              method = "lm",se = FALSE, color = "lightseagreen", size = .3) +
  scale_y_continuous(TeX("Average NPP (mg Carbon / $ km^2$ / day)")) +
  geom_vline(xintercept=0, color="red") +
  scale_shape_manual("Location",
                     values = c(1,16),
                     labels = c("High seas","EEZs")) +
  myThemeStuff +
  guides(shape = FALSE) +
  geom_ribbon(aes(ymin=min95,ymax=max95,shape=type),alpha=.3,fill="lightseagreen") +
  scale_x_continuous("Distance to EEZ-high seas boundary (km)") +
  geom_text(data=textdf, aes(x=x,y=y,label=label), size = 2.5, family = "sans") +
  labs(tag = "b")
ggsave("Figures/supfig1b.png",supfig1b,
       width=88,height=54.83077,units="mm",dpi=1200)
#Make supfig1c
textdf <- rbind(
  cbind(-25,23.3,"High seas"),
  cbind(25,23.3,"EEZs")
) %>% as.data.frame() %>% tbl_df() %>% mutate_all(as.character) %>%
  rename(x=V1,y=V2,label=V3) %>% mutate(x=as.numeric(x), y=as.numeric(y))
#Make plot
supfig1c <- ggplot(data = filter(supfig1df, variable=='sst'),
                   aes(x=dist, y = value)) +
  geom_point(aes(shape=type),color = "lightseagreen", size = .5) +
  geom_smooth(aes(shape=type),formula = y~poly(x,3),
              method = "lm",se = FALSE, color = "lightseagreen", size = .3) +
  scale_y_continuous(TeX("Average SST (degrees)")) +
  geom_vline(xintercept=0, color="red") +
  scale_shape_manual("Location",
                     values = c(1,16),
                     labels = c("High seas","EEZs")) +
  myThemeStuff +
  guides(shape = FALSE) +
  geom_ribbon(aes(ymin=min95,ymax=max95,shape=type),alpha=.3,fill="lightseagreen") +
  scale_x_continuous("Distance to EEZ-high seas boundary (km)") +
  geom_text(data=textdf, aes(x=x,y=y,label=label), size = 2.5, family = "sans") +
  labs(tag = "c")
ggsave("Figures/supfig1c.png",supfig1c,
       width=88,height=54.83077,units="mm",dpi=1200)
| /Scripts/make_supfig1.R | no_license | englander/replication_eez | R | false | false | 7,836 | r | #Make Supplementary Figure 1
# Make Supplementary Figure 1.
# Pipeline: (1) pick Newey-West bandwidths for three covariate regressions
# (depth, NPP, SST), (2) export the data with write_dta() for Stata to
# compute the confidence intervals, (3) read the Stata CIs back in and
# merge them onto the plotting data, (4) draw and save the three panels.
# NOTE(review): rm(list=ls()) plus the absolute setwd() path make this
# script machine-specific; consider a project-relative working directory.
rm(list=ls())
#Set working directory
setwd("C:/Users/englander/Box Sync/VMS/Nature Sustainability/replication_files/")
library(dplyr)
library(haven)
library(ggplot2)
library(sandwich)
library(latex2exp)
library(tidyr)
library(readr)
load("Data/supfig1df.Rdata")
#What should Newey-West bandwidths be?
#depth
depthreg <- lm(depth ~ inner + absdist + dist2 + dist3 + inner:absdist +
               inner:dist2 + inner:dist3, data = supfig1df)
#Bandwidth
bwNeweyWest(depthreg, kernel = "Quadratic Spectral") #8.9
bwNeweyWest(depthreg, kernel = "Bartlett") #17.2
#npp
nppreg <- lm(npp ~ inner + absdist + dist2 + dist3 + inner:absdist +
             inner:dist2 + inner:dist3, data = supfig1df)
#Bandwidth
bwNeweyWest(nppreg, kernel = "Quadratic Spectral") #31.3.
bwNeweyWest(nppreg, kernel = "Bartlett") #139.7
#Use 32 as lag instead of 140 in this case because 140 is too large
#sst
sstreg <- lm(sst ~ inner + absdist + dist2 + dist3 + inner:absdist +
             inner:dist2 + inner:dist3, data = supfig1df)
#Bandwidth
bwNeweyWest(sstreg, kernel = "Quadratic Spectral") #3.7
bwNeweyWest(sstreg, kernel = "Bartlett") #3.6
#Output data for stata to compute confidence intervals
write_dta(mutate(supfig1df, #Make dist an integer so can tsset in Stata. I will recenter dist in Stata after I have declared tsset.
                 dist = if_else(type=="outer",dist-.5,dist+.5)),
          path = "Data/supfig1dat.dta")
rm(depthreg, sstreg, nppreg)
#Drop extra columns
supfig1df <- dplyr::select(supfig1df, -absdist, -dist2, -dist3, -inner)
#Create one row for each variable-dist
supfig1df <- gather(supfig1df, variable, value, -dist, -type)
#Load confidence intervals created in Stata
cidat <- bind_rows(
  read_csv("Data/Confidence_Intervals/supfig1a_ci.csv") %>% mutate(variable='depth'),
  read_csv("Data/Confidence_Intervals/supfig1b_ci.csv") %>% mutate(variable='npp'),
  read_csv("Data/Confidence_Intervals/supfig1c_ci.csv") %>% mutate(variable='sst')
)
#Create dist variable
# `parm` appears to hold Stata factor-coefficient names such as "1.inner";
# strip everything after the first "." to recover the distance index, then
# recenter so 0 sits at the boundary -- TODO confirm against the Stata CSVs.
cidat$dist <- gsub( "\\..*$", "", cidat$parm)
cidat$dist <- as.numeric(cidat$dist) - 50.5
#Create type variable
cidat$type <- "outer"
# NOTE(review): "1.inner" is used as a regular expression here ('.' matches
# any character); confirm it selects exactly the inner-indicator terms.
cidat$type[grep("1.inner",cidat$parm)] <- "inner"
#Drop non-relevant parameter values (e.g. inner with dist = -30 doesn't actually exist)
cidat <- filter(cidat, (type=="outer" & dist <= -1.5) |
                  (type == "inner" & dist >= 1.5))
supfig1df <- left_join(supfig1df,
                       dplyr::select(cidat, estimate, min95, max95, dist, variable),
                       by = c("dist", "variable"))
#If value is missing, set estimate, min95, and max95 equal to NA too
supfig1df$estimate[is.na(supfig1df$value)] <- as.numeric(NA)
supfig1df$min95[is.na(supfig1df$value)] <- as.numeric(NA)
supfig1df$max95[is.na(supfig1df$value)] <- as.numeric(NA)
#Check that point estimates are close to actual values (i.e. Stata regression was correct)
# Should be approximately 0 (up to floating point) if Stata and R agree.
sum(supfig1df$value[!is.na(supfig1df$value)] - supfig1df$estimate[!is.na(supfig1df$value)])
supfig1df$type <- as.factor(supfig1df$type)
#Want high seas to come first on plot
supfig1df$type <- relevel(supfig1df$type, ref = "outer")
myThemeStuff <- theme(panel.background = element_rect(fill = NA),
                      panel.border = element_rect(fill = NA, color = "black"),
                      panel.grid.major = element_blank(),
                      panel.grid.minor = element_blank(),
                      axis.ticks = element_line(color = "gray5",size=.35),
                      axis.text = element_text(color = "black", size = 5.5, family="sans"),
                      axis.title = element_text(color = "black", size = 6.5, family = "sans"),
                      #axis.title.y.right = element_text(angle = 90,hjust=0),
                      axis.title.y = element_text(hjust = .5),
                      legend.key = element_blank(),
                      plot.title = element_text(hjust = 0.5),
                      legend.text=element_text(size=6.5, family = "sans"),
                      legend.title = element_text(size=6.5, family = "sans"),
                      plot.margin = unit(c(.01,.01,.01,.01),"in"),
                      plot.tag = element_text(family = "sans", size = 9, face='bold')
)
#Add high seas and EEZ text
# NOTE(review): tbl_df() and mutate_all() are deprecated/superseded in
# current dplyr (use as_tibble() and across()); left unchanged to preserve
# behavior on the original package versions.
textdf <- rbind(
  cbind(-25,-4.19,"High seas"),
  cbind(25,-4.19,"EEZs")
) %>% as.data.frame() %>% tbl_df() %>% mutate_all(as.character) %>%
  rename(x=V1,y=V2,label=V3) %>% mutate(x=as.numeric(x), y=as.numeric(y))
#Make figure supfig1a
# NOTE(review): guides(shape = FALSE) is deprecated since ggplot2 3.3.4;
# the modern spelling is guides(shape = "none"). Applies to all three
# plots in this script.
supfig1a <- ggplot(data = filter(supfig1df, variable=='depth'),
                   aes(x=dist, y = value)) +
  geom_point(aes(shape=type),color = "lightseagreen", size = .5) +
  geom_smooth(aes(shape=type),formula = y~poly(x,3),
              method = "lm",se = FALSE, color = "lightseagreen", size = .3) +
  scale_y_continuous("Average Depth (km)") +
  geom_vline(xintercept=0, color="red") +
  scale_shape_manual("Location",
                     values = c(1,16),
                     labels = c("High seas","EEZs")) +
  myThemeStuff +
  guides(shape = FALSE) +
  geom_ribbon(aes(ymin=min95,ymax=max95,shape=type),alpha=.3,fill="lightseagreen") +
  scale_x_continuous("Distance to EEZ-high seas boundary (km)") +
  geom_text(data=textdf, aes(x=x,y=y,label=label), size = 2.5, family = "sans") +
  labs(tag = "a")
ggsave("Figures/supfig1a.png",supfig1a,
       width=88,height=54.83077,units="mm",dpi=1200)
#Make supfig1b
#Add high seas and EEZ text
textdf <- rbind(
  cbind(-25,360,"High seas"),
  cbind(25,360,"EEZs")
) %>% as.data.frame() %>% tbl_df() %>% mutate_all(as.character) %>%
  rename(x=V1,y=V2,label=V3) %>% mutate(x=as.numeric(x), y=as.numeric(y))
#Make plot
supfig1b <- ggplot(data = filter(supfig1df, variable=='npp'),
                   aes(x=dist, y = value)) +
  geom_point(aes(shape=type),color = "lightseagreen", size = .5) +
  geom_smooth(aes(shape=type),formula = y~poly(x,3),
              method = "lm",se = FALSE, color = "lightseagreen", size = .3) +
  scale_y_continuous(TeX("Average NPP (mg Carbon / $ km^2$ / day)")) +
  geom_vline(xintercept=0, color="red") +
  scale_shape_manual("Location",
                     values = c(1,16),
                     labels = c("High seas","EEZs")) +
  myThemeStuff +
  guides(shape = FALSE) +
  geom_ribbon(aes(ymin=min95,ymax=max95,shape=type),alpha=.3,fill="lightseagreen") +
  scale_x_continuous("Distance to EEZ-high seas boundary (km)") +
  geom_text(data=textdf, aes(x=x,y=y,label=label), size = 2.5, family = "sans") +
  labs(tag = "b")
ggsave("Figures/supfig1b.png",supfig1b,
       width=88,height=54.83077,units="mm",dpi=1200)
#Make supfig1c
textdf <- rbind(
  cbind(-25,23.3,"High seas"),
  cbind(25,23.3,"EEZs")
) %>% as.data.frame() %>% tbl_df() %>% mutate_all(as.character) %>%
  rename(x=V1,y=V2,label=V3) %>% mutate(x=as.numeric(x), y=as.numeric(y))
#Make plot
supfig1c <- ggplot(data = filter(supfig1df, variable=='sst'),
                   aes(x=dist, y = value)) +
  geom_point(aes(shape=type),color = "lightseagreen", size = .5) +
  geom_smooth(aes(shape=type),formula = y~poly(x,3),
              method = "lm",se = FALSE, color = "lightseagreen", size = .3) +
  scale_y_continuous(TeX("Average SST (degrees)")) +
  geom_vline(xintercept=0, color="red") +
  scale_shape_manual("Location",
                     values = c(1,16),
                     labels = c("High seas","EEZs")) +
  myThemeStuff +
  guides(shape = FALSE) +
  geom_ribbon(aes(ymin=min95,ymax=max95,shape=type),alpha=.3,fill="lightseagreen") +
  scale_x_continuous("Distance to EEZ-high seas boundary (km)") +
  geom_text(data=textdf, aes(x=x,y=y,label=label), size = 2.5, family = "sans") +
  labs(tag = "c")
ggsave("Figures/supfig1c.png",supfig1c,
       width=88,height=54.83077,units="mm",dpi=1200)
|
# Package-internal state: remembers the most recently registered order book
# (its name or the environment itself) and the parent environment it lives in.
.obstate <- new.env(parent=emptyenv())
.obstate$ob <- '.ob'
.obstate$envir <- .GlobalEnv

#' Create or Get Order Book Environment
#'
#' @param ob string naming the order book environment, or an environment, default '.ob'
#' @param envir environment to use as the parent for the order book, default .GlobalEnv
#'
#' @return
#' pointer to the order book environment
#'
#' @examples
#'
#' .ob <- newOB()
#' is.environment(.ob)
#' # TRUE
#'
#' .ob <- getOB()
#' is.environment(.ob)
#' #TRUE
#'
#' @aliases getOB
#' @export
newOB <- function(ob='.ob', envir=.GlobalEnv){
  if (is.environment(ob)) {
    # Caller handed us the order book environment itself: register it and
    # return it as-is. (The previous code fell through to get(x=ob, ...),
    # which errors when its first argument is an environment, so the
    # documented environment-argument path never worked.)
    .obstate$ob <- ob
    .obstate$envir <- envir
    return(ob)
  }
  # Named order book: (re)create a fresh hashed environment under `envir`.
  assign(ob, new.env(hash=TRUE), envir = envir)
  .obstate$ob <- ob
  .obstate$envir <- envir
  get(x=ob, pos=envir)
}

#' @rdname newOB
#' @export
getOB <- function(ob=NULL, envir=NULL){
  # Default to the most recently registered order book.
  if(is.null(ob)){
    ob <- .obstate$ob
  }
  if(is.null(envir)){
    envir <- .obstate$envir
  }
  if (is.environment(ob)) {
    # `ob` may be an environment (passed directly, or stored in .obstate by
    # newOB); return it rather than passing it to get(), which would error.
    return(ob)
  }
  if(!is.environment(envir[[ob]])) {
    # No such order book yet (or the name is bound to a non-environment):
    # create it on demand.
    newOB(ob=ob, envir=envir)
  }
  get(ob, pos=envir)
}
###############################################################################
# obmodeling: Parsing, analysis, visualization of L1 and L2 order book data
# Copyright (c) 2017- Jeffrey Mazar and Brian G. Peterson
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see https://www.gnu.org/licenses/licenses.en.html
#
###############################################################################
| /R/envir_utils.R | no_license | jmazar/obmodeling | R | false | false | 1,454 | r |
# Package-internal state: remembers the most recently registered order book
# (its name or the environment itself) and the parent environment it lives in.
.obstate <- new.env(parent=emptyenv())
.obstate$ob <- '.ob'
.obstate$envir <- .GlobalEnv

#' Create or Get Order Book Environment
#'
#' @param ob string naming the order book environment, or an environment, default '.ob'
#' @param envir environment to use as the parent for the order book, default .GlobalEnv
#'
#' @return
#' pointer to the order book environment
#'
#' @examples
#'
#' .ob <- newOB()
#' is.environment(.ob)
#' # TRUE
#'
#' .ob <- getOB()
#' is.environment(.ob)
#' #TRUE
#'
#' @aliases getOB
#' @export
newOB <- function(ob='.ob', envir=.GlobalEnv){
  if (is.environment(ob)) {
    # Caller handed us the order book environment itself: register it and
    # return it as-is. (The previous code fell through to get(x=ob, ...),
    # which errors when its first argument is an environment, so the
    # documented environment-argument path never worked.)
    .obstate$ob <- ob
    .obstate$envir <- envir
    return(ob)
  }
  # Named order book: (re)create a fresh hashed environment under `envir`.
  assign(ob, new.env(hash=TRUE), envir = envir)
  .obstate$ob <- ob
  .obstate$envir <- envir
  get(x=ob, pos=envir)
}

#' @rdname newOB
#' @export
getOB <- function(ob=NULL, envir=NULL){
  # Default to the most recently registered order book.
  if(is.null(ob)){
    ob <- .obstate$ob
  }
  if(is.null(envir)){
    envir <- .obstate$envir
  }
  if (is.environment(ob)) {
    # `ob` may be an environment (passed directly, or stored in .obstate by
    # newOB); return it rather than passing it to get(), which would error.
    return(ob)
  }
  if(!is.environment(envir[[ob]])) {
    # No such order book yet (or the name is bound to a non-environment):
    # create it on demand.
    newOB(ob=ob, envir=envir)
  }
  get(ob, pos=envir)
}
###############################################################################
# obmodeling: Parsing, analysis, visualization of L1 and L2 order book data
# Copyright (c) 2017- Jeffrey Mazar and Brian G. Peterson
#
# This library is distributed under the terms of the GNU Public License (GPL)
# for full details see https://www.gnu.org/licenses/licenses.en.html
#
###############################################################################
|
#' @title Add function
#' @description Compute the elementwise sum of two numeric inputs
#'   (vectorised over both arguments).
#' @param x A number.
#' @param y A number.
#' @return The sum of \code{x} and \code{y}.
#' @examples
#' add(1, 1)
#' add(10, 1)
#' @export
add <- function(x, y) {
  total <- x + y
  total
}
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Install Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
# Load Package: 'Cmd + Shift + L'
# convert roxygen comments to .Rd files 'Cmd + Shift + D'
| /R/add.R | no_license | hegu2692/curatedTBData | R | false | false | 729 | r | #' @title Add function
#' @description Compute the elementwise sum of two numeric inputs
#'   (vectorised over both arguments).
#' @param x A number.
#' @param y A number.
#' @return The sum of \code{x} and \code{y}.
#' @examples
#' add(1, 1)
#' add(10, 1)
#' @export
add <- function(x, y) {
  out <- x + y
  out
}
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Install Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
# Load Package: 'Cmd + Shift + L'
# convert roxygen comments to .Rd files 'Cmd + Shift + D'
|
# Tests for slackr internal helpers. Most of these hit the live Slack API
# of one specific workspace (note the hardcoded channel ID and user name
# below), hence skip_on_cran() throughout.
test_that("warn_for_args works as anticipated", {
  skip_on_cran()
  expect_error(
    warn_for_args(),
    "You must supply a token"
  )
  expect_error(
    warn_for_args(token = NA_character_),
    "You must supply a token"
  )
  expect_error(
    warn_for_args(token = NULL),
    "You must supply a token"
  )
  ## No error or return if nothing goes wrong
  expect_invisible(warn_for_args("xoxp-1234"))
  # NOTE(review): no regexp argument, so ANY warning satisfies this check.
  expect_warning(
    warn_for_args(token = "xoxp-1234", channel = "foo")
  )
})
# Smoke test: the auth.test endpoint should report ok for the configured token.
test_that("Auth works", {
  skip_on_cran()
  auth <- auth_test()
  expect_true(
    auth$ok
  )
})
# NOTE(review): the expected channel ID is hardcoded, tying this test to a
# single Slack workspace.
test_that("chrtrans works", {
  skip_on_cran()
  channel <- slackr_chtrans("#test")
  expect_equal(
    channel,
    "C01K5VCPLGZ"
  )
})
# NOTE(review): ncol(channels) == 28 pins the exact Slack API response
# schema; this will break when Slack adds or removes fields.
test_that("Channels works", {
  skip_on_cran()
  channels <- slackr_channels()
  expect_equal(
    ncol(channels),
    28
  )
  expect_gte(
    nrow(channels),
    6
  )
  expect_true(
    "test" %in% channels$name
  )
})
test_that("Users works", {
  skip_on_cran()
  users <- slackr_users()
  expect_true(
    "mrkaye97" %in% users$name
  )
})
# with_retry() should sleep for the `retry-after` interval on HTTP 429,
# emit a message, and then retry the request.
test_that("with_retry correctly retries requests", {
  skip_on_cran()
  i <- 1
  # Stub request: 429 on the first call, 200 afterwards. Built via
  # httr:::response -- `:::` reaches into httr internals and is fragile
  # across httr versions (TODO: confirm against the pinned httr version).
  mock <- function() {
    if (i > 1) {
      httr:::response(
        headers = list(`retry-after` = 2),
        status_code = 200,
        ok = TRUE
      )
    } else {
      i <<- i + 1 # `<<-` bumps the counter in the enclosing test frame
      httr:::response(
        headers = list(`retry-after` = 2),
        status_code = 429
      )
    }
  }
  expect_message(with_retry(mock), "Pausing for 2 seconds due to Slack API rate limit")
  out <- with_retry(mock)
  expect_true(out$ok)
  expect_equal(out$status_code, 200)
})
| /tests/testthat/test-internals.R | permissive | mrkaye97/slackr | R | false | false | 1,649 | r | test_that("warn_for_args works as anticipated", {
skip_on_cran()
expect_error(
warn_for_args(),
"You must supply a token"
)
expect_error(
warn_for_args(token = NA_character_),
"You must supply a token"
)
expect_error(
warn_for_args(token = NULL),
"You must supply a token"
)
## No error or return if nothing goes wrong
expect_invisible(warn_for_args("xoxp-1234"))
expect_warning(
warn_for_args(token = "xoxp-1234", channel = "foo")
)
})
# Smoke test: the auth.test endpoint should report ok for the configured
# token. These tests hit the live Slack API, hence skip_on_cran().
test_that("Auth works", {
  skip_on_cran()
  auth <- auth_test()
  expect_true(
    auth$ok
  )
})
# NOTE(review): the expected channel ID is hardcoded, tying this test to a
# single Slack workspace.
test_that("chrtrans works", {
  skip_on_cran()
  channel <- slackr_chtrans("#test")
  expect_equal(
    channel,
    "C01K5VCPLGZ"
  )
})
# NOTE(review): ncol(channels) == 28 pins the exact Slack API response
# schema; this will break when Slack adds or removes fields.
test_that("Channels works", {
  skip_on_cran()
  channels <- slackr_channels()
  expect_equal(
    ncol(channels),
    28
  )
  expect_gte(
    nrow(channels),
    6
  )
  expect_true(
    "test" %in% channels$name
  )
})
test_that("Users works", {
  skip_on_cran()
  users <- slackr_users()
  expect_true(
    "mrkaye97" %in% users$name
  )
})
# with_retry() should sleep for the `retry-after` interval on HTTP 429,
# emit a message, and then retry the request.
test_that("with_retry correctly retries requests", {
  skip_on_cran()
  i <- 1
  # Stub request: 429 on the first call, 200 afterwards. Built via
  # httr:::response -- `:::` reaches into httr internals and is fragile
  # across httr versions (TODO: confirm against the pinned httr version).
  mock <- function() {
    if (i > 1) {
      httr:::response(
        headers = list(`retry-after` = 2),
        status_code = 200,
        ok = TRUE
      )
    } else {
      i <<- i + 1 # `<<-` bumps the counter in the enclosing test frame
      httr:::response(
        headers = list(`retry-after` = 2),
        status_code = 429
      )
    }
  }
  expect_message(with_retry(mock), "Pausing for 2 seconds due to Slack API rate limit")
  out <- with_retry(mock)
  expect_true(out$ok)
  expect_equal(out$status_code, 200)
})
|
# Define three length-3 numeric vectors; they are summed elementwise by
# the expression that follows in the original script.
x <- c(1, 2, 3)
y <- c(4, 5, 6)
z <- c(7, 8, 9)
x + y + z | /process.R | no_license | Ninja58900/demo | R | false | false | 57 | r | x <- c(1, 2, 3)
y <- c(4, 5, 6)
z <- c(7, 8, 9)
x + y + z |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JaBbA.R
\name{JaBbA.digest}
\alias{JaBbA.digest}
\title{JaBbA.digest}
\usage{
JaBbA.digest(jab, kag = NULL, verbose = T, keep.all = T)
}
\arguments{
\item{jab}{JaBbA object "undigested"}
\item{kag}{karyograph (original karyograph input to JaBbA), if NULL then will "redigest" JaBbA object}
\item{verbose}{logical flag}
\item{keep.all}{keep.all (default TRUE) whether to keep 0 copy junctions or collapse segments across these as well}
}
\description{
JaBbA.digest
}
\details{
processes JaBbA object
(1) collapsing segments with same copy number that lack loose ends
(2) (optional) +/- adds segments corresponding to loose ends
(3) outputting edges data frame with colors, and other formatting information
(4) outputting junctions GRangesList with copy number, color, lty and other plotting components
TODO: replace with proper object instantiator
}
| /man/JaBbA.digest.Rd | no_license | andref1989/JaBbA | R | false | true | 933 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JaBbA.R
\name{JaBbA.digest}
\alias{JaBbA.digest}
\title{JaBbA.digest}
\usage{
JaBbA.digest(jab, kag = NULL, verbose = T, keep.all = T)
}
\arguments{
\item{jab}{JaBbA object "undigested"}
\item{kag}{karyograph (original karyograph input to JaBbA), if NULL then will "redigest" JaBbA object}
\item{verbose}{logical flag}
\item{keep.all}{keep.all (default TRUE) whether to keep 0 copy junctions or collapse segments across these as well}
}
\description{
JaBbA.digest
}
\details{
processes JaBbA object
(1) collapsing segments with same copy number that lack loose ends
(2) (optional) +/- adds segments corresponding to loose ends
(3) outputting edges data frame with colors, and other formatting information
(4) outputting junctions GRangesList with copy number, color, lty and other plotting components
TODO: replace with proper object instantiator
}
|
# Extracted help-page example exercising provenance::read.densities.
library(provenance)
### Name: read.densities
### Title: Read a .csv file with mineral and rock densities
### Aliases: read.densities
### ** Examples
# Example datasets shipped with the provenance package.
data(Namib,densities)
# Keep only heavy-mineral sample N8 from the Namib data.
N8 <- subset(Namib$HM,select="N8")
# NOTE(review): phi/sigmaphi/medium parameterise the grain-size settling
# model -- confirm against ?minsorting before reusing these values.
distribution <- minsorting(N8,densities,phi=2,sigmaphi=1,medium="air",by=0.05)
plot(distribution)
| /data/genthat_extracted_code/provenance/examples/read.densities.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 311 | r | library(provenance)
# Extracted help-page example exercising provenance::read.densities.
### Name: read.densities
### Title: Read a .csv file with mineral and rock densities
### Aliases: read.densities
### ** Examples
# Example datasets shipped with the provenance package.
data(Namib,densities)
# Keep only heavy-mineral sample N8 from the Namib data.
N8 <- subset(Namib$HM,select="N8")
# NOTE(review): phi/sigmaphi/medium parameterise the grain-size settling
# model -- confirm against ?minsorting before reusing these values.
distribution <- minsorting(N8,densities,phi=2,sigmaphi=1,medium="air",by=0.05)
plot(distribution)
|
#' Journals currently or previously indexed in MEDLINE
#'
#' Titles, abbreviations, MeSH terms and other details of journals currently or
#' previously indexed in the NLM Catalog at NCBI
#'
#' XML search results from the NLM Catalog at NCBI using "reportedmedline" on April 18, 2016
#'
#' @format A data frame with 15001 observations on the following 7 variables.
#' \describe{
#'   \item{ta}{ title abbreviation = MedlineTA tag}
#'   \item{title}{ journal title = TitleMain/Title tag}
#'   \item{country}{ Country tag}
#'   \item{language}{ primary language = Language[@@LangType="Primary"] tag }
#'   \item{year}{ PublicationFirstYear tag }
#'   \item{mesh}{ MeSH terms including descriptor, qualifier and major topic marked with * }
#'   \item{current}{ Currently indexed in MEDLINE = IndexingSourceName[@@IndexingStatus="Currently-indexed"] tag matches MEDLINE }
#' }
#'
#' @source \url{http://www.ncbi.nlm.nih.gov/nlmcatalog}
#'
#' @examples
#' data(nlm)
#' table(nlm$current)
#' subset(nlm, grepl("Heart Diseases", mesh) )
#' table2(unlist(strsplit(nlm$mesh, "; ")))
#'
# Literal '@' inside roxygen text must be escaped as '@@' (fixed above).
# NOTE(review): table2() in the example is not base R; presumably a helper
# exported by this package -- confirm it is available when the example runs.
"nlm"
#' Yersinia pestis virulence publications
#'
#' Europe PMC core search results with Yersinia pestis virulence in title
#'
#' @format A data frame with 160 observations on the following 19 variables.
#' \describe{
#' \item{pmid}{PubMed ID = pmid tag}
#' \item{pmcid}{PubMed Central ID = pmcid tag}
#' \item{src}{ source tag }
#' \item{doi}{ DOI tag }
#' \item{authors}{ authorString tag }
#' \item{year}{ pubYear tag }
#' \item{title}{ article title tag }
#' \item{journal}{ full journal name = journal/title tag }
#' \item{nlm_ta}{ journal abbreviation = medlineAbbreviation tag}
#' \item{volume}{ journalInfo/volume tag }
#' \item{issue}{ journalInfo/issue tag }
#' \item{pages}{ pageInfo tag }
#' \item{citedby}{ citedByCount tag }
#' \item{published}{ firstPublicationDate tag }
#' \item{language}{language tag}
#' \item{abstract}{abstractText tag}
#' \item{mesh}{ MeSH terms including descriptor, qualifier and major topic marked with * }
#' \item{keywords}{keyword tags}
#' \item{chemicals}{chemical/name tags}
#' }
#' @source Europe PMC search
#' @examples
#' data(yp)
#' \dontrun{ yp <- search_core("title:(Yersinia pestis virulence) src:MED")}
#' t(yp[7,])
#' bib_format(yp[1:7,])
#' data.frame(n=sort(table(unlist(strsplit(yp$pubType, "; "))), decreasing=TRUE))
#' subset(yp, grepl("retracted", pubType))
#' data.frame(n=sort(table(unlist(strsplit(yp$authorString, ", "))), decreasing=TRUE)[1:15])
"yp"
| /R/data.R | no_license | enterstudio/euPMC | R | false | false | 2,557 | r | #' Journals currently or previously indexed in MEDLINE
#'
#' Titles, abbreviations, MeSH terms and other details of journals currently or
#' previously indexed in the NLM Catalog at NCBI
#'
#' XML search results from the NLM Catalog at NCBI using "reportedmedline" on April 18, 2016
#'
#' @format A data frame with 15001 observations on the following 7 variables.
#' \describe{
#' \item{ta}{ title abbreviation = MedlineTA tag}
#' \item{title}{ journal title = TitleMain/Title tag}
#' \item{country}{ Country tag}
#' \item{language}{ primary language = Language[@LangType="Primary"] tag }
#' \item{year}{ PublicationFirstYear tag }
#' \item{mesh}{ MeSH terms including descriptor, qualifier and major topic marked with * }
#' \item{current}{ Currently indexed in MEDLINE = IndexingSourceName[@IndexingStatus="Currently-indexed"] tag matches MEDLINE }
#' }
#'
#' @source \url{http://www.ncbi.nlm.nih.gov/nlmcatalog}
#'
#' @examples
#' data(nlm)
#' table(nlm$current)
#' subset(nlm, grepl("Heart Diseases", mesh) )
#' table2(unlist(strsplit(nlm$mesh, "; ")))
#'
"nlm"
#' Yersinia pestis virulence publications
#'
#' Europe PMC core search results with Yersinia pestis virulence in title
#'
#' @format A data frame with 160 observations on the following 19 variables.
#' \describe{
#' \item{pmid}{PubMed ID = pmid tag}
#' \item{pmcid}{PubMed Central ID = pmcid tag}
#' \item{src}{ source tag }
#' \item{doi}{ DOI tag }
#' \item{authors}{ authorString tag }
#' \item{year}{ pubYear tag }
#' \item{title}{ article title tag }
#' \item{journal}{ full journal name = journal/title tag }
#' \item{nlm_ta}{ journal abbreviation = medlineAbbreviation tag}
#' \item{volume}{ journalInfo/volume tag }
#' \item{issue}{ journalInfo/issue tag }
#' \item{pages}{ pageInfo tag }
#' \item{citedby}{ citedByCount tag }
#' \item{published}{ firstPublicationDate tag }
#' \item{language}{language tag}
#' \item{abstract}{abstractText tag}
#' \item{mesh}{ MeSH terms including descriptor, qualifier and major topic*}
#' \item{keywords }{keyword tags}
#' \item{chemicals }{chemical/name tags}
#' }
#' @source Europe PMC search
#' @examples
#' data(yp)
#' \dontrun{ yp <- search_core("title:(Yersinia pestis virulence) src:MED")}
#' t(yp[7,])
#' bib_format(yp[1:7,])
#' data.frame(n=sort(table(unlist(strsplit(yp$pubType, "; "))), decreasing=TRUE))
#' subset(yp, grepl("retracted", pubType))
#' data.frame(n=sort(table(unlist(strsplit(yp$authorString, ", "))), decreasing=TRUE)[1:15])
"yp"
|
#' Search YouTube
#'
#' Search for videos, channels and playlists. (By default, the function searches for videos.)
#'
#' @param term Character. Search term; required; no default
#' @param max_results Maximum number of items that should be returned. Integer. Optional. Can be between 0 and 50. Default is 50.
#' Search results are constrained to a maximum of 500 videos if type is video and we have a value of \code{channel_id}.
#' @param channel_id Character. Only return search results from this channel; Optional.
#' @param channel_type Character. Optional. Takes one of two values: \code{'any', 'show'}. Default is \code{'any'}
#' @param event_type Character. Optional. Takes one of three values: \code{'completed', 'live', 'upcoming'}
#' @param location Character. Optional. Latitude and Longitude within parentheses, e.g. "(37.42307,-122.08427)"
#' @param location_radius Character. Optional. e.g. "1500m", "5km", "10000ft", "0.75mi"
#' @param published_after Character. Optional. RFC 339 Format. For instance, "1970-01-01T00:00:00Z"
#' @param published_before Character. Optional. RFC 339 Format. For instance, "1970-01-01T00:00:00Z"
#' @param type Character. Optional. Takes one of three values: \code{'video', 'channel', 'playlist'}. Default is \code{'video'}.
#' @param video_caption Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'closedCaption', 'none'}. Type must be set to video.
#' @param video_type Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'episode'} (return episode of shows), 'movie' (return movies)
#' @param video_syndicated Character. Optional. Takes one of two values: \code{'any'} (return all videos; Default), \code{'true'} (return only syndicated videos)
#' @param video_definition Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'high', 'standard'}
#' @param video_license Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'creativeCommon'} (return videos with Creative Commons
#' license), \code{'youtube'} (return videos with standard YouTube license).
#' @param simplify Boolean. Return a data.frame if \code{TRUE}. Default is \code{TRUE}. If \code{TRUE}, it returns a list that carries additional information.
#' @param page_token specific page in the result set that should be returned, optional
#' @param get_all get all results, iterating through all the results pages. Default is \code{TRUE}. Result is a \code{data.frame}. Optional.
#' @param \dots Additional arguments passed to \code{\link{tuber_GET}}.
#'
#' @return data.frame with 16 elements: \code{video_id, publishedAt, channelId, title, description, thumbnails.default.url, thumbnails.default.width,
#' thumbnails.default.height, thumbnails.medium.url, thumbnails.medium.width, thumbnails.medium.height, thumbnails.high.url, thumbnails.high.width,
#' thumbnails.high.height, channelTitle, liveBroadcastContent}
#' @export
#'
#' @references \url{https://developers.google.com/youtube/v3/docs/search/list}
#'
#' @examples
#' \dontrun{
#'
#' # Set API token via yt_oauth() first
#'
#' yt_search(term="Barack Obama")
#' yt_search(term="Barack Obama", published_after="2016-10-01T00:00:00Z")
#' yt_search(term="Barack Obama", published_before="2016-09-01T00:00:00Z")
#' yt_search(term="Barack Obama", published_before="2016-03-01T00:00:00Z",
#' published_after="2016-02-01T00:00:00Z")
#' yt_search(term="Barack Obama", published_before = "2016-02-10T00:00:00Z",
#' published_after="2016-01-01T00:00:00Z")
#' }
yt_search <- function (term=NULL, max_results = 50, channel_id= NULL, channel_type=NULL, type="video",
			event_type=NULL, location= NULL, location_radius=NULL, published_after=NULL,
			published_before=NULL, video_definition = "any", video_caption="any",
			video_license="any", video_syndicated="any", video_type="any",
			simplify = TRUE, get_all = TRUE, page_token = NULL, ...) {
	# ---- input validation ----------------------------------------------------
	if (!is.character(term)) stop("Must specify a search term.\n")
	# Bug fix: use scalar '||' (not vectorised '|') in a scalar if() guard.
	if (max_results < 0 || max_results > 50) stop("max_results only takes a value between 0 and 50.")
	if (!(video_license %in% c("any", "creativeCommon", "youtube"))) stop("video_license can only take values: any, creativeCommon, or youtube.")
	if (!(video_syndicated %in% c("any", "true"))) stop("video_syndicated can only take values: any or true.")
	if (!(video_type %in% c("any", "episode", "movie"))) stop("video_type can only take values: any, episode, or movie.")
	if (is.character(published_after)) if (is.na(as.POSIXct(published_after, format = "%Y-%m-%dT%H:%M:%SZ"))) stop("The date is not properly formatted in RFC 339 Format.")
	if (is.character(published_before)) if (is.na(as.POSIXct(published_before, format = "%Y-%m-%dT%H:%M:%SZ"))) stop("The date is not properly formatted in RFC 339 Format.")
	# The video_* filters are only valid when searching for videos.
	if (type != "video") video_caption <- video_license <- video_definition <- video_type <- video_syndicated <- NULL
	# URL-encode spaces in the query term.
	format_term <- paste0(unlist(strsplit(term, " ")), collapse="%20")
	querylist <- list(part="snippet", q = format_term, maxResults = max_results,
	                  channelId=channel_id, type=type, channelType=channel_type, eventType= event_type,
	                  location= location,
	                  locationRadius = location_radius,  # bug fix: was documented but never sent
	                  publishedAfter=published_after, publishedBefore=published_before,
	                  videoDefinition = video_definition, videoCaption= video_caption,
	                  videoType=video_type, videoSyndicated=video_syndicated,
	                  videoLicense= video_license, pageToken = page_token)
	# Drop NULL entries ("Sending NULLs to Google seems to short its wiring").
	# Bug fix: the old filter kept only character-valued entries, which also
	# silently discarded the numeric maxResults, so the API default (5) applied.
	querylist <- querylist[!vapply(querylist, is.null, logical(1))]
	res <- tuber_GET("search", querylist, ...)
	if (identical(get_all, TRUE)) {
		# Flatten one page of items; for videos, prepend the video id.
		if (type=="video") {
			simple_res <- lapply(res$items, function(x) c(video_id = x$id$videoId, unlist(x$snippet)))
		} else {
			simple_res <- lapply(res$items, function(x) unlist(x$snippet))
		}
		fin_res <- ldply(simple_res, rbind)
		page_token <- res$nextPageToken
		# Page through the remaining results via single-page recursive calls.
		while (is.character(page_token)) {
			# Bug fixes vs. the original recursion: the spurious part="snippet"
			# argument (which leaked through '...' into tuber_GET) is removed,
			# location_radius is forwarded, and '...' (e.g. auth options) is
			# propagated to subsequent pages.
			a_res <- yt_search(term = term, max_results = max_results, channel_id=channel_id, type=type,
			                   channel_type=channel_type, event_type= event_type,
			                   location= location, location_radius = location_radius,
			                   published_after = published_after, published_before = published_before,
			                   video_definition = video_definition, video_caption= video_caption,
			                   video_type=video_type, video_syndicated=video_syndicated,
			                   video_license = video_license, simplify = FALSE, get_all = FALSE,
			                   page_token = page_token, ...)
			if (type=="video") {
				a_simple_res <- lapply(a_res$items, function(x) c(video_id = x$id$videoId, unlist(x$snippet)))
			} else {
				a_simple_res <- lapply(a_res$items, function(x) unlist(x$snippet))
			}
			a_resdf <- ldply(a_simple_res, rbind)
			fin_res <- rbind(fin_res, a_resdf)
			page_token <- a_res$nextPageToken
		}
		return(fin_res)
	}
	if (identical(simplify, TRUE)) {
		if (res$pageInfo$totalResults != 0) {
			simple_res <- lapply(res$items, function(x) unlist(x$snippet))
			resdf <- ldply(simple_res, rbind)
			return(resdf)
		} else {
			return(data.frame())
		}
	}
	# simplify = FALSE: return the raw parsed API response.
	res
}
| /R/yt_search.R | permissive | maelezo/tuber | R | false | false | 7,299 | r | #' Search YouTube
#'
#' Search for videos, channels and playlists. (By default, the function searches for videos.)
#'
#' @param term Character. Search term; required; no default
#' @param max_results Maximum number of items that should be returned. Integer. Optional. Can be between 0 and 50. Default is 50.
#' Search results are constrained to a maximum of 500 videos if type is video and we have a value of \code{channel_id}.
#' @param channel_id Character. Only return search results from this channel; Optional.
#' @param channel_type Character. Optional. Takes one of two values: \code{'any', 'show'}. Default is \code{'any'}
#' @param event_type Character. Optional. Takes one of three values: \code{'completed', 'live', 'upcoming'}
#' @param location Character. Optional. Latitude and Longitude within parentheses, e.g. "(37.42307,-122.08427)"
#' @param location_radius Character. Optional. e.g. "1500m", "5km", "10000ft", "0.75mi"
#' @param published_after Character. Optional. RFC 339 Format. For instance, "1970-01-01T00:00:00Z"
#' @param published_before Character. Optional. RFC 339 Format. For instance, "1970-01-01T00:00:00Z"
#' @param type Character. Optional. Takes one of three values: \code{'video', 'channel', 'playlist'}. Default is \code{'video'}.
#' @param video_caption Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'closedCaption', 'none'}. Type must be set to video.
#' @param video_type Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'episode'} (return episode of shows), 'movie' (return movies)
#' @param video_syndicated Character. Optional. Takes one of two values: \code{'any'} (return all videos; Default), \code{'true'} (return only syndicated videos)
#' @param video_definition Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'high', 'standard'}
#' @param video_license Character. Optional. Takes one of three values: \code{'any'} (return all videos; Default), \code{'creativeCommon'} (return videos with Creative Commons
#' license), \code{'youtube'} (return videos with standard YouTube license).
#' @param simplify Boolean. Return a data.frame if \code{TRUE}. Default is \code{TRUE}. If \code{TRUE}, it returns a list that carries additional information.
#' @param page_token specific page in the result set that should be returned, optional
#' @param get_all get all results, iterating through all the results pages. Default is \code{TRUE}. Result is a \code{data.frame}. Optional.
#' @param \dots Additional arguments passed to \code{\link{tuber_GET}}.
#'
#' @return data.frame with 16 elements: \code{video_id, publishedAt, channelId, title, description, thumbnails.default.url, thumbnails.default.width,
#' thumbnails.default.height, thumbnails.medium.url, thumbnails.medium.width, thumbnails.medium.height, thumbnails.high.url, thumbnails.high.width,
#' thumbnails.high.height, channelTitle, liveBroadcastContent}
#' @export
#'
#' @references \url{https://developers.google.com/youtube/v3/docs/search/list}
#'
#' @examples
#' \dontrun{
#'
#' # Set API token via yt_oauth() first
#'
#' yt_search(term="Barack Obama")
#' yt_search(term="Barack Obama", published_after="2016-10-01T00:00:00Z")
#' yt_search(term="Barack Obama", published_before="2016-09-01T00:00:00Z")
#' yt_search(term="Barack Obama", published_before="2016-03-01T00:00:00Z",
#' published_after="2016-02-01T00:00:00Z")
#' yt_search(term="Barack Obama", published_before = "2016-02-10T00:00:00Z",
#' published_after="2016-01-01T00:00:00Z")
#' }
yt_search <- function (term=NULL, max_results = 50, channel_id= NULL, channel_type=NULL, type="video",
			event_type=NULL, location= NULL, location_radius=NULL, published_after=NULL,
			published_before=NULL, video_definition = "any", video_caption="any",
			video_license="any", video_syndicated="any", video_type="any",
			simplify = TRUE, get_all = TRUE, page_token = NULL, ...) {
	# ---- input validation ----------------------------------------------------
	if (!is.character(term)) stop("Must specify a search term.\n")
	# Bug fix: use scalar '||' (not vectorised '|') in a scalar if() guard.
	if (max_results < 0 || max_results > 50) stop("max_results only takes a value between 0 and 50.")
	if (!(video_license %in% c("any", "creativeCommon", "youtube"))) stop("video_license can only take values: any, creativeCommon, or youtube.")
	if (!(video_syndicated %in% c("any", "true"))) stop("video_syndicated can only take values: any or true.")
	if (!(video_type %in% c("any", "episode", "movie"))) stop("video_type can only take values: any, episode, or movie.")
	if (is.character(published_after)) if (is.na(as.POSIXct(published_after, format = "%Y-%m-%dT%H:%M:%SZ"))) stop("The date is not properly formatted in RFC 339 Format.")
	if (is.character(published_before)) if (is.na(as.POSIXct(published_before, format = "%Y-%m-%dT%H:%M:%SZ"))) stop("The date is not properly formatted in RFC 339 Format.")
	# The video_* filters are only valid when searching for videos.
	if (type != "video") video_caption <- video_license <- video_definition <- video_type <- video_syndicated <- NULL
	# URL-encode spaces in the query term.
	format_term <- paste0(unlist(strsplit(term, " ")), collapse="%20")
	querylist <- list(part="snippet", q = format_term, maxResults = max_results,
	                  channelId=channel_id, type=type, channelType=channel_type, eventType= event_type,
	                  location= location,
	                  locationRadius = location_radius,  # bug fix: was documented but never sent
	                  publishedAfter=published_after, publishedBefore=published_before,
	                  videoDefinition = video_definition, videoCaption= video_caption,
	                  videoType=video_type, videoSyndicated=video_syndicated,
	                  videoLicense= video_license, pageToken = page_token)
	# Drop NULL entries ("Sending NULLs to Google seems to short its wiring").
	# Bug fix: the old filter kept only character-valued entries, which also
	# silently discarded the numeric maxResults, so the API default (5) applied.
	querylist <- querylist[!vapply(querylist, is.null, logical(1))]
	res <- tuber_GET("search", querylist, ...)
	if (identical(get_all, TRUE)) {
		# Flatten one page of items; for videos, prepend the video id.
		if (type=="video") {
			simple_res <- lapply(res$items, function(x) c(video_id = x$id$videoId, unlist(x$snippet)))
		} else {
			simple_res <- lapply(res$items, function(x) unlist(x$snippet))
		}
		fin_res <- ldply(simple_res, rbind)
		page_token <- res$nextPageToken
		# Page through the remaining results via single-page recursive calls.
		while (is.character(page_token)) {
			# Bug fixes vs. the original recursion: the spurious part="snippet"
			# argument (which leaked through '...' into tuber_GET) is removed,
			# location_radius is forwarded, and '...' (e.g. auth options) is
			# propagated to subsequent pages.
			a_res <- yt_search(term = term, max_results = max_results, channel_id=channel_id, type=type,
			                   channel_type=channel_type, event_type= event_type,
			                   location= location, location_radius = location_radius,
			                   published_after = published_after, published_before = published_before,
			                   video_definition = video_definition, video_caption= video_caption,
			                   video_type=video_type, video_syndicated=video_syndicated,
			                   video_license = video_license, simplify = FALSE, get_all = FALSE,
			                   page_token = page_token, ...)
			if (type=="video") {
				a_simple_res <- lapply(a_res$items, function(x) c(video_id = x$id$videoId, unlist(x$snippet)))
			} else {
				a_simple_res <- lapply(a_res$items, function(x) unlist(x$snippet))
			}
			a_resdf <- ldply(a_simple_res, rbind)
			fin_res <- rbind(fin_res, a_resdf)
			page_token <- a_res$nextPageToken
		}
		return(fin_res)
	}
	if (identical(simplify, TRUE)) {
		if (res$pageInfo$totalResults != 0) {
			simple_res <- lapply(res$items, function(x) unlist(x$snippet))
			resdf <- ldply(simple_res, rbind)
			return(resdf)
		} else {
			return(data.frame())
		}
	}
	# simplify = FALSE: return the raw parsed API response.
	res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.init.R
\name{fixed<-}
\alias{fixed<-}
\title{Set value of fixed attribute}
\usage{
fixed(x) <- value
}
\arguments{
\item{x}{object}
\item{value}{value to set}
}
\description{
Sets value of fixed attribute.
}
\keyword{internal}
| /man/fixed-set.Rd | no_license | bergsmat/partab | R | false | true | 310 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.init.R
\name{fixed<-}
\alias{fixed<-}
\title{Set value of fixed attribute}
\usage{
fixed(x) <- value
}
\arguments{
\item{x}{object}
\item{value}{value to set}
}
\description{
Sets value of fixed attribute.
}
\keyword{internal}
|
## ***Made available using the The MIT License (MIT)***
# Copyright (c) 2012, Adam Cooper
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
## ************ end licence ***************
## ----------------------------------------
## This code builds a Google Charts "bubble chart" to track (stemmed) terms over several years
## shows: term freq, document freq, positive and negative sentiment, "subjectivity" (pos or neg sentiment)
## ----------------------------------------
library("tm")
library("Snowball")
library("slam")
library("brew")
library("RSQLite")
#library("rjson")
##
## NB {data_set_name}/HV_Init.R should be run first to establish the run parameters
##
#source(paste(base.dir,"commonFunctions.R",sep="/"))
# Filepaths
base.dir<-"/home/arc1/R Projects/Text Mining Weak Signals"
source.dir<-paste(base.dir,"Source Data",sep="/")
output.dir<-paste("/home/arc1/R Projects/Text Mining Weak Signals Output/History Visualiser",set.name,sep="/")
brew.dir<-paste(base.dir,"History Visualiser",sep="/")
web.page.base<-paste("http://arc12.github.com/Text-Mining-Weak-Signals-Output/History Visualiser",set.name, sep="/")
dir.create(output.dir, showWarnings=FALSE)
setwd(output.dir)
if(source.type=="a"){
file.postfix<-"conf"
}else{
file.postfix<-"blog"
}
## SET UP DATABASE
# Name of the SQLite source database; set to NA to skip opening a connection.
sqlite.filename<-"TMWS Data A.sqlite"
# TRUE whenever a (non-NA) filename is supplied.
use.sqlite<-!is.na(sqlite.filename)
if(use.sqlite){
# instantiate the SQLite driver in the R process
sqlite<- dbDriver("SQLite")
# open sqlite connection. db is a "connection"
# 'db' is read by the dbGetQuery() calls later in this script; it is never
# explicitly closed here -- NOTE(review): consider dbDisconnect() at end of run.
db<- dbConnect(sqlite, dbname=paste(source.dir,sqlite.filename,sep="/"))
#summary(db)
}
##
## Choose some run parameters depending on the source type
## start and end dates are largely plucked from database limits
##
# Reference "now" in GMT (not referenced again in the visible part of this file).
today<-as.POSIXlt(Sys.Date(), tz = "GMT")
if(source.type=="a"){
# Conference abstracts: only a publication year is stored, so slices are
# yearly and the date range comes from the min/max year in the table.
slice.size<-12 #how many months in a time slice used in the analysis.
interpolate.size<-3 #number of months between interpolated points in the output; 1 "row" is created for each interval. No interpolation if slice.size = interpolate.size
minmax.years<-dbGetQuery(db,"SELECT min(year) min, max(year) max from abstract")[1,]
start.year<-max(2006, as.numeric(minmax.years$min))
start.month<-1 #default = 1
end.year<-as.numeric(minmax.years$max)
end.month<-1 #default = 1. NB this determines the start of the last slice
}else if(source.type=="b"){
# Blog posts: full datestamps available, so use 2-month slices bounded by
# the min/max datestamp in the table.
slice.size<-2 #how many months in a time slice used in the analysis.
interpolate.size<-1 #number of months between interpolated points in the output; 1 "row" is created for each interval. No interpolation if slice.size = interpolate.size
minmax.dates<-dbGetQuery(db,"SELECT min(datestamp) min, max(datestamp) max from blog_post")[1,]
min.date<-as.POSIXlt(minmax.dates$min, tz="GMT")
max.date<-as.POSIXlt(minmax.dates$max, tz="GMT")
start.year<- min.date$year+1900
# NOTE(review): override.start.year must be defined (possibly NA) by the
# per-dataset HV_Init.R sourced before this script -- confirm.
if(!is.na(override.start.year)){start.year<-override.start.year}
start.month<-min.date$mon+1#generally set to 1
#since we want the end.date to actually be the START of the last slice and this must be a whole number of "slice.size" slices, there is some fiddling to do
# Whole months spanned between the start and the month before max.date.
m.diff<-12*(max.date$year+1900-start.year)+max.date$mon-min.date$mon-1
if(max.date$mday<28){m.diff<-m.diff-1}#remove months that are not [almost] complete
end.date<-as.POSIXlt(paste(start.year,start.month,"1",sep="-"), tz = "GMT")
# Advance by a whole number of slices from the start date.
end.date$mon<- end.date$mon+floor(m.diff/slice.size)*slice.size
end.date<-as.POSIXlt(as.character(end.date))#otherwise $mon and $year are not changed right
end.year<-end.date$year+1900
end.month<-end.date$mon+1
}else{
stop(paste("Unknown source type",source.type))
}
#create a mapping from datatable column names to PlainTextDocument attribute names
#"Keywords" and after are user-defined "localmetadata" properties while the rest are standard tm package document metadata fields
# NOTE(review): the comment above mentions "Keywords" but no Keywords entry
# appears below; Positive/Negative/Subjectivity are the localmetadata fields
# actually mapped. 'map' is consumed by readTabular(mapping=map) when the
# per-slice corpus is built later in this script.
if(source.type=="a"){
map<-list(Content="treated", Heading="title", Author="authors", DateTimeStamp="datestamp", Origin="origin", URL="url", DBLP_URL="dblp_url", Positive="pos_score", Negative="neg_score", Subjectivity="subj_score")
}else{
map<-list(Content="treated", Heading="title", Author="authors", DateTimeStamp="datestamp", Origin="origin",URL="url", Positive="pos_score", Negative="neg_score", Subjectivity="subj_score")
}
## CONVENIENT TO USE FUNCTION FOR BREWING
# Renders the brew template twice for the given page name: once as a normal
# HTML page and once as a Google Gadget XML file, both written to the current
# working directory (set to output.dir earlier in this script).
# NOTE(review): html.filename, web.page.url, gadget.filename,
# gadget.url.encoded and isGadget look unused here, but brew() evaluates its
# template in the calling frame by default, so the template presumably reads
# them -- confirm against "HV Brew Template.html" before renaming any of them.
# Relies on globals: file.postfix, web.page.base, brew.dir.
# 'isGroup' is not referenced anywhere in this body.
doBrew<-function(page.name, isGroup=FALSE){
#Create the HTML/JS for the Google Chart using a Brew Template
html.filename<-paste(page.name," ",file.postfix,".html",sep="")
web.page.url<-paste(web.page.base,html.filename,sep="/")
gadget.filename<-paste(page.name,file.postfix,"gadget.xml",sep=" ")
gadget.url.encoded<-URLencode(paste(web.page.base,gadget.filename,sep="/"), reserved=TRUE)
# isGadget presumably switches the template between page and gadget modes
isGadget=FALSE
brew(file=paste(brew.dir,"HV Brew Template.html",sep="/"),
output=html.filename,run=TRUE)
isGadget=TRUE
brew(file=paste(brew.dir,"HV Brew Template.html",sep="/"),
output=gadget.filename,run=TRUE)
}
# adapted from confidence.band (Derek Young and David Hunter)
# based on Ellipses, by J. Fox and G. Monette, from
# car package
# Draw confidence band(s) for a simple linear regression onto the CURRENT
# plotting device (only lines() is used, so a plot must already be open; the
# original point/line drawing calls are left commented out below).
#
# model    - an lm fit of the form y ~ x (exactly one predictor + intercept)
# levels   - confidence level(s); one band pair is drawn per level
# segments - number of line segments used to approximate each band curve
# col.points, col.line, lty.line - retained for interface compatibility
#                                  (only referenced by the commented-out calls)
# col.bands, lty.bands - colour / line type of the band curves
confidence.band <- function(model, levels=0.95, segments=50, col.points=palette()[1],
       col.line=palette()[1], col.bands=palette()[2],
       lty.line=1, lty.bands=2, ...) {
  # The ellipse construction below is only valid for one predictor + intercept.
  if (attr(model$terms,"intercept")!=1 || length(model$coef) !=2) {
    # Bug fix: the error message previously misspelled the function name
    # as "condifence.bands".
    stop(paste("confidence.band only works for simple linear regression\n",
               "with one predictor and an intercept"))
  }
  # plot(model$model[,2:1], col=col.points, ...)
  # abline(model, col=col.line, lty=lty.line, lwd=2)
  # Parameterise a half circle and shear it by the Cholesky factor of the
  # coefficient covariance to trace the joint confidence ellipse boundary.
  angles <- (0:segments)*pi/segments
  halfcircle <- cbind(cos(angles), sin(angles))
  chol.shape <- chol(vcov(model))
  slopes <- (halfcircle %*% chol.shape)[,2]
  # Rotate the parameterisation so the extreme slope sits at an endpoint.
  angles <- angles+angles[which.max(slopes)]
  halfcircle <- cbind(cos(angles), sin(angles))
  center <- model$coef
  # Scheffe-type radius: one value per requested confidence level.
  radius <- sqrt(2*qf(levels, 2, df.residual(model)))
  for (r in radius) {
    for (i in 1:2) {  # the two half-ellipses give the upper and lower bands
      halfcircle <- -halfcircle
      ellipse <- sweep(r*(halfcircle %*% chol.shape), 2, center, "+")
      int <- ellipse[,1]
      slope <- ellipse[,2]
      # Envelope of the family of lines y = int + slope * x.
      x <- -diff(int)/diff(slope)
      y <- int[-1]+slope[-1]*x
      lines(x, y, lwd=2, lty=lty.bands, col=col.bands)
    }
  }
}
# Convenience for plotting a term's time series to a PNG file, overlaying a
# linear trend line plus confidence band when the fit is at least approximate.
#
# outFile  - path of the PNG to create
# xdates   - x-axis values (dates) for plotting
# xvals    - numeric x values used for the linear model
# yvals    - series to plot; leading zeros (before the term first appears)
#            are blanked to NA so the fit starts at first appearance
# main.txt, sub.txt, y.txt - title, subtitle and y-axis label
# hiPts1, hiPts2 - two levels of highlighting (intended for positive
#            sentiment); each is a vector of indexes into the x/y vectors
do.plot<-function(outFile, xdates,xvals,yvals,main.txt,sub.txt,y.txt, hiPts1=NULL, hiPts2=NULL){
   #set any leading zeros to NA in order to omit them from plot and linear model so that we fit the line after the term is first seen
   first.non.zero<-match(TRUE,yvals>0)
   # Bug fix: match() returns NA when the term never appears, and the old
   # unguarded comparison (NA > 1) raised "missing value where TRUE/FALSE
   # needed"; guard explicitly.
   if(!is.na(first.non.zero) && first.non.zero>1){
      yvals[1:(first.non.zero-1)]<-NA
   }
   png(file=outFile, width=1000, height=1000,pointsize=12, res=150)
   # Ensure the device is closed even if plotting fails part-way (the
   # original leaked an open device on error).
   on.exit(dev.off(), add=TRUE)
   plot(xdates, yvals, main=main.txt, sub=sub.txt, xlab="", ylab=y.txt, type="b")
   # Fit and draw a trend line only when there are >2 non-zero observations
   # and the fit is at least approximate (r-squared > 0.3).
   if(sum(yvals>0, na.rm=TRUE)>2){
      model<-lm(yvals ~ xvals, na.action=na.omit)# +I(xvals^2))
      r.squared<-summary(model)$r.squared
      if(r.squared>0.3){
         yy <- model$coef %*% rbind(1,xvals)#,xvals^2)
         confidence.band(model)
         lines(xvals,yy,lwd=2,col=3)
      }
   }
   #two levels of highlighting - intended for positive sentiment.
   #the arguments hiPts are indexes into the x and y vectors
   if(length(hiPts1)>0){
      points(xdates[hiPts1],yvals[hiPts1], col="red")#open red circles
   }
   if(length(hiPts2)>0){
      points(xdates[hiPts2],yvals[hiPts2],pch=19, col="red")#filled red circles
   }
}
##
## Compute the time-slice operation parameters from the "init" values
##
# Number of whole slices between (start.year, start.month) and
# (end.year, end.month), inclusive of the final slice start.
num.slices<-(end.month-start.month+12*(end.year-start.year))/slice.size+1
if(slice.size==interpolate.size){
num.interpolate <- num.slices
}else{
num.interpolate<-slice.size*(num.slices-1)/interpolate.size+1
}
# First day of the first slice, GMT.
init.date<-as.POSIXlt(paste(start.year,start.month,"1",sep="-"), tz = "GMT")
#slice dates define the filtering of documents.
slice.start.dates<-as.POSIXlt(seq.POSIXt(init.date, by=paste(slice.size,"months"), length.out=num.slices))
# NOTE(review): run.mode is expected to be set by the per-dataset HV_Init.R
# script sourced before this file -- confirm it is always defined.
if((run.mode=="motion")){
#interpolate dates define the plotting, whether or not an interpolation has actually occurred
# hence they are offset to the centre of slices as well as (usually) interspersing the slice periods
interpolate.start.dates<-as.POSIXlt(seq.POSIXt(init.date, by=paste(interpolate.size,"months"), length.out=num.interpolate))
# Shift the POSIXlt month field by half a slice to centre points in slices.
interpolate.start.dates$mon<-interpolate.start.dates$mon+(slice.size/2)
}
##
## Main work done now
##
if(do.groups){
# a bunch of matrices to accumulate the grouped results while the following nest of 2 loops operates
gmat.template<-matrix(ncol=length(word.lists),nrow=num.slices)
gmat.or.slices.freq<-gmat.template
gmat.or.slices.docs<-gmat.template
gmat.or.slices.positive<-gmat.template
gmat.or.slices.negative<-gmat.template
gmat.or.slices.subjectivity<-gmat.template
}
# to keep the number of docs per slice (shown in HTML output)
slice.docs.cnt<-rep(0,num.slices)
titles<-gsub("\\."," ",names(word.lists))#lazy way to titles is to replace "." in the list element names - override if necessary
file.names<-names(word.lists[])
##
## Start to get results.
## The outer loop is over the 1..* lists of terms (term.lists in the HV_Init.R file). Separate viz for each
## The inner loop is over the years
##
for (i.run in 1:length(word.lists)){
run.title<-titles[i.run]#For presentation
run.name<-file.names[i.run]#for output file
run.words<-unlist(word.lists[i.run])
run.terms<-stemDocument(tolower(removePunctuation(run.words)))
#eliminate any words that reduce to the same stem
use<-match(unique(run.terms),run.terms)
if(length(use)<length(run.terms)){
run.words<-run.words[use]
run.terms<-run.terms[use]
print(paste("Eliminating words with same stem. Now using:",paste(run.words, collapse=",")))
}
#suppress groups if just 1 term
do.groups.run<-do.groups && (length(run.terms)>1)
##logging
while(sink.number()>0)
{sink()}
sink(file=paste(run.name," ",file.postfix,".log",sep=""), append=FALSE, type="output", split=TRUE)
#prepare 0-containing vector to receive the results. NB some of the terms may not appear in some/all slices
results.template<-rep(0,length(run.terms))
names(results.template)<-run.terms
#prep dataframes to receive the slice data
data.slices.freq<-data.frame()
data.slices.docs<-data.frame()
data.slices.positive<-data.frame()
data.slices.negative<-data.frame()
data.slices.subjectivity<-data.frame()
docs.used<-0
print(paste("Run:",run.name, " Terms:", paste(run.terms, collapse=", ")))
#Loop over the time slices, querying the database for each slice and for the user-designated search terms
for(slice in 1:num.slices){
#date range for this row
start.date<-slice.start.dates[slice]
end.date<-start.date
end.date$mon<-end.date$mon+slice.size
end.date<-as.POSIXlt(as.character(end.date))#otherwise $mon and $year are not changed right on year-crossing slices
print(paste("Period: ",start.date,"<= T <",end.date))
# get the indeces of the documents that come from the date range
#the peculiar "mday-1" is needed to trick the filter since the source data has a year number only
# q.start.date<-start.date
# q.start.date$mday<-q.start.date$mday-1
# q.end.date<-end.date
# q.end.date$mday<-q.end.date$mday-1
## SQL creation.
# NB: these fetch the "treated" text - i.e stopword removal and stemming etc already done
if(source.type=="a"){
sqlDateClause<-paste("year >= '",start.date$year+1900,"' AND year<'",end.date$year+1900,"'",sep="")
sqlIdClause<-paste("id in (SELECT docid from abstract_fts4 WHERE treated MATCH '",
paste(run.terms, collapse=" OR "),"')",sep="")
sql<-paste("SELECT origin, year||'-07-01' datestamp, pages, title, authors, treated, url, dblp_url, pos_score, neg_score, subj_score FROM abstract WHERE",sqlDateClause,"AND",sqlIdClause,sep=" ")
sqlSums<-paste("SELECT SUM(treated_words) terms, COUNT(1) docs FROM abstract WHERE",sqlDateClause)
}else if(source.type=="b"){
sqlDateClause<-paste("datestamp >= '",as.character(start.date),"' AND datestamp<'",as.character(end.date),"'",sep="")
sqlIdClause<-paste("id in (SELECT docid from blog_post_fts4 WHERE treated MATCH '",
paste(run.terms, collapse=" OR "),"')",sep="")
sql<-paste("SELECT origin, datestamp, title, authors, treated, url, pos_score, neg_score, subj_score FROM blog_post WHERE",sqlDateClause,"AND",sqlIdClause,sep=" ")
sqlSums<-paste("SELECT SUM(treated_words) terms, COUNT(1) docs FROM blog_post WHERE",sqlDateClause)
}
#
table<-NULL
if(use.sqlite){
#query, fetch all records to dataframe and clear resultset in one go
table<-dbGetQuery(db,sql)
#also extract the post-stopword (and other pre-processing) word count so that we can calculate frequencies
slice.totals<-dbGetQuery(db, sqlSums)
}
#store for Brew. This is repeated (pointlessly) for each i.run
slice.docs.cnt[slice]<-slice.totals$docs
print(paste(slice.totals$docs,"documents in period"))
docs.used<-docs.used+slice.totals$docs
#only need to build a corpus and DTM if there are >0 documents in the slice with at least 1 run.term appearing. Otherwise just need to fill in zeros
if(length(table[,1])>0){
# now read in a corpus, handling the metadata via mappings previously declared
corp<-Corpus(DataframeSource(table), readerControl=list(reader= readTabular(mapping=map)))
#and build the DTM for the slice
#NB1: no stopwords, stemming since this is already done in DB prep
#NB2: use the run.terms as a dictionary so the DTM has minimal terms
dtm.tf.slice<-DocumentTermMatrix(corp, control=list(stemming=FALSE, removePunctuation=FALSE, removeNumbers=FALSE, stopwords=FALSE, dictionary=run.terms))
dtm.bin.slice<-weightBin(dtm.tf.slice)
#compute some corpus and term statistics FOR INFORMATION
print("Slice Document Term Matrix, Term-Frequency")
print(dtm.tf.slice)
dtm.tf.sums<-col_sums(dtm.tf.slice)
#calculate term frequency (%) and document count ** for the terms aposite to the current run
#NB1: document count is normalised to **per month** for blogs and **per year** for conference abstracts
##make sure the term order is as in run.terms for consistency across all parts
reorder<-match(run.terms,Terms(dtm.tf.slice))
slice.freq<-100*col_sums(dtm.tf.slice)[reorder]/slice.totals$terms
print("Frequences (%):")
print(slice.freq)
if(source.type=="b"){
res<-col_sums(dtm.bin.slice)/slice.size
}else if(source.type=="a"){
res<-col_sums(dtm.bin.slice)*12/slice.size
}
slice.docs<-res[reorder]
print("Document count:")
print(slice.docs)
#calculate the "subjectivity" for each term according to the subjectivity of containing documents
#and simultaneiously fill out rows of the data to be visualised
# pull out the sentiment data
pos_score<-table[,"pos_score"]
neg_score<-table[,"neg_score"]
subj_score<-table[,"subj_score"]
sent.positive = vector()
sent.negative = vector()
subjectivity = vector()
for (nt in run.terms){
sent.positive[nt]<-mean(pos_score[as.numeric(Docs(dtm.tf.slice[as.matrix(dtm.tf.slice[,nt])>0,nt]))])
sent.negative[nt]<-mean(neg_score[as.numeric(Docs(dtm.tf.slice[as.matrix(dtm.tf.slice[,nt])>0,nt]))])
subjectivity[nt]<-mean(subj_score[as.numeric(Docs(dtm.tf.slice[as.matrix(dtm.tf.slice[,nt])>0,nt]))])
}
#get NaNs when terms not present since the Docs() selector above is then empty, which are a nuisance!
sent.positive[is.nan(sent.positive)]<-0.0
sent.negative[is.nan(sent.negative)]<-0.0
subjectivity[is.nan(subjectivity)]<-0.0
print("Subjectivity:")
print(subjectivity)
if(do.groups.run){
#the groups require special treatment
gmat.or.slices.freq[slice,i.run]<-sum(slice.freq)
gmat.or.slices.docs[slice,i.run]<-sum(slice.docs)
or.docs<-as.numeric(Docs(dtm.tf.slice)[row_sums(dtm.tf.slice)>0])
gmat.or.slices.positive[slice,i.run]<-mean(pos_score[or.docs])
gmat.or.slices.negative[slice,i.run]<-mean(neg_score[or.docs])
gmat.or.slices.subjectivity[slice,i.run]<-mean(subj_score[or.docs])
# next lines obsolete
# gmat.or.slices.positive[is.nan(gmat.or.slices.positive)]<-0.0
# gmat.or.slices.negative[is.nan(gmat.or.slices.negative)]<-0.0
# gmat.or.slices.subjectivity[is.nan(gmat.or.slices.subjectivity)]<-0.0
}
}else{
#no documents in slice contain terms
print("No documents in slice")
slice.freq<-results.template
slice.docs<-results.template
sent.positive<-results.template
sent.negative<-results.template
subjectivity<-results.template
if(do.groups.run){
#the groups require special treatment
gmat.or.slices.freq[slice,i.run]<-0.0
gmat.or.slices.docs[slice,i.run]<-0.0
gmat.or.slices.positive[slice,i.run]<-0.0
gmat.or.slices.negative[slice,i.run]<-0.0
gmat.or.slices.subjectivity[slice,i.run]<-0.0
}
}
#accumulate the current slice with the previous
data.slices.freq<-rbind(data.slices.freq,slice.freq)
data.slices.docs<-rbind(data.slices.docs,slice.docs)
data.slices.positive<-rbind(data.slices.positive,sent.positive)
data.slices.negative<-rbind(data.slices.negative,sent.negative)
data.slices.subjectivity<-rbind(data.slices.subjectivity,subjectivity)
}
# assemble the data into a convenient array for processing in a Brew template, interpolating if necessary
#row.count<-num.interpolate*length(run.terms)
#data.rows<-array(0,c(row.count,7))#this is destined for JSON in the Brew template
data.rows<-data.frame()
if((run.mode=="motion")){
#interpolate
if(slice.size > interpolate.size){
data.slices.freq<-sapply(data.slices.freq,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.docs<-sapply(data.slices.docs,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.positive<-sapply(data.slices.positive,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.negative<-sapply(data.slices.negative,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.subjectivity<-sapply(data.slices.subjectivity,
function(x) spline(x, method="natural", n=num.interpolate)$y)
# cancel out negative values, make 0.0 be a "hard floor"
data.slices.freq[data.slices.freq<0.0]<-0.0
data.slices.docs[data.slices.docs<0.0]<-0.0
data.slices.positive[data.slices.positive<0.0]<-0.0
data.slices.negative[data.slices.negative<0.0]<-0.0
data.slices.subjectivity[data.slices.subjectivity<0.0]<-0.0
}
#loop over terms to build a "denormalised" form of the data for the google chart code
for(t in 1:length(run.terms)){
data.rows<-rbind(data.rows, data.frame(rep(run.words[t],num.interpolate),
as.character(interpolate.start.dates),
data.slices.freq[,t],
data.slices.docs[,t],
data.slices.subjectivity[,t],
data.slices.positive[,t],
data.slices.negative[,t]))
}
doBrew(run.name)
}
if((run.mode=="simple")){
#loop over the terms in the run, plotting a chart according to the source data type
#there is a little frigging to add a plot for all words in the set "OR" grouped
#by adding the sum onto the dataframe as the last column, i.e. a pseudo-word
run.length<-length(run.terms)
words<-run.words
if(do.groups.run){
run.length<-length(run.terms)+1
words<-c(words,paste("Words about",run.title))
data.slices.freq<-cbind(data.slices.freq, gmat.or.slices.freq[,i.run])
data.slices.docs<-cbind(data.slices.docs, gmat.or.slices.docs[,i.run])
data.slices.positive<-cbind(data.slices.positive, gmat.or.slices.positive[,i.run])
}
sub.txt<-""
for(i.term in 1:run.length){
word<-words[i.term]
if(source.type=="a"){
xvals<-slice.start.dates$year+1900
xdates<-xvals
main.txt<-paste("\"",word,"\" in Conference Abstracts",sep="")
y.d.txt<-"Abstracts per Year"
}else{
xvals<-as.numeric(slice.start.dates)#needed in order to fit squared term
xdates<-slice.start.dates
main.txt<-paste("\"",word,"\" in Blog Posts",sep="")
y.d.txt<-"Posts per Month"
}
#tweaks to file name and titles depending whether or not this is the "group"
if(do.groups.run && (i.term == run.length)){
sub.txt<-paste("(shows any of ",paste(run.words,collapse=", "),")",sep="")
main.txt<-gsub("\"","",main.txt)#remove quotes to look better
outFile<-paste(run.name,"all",sep="-")
}else{
if(run.length==1){
outFile<-run.name
}else{
outFile<-paste(run.name,word,sep="-")
}
}
outFile<-paste(outFile, file.postfix)
y.f.txt<-"Word Frequency (%)"
#set highlighting for positive sentiment
hi1<-data.slices.positive[,i.term]>0.08
hi2<-data.slices.positive[,i.term]>0.1
#frequency
yvals<-data.slices.freq[,i.term]
do.plot(paste(outFile,"freq.png"),xdates,xvals,yvals,main.txt,sub.txt,y.f.txt, hi1, hi2)
#docs
yvals<-data.slices.docs[,i.term]
do.plot(paste(outFile,"docs.png"),xdates,xvals,yvals,main.txt,sub.txt,y.d.txt, hi1, hi2)
}
}
#stop logging
sink()
}
##
## Groups special treatment
# Builds the combined "Groups of Terms" motion-chart page from the per-group
# accumulator matrices (gmat.or.slices.*) filled in during the main loop,
# then releases the database connection.
if(do.groups && ((run.mode=="motion"))){
#interpolate
# smooth each group's per-slice series up to num.interpolate points (natural spline)
if(slice.size > interpolate.size){
gmat.or.slices.freq<-apply(gmat.or.slices.freq, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.docs<-apply(gmat.or.slices.docs, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.positive<-apply(gmat.or.slices.positive, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.negative<-apply(gmat.or.slices.negative, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.subjectivity<-apply(gmat.or.slices.subjectivity, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
}
# denormalised rows (one per group per interpolated date) destined for the Brew template
data.rows<-data.frame()
#do.groups.pretty<-gsub("\\."," ",names(word.lists)) #prettify
for(g in 1:length(word.lists)){
data.rows<-rbind(data.rows, data.frame(rep(titles[g],num.interpolate),
as.character(interpolate.start.dates),
gmat.or.slices.freq[,g],
gmat.or.slices.docs[,g],
gmat.or.slices.subjectivity[,g],
gmat.or.slices.positive[,g],
gmat.or.slices.negative[,g]))
}
run.title<-"Groups of Terms"#used in brew template... messy coding :-(
doBrew("Groups", isGroup=TRUE)
}
# properly terminate database use
# release the SQLite connection opened near the top of the script
if(use.sqlite){
dbDisconnect(db)
}
| /History Visualiser/HistoryVis.R | permissive | emddarn/Text-Mining-Weak-Signals | R | false | false | 25,798 | r | ## ***Made available using the The MIT License (MIT)***
# Copyright (c) 2012, Adam Cooper
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
## ************ end licence ***************
## ----------------------------------------
## This code builds a Google Charts "bubble chart" to track (stemmed) terms over several years
## shows: term freq, document freq, positive and negative sentiment, "subjectivity" (pos or neg sentiment)
## ----------------------------------------
library("tm")
library("Snowball")
library("slam")
library("brew")
library("RSQLite")
#library("rjson")
##
## NB {data_set_name}/HV_Init.R should be run first to establish the run parameters
##
#source(paste(base.dir,"commonFunctions.R",sep="/"))
# Filepaths
# NB(review): {data_set_name}/HV_Init.R must be run first; it appears to supply
# set.name, source.type, run.mode, word.lists, do.groups etc. used below -- verify.
base.dir<-"/home/arc1/R Projects/Text Mining Weak Signals"
source.dir<-paste(base.dir,"Source Data",sep="/")
output.dir<-paste("/home/arc1/R Projects/Text Mining Weak Signals Output/History Visualiser",set.name,sep="/")
brew.dir<-paste(base.dir,"History Visualiser",sep="/")
# base URL under which the brewed HTML/gadget files will be published
web.page.base<-paste("http://arc12.github.com/Text-Mining-Weak-Signals-Output/History Visualiser",set.name, sep="/")
dir.create(output.dir, showWarnings=FALSE)
setwd(output.dir)
# output filename suffix distinguishes the two corpora:
# source.type "a" = conference abstracts, "b" = blog posts
if(source.type=="a"){
file.postfix<-"conf"
}else{
file.postfix<-"blog"
}
## SET UP DATABASE
sqlite.filename<-"TMWS Data A.sqlite"
use.sqlite<-!is.na(sqlite.filename)
if(use.sqlite){
# instantiate the SQLite driver in the R process
sqlite<- dbDriver("SQLite")
# open sqlite connection. db is a "connection"
db<- dbConnect(sqlite, dbname=paste(source.dir,sqlite.filename,sep="/"))
#summary(db)
}
##
## Choose some run parameters depending on the source type
## start and end dates are largely plucked from database limits
##
today<-as.POSIXlt(Sys.Date(), tz = "GMT")
if(source.type=="a"){
slice.size<-12 #how many months in a time slice used in the analysis.
interpolate.size<-3 #number of months between interpolated points in the output; 1 "row" is created for each interval. No interpolation if slice.size = interpolate.size
# abstracts only carry a year, so slice bounds come from min/max year
minmax.years<-dbGetQuery(db,"SELECT min(year) min, max(year) max from abstract")[1,]
start.year<-max(2006, as.numeric(minmax.years$min))
start.month<-1 #default = 1
end.year<-as.numeric(minmax.years$max)
end.month<-1 #default = 1. NB this determines the start of the last slice
}else if(source.type=="b"){
slice.size<-2 #how many months in a time slice used in the analysis.
interpolate.size<-1 #number of months between interpolated points in the output; 1 "row" is created for each interval. No interpolation if slice.size = interpolate.size
minmax.dates<-dbGetQuery(db,"SELECT min(datestamp) min, max(datestamp) max from blog_post")[1,]
min.date<-as.POSIXlt(minmax.dates$min, tz="GMT")
max.date<-as.POSIXlt(minmax.dates$max, tz="GMT")
start.year<- min.date$year+1900
if(!is.na(override.start.year)){start.year<-override.start.year}
start.month<-min.date$mon+1#generally set to 1
#since we want the end.date to actually be the START of the last slice and this must be a whole number of "slice.size" slices, there is some fiddling to do
m.diff<-12*(max.date$year+1900-start.year)+max.date$mon-min.date$mon-1
if(max.date$mday<28){m.diff<-m.diff-1}#remove months that are not [almost] complete
end.date<-as.POSIXlt(paste(start.year,start.month,"1",sep="-"), tz = "GMT")
end.date$mon<- end.date$mon+floor(m.diff/slice.size)*slice.size
end.date<-as.POSIXlt(as.character(end.date))#otherwise $mon and $year are not changed right
end.year<-end.date$year+1900
end.month<-end.date$mon+1
}else{
stop(paste("Unknown source type",source.type))
}
#create a mapping from datatable column names to PlainTextDocument attribute names
#"Keywords" and after are user-defined "localmetadata" properties while the rest are standard tm package document metadata fields
if(source.type=="a"){
map<-list(Content="treated", Heading="title", Author="authors", DateTimeStamp="datestamp", Origin="origin", URL="url", DBLP_URL="dblp_url", Positive="pos_score", Negative="neg_score", Subjectivity="subj_score")
}else{
map<-list(Content="treated", Heading="title", Author="authors", DateTimeStamp="datestamp", Origin="origin",URL="url", Positive="pos_score", Negative="neg_score", Subjectivity="subj_score")
}
## CONVENIENT TO USE FUNCTION FOR BREWING
# Render one run's Google Chart HTML page plus the matching gadget XML via the
# shared Brew template.
#   page.name - base name for the output files (a run name, or "Groups")
#   isGroup   - TRUE when brewing the combined "Groups of Terms" page
# NOTE(review): brew(run=TRUE) presumably evaluates the template in this
# function's frame, so the locals below (html.filename, web.page.url,
# gadget.filename, gadget.url.encoded, isGadget) and the isGroup argument look
# like part of the template's interface -- do not rename without checking
# "HV Brew Template.html".
doBrew<-function(page.name, isGroup=FALSE){
#Create the HTML/JS for the Google Chart using a Brew Template
html.filename<-paste(page.name," ",file.postfix,".html",sep="")
web.page.url<-paste(web.page.base,html.filename,sep="/")
gadget.filename<-paste(page.name,file.postfix,"gadget.xml",sep=" ")
gadget.url.encoded<-URLencode(paste(web.page.base,gadget.filename,sep="/"), reserved=TRUE)
# brew the same template twice: once as a plain page, once as a gadget
isGadget=FALSE
brew(file=paste(brew.dir,"HV Brew Template.html",sep="/"),
output=html.filename,run=TRUE)
isGadget=TRUE
brew(file=paste(brew.dir,"HV Brew Template.html",sep="/"),
output=gadget.filename,run=TRUE)
}
# adapted from confidence.band (Derek Young and David Hunter)
# based on Ellipses, by J. Fox and G. Monette, from
# car package
# Draw confidence band(s) for a simple linear regression onto the currently
# active plot (the original scatter/abline calls are deliberately commented
# out; only the band curves are drawn here).
#   model    - an lm fit with an intercept and exactly one predictor
#   levels   - confidence level(s); one pair of band curves is drawn per level
#   segments - number of segments approximating each half of the band
#   col.points/col.line/lty.line - retained for interface compatibility;
#                                  unused while the plot/abline calls are commented out
#   col.bands/lty.bands - colour/line type of the band curves
# Called for its drawing side effect; a plot must already be open.
confidence.band = function(model, levels=0.95, segments=50, col.points=palette()[1],
col.line=palette()[1], col.bands=palette()[2],
lty.line=1, lty.bands=2, ...) {
# guard: exactly one predictor plus an intercept
if (attr(model$terms,"intercept")!=1 || length(model$coef) !=2) {
# fixed typo in the error message ("condifence.bands" -> "confidence.band")
stop(paste("confidence.band only works for simple linear regression\n",
"with one predictor and an intercept"))
}
# plot(model$model[,2:1], col=col.points, ...)
# abline(model, col=col.line, lty=lty.line, lwd=2)
angles=(0:segments)*pi/segments
halfcircle = cbind(cos(angles), sin(angles))
# Cholesky factor of the coefficient covariance shapes the confidence ellipse
chol.shape = chol(vcov(model))
slopes = (halfcircle %*% chol.shape)[,2]
angles = angles+angles[which.max(slopes)]
halfcircle = cbind(cos(angles), sin(angles))
center = model$coef
# band radius from the F distribution (Working-Hotelling style)
radius = sqrt(2*qf(levels, 2, df.residual(model)))
for (r in radius) {
for (i in 1:2) {
halfcircle = -halfcircle
ellipse = sweep(r*(halfcircle %*% chol.shape), 2, center, "+")
int = ellipse[,1]
slope = ellipse[,2]
# convert the (intercept, slope) envelope into x/y band coordinates
x = -diff(int)/diff(slope)
y = int[-1]+slope[-1]*x
lines(x, y, lwd=2, lty=lty.bands, col=col.bands)
}
}
}
# convenience for plotting - incl linear fit and indication of confidence band if there is >= an approx fit
# Plot one term's time series to a PNG file, overlaying a linear fit (plus
# confidence band) when the fit is at least approximate (R^2 > 0.3).
#   outFile      - PNG file name to write
#   xdates       - x-axis values for plotting (dates, or years for abstracts)
#   xvals        - numeric x values used for the lm fit
#   yvals        - series to plot; leading zeros (before the term first
#                  appears) are blanked to NA so they do not bias the fit
#   main.txt/sub.txt/y.txt - title, subtitle and y-axis label
#   hiPts1/hiPts2 - selectors into x/y for two highlight levels
#                   (open / filled red circles)
do.plot<-function(outFile, xdates,xvals,yvals,main.txt,sub.txt,y.txt, hiPts1=NULL, hiPts2=NULL){
#set any leading zeros to NA in order to omit them from plot and linear model so that we fit the line after the term is first seen
first.non.zero<-match(TRUE,yvals>0)
#BUGFIX: for an all-zero (or all-NA) series match() returns NA and the
#unguarded "if(first.non.zero>1)" aborted with "missing value where
#TRUE/FALSE needed", leaving the png device open. Skip trimming instead.
if(!is.na(first.non.zero) && first.non.zero>1){
yvals[1:(first.non.zero-1)]<-NA
}
png(file=outFile, width=1000, height=1000,pointsize=12, res=150)
plot(xdates, yvals, main=main.txt, sub=sub.txt, xlab="", ylab=y.txt, type="b")
# only attempt a linear fit when there are >2 non-zero points
if(sum(yvals>0, na.rm=TRUE)>2){
model<-lm(yvals ~ xvals, na.action=na.omit)# +I(xvals^2))
r.squared<-summary(model)$r.squared
if(r.squared>0.3){
yy <- model$coef %*% rbind(1,xvals)#,xvals^2)
confidence.band(model)
lines(xvals,yy,lwd=2,col=3)
}
}
#two levels of highlighting - intended for positive sentiment.
#the arguments hiPts are indexes into the x and y vectors
if(length(hiPts1)>0){
points(xdates[hiPts1],yvals[hiPts1], col="red")#open red circles
}
if(length(hiPts2)>0){
points(xdates[hiPts2],yvals[hiPts2],pch=19, col="red")#filled red circles
}
ad<-dev.off()
}
##
## Compute the time-slice operation parameters from the "init" values
##
# number of analysis slices (inclusive of both end points)
num.slices<-(end.month-start.month+12*(end.year-start.year))/slice.size+1
# number of output rows after (optional) spline interpolation
if(slice.size==interpolate.size){
num.interpolate <- num.slices
}else{
num.interpolate<-slice.size*(num.slices-1)/interpolate.size+1
}
init.date<-as.POSIXlt(paste(start.year,start.month,"1",sep="-"), tz = "GMT")
#slice dates define the filtering of documents.
slice.start.dates<-as.POSIXlt(seq.POSIXt(init.date, by=paste(slice.size,"months"), length.out=num.slices))
if((run.mode=="motion")){
#interpolate dates define the plotting, whether or not an interpolation has actually occurred
# hence they are offset to the centre of slices as well as (usually) interspersing the slice periods
interpolate.start.dates<-as.POSIXlt(seq.POSIXt(init.date, by=paste(interpolate.size,"months"), length.out=num.interpolate))
interpolate.start.dates$mon<-interpolate.start.dates$mon+(slice.size/2)
}
##
## Main work done now
##
if(do.groups){
# a bunch of matrices to accumulate the grouped results while the following nest of 2 loops operates
# one row per time slice, one column per word list
gmat.template<-matrix(ncol=length(word.lists),nrow=num.slices)
gmat.or.slices.freq<-gmat.template
gmat.or.slices.docs<-gmat.template
gmat.or.slices.positive<-gmat.template
gmat.or.slices.negative<-gmat.template
gmat.or.slices.subjectivity<-gmat.template
}
# to keep the number of docs per slice (shown in HTML output)
slice.docs.cnt<-rep(0,num.slices)
titles<-gsub("\\."," ",names(word.lists))#lazy way to titles is to replace "." in the list element names - override if necessary
file.names<-names(word.lists[])
##
## Start to get results.
## The outer loop is over the 1..* lists of terms (term.lists in the HV_Init.R file). Separate viz for each
## The inner loop is over the years
##
# Main processing. For each word list: stem the terms, query the database per
# time slice, build a slice DTM, derive term frequency / document count /
# sentiment statistics, then emit either a motion-chart page (brew) or simple
# PNG plots. Code left byte-identical; review comments only.
for (i.run in 1:length(word.lists)){
run.title<-titles[i.run]#For presentation
run.name<-file.names[i.run]#for output file
run.words<-unlist(word.lists[i.run])
run.terms<-stemDocument(tolower(removePunctuation(run.words)))
#eliminate any words that reduce to the same stem
use<-match(unique(run.terms),run.terms)
if(length(use)<length(run.terms)){
run.words<-run.words[use]
run.terms<-run.terms[use]
print(paste("Eliminating words with same stem. Now using:",paste(run.words, collapse=",")))
}
#suppress groups if just 1 term
do.groups.run<-do.groups && (length(run.terms)>1)
##logging - close any sinks left open (e.g. from an aborted run), then log this run
while(sink.number()>0)
{sink()}
sink(file=paste(run.name," ",file.postfix,".log",sep=""), append=FALSE, type="output", split=TRUE)
#prepare 0-containing vector to receive the results. NB some of the terms may not appear in some/all slices
results.template<-rep(0,length(run.terms))
names(results.template)<-run.terms
#prep dataframes to receive the slice data (one row appended per slice)
data.slices.freq<-data.frame()
data.slices.docs<-data.frame()
data.slices.positive<-data.frame()
data.slices.negative<-data.frame()
data.slices.subjectivity<-data.frame()
docs.used<-0
print(paste("Run:",run.name, " Terms:", paste(run.terms, collapse=", ")))
#Loop over the time slices, querying the database for each slice and for the user-designated search terms
for(slice in 1:num.slices){
#date range for this row
start.date<-slice.start.dates[slice]
end.date<-start.date
end.date$mon<-end.date$mon+slice.size
end.date<-as.POSIXlt(as.character(end.date))#otherwise $mon and $year are not changed right on year-crossing slices
print(paste("Period: ",start.date,"<= T <",end.date))
# get the indices of the documents that come from the date range
#the peculiar "mday-1" is needed to trick the filter since the source data has a year number only
# q.start.date<-start.date
# q.start.date$mday<-q.start.date$mday-1
# q.end.date<-end.date
# q.end.date$mday<-q.end.date$mday-1
## SQL creation.
# NB: these fetch the "treated" text - i.e stopword removal and stemming etc already done
if(source.type=="a"){
sqlDateClause<-paste("year >= '",start.date$year+1900,"' AND year<'",end.date$year+1900,"'",sep="")
sqlIdClause<-paste("id in (SELECT docid from abstract_fts4 WHERE treated MATCH '",
paste(run.terms, collapse=" OR "),"')",sep="")
sql<-paste("SELECT origin, year||'-07-01' datestamp, pages, title, authors, treated, url, dblp_url, pos_score, neg_score, subj_score FROM abstract WHERE",sqlDateClause,"AND",sqlIdClause,sep=" ")
sqlSums<-paste("SELECT SUM(treated_words) terms, COUNT(1) docs FROM abstract WHERE",sqlDateClause)
}else if(source.type=="b"){
sqlDateClause<-paste("datestamp >= '",as.character(start.date),"' AND datestamp<'",as.character(end.date),"'",sep="")
sqlIdClause<-paste("id in (SELECT docid from blog_post_fts4 WHERE treated MATCH '",
paste(run.terms, collapse=" OR "),"')",sep="")
sql<-paste("SELECT origin, datestamp, title, authors, treated, url, pos_score, neg_score, subj_score FROM blog_post WHERE",sqlDateClause,"AND",sqlIdClause,sep=" ")
sqlSums<-paste("SELECT SUM(treated_words) terms, COUNT(1) docs FROM blog_post WHERE",sqlDateClause)
}
#
table<-NULL
if(use.sqlite){
#query, fetch all records to dataframe and clear resultset in one go
table<-dbGetQuery(db,sql)
#also extract the post-stopword (and other pre-processing) word count so that we can calculate frequencies
slice.totals<-dbGetQuery(db, sqlSums)
}
#store for Brew. This is repeated (pointlessly) for each i.run
slice.docs.cnt[slice]<-slice.totals$docs
print(paste(slice.totals$docs,"documents in period"))
docs.used<-docs.used+slice.totals$docs
#only need to build a corpus and DTM if there are >0 documents in the slice with at least 1 run.term appearing. Otherwise just need to fill in zeros
if(length(table[,1])>0){
# now read in a corpus, handling the metadata via mappings previously declared
corp<-Corpus(DataframeSource(table), readerControl=list(reader= readTabular(mapping=map)))
#and build the DTM for the slice
#NB1: no stopwords, stemming since this is already done in DB prep
#NB2: use the run.terms as a dictionary so the DTM has minimal terms
dtm.tf.slice<-DocumentTermMatrix(corp, control=list(stemming=FALSE, removePunctuation=FALSE, removeNumbers=FALSE, stopwords=FALSE, dictionary=run.terms))
dtm.bin.slice<-weightBin(dtm.tf.slice)
#compute some corpus and term statistics FOR INFORMATION
print("Slice Document Term Matrix, Term-Frequency")
print(dtm.tf.slice)
dtm.tf.sums<-col_sums(dtm.tf.slice)
#calculate term frequency (%) and document count ** for the terms apposite to the current run
#NB1: document count is normalised to **per month** for blogs and **per year** for conference abstracts
##make sure the term order is as in run.terms for consistency across all parts
reorder<-match(run.terms,Terms(dtm.tf.slice))
slice.freq<-100*col_sums(dtm.tf.slice)[reorder]/slice.totals$terms
print("Frequences (%):")
print(slice.freq)
if(source.type=="b"){
res<-col_sums(dtm.bin.slice)/slice.size
}else if(source.type=="a"){
res<-col_sums(dtm.bin.slice)*12/slice.size
}
slice.docs<-res[reorder]
print("Document count:")
print(slice.docs)
#calculate the "subjectivity" for each term according to the subjectivity of containing documents
#and simultaneously fill out rows of the data to be visualised
# pull out the sentiment data
pos_score<-table[,"pos_score"]
neg_score<-table[,"neg_score"]
subj_score<-table[,"subj_score"]
sent.positive = vector()
sent.negative = vector()
subjectivity = vector()
# mean sentiment over only those documents in which each term appears
for (nt in run.terms){
sent.positive[nt]<-mean(pos_score[as.numeric(Docs(dtm.tf.slice[as.matrix(dtm.tf.slice[,nt])>0,nt]))])
sent.negative[nt]<-mean(neg_score[as.numeric(Docs(dtm.tf.slice[as.matrix(dtm.tf.slice[,nt])>0,nt]))])
subjectivity[nt]<-mean(subj_score[as.numeric(Docs(dtm.tf.slice[as.matrix(dtm.tf.slice[,nt])>0,nt]))])
}
#get NaNs when terms not present since the Docs() selector above is then empty, which are a nuisance!
sent.positive[is.nan(sent.positive)]<-0.0
sent.negative[is.nan(sent.negative)]<-0.0
subjectivity[is.nan(subjectivity)]<-0.0
print("Subjectivity:")
print(subjectivity)
if(do.groups.run){
#the groups require special treatment
gmat.or.slices.freq[slice,i.run]<-sum(slice.freq)
gmat.or.slices.docs[slice,i.run]<-sum(slice.docs)
# documents matching ANY run term (OR semantics)
or.docs<-as.numeric(Docs(dtm.tf.slice)[row_sums(dtm.tf.slice)>0])
gmat.or.slices.positive[slice,i.run]<-mean(pos_score[or.docs])
gmat.or.slices.negative[slice,i.run]<-mean(neg_score[or.docs])
gmat.or.slices.subjectivity[slice,i.run]<-mean(subj_score[or.docs])
# next lines obsolete
# gmat.or.slices.positive[is.nan(gmat.or.slices.positive)]<-0.0
# gmat.or.slices.negative[is.nan(gmat.or.slices.negative)]<-0.0
# gmat.or.slices.subjectivity[is.nan(gmat.or.slices.subjectivity)]<-0.0
}
}else{
#no documents in slice contain terms - fill this slice with zeros
print("No documents in slice")
slice.freq<-results.template
slice.docs<-results.template
sent.positive<-results.template
sent.negative<-results.template
subjectivity<-results.template
if(do.groups.run){
#the groups require special treatment
gmat.or.slices.freq[slice,i.run]<-0.0
gmat.or.slices.docs[slice,i.run]<-0.0
gmat.or.slices.positive[slice,i.run]<-0.0
gmat.or.slices.negative[slice,i.run]<-0.0
gmat.or.slices.subjectivity[slice,i.run]<-0.0
}
}
#accumulate the current slice with the previous
data.slices.freq<-rbind(data.slices.freq,slice.freq)
data.slices.docs<-rbind(data.slices.docs,slice.docs)
data.slices.positive<-rbind(data.slices.positive,sent.positive)
data.slices.negative<-rbind(data.slices.negative,sent.negative)
data.slices.subjectivity<-rbind(data.slices.subjectivity,subjectivity)
}
# assemble the data into a convenient array for processing in a Brew template, interpolating if necessary
#row.count<-num.interpolate*length(run.terms)
#data.rows<-array(0,c(row.count,7))#this is destined for JSON in the Brew template
data.rows<-data.frame()
if((run.mode=="motion")){
#interpolate each statistic column-wise up to num.interpolate points
if(slice.size > interpolate.size){
data.slices.freq<-sapply(data.slices.freq,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.docs<-sapply(data.slices.docs,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.positive<-sapply(data.slices.positive,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.negative<-sapply(data.slices.negative,
function(x) spline(x, method="natural", n=num.interpolate)$y)
data.slices.subjectivity<-sapply(data.slices.subjectivity,
function(x) spline(x, method="natural", n=num.interpolate)$y)
# cancel out negative values, make 0.0 be a "hard floor"
data.slices.freq[data.slices.freq<0.0]<-0.0
data.slices.docs[data.slices.docs<0.0]<-0.0
data.slices.positive[data.slices.positive<0.0]<-0.0
data.slices.negative[data.slices.negative<0.0]<-0.0
data.slices.subjectivity[data.slices.subjectivity<0.0]<-0.0
}
#loop over terms to build a "denormalised" form of the data for the google chart code
for(t in 1:length(run.terms)){
data.rows<-rbind(data.rows, data.frame(rep(run.words[t],num.interpolate),
as.character(interpolate.start.dates),
data.slices.freq[,t],
data.slices.docs[,t],
data.slices.subjectivity[,t],
data.slices.positive[,t],
data.slices.negative[,t]))
}
doBrew(run.name)
}
if((run.mode=="simple")){
#loop over the terms in the run, plotting a chart according to the source data type
#there is a little frigging to add a plot for all words in the set "OR" grouped
#by adding the sum onto the dataframe as the last column, i.e. a pseudo-word
run.length<-length(run.terms)
words<-run.words
if(do.groups.run){
run.length<-length(run.terms)+1
words<-c(words,paste("Words about",run.title))
data.slices.freq<-cbind(data.slices.freq, gmat.or.slices.freq[,i.run])
data.slices.docs<-cbind(data.slices.docs, gmat.or.slices.docs[,i.run])
data.slices.positive<-cbind(data.slices.positive, gmat.or.slices.positive[,i.run])
}
sub.txt<-""
for(i.term in 1:run.length){
word<-words[i.term]
if(source.type=="a"){
xvals<-slice.start.dates$year+1900
xdates<-xvals
main.txt<-paste("\"",word,"\" in Conference Abstracts",sep="")
y.d.txt<-"Abstracts per Year"
}else{
xvals<-as.numeric(slice.start.dates)#needed in order to fit squared term
xdates<-slice.start.dates
main.txt<-paste("\"",word,"\" in Blog Posts",sep="")
y.d.txt<-"Posts per Month"
}
#tweaks to file name and titles depending whether or not this is the "group"
if(do.groups.run && (i.term == run.length)){
sub.txt<-paste("(shows any of ",paste(run.words,collapse=", "),")",sep="")
main.txt<-gsub("\"","",main.txt)#remove quotes to look better
outFile<-paste(run.name,"all",sep="-")
}else{
if(run.length==1){
outFile<-run.name
}else{
outFile<-paste(run.name,word,sep="-")
}
}
outFile<-paste(outFile, file.postfix)
y.f.txt<-"Word Frequency (%)"
#set highlighting for positive sentiment
hi1<-data.slices.positive[,i.term]>0.08
hi2<-data.slices.positive[,i.term]>0.1
#frequency
yvals<-data.slices.freq[,i.term]
do.plot(paste(outFile,"freq.png"),xdates,xvals,yvals,main.txt,sub.txt,y.f.txt, hi1, hi2)
#docs
yvals<-data.slices.docs[,i.term]
do.plot(paste(outFile,"docs.png"),xdates,xvals,yvals,main.txt,sub.txt,y.d.txt, hi1, hi2)
}
}
#stop logging
sink()
}
##
## Groups special treatment
# Build and brew the combined "Groups of Terms" motion-chart page from the
# per-group accumulator matrices filled in during the main loop above.
if(do.groups && ((run.mode=="motion"))){
#interpolate - smooth each group's per-slice series (natural spline)
if(slice.size > interpolate.size){
gmat.or.slices.freq<-apply(gmat.or.slices.freq, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.docs<-apply(gmat.or.slices.docs, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.positive<-apply(gmat.or.slices.positive, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.negative<-apply(gmat.or.slices.negative, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
gmat.or.slices.subjectivity<-apply(gmat.or.slices.subjectivity, MARGIN=2,
function(x) spline(x, method="natural", n=num.interpolate)$y)
}
# denormalised rows (one per group per interpolated date) for the Brew template
data.rows<-data.frame()
#do.groups.pretty<-gsub("\\."," ",names(word.lists)) #prettify
for(g in 1:length(word.lists)){
data.rows<-rbind(data.rows, data.frame(rep(titles[g],num.interpolate),
as.character(interpolate.start.dates),
gmat.or.slices.freq[,g],
gmat.or.slices.docs[,g],
gmat.or.slices.subjectivity[,g],
gmat.or.slices.positive[,g],
gmat.or.slices.negative[,g]))
}
run.title<-"Groups of Terms"#used in brew template... messy coding :-(
doBrew("Groups", isGroup=TRUE)
}
# properly terminate database use
if(use.sqlite){
dbDisconnect(db)
}
|
\name{Jfuns}
\alias{Jdet}
\alias{Jtr}
\alias{COOKD}
\alias{COVRATIO}
\alias{DFFITS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
General Classes of Influence Measures
}
\description{
These functions implement the general classes of influence measures
for multivariate regression models defined in Barrett and Ling (1992),
Eqn 2.3, 2.4, as shown in their Table 1.
They are defined in terms of the submatrices for a deleted index subset \eqn{I}
\deqn{H_I = X_I (X^T X)^{-1} X_I^T}
\deqn{Q_I = E_I (E^T E)^{-1} E_I^T}
corresponding to the hat and residual matrices in univariate models.
For subset size \eqn{m = 1} these evaluate to scalar equivalents of
hat values and studentized residuals.
For subset size \eqn{m > 1} these are \eqn{m \times m} matrices and
functions in the \eqn{J^{det}} class use \eqn{|H_I|} and \eqn{|Q_I|},
while those in the \eqn{J^{tr}} class use \eqn{tr(H_I)} and \eqn{tr(Q_I)}.
The functions \code{COOKD}, \code{COVRATIO}, and \code{DFFITS} implement
some of the standard influence measures in these terms for the general
cases of multivariate linear models and deletion of subsets of size
\code{m>1}, but they are only included here for experimental purposes.
}
\usage{
Jdet(H, Q, a, b, f)
Jtr(H, Q, a, b, f)
COOKD(H, Q, n, p, r, m)
COVRATIO(H, Q, n, p, r, m)
DFFITS(H, Q, n, p, r, m)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{H}{
a scalar or \eqn{m \times m} matrix giving the hat values for subset \eqn{I}
}
\item{Q}{
a scalar or \eqn{m \times m} matrix giving the residual values for subset \eqn{I}
}
\item{a}{
the \eqn{a} parameter for the \eqn{J^{det}} and \eqn{J^{tr}} classes
}
\item{b}{
the \eqn{b} parameter for the \eqn{J^{det}} and \eqn{J^{tr}} classes
}
\item{f}{
scaling factor for the \eqn{J^{det}} and \eqn{J^{tr}} classes
}
\item{n}{
sample size
}
\item{p}{
number of predictor variables
}
\item{r}{
number of response variables
}
\item{m}{
deletion subset size
}
}
\details{
These functions are purely experimental and not intended to be used directly.
However, they may be useful for defining influence measures other than those
currently implemented here.
}
\value{
The scalar result of the computation.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Barrett, B. E. and Ling, R. F. (1992).
General Classes of Influence Measures for Multivariate Regression.
\emph{Journal of the American Statistical Association}, \bold{87}(417), 184-191.
}
\author{
Michael Friendly
}
%\note{
%%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
%\examples{
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{array}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/Jfuns.Rd | no_license | guhjy/mvinfluence | R | false | false | 3,076 | rd | \name{Jfuns}
\alias{Jdet}
\alias{Jtr}
\alias{COOKD}
\alias{COVRATIO}
\alias{DFFITS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
General Classes of Influence Measures
}
\description{
These functions implement the general classes of influence measures
for multivariate regression models defined in Barrett and Ling (1992),
Eqn 2.3, 2.4, as shown in their Table 1.
They are defined in terms of the submatrices for a deleted index subset \eqn{I}
\deqn{H_I = X_I (X^T X)^{-1} X_I^T}
\deqn{Q_I = E_I (E^T E)^{-1} E_I^T}
corresponding to the hat and residual matrices in univariate models.
For subset size \eqn{m = 1} these evaluate to scalar equivalents of
hat values and studentized residuals.
For subset size \eqn{m > 1} these are \eqn{m \times m} matrices and
functions in the \eqn{J^{det}} class use \eqn{|H_I|} and \eqn{|Q_I|},
while those in the \eqn{J^{tr}} class use \eqn{tr(H_I)} and \eqn{tr(Q_I)}.
The functions \code{COOKD}, \code{COVRATIO}, and \code{DFFITS} implement
some of the standard influence measures in these terms for the general
cases of multivariate linear models and deletion of subsets of size
\code{m>1}, but they are only included here for experimental purposes.
}
\usage{
Jdet(H, Q, a, b, f)
Jtr(H, Q, a, b, f)
COOKD(H, Q, n, p, r, m)
COVRATIO(H, Q, n, p, r, m)
DFFITS(H, Q, n, p, r, m)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{H}{
a scalar or \eqn{m \times m} matrix giving the hat values for subset \eqn{I}
}
\item{Q}{
a scalar or \eqn{m \times m} matrix giving the residual values for subset \eqn{I}
}
\item{a}{
the \eqn{a} parameter for the \eqn{J^{det}} and \eqn{J^{tr}} classes
}
\item{b}{
the \eqn{b} parameter for the \eqn{J^{det}} and \eqn{J^{tr}} classes
}
\item{f}{
scaling factor for the \eqn{J^{det}} and \eqn{J^{tr}} classes
}
\item{n}{
sample size
}
\item{p}{
number of predictor variables
}
\item{r}{
number of response variables
}
\item{m}{
deletion subset size
}
}
\details{
These functions are purely experimental and not intended to be used directly.
However, they may be useful for defining influence measures other than those
currently implemented here.
}
\value{
The scalar result of the computation.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
Barrett, B. E. and Ling, R. F. (1992).
General Classes of Influence Measures for Multivariate Regression.
\emph{Journal of the American Statistical Association}, \bold{87}(417), 184-191.
}
\author{
Michael Friendly
}
%\note{
%%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
%\examples{
%}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{array}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#' @name table_format
#' @export
#' @author Walmes Zeviani, \email{walmes@@ufpr.br}.
#' @title Round Numeric Columns of a Table
#' @description This function returns a table with numeric columns
#'     rounded according to the specified number of decimal
#'     digits. Numeric columns are converted to character after rounding.
#' @param table a data frame.
#' @param digits a vector of non-negative integer values with length
#'     equal to the number of columns of \code{table}. These numbers
#'     represent the number of digits used to round numeric values. The
#'     number is ignored when the corresponding column isn't
#'     numeric. \code{NA} and negative values are ignored.
#' @return a table with columns rounded to the number of decimal digits
#'     specified. All visited columns are converted to character, so
#'     numeric functions can no longer be applied directly to them.
#'     Because of this, it is not recommended to assign the result of
#'     the function to the object used as the \code{table} argument.
#' @seealso \code{\link{matrix2html}()}.
#' @examples
#'
#' x <- table_format(head(rock), digits = c(1, 2, 3, 4))
#' x
#' str(x)
#'
#' x <- table_format(head(iris), c(2, 3, 2, 3, NA))
#' x
#' str(x)
#'
#' x <- table_format(head(rock), c(-1, NA, 3, 4))
#' x
#' str(x)
#'
table_format <- function(table, digits) {
    # Fail fast on malformed input.
    if (!is.data.frame(table)) {
        stop("`table` must be a data.frame.")
    }
    if (ncol(table) != length(digits)) {
        stop(paste0("Length of `digits` is not equal ",
                    "the `table` number of columns."))
    }
    tb <- table
    # which() drops NA comparisons, so NA digits skip the column;
    # negative digits are filtered out by the comparison itself.
    for (i in which(digits >= 0)) {
        x <- tb[, i]
        d <- digits[i]
        if (is.numeric(x)) {
            # sprintf() rounds AND keeps trailing zeros (e.g. "5.60").
            f <- paste0("%0.", d, "f")
            x <- sprintf(f, x)
        } else {
            # Non-numeric visited columns are only coerced to character.
            x <- as.character(x)
        }
        tb[, i] <- x
    }
    return(tb)
}
#' @name matrix2html
#' @export
#' @author Walmes Zeviani, \email{walmes@@ufpr.br}.
#' @title Convert a character matrix to HTML table
#' @description This function converts a character matrix to an html
#'     table.
#' @param x a character matrix. In most of the cases it is the object
#'     returned by \code{table_format}.
#' @param caption a string to be the caption of the table. Default is
#'     \code{NULL}, no caption.
#' @param styles a vector with style definitions for \code{th}, \code{tr}
#'     and \code{td} tags. See the examples section. Default is
#'     \code{NULL}. These styles must be defined in a css file used to
#'     render the html page. If you are using \code{knitr}, you can use
#'     a custom css file when knitting \code{knit2html("my\_doc.Rmd",
#'     stylesheet = "my\_custom\_css.css")}.
#' @param indexes a positive integer matrix with dimensions equal to
#'     \code{x}. The numbers in each cell of \code{indexes} call the
#'     corresponding \code{styles} to be used in \code{x}. See examples
#'     for clarification. Default is \code{NULL}.
#' @param class a string corresponding to a table style defined in a
#'     \code{css} file. Default is \code{NULL}.
#' @return a character vector. Use \code{cat} inside chunks with header
#'     \code{results = "asis"} to print the result as interpretable code
#'     of an html table.
#' @seealso \code{\link{table_format}()}.
#' @examples
#'
#' x <- head(rock)
#' x <- sapply(x, as.character)
#' str(x)
#'
#' m2h <- matrix2html(x)
#' cat(m2h)
#'
#' x <- mtcars[1:10, 1:3]
#' x <- rbind(c("", colnames(x)), cbind(rownames(x), x))
#' x <- as.matrix(x)
#' rownames(x) <- colnames(x) <- NULL
#' x[1,1] <- "Cars"
#' x
#'
#' m2h <- matrix2html(x, caption="Part of the cars data set.")
#' cat(m2h)
#'
#' # These table class are assumed to be defined in a css file as
#' # follow.
#'
#' # td.red { color: #CC0000; }
#' # td.blue { color: #0000CC; }
#' # td.shade { background-color: #CCCCCC; }
#' # td.line { border-bottom: 1px solid #000000; }
#' # td.bold { font-weight: bold; }
#' # td.italic { font-style: italic; }
#'
#' sty <- c("<th align=\"center\">%s</th>", "<tr>\n%s\n</tr>\n",
#'          "<td align=\"center\">%s</td>",
#'          "<td align=\"center\" class=\"red\">%s</td>",
#'          "<td align=\"center\" class=\"blue line\">%s</td>",
#'          "<td align=\"center\" class=\"shade\">%s</td>",
#'          "<td align=\"center\" class=\"line\">%s</td>",
#'          "<td align=\"center\" class=\"bold shade\">%s</td>",
#'          "<td align=\"center\" class=\"italic blue\">%s</td>")
#'
#' # Which style for which cell table.
#' idx <- 0L * row(x) + 3L
#' idx[3, ] <- 4L
#' idx[4, ] <- 5L
#' idx[5, ] <- 6L
#' idx[7, ] <- 7L
#' idx[8, ] <- 8L
#' idx[9, ] <- 9L
#' idx[, 3] <- 3L
#' idx[1, ] <- 1L
#'
#' m2h <- matrix2html(x = x, styles = sty, indexes = idx,
#'                    caption = "Part of the cars data set.")
#' cat(m2h)
#'
matrix2html <- function(x,
                        caption = NULL,
                        styles = NULL,
                        indexes = NULL,
                        class = NULL) {
    # Wrap one string in the sprintf template selected by `which`.
    applysty <- function(x, sty, which = 1L) {
        sprintf(sty[which], x)
    }
    # Vectorized over cell content and its per-cell template index.
    applySty <- Vectorize(applysty, c("x", "which"))
    if (is.null(styles)) {
        styles <- c("<th>%s</th>", "<tr>\n%s\n</tr>\n", "<td>%s</td>")
    }
    if (is.null(indexes)) {
        # Default: every cell uses styles[3] (<td>); the first row is a
        # header row using styles[1] (<th>).
        indexes <- 0L * row(x) + 3L
        indexes[1, ] <- 1L
    }
    if (!(is.matrix(indexes) && is.integer(indexes))) {
        stop("indexes must be an integer matrix.")
    }
    if (length(styles) < 3) {
        stop("styles must have minimum length equal 3.")
    }
    if (!is.character(styles)) {
        stop("styles must be character.")
    }
    if (!all(grepl("%s", styles))) {
        stop("All styles elements must have %s.")
    }
    # BUG FIX: the original messages used "<\th>" etc.; "\t" is a TAB
    # escape in R, so the messages printed a literal tab instead of a
    # closing tag. Use proper "</th>" / "</tr>" / "</td>".
    th <- grepl("^<th.*th>", styles)[1]
    td <- grepl("^<td.*td>", styles)[1]
    if (!(th || td)) {
        stop(paste("styles[1] must be a definition",
                   "for <th></th> or <td></td>."))
    }
    tr <- grepl("^<tr.*tr>", styles)[2]
    if (!tr) {
        stop("styles[2] must be a definition for <tr></tr>.")
    }
    td <- grepl("^<td.*td>", styles)[3]
    if (!td) {
        stop("styles[3] must be a definition for <td></td>.")
    }
    if (nrow(x) != nrow(indexes) || ncol(x) != ncol(indexes)) {
        stop("x and indexes must be of the same dimension.")
    }
    if (max(indexes) > length(styles)) {
        stop("There is some index outside the styles provided.")
    }
    if (is.matrix(x) && is.character(x)) {
        # Format every cell (column-major order), restore the matrix
        # shape, collapse each row, then wrap rows in the <tr> template.
        A <- applySty(unlist(x), styles, which = unlist(indexes))
        dim(A) <- dim(x)
        A <- apply(A, 1, paste, collapse = "\n")
        A <- applysty(A, sty = styles, which = 2L)
    } else {
        stop("x must be a character matrix.")
    }
    if (!is.null(caption)) {
        cap <- c("<figcaption class=\"tab\">",
                 caption, "\n</figcaption>\n")
        A <- c(cap, A)
    }
    if (length(class) == 1) {
        table <- sprintf("<table class=\"%s\">\n", class)
    } else {
        table <- "<table>\n"
    }
    A <- c(table, A, "</table>\n")
    return(A)
}
| /R/tables.R | no_license | walmes/wzRfun | R | false | false | 7,037 | r | #' @name table_format
#' @export
#' @author Walmes Zeviani, \email{walmes@@ufpr.br}.
#' @title Round Numeric Columns of a Table
#' @description This function returns a table with numeric columns
#' rounded accordingly the specified number of decimal
#' digits. Numeric columns are coverted to character after rounding.
#' @param table a data frame.
#' @param digits a vector with non negative integer values with length
#' equals the number of columns of \code{table}. These number
#' represent the number of digits used to round numeric values. The
#' number is ignored when the correponding column isn't
#' numeric. \code{NA} and negative values are ignored.
#' @return a table with column rounded to the number of decimal digits
#' specified. All columns are coverted to character, so numeric
#' functions can not be direct applied to them anymore. Because of
#' this, it is not recommended assign the result of the function to
#' the object used as \code{table} argument.
#' @seealso \code{\link{matrix2html}()}.
#' @examples
#'
#' x <- table_format(head(rock), digits = c(1, 2, 3, 4))
#' x
#' str(x)
#'
#' x <- table_format(head(iris), c(2, 3, 2, 3, NA))
#' x
#' str(x)
#'
#' x <- table_format(head(rock), c(-1, NA, 3, 4))
#' x
#' str(x)
#'
# Round the numeric columns of a data frame to per-column decimal
# counts and convert the visited columns to character. `digits[j]` is
# the number of decimals for column j; NA or negative entries skip the
# column entirely.
table_format <- function(table, digits) {
    if (!is.data.frame(table)) {
        stop("`table` must be a data.frame.")
    }
    if (ncol(table) != length(digits)) {
        stop(paste0("Length of `digits` is not equal ",
                    "the `table` number of columns."))
    }
    out <- table
    # which() discards NA comparisons, so only usable specs remain.
    for (j in which(digits >= 0)) {
        value <- out[, j]
        if (is.numeric(value)) {
            fmt <- paste0("%0.", digits[j], "f")
            out[, j] <- sprintf(fmt, value)
        } else {
            out[, j] <- as.character(value)
        }
    }
    return(out)
}
#' @name matrix2html
#' @export
#' @author Walmes Zeviani, \email{walmes@@ufpr.br}.
#' @title Convert a character matrix to HTML table
#' @description This function coverts a character matrix to an html
#' table.
#' @param x a character matrix. Is most of the cases it is the object
#' returned by \code{table_format}.
#' @param caption a string to be the caption of the table. Default is
#' \code{NULL}, no caption.
#' @param styles a vector with style definition for \code{th}, \code{tr}
#' and \code{td} tags. See the examples section. Default is
#' \code{NULL}. These styles must be defined in a css file used to
#' render the html page. If you are using \code{knitr}, you can use
#' a custom css file when knitting \code{knit2html("my\_doc.Rmd",
#' stylesheet = "my\_custom\_css.css")}.
#' @param indexes a positive integer matrix with dimensions equal to
#' \code{x}. The numbers in each cell of \code{indexes} call the
#' corresponding \code{styles} to be used in \code{x}. See examples
#' for clarification. Default is \code{NULL}.
#' @param class a string corresponding a table style defined in a
#' \code{css} file. Default is \code{NULL}.
#' @return a character vector. Use \code{cat} inside chunks with header
#' \code{results = "asis"} to print the result as interpretable code
#' of an html table.
#' @seealso \code{\link{table_format}()}.
#' @examples
#'
#' x <- head(rock)
#' x <- sapply(x, as.character)
#' str(x)
#'
#' m2h <- matrix2html(x)
#' cat(m2h)
#'
#' x <- mtcars[1:10, 1:3]
#' x <- rbind(c("", colnames(x)), cbind(rownames(x), x))
#' x <- as.matrix(x)
#' rownames(x) <- colnames(x) <- NULL
#' x[1,1] <- "Cars"
#' x
#'
#' m2h <- matrix2html(x, caption="Part of the cars data set.")
#' cat(m2h)
#'
#' # These table class are assumed to be defined in a css file as
#' # follow.
#'
#' # td.red { color: #CC0000; }
#' # td.blue { color: #0000CC; }
#' # td.shade { background-color: #CCCCCC; }
#' # td.line { border-bottom: 1px solid #000000; }
#' # td.bold { font-weight: bold; }
#' # td.italic { font-style: italic; }
#'
#' sty <- c("<th align=\"center\">%s</th>", "<tr>\n%s\n</tr>\n",
#' "<td align=\"center\">%s</td>",
#' "<td align=\"center\" class=\"red\">%s</td>",
#' "<td align=\"center\" class=\"blue line\">%s</td>",
#' "<td align=\"center\" class=\"shade\">%s</td>",
#' "<td align=\"center\" class=\"line\">%s</td>",
#' "<td align=\"center\" class=\"bold shade\">%s</td>",
#' "<td align=\"center\" class=\"italic blue\">%s</td>")
#'
#' # Which style for which cell table.
#' idx <- 0L * row(x) + 3L
#' idx[3, ] <- 4L
#' idx[4, ] <- 5L
#' idx[5, ] <- 6L
#' idx[7, ] <- 7L
#' idx[8, ] <- 8L
#' idx[9, ] <- 9L
#' idx[, 3] <- 3L
#' idx[1, ] <- 1L
#'
#' m2h <- matrix2html(x = x, styles = sty, indexes = idx,
#' caption = "Part of the cars data set.")
#' cat(m2h)
#'
# Convert a character matrix to the lines of an html table.
# `styles` supplies sprintf templates (th, tr, td, ...); `indexes`
# selects a template per cell; `caption`/`class` add optional markup.
# Returns a character vector (cat() it inside a results="asis" chunk).
matrix2html <- function(x,
                        caption = NULL,
                        styles = NULL,
                        indexes = NULL,
                        class = NULL) {
    # Wrap one string in the sprintf template selected by `which`.
    applysty <- function(x, sty, which = 1L) {
        sprintf(sty[which], x)
    }
    # Vectorized over cell content and its per-cell template index.
    applySty <- Vectorize(applysty, c("x", "which"))
    if (is.null(styles)) {
        styles <- c("<th>%s</th>", "<tr>\n%s\n</tr>\n", "<td>%s</td>")
    }
    if (is.null(indexes)) {
        # Default: every cell uses styles[3] (<td>); the first row is a
        # header row using styles[1] (<th>).
        indexes <- 0L * row(x) + 3L
        indexes[1, ] <- 1L
    }
    if (!(is.matrix(indexes) && is.integer(indexes))) {
        stop("indexes must be an integer matrix.")
    }
    if (length(styles) < 3) {
        stop("styles must have minimum length equal 3.")
    }
    if (!is.character(styles)) {
        stop("styles must be character.")
    }
    if (!all(grepl("%s", styles))) {
        stop("All styles elements must have %s.")
    }
    # BUG FIX: the original messages used "<\th>" etc.; "\t" is a TAB
    # escape in R, so the messages printed a literal tab instead of a
    # closing tag. Use proper "</th>" / "</tr>" / "</td>".
    th <- grepl("^<th.*th>", styles)[1]
    td <- grepl("^<td.*td>", styles)[1]
    if (!(th || td)) {
        stop(paste("styles[1] must be a definition",
                   "for <th></th> or <td></td>."))
    }
    tr <- grepl("^<tr.*tr>", styles)[2]
    if (!tr) {
        stop("styles[2] must be a definition for <tr></tr>.")
    }
    td <- grepl("^<td.*td>", styles)[3]
    if (!td) {
        stop("styles[3] must be a definition for <td></td>.")
    }
    if (nrow(x) != nrow(indexes) || ncol(x) != ncol(indexes)) {
        stop("x and indexes must be of the same dimension.")
    }
    if (max(indexes) > length(styles)) {
        stop("There is some index outside the styles provided.")
    }
    if (is.matrix(x) && is.character(x)) {
        # Format every cell (column-major order), restore the matrix
        # shape, collapse each row, then wrap rows in the <tr> template.
        A <- applySty(unlist(x), styles, which = unlist(indexes))
        dim(A) <- dim(x)
        A <- apply(A, 1, paste, collapse = "\n")
        A <- applysty(A, sty = styles, which = 2L)
    } else {
        stop("x must be a character matrix.")
    }
    if (!is.null(caption)) {
        cap <- c("<figcaption class=\"tab\">",
                 caption, "\n</figcaption>\n")
        A <- c(cap, A)
    }
    if (length(class) == 1) {
        table <- sprintf("<table class=\"%s\">\n", class)
    } else {
        table <- "<table>\n"
    }
    A <- c(table, A, "</table>\n")
    return(A)
}
|
# Load the social-network data: an edge list plus per-user attributes
# (gender, school, locale -- categorical codes "A"/"B"/"AB").
edges = read.csv("edges.csv")
users = read.csv("users.csv")
# Quick inspection of both tables.
nrow(users)
str(users)
str(edges)
# Average row sum of the cross-tabulation of the two endpoint columns
# (roughly the average number of connections per user listed in the
# first column) -- NOTE(review): verify interpretation against the data.
mean(apply(table(edges),1,sum))
table(users$locale)
table(users$locale, users$gender)
library(igraph)
# Undirected graph (directed = FALSE); `users` supplies vertex attributes.
g = graph.data.frame(edges, FALSE, users)
# How many users have 10 or more connections?
sum(degree(g)>=10)
# Scale vertex size with degree so highly connected users stand out.
V(g)$size = degree(g)/2+2
plot(g, vertex.label=NA)
max(V(g)$size)
min(V(g)$size)
###################### coloring based on gender
# Vertices with values other than "A"/"B" stay black.
V(g)$color = "black"
V(g)$color[V(g)$gender == "A"] = "red"
V(g)$color[V(g)$gender == "B"] = "gray"
plot(g, vertex.label=NA)
###################### coloring based on school
# "AB" marks users attending both schools.
V(g)$color = "black"
V(g)$color[V(g)$school == "A"] = "red"
V(g)$color[V(g)$school == "B"] = "gray"
V(g)$color[V(g)$school == "AB"] = "green"
plot(g, vertex.label=NA)
###################### coloring based on locale
V(g)$color = "black"
V(g)$color[V(g)$locale == "A"] = "red"
V(g)$color[V(g)$locale == "B"] = "gray"
plot(g, vertex.label=NA) | /Week 7/problem2.R | no_license | b1ck0/MIT15.071-Analytics_Edge | R | false | false | 912 | r | edges = read.csv("edges.csv")
users = read.csv("users.csv")
nrow(users)
str(users)
str(edges)
mean(apply(table(edges),1,sum))
table(users$locale)
table(users$locale, users$gender)
library(igraph)
g = graph.data.frame(edges, FALSE, users)
sum(degree(g)>=10)
V(g)$size = degree(g)/2+2
plot(g, vertex.label=NA)
max(V(g)$size)
min(V(g)$size)
###################### coloring based on gender
V(g)$color = "black"
V(g)$color[V(g)$gender == "A"] = "red"
V(g)$color[V(g)$gender == "B"] = "gray"
plot(g, vertex.label=NA)
###################### coloring based on school
V(g)$color = "black"
V(g)$color[V(g)$school == "A"] = "red"
V(g)$color[V(g)$school == "B"] = "gray"
V(g)$color[V(g)$school == "AB"] = "green"
plot(g, vertex.label=NA)
###################### coloring based on locale
V(g)$color = "black"
V(g)$color[V(g)$locale == "A"] = "red"
V(g)$color[V(g)$locale == "B"] = "gray"
plot(g, vertex.label=NA) |
# Panorama & takeover survey + request-log results.
# NOTE(review): input files are local exports in ~/Downloads; confirm
# the paths and the questionid/optionid codes against the survey
# definitions before rerunning.
library(data.table)

# Load the three survey exports and give them a common column layout.
panorama <- fread("~/Downloads/Panorama_survey")
colnames(panorama) <- c("adspace_id", "premium_level", "survey_id", "originaladid", "surveyadid", "format", "market", "uid", "answersetid", "questionid", "optionid", "deliveries", "viewableimps")
takeover1 <- fread("~/Downloads/4cc86a3f-bfd5-4d8b-97f4-cd268cc22625-000000")
colnames(takeover1) <- c("adspace_id", "premium_level", "survey_id", "originaladid", "surveyadid", "format", "market", "uid", "answersetid", "questionid", "optionid", "deliveries", "viewableimps")
takeover2 <- fread("~/Downloads/takeover2_survey")
colnames(takeover2) <- c("adspace_id", "premium_level", "survey_id", "originaladid", "surveyadid", "format", "market", "uid", "answersetid", "questionid", "optionid", "deliveries", "viewableimps")

# Split the recall question into positive/negative answer subsets.
# Panorama uses questionid 84 (options 259/258); takeover uses
# questionid 28 (options 77/78).
panorama_recall <- subset(panorama, questionid == 84)
panorama_recall_positive <- subset(panorama_recall, optionid == 259)
panorama_recall_negative <- subset(panorama_recall, optionid == 258)
takeover1_recall <- subset(takeover1, questionid == 28)
takeover1_recall_positive <- subset(takeover1_recall, optionid == 77)
takeover1_recall_negative <- subset(takeover1_recall, optionid == 78)
takeover2_recall <- subset(takeover2, questionid == 28)
takeover2_recall_positive <- subset(takeover2_recall, optionid == 77)
takeover2_recall_negative <- subset(takeover2_recall, optionid == 78)

# Combine both takeover campaigns and redo the recall split on the
# combined table; the intermediate per-campaign *_recall tables are
# dropped to free memory.
takeover <- rbind(takeover1, takeover2)
rm(takeover1_recall, takeover2_recall)
takeover_recall <- subset(takeover, questionid == 28)
takeover_recall_positive <- subset(takeover_recall, optionid == 77)
takeover_recall_negative = subset(takeover_recall, optionid == 78) | /Panorama-Takeover/panorama_takeover.R | no_license | pinarlarsson/R | R | false | false | 1,650 | r | #panorama&takeover survey + requestlog results
library(data.table)
panorama = fread("~/Downloads/Panorama_survey")
colnames(panorama) = c("adspace_id","premium_level", "survey_id","originaladid", "surveyadid","format","market","uid", "answersetid","questionid","optionid","deliveries","viewableimps")
takeover1 = fread("~/Downloads/4cc86a3f-bfd5-4d8b-97f4-cd268cc22625-000000")
colnames(takeover1) = c("adspace_id","premium_level", "survey_id","originaladid", "surveyadid","format","market","uid", "answersetid","questionid","optionid","deliveries","viewableimps")
takeover2 = fread("~/Downloads/takeover2_survey")
colnames(takeover2) = c("adspace_id","premium_level", "survey_id","originaladid", "surveyadid","format","market","uid", "answersetid","questionid","optionid","deliveries","viewableimps")
panorama_recall = subset(panorama, questionid == 84)
panorama_recall_positive = subset(panorama_recall, optionid == 259)
panorama_recall_negative = subset(panorama_recall, optionid == 258)
takeover1_recall = subset(takeover1, questionid == 28)
takeover1_recall_positive = subset(takeover1_recall, optionid == 77)
takeover1_recall_negative = subset(takeover1_recall, optionid == 78)
takeover2_recall = subset(takeover2, questionid == 28)
takeover2_recall_positive = subset(takeover2_recall, optionid == 77)
takeover2_recall_negative = subset(takeover2_recall, optionid == 78)
takeover = rbind(takeover1, takeover2)
rm(takeover1_recall, takeover2_recall)
takeover_recall = subset(takeover, questionid == 28)
takeover_recall_positive = subset(takeover_recall, optionid == 77)
takeover_recall_negative = subset(takeover_recall, optionid == 78) |
\name{MissingHealthData}
\alias{MissingHealthData}
\title{Health Data with Missing Values}
\description{
Medical data containing both categorical variables and continuous variables,
the latter include two variables with missing values.
}
\usage{
data(MissingHealthData)
}
\format{
A data frame with 52 observations on the following 8 variables.
A part of the source data was recoded according to an input example of
original program CATDAP-02. In addition, we converted 1 into 'A' and 2 into
'B' of symptoms data, and converted cholesterol data less than 198 into 'low'
and the others into 'high'.
\tabular{rlll}{
[, 1] \tab opthalmo. \tab 1, 2\cr
[, 2] \tab ecg \tab\tab 1, 2\cr
[, 3] \tab symptoms \tab\tab A, B\cr
[, 4] \tab age \tab\tab 49-59\cr
[, 5] \tab max.press \tab\tab 98-216, 300 (missing value)\cr
[, 6] \tab min.press \tab\tab 56-120, 300 (missing value)\cr
[, 7] \tab aortic.wav \tab\tab 6.3-10.2\cr
[, 8] \tab cholesterol \tab\tab low, high
}
}
\source{
Y.Sakamoto, M.Ishiguro and G.Kitagawa (1980)
\emph{Computer Science Monograph, No.14, CATDAP, A CATEGORICAL DATA ANALYSIS
PROGRAM PACKAGE, DATA No.2.} The Institute of Statistical Mathematics.
Y.Sakamoto (1985) \emph{Categorical Data Analysis by AIC, p. 74.} Kluwer
Academic Publishers.
}
\keyword{datasets}
| /man/MissingHealthData.Rd | no_license | cran/catdap | R | false | false | 1,367 | rd | \name{MissingHealthData}
\alias{MissingHealthData}
\title{Health Data with Missing Values}
\description{
Medical data containing both categorical variables and continuous variables,
the latter include two variables with missing values.
}
\usage{
data(MissingHealthData)
}
\format{
A data frame with 52 observations on the following 8 variables.
A part of the source data was recoded according to an input example of
original program CATDAP-02. In addition, we converted 1 into 'A' and 2 into
'B' of symptoms data, and converted cholesterol data less than 198 into 'low'
and the others into 'high'.
\tabular{rlll}{
[, 1] \tab opthalmo. \tab 1, 2\cr
[, 2] \tab ecg \tab\tab 1, 2\cr
[, 3] \tab symptoms \tab\tab A, B\cr
[, 4] \tab age \tab\tab 49-59\cr
[, 5] \tab max.press \tab\tab 98-216, 300 (missing value)\cr
[, 6] \tab min.press \tab\tab 56-120, 300 (missing value)\cr
[, 7] \tab aortic.wav \tab\tab 6.3-10.2\cr
[, 8] \tab cholesterol \tab\tab low, high
}
}
\source{
Y.Sakamoto, M.Ishiguro and G.Kitagawa (1980)
\emph{Computer Science Monograph, No.14, CATDAP, A CATEGORICAL DATA ANALYSIS
PROGRAM PACKAGE, DATA No.2.} The Institute of Statistical Mathematics.
Y.Sakamoto (1985) \emph{Categorical Data Analysis by AIC, p. 74.} Kluwer
Academic Publishers.
}
\keyword{datasets}
|
## This functions is written to cache the inverse of a matrix
## This function creates a special "matrix" object that can cache its inverse
## Construct a cache-aware matrix wrapper: a list of closures sharing
## the wrapped matrix and its (lazily filled) inverse through the
## enclosing environment. Intended for use with cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
    cached <- NULL
    set <- function(y) {
        # Replacing the matrix invalidates any cached inverse.
        x <<- y
        cached <<- NULL
    }
    get <- function() x
    setinverse <- function(inverse) cached <<- inverse
    getinverse <- function() cached
    # Expose the four accessors under the names callers expect.
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## Compute (and cache) the inverse of the special "matrix" returned by
## makeCacheMatrix above. The first call computes the inverse with
## solve() and memoizes it via setinverse(); subsequent calls reuse the
## stored value (announced with a message) until set() replaces the
## matrix.
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    result <- x$getinverse()
    if (is.null(result)) {
        # Cache miss: invert the wrapped matrix (extra arguments are
        # forwarded to solve()) and store the answer for next time.
        result <- solve(x$get(), ...)
        x$setinverse(result)
    } else {
        message("getting cached data")
    }
    result
}
| /cachematrix.R | no_license | zzzhengjh/ProgrammingAssignment2 | R | false | false | 1,106 | r | ## This functions is written to cache the inverse of a matrix
## This function creates a special "matrix" object that can cache its inverse
## Build a special "matrix" object: a list of closures that share the
## wrapped matrix `x` and its cached inverse via this function's
## environment.
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL means "not computed yet".
inv <- NULL
set <- function(y) {
# Replacing the matrix invalidates the cached inverse.
x <<- y
inv <<- NULL
}
# Accessor for the wrapped matrix.
get <- function() x
# Store / fetch the cached inverse (filled in by cacheSolve()).
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
# Compute the inverse of a makeCacheMatrix() object, reusing the cached
# value when one is available.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
# Cache hit: reuse the previously computed inverse.
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
# Cache miss: invert the stored matrix (extra arguments are forwarded
# to solve()) and memoize the result for later calls.
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rwebppl.R
\name{get_samples}
\alias{get_samples}
\title{Get samples}
\usage{
get_samples(df, num_samples)
}
\arguments{
\item{df}{A data frame of webppl "histogram" output (has a column called
`prob` with probabilities, remaining columns are parameter values).}
\item{num_samples}{A number of samples to reconstruct.}
}
\value{
Data frame of parameter values with number of rows equal to
`num_samples`.
}
\description{
Turn webppl "histogram" output into samples.
}
\examples{
num_samples <- 10
df <- data.frame(prob = c(0.1, 0.3, 0.5, 0.1), support = c("a","b","c","d"))
get_samples(df, num_samples)
}
| /man/get_samples.Rd | no_license | dyurovsky/rwebppl | R | false | true | 684 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rwebppl.R
\name{get_samples}
\alias{get_samples}
\title{Get samples}
\usage{
get_samples(df, num_samples)
}
\arguments{
\item{df}{A data frame of webppl "histogram" output (has a column called
`prob` with probabilities, remaining columns are parameter values).}
\item{num_samples}{A number of samples to reconstruct.}
}
\value{
Data frame of parameter values with number of rows equal to
`num_samples`.
}
\description{
Turn webppl "histogram" output into samples.
}
\examples{
num_samples <- 10
df <- data.frame(prob = c(0.1, 0.3, 0.5, 0.1), support = c("a","b","c","d"))
get_samples(df, num_samples)
}
|
##################
### Chapter 16 ###
##################
# Ch 16 Exercise 2: advanced ggplot2 practice
# Install and load the `ggplot2` package
#install.packages('ggplot2')
library("ggplot2")
library("dplyr")
# For this exercise you will again be working with the `diamonds` data set.
# Use `?diamonds` to review details about this data set
?diamonds
## Position Adjustments
# Draw a column (bar) chart of diamonds cuts by price, with each bar filled by
# clarity. You should see a _stacked_ bar chart.
# NOTE(review): the prompt asks for a chart of cuts by price, but the
# aesthetics below map x = price (continuous) and y = cut, which renders the
# chart horizontally; the canonical solution uses x = cut, y = price --
# confirm which orientation is intended.
ggplot(data = diamonds) +
geom_col(mapping = aes(x = price, y = cut, fill = clarity))
# Draw the same chart again, but with each element positioned to "fill" the y axis
ggplot(data = diamonds) +
geom_col(mapping = aes(x = price, y = cut, fill = clarity), position = "fill")
# Draw the same chart again, but with each element positioned to "dodge" each other
ggplot(data = diamonds) +
geom_col(mapping = aes(x = price, y = cut,fill = clarity), position = "dodge")
# Draw a plot with point geometry with the x-position mapped to `cut` and the
# y-position mapped to `clarity`
# This creates a "grid" grouping the points
ggplot(data = diamonds) +
geom_point(mapping = aes(x = cut, y = clarity))
# Use the "jitter" position adjustment to keep the points from all overlapping!
# (This works a little better with a sample of diamond data, such as from the
# previous exercise).
diamonds_sample <- sample_n(diamonds, 1000)
ggplot(data = diamonds_sample) +
geom_point(mapping = aes(x = cut, y = clarity), position = "jitter")
## Scales
# Draw a "boxplot" (with `geom_boxplot`) for the diamond's price (y) by color (x)
ggplot(data = diamonds_sample) +
geom_boxplot(mapping = aes(x = color, y = price))
# This has a lot of outliers, making it harder to read. To fix this, draw the
# same plot but with a _logarithmic_ scale for the y axis.
ggplot(data = diamonds_sample) +
geom_boxplot(mapping = aes(x = color, y = price)) +
scale_y_log10()
# For another version, draw the same plot but with `violin` geometry instead of
# `boxplot` geometry!
# How does the logarithmic scale change the data presentation?
ggplot(data = diamonds_sample) +
geom_violin(mapping = aes(x = color, y = price)) +
scale_y_log10()
# Another interesting plot: draw a plot of the diamonds price (y) by carat (x),
# using a heatmap of 2d bins (geom_bin2d)
# What happens when you make the x and y channels scale logarithmically?
# Draw a scatter plot for the diamonds price (y) by carat (x). Color each point
# by the clarity (Remember, this will take a while. Use a sample of the diamonds
# for faster results)
# Change the color of the previous plot using a ColorBrewer scale of your choice.
# What looks nice?
## Coordinate Systems
# Draw a bar chart with x-position and fill color BOTH mapped to cut
# For best results, SET the `width` of the geometry to be 1 (fill plot, no space
# between)
# TIP: You can save the plot to a variable for easier modifications
# Draw the same chart, but with the coordinate system flipped
# Draw the same chart, but in a polar coordinate system. It's a Coxcomb chart!
## Facets
# Take the scatter plot of price by carat data (colored by clarity) and add
# _facets_ based on the diamond's `color`
## Saving Plots
# Use the `ggsave()` function to save the current (recent) plot to disk.
# Name the output file "my-plot.png".
# Make sure you've set the working directory!!
| /chapter-16-exercises/exercise-2/exercise.R | permissive | seoaus00/book-exercises | R | false | false | 3,436 | r | ##################
### Chapter 16 ###
##################
# Ch 16 Exercise 2: advanced ggplot2 practice
# Install and load the `ggplot2` package
#install.packages('ggplot2')
library("ggplot2")
library("dplyr")
# For this exercise you will again be working with the `diamonds` data set.
# Use `?diamonds` to review details about this data set
?diamonds
## Position Adjustments
# Draw a column (bar) chart of diamonds cuts by price, with each bar filled by
# clarity. You should see a _stacked_ bar chart.
ggplot(data = diamonds) +
geom_col(mapping = aes(x = price, y = cut, fill = clarity))
# Draw the same chart again, but with each element positioned to "fill" the y axis
ggplot(data = diamonds) +
geom_col(mapping = aes(x = price, y = cut, fill = clarity), position = "fill")
# Draw the same chart again, but with each element positioned to "dodge" each other
ggplot(data = diamonds) +
geom_col(mapping = aes(x = price, y = cut,fill = clarity), position = "dodge")
# Draw a plot with point geometry with the x-position mapped to `cut` and the
# y-position mapped to `clarity`
# This creates a "grid" grouping the points
ggplot(data = diamonds) +
geom_point(mapping = aes(x = cut, y = clarity))
# Use the "jitter" position adjustment to keep the points from all overlapping!
# (This works a little better with a sample of diamond data, such as from the
# previous exercise).
diamonds_sample <- sample_n(diamonds, 1000)
ggplot(data = diamonds_sample) +
geom_point(mapping = aes(x = cut, y = clarity), position = "jitter")
## Scales
# Draw a "boxplot" (with `geom_boxplot`) for the diamond's price (y) by color (x)
ggplot(data = diamonds_sample) +
geom_boxplot(mapping = aes(x = color, y = price))
# This has a lot of outliers, making it harder to read. To fix this, draw the
# same plot but with a _logarithmic_ scale for the y axis.
ggplot(data = diamonds_sample) +
geom_boxplot(mapping = aes(x = color, y = price)) +
scale_y_log10()
# For another version, draw the same plot but with `violin` geometry instead of
# `boxplot` geometry!
# How does the logarithmic scale change the data presentation?
ggplot(data = diamonds_sample) +
geom_violin(mapping = aes(x = color, y = price)) +
scale_y_log10()
# Another interesting plot: draw a plot of the diamonds price (y) by carat (x),
# using a heatmap of 2d bins (geom_bin2d)
# What happens when you make the x and y channels scale logarithmically?
# Draw a scatter plot for the diamonds price (y) by carat (x). Color each point
# by the clarity (Remember, this will take a while. Use a sample of the diamonds
# for faster results)
# Change the color of the previous plot using a ColorBrewer scale of your choice.
# What looks nice?
## Coordinate Systems
# Draw a bar chart with x-position and fill color BOTH mapped to cut
# For best results, SET the `width` of the geometry to be 1 (fill plot, no space
# between)
# TIP: You can save the plot to a variable for easier modifications
# Draw the same chart, but with the coordinate system flipped
# Draw the same chart, but in a polar coordinate system. It's a Coxcomb chart!
## Facets
# Take the scatter plot of price by carat data (colored by clarity) and add
# _facets_ based on the diamond's `color`
## Saving Plots
# Use the `ggsave()` function to save the current (recent) plot to disk.
# Name the output file "my-plot.png".
# Make sure you've set the working directory!!
|
# NOTE(review): read_csv() comes from the readr package, which is not loaded
# in this script -- add library(readr) (or use read.csv) before running.
data <- read_csv("C:/Users/CJ/Desktop/coding/r_project/data/descriptive.csv")
data
# Install and load the MASS package into memory
library(MASS)
data(Animals)
head(Animals)
# Compute the following descriptive statistics for the 'brain' column
summary(Animals$brain)
var(Animals$brain) # variance
sd(Animals$brain) # standard deviation
mean(Animals$brain) # mean
max(Animals$brain)
min(Animals$brain)
median(Animals$brain)
# Use describe() and freq() to compute descriptive statistics
# for the whole Animals data set
install.packages("psych")
library(psych)
describe(Animals)
install.packages("descr")
library(descr)
freq(Animals)
# For the nominal-scale variables school type (type) and pass/fail (pass),
# run a frequency analysis and visualize the result as a bar chart and a pie chart
type <- data$type
x <- table(type)
barplot(x)
pie(x)
# For the ratio-scale age variable, compute summary statistics (mean, sd) and
# asymmetry measures (skewness, kurtosis), then draw a histogram and explain
# the asymmetry statistics
age <- data$age
summary(age)
skewness(age) # skewness
kurtosis(age) # kurtosis
par(mfrow = c(1,1))
hist(age, freq = F)
lines(density(age), col='blue')
# NOTE(review): 'x' is reused here, overwriting the frequency table above
x <- seq(35, 80, 0.1) # x-axis start point, end point, step size
curve(dnorm(x, mean(age), sd(age)), col='red', add=T) # overlay a normal density curve on the histogram
| /r02/210510실습문제.R | no_license | kanu21sj/R_Programimng_Edu | R | false | false | 1,442 | r | data <- read_csv("C:/Users/CJ/Desktop/coding/r_project/data/descriptive.csv")
data
# MASS 패키기 설치 및 메모리 로딩
library(MASS)
data(Animals)
head(Animals)
# brain 칼럼을 대상으로 다음 기술 통계량 구하기
summary(Animals$brain)
var(Animals$brain) # 분산 구하기
sd(Animals$brain) # 표준편차 구하기
mean(Animals$brain) # 평균값 구하기
max(Animals$brain)
min(Animals$brain)
median(Animals$brain)
# describe(), freq()함수 이용하여
# Animals 데이터 셋 전체를 대상으로 기술 통계량 구하기
install.packages("psych")
library(psych)
describe(Animals)
install.packages("descr")
library(descr)
freq(Animals)
# 명목척도 변수인 학교유형(type), 합격여부(pass) 변수에 대해
# 빈도 분석을 수행하고 결과를 막대그래프와 파이차트로 시각화
type <- data$type
x <- table(type)
barplot(x)
pie(x)
# 비율척도 변수인 나이 변수에 대해 요약치(평균, 표준편차)와 비대칭도(왜도,첨도)
# 통계량을 구하고, 히스토그램 작성하여 비대칭도 통계량 설명
age <- data$age
summary(age)
skewness(age) # 왜도
kurtosis(age) # 첨도
par(mfrow = c(1,1))
hist(age, freq = F)
lines(density(age), col='blue')
x <- seq(35, 80, 0.1) # x축 시작점, 끝점, 순서값의 크기
curve(dnorm(x, mean(age), sd(age)), col='red', add=T) # dnorm 표준정규분포
|
## File Name: btm_trim_increment.R
## File Version: 0.04
btm_trim_increment <- function(incr, maxincr )
{
    ## Trim parameter increments to at most 'maxincr' in absolute value.
    ##
    ## incr ...... numeric vector of proposed increments
    ## maxincr ... scalar bound on the absolute size of an increment
    ##
    ## Returns a numeric vector of the same length as 'incr' in which each
    ## element is clamped to [-maxincr, maxincr] while keeping its sign.
    ## Unlike the previous ifelse() formulation, a zero-length numeric input
    ## correctly yields numeric(0) instead of logical(0), and both branches
    ## are no longer evaluated for every element.
    res <- sign(incr) * pmin( abs(incr), maxincr )
    return(res)
}
| /R/btm_trim_increment.R | no_license | alexanderrobitzsch/sirt | R | false | false | 192 | r | ## File Name: btm_trim_increment.R
## File Version: 0.04
btm_trim_increment <- function(incr, maxincr )
{
res <- ifelse( abs(incr) > maxincr, maxincr*sign(incr), incr )
return(res)
}
|
# 0-L2M-download-2019-20:
# Download all the archived L2M reports in raw form to then evaluate:
# https://official.nba.com/2019-20-nba-officiating-last-two-minute-reports/
# ---- start --------------------------------------------------------------
library(httr)
library(rvest)
library(tidyverse)
library(splashr)
# Create a directory for the data
local_dir <- "0-data/L2M/2019-20"
data_source <- paste0(local_dir, "/raw")
scrape_source <- paste0(data_source, "/scraped_splashr")
if (!file.exists(local_dir)) dir.create(local_dir, recursive = T)
if (!file.exists(data_source)) dir.create(data_source, recursive = T)
if (!file.exists(scrape_source)) dir.create(scrape_source, recursive = T)
url_l2m <- paste0("https://official.nba.com/",
"2019-20-nba-officiating-last-two-minute-reports/")
# read in url from above, then extract the links that comply with:
links <- read_html(url_l2m) %>%
html_nodes("h2~ p a") %>%
html_attr("href")
# Hack correction for url link which includes "http:// https://"
links <- str_remove(links, "http://: ")
# ---- pdf-format ---------------------------------------------------------
links_pdf <- links[grepl(pattern = "*.pdf", links)]
files <- paste(data_source, basename(links_pdf), sep = "/")
# For each url and file, check to see if it exists then try to download.
# need to keep a record of those which fail with a 404 error
pdf_games <- map(links_pdf, function(x) {
file_x <- paste(data_source, basename(x), sep = "/")
if (!file.exists(file_x)) {
Sys.sleep(runif(1, 3, 5))
tryCatch(download.file(x, file_x, method = "libcurl"),
warning = function(w) {
"bad"
})
} else "exists"
})
# ---- url-format ---------------------------------------------------------
links_url <- links[!grepl(pattern = "*.pdf", links)]
# Only get the links from games not scraped
scraped_files <- dir(scrape_source, pattern = ".csv", full.names = T)
links_url <- links_url[!(gsub(".*\\?|&.*", "", links_url) %in%
tools::file_path_sans_ext(basename(scraped_files)))]
# scraped_data <- read_rds("0-data/L2M/scraped_201819.rds")
# bad_data <- filter(scraped_data, grepl("error", period))
# good_data <- filter(scraped_data, !grepl("error", period))
#
# links_url <- links_url[!(gsub(".*\\?|&.*", "", links_url) %in%
# good_data$game_id)]
# ---- splashr-start -------------------------------------------------------
# Only start splashr if there are urls to be downloaded.
if (!is_empty(links_url)) {
# Create docker container for splashr to scrape
splash_container <- start_splash(container_name = "l2m")
Sys.sleep(3)
}
# ---- map-links ----------------------------------------------------------
if (splash_active()) {
scrape_site <- map(links_url, function(x) {
game_id <- gsub(".*\\?|&.*", "", x)
print(paste0(game_id, " at ", Sys.time()))
Sys.sleep(runif(1, 10, 15))
l2m_raw <- render_html(url = x, wait = 7)
game_details <- l2m_raw %>%
xml_nodes(".gamedetails") %>%
html_text()
game_date <- l2m_raw %>%
xml_nodes(".gamedate") %>%
html_text()
l2m_site <- l2m_raw %>%
html_table(fill = T) %>%
.[[1]]
# Did the scrape give us a functional table?
if (is.data.frame(l2m_site)) {
print("is a data.frame")
l2m_site <- l2m_site[, !is.na(names(l2m_site)) & !(names(l2m_site) == "")]
names(l2m_site) <- tolower(str_replace(names(l2m_site), " ", "_"))
data1 <- l2m_site %>%
mutate(comments = if_else(grepl(pattern = "^Comment", period),
time, NA_character_),
comments = lead(comments),
stint = if_else(period == "", time, NA_character_)) %>%
fill(stint)
data2 <- data1 %>%
filter(grepl(pattern = "^Period", period)) %>%
mutate(period = str_remove(period, "Period:"),
time = str_remove(time, "Time:"),
call_type = str_remove(call_type, "Call Type:"),
committing = str_remove(committing_player,
"Committing Player:"),
disadvantaged = str_remove(disadvantaged_player,
"Disadvantaged Player:"),
decision = str_remove(review_decision,
"Review Decision:")) %>%
mutate_all(str_trim) %>%
select(period, time, call_type, committing, disadvantaged, decision,
comments, stint)
# Check to see that these are dataframes with more than 1 row
n1 = nrow(data2)
if (n1 > 0) {
print("it worked!")
j5 <- data2
j5$game_id <- game_id
j5$game_details <- game_details
j5$game_date <- game_date
j5$scrape_time <- Sys.time()
return(j5)
} else if (n1 == 0) {
print("didn't work, data were 0 length")
j5 <- data.frame(period = "error - n1 == 0",
game_id = game_id,
game_details = game_details,
game_date = game_date,
scrape_time = Sys.time())
return(j5)
} else {
print("didn't work, not sure")
j5 <- data.frame(period = "error - huh",
game_id = game_id,
game_details = game_details,
game_date = game_date,
scrape_time = Sys.time())
return(j5)
}
} else {
print("no")
j5 <- data.frame(period = "error",
game_id = game_id,
game_details = game_details,
game_date = game_date,
scrape_time = Sys.time())
return(j5)
}
})
} else {
print("Splash is not running, please enable before scraping.")
scrape_site <- list(data.frame(period = "error - splashr did not start",
game_id = NA, game_details = NA,
game_date = NA, scrape_time = Sys.time()))
}
# Remove the splash_container if it was created
if (exists("splash_container")) stop_splash(splash_container)
# Now, write the full data set and also write individual csv files!
if (is_empty(scrape_site)) {
scrape_data <- data.frame(scrape_time = NA)
} else {
scrape_data <- bind_rows(scrape_site) %>%
# Filter out the errors
filter(!grepl("error", period)) %>%
mutate(scrape_time = as.character(scrape_time))
# Just in case all were errors.
if (nrow(scrape_data) == 0) scrape_data <- data.frame(scrape_time = NA)
}
scraped_data <- map(scraped_files, read_csv, col_types = cols(.default = "c"))
# Individual games
ind_games_csv <- map(scrape_site, function(x) {
game_id <- x$game_id[1]
# If the data.frame in the list only has one observation it's an error
if (nrow(x) > 1) {
write_csv(x, paste0(scrape_source, "/", game_id, ".csv"))
return(data.frame(game_id, status = "good"))
} else {
return(data.frame(game_id, status = "bad"))
}
})
# Merge the previously scraped games with this run's results, order the rows
# by scrape time, and drop the placeholder rows that carry no scrape_time
corrections <- scraped_data %>%
  bind_rows(scrape_data) %>%
  arrange(scrape_time) %>%
  filter(!is.na(scrape_time))
# Enter in the home and away teams plus final scores
# game_details appears to have the form "Away Team (score) @ Home Team (score)":
# the text before "@" is the away side, the text after is the home side, and
# the parenthesized number within each is that side's score -- TODO confirm
# against a raw report
corrections <- corrections %>%
  mutate(away1 = str_trim(str_remove(game_details, "@.*$")),
         away_score = str_extract(away1, "(?<=\\().+?(?=\\))"),
         away_team = str_trim(str_remove(away1, "\\(.*\\)")),
         home1 = str_trim(str_remove(game_details, ".*@")),
         home_score = str_extract(home1, "(?<=\\().+?(?=\\))"),
         home_team = str_trim(str_remove(home1, "\\(.*\\)"))) %>%
  select(-away1, -home1) %>%
  # Put the corrections into a consistent order
  arrange(game_id, period, desc(time))
# Persist the combined data set in both csv and rds form
write_csv(corrections, paste0(local_dir, "/scraped_201920.csv"))
write_rds(corrections, paste0(local_dir, "/scraped_201920.rds"))
| /0-data/0-L2M-download-2019-20-splashr.R | permissive | KofiJone/L2M | R | false | false | 8,126 | r | # 0-L2M-download-2019-20:
# Download all the archived L2M reports in raw form to then evaluate:
# https://official.nba.com/2019-20-nba-officiating-last-two-minute-reports/
# ---- start --------------------------------------------------------------
library(httr)
library(rvest)
library(tidyverse)
library(splashr)
# Create a directory for the data
local_dir <- "0-data/L2M/2019-20"
data_source <- paste0(local_dir, "/raw")
scrape_source <- paste0(data_source, "/scraped_splashr")
if (!file.exists(local_dir)) dir.create(local_dir, recursive = T)
if (!file.exists(data_source)) dir.create(data_source, recursive = T)
if (!file.exists(scrape_source)) dir.create(scrape_source, recursive = T)
url_l2m <- paste0("https://official.nba.com/",
"2019-20-nba-officiating-last-two-minute-reports/")
# read in url from above, then extract the links that comply with:
links <- read_html(url_l2m) %>%
html_nodes("h2~ p a") %>%
html_attr("href")
# Hack correction for url link which includes "http:// https://"
links <- str_remove(links, "http://: ")
# ---- pdf-format ---------------------------------------------------------
links_pdf <- links[grepl(pattern = "*.pdf", links)]
files <- paste(data_source, basename(links_pdf), sep = "/")
# For each url and file, check to see if it exists then try to download.
# need to keep a record of those which fail with a 404 error
pdf_games <- map(links_pdf, function(x) {
file_x <- paste(data_source, basename(x), sep = "/")
if (!file.exists(file_x)) {
Sys.sleep(runif(1, 3, 5))
tryCatch(download.file(x, file_x, method = "libcurl"),
warning = function(w) {
"bad"
})
} else "exists"
})
# ---- url-format ---------------------------------------------------------
links_url <- links[!grepl(pattern = "*.pdf", links)]
# Only get the links from games not scraped
scraped_files <- dir(scrape_source, pattern = ".csv", full.names = T)
links_url <- links_url[!(gsub(".*\\?|&.*", "", links_url) %in%
tools::file_path_sans_ext(basename(scraped_files)))]
# scraped_data <- read_rds("0-data/L2M/scraped_201819.rds")
# bad_data <- filter(scraped_data, grepl("error", period))
# good_data <- filter(scraped_data, !grepl("error", period))
#
# links_url <- links_url[!(gsub(".*\\?|&.*", "", links_url) %in%
# good_data$game_id)]
# ---- splashr-start -------------------------------------------------------
# Only start splashr if there are urls to be downloaded.
if (!is_empty(links_url)) {
# Create docker container for splashr to scrape
splash_container <- start_splash(container_name = "l2m")
Sys.sleep(3)
}
# ---- map-links ----------------------------------------------------------
if (splash_active()) {
scrape_site <- map(links_url, function(x) {
game_id <- gsub(".*\\?|&.*", "", x)
print(paste0(game_id, " at ", Sys.time()))
Sys.sleep(runif(1, 10, 15))
l2m_raw <- render_html(url = x, wait = 7)
game_details <- l2m_raw %>%
xml_nodes(".gamedetails") %>%
html_text()
game_date <- l2m_raw %>%
xml_nodes(".gamedate") %>%
html_text()
l2m_site <- l2m_raw %>%
html_table(fill = T) %>%
.[[1]]
# Did the scrape give us a functional table?
if (is.data.frame(l2m_site)) {
print("is a data.frame")
l2m_site <- l2m_site[, !is.na(names(l2m_site)) & !(names(l2m_site) == "")]
names(l2m_site) <- tolower(str_replace(names(l2m_site), " ", "_"))
data1 <- l2m_site %>%
mutate(comments = if_else(grepl(pattern = "^Comment", period),
time, NA_character_),
comments = lead(comments),
stint = if_else(period == "", time, NA_character_)) %>%
fill(stint)
data2 <- data1 %>%
filter(grepl(pattern = "^Period", period)) %>%
mutate(period = str_remove(period, "Period:"),
time = str_remove(time, "Time:"),
call_type = str_remove(call_type, "Call Type:"),
committing = str_remove(committing_player,
"Committing Player:"),
disadvantaged = str_remove(disadvantaged_player,
"Disadvantaged Player:"),
decision = str_remove(review_decision,
"Review Decision:")) %>%
mutate_all(str_trim) %>%
select(period, time, call_type, committing, disadvantaged, decision,
comments, stint)
# Check to see that these are dataframes with more than 1 row
n1 = nrow(data2)
if (n1 > 0) {
print("it worked!")
j5 <- data2
j5$game_id <- game_id
j5$game_details <- game_details
j5$game_date <- game_date
j5$scrape_time <- Sys.time()
return(j5)
} else if (n1 == 0) {
print("didn't work, data were 0 length")
j5 <- data.frame(period = "error - n1 == 0",
game_id = game_id,
game_details = game_details,
game_date = game_date,
scrape_time = Sys.time())
return(j5)
} else {
print("didn't work, not sure")
j5 <- data.frame(period = "error - huh",
game_id = game_id,
game_details = game_details,
game_date = game_date,
scrape_time = Sys.time())
return(j5)
}
} else {
print("no")
j5 <- data.frame(period = "error",
game_id = game_id,
game_details = game_details,
game_date = game_date,
scrape_time = Sys.time())
return(j5)
}
})
} else {
print("Splash is not running, please enable before scraping.")
scrape_site <- list(data.frame(period = "error - splashr did not start",
game_id = NA, game_details = NA,
game_date = NA, scrape_time = Sys.time()))
}
# Remove the splash_container if it was created
if (exists("splash_container")) stop_splash(splash_container)
# Now, write the full data set and also write individual csv files!
if (is_empty(scrape_site)) {
scrape_data <- data.frame(scrape_time = NA)
} else {
scrape_data <- bind_rows(scrape_site) %>%
# Filter out the errors
filter(!grepl("error", period)) %>%
mutate(scrape_time = as.character(scrape_time))
# Just in case all were errors.
if (nrow(scrape_data) == 0) scrape_data <- data.frame(scrape_time = NA)
}
scraped_data <- map(scraped_files, read_csv, col_types = cols(.default = "c"))
# Individual games
ind_games_csv <- map(scrape_site, function(x) {
game_id <- x$game_id[1]
# If the data.frame in the list only has one observation it's an error
if (nrow(x) > 1) {
write_csv(x, paste0(scrape_source, "/", game_id, ".csv"))
return(data.frame(game_id, status = "good"))
} else {
return(data.frame(game_id, status = "bad"))
}
})
corrections <- scraped_data %>%
bind_rows(scrape_data) %>%
arrange(scrape_time) %>%
filter(!is.na(scrape_time))
# Enter in the home and away teams plus final scores
corrections <- corrections %>%
mutate(away1 = str_trim(str_remove(game_details, "@.*$")),
away_score = str_extract(away1, "(?<=\\().+?(?=\\))"),
away_team = str_trim(str_remove(away1, "\\(.*\\)")),
home1 = str_trim(str_remove(game_details, ".*@")),
home_score = str_extract(home1, "(?<=\\().+?(?=\\))"),
home_team = str_trim(str_remove(home1, "\\(.*\\)"))) %>%
select(-away1, -home1) %>%
# Put the corrections into a consistent order
arrange(game_id, period, desc(time))
write_csv(corrections, paste0(local_dir, "/scraped_201920.csv"))
write_rds(corrections, paste0(local_dir, "/scraped_201920.rds"))
|
#===========================================================================
# Library
#===========================================================================
library(shiny)
library(dplyr)
library(markdown)
library(data.table)
#===========================================================================
# Data Prepare for selectInput,sliderInput,numericInput
#===========================================================================
path = getwd()
#---- Load Data ----
Azure_ML_train <- fread(file.path(path, "Azure_ML_train.csv")) %>% select(-Survived)
Azure_ML_test <- fread(file.path(path, "Azure_ML_test.csv"))
#---- Bind Data ----
Azure_ML_data <- rbind(Azure_ML_train,Azure_ML_test)
rm(Azure_ML_train,Azure_ML_test)
#---- selectInput for classfication variable ----
Pclass <- sort(unique(Azure_ML_data$PassengerClass))
gender <- sort(unique(Azure_ML_data$Gender))
Embarked <- sort(unique(Azure_ML_data$PortEmbarkation[Azure_ML_data$PortEmbarkation != ""]))
#Azure_ML_data %>% select(PortEmbarkation) %>% distinct(PortEmbarkation) %>% arrange(PortEmbarkation)
#---- sliderInput ----
Fare <- data.frame( max = max(Azure_ML_data$FarePrice ,na.rm =TRUE),
min = min(Azure_ML_data$FarePrice ,na.rm =TRUE)
)
#---- numericInput ----
age <- list( max = floor(max(Azure_ML_data$Age ,na.rm =TRUE)),
min = floor(min(Azure_ML_data$Age ,na.rm =TRUE)))
SibSp <- list( max = max(as.numeric(Azure_ML_data$SiblingSpouse),na.rm =TRUE),
min = min(as.numeric(Azure_ML_data$SiblingSpouse),na.rm =TRUE))
Parch <- list( max = max(as.numeric(Azure_ML_data$ParentChild),na.rm =TRUE),
min = min(as.numeric(Azure_ML_data$ParentChild),na.rm =TRUE)
)
#===========================================================================
# Shiny Layout
#===========================================================================
# UI layout: sidebar inputs for passenger class, gender, and age; main panel
# shows an image output named "result_plot" (expected to be rendered by the
# accompanying server -- TODO confirm against server.R)
shinyUI(fluidPage(
titlePanel("Titanic Survival Prediction"),
sidebarLayout(
sidebarPanel(
# Input choices/limits (Pclass, gender, age) come from the data prep above
selectInput("PassengerClass", "PassengerClass : ", choices=Pclass),
selectInput("Gender", "Gender : ", choices=gender),
numericInput("Age", "Age : ", min = age$min, max = age$max, value =age$max, step = 0.5)
),
mainPanel( imageOutput("result_plot") )
)
))
| /ui.R | no_license | Chihengwang/RShinyApp | R | false | false | 2,273 | r | #===========================================================================
# Library
#===========================================================================
library(shiny)
library(dplyr)
library(markdown)
library(data.table)
#===========================================================================
# Data Prepare for selectInput,sliderInput,numericInput
#===========================================================================
path = getwd()
#---- Load Data ----
Azure_ML_train <- fread(file.path(path, "Azure_ML_train.csv")) %>% select(-Survived)
Azure_ML_test <- fread(file.path(path, "Azure_ML_test.csv"))
#---- Bind Data ----
Azure_ML_data <- rbind(Azure_ML_train,Azure_ML_test)
rm(Azure_ML_train,Azure_ML_test)
#---- selectInput for classfication variable ----
Pclass <- sort(unique(Azure_ML_data$PassengerClass))
gender <- sort(unique(Azure_ML_data$Gender))
Embarked <- sort(unique(Azure_ML_data$PortEmbarkation[Azure_ML_data$PortEmbarkation != ""]))
#Azure_ML_data %>% select(PortEmbarkation) %>% distinct(PortEmbarkation) %>% arrange(PortEmbarkation)
#---- sliderInput ----
Fare <- data.frame( max = max(Azure_ML_data$FarePrice ,na.rm =TRUE),
min = min(Azure_ML_data$FarePrice ,na.rm =TRUE)
)
#---- numericInput ----
age <- list( max = floor(max(Azure_ML_data$Age ,na.rm =TRUE)),
min = floor(min(Azure_ML_data$Age ,na.rm =TRUE)))
SibSp <- list( max = max(as.numeric(Azure_ML_data$SiblingSpouse),na.rm =TRUE),
min = min(as.numeric(Azure_ML_data$SiblingSpouse),na.rm =TRUE))
Parch <- list( max = max(as.numeric(Azure_ML_data$ParentChild),na.rm =TRUE),
min = min(as.numeric(Azure_ML_data$ParentChild),na.rm =TRUE)
)
#===========================================================================
# Shiny Layout
#===========================================================================
shinyUI(fluidPage(
titlePanel("Titanic Survival Prediction"),
sidebarLayout(
sidebarPanel(
selectInput("PassengerClass", "PassengerClass : ", choices=Pclass),
selectInput("Gender", "Gender : ", choices=gender),
numericInput("Age", "Age : ", min = age$min, max = age$max, value =age$max, step = 0.5)
),
mainPanel( imageOutput("result_plot") )
)
))
|
# Frequency-distribution tables for a data.frame (fdth package demo)
library(fdth)
# Toy data: two categorical columns and four numeric columns
# (Y1 has NA values at both ends)
mdf <- data.frame(X1 = rep(LETTERS[1:4], 25),
                  X2 = as.factor(rep(1:10, 10)),
                  Y1 = c(NA, NA, rnorm(96, 10, 1), NA, NA),
                  Y2 = rnorm(100, 60, 4),
                  Y3 = rnorm(100, 50, 4),
                  Y4 = rnorm(100, 40, 4))
mdf
(tb <- fdt(mdf))
# Histograms
plot(tb, v = TRUE)
plot(tb, col = rainbow(8))
plot(tb, type = 'fh')
dev.off()
par(mar = c(1, 1, 1, 1))
#graphics.off()
plot(tb, type = 'rfh')
plot(tb, type = 'rfph')
plot(tb, type = 'cdh')
plot(tb, type = 'cfh')
plot(tb, type = 'cfph')
# Polygons
plot(tb, v = TRUE, type = 'fp')
plot(tb, type = 'rfp')
plot(tb, type = 'rfpp')
plot(tb, type = 'cdp')
plot(tb, type = 'cfp')
plot(tb, type = 'cfpp')
| /22-summary/24b-freqdistr4.R | no_license | DUanalytics/rAnalytics | R | false | false | 650 | r | # FD with DF
library(fdth)
mdf <- data.frame(X1=rep(LETTERS[1:4], 25), X2=as.factor(rep(1:10, 10)),Y1=c(NA, NA, rnorm(96, 10, 1), NA, NA), Y2=rnorm(100, 60, 4), Y3=rnorm(100, 50, 4), Y4=rnorm(100, 40, 4))
mdf
(tb <- fdt(mdf))
# Histograms
plot(tb, v=TRUE)
plot(tb, col=rainbow(8))
plot(tb, type='fh')
dev.off()
par(mar=c(1,1,1,1))
#graphics.off()
plot(tb, type='rfh')
plot(tb,type='rfph')
plot(tb,
type='cdh')
plot(tb,
type='cfh')
plot(tb,
type='cfph')
# Poligons
plot(tb,
v=TRUE,
type='fp')
plot(tb,
type='rfp')
plot(tb,
type='rfpp')
plot(tb,
type='cdp')
plot(tb,
type='cfp')
plot(tb,
type='cfpp')
|
library(shiny)
shinyUI(bootstrapPage(fluidPage(
headerPanel("Comparison and Commonality Word Cloud Generator"),
sidebarPanel(
textInput("text1", label = "Enter your 1st text", value = "And we will safeguard America's own security against those who threaten our citizens, our friends, and our interests. Look at Iran. Through the power of our diplomacy, a world that was once divided about how to deal with Iran's nuclear program now stands as one. The regime is more isolated than ever before; its leaders are faced with crippling sanctions, and as long as they shirk their responsibilities, this pressure will not relent. Let there be no doubt: America is determined to prevent Iran from getting a nuclear weapon, and I will take no options off the table to achieve that goal. But a peaceful resolution of this issue is still possible, and far better, and if Iran changes course and meets its obligations, it can rejoin the community of nations.
The renewal of American leadership can be felt across the globe. Our oldest alliances in Europe and Asia are stronger than ever. Our ties to the Americas are deeper. Our iron-clad commitment to Israel's security has meant the closest military cooperation between our two countries in history. We've made it clear that America is a Pacific power, and a new beginning in Burma has lit a new hope. From the coalitions we've built to secure nuclear materials, to the missions we've led against hunger and disease; from the blows we've dealt to our enemies; to the enduring power of our moral example, America is back.
Anyone who tells you otherwise, anyone who tells you that America is in decline or that our influence has waned, doesn't know what they're talking about. That's not the message we get from leaders around the world, all of whom are eager to work with us. That's not how people feel from Tokyo to Berlin; from Cape Town to Rio; where opinions of America are higher than they've been in years. Yes, the world is changing; no, we can't control every event. But America remains the one indispensable nation in world affairs - and as long as I'm President, I intend to keep it that way."),
textInput("text2", label = "Enter your 2nd text", value = "One of my proudest possessions is the flag that the SEAL Team took with them on the mission to get bin Laden. On it are each of their names. Some may be Democrats. Some may be Republicans. But that doesn't matter. Just like it didn't matter that day in the Situation Room, when I sat next to Bob Gates - a man who was George Bush's defense secretary; and Hillary Clinton, a woman who ran against me for president.All that mattered that day was the mission. No one thought about politics. No one thought about themselves. One of the young men involved in the raid later told me that he didn't deserve credit for the mission. It only succeeded, he said, because every single member of that unit did their job - the pilot who landed the helicopter that spun out of control; the translator who kept others from entering the compound; the troops who separated the women and children from the fight; the SEALs who charged up the stairs. More than that, the mission only succeeded because every member of that unit trusted each other - because you can't charge up those stairs, into darkness and danger, unless you know that there's someone behind you, watching your back.
So it is with America. Each time I look at that flag, I'm reminded that our destiny is stitc"),
actionButton("submit", "Generate Cloud!")
),
mainPanel(
strong("Word cloud instructions"),
br(),
p("Wait until the app is fully loaded , then just copy and paste either two songs or speeches or any two text of interest that you would like to compare and also find any common word patterns."),
p("Use the text box 1 to enter your first text and text box 2 to enter your second text (Select all the default text and overwrite with the new text)"),
br(),
p("Then click generate cloud action button (In this case the there is no reactive code implemented so even before you click generate cloud the program might run)"),
tags$ol(
tags$li("The First Graph shows the Comparison Cloud"),
tags$li("The second graph shows the Commonality Cloud")
),
br(),
p("All the code is available on my github:", a("https://github.com/Raj85/Coursera_DevelopingDataProducts"), " where a detailed method used is listed"),
p("Please Ignore the Error msg when you clear either one fo the text boxes", a("Error:invalid 'cex' value")),
plotOutput("cloud",width = "100%", height = "1000px" )
)
))) | /Ui.R | no_license | ybhushan/Coursera_DevelopingDataProducts | R | false | false | 4,753 | r | library(shiny)
shinyUI(bootstrapPage(fluidPage(
headerPanel("Comparison and Commonality Word Cloud Generator"),
sidebarPanel(
textInput("text1", label = "Enter your 1st text", value = "And we will safeguard America's own security against those who threaten our citizens, our friends, and our interests. Look at Iran. Through the power of our diplomacy, a world that was once divided about how to deal with Iran's nuclear program now stands as one. The regime is more isolated than ever before; its leaders are faced with crippling sanctions, and as long as they shirk their responsibilities, this pressure will not relent. Let there be no doubt: America is determined to prevent Iran from getting a nuclear weapon, and I will take no options off the table to achieve that goal. But a peaceful resolution of this issue is still possible, and far better, and if Iran changes course and meets its obligations, it can rejoin the community of nations.
The renewal of American leadership can be felt across the globe. Our oldest alliances in Europe and Asia are stronger than ever. Our ties to the Americas are deeper. Our iron-clad commitment to Israel's security has meant the closest military cooperation between our two countries in history. We've made it clear that America is a Pacific power, and a new beginning in Burma has lit a new hope. From the coalitions we've built to secure nuclear materials, to the missions we've led against hunger and disease; from the blows we've dealt to our enemies; to the enduring power of our moral example, America is back.
Anyone who tells you otherwise, anyone who tells you that America is in decline or that our influence has waned, doesn't know what they're talking about. That's not the message we get from leaders around the world, all of whom are eager to work with us. That's not how people feel from Tokyo to Berlin; from Cape Town to Rio; where opinions of America are higher than they've been in years. Yes, the world is changing; no, we can't control every event. But America remains the one indispensable nation in world affairs - and as long as I'm President, I intend to keep it that way."),
textInput("text2", label = "Enter your 2nd text", value = "One of my proudest possessions is the flag that the SEAL Team took with them on the mission to get bin Laden. On it are each of their names. Some may be Democrats. Some may be Republicans. But that doesn't matter. Just like it didn't matter that day in the Situation Room, when I sat next to Bob Gates - a man who was George Bush's defense secretary; and Hillary Clinton, a woman who ran against me for president.All that mattered that day was the mission. No one thought about politics. No one thought about themselves. One of the young men involved in the raid later told me that he didn't deserve credit for the mission. It only succeeded, he said, because every single member of that unit did their job - the pilot who landed the helicopter that spun out of control; the translator who kept others from entering the compound; the troops who separated the women and children from the fight; the SEALs who charged up the stairs. More than that, the mission only succeeded because every member of that unit trusted each other - because you can't charge up those stairs, into darkness and danger, unless you know that there's someone behind you, watching your back.
So it is with America. Each time I look at that flag, I'm reminded that our destiny is stitc"),
actionButton("submit", "Generate Cloud!")
),
mainPanel(
strong("Word cloud instructions"),
br(),
p("Wait until the app is fully loaded , then just copy and paste either two songs or speeches or any two text of interest that you would like to compare and also find any common word patterns."),
p("Use the text box 1 to enter your first text and text box 2 to enter your second text (Select all the default text and overwrite with the new text)"),
br(),
p("Then click generate cloud action button (In this case the there is no reactive code implemented so even before you click generate cloud the program might run)"),
tags$ol(
tags$li("The First Graph shows the Comparison Cloud"),
tags$li("The second graph shows the Commonality Cloud")
),
br(),
p("All the code is available on my github:", a("https://github.com/Raj85/Coursera_DevelopingDataProducts"), " where a detailed method used is listed"),
p("Please Ignore the Error msg when you clear either one fo the text boxes", a("Error:invalid 'cex' value")),
plotOutput("cloud",width = "100%", height = "1000px" )
)
))) |
# Tests for hp_check(): verify computed HP values and error handling after a
# deterministic, seeded unit draw.
context("HP check")
# Fixed seed so draw_units()/draw_hexs() produce reproducible boards.
set.seed(123)
# Loads the `units` fixture; `dist_matrix` is presumably provided by the
# package under test or a test helper -- confirm.
load("units.RData")
p1 <- draw_units(price_limit = 50, units_data = units)
p2 <- draw_units(price_limit = 50, units_data = units)
units_game <- draw_hexs(p1, p2)
test_that("check results", {
# ID9 vs ID6 are within range (non-zero damage); ID344 is out of range.
expect_equal(hp_check(player_id = "ID9", opponent_id = "ID6", units_data = units_game, dist_matrix_data = dist_matrix), 46.585)
expect_equal(hp_check(player_id = "ID9", opponent_id = "ID344", units_data = units_game, dist_matrix_data = dist_matrix), 0)
})
test_that("check errors", {
# Unknown opponent id, and raw (pre-draw) units data, must both error.
expect_error(hp_check(player_id = "ID9", opponent_id = "ID1", units_data = units_game, dist_matrix_data = dist_matrix))
expect_error(hp_check(player_id = "ID9", opponent_id = "ID1", units_data = units, dist_matrix_data = dist_matrix))
})
| /tests/testthat/test-hp_check.R | no_license | lwawrowski/combat | R | false | false | 758 | r | context("HP check")
set.seed(123)
load("units.RData")
p1 <- draw_units(price_limit = 50, units_data = units)
p2 <- draw_units(price_limit = 50, units_data = units)
units_game <- draw_hexs(p1, p2)
test_that("check results", {
expect_equal(hp_check(player_id = "ID9", opponent_id = "ID6", units_data = units_game, dist_matrix_data = dist_matrix), 46.585)
expect_equal(hp_check(player_id = "ID9", opponent_id = "ID344", units_data = units_game, dist_matrix_data = dist_matrix), 0)
})
test_that("check errors", {
expect_error(hp_check(player_id = "ID9", opponent_id = "ID1", units_data = units_game, dist_matrix_data = dist_matrix))
expect_error(hp_check(player_id = "ID9", opponent_id = "ID1", units_data = units, dist_matrix_data = dist_matrix))
})
|
#' Filter the tokens based on term frequency
#'
#' `step_textfilter` creates a *specification* of a recipe step that
#' will convert a list of its tokenized parts into a list where the
#' tokens are filtered based on frequency.
#'
#' @param recipe A recipe object. The step will be added to the
#' sequence of operations for this recipe.
#' @param ... One or more selector functions to choose variables.
#' For `step_textfilter`, this indicates the variables to be encoded
#' into a list column. See [recipes::selections()] for more
#' details. For the `tidy` method, these are not currently used.
#' @param role Not used by this step since no new variables are
#' created.
#' @param columns A list of tibble results that define the
#' encoding. This is `NULL` until the step is trained by
#' [recipes::prep.recipe()].
#' @param max.tf An integer. Maximal number of times a word can appear
#' before getting removed.
#' @param min.tf An integer. Minimum number of times a word can appear
#' before getting removed.
#' @param procentage A logical. Should max.tf and min.tf be interpreted
#'  as a percentage instead of a count.
#' @param max.words An integer. Will only keep the top max.words words
#' after filtering done by max.tf and min.tf.
#' @param res The words that will be kept will be stored here once
#'  this preprocessing step has been trained by [prep.recipe()].
#' @param skip A logical. Should the step be skipped when the
#' recipe is baked by [recipes::bake.recipe()]? While all
#' operations are baked when [recipes::prep.recipe()] is run, some
#' operations may not be able to be conducted on new data (e.g.
#' processing the outcome variable(s)). Care should be taken when
#' using `skip = TRUE` as it may affect the computations for
#' subsequent operations
#' @param trained A logical to indicate if the recipe has been
#' baked.
#' @return An updated version of `recipe` with the new step added
#' to the sequence of existing steps (if any).
#' @examples
#' library(recipes)
#'
#' data(okc_text)
#'
#' okc_rec <- recipe(~ ., data = okc_text) %>%
#' step_tokenize(essay0) %>%
#' step_textfilter(essay0, max.words = 10) %>%
#' prep(training = okc_text, retain = TRUE)
#'
#' juice(okc_rec, essay0) %>%
#' slice(1:2)
#'
#' juice(okc_rec) %>%
#' slice(2) %>%
#' pull(essay0)
#' @keywords datagen
#' @concept preprocessing encoding
#' @export
#' @importFrom recipes add_step step terms_select sel2char ellipse_check
#' @importFrom recipes check_type
step_textfilter <-
function(recipe,
...,
role = NA,
trained = FALSE,
columns = NULL,
max.tf = Inf,
min.tf = 0,
procentage = FALSE,
max.words = NULL,
res = NULL,
skip = FALSE
) {
# User-facing constructor: validate the tidyselect dots via ellipse_check()
# and append an untrained "textfilter" step to the recipe's operation list.
add_step(
recipe,
step_textfilter_new(
terms = ellipse_check(...),
role = role,
trained = trained,
columns = columns,
max.tf = max.tf,
min.tf = min.tf,
procentage = procentage,
max.words = max.words,
res = res,
skip = skip
)
)
}
# Internal low-level constructor: wraps the arguments into a recipes `step`
# object of subclass "textfilter". No validation is performed here; the
# exported step_textfilter() is responsible for checking its inputs.
step_textfilter_new <-
function(terms = NULL,
role = NA,
trained = FALSE,
columns = NULL,
max.tf = NULL,
min.tf = NULL,
procentage = NULL,
max.words = NULL,
res = NULL,
skip = FALSE) {
step(
subclass = "textfilter",
terms = terms,
role = role,
trained = trained,
columns = columns,
max.tf = max.tf,
min.tf = min.tf,
procentage = procentage,
max.words = max.words,
res = res,
skip = skip
)
}
#' @export
# Train the step: resolve the selected columns, compute the retained
# vocabulary per column via textfilter_fun(), and return a trained step
# carrying that vocabulary in `res` (consumed later by bake()).
prep.step_textfilter <- function(x, training, info = NULL, ...) {
  col_names <- terms_select(x$terms, info = info)
  check_list(training[, col_names])
  # lapply instead of growing a list by index: preallocated and idiomatic,
  # with identical results (one vocabulary per selected column, in order).
  retain_words <- lapply(col_names, function(col) {
    textfilter_fun(training[, col, drop = TRUE],
                   x$max.tf, x$min.tf, x$max.words, x$procentage)
  })
  step_textfilter_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    columns = col_names,
    max.tf = x$max.tf,
    min.tf = x$min.tf,
    procentage = x$procentage,
    max.words = x$max.words,
    res = retain_words,
    skip = x$skip
  )
}
#' @export
#' @importFrom tibble as_tibble tibble
#' @importFrom recipes bake prep
#' @importFrom purrr map
# Apply the trained step: restrict each token-list column to the vocabulary
# retained at prep() time (stored per-column in object$res).
bake.step_textfilter <- function(object, newdata, ...) {
col_names <- object$columns
# for backward compat
for (i in seq_along(col_names)) {
newdata[, col_names[i]] <-
word_tbl_filter(newdata[, col_names[i], drop = TRUE],
object$res[[i]],
TRUE)
}
# factor_to_text (defined elsewhere in the package) presumably coerces any
# factor columns back to character before returning -- confirm.
newdata <- factor_to_text(newdata, col_names)
as_tibble(newdata)
}
# Compute the retained vocabulary for one token-list column.
#
# data:         list (or vector) of token vectors.
# max_tf/min_tf: exclusive frequency bounds a word must fall strictly between.
# max_features: optional cap on vocabulary size (most frequent words kept).
# procentage:   if TRUE, the bounds apply to relative frequencies.
# Returns a character vector of words, most frequent first.
textfilter_fun <- function(data, max_tf, min_tf, max_features, procentage) {
  tf <- table(unlist(data))
  if (procentage)
    tf <- tf / sum(tf)
  ids <- tf < max_tf & tf > min_tf
  kept <- names(sort(tf[ids], decreasing = TRUE))
  if (is.null(max_features)) {
    kept
  } else {
    # head() (rather than [seq_len(max_features)]) avoids padding the result
    # with NA entries when fewer than max_features words survive the filter.
    head(kept, max_features)
  }
}
| /R/textfilter.R | permissive | NanaAkwasiAbayieBoateng/textrecipes | R | false | false | 5,262 | r | #' Filter the tokens based on term frequency
#'
#' `step_textfilter` creates a *specification* of a recipe step that
#' will convert a list of its tokenized parts into a list where the
#' tokens are filtered based on frequency.
#'
#' @param recipe A recipe object. The step will be added to the
#' sequence of operations for this recipe.
#' @param ... One or more selector functions to choose variables.
#' For `step_textfilter`, this indicates the variables to be encoded
#' into a list column. See [recipes::selections()] for more
#' details. For the `tidy` method, these are not currently used.
#' @param role Not used by this step since no new variables are
#' created.
#' @param columns A list of tibble results that define the
#' encoding. This is `NULL` until the step is trained by
#' [recipes::prep.recipe()].
#' @param max.tf An integer. Maximal number of times a word can appear
#' before getting removed.
#' @param min.tf An integer. Minimum number of times a word can appear
#' before getting removed.
#' @param procentage A logical. Should max.tf and min.tf be interpreted
#'  as a percentage instead of a count.
#' @param max.words An integer. Will only keep the top max.words words
#' after filtering done by max.tf and min.tf.
#' @param res The words that will be kept will be stored here once
#'  this preprocessing step has been trained by [prep.recipe()].
#' @param skip A logical. Should the step be skipped when the
#' recipe is baked by [recipes::bake.recipe()]? While all
#' operations are baked when [recipes::prep.recipe()] is run, some
#' operations may not be able to be conducted on new data (e.g.
#' processing the outcome variable(s)). Care should be taken when
#' using `skip = TRUE` as it may affect the computations for
#' subsequent operations
#' @param trained A logical to indicate if the recipe has been
#' baked.
#' @return An updated version of `recipe` with the new step added
#' to the sequence of existing steps (if any).
#' @examples
#' library(recipes)
#'
#' data(okc_text)
#'
#' okc_rec <- recipe(~ ., data = okc_text) %>%
#' step_tokenize(essay0) %>%
#' step_textfilter(essay0, max.words = 10) %>%
#' prep(training = okc_text, retain = TRUE)
#'
#' juice(okc_rec, essay0) %>%
#' slice(1:2)
#'
#' juice(okc_rec) %>%
#' slice(2) %>%
#' pull(essay0)
#' @keywords datagen
#' @concept preprocessing encoding
#' @export
#' @importFrom recipes add_step step terms_select sel2char ellipse_check
#' @importFrom recipes check_type
step_textfilter <-
function(recipe,
...,
role = NA,
trained = FALSE,
columns = NULL,
max.tf = Inf,
min.tf = 0,
procentage = FALSE,
max.words = NULL,
res = NULL,
skip = FALSE
) {
# User-facing constructor: validate the tidyselect dots via ellipse_check()
# and append an untrained "textfilter" step to the recipe's operation list.
add_step(
recipe,
step_textfilter_new(
terms = ellipse_check(...),
role = role,
trained = trained,
columns = columns,
max.tf = max.tf,
min.tf = min.tf,
procentage = procentage,
max.words = max.words,
res = res,
skip = skip
)
)
}
# Internal low-level constructor: wraps the arguments into a recipes `step`
# object of subclass "textfilter". No validation is performed here; the
# exported step_textfilter() is responsible for checking its inputs.
step_textfilter_new <-
function(terms = NULL,
role = NA,
trained = FALSE,
columns = NULL,
max.tf = NULL,
min.tf = NULL,
procentage = NULL,
max.words = NULL,
res = NULL,
skip = FALSE) {
step(
subclass = "textfilter",
terms = terms,
role = role,
trained = trained,
columns = columns,
max.tf = max.tf,
min.tf = min.tf,
procentage = procentage,
max.words = max.words,
res = res,
skip = skip
)
}
#' @export
# Train the step: resolve the selected columns, compute the retained
# vocabulary per column via textfilter_fun(), and return a trained step
# carrying that vocabulary in `res` (consumed later by bake()).
prep.step_textfilter <- function(x, training, info = NULL, ...) {
  col_names <- terms_select(x$terms, info = info)
  check_list(training[, col_names])
  # lapply instead of growing a list by index: preallocated and idiomatic,
  # with identical results (one vocabulary per selected column, in order).
  retain_words <- lapply(col_names, function(col) {
    textfilter_fun(training[, col, drop = TRUE],
                   x$max.tf, x$min.tf, x$max.words, x$procentage)
  })
  step_textfilter_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    columns = col_names,
    max.tf = x$max.tf,
    min.tf = x$min.tf,
    procentage = x$procentage,
    max.words = x$max.words,
    res = retain_words,
    skip = x$skip
  )
}
#' @export
#' @importFrom tibble as_tibble tibble
#' @importFrom recipes bake prep
#' @importFrom purrr map
# Apply the trained step: restrict each token-list column to the vocabulary
# retained at prep() time (stored per-column in object$res).
bake.step_textfilter <- function(object, newdata, ...) {
col_names <- object$columns
# for backward compat
for (i in seq_along(col_names)) {
newdata[, col_names[i]] <-
word_tbl_filter(newdata[, col_names[i], drop = TRUE],
object$res[[i]],
TRUE)
}
# factor_to_text (defined elsewhere in the package) presumably coerces any
# factor columns back to character before returning -- confirm.
newdata <- factor_to_text(newdata, col_names)
as_tibble(newdata)
}
# Compute the retained vocabulary for one token-list column.
#
# data:         list (or vector) of token vectors.
# max_tf/min_tf: exclusive frequency bounds a word must fall strictly between.
# max_features: optional cap on vocabulary size (most frequent words kept).
# procentage:   if TRUE, the bounds apply to relative frequencies.
# Returns a character vector of words, most frequent first.
textfilter_fun <- function(data, max_tf, min_tf, max_features, procentage) {
  tf <- table(unlist(data))
  if (procentage)
    tf <- tf / sum(tf)
  ids <- tf < max_tf & tf > min_tf
  kept <- names(sort(tf[ids], decreasing = TRUE))
  if (is.null(max_features)) {
    kept
  } else {
    # head() (rather than [seq_len(max_features)]) avoids padding the result
    # with NA entries when fewer than max_features words survive the filter.
    head(kept, max_features)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{listContexts}
\alias{listContexts}
\title{Available OpenCL Contexts}
\usage{
listContexts()
}
\value{
data.frame containing the following fields
\item{context}{Integer identifying context}
\item{platform}{Character string listing OpenCL platform}
\item{platform_index}{Integer identifying platform}
\item{device}{Character string listing device name}
\item{device_index}{Integer identifying device}
\item{device_type}{Character string labeling device (e.g. gpu)}
}
\description{
Provide a data.frame of available OpenCL contexts and
associated information.
}
| /man/listContexts.Rd | no_license | nnQuynh/gpuR | R | false | true | 663 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{listContexts}
\alias{listContexts}
\title{Available OpenCL Contexts}
\usage{
listContexts()
}
\value{
data.frame containing the following fields
\item{context}{Integer identifying context}
\item{platform}{Character string listing OpenCL platform}
\item{platform_index}{Integer identifying platform}
\item{device}{Character string listing device name}
\item{device_index}{Integer identifying device}
\item{device_type}{Character string labeling device (e.g. gpu)}
}
\description{
Provide a data.frame of available OpenCL contexts and
associated information.
}
|
#======================================================================================================
# Merging With Fantastic
#--------------------------------------------------
# Point of this script is to
# 1. Take Survey Data and Create DV measure
# 2. Merge FANTASTIC measures to Personal data
# 3. Build models to predict behiavioral measures
#--------------------------------------------------
library(tidyverse)
library(GGally)
library(psych)
library(corrr)
library(cowplot)
# Disable scientific notation in printed output.
options(scipen = 999)
#--------------------------------------------------
# Data Import
# Expert survey responses and FANTASTIC feature computations; paths are
# relative to the project root.
dictation_survey <- read_csv("aural_survey/Dictation_Survey_Responses.csv")
fantastic_computations <- read_csv("corpus/symbolic/Melosol_Features.csv")
#--------------------------------------------------
# Swap Out Transposed Melodies Names
# Berkowitz334
# Berkowitz382
# Berkowitz417
# Berkowitz607
# Append the trailing "t" that the survey data uses for the four transposed
# Berkowitz melodies, so FANTASTIC file ids match the survey stimulus names.
# Any other value passes through unchanged. Unlike the repeated-subassignment
# original, this also tolerates NA entries instead of erroring.
accomodate_rename <- function(x){
  transposed <- c("Berkowitz334", "Berkowitz382", "Berkowitz417", "Berkowitz607")
  hits <- x %in% transposed
  x[hits] <- paste0(x[hits], "t")
  x
}
# Apply the suffix fix so FANTASTIC rows can be matched to survey stimuli.
fantastic_computations$file.id <- accomodate_rename(fantastic_computations$file.id)
#--------------------------------------------------
# Change Fantastic Name for Merge
fantastic_computations %>%
rename(stimulus = file.id) -> fantastic_computations
# Per-stimulus expert means: difficulty (2nd-year rating) and grammaticality.
# NOTE(review): group_by + mutate + unique emulates summarise(); the result
# stays grouped by stimulus, which later code undoes with ungroup().
dictation_survey %>%
select(stimulus, Difficulty_2nd_Year, Grammar) %>%
group_by(stimulus) %>%
mutate(mean_diff = mean(Difficulty_2nd_Year),
mean_gram = mean(Grammar)) %>%
select(stimulus, starts_with("mean")) %>%
unique() -> target
#--------------------------------------------------
# Merge Data
# Variables Wanted: All Fantastic,
#remove_transpose <- function(x){
# x %>%
# str_remove_all(pattern = "t$")
#}
# dictation_survey$stimulus <- remove_transpose(dictation_survey$stimulus)
# Left join on the shared "stimulus" key (inferred by dplyr, which prints a
# joining message); keeps all survey stimuli.
target %>%
left_join(fantastic_computations) -> melody_data
#======================================================================================================
# Export Data For Correlation Table
#--------------------------------------------------
# Correlations of every FANTASTIC feature with the two expert ratings,
# strongest difficulty correlations first (printed, not stored).
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
arrange(-mean_diff)
#--------------------------------------------------
# FILE FOF DISSERATION
# Long-format feature/rating correlations for the dissertation bar chart.
# NOTE(review): after gather(), the column renamed to "GroundTruth" holds the
# rating name and "corr" holds the correlation value.
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
# mutate(strength = abs(mean_diff) + abs(mean_gram)) %>%
gather(mean_diff, mean_gram, -rowname) %>%
rename(Feature = rowname, GroundTruth = mean_diff, corr = mean_gram) %>%
filter(Feature != "mean_gram") %>%
filter(Feature != "mean_diff") -> fantplotdata
# Map internal rating column names onto human-readable plot labels,
# leaving any other value untouched.
fix_gt <- function(x){
  labels <- list(mean_diff = "Mean Difficulty", mean_gram = "Mean Grammar")
  for (key in names(labels)) {
    x[x == key] <- labels[[key]]
  }
  x
}
fantplotdata$GroundTruth <- fix_gt(fantplotdata$GroundTruth)
# Diverging bar chart of feature/rating correlations, sorted by strength.
# NOTE(review): scale_fill_viridis() comes from the viridis package, which is
# never attached above -- add library(viridis) (or use ggplot2's
# scale_fill_viridis_d()) before sourcing this script non-interactively.
fantplotdata %>%
ggplot(aes(x = reorder(Feature, corr), y = corr, group = GroundTruth)) +
coord_flip() +
scale_fill_viridis(discrete = TRUE) +
geom_bar(stat = "identity", aes(fill = GroundTruth)) +
labs(title = "Correlations Between FANTASTIC Features and Expert Ratings",
x = "FANTASTIC Features",
y = "r",
color = "Ground Truth") +
theme_minimal() -> fantastic_expert_plot
fantastic_expert_plot
# ggsave(filename = "document/img/FantasticExpertPlot.png", plot = fantastic_expert_plot)
# Top 5 features most positively correlated with mean difficulty ...
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
arrange(-mean_diff, mean_gram) %>%
filter(rowname != "mean_diff" & rowname != "mean_gram") %>%
head(n = 5) -> feature_head
# ... and the 5 most negatively correlated (tail of the same ordering).
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
arrange(-mean_diff, mean_gram) %>%
filter(rowname != "mean_diff" & rowname != "mean_gram") %>%
tail(n = 5) -> feature_tail
# Combine the two extremes into one markdown table for the dissertation.
rbind(feature_head, feature_tail) %>%
mutate(Feature = rowname, Difficulty = mean_diff, Grammar = mean_gram) %>%
select(Feature, Difficulty, Grammar) %>%
knitr::kable(digits = 2) -> strong_features
strong_features
# write_rds(strong_features, path = "document/img/strongfeatures.rds")
#======================================================================================================
# Show Collinearity of Feature Items
# Pairwise scatter/correlation matrix for a hand-picked feature subset;
# mode is treated as a factor so ggpairs draws it categorically.
# NOTE(review): the first select() is redundant -- the second select()
# fully determines the final columns.
fantastic_computations %>%
select(-stimulus, h.contour, starts_with("int")) %>%
select(i.abs.std, i.abs.mean, step.cont.loc.var, i.entropy, p.entropy,
d.median, d.eq.trans, mean.Yules.K, tonalness, mean.Simpsons.D, mode) %>%
mutate(mode = as.factor(mode)) %>%
ggpairs(title = "Feature Correlations") -> fantastic_collin
fantastic_collin
# ggsave(filename = "document/img/FANTASTIC_collin.png", plot = fantastic_collin)
#--------------------------------------------------
# Plot Melody Against Various Features
# Univariate scatterplots of each feature vs mean difficulty, split into
# strong ("Good Ones") and weak ("Bad Ones") predictors, then arranged in a
# 2x4 grid with cowplot::plot_grid().
# NOTE(review): View() is interactive-only and should be removed for
# non-interactive runs; stat_cor() requires the ggpubr package, which is
# never attached above; theme_minimal() is applied twice in each plot.
View(melody_data)
# Good Ones
ggplot(melody_data, aes(x = p.entropy, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Pitch Entropy", x = "Pitch Entropy", y = "Mean Difficulty") +
geom_smooth(method = 'lm', se = FALSE) +
ylim(c(0,100)) +
stat_cor(method = "pearson") +
theme_minimal() -> cow_pentropy
ggplot(melody_data, aes(x = tonalness, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Tonalness", x = "Tonalness", y = "") +
ylim(c(0,100)) +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
theme_minimal() -> cow_tonalness
ggplot(melody_data, aes(x = step.cont.loc.var, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Stepwise Contour\nLocal Variation", x = "Stepwise Contour", y = "") +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
theme_minimal() -> cow_stepcontlocalvar
ggplot(melody_data, aes(x = len, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Melody Length", x = "Melody Length", y = "") +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
theme_minimal() -> cow_len
# Bad Ones
ggplot(melody_data, aes(x = tonal.spike, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Tonal Spike", x = "Tonal Spike", y = "Mean Difficulty") +
geom_smooth(method = 'lm', se = FALSE) +
ylim(c(0,100)) +
stat_cor(method = "pearson") +
theme_minimal() -> cow_tonalspike
ggplot(melody_data, aes(x = step.cont.glob.dir , y = mean_diff)) +
geom_point() + theme_minimal() +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
labs(title = "Stepwise Contour\nGlobal Direction", x = "Stepwise Contour: Global Direction", y = "") +
theme_minimal() -> cow_stpcontglobdir
ggplot(melody_data, aes(x = mean.entropy, y = mean_diff)) +
geom_point() + theme_minimal() +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
labs(title = "Mean Pitch Entropy", x = "Mean Pitch Entropy", y = "") +
theme_minimal() -> cow_meanentropy
ggplot(melody_data, aes(x = d.range, y = mean_diff)) +
geom_point() + theme_minimal() +
geom_smooth(method = 'lm', se = FALSE) +
ylim(c(0,100)) +
stat_cor(method = "pearson") +
labs(title = "Durational Range", x = "Durational Range", y = "") +
theme_minimal() -> cow_drange
#--------------------------------------------------
plot_grid(cow_pentropy, cow_tonalness, cow_stepcontlocalvar, cow_len,
cow_tonalspike, cow_stpcontglobdir, cow_meanentropy, cow_drange, nrow = 2, ncol = 4) -> univariate_features
univariate_features
# ggsave(filename = "document/img/univariate_cow.png")
#--------------------------------------------------
# Regression
# Nested linear models predicting mean difficulty from melodic features.
model_feat1 <- lm(mean_diff ~ p.entropy + len + tonalness + step.cont.loc.var, data = melody_data)
model_feat2 <-lm(mean_diff ~ p.entropy + len, data = melody_data)
model_feat3 <- lm(mean_diff ~ p.entropy, data = melody_data)
summary(model_feat1)
summary(model_feat2)
summary(model_feat3)
#--------------------------------------------------
# Scatterplot matrix (psych) of the ratings and a feature subset.
melody_data %>%
select(stimulus,mean_diff, mean_gram, p.range,p.entropy,len, note.dens,tonalness) %>%
pairs.panels()
# NOTE(review): model_dumb duplicates model_feat1's formula exactly --
# presumably one of the two was meant to differ; confirm intent.
model_dumb <- lm(mean_diff ~ p.entropy + len + tonalness + step.cont.loc.var, data = melody_data)
summary(model_dumb)
| /analyses/musical_features/predict_survey.R | no_license | davidjohnbaker1/mmd | R | false | false | 8,469 | r | #======================================================================================================
# Merging With Fantastic
#--------------------------------------------------
# Point of this script is to
# 1. Take Survey Data and Create DV measure
# 2. Merge FANTASTIC measures to Personal data
# 3. Build models to predict behiavioral measures
#--------------------------------------------------
library(tidyverse)
library(GGally)
library(psych)
library(corrr)
library(cowplot)
options(scipen = 999)
#--------------------------------------------------
# Data Import
dictation_survey <- read_csv("aural_survey/Dictation_Survey_Responses.csv")
fantastic_computations <- read_csv("corpus/symbolic/Melosol_Features.csv")
#--------------------------------------------------
# Swap Out Transposed Melodies Names
# Berkowitz334
# Berkowitz382
# Berkowitz417
# Berkowitz607
# Append the trailing "t" that the survey data uses for the four transposed
# Berkowitz melodies, so FANTASTIC file ids match the survey stimulus names.
# Any other value passes through unchanged. Unlike the repeated-subassignment
# original, this also tolerates NA entries instead of erroring.
accomodate_rename <- function(x){
  transposed <- c("Berkowitz334", "Berkowitz382", "Berkowitz417", "Berkowitz607")
  hits <- x %in% transposed
  x[hits] <- paste0(x[hits], "t")
  x
}
# Apply the suffix fix so FANTASTIC rows can be matched to survey stimuli.
fantastic_computations$file.id <- accomodate_rename(fantastic_computations$file.id)
#--------------------------------------------------
# Change Fantastic Name for Merge
fantastic_computations %>%
rename(stimulus = file.id) -> fantastic_computations
# Per-stimulus expert means: difficulty (2nd-year rating) and grammaticality.
# NOTE(review): group_by + mutate + unique emulates summarise(); the result
# stays grouped by stimulus, which later code undoes with ungroup().
dictation_survey %>%
select(stimulus, Difficulty_2nd_Year, Grammar) %>%
group_by(stimulus) %>%
mutate(mean_diff = mean(Difficulty_2nd_Year),
mean_gram = mean(Grammar)) %>%
select(stimulus, starts_with("mean")) %>%
unique() -> target
#--------------------------------------------------
# Merge Data
# Variables Wanted: All Fantastic,
#remove_transpose <- function(x){
# x %>%
# str_remove_all(pattern = "t$")
#}
# dictation_survey$stimulus <- remove_transpose(dictation_survey$stimulus)
# Left join on the shared "stimulus" key (inferred by dplyr).
target %>%
left_join(fantastic_computations) -> melody_data
#======================================================================================================
# Export Data For Correlation Table
#--------------------------------------------------
# Correlations of every FANTASTIC feature with the two expert ratings,
# strongest difficulty correlations first (printed, not stored).
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
arrange(-mean_diff)
#--------------------------------------------------
# FILE FOF DISSERATION
# Long-format feature/rating correlations for the dissertation bar chart.
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
# mutate(strength = abs(mean_diff) + abs(mean_gram)) %>%
gather(mean_diff, mean_gram, -rowname) %>%
rename(Feature = rowname, GroundTruth = mean_diff, corr = mean_gram) %>%
filter(Feature != "mean_gram") %>%
filter(Feature != "mean_diff") -> fantplotdata
fix_gt <- function(x) {
  # Map raw ground-truth column names to human-readable plot labels.
  labels <- c(mean_diff = "Mean Difficulty", mean_gram = "Mean Grammar")
  hits <- x %in% names(labels)
  x[hits] <- labels[x[hits]]
  x
}
fantplotdata$GroundTruth <- fix_gt(fantplotdata$GroundTruth)
fantplotdata %>%
ggplot(aes(x = reorder(Feature, corr), y = corr, group = GroundTruth)) +
coord_flip() +
scale_fill_viridis(discrete = TRUE) +
geom_bar(stat = "identity", aes(fill = GroundTruth)) +
labs(title = "Correlations Between FANTASTIC Features and Expert Ratings",
x = "FANTASTIC Features",
y = "r",
color = "Ground Truth") +
theme_minimal() -> fantastic_expert_plot
fantastic_expert_plot
# ggsave(filename = "document/img/FantasticExpertPlot.png", plot = fantastic_expert_plot)
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
arrange(-mean_diff, mean_gram) %>%
filter(rowname != "mean_diff" & rowname != "mean_gram") %>%
head(n = 5) -> feature_head
melody_data %>%
ungroup(stimulus) %>%
select(mean_diff:step.cont.loc.var) %>%
correlate() %>%
shave() %>%
select(rowname, mean_diff, mean_gram) %>%
arrange(-mean_diff, mean_gram) %>%
filter(rowname != "mean_diff" & rowname != "mean_gram") %>%
tail(n = 5) -> feature_tail
rbind(feature_head, feature_tail) %>%
mutate(Feature = rowname, Difficulty = mean_diff, Grammar = mean_gram) %>%
select(Feature, Difficulty, Grammar) %>%
knitr::kable(digits = 2) -> strong_features
strong_features
# write_rds(strong_features, path = "document/img/strongfeatures.rds")
#======================================================================================================
# Show Collinearity of Feature Items
fantastic_computations %>%
select(-stimulus, h.contour, starts_with("int")) %>%
select(i.abs.std, i.abs.mean, step.cont.loc.var, i.entropy, p.entropy,
d.median, d.eq.trans, mean.Yules.K, tonalness, mean.Simpsons.D, mode) %>%
mutate(mode = as.factor(mode)) %>%
ggpairs(title = "Feature Correlations") -> fantastic_collin
fantastic_collin
# ggsave(filename = "document/img/FANTASTIC_collin.png", plot = fantastic_collin)
#--------------------------------------------------
# Plot Melody Against Various Features
View(melody_data)
# Good Ones
ggplot(melody_data, aes(x = p.entropy, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Pitch Entropy", x = "Pitch Entropy", y = "Mean Difficulty") +
geom_smooth(method = 'lm', se = FALSE) +
ylim(c(0,100)) +
stat_cor(method = "pearson") +
theme_minimal() -> cow_pentropy
ggplot(melody_data, aes(x = tonalness, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Tonalness", x = "Tonalness", y = "") +
ylim(c(0,100)) +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
theme_minimal() -> cow_tonalness
ggplot(melody_data, aes(x = step.cont.loc.var, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Stepwise Contour\nLocal Variation", x = "Stepwise Contour", y = "") +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
theme_minimal() -> cow_stepcontlocalvar
ggplot(melody_data, aes(x = len, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Melody Length", x = "Melody Length", y = "") +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
theme_minimal() -> cow_len
# Bad Ones
ggplot(melody_data, aes(x = tonal.spike, y = mean_diff)) +
geom_point() + theme_minimal() +
labs(title = "Tonal Spike", x = "Tonal Spike", y = "Mean Difficulty") +
geom_smooth(method = 'lm', se = FALSE) +
ylim(c(0,100)) +
stat_cor(method = "pearson") +
theme_minimal() -> cow_tonalspike
ggplot(melody_data, aes(x = step.cont.glob.dir , y = mean_diff)) +
geom_point() + theme_minimal() +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
labs(title = "Stepwise Contour\nGlobal Direction", x = "Stepwise Contour: Global Direction", y = "") +
theme_minimal() -> cow_stpcontglobdir
ggplot(melody_data, aes(x = mean.entropy, y = mean_diff)) +
geom_point() + theme_minimal() +
geom_smooth(method = 'lm', se = FALSE) +
stat_cor(method = "pearson") +
ylim(c(0,100)) +
labs(title = "Mean Pitch Entropy", x = "Mean Pitch Entropy", y = "") +
theme_minimal() -> cow_meanentropy
ggplot(melody_data, aes(x = d.range, y = mean_diff)) +
geom_point() + theme_minimal() +
geom_smooth(method = 'lm', se = FALSE) +
ylim(c(0,100)) +
stat_cor(method = "pearson") +
labs(title = "Durational Range", x = "Durational Range", y = "") +
theme_minimal() -> cow_drange
#--------------------------------------------------
plot_grid(cow_pentropy, cow_tonalness, cow_stepcontlocalvar, cow_len,
cow_tonalspike, cow_stpcontglobdir, cow_meanentropy, cow_drange, nrow = 2, ncol = 4) -> univariate_features
univariate_features
# ggsave(filename = "document/img/univariate_cow.png")
#--------------------------------------------------
# Regression
model_feat1 <- lm(mean_diff ~ p.entropy + len + tonalness + step.cont.loc.var, data = melody_data)
model_feat2 <-lm(mean_diff ~ p.entropy + len, data = melody_data)
model_feat3 <- lm(mean_diff ~ p.entropy, data = melody_data)
summary(model_feat1)
summary(model_feat2)
summary(model_feat3)
#--------------------------------------------------
melody_data %>%
select(stimulus,mean_diff, mean_gram, p.range,p.entropy,len, note.dens,tonalness) %>%
pairs.panels()
model_dumb <- lm(mean_diff ~ p.entropy + len + tonalness + step.cont.loc.var, data = melody_data)
summary(model_dumb)
|
# Exploratory script: answers Question 2 of the NEI PM2.5 assignment with a
# base-graphics line plot of Baltimore City's total yearly emissions.
# NOTE(review): `setwd()` with a relative path makes the script fragile;
# consider running it from the project root instead.
setwd("../National Emissions Inventory - PM2.5 Emissions/")
getwd()
## Library
library(dplyr)
#library(ggplot2)
# NEI: emission records; SCC: source classification lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## See data
head(NEI)
head(SCC)
########################### Question 2 ###########################
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510")
#from 1999 to 2008? Use the base plotting system to make a plot answering this question.
# Keep only Baltimore City records, then sum emissions per year.
fips24510 <- subset(NEI, fips == "24510", select = c(Emissions, year))
fips24510_sum <- aggregate(fips24510["Emissions"], by=fips24510["year"], sum)
# Write the line plot to a 480x480 PNG file.
png("Plot2.png", width=480, height=480)
plot(fips24510_sum$year, fips24510_sum$Emissions, type = 'l', xlab = 'Years', ylab = 'PM2.5 Emissions')
title('Total emissions from PM2.5 over the years in Baltimore City')
dev.off()
# Answer: emissions decrease from 1999 to 2002, increase from 2002 until 2005, then go down again.
| /scripts/Plot2.R | no_license | jungoncalves/National-Emissions-Inventory---PM2.5-Emissions | R | false | false | 969 | r | setwd("../National Emissions Inventory - PM2.5 Emissions/")
getwd()
## Library
library(dplyr)
#library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## See data
head(NEI)
head(SCC)
########################### Question 2 ###########################
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510")
#from 1999 to 2008? Use the base plotting system to make a plot answering this question.
fips24510 <- subset(NEI, fips == "24510", select = c(Emissions, year))
fips24510_sum <- aggregate(fips24510["Emissions"], by=fips24510["year"], sum)
png("Plot2.png", width=480, height=480)
plot(fips24510_sum$year, fips24510_sum$Emissions, type = 'l', xlab = 'Years', ylab = 'PM2.5 Emissions')
title('Total emissions from PM2.5 over the years in Baltimore City')
dev.off()
#R: The emissions decrease from 1999 to 2002, then increase from 2002i intil 2005 them goes down
|
\name{findDFC}
\alias{findDFC}
\title{
Finds the double first cousins in a pedigree}
\description{
Given a pedigree, all pairs of individuals that are double first cousins are returned.}
\usage{
findDFC(pedigree, exact = FALSE, parallel = FALSE,
ncores = getOption("mc.cores", 2L))
}
\arguments{
\item{pedigree }{
A pedigree with columns organized: ID, Dam, Sire
}
\item{exact }{
A logical statement indicating if individuals who are exactly double first cousins are to be identified
}
\item{parallel }{
A logical statement indicating if parallelization should be attempted. Note, only reliable for Mac and Linux operating systems.
}
\item{ncores }{
Number of cpus to use, default is maximum available
}
}
\details{
When exact = TRUE, only those individuals whose grandparents are completely unrelated will be identified as double first cousins. When exact = FALSE, as long as the parents of individuals i and j are two sets of siblings (i.e., either sires full brothers/dams full sisters or two pairs of opposite sex full sibs) then i and j will be considered double first cousins. In the event where the grandparents of i and j are also related, exact = FALSE will still consider i and j double first cousins, even though genetically they will be more related than exact = TRUE double first cousins.
\code{parallel} = TRUE should only be used on Linux or Mac OSes (i.e., not Windows).
}
\value{
\item{PedPositionList }{gives the list of row numbers for all the pairs of individuals that are related as double first cousins
}
\item{DFC }{gives the list of IDs, as characters, for all the pairs of individuals that are related as double first cousins
}
\item{FamilyCnt }{If two individuals, i and j, are double first cousins, then i's siblings will also be double first cousins with j's siblings. Therefore, this is the total number of family pairs where offspring are related as double first cousins.
}
}
\author{\email{matthewwolak@gmail.com}
}
| /man/findDFC.Rd | no_license | luansheng/nadiv | R | false | false | 1,961 | rd | \name{findDFC}
\alias{findDFC}
\title{
Finds the double first cousins in a pedigree}
\description{
Given a pedigree, all pairs of individuals that are double first cousins are returned.}
\usage{
findDFC(pedigree, exact = FALSE, parallel = FALSE,
ncores = getOption("mc.cores", 2L))
}
\arguments{
\item{pedigree }{
A pedigree with columns organized: ID, Dam, Sire
}
\item{exact }{
A logical statement indicating if individuals who are exactly double first cousins are to be identified
}
\item{parallel }{
A logical statement indicating if parallelization should be attempted. Note, only reliable for Mac and Linux operating systems.
}
\item{ncores }{
Number of cpus to use, default is maximum available
}
}
\details{
When exact = TRUE, only those individuals whose grandparents are completely unrelated will be identified as double first cousins. When exact = FALSE, as long as the parents of individuals i and j are two sets of siblings (i.e., either sires full brothers/dams full sisters or two pairs of opposite sex full sibs) then i and j will be considered double first cousins. In the event where the grandparents of i and j are also related, exact = FALSE will still consider i and j double first cousins, even though genetically they will be more related than exact = TRUE double first cousins.
\code{parallel} = TRUE should only be used on Linux or Mac OSes (i.e., not Windows).
}
\value{
\item{PedPositionList }{gives the list of row numbers for all the pairs of individuals that are related as double first cousins
}
\item{DFC }{gives the list of IDs, as characters, for all the pairs of individuals that are related as double first cousins
}
\item{FamilyCnt }{If two individuals, i and j, are double first cousins, then i's siblings will also be double first cousins with j's siblings. Therefore, this is the total number of family pairs where offspring are related as double first cousins.
}
}
\author{\email{matthewwolak@gmail.com}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config.R
\name{config}
\alias{config}
\alias{firebase_config}
\title{Config}
\usage{
firebase_config(
api_key,
project_id,
auth_domain = NULL,
storage_bucket = NULL,
app_id = NULL,
database_url = NULL,
overwrite = FALSE
)
}
\arguments{
\item{api_key}{API key of your project.}
\item{project_id}{Id of your web project.}
\item{auth_domain}{Authentication domain, if \code{NULL}
attempts to build firebase's default domain.}
\item{storage_bucket}{URl to the bucket. if \code{NULL}
attempts to build firebase's default storage domain.}
\item{app_id}{Application ID, necessary for Analytics.}
\item{database_url}{URL to the database, required to use
the \code{RealtimeDatabase}.}
\item{overwrite}{Whether to overwrite any existing configuration file.}
}
\value{
Path to file.
}
\description{
Configure Firebase, either using a config file or by setting
environment variables (see section below).
}
\details{
Creates the configuration file necessary for running firebase.
Note that if you changed the project you must use said ID
here, not the one originally created by Google.
Classes of the package look first for the configuration file
then, if not found look for the environment variables.
}
\note{
Do not share this file with anyone.
}
\section{Environment Variables}{
\itemize{
\item \code{FIREBASE_API_KEY}
\item \code{FIREBASE_PROJECT_ID}
\item \code{FIREBASE_AUTH_DOMAIN}
\item \code{FIREBASE_STORAGE_BUCKET}
\item \code{FIREBASE_APP_ID}
\item \code{FIREBASE_DATABASE_URL}
}
}
\examples{
\dontrun{firebase_config("xXxxx", "my-project")}
}
| /man/config.Rd | no_license | cran/firebase | R | false | true | 1,645 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config.R
\name{config}
\alias{config}
\alias{firebase_config}
\title{Config}
\usage{
firebase_config(
api_key,
project_id,
auth_domain = NULL,
storage_bucket = NULL,
app_id = NULL,
database_url = NULL,
overwrite = FALSE
)
}
\arguments{
\item{api_key}{API key of your project.}
\item{project_id}{Id of your web project.}
\item{auth_domain}{Authentication domain, if \code{NULL}
attempts to build firebase's default domain.}
\item{storage_bucket}{URl to the bucket. if \code{NULL}
attempts to build firebase's default storage domain.}
\item{app_id}{Application ID, necessary for Analytics.}
\item{database_url}{URL to the database, required to use
the \code{RealtimeDatabase}.}
\item{overwrite}{Whether to overwrite any existing configuration file.}
}
\value{
Path to file.
}
\description{
Configure Firebase, either using a config file or by setting
environment variables (see section below).
}
\details{
Creates the configuration file necessary for running firebase.
Note that if you changed the project you must use said ID
here, not the one originally created by Google.
Classes of the package look first for the configuration file
then, if not found look for the environment variables.
}
\note{
Do not share this file with anyone.
}
\section{Environment Variables}{
\itemize{
\item \code{FIREBASE_API_KEY}
\item \code{FIREBASE_PROJECT_ID}
\item \code{FIREBASE_AUTH_DOMAIN}
\item \code{FIREBASE_STORAGE_BUCKET}
\item \code{FIREBASE_APP_ID}
\item \code{FIREBASE_DATABASE_URL}
}
}
\examples{
\dontrun{firebase_config("xXxxx", "my-project")}
}
|
#' Subset cells/rows/columns/geometries using their positions
#'
#' @description
#'
#' `slice()` lets you index cells/rows/columns/geometries by their (integer)
#' locations. It allows you to select, remove, and duplicate those dimensions
#' of a Spat* object.
#'
#' **If you want to slice your SpatRaster by geographic coordinates** use
#' [filter.SpatRaster()] method.
#'
#' It is accompanied by a number of helpers for common use cases:
#'
#' - `slice_head()` and `slice_tail()` select the first or last
#' cells/geometries.
#'
#' - `slice_sample()` randomly selects cells/geometries.
#'
#' - `slice_rows()` and `slice_cols()` allow to subset entire rows or columns,
#' of a SpatRaster.
#'
#' - `slice_colrows()` subsets regions of the raster by row and column position
#' of a SpatRaster.
#'
#' You can get a skeleton of your SpatRaster with the cell, column and row
#' index with [as_coordinates()].
#'
#' See **Methods** for details.
#'
#' @export
#' @rdname slice.Spat
#' @name slice.Spat
#'
#' @seealso
#' [dplyr::slice()], [terra::spatSample()].
#'
#' You can get a skeleton of your SpatRaster with the cell, column and row
#' index with [as_coordinates()].
#'
#' If you want to slice by geographic coordinates use [filter.SpatRaster()].
#'
#' @family single table verbs
#' @family dplyr.rows
#' @family dplyr.methods
#'
#' @return A Spat* object of the same class than `.data`. See **Methods**.
#'
#' @importFrom dplyr slice
#'
#' @inheritParams mutate.Spat
#'
#' @param .preserve Ignored for Spat* objects
#' @param .keep_extent Should the extent of the resulting SpatRaster be kept?
#' See also [terra::trim()], [terra::extend()].
#' @param ... [`data-masking`][dplyr::slice] Integer row values. Provide
#' either positive values to keep, or negative values to drop.
#'
#' The values provided must be either all positive or all negative. Indices
#' beyond the number of rows in the input are silently ignored.
#' See **Methods**.
#' @param cols,rows Integer col/row values of the SpatRaster
#' @param inverse If `TRUE`, `.data` is inverse-masked to the given selection.
#' See [terra::mask()].
#' @param na.rm Logical, should cells that present a value of `NA` removed when
#' computing `slice_min()/slice_max()`?. The default is `TRUE`.
#' @inheritParams dplyr::slice
#'
#' @section terra equivalent:
#'
#' [terra::subset()], [terra::spatSample()]
#'
#' @section Methods:
#'
#' Implementation of the **generic** [dplyr::slice()] function.
#'
#' ## SpatRaster
#'
#' The result is a SpatRaster with the crs and resolution of the input and
#' where cell values of the selected cells/columns/rows are preserved.
#'
#' Use `.keep_extent = TRUE` to preserve the extent of `.data` on the output.
#' The non-selected cells would present a value of `NA`.
#'
#' ## SpatVector
#'
#' The result is a SpatVector where the attributes of the selected
#' geometries are preserved. If `.data` is a
#' [grouped SpatVector][is_grouped_spatvector], the operation will be performed
#' on each group, so that (e.g.) `slice_head(df, n = 5)` will select the first
#' five rows in each group.
#'
#' @examples
#'
#'
#' library(terra)
#'
#' f <- system.file("extdata/cyl_temp.tif", package = "tidyterra")
#' r <- rast(f)
#'
#' # Slice first 100 cells
#' r %>%
#' slice(1:100) %>%
#' plot()
#'
#' # Rows
#' r %>%
#' slice_rows(1:30) %>%
#' plot()
#'
#' # Cols
#' r %>%
#' slice_cols(-(20:50)) %>%
#' plot()
#'
#' # Spatial sample
#' r %>%
#' slice_sample(prop = 0.2) %>%
#' plot()
#'
#'
#' # Slice regions
#' r %>%
#' slice_colrows(
#' cols = c(20:40, 60:80),
#' rows = -c(1:20, 30:50)
#' ) %>%
#' plot()
#'
#' # Group wise operation with SpatVectors--------------------------------------
#' v <- terra::vect(system.file("ex/lux.shp", package = "terra"))
#'
#' \donttest{
#' glimpse(v) %>% autoplot(aes(fill = NAME_1))
#'
#' gv <- v %>% group_by(NAME_1)
#' # All slice helpers operate per group, silently truncating to the group size
#' gv %>%
#' slice_head(n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' gv %>%
#' slice_tail(n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' gv %>%
#' slice_min(AREA, n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' gv %>%
#' slice_max(AREA, n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' }
slice.SpatRaster <- function(.data, ..., .preserve = FALSE,
                             .keep_extent = FALSE) {
  # Flatten the raster to a cell/row/col index table, slice that table,
  # and keep only the cells whose index survived.
  cell_tbl <- as_coordinates(.data)
  keep <- dplyr::slice(cell_tbl, ...)$cellindex
  # Blank out every cell that was not selected.
  out <- .data
  out[setdiff(cell_tbl$cellindex, keep)] <- NA
  # With .keep_extent the raster keeps its original extent, NA-filled.
  if (.keep_extent) {
    return(out)
  }
  # Otherwise crop to the contiguous cell-index range spanning the selection.
  cell_span <- range(keep)
  out[seq(cell_span[1], cell_span[2], by = 1), drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice.SpatVector <- function(.data, ..., .preserve = FALSE) {
  # Run dplyr::slice() on the attribute table, tracking original row
  # positions so the matching geometries can be pulled back afterwards.
  attrs <- as_tibble(.data)
  idx_col <- make_safe_index("tterra_index", attrs)
  attrs[[idx_col]] <- seq_len(nrow(attrs))
  picked <- dplyr::slice(attrs, ..., .preserve = .preserve)
  out <- .data[as.integer(picked[[idx_col]]), ]
  # Restore grouping metadata (relevant for grouped SpatVectors).
  group_prepare_spat(out, picked)
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_head
slice_head.SpatRaster <- function(.data, ..., n, prop, .keep_extent = FALSE) {
  # Select the first cells by slicing the head of the cell-index table.
  cell_tbl <- as_coordinates(.data)
  keep <- dplyr::slice_head(cell_tbl, ..., n = n, prop = prop)$cellindex
  # Blank out every cell that was not selected.
  out <- .data
  out[setdiff(cell_tbl$cellindex, keep)] <- NA
  # With .keep_extent the raster keeps its original extent, NA-filled.
  if (.keep_extent) {
    return(out)
  }
  # Otherwise shrink the raster to the selected cells.
  out[keep, drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_head.SpatVector <- function(.data, ..., n, prop) {
  # Run dplyr::slice_head() on the attribute table, tracking original row
  # positions so the matching geometries can be pulled back afterwards.
  attrs <- as_tibble(.data)
  idx_col <- make_safe_index("tterra_index", attrs)
  attrs[[idx_col]] <- seq_len(nrow(attrs))
  picked <- dplyr::slice_head(attrs, ..., n = n, prop = prop)
  out <- .data[as.integer(picked[[idx_col]]), ]
  # Restore grouping metadata (relevant for grouped SpatVectors).
  group_prepare_spat(out, picked)
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_tail
slice_tail.SpatRaster <- function(.data, ..., n, prop, .keep_extent = FALSE) {
  # Select the last cells by slicing the tail of the cell-index table.
  cell_tbl <- as_coordinates(.data)
  keep <- dplyr::slice_tail(cell_tbl, ..., n = n, prop = prop)$cellindex
  # Blank out every cell that was not selected.
  out <- .data
  out[setdiff(cell_tbl$cellindex, keep)] <- NA
  # With .keep_extent the raster keeps its original extent, NA-filled.
  if (.keep_extent) {
    return(out)
  }
  # Otherwise shrink the raster to the selected cells.
  out[keep, drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_tail.SpatVector <- function(.data, ..., n, prop) {
  # Run dplyr::slice_tail() on the attribute table, tracking original row
  # positions so the matching geometries can be pulled back afterwards.
  attrs <- as_tibble(.data)
  idx_col <- make_safe_index("tterra_index", attrs)
  attrs[[idx_col]] <- seq_len(nrow(attrs))
  picked <- dplyr::slice_tail(attrs, ..., n = n, prop = prop)
  out <- .data[as.integer(picked[[idx_col]]), ]
  # Restore grouping metadata (relevant for grouped SpatVectors).
  group_prepare_spat(out, picked)
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_min
slice_min.SpatRaster <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, .keep_extent = FALSE,
                                 na.rm = TRUE) {
  # Strategy: flatten the raster to a tibble (cell/row/col indexes plus one
  # column per layer), let dplyr::slice_min() rank and pick rows, then map
  # the surviving cell indexes back onto the raster.
  # Create skeleton
  skeleton <- as_coordinates(.data)
  values <- as_tibble(.data, na.rm = FALSE, xy = FALSE)
  # Suffix the index columns so they cannot clash with layer names.
  names(skeleton) <- paste0(names(skeleton), ".tidyterra")
  # Add values
  skeleton <- dplyr::bind_cols(skeleton, values)
  # na.rm = TRUE drops any cell that has an NA in any layer before ranking.
  if (na.rm) skeleton <- tidyr::drop_na(skeleton)
  sliced <- dplyr::slice_min(skeleton,
    order_by = {{ order_by }},
    ..., n = n, prop = prop,
    with_ties = with_ties
  )
  keepcells <- sliced$cellindex.tidyterra
  # Every non-selected cell becomes NA.
  tonas <- setdiff(skeleton$cellindex.tidyterra, keepcells)
  newrast <- .data
  newrast[tonas] <- NA
  # With keep_extent we just replaced the cells with NAs
  if (.keep_extent) {
    return(newrast)
  }
  # Crop to the contiguous cell-index range spanning the selection.
  range <- range(keepcells)
  keepindex <- seq(range[1], range[2], by = 1)
  newrast <- newrast[keepindex, drop = FALSE]
  return(newrast)
}
#' @export
#' @rdname slice.Spat
slice_min.SpatVector <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, na_rm = FALSE) {
  # Delegate to dplyr::slice_min() on the attribute table, tracking original
  # row positions so the matching geometries can be recovered afterwards.
  tbl <- as_tibble(.data)
  ind <- make_safe_index("tterra_index", tbl)
  tbl[[ind]] <- seq_len(nrow(tbl))
  # Bug fix: `...` was previously forwarded twice in this call; it is now
  # passed once, matching the other SpatVector slice methods.
  sliced <- dplyr::slice_min(tbl,
    order_by = {{ order_by }}, ..., n = n,
    prop = prop, with_ties = with_ties, na_rm = na_rm
  )
  # Regenerate the SpatVector from the surviving rows (per group if grouped).
  vend <- .data[as.integer(sliced[[ind]]), ]
  vend <- group_prepare_spat(vend, sliced)
  vend
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_max
slice_max.SpatRaster <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, .keep_extent = FALSE,
                                 na.rm = TRUE) {
  # Strategy: flatten the raster to a tibble (cell/row/col indexes plus one
  # column per layer), let dplyr::slice_max() rank and pick rows, then map
  # the surviving cell indexes back onto the raster.
  # Create skeleton
  skeleton <- as_coordinates(.data)
  values <- as_tibble(.data, na.rm = FALSE, xy = FALSE)
  # Suffix the index columns so they cannot clash with layer names.
  names(skeleton) <- paste0(names(skeleton), ".tidyterra")
  # Add values
  skeleton <- dplyr::bind_cols(skeleton, values)
  # na.rm = TRUE drops any cell that has an NA in any layer before ranking.
  if (na.rm) skeleton <- tidyr::drop_na(skeleton)
  sliced <- dplyr::slice_max(skeleton,
    order_by = {{ order_by }},
    ..., n = n, prop = prop,
    with_ties = with_ties
  )
  keepcells <- sliced$cellindex.tidyterra
  # Every non-selected cell becomes NA.
  tonas <- setdiff(skeleton$cellindex.tidyterra, keepcells)
  newrast <- .data
  newrast[tonas] <- NA
  # With keep_extent we just replaced the cells with NAs
  if (.keep_extent) {
    return(newrast)
  }
  # Crop to the contiguous cell-index range spanning the selection.
  range <- range(keepcells)
  keepindex <- seq(range[1], range[2], by = 1)
  newrast <- newrast[keepindex, drop = FALSE]
  return(newrast)
}
#' @export
#' @rdname slice.Spat
slice_max.SpatVector <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, na_rm = FALSE) {
  # Delegate to dplyr::slice_max() on the attribute table, tracking original
  # row positions so the matching geometries can be recovered afterwards.
  tbl <- as_tibble(.data)
  ind <- make_safe_index("tterra_index", tbl)
  tbl[[ind]] <- seq_len(nrow(tbl))
  # Bug fix: `...` was previously forwarded twice in this call; it is now
  # passed once, matching the other SpatVector slice methods.
  sliced <- dplyr::slice_max(tbl,
    order_by = {{ order_by }}, ..., n = n,
    prop = prop, with_ties = with_ties, na_rm = na_rm
  )
  # Regenerate the SpatVector from the surviving rows (per group if grouped).
  vend <- .data[as.integer(sliced[[ind]]), ]
  vend <- group_prepare_spat(vend, sliced)
  vend
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_sample
slice_sample.SpatRaster <- function(.data, ..., n, prop,
                                    weight_by = NULL, replace = FALSE,
                                    .keep_extent = FALSE) {
  # Strategy: flatten the raster to a tibble of cell indexes plus layer
  # values, sample rows with dplyr::slice_sample(), then map the surviving
  # cell indexes back onto the raster.
  # Create skeleton
  skeleton <- as_coordinates(.data)
  values <- as_tibble(.data, na.rm = FALSE, xy = FALSE)
  # Suffix the index columns so they cannot clash with layer names.
  names(skeleton) <- paste0(names(skeleton), ".tidyterra")
  # Add values
  skeleton <- dplyr::bind_cols(skeleton, values)
  sliced <- dplyr::slice_sample(skeleton, ...,
    n = n,
    prop = prop, weight_by = weight_by,
    replace = replace
  )
  keepcells <- sliced$cellindex.tidyterra
  # Every non-sampled cell becomes NA.
  tonas <- setdiff(skeleton$cellindex.tidyterra, keepcells)
  newrast <- .data
  newrast[tonas] <- NA
  # With keep_extent we just replaced the cells with NAs
  if (.keep_extent) {
    return(newrast)
  }
  # Crop to the contiguous cell-index range spanning the sample.
  range <- range(keepcells)
  keepindex <- seq(range[1], range[2], by = 1)
  newrast <- newrast[keepindex, drop = FALSE]
  return(newrast)
}
#' @export
#' @rdname slice.Spat
slice_sample.SpatVector <- function(.data, ..., n, prop,
                                    weight_by = NULL, replace = FALSE) {
  # Sample rows of the attribute table with dplyr::slice_sample(), tracking
  # original row positions so the matching geometries can be recovered.
  tbl <- as_tibble(.data)
  ind <- make_safe_index("tterra_index", tbl)
  tbl[[ind]] <- seq_len(nrow(tbl))
  # Bug fix: `weight_by` was accepted but never forwarded, so weighted
  # sampling silently behaved as unweighted. It is data-masked in dplyr,
  # hence the embrace operator.
  sliced <- dplyr::slice_sample(tbl, ...,
    n = n, prop = prop,
    weight_by = {{ weight_by }}, replace = replace
  )
  # Regenerate the SpatVector from the surviving rows (per group if grouped).
  vend <- .data[as.integer(sliced[[ind]]), ]
  vend <- group_prepare_spat(vend, sliced)
  vend
}
#' @export
#' @rdname slice.Spat
slice_rows <- function(.data, ...) {
  # S3 generic for subsetting whole raster rows; see slice_rows.SpatRaster.
  UseMethod("slice_rows")
}
#' @export
#' @rdname slice.Spat
slice_rows.SpatRaster <- function(.data, ..., .keep_extent = FALSE) {
  # Reduce the skeleton to one entry per raster row, slice those positions
  # with dplyr::slice(), and keep the cells in the selected rows.
  # Create skeleton
  skeleton <- as_coordinates(.data)
  index <- skeleton["rowindex"]
  index$rowindex <- sort(index$rowindex)
  index <- dplyr::distinct(index)
  slice_dim <- dplyr::slice(index, ...)
  # Keep the cells that fall in the selected rows.
  sliced <- dplyr::inner_join(skeleton,
    slice_dim,
    by = "rowindex"
  )
  keepcells <- sliced$cellindex
  # Every cell outside the selected rows becomes NA.
  tonas <- setdiff(skeleton$cellindex, keepcells)
  newrast <- .data
  newrast[tonas] <- NA
  # With keep_extent we just replaced the cells with NAs
  if (.keep_extent) {
    return(newrast)
  }
  # Crop to the contiguous row range spanning the selection.
  range <- range(slice_dim$rowindex)
  keepindex <- seq(range[1], range[2], by = 1)
  newrast <- newrast[keepindex, , drop = FALSE]
  return(newrast)
}
#' @export
#' @rdname slice.Spat
slice_cols <- function(.data, ...) {
  # S3 generic for subsetting whole raster columns; see slice_cols.SpatRaster.
  UseMethod("slice_cols")
}
#' @export
#' @rdname slice.Spat
slice_cols.SpatRaster <- function(.data, ..., .keep_extent = FALSE) {
  # Reduce the skeleton to one entry per raster column, slice those positions
  # with dplyr::slice(), and keep the cells in the selected columns.
  # Create skeleton
  skeleton <- as_coordinates(.data)
  index <- skeleton["colindex"]
  index$colindex <- sort(index$colindex)
  index <- dplyr::distinct(index)
  slice_dim <- dplyr::slice(index, ...)
  # Keep the cells that fall in the selected columns.
  sliced <- dplyr::inner_join(skeleton,
    slice_dim,
    by = "colindex"
  )
  keepcells <- sliced$cellindex
  # Every cell outside the selected columns becomes NA.
  tonas <- setdiff(skeleton$cellindex, keepcells)
  newrast <- .data
  newrast[tonas] <- NA
  # With keep_extent we just replaced the cells with NAs
  if (.keep_extent) {
    return(newrast)
  }
  # Crop to the contiguous column range spanning the selection.
  range <- range(slice_dim$colindex)
  keepindex <- seq(range[1], range[2], by = 1)
  newrast <- newrast[, keepindex, drop = FALSE]
  return(newrast)
}
#' @export
#' @rdname slice.Spat
slice_colrows <- function(.data, ...) {
  # S3 generic for subsetting raster regions by row and column position;
  # see slice_colrows.SpatRaster.
  UseMethod("slice_colrows")
}
#' @export
#' @rdname slice.Spat
slice_colrows.SpatRaster <- function(.data, ..., cols, rows,
                                     .keep_extent = FALSE,
                                     inverse = FALSE) {
  # Keep the cells at the intersection of the column positions in `cols` and
  # the row positions in `rows`; everything else becomes NA (and the raster
  # is cropped to the selection unless .keep_extent = TRUE).
  # NOTE(review): `inverse` is documented but currently has no effect in this
  # implementation -- confirm the intended semantics before relying on it.
  # Cell/row/col index table of the raster. (A duplicate, unused computation
  # of the column index that preceded this was removed as dead code.)
  skeleton <- as_coordinates(.data)
  # Column positions to keep
  col_index <- skeleton["colindex"]
  col_index$colindex <- sort(col_index$colindex)
  col_index <- dplyr::distinct(col_index)
  slice_cols <- dplyr::slice(col_index, cols)
  # Row positions to keep
  row_index <- skeleton["rowindex"]
  row_index$rowindex <- sort(row_index$rowindex)
  row_index <- dplyr::distinct(row_index)
  slice_rows <- dplyr::slice(row_index, rows)
  # Cells at the intersection of the selected columns and rows survive.
  sliced <- dplyr::inner_join(skeleton,
    slice_cols,
    by = "colindex"
  )
  sliced <- dplyr::inner_join(sliced,
    slice_rows,
    by = "rowindex"
  )
  keepcells <- sliced$cellindex
  # Every other cell becomes NA.
  tonas <- setdiff(skeleton$cellindex, keepcells)
  newrast <- .data
  newrast[tonas] <- NA
  # With keep_extent we just replaced the cells with NAs
  if (.keep_extent) {
    return(newrast)
  }
  # Crop to the bounding row/column range of the selection.
  range_col <- range(sliced$colindex)
  keepindex_col <- seq(range_col[1], range_col[2], by = 1)
  range_row <- range(sliced$rowindex)
  keepindex_row <- seq(range_row[1], range_row[2], by = 1)
  newrast <- newrast[keepindex_row, keepindex_col, drop = FALSE]
  return(newrast)
}
# Re-export dplyr's slice generics so tidyterra users can call them without
# attaching dplyr explicitly.
#' @export
dplyr::slice
#' @export
dplyr::slice_head
#' @export
dplyr::slice_max
#' @export
dplyr::slice_min
#' @export
dplyr::slice_tail
#' @export
dplyr::slice_sample
| /R/slice-Spat.R | permissive | dieghernan/tidyterra | R | false | false | 16,058 | r | #' Subset cells/rows/columns/geometries using their positions
#'
#' @description
#'
#' `slice()` lets you index cells/rows/columns/geometries by their (integer)
#' locations. It allows you to select, remove, and duplicate those dimensions
#' of a Spat* object.
#'
#' **If you want to slice your SpatRaster by geographic coordinates** use
#' [filter.SpatRaster()] method.
#'
#' It is accompanied by a number of helpers for common use cases:
#'
#' - `slice_head()` and `slice_tail()` select the first or last
#' cells/geometries.
#'
#' - `slice_sample()` randomly selects cells/geometries.
#'
#' - `slice_rows()` and `slice_cols()` allow to subset entire rows or columns,
#' of a SpatRaster.
#'
#' - `slice_colrows()` subsets regions of the raster by row and column position
#' of a SpatRaster.
#'
#' You can get a skeleton of your SpatRaster with the cell, column and row
#' index with [as_coordinates()].
#'
#' See **Methods** for details.
#'
#' @export
#' @rdname slice.Spat
#' @name slice.Spat
#'
#' @seealso
#' [dplyr::slice()], [terra::spatSample()].
#'
#' You can get a skeleton of your SpatRaster with the cell, column and row
#' index with [as_coordinates()].
#'
#' If you want to slice by geographic coordinates use [filter.SpatRaster()].
#'
#' @family single table verbs
#' @family dplyr.rows
#' @family dplyr.methods
#'
#' @return A Spat* object of the same class than `.data`. See **Methods**.
#'
#' @importFrom dplyr slice
#'
#' @inheritParams mutate.Spat
#'
#' @param .preserve Ignored for Spat* objects
#' @param .keep_extent Should the extent of the resulting SpatRaster be kept?
#' See also [terra::trim()], [terra::extend()].
#' @param ... [`data-masking`][dplyr::slice] Integer row values. Provide
#' either positive values to keep, or negative values to drop.
#'
#' The values provided must be either all positive or all negative. Indices
#' beyond the number of rows in the input are silently ignored.
#' See **Methods**.
#' @param cols,rows Integer col/row values of the SpatRaster
#' @param inverse If `TRUE`, `.data` is inverse-masked to the given selection.
#' See [terra::mask()].
#' @param na.rm Logical, should cells that present a value of `NA` removed when
#' computing `slice_min()/slice_max()`?. The default is `TRUE`.
#' @inheritParams dplyr::slice
#'
#' @section terra equivalent:
#'
#' [terra::subset()], [terra::spatSample()]
#'
#' @section Methods:
#'
#' Implementation of the **generic** [dplyr::slice()] function.
#'
#' ## SpatRaster
#'
#' The result is a SpatRaster with the crs and resolution of the input and
#' where cell values of the selected cells/columns/rows are preserved.
#'
#' Use `.keep_extent = TRUE` to preserve the extent of `.data` on the output.
#' The non-selected cells would present a value of `NA`.
#'
#' ## SpatVector
#'
#' The result is a SpatVector where the attributes of the selected
#' geometries are preserved. If `.data` is a
#' [grouped SpatVector][is_grouped_spatvector], the operation will be performed
#' on each group, so that (e.g.) `slice_head(df, n = 5)` will select the first
#' five rows in each group.
#'
#' @examples
#'
#'
#' library(terra)
#'
#' f <- system.file("extdata/cyl_temp.tif", package = "tidyterra")
#' r <- rast(f)
#'
#' # Slice first 100 cells
#' r %>%
#' slice(1:100) %>%
#' plot()
#'
#' # Rows
#' r %>%
#' slice_rows(1:30) %>%
#' plot()
#'
#' # Cols
#' r %>%
#' slice_cols(-(20:50)) %>%
#' plot()
#'
#' # Spatial sample
#' r %>%
#' slice_sample(prop = 0.2) %>%
#' plot()
#'
#'
#' # Slice regions
#' r %>%
#' slice_colrows(
#' cols = c(20:40, 60:80),
#' rows = -c(1:20, 30:50)
#' ) %>%
#' plot()
#'
#' # Group wise operation with SpatVectors--------------------------------------
#' v <- terra::vect(system.file("ex/lux.shp", package = "terra"))
#'
#' \donttest{
#' glimpse(v) %>% autoplot(aes(fill = NAME_1))
#'
#' gv <- v %>% group_by(NAME_1)
#' # All slice helpers operate per group, silently truncating to the group size
#' gv %>%
#' slice_head(n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' gv %>%
#' slice_tail(n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' gv %>%
#' slice_min(AREA, n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' gv %>%
#' slice_max(AREA, n = 1) %>%
#' glimpse() %>%
#' autoplot(aes(fill = NAME_1))
#' }
slice.SpatRaster <- function(.data, ..., .preserve = FALSE,
                             .keep_extent = FALSE) {
  # Index cells of a SpatRaster by position, dplyr::slice()-style.
  # Non-selected cells are set to NA; unless `.keep_extent` is TRUE the
  # raster is then cropped to the span covering the selected cells.
  cell_index <- as_coordinates(.data)
  selected <- dplyr::slice(cell_index, ...)$cellindex
  out <- .data
  # Blank out every cell that was not selected.
  out[setdiff(cell_index$cellindex, selected)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the contiguous index range covering the selection.
  cell_span <- range(selected)
  out <- out[seq(cell_span[1], cell_span[2], by = 1), drop = FALSE]
  out
}
#' @export
#' @rdname slice.Spat
slice.SpatVector <- function(.data, ..., .preserve = FALSE) {
  # Slice the attribute table, then subset the geometries to match.
  attrs <- as_tibble(.data)
  # A row-id column whose name cannot collide with existing attributes.
  row_id <- make_safe_index("tterra_index", attrs)
  attrs[[row_id]] <- seq_len(nrow(attrs))
  kept <- dplyr::slice(attrs, ..., .preserve = .preserve)
  # Rebuild the SpatVector from the surviving rows, restoring grouping.
  out <- .data[as.integer(kept[[row_id]]), ]
  group_prepare_spat(out, kept)
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_head
slice_head.SpatRaster <- function(.data, ..., n, prop, .keep_extent = FALSE) {
  # Keep the first cells of the raster, dplyr::slice_head()-style.
  cell_index <- as_coordinates(.data)
  picked <- dplyr::slice_head(cell_index, ..., n = n, prop = prop)
  selected <- picked$cellindex
  out <- .data
  # Non-selected cells become NA.
  out[setdiff(cell_index$cellindex, selected)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the selected cells.
  out[selected, drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_head.SpatVector <- function(.data, ..., n, prop) {
  # Keep the first rows of the attribute table, then the matching geometries.
  attrs <- as_tibble(.data)
  row_id <- make_safe_index("tterra_index", attrs)
  attrs[[row_id]] <- seq_len(nrow(attrs))
  kept <- dplyr::slice_head(attrs, ..., n = n, prop = prop)
  out <- .data[as.integer(kept[[row_id]]), ]
  group_prepare_spat(out, kept)
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_tail
slice_tail.SpatRaster <- function(.data, ..., n, prop, .keep_extent = FALSE) {
  # Keep the last cells of the raster, dplyr::slice_tail()-style.
  cell_index <- as_coordinates(.data)
  picked <- dplyr::slice_tail(cell_index, ..., n = n, prop = prop)
  selected <- picked$cellindex
  out <- .data
  # Non-selected cells become NA.
  out[setdiff(cell_index$cellindex, selected)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the selected cells.
  out[selected, drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_tail.SpatVector <- function(.data, ..., n, prop) {
  # Keep the last rows of the attribute table, then the matching geometries.
  attrs <- as_tibble(.data)
  row_id <- make_safe_index("tterra_index", attrs)
  attrs[[row_id]] <- seq_len(nrow(attrs))
  kept <- dplyr::slice_tail(attrs, ..., n = n, prop = prop)
  out <- .data[as.integer(kept[[row_id]]), ]
  group_prepare_spat(out, kept)
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_min
slice_min.SpatRaster <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, .keep_extent = FALSE,
                                 na.rm = TRUE) {
  # Keep the cells with the smallest `order_by` values.
  cell_index <- as_coordinates(.data)
  # Suffix the index columns so they cannot clash with layer names.
  names(cell_index) <- paste0(names(cell_index), ".tidyterra")
  layer_values <- as_tibble(.data, na.rm = FALSE, xy = FALSE)
  cell_index <- dplyr::bind_cols(cell_index, layer_values)
  # Optionally drop cells with NA values before ranking.
  if (na.rm) {
    cell_index <- tidyr::drop_na(cell_index)
  }
  picked <- dplyr::slice_min(cell_index,
    order_by = {{ order_by }},
    ..., n = n, prop = prop,
    with_ties = with_ties
  )
  selected <- picked$cellindex.tidyterra
  out <- .data
  # Non-selected cells become NA.
  out[setdiff(cell_index$cellindex.tidyterra, selected)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the contiguous index range covering the selection.
  cell_span <- range(selected)
  out[seq(cell_span[1], cell_span[2], by = 1), drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_min.SpatVector <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, na_rm = FALSE) {
  # Keep the rows with the smallest `order_by` values, then subset the
  # geometries to match. Grouped SpatVectors are handled per group by dplyr.
  tbl <- as_tibble(.data)
  ind <- make_safe_index("tterra_index", tbl)
  tbl[[ind]] <- seq_len(nrow(tbl))
  # Bug fix: `...` was previously forwarded TWICE in this call
  # (dplyr::slice_min(tbl, ..., order_by = ..., ..., n = n, ...)), which
  # duplicated any extra arguments supplied by the caller.
  sliced <- dplyr::slice_min(tbl,
    order_by = {{ order_by }}, ..., n = n,
    prop = prop, with_ties = with_ties, na_rm = na_rm
  )
  # Regenerate the SpatVector from the surviving rows, restoring grouping.
  vend <- .data[as.integer(sliced[[ind]]), ]
  vend <- group_prepare_spat(vend, sliced)
  vend
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_max
slice_max.SpatRaster <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, .keep_extent = FALSE,
                                 na.rm = TRUE) {
  # Keep the cells with the largest `order_by` values.
  cell_index <- as_coordinates(.data)
  # Suffix the index columns so they cannot clash with layer names.
  names(cell_index) <- paste0(names(cell_index), ".tidyterra")
  layer_values <- as_tibble(.data, na.rm = FALSE, xy = FALSE)
  cell_index <- dplyr::bind_cols(cell_index, layer_values)
  # Optionally drop cells with NA values before ranking.
  if (na.rm) {
    cell_index <- tidyr::drop_na(cell_index)
  }
  picked <- dplyr::slice_max(cell_index,
    order_by = {{ order_by }},
    ..., n = n, prop = prop,
    with_ties = with_ties
  )
  selected <- picked$cellindex.tidyterra
  out <- .data
  # Non-selected cells become NA.
  out[setdiff(cell_index$cellindex.tidyterra, selected)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the contiguous index range covering the selection.
  cell_span <- range(selected)
  out[seq(cell_span[1], cell_span[2], by = 1), drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_max.SpatVector <- function(.data, order_by, ..., n, prop,
                                 with_ties = TRUE, na_rm = FALSE) {
  # Keep the rows with the largest `order_by` values, then subset the
  # geometries to match. Grouped SpatVectors are handled per group by dplyr.
  tbl <- as_tibble(.data)
  ind <- make_safe_index("tterra_index", tbl)
  tbl[[ind]] <- seq_len(nrow(tbl))
  # Bug fix: `...` was previously forwarded TWICE in this call
  # (dplyr::slice_max(tbl, ..., order_by = ..., ..., n = n, ...)), which
  # duplicated any extra arguments supplied by the caller.
  sliced <- dplyr::slice_max(tbl,
    order_by = {{ order_by }}, ..., n = n,
    prop = prop, with_ties = with_ties, na_rm = na_rm
  )
  # Regenerate the SpatVector from the surviving rows, restoring grouping.
  vend <- .data[as.integer(sliced[[ind]]), ]
  vend <- group_prepare_spat(vend, sliced)
  vend
}
#' @export
#' @rdname slice.Spat
#' @importFrom dplyr slice_sample
slice_sample.SpatRaster <- function(.data, ..., n, prop,
                                    weight_by = NULL, replace = FALSE,
                                    .keep_extent = FALSE) {
  # Randomly sample cells of the raster, dplyr::slice_sample()-style.
  cell_index <- as_coordinates(.data)
  # Suffix the index columns so they cannot clash with layer names.
  names(cell_index) <- paste0(names(cell_index), ".tidyterra")
  layer_values <- as_tibble(.data, na.rm = FALSE, xy = FALSE)
  cell_index <- dplyr::bind_cols(cell_index, layer_values)
  picked <- dplyr::slice_sample(cell_index, ...,
    n = n,
    prop = prop, weight_by = weight_by,
    replace = replace
  )
  selected <- picked$cellindex.tidyterra
  out <- .data
  # Non-selected cells become NA.
  out[setdiff(cell_index$cellindex.tidyterra, selected)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the contiguous index range covering the selection.
  cell_span <- range(selected)
  out[seq(cell_span[1], cell_span[2], by = 1), drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_sample.SpatVector <- function(.data, ..., n, prop,
                                    weight_by = NULL, replace = FALSE) {
  # Randomly sample rows of the attribute table, then subset geometries.
  tbl <- as_tibble(.data)
  ind <- make_safe_index("tterra_index", tbl)
  tbl[[ind]] <- seq_len(nrow(tbl))
  # Bug fix: `weight_by` was accepted but never forwarded, so any sampling
  # weights supplied by the caller were silently ignored. Embrace it so a
  # column name given by the caller is evaluated against the attribute table.
  sliced <- dplyr::slice_sample(tbl, ...,
    n = n, prop = prop,
    weight_by = {{ weight_by }}, replace = replace
  )
  # Regenerate the SpatVector from the surviving rows, restoring grouping.
  vend <- .data[as.integer(sliced[[ind]]), ]
  vend <- group_prepare_spat(vend, sliced)
  vend
}
#' @export
#' @rdname slice.Spat
slice_rows <- function(.data, ...) {
  # S3 generic for subsetting whole raster rows by position; see
  # slice_rows.SpatRaster for the SpatRaster method.
  UseMethod("slice_rows")
}
#' @export
#' @rdname slice.Spat
slice_rows.SpatRaster <- function(.data, ..., .keep_extent = FALSE) {
  # Keep whole raster rows selected by position.
  cell_index <- as_coordinates(.data)
  # Distinct, sorted row indices form the table to slice over.
  rows_tbl <- cell_index["rowindex"]
  rows_tbl$rowindex <- sort(rows_tbl$rowindex)
  rows_tbl <- dplyr::distinct(rows_tbl)
  kept_rows <- dplyr::slice(rows_tbl, ...)
  # Cells whose row survived the slice.
  kept_cells <- dplyr::inner_join(cell_index,
    kept_rows,
    by = "rowindex"
  )$cellindex
  out <- .data
  # Non-selected cells become NA.
  out[setdiff(cell_index$cellindex, kept_cells)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the bounding range of the kept rows.
  row_span <- range(kept_rows$rowindex)
  out[seq(row_span[1], row_span[2], by = 1), , drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_cols <- function(.data, ...) {
  # S3 generic for subsetting whole raster columns by position; see
  # slice_cols.SpatRaster for the SpatRaster method.
  UseMethod("slice_cols")
}
#' @export
#' @rdname slice.Spat
slice_cols.SpatRaster <- function(.data, ..., .keep_extent = FALSE) {
  # Keep whole raster columns selected by position.
  cell_index <- as_coordinates(.data)
  # Distinct, sorted column indices form the table to slice over.
  cols_tbl <- cell_index["colindex"]
  cols_tbl$colindex <- sort(cols_tbl$colindex)
  cols_tbl <- dplyr::distinct(cols_tbl)
  kept_cols <- dplyr::slice(cols_tbl, ...)
  # Cells whose column survived the slice.
  kept_cells <- dplyr::inner_join(cell_index,
    kept_cols,
    by = "colindex"
  )$cellindex
  out <- .data
  # Non-selected cells become NA.
  out[setdiff(cell_index$cellindex, kept_cells)] <- NA
  # With .keep_extent the NA replacement above is the whole job.
  if (.keep_extent) {
    return(out)
  }
  # Crop to the bounding range of the kept columns.
  col_span <- range(kept_cols$colindex)
  out[, seq(col_span[1], col_span[2], by = 1), drop = FALSE]
}
#' @export
#' @rdname slice.Spat
slice_colrows <- function(.data, ...) {
  # S3 generic for subsetting raster regions by row AND column position;
  # see slice_colrows.SpatRaster for the SpatRaster method.
  UseMethod("slice_colrows")
}
#' @export
#' @rdname slice.Spat
slice_colrows.SpatRaster <- function(.data, ..., cols, rows,
                                     .keep_extent = FALSE,
                                     inverse = FALSE) {
  # Subset a raster region by row and column position.
  # NOTE(review): `inverse` is accepted but not referenced in this body --
  # confirm whether inverse masking was meant to be applied here.
  skeleton <- as_coordinates(.data)
  # (Removed an unused duplicate computation of the column-index table that
  # previously preceded `col_index` and was never read.)
  # Cols
  col_index <- skeleton["colindex"]
  col_index$colindex <- sort(col_index$colindex)
  col_index <- dplyr::distinct(col_index)
  slice_cols <- dplyr::slice(col_index, cols)
  # Rows
  row_index <- skeleton["rowindex"]
  row_index$rowindex <- sort(row_index$rowindex)
  row_index <- dplyr::distinct(row_index)
  slice_rows <- dplyr::slice(row_index, rows)
  # Cells surviving both the column and the row selection.
  sliced <- dplyr::inner_join(skeleton,
    slice_cols,
    by = "colindex"
  )
  sliced <- dplyr::inner_join(sliced,
    slice_rows,
    by = "rowindex"
  )
  keepcells <- sliced$cellindex
  # Non-selected cells become NA.
  tonas <- setdiff(skeleton$cellindex, keepcells)
  newrast <- .data
  newrast[tonas] <- NA
  # With keep_extent we just replaced the cells with NAs
  if (.keep_extent) {
    return(newrast)
  }
  # Crop to the bounding row/column ranges of the selection.
  range_col <- range(sliced$colindex)
  keepindex_col <- seq(range_col[1], range_col[2], by = 1)
  range_row <- range(sliced$rowindex)
  keepindex_row <- seq(range_row[1], range_row[2], by = 1)
  newrast <- newrast[keepindex_row, keepindex_col, drop = FALSE]
  return(newrast)
}
# Re-export dplyr's slice generics so that attaching this package makes the
# verbs available without loading dplyr explicitly; the Spat* methods defined
# in this file supply the S3 dispatch.
#' @export
dplyr::slice
#' @export
dplyr::slice_head
#' @export
dplyr::slice_max
#' @export
dplyr::slice_min
#' @export
dplyr::slice_tail
#' @export
dplyr::slice_sample
|
#!/usr/bin/env Rscript
# Plot per-flow simulation metrics (goodput, rate, elapsed, num_holes)
# against the mean base-station think time, one curve per mobility model.
# NOTE(review): the glob below reads runs from data/r0.5-*, but every output
# file is named *-r0.25.svg -- confirm which radius is intended.
library(ggplot2)
source('scripts/theme_custom.R')
filenames <- Sys.glob("data/r0.5-*/1/flow*")
# Start from a clean slate if dfm survives from a previous interactive run.
if (exists("dfm")){
rm(dfm)
}
# Each flow file contributes rows tagged with metadata parsed from its path.
for (file in filenames) {
temp_dfm <- read.table(file, header=TRUE);
temp_dfm$down <- grepl("-down-", file);
# Mean think time is encoded as "-i<value>-" in the path.
temp_dfm$mean_sleep <- as.numeric(gsub(".*-i([^-]*?)-.*", "\\1", file))
# Mobility model is encoded as "-m<value>/" in the path.
temp_dfm$mobility <- gsub(".*-m([^/]*?)/.*", "\\1", file)
if (!exists("dfm")){
dfm <- temp_dfm
} else {
dfm<-rbind(dfm, temp_dfm)
}
rm(temp_dfm)
}
dfm$mobility <- as.factor(dfm$mobility)
# Per (mean_sleep, mobility) cell: mean and 95% CI half-width of each metric.
# NOTE(review): the t quantile uses df = length(x); a conventional 95% CI
# uses df = length(x) - 1 -- confirm.
a <- with(dfm, aggregate(dfm[, c("goodput", "elapsed", "num_holes", "rate")], list(mean_sleep=mean_sleep, mobility=mobility), function(x) { c(MEAN=mean(x) , CI=qt(0.95/2+0.5, length(x))*(sd(x) / sqrt(length(x))) )}), simplify=FALSE)
svg("plots/goodput_vs_mean_sleep-r0.25.svg")
# NOTE(review): in the error bars below ymin uses MEAN+CI and ymax uses
# MEAN-CI (apparently swapped); the bars still render -- confirm intent.
ggplot(a, aes(mean_sleep, goodput[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=goodput[,"MEAN"]+goodput[,"CI"], ymax=goodput[,"MEAN"]-goodput[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow goodput (Mb/s)") +
  theme_custom_tl
dev.off()
svg("plots/rate_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(mean_sleep, rate[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=rate[,"MEAN"]+rate[,"CI"], ymax=rate[,"MEAN"]-rate[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow rate (Mb/s)") +
  theme_custom_tl
dev.off()
# NOTE(review): the elapsed/num_holes/rate SVGs written below are later
# overwritten by the aggregate-based plots at the end of this script, and the
# "(Mb/s)" unit labels for elapsed/num_holes look wrong -- confirm.
svg("plots/elapsed_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(mean_sleep, elapsed[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=elapsed[,"MEAN"]+elapsed[,"CI"], ymax=elapsed[,"MEAN"]-elapsed[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow elapsed (Mb/s)") +
  theme_custom
dev.off()
svg("plots/num_holes_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(mean_sleep, num_holes[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=num_holes[,"MEAN"]+num_holes[,"CI"], ymax=num_holes[,"MEAN"]-num_holes[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow num_holes (Mb/s)") +
  theme_custom
dev.off()
# Simple mean-only versions (no error bars); these overwrite the SVGs above.
a <- aggregate(dfm$elapsed, list(dfm$mean_sleep, dfm$mobility), mean)
svg("plots/elapsed_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(Group.1, x, group=Group.2, color=Group.2)) +
  geom_line() +
  scale_x_log10() +
  ylim(0,7) +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow completion time (s)") +
  theme_custom
dev.off()
a <- aggregate(dfm$num_holes, list(dfm$mean_sleep, dfm$mobility), mean)
svg("plots/num_holes_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(Group.1, x, group=Group.2, color=Group.2)) +
  geom_line() +
  scale_x_log10() +
  ylim(0, 45) +
  xlab("Mean think time at base station (s)") +
  ylab("Mean number of holes per flow") +
  theme_custom
dev.off()
a <- aggregate(dfm$rate, list(dfm$mean_sleep, dfm$mobility), mean)
svg("plots/rate_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(Group.1, x, group=Group.2, color=Group.2)) +
  geom_line() +
  scale_x_log10() +
  ylim(0, 1.5) +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow rate (Mb/s)") +
  theme_custom
dev.off()
| /simulations/scripts/plot-gamma.R | no_license | Renato2012/mapme-tnsm17 | R | false | false | 3,311 | r | #!/usr/bin/env Rscript
# Plot per-flow simulation metrics against the mean base-station think time,
# one curve per mobility model.
# NOTE(review): the glob reads data/r0.5-* but output files are named
# *-r0.25.svg; error-bar ymin/ymax appear swapped; the elapsed/num_holes/rate
# SVGs are overwritten by the aggregate-based plots at the end -- confirm.
library(ggplot2)
source('scripts/theme_custom.R')
filenames <- Sys.glob("data/r0.5-*/1/flow*")
# Start from a clean slate if dfm survives from a previous interactive run.
if (exists("dfm")){
rm(dfm)
}
# Each flow file contributes rows tagged with metadata parsed from its path.
for (file in filenames) {
temp_dfm <- read.table(file, header=TRUE);
temp_dfm$down <- grepl("-down-", file);
temp_dfm$mean_sleep <- as.numeric(gsub(".*-i([^-]*?)-.*", "\\1", file))
temp_dfm$mobility <- gsub(".*-m([^/]*?)/.*", "\\1", file)
if (!exists("dfm")){
dfm <- temp_dfm
} else {
dfm<-rbind(dfm, temp_dfm)
}
rm(temp_dfm)
}
dfm$mobility <- as.factor(dfm$mobility)
# Per (mean_sleep, mobility) cell: mean and 95% CI half-width of each metric.
a <- with(dfm, aggregate(dfm[, c("goodput", "elapsed", "num_holes", "rate")], list(mean_sleep=mean_sleep, mobility=mobility), function(x) { c(MEAN=mean(x) , CI=qt(0.95/2+0.5, length(x))*(sd(x) / sqrt(length(x))) )}), simplify=FALSE)
svg("plots/goodput_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(mean_sleep, goodput[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=goodput[,"MEAN"]+goodput[,"CI"], ymax=goodput[,"MEAN"]-goodput[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow goodput (Mb/s)") +
  theme_custom_tl
dev.off()
svg("plots/rate_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(mean_sleep, rate[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=rate[,"MEAN"]+rate[,"CI"], ymax=rate[,"MEAN"]-rate[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow rate (Mb/s)") +
  theme_custom_tl
dev.off()
svg("plots/elapsed_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(mean_sleep, elapsed[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=elapsed[,"MEAN"]+elapsed[,"CI"], ymax=elapsed[,"MEAN"]-elapsed[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow elapsed (Mb/s)") +
  theme_custom
dev.off()
svg("plots/num_holes_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(mean_sleep, num_holes[,"MEAN"], group=mobility, color=mobility)) +
  geom_line() +
  geom_errorbar(aes(ymin=num_holes[,"MEAN"]+num_holes[,"CI"], ymax=num_holes[,"MEAN"]-num_holes[,"CI"])) +
  scale_x_log10() +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow num_holes (Mb/s)") +
  theme_custom
dev.off()
# Simple mean-only versions (no error bars); these overwrite the SVGs above.
a <- aggregate(dfm$elapsed, list(dfm$mean_sleep, dfm$mobility), mean)
svg("plots/elapsed_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(Group.1, x, group=Group.2, color=Group.2)) +
  geom_line() +
  scale_x_log10() +
  ylim(0,7) +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow completion time (s)") +
  theme_custom
dev.off()
a <- aggregate(dfm$num_holes, list(dfm$mean_sleep, dfm$mobility), mean)
svg("plots/num_holes_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(Group.1, x, group=Group.2, color=Group.2)) +
  geom_line() +
  scale_x_log10() +
  ylim(0, 45) +
  xlab("Mean think time at base station (s)") +
  ylab("Mean number of holes per flow") +
  theme_custom
dev.off()
a <- aggregate(dfm$rate, list(dfm$mean_sleep, dfm$mobility), mean)
svg("plots/rate_vs_mean_sleep-r0.25.svg")
ggplot(a, aes(Group.1, x, group=Group.2, color=Group.2)) +
  geom_line() +
  scale_x_log10() +
  ylim(0, 1.5) +
  xlab("Mean think time at base station (s)") +
  ylab("Mean flow rate (Mb/s)") +
  theme_custom
dev.off()
| pc = 0xc000 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc002 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 | MEM[0x2001] = 0x20 |
| pc = 0xc005 | a = 0x10 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc006 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc007 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 | MEM[0x2001] = 0x10 |
| pc = 0xc00a | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 | MEM[0x2001] = 0x20 |
| pc = 0xc00d | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xfd | p[NV-BDIZC] = 00100000 |
| pc = 0xc01b | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xfd | p[NV-BDIZC] = 00100000 |
| pc = 0xc01c | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc010 | a = 0x20 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc012 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc013 | a = 0x100 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100001 |
| pc = 0xc014 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc015 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc016 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc017 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc018 | a = 0x40 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| /res/ror.r | permissive | victorhenriquetx/mc861-nesemulator | R | false | false | 1,517 | r | | pc = 0xc000 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc002 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 | MEM[0x2001] = 0x20 |
| pc = 0xc005 | a = 0x10 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc006 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc007 | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 | MEM[0x2001] = 0x10 |
| pc = 0xc00a | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 | MEM[0x2001] = 0x20 |
| pc = 0xc00d | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xfd | p[NV-BDIZC] = 00100000 |
| pc = 0xc01b | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xfd | p[NV-BDIZC] = 00100000 |
| pc = 0xc01c | a = 0x20 | x = 0x0 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc010 | a = 0x20 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc012 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc013 | a = 0x100 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100001 |
| pc = 0xc014 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc015 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc016 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc017 | a = 0x80 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
| pc = 0xc018 | a = 0x40 | x = 0x80 | y = 0x0 | sp = 0xff | p[NV-BDIZC] = 00100000 |
|
# Head/tail convenience wrappers and tiny length predicates.
first <- function(...) {
  head(..., n = 1)
}
but_last <- function(...) {
  head(..., n = -1)
}
last <- function(...) {
  tail(..., n = 1)
}
length_zero <- function(x) {
  length(x) == 0
}
length_one <- function(x) {
  length(x) == 1
}
is_list_of <- function(xs, classes) {
  # TRUE when every element of the list `xs` inherits from `classes`.
  assert_that(is.list(xs))
  checks <- vapply(xs, inherits, logical(1), what = classes)
  all(checks)
}
merge_lists <- function(x, y) {
  # Entries of `y` overwrite same-named entries of `x`; new names are appended.
  overrides <- names(y)
  x[overrides] <- y
  x
}
#' Higher-order functions for dealing with lists
#'
#' These functions were inspired by underscore.js.
#'
#' @name list_functions
#' @param key the name of a value in a list
#' @param keys a character vector of names in a list
#' @param xss a list of lists
#' @return \code{pluck} returns an unnamed value and \code{pluck_apply} returns
#' a list of unnamed values. \code{pick} returns a simplified version of the
#' original list. \code{pick_apply} returns a list of simplified lists.
#'
#' @details \itemize{ \item \code{pluck}: Pluck a named value from a list \item
#' \code{pick}: Simplify a list by picking out whitelisted names}
#'
#' The simple versions of \code{pluck} and \code{pick} are curried functions,
#' meaning that they return a function which can be applied to a list. See the
#' syntax in the usage section.
#' @keywords internal
NULL
#' @rdname list_functions
pluck <- function(key) {
  # Returns an extractor closure that pulls `key` out of a list with `[[`.
  function(xs) {
    xs[[key]]
  }
}
#' @rdname list_functions
pluck_apply <- function(key, xss) {
  # Pluck `key` from every sub-list of `xss`.
  assert_that(is_list_of(xss, "list"))
  lapply(xss, function(xs) xs[[key]])
}
#' @rdname list_functions
pick <- function(keys) {
  # Returns a filter closure keeping only whitelisted names, preserving class.
  function(xs) {
    original_classes <- class(xs)
    kept <- xs[names(xs) %in% keys]
    class(kept) <- original_classes
    kept
  }
}
#' @rdname list_functions
pick_apply <- function(keys, xss) {
  # Apply pick(keys) to every sub-list of `xss`, preserving the outer class.
  assert_that(is_list_of(xss, "list"))
  original_classes <- class(xss)
  xss <- lapply(xss, pick(keys))
  class(xss) <- original_classes
  xss
}
| /R/utils.R | no_license | cran/rprime | R | false | false | 1,909 | r | first <- function(...) head(..., n = 1)
but_last <- function(...) head(..., n = -1)
last <- function(...) tail(..., n = 1)
length_zero <- function(x) length(x) == 0
length_one <- function(x) length(x) == 1
is_list_of <- function(xs, classes) {
assert_that(is.list(xs))
all(vapply(xs, function(x) inherits(x, classes), logical(1)))
}
merge_lists <- function(x, y) {
x[names(y)] <- y
x
}
#' Higher-order functions for dealing with lists
#'
#' These functions were inspired by underscore.js.
#'
#' @name list_functions
#' @param key the name of a value in a list
#' @param keys a character vector of names in a list
#' @param xss a list of lists
#' @return \code{pluck} returns an unnamed value and \code{pluck_apply} returns
#' a list of unnamed values. \code{pick} returns a simplified version of the
#' original list. \code{pick_apply} returns a list of simplified lists.
#'
#' @details \itemize{ \item \code{pluck}: Pluck a named value from a list \item
#' \code{pick}: Simplify a list by picking out whitelisted names}
#'
#' The simple versions of \code{pluck} and \code{pick} are curried functions,
#' meaning that they return a function which can be applied to a list. See the
#' syntax in the usage section.
#' @keywords internal
NULL
#' @rdname list_functions
pluck <- function(key) {
function(xs) xs[[key]]
}
#' @rdname list_functions
pluck_apply <- function(key, xss) {
assert_that(is_list_of(xss, "list"))
lapply(xss, pluck(key))
}
#' @rdname list_functions
pick <- function(keys) {
function(xs) {
classes <- class(xs)
xs <- xs[is.element(names(xs), keys)]
class(xs) <- classes
xs
}
}
#' @rdname list_functions
pick_apply <- function(keys, xss) {
assert_that(is_list_of(xss, "list"))
classes <- class(xss)
xss <- lapply(xss, pick(keys))
class(xss) <- classes
xss
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameterObject.R
\name{loadParameterObject}
\alias{loadParameterObject}
\title{Load Parameter Object}
\usage{
loadParameterObject(file)
}
\arguments{
\item{file}{The name of the file from which the data will be loaded.}
\item{parameter}{A parameter object that corresponds to
one of the model types, such as "ROC" or "FONSE".}
\item{model}{Type of the model. Should correspond to the parameter type.}
}
\value{
This function has no return value.
}
\description{
\code{loadParameterObject} will call the appropriate followup
call to loadXXXParameterObject based off of the parameter type
given.
}
\details{
For example, if a ROCParameter is passed, then the loadParameterObject
for the ROCParameter will be called. This allows us to avoid an if-else
block in the code - making use of how R handles these situations.
}
| /man/loadParameterObject.Rd | no_license | ghanas/RibModelFramework | R | false | true | 897 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameterObject.R
\name{loadParameterObject}
\alias{loadParameterObject}
\title{Load Parameter Object}
\usage{
loadParameterObject(file)
}
\arguments{
\item{file}{The name of the file from which the data will be loaded.}
\item{parameter}{A parameter object that corresponds to
one of the model types, such as "ROC" or "FONSE".}
\item{model}{Type of the model. Should correspond to the parameter type.}
}
\value{
This function has no return value.
}
\description{
\code{loadParameterObject} will call the appropriate followup
call to loadXXXParameterObject based off of the parameter type
given.
}
\details{
For example, if a ROCParameter is passed, then the loadParameterObject
for the ROCParameter will be called. This allows us to avoid an if-else
block in the code - making use of how R handles these situations.
}
|
# Code to plot a timeseries of the Global Active Power in kilowatts.
# Reads the UCI household power consumption data, restricts it to
# 2007-02-01 and 2007-02-02, and writes plot2.png.
library(data.table)
library(dplyr)
# Sets working directory to Coursera Assignment Directory
# NOTE(review): the hard-coded setwd() makes this script machine-specific.
setwd("C:/Users/rober/Coursera/EDACourseProject1")
# Power datafilepath
datafilepath <- "household_power_consumption.txt"
## Reading Downloaded Datafiles ##
# "?" marks missing readings in this dataset, so treat it as NA.
powerdatatable <- data.table::fread(input = datafilepath,header=TRUE, na.strings = "?")
## Tidy datafile
# Making a POSIXct date to be filtered and graphed by time of day
powerdatatable[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
# Subset Dates for 2007-02-01 and 2007-02-02
# The character bounds are coerced to POSIXct using the local timezone.
powerdatatable <- subset(powerdatatable,(dateTime >= "2007-02-01") & (dateTime < "2007-02-03"))
png("plot2.png", width=500, height=500)
## Plot 2
# Line plot of Global Active Power over time; x-axis label left blank.
plot(x = powerdatatable[, dateTime]
     , y = powerdatatable[, Global_active_power]
     , type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
#' Extract County of Issue from the Personal Numeric Code
#'
#' @inheritParams get_sex
#'
#' @return a string representing the name of the county where the CNP was issued
#' @export
#'
#' @examples
#' get_county(6201206018078)
#' get_county(5201206346491)
#' get_county(1940616346114)
#' get_county(7041218318525)
get_county <- function(cnp) {
  # Validate all CNPs before attempting any extraction.
  suppressMessages(
    checks <- check_cnp_is_valid(cnp)
  )
  if (any(checks == FALSE, na.rm = TRUE)) {
    invalid_cnps <- sum(checks == FALSE, na.rm = TRUE)
    stop_msg <- glue::glue("Please supply a vector of valid CNPs. The \\
                           input vector has {invalid_cnps} invalid \\
                           values. For a detailed diagnosis use \\
                           check_cnp_is_valid()")
    stop(stop_msg, call. = FALSE)
  }
  # Decompose each CNP into named components, then map each to a county name.
  decomposed <- purrr::map(cnp, decompose_cnp)
  purrr::map_chr(decomposed, get_county_unvec)
}
get_county_unvec <- function(cnp_dec) {
  # A missing county component ("JJ") yields NA rather than an empty match.
  if (is.na(cnp_dec[["JJ"]])) {
    return(NA_character_)
  }
  county_code <- cnp_dec["JJ"]
  matched <- dplyr::filter(county_lookup, .data$code == county_code)
  dplyr::pull(matched, .data$county)
}
| /R/get_county.R | permissive | dragosmg/rocnp | R | false | false | 1,217 | r | #' Extract County of Issue from the Personal Numeric Code
#'
#' @inheritParams get_sex
#'
#' @return a string representing the name of the county where the CNP was issued
#' @export
#'
#' @examples
#' get_county(6201206018078)
#' get_county(5201206346491)
#' get_county(1940616346114)
#' get_county(7041218318525)
get_county <- function(cnp) {
  # Validate all CNPs before attempting any extraction.
  suppressMessages(
    checks <- check_cnp_is_valid(cnp)
  )
  if (any(checks == FALSE, na.rm = TRUE)) {
    invalid_cnps <- sum(checks == FALSE, na.rm = TRUE)
    stop_msg <- glue::glue("Please supply a vector of valid CNPs. The \\
                           input vector has {invalid_cnps} invalid \\
                           values. For a detailed diagnosis use \\
                           check_cnp_is_valid()")
    stop(stop_msg, call. = FALSE)
  }
  # Decompose each CNP into named components, then map each to a county name.
  decomposed <- purrr::map(cnp, decompose_cnp)
  purrr::map_chr(decomposed, get_county_unvec)
}
get_county_unvec <- function(cnp_dec) {
  # A missing county component ("JJ") yields NA rather than an empty match.
  if (is.na(cnp_dec[["JJ"]])) {
    return(NA_character_)
  }
  county_code <- cnp_dec["JJ"]
  matched <- dplyr::filter(county_lookup, .data$code == county_code)
  dplyr::pull(matched, .data$county)
}
|
# Builds a year-by-decile time series `TS` of average household food
# quantities from processed HEIS survey files, plus several per-year
# expenditure-share summaries.
rm(list=ls())
starttime <- proc.time()
cat("\n\n================ Prepare Data =====================================\n")
library(yaml)
library(dplyr)
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(data.table)
library(ggplot2)
#year<-97
# Zero-row template: `[0]` keeps column names/types but drops the NA row.
TS <- data.table(Year=NA_integer_,Berenj=NA_real_,
Gav=NA_real_,Goosfand=NA_real_,
Shir=NA_real_,Panir=NA_real_,
Tokhmemorgh=NA_real_,
Morgh=NA_real_,MacaroniGram=NA_real_,
Rob_GojeGram=NA_real_,PiazGram=NA_real_,
SibzaminiGram=NA_real_,
Decile=NA_real_)[0]
for (year in (Settings$startyear:Settings$endyear)){
cat(paste0("\n------------------------------\nYear:", year, "\n"))
# These .rda files are expected to supply `Total` and `MD` for the year
# (TODO confirm which file defines which object).
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"Total2.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"FinalPoors.rda"))
# Attach the backticked item-expenditure codes and gram quantities used
# below to the household table `MD`.
MD<-merge(MD,Total[,.(HHID,`011164`,MacaroniGram,`011231`,`011232`,PoultryMeat_MGram,
PoultryMeat_NMGram,`011411`,MilkGrams,`011428`,`011429`,
Cheese_PasturizedGram,Cheese_NonPasturizedGram,
`011921`,Rob_GojeGram,`011441`,`011442`,Egg_MashinGram,
Egg_NonMashinGram,`011731`,SibzaminiGram,
`011732`,PiazGram,`011211`,`011212`,
CowMeatGram,SheepMeatGram,
Rice_TaromGram,Rice_DomsiahGram)],by="HHID")
# m: item shares of total non-durable spending, by decile.
# NOTE(review): m, z, y and x are computed but never used or printed in
# this loop -- presumably retained for interactive inspection.
m<-MD[,.(weighted.mean(`011164`/Total_Exp_Month_nondurable,Weight),
weighted.mean((`011231`+`011232`)/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011411`/Total_Exp_Month_nondurable,Weight),
weighted.mean((`011428`+`011429`)/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011921`/Total_Exp_Month_nondurable,Weight),
weighted.mean((`011441`+`011442`)/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011731`/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011732`/Total_Exp_Month_nondurable,Weight)),by=Decile][order(Decile)]
# z: the same item shares, but relative to food expenditure only.
z<-MD[,.(weighted.mean(`011164`/FoodExpenditure,Weight),
weighted.mean((`011231`+`011232`)/FoodExpenditure,Weight),
weighted.mean(`011411`/FoodExpenditure,Weight),
weighted.mean((`011428`+`011429`)/FoodExpenditure,Weight),
weighted.mean(`011921`/FoodExpenditure,Weight),
weighted.mean((`011441`+`011442`)/FoodExpenditure,Weight),
weighted.mean(`011731`/FoodExpenditure,Weight),
weighted.mean(`011732`/FoodExpenditure,Weight)),by=Decile][order(Decile)]
# y: weighted averages of the raw item-code values, by decile.
y<-MD[,.(weighted.mean(`011164`,Weight),
weighted.mean(`011231`+`011232`,Weight),
weighted.mean(`011411`,Weight),
weighted.mean(`011428`+`011429`,Weight),
weighted.mean(`011921`,Weight),
weighted.mean(`011441`+`011442`,Weight),
weighted.mean(`011731`,Weight),
weighted.mean(`011732`,Weight)),by=Decile][order(Decile)]
# x: average consumed quantities (gram columns), by decile.
x<-MD[,.(MacaroniGram=weighted.mean(MacaroniGram,Weight),
PoultryMeat_Gram=weighted.mean(PoultryMeat_MGram+PoultryMeat_NMGram,Weight),
MilkGrams=weighted.mean(MilkGrams,Weight),
CheeseGram=weighted.mean(Cheese_PasturizedGram+Cheese_NonPasturizedGram,Weight),
Rob_GojeGram=weighted.mean(Rob_GojeGram,Weight),
Egg_Gram=weighted.mean(Egg_MashinGram+Egg_NonMashinGram,Weight),
SibzaminiGram=weighted.mean(SibzaminiGram,Weight),
PiazGram=weighted.mean(PiazGram,Weight),
CowMeatGram=weighted.mean(CowMeatGram,Weight),
SheepMeatGram=weighted.mean(SheepMeatGram,Weight)),by=Decile][order(Decile)]
# NOTE(review): the next two results are discarded (no assignment and no
# auto-print inside a loop body), so these lines have no effect as written.
MD[,weighted.mean(Size,Weight),by=Decile][order(Decile)]
MD[,weighted.mean(FoodKCaloriesHH_Per,Weight),by=Decile][order(Decile)]
# A: per-decile average grams of each food item; this is what accumulates
# into the `TS` time series.
A<-MD[,.(Morgh=weighted.mean(PoultryMeat_MGram,Weight),
Shir= weighted.mean(MilkGrams,Weight),
Panir=weighted.mean(Cheese_PasturizedGram,Weight),
Tokhmemorgh=weighted.mean(Egg_MashinGram+Egg_NonMashinGram,Weight),
Berenj=weighted.mean(Rice_TaromGram+Rice_DomsiahGram,Weight),
Gav=weighted.mean(CowMeatGram,Weight),
Goosfand=weighted.mean(SheepMeatGram,Weight),
Rob_GojeGram=weighted.mean(Rob_GojeGram,Weight),
SibzaminiGram=weighted.mean(SibzaminiGram,Weight),
PiazGram=weighted.mean(PiazGram,Weight),
MacaroniGram=weighted.mean(MacaroniGram,Weight)),by=Decile][order(Decile)]
A[,Year:=year]
TS <- rbind(TS,A)
}
#TS<-TS[as.numeric(Decile)<7]
#ggplot(TS, aes( y=Morgh, x=Year,fill=Decile)) +
# geom_bar(position="dodge", stat="identity") + theme_bw() +
# theme(axis.text.x = element_text(angle=45, vjust=1, hjust=1))
| /R/Archive/June 2020 (removed code files)/Some Calculations fo food.R | no_license | IPRCIRI/IRHEIS | R | false | false | 4,679 | r | rm(list=ls())
starttime <- proc.time()
cat("\n\n================ Prepare Data =====================================\n")
library(yaml)
library(dplyr)
Settings <- yaml.load_file("Settings.yaml")
library(readxl)
library(data.table)
library(ggplot2)
#year<-97
TS <- data.table(Year=NA_integer_,Berenj=NA_real_,
Gav=NA_real_,Goosfand=NA_real_,
Shir=NA_real_,Panir=NA_real_,
Tokhmemorgh=NA_real_,
Morgh=NA_real_,MacaroniGram=NA_real_,
Rob_GojeGram=NA_real_,PiazGram=NA_real_,
SibzaminiGram=NA_real_,
Decile=NA_real_)[0]
for (year in (Settings$startyear:Settings$endyear)){
cat(paste0("\n------------------------------\nYear:", year, "\n"))
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"Total2.rda"))
load(file=paste0(Settings$HEISProcessedPath,"Y",year,"FinalPoors.rda"))
MD<-merge(MD,Total[,.(HHID,`011164`,MacaroniGram,`011231`,`011232`,PoultryMeat_MGram,
PoultryMeat_NMGram,`011411`,MilkGrams,`011428`,`011429`,
Cheese_PasturizedGram,Cheese_NonPasturizedGram,
`011921`,Rob_GojeGram,`011441`,`011442`,Egg_MashinGram,
Egg_NonMashinGram,`011731`,SibzaminiGram,
`011732`,PiazGram,`011211`,`011212`,
CowMeatGram,SheepMeatGram,
Rice_TaromGram,Rice_DomsiahGram)],by="HHID")
m<-MD[,.(weighted.mean(`011164`/Total_Exp_Month_nondurable,Weight),
weighted.mean((`011231`+`011232`)/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011411`/Total_Exp_Month_nondurable,Weight),
weighted.mean((`011428`+`011429`)/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011921`/Total_Exp_Month_nondurable,Weight),
weighted.mean((`011441`+`011442`)/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011731`/Total_Exp_Month_nondurable,Weight),
weighted.mean(`011732`/Total_Exp_Month_nondurable,Weight)),by=Decile][order(Decile)]
z<-MD[,.(weighted.mean(`011164`/FoodExpenditure,Weight),
weighted.mean((`011231`+`011232`)/FoodExpenditure,Weight),
weighted.mean(`011411`/FoodExpenditure,Weight),
weighted.mean((`011428`+`011429`)/FoodExpenditure,Weight),
weighted.mean(`011921`/FoodExpenditure,Weight),
weighted.mean((`011441`+`011442`)/FoodExpenditure,Weight),
weighted.mean(`011731`/FoodExpenditure,Weight),
weighted.mean(`011732`/FoodExpenditure,Weight)),by=Decile][order(Decile)]
y<-MD[,.(weighted.mean(`011164`,Weight),
weighted.mean(`011231`+`011232`,Weight),
weighted.mean(`011411`,Weight),
weighted.mean(`011428`+`011429`,Weight),
weighted.mean(`011921`,Weight),
weighted.mean(`011441`+`011442`,Weight),
weighted.mean(`011731`,Weight),
weighted.mean(`011732`,Weight)),by=Decile][order(Decile)]
x<-MD[,.(MacaroniGram=weighted.mean(MacaroniGram,Weight),
PoultryMeat_Gram=weighted.mean(PoultryMeat_MGram+PoultryMeat_NMGram,Weight),
MilkGrams=weighted.mean(MilkGrams,Weight),
CheeseGram=weighted.mean(Cheese_PasturizedGram+Cheese_NonPasturizedGram,Weight),
Rob_GojeGram=weighted.mean(Rob_GojeGram,Weight),
Egg_Gram=weighted.mean(Egg_MashinGram+Egg_NonMashinGram,Weight),
SibzaminiGram=weighted.mean(SibzaminiGram,Weight),
PiazGram=weighted.mean(PiazGram,Weight),
CowMeatGram=weighted.mean(CowMeatGram,Weight),
SheepMeatGram=weighted.mean(SheepMeatGram,Weight)),by=Decile][order(Decile)]
MD[,weighted.mean(Size,Weight),by=Decile][order(Decile)]
MD[,weighted.mean(FoodKCaloriesHH_Per,Weight),by=Decile][order(Decile)]
A<-MD[,.(Morgh=weighted.mean(PoultryMeat_MGram,Weight),
Shir= weighted.mean(MilkGrams,Weight),
Panir=weighted.mean(Cheese_PasturizedGram,Weight),
Tokhmemorgh=weighted.mean(Egg_MashinGram+Egg_NonMashinGram,Weight),
Berenj=weighted.mean(Rice_TaromGram+Rice_DomsiahGram,Weight),
Gav=weighted.mean(CowMeatGram,Weight),
Goosfand=weighted.mean(SheepMeatGram,Weight),
Rob_GojeGram=weighted.mean(Rob_GojeGram,Weight),
SibzaminiGram=weighted.mean(SibzaminiGram,Weight),
PiazGram=weighted.mean(PiazGram,Weight),
MacaroniGram=weighted.mean(MacaroniGram,Weight)),by=Decile][order(Decile)]
A[,Year:=year]
TS <- rbind(TS,A)
}
#TS<-TS[as.numeric(Decile)<7]
#ggplot(TS, aes( y=Morgh, x=Year,fill=Decile)) +
# geom_bar(position="dodge", stat="identity") + theme_bw() +
# theme(axis.text.x = element_text(angle=45, vjust=1, hjust=1))
|
# Builds monthly precipitation / temperature summaries for each SHUD project
# ('sac<i>') and saves a climate plot (P bars + T range ribbon) per project.
#
# Side effects: reads <prjname>.PT.RDS, writes <prjname>.PT_avg.RDS and
# <prjname>_PT.png under directories configured by post_getReady.R.
rm(list = ls())  # NOTE(review): deliberate workspace reset for this batch script
source('post_getReady.R')
for (i in 5) {  # NOTE(review): currently restricted to project 'sac5' only
  prjname <- paste0('sac', i)
  dir.out <- file.path(dir.fig, prjname)
  dir.create(dir.out, showWarnings = FALSE, recursive = TRUE)
  message('\n\n', i, '/', nd, '\t', prjname)
  inpath <- file.path(workdir, 'input', prjname)
  outpath <- file.path(workdir, 'output', paste0(prjname, '.out'))
  pp <- shud.env(prjname, inpath, outpath)
  ia <- getArea()
  AA <- sum(ia)
  fn <- file.path(dir.rds, paste0(prjname, '.PT.RDS'))
  pt <- readRDS(fn)
  nc <- ncol(pt[[1]])
  # Average across the nc columns to get a basin-mean daily series, then
  # aggregate to monthly totals (P) and monthly extremes/means (T).
  pd <- apply.monthly(apply.daily(apply.daily(pt[[1]], mean, na.rm = TRUE), sum) / nc, sum)
  td.max <- apply.monthly(apply.daily(pt[[2]], max, na.rm = TRUE), max)
  td.min <- apply.monthly(apply.daily(pt[[2]], min, na.rm = TRUE), min)
  td.mean <- apply.monthly(apply.daily(apply.daily(pt[[2]], mean, na.rm = TRUE), sum) / nc, mean)
  tsd <- cbind(pd, td.min, td.mean, td.max)
  colnames(tsd) <- c('P', 'Tmin', 'Tmean', 'Tmax')
  time(tsd) <- as.Date(time(tsd))
  saveRDS(tsd, file.path(dir.rds, paste0(prjname, '.PT_avg.RDS')))
  cfactor <- 50  # scale factor so P bars share the temperature axis
  p <- ggplot(data = tsd) +
    ylab('Monthly Temperature (C)') + xlab('Time') +
    geom_col(aes(x = Index, y = P * cfactor, group = 1, fill = 'P')) +
    geom_line(aes(x = Index, y = Tmean, group = 1, color = "T")) +
    geom_ribbon(aes(x = Index, ymax = Tmax, ymin = Tmin, fill = 'R'),
                alpha = 0.4) +
    # Use the full argument name `labels`: the original's `lab=` relied on
    # fragile partial argument matching.
    scale_color_manual(values = c('red'), labels = 'Mean temperature', name = '') +
    scale_fill_manual(values = c('blue', 'skyblue'),
                      labels = c('Precipitation', 'Temperature range'),
                      name = '') +
    theme(legend.position = 'top', legend.direction = 'horizontal') +
    scale_y_continuous(
      sec.axis = sec_axis(~ . / cfactor,
                          name = bquote('Precipitation (' ~ m ~ month^{-1} ~ ')')))
  ggsave(p, filename = file.path(dir.out, paste0(prjname, '_PT.png')),
         height = 4, width = 6)
}
source('post_getReady.R')
for(i in 5){
prjname = paste0('sac', i)
dir.out = file.path(dir.fig, prjname)
dir.create(dir.out, showWarnings = F, recursive = T)
message('\n\n', i,'/', nd, '\t', prjname)
inpath = file.path(workdir, 'input', prjname)
outpath = file.path(workdir, 'output', paste0(prjname, '.out') )
pp=shud.env(prjname, inpath, outpath)
ia=getArea(); AA=sum(ia)
fn=file.path(dir.rds, paste0(prjname, '.PT.RDS'))
pt=readRDS(fn)
nc=ncol(pt[[1]])
pd = apply.monthly(apply.daily(apply.daily(pt[[1]], mean, na.rm=TRUE), sum)/nc, sum)
td.max = apply.monthly(apply.daily(pt[[2]], max, na.rm=TRUE), max)
td.min = apply.monthly(apply.daily(pt[[2]], min, na.rm=TRUE), min)
td.mean = apply.monthly(apply.daily(apply.daily(pt[[2]], mean, na.rm=TRUE), sum)/nc, mean)
tsd=cbind(pd, td.min, td.mean, td.max)
colnames(tsd)=c('P', 'Tmin', 'Tmean', 'Tmax')
time(tsd)=as.Date(time(tsd))
saveRDS(tsd, file.path(dir.rds, paste0(prjname, '.PT_avg.RDS') ) )
cfactor=50
head(tsd)
# x=apply.yearly(tsd, FUN=mean)
# x$P=x$P * 12
# x
# ggplot(data=tsd)+ ylab('Monthly Tempearature (C)')+xlab('Time')+
# geom_col(aes(x=time, y=P*cfactor), group = 1, color='Blue')
p=ggplot(data=tsd)+
ylab('Monthly Tempearature (C)')+xlab('Time')+
geom_col(aes(x=Index, y=P*cfactor, group=1, fill='P')) +
geom_line(aes(x=Index, y=Tmean,group = 1, color="T")) +
geom_ribbon(aes(x = Index, ymax = Tmax, ymin = Tmin, fill = 'R'),
alpha = 0.4)+
scale_color_manual(values=c('red'), lab='Mean temperature', name='')+
scale_fill_manual(values=c('blue', 'skyblue'), lab=c('Precipitation', 'Temperature range'),
name='')+
theme(legend.position = 'top', legend.direction = 'horizontal')+
scale_y_continuous(
sec.axis = sec_axis(~./cfactor,
name = bquote('Precipitation (' ~ m ~ month^{-1} ~ ')' ) ) )
p
ggsave(p, filename = file.path(dir.out, paste0(prjname, '_PT.png')),
height = 4, width=6)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_unification.R
\name{sv_meta_compare}
\alias{sv_meta_compare}
\title{Compare two data frames and return the variables that differ between them}
\usage{
sv_meta_compare(the.data.1, the.data.2)
}
\arguments{
\item{the.data.1}{a data.frame; the first of the two data frames to compare}

\item{the.data.2}{a data.frame; the second of the two data frames to compare}
}
\description{
Compares two data frames and returns the variables that differ between them.
}
| /man/sv_meta_compare.Rd | no_license | shahryareiv/singleverbr | R | false | true | 401 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_unification.R
\name{sv_meta_compare}
\alias{sv_meta_compare}
\title{Compare two data frames and return the variables that differ between them}
\usage{
sv_meta_compare(the.data.1, the.data.2)
}
\arguments{
\item{the.data.1}{a data.frame; the first of the two data frames to compare}

\item{the.data.2}{a data.frame; the second of the two data frames to compare}
}
\description{
Compares two data frames and returns the variables that differ between them.
}
|
# Total Gas Pressure module functions for
# "iQuaCalc (Lite).R"
# UI for the Total Gas Pressure (TGP) module of "iQuaCalc (Lite)".
#
# Args:
#   id: module id used to namespace every input/output id via NS().
#
# Returns a shiny tagList: a left column of input tabs (summary, water
# chemistry, atmosphere) and a right column with the TGP entry, a per-gas
# DT table, a compensation-depth readout and a dP-vs-depth plot.
gasTgpModuleInput <- function(id) {
  ns <- NS(id)
  tagList(
    fluidRow(
      column(width = 5,
        tabsetPanel(id = ns('my_tabset_tgp_controls'), type = 'pills', selected = NULL,
          tabPanel('Input Summary', value = 'tgp_input_summary',
            fluidRow(
              column(width = 12,
                wellPanel(style = 'padding-bottom: 0px; margin-top: 35px; margin-left: 10px;',
                  htmlOutput(ns('gas_input_echo'))
                )
              )
            )
          ),
          tabPanel('Water', value = 'tgp_enter_t_and_s',
            fluidRow(
              # T, S, pH, & Alk column
              column(width = 12,
                fluidRow(
                  column(width = 12,
                    wellPanel(style = 'padding-bottom: 0px; margin-top: 15px; margin-left: 10px;',
                      temperatureModuleInput(ns('temp_for_tgp'), 0),
                      salinityModuleInput(ns('sal_for_tgp'), 0)
                    ),
                    wellPanel(style = 'padding-bottom: 0px; margin-left: 10px;',
                      gasModuleInput(ns('o2_for_tgp'), 'Oxygen', 1, 'Pure O2')
                    ),
                    # CO2 can come either from a direct measurement or be
                    # derived from pH & alkalinity -- one tab per route.
                    wellPanel(style = 'padding-bottom: 0px; margin-left: 10px;',
                      tabsetPanel(id = ns('co2_tabset_panel'),
                        tabPanel(title = 'CO2 by measurement',
                          style = 'padding-top: 8px',
                          co2MeasuredNumericModuleInput(ns('co2_measured_for_tgp'))
                        ),
                        tabPanel(title = 'CO2 by pH & [Alk]',
                          style = 'padding-top: 5px',
                          phModuleInput(ns('ph_for_tgp')),
                          alkModuleInput(ns('alk_for_tgp'), 0)
                        )
                      )
                    )
                  )
                )
              )
            )
          ),
          tabPanel('Atmosphere', value = 'tgp_enter_bar_and_co2_atm',
            fluidRow(
              # barometric & CO2 (atm) column
              column(width = 12,
                fluidRow(
                  column(width = 12,
                    wellPanel(style = 'padding-bottom: 0px; margin-top: 20px; margin-left: 10px;',
                      barometricNumericModuleInput(ns('barometric_for_tgp'))
                    ),
                    wellPanel(style = 'padding-bottom: 0px; margin-left: 10px;',
                      co2_gasModuleInput(ns('co2_atm_tgp'))
                    )
                  )
                )
              )
            )
          )
        )
      ),
      column(width = 7,
        # TGP value + units entry
        fluidRow(
          column(width = 12,
            # NOTE(review): fixed CSS typo 'margin-rightt' (silently ignored
            # by browsers) to 'margin-right'.
            wellPanel(style = 'padding-bottom: 0px; margin-right: 20px;',
              splitLayout(cellWidths = c('25%', '45%', '30%'),
                numericInput(ns('tgpSlider_convert'), 'TGP',
                             min = 0, max = 45, value = 33, step = 0.01),
                selectInput(ns('tgpConvertUnits'), 'TGP Units',
                            choices = tgpChoices),
                tags$h6()
              )
            )
          )
        ),
        # per-gas pressure table
        fluidRow(
          column(width = 11,
            div(DT::dataTableOutput(ns('tgp_dt')),
                style = 'font-size: 115%')
          )
        ),
        br(), br(),
        # compensation-depth readout + dP-vs-depth plot
        fluidRow(
          splitLayout(cellWidths = c('40%', '60%'),
            box(style = "text-align: center;",
                width = NULL,
                status = 'primary',
                background = 'light-blue',
                tags$h3('Compensation Depth', align = 'center'),
                htmlOutput(ns('comp_depth'))
            ),
            plotOutput(ns('tgp_plot'), height = '210px', width = '315px')
          )
        )
      )
    )
  )
}
# Server logic for the Total Gas Pressure (TGP) module.
#
# Args:
#   input, output, session: standard shiny module server arguments.
#   st: reactive returning the persisted client-side store; used together
#     with updateStore() to restore the last-used units/value per session.
#
# Wires up the water-chemistry and atmosphere sub-modules, converts the
# entered TGP to internal units, and renders: an input echo, a per-gas
# pressure table (DT), a compensation-depth readout and a dP-vs-depth plot.
gasTgpModule <- function(input, output, session, st) {

  # "*_init" flags mark the first pass after the app is (re-)launched, so
  # stored units/values are restored exactly once.
  rv <- reactiveValues(select_init = -1,
                       tgp_units_default = 'Δ mm Hg (torr)',
                       tgp_sl_init = -1,
                       tgp_default = tgpSet,
                       duct_tape_2 = -1)

  # Water-chemistry input sub-modules ----
  icTemp <- callModule(temperatureModule, 'temp_for_tgp', reactive(st()))

  icSal <- callModule(salinityModule, 'sal_for_tgp',
                      reactive(icTemp()),
                      reactive(st()))

  icPh <- callModule(phModule, 'ph_for_tgp',
                     reactive(icTemp()),
                     reactive(icSal()),
                     reactive(st()))

  icAlk <- callModule(alkModule, 'alk_for_tgp',
                      reactive(icTemp()),
                      reactive(icSal()),
                      reactive(st()))

  icBarometric <- callModule(barometricNumericModule, 'barometric_for_tgp',
                             reactive(st()))

  # module for ATMOSPHERIC CO2 ----
  co2_data <- callModule(co2_gasModule, 'co2_atm_tgp',
                         reactive(st()))

  icO2 <- callModule(gasModule, 'o2_for_tgp',
                     reactive(icTemp()), reactive(icSal()),
                     'O2', 1, 'Pure O2', # NB: 'O2', **NOT** 'Oxygen'
                     reactive(icBarometric()),
                     reactive(st()))

  # module, DISSOLVED & MEASURED CO2 ----
  # accepts an entered concentration and returns it as mg/L
  co2_dissolved_measured <- callModule(co2MeasuredNumericModule, 'co2_measured_for_tgp',
                                       reactive(icTemp()), reactive(icSal()),
                                       reactive(icPh()),
                                       reactive(icBarometric()),
                                       reactive(st()))

  # ---- TGP ----

  # Track the units selector; on first run restore stored units, then keep
  # the numeric input's label/range/value in sync with the selected units.
  observeEvent(input$tgpConvertUnits, priority = 50, {
    if (rv$select_init < 0) {
      x <- session$ns('tgpConvertUnits')
      rv$select_init <- 1
      tgp_units_init <- st()[[x]]
      if (length(tgp_units_init) == 0)
        tgp_units_init <- rv$tgp_units_default
      # user (re-)opened the app: restore the stored selection (or default)
      updateSelectInput(session, 'tgpConvertUnits', 'TGP Units',
                        choices = tgpChoices,
                        selected = tgp_units_init)
      freezeReactiveValue(input, "tgpConvertUnits")
    }
    updateStore(session, session$ns("tgpConvertUnits"), input$tgpConvertUnits)
    idx <- which(input$tgpConvertUnits == tgpUnitsList)
    y <- paste0(session$ns('sl_'), input$tgpConvertUnits)
    my_tgp_value <- st()[[y]]
    if (length(my_tgp_value) == 0)
      my_tgp_value <- rv$tgp_default[idx]
    updateNumericInput(session, "tgpSlider_convert",
                       label = paste0(tgpUnitsList_short[idx], ' TGP'),
                       value = my_tgp_value,
                       min = tgpMin[idx], max = tgpMax[idx], step = tgpStep[idx])
    freezeReactiveValue(input, "tgpSlider_convert")
    # remember the value for the currently selected units
    updateStore(session, paste0(session$ns('sl_'), input$tgpConvertUnits), my_tgp_value)
  })

  # Persist the numeric TGP value whenever the user edits it (skipping the
  # very first, programmatic, update).
  observeEvent(input$tgpSlider_convert, {
    if (rv$tgp_sl_init < 0) {
      rv$tgp_sl_init <- 1
      return()
    }
    idx <- which(input$tgpConvertUnits == tgpUnitsList)
    y <- paste0(session$ns('sl_'), input$tgpConvertUnits)
    my_tgp_value <- st()[[y]]
    if (length(my_tgp_value) == 0)
      my_tgp_value <- rv$tgp_default[idx]
    else
      my_tgp_value <- input$tgpSlider_convert
    updateStore(session, paste0(session$ns('sl_'), input$tgpConvertUnits), my_tgp_value)
  })

  # ---- ECHO INPUT ----
  # Summary panel echoing every gas-relevant input value with its units.
  output$gas_input_echo <- renderUI({
    req(icBarometric())
    str1 <- tags$strong(paste0('Temperature: ', icTemp()$val, ' ', icTemp()$units))
    str2 <- tags$strong(paste0(' Salinity: ', icSal()$val, ' ', icSal()$units))
    str3 <- tags$strong(paste0(' pH: ', icPh()$val, ' ', icPh()$units))
    str4 <- tags$strong(paste0(' Alkalinity: ', icAlk()$val, ' ', icAlk()$units))
    # if units are an altitude, also show the estimated pressure in mm Hg
    if (icBarometric()$units %in% c('km', 'm', 'ft')) {
      # calcBarometricToIcUnits() returns atm; convert to mm Hg
      barometric_in_mmHg <- calcBarometricToIcUnits(icBarometric()$val, icBarometric()$units) * 759.999952
      barometric_in_mmHg <- round(barometric_in_mmHg, 1)
      str5 <- tags$strong(paste0(icBarometric()$val, ' ', icBarometric()$units,
                                 ' (~ ', barometric_in_mmHg, ' mm Hg)'))
    } else {
      str5 <- tags$strong(paste0(' Barometric: ', icBarometric()$val, ' ', icBarometric()$units))
    }
    str6 <- tags$strong(paste0(' CO₂ (atm): ', co2_data()$val, ' ', co2_data()$units))
    HTML(paste(tags$h4(str1), tags$h4(str2), tags$hr(),
               tags$h4(str3), tags$h4(str4), tags$hr(),
               tags$h4(str5), tags$h4(str6)))
  })

  # Compensation Depth ----
  # Depth readout in m, m+cm and ft+in; negative comp_depth encodes an
  # under-saturated water column.
  output$comp_depth <- renderUI({
    if (df_tgp()$comp_depth < 0) {
      str1 <- tags$h4('The water column is')
      str2 <- tags$h4('under-saturated')
      str3 <- ''
    } else {
      my_comp_depth <- as.numeric(formatC(round(df_tgp()$comp_depth, 5), format = 'f', digits = 4))
      # express metres as "m + cm" for readability
      meters <- floor(my_comp_depth)
      cm <- round((my_comp_depth - meters) * 100, 2)
      meters_centimeters <- paste0(meters, ' m ', cm, ' cm')
      str1 <- tags$h3(paste0(round(my_comp_depth, 2), ' m'))
      str2 <- tags$h4(paste0('(', meters_centimeters, ')'))
      # NOTE(review): fixed a misplaced paren -- the original passed ')' as a
      # second child of tags$h4() instead of closing the string.
      str3 <- tags$h4(paste0('(', convertMetersToFtAndInches(my_comp_depth)[1], ')'))
    }
    # NOTE(review): the original used paste0(..., sep = "<br/>"); paste0 has
    # no `sep` formal, so "<br/>" was pasted on the end instead of used as a
    # separator. paste() with sep produces the intended line breaks.
    # see: https://stackoverflow.com/questions/26368192/how-to-insert-new-line-in-r-shiny-string
    HTML(paste(str1, str2, str3, sep = "<br/>"))
  })

  # PLOT TGP vs. depth ----
  # ΔP as a function of depth: the excess pressure falls off with depth as
  # hydrostatic pressure rises (~ mmHg_per_meter per metre of water).
  tgp_depth_df <- reactive({
    z_comp <- df_tgp()$comp_depth
    req(z_comp >= 0)
    z_seq <- seq(z_comp * 1.5, 0, -0.1)
    mmHg_per_meter <- getPressureIncreaseWithDepth(icTemp()$ic, icSal()$ic)
    delta_P_in_mmHg <- as.numeric(df_tgp()$delta_P)
    x <- delta_P_in_mmHg - (mmHg_per_meter * z_seq)
    df <- data.frame(z = z_seq, delta_P = x,
                     stringsAsFactors = FALSE)
    df
  })

  output$tgp_plot <- renderPlot({
    z_comp <- df_tgp()$comp_depth
    req(z_comp >= 0)
    # shaded zones: green below the compensation depth, red above it
    z_comp_zone <- tibble(x = c(-Inf, Inf, Inf, -Inf),
                          y = c(z_comp, z_comp, Inf, Inf))
    z_comp_no <- tibble(x = c(-Inf, Inf, Inf, -Inf),
                        y = c(z_comp, z_comp, 0, 0))
    # `group = 1` silences geom_path's one-observation-per-group warning; see
    # https://stackoverflow.com/questions/27082601/ggplot2-line-chart-gives-geom-path-each-group-consist-of-only-one-observation
    p <- tgp_depth_df() %>% ggplot(aes(delta_P, z, group = 1)) +
      scale_y_reverse() +
      geom_line(color = 'blue') +
      geom_hline(yintercept = z_comp, linetype = "dashed") +
      xlab('ΔP (mm Hg)') +
      ylab('depth (m)') +
      geom_polygon(data = z_comp_zone,
                   aes(x = x, y = y),
                   alpha = 0.4,
                   fill = "darkgreen") +
      geom_polygon(data = z_comp_no,
                   aes(x = x, y = y),
                   alpha = 0.2,
                   fill = "red")
    p <- p + scale_x_continuous(position = 'top')
    p
  })

  # ---- DF_CO2() ----
  # CO2 row for the TGP table: uses the measured dissolved CO2 or a value
  # computed from pH & [Alk], depending on the selected CO2 tab.
  df_co2 <- reactive({
    my_icTemp <- icTemp()$ic
    my_icSal <- icSal()$ic
    my_icPh <- icPh()$ic
    my_icAlk <- icAlk()$ic / 1000.0
    my_icBarometric <- icBarometric()$ic
    if ('CO2 by measurement' == input$co2_tabset_panel) {
      co2_in_mg_per_L <- co2_dissolved_measured()$co2_mg_per_L
    } else {
      co2_actual_mol_kg <- alphaZero(my_icTemp, my_icSal, my_icPh) * calcDicOfAlk(my_icAlk, my_icPh, my_icTemp, my_icSal)
      co2_in_mg_per_L <- 1000.0 * co2_actual_mol_kg * MW_CO2 * (calcRho(my_icTemp, my_icSal) / 1000.0)
    }
    co2_for_tgp <- tgp_calc_co2(my_icTemp, my_icSal,
                                icPh()$ic, icAlk()$ic, # NB: [Alk] in meq/kg (e.g., "2.4")
                                co2_data()$co2_mole_frac,
                                my_icBarometric,
                                co2_in_mg_per_L)
    co2_for_tgp$'%' <- formatC(co2_for_tgp$'%', format = 'f', digits = 2)
    co2_for_tgp$'Δ mm Hg' <- formatC(co2_for_tgp$'Δ mm Hg', format = 'f', digits = 2)
    co2_for_tgp$'mm Hg' <- formatC(co2_for_tgp$'mm Hg', format = 'f', digits = 2)
    co2_for_tgp
  })

  # ---- DF_TGP() ----
  # Master reactive: validates the TGP entry against the selected units'
  # range, converts to internal units, computes the per-gas table and the
  # compensation depth.
  df_tgp <- reactive({
    idx_g <- which(input$tgpConvertUnits == tgpUnitsList)
    tgp.LL <- tgpMin[idx_g]
    tgp.UU <- tgpMax[idx_g]
    tgp.units <- tgpUnitsList[idx_g]
    str_message <- paste0('Please enter a TGP value between ',
                          tgp.LL, ' & ', tgp.UU, ' ', tgp.units)
    validate(
      need(
        try(
          input$tgpSlider_convert >= tgp.LL &&
            input$tgpSlider_convert <= tgp.UU
        ),
        str_message
      )
    )
    req(
      input$tgpSlider_convert, input$tgpConvertUnits,
      icTemp(), icSal(),
      icO2(),
      co2_data(),
      df_co2(),
      icBarometric(),
      cancelOutput = TRUE
    )
    my_icTemp <- icTemp()$ic
    my_icSal <- icSal()$ic
    my_icO2 <- icO2()$ic
    my_icBarometric <- icBarometric()$ic
    my_icPh <- icPh()$ic
    my_icAlk <- icAlk()$ic
    # TGP in internal-condition units -- NOT atm, but (inconveniently) "%"
    icTgp <- calcTgpToIcUnits(input$tgpSlider_convert, input$tgpConvertUnits, my_icBarometric)
    # result currently unused; kept in case calcTgpToAllUnits() has side
    # effects -- TODO confirm and drop if pure
    df <- calcTgpToAllUnits(input$tgpSlider_convert, input$tgpConvertUnits, my_icBarometric)
    ic_delta_p <- calc_delta_p_to_ic_units(input$tgpSlider_convert,
                                           input$tgpConvertUnits,
                                           my_icBarometric)
    is_co2_by_measurement <- 'CO2 by measurement' == input$co2_tabset_panel
    tgp_df <- calc_tgp_n2ar_o2_co2(my_icTemp, my_icSal,
                                   my_icPh, my_icAlk, # NB: [Alk] in meq/kg (e.g., "2.4")
                                   my_icBarometric,
                                   ic_delta_p,
                                   my_icO2,
                                   is_co2_by_measurement,
                                   co2_data()$co2_mole_frac,
                                   co2_dissolved_measured()$co2_mg_per_L)
    tgp_mmHg_3 <- as.numeric(tgp_df$'Total'[1])
    comp_depth <- tgpCalcCompDepth(tgp_mmHg_3,
                                   my_icBarometric,
                                   my_icTemp,
                                   my_icSal,
                                   'm')
    tgp_list <- list(df = tgp_df,
                     ic = icTgp,
                     delta_P = ic_delta_p, # in mm Hg
                     comp_depth = comp_depth)
    # 1/2 "duct tape" solution: poke the flag so the DT render re-fires ----
    rv$tgp_sl_init <- 5
    tgp_list
  })

  # Proxy used to push refreshed data into the DT without a full re-render.
  # NB: replaceData() doesn't work inside a module namespace;
  # see https://github.com/rstudio/DT/issues/359 for this workaround.
  proxy_dt_data <- dataTableProxy(session$ns('tgp_dt'))

  observe({
    dataTableAjax(session, df_tgp()$df, rownames = FALSE, outputId = 'tgp_dt')
    reloadData(proxy_dt_data, resetPaging = FALSE)
  })

  output$tgp_dt <- DT::renderDataTable({
    # take a dependency on the "duct tape" flag set in df_tgp()
    rv$tgp_sl_init
    datatable(df_tgp()$df,
              rownames = FALSE,
              options = list(dom = 't',
                             'bSort' = FALSE,
                             'bInfo' = FALSE,
                             pageLength = 3,
                             columnDefs = list(list(className = 'dt-right', targets = 0:5)),
                             initComplete = JS(
                               "function(settings, json) {",
                               "$(this.api().table().header()).css({'background-color': 'lightblue', 'color': '#000'});",
                               "}")
              )
    )
  })
}
# "iQuaCalc (Lite).R"
# UI for the Total Gas Pressure (TGP) module of "iQuaCalc (Lite)".
#
# Args:
#   id: module id used to namespace every input/output id via NS().
#
# Returns a shiny tagList: a left column of input tabs (summary, water
# chemistry, atmosphere) and a right column with the TGP entry, a per-gas
# DT table, a compensation-depth readout and a dP-vs-depth plot.
gasTgpModuleInput <- function(id) {
  ns <- NS(id)
  tagList(
    fluidRow(
      column(width = 5,
        tabsetPanel(id = ns('my_tabset_tgp_controls'), type = 'pills', selected = NULL,
          tabPanel('Input Summary', value = 'tgp_input_summary',
            fluidRow(
              column(width = 12,
                wellPanel(style = 'padding-bottom: 0px; margin-top: 35px; margin-left: 10px;',
                  htmlOutput(ns('gas_input_echo'))
                )
              )
            )
          ),
          tabPanel('Water', value = 'tgp_enter_t_and_s',
            fluidRow(
              # T, S, pH, & Alk column
              column(width = 12,
                fluidRow(
                  column(width = 12,
                    wellPanel(style = 'padding-bottom: 0px; margin-top: 15px; margin-left: 10px;',
                      temperatureModuleInput(ns('temp_for_tgp'), 0),
                      salinityModuleInput(ns('sal_for_tgp'), 0)
                    ),
                    wellPanel(style = 'padding-bottom: 0px; margin-left: 10px;',
                      gasModuleInput(ns('o2_for_tgp'), 'Oxygen', 1, 'Pure O2')
                    ),
                    # CO2 can come either from a direct measurement or be
                    # derived from pH & alkalinity -- one tab per route.
                    wellPanel(style = 'padding-bottom: 0px; margin-left: 10px;',
                      tabsetPanel(id = ns('co2_tabset_panel'),
                        tabPanel(title = 'CO2 by measurement',
                          style = 'padding-top: 8px',
                          co2MeasuredNumericModuleInput(ns('co2_measured_for_tgp'))
                        ),
                        tabPanel(title = 'CO2 by pH & [Alk]',
                          style = 'padding-top: 5px',
                          phModuleInput(ns('ph_for_tgp')),
                          alkModuleInput(ns('alk_for_tgp'), 0)
                        )
                      )
                    )
                  )
                )
              )
            )
          ),
          tabPanel('Atmosphere', value = 'tgp_enter_bar_and_co2_atm',
            fluidRow(
              # barometric & CO2 (atm) column
              column(width = 12,
                fluidRow(
                  column(width = 12,
                    wellPanel(style = 'padding-bottom: 0px; margin-top: 20px; margin-left: 10px;',
                      barometricNumericModuleInput(ns('barometric_for_tgp'))
                    ),
                    wellPanel(style = 'padding-bottom: 0px; margin-left: 10px;',
                      co2_gasModuleInput(ns('co2_atm_tgp'))
                    )
                  )
                )
              )
            )
          )
        )
      ),
      column(width = 7,
        # TGP value + units entry
        fluidRow(
          column(width = 12,
            # NOTE(review): fixed CSS typo 'margin-rightt' (silently ignored
            # by browsers) to 'margin-right'.
            wellPanel(style = 'padding-bottom: 0px; margin-right: 20px;',
              splitLayout(cellWidths = c('25%', '45%', '30%'),
                numericInput(ns('tgpSlider_convert'), 'TGP',
                             min = 0, max = 45, value = 33, step = 0.01),
                selectInput(ns('tgpConvertUnits'), 'TGP Units',
                            choices = tgpChoices),
                tags$h6()
              )
            )
          )
        ),
        # per-gas pressure table
        fluidRow(
          column(width = 11,
            div(DT::dataTableOutput(ns('tgp_dt')),
                style = 'font-size: 115%')
          )
        ),
        br(), br(),
        # compensation-depth readout + dP-vs-depth plot
        fluidRow(
          splitLayout(cellWidths = c('40%', '60%'),
            box(style = "text-align: center;",
                width = NULL,
                status = 'primary',
                background = 'light-blue',
                tags$h3('Compensation Depth', align = 'center'),
                htmlOutput(ns('comp_depth'))
            ),
            plotOutput(ns('tgp_plot'), height = '210px', width = '315px')
          )
        )
      )
    )
  )
}
gasTgpModule <- function(input, output, session, st) {
# "*_init" flags when app is (re-)launched
rv <- reactiveValues(select_init = -1,
tgp_units_default = 'Δ mm Hg (torr)',
tgp_sl_init = -1,
tgp_default = tgpSet,
duct_tape_2 = -1)
icTemp <- callModule(temperatureModule, 'temp_for_tgp', reactive(st()))
icSal <- callModule(salinityModule, 'sal_for_tgp',
reactive(icTemp()),
reactive(st()))
icPh <- callModule(phModule, 'ph_for_tgp',
reactive(icTemp()),
reactive(icSal()),
reactive(st()))
icAlk <- callModule(alkModule, 'alk_for_tgp',
reactive(icTemp()),
reactive(icSal()),
reactive(st()))
icBarometric <- callModule(barometricNumericModule, 'barometric_for_tgp',
reactive(st()))
# co2_data <- callModule(co2_gas_atm_Module, 'co2_atm_tgp',
# reactive(st()))
# module for ATMOSPHERIC CO2 ----
co2_data <- callModule(co2_gasModule, 'co2_atm_tgp',
# reactive(icTemp()), reactive(icSal()),
# reactive(icPh()), reactive(icAlk()),
# reactive(icBarometric()),
reactive(st()))
icO2 <- callModule(gasModule, 'o2_for_tgp',
reactive(icTemp()), reactive(icSal()),
# 'Oxygen', 1, 'Pure O2', # NB: 'O2', **NOT** 'Oxygen'
'O2', 1, 'Pure O2', # NB: 'O2', **NOT** 'Oxygen'
reactive(icBarometric()),
reactive(st()))
# module, DISSOLVED & MEASURED CO2 ----
# accepts entered concentration and returns...only mg/L, for now??
co2_dissolved_measured <- callModule(co2MeasuredNumericModule, 'co2_measured_for_tgp',
reactive(icTemp()), reactive(icSal()),
reactive(icPh()),
reactive(icBarometric()),
reactive(st()))
# icGasSatCalc <- callModule(gasSatModule, 'dummy_gas_sat',
# reactive(icTemp()), reactive(icSal()),
# reactive(icPh()), reactive(icAlk()),
# reactive(icBarometric()), reactive(co2_data()),
# reactive(st()))
# ---- TGP ----
# Observe SELECT_INPUT input, store when changed
observeEvent(input$tgpConvertUnits, priority = 50, {
if(rv$select_init < 0) {
x <- session$ns('tgpConvertUnits')
rv$select_init <- 1
tgp_units_init <- st()[[x]]
if(length(tgp_units_init) == 0)
tgp_units_init <- rv$tgp_units_default
# user (re-)opened app. Is store$select empty?
updateSelectInput(session, 'tgpConvertUnits', 'TGP Units',
choices = tgpChoices,
selected = tgp_units_init)
freezeReactiveValue(input, "tgpConvertUnits")
}
updateStore(session, session$ns("tgpConvertUnits"), input$tgpConvertUnits)
idx <- which(input$tgpConvertUnits == tgpUnitsList)
y <- paste0(session$ns('sl_'), input$tgpConvertUnits)
my_tgp_value <- st()[[y]]
if(length(my_tgp_value) == 0)
my_tgp_value <- rv$tgp_default[idx]
updateNumericInput(session, "tgpSlider_convert", label = paste0(tgpUnitsList_short[idx], ' TGP'),
value = my_tgp_value,
min = tgpMin[idx], max = tgpMax[idx], step = tgpStep[idx])
# updateSliderInput(session, "tgpSlider_convert", label = paste0(tgpUnitsList_short[idx], ' TGP'),
# value = my_tgp_value,
# min = tgpMin[idx], max = tgpMax[idx], step = tgpStep[idx])
freezeReactiveValue(input, "tgpSlider_convert")
# update slider value for current units ???
updateStore(session, paste0(session$ns('sl_'), input$tgpConvertUnits), my_tgp_value)
})
# Observe SLIDER_INPUT input, store when changed
# observeEvent(c(input$gasSlider_convert, icTemp(), icSal()), {
observeEvent(input$tgpSlider_convert, {
if(rv$tgp_sl_init < 0) {
rv$tgp_sl_init <- 1
return()
}
idx <- which(input$tgpConvertUnits == tgpUnitsList)
y <- paste0(session$ns('sl_'), input$tgpConvertUnits)
my_tgp_value <- st()[[y]]
if(length(my_tgp_value) == 0)
my_tgp_value <- rv$tgp_default[idx]
else
my_tgp_value <- input$tgpSlider_convert
# update slider value for current units
updateStore(session, paste0(session$ns('sl_'), input$tgpConvertUnits), my_tgp_value)
})
# ---- ECHO INPUT ----
# ECHO gas inputs
output$gas_input_echo <- renderUI({
req(icBarometric())
str1 <- tags$strong(paste0('Temperature: ', icTemp()$val,' ', icTemp()$units))
str2 <- tags$strong(paste0(' Salinity: ', icSal()$val,' ', icSal()$units))
str3 <- tags$strong(paste0(' pH: ', icPh()$val,' ', icPh()$units))
str4 <- tags$strong(paste0(' Alkalinity: ', icAlk()$val,' ', icAlk()$units))
# if units of 'altitude', then display estimated pressure in mm Hg
if(icBarometric()$units %in% c('km', 'm', 'ft')) {
# returns ic barometric in atm, convert to mm Hg
barometric_in_mmHg <- calcBarometricToIcUnits(icBarometric()$val, icBarometric()$units) * 759.999952
barometric_in_mmHg <- round(barometric_in_mmHg, 1)
str5 <- tags$strong(paste0(icBarometric()$val,' ', icBarometric()$units,
' (~ ', barometric_in_mmHg, ' mm Hg)'))
} else {
str5 <- tags$strong(paste0(' Barometric: ', icBarometric()$val,' ', icBarometric()$units))
}
str6 <- tags$strong(paste0(' CO₂ (atm): ', co2_data()$val,' ', co2_data()$units))
# str6 <- paste0('CO', tags$sub(2), ' (atm): ', icCO2_tgp()$val,' ', icCO2_tgp()$units)
HTML(paste(tags$h4(str1), tags$h4(str2), tags$hr(),
tags$h4(str3), tags$h4(str4), tags$hr(),
tags$h4(str5), tags$h4(str6)))
# sep = '<br/>'))
# HTML(paste(tags$h4(str1)))
})
# Compensation Depth ----
output$comp_depth <- renderUI({
if(df_tgp()$comp_depth < 0) {
str1 <- tags$h4('The water column is')
str2 <- tags$h4('under-saturated')
str3 <- ''
} else {
my_comp_depth <- as.numeric(formatC(round(df_tgp()$comp_depth, 5), format='f', digits=4))
# ad hoc change m to m & cm...
meters <- floor(my_comp_depth)
cm <- round((my_comp_depth - meters) * 100, 2)
meters_centimeters <- paste0(meters, ' m ', cm, ' cm')
str1 <- tags$h3(paste0(round(my_comp_depth, 2), ' m'))
str2 <- tags$h4(paste0('(', meters_centimeters, ')'))
str3 <- tags$h4(paste0('(', convertMetersToFtAndInches(my_comp_depth)[1]), ')')
}
# see: https://stackoverflow.com/questions/26368192/how-to-insert-new-line-in-r-shiny-string
# HTML(paste("hello", "world", sep="<br/>"))
HTML(paste0(str1, str2, str3, sep = "<br/>"))
})
# PLOT TGP vs. depth ----
tgp_depth_df <- reactive({
z_comp <- df_tgp()$comp_depth
req(z_comp >= 0)
z_seq <- seq(z_comp * 1.5, 0, -0.1)
# # convert BP from IC atm to mm Hg
# bp_mmHg <- bp_ic * 759.999952
mmHg_per_meter <- getPressureIncreaseWithDepth(icTemp()$ic, icSal()$ic)
# calc pressure with depth like thie...? ----
# tgp_mmHg <- z_comp_in_meters * mmHg_per_meter + bp_mmHg
# ΔP <- z_seq * mmHg_per_meter
tgp_seq <- seq(100, 0, -0.1)
# cat('in gas_tgp_module.R/tgp_depth_df...\n')
delta_P_in_mmHg <- as.numeric(df_tgp()$delta_P)
# cat('delta_P_in_mmHg = ', delta_P_in_mmHg, ' mm Hg \n')
x <- delta_P_in_mmHg - (mmHg_per_meter * z_seq)
# print(x)
# print(z_seq)
# cat('======================== \n\n')
df <- data.frame(z = z_seq, delta_P = x,
stringsAsFactors = F)
df
})
output$tgp_plot <- renderPlot({
# geom_blank() +
z_comp <- df_tgp()$comp_depth
req(z_comp >= 0)
z_comp_zone <- tibble(x = c(-Inf, Inf, Inf, -Inf),
y = c(z_comp, z_comp, Inf, Inf))
# stringsAsFactors = F)
z_comp_no <- tibble(x = c(-Inf, Inf, Inf, -Inf),
y = c(z_comp, z_comp, 0, 0))
# stringsAsFactors = F)
# NB: remove "geom_path: Each group consists of only one observation. Do you need to adjust the group aesthetic?"
# see: https://stackoverflow.com/questions/27082601/ggplot2-line-chart-gives-geom-path-each-group-consist-of-only-one-observation
# , group = 1
p <- tgp_depth_df() %>% ggplot(aes(delta_P, z, group = 1)) +
scale_y_reverse() +
geom_line(color = 'blue') +
geom_hline(yintercept = z_comp, linetype = "dashed") +
xlab('ΔP (mm Hg)') +
ylab('depth (m)') +
geom_polygon(data = z_comp_zone,
aes(x = x,
y = y
),
alpha = 0.4,
fill = "darkgreen"
) +
geom_polygon(data = z_comp_no,
aes(x = x,
y = y
),
alpha = 0.2,
fill = "red"
)
# coord_cartesian(xlim = c(-6, as.numeric(df_tgp()$delta_P)),
# ylim = c(0, 5),
# expand = F)
p <- p + scale_x_continuous(position = 'top')
p
})
# ---- DF_CO2() ----
df_co2 <- reactive({
my_icTemp <- icTemp()$ic
my_icSal <- icSal()$ic
my_icPh <- icPh()$ic
my_icAlk <- icAlk()$ic / 1000.0
my_icBarometric <- icBarometric()$ic
if('CO2 by measurement' == input$co2_tabset_panel) {
co2_in_mg_per_L <- co2_dissolved_measured()$co2_mg_per_L
} else {
co2_actual_mol_kg <- alphaZero(my_icTemp, my_icSal, my_icPh) * calcDicOfAlk(my_icAlk, my_icPh, my_icTemp, my_icSal)
co2_in_mg_per_L <- 1000.0 * co2_actual_mol_kg * MW_CO2 * (calcRho(my_icTemp, my_icSal) / 1000.0)
}
co2_for_tgp <- tgp_calc_co2(my_icTemp, my_icSal,
icPh()$ic, icAlk()$ic, # NB: [Alk] in meq/kg (e.g., "2.4")
co2_data()$co2_mole_frac,
my_icBarometric,
co2_in_mg_per_L)
co2_for_tgp$'%' <- formatC(co2_for_tgp$'%', format='f', digits=2)
co2_for_tgp$'Δ mm Hg' <- formatC(co2_for_tgp$'Δ mm Hg', format='f', digits=2)
co2_for_tgp$'mm Hg' <- formatC(co2_for_tgp$'mm Hg', format='f', digits=2)
co2_for_tgp
})
# ---- DF_TGP() ----
df_tgp <- reactive({
# ----------------------------------------*
# NB: MUST 'flatten' named gasChoices to gasUnitsListPref ----
idx_g <- which(input$tgpConvertUnits == tgpUnitsList)
tgp.LL <- tgpMin[idx_g]
tgp.UU <- tgpMax[idx_g]
tgp.units <- tgpUnitsList[idx_g]
str_message <- paste0('Please enter a TGP value between ',
tgp.LL, ' & ', tgp.UU, ' ', tgp.units)
validate(
need(
try(
input$tgpSlider_convert >= tgp.LL &&
input$tgpSlider_convert <= tgp.UU
),
str_message
)
)
# ----------------------------------------*
req(
input$tgpSlider_convert, input$tgpConvertUnits,
icTemp(), icSal(),
icO2(),
co2_data(),
df_co2(),
icBarometric(),
cancelOutput = T
)
my_icTemp <- icTemp()$ic
my_icSal <- icSal()$ic
my_icO2 <- icO2()$ic
my_icBarometric <- icBarometric()$ic
my_icPh <- icPh()$ic
my_icAlk <- icAlk()$ic
# TGP I.C. units -- NOT atm, but (inconveniently) "%"
icTgp <- calcTgpToIcUnits(input$tgpSlider_convert, input$tgpConvertUnits, my_icBarometric)
df <- calcTgpToAllUnits(input$tgpSlider_convert, input$tgpConvertUnits, my_icBarometric)
# --------------------------*
ic_delta_p <- calc_delta_p_to_ic_units(input$tgpSlider_convert,
input$tgpConvertUnits,
my_icBarometric)
# my_df <- calc_delta_p_to_all_units(input$tgpSlider_convert,
# input$tgpConvertUnits,
# my_icBarometric)
# cat('\n in gas_tgp_module.R: ...\n')
# cat('ic_delta_p => ', ic_delta_p, '\n\n')
# # print(df %>% spread(units, vals))
# cat('----------------\n')
# print(my_df %>% spread(units, vals))
# print(co2_dissolved_measured()$co2_mg_per_L)
# cat('----------------\n')
is_co2_by_measurement <- ifelse(('CO2 by measurement' == input$co2_tabset_panel),
TRUE,
FALSE)
tgp_df <- calc_tgp_n2ar_o2_co2(my_icTemp, my_icSal,
my_icPh, my_icAlk, # NB: [Alk] in meq/kg (e.g., "2.4")
my_icBarometric,
ic_delta_p,
my_icO2,
is_co2_by_measurement,
co2_data()$co2_mole_frac,
co2_dissolved_measured()$co2_mg_per_L)
# cat('\n********************\n')
# print(tgp_df)
# # print(tgp_df %>% mutate(ratio = as.numeric('N2 + Ar') / as.numeric(O2)))
# cat('********************\n\n')
# --------------------------*
# tgp_mmHg <- df$vals[2]
# tgp_mmHg_2 <- my_df$vals[2]
tgp_mmHg_3 <- as.numeric(tgp_df$'Total'[1])
# cat(tgp_df$'Total Gas'[1], '\n')
# cat(class(tgp_df$'Total Gas'[1]), '\n')
# cat(df$'Δ mm Hg'[1], ' vs. ', ic_delta_p, ' vs ', tgp_mmHg_3, '\n')
comp_depth <- tgpCalcCompDepth(tgp_mmHg_3,
my_icBarometric,
my_icTemp,
my_icSal,
'm')
# df <- df %>% spread(units, vals)
# get O2 and N2 data for TGP -- alraedy spread
# df_o2_n2 <- tgpCalcN2ArFromTgp_po_sto(icTgp,
# my_icO2,
# my_icTemp, my_icSal,
# my_icBarometric)
# Have to format here -- but why?!
# df$'%'[1] <- formatC(round(df$'%'[1], 3), format='f', digits=2)
# df$'Δ mm Hg'[1] <- formatC(round(df$'Δ mm Hg'[1], 3), format='f', digits=2)
# df$'mm Hg'[1] <- formatC(round(df$'mm Hg'[1], 3), format='f', digits=2)
#
# df_o2_n2$'Δ mm Hg'[1] <- formatC(round(df_o2_n2$'Δ mm Hg'[1], 3), format='f', digits=2)
# # [????] WHY must explicitly cast 'numeric' to as.numeric() ??
# df_o2_n2$'Δ mm Hg'[2] <- formatC(round(as.numeric(df_o2_n2$'Δ mm Hg'[2]), 3), format='f', digits=2)
#
# df_o2_n2$'mm Hg'[1] <- formatC(round(df_o2_n2$'mm Hg'[1], 3), format='f', digits=2)
# df_o2_n2$'mm Hg'[2] <- formatC(round(as.numeric(df_o2_n2$'mm Hg'[2]), 3), format='f', digits=2)
#
# df_o2_n2$'%'[1] <- formatC(round(df_o2_n2$'%'[1], 3), format='f', digits=2)
# df_o2_n2$'%'[2] <- formatC(round(as.numeric(df_o2_n2$'%'[2]), 3), format='f', digits=2)
# Vapor Pressure ----
# vp_in_atm <- calcVP(my_icTemp, my_icSal) # in atm
# vp_in_mmHg <- vp_in_atm * 759.99999
# vapor pressure df
# df_vp <- data.frame(gas = c(rep('VP', 3)),
# vals = c('--', formatC(vp_in_mmHg, format='f', digits=2), '--'),
# units = c('%', 'mm Hg', 'Δ mm Hg'),
#
# stringsAsFactors = F
# )
# df_vp <- df_vp %>% spread(units, vals)
# x <- bind_rows(df, df_o2_n2, df_co2(), df_vp)
# tgp_list <- list(df = df, ic = icTgp)
tgp_list <- list(df = tgp_df,
ic = icTgp,
delta_P = ic_delta_p, # in mm Hg
# delta_P = df$'Δ mm Hg'[1], # in mm Hg
comp_depth = comp_depth)
# 1/2 "duct tape" solution ... ----
rv$tgp_sl_init <- 5
tgp_list
})
proxy_dt_data = dataTableProxy(session$ns('tgp_dt'))
# proxy_dt_data = dataTableProxy('tgp_dt')
# NB: replaceData doesn't work in module namespace
# see: https://github.com/rstudio/DT/issues/359 for workaround
# observeEvent(df_tgp(), {
observe({
# replaceData(proxy_dt_data, dummy(), rownames = F, resetPaging = FALSE)
dataTableAjax(session, df_tgp()$df, rownames = F, outputId = 'tgp_dt')
reloadData(proxy_dt_data, resetPaging = F)
cat('\n********************\n')
print(df_tgp()$df)
cat('\n--------------------\n')
# print(tgp_df %>% mutate(ratio = as.numeric('N2 + Ar') / as.numeric(O2)))
cat('********************\n\n')
})
output$tgp_dt <- DT::renderDataTable({
rv$tgp_sl_init
# icTemp()$ic
# req(df_tgp(), cancelOutput = T)
# cat('\n********************\n')
# print(df_tgp()$df)
# cat('\n--------------------\n')
# # print(tgp_df %>% mutate(ratio = as.numeric('N2 + Ar') / as.numeric(O2)))
# cat('********************\n\n')
# datatable( isolate(df_tgp()$df),
datatable( df_tgp()$df,
rownames = F,
options = list(dom = 't',
'bSort' = F,
'bInfo' = F,
pageLength = 3,
# columnDefs = list(list(targets = 2, visible = F)),
columnDefs = list(list(className = 'dt-right', targets = 0:5)),
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'background-color': 'lightblue', 'color': '#000'});",
"}")
)
)
})
} | |
data <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
wdata <- subset(data, Date %in% c("1/2/2007","2/2/2007"))
png("plot4.png", width=480, height=480, units="px")
par(mfrow=c(2,2))
plot( as.numeric(as.character(wdata$Global_active_power)), type="l", xaxt="n", ylab="Global Active Power (kilowatts)", xlab="")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
plot( as.numeric(as.character(wdata$Voltage)), type="l", xaxt="n", ylab="Voltage", xlab="")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
plot( as.numeric(as.character(wdata$Sub_metering_1)), type="l", xaxt="n", ylab="Energy Sub Metering", xlab="")
lines(as.numeric(as.character(wdata$Sub_metering_2)), col="red")
lines(as.numeric(as.character(wdata$Sub_metering_3)), col="blue")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
legend("topright", lty=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot( as.numeric(as.character(wdata$Global_reactive_power)), type="l", xaxt="n", ylab="Global Reactive Power (kilowatts)", xlab="")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
dev.off() | /Desktop/EDA/plot4.R | no_license | AdrianBadica/ExData_Plotting1 | R | false | false | 1,110 | r | data <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
wdata <- subset(data, Date %in% c("1/2/2007","2/2/2007"))
png("plot4.png", width=480, height=480, units="px")
par(mfrow=c(2,2))
plot( as.numeric(as.character(wdata$Global_active_power)), type="l", xaxt="n", ylab="Global Active Power (kilowatts)", xlab="")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
plot( as.numeric(as.character(wdata$Voltage)), type="l", xaxt="n", ylab="Voltage", xlab="")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
plot( as.numeric(as.character(wdata$Sub_metering_1)), type="l", xaxt="n", ylab="Energy Sub Metering", xlab="")
lines(as.numeric(as.character(wdata$Sub_metering_2)), col="red")
lines(as.numeric(as.character(wdata$Sub_metering_3)), col="blue")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
legend("topright", lty=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot( as.numeric(as.character(wdata$Global_reactive_power)), type="l", xaxt="n", ylab="Global Reactive Power (kilowatts)", xlab="")
axis(1,at=1:3, lab=c("Thu", "Fri", "Sat"))
dev.off() |
test_that("to_upper and to_lower have equivalent base versions", {
x <- "This is a sentence."
expect_identical(str_to_upper(x), toupper(x))
expect_identical(str_to_lower(x), tolower(x))
})
test_that("to_title creates one capital letter per word", {
x <- "This is a sentence."
expect_equal(str_count(x, "\\W+"), str_count(str_to_title(x), "[[:upper:]]"))
})
test_that("to_sentence capitalizes just the first letter", {
x <- "This is a sentence."
expect_identical(str_to_sentence("a Test"), "A test")
})
| /tests/testthat/test-case.R | permissive | tidyverse/stringr | R | false | false | 518 | r | test_that("to_upper and to_lower have equivalent base versions", {
x <- "This is a sentence."
expect_identical(str_to_upper(x), toupper(x))
expect_identical(str_to_lower(x), tolower(x))
})
test_that("to_title creates one capital letter per word", {
x <- "This is a sentence."
expect_equal(str_count(x, "\\W+"), str_count(str_to_title(x), "[[:upper:]]"))
})
test_that("to_sentence capitalizes just the first letter", {
x <- "This is a sentence."
expect_identical(str_to_sentence("a Test"), "A test")
})
|
####################################################################################
# Functions to create a version of the chapter 2 monocarp model where the flowering
# strategy is a function-valued trait specified by a spline basis and coefficients
# This relies on Rcode/c2/Monocarp Demog Funs.R; functions defined there and used
# without change are not repeated here.
####################################################################################
## Probability of flowering function
## Input argument B must be a basis object of type 'bspline'
## created by create.bspline.basis() in the fda package
p_bz_spline <- function(z, B, cj) {
linear.p <- eval.basis(B,z)%*%cj; # linear predictor
p <- 1/(1+exp(-linear.p)) # logistic transformation to probability
return(p)
}
###################################################################################
## Functions to build IPM kernels.
###################################################################################
## Define the fecundity kernel
F_z1z_spline <- function (z1, z, m.par, B, cj) {
return( p_bz_spline(z, B, cj) * b_z(z, m.par) * m.par["p.r"] * c_0z1(z1, m.par))
}
## Define the survival kernel
P_z1z_spline <- function(z1, z, m.par, B, cj) {
return((1 - p_bz_spline(z, B,cj)) * s_z(z, m.par) * G_z1z(z1, z, m.par))
}
## Define the fecundity kernel, to operate on vectors z1 and z
## This is much faster than using outer() when building an iteration matrix
F_z1z_spline_vec <- function (z1, z, m.par, B, cj) {
a <- matrix(c_0z1(z1, m.par),ncol=1);
b <- matrix(p_bz_spline(z, B, cj) * b_z(z, m.par),nrow=1);
return(m.par["p.r"]*(a%*%b));
}
## Make the iteration matrices
mk_K_spline <- function(m, m.par, L, U, B, cj) {
# mesh points
h <- (U - L)/m
meshpts <- L + ((1:m) - 1/2) * h
P <- h * (outer(meshpts, meshpts, P_z1z_spline, m.par = m.par, B = B, cj = cj))
F <- h* F_z1z_spline_vec(meshpts, meshpts, m.par=m.par, B=B, cj=cj)
K <- P + F
return(list(K = K, meshpts = meshpts, P = P, F = F))
}
| /ipmbook-code/c9/Monocarp Demog SplineFuns.R | no_license | aekendig/population-modeling-techniques | R | false | false | 2,055 | r | ####################################################################################
# Functions to create a version of the chapter 2 monocarp model where the flowering
# strategy is a function-valued trait specified by a spline basis and coefficients
# This relies on Rcode/c2/Monocarp Demog Funs.R; functions defined there and used
# without change are not repeated here.
####################################################################################
## Probability of flowering function
## Input argument B must be a basis object of type 'bspline'
## created by create.bspline.basis() in the fda package
p_bz_spline <- function(z, B, cj) {
linear.p <- eval.basis(B,z)%*%cj; # linear predictor
p <- 1/(1+exp(-linear.p)) # logistic transformation to probability
return(p)
}
###################################################################################
## Functions to build IPM kernels.
###################################################################################
## Define the fecundity kernel
F_z1z_spline <- function (z1, z, m.par, B, cj) {
return( p_bz_spline(z, B, cj) * b_z(z, m.par) * m.par["p.r"] * c_0z1(z1, m.par))
}
## Define the survival kernel
P_z1z_spline <- function(z1, z, m.par, B, cj) {
return((1 - p_bz_spline(z, B,cj)) * s_z(z, m.par) * G_z1z(z1, z, m.par))
}
## Define the fecundity kernel, to operate on vectors z1 and z
## This is much faster than using outer() when building an iteration matrix
F_z1z_spline_vec <- function (z1, z, m.par, B, cj) {
a <- matrix(c_0z1(z1, m.par),ncol=1);
b <- matrix(p_bz_spline(z, B, cj) * b_z(z, m.par),nrow=1);
return(m.par["p.r"]*(a%*%b));
}
## Make the iteration matrices
mk_K_spline <- function(m, m.par, L, U, B, cj) {
# mesh points
h <- (U - L)/m
meshpts <- L + ((1:m) - 1/2) * h
P <- h * (outer(meshpts, meshpts, P_z1z_spline, m.par = m.par, B = B, cj = cj))
F <- h* F_z1z_spline_vec(meshpts, meshpts, m.par=m.par, B=B, cj=cj)
K <- P + F
return(list(K = K, meshpts = meshpts, P = P, F = F))
}
|
sales <- c(6,6,4,2,3)
salesmatrix <- matrix(c(1,1,1,1,1,6,6,4,2,3), nrow=5, ncol=2)
cars <- c(20,18,10,6,11)
plot(sales, cars)
model <- lm(cars ~ sales)
summary(model)
abline(model)
w <- solve(t(salesmatrix)%*%salesmatrix)%*%t(salesmatrix)%*%cars
w
newdata = data.frame(sales=5)
predict(model, newdata, interval="predict")
predict(model, newdata, interval="confidence")
confint(model)
0.125+3.125*t(sales)
cars - (0.125+3.125*t(sales))
mean((cars - (0.125+3.125*t(sales)))^2) | /Project 2/Problem 2.R | no_license | jmjlacosta/ARTS | R | false | false | 477 | r | sales <- c(6,6,4,2,3)
salesmatrix <- matrix(c(1,1,1,1,1,6,6,4,2,3), nrow=5, ncol=2)
cars <- c(20,18,10,6,11)
plot(sales, cars)
model <- lm(cars ~ sales)
summary(model)
abline(model)
w <- solve(t(salesmatrix)%*%salesmatrix)%*%t(salesmatrix)%*%cars
w
newdata = data.frame(sales=5)
predict(model, newdata, interval="predict")
predict(model, newdata, interval="confidence")
confint(model)
0.125+3.125*t(sales)
cars - (0.125+3.125*t(sales))
mean((cars - (0.125+3.125*t(sales)))^2) |
skip_if_no_keras <- function(required_version = NULL) {
if (!is_keras_available(required_version))
skip("required keras version not available for testing")
}
test_succeeds <- function(desc, expr, required_version = NULL) {
test_that(desc, {
skip_if_no_keras(required_version)
expect_error(force(expr), NA)
})
}
test_call_succeeds <- function(call_name, expr, required_version = NULL) {
test_succeeds(paste(call_name, "call succeeds"), expr, required_version)
}
is_backend <- function(name) {
is_keras_available() && identical(backend()$backend(), name)
}
skip_if_cntk <- function() {
if (is_backend("cntk"))
skip("Test not run for CNTK backend")
}
skip_if_tensorflow_implementation <- function() {
if (keras:::is_tensorflow_implementation())
skip("Test not run for TensorFlow implementation")
}
define_model <- function() {
model <- keras_model_sequential()
model %>%
layer_dense(32, input_shape = 784, kernel_initializer = initializer_ones()) %>%
layer_activation('relu') %>%
layer_dense(10) %>%
layer_activation('softmax')
model
}
define_and_compile_model <- function() {
model <- define_model()
model %>%
compile(
loss='binary_crossentropy',
optimizer = optimizer_sgd(),
metrics='accuracy'
)
model
}
random_array <- function(dim) {
array(runif(prod(dim)), dim = dim)
}
| /tests/testthat/utils.R | no_license | adamkc/keras | R | false | false | 1,377 | r |
skip_if_no_keras <- function(required_version = NULL) {
if (!is_keras_available(required_version))
skip("required keras version not available for testing")
}
test_succeeds <- function(desc, expr, required_version = NULL) {
test_that(desc, {
skip_if_no_keras(required_version)
expect_error(force(expr), NA)
})
}
test_call_succeeds <- function(call_name, expr, required_version = NULL) {
test_succeeds(paste(call_name, "call succeeds"), expr, required_version)
}
is_backend <- function(name) {
is_keras_available() && identical(backend()$backend(), name)
}
skip_if_cntk <- function() {
if (is_backend("cntk"))
skip("Test not run for CNTK backend")
}
skip_if_tensorflow_implementation <- function() {
if (keras:::is_tensorflow_implementation())
skip("Test not run for TensorFlow implementation")
}
define_model <- function() {
model <- keras_model_sequential()
model %>%
layer_dense(32, input_shape = 784, kernel_initializer = initializer_ones()) %>%
layer_activation('relu') %>%
layer_dense(10) %>%
layer_activation('softmax')
model
}
define_and_compile_model <- function() {
model <- define_model()
model %>%
compile(
loss='binary_crossentropy',
optimizer = optimizer_sgd(),
metrics='accuracy'
)
model
}
random_array <- function(dim) {
array(runif(prod(dim)), dim = dim)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.trendbreaker}
\alias{plot.trendbreaker}
\title{Plotting method for trendbreaker objects}
\usage{
\method{plot}{trendbreaker}(
x,
x_axis,
point_size = 2,
col_normal = "#8B8B8C",
col_increase = "#CB3355",
col_decrease = "#32AB96",
guide = TRUE,
...
)
}
\arguments{
\item{x}{an \code{trendbreaker} object, as returned by \code{asmodee}}
\item{x_axis}{the name or position of the variable in \code{get_results(x)} to be
used on the x-axis, which represents time}
\item{point_size}{the size of the points to be used; defaults to 2}
\item{col_normal}{the color to be used for non-outlying observations,
i.e. observations falling within the prediction interval of the estimated
temporal trend}
\item{col_increase}{the color to be used for outlying observations which are
above the prediction interval of the estimated temporal trend}
\item{col_decrease}{the color to be used for outlying observations which are
below the prediction interval of the estimated temporal trend}
\item{guide}{a \code{logical} indicating whether a color legend should be added to
the plot (\code{TRUE}, default) or not (\code{FALSE})}
\item{...}{unused - present for compatibility with the \code{plot} generic}
}
\description{
The plotting method for \code{trendbreaker} objects produces a \code{ggplot} object, which
can then be modified using \code{ggplot2}. It accepts a few arguments for
customising the graphs produced.
}
\author{
Thibaut Jombart
}
| /man/plot.trendbreaker.Rd | permissive | stephaneghozzi/trendbreaker | R | false | true | 1,543 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.trendbreaker}
\alias{plot.trendbreaker}
\title{Plotting method for trendbreaker objects}
\usage{
\method{plot}{trendbreaker}(
x,
x_axis,
point_size = 2,
col_normal = "#8B8B8C",
col_increase = "#CB3355",
col_decrease = "#32AB96",
guide = TRUE,
...
)
}
\arguments{
\item{x}{an \code{trendbreaker} object, as returned by \code{asmodee}}
\item{x_axis}{the name or position of the variable in \code{get_results(x)} to be
used on the x-axis, which represents time}
\item{point_size}{the size of the points to be used; defaults to 2}
\item{col_normal}{the color to be used for non-outlying observations,
i.e. observations falling within the prediction interval of the estimated
temporal trend}
\item{col_increase}{the color to be used for outlying observations which are
above the prediction interval of the estimated temporal trend}
\item{col_decrease}{the color to be used for outlying observations which are
below the prediction interval of the estimated temporal trend}
\item{guide}{a \code{logical} indicating whether a color legend should be added to
the plot (\code{TRUE}, default) or not (\code{FALSE})}
\item{...}{unused - present for compatibility with the \code{plot} generic}
}
\description{
The plotting method for \code{trendbreaker} objects produces a \code{ggplot} object, which
can then be modified using \code{ggplot2}. It accepts a few arguments for
customising the graphs produced.
}
\author{
Thibaut Jombart
}
|
\name{bayesurvey-package}
\alias{bayesurvey-package}
\alias{bayesurvey}
\docType{package}
\title{
Bayesian Conjugate Prior Analysis of Survey Data
}
\description{
This package implements Bayesian conjugate prior models with a focus on handling survey data with multiple categories. It starts by normalizing and transforming the data, then fits prior distributions, and runs a conjugate prior analysis that can be iterative. While many of the functions are designed for categorical data, the conjugate prior functions work with any type of data..
}
\author{
Brittany Alexander
Maintainer: Brittany Alexander <brittany@stat.tamu.edu>
}
\references{
Poll-Based Bayesian Models to Predict United States Presidential Elections". B. Alexander, L. Ellingson, Joint Statistical Meetings Proceedings 2019.
}
\keyword{ package }
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
| /man/bayesurvey-package.Rd | no_license | balexanderstats/bayesurvey | R | false | false | 982 | rd | \name{bayesurvey-package}
\alias{bayesurvey-package}
\alias{bayesurvey}
\docType{package}
\title{
Bayesian Conjugate Prior Analysis of Survey Data
}
\description{
This package implements Bayesian conjugate prior models with a focus on handling survey data with multiple categories. It starts by normalizing and transforming the data, then fits prior distributions, and runs a conjugate prior analysis that can be iterative. While many of the functions are designed for categorical data, the conjugate prior functions work with any type of data..
}
\author{
Brittany Alexander
Maintainer: Brittany Alexander <brittany@stat.tamu.edu>
}
\references{
Poll-Based Bayesian Models to Predict United States Presidential Elections". B. Alexander, L. Ellingson, Joint Statistical Meetings Proceedings 2019.
}
\keyword{ package }
\examples{
\dontrun{
## Optional simple examples of the most important functions
## These can be in \dontrun{} and \donttest{} blocks.
}
}
|
#' Plot estimated functions for experimental units faceted by cluster versus data to assess fit.
#'
#' Uses as input the output object from the gpdpgrow() and gmrfdpgrow() functions.
#'
#' @param object A \code{gpdpgrow} or \code{gmrfdpgrow} object.
#' @param N_clusters Denotes the number of largest sized (in terms of membership) clusters to plot.
#' Defaults to all clusters.
#' @param time_points Inputs a vector of common time points at which the collections of functions were
#' observed (with the possibility of intermittent missingness). The length of \code{time_points}
#' should be equal to the number of columns in the data matrix, \code{y}. Defaults to
#' \code{time_points = 1:ncol(y)}.
#' @param units_name The plot label for observation units. Defaults to \code{units_name = "function"}.
#' @param units_label A vector of labels to apply to the observation units with length equal to the number of
#' unique units. Defaults to sequential numeric values as input with data, \code{y}.
#' @param date_field A vector of \code{Date} values for labeling the x-axis tick marks.
#' Defaults to \code{1:T} .
#' @param x.axis.label Text label for x-axis. Defaults to \code{"time"}.
#' @param y.axis.label Text label for y-axis. Defaults to \code{"function values"}.
#' @param smoother A scalar boolean input indicating whether to co-plot a smoother line
#' through the functions in each cluster.
#' @param sample_rate A numeric value in (0,1] indicating percent of functions to randomly sample within
#' each cluster to address over-plotting. Defaults to 1.
#' @param single_unit A scalar boolean indicating whether to plot the fitted vs data curve for
#' only a single experimental unit (versus a random sample of 6).
#' Defaults to \code{single_unit = FALSE}.
#' @param credible A scalar boolean indicating whether to plot 95 percent credible intervals for
#' estimated functions, \code{bb}, when plotting fitted functions versus data. Defaults to
#' \code{credible = FALSE}
#' @param num_plot A scalar integer indicating how many randomly-selected functions to plot
#' (each in its own plot panel) in the plot of functions versus the observed time series
#' in the case that \code{single_unit == TRUE}.
#' Defaults to \code{num_plot = 6}.
#' @return A list object containing the plot of estimated functions, faceted by cluster,
#' and the associated \code{data.frame} object.
#' \item{p.cluster}{A \code{ggplot2} plot object}
#' \item{dat.cluster}{A \code{data.frame} object used to generate \code{p.cluster}.}
#' @seealso \code{\link{gpdpgrow}}, \code{\link{gmrfdpgrow}}
#' @examples
#' {
#' library(growfunctions)
#'
#' ## load the monthly employment count data for a collection of
#' ## U.S. states from the Current
#' ## Population Survey (cps)
#' data(cps)
#' ## subselect the columns of N x T, y, associated with
#' ## the years 2008 - 2013
#' ## to examine the state level employment levels
#' ## during the "great recession"
#' y_short <- cps$y[,(cps$yr_label %in% c(2008:2013))]
#'
#' ## Run the DP mixture of iGMRF's to estimate posterior
#' ## distributions for model parameters
#' ## Under default RW2(kappa) = order 2 trend
#' ## precision term
#' res_gmrf <- gmrfdpgrow(y = y_short,
#' n.iter = 40,
#' n.burn = 20,
#' n.thin = 1)
#'
#' ## 2 plots of estimated functions: 1. faceted by cluster and fit;
#' ## 2. data for experimental units.
#' ## for a group of randomly-selected functions
#' fit_plots_gmrf <- cluster_plot( object = res_gmrf,
#' units_name = "state",
#' units_label = cps$st,
#' single_unit = FALSE,
#' credible = TRUE )
#' }
#' @author Terrance Savitsky \email{tds151@@gmail.com}
#' @aliases cluster_plot
#' @export
cluster_plot <- function(object, N_clusters = NULL, time_points = NULL, units_name = "unit",
                         units_label = NULL, date_field = NULL, x.axis.label = NULL,
                         y.axis.label = NULL, smoother = TRUE,
                         sample_rate = 1.0, single_unit = FALSE, credible = FALSE,
                         num_plot = NULL)
{
     ## read in the data
     y <- object$optpartition$y ## N x T data matrix used for modeling
     ## for gmrfdpgrow object, y may have intermittent missing values (= NA).
     ## there is also returned an N x T, y_bar, that estimates the missing values, not used here.
     N <- nrow(y) ## number of experimental units
     T <- ncol(y) ## number of time-indexed observations for each unit (masks base alias T for TRUE inside this function)
     ## capture time points, t_j, at which functions are observed
     if(is.null(time_points))
     {
          time_points <- 1:T
     }
     ## build data.frame that maps experimental units to clusters
     cluster <- object$bigSmin
     c.sizes <- sapply(cluster,length)
     if( (length(N_clusters) == 0) || (N_clusters > length(cluster)) )
     { ## plot all the clusters
          clusterstoplot <- sort(c.sizes,decreasing = TRUE,
                                   index.return=TRUE)$ix[1:length(cluster)]
     }else{ ## plot the functions in the N_cluster largest clusters
          clusterstoplot <- sort(c.sizes,decreasing = TRUE,
                                   index.return=TRUE)$ix[1:N_clusters]
     } ## end conditional statement on which clusters to plot
     map <- vector(mode="list",length = length(clusterstoplot))
     for(i in 1:length(clusterstoplot))
     {
          cluster.i <- cluster[[clusterstoplot[i]]]
          map[[i]] <- as.data.frame(cbind(cluster.i,i),stringsAsFactors = FALSE)
          names(map[[i]]) <- c("units_numeric","cluster")
     }
     map <- do.call("rbind",map)
     ## experimental unit entries are numeric from bigSmin, with minimum value set to 1 (not 0 as in c++)
     map$units_numeric <- as.numeric(map$units_numeric)
     ## create link of units_label to replace numbers with labels
     units_numeric <- 1:N ##sort(unique(map$units_numeric))
     if( length(units_label) > 0 )
     {
          units_label <- sort(unique(units_label)) ## a vector of entries
     }else{ ## set units_label to the numeric values from bigSmin if units_label == NULL
          units_label <- units_numeric
     }
     stopifnot(length(units_label) == length(units_numeric))
     tmp <- data.frame(units_label,units_numeric)
     names(tmp) <- c(eval(units_name),"units_numeric")
     map <- merge(map,tmp,by="units_numeric",all.x=TRUE)
     map <- map[order(map$cluster,map[,eval(units_name)]),]
     ###############################################################################
     ## compose plot of fitted functions grouped into membership clusters
     ###############################################################################
     ## create plot data.frame
     bb <- object$bb ## nkeep x (N*T) function samples. N is fast-moving
     bb.hat <- colMeans(bb)
     units_label_dat <- rep(eval(units_label),times=T)
     if( length(date_field) > 0 )
     {
          stopifnot(length(date_field) == T)
          month <- rep(sort(unique(date_field)), each = N)
     }else{ ## no date field input
          month <- rep(time_points, each = N)
     }
     dat.b <- data.frame(bb.hat,month,units_label_dat)
     names(dat.b) <- c("value","time",eval(units_name))
     datb.clust <- merge(dat.b,map,all.x=TRUE,by=eval(units_name),sort = FALSE)
     ## sample records: within each cluster, keep a random `sample_rate` fraction
     ## of the member units to reduce over-plotting
     rate <- sample_rate
     tmp <- split(datb.clust,list(datb.clust$cluster))
     tmp <- unlist(sapply(tmp,function(x){
          tot_recs <- length(unique(x[,eval(units_name)]))
          u_recs <- sort(unique(x[,eval(units_name)]))
          inc_recs <- sample(u_recs,round(rate*tot_recs),replace = FALSE)
     }))
     datb_plot <- subset(datb.clust, datb.clust[,eval(units_name)] %in% tmp)
     ## bases faceted by cluster
     p.c <- ggplot(data=datb_plot,aes(x = time, y = value))
     l <- geom_line(aes_string(group = eval(units_name)), alpha = 0.2)
     l.2 <- geom_smooth(aes(group=1),alpha = 1.0,
                         size = 1, linetype = 2, se = FALSE, colour = "brown",
                         method = "loess")
     f <- facet_wrap(~cluster, scales = "fixed")
     options <- theme(axis.title.x = element_text(size=18),
                       axis.title.y = element_text(size=18),
                       axis.text.x = element_text(size=14,
                                                    angle=-90,hjust=0),
                       axis.text.y = element_text(size=14),
                       ## legend.margin = margin(5, 5, 5, 5),
                       strip.text = element_text(size=18))
     if( length(y.axis.label) > 0 )
     {
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = eval(y.axis.label))
          }else{ ## no x label
               axis <- labs(x = "time", y = eval(y.axis.label) )
          }
     }else{
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = "normalized y units" )
          }else{ ## no labels
               axis <- labs(x = "time", y = "normalized y units" )
          }
     }
     dev.new() ## NOTE(review): opens a new graphics device as a side effect of plotting
     if( smoother == TRUE)
     {
          p.c <- p.c + l + l.2 + f + theme_bw() + axis +
               options
     }else{ ## no smoother
          p.c <- p.c + l + f + theme_bw() + axis +
               options
     }
     suppressWarnings(print(p.c))
     ###############################################################################
     ## compose plot of fitted functions vs. actual data for randomly-selected units
     ###############################################################################
     d3 <- data.frame(units_label,y) ## y is N x T
     names(d3) <- c(eval(units_name),eval(time_points)) ## even though month is date/numeric, names will be char
     dat_gca <- merge(d3,map,by=eval(units_name),all.x=TRUE)
     dat_gca <- melt(dat_gca,measure.vars = as.character(time_points), variable.name = "time",
                      value.name = "fit")
     dat_gca$time <- as.numeric(dat_gca$time)
     if( !is.null(date_field))
     {
          dat_date <- data.frame(sort(unique(time_points)),sort(unique(date_field)))
          names(dat_date) <- c("time","date")
          dat_gca <- merge(dat_gca,dat_date, by = "time", all.x=TRUE)
          dat_gca$time <- as.Date(dat_gca$date)
          dat_gca$date <- NULL
     }
     ##
     ## compose fit predictions
     ##
     ## build data.frame with N*T rows (where N is fast-moving)
     lo <- apply(bb,2,function(x){quantile(x,probs = 0.025)}) # 1 x N*T - N is fast
     hi <- apply(bb,2,function(x){quantile(x,probs = 0.975)}) # 1 x N*T - N is fast
     dat_gcp <- data.frame(rep(d3[,eval(units_name)],times=T),
                            rep(1:T,each=N),bb.hat,lo,hi)
     names(dat_gcp) <- c(eval(units_name),"time","fit","lo","hi")
     ## merge in cluster locations to facet fit v data plot panels by cluster membership
     dat_gcp <- merge(dat_gcp,map,by=eval(units_name),all.x=TRUE)
     if( !is.null(date_field) )
     {
          dat_gcp <- merge(dat_gcp,dat_date, by = "time", all.x=TRUE)
          dat_gcp$time <- as.Date(dat_gcp$date)
          dat_gcp$date <- NULL
     }
     ##
     ## render growth curve plot
     ##
     ## randomly select records to plot
     if(single_unit == FALSE) ## plot the fit for a single experimental unit?
     {
          if(!is.null(num_plot))
          {
               num_plot <- floor(num_plot) ## ensure num_plot is an integer
          }else{ ## is.null(num_plot) = TRUE
               num_plot <- 6
          }
          ## number of units per cluster
          cluster_counts <- as.vector(table(map$cluster))
          M <- length(cluster_counts) ## number of clusters
          ## map[,"cluster"] is a vector of length N of cluster assignments
          ## that is sorted by cluster. We sample num_plot from this, without replacement,
          ## to ensure we don't sample more units per cluster than there are members.
          ## This is effectively sampling with probability proportional to cluster size.
          units_to_sample <- map[,eval(units_name)]
          cluster_labels <- map[,"cluster"]
          sample_ids <- sort(sample(cluster_labels,num_plot,
                                      replace=FALSE))
          table_clust <- table(sample_ids)
          ## labels of clusters sampled
          clusters_sampled <- as.numeric(names(table_clust))
          ## number of each cluster label sampled
          num_per_clust <- as.vector(table_clust)
          ## generate num_per_clust randomly sampled units with clusters_sampled
          M_to_sample <- length(num_per_clust) ## won't sample all M clusters
          plot_st <- vector("list",M_to_sample)
          for( m in 1:M_to_sample )
          {
               units_to_sample_m <- units_to_sample[cluster_labels == clusters_sampled[m]]
               plot_st[[m]] <- sample(units_to_sample_m, num_per_clust[m],replace=FALSE)
          } ## end loop m sampling num_per_clust[m] units within cluster, cluster_sampled[m]
          plot_st <- unlist(plot_st)
     }else{ ## sample only a single unit
          plot_st <- sample(map[,eval(units_name)],1,replace=FALSE)
     } ## end conditional statement on whether single unit or group of units to be sampled
     ## outputs unit_name ids to be sampled in plot_st
     plot_gca <- subset(dat_gca, dat_gca[,eval(units_name)] %in% plot_st)
     plot_gcp <- subset(dat_gcp, dat_gcp[,eval(units_name)] %in% plot_st)
     ## compose plot
     p.t = ggplot(data=plot_gcp,aes(x=time, y = fit) )
     l = geom_line(aes_string(group = eval(units_name)),size = 1.2,
                    alpha = 0.6, colour="#FF9999")
     l.2 = geom_point(data=plot_gca,aes_string(group = eval(units_name)),size=3,shape=1,
                       colour="black") ## Add the data
     l.3 = geom_line(data=plot_gca,aes_string(group = eval(units_name)),lty="dotted") ## Add the data
     if( length(y.axis.label) > 0 )
     {
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = eval(y.axis.label))
          }else{ ## no x label
               axis <- labs(x = "time", y = eval(y.axis.label) )
          }
     }else{
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = "normalized y units")
          }else{ ## no labels
               axis <- labs(x = "time", y = "normalized y units" )
          }
     }
     f <- facet_wrap(as.formula(paste("cluster ~", units_name)), scales="fixed")
     options <- theme(axis.title.x = element_text(size=18),
                       axis.title.y = element_text(size=18),
                       axis.text.x = element_text(size=14,
                                                    angle=-90,hjust=0),
                       axis.text.y = element_text(size=14),
                       # legend.title = element_text(size=12),
                       # legend.text = element_text(size=12),
                       ## legend.margin = margin(5, 5, 5, 5),
                       strip.text = element_text(size=18))
     if( credible == FALSE )
     {
          p.t <- p.t + l + f + axis + l.2 + l.3 + theme_bw() + options
     }else{ ## plot credible intervals
          ci <- geom_ribbon(aes(ymin=lo,ymax=hi),alpha=0.2)
          p.t <- p.t + l + ci + f + axis + l.2 + l.3 + theme_bw() + options
     }
     suppressWarnings(print(p.t))
     ## NOTE(review): presumably quiets R CMD check notes about non-standard
     ## evaluation variables used inside aes() -- confirm
     value <- fit <- time <- NULL
     return(invisible(list(p.cluster = p.c, p.fit = p.t, dat.fit = dat_gcp,
                            dat.cluster = datb_plot, map = map)))
} ## end function cluster_plot | /fuzzedpackages/growfunctions/R/gp_cluster_plot.R | no_license | akhikolla/testpackages | R | false | false | 17,753 | r |
#' Plot estimated functions for experimental units faceted by cluster versus data to assess fit.
#'
#' Uses as input the output object from the gpdpgrow() and gmrfdpgrow() functions.
#'
#' @param object A \code{gpdpgrow} or \code{gmrfdpgrow} object.
#' @param N_clusters Denotes the number of largest sized (in terms of membership) clusters to plot.
#' Defaults to all clusters.
#' @param time_points Inputs a vector of common time points at which the collections of functions were
#' observed (with the possibility of intermittent missingness). The length of \code{time_points}
#' should be equal to the number of columns in the data matrix, \code{y}. Defaults to
#' \code{time_points = 1:ncol(y)}.
#' @param units_name The plot label for observation units. Defaults to \code{units_name = "function"}.
#' @param units_label A vector of labels to apply to the observation units with length equal to the number of
#' unique units. Defaults to sequential numeric values as input with data, \code{y}.
#' @param date_field A vector of \code{Date} values for labeling the x-axis tick marks.
#' Defaults to \code{1:T} .
#' @param x.axis.label Text label for x-axis. Defaults to \code{"time"}.
#' @param y.axis.label Text label for y-axis. Defaults to \code{"function values"}.
#' @param smoother A scalar boolean input indicating whether to co-plot a smoother line
#' through the functions in each cluster.
#' @param sample_rate A numeric value in (0,1] indicating percent of functions to randomly sample within
#' each cluster to address over-plotting. Defaults to 1.
#' @param single_unit A scalar boolean indicating whether to plot the fitted vs data curve for
#' only a single experimental units (versus a random sample of 6).
#' Defaults to \code{single_unit = FALSE}.
#' @param credible A scalar boolean indicating whether to plot 95 percent credible intervals for
#' estimated functions, \code{bb}, when plotting fitted functions versus data. Defaults to
#' \code{credible = FALSE}
#' @param num_plot A scalar integer indicating how many randomly-selected functions to plot
#' (each in it's own plot panel) in the plot of functions versus the observed time series
#' in the case that \code{single_unit == TRUE}.
#' Defaults to \code{num_plot = 6}.
#' @return A list object containing the plot of estimated functions, faceted by cluster,
#' and the associated \code{data.frame} object.
#' \item{p.cluster}{A \code{ggplot2} plot object}
#' \item{dat.cluster}{A \code{data.frame} object used to generate \code{p.cluster}.}
#' @seealso \code{\link{gpdpgrow}}, \code{\link{gmrfdpgrow}}
#' @examples
#' {
#' library(growfunctions)
#'
#' ## load the monthly employment count data for a collection of
#' ## U.S. states from the Current
#' ## Population Survey (cps)
#' data(cps)
#' ## subselect the columns of N x T, y, associated with
#' ## the years 2008 - 2013
#' ## to examine the state level employment levels
#' ## during the "great recession"
#' y_short <- cps$y[,(cps$yr_label %in% c(2008:2013))]
#'
#' ## Run the DP mixture of iGMRF's to estimate posterior
#' ## distributions for model parameters
#' ## Under default RW2(kappa) = order 2 trend
#' ## precision term
#' res_gmrf <- gmrfdpgrow(y = y_short,
#' n.iter = 40,
#' n.burn = 20,
#' n.thin = 1)
#'
#' ## 2 plots of estimated functions: 1. faceted by cluster and fit;
#' ## 2. data for experimental units.
#' ## for a group of randomly-selected functions
#' fit_plots_gmrf <- cluster_plot( object = res_gmrf,
#' units_name = "state",
#' units_label = cps$st,
#' single_unit = FALSE,
#' credible = TRUE )
#' }
#' @author Terrance Savitsky \email{tds151@@gmail.com}
#' @aliases cluster_plot
#' @export
cluster_plot <- function(object, N_clusters = NULL, time_points = NULL, units_name = "unit",
                         units_label = NULL, date_field = NULL, x.axis.label = NULL,
                         y.axis.label = NULL, smoother = TRUE,
                         sample_rate = 1.0, single_unit = FALSE, credible = FALSE,
                         num_plot = NULL)
{
     ## read in the data
     y <- object$optpartition$y ## N x T data matrix used for modeling
     ## for gmrfdpgrow object, y may have intermittent missing values (= NA).
     ## there is also returned an N x T, y_bar, that estimates the missing values, not used here.
     N <- nrow(y) ## number of experimental units
     T <- ncol(y) ## number of time-indexed observations for each unit (masks base alias T for TRUE inside this function)
     ## capture time points, t_j, at which functions are observed
     if(is.null(time_points))
     {
          time_points <- 1:T
     }
     ## build data.frame that maps experimental units to clusters
     cluster <- object$bigSmin
     c.sizes <- sapply(cluster,length)
     if( (length(N_clusters) == 0) || (N_clusters > length(cluster)) )
     { ## plot all the clusters
          clusterstoplot <- sort(c.sizes,decreasing = TRUE,
                                   index.return=TRUE)$ix[1:length(cluster)]
     }else{ ## plot the functions in the N_cluster largest clusters
          clusterstoplot <- sort(c.sizes,decreasing = TRUE,
                                   index.return=TRUE)$ix[1:N_clusters]
     } ## end conditional statement on which clusters to plot
     map <- vector(mode="list",length = length(clusterstoplot))
     for(i in 1:length(clusterstoplot))
     {
          cluster.i <- cluster[[clusterstoplot[i]]]
          map[[i]] <- as.data.frame(cbind(cluster.i,i),stringsAsFactors = FALSE)
          names(map[[i]]) <- c("units_numeric","cluster")
     }
     map <- do.call("rbind",map)
     ## experimental unit entries are numeric from bigSmin, with minimum value set to 1 (not 0 as in c++)
     map$units_numeric <- as.numeric(map$units_numeric)
     ## create link of units_label to replace numbers with labels
     units_numeric <- 1:N ##sort(unique(map$units_numeric))
     if( length(units_label) > 0 )
     {
          units_label <- sort(unique(units_label)) ## a vector of entries
     }else{ ## set units_label to the numeric values from bigSmin if units_label == NULL
          units_label <- units_numeric
     }
     stopifnot(length(units_label) == length(units_numeric))
     tmp <- data.frame(units_label,units_numeric)
     names(tmp) <- c(eval(units_name),"units_numeric")
     map <- merge(map,tmp,by="units_numeric",all.x=TRUE)
     map <- map[order(map$cluster,map[,eval(units_name)]),]
     ###############################################################################
     ## compose plot of fitted functions grouped into membership clusters
     ###############################################################################
     ## create plot data.frame
     bb <- object$bb ## nkeep x (N*T) function samples. N is fast-moving
     bb.hat <- colMeans(bb)
     units_label_dat <- rep(eval(units_label),times=T)
     if( length(date_field) > 0 )
     {
          stopifnot(length(date_field) == T)
          month <- rep(sort(unique(date_field)), each = N)
     }else{ ## no date field input
          month <- rep(time_points, each = N)
     }
     dat.b <- data.frame(bb.hat,month,units_label_dat)
     names(dat.b) <- c("value","time",eval(units_name))
     datb.clust <- merge(dat.b,map,all.x=TRUE,by=eval(units_name),sort = FALSE)
     ## sample records: within each cluster, keep a random `sample_rate` fraction
     ## of the member units to reduce over-plotting
     rate <- sample_rate
     tmp <- split(datb.clust,list(datb.clust$cluster))
     tmp <- unlist(sapply(tmp,function(x){
          tot_recs <- length(unique(x[,eval(units_name)]))
          u_recs <- sort(unique(x[,eval(units_name)]))
          inc_recs <- sample(u_recs,round(rate*tot_recs),replace = FALSE)
     }))
     datb_plot <- subset(datb.clust, datb.clust[,eval(units_name)] %in% tmp)
     ## bases faceted by cluster
     p.c <- ggplot(data=datb_plot,aes(x = time, y = value))
     l <- geom_line(aes_string(group = eval(units_name)), alpha = 0.2)
     l.2 <- geom_smooth(aes(group=1),alpha = 1.0,
                         size = 1, linetype = 2, se = FALSE, colour = "brown",
                         method = "loess")
     f <- facet_wrap(~cluster, scales = "fixed")
     options <- theme(axis.title.x = element_text(size=18),
                       axis.title.y = element_text(size=18),
                       axis.text.x = element_text(size=14,
                                                    angle=-90,hjust=0),
                       axis.text.y = element_text(size=14),
                       ## legend.margin = margin(5, 5, 5, 5),
                       strip.text = element_text(size=18))
     if( length(y.axis.label) > 0 )
     {
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = eval(y.axis.label))
          }else{ ## no x label
               axis <- labs(x = "time", y = eval(y.axis.label) )
          }
     }else{
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = "normalized y units" )
          }else{ ## no labels
               axis <- labs(x = "time", y = "normalized y units" )
          }
     }
     dev.new() ## NOTE(review): opens a new graphics device as a side effect of plotting
     if( smoother == TRUE)
     {
          p.c <- p.c + l + l.2 + f + theme_bw() + axis +
               options
     }else{ ## no smoother
          p.c <- p.c + l + f + theme_bw() + axis +
               options
     }
     suppressWarnings(print(p.c))
     ###############################################################################
     ## compose plot of fitted functions vs. actual data for randomly-selected units
     ###############################################################################
     d3 <- data.frame(units_label,y) ## y is N x T
     names(d3) <- c(eval(units_name),eval(time_points)) ## even though month is date/numeric, names will be char
     dat_gca <- merge(d3,map,by=eval(units_name),all.x=TRUE)
     dat_gca <- melt(dat_gca,measure.vars = as.character(time_points), variable.name = "time",
                      value.name = "fit")
     dat_gca$time <- as.numeric(dat_gca$time)
     if( !is.null(date_field))
     {
          dat_date <- data.frame(sort(unique(time_points)),sort(unique(date_field)))
          names(dat_date) <- c("time","date")
          dat_gca <- merge(dat_gca,dat_date, by = "time", all.x=TRUE)
          dat_gca$time <- as.Date(dat_gca$date)
          dat_gca$date <- NULL
     }
     ##
     ## compose fit predictions
     ##
     ## build data.frame with N*T rows (where N is fast-moving)
     lo <- apply(bb,2,function(x){quantile(x,probs = 0.025)}) # 1 x N*T - N is fast
     hi <- apply(bb,2,function(x){quantile(x,probs = 0.975)}) # 1 x N*T - N is fast
     dat_gcp <- data.frame(rep(d3[,eval(units_name)],times=T),
                            rep(1:T,each=N),bb.hat,lo,hi)
     names(dat_gcp) <- c(eval(units_name),"time","fit","lo","hi")
     ## merge in cluster locations to facet fit v data plot panels by cluster membership
     dat_gcp <- merge(dat_gcp,map,by=eval(units_name),all.x=TRUE)
     if( !is.null(date_field) )
     {
          dat_gcp <- merge(dat_gcp,dat_date, by = "time", all.x=TRUE)
          dat_gcp$time <- as.Date(dat_gcp$date)
          dat_gcp$date <- NULL
     }
     ##
     ## render growth curve plot
     ##
     ## randomly select records to plot
     if(single_unit == FALSE) ## plot the fit for a single experimental unit?
     {
          if(!is.null(num_plot))
          {
               num_plot <- floor(num_plot) ## ensure num_plot is an integer
          }else{ ## is.null(num_plot) = TRUE
               num_plot <- 6
          }
          ## number of units per cluster
          cluster_counts <- as.vector(table(map$cluster))
          M <- length(cluster_counts) ## number of clusters
          ## map[,"cluster"] is a vector of length N of cluster assignments
          ## that is sorted by cluster. We sample num_plot from this, without replacement,
          ## to ensure we don't sample more units per cluster than there are members.
          ## This is effectively sampling with probability proportional to cluster size.
          units_to_sample <- map[,eval(units_name)]
          cluster_labels <- map[,"cluster"]
          sample_ids <- sort(sample(cluster_labels,num_plot,
                                      replace=FALSE))
          table_clust <- table(sample_ids)
          ## labels of clusters sampled
          clusters_sampled <- as.numeric(names(table_clust))
          ## number of each cluster label sampled
          num_per_clust <- as.vector(table_clust)
          ## generate num_per_clust randomly sampled units with clusters_sampled
          M_to_sample <- length(num_per_clust) ## won't sample all M clusters
          plot_st <- vector("list",M_to_sample)
          for( m in 1:M_to_sample )
          {
               units_to_sample_m <- units_to_sample[cluster_labels == clusters_sampled[m]]
               plot_st[[m]] <- sample(units_to_sample_m, num_per_clust[m],replace=FALSE)
          } ## end loop m sampling num_per_clust[m] units within cluster, cluster_sampled[m]
          plot_st <- unlist(plot_st)
     }else{ ## sample only a single unit
          plot_st <- sample(map[,eval(units_name)],1,replace=FALSE)
     } ## end conditional statement on whether single unit or group of units to be sampled
     ## outputs unit_name ids to be sampled in plot_st
     plot_gca <- subset(dat_gca, dat_gca[,eval(units_name)] %in% plot_st)
     plot_gcp <- subset(dat_gcp, dat_gcp[,eval(units_name)] %in% plot_st)
     ## compose plot
     p.t = ggplot(data=plot_gcp,aes(x=time, y = fit) )
     l = geom_line(aes_string(group = eval(units_name)),size = 1.2,
                    alpha = 0.6, colour="#FF9999")
     l.2 = geom_point(data=plot_gca,aes_string(group = eval(units_name)),size=3,shape=1,
                       colour="black") ## Add the data
     l.3 = geom_line(data=plot_gca,aes_string(group = eval(units_name)),lty="dotted") ## Add the data
     if( length(y.axis.label) > 0 )
     {
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = eval(y.axis.label))
          }else{ ## no x label
               axis <- labs(x = "time", y = eval(y.axis.label) )
          }
     }else{
          if( length(x.axis.label) > 0 )
          {
               axis <- labs(x = eval(x.axis.label), y = "normalized y units")
          }else{ ## no labels
               axis <- labs(x = "time", y = "normalized y units" )
          }
     }
     f <- facet_wrap(as.formula(paste("cluster ~", units_name)), scales="fixed")
     options <- theme(axis.title.x = element_text(size=18),
                       axis.title.y = element_text(size=18),
                       axis.text.x = element_text(size=14,
                                                    angle=-90,hjust=0),
                       axis.text.y = element_text(size=14),
                       # legend.title = element_text(size=12),
                       # legend.text = element_text(size=12),
                       ## legend.margin = margin(5, 5, 5, 5),
                       strip.text = element_text(size=18))
     if( credible == FALSE )
     {
          p.t <- p.t + l + f + axis + l.2 + l.3 + theme_bw() + options
     }else{ ## plot credible intervals
          ci <- geom_ribbon(aes(ymin=lo,ymax=hi),alpha=0.2)
          p.t <- p.t + l + ci + f + axis + l.2 + l.3 + theme_bw() + options
     }
     suppressWarnings(print(p.t))
     ## NOTE(review): presumably quiets R CMD check notes about non-standard
     ## evaluation variables used inside aes() -- confirm
     value <- fit <- time <- NULL
     return(invisible(list(p.cluster = p.c, p.fit = p.t, dat.fit = dat_gcp,
                            dat.cluster = datb_plot, map = map)))
} ## end function cluster_plot |
library(XML)
library(RCurl)
library(tidyr)
library(countrycode)
library(ggplot2)
library(viridis)
# Local density of points at each (x, y) coordinate.
#
# Evaluates a two-dimensional kernel density estimate over the data, then
# looks up, for every input point, the estimated density of the grid cell
# containing that point. Extra arguments (e.g. `n`, `h`, `lims`) are
# forwarded unchanged to MASS::kde2d().
#
# Args:
#   x, y: numeric vectors of equal length giving point coordinates.
#   ...:  further arguments passed to MASS::kde2d().
#
# Returns: a numeric vector, same length as `x`, of density values.
get_density <- function(x, y, ...) {
  kde <- MASS::kde2d(x, y, ...)
  row_idx <- findInterval(x, kde$x)
  col_idx <- findInterval(y, kde$y)
  kde$z[cbind(row_idx, col_idx)]
}
# Get all the country names --> paste links later
countries<-get_country_names()
saveRDS(countries,"./data/countrieswrdc.rds")
################################################
###############################################################
# Loop through all the countries and stations: download the daily global
# radiation series for each station, write one text file per station, and
# save a quick-look scatter plot of the series.
for(j in seq_along(countries)){ # seq_along() is empty-safe; 1:length() would yield c(1, 0)
  nm<-countries[j]
  print(nm)
  stations<-get_station_names(nm)
  for(i in seq_along(stations)){
    stn<-stations[i]
    print(stn)
    yr<-get_station_start(nm,stn)
    # NOTE(review): end year is hard-coded to 2019; bump it when newer data are needed
    yr_seq<-seq(yr,2019,by=1)
    # download one year at a time; try() keeps a failed year from aborting the whole station
    df.wrdc.ecad<-lapply(yr_seq,function(x) try(get_station_data(nm,stn,yr=x),TRUE))
    # drop the failed years, then stack the successful downloads
    df.wrdc.ecad<-do.call("rbind",df.wrdc.ecad[sapply(df.wrdc.ecad, function(x) !inherits(x, "try-error"))])
    try(write.table(df.wrdc.ecad,paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_may2019/country_",nm,"_stn_",stn,".txt"),
                    row.names = FALSE,
                    col.names = TRUE,
                    sep=","))
    try(p<-ggplot(df.wrdc.ecad,aes(ser_date,qq))+geom_point()+ggtitle(stn))
    try(ggsave(p,filename = paste0("/usr/people/dirksen/Pictures/wrdc/fig/",nm,"_",stn,".png")))
    rm(df.wrdc.ecad)
    # try(df.meta<-get_station_meta(nm,stn))
    # try(write.table(df.meta,paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/meta_may2019/",nm,"_meta.txt"),
    #                 row.names = FALSE,
    #                 col.names = !file.exists(paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/meta_may2019/",nm,"_meta.txt")),
    #                 sep=",",
    #                 append = TRUE))
    # rm(df.meta)
  }
}
################## ENTRIES FOR THE ECAD DATABASE
source("inst/ser_id_new_stations.R")
update_wrdc_entries()
###############################################################
###############################################################
###################QUALITY CONTROL#############################
###############################################################
# meta_data<-list.files("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/meta/",full.names = TRUE)
# meta_data<-lapply(meta_data,fread)
# meta_data<-rbindlist(meta_data)
#
# meta_data$name <- gsub(",","",meta_data$name)
# meta_data$name <- gsub("\\. / ","_",meta_data$name)
# meta_data$name <- gsub("\\. ","_",meta_data$name)
# meta_data$name <- gsub("\\.","_",meta_data$name)
# meta_data$name <- gsub(" / ","_",meta_data$name)
# meta_data$name <- gsub(" /","_",meta_data$name)
# meta_data$name <- gsub("/ ","_",meta_data$name)
# meta_data$name <- gsub("/","_",meta_data$name)
# meta_data$name <- gsub("\\'","_",meta_data$name)
# meta_data$name <- gsub(" ","_",meta_data$name)
#
#
# stn_data<-list.files("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_may2019/",full.names=TRUE)
# stn_names<-list.files("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_may2019/")
# stn_country<-gsub("_stn_.*","",stn_names)
# stn_country<-gsub("country_","",stn_country)
# stn_names<-gsub(".*_stn_","",stn_names)
# stn_names<-gsub(".txt","",stn_names)
#
# for(i in 1:length(stn_data)){
# stn<-stn_names[i]
# print(stn)
# nm<-stn_country[i]
# stn_data_one<-fread(stn_data[i])
# stn_name_one<-meta_data[which(stn_names[i]==meta_data$name),]
# print(stn_name_one)
#
# try(df.qc<-check_qq_time_series(stn_data_one,lat=stn_name_one$lat/3600,lon=stn_name_one$lon/3600),TRUE)
# try(write.table(df.qc,paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_qc_may2019/country_",nm,"_stn_",stn,"_qc.txt"),
# row.names = FALSE,
# col.names = TRUE,
# sep=","),TRUE)
#
# try(df.qc$density<-get_density(df.qc$zenith_mean,df.qc$qq, n = 100),TRUE)
# try(qc<-ggplot(df.qc,aes(x=zenith_mean)) +
# geom_point(aes(y=qq,color=density)) +
# #geom_smooth(aes(y=Qmax_zen),color="red",linetype="dashed") +
# #geom_smooth(aes(y=Qrare_zen),color="orange",linetype="dashed") +
# geom_smooth(aes(y=Qmax),color="yellow") +
# geom_smooth(aes(y=Qmin),color="green") +
# xlab("Mean Daytime Solar Zenith") +
# ylab("Global Radiation") +
# scale_color_viridis() +
# ggtitle(paste0("Country = ", nm, ", Station = ",stn)) +
# theme_bw(),TRUE)
# try(ggsave(qc,filename = paste0("/usr/people/dirksen/Pictures/wrdc/fig_qc/",nm,"_",stn,".png")),TRUE)
# rm(df.qc)
# rm(qc)
# }
#
| /inst/european_solar_data_download.R | no_license | MariekeDirk/wrdc | R | false | false | 4,534 | r | library(XML)
library(RCurl)
library(tidyr)
library(countrycode)
library(ggplot2)
library(viridis)
get_density <- function(x, y, ...) {
dens <- MASS::kde2d(x, y, ...)
ix <- findInterval(x, dens$x)
iy <- findInterval(y, dens$y)
ii <- cbind(ix, iy)
return(dens$z[ii])
}
#Get all the country names -->paste links later
countries<-get_country_names()
saveRDS(countries,"./data/countrieswrdc.rds")
################################################
###############################################################
#loop trough all the countries and stations
#j= i=
for(j in 1:length(countries)){
nm<-countries[j]
print(nm)
stations<-get_station_names(nm)
for(i in 1:length(stations)){
stn<-stations[i]
print(stn)
yr<-get_station_start(nm,stn)
yr_seq<-seq(yr,2019,by=1)
df.wrdc.ecad<-lapply(yr_seq,function(x) try(get_station_data(nm,stn,yr=x),TRUE))
df.wrdc.ecad<-do.call("rbind",df.wrdc.ecad[sapply(df.wrdc.ecad, function(x) !inherits(x, "try-error"))])
try(write.table(df.wrdc.ecad,paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_may2019/country_",nm,"_stn_",stn,".txt"),
row.names = FALSE,
col.names = TRUE,
sep=","))
try(p<-ggplot(df.wrdc.ecad,aes(ser_date,qq))+geom_point()+ggtitle(stn))
try(ggsave(p,filename = paste0("/usr/people/dirksen/Pictures/wrdc/fig/",nm,"_",stn,".png")))
rm(df.wrdc.ecad)
# try(df.meta<-get_station_meta(nm,stn))
# try(write.table(df.meta,paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/meta_may2019/",nm,"_meta.txt"),
# row.names = FALSE,
# col.names = !file.exists(paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/meta_may2019/",nm,"_meta.txt")),
# sep=",",
# append = TRUE))
# rm(df.meta)
}
}
##################ENTRIES FOR THE ECAD DATABASE
source("inst/ser_id_new_stations.R")
update_wrdc_entries()
###############################################################
###############################################################
###################QUALITY CONTROL#############################
###############################################################
# meta_data<-list.files("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/meta/",full.names = TRUE)
# meta_data<-lapply(meta_data,fread)
# meta_data<-rbindlist(meta_data)
#
# meta_data$name <- gsub(",","",meta_data$name)
# meta_data$name <- gsub("\\. / ","_",meta_data$name)
# meta_data$name <- gsub("\\. ","_",meta_data$name)
# meta_data$name <- gsub("\\.","_",meta_data$name)
# meta_data$name <- gsub(" / ","_",meta_data$name)
# meta_data$name <- gsub(" /","_",meta_data$name)
# meta_data$name <- gsub("/ ","_",meta_data$name)
# meta_data$name <- gsub("/","_",meta_data$name)
# meta_data$name <- gsub("\\'","_",meta_data$name)
# meta_data$name <- gsub(" ","_",meta_data$name)
#
#
# stn_data<-list.files("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_may2019/",full.names=TRUE)
# stn_names<-list.files("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_may2019/")
# stn_country<-gsub("_stn_.*","",stn_names)
# stn_country<-gsub("country_","",stn_country)
# stn_names<-gsub(".*_stn_","",stn_names)
# stn_names<-gsub(".txt","",stn_names)
#
# for(i in 1:length(stn_data)){
# stn<-stn_names[i]
# print(stn)
# nm<-stn_country[i]
# stn_data_one<-fread(stn_data[i])
# stn_name_one<-meta_data[which(stn_names[i]==meta_data$name),]
# print(stn_name_one)
#
# try(df.qc<-check_qq_time_series(stn_data_one,lat=stn_name_one$lat/3600,lon=stn_name_one$lon/3600),TRUE)
# try(write.table(df.qc,paste0("/net/pc150400/nobackup/users/dirksen/data/radiation_europe/WRDC/data_qc_may2019/country_",nm,"_stn_",stn,"_qc.txt"),
# row.names = FALSE,
# col.names = TRUE,
# sep=","),TRUE)
#
# try(df.qc$density<-get_density(df.qc$zenith_mean,df.qc$qq, n = 100),TRUE)
# try(qc<-ggplot(df.qc,aes(x=zenith_mean)) +
# geom_point(aes(y=qq,color=density)) +
# #geom_smooth(aes(y=Qmax_zen),color="red",linetype="dashed") +
# #geom_smooth(aes(y=Qrare_zen),color="orange",linetype="dashed") +
# geom_smooth(aes(y=Qmax),color="yellow") +
# geom_smooth(aes(y=Qmin),color="green") +
# xlab("Mean Daytime Solar Zenith") +
# ylab("Global Radiation") +
# scale_color_viridis() +
# ggtitle(paste0("Country = ", nm, ", Station = ",stn)) +
# theme_bw(),TRUE)
# try(ggsave(qc,filename = paste0("/usr/people/dirksen/Pictures/wrdc/fig_qc/",nm,"_",stn,".png")),TRUE)
# rm(df.qc)
# rm(qc)
# }
#
|
# merge_scores ------------------------------------------------------------
# Compare fit scores from three fitting runs (./1st, ./2nd, ./3rd), pick
# the best-scoring model per gene, then attach that model's fitted
# parameters into a single supplemental table.
fits <- c('./1st/','./2nd/','./3rd/')
# Read one run's allRes.Rds and return the objective value for each gene.
getScore <- function(f){
fit <- readRDS(paste0(f,'allRes.Rds'))
unlist(lapply(fit,function(x) x$fitRes$value))
}
# Build a genes x runs score matrix, one column per fitting run.
scores <- lapply(fits,getScore)
scores.df <- do.call(cbind,scores)
colnames(scores.df)<-c('V1','V2','V3')
write.csv(file='fit_scores_cmp.csv',scores.df)
# statistics ------------------------------------------------------------------
# Printed (not stored): how often run 1 beats run 3, and which genes don't.
table(scores.df[,1]< scores.df[,3])
which(scores.df[,1]> scores.df[,3])
require(dplyr)
require(tidyr)
scores.df <- as.data.frame((scores.df))
scores.df$gene=rownames(scores.df)
# NOTE(review): gather(..., 1:2) reshapes only columns V1 and V2, so the
# per-gene minimum below never considers V3 even though it was computed
# above -- confirm this exclusion is intentional.
scores.df.2<- scores.df%>%gather(model,score,1:2)
scores.df.final <- scores.df.2%>%group_by(gene)%>%
summarise(minScore = min(score),
minModel = model[which.min(score)])
write.csv(file='./data/final.score.csv',scores.df.final)
# heatmap
require(ggplot2)
# NOTE(review): summarise() returns a tibble; setting rownames on it is
# deprecated/ineffective -- the histogram below does not use rownames.
rownames(scores.df.final)<- scores.df.final$gene
ggplot(scores.df.final,aes(minScore))+geom_histogram(color="white")
# parameters --------------------------------------------------------------
# Read one run's fitted-parameter table (pars.csv) as plain characters.
getPars <- function(f){
fit <- read.csv(paste0(f,'pars.csv'),stringsAsFactors = F)
fit
}
pars.all <- lapply(fits,getPars)
names(pars.all)<- c('V1','V2','V3')
final.score <- read.csv(file='./data/final.score.csv',row.names = 1,stringsAsFactors = F)
# For each gene, look up the winning model's parameter table, join on the
# shared columns, and drop the unused k3 column.
final.pars <- apply(final.score, 1, function(s)
{
s=as.data.frame(t(s),stringsAsFactors = F)
s%>%left_join(pars.all[[s$minModel]])%>%select(-one_of("k3"))
})
final.pars<-do.call(rbind,final.pars)
# NOTE(review): fwrite() comes from data.table, which is never loaded in
# this script -- it must be attached elsewhere before sourcing this file.
fwrite(final.pars,'/Users/frank/Dropbox/Projects/DurationDecoding-code/supplemental_tables/table_model_v2.csv')
| /fig.6_twoStep/fit_cmp.R | no_license | biomystery/duration_decode_manuscript | R | false | false | 1,680 | r | # merge_scores ------------------------------------------------------------
fits <- c('./1st/','./2nd/','./3rd/')
getScore <- function(f){
fit <- readRDS(paste0(f,'allRes.Rds'))
unlist(lapply(fit,function(x) x$fitRes$value))
}
scores <- lapply(fits,getScore)
scores.df <- do.call(cbind,scores)
colnames(scores.df)<-c('V1','V2','V3')
write.csv(file='fit_scores_cmp.csv',scores.df)
# statistics ------------------------------------------------------------------
table(scores.df[,1]< scores.df[,3])
which(scores.df[,1]> scores.df[,3])
require(dplyr)
require(tidyr)
scores.df <- as.data.frame((scores.df))
scores.df$gene=rownames(scores.df)
scores.df.2<- scores.df%>%gather(model,score,1:2)
scores.df.final <- scores.df.2%>%group_by(gene)%>%
summarise(minScore = min(score),
minModel = model[which.min(score)])
write.csv(file='./data/final.score.csv',scores.df.final)
# heatmap
require(ggplot2)
rownames(scores.df.final)<- scores.df.final$gene
ggplot(scores.df.final,aes(minScore))+geom_histogram(color="white")
# parameters --------------------------------------------------------------
getPars <- function(f){
fit <- read.csv(paste0(f,'pars.csv'),stringsAsFactors = F)
fit
}
pars.all <- lapply(fits,getPars)
names(pars.all)<- c('V1','V2','V3')
final.score <- read.csv(file='./data/final.score.csv',row.names = 1,stringsAsFactors = F)
final.pars <- apply(final.score, 1, function(s)
{
s=as.data.frame(t(s),stringsAsFactors = F)
s%>%left_join(pars.all[[s$minModel]])%>%select(-one_of("k3"))
})
final.pars<-do.call(rbind,final.pars)
fwrite(final.pars,'/Users/frank/Dropbox/Projects/DurationDecoding-code/supplemental_tables/table_model_v2.csv')
|
## If you want to source() a bunch of files, something like
## the following may be useful:
# Source every R/S/Q script found directly under `path`.
#
# path  : directory to scan (non-recursive).
# trace : if TRUE, print each file name before sourcing it.
# ...   : forwarded to source() (e.g. echo = TRUE).
sourcedir <- function(path, trace = TRUE, ...) {
  script_names <- list.files(path, pattern = "[.][RrSsQq]$")
  for (script in script_names) {
    if (trace) {
      cat(script, ":")
    }
    source(file.path(path, script), ...)
    if (trace) {
      cat("\n")
    }
  }
}
##Loading data
# Load a semicolon-separated CSV file that has a header row.
#
# path: path to the file on disk.
# Returns a data.frame with column names taken from the header line.
loaddata.csv <- function( path )
{
  # read.csv manages its own connection; the previous manual file()/close()
  # pair leaked the connection if the read failed (no on.exit in place).
  # `header = TRUE` replaces the partially-matched `head = T`.
  read.csv(path, header = TRUE, sep = ";")
}
##Loading data
# Load a comma-separated text file without a header row.
#
# path: path to the file on disk.
# Returns a data.frame with default V1, V2, ... column names.
loaddata.table <- function( path )
{
  # read.table manages its own connection; the previous manual file()/close()
  # pair leaked the connection if the read failed (no on.exit in place).
  read.table(path, sep = ",")
}
| /rcode/axlib.R | no_license | pedrodiamel/estadistica-in1119 | R | false | false | 618 | r | ## If you want to source() a bunch of files, something like
## the following may be useful:
sourcedir <- function(path, trace = TRUE, ...) {
for (nm in list.files(path, pattern = "[.][RrSsQq]$")) {
if(trace) cat(nm,":")
source(file.path(path, nm), ...)
if(trace) cat("\n")
}
}
##Loading data
loaddata.csv <- function( path )
{
#Load data
con = file(path, "r");
DB <- read.csv(con, head=T, sep=";");
close(con);
return(DB);
}
##Loading data
loaddata.table <- function( path )
{
#Load data
con = file(path, "r");
DB <- read.table(con, sep=",");
close(con);
return(DB);
}
|
# Limma results table for the selected diet level, sorted by p value and
# rounded for display. Depends on the app-level `data` list and the
# `input$diet.level` selector defined elsewhere in the app.
diet_limma = reactive({
    df = data$limma$diet[[input$diet.level]] %>%
        rownames_to_column("Feature") %>%
        arrange(pvalue)
    # Round numeric columns in place. The previous sapply() call coerced
    # the mixed-type data.frame to a character matrix (the "Feature"
    # column is character), so every statistic became a string and the
    # DT widget sorted them lexically. lapply() preserves column types.
    df[] = lapply(df, function(col){
        if(!is.numeric(col)) return(col)
        round(col, digits = 3)
    })
    column_to_rownames(df, "Feature")
})
# Render the limma table; single-row selection drives the boxplot below.
output$diet_limma = renderDT(
    diet_limma(),
    selection = list(mode = "single", selected = 1),
    server = TRUE
)
# Feature name (rowname) of the currently selected table row.
diet_boxplot_selector = reactive({
    rownames(diet_limma())[input$diet_limma_rows_selected]
})
# Boxplot of the selected feature over timepoints, one panel per
# treatment, with per-subject connecting lines.
output$diet_boxplot = renderPlotly({
    mset = data$data$diet[[input$diet.level]]
    p = plot_boxplot(mset,
                     x = "Timepoint",
                     feature = diet_boxplot_selector(),
                     cols = "Treatment",
                     line = "Subject",
                     color = "Subject",
                     color.pal = pal_jama()(7)) +
        labs(x = "")
    ggplotly(p)
})
data$limma$diet[[input$diet.level]] %>%
rownames_to_column("Feature") %>%
arrange(pvalue) %>%
sapply(function(col){
if(!is.numeric(col)) return(col)
round(col, digits = 3)
}) %>%
as.data.frame %>%
column_to_rownames("Feature")
})
output$diet_limma = renderDT(
diet_limma(),
selection = list(mode = "single", selected = 1),
server=T
)
diet_boxplot_selector = reactive({
rownames(diet_limma())[input$diet_limma_rows_selected]
})
output$diet_boxplot = renderPlotly({
mset = data$data$diet[[input$diet.level]]
p = plot_boxplot(mset,
x = "Timepoint",
feature = diet_boxplot_selector(),
cols = "Treatment",
line = "Subject",
color = "Subject",
color.pal = pal_jama()(7)) +
labs(x = "")
ggplotly(p)
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/old_taxa--taxmap--internal.R
\name{parse_possibly_named_logical}
\alias{parse_possibly_named_logical}
\title{used to parse inputs to `drop_obs` and `reassign_obs`}
\usage{
parse_possibly_named_logical(input, data, default)
}
\description{
used to parse inputs to `drop_obs` and `reassign_obs`
}
\keyword{internal}
| /man/parse_possibly_named_logical.Rd | permissive | grunwaldlab/metacoder | R | false | true | 392 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/old_taxa--taxmap--internal.R
\name{parse_possibly_named_logical}
\alias{parse_possibly_named_logical}
\title{used to parse inputs to `drop_obs` and `reassign_obs`}
\usage{
parse_possibly_named_logical(input, data, default)
}
\description{
used to parse inputs to `drop_obs` and `reassign_obs`
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.R
\name{generate_meas}
\alias{generate_meas}
\title{sample measurement from generated fields}
\usage{
generate_meas(vect_meas, Y_xy, verbose = F)
}
\arguments{
\item{Y_xy}{original fields}
\item{verbose}{boolean for whether to display comments}
\item{vect_meas}{a vector containing parameters for sampling measurements}
}
\value{
a dataframe containing measurements
}
\description{
sample measurement from generated fields
}
| /man/generate_meas.Rd | no_license | kcucchi/rPriorSynthetic | R | false | true | 516 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate.R
\name{generate_meas}
\alias{generate_meas}
\title{sample measurement from generated fields}
\usage{
generate_meas(vect_meas, Y_xy, verbose = F)
}
\arguments{
\item{Y_xy}{original fields}
\item{verbose}{boolean for whether to display comments}
\item{vect_meas}{a vector containing parameters for sampling measurements}
}
\value{
a dataframe containing measurements
}
\description{
sample measurement from generated fields
}
|
library(cluster)
library(factoextra)
library(NbClust)
# Wine dataset: PCA followed by hierarchical and k-means clustering,
# once on the raw variables and once on the first three PCA components.
wine <- read.csv("D:/Assignments/PCA/wine.csv")
View(wine)
str(wine)
cor(wine)
# building PCA object
# Column 1 is excluded from the PCA; cor = TRUE uses the correlation
# matrix (i.e. variables are standardised first).
wine_pca <- princomp(wine[,-1],cor = TRUE, scores = TRUE, covmat = NULL)
summary(wine_pca)
# Scree plot with the eigenvalue = 1 cut-off line.
screeplot(wine_pca, type = "lines")
abline(h=1)
loadings(wine_pca)
plot(wine_pca)
biplot(wine_pca)
# Cumulative percentage of variance explained per component.
plot(cumsum(wine_pca$sdev*wine_pca$sdev)*100/(sum(wine_pca$sdev*wine_pca$sdev)),type="b")
wine_pca$scores[,1:3]
# binding wine dataset and pca scores
winedata <- cbind(wine,wine_pca$scores[,1:3])
View(winedata)
# Columns 15:17 are the three appended PCA score columns.
wine_clust <- winedata[,15:17]
norm_wine_clust <- scale(wine_clust)
dist1 <- dist(norm_wine_clust, method = "euclidean")
# clustering using hierarchial
fit_hier <- hclust(dist1, method = "complete")
plot(fit_hier)
# cluster analysis- all variables
no_of_Clusters = NbClust(wine, distance = "euclidean", min.nc = 2, max.nc = 10, method = "complete", index ="all")
fviz_nbclust(no_of_Clusters) + theme_minimal()
# Hierarchical clustering - All Variables
hclust.complete = eclust(wine,"hclust",k = 7,method = "complete", graph = FALSE)
fviz_dend(hclust.complete, rect = TRUE, show_labels = FALSE)
# K-Means clustering - All Variables
km_7 = eclust(wine, "kmeans",k = 7, nstart = 25, graph = FALSE)
fviz_cluster(km_7, geom = "point", frame.type = "norm")
# Cluster Analysis - PCA Components
# NOTE(review): columns 2:14 are the 13 original chemistry variables, not
# the PCA score columns (15:17) -- confirm which was intended given this
# section's "PCA Components" title.
winedf.pca = winedata[,2:14]
no_of_Clusters = NbClust(winedf.pca,distance = "euclidean",min.nc = 2,max.nc = 10,method = "complete",index ="all")
fviz_nbclust(no_of_Clusters) + theme_minimal()
# Hierarchical clustering - PCA Components
hclust.complete = eclust(winedf.pca, "hclust", k = 7, method = "complete", graph = FALSE)
fviz_dend(hclust.complete, rect = TRUE, show_labels = FALSE)
# K-Means clustering - PCA Components
km_7 = eclust(winedf.pca, "kmeans", k = 7, nstart = 25, graph = FALSE)
fviz_cluster(km_7, geom = "point", frame.type = "norm")
| /PCA_assign_winedataset.R | no_license | sachinmungmode/R-Python-codes | R | false | false | 1,959 | r | library(cluster)
library(factoextra)
library(NbClust)
wine <- read.csv("D:/Assignments/PCA/wine.csv")
View(wine)
str(wine)
cor(wine)
# building PCA object
wine_pca <- princomp(wine[,-1],cor = TRUE, scores = TRUE, covmat = NULL)
summary(wine_pca)
screeplot(wine_pca, type = "lines")
abline(h=1)
loadings(wine_pca)
plot(wine_pca)
biplot(wine_pca)
plot(cumsum(wine_pca$sdev*wine_pca$sdev)*100/(sum(wine_pca$sdev*wine_pca$sdev)),type="b")
wine_pca$scores[,1:3]
# binding wine dataset and pca scores
winedata <- cbind(wine,wine_pca$scores[,1:3])
View(winedata)
wine_clust <- winedata[,15:17]
norm_wine_clust <- scale(wine_clust)
dist1 <- dist(norm_wine_clust, method = "euclidean")
# clustering using hierarchial
fit_hier <- hclust(dist1, method = "complete")
plot(fit_hier)
# cluster analysis- all variables
no_of_Clusters = NbClust(wine, distance = "euclidean", min.nc = 2, max.nc = 10, method = "complete", index ="all")
fviz_nbclust(no_of_Clusters) + theme_minimal()
# Hierarchical clustering - All Variables
hclust.complete = eclust(wine,"hclust",k = 7,method = "complete", graph = FALSE)
fviz_dend(hclust.complete, rect = TRUE, show_labels = FALSE)
# K-Means clustering - All Variables
km_7 = eclust(wine, "kmeans",k = 7, nstart = 25, graph = FALSE)
fviz_cluster(km_7, geom = "point", frame.type = "norm")
# Cluster Analysis - PCA Components
winedf.pca = winedata[,2:14]
no_of_Clusters = NbClust(winedf.pca,distance = "euclidean",min.nc = 2,max.nc = 10,method = "complete",index ="all")
fviz_nbclust(no_of_Clusters) + theme_minimal()
# Hierarchical clustering - PCA Components
hclust.complete = eclust(winedf.pca, "hclust", k = 7, method = "complete", graph = FALSE)
fviz_dend(hclust.complete, rect = TRUE, show_labels = FALSE)
# K-Means clustering - PCA Components
km_7 = eclust(winedf.pca, "kmeans", k = 7, nstart = 25, graph = FALSE)
fviz_cluster(km_7, geom = "point", frame.type = "norm")
|
#Nicole E Soltis
#02/21/18
#02_FAMaddPhenos
#----------------------------------------------------------------------------
rm(list=ls())
#pipeline note:
#1. run A01_TABtoPEDnMAP_rmIso.R
#2. make sure there is a copy of plink executable in data/GEMMA_eachAt_Bc
#3. in command prompt: cd Documents/GitRepos/BcAt_RNAGWAS/data/GEMMA_eachAt_Bc
#4. RUN ./plink --noweb --file 01_PLINK/dpbinMAF20NA10 --maf 0.2 --make-bed --out binMAF20NA10 } do this ONCE. NEXT STEP is customized by ogphenos/ permutation
#5. copy these files to GEMMA_eachBc_At/01_PLINK
#6. run this script (A02_prepPhenos_Bc.R)
##start here
#7. cd Documents/GitRepos/BcAt_RNAGWAS/data/B05_GEMMA/
#8. copy edited .fam, original .bim, .bed to C03_runGEMMA/
#9. copy bash script: cp scripts/GEMMA_lesions/norand_GEMMA_kmatrix.sh data/B05_GEMMA_les/
#10. cd to data/B05_GEMMA_les/
#11. calculate k-matrix with: bash C04_runGEMMA_allAt_kmat.sh, mv files to C04_kmat
#12. move the whole thing to Data/ drive
#13. on Data/ run GEMMA: bash C05_runGEMMA_allAt_kmat_run.sh
#14. pheno order can be found in names(Phenos)
# Build a GEMMA-compatible .fam file by appending per-gene expression
# phenotypes (for one host genotype) to the PLINK .fam sample table.
# NOTE(review): setwd() inside a script is fragile; project-relative
# paths would be safer -- left as-is to preserve behaviour.
setwd("~/Documents/GitRepos/BcAt_RNAGWAS")
IsoNames <- read.csv("data/Vivian_Bc/IsolateKey_Vivian.csv")
MyReads <- read.csv("data/Vivian_Bc/result.lsm.csv") #9270 phenotypes
MyReads <- MyReads[,-c(1)]
#attach Isolate Names
names(IsoNames)[1] <- "Isolate"
IsoNames <- IsoNames[,c(1,3)]
MyReads <- merge(MyReads, IsoNames, by="Isolate")
# Move the isolate-name column (appended last by merge) to the front.
MyReads <- MyReads[,c(9270,2:9269)]
names(MyReads)[1] <- "Isolate"
#myFAM is the PLINK output of converting *.ped and *.map (01_TABtoPEDnMAP.R) to *.bed and *.bim and *.fam
myFAM <- read.table("data/GEMMA_eachAt_Bc/01_PLINK/binMAF20NA10.fam")
#GEMMA only needs column 1 (individual ID), column 6 (phenotype)
#n (num) with -n 1 means column 6 is pheno, -n 2 means 7… etc.
#first, split MyReads by plant accession. Then generate matching FAM files for each.
myread_coi1 <- MyReads[MyReads$HostGenotype=="coi.1",]
myread_col0 <- MyReads[MyReads$HostGenotype=="col.0",]
myread_npr1 <- MyReads[MyReads$HostGenotype=="npr.1",]
##do this for each of above
# NOTE(review): hard-coded to the npr.1 subset here; rerun with
# myread_coi1 / myread_col0 for the other host genotypes.
Phenos <- myread_npr1
#col2 = V2 = Isolate
Phenos <- Phenos[,c(1,3:length(Phenos))]
names(Phenos)[1] <- "V2"
Phenos_match <- Phenos[ order(Phenos$V2), ]
#remove non-genotyped 01.02.13 from Phenos
Phenos_match <- Phenos_match[!Phenos_match$V2 =="1.02.13",]
myFAM_match <- myFAM
# Temporary ordering key -- assumes exactly 95 genotyped isolates.
myFAM_match$delete <- c(1:95)
myFAM_match <- myFAM_match[ order(myFAM_match$V2), ]
## check that these are 0, 0, 95
setdiff(myFAM_match$V2,Phenos_match$V2)
setdiff(Phenos_match$V2,myFAM_match$V2)
intersect(myFAM_match$V2,Phenos_match$V2)
#now add Phenos_match onto myFAM_match
myFAM_match2 <- merge(myFAM_match, Phenos_match, by="V2")
# Restore the original .fam row order, then drop the helper column.
myFAM_match2 <- myFAM_match2[order(myFAM_match2$delete),]
#remove dummy phenotype (column 6)
#and reorder V1:V5
myFAM_match2 <- myFAM_match2[,c(2,1,3:5,8:length(myFAM_match2))]
##be sure to move new *.fam into the correct directory!
# Sys.time() calls bracket the write to log how long it takes.
Sys.time()
write.table(myFAM_match2, "data/GEMMA_eachAt_Bc/02_GEMMA/binMAF20NA10.fam", row.names=FALSE, col.names=TRUE)
Sys.time()
myFAM_check <- read.table("data/GEMMA_eachAt_Bc/col0/02_GEMMA/binMAF20NA10.fam")
myFAM_check2 <- read.table("data/GEMMA_eachBc_At/col0/02_GEMMA/binMAF20NA10.fam")
| /scripts/GEMMA_each/col0/A02_prepPhenos_Bc.R | no_license | nicolise/BcAt_RNAGWAS | R | false | false | 3,208 | r | #Nicole E Soltis
#02/21/18
#02_FAMaddPhenos
#----------------------------------------------------------------------------
rm(list=ls())
#pipeline note:
#1. run A01_TABtoPEDnMAP_rmIso.R
#2. make sure there is a copy of plink executable in data/GEMMA_eachAt_Bc
#3. in command prompt: cd Documents/GitRepos/BcAt_RNAGWAS/data/GEMMA_eachAt_Bc
#4. RUN ./plink --noweb --file 01_PLINK/dpbinMAF20NA10 --maf 0.2 --make-bed --out binMAF20NA10 } do this ONCE. NEXT STEP is customized by ogphenos/ permutation
#5. copy these files to GEMMA_eachBc_At/01_PLINK
#6. run this script (A02_prepPhenos_Bc.R)
##start here
#7. cd Documents/GitRepos/BcAt_RNAGWAS/data/B05_GEMMA/
#8. copy edited .fam, original .bim, .bed to C03_runGEMMA/
#9. copy bash script: cp scripts/GEMMA_lesions/norand_GEMMA_kmatrix.sh data/B05_GEMMA_les/
#10. cd to data/B05_GEMMA_les/
#11. calculate k-matrix with: bash C04_runGEMMA_allAt_kmat.sh, mv files to C04_kmat
#12. move the whole thing to Data/ drive
#13. on Data/ run GEMMA: bash C05_runGEMMA_allAt_kmat_run.sh
#14. pheno order can be found in names(Phenos)
setwd("~/Documents/GitRepos/BcAt_RNAGWAS")
IsoNames <- read.csv("data/Vivian_Bc/IsolateKey_Vivian.csv")
MyReads <- read.csv("data/Vivian_Bc/result.lsm.csv") #9270 phenotypes
MyReads <- MyReads[,-c(1)]
#attach Isolate Names
names(IsoNames)[1] <- "Isolate"
IsoNames <- IsoNames[,c(1,3)]
MyReads <- merge(MyReads, IsoNames, by="Isolate")
MyReads <- MyReads[,c(9270,2:9269)]
names(MyReads)[1] <- "Isolate"
#myFAM is the PLINK output of converting *.ped and *.map (01_TABtoPEDnMAP.R) to *.bed and *.bim and *.fam
myFAM <- read.table("data/GEMMA_eachAt_Bc/01_PLINK/binMAF20NA10.fam")
#GEMMA only needs column 1 (individual ID), column 6 (phenotype)
#n (num) with -n 1 means column 6 is pheno, -n 2 means 7… etc.
#first, split MyReads by plant accession. Then generate matching FAM files for each.
myread_coi1 <- MyReads[MyReads$HostGenotype=="coi.1",]
myread_col0 <- MyReads[MyReads$HostGenotype=="col.0",]
myread_npr1 <- MyReads[MyReads$HostGenotype=="npr.1",]
##do this for each of above
Phenos <- myread_npr1
#col2 = V2 = Isolate
Phenos <- Phenos[,c(1,3:length(Phenos))]
names(Phenos)[1] <- "V2"
Phenos_match <- Phenos[ order(Phenos$V2), ]
#remove non-genotyped 01.02.13 from Phenos
Phenos_match <- Phenos_match[!Phenos_match$V2 =="1.02.13",]
myFAM_match <- myFAM
myFAM_match$delete <- c(1:95)
myFAM_match <- myFAM_match[ order(myFAM_match$V2), ]
## check that these are 0, 0, 95
setdiff(myFAM_match$V2,Phenos_match$V2)
setdiff(Phenos_match$V2,myFAM_match$V2)
intersect(myFAM_match$V2,Phenos_match$V2)
#now add Phenos_match onto myFAM_match
myFAM_match2 <- merge(myFAM_match, Phenos_match, by="V2")
myFAM_match2 <- myFAM_match2[order(myFAM_match2$delete),]
#remove dummy phenotype (column 6)
#and reorder V1:V5
myFAM_match2 <- myFAM_match2[,c(2,1,3:5,8:length(myFAM_match2))]
##be sure to move new *.fam into the correct directory!
Sys.time()
write.table(myFAM_match2, "data/GEMMA_eachAt_Bc/02_GEMMA/binMAF20NA10.fam", row.names=FALSE, col.names=TRUE)
Sys.time()
myFAM_check <- read.table("data/GEMMA_eachAt_Bc/col0/02_GEMMA/binMAF20NA10.fam")
myFAM_check2 <- read.table("data/GEMMA_eachBc_At/col0/02_GEMMA/binMAF20NA10.fam")
|
## version: 1.30
## method: get
## path: /nodes
## code: 200
## response: [{"ID":"24ifsmvkjbyhk","Version":{"Index":8},"CreatedAt":"2016-06-07T20:31:11.853781916Z","UpdatedAt":"2016-06-07T20:31:11.999868824Z","Spec":{"Name":"my-node","Role":"manager","Availability":"active","Labels":{"foo":"bar"}},"Description":{"Hostname":"bf3067039e47","Platform":{"Architecture":"x86_64","OS":"linux"},"Resources":{"NanoCPUs":4000000000,"MemoryBytes":8272408576},"Engine":{"EngineVersion":"17.04.0","Labels":{"foo":"bar"},"Plugins":[{"Type":"Volume","Name":"local"},{"Type":"Network","Name":"bridge"},{"Type":"Network","Name":"null"},{"Type":"Network","Name":"overlay"}]},"TLSInfo":{"TrustRoot":"-----BEGIN CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0\nMzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf\n3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO\nPQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz\npxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H\n-----END CERTIFICATE-----\n","CertIssuerSubject":"MBMxETAPBgNVBAMTCHN3YXJtLWNh","CertIssuerPublicKey":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="}},"Status":{"State":"ready","Addr":"172.17.0.2"},"ManagerStatus":{"Leader":true,"Reachability":"reachable","Addr":"172.17.0.2:2377"}}]
# Fixture helper: build a data.frame without string-to-factor conversion,
# so expected values match regardless of the running R version's default.
data_frame <- function(...) {
data.frame(..., stringsAsFactors = FALSE)
}
tls_info <- list(
trust_root = "-----BEGIN CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0\nMzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf\n3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO\nPQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz\npxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H\n-----END CERTIFICATE-----\n",
cert_issuer_subject = "MBMxETAPBgNVBAMTCHN3YXJtLWNh",
cert_issuer_public_key = "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==")
data_frame(
id = "24ifsmvkjbyhk",
version = I(list(list(index = 8L))),
created_at = "2016-06-07T20:31:11.853781916Z",
updated_at = "2016-06-07T20:31:11.999868824Z",
spec = I(list(list(
name = "my-node",
labels = c(foo = "bar"),
role = "manager",
availability = "active"))),
description = I(list(list(
hostname = "bf3067039e47",
platform = list(
architecture = "x86_64",
os = "linux"),
resources = list(
nano_cpus = 4e+09,
memory_bytes = 8272408576),
engine = list(
engine_version = "17.04.0",
labels = c(foo = "bar"),
plugins = data.frame(
type = c("Volume", "Network", "Network", "Network"),
name = c("local", "bridge", "null", "overlay"),
stringsAsFactors = FALSE)),
tls_info = tls_info))),
status = I(list(list(
state = "ready",
message = NA_character_,
addr = "172.17.0.2"))),
manager_status = I(list(list(
leader = TRUE,
reachability = "reachable",
addr = "172.17.0.2:2377"))))
| /tests/testthat/sample_responses/v1.30/node_list.R | no_license | cran/stevedore | R | false | false | 3,497 | r | ## version: 1.30
## method: get
## path: /nodes
## code: 200
## response: [{"ID":"24ifsmvkjbyhk","Version":{"Index":8},"CreatedAt":"2016-06-07T20:31:11.853781916Z","UpdatedAt":"2016-06-07T20:31:11.999868824Z","Spec":{"Name":"my-node","Role":"manager","Availability":"active","Labels":{"foo":"bar"}},"Description":{"Hostname":"bf3067039e47","Platform":{"Architecture":"x86_64","OS":"linux"},"Resources":{"NanoCPUs":4000000000,"MemoryBytes":8272408576},"Engine":{"EngineVersion":"17.04.0","Labels":{"foo":"bar"},"Plugins":[{"Type":"Volume","Name":"local"},{"Type":"Network","Name":"bridge"},{"Type":"Network","Name":"null"},{"Type":"Network","Name":"overlay"}]},"TLSInfo":{"TrustRoot":"-----BEGIN CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0\nMzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf\n3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO\nPQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz\npxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H\n-----END CERTIFICATE-----\n","CertIssuerSubject":"MBMxETAPBgNVBAMTCHN3YXJtLWNh","CertIssuerPublicKey":"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A=="}},"Status":{"State":"ready","Addr":"172.17.0.2"},"ManagerStatus":{"Leader":true,"Reachability":"reachable","Addr":"172.17.0.2:2377"}}]
data_frame <- function(...) {
data.frame(..., stringsAsFactors = FALSE)
}
tls_info <- list(
trust_root = "-----BEGIN CERTIFICATE-----\nMIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw\nEzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0\nMzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH\nA0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf\n3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB\nAf8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO\nPQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz\npxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H\n-----END CERTIFICATE-----\n",
cert_issuer_subject = "MBMxETAPBgNVBAMTCHN3YXJtLWNh",
cert_issuer_public_key = "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==")
data_frame(
id = "24ifsmvkjbyhk",
version = I(list(list(index = 8L))),
created_at = "2016-06-07T20:31:11.853781916Z",
updated_at = "2016-06-07T20:31:11.999868824Z",
spec = I(list(list(
name = "my-node",
labels = c(foo = "bar"),
role = "manager",
availability = "active"))),
description = I(list(list(
hostname = "bf3067039e47",
platform = list(
architecture = "x86_64",
os = "linux"),
resources = list(
nano_cpus = 4e+09,
memory_bytes = 8272408576),
engine = list(
engine_version = "17.04.0",
labels = c(foo = "bar"),
plugins = data.frame(
type = c("Volume", "Network", "Network", "Network"),
name = c("local", "bridge", "null", "overlay"),
stringsAsFactors = FALSE)),
tls_info = tls_info))),
status = I(list(list(
state = "ready",
message = NA_character_,
addr = "172.17.0.2"))),
manager_status = I(list(list(
leader = TRUE,
reachability = "reachable",
addr = "172.17.0.2:2377"))))
|
#### Aim of prog: Show the variability of tree growth using boxplots
## Description:
# There will be three boxplots per species:
# - northern part
# - middle part
# - southern part
#
## Remark:
# I did a quick and dirty function using global access variable
#### Load package and clear memory
library(data.table)
library(tikzDevice)
#### Clear memory and graphs
# NOTE(review): rm(list = ls()) wipes the global environment; acceptable for a
# stand-alone batch script, hazardous if this file is ever source()d interactively.
rm(list = ls())
graphics.off()
options(max.print = 500)
#### Tool function
## Get centroids (centro, north, and south) of data
## Latitudinal reference points of a species' range: the centroid of the
## unique latitudes, plus the midpoints between the centroid and each range
## edge (later used as north/south region thresholds).
computeCentroLat = function(lat)
{
	uniq_lat = unique(lat)
	centro = mean(uniq_lat)
	# North/south thresholds sit halfway between the centroid and the extremes
	north = (centro + max(uniq_lat)) / 2
	south = (centro + min(uniq_lat)) / 2
	return(list(centro_lat = centro, north_lat = north, south_lat = south))
}
## Set region
## Classify each latitude into "north", "middle", or "south" relative to the
## thresholds from computeCentroLat(). `centro` is accepted for interface
## compatibility with existing positional calls but plays no role here.
region = function(lat, centro, north, south)
{
	zone = rep.int("middle", length(lat))
	zone[lat <= south] = "south"
	zone[lat >= north] = "north"
	# One-element list so the result can be assigned with data.table's `:=`
	return (list(region = zone))
}
## Function to plot (quick and dirty, using global access variables)
## Draw one tikz figure with hand-built boxplots of tree growth for up to
## three species (indices `rangeFig` into ls_species). Per species, three
## boxes are drawn (north, middle, south region); each "box" shows the
## 2.5/25/50/75/97.5% growth quantiles plus points for values outside the
## 2.5-97.5% band.
## Quick and dirty: reads the globals growth_dt, ls_species, maxG,
## shadeN/shadeM/shadeS, widthBox, interBox, interSpecies, midBox,
## speciesSpace, halfSpeciesSpace.
## nameFig: output .tex file; stand: TRUE produces a standalone compilable file.
## NOTE(review): `op` (previous par settings) is saved but never restored.
varGrowth_fct = function(nameFig, rangeFig = 1:3, stand = FALSE)
{
tikz(nameFig, width = 6, height = 5, standAlone = stand)
op = par(mar = c(0, 2.5, 2, 0), mgp = c(1.5, 0.75, 0),
oma = c(0,2,0,0), tck = -0.015, las = 1)
# Register the three region colours as named LaTeX colours usable in tikz
tikzAnnotate(paste0("\\definecolor{shadeN}{RGB}{", paste(shadeN, collapse = ","), "}"))
tikzAnnotate(paste0("\\definecolor{shadeM}{RGB}{", paste(shadeM, collapse = ","), "}"))
tikzAnnotate(paste0("\\definecolor{shadeS}{RGB}{", paste(shadeS, collapse = ","), "}"))
# Empty canvas sized for three species slots side by side
plot(x = NULL, y = NULL, xlim = c(0, 3*speciesSpace + 2*interSpecies + 0.1),
ylim = c(0, maxG + 1), axes = FALSE, xlab = "",
ylab = "")
count = 0
for (i in rangeFig)
{
## Species line and name (dashed rule with the species label above it)
sp = ls_species[i]
species_coords = halfSpeciesSpace + count*(speciesSpace + interSpecies)
tikzCoord(species_coords - halfSpeciesSpace, maxG + 0.5, paste0("pt_start_", i))
tikzCoord(species_coords + halfSpeciesSpace, maxG + 0.5, paste0("pt_end_", i))
tikzAnnotate(paste0("\\draw[loosely dashed] (pt_start_", i, ") -- (pt_end_", i, ");"))
tikzCoord(species_coords, maxG + 0.5, 'species_coords')
tikzAnnotate(paste0("\\node[above] (spPos) at (species_coords) {", sp, "};"))
## Add boxplots manually
# North (box left of the species centre line)
north_qt = growth_dt[(species == sp) & (region == "north"), quantile(growth, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))]
outliers_above = growth_dt[(species == sp) & (region == "north") & (growth > north_qt["97.5%"]), unique(growth)]
outliers_below = growth_dt[(species == sp) & (region == "north") & (growth < north_qt["2.5%"]), unique(growth)]
north_right = species_coords - midBox - interBox
north_left = north_right - widthBox
segments(x0 = north_left, y0 = north_qt["2.5%"], x1 = north_right, y1 = north_qt["2.5%"], col = "#010120") # Lower horizontal
segments(x0 = species_coords - interBox - widthBox, y0 = north_qt["2.5%"],
x1 = species_coords - interBox - widthBox, y1 = north_qt["25%"], col = "#010120") # Lower vertical (whisker, at the box centre)
tikzCoord(north_left, north_qt["25%"], paste0("lower_north_", i)) # Lower left corner
tikzCoord(north_right, north_qt["75%"], paste0("upper_north_", i)) # Upper right corner
tikzAnnotate(paste0("\\draw[color=shadeN] (lower_north_", i, ") rectangle (upper_north_", i, ");")) # Rectangle
segments(x0 = north_left, y0 = north_qt["50%"], x1 = north_right, y1 = north_qt["50%"], col = "#010120") # Median horizontal
segments(x0 = species_coords - interBox - widthBox, y0 = north_qt["75%"],
x1 = species_coords - interBox - widthBox, y1 = north_qt["97.5%"], col = "#010120") # Upper vertical
segments(x0 = north_left, y0 = north_qt["97.5%"], x1 = north_right, y1 = north_qt["97.5%"], col = "#010120") # Upper horizontal
points(x = rep(species_coords - interBox - widthBox, length(outliers_above)),
y = outliers_above, col = "#010120", pch = 20)
points(x = rep(species_coords - interBox - widthBox, length(outliers_below)),
y = outliers_below, col = "#010120", pch = 20)
# Middle (box centred on the species centre line)
middle_qt = growth_dt[(species == sp) & (region == "middle"), quantile(growth, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))]
outliers_above = growth_dt[(species == sp) & (region == "middle") & (growth > middle_qt["97.5%"]), unique(growth)]
outliers_below = growth_dt[(species == sp) & (region == "middle") & (growth < middle_qt["2.5%"]), unique(growth)]
middle_right = species_coords + midBox
middle_left = species_coords - midBox
segments(x0 = middle_left, y0 = middle_qt["2.5%"], x1 = middle_right, y1 = middle_qt["2.5%"], col = "#0E51FF") # Lower horizontal
segments(x0 = species_coords, y0 = middle_qt["2.5%"], x1 = species_coords, y1 = middle_qt["25%"], col = "#0E51FF") # Lower vertical
tikzCoord(middle_left, middle_qt["25%"], paste0("lower_middle_", i)) # Lower left corner
tikzCoord(middle_right, middle_qt["75%"], paste0("upper_middle_", i)) # Upper right corner
tikzAnnotate(paste0("\\draw[color=shadeM] (lower_middle_", i, ") rectangle (upper_middle_", i, ");")) # Rectangle
segments(x0 = middle_left, y0 = middle_qt["50%"], x1 = middle_right, y1 = middle_qt["50%"], col = "#0E51FF") # Median horizontal
segments(x0 = species_coords, y0 = middle_qt["75%"], x1 = species_coords, y1 = middle_qt["97.5%"], col = "#0E51FF") # Upper vertical
segments(x0 = middle_left, y0 = middle_qt["97.5%"], x1 = middle_right, y1 = middle_qt["97.5%"], col = "#0E51FF") # Upper horizontal
points(x = rep(species_coords, length(outliers_above)),
y = outliers_above, col = "#0E51FF", pch = 20)
points(x = rep(species_coords, length(outliers_below)),
y = outliers_below, col = "#0E51FF", pch = 20)
# South (box right of the species centre line)
south_qt = growth_dt[(species == sp) & (region == "south"), quantile(growth, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))]
outliers_above = growth_dt[(species == sp) & (region == "south") & (growth > south_qt["97.5%"]), unique(growth)]
outliers_below = growth_dt[(species == sp) & (region == "south") & (growth < south_qt["2.5%"]), unique(growth)]
south_right = species_coords + midBox + interBox
south_left = south_right + widthBox # NOTE(review): "left" lies right of "right" here; segments/rectangle are order-insensitive, so the drawing is still correct
segments(x0 = south_left, y0 = south_qt["2.5%"], x1 = south_right, y1 = south_qt["2.5%"], col = "#00F9FF") # Lower horizontal
segments(x0 = species_coords + interBox + widthBox, y0 = south_qt["2.5%"],
x1 = species_coords + interBox + widthBox, y1 = south_qt["25%"], col = "#00F9FF") # Lower vertical
tikzCoord(south_left, south_qt["25%"], paste0("lower_south_", i)) # Lower left corner
tikzCoord(south_right, south_qt["75%"], paste0("upper_south_", i)) # Upper right corner
tikzAnnotate(paste0("\\draw[color=shadeS] (lower_south_", i, ") rectangle (upper_south_", i, ");")) # Rectangle
segments(x0 = south_left, y0 = south_qt["50%"], x1 = south_right, y1 = south_qt["50%"], col = "#00F9FF") # Median horizontal
segments(x0 = species_coords + interBox + widthBox, y0 = south_qt["75%"],
x1 = species_coords + interBox + widthBox, y1 = south_qt["97.5%"], col = "#00F9FF") # Upper vertical
segments(x0 = south_left, y0 = south_qt["97.5%"], x1 = south_right, y1 = south_qt["97.5%"], col = "#00F9FF") # Upper horizontal
points(x = rep(species_coords + interBox + widthBox, length(outliers_above)),
y = outliers_above, col = "#00F9FF", pch = 20)
points(x = rep(species_coords + interBox + widthBox, length(outliers_below)),
y = outliers_below, col = "#00F9FF", pch = 20)
count = count + 1
}
# y axis and label
axis(side = 2, at = seq(0, maxG, by = 5))
mtext(text = "Growth data (in mm/yr)", side = 2, outer = TRUE, las = 0)
# Legend
tikzAnnotate("
\\matrix [below right] at (current bounding box.north west) {
\\node [shape = rectangle, fill = shadeN, label = right:North] {}; &
\\node [shape = rectangle, fill = shadeM, label = right:Middle] {}; &
\\node [shape = rectangle, fill = shadeS, label = right:South] {}; \\\\
};
")
dev.off()
}
#### Load data and compute data centroids
## Growth data
growth_dt = readRDS("../createData/growth_dt.rds")[, .(species_id, latitude, growth)]
tsn = readRDS("../growth/tsn.rds")[, .(species_id, species, tolLevel)]
ls_species = sort(tsn[, species])
n = length(ls_species)
## Species-specific data centroids
# Latitude centroid plus north/south thresholds, computed per species
growth_dt[, c("centroid", "north", "south") := computeCentroLat(latitude), by = species_id]
growth_dt[, region := region(latitude, centroid, north, south), by = species_id]
# Quick sanity check of region counts (result only visible when run interactively)
growth_dt[, table(region), by = species_id]
growth_dt = growth_dt[tsn, on = "species_id"]
maxG = growth_dt[, max(growth)]
#### Plot parameters
## Colours for Northern, Middle and Southern regions (as RGB triplets for tikz)
shadeN = col2rgb("#010120")[,1]
shadeM = col2rgb("#0E51FF")[,1]
shadeS = col2rgb("#00F9FF")[,1]
## Space and width (user coordinates) of the three boxes within a species slot
widthBox = 0.75
interBox = 0.25
interSpecies = 1
midBox = widthBox/2
speciesSpace = 3*widthBox + 2*interBox
halfSpeciesSpace = speciesSpace/2
#### Plot (three species per figure; the last figure holds only two)
varGrowth_fct("growthVar1-3.tex", rangeFig = 1:3) # , stand = TRUE)
varGrowth_fct("growthVar4-6.tex", rangeFig = 4:6) # , stand = TRUE)
varGrowth_fct("growthVar7-9.tex", rangeFig = 7:9) # , stand = TRUE)
varGrowth_fct("growthVar10-12.tex", rangeFig = 10:12) # , stand = TRUE)
varGrowth_fct("growthVar13-14.tex", rangeFig = 13:14) # , stand = TRUE)
| /addFigures/growthVariability.R | no_license | amael-ls/code_R0niche | R | false | false | 9,140 | r |
#### Aim of prog: Show the variability of tree growth using boxplots
## Description:
# There will be three boxplots per species:
# - northern part
# - middle part
# - southern part
#
## Remark:
# I did a quick and dirty function using global access variable
#### Load package and clear memory
library(data.table)
library(tikzDevice)
#### Clear memory and graphs
rm(list = ls())
graphics.off()
options(max.print = 500)
#### Tool function
## Get centroids (centro, north, and south) of data
## Latitudinal reference points of a species' range: the centroid of the
## unique latitudes, plus the midpoints between the centroid and the
## northern/southern range edges (used as region thresholds).
computeCentroLat = function(lat)
{
lat = unique(lat)
min_lat = min(lat)
max_lat = max(lat)
av_lat = mean(lat)
return(list(centro_lat = av_lat, north_lat = mean(c(av_lat, max_lat)), south_lat = mean(c(av_lat, min_lat))))
}
## Set region
## Classify each latitude as "north", "middle" or "south" relative to the
## thresholds from computeCentroLat().
## NOTE(review): `centro` is accepted but unused; kept for compatibility with
## the existing positional calls.
## Returned as a one-element list so it can be assigned via data.table `:=`.
region = function(lat, centro, north, south)
{
results = rep("middle", length(lat))
results[lat <= south] = "south"
results[lat >= north] = "north"
return (list(region = results))
}
## Function to plot (quick and dirty, using global access variables)
varGrowth_fct = function(nameFig, rangeFig = 1:3, stand = FALSE)
{
tikz(nameFig, width = 6, height = 5, standAlone = stand)
op = par(mar = c(0, 2.5, 2, 0), mgp = c(1.5, 0.75, 0),
oma = c(0,2,0,0), tck = -0.015, las = 1)
tikzAnnotate(paste0("\\definecolor{shadeN}{RGB}{", paste(shadeN, collapse = ","), "}"))
tikzAnnotate(paste0("\\definecolor{shadeM}{RGB}{", paste(shadeM, collapse = ","), "}"))
tikzAnnotate(paste0("\\definecolor{shadeS}{RGB}{", paste(shadeS, collapse = ","), "}"))
plot(x = NULL, y = NULL, xlim = c(0, 3*speciesSpace + 2*interSpecies + 0.1),
ylim = c(0, maxG + 1), axes = FALSE, xlab = "",
ylab = "")
count = 0
for (i in rangeFig)
{
## Species line and name
sp = ls_species[i]
species_coords = halfSpeciesSpace + count*(speciesSpace + interSpecies)
tikzCoord(species_coords - halfSpeciesSpace, maxG + 0.5, paste0("pt_start_", i))
tikzCoord(species_coords + halfSpeciesSpace, maxG + 0.5, paste0("pt_end_", i))
tikzAnnotate(paste0("\\draw[loosely dashed] (pt_start_", i, ") -- (pt_end_", i, ");"))
tikzCoord(species_coords, maxG + 0.5, 'species_coords')
tikzAnnotate(paste0("\\node[above] (spPos) at (species_coords) {", sp, "};"))
## Add boxplots manually
# North
north_qt = growth_dt[(species == sp) & (region == "north"), quantile(growth, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))]
outliers_above = growth_dt[(species == sp) & (region == "north") & (growth > north_qt["97.5%"]), unique(growth)]
outliers_below = growth_dt[(species == sp) & (region == "north") & (growth < north_qt["2.5%"]), unique(growth)]
north_right = species_coords - midBox - interBox
north_left = north_right - widthBox
segments(x0 = north_left, y0 = north_qt["2.5%"], x1 = north_right, y1 = north_qt["2.5%"], col = "#010120") # Lower horizontal
segments(x0 = species_coords - interBox - widthBox, y0 = north_qt["2.5%"],
x1 = species_coords - interBox - widthBox, y1 = north_qt["25%"], col = "#010120") # Lower vertical
tikzCoord(north_left, north_qt["25%"], paste0("lower_north_", i)) # Lower left corner
tikzCoord(north_right, north_qt["75%"], paste0("upper_north_", i)) # Upper right corner
tikzAnnotate(paste0("\\draw[color=shadeN] (lower_north_", i, ") rectangle (upper_north_", i, ");")) # Rectangle
segments(x0 = north_left, y0 = north_qt["50%"], x1 = north_right, y1 = north_qt["50%"], col = "#010120") # Lower horizontal
segments(x0 = species_coords - interBox - widthBox, y0 = north_qt["75%"],
x1 = species_coords - interBox - widthBox, y1 = north_qt["97.5%"], col = "#010120") # Upper vertical
segments(x0 = north_left, y0 = north_qt["97.5%"], x1 = north_right, y1 = north_qt["97.5%"], col = "#010120") # Upper horizontal
points(x = rep(species_coords - interBox - widthBox, length(outliers_above)),
y = outliers_above, col = "#010120", pch = 20)
points(x = rep(species_coords - interBox - widthBox, length(outliers_below)),
y = outliers_below, col = "#010120", pch = 20)
# Middle
middle_qt = growth_dt[(species == sp) & (region == "middle"), quantile(growth, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))]
outliers_above = growth_dt[(species == sp) & (region == "middle") & (growth > middle_qt["97.5%"]), unique(growth)]
outliers_below = growth_dt[(species == sp) & (region == "middle") & (growth < middle_qt["2.5%"]), unique(growth)]
middle_right = species_coords + midBox
middle_left = species_coords - midBox
segments(x0 = middle_left, y0 = middle_qt["2.5%"], x1 = middle_right, y1 = middle_qt["2.5%"], col = "#0E51FF") # Lower horizontal
segments(x0 = species_coords, y0 = middle_qt["2.5%"], x1 = species_coords, y1 = middle_qt["25%"], col = "#0E51FF") # Lower vertical
tikzCoord(middle_left, middle_qt["25%"], paste0("lower_middle_", i)) # Lower left corner
tikzCoord(middle_right, middle_qt["75%"], paste0("upper_middle_", i)) # Upper right corner
tikzAnnotate(paste0("\\draw[color=shadeM] (lower_middle_", i, ") rectangle (upper_middle_", i, ");")) # Rectangle
segments(x0 = middle_left, y0 = middle_qt["50%"], x1 = middle_right, y1 = middle_qt["50%"], col = "#0E51FF") # Lower horizontal
segments(x0 = species_coords, y0 = middle_qt["75%"], x1 = species_coords, y1 = middle_qt["97.5%"], col = "#0E51FF") # Upper vertical
segments(x0 = middle_left, y0 = middle_qt["97.5%"], x1 = middle_right, y1 = middle_qt["97.5%"], col = "#0E51FF") # Upper horizontal
points(x = rep(species_coords, length(outliers_above)),
y = outliers_above, col = "#0E51FF", pch = 20)
points(x = rep(species_coords, length(outliers_below)),
y = outliers_below, col = "#0E51FF", pch = 20)
# South
south_qt = growth_dt[(species == sp) & (region == "south"), quantile(growth, probs = c(0.025, 0.25, 0.5, 0.75, 0.975))]
outliers_above = growth_dt[(species == sp) & (region == "south") & (growth > south_qt["97.5%"]), unique(growth)]
outliers_below = growth_dt[(species == sp) & (region == "south") & (growth < south_qt["2.5%"]), unique(growth)]
south_right = species_coords + midBox + interBox
south_left = south_right + widthBox
segments(x0 = south_left, y0 = south_qt["2.5%"], x1 = south_right, y1 = south_qt["2.5%"], col = "#00F9FF") # Lower horizontal
segments(x0 = species_coords + interBox + widthBox, y0 = south_qt["2.5%"],
x1 = species_coords + interBox + widthBox, y1 = south_qt["25%"], col = "#00F9FF") # Lower vertical
tikzCoord(south_left, south_qt["25%"], paste0("lower_south_", i)) # Lower left corner
tikzCoord(south_right, south_qt["75%"], paste0("upper_south_", i)) # Upper right corner
tikzAnnotate(paste0("\\draw[color=shadeS] (lower_south_", i, ") rectangle (upper_south_", i, ");")) # Rectangle
segments(x0 = south_left, y0 = south_qt["50%"], x1 = south_right, y1 = south_qt["50%"], col = "#00F9FF") # Lower horizontal
segments(x0 = species_coords + interBox + widthBox, y0 = south_qt["75%"],
x1 = species_coords + interBox + widthBox, y1 = south_qt["97.5%"], col = "#00F9FF") # Upper vertical
segments(x0 = south_left, y0 = south_qt["97.5%"], x1 = south_right, y1 = south_qt["97.5%"], col = "#00F9FF") # Upper horizontal
points(x = rep(species_coords + interBox + widthBox, length(outliers_above)),
y = outliers_above, col = "#00F9FF", pch = 20)
points(x = rep(species_coords + interBox + widthBox, length(outliers_below)),
y = outliers_below, col = "#00F9FF", pch = 20)
count = count + 1
}
axis(side = 2, at = seq(0, maxG, by = 5))
mtext(text = "Growth data (in mm/yr)", side = 2, outer = TRUE, las = 0)
# Legend
tikzAnnotate("
\\matrix [below right] at (current bounding box.north west) {
\\node [shape = rectangle, fill = shadeN, label = right:North] {}; &
\\node [shape = rectangle, fill = shadeM, label = right:Middle] {}; &
\\node [shape = rectangle, fill = shadeS, label = right:South] {}; \\\\
};
")
dev.off()
}
#### Load data and compute data centroids
## Growth data
growth_dt = readRDS("../createData/growth_dt.rds")[, .(species_id, latitude, growth)]
tsn = readRDS("../growth/tsn.rds")[, .(species_id, species, tolLevel)]
ls_species = sort(tsn[, species])
n = length(ls_species)
## Species-specific data centroids
# Latitude centroids
growth_dt[, c("centroid", "north", "south") := computeCentroLat(latitude), by = species_id]
growth_dt[, region := region(latitude, centroid, north, south), by = species_id]
growth_dt[, table(region), by = species_id]
growth_dt = growth_dt[tsn, on = "species_id"]
maxG = growth_dt[, max(growth)]
#### Plot parameters
## Colours for Northern, Middle and Southern regions
shadeN = col2rgb("#010120")[,1]
shadeM = col2rgb("#0E51FF")[,1]
shadeS = col2rgb("#00F9FF")[,1]
## Space and width
widthBox = 0.75
interBox = 0.25
interSpecies = 1
midBox = widthBox/2
speciesSpace = 3*widthBox + 2*interBox
halfSpeciesSpace = speciesSpace/2
#### Plot
varGrowth_fct("growthVar1-3.tex", rangeFig = 1:3) # , stand = TRUE)
varGrowth_fct("growthVar4-6.tex", rangeFig = 4:6) # , stand = TRUE)
varGrowth_fct("growthVar7-9.tex", rangeFig = 7:9) # , stand = TRUE)
varGrowth_fct("growthVar10-12.tex", rangeFig = 10:12) # , stand = TRUE)
varGrowth_fct("growthVar13-14.tex", rangeFig = 13:14) # , stand = TRUE)
|
## Per-sample batch script: count allele-specific reads (asSeq::extractAsReads)
## at heterozygous SNPs for one TCGA sample, selected by the batch index `i`
## (each batch job hard-codes its own i).
i = 36
# Spell out `lib.loc` (the original relied on partial matching of `lib`).
library(asSeq, lib.loc="/nas02/home/w/e/weisun/R/Rlibs/")
# -------------------------------------------------------------------------
# read in the list of the SNP to be excluded
# -------------------------------------------------------------------------
setwd("/lustre/scr/w/e/weisun/TCGA/hetSNP_EA/")
files = list.files(path = ".", pattern="hetSNP_")
# Guard against an out-of-range batch index (files[i] would silently be NA).
if(i > length(files)){
stop("batch index i exceeds the number of hetSNP_ files found")
}
# Sample names: strip the "hetSNP_" prefix and ".txt" suffix from file names.
sams = gsub("hetSNP_", "", files, fixed=TRUE)
sams = gsub(".txt", "", sams, fixed=TRUE)
#for(i in 1:length(files)){
f1 = files[i]
sam1 = sams[i]
cat("\n", sam1, date(), "\n")
# Input BAM and output tag follow a fixed per-sample naming convention.
input = sprintf("../bam/%s_sorted_by_name_uniq_filtered.bam", sam1)
outputTag = sprintf("../bam/%s_asCounts_hetSNP_EA", sam1)
snpList = f1
if(! file.exists(f1)){
stop("snpList file does not exist")
}
# Count allele-specific reads at the heterozygous SNPs for this sample.
extractAsReads(input, snpList, outputTag)
#}
| /data_preparation/R_batch3/_step2/step2_filter_asCounts.35.R | no_license | jasa-acs/Mapping-Tumor-Specific-Expression-QTLs-in-Impure-Tumor-Samples | R | false | false | 808 | r | i = 36
## Per-sample batch script: count allele-specific reads (asSeq::extractAsReads)
## at heterozygous SNPs for one TCGA sample, selected by the batch index `i`
## (assumed to be defined before this point; each batch job sets its own i).
# NOTE(review): `lib` relies on partial matching of library()'s `lib.loc`
# argument -- spell it out if this script is ever revised.
library(asSeq, lib="/nas02/home/w/e/weisun/R/Rlibs/")
# -------------------------------------------------------------------------
# read in the list of the SNP to be excluded
# -------------------------------------------------------------------------
setwd("/lustre/scr/w/e/weisun/TCGA/hetSNP_EA/")
# One hetSNP_<sample>.txt file per sample in the working directory
files = list.files(path = ".", pattern="hetSNP_")
# Sample names: strip the "hetSNP_" prefix and ".txt" suffix
sams = gsub("hetSNP_", "", files)
sams = gsub(".txt", "", sams, fixed=TRUE)
#for(i in 1:length(files)){
f1 = files[i]
sam1 = sams[i]
cat("\n", sam1, date(), "\n")
# Input BAM and output tag follow a fixed per-sample naming convention
input = sprintf("../bam/%s_sorted_by_name_uniq_filtered.bam", sam1)
outputTag = sprintf("../bam/%s_asCounts_hetSNP_EA", sam1)
snpList = f1
if(! file.exists(f1)){
stop("snpList file does not exist")
}
extractAsReads(input, snpList, outputTag)
#}
|
if(!is.na(seed)){
set.seed(seed)
}
## Generate Data
pmcmc_pars_list <- list(
# three peaks, large, small, delta
data = tibble(
deaths = c(14506, 50601, 62041, 83553, 110407, 137149, 133691, 98801, 82238, 81455, 64927, 63541, 65007, 63094, 55199, 34818, 36804, 49677, 51421,
64591, 163419, 271734, 600103, 624699, 407582, 140985, 126373, 129763, 136651, 118683, 101090, 92772, 94338, 94087, 90210, 81653, 76853, 43591)
) %>%
mutate(
week_start = as_date("2021-12-01") - 7 * rev(seq_along(deaths)),
week_end = week_start + 7
)
)
## Country Parameters
pmcmc_pars_list <- append(
pmcmc_pars_list,
list(
population = c(
116879507, 117982127, 126155952, 126045566, 122504804, 117397269, 112176098, 103460178, 90219894, 79440280, 68875962, 59256268,
48890528, 38260283, 24091443, 15083955, 13284271
),
# contact matrix
baseline_contact_matrix = squire::get_mixing_matrix(iso3c = "IND")
)
)
# healthcare capacity scaled to total population.
# Bug fix: the list element is `population`; the original used `$populations`,
# which does NOT partial-match a shorter element name, returned NULL, and so
# silently set both capacities to sum(NULL)/k = 0.
pmcmc_pars_list$baseline_hosp_bed_capacity <- sum(pmcmc_pars_list$population) / 1000
pmcmc_pars_list$baseline_ICU_bed_capacity <- sum(pmcmc_pars_list$population) / 10000
## Vaccine Parameters
pmcmc_pars_list <- append(
pmcmc_pars_list,
list(
# similar to standard but we don't bother with double dose scaling
baseline_vaccine_efficacy_infection = 0.6,
baseline_vaccine_efficacy_disease = 0.9,
dur_V = 365 * 1.5,
vaccine_coverage_mat = purrr::map_df(.x = 1:15, .f = function(x) {
out <- rep(0, 17)
out[17 - 1:x + 1] <- 0.8
names(out) <- paste0("age_group", 1:17)
out
}) %>% as.matrix(),
baseline_max_vaccine = 0
)
)
# regimen, starts in at first date should hit max by dec-2021
pmcmc_pars_list$date_vaccine_change <- min(pmcmc_pars_list$data$week_start)
pmcmc_pars_list$max_vaccine <- sum(
pmcmc_pars_list$population * tail(pmcmc_pars_list$vaccine_coverage_mat, 1)
) /
as.numeric(as_date("2021-12-01") - pmcmc_pars_list$date_vaccine_change)
## COVID parameters
pmcmc_pars_list$dur_R <- 365
# rest as defaults
## MCMC Options
pmcmc_pars_list <- append(
pmcmc_pars_list,
list(
# intial values etc simpler if we use the same already
n_mcmc = 10000,
squire_model = nimue::nimue_deterministic_model(),
log_likelihood = excess_log_likelihood,
log_prior = function(pars) {
0
},
n_particles = 1,
steps_per_day = 1,
n_chains = 1,
scaling_factor = 1,
pars_init = list(),
pars_min = list(),
pars_max = list(),
pars_discrete = list(),
proposal_kernel = NULL,
pars_obs = list(
phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 7, exp_noise = 1e07,
k_death_cumulative = 40,
likelihood = function(model_deaths, data_deaths, pars_obs) {
phi_death <- 1
k_death <- 7
exp_noise <- 1e07
k_death_cumulative <- 40
# also add a term for cumulative deaths
c(
squire:::ll_nbinom(
data_deaths, model_deaths, phi_death,
k_death,
exp_noise
),
squire:::ll_nbinom(
sum(data_deaths), sum(model_deaths), phi_death,
k_death_cumulative,
exp_noise
)
)
}
),
Rt_args = list(
Rt_date_spline_start = min(pmcmc_pars_list$data$week_start),
Rt_rw_duration = 14
),
burnin = 0,
replicates = 25
)
)
# assign initials
# start date
pmcmc_pars_list$pars_init$start_date <- min(pmcmc_pars_list$data$week_start) - 50
pmcmc_pars_list$pars_min$start_date <- min(pmcmc_pars_list$data$week_start) - 50 - 10
pmcmc_pars_list$pars_max$start_date <- min(pmcmc_pars_list$data$week_start) - 50 + 10
pmcmc_pars_list$pars_discrete$start_date <- TRUE
# R0
pmcmc_pars_list$pars_init$R0 <- 4.56
pmcmc_pars_list$pars_min$R0 <- 3
pmcmc_pars_list$pars_max$R0 <- 5
pmcmc_pars_list$pars_discrete$R0 <- FALSE
# fitting parameters
# how many needed
pmcmc_pars_list$Rt_args$date_Rt_change <- seq(
pmcmc_pars_list$Rt_args$Rt_date_spline_start,
max(pmcmc_pars_list$data$week_end) - 21,
by = pmcmc_pars_list$Rt_args$Rt_rw_duration
)
pmcmc_pars_list$date_Rt_change <- pmcmc_pars_list$Rt_args$date_Rt_change
#manually tuned to fit the no adjustments case
pars_init_rw <- as.list(
c(1.44, 0.2, 0.24, 0.32, -0.52, 0.16, 0.08, -0.32, -0.04,
-0.88, -0.32, 1.2, -0.64, -1.08, 0.64, -0.84, 0.2, 0.28)
)
pars_min_rw <- as.list(rep(-2, length(pmcmc_pars_list$Rt_args$date_Rt_change)))
pars_max_rw <- as.list(rep(2, length(pmcmc_pars_list$Rt_args$date_Rt_change)))
pars_discrete_rw <- as.list(rep(FALSE, length(pmcmc_pars_list$Rt_args$date_Rt_change)))
names(pars_init_rw) <- names(pars_min_rw) <- names(pars_max_rw) <-
names(pars_discrete_rw) <- paste0("Rt_rw_", seq_along(pmcmc_pars_list$Rt_args$date_Rt_change))
pmcmc_pars_list$pars_init <- append(pmcmc_pars_list$pars_init, pars_init_rw)
pmcmc_pars_list$pars_min <- append(pmcmc_pars_list$pars_min, pars_min_rw)
pmcmc_pars_list$pars_max <- append(pmcmc_pars_list$pars_max, pars_max_rw)
pmcmc_pars_list$pars_discrete <- append(pmcmc_pars_list$pars_discrete, pars_discrete_rw)
rm(pars_init_rw, pars_min_rw, pars_max_rw, pars_discrete_rw)
# proposal covariance
pmcmc_pars_list$proposal_kernel <- matrix(1,
nrow = length(pmcmc_pars_list$pars_init),
ncol = length(pmcmc_pars_list$pars_init)
)
colnames(pmcmc_pars_list$proposal_kernel) <- names(pmcmc_pars_list$pars_init)
rownames(pmcmc_pars_list$proposal_kernel) <- names(pmcmc_pars_list$pars_init)
## Delta Characteristics, will do multiple just one for now
delta_characteristics <- tribble(
~start_date, ~immune_escape, ~hosp_modifier, ~delta_shift_dur,
as_date("2021-07-01"), 0, 1, 60,
as_date("2021-07-01"), 0.10, 1, 60,
as_date("2021-07-01"), 0.20, 1, 60,
as_date("2021-07-01"), 0.30, 1, 60,
as_date("2021-07-01"), 0.40, 1, 60,
as_date("2021-07-01"), 0.50, 1, 60,
as_date("2021-07-01"), 0.60, 1, 60,
as_date("2021-07-01"), 0.70, 1, 60,
as_date("2021-07-01"), 0.80, 1, 60
) %>%
mutate(
# calculate dur_R shift
dur_R_shift = 1 / (
(delta_shift_dur / pmcmc_pars_list$dur_R - log(1 - immune_escape)) /
delta_shift_dur
)
)
#delta_characteristics <- delta_characteristics[c(1, nrow(delta_characteristics)),]
## Run fitting + calculate deaths averted
## Diagnostic fit plot for one model fit, titled with the delta-variant
## scenario parameters so figures in the output PDF are self-describing.
create_plot <- function(model_fit, start_date, immune_escape, hosp_modifier, delta_shift_dur){
  scenario_title <- paste0(
    "Start Date:", start_date,
    " Immune Escape:", immune_escape,
    " Hospitalisation Modifier:", hosp_modifier,
    " Shift Duration:", delta_shift_dur
  )
  dp_plot(model_fit) +
    labs(title = scenario_title)
}
## Effective reproduction number over time for a fitted model: Rt multiplied
## by the immunity/susceptibility ratio from squire.page, per posterior draw.
## `out` is a model-fit object accepted by get_Rt() and
## squire.page::get_immunity_ratios_vaccine().
calculate_reff <- function(out){
# One vector of immunity ratios per replicate (list indexed by rep)
ratios <- squire.page::get_immunity_ratios_vaccine(out)
reff <- get_Rt(out) %>% dplyr::group_by(rep) %>% dplyr::mutate(ratios = ratios[[unique(.data$rep)]][seq_along(.data$Rt)],
Reff = .data$Rt * .data$ratios) %>%
dplyr::ungroup()
return(reff)
}
## Fit the model once per delta-variant scenario (rows of delta_characteristics)
## and, for each fit, compute (a) deaths averted by vaccination relative to a
## zero-vaccination counterfactual and (b) the Reff trajectories of both runs.
## Returns a list per scenario: $plot (fit diagnostic), $reff (Reff data frame),
## $result (deaths-averted summary).
model_fits <- pmap(
.l = delta_characteristics,
# extra unnamed argument: forwarded via pmap's `...` into the .f parameter
# of the same name below
pmcmc_pars_list,
.f = function(start_date, immune_escape, dur_R_shift, hosp_modifier,
delta_shift_dur, pmcmc_pars_list) {
# add the delta adjustments to pars_obs
pmcmc_pars_list$pars_obs$dur_R <- dur_R_shift
pmcmc_pars_list$pars_obs$prob_hosp_multiplier <- hosp_modifier
pmcmc_pars_list$pars_obs$delta_start_date <- start_date
pmcmc_pars_list$pars_obs$shift_duration <- delta_shift_dur
#
# pmcmc_pars_list$pars_obs$dur_R <- delta_characteristics$dur_R_shift[1]
# pmcmc_pars_list$pars_obs$prob_hosp_multiplier <- delta_characteristics$hosp_modifier[1]
# pmcmc_pars_list$pars_obs$delta_start_date <- delta_characteristics$start_date[1]
# pmcmc_pars_list$pars_obs$shift_duration <- delta_characteristics$delta_shift_dur[1]
# pmcmc_pars_list$n_mcmc <- 100
# fit to data
model_fit <- exec(
pmcmc_excess,
!!!pmcmc_pars_list
)
# assign class because I didn't put that in pmcmc_excess for some reason?
class(model_fit) <- c("excess_nimue_simulation", "nimue_simulation")
#also assign prior since that's also missing
model_fit$pmcmc_results$inputs$prior <- function(pars) {
0
}
plot <- create_plot(model_fit, start_date, immune_escape, hosp_modifier, delta_shift_dur
)
#generate parameter draws (shared by baseline and counterfactual runs)
pars_list <- squire.page::generate_parameters(model_fit, draws = 25)
#get baseline deaths
baseline_deaths <- squire.page::nimue_format(
squire.page::generate_draws(model_fit, pars_list),
var_select = "deaths"
)
#reff
model_fit$parameters$country <- "India"
baseline_reff <- calculate_reff(
squire.page::generate_draws(model_fit, pars_list)
)
# squire.page::rt_plot_immunity(
# squire.page::generate_draws(model_fit, pars_list)
# )
#generate counterfactual deaths: zero out vaccination everywhere the fitted
#object stores it, then redraw with the same parameter draws
model_fit$interventions$max_vaccine <- c(0, 0)
model_fit$odin_parameters$max_vaccine <- c(0, 0)
model_fit$parameters$max_vaccine <- c(0, 0)
model_fit$pmcmc_results$inputs$interventions$max_vaccine <- c(0, 0)
model_fit$pmcmc_results$inputs$model_params$max_vaccine <- c(0, 0)
counterfactual_deaths <- squire.page::nimue_format(
squire.page::generate_draws(model_fit, pars_list),
var_select = "deaths"
)
counterfactual_reff <- calculate_reff(
squire.page::generate_draws(model_fit, pars_list)
)
# squire.page::rt_plot_immunity(
# squire.page::generate_draws(model_fit, pars_list)
# )
#merge and calculate total deaths averted (per replicate, then summarised
#as median and 95% interval across replicates)
deaths_averted <- baseline_deaths %>%
group_by(replicate) %>%
summarise(baseline_deaths = sum(y, na.rm = TRUE)) %>%
left_join(counterfactual_deaths %>%
group_by(replicate) %>%
summarise(deaths = sum(y, na.rm = TRUE))
) %>%
mutate(deaths_averted = deaths - baseline_deaths) %>%
summarise(
deaths_averted_median = median(deaths_averted),
deaths_averted_025 = quantile(deaths_averted, 0.025),
deaths_averted_975 = quantile(deaths_averted, 0.975)
) %>%
mutate(
delta_start_date = start_date,
delta_immune_escape = immune_escape,
delta_hosp_modifier = hosp_modifier,
delta_delta_shift_dur = delta_shift_dur
)
#as an experiment we also plot difference in reff from both
reff_df <- #
baseline_reff %>%
select(date, Reff, rep) %>%
rename(vacc_Reff = Reff) %>%
full_join(
counterfactual_reff %>%
select(date, Reff, rep)
) %>%
mutate(
delta_start_date = start_date,
delta_immune_escape = immune_escape,
delta_hosp_modifier = hosp_modifier,
delta_delta_shift_dur = delta_shift_dur
)
# baseline_reff$rts %>%
# select(date, Reff_median) %>%
# rename(vacc_Reff = Reff_median) %>%
# full_join(
# counterfactual_reff$rts %>%
# select(date, Reff_median)
# ) %>%
# mutate(
# Reff_diff = Reff_median - vacc_Reff
# ) %>%
# select(date, Reff_diff) %>%
# mutate(
# delta_start_date = start_date,
# delta_immune_escape = immune_escape,
# delta_hosp_modifier = hosp_modifier,
# delta_delta_shift_dur = delta_shift_dur
# )
return(list(
plot = plot,
reff = reff_df,
result = deaths_averted
))
}
)
#pdf of plots
pdf("fitting_plot.pdf")
map(model_fits, ~print(.x$plot))
dev.off()
#combine reff differences
saveRDS(
do.call(
rbind,
map(
model_fits, ~.x$reff
)
),# %>%
# rename(`Immune Escape:` = delta_immune_escape) %>%
# select(date, Reff_diff, `Immune Escape:`) %>%
# ggplot(aes(x = date, y = Reff_diff,
# colour = `Immune Escape:`, group = `Immune Escape:`)) +
# geom_line() +
# ggpubr::theme_pubclean() +
# labs(x = "Date", y = "Reduction in effective reproduction\nfrom vaccine induced protection"),
"reff_plot.Rds"
)
#save results: stack the per-scenario deaths-averted summaries into one
#data frame and write it to disk
results_df <- do.call(
rbind,
lapply(model_fits, function(x){x$result})
)
saveRDS(results_df, "res.Rds")
#plot of the deaths
saveRDS(ggplot(pmcmc_pars_list$data, aes(week_start, (deaths/sum(pmcmc_pars_list$population))*1e5)) +
geomtextpath::geom_textvline(label = "Delta Introduction",
xintercept = unique(delta_characteristics$start_date),
hjust = 0.2,
linetype = 2) +
geom_step(color = "red") +
theme_bw() + ylab("Weekly Deaths per 100,000\n") + xlab("") +
ggpubr::theme_pubr(), "death_curve.Rds")
| /src/delta_toy_example_india/script.R | permissive | mrc-ide/covid-vaccine-impact-orderly | R | false | false | 12,744 | r | if(!is.na(seed)){
set.seed(seed)
}
## Generate Data
pmcmc_pars_list <- list(
# three peaks, large, small, delta
data = tibble(
deaths = c(14506, 50601, 62041, 83553, 110407, 137149, 133691, 98801, 82238, 81455, 64927, 63541, 65007, 63094, 55199, 34818, 36804, 49677, 51421,
64591, 163419, 271734, 600103, 624699, 407582, 140985, 126373, 129763, 136651, 118683, 101090, 92772, 94338, 94087, 90210, 81653, 76853, 43591)
) %>%
mutate(
week_start = as_date("2021-12-01") - 7 * rev(seq_along(deaths)),
week_end = week_start + 7
)
)
## Country Parameters
pmcmc_pars_list <- append(
pmcmc_pars_list,
list(
population = c(
116879507, 117982127, 126155952, 126045566, 122504804, 117397269, 112176098, 103460178, 90219894, 79440280, 68875962, 59256268,
48890528, 38260283, 24091443, 15083955, 13284271
),
# contact matrix
baseline_contact_matrix = squire::get_mixing_matrix(iso3c = "IND")
)
)
# healthcare
# Bed capacities scaled from total population (1 hospital bed per 1,000
# people, 1 ICU bed per 10,000).
# BUG FIX: the list element is `population`, not `populations`. `$` partial
# matching cannot match a name LONGER than the stored one, so
# `pmcmc_pars_list$populations` returned NULL and sum(NULL) == 0, silently
# setting both capacities to zero.
pmcmc_pars_list$baseline_hosp_bed_capacity <- sum(pmcmc_pars_list$population) / 1000
pmcmc_pars_list$baseline_ICU_bed_capacity <- sum(pmcmc_pars_list$population) / 10000
## Vaccine Parameters
# Coverage matrix: row x vaccinates the oldest x age groups at 80% coverage
# (prioritisation runs oldest-first).
pmcmc_pars_list <- append(
pmcmc_pars_list,
list(
# similar to standard but we don't bother with double dose scaling
baseline_vaccine_efficacy_infection = 0.6,
baseline_vaccine_efficacy_disease = 0.9,
dur_V = 365 * 1.5,
vaccine_coverage_mat = purrr::map_df(.x = 1:15, .f = function(x) {
out <- rep(0, 17)
out[17 - 1:x + 1] <- 0.8
names(out) <- paste0("age_group", 1:17)
out
}) %>% as.matrix(),
baseline_max_vaccine = 0
)
)
# regimen, starts in at first date should hit max by dec-2021
# Daily dose rate chosen so the full target coverage (final row of the
# coverage matrix times population) is reached by 2021-12-01.
pmcmc_pars_list$date_vaccine_change <- min(pmcmc_pars_list$data$week_start)
pmcmc_pars_list$max_vaccine <- sum(
pmcmc_pars_list$population * tail(pmcmc_pars_list$vaccine_coverage_mat, 1)
) /
as.numeric(as_date("2021-12-01") - pmcmc_pars_list$date_vaccine_change)
## COVID parameters
pmcmc_pars_list$dur_R <- 365
# rest as defaults
## MCMC Options
# Everything below is passed wholesale to pmcmc_excess() via exec(); the
# custom likelihood adds a cumulative-deaths negative-binomial term on top of
# the weekly one.
pmcmc_pars_list <- append(
pmcmc_pars_list,
list(
# initial values etc simpler if we use the same already
n_mcmc = 10000,
squire_model = nimue::nimue_deterministic_model(),
log_likelihood = excess_log_likelihood,
log_prior = function(pars) {
0
},
n_particles = 1,
steps_per_day = 1,
n_chains = 1,
scaling_factor = 1,
pars_init = list(),
pars_min = list(),
pars_max = list(),
pars_discrete = list(),
proposal_kernel = NULL,
pars_obs = list(
phi_cases = 1, k_cases = 2, phi_death = 1, k_death = 7, exp_noise = 1e07,
k_death_cumulative = 40,
likelihood = function(model_deaths, data_deaths, pars_obs) {
# NOTE: these locals shadow pars_obs values — the constants here, not
# the list entries above, are what the fit actually uses.
phi_death <- 1
k_death <- 7
exp_noise <- 1e07
k_death_cumulative <- 40
# also add a term for cumulative deaths
c(
squire:::ll_nbinom(
data_deaths, model_deaths, phi_death,
k_death,
exp_noise
),
squire:::ll_nbinom(
sum(data_deaths), sum(model_deaths), phi_death,
k_death_cumulative,
exp_noise
)
)
}
),
Rt_args = list(
Rt_date_spline_start = min(pmcmc_pars_list$data$week_start),
Rt_rw_duration = 14
),
burnin = 0,
replicates = 25
)
)
# assign initials
# start date: epidemic seeded ~50 days before the first data week, +/- 10 days
# start date
pmcmc_pars_list$pars_init$start_date <- min(pmcmc_pars_list$data$week_start) - 50
pmcmc_pars_list$pars_min$start_date <- min(pmcmc_pars_list$data$week_start) - 50 - 10
pmcmc_pars_list$pars_max$start_date <- min(pmcmc_pars_list$data$week_start) - 50 + 10
pmcmc_pars_list$pars_discrete$start_date <- TRUE
# R0
pmcmc_pars_list$pars_init$R0 <- 4.56
pmcmc_pars_list$pars_min$R0 <- 3
pmcmc_pars_list$pars_max$R0 <- 5
pmcmc_pars_list$pars_discrete$R0 <- FALSE
# fitting parameters
# how many needed
# One Rt random-walk knot every Rt_rw_duration (14) days, stopping 21 days
# before the end of the data.
pmcmc_pars_list$Rt_args$date_Rt_change <- seq(
pmcmc_pars_list$Rt_args$Rt_date_spline_start,
max(pmcmc_pars_list$data$week_end) - 21,
by = pmcmc_pars_list$Rt_args$Rt_rw_duration
)
pmcmc_pars_list$date_Rt_change <- pmcmc_pars_list$Rt_args$date_Rt_change
#manually tuned to fit the no adjustments case
# NOTE(review): 18 hand-tuned values — assumed to equal
# length(date_Rt_change); confirm if the data window or rw duration changes.
pars_init_rw <- as.list(
c(1.44, 0.2, 0.24, 0.32, -0.52, 0.16, 0.08, -0.32, -0.04,
-0.88, -0.32, 1.2, -0.64, -1.08, 0.64, -0.84, 0.2, 0.28)
)
pars_min_rw <- as.list(rep(-2, length(pmcmc_pars_list$Rt_args$date_Rt_change)))
pars_max_rw <- as.list(rep(2, length(pmcmc_pars_list$Rt_args$date_Rt_change)))
pars_discrete_rw <- as.list(rep(FALSE, length(pmcmc_pars_list$Rt_args$date_Rt_change)))
names(pars_init_rw) <- names(pars_min_rw) <- names(pars_max_rw) <-
names(pars_discrete_rw) <- paste0("Rt_rw_", seq_along(pmcmc_pars_list$Rt_args$date_Rt_change))
pmcmc_pars_list$pars_init <- append(pmcmc_pars_list$pars_init, pars_init_rw)
pmcmc_pars_list$pars_min <- append(pmcmc_pars_list$pars_min, pars_min_rw)
pmcmc_pars_list$pars_max <- append(pmcmc_pars_list$pars_max, pars_max_rw)
pmcmc_pars_list$pars_discrete <- append(pmcmc_pars_list$pars_discrete, pars_discrete_rw)
rm(pars_init_rw, pars_min_rw, pars_max_rw, pars_discrete_rw)
# proposal covariance
# Unit matrix of ones (not identity) — presumably rescaled downstream by the
# sampler; verify against pmcmc_excess.
pmcmc_pars_list$proposal_kernel <- matrix(1,
nrow = length(pmcmc_pars_list$pars_init),
ncol = length(pmcmc_pars_list$pars_init)
)
colnames(pmcmc_pars_list$proposal_kernel) <- names(pmcmc_pars_list$pars_init)
rownames(pmcmc_pars_list$proposal_kernel) <- names(pmcmc_pars_list$pars_init)
## Delta Characteristics, will do multiple just one for now
# Scenario grid: fixed start date/hosp modifier/shift duration, immune escape
# swept 0–0.8. dur_R_shift converts immune escape into an equivalent
# temporary shortening of the duration of natural immunity over the shift
# window.
delta_characteristics <- tribble(
~start_date, ~immune_escape, ~hosp_modifier, ~delta_shift_dur,
as_date("2021-07-01"), 0, 1, 60,
as_date("2021-07-01"), 0.10, 1, 60,
as_date("2021-07-01"), 0.20, 1, 60,
as_date("2021-07-01"), 0.30, 1, 60,
as_date("2021-07-01"), 0.40, 1, 60,
as_date("2021-07-01"), 0.50, 1, 60,
as_date("2021-07-01"), 0.60, 1, 60,
as_date("2021-07-01"), 0.70, 1, 60,
as_date("2021-07-01"), 0.80, 1, 60
) %>%
mutate(
# calculate dur_R shift
dur_R_shift = 1 / (
(delta_shift_dur / pmcmc_pars_list$dur_R - log(1 - immune_escape)) /
delta_shift_dur
)
)
#delta_characteristics <- delta_characteristics[c(1, nrow(delta_characteristics)),]
## Run fitting + calculate deaths averted
# Diagnostic fit plot for one delta scenario, titled with the scenario's
# parameter values. Returns the dp_plot() ggplot object with the title added.
create_plot <- function(model_fit, start_date, immune_escape, hosp_modifier, delta_shift_dur){
  # Build the title separately so the plotting expression stays short.
  scenario_title <- paste0(
    "Start Date:", start_date,
    " Immune Escape:", immune_escape,
    " Hospitalisation Modifier:", hosp_modifier,
    " Shift Duration:", delta_shift_dur
  )
  dp_plot(model_fit) + labs(title = scenario_title)
}
# Effective reproduction number per replicate: multiply each replicate's Rt
# trajectory by its immunity ratio series from squire.page.
calculate_reff <- function(out){
  immunity_ratios <- squire.page::get_immunity_ratios_vaccine(out)
  get_Rt(out) %>%
    dplyr::group_by(rep) %>%
    dplyr::mutate(
      # each group holds exactly one `rep`, so unique() picks that
      # replicate's ratio vector, truncated to the Rt series length
      ratios = immunity_ratios[[unique(.data$rep)]][seq_along(.data$Rt)],
      Reff = .data$Rt * .data$ratios
    ) %>%
    dplyr::ungroup()
}
# Fit one model per row of `delta_characteristics` and compute, for each
# scenario: the fit plot, the Reff comparison (vaccine vs no-vaccine), and a
# deaths-averted summary. The unnamed `pmcmc_pars_list` argument is forwarded
# through pmap's `...` into the matching formal of `.f`.
model_fits <- pmap(
.l = delta_characteristics,
pmcmc_pars_list,
.f = function(start_date, immune_escape, dur_R_shift, hosp_modifier,
delta_shift_dur, pmcmc_pars_list) {
# add the delta adjustments to pars_obs
pmcmc_pars_list$pars_obs$dur_R <- dur_R_shift
pmcmc_pars_list$pars_obs$prob_hosp_multiplier <- hosp_modifier
pmcmc_pars_list$pars_obs$delta_start_date <- start_date
pmcmc_pars_list$pars_obs$shift_duration <- delta_shift_dur
#
# pmcmc_pars_list$pars_obs$dur_R <- delta_characteristics$dur_R_shift[1]
# pmcmc_pars_list$pars_obs$prob_hosp_multiplier <- delta_characteristics$hosp_modifier[1]
# pmcmc_pars_list$pars_obs$delta_start_date <- delta_characteristics$start_date[1]
# pmcmc_pars_list$pars_obs$shift_duration <- delta_characteristics$delta_shift_dur[1]
# pmcmc_pars_list$n_mcmc <- 100
# fit to data
model_fit <- exec(
pmcmc_excess,
!!!pmcmc_pars_list
)
# assign class because I didn't put that in pmcmc_excess for some reason?
class(model_fit) <- c("excess_nimue_simulation", "nimue_simulation")
#also assign prior since that's also missing
model_fit$pmcmc_results$inputs$prior <- function(pars) {
0
}
plot <- create_plot(model_fit, start_date, immune_escape, hosp_modifier, delta_shift_dur
)
#generate parameter draws
# The same draws are reused for the baseline and counterfactual runs below,
# so the two scenarios differ only in vaccination.
pars_list <- squire.page::generate_parameters(model_fit, draws = 25)
#get baseline deaths
baseline_deaths <- squire.page::nimue_format(
squire.page::generate_draws(model_fit, pars_list),
var_select = "deaths"
)
#reff
model_fit$parameters$country <- "India"
baseline_reff <- calculate_reff(
squire.page::generate_draws(model_fit, pars_list)
)
# squire.page::rt_plot_immunity(
#   squire.page::generate_draws(model_fit, pars_list)
# )
#generate counterfactual deaths
# Zero out vaccination everywhere the fitted object stores it, then re-run
# the draws to get the no-vaccine counterfactual.
model_fit$interventions$max_vaccine <- c(0, 0)
model_fit$odin_parameters$max_vaccine <- c(0, 0)
model_fit$parameters$max_vaccine <- c(0, 0)
model_fit$pmcmc_results$inputs$interventions$max_vaccine <- c(0, 0)
model_fit$pmcmc_results$inputs$model_params$max_vaccine <- c(0, 0)
counterfactual_deaths <- squire.page::nimue_format(
squire.page::generate_draws(model_fit, pars_list),
var_select = "deaths"
)
counterfactual_reff <- calculate_reff(
squire.page::generate_draws(model_fit, pars_list)
)
# squire.page::rt_plot_immunity(
#   squire.page::generate_draws(model_fit, pars_list)
# )
#merge and calculate total deaths averted
# deaths_averted = counterfactual (no vaccine) minus baseline (with
# vaccine), summarised per replicate and then as median and 95% interval.
deaths_averted <- baseline_deaths %>%
group_by(replicate) %>%
summarise(baseline_deaths = sum(y, na.rm = TRUE)) %>%
left_join(counterfactual_deaths %>%
group_by(replicate) %>%
summarise(deaths = sum(y, na.rm = TRUE))
) %>%
mutate(deaths_averted = deaths - baseline_deaths) %>%
summarise(
deaths_averted_median = median(deaths_averted),
deaths_averted_025 = quantile(deaths_averted, 0.025),
deaths_averted_975 = quantile(deaths_averted, 0.975)
) %>%
mutate(
delta_start_date = start_date,
delta_immune_escape = immune_escape,
delta_hosp_modifier = hosp_modifier,
delta_delta_shift_dur = delta_shift_dur
)
#as an experiment we also plot difference in reff from both
# Long format: vacc_Reff rows from the baseline run plus Reff rows from the
# counterfactual, tagged with the scenario parameters.
reff_df <- #
baseline_reff %>%
select(date, Reff, rep) %>%
rename(vacc_Reff = Reff) %>%
full_join(
counterfactual_reff %>%
select(date, Reff, rep)
) %>%
mutate(
delta_start_date = start_date,
delta_immune_escape = immune_escape,
delta_hosp_modifier = hosp_modifier,
delta_delta_shift_dur = delta_shift_dur
)
# baseline_reff$rts %>%
#   select(date, Reff_median) %>%
#   rename(vacc_Reff = Reff_median) %>%
#   full_join(
#     counterfactual_reff$rts %>%
#       select(date, Reff_median)
#   ) %>%
#   mutate(
#     Reff_diff = Reff_median - vacc_Reff
#   ) %>%
#   select(date, Reff_diff) %>%
#   mutate(
#     delta_start_date = start_date,
#     delta_immune_escape = immune_escape,
#     delta_hosp_modifier = hosp_modifier,
#     delta_delta_shift_dur = delta_shift_dur
#   )
return(list(
plot = plot,
reff = reff_df,
result = deaths_averted
))
}
)
# ---- Outputs ----------------------------------------------------------------
# Consumes `model_fits`, `pmcmc_pars_list`, and `delta_characteristics`, which
# are created earlier in this script; each element of `model_fits` is a list
# with components `plot`, `reff`, and `result`.
# One fitting diagnostic plot per delta scenario, one PDF page each.
#pdf of plots
pdf("fitting_plot.pdf")
map(model_fits, ~print(.x$plot))
dev.off()
# Stack the per-scenario Reff data frames and persist them; the commented
# pipeline below is an earlier plotting variant that was disabled.
#combine reff differences
saveRDS(
do.call(
rbind,
map(
model_fits, ~.x$reff
)
),# %>%
# rename(`Immune Escape:` = delta_immune_escape) %>%
# select(date, Reff_diff, `Immune Escape:`) %>%
# ggplot(aes(x = date, y = Reff_diff,
# colour = `Immune Escape:`, group = `Immune Escape:`)) +
# geom_line() +
# ggpubr::theme_pubclean() +
# labs(x = "Date", y = "Reduction in effective reproduction\nfrom vaccine induced protection"),
"reff_plot.Rds"
)
# Deaths-averted summaries, one row per scenario.
#save results
results_df <- do.call(
rbind,
lapply(model_fits, function(x){x$result})
)
saveRDS(results_df, "res.Rds")
# Weekly deaths per 100k with a labelled vertical line at the (single, via
# unique()) delta introduction date shared by all scenarios.
#plot of the deaths
saveRDS(ggplot(pmcmc_pars_list$data, aes(week_start, (deaths/sum(pmcmc_pars_list$population))*1e5)) +
geomtextpath::geom_textvline(label = "Delta Introduction",
xintercept = unique(delta_characteristics$start_date),
hjust = 0.2,
linetype = 2) +
geom_step(color = "red") +
theme_bw() + ylab("Weekly Deaths per 100,000\n") + xlab("") +
ggpubr::theme_pubr(), "death_curve.Rds")
|
#' Heatmap of cluster-specific APA genes
#'
#' \code{clusterAPAheatmap} draws a heatmap of cluster-specific APA genes to show the degree and direction of APA. This function first selects genes that are significant cluster-specific APA in any cluster. Then, it draws a heatmap with estimated coefficients from logistic regression.
#'
#' @param ECoeffSig_Mat significant APA table returned from \code{APAtest}.
#' @param FDR_P_cutoff The cutoff for FDR-controlled P values of Wald tests. Default to 0.05.
#' @param CoeffCutoff The cutoff for estimated coefficients of logistic regression.
#' @return \code{clusterAPAheatmap} returns a heatmap of cluster-specific APA genes
#'
#' @examples
#' clusterAPAheatmap(ECoeffSig_Mat = result_from_estimateSig$ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2))
#'
clusterAPAheatmap <- function(ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2)){
  # FIX: escape the dot so ".pval" is matched literally; the previous ".pval"
  # pattern treated "." as a regex wildcard and would also match e.g. "Xpval".
  ECoeffSig_Mat.pval <- ECoeffSig_Mat[, str_detect(colnames(ECoeffSig_Mat), "\\.pval")]
  # FIX: the documented contract is FDR control, so adjust with method "fdr"
  # (Benjamini-Hochberg); p.adjust()'s default is Holm, an FWER correction.
  EP.qval <- matrix(nrow = nrow(ECoeffSig_Mat), ncol = ncol(ECoeffSig_Mat.pval))
  for (i in seq_len(ncol(ECoeffSig_Mat.pval))) {
    pval <- ECoeffSig_Mat.pval[, i]
    EP.qval[, i] <- p.adjust(pval, method = "fdr")
  }
  # Keep a gene when at least one cluster passes BOTH the coefficient-size
  # and the FDR cutoffs (logical matrices sum to 2 where both hold).
  keep.EP.coef <- abs(ECoeffSig_Mat[, str_detect(colnames(ECoeffSig_Mat), "\\.coef")]) >= CoeffCutoff
  keep.EP.pval <- EP.qval <= FDR_P_cutoff
  keep.EP <- keep.EP.coef + keep.EP.pval
  keep.EP <- rowSums(keep.EP == 2, na.rm = TRUE) >= 1
  EP.sig.clus <- ECoeffSig_Mat[keep.EP, ]
  # Coefficient matrix for plotting; NA coefficients rendered as 0 (no change).
  y <- as.matrix(EP.sig.clus[, str_detect(colnames(ECoeffSig_Mat), "\\.coef")])
  y[is.na(y)] <- 0
  # Column labels: cluster names taken from the prefix before the first dot,
  # dropping the first (non-cluster) column prefix.
  colnames(y) <- unique(do.call(rbind, str_split(colnames(ECoeffSig_Mat), "\\."))[, 1])[-1]
  colfunc <- colorRampPalette(c("blue", "white", "red"))
  heatmap.2(y, col = colfunc(15),
            dendrogram = "both", srtCol = 45,
            scale = "none", density.info = "none", trace = "none", labRow = NA, key.xlab = "Coefficient")
}
#' Dot plot of selected APA genes
#'
#' @description Draw dot plot of cluster-specific APA genes selected by user. The size of dots shows the deviation of the proportion of long isoforms from grand mean of all transcripts. The color of dots shows the direction of 3' UTR processing (lengthening or shortening).
#'
#' @param ECoeffSig_Mat significant APA table returned from \code{APAtest}.
#' @param FDR_P_cutoff The cutoff for FDR-controlled P values of Wald tests. Default to 0.05.
#' @param CoeffCutoff The cutoff for estimated coefficients of logistic regression.
#' @param APAgenes Character vector, a list of gene IDs user would like to display in the dot plot. The format should be consistent with geneID part of rownames in the significant APA table, transcript IDs, chrIDs, and all other parts of APA ID should be removed. For better interpretation, gene IDs will be converted to gene symbols in the plot.
#' @return \code{APAdotplot} returns a dot plot showing the status of 3' UTR legnthening or shortening for user selected genes.
#'
#' @examples
#' APAdotplot(ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2), APAgenes=str_extract(rownames(ECoeffSig_Mat), "ENSMUSG[:digit:]*"))
#'
APAdotplot <- function(ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2), APAgenes){
# Validate input early: gene IDs must arrive as a character vector.
if (!is.character(APAgenes)) stop("'APAgenes' is not character vector.")
# Cluster names = column-name prefixes before the first dot; the slicing
# below presumably drops non-cluster prefixes (gene/intercept columns) —
# verify against the APAtest output layout.
clustername <- unique(do.call(rbind, str_split(colnames(ECoeffSig_Mat), "\\."))[,1])[-1]
clustername <- clustername[-c(1,2)]
# NOTE(review): "." in ".pval" is a regex wildcard, so this also matches
# column names like "Xpval"; consider "\\.pval" (kept as-is here).
ECoeffSig_Mat.pval <- ECoeffSig_Mat[,str_detect(colnames(ECoeffSig_Mat), ".pval")]
# NOTE(review): p.adjust() defaults to Holm (FWER), while the roxygen
# promises FDR control — confirm whether method = "fdr" was intended.
EP.qval <- matrix(nrow = nrow(ECoeffSig_Mat),ncol = ncol(ECoeffSig_Mat.pval))
for (i in 1:ncol(ECoeffSig_Mat.pval)){
pval <- ECoeffSig_Mat.pval[,i]
EP.qval[,i] <- p.adjust(pval)
}
# One data frame per cluster: gene, coefficient, q value, cluster label;
# rows are kept only when they pass both the coefficient and q-value cutoffs.
# The (ncol-3)/4 cluster count and the -c(1,2) column drop assume a fixed
# APAtest column layout — TODO confirm.
EP.sig.list <- list()
length(EP.sig.list) <- ((ncol(ECoeffSig_Mat)-3)/4)
ECoeffSig_Mat <- ECoeffSig_Mat[,-c(1,2)]
for (i in 1:length(EP.sig.list)){
EP.sig.list[[i]] <- data.frame(Genes=ECoeffSig_Mat$Genes,coef=ECoeffSig_Mat[,(i+1)], qval=EP.qval[,i],cluster=rep(clustername[i], nrow(ECoeffSig_Mat)))
EP.sig.list[[i]] <- na.omit(EP.sig.list[[i]][(abs(ECoeffSig_Mat[,(i+1)])>CoeffCutoff)&(EP.qval[,i]<FDR_P_cutoff),])
}
EP.clus.dotplot <- do.call(rbind, EP.sig.list)
# Dot size = |coefficient|; dot colour = direction (positive coefficient is
# interpreted as the long isoform).
EP.clus.dotplot$abs_Coef <- abs(EP.clus.dotplot$coef)
EP.clus.dotplot$isoform <- ifelse(EP.clus.dotplot$coef>0, "long", "short")
# Species detection from the first gene ID only; all IDs are assumed to be
# from the same species.
if (str_detect(EP.clus.dotplot$Genes, "ENSMUSG")[1]){
EP.clus.dotplot$Genes <- str_extract(EP.clus.dotplot$Genes, "ENSMUSG[:digit:]*")
} else if(str_detect(EP.clus.dotplot$Genes, "ENSG")[1]){
EP.clus.dotplot$Genes <- str_extract(EP.clus.dotplot$Genes, "ENSG[:digit:]*")
}else{
stop("The input gene ID is not ENSEMBL human or mouse.")
}
# Restrict to user-requested genes (case-insensitive match).
EP.clus.dotplot <- EP.clus.dotplot[tolower(EP.clus.dotplot$Genes) %in% tolower(APAgenes),]
# Map ENSEMBL IDs to gene symbols for readable axis labels.
if (str_detect(EP.clus.dotplot$Genes[1],"ENSMUSG")){
EP.clus.dotplot$GeneSymbol <- mapIds(org.Mm.eg.db, keys = str_extract(EP.clus.dotplot$Genes, "ENSMUSG[:digit:]*"), keytype = "ENSEMBL", column="SYMBOL")
} else if (str_detect(EP.clus.dotplot$Genes[1],"ENSG")){
EP.clus.dotplot$GeneSymbol <- mapIds(org.Hs.eg.db, keys = str_extract(EP.clus.dotplot$Genes, "ENSG[:digit:]*"), keytype = "ENSEMBL", column="SYMBOL")
} else {
stop("The species of gene ID is not ENSEMBL human or mouse.")
}
ggplot(EP.clus.dotplot,aes(x=GeneSymbol, y=cluster, size=abs_Coef, color=isoform))+
geom_point()+theme_classic()+theme(axis.text.x = element_text(angle = 45,hjust = 1))
}
| /scMAPA_RPackage/R/visualization.R | permissive | BIT-VS-IT/scMAPA | R | false | false | 5,374 | r | #' Heatmap of cluster-specific APA genes
#'
#' \code{clusterAPAheatmap} draws heatmap of cluster-specific APA genes to show the degree and direction of APA. This function first selects genes that are significant cluster-specific APA in any cluster. Then, it draws heatmap with estimated coefficients from logistic regression.
#'
#' @param ECoeffSig_Mat significant APA table returned from \code{APAtest}.
#' @param FDR_P_cutoff The cutoff for FDR-controlled P values of Wald tests. Default to 0.05.
#' @param CoeffCutoff The cutoff for estimated coefficients of logistic regression.
#' @return \code{clusterAPAheatmap} returns a heatmap of cluster-specific APA genes
#'
#' @examples
#' clusterAPAheatmap(ECoeffSig_Mat = result_from_estimateSig$ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2))
#'
clusterAPAheatmap <- function(ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2)){
ECoeffSig_Mat.pval <- ECoeffSig_Mat[,str_detect(colnames(ECoeffSig_Mat), ".pval")]
EP.qval <- matrix(nrow = nrow(ECoeffSig_Mat),ncol = ncol(ECoeffSig_Mat.pval))
for (i in 1:ncol(ECoeffSig_Mat.pval)){
pval <- ECoeffSig_Mat.pval[,i]
EP.qval[,i] <- p.adjust(pval)
}
keep.EP.coef<-(abs(ECoeffSig_Mat[,str_detect(colnames(ECoeffSig_Mat), ".coef")])>=CoeffCutoff)>0
keep.EP.pval<-(EP.qval<=FDR_P_cutoff)>0
keep.EP <- keep.EP.coef+keep.EP.pval
keep.EP <- rowSums(keep.EP==2, na.rm = T)>=1
EP.sig.clus <- ECoeffSig_Mat[keep.EP,]
y <- as.matrix(EP.sig.clus[,str_detect(colnames(ECoeffSig_Mat), ".coef")])
y[is.na(y)] <- 0
colnames(y) <- unique(do.call(rbind, str_split(colnames(ECoeffSig_Mat), "\\."))[,1])[-1]
colfunc <- colorRampPalette(c("blue", "white", "red"))
heatmap.2(y, col=colfunc(15),
dendrogram="both", srtCol=45,
scale="none", density.info="none", trace="none",labRow = NA, key.xlab = "Coefficient")
}
#' Dot plot of selected APA genes
#'
#' @description Draw dot plot of cluster-specific APA genes selected by user. The size of dots shows the deviation of the proportion of long isoforms from grand mean of all transcripts. The color of dots shows the direction of 3' UTR processing (lengthening or shortening).
#'
#' @param ECoeffSig_Mat significant APA table returned from \code{APAtest}.
#' @param FDR_P_cutoff The cutoff for FDR-controlled P values of Wald tests. Default to 0.05.
#' @param CoeffCutoff The cutoff for estimated coefficients of logistic regression.
#' @param APAgenes Character vector, a list of gene IDs user would like to display in the dot plot. The format should be consistent with geneID part of rownames in the significant APA table, transcript IDs, chrIDs, and all other parts of APA ID should be removed. For better interpretation, gene IDs will be converted to gene symbols in the plot.
#' @return \code{APAdotplot} returns a dot plot showing the status of 3' UTR legnthening or shortening for user selected genes.
#'
#' @examples
#' APAdotplot(ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2), APAgenes=str_extract(rownames(ECoeffSig_Mat), "ENSMUSG[:digit:]*"))
#'
APAdotplot <- function(ECoeffSig_Mat, FDR_P_cutoff=0.05, CoeffCutoff=log(2), APAgenes){
# Validate input early: gene IDs must arrive as a character vector.
if (!is.character(APAgenes)) stop("'APAgenes' is not character vector.")
# Cluster names = column-name prefixes before the first dot; the slicing
# below presumably drops non-cluster prefixes (gene/intercept columns) —
# verify against the APAtest output layout.
clustername <- unique(do.call(rbind, str_split(colnames(ECoeffSig_Mat), "\\."))[,1])[-1]
clustername <- clustername[-c(1,2)]
# NOTE(review): "." in ".pval" is a regex wildcard, so this also matches
# column names like "Xpval"; consider "\\.pval" (kept as-is here).
ECoeffSig_Mat.pval <- ECoeffSig_Mat[,str_detect(colnames(ECoeffSig_Mat), ".pval")]
# NOTE(review): p.adjust() defaults to Holm (FWER), while the roxygen
# promises FDR control — confirm whether method = "fdr" was intended.
EP.qval <- matrix(nrow = nrow(ECoeffSig_Mat),ncol = ncol(ECoeffSig_Mat.pval))
for (i in 1:ncol(ECoeffSig_Mat.pval)){
pval <- ECoeffSig_Mat.pval[,i]
EP.qval[,i] <- p.adjust(pval)
}
# One data frame per cluster: gene, coefficient, q value, cluster label;
# rows are kept only when they pass both the coefficient and q-value cutoffs.
# The (ncol-3)/4 cluster count and the -c(1,2) column drop assume a fixed
# APAtest column layout — TODO confirm.
EP.sig.list <- list()
length(EP.sig.list) <- ((ncol(ECoeffSig_Mat)-3)/4)
ECoeffSig_Mat <- ECoeffSig_Mat[,-c(1,2)]
for (i in 1:length(EP.sig.list)){
EP.sig.list[[i]] <- data.frame(Genes=ECoeffSig_Mat$Genes,coef=ECoeffSig_Mat[,(i+1)], qval=EP.qval[,i],cluster=rep(clustername[i], nrow(ECoeffSig_Mat)))
EP.sig.list[[i]] <- na.omit(EP.sig.list[[i]][(abs(ECoeffSig_Mat[,(i+1)])>CoeffCutoff)&(EP.qval[,i]<FDR_P_cutoff),])
}
EP.clus.dotplot <- do.call(rbind, EP.sig.list)
# Dot size = |coefficient|; dot colour = direction (positive coefficient is
# interpreted as the long isoform).
EP.clus.dotplot$abs_Coef <- abs(EP.clus.dotplot$coef)
EP.clus.dotplot$isoform <- ifelse(EP.clus.dotplot$coef>0, "long", "short")
# Species detection from the first gene ID only; all IDs are assumed to be
# from the same species.
if (str_detect(EP.clus.dotplot$Genes, "ENSMUSG")[1]){
EP.clus.dotplot$Genes <- str_extract(EP.clus.dotplot$Genes, "ENSMUSG[:digit:]*")
} else if(str_detect(EP.clus.dotplot$Genes, "ENSG")[1]){
EP.clus.dotplot$Genes <- str_extract(EP.clus.dotplot$Genes, "ENSG[:digit:]*")
}else{
stop("The input gene ID is not ENSEMBL human or mouse.")
}
# Restrict to user-requested genes (case-insensitive match).
EP.clus.dotplot <- EP.clus.dotplot[tolower(EP.clus.dotplot$Genes) %in% tolower(APAgenes),]
# Map ENSEMBL IDs to gene symbols for readable axis labels.
if (str_detect(EP.clus.dotplot$Genes[1],"ENSMUSG")){
EP.clus.dotplot$GeneSymbol <- mapIds(org.Mm.eg.db, keys = str_extract(EP.clus.dotplot$Genes, "ENSMUSG[:digit:]*"), keytype = "ENSEMBL", column="SYMBOL")
} else if (str_detect(EP.clus.dotplot$Genes[1],"ENSG")){
EP.clus.dotplot$GeneSymbol <- mapIds(org.Hs.eg.db, keys = str_extract(EP.clus.dotplot$Genes, "ENSG[:digit:]*"), keytype = "ENSEMBL", column="SYMBOL")
} else {
stop("The species of gene ID is not ENSEMBL human or mouse.")
}
ggplot(EP.clus.dotplot,aes(x=GeneSymbol, y=cluster, size=abs_Coef, color=isoform))+
geom_point()+theme_classic()+theme(axis.text.x = element_text(angle = 45,hjust = 1))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.