content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
########## Solutions to Homework 2 ##########
#### Problem 4 ####
## @author: David Dobor ##
## Problem 1
# Plot the CDF of a discrete random variable X with support 0:3.
library(ggplot2)  # BUG FIX: ggplot()/geom_* were used without loading ggplot2

x <- 0:3                            # given support of r.v. X
p_x <- c(0.25, 0.125, 0.125, 0.5)   # and given its pmf
F_x <- cumsum(p_x)                  # compute the cdf of X

#### create a data frame that stores coordinates of the points to plot
# Consecutive pairs of rows (1-2, 3-4, ...) delimit one horizontal step of
# the CDF.
coords <- data.frame(x = numeric(), y = numeric())
coords <- rbind(coords, c(-2, 0))         # left tail: F(x) = 0 for x < 0
coords <- rbind(coords, c(x[1], 0))
# NOTE: the original loop header `2:length(x)-1` parses as (2:length(x)) - 1,
# i.e. 1:3 -- it worked only by accident of operator precedence. The intended
# sequence is written explicitly here.
for (i in seq_len(length(x) - 1)) {
  coords <- rbind(coords, c(x[i], F_x[i]))
  coords <- rbind(coords, c(x[i + 1], F_x[i]))
}
coords <- rbind(coords, c(x[length(x)], F_x[length(x)]))
coords <- rbind(coords, c(x[length(x)] + 2, F_x[length(x)]))  # right tail: F = 1

#### now plot the cdf
odd_inds <- seq(1, nrow(coords), by = 2)   # odd indices - start line segments
even_inds <- seq(2, nrow(coords), by = 2)  # even indices - end line segments
# Build a proper data frame of segment endpoints so aes() can map column
# names instead of reaching back into the calling environment.
segs <- data.frame(x = coords[odd_inds, 1], y = coords[odd_inds, 2],
                   xend = coords[even_inds, 1], yend = coords[even_inds, 2])
g <- ggplot()
g <- g + geom_segment(data = segs,
                      mapping = aes(x = x, y = y, xend = xend, yend = yend))
# add the white circles indicating the points of discontinuity
df <- data.frame(x = c(0, 1, 2, 3), y = c(0, F_x[1], F_x[2], F_x[3]))
g <- g + geom_point(data = df, mapping = aes(x = x, y = y),
                    size = 4, shape = 21, fill = "white")
# add the title and axes labels
g <- g +
  xlab("X") +
  ylab("F(X)") +
  ggtitle("Cumulative Distribution of X")   # BUG FIX: "Cmulative" typo
# set where tick marks appear on the axes
g <- g + scale_y_continuous(breaks = c(0, 0.25, 0.375, 0.5, 1))
# BUG FIX: x is numeric, so a continuous (not discrete) scale is needed for
# the requested breaks to be honoured.
g <- g + scale_x_continuous(breaks = c(0, 1, 2, 3))
g <- g + coord_cartesian(xlim = c(-1.5, 4.5))
# add a theme for (arguably) better looks
# (require() tolerated here: all themed variants below are commented out, so
# a missing ggthemes should not abort the script)
require(ggthemes)
# g <- g + theme_gdocs()
# ggsave("./cdf_plot_gdocs.png")
# g <- g + theme_economist() #+ scale_color_economist()
# ggsave("./cdf_plot_econ.png")
# g <- g + theme_wsj()
# ggsave("./cdf_plot_wsj.png", width=3.5, height=3.16, dpi=300)
# g <- g + theme_solarized()
# ggsave("./cdf_plot_solarized.png", dpi=300)
# g <- g + theme_igray()
# ggsave("./cdf_plot_igray.png", dpi=300)
# show the graph
print(g)
ggsave("./cdf_plot.png", dpi = 300)
| /week2/q1hw1.R | no_license | david-dobor/8003 | R | false | false | 2,189 | r | ########## Solutions to Homework 2 ##########
#### Problem 4 ####
## @author: David Dobor ##
## Problem 1
# Plot the CDF of a discrete random variable X with support 0:3.
library(ggplot2)  # BUG FIX: ggplot()/geom_* were used without loading ggplot2

x <- 0:3                            # given support of r.v. X
p_x <- c(0.25, 0.125, 0.125, 0.5)   # and given its pmf
F_x <- cumsum(p_x)                  # compute the cdf of X

#### create a data frame that stores coordinates of the points to plot
# Consecutive pairs of rows (1-2, 3-4, ...) delimit one horizontal step of
# the CDF.
coords <- data.frame(x = numeric(), y = numeric())
coords <- rbind(coords, c(-2, 0))         # left tail: F(x) = 0 for x < 0
coords <- rbind(coords, c(x[1], 0))
# NOTE: the original loop header `2:length(x)-1` parses as (2:length(x)) - 1,
# i.e. 1:3 -- it worked only by accident of operator precedence. The intended
# sequence is written explicitly here.
for (i in seq_len(length(x) - 1)) {
  coords <- rbind(coords, c(x[i], F_x[i]))
  coords <- rbind(coords, c(x[i + 1], F_x[i]))
}
coords <- rbind(coords, c(x[length(x)], F_x[length(x)]))
coords <- rbind(coords, c(x[length(x)] + 2, F_x[length(x)]))  # right tail: F = 1

#### now plot the cdf
odd_inds <- seq(1, nrow(coords), by = 2)   # odd indices - start line segments
even_inds <- seq(2, nrow(coords), by = 2)  # even indices - end line segments
# Build a proper data frame of segment endpoints so aes() can map column
# names instead of reaching back into the calling environment.
segs <- data.frame(x = coords[odd_inds, 1], y = coords[odd_inds, 2],
                   xend = coords[even_inds, 1], yend = coords[even_inds, 2])
g <- ggplot()
g <- g + geom_segment(data = segs,
                      mapping = aes(x = x, y = y, xend = xend, yend = yend))
# add the white circles indicating the points of discontinuity
df <- data.frame(x = c(0, 1, 2, 3), y = c(0, F_x[1], F_x[2], F_x[3]))
g <- g + geom_point(data = df, mapping = aes(x = x, y = y),
                    size = 4, shape = 21, fill = "white")
# add the title and axes labels
g <- g +
  xlab("X") +
  ylab("F(X)") +
  ggtitle("Cumulative Distribution of X")   # BUG FIX: "Cmulative" typo
# set where tick marks appear on the axes
g <- g + scale_y_continuous(breaks = c(0, 0.25, 0.375, 0.5, 1))
# BUG FIX: x is numeric, so a continuous (not discrete) scale is needed for
# the requested breaks to be honoured.
g <- g + scale_x_continuous(breaks = c(0, 1, 2, 3))
g <- g + coord_cartesian(xlim = c(-1.5, 4.5))
# add a theme for (arguably) better looks
# (require() tolerated here: all themed variants below are commented out, so
# a missing ggthemes should not abort the script)
require(ggthemes)
# g <- g + theme_gdocs()
# ggsave("./cdf_plot_gdocs.png")
# g <- g + theme_economist() #+ scale_color_economist()
# ggsave("./cdf_plot_econ.png")
# g <- g + theme_wsj()
# ggsave("./cdf_plot_wsj.png", width=3.5, height=3.16, dpi=300)
# g <- g + theme_solarized()
# ggsave("./cdf_plot_solarized.png", dpi=300)
# g <- g + theme_igray()
# ggsave("./cdf_plot_igray.png", dpi=300)
# show the graph
print(g)
ggsave("./cdf_plot.png", dpi = 300)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/google_map.R
\name{google_map-shiny}
\alias{google_map-shiny}
\alias{google_mapOutput}
\alias{renderGoogle_map}
\title{Shiny bindings for google_map}
\usage{
google_mapOutput(outputId, width = "100\%", height = "400px")
renderGoogle_map(expr, env = parent.frame(), quoted = FALSE)
}
\arguments{
\item{outputId}{output variable to read from}
\item{width, height}{Must be a valid CSS unit (like \code{'100\%'},
\code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
string and have \code{'px'} appended.}
\item{expr}{An expression that generates a google_map}
\item{env}{The environment in which to evaluate \code{expr}.}
\item{quoted}{Is \code{expr} a quoted expression (with \code{quote()})? This
is useful if you want to save an expression in a variable.}
}
\description{
Output and render functions for using google_map within Shiny
applications and interactive Rmd documents.
}
| /man/google_map-shiny.Rd | no_license | fdzul/googleway | R | false | true | 981 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/google_map.R
\name{google_map-shiny}
\alias{google_map-shiny}
\alias{google_mapOutput}
\alias{renderGoogle_map}
\title{Shiny bindings for google_map}
\usage{
google_mapOutput(outputId, width = "100\%", height = "400px")
renderGoogle_map(expr, env = parent.frame(), quoted = FALSE)
}
\arguments{
\item{outputId}{output variable to read from}
\item{width, height}{Must be a valid CSS unit (like \code{'100\%'},
\code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
string and have \code{'px'} appended.}
\item{expr}{An expression that generates a google_map}
\item{env}{The environment in which to evaluate \code{expr}.}
\item{quoted}{Is \code{expr} a quoted expression (with \code{quote()})? This
is useful if you want to save an expression in a variable.}
}
\description{
Output and render functions for using google_map within Shiny
applications and interactive Rmd documents.
}
|
# Total PM2.5 emissions per year for Baltimore City (fips == "24510"),
# drawn as a line plot into plot2.png (device is closed just below).
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(dplyr)
d <- NEI %>%
  filter(fips == "24510") %>%
  group_by(year) %>%
  summarise(s = sum(Emissions))
png(filename = "plot2.png")
plot(d, type = "l")
dev.off() | /plot2.R | no_license | AaBelov/ExploratoryAnalysisWeek3 | R | false | false | 247 | r | NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")  # classification lookup (not used below)
library(dplyr)
# Keep only Baltimore City records (fips 24510), then total emissions per year.
d <- filter(NEI, fips == "24510")
d <- summarise(group_by(d, year), s = sum(Emissions))
# Write a year-vs-total line plot to plot2.png (device closed after this block).
png(filename = "plot2.png")
plot(d, type = "l")
dev.off() |
## ----setup, include = FALSE----------------------------------------------
# knitr chunk options for the vignette: collapse source with its output and
# prefix printed output with "#>".
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
# Basic keyword-in-context (KWIC) query: occurrences of the literal token
# "the" in the bundled `dickensl` data set.
library(kwic)
data(dickensl)
kwic(dickensl, "the")
## ------------------------------------------------------------------------
# Same query, sorting the concordance lines by the right-hand context.
data(dickensl)
k <- kwic(dickensl, "the")
print(k, sort.by="right")
## ------------------------------------------------------------------------
# Regular-expression query (fixed=FALSE); without anchors this also matches
# inside longer words.
data(dickensl)
k <- kwic(dickensl, "(is|are|was)", fixed=FALSE)
print(k, sort.by="right")
## ------------------------------------------------------------------------
# Adding \b word boundaries restricts the regex to whole tokens.
data(dickensl)
k <- kwic(dickensl, "\\b(is|are|was)\\b", fixed=FALSE)
print(k, sort.by="right")
## ------------------------------------------------------------------------
# from/to select a sub-range of the matches to display.
k <- kwic(dickensl, "the")
print(k, sort.by="right", from=3, to=4)
## ------------------------------------------------------------------------
# Context width of 5 on each side with unit="token" (presumably counted in
# tokens rather than characters -- confirm with ?kwic).
data(dickensl)
k <- kwic(dickensl, "the", 5, 5, unit="token")
print(k)
## ------------------------------------------------------------------------
# Re-sort the same concordance by the left-hand context.
print(k, sort.by="left")
## ------------------------------------------------------------------------
# Numeric sort keys; negative/positive presumably select a context position
# left/right of the match -- confirm with ?print.kwic.
print(k, sort.by=-2)
print(k, sort.by=2)
## ------------------------------------------------------------------------
# KWIC on a tm VCorpus built from the plain-text files shipped with the
# package.
d <- system.file("plaintexts", package="kwic")
corpus <- VCorpus(
DirSource(directory=d, encoding="UTF-8"),
readerControl = list(reader=readPlain)
)
kwic(corpus, "the")
## ------------------------------------------------------------------------
# KWIC on a tagged corpus stored as tab-separated files.
d <- system.file("taggedtexts", package="kwic")
files <- dir(d, pattern = "*.txt")
## ------------------------------------------------------------------------
# Read every tagged file, stack them into one data frame, record which file
# each token came from, then search the lemma column.
corpusl <- lapply(
files,
function(x) read.table(
paste(d, x, sep="/"),
quote="", sep="\t", header = TRUE, fileEncoding="ISO-8859-1", stringsAsFactors = FALSE
)
)
corpus <- do.call("rbind", corpusl)
corpus$doc_id <- rep(files, times=sapply(corpusl, nrow))
kwic(corpus, "Paris", token.column="lemme", left=30, right=30) #, unit="token"
| /vignettes/kwic.R | permissive | sylvainloiseau/kwic | R | false | false | 2,106 | r | ## ----setup, include = FALSE----------------------------------------------
# knitr chunk options for the vignette: collapse source with its output and
# prefix printed output with "#>".
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ------------------------------------------------------------------------
# Basic keyword-in-context (KWIC) query: occurrences of the literal token
# "the" in the bundled `dickensl` data set.
library(kwic)
data(dickensl)
kwic(dickensl, "the")
## ------------------------------------------------------------------------
# Same query, sorting the concordance lines by the right-hand context.
data(dickensl)
k <- kwic(dickensl, "the")
print(k, sort.by="right")
## ------------------------------------------------------------------------
# Regular-expression query (fixed=FALSE); without anchors this also matches
# inside longer words.
data(dickensl)
k <- kwic(dickensl, "(is|are|was)", fixed=FALSE)
print(k, sort.by="right")
## ------------------------------------------------------------------------
# Adding \b word boundaries restricts the regex to whole tokens.
data(dickensl)
k <- kwic(dickensl, "\\b(is|are|was)\\b", fixed=FALSE)
print(k, sort.by="right")
## ------------------------------------------------------------------------
# from/to select a sub-range of the matches to display.
k <- kwic(dickensl, "the")
print(k, sort.by="right", from=3, to=4)
## ------------------------------------------------------------------------
# Context width of 5 on each side with unit="token" (presumably counted in
# tokens rather than characters -- confirm with ?kwic).
data(dickensl)
k <- kwic(dickensl, "the", 5, 5, unit="token")
print(k)
## ------------------------------------------------------------------------
# Re-sort the same concordance by the left-hand context.
print(k, sort.by="left")
## ------------------------------------------------------------------------
# Numeric sort keys; negative/positive presumably select a context position
# left/right of the match -- confirm with ?print.kwic.
print(k, sort.by=-2)
print(k, sort.by=2)
## ------------------------------------------------------------------------
# KWIC on a tm VCorpus built from the plain-text files shipped with the
# package.
d <- system.file("plaintexts", package="kwic")
corpus <- VCorpus(
DirSource(directory=d, encoding="UTF-8"),
readerControl = list(reader=readPlain)
)
kwic(corpus, "the")
## ------------------------------------------------------------------------
# KWIC on a tagged corpus stored as tab-separated files.
d <- system.file("taggedtexts", package="kwic")
files <- dir(d, pattern = "*.txt")
## ------------------------------------------------------------------------
# Read every tagged file, stack them into one data frame, record which file
# each token came from, then search the lemma column.
corpusl <- lapply(
files,
function(x) read.table(
paste(d, x, sep="/"),
quote="", sep="\t", header = TRUE, fileEncoding="ISO-8859-1", stringsAsFactors = FALSE
)
)
corpus <- do.call("rbind", corpusl)
corpus$doc_id <- rep(files, times=sapply(corpusl, nrow))
kwic(corpus, "Paris", token.column="lemme", left=30, right=30) #, unit="token"
|
# load packages
library(ggplot2)
library(cowplot)

## Figure 4 equivalent: mean phenotype trajectories --------------------------
i_sim <- read.csv(file = "match comp i.csv", row.names = 1)
j_sim <- read.csv(file = "match comp j.csv", row.names = 1)
generations <- 1:1000
# Name the columns explicitly so the aes() mappings below refer to the data
# frame instead of reaching back into the global environment.
df_i <- data.frame(generations = generations, phenotype = i_sim$V1)
df_j <- data.frame(generations = generations, phenotype = j_sim$V1)
plot_i_nm <- ggplot(data = df_i, aes(x = generations, y = phenotype)) +
  geom_point() + geom_line() + ylim(-2, 2) + xlim(0, 1000) +
  xlab("Generations") + ylab("Mean Phenotype") +
  ggtitle("Mean Non-Matching Mutualism Species i")
# BUG FIX: this title previously said "Species i" for both plots.
plot_j_nm <- ggplot(data = df_j, aes(x = generations, y = phenotype)) +
  geom_point() + geom_line() + ylim(-2, 2) + xlim(0, 1000) +
  xlab("Generations") + ylab("Mean Phenotype") +
  ggtitle("Mean Non-Matching Mutualism Species j")

## Figure 3 equivalent: distribution of final variances ----------------------
setwd("~/simulation_output/out_mut_match/")
list.filenames <- list.files(pattern = ".rds")
list.data.i <- list()
list.data.j <- list()
# read rds files (the same files feed both species; the species-specific
# columns are extracted below)
for (i in seq_along(list.filenames)) {
  list.data.i[[i]] <- readRDS(list.filenames[i])
}
for (i in seq_along(list.filenames)) {
  list.data.j[[i]] <- readRDS(list.filenames[i])
}
# mean final variance (column 10) per simulation, species i
end_variances_i <- list()
for (i in seq_along(list.data.i)) {
  var_i <- as.data.frame(list.data.i[[i]]$pop_var_i)
  end_variances_i[[i]] <- mean(var_i[, 10], na.rm = TRUE)
}
# mean final variance per simulation, species j (loop variable renamed from
# the misleading `var_i` used in the original)
end_variances_j <- list()
for (i in seq_along(list.data.j)) {
  var_j <- as.data.frame(list.data.j[[i]]$pop_var_j)
  end_variances_j[[i]] <- mean(var_j[, 10], na.rm = TRUE)
}
end_var_i <- t(as.data.frame(end_variances_i))
end_var_j <- t(as.data.frame(end_variances_j))
var_i_fig_mm <- qplot(end_var_i[, 1], geom = "histogram", binwidth = 0.02,
                      xlab = "Final Variance", ylab = "Simulations", xlim = c(-0.02, 0.4))
var_j_fig_mm <- qplot(end_var_j[, 1], geom = "histogram", binwidth = 0.02,
                      xlab = "Final Variance", ylab = "Simulations", xlim = c(-0.02, 0.4))
plot_grid(var_i_fig_mm, var_j_fig_mm)
end_var <- cbind(end_var_i, end_var_j)
write.csv(end_var, file = "nonmatching_mutualism_variance.csv")
| /R/making_figures_nonmatch_mut.R | permissive | kmkaur/coevolver | R | false | false | 2,101 | r | #load packages
library(ggplot2)
library(cowplot)

## Figure 4 equivalent: mean phenotype trajectories --------------------------
i_sim <- read.csv(file = "match comp i.csv", row.names = 1)
j_sim <- read.csv(file = "match comp j.csv", row.names = 1)
generations <- 1:1000
# Name the columns explicitly so the aes() mappings below refer to the data
# frame instead of reaching back into the global environment.
df_i <- data.frame(generations = generations, phenotype = i_sim$V1)
df_j <- data.frame(generations = generations, phenotype = j_sim$V1)
plot_i_nm <- ggplot(data = df_i, aes(x = generations, y = phenotype)) +
  geom_point() + geom_line() + ylim(-2, 2) + xlim(0, 1000) +
  xlab("Generations") + ylab("Mean Phenotype") +
  ggtitle("Mean Non-Matching Mutualism Species i")
# BUG FIX: this title previously said "Species i" for both plots.
plot_j_nm <- ggplot(data = df_j, aes(x = generations, y = phenotype)) +
  geom_point() + geom_line() + ylim(-2, 2) + xlim(0, 1000) +
  xlab("Generations") + ylab("Mean Phenotype") +
  ggtitle("Mean Non-Matching Mutualism Species j")

## Figure 3 equivalent: distribution of final variances ----------------------
setwd("~/simulation_output/out_mut_match/")
list.filenames <- list.files(pattern = ".rds")
list.data.i <- list()
list.data.j <- list()
# read rds files (the same files feed both species; the species-specific
# columns are extracted below)
for (i in seq_along(list.filenames)) {
  list.data.i[[i]] <- readRDS(list.filenames[i])
}
for (i in seq_along(list.filenames)) {
  list.data.j[[i]] <- readRDS(list.filenames[i])
}
# mean final variance (column 10) per simulation, species i
end_variances_i <- list()
for (i in seq_along(list.data.i)) {
  var_i <- as.data.frame(list.data.i[[i]]$pop_var_i)
  end_variances_i[[i]] <- mean(var_i[, 10], na.rm = TRUE)
}
# mean final variance per simulation, species j (loop variable renamed from
# the misleading `var_i` used in the original)
end_variances_j <- list()
for (i in seq_along(list.data.j)) {
  var_j <- as.data.frame(list.data.j[[i]]$pop_var_j)
  end_variances_j[[i]] <- mean(var_j[, 10], na.rm = TRUE)
}
end_var_i <- t(as.data.frame(end_variances_i))
end_var_j <- t(as.data.frame(end_variances_j))
var_i_fig_mm <- qplot(end_var_i[, 1], geom = "histogram", binwidth = 0.02,
                      xlab = "Final Variance", ylab = "Simulations", xlim = c(-0.02, 0.4))
var_j_fig_mm <- qplot(end_var_j[, 1], geom = "histogram", binwidth = 0.02,
                      xlab = "Final Variance", ylab = "Simulations", xlim = c(-0.02, 0.4))
plot_grid(var_i_fig_mm, var_j_fig_mm)
end_var <- cbind(end_var_i, end_var_j)
write.csv(end_var, file = "nonmatching_mutualism_variance.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{simone}
\alias{simone}
\title{some comment}
\usage{
simone(G, eta, epsilon, T)
}
\description{
some comment
}
\keyword{internal}
| /LongMemoryTS/man/simone.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | true | 239 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{simone}
\alias{simone}
\title{some comment}
\usage{
simone(G, eta, epsilon, T)
}
\description{
some comment
}
\keyword{internal}
|
# Load amino-acid chain sequences for mesophilic and thermophilic bacteria
# and set up the hydrophobicity scales used to turn them into signals.
library(tictoc)
tic()
# scan() reads each whitespace-separated protein as one string; strsplit with
# split = NULL then breaks every protein into a character vector of single
# residues, giving a list of 540 proteins per file.
meso_seq <- strsplit(scan("540_Mesophiles.txt", what = "list", sep = " "), split = NULL)
therm_seq <- strsplit(scan("540_Thermophiles.txt", what = "list", sep = " "), split = NULL)
# (A hand-written protein-name index used to live here; it was already
# retired as "no longer relevant".)
# Residue letters in the fixed order shared by all three scales below.
residues <- c("A", "C", "D", "E", "F", "G", "H", "I", "K", "L",
              "M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y")
# Kyte-Doolittle scale: raw values plus a residue -> value lookup list.
KD_num <- c(1.8, 2.5, -3.5, -3.5, 2.8, -0.40, -3.20, 4.5, -3.9, 3.8,
            1.9, -3.5, -1.6, -3.5, -4.5, -0.8, -0.7, 4.20, -0.9, -1.3)
KD <- as.list(setNames(KD_num, residues))
mean_kd <- mean(KD_num)
# Hopp-Woods scale.
HW_num <- c(-0.5, -1.0, 3.0, 3.0, -2.5, 0.0, -0.5, -1.8, 3.00, -1.8,
            -1.3, 0.2, 0.0, 0.20, 3.00, 0.3, -0.4, -1.5, -3.40, -2.3)
HW <- as.list(setNames(HW_num, residues))
mean_hw <- mean(HW_num)
# Engelman-Steitz scale.
ES_num <- c(1.6, 2.0, -9.2, -8.2, 3.7, 1, -3.0, 3.10, -8.80, 2.8,
            3.4, -4.8, -0.2, -4.1, -12.3, 0.60, 1.20, 2.60, 1.9, -0.7)
ES <- as.list(setNames(ES_num, residues))
mean_ES <- mean(ES_num)
# This first loop generates hydrophobicity signals for the mesophile proteins.
# Each output is a 1 x 540 list-matrix whose j-th cell holds the numeric
# signal (one value per residue) of protein j under the given scale.
KD_meso <- matrix(list(), 1, 540)
HW_meso <- matrix(list(), 1, 540)
ES_meso <- matrix(list(), 1, 540)
for (j in seq_along(meso_seq)) {
  my_seq <- meso_seq[[j]]
  # Vectorized lookup: indexing the scale list by the whole residue vector
  # replaces the original residue-by-residue inner loop, which silently
  # coerced the preallocated numeric vectors into lists before unlist()ing
  # them anyway. The resulting named numeric vectors are identical.
  KD_meso[[1, j]] <- unlist(KD[my_seq])
  HW_meso[[1, j]] <- unlist(HW[my_seq])
  ES_meso[[1, j]] <- unlist(ES[my_seq])
}
# And now for the thermophiles: same construction as the mesophile loop.
KD_therm <- matrix(list(), 1, 540)
HW_therm <- matrix(list(), 1, 540)
ES_therm <- matrix(list(), 1, 540)
for (j in seq_along(therm_seq)) {
  my_seq <- therm_seq[[j]]
  # Vectorized lookup replacing the original per-residue inner loop (which
  # silently coerced its preallocated numeric vectors into lists).
  KD_therm[[1, j]] <- unlist(KD[my_seq])
  HW_therm[[1, j]] <- unlist(HW[my_seq])
  ES_therm[[1, j]] <- unlist(ES[my_seq])
}
toc()
# Data sets now in scope:
#   meso_seq, therm_seq           - lists of proteins as residue vectors
#   KD_/HW_/ES_{meso,therm}       - 1 x 540 list-matrices of numeric signals
# Number of distinct KD values per protein; the ratio below auto-prints when
# the script is run at top level.
unique_meso <- vapply(KD_meso, function(sig) length(unique(sig)), integer(1))
unique_therm <- vapply(KD_therm, function(sig) length(unique(sig)), integer(1))
unique_meso / unique_therm
# playing with overlapping histograms
# meso <- unlist(KD_meso[1])
# therm <- unlist(KD_therm[1])
# hist(meso, breaks = 17, col = rgb(1,0,0,0.5), main = "overlapping Histogram")
# hist(therm, breaks = 17, col = rgb(0,0,1,0.5), add = T)
# box()
## Mean and Variance -----------------------------------------------------------------------------------------------------------------------------
# Per-protein mean and variance of each hydrophobicity signal.
# Kyte-Doolittle
mean_kd_meso <- sapply(KD_meso, mean)
var_kd_meso <- sapply(KD_meso, var)
mean_kd_therm <- sapply(KD_therm, mean)
# BUG FIX: the thermophile variances previously overwrote the *_meso
# variables (copy-paste error); each now gets its own *_therm name.
var_kd_therm <- sapply(KD_therm, var)
# Hopp-Woods
mean_HW_meso <- sapply(HW_meso, mean)
var_HW_meso <- sapply(HW_meso, var)
mean_HW_therm <- sapply(HW_therm, mean)
var_HW_therm <- sapply(HW_therm, var)
# Engelman-Steitz
mean_ES_meso <- sapply(ES_meso, mean)
var_ES_meso <- sapply(ES_meso, var)
mean_ES_therm <- sapply(ES_therm, mean)
var_ES_therm <- sapply(ES_therm, var)
# ratio of each protein's mean to the overall scale mean
ratio_kd_meso <- mean_kd_meso / mean_kd
ratio_kd_therm <- mean_kd_therm / mean_kd
ratio_hw_meso <- mean_HW_meso / mean_hw
ratio_hw_therm <- mean_HW_therm / mean_hw
ratio_ES_meso <- mean_ES_meso / mean_ES
ratio_ES_therm <- mean_ES_therm / mean_ES
# One scatter plot per scale/population; the horizontal line marks ratio = 1.
plot(ratio_kd_meso, xlab = "indexing", ylab = "ratio of mean value per protein with KD mean", main = "KD mesophile mean/ mean(KD)")
lines(rep(1, 540))
dev.new()
plot(ratio_kd_therm, xlab = "indexing", ylab = "ratio of mean value per protein with KD mean", main = "KD thermophile mean / mean(KD)")
lines(rep(1, 540))
dev.new()
plot(ratio_hw_meso, xlab = "indexing", ylab = "ratio of mean value per protein with HW mean", main = "HW mesophile mean/ mean(HW)")
lines(rep(1, 540))
dev.new()
plot(ratio_hw_therm, xlab = "indexing", ylab = "ratio of mean value per protein with HW mean", main = "HW thermophile mean/ mean(HW)")
lines(rep(1, 540))
dev.new()
plot(ratio_ES_meso, xlab = "indexing", ylab = "ratio of mean value per protein with ES mean", main = "ES mesophile mean/ mean(ES)")
lines(rep(1, 540))
dev.new()
plot(ratio_ES_therm, xlab = "indexing", ylab = "ratio of mean value per protein with ES mean", main = " ES thermophile mean/ mean(ES)")
lines(rep(1, 540))
## KS Test (two sample)------------------------------------------------------------------------------------------------------------------------
# Two-sample Kolmogorov-Smirnov test per protein (thermophile vs mesophile
# signal). Row 1 of each result matrix stores the D statistic, row 2 the
# p-value. The p <= 0.05 counts tally proteins whose distributions differ
# significantly.
# Kyte-Doolittle
KS_test_KD <- matrix(list(), 2, 540)
for (j in 1:540) {
  pop <- ks.test(unlist(KD_therm[j]), unlist(KD_meso[j]))
  KS_test_KD[1, j] <- pop$statistic
  KS_test_KD[2, j] <- pop$p.value
}
p_values <- KS_test_KD[2, ]
# Replaces the original with(p_values, ...) construct, which only worked
# because `p_values` was resolved from the enclosing environment.
p_0.5_KD_KS <- sum(unlist(p_values) <= 0.05)
# Hopp-Woods
KS_test_HW <- matrix(list(), 2, 540)
for (j in 1:540) {
  pop <- ks.test(unlist(HW_therm[j]), unlist(HW_meso[j]))
  KS_test_HW[1, j] <- pop$statistic
  KS_test_HW[2, j] <- pop$p.value
}
p_values_HW_KS <- KS_test_HW[2, ]
p_0.5_HW_KS <- sum(unlist(p_values_HW_KS) <= 0.05)
# Engelman-Steitz
KS_test_ES <- matrix(list(), 2, 540)
for (j in 1:540) {
  pop <- ks.test(unlist(ES_therm[j]), unlist(ES_meso[j]))
  KS_test_ES[1, j] <- pop$statistic
  KS_test_ES[2, j] <- pop$p.value
}
p_values_ES_KS <- KS_test_ES[2, ]
p_0.5_ES_KS <- sum(unlist(p_values_ES_KS) <= 0.05)
#----------------------------------------------------------------------------------------------------------------------------------------------
# Anderson-Darling normality test
# Do normality first, this makes sense: count, per group and scale, how many
# protein signals look normally distributed (p >= 0.05).
library(nortest)
ad_meso_kd <- sum(sapply(KD_meso, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_therm_kd <- sum(sapply(KD_therm, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_meso_hw <- sum(sapply(HW_meso, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_therm_hw <- sum(sapply(HW_therm, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_meso_es <- sum(sapply(ES_meso, function(x) ad.test(unlist(x))$p.value) >= 0.05)
# BUG FIX: this last count used a strict `> 0.05` while all the others use
# `>= 0.05`; made consistent.
ad_therm_es <- sum(sapply(ES_therm, function(x) ad.test(unlist(x))$p.value) >= 0.05)
# None of these are normally distributed
#---------------------------------------------------------------------------------------------------------------------------------------------
# Lets generate some Histograms!!!!!
| /Data_IN.R | no_license | JackLinehan/Wavelet-Analysis-of-Hydrophobicity-Signals-in-mesophile-and-thermophile-bacteria | R | false | false | 8,898 | r | # Load in amino acid chain sequences for meso and thermo bacteria and generate hydrophobicity signals
# Load amino-acid chain sequences for mesophilic and thermophilic bacteria
# and set up the hydrophobicity scales used to turn them into signals.
library(tictoc)
tic()
# scan() reads each whitespace-separated protein as one string; strsplit with
# split = NULL then breaks every protein into a character vector of single
# residues, giving a list of 540 proteins per file.
meso_seq <- strsplit(scan("540_Mesophiles.txt", what = "list", sep = " "), split = NULL)
therm_seq <- strsplit(scan("540_Thermophiles.txt", what = "list", sep = " "), split = NULL)
# (A hand-written protein-name index used to live here; it was already
# retired as "no longer relevant".)
# Residue letters in the fixed order shared by all three scales below.
residues <- c("A", "C", "D", "E", "F", "G", "H", "I", "K", "L",
              "M", "N", "P", "Q", "R", "S", "T", "V", "W", "Y")
# Kyte-Doolittle scale: raw values plus a residue -> value lookup list.
KD_num <- c(1.8, 2.5, -3.5, -3.5, 2.8, -0.40, -3.20, 4.5, -3.9, 3.8,
            1.9, -3.5, -1.6, -3.5, -4.5, -0.8, -0.7, 4.20, -0.9, -1.3)
KD <- as.list(setNames(KD_num, residues))
mean_kd <- mean(KD_num)
# Hopp-Woods scale.
HW_num <- c(-0.5, -1.0, 3.0, 3.0, -2.5, 0.0, -0.5, -1.8, 3.00, -1.8,
            -1.3, 0.2, 0.0, 0.20, 3.00, 0.3, -0.4, -1.5, -3.40, -2.3)
HW <- as.list(setNames(HW_num, residues))
mean_hw <- mean(HW_num)
# Engelman-Steitz scale.
ES_num <- c(1.6, 2.0, -9.2, -8.2, 3.7, 1, -3.0, 3.10, -8.80, 2.8,
            3.4, -4.8, -0.2, -4.1, -12.3, 0.60, 1.20, 2.60, 1.9, -0.7)
ES <- as.list(setNames(ES_num, residues))
mean_ES <- mean(ES_num)
# This first loop generates hydrophobicity signals for the mesophile proteins.
# Each output is a 1 x 540 list-matrix whose j-th cell holds the numeric
# signal (one value per residue) of protein j under the given scale.
KD_meso <- matrix(list(), 1, 540)
HW_meso <- matrix(list(), 1, 540)
ES_meso <- matrix(list(), 1, 540)
for (j in seq_along(meso_seq)) {
  my_seq <- meso_seq[[j]]
  # Vectorized lookup: indexing the scale list by the whole residue vector
  # replaces the original residue-by-residue inner loop, which silently
  # coerced the preallocated numeric vectors into lists before unlist()ing
  # them anyway. The resulting named numeric vectors are identical.
  KD_meso[[1, j]] <- unlist(KD[my_seq])
  HW_meso[[1, j]] <- unlist(HW[my_seq])
  ES_meso[[1, j]] <- unlist(ES[my_seq])
}
# And now for the thermophiles: same construction as the mesophile loop.
KD_therm <- matrix(list(), 1, 540)
HW_therm <- matrix(list(), 1, 540)
ES_therm <- matrix(list(), 1, 540)
for (j in seq_along(therm_seq)) {
  my_seq <- therm_seq[[j]]
  # Vectorized lookup replacing the original per-residue inner loop (which
  # silently coerced its preallocated numeric vectors into lists).
  KD_therm[[1, j]] <- unlist(KD[my_seq])
  HW_therm[[1, j]] <- unlist(HW[my_seq])
  ES_therm[[1, j]] <- unlist(ES[my_seq])
}
toc()
# Data sets now in scope:
#   meso_seq, therm_seq           - lists of proteins as residue vectors
#   KD_/HW_/ES_{meso,therm}       - 1 x 540 list-matrices of numeric signals
# Number of distinct KD values per protein; the ratio below auto-prints when
# the script is run at top level.
unique_meso <- vapply(KD_meso, function(sig) length(unique(sig)), integer(1))
unique_therm <- vapply(KD_therm, function(sig) length(unique(sig)), integer(1))
unique_meso / unique_therm
# playing with overlapping histograms
# meso <- unlist(KD_meso[1])
# therm <- unlist(KD_therm[1])
# hist(meso, breaks = 17, col = rgb(1,0,0,0.5), main = "overlapping Histogram")
# hist(therm, breaks = 17, col = rgb(0,0,1,0.5), add = T)
# box()
## Mean and Variance -----------------------------------------------------------------------------------------------------------------------------
# Per-protein mean and variance of each hydrophobicity signal.
# Kyte-Doolittle
mean_kd_meso <- sapply(KD_meso, mean)
var_kd_meso <- sapply(KD_meso, var)
mean_kd_therm <- sapply(KD_therm, mean)
# BUG FIX: the thermophile variances previously overwrote the *_meso
# variables (copy-paste error); each now gets its own *_therm name.
var_kd_therm <- sapply(KD_therm, var)
# Hopp-Woods
mean_HW_meso <- sapply(HW_meso, mean)
var_HW_meso <- sapply(HW_meso, var)
mean_HW_therm <- sapply(HW_therm, mean)
var_HW_therm <- sapply(HW_therm, var)
# Engelman-Steitz
mean_ES_meso <- sapply(ES_meso, mean)
var_ES_meso <- sapply(ES_meso, var)
mean_ES_therm <- sapply(ES_therm, mean)
var_ES_therm <- sapply(ES_therm, var)
# ratio of each protein's mean to the overall scale mean
ratio_kd_meso <- mean_kd_meso / mean_kd
ratio_kd_therm <- mean_kd_therm / mean_kd
ratio_hw_meso <- mean_HW_meso / mean_hw
ratio_hw_therm <- mean_HW_therm / mean_hw
ratio_ES_meso <- mean_ES_meso / mean_ES
ratio_ES_therm <- mean_ES_therm / mean_ES
# One scatter plot per scale/population; the horizontal line marks ratio = 1.
plot(ratio_kd_meso, xlab = "indexing", ylab = "ratio of mean value per protein with KD mean", main = "KD mesophile mean/ mean(KD)")
lines(rep(1, 540))
dev.new()
plot(ratio_kd_therm, xlab = "indexing", ylab = "ratio of mean value per protein with KD mean", main = "KD thermophile mean / mean(KD)")
lines(rep(1, 540))
dev.new()
plot(ratio_hw_meso, xlab = "indexing", ylab = "ratio of mean value per protein with HW mean", main = "HW mesophile mean/ mean(HW)")
lines(rep(1, 540))
dev.new()
plot(ratio_hw_therm, xlab = "indexing", ylab = "ratio of mean value per protein with HW mean", main = "HW thermophile mean/ mean(HW)")
lines(rep(1, 540))
dev.new()
plot(ratio_ES_meso, xlab = "indexing", ylab = "ratio of mean value per protein with ES mean", main = "ES mesophile mean/ mean(ES)")
lines(rep(1, 540))
dev.new()
plot(ratio_ES_therm, xlab = "indexing", ylab = "ratio of mean value per protein with ES mean", main = " ES thermophile mean/ mean(ES)")
lines(rep(1, 540))
## KS Test (two sample)------------------------------------------------------------------------------------------------------------------------
# Two-sample Kolmogorov-Smirnov test per protein (thermophile vs mesophile
# signal). Row 1 of each result matrix stores the D statistic, row 2 the
# p-value. The p <= 0.05 counts tally proteins whose distributions differ
# significantly.
# Kyte-Doolittle
KS_test_KD <- matrix(list(), 2, 540)
for (j in 1:540) {
  pop <- ks.test(unlist(KD_therm[j]), unlist(KD_meso[j]))
  KS_test_KD[1, j] <- pop$statistic
  KS_test_KD[2, j] <- pop$p.value
}
p_values <- KS_test_KD[2, ]
# Replaces the original with(p_values, ...) construct, which only worked
# because `p_values` was resolved from the enclosing environment.
p_0.5_KD_KS <- sum(unlist(p_values) <= 0.05)
# Hopp-Woods
KS_test_HW <- matrix(list(), 2, 540)
for (j in 1:540) {
  pop <- ks.test(unlist(HW_therm[j]), unlist(HW_meso[j]))
  KS_test_HW[1, j] <- pop$statistic
  KS_test_HW[2, j] <- pop$p.value
}
p_values_HW_KS <- KS_test_HW[2, ]
p_0.5_HW_KS <- sum(unlist(p_values_HW_KS) <= 0.05)
# Engelman-Steitz
KS_test_ES <- matrix(list(), 2, 540)
for (j in 1:540) {
  pop <- ks.test(unlist(ES_therm[j]), unlist(ES_meso[j]))
  KS_test_ES[1, j] <- pop$statistic
  KS_test_ES[2, j] <- pop$p.value
}
p_values_ES_KS <- KS_test_ES[2, ]
p_0.5_ES_KS <- sum(unlist(p_values_ES_KS) <= 0.05)
#----------------------------------------------------------------------------------------------------------------------------------------------
# Anderson-Darling
# Do normality first, this makes sense
library(nortest)
# For each scale and population, count how many of the 540 per-protein score
# distributions are consistent with normality (AD-test p-value >= 0.05).
ad_meso_kd <- sum(sapply(KD_meso, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_therm_kd <- sum(sapply(KD_therm, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_meso_hw <- sum(sapply(HW_meso, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_therm_hw <- sum(sapply(HW_therm, function(x) ad.test(unlist(x))$p.value) >= 0.05)
ad_meso_es <- sum(sapply(ES_meso, function(x) ad.test(unlist(x))$p.value) >= 0.05)
# BUG FIX: this line used `> 0.05` while the five counts above use `>= 0.05`;
# the threshold is now consistent so the six counts are directly comparable.
ad_therm_es <- sum(sapply(ES_therm, function(x) ad.test(unlist(x))$p.value) >= 0.05)
# None of these are normally distributed
#---------------------------------------------------------------------------------------------------------------------------------------------
# Lets generate some Histograms!!!!!
|
#' Global Two-Sample Test for Network-Valued Data
#'
#' This function carries out an hypothesis test where the null hypothesis is
#' that the two populations of networks share the same underlying probabilistic
#' distribution against the alternative hypothesis that the two populations come
#' from different distributions. The test is performed in a non-parametric
#' fashion using a permutational framework in which several statistics can be
#' used, together with several choices of network matrix representations and
#' distances between networks.
#'
#' @param x An \code{\link{nvd}} object listing networks in sample 1.
#' @param y An \code{\link{nvd}} object listing networks in sample 2.
#' @param representation A string specifying the desired type of representation,
#' among: \code{"adjacency"}, \code{"laplacian"} and \code{"modularity"}.
#' Defaults to \code{"adjacency"}.
#' @param distance A string specifying the chosen distance for calculating the
#' test statistic, among: \code{"hamming"}, \code{"frobenius"},
#' \code{"spectral"} and \code{"root-euclidean"}. Defaults to
#' \code{"frobenius"}.
#' @param stats A character vector specifying the chosen test statistic(s),
#' among: `"original_edge_count"`, `"generalized_edge_count"`,
#' `"weighted_edge_count"`, `"student_euclidean"`, `"welch_euclidean"` or any
#' statistics based on inter-point distances available in the **flipr**
#' package: `"flipr:student_ip"`, `"flipr:fisher_ip"`, `"flipr:bg_ip"`,
#'   `"flipr:energy_ip"`, `"flipr:cq_ip"`. Defaults to `c("flipr:t_ip",
#'   "flipr:f_ip")`.
#' @param B The number of permutation or the tolerance. If this number is lower
#' than \code{1}, it is intended as a tolerance. Otherwise, it is intended as
#' the number of required permutations. Defaults to `1000L`.
#' @param test A character string specifying the formula to be used to compute
#' the permutation p-value. Choices are `"estimate"`, `"upper_bound"` and
#' `"exact"`. Defaults to `"exact"` which provides exact tests.
#' @param k An integer specifying the density of the minimum spanning tree used
#' for the edge count statistics. Defaults to `5L`.
#' @param seed An integer for specifying the seed of the random generator for
#' result reproducibility. Defaults to `NULL`.
#'
#' @return A \code{\link[base]{list}} with three components: the value of the
#' statistic for the original two samples, the p-value of the resulting
#' permutation test and a numeric vector storing the values of the permuted
#' statistics.
#' @export
#'
#' @examples
#' n <- 10L
#'
#' # Two different models for the two populations
#' x <- nvd("smallworld", n)
#' y <- nvd("pa", n)
#' t1 <- test2_global(x, y, representation = "modularity")
#' t1$pvalue
#'
#' # Same model for the two populations
#' x <- nvd("smallworld", n)
#' y <- nvd("smallworld", n)
#' t2 <- test2_global(x, y, representation = "modularity")
#' t2$pvalue
test2_global <- function(x, y,
                         representation = "adjacency",
                         distance = "frobenius",
                         stats = c("flipr:t_ip", "flipr:f_ip"),
                         B = 1000L,
                         test = "exact",
                         k = 5L,
                         seed = NULL) {
  # NOTE(review): local_seed() is also reached with the NULL default; confirm
  # withr accepts NULL here, otherwise guard with `if (!is.null(seed))`.
  withr::local_seed(seed)
  n1 <- length(x)
  n2 <- length(y)
  n <- n1 + n2
  representation <- match.arg(
    representation,
    c("adjacency", "laplacian", "modularity", "transitivity")
  )
  distance <- match.arg(
    distance,
    c("hamming", "frobenius", "spectral", "root-euclidean")
  )
  # Frechet-mean statistics operate on matrix representations while the other
  # statistics operate on pairwise distances; mixing the two families would be
  # meaningless, so it is rejected outright.
  use_frechet_stats <- any(grepl("student_euclidean", stats)) ||
    any(grepl("welch_euclidean", stats))
  if (use_frechet_stats &&
      (any(grepl("_ip", stats)) ||
       any(grepl("edge_count", stats))))
    cli::cli_abort("It is not possible to mix statistics based on Frechet means and statistics based on inter-point distances.")
  ecp <- NULL
  if (use_frechet_stats)
    d <- repr_nvd(x, y, representation = representation)
  else {
    d <- dist_nvd(x, y, representation = representation, distance = distance)
    if (any(grepl("edge_count", stats)))
      ecp <- edge_count_global_variables(d, n1, k = k)
  }
  # Permutations act directly on the two samples, so the null specification is
  # simply the identity.
  null_spec <- function(y, parameters) {
    y
  }
  # Resolve statistic names: "pkg:name" is looked up as pkg::stat_name, while a
  # bare "name" resolves to a local stat_name function.
  stat_functions <- stats %>%
    strsplit(split = ":") %>%
    purrr::map(~ {
      if (length(.x) == 1) {
        s <- paste0("stat_", .x)
        return(rlang::as_function(s))
      }
      s <- paste0("stat_", .x[2])
      getExportedValue(.x[1], s)
    })
  # seq_along() instead of 1:length() guards against a zero-length `stats`.
  stat_assignments <- list(delta = seq_along(stat_functions))
  # A "dist" object carries all pairwise distances; flipr then only needs the
  # size of the first sample.  Otherwise pass the two representation lists.
  if (inherits(d, "dist")) {
    xx <- d
    yy <- as.integer(n1)
  } else {
    xx <- d[1:n1]
    yy <- d[(n1 + 1):(n1 + n2)]
  }
  pf <- flipr::PlausibilityFunction$new(
    null_spec = null_spec,
    stat_functions = stat_functions,
    stat_assignments = stat_assignments,
    xx, yy,
    seed = seed
  )
  pf$set_nperms(B)
  pf$set_pvalue_formula(test)
  pf$set_alternative("right_tail")
  # Returns a list with the observed statistic, the permutation p-value and
  # the permuted statistics (keep_null_distribution = TRUE).
  pf$get_value(
    parameters = 0,
    edge_count_prep = ecp,
    keep_null_distribution = TRUE
  )
}
#' Local Two-Sample Test for Network-Valued Data
#'
#' @inheritParams test2_global
#' @param partition Either a list or an integer vector specifying vertex
#' memberships into partition elements.
#' @param alpha Significance level for hypothesis testing. If set to 1, the
#' function outputs properly adjusted p-values. If lower than 1, then only
#' p-values lower than alpha are properly adjusted. Defaults to `0.05`.
#' @param verbose Boolean specifying whether information on intermediate tests
#' should be printed in the process (default: \code{FALSE}).
#'
#' @return A length-2 list reporting the adjusted p-values of each element of
#' the partition for the intra- and inter-tests.
#' @export
#'
#' @examples
#' n <- 10
#' p1 <- matrix(
#' data = c(0.1, 0.4, 0.1, 0.4,
#' 0.4, 0.4, 0.1, 0.4,
#' 0.1, 0.1, 0.4, 0.4,
#' 0.4, 0.4, 0.4, 0.4),
#' nrow = 4,
#' ncol = 4,
#' byrow = TRUE
#' )
#' p2 <- matrix(
#' data = c(0.1, 0.4, 0.4, 0.4,
#' 0.4, 0.4, 0.4, 0.4,
#' 0.4, 0.4, 0.1, 0.1,
#' 0.4, 0.4, 0.1, 0.4),
#' nrow = 4,
#' ncol = 4,
#' byrow = TRUE
#' )
#' sim <- sample2_sbm(n, 68, p1, c(17, 17, 17, 17), p2, seed = 1234)
#' m <- as.integer(c(rep(1, 17), rep(2, 17), rep(3, 17), rep(4, 17)))
#' test2_local(sim$x, sim$y, m,
#' seed = 1234,
#' alpha = 0.05,
#' B = 100)
test2_local <- function(x, y, partition,
                        representation = "adjacency",
                        distance = "frobenius",
                        stats = c("flipr:t_ip", "flipr:f_ip"),
                        B = 1000L,
                        alpha = 0.05,
                        test = "exact",
                        k = 5L,
                        seed = NULL,
                        verbose = FALSE) {
  # Creating the sigma-algebra generated by the partition
  partition <- as_vertex_partition(partition)
  E <- names(partition)
  sa <- generate_sigma_algebra(partition)
  psize <- length(sa)
  # Initialize output for intra-adjusted p-values: one row per element
  stop_intra <- FALSE
  skip_intra <- NULL
  p_intra <- utils::combn(E, 1, simplify = FALSE) %>%
    purrr::transpose() %>%
    purrr::simplify_all() %>%
    rlang::set_names("E") %>%
    tibble::as_tibble() %>%
    dplyr::mutate(pvalue = 0, truncated = FALSE)
  # Initialize output for inter-adjusted p-values: one row per element pair
  stop_inter <- FALSE
  skip_inter <- NULL
  p_inter <- utils::combn(E, 2, simplify = FALSE) %>%
    purrr::transpose() %>%
    purrr::simplify_all() %>%
    rlang::set_names(c("E1", "E2")) %>%
    tibble::as_tibble() %>%
    dplyr::mutate(pvalue = 0, truncated = FALSE)
  for (i in seq_len(psize)) {
    sas <- sa[[i]]
    compositions <- names(sas)
    for (j in seq_along(sas)) {
      # Early exit once every adjusted p-value has been truncated at alpha.
      if (stop_intra && stop_inter)
        return(list(intra = p_intra, inter = p_inter))
      element_name <- compositions[j]
      update_intra <- !stop_intra && !(element_name %in% skip_intra)
      update_inter <- !stop_inter && i < psize && !(element_name %in% skip_inter)
      # BUG FIX: the original condition tested `!update_intra` twice
      # (`!update_intra && !update_intra`), so an element was skipped as soon
      # as its intra side was done even when an inter update was still needed.
      if (!update_intra && !update_inter)
        next
      element_value <- sas[[j]]
      individuals <- element_name %>%
        strsplit(",") %>%
        purrr::simplify()
      # Tests on full subgraphs
      p <- test2_subgraph(
        x, y, element_value,
        subgraph_full,
        representation, distance, stats, B, test, k, seed
      )
      if (verbose) {
        writeLines("- Type of test: FULL")
        writeLines(paste0("Element of the sigma-algebra: ", element_name))
        writeLines(paste0("P-value of the test: ", p))
      }
      # Intra-adjusted p-values from full tests
      if (update_intra)
        p_intra <- .update_intra_pvalues(p_intra, individuals, p, alpha)
      # Inter-adjusted p-values from full tests
      if (update_inter)
        p_inter <- .update_inter_pvalues(p_inter, individuals, p, alpha)
      # Update stopping and skipping conditions
      stop_intra <- all(p_intra$truncated)
      stop_inter <- all(p_inter$truncated)
      if (p >= alpha) {
        skip_intra <- .update_skip_list(skip_intra, individuals)
        skip_inter <- .update_skip_list(skip_inter, individuals)
      }
      update_intra <- !stop_intra && !(element_name %in% skip_intra)
      if (update_intra) {
        # Tests on intra subgraphs
        p <- test2_subgraph(
          x, y, element_value,
          subgraph_intra,
          representation, distance, stats, B, test, k, seed
        )
        if (verbose) {
          writeLines("- Type of test: INTRA")
          writeLines(paste0("Element of the sigma-algebra: ", element_name))
          writeLines(paste0("P-value of the test: ", p))
        }
        # Intra-adjusted p-values from intra tests
        p_intra <- .update_intra_pvalues(p_intra, individuals, p, alpha)
        # Update stopping and skipping conditions
        stop_intra <- all(p_intra$truncated)
        if (p >= alpha)
          skip_intra <- .update_skip_list(skip_intra, individuals)
      }
      update_inter <- !stop_inter && i < psize && !(element_name %in% skip_inter)
      if (update_inter) {
        # Tests on inter subgraphs
        p <- test2_subgraph(
          x, y, element_value,
          subgraph_inter,
          representation, distance, stats, B, test, k, seed
        )
        if (verbose) {
          writeLines("- Type of test: INTER")
          writeLines(paste0("Element of the sigma-algebra: ", element_name))
          writeLines(paste0("P-value of the test: ", p))
        }
        # Inter-adjusted p-values from inter tests
        p_inter <- .update_inter_pvalues(p_inter, individuals, p, alpha)
        # Update stopping and skipping conditions
        stop_inter <- all(p_inter$truncated)
        if (p >= alpha)
          skip_inter <- .update_skip_list(skip_inter, individuals)
      }
    }
  }
  list(intra = p_intra, inter = p_inter)
}
.update_intra_pvalues <- function(output, c, p, alpha) {
  # Raise the running p-value of every partition element listed in `c` to at
  # least `p`, then flag rows whose adjusted p-value reached `alpha`.
  bump <- function(element, current) {
    if (element %in% c) pmax(current, p) else current
  }
  output$pvalue <- purrr::map2_dbl(output$E, output$pvalue, bump)
  output$truncated <- output$pvalue >= alpha
  output
}
.update_inter_pvalues <- function(output, c, p, alpha) {
  # Raise the running p-value of every element *pair* fully contained in `c`
  # to at least `p`, then flag rows whose adjusted p-value reached `alpha`.
  bump <- function(e1, e2, current) {
    if (all(c(e1, e2) %in% c)) pmax(current, p) else current
  }
  output$pvalue <- purrr::pmap_dbl(
    list(output$E1, output$E2, output$pvalue),
    bump
  )
  output$truncated <- output$pvalue >= alpha
  output
}
.update_skip_list <- function(skip_list, individuals) {
  # Append to `skip_list` every comma-joined combination (of every size) of
  # the given individual labels, deduplicating as we go.
  # seq_along() instead of 1:length() keeps the loop empty when `individuals`
  # is empty (1:0 would iterate over c(1, 0) and error inside combn).
  # unlist() replaces purrr::simplify(): for a list of character scalars the
  # result is identical and avoids the extra dependency.
  for (k in seq_along(individuals)) {
    combos <- utils::combn(individuals, k, paste0, collapse = ",", simplify = FALSE)
    skip_list <- unique(c(skip_list, unlist(combos, use.names = FALSE)))
  }
  skip_list
}
test2_subgraph <- function(x, y, subpartition, fun,
                           representation, distance, stats, B, test, k, seed) {
  # Restrict every network in both samples to the requested subgraph, then run
  # the global two-sample test and return only its p-value.
  extract <- rlang::as_function(fun)
  x_sub <- as_nvd(purrr::map(x, extract, vids = subpartition))
  y_sub <- as_nvd(purrr::map(y, extract, vids = subpartition))
  res <- test2_global(
    x_sub, y_sub,
    representation = representation,
    distance = distance,
    stats = stats,
    B = B,
    test = test,
    k = k,
    seed = seed
  )
  res$pvalue
}
| /R/tests.R | no_license | cran/nevada | R | false | false | 12,299 | r | #' Global Two-Sample Test for Network-Valued Data
#'
#' This function carries out an hypothesis test where the null hypothesis is
#' that the two populations of networks share the same underlying probabilistic
#' distribution against the alternative hypothesis that the two populations come
#' from different distributions. The test is performed in a non-parametric
#' fashion using a permutational framework in which several statistics can be
#' used, together with several choices of network matrix representations and
#' distances between networks.
#'
#' @param x An \code{\link{nvd}} object listing networks in sample 1.
#' @param y An \code{\link{nvd}} object listing networks in sample 2.
#' @param representation A string specifying the desired type of representation,
#' among: \code{"adjacency"}, \code{"laplacian"} and \code{"modularity"}.
#' Defaults to \code{"adjacency"}.
#' @param distance A string specifying the chosen distance for calculating the
#' test statistic, among: \code{"hamming"}, \code{"frobenius"},
#' \code{"spectral"} and \code{"root-euclidean"}. Defaults to
#' \code{"frobenius"}.
#' @param stats A character vector specifying the chosen test statistic(s),
#' among: `"original_edge_count"`, `"generalized_edge_count"`,
#' `"weighted_edge_count"`, `"student_euclidean"`, `"welch_euclidean"` or any
#' statistics based on inter-point distances available in the **flipr**
#' package: `"flipr:student_ip"`, `"flipr:fisher_ip"`, `"flipr:bg_ip"`,
#'   `"flipr:energy_ip"`, `"flipr:cq_ip"`. Defaults to `c("flipr:t_ip",
#'   "flipr:f_ip")`.
#' @param B The number of permutation or the tolerance. If this number is lower
#' than \code{1}, it is intended as a tolerance. Otherwise, it is intended as
#' the number of required permutations. Defaults to `1000L`.
#' @param test A character string specifying the formula to be used to compute
#' the permutation p-value. Choices are `"estimate"`, `"upper_bound"` and
#' `"exact"`. Defaults to `"exact"` which provides exact tests.
#' @param k An integer specifying the density of the minimum spanning tree used
#' for the edge count statistics. Defaults to `5L`.
#' @param seed An integer for specifying the seed of the random generator for
#' result reproducibility. Defaults to `NULL`.
#'
#' @return A \code{\link[base]{list}} with three components: the value of the
#' statistic for the original two samples, the p-value of the resulting
#' permutation test and a numeric vector storing the values of the permuted
#' statistics.
#' @export
#'
#' @examples
#' n <- 10L
#'
#' # Two different models for the two populations
#' x <- nvd("smallworld", n)
#' y <- nvd("pa", n)
#' t1 <- test2_global(x, y, representation = "modularity")
#' t1$pvalue
#'
#' # Same model for the two populations
#' x <- nvd("smallworld", n)
#' y <- nvd("smallworld", n)
#' t2 <- test2_global(x, y, representation = "modularity")
#' t2$pvalue
test2_global <- function(x, y,
                         representation = "adjacency",
                         distance = "frobenius",
                         stats = c("flipr:t_ip", "flipr:f_ip"),
                         B = 1000L,
                         test = "exact",
                         k = 5L,
                         seed = NULL) {
  # NOTE(review): local_seed() is also reached with the NULL default; confirm
  # withr accepts NULL here, otherwise guard with `if (!is.null(seed))`.
  withr::local_seed(seed)
  n1 <- length(x)
  n2 <- length(y)
  n <- n1 + n2
  representation <- match.arg(
    representation,
    c("adjacency", "laplacian", "modularity", "transitivity")
  )
  distance <- match.arg(
    distance,
    c("hamming", "frobenius", "spectral", "root-euclidean")
  )
  # Frechet-mean statistics operate on matrix representations while the other
  # statistics operate on pairwise distances; mixing the two families would be
  # meaningless, so it is rejected outright.
  use_frechet_stats <- any(grepl("student_euclidean", stats)) ||
    any(grepl("welch_euclidean", stats))
  if (use_frechet_stats &&
      (any(grepl("_ip", stats)) ||
       any(grepl("edge_count", stats))))
    cli::cli_abort("It is not possible to mix statistics based on Frechet means and statistics based on inter-point distances.")
  ecp <- NULL
  if (use_frechet_stats)
    d <- repr_nvd(x, y, representation = representation)
  else {
    d <- dist_nvd(x, y, representation = representation, distance = distance)
    if (any(grepl("edge_count", stats)))
      ecp <- edge_count_global_variables(d, n1, k = k)
  }
  # Permutations act directly on the two samples, so the null specification is
  # simply the identity.
  null_spec <- function(y, parameters) {
    y
  }
  # Resolve statistic names: "pkg:name" is looked up as pkg::stat_name, while a
  # bare "name" resolves to a local stat_name function.
  stat_functions <- stats %>%
    strsplit(split = ":") %>%
    purrr::map(~ {
      if (length(.x) == 1) {
        s <- paste0("stat_", .x)
        return(rlang::as_function(s))
      }
      s <- paste0("stat_", .x[2])
      getExportedValue(.x[1], s)
    })
  # seq_along() instead of 1:length() guards against a zero-length `stats`.
  stat_assignments <- list(delta = seq_along(stat_functions))
  # A "dist" object carries all pairwise distances; flipr then only needs the
  # size of the first sample.  Otherwise pass the two representation lists.
  if (inherits(d, "dist")) {
    xx <- d
    yy <- as.integer(n1)
  } else {
    xx <- d[1:n1]
    yy <- d[(n1 + 1):(n1 + n2)]
  }
  pf <- flipr::PlausibilityFunction$new(
    null_spec = null_spec,
    stat_functions = stat_functions,
    stat_assignments = stat_assignments,
    xx, yy,
    seed = seed
  )
  pf$set_nperms(B)
  pf$set_pvalue_formula(test)
  pf$set_alternative("right_tail")
  # Returns a list with the observed statistic, the permutation p-value and
  # the permuted statistics (keep_null_distribution = TRUE).
  pf$get_value(
    parameters = 0,
    edge_count_prep = ecp,
    keep_null_distribution = TRUE
  )
}
#' Local Two-Sample Test for Network-Valued Data
#'
#' @inheritParams test2_global
#' @param partition Either a list or an integer vector specifying vertex
#' memberships into partition elements.
#' @param alpha Significance level for hypothesis testing. If set to 1, the
#' function outputs properly adjusted p-values. If lower than 1, then only
#' p-values lower than alpha are properly adjusted. Defaults to `0.05`.
#' @param verbose Boolean specifying whether information on intermediate tests
#' should be printed in the process (default: \code{FALSE}).
#'
#' @return A length-2 list reporting the adjusted p-values of each element of
#' the partition for the intra- and inter-tests.
#' @export
#'
#' @examples
#' n <- 10
#' p1 <- matrix(
#' data = c(0.1, 0.4, 0.1, 0.4,
#' 0.4, 0.4, 0.1, 0.4,
#' 0.1, 0.1, 0.4, 0.4,
#' 0.4, 0.4, 0.4, 0.4),
#' nrow = 4,
#' ncol = 4,
#' byrow = TRUE
#' )
#' p2 <- matrix(
#' data = c(0.1, 0.4, 0.4, 0.4,
#' 0.4, 0.4, 0.4, 0.4,
#' 0.4, 0.4, 0.1, 0.1,
#' 0.4, 0.4, 0.1, 0.4),
#' nrow = 4,
#' ncol = 4,
#' byrow = TRUE
#' )
#' sim <- sample2_sbm(n, 68, p1, c(17, 17, 17, 17), p2, seed = 1234)
#' m <- as.integer(c(rep(1, 17), rep(2, 17), rep(3, 17), rep(4, 17)))
#' test2_local(sim$x, sim$y, m,
#' seed = 1234,
#' alpha = 0.05,
#' B = 100)
test2_local <- function(x, y, partition,
                        representation = "adjacency",
                        distance = "frobenius",
                        stats = c("flipr:t_ip", "flipr:f_ip"),
                        B = 1000L,
                        alpha = 0.05,
                        test = "exact",
                        k = 5L,
                        seed = NULL,
                        verbose = FALSE) {
  # Creating the sigma-algebra generated by the partition
  partition <- as_vertex_partition(partition)
  E <- names(partition)
  sa <- generate_sigma_algebra(partition)
  psize <- length(sa)
  # Initialize output for intra-adjusted p-values: one row per element
  stop_intra <- FALSE
  skip_intra <- NULL
  p_intra <- utils::combn(E, 1, simplify = FALSE) %>%
    purrr::transpose() %>%
    purrr::simplify_all() %>%
    rlang::set_names("E") %>%
    tibble::as_tibble() %>%
    dplyr::mutate(pvalue = 0, truncated = FALSE)
  # Initialize output for inter-adjusted p-values: one row per element pair
  stop_inter <- FALSE
  skip_inter <- NULL
  p_inter <- utils::combn(E, 2, simplify = FALSE) %>%
    purrr::transpose() %>%
    purrr::simplify_all() %>%
    rlang::set_names(c("E1", "E2")) %>%
    tibble::as_tibble() %>%
    dplyr::mutate(pvalue = 0, truncated = FALSE)
  for (i in seq_len(psize)) {
    sas <- sa[[i]]
    compositions <- names(sas)
    for (j in seq_along(sas)) {
      # Early exit once every adjusted p-value has been truncated at alpha.
      if (stop_intra && stop_inter)
        return(list(intra = p_intra, inter = p_inter))
      element_name <- compositions[j]
      update_intra <- !stop_intra && !(element_name %in% skip_intra)
      update_inter <- !stop_inter && i < psize && !(element_name %in% skip_inter)
      # BUG FIX: the original condition tested `!update_intra` twice
      # (`!update_intra && !update_intra`), so an element was skipped as soon
      # as its intra side was done even when an inter update was still needed.
      if (!update_intra && !update_inter)
        next
      element_value <- sas[[j]]
      individuals <- element_name %>%
        strsplit(",") %>%
        purrr::simplify()
      # Tests on full subgraphs
      p <- test2_subgraph(
        x, y, element_value,
        subgraph_full,
        representation, distance, stats, B, test, k, seed
      )
      if (verbose) {
        writeLines("- Type of test: FULL")
        writeLines(paste0("Element of the sigma-algebra: ", element_name))
        writeLines(paste0("P-value of the test: ", p))
      }
      # Intra-adjusted p-values from full tests
      if (update_intra)
        p_intra <- .update_intra_pvalues(p_intra, individuals, p, alpha)
      # Inter-adjusted p-values from full tests
      if (update_inter)
        p_inter <- .update_inter_pvalues(p_inter, individuals, p, alpha)
      # Update stopping and skipping conditions
      stop_intra <- all(p_intra$truncated)
      stop_inter <- all(p_inter$truncated)
      if (p >= alpha) {
        skip_intra <- .update_skip_list(skip_intra, individuals)
        skip_inter <- .update_skip_list(skip_inter, individuals)
      }
      update_intra <- !stop_intra && !(element_name %in% skip_intra)
      if (update_intra) {
        # Tests on intra subgraphs
        p <- test2_subgraph(
          x, y, element_value,
          subgraph_intra,
          representation, distance, stats, B, test, k, seed
        )
        if (verbose) {
          writeLines("- Type of test: INTRA")
          writeLines(paste0("Element of the sigma-algebra: ", element_name))
          writeLines(paste0("P-value of the test: ", p))
        }
        # Intra-adjusted p-values from intra tests
        p_intra <- .update_intra_pvalues(p_intra, individuals, p, alpha)
        # Update stopping and skipping conditions
        stop_intra <- all(p_intra$truncated)
        if (p >= alpha)
          skip_intra <- .update_skip_list(skip_intra, individuals)
      }
      update_inter <- !stop_inter && i < psize && !(element_name %in% skip_inter)
      if (update_inter) {
        # Tests on inter subgraphs
        p <- test2_subgraph(
          x, y, element_value,
          subgraph_inter,
          representation, distance, stats, B, test, k, seed
        )
        if (verbose) {
          writeLines("- Type of test: INTER")
          writeLines(paste0("Element of the sigma-algebra: ", element_name))
          writeLines(paste0("P-value of the test: ", p))
        }
        # Inter-adjusted p-values from inter tests
        p_inter <- .update_inter_pvalues(p_inter, individuals, p, alpha)
        # Update stopping and skipping conditions
        stop_inter <- all(p_inter$truncated)
        if (p >= alpha)
          skip_inter <- .update_skip_list(skip_inter, individuals)
      }
    }
  }
  list(intra = p_intra, inter = p_inter)
}
.update_intra_pvalues <- function(output, c, p, alpha) {
  # Raise the running p-value of every partition element listed in `c` to at
  # least `p`, then flag rows whose adjusted p-value reached `alpha`.
  bump <- function(element, current) {
    if (element %in% c) pmax(current, p) else current
  }
  output$pvalue <- purrr::map2_dbl(output$E, output$pvalue, bump)
  output$truncated <- output$pvalue >= alpha
  output
}
.update_inter_pvalues <- function(output, c, p, alpha) {
  # Raise the running p-value of every element *pair* fully contained in `c`
  # to at least `p`, then flag rows whose adjusted p-value reached `alpha`.
  bump <- function(e1, e2, current) {
    if (all(c(e1, e2) %in% c)) pmax(current, p) else current
  }
  output$pvalue <- purrr::pmap_dbl(
    list(output$E1, output$E2, output$pvalue),
    bump
  )
  output$truncated <- output$pvalue >= alpha
  output
}
.update_skip_list <- function(skip_list, individuals) {
  # Append to `skip_list` every comma-joined combination (of every size) of
  # the given individual labels, deduplicating as we go.
  # seq_along() instead of 1:length() keeps the loop empty when `individuals`
  # is empty (1:0 would iterate over c(1, 0) and error inside combn).
  # unlist() replaces purrr::simplify(): for a list of character scalars the
  # result is identical and avoids the extra dependency.
  for (k in seq_along(individuals)) {
    combos <- utils::combn(individuals, k, paste0, collapse = ",", simplify = FALSE)
    skip_list <- unique(c(skip_list, unlist(combos, use.names = FALSE)))
  }
  skip_list
}
test2_subgraph <- function(x, y, subpartition, fun,
                           representation, distance, stats, B, test, k, seed) {
  # Restrict every network in both samples to the requested subgraph, then run
  # the global two-sample test and return only its p-value.
  extract <- rlang::as_function(fun)
  x_sub <- as_nvd(purrr::map(x, extract, vids = subpartition))
  y_sub <- as_nvd(purrr::map(y, extract, vids = subpartition))
  res <- test2_global(
    x_sub, y_sub,
    representation = representation,
    distance = distance,
    stats = stats,
    B = B,
    test = test,
    k = k,
    seed = seed
  )
  res$pvalue
}
|
### get data in a right form for analysis
source(file='readAll.R')
## d is the data without intermezzo trials
## task 2 is the short task, task 4 is the long task
## TODO: quickly write the experimental design
## TODO: use rmarkdown! so we can ease up communication (also after publication)
#### look at data
head(d)
unique(d$index[d$group==1])
unique(d$index[d$group==2])
unique(d$index[d$group==3])
## group: 1: Depr, 2: Suic, 3: HC
groupSize <- c(15,12,22)
groupLabel <- c('Depressed','Suicidal','HC')
d$label <- factor(d$group)
levels(d$label) <- list('HC'=3, 'Depressed'=1,'Suicidal'=2)
## give all participants a unique id
## Offsets keep per-group indices from colliding: group 1 -> +30,
## group 2 -> +50, group 3 (HC) unchanged.
d$index2 <- NA
d$index2[d$group==1] <- d$index[d$group==1]+30
d$index2[d$group==2] <- d$index[d$group==2]+50
d$index2[d$group==3] <- d$index[d$group==3]
d$id <- factor(d$index2)
## Relabel ids 1..n.  seq_len(nlevels(d$id)) replaces the hard-coded
## seq(1, 49) (49 = sum(groupSize)) so the relabelling cannot silently
## misalign if the sample size ever changes.
levels(d$id) <- seq_len(nlevels(d$id))
| /preprocess.R | no_license | woutervoorspoels/ShortLongtask-R-code | R | false | false | 838 | r | ### get data in a right form for analysis
source(file='readAll.R')
## d is the data without intermezzo trials
## task 2 is the short task, task 4 is the long task
## TODO: quickly write the experimental design
## TODO: use rmarkdown! so we can ease up communication (also after publication)
#### look at data
head(d)
unique(d$index[d$group==1])
unique(d$index[d$group==2])
unique(d$index[d$group==3])
## group: 1: Depr, 2: Suic, 3: HC
groupSize <- c(15,12,22)
groupLabel <- c('Depressed','Suicidal','HC')
d$label <- factor(d$group)
levels(d$label) <- list('HC'=3, 'Depressed'=1,'Suicidal'=2)
## give all participants a unique id
d$index2 <- NA
d$index2[d$group==1] <- d$index[d$group==1]+30
d$index2[d$group==2] <- d$index[d$group==2]+50
d$index2[d$group==3] <- d$index[d$group==3]
d$id <- factor(d$index2)
levels(d$id) <- seq(1,49)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QFactorGet-package.r
\docType{package}
\name{QFactorGet}
\alias{QFactorGet}
\alias{QFactorGet-package}
\title{QFactorGet}
\description{
QFactorGet
}
| /man/QFactorGet.Rd | no_license | raphael210/QFactorGet | R | false | true | 227 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QFactorGet-package.r
\docType{package}
\name{QFactorGet}
\alias{QFactorGet}
\alias{QFactorGet-package}
\title{QFactorGet}
\description{
QFactorGet
}
|
## Download and unzip the data file.
## IMPORTANT: Just uncomment the next 2 lines to download and unzip the file.
## To save time, it's commented so I don't have to repeat downloading and
## extracting the file every time this script is executed. If the two lines
## are not commente, just ignore this. :)
#download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "project.zip", method = "curl")
#unzip("project.zip")
## Load the dplyr package
library(plyr)
## Read the data
Xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
Ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
Xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
Ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
features <- read.table("./UCI HAR Dataset/features.txt")
XtestSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
XtrainSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
## ----------------------------------------------------------------------
## 1. Combine the training and the test sets to create one data set
X <- rbind(Xtest, Xtrain)
Y <- rbind(Ytest, Ytrain)
## ----------------------------------------------------------------------
## 2. Extract only the measurements on the mean and std deviation
## First, let's add the descriptive column names so we can filter it.
## This also partially solves Problem #4.
colnames(X) <- features$V2
## Select only columns with 'std' and 'mean(' using regular expression
Xsub <- X[,grep('(mean\\(|std)', names(X), value = T)]
## ----------------------------------------------------------------------
## 3. Use descriptive activity names to name the activities in data set
activities <- join(Y, activities)
Xsub <- cbind(Xsub, activities)
## ----------------------------------------------------------------------
## 4. Appropriately label the data set with descriptive variable names
## This is partially solved when we added the variable names from the
## 'features' data frame in the Problem #2 solution.
colnames(Xsub)[67] <- "ActivityCode"
colnames(Xsub)[68] <- "ActivityName"
## ----------------------------------------------------------------------
## 5. From the data set in step 4, create a second, independent tidy data
## set with the average of each variable for each activity and each
## subject
## Combine the two subject data frames
Subject <- rbind(XtestSubject, XtrainSubject)
## Add subject column to our data set
Xsub <- cbind(Xsub, Subject)
colnames(Xsub)[69] <- "Subject"
## Compute for mean for each unique subject-activity combination
tidy <- aggregate(. ~ Subject + ActivityName, data = Xsub, mean)
tidy <- arrange(tidy, Subject, ActivityName)
## Write the tidied data to file
write.table(tidy, "tidy.txt", row.names = FALSE)
| /run_analysis.R | no_license | evision/getdataProject | R | false | false | 2,893 | r | ## Download and unzip the data file.
## IMPORTANT: Just uncomment the next 2 lines to download and unzip the file.
## To save time, it's commented so I don't have to repeat downloading and
## extracting the file every time this script is executed. If the two lines
## are not commente, just ignore this. :)
#download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", destfile = "project.zip", method = "curl")
#unzip("project.zip")
## Load the dplyr package
library(plyr)
## Read the data
Xtest <- read.table("./UCI HAR Dataset/test/X_test.txt")
Ytest <- read.table("./UCI HAR Dataset/test/y_test.txt")
Xtrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
Ytrain <- read.table("./UCI HAR Dataset/train/y_train.txt")
features <- read.table("./UCI HAR Dataset/features.txt")
XtestSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
XtrainSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
## ----------------------------------------------------------------------
## 1. Combine the training and the test sets to create one data set
X <- rbind(Xtest, Xtrain)
Y <- rbind(Ytest, Ytrain)
## ----------------------------------------------------------------------
## 2. Extract only the measurements on the mean and std deviation
## First, let's add the descriptive column names so we can filter it.
## This also partially solves Problem #4.
colnames(X) <- features$V2
## Select only columns with 'std' and 'mean(' using regular expression
Xsub <- X[,grep('(mean\\(|std)', names(X), value = T)]
## ----------------------------------------------------------------------
## 3. Use descriptive activity names to name the activities in data set
activities <- join(Y, activities)
Xsub <- cbind(Xsub, activities)
## ----------------------------------------------------------------------
## 4. Appropriately label the data set with descriptive variable names
## This is partially solved when we added the variable names from the
## 'features' data frame in the Problem #2 solution.
colnames(Xsub)[67] <- "ActivityCode"
colnames(Xsub)[68] <- "ActivityName"
## ----------------------------------------------------------------------
## 5. From the data set in step 4, create a second, independent tidy data
## set with the average of each variable for each activity and each
## subject
## Combine the two subject data frames
Subject <- rbind(XtestSubject, XtrainSubject)
## Add subject column to our data set
Xsub <- cbind(Xsub, Subject)
colnames(Xsub)[69] <- "Subject"
## Compute for mean for each unique subject-activity combination
tidy <- aggregate(. ~ Subject + ActivityName, data = Xsub, mean)
tidy <- arrange(tidy, Subject, ActivityName)
## Write the tidied data to file
write.table(tidy, "tidy.txt", row.names = FALSE)
|
################################################################################
# Joshua C. Fjelstul, Ph.D.
# eutr R package
################################################################################
# define pipe function
`%>%` <- magrittr::`%>%`
# load data
load("data/notifications.RData")
load("data/comments.RData")
load("data/opinions.Rdata")
##################################################
# template
##################################################
# template
template_ts <- expand.grid(1988:2020, stringsAsFactors = FALSE)
names(template_ts) <- c("year")
##################################################
# notifications
##################################################
# collapse by member state and by year
notifications_ts <- notifications %>%
dplyr::group_by(start_year) %>%
dplyr::summarize(
count_notifications = dplyr::n()
) %>%
dplyr::ungroup()
# rename variable
notifications_ts <- dplyr::rename(notifications_ts, year = start_year)
# merge
notifications_ts <- dplyr::left_join(template_ts, notifications_ts, by = "year")
# convert to a tibble
notifications_ts <- dplyr::as_tibble(notifications_ts)
# code zeros
notifications_ts$count_notifications[is.na(notifications_ts$count_notifications)] <- 0
# key ID
notifications_ts$key_id <- 1:nrow(notifications_ts)
# select variables
notifications_ts <- dplyr::select(
notifications_ts,
key_id, year, count_notifications
)
# save
save(notifications_ts, file = "data/notifications_ts.RData")
##################################################
# comments
##################################################
# collapse by member state and by year
comments_ts <- comments %>%
dplyr::group_by(start_year) %>%
dplyr::summarize(
count_comments = dplyr::n()
) %>%
dplyr::ungroup()
# rename variable
comments_ts <- dplyr::rename(comments_ts, year = start_year)
# merge
comments_ts <- dplyr::left_join(template_ts, comments_ts, by = "year")
# convert to a tibble
comments_ts <- dplyr::as_tibble(comments_ts)
# code zeros
comments_ts$count_comments[is.na(comments_ts$count_comments)] <- 0
# key ID
comments_ts$key_id <- 1:nrow(comments_ts)
# select variables
comments_ts <- dplyr::select(
comments_ts,
key_id, year, count_comments
)
# save
save(comments_ts, file = "data/comments_ts.RData")
##################################################
# opinions
##################################################
# collapse by member state and by year
opinions_ts <- opinions %>%
dplyr::group_by(start_year) %>%
dplyr::summarize(
count_opinions = dplyr::n()
) %>%
dplyr::ungroup()
# rename variable
opinions_ts <- dplyr::rename(opinions_ts, year = start_year)
# merge
opinions_ts <- dplyr::left_join(template_ts, opinions_ts, by = "year")
# convert to a tibble
opinions_ts <- dplyr::as_tibble(opinions_ts)
# code zeros
opinions_ts$count_opinions[is.na(opinions_ts$count_opinions)] <- 0
# key ID
opinions_ts$key_id <- 1:nrow(opinions_ts)
# select variables
opinions_ts <- dplyr::select(
opinions_ts,
key_id, year, count_opinions
)
# save
save(opinions_ts, file = "data/opinions_ts.RData")
################################################################################
# end R script
################################################################################
| /data-raw/code/07_ts_data.R | no_license | jfjelstul/eutr | R | false | false | 3,302 | r | ################################################################################
# Joshua C. Fjelstul, Ph.D.
# eutr R package
################################################################################
# This script collapses the case-level notifications, comments, and opinions
# tables into year-level count series and saves one .RData file per table.
# NOTE(review): the same pipeline is repeated verbatim three times -- a helper
# function would remove the duplication.
# define pipe function
`%>%` <- magrittr::`%>%`
# load data (each file provides the object of the same name)
load("data/notifications.RData")
load("data/comments.RData")
# NOTE(review): ".Rdata" extension differs from the ".RData" files above --
# confirm the file name on disk
load("data/opinions.Rdata")
##################################################
# template
##################################################
# one row per year 1988-2020; years with no cases must still appear
template_ts <- expand.grid(1988:2020, stringsAsFactors = FALSE)
names(template_ts) <- c("year")
##################################################
# notifications
##################################################
# collapse by year (grouping is by start_year only)
notifications_ts <- notifications %>%
  dplyr::group_by(start_year) %>%
  dplyr::summarize(
    count_notifications = dplyr::n()
  ) %>%
  dplyr::ungroup()
# rename variable
notifications_ts <- dplyr::rename(notifications_ts, year = start_year)
# merge into the template so empty years are retained
notifications_ts <- dplyr::left_join(template_ts, notifications_ts, by = "year")
# convert to a tibble
notifications_ts <- dplyr::as_tibble(notifications_ts)
# code zeros for years with no cases (left_join left them NA)
notifications_ts$count_notifications[is.na(notifications_ts$count_notifications)] <- 0
# key ID
notifications_ts$key_id <- 1:nrow(notifications_ts)
# select variables
notifications_ts <- dplyr::select(
  notifications_ts,
  key_id, year, count_notifications
)
# save
save(notifications_ts, file = "data/notifications_ts.RData")
##################################################
# comments
##################################################
# collapse by year (grouping is by start_year only)
comments_ts <- comments %>%
  dplyr::group_by(start_year) %>%
  dplyr::summarize(
    count_comments = dplyr::n()
  ) %>%
  dplyr::ungroup()
# rename variable
comments_ts <- dplyr::rename(comments_ts, year = start_year)
# merge into the template so empty years are retained
comments_ts <- dplyr::left_join(template_ts, comments_ts, by = "year")
# convert to a tibble
comments_ts <- dplyr::as_tibble(comments_ts)
# code zeros for years with no cases
comments_ts$count_comments[is.na(comments_ts$count_comments)] <- 0
# key ID
comments_ts$key_id <- 1:nrow(comments_ts)
# select variables
comments_ts <- dplyr::select(
  comments_ts,
  key_id, year, count_comments
)
# save
save(comments_ts, file = "data/comments_ts.RData")
##################################################
# opinions
##################################################
# collapse by year (grouping is by start_year only)
opinions_ts <- opinions %>%
  dplyr::group_by(start_year) %>%
  dplyr::summarize(
    count_opinions = dplyr::n()
  ) %>%
  dplyr::ungroup()
# rename variable
opinions_ts <- dplyr::rename(opinions_ts, year = start_year)
# merge into the template so empty years are retained
opinions_ts <- dplyr::left_join(template_ts, opinions_ts, by = "year")
# convert to a tibble
opinions_ts <- dplyr::as_tibble(opinions_ts)
# code zeros for years with no cases
opinions_ts$count_opinions[is.na(opinions_ts$count_opinions)] <- 0
# key ID
opinions_ts$key_id <- 1:nrow(opinions_ts)
# select variables
opinions_ts <- dplyr::select(
  opinions_ts,
  key_id, year, count_opinions
)
# save
save(opinions_ts, file = "data/opinions_ts.RData")
################################################################################
# end R script
################################################################################
|
# Example invocation of WaSPU() on the APOE gene.
# The commented block below records the argument values used in an earlier
# interactive run (ANKRD34A instead of APOE).
# work.dir = "~/Google_Drive/MyPackages/WaSPU/data"
# genename = "ANKRD34A"
# GWAS.plink = "wgas_maf5";
# Weight.db = "TW_WholeBlood_ElasticNet.0.5.db"
# method = "perm"; model = "binomial"
# B = 1e3; pow = c(1:8, Inf)
# binary phenotype vector; NOTE(review): the comment says "convert to 1,0
# coding" but no conversion happens here -- confirm the .rds is already 0/1
y = readRDS("binary_phenotype.rds")# convert to 1,0 coding
WaSPU(work.dir = "~/Google_Drive/MyPackages/WaSPU/data",
      # genename= "ANKRD34A",
      genename= "APOE",
      GWAS.plink = "wgas_maf5",
      y=y,
      Weight.db = "TW_WholeBlood_ElasticNet.0.5.db",
      method = "perm", model = "binomial",
      B = 1e3, pow = c(1:8, Inf))
| /R/archieve/test.R | no_license | jasonzyx/WaSPU | R | false | false | 563 | r | # work.dir = "~/Google_Drive/MyPackages/WaSPU/data"
# Example invocation of WaSPU() on the APOE gene.
# The commented block below records argument values from an earlier
# interactive run (ANKRD34A instead of APOE).
# genename = "ANKRD34A"
# GWAS.plink = "wgas_maf5";
# Weight.db = "TW_WholeBlood_ElasticNet.0.5.db"
# method = "perm"; model = "binomial"
# B = 1e3; pow = c(1:8, Inf)
# binary phenotype vector; NOTE(review): the comment says "convert to 1,0
# coding" but no conversion happens here -- confirm the .rds is already 0/1
y = readRDS("binary_phenotype.rds")# convert to 1,0 coding
WaSPU(work.dir = "~/Google_Drive/MyPackages/WaSPU/data",
      # genename= "ANKRD34A",
      genename= "APOE",
      GWAS.plink = "wgas_maf5",
      y=y,
      Weight.db = "TW_WholeBlood_ElasticNet.0.5.db",
      method = "perm", model = "binomial",
      B = 1e3, pow = c(1:8, Inf))
|
#' Create the observation_ancillary table
#'
#' @param L0_flat (tbl_df, tbl, data.frame) The fully joined source L0 dataset, in "flat" format (see details).
#' @param observation_id (character) Column in \code{L0_flat} containing the identifier assigned to each unique observation.
#' @param variable_name (character) Columns in \code{L0_flat} containing the ancillary observation data.
#' @param unit (character) An optional column in \code{L0_flat} containing the units of each \code{variable_name} following the column naming convention: unit_<variable_name> (e.g. "unit_temperature").
#'
#' @details This function collects specified columns from \code{L0_flat}, converts into long (attribute-value) form by gathering \code{variable_name}. Regular expression matching joins \code{unit} to any associated \code{variable_name} and is listed in the resulting table's "unit" column.
#'
#' "flat" format refers to the fully joined source L0 dataset in "wide" form with the exception of the core observation variables, which are in "long" form (i.e. using the variable_name, value, unit columns of the observation table). This "flat" format is the "widest" an L1 ecocomDP dataset can be consistently spread due to the frequent occurrence of L0 source datasets with > 1 core observation variable.
#'
#' @return (tbl_df, tbl, data.frame) The observation_ancillary table.
#'
#' @export
#'
#' @examples
#' flat <- ants_L0_flat
#'
#' observation_ancillary <- create_observation_ancillary(
#'   L0_flat = flat,
#'   observation_id = "observation_id",
#'   variable_name = c("trap.type", "trap.num", "moose.cage"))
#'
#' observation_ancillary
#'
create_observation_ancillary <- function(L0_flat,
                                         observation_id,
                                         variable_name,
                                         unit = NULL) {
  validate_arguments(fun.name = "create_observation_ancillary", fun.args = as.list(environment()))
  # gather cols
  # NOTE(review): bare all_of()/across(variable_name, ...) pass external
  # character vectors straight into tidyselect, which is deprecated; prefer
  # dplyr::all_of() around both
  cols_to_gather <- c(observation_id, variable_name)
  res <- L0_flat %>%
    dplyr::select(all_of(cols_to_gather)) %>%
    dplyr::mutate(across(variable_name, as.character)) %>% # ancillary table variable_name needs character coercion
    tidyr::pivot_longer(variable_name, names_to = "variable_name", values_to = "value") %>%
    dplyr::arrange(observation_id) # NOTE(review): data-masked name -- this sorts by a column literally named "observation_id"; if the user's id column has another name it silently falls back to the argument string
  # add units (add_units() is expected to create the "unit" column)
  res <- add_units(L0_flat, res, unit)
  # keep only distinct values
  res <- dplyr::distinct(res)
  # add primary key
  res$observation_ancillary_id <- seq(nrow(res))
  # reorder columns to match the observation_ancillary table spec
  res <- res %>%
    dplyr::select(observation_ancillary_id, observation_id, variable_name, value, unit)
  # coerce classes
  res <- coerce_table_classes(res, "observation_ancillary", class(L0_flat))
  return(res)
} | /R/create_observation_ancillary.R | permissive | sokole/ecocomDP | R | false | false | 2,758 | r | #' Create the observation_ancillary table
#' Create the observation_ancillary table
#'
#' @param L0_flat (tbl_df, tbl, data.frame) The fully joined source L0 dataset, in "flat" format (see details).
#' @param observation_id (character) Column in \code{L0_flat} containing the identifier assigned to each unique observation.
#' @param variable_name (character) Columns in \code{L0_flat} containing the ancillary observation data.
#' @param unit (character) An optional column in \code{L0_flat} containing the units of each \code{variable_name} following the column naming convention: unit_<variable_name> (e.g. "unit_temperature").
#'
#' @details This function collects specified columns from \code{L0_flat}, converts into long (attribute-value) form by gathering \code{variable_name}. Regular expression matching joins \code{unit} to any associated \code{variable_name} and is listed in the resulting table's "unit" column.
#'
#' "flat" format refers to the fully joined source L0 dataset in "wide" form with the exception of the core observation variables, which are in "long" form (i.e. using the variable_name, value, unit columns of the observation table). This "flat" format is the "widest" an L1 ecocomDP dataset can be consistently spread due to the frequent occurrence of L0 source datasets with > 1 core observation variable.
#'
#' @return (tbl_df, tbl, data.frame) The observation_ancillary table.
#'
#' @export
#'
#' @examples
#' flat <- ants_L0_flat
#'
#' observation_ancillary <- create_observation_ancillary(
#'   L0_flat = flat,
#'   observation_id = "observation_id",
#'   variable_name = c("trap.type", "trap.num", "moose.cage"))
#'
#' observation_ancillary
#'
create_observation_ancillary <- function(L0_flat,
                                         observation_id,
                                         variable_name,
                                         unit = NULL) {
  validate_arguments(fun.name = "create_observation_ancillary", fun.args = as.list(environment()))
  # gather cols; dplyr::all_of() makes the use of external character vectors
  # explicit (bare external vectors in tidyselect are deprecated) and does not
  # depend on tidyselect being attached
  cols_to_gather <- c(observation_id, variable_name)
  res <- L0_flat %>%
    dplyr::select(dplyr::all_of(cols_to_gather)) %>%
    dplyr::mutate(dplyr::across(dplyr::all_of(variable_name), as.character)) %>% # ancillary table variable_name needs character coercion
    tidyr::pivot_longer(dplyr::all_of(variable_name), names_to = "variable_name", values_to = "value") %>%
    dplyr::arrange(.data[[observation_id]]) # .data pronoun: sort by the column named in the argument, not by the argument string itself
  # add units (add_units() is expected to create the "unit" column)
  res <- add_units(L0_flat, res, unit)
  # keep only distinct values
  res <- dplyr::distinct(res)
  # add primary key
  res$observation_ancillary_id <- seq(nrow(res))
  # reorder columns to match the observation_ancillary table spec
  res <- res %>%
    dplyr::select(dplyr::all_of(c("observation_ancillary_id", observation_id, "variable_name", "value", "unit")))
  # coerce classes
  res <- coerce_table_classes(res, "observation_ancillary", class(L0_flat))
  return(res)
}
# Roxygen documentation for the package's example mark-position data sets.
#' @title Mark Positional data - monocentrics
#' @description When there are several OTUs, some can be monocen. and others
#' holocen. Mark distances for
#' monocen. are measured from the cen., and for
#' holocen. from the top or bottom depending on \code{param} \code{origin}. See
#' vignettes.
#'
#' @docType data
#' @name markposDFs
NULL
#' @description bigdfOfMarks: Example data for mark position, with an OTU column
#'
#' @format bigdfOfMarks a data.frame with columns:
#' \describe{
#' \item{OTU}{OTU, species, mandatory if in dfChrSize}
#' \item{chrName}{name of chromosome}
#' \item{markName}{name of mark}
#' \item{chrRegion}{use p for short arm, q for long arm, and cen for
#' centromeric}
#' \item{markDistCen}{distance of mark to centromere (not for cen)}
#' \item{markSize}{size of mark (not for cen)}
#' }
#' @seealso \code{\link{markdataholo}}
#' @seealso \code{\link{plotIdiograms}}
#' @seealso \code{\link{chrbasicdatamono}}
#' @seealso \code{\link{dfMarkColor}}
#'
#' @rdname markposDFs
"bigdfOfMarks"
#' @description dfOfMarks: Example data for marks' positions
#' @rdname markposDFs
"dfOfMarks"
#' @description dfOfMarks2: Marks' positions, including cen. marks
#' @rdname markposDFs
"dfOfMarks2"
#' @description humMarkPos: human karyotype bands' (marks) positions, measured
#' from Adler (1994)
#' @source
#' \href{http://www.pathology.washington.edu/research/cytopages/idiograms/human/}{Washington U}
#' @references Adler 1994. Idiogram Album. URL:
#' \href{http://www.pathology.washington.edu/research/cytopages/idiograms/human/}{Washington U.}
#' @rdname markposDFs
"humMarkPos"
#' @description allMarksSample: Example data for marks' positions
#' @rdname markposDFs
"allMarksSample"
#' @description dfAlloParentMarks: Example data for mark positions of GISH of
#' monocen.
#' @rdname markposDFs
"dfAlloParentMarks"
#' @description traspaMarks: T. spathacea (Rhoeo) marks' positions, from
#' Golczyk et al. (2005)
#' @references Golczyk H, Hasterok R, Joachimiak AJ (2005) FISH-aimed
#' karyotyping and
#' characterization of Renner complexes in permanent heterozygote Rhoeo
#' spathacea. Genome
#' 48:145-153.
#' @rdname markposDFs
"traspaMarks"
| /R/markposDFs.R | no_license | cran/idiogramFISH | R | false | false | 2,157 | r | #' @title Mark Positional data - monocentrics
#' @title Mark Positional data - monocentrics
#' @description When there are several OTUs, some can be monocen. and others
#' holocen. Mark distances for
#' monocen. are measured from the cen., and for
#' holocen. from the top or bottom depending on \code{param} \code{origin}. See
#' vignettes.
#'
#' @docType data
#' @name markposDFs
NULL
#' @description bigdfOfMarks: Example data for mark position, with an OTU column
#'
#' @format bigdfOfMarks a data.frame with columns:
#' \describe{
#' \item{OTU}{OTU, species, mandatory if in dfChrSize}
#' \item{chrName}{name of chromosome}
#' \item{markName}{name of mark}
#' \item{chrRegion}{use p for short arm, q for long arm, and cen for
#' centromeric}
#' \item{markDistCen}{distance of mark to centromere (not for cen)}
#' \item{markSize}{size of mark (not for cen)}
#' }
#' @seealso \code{\link{markdataholo}}
#' @seealso \code{\link{plotIdiograms}}
#' @seealso \code{\link{chrbasicdatamono}}
#' @seealso \code{\link{dfMarkColor}}
#'
#' @rdname markposDFs
"bigdfOfMarks"
#' @description dfOfMarks: Example data for marks' positions
#' @rdname markposDFs
"dfOfMarks"
#' @description dfOfMarks2: Marks' positions, including cen. marks
#' @rdname markposDFs
"dfOfMarks2"
#' @description humMarkPos: human karyotype bands' (marks) positions, measured
#' from Adler (1994)
#' @source
#' \href{http://www.pathology.washington.edu/research/cytopages/idiograms/human/}{Washington U}
#' @references Adler 1994. Idiogram Album. URL:
#' \href{http://www.pathology.washington.edu/research/cytopages/idiograms/human/}{Washington U.}
#' @rdname markposDFs
"humMarkPos"
#' @description allMarksSample: Example data for marks' positions
#' @rdname markposDFs
"allMarksSample"
#' @description dfAlloParentMarks: Example data for mark positions of GISH of
#' monocen.
#' @rdname markposDFs
"dfAlloParentMarks"
#' @description traspaMarks: T. spathacea (Rhoeo) marks' positions, from
#' Golczyk et al. (2005)
#' @references Golczyk H, Hasterok R, Joachimiak AJ (2005) FISH-aimed
#' karyotyping and
#' characterization of Renner complexes in permanent heterozygote Rhoeo
#' spathacea. Genome
#' 48:145-153.
#' @rdname markposDFs
"traspaMarks"
|
library(FinCal)
library(plotly)
| /Global.R | no_license | tanvird3/MDS | R | false | false | 34 | r | library(FinCal)
library(plotly)
|
# lec16_2_cnn.r
# Convolutional Neural Network
# Trains a small 2-conv-layer CNN on a mini MNIST subset (3 classes) with mxnet
# and reports test-set accuracy.
# Require mxnet package
# install.packages("https://github.com/jeremiedb/mxnet_winbin/raw/master/mxnet.zip",repos = NULL)
library(mxnet)
# If you have Error message "no package called XML or DiagrmmeR", then install
#install.packages("XML")
#install.packages("DiagrammeR")
#library(XML)
#library(DiagrammeR)
# set working directory
setwd("D:/tempstore/moocr/wk16")
# Load MNIST mn1
# 28*28, 1 channel images; first column is the class label
mn1 <- read.csv("mini_mnist.csv")
# sample.kind="Rounding" reproduces the pre-R-3.6 sampling sequence
set.seed(123,sample.kind="Rounding")
N<-nrow(mn1)
# 2/3 training split, drawn without replacement
tr.idx<-sample(1:N, size=N*2/3, replace=FALSE)
# split train data and test data
train_data<-data.matrix(mn1[tr.idx,])
test_data<-data.matrix(mn1[-tr.idx,])
# drop the label column, rescale pixels to [0, 1], transpose to pixels x images
test<-t(test_data[,-1]/255)
features<-t(train_data[,-1]/255)
labels<-train_data[,1]
# data preprocession: reshape to the 4-D array mxnet expects
# (width, height, channels, batch)
features_array <- features
dim(features_array) <- c(28,28,1,ncol(features))
test_array <- test
dim(test_array) <- c(28,28,1,ncol(test))
ncol(features)
table(labels)
# Build cnn model
# first conv layers: 20 4x4 filters, stride 2, followed by ReLU + 2x2 max pool
my_input = mx.symbol.Variable('data')
conv1 = mx.symbol.Convolution(data=my_input, kernel=c(4,4), stride=c(2,2), pad=c(1,1), num.filter = 20, name='conv1')
relu1 = mx.symbol.Activation(data=conv1, act.type='relu', name='relu1')
mp1 = mx.symbol.Pooling(data=relu1, kernel=c(2,2), stride=c(2,2), pool.type='max', name='pool1')
# second conv layers: 40 3x3 filters, stride 2, ReLU + 2x2 max pool
conv2 = mx.symbol.Convolution(data=mp1, kernel=c(3,3), stride=c(2,2), pad=c(1,1), num.filter = 40, name='conv2')
relu2 = mx.symbol.Activation(data=conv2, act.type='relu', name='relu2')
mp2 = mx.symbol.Pooling(data=relu2, kernel=c(2,2), stride=c(2,2), pool.type='max', name='pool2')
# fully connected: 1000 hidden units, then 3 output units (one per class)
fc1 = mx.symbol.FullyConnected(data=mp2, num.hidden = 1000, name='fc1')
relu3 = mx.symbol.Activation(data=fc1, act.type='relu', name='relu3')
fc2 = mx.symbol.FullyConnected(data=relu3, num.hidden = 3, name='fc2')
# softmax output layer
sm = mx.symbol.SoftmaxOutput(data=fc2, name='sm')
# training
# NOTE(review): mx.set.seed() takes only a seed; the sample.kind argument
# looks copied from set.seed() -- confirm mxnet accepts/ignores it
mx.set.seed(100,sample.kind="Rounding")
device <- mx.cpu()
# SGD, batch size 30, 70 epochs, learning rate 0.1
model <- mx.model.FeedForward.create(symbol=sm,
                                     optimizer = "sgd",
                                     array.batch.size=30,
                                     num.round = 70, learning.rate=0.1,
                                     X=features_array, y=labels, ctx=device,
                                     eval.metric = mx.metric.accuracy,
                                     epoch.end.callback=mx.callback.log.train.metric(100))
graph.viz(model$symbol)
# test: argmax over the 3 class probabilities; "- 1" maps to 0-based labels
predict_probs <- predict(model, test_array)
predicted_labels <- max.col(t(predict_probs)) - 1
table(test_data[, 1], predicted_labels)
# overall test accuracy = correct predictions / total
sum(diag(table(test_data[, 1], predicted_labels)))/length(predicted_labels)
| /postech/머신러닝기법과 R프로그래밍 Ⅱ/data/week16_2/lec16_2_cnn.R | no_license | ne-choi/study | R | false | false | 2,721 | r | # lec16_2_cnn.r
# Convolutional Neural Network
# Trains a small 2-conv-layer CNN on a mini MNIST subset (3 classes) with mxnet
# and reports test-set accuracy.
# Require mxnet package
# install.packages("https://github.com/jeremiedb/mxnet_winbin/raw/master/mxnet.zip",repos = NULL)
library(mxnet)
# If you have Error message "no package called XML or DiagrmmeR", then install
#install.packages("XML")
#install.packages("DiagrammeR")
#library(XML)
#library(DiagrammeR)
# set working directory
setwd("D:/tempstore/moocr/wk16")
# Load MNIST mn1
# 28*28, 1 channel images; first column is the class label
mn1 <- read.csv("mini_mnist.csv")
# sample.kind="Rounding" reproduces the pre-R-3.6 sampling sequence
set.seed(123,sample.kind="Rounding")
N<-nrow(mn1)
# 2/3 training split, drawn without replacement
tr.idx<-sample(1:N, size=N*2/3, replace=FALSE)
# split train data and test data
train_data<-data.matrix(mn1[tr.idx,])
test_data<-data.matrix(mn1[-tr.idx,])
# drop the label column, rescale pixels to [0, 1], transpose to pixels x images
test<-t(test_data[,-1]/255)
features<-t(train_data[,-1]/255)
labels<-train_data[,1]
# data preprocession: reshape to the 4-D array mxnet expects
# (width, height, channels, batch)
features_array <- features
dim(features_array) <- c(28,28,1,ncol(features))
test_array <- test
dim(test_array) <- c(28,28,1,ncol(test))
ncol(features)
table(labels)
# Build cnn model
# first conv layers: 20 4x4 filters, stride 2, followed by ReLU + 2x2 max pool
my_input = mx.symbol.Variable('data')
conv1 = mx.symbol.Convolution(data=my_input, kernel=c(4,4), stride=c(2,2), pad=c(1,1), num.filter = 20, name='conv1')
relu1 = mx.symbol.Activation(data=conv1, act.type='relu', name='relu1')
mp1 = mx.symbol.Pooling(data=relu1, kernel=c(2,2), stride=c(2,2), pool.type='max', name='pool1')
# second conv layers: 40 3x3 filters, stride 2, ReLU + 2x2 max pool
conv2 = mx.symbol.Convolution(data=mp1, kernel=c(3,3), stride=c(2,2), pad=c(1,1), num.filter = 40, name='conv2')
relu2 = mx.symbol.Activation(data=conv2, act.type='relu', name='relu2')
mp2 = mx.symbol.Pooling(data=relu2, kernel=c(2,2), stride=c(2,2), pool.type='max', name='pool2')
# fully connected: 1000 hidden units, then 3 output units (one per class)
fc1 = mx.symbol.FullyConnected(data=mp2, num.hidden = 1000, name='fc1')
relu3 = mx.symbol.Activation(data=fc1, act.type='relu', name='relu3')
fc2 = mx.symbol.FullyConnected(data=relu3, num.hidden = 3, name='fc2')
# softmax output layer
sm = mx.symbol.SoftmaxOutput(data=fc2, name='sm')
# training
# NOTE(review): mx.set.seed() takes only a seed; the sample.kind argument
# looks copied from set.seed() -- confirm mxnet accepts/ignores it
mx.set.seed(100,sample.kind="Rounding")
device <- mx.cpu()
# SGD, batch size 30, 70 epochs, learning rate 0.1
model <- mx.model.FeedForward.create(symbol=sm,
                                     optimizer = "sgd",
                                     array.batch.size=30,
                                     num.round = 70, learning.rate=0.1,
                                     X=features_array, y=labels, ctx=device,
                                     eval.metric = mx.metric.accuracy,
                                     epoch.end.callback=mx.callback.log.train.metric(100))
graph.viz(model$symbol)
# test: argmax over the 3 class probabilities; "- 1" maps to 0-based labels
predict_probs <- predict(model, test_array)
predicted_labels <- max.col(t(predict_probs)) - 1
table(test_data[, 1], predicted_labels)
# overall test accuracy = correct predictions / total
sum(diag(table(test_data[, 1], predicted_labels)))/length(predicted_labels)
|
#This is a minimal version of seurat preparing
#This snippet reads cells from the file Deduplicated.csv and creates _ipmc, the Seurat object. Initial cell types are found in _types variable
#new cell types _newtypes are LETTERS A-H, W, I, M due to the preferrable single letter cell tags by Seurat visualizers.
# Remember that the ipmc@ident stores the results of clustering, so FindCluster destroys it
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here
require(Seurat)
require(methods)
plotDir <- file.path(getwd(), "Plot")
resDir <- file.path(getwd(), "Res")
# expression matrix (genes x cells), cell annotations, and probe annotations
Genes <- read.table( file = paste0("Res", .Platform$file.sep, "NormalizedExTable.csv"), sep = "\t", stringsAsFactors = FALSE, check.names=FALSE )
Cells <- read.table( file = paste0("Res", .Platform$file.sep, "cellDescripitonsDedupQC.csv"), sep = "\t", stringsAsFactors = FALSE, check.names=FALSE)
Probes <- read.table( file = paste0("Res", .Platform$file.sep, "ProbesDescripitonsDedup.csv"), sep = "\t", stringsAsFactors = FALSE, check.names=FALSE)
# sanitize a gene name containing a space
rownames(Genes)[which(rownames(Genes)=="Kanamycin Pos")] <- "Kanamycin_Pos"
#reorder cells
cells_ind <- order(as.numeric(Cells["hpf",])) # order with hpf increasing
# NOTE(review): Genes_nh is used before it is defined in this snippet --
# presumably a no-housekeeping subset of Genes built in the full script; confirm
Genes_nh <- Genes_nh[, cells_ind]
Cells <- Cells[, cells_ind]
#rename cell types, prepare the annotated cell table (type = "<hpf>_<CellType>")
types <- unique(paste0(Cells["hpf",], "_", Cells["CellType",]))
hpf_CellType <- t(data.frame(hpf_CellType = paste0(Cells["hpf",], "_", Cells["CellType",]), row.names = colnames(Cells)))
Cells <- rbind(Cells, hpf_CellType)
# short single-character-ish labels, one per entry of `types` (order-matched)
newTypes <- c("18", "21", "24", "Tl", "30", "W2", "m6", "36", "48", "I", "M", "60", "72")
names(newTypes) <- types
allGenes <- rownames(Genes)
allGenes_nh <- rownames(Genes_nh)
# log-transform expression; +1 avoids log10(0)
logExps <- log10(1+Genes)
logExps_nh <- log10(1+Genes_nh)
# NOTE(review): logExps30 / Cells30 / types30 below are not defined in this
# snippet; the 30-hpf branch presumably relies on code cut from this minimal
# version -- confirm before running
ipmc <- CreateSeuratObject( raw.data = logExps )
ipmc_nh <- CreateSeuratObject( raw.data = logExps_nh)
ipmc30 <- CreateSeuratObject( raw.data = logExps30)
ipmc30_nh <- CreateSeuratObject( raw.data = logExps30_nh)
# attach all per-cell annotation rows as metadata
ipmc <- AddMetaData( object = ipmc, t(Cells), col.name = rownames(Cells) )
ipmc_nh <- AddMetaData( object = ipmc_nh, t(Cells), col.name = rownames(Cells) )
ipmc30 <- AddMetaData( object = ipmc30, t(Cells30), col.name = rownames(Cells) )
ipmc30_nh <- AddMetaData( object = ipmc30_nh, t(Cells30), col.name = rownames(Cells) )
newTypeDF <- data.frame( newType = character(ncol(Cells)), row.names = colnames(Cells) )
newTypeDF30 <- data.frame( newType = character(ncol(Cells30)), row.names = colnames(Cells30) )
cellNamesDF <- data.frame( cellNames = colnames(Cells), row.names = colnames(Cells))
ipmc <- AddMetaData( object = ipmc, newTypeDF, col.name = "newType")
ipmc <- AddMetaData( object = ipmc, cellNamesDF, col.name = "cellNames")
ipmc_nh <- AddMetaData( object = ipmc_nh, newTypeDF, col.name = "newType")
ipmc_nh <- AddMetaData( object = ipmc_nh, cellNamesDF, col.name = "cellNames")
ipmc30 <- AddMetaData( object = ipmc30, newTypeDF30, col.name = "newType")
ipmc30_nh <- AddMetaData( object = ipmc30_nh, newTypeDF30, col.name = "newType")
levels(ipmc@ident) <- newTypes
levels(ipmc30@ident) <- newTypes[types30]
levels(ipmc_nh@ident) <- newTypes
levels(ipmc30_nh@ident) <- newTypes[types30]
# set each cell's identity to its short type label (looked up via newTypes)
ipmc@ident <- as.factor(unlist( lapply( ipmc@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
ipmc_nh@ident <- as.factor(unlist( lapply( ipmc_nh@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
names(ipmc@ident) <- names(ipmc_nh@ident) <- colnames(ipmc@data)
ipmc30@ident <- as.factor(unlist( lapply( ipmc30@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
ipmc30_nh@ident <- as.factor(unlist( lapply( ipmc30_nh@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
names(ipmc30@ident) <- names(ipmc30_nh@ident) <- colnames(ipmc30@data)
# mirror the identities into the newType metadata column
ipmc@meta.data$newType <- ipmc@ident
ipmc_nh@meta.data$newType <- ipmc_nh@ident
ipmc30@meta.data$newType <- ipmc30@ident
ipmc30_nh@meta.data$newType <- ipmc30_nh@ident
ipmc <- ScaleData(ipmc)
ipmc_nh <- ScaleData(ipmc_nh)
ipmc30 <- ScaleData(ipmc30)
ipmc30_nh <- ScaleData(ipmc30_nh)
# grayscale ramp for most stages, with selected populations highlighted
cellColors <- paste0("gray", seq(50+3*length(levels(ipmc_nh@ident)), 50, -3))
cellColors[ which(levels(ipmc_nh@ident) == "I")] = "green3"
cellColors[ which(levels(ipmc_nh@ident) == "M")] = "black"
cellColors[ which(levels(ipmc_nh@ident) == "m6")] = "red"
cellColors[ which(levels(ipmc_nh@ident) == "W2")] = "magenta"
cellColors[ which(levels(ipmc_nh@ident) == "Tl")] = "brown"
# t-SNE on all genes of the no-housekeeping object, colored by the palette above
ipmc_nh <- RunTSNE(ipmc_nh, genes.use = rownames(ipmc_nh@data))
TSNEPlot(ipmc_nh, colors.use = cellColors)
# indices of the two mutant populations
mutants_keep <- which(ipmc_nh@ident %in% c("m6","W2"))
| /seurat2.r | no_license | SevaVigg/DrNCC | R | false | false | 4,654 | r | #This is a minimal version of seurat preparing
#This snippet reads cells from the file Deduplicated.csv and creates _ipmc, the Seurat object. Initial cell types are found in _types variable
#new cell types _newtypes are LETTERS A-H, W, I, M due to the preferrable single letter cell tags by Seurat visualizers.
# Remember that the ipmc@ident stores the results of clustering, so FindCluster destroys it
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here
require(Seurat)
require(methods)
plotDir <- file.path(getwd(), "Plot")
resDir <- file.path(getwd(), "Res")
# expression matrix (genes x cells), cell annotations, and probe annotations
Genes <- read.table( file = paste0("Res", .Platform$file.sep, "NormalizedExTable.csv"), sep = "\t", stringsAsFactors = FALSE, check.names=FALSE )
Cells <- read.table( file = paste0("Res", .Platform$file.sep, "cellDescripitonsDedupQC.csv"), sep = "\t", stringsAsFactors = FALSE, check.names=FALSE)
Probes <- read.table( file = paste0("Res", .Platform$file.sep, "ProbesDescripitonsDedup.csv"), sep = "\t", stringsAsFactors = FALSE, check.names=FALSE)
# sanitize a gene name containing a space
rownames(Genes)[which(rownames(Genes)=="Kanamycin Pos")] <- "Kanamycin_Pos"
#reorder cells
cells_ind <- order(as.numeric(Cells["hpf",])) # order with hpf increasing
# NOTE(review): Genes_nh is used before it is defined in this snippet --
# presumably a no-housekeeping subset of Genes built in the full script; confirm
Genes_nh <- Genes_nh[, cells_ind]
Cells <- Cells[, cells_ind]
#rename cell types, prepare the annotated cell table (type = "<hpf>_<CellType>")
types <- unique(paste0(Cells["hpf",], "_", Cells["CellType",]))
hpf_CellType <- t(data.frame(hpf_CellType = paste0(Cells["hpf",], "_", Cells["CellType",]), row.names = colnames(Cells)))
Cells <- rbind(Cells, hpf_CellType)
# short labels, one per entry of `types` (order-matched)
newTypes <- c("18", "21", "24", "Tl", "30", "W2", "m6", "36", "48", "I", "M", "60", "72")
names(newTypes) <- types
allGenes <- rownames(Genes)
allGenes_nh <- rownames(Genes_nh)
# log-transform expression; +1 avoids log10(0)
logExps <- log10(1+Genes)
logExps_nh <- log10(1+Genes_nh)
# NOTE(review): logExps30 / Cells30 / types30 below are not defined in this
# snippet; the 30-hpf branch presumably relies on code cut from this minimal
# version -- confirm before running
ipmc <- CreateSeuratObject( raw.data = logExps )
ipmc_nh <- CreateSeuratObject( raw.data = logExps_nh)
ipmc30 <- CreateSeuratObject( raw.data = logExps30)
ipmc30_nh <- CreateSeuratObject( raw.data = logExps30_nh)
# attach all per-cell annotation rows as metadata
ipmc <- AddMetaData( object = ipmc, t(Cells), col.name = rownames(Cells) )
ipmc_nh <- AddMetaData( object = ipmc_nh, t(Cells), col.name = rownames(Cells) )
ipmc30 <- AddMetaData( object = ipmc30, t(Cells30), col.name = rownames(Cells) )
ipmc30_nh <- AddMetaData( object = ipmc30_nh, t(Cells30), col.name = rownames(Cells) )
newTypeDF <- data.frame( newType = character(ncol(Cells)), row.names = colnames(Cells) )
newTypeDF30 <- data.frame( newType = character(ncol(Cells30)), row.names = colnames(Cells30) )
cellNamesDF <- data.frame( cellNames = colnames(Cells), row.names = colnames(Cells))
ipmc <- AddMetaData( object = ipmc, newTypeDF, col.name = "newType")
ipmc <- AddMetaData( object = ipmc, cellNamesDF, col.name = "cellNames")
ipmc_nh <- AddMetaData( object = ipmc_nh, newTypeDF, col.name = "newType")
ipmc_nh <- AddMetaData( object = ipmc_nh, cellNamesDF, col.name = "cellNames")
ipmc30 <- AddMetaData( object = ipmc30, newTypeDF30, col.name = "newType")
ipmc30_nh <- AddMetaData( object = ipmc30_nh, newTypeDF30, col.name = "newType")
levels(ipmc@ident) <- newTypes
levels(ipmc30@ident) <- newTypes[types30]
levels(ipmc_nh@ident) <- newTypes
levels(ipmc30_nh@ident) <- newTypes[types30]
# set each cell's identity to its short type label (looked up via newTypes)
ipmc@ident <- as.factor(unlist( lapply( ipmc@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
ipmc_nh@ident <- as.factor(unlist( lapply( ipmc_nh@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
names(ipmc@ident) <- names(ipmc_nh@ident) <- colnames(ipmc@data)
ipmc30@ident <- as.factor(unlist( lapply( ipmc30@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
ipmc30_nh@ident <- as.factor(unlist( lapply( ipmc30_nh@meta.data[ , "hpf_CellType"], function(cell) newTypes[as.character(cell)]) ))
names(ipmc30@ident) <- names(ipmc30_nh@ident) <- colnames(ipmc30@data)
# mirror the identities into the newType metadata column
ipmc@meta.data$newType <- ipmc@ident
ipmc_nh@meta.data$newType <- ipmc_nh@ident
ipmc30@meta.data$newType <- ipmc30@ident
ipmc30_nh@meta.data$newType <- ipmc30_nh@ident
ipmc <- ScaleData(ipmc)
ipmc_nh <- ScaleData(ipmc_nh)
ipmc30 <- ScaleData(ipmc30)
ipmc30_nh <- ScaleData(ipmc30_nh)
# grayscale ramp for most stages, with selected populations highlighted
cellColors <- paste0("gray", seq(50+3*length(levels(ipmc_nh@ident)), 50, -3))
cellColors[ which(levels(ipmc_nh@ident) == "I")] = "green3"
cellColors[ which(levels(ipmc_nh@ident) == "M")] = "black"
cellColors[ which(levels(ipmc_nh@ident) == "m6")] = "red"
cellColors[ which(levels(ipmc_nh@ident) == "W2")] = "magenta"
cellColors[ which(levels(ipmc_nh@ident) == "Tl")] = "brown"
# t-SNE on all genes of the no-housekeeping object, colored by the palette above
ipmc_nh <- RunTSNE(ipmc_nh, genes.use = rownames(ipmc_nh@data))
TSNEPlot(ipmc_nh, colors.use = cellColors)
# indices of the two mutant populations
mutants_keep <- which(ipmc_nh@ident %in% c("m6","W2"))
|
# Anonymous wrapper (RcppDeepState-style fuzz harness, presumably generated):
# logs the argument it was called with into the global `data.env` environment,
# then forwards the call to jiebaR's compiled file_coding routine.
function (file)
{
    # `data.env` must already exist in the global environment; errors otherwise.
    e <- get("data.env", .GlobalEnv)
    # Append this invocation's argument to the file_coding call log.
    e[["file_coding"]][[length(e[["file_coding"]]) + 1]] <- list(file = file)
    # Invoke the native routine registered by the jiebaR package.
    .Call("_jiebaR_file_coding", file)
}
| /valgrind_test_dir/file_coding-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 175 | r | function (file)
{
e <- get("data.env", .GlobalEnv)
e[["file_coding"]][[length(e[["file_coding"]]) + 1]] <- list(file = file)
.Call("_jiebaR_file_coding", file)
}
|
# 3. Optional Plot
# Piecewise-defined f(x):
#   x^2 + 2x + 3   for -4 <  x < 0
#   x + 3          for  0 <= x < 2
#   x^2 + 4x - 7   for  2 <= x < 4
#   NA             for  x <= -4 or x >= 4 (f is undefined there)
#
# Fix: the original assigned the *string* "NA" for out-of-range x, which
# silently coerced the whole result vector to character. We now return a true
# numeric NA. The computation is vectorised instead of a manual while-loop,
# and `plot` gains a default (FALSE) so the function can be used
# programmatically; existing calls that pass `plot` explicitly are unchanged.
#
# Args:
#   x    numeric vector of evaluation points
#   plot if TRUE, draw a scatter plot of (x, f(x))
# Returns: numeric vector of f(x) values, NA outside (-4, 4).
fx <- function(x, plot = FALSE)
{
  y <- rep(NA_real_, length(x))    # undefined outside (-4, 4)
  left  <- x > -4 & x < 0          # first quadratic piece
  mid   <- x >= 0 & x < 2          # linear piece
  right <- x >= 2 & x < 4          # second quadratic piece
  y[left]  <- x[left]^2 + 2*x[left] + 3
  y[mid]   <- x[mid] + 3
  y[right] <- x[right]^2 + 4*x[right] - 7
  if(plot)
  {
    # The logical argument `plot` does not shadow graphics::plot in call
    # position, so this still dispatches to the plotting function.
    plot(x, y)
  }
  return(y)
}
fx(-4:4, TRUE) | /problem 3.R | no_license | yokielove/881new | R | false | false | 488 | r | # 3. Optional Plot
# Piecewise-defined f(x): x^2 + 2x + 3 on (-4, 0); x + 3 on [0, 2);
# x^2 + 4x - 7 on [2, 4); NA for |x| >= 4 where f is undefined.
#
# Fix: the original wrote the *string* "NA" into y for out-of-range x, which
# coerced the entire result vector to character; a real NA_real_ is used now.
# The manual while-loop with hand-incremented i is replaced by a for-loop
# over seq_along(x). `plot` gains a default so explicit callers are unaffected.
fx <- function(x, plot = FALSE)
{
  # Start from NA so out-of-range points need no explicit branch.
  y <- rep(NA_real_, length(x))
  for(i in seq_along(x))
  {
    if(x[i] > -4 && x[i] < 0)
    {
      y[i] <- x[i]^2 + 2*x[i] + 3
    }else if(x[i] >= 0 && x[i] < 2)
    {
      y[i] <- x[i] + 3
    }else if(x[i] >= 2 && x[i] < 4)
    {
      y[i] <- x[i]^2 + 4*x[i] - 7
    }
    # else: x[i] <= -4 or x[i] >= 4 -> leave y[i] as NA
  }
  if(plot)
  {
    plot(x, y)
  }
  return(y)
}
fx(-4:4, TRUE) |
# Vectors, Matrices And Arrays
# ----------------------------
# Code samples transcribed from "Learning R" (Cotton), Chapter 4.
# Meant to be stepped through interactively: several lines below deliberately
# warn or error to demonstrate a point, so source()-ing this file straight
# through will stop at the first error.
# Chapter Goals
# ~~~~~~~~~~~~~
# Vectors
# ~~~~~~~
8.5:4.5 #sequence of numbers from 8.5 down to 4.5
c(1, 1:3, c(5, 8), 13) #values concatenated into single vector
# vector(mode, n) and the mode-named shortcuts below are equivalent ways of
# creating a length-5 vector filled with that mode's "zero" value.
vector("numeric", 5)
vector("complex", 5)
vector("logical", 5)
vector("character", 5)
vector("list", 5)
numeric(5)
complex(5)
logical(5)
character(5)
# Sequences
# ^^^^^^^^^
seq.int(3, 12) #same as 3:12
seq.int(3, 12, 2)
seq.int(0.1, 0.01, -0.01)
n <- 0
1:n #not what you might expect!
seq_len(n)
pp <- c("Peter", "Piper", "picked", "a", "peck", "of", "pickled", "peppers")
for(i in seq_along(pp)) print(pp[i])
# Lengths
# ^^^^^^^
length(1:5)
length(c(TRUE, FALSE, NA))
sn <- c("Sheena", "leads", "Sheila", "needs")
length(sn)
nchar(sn)
poincare <- c(1, 0, 0, 0, 2, 0, 2, 0) #See http://oeis.org/A051629
# Shrinking a vector discards elements; growing it back pads with NA.
length(poincare) <- 3
poincare
length(poincare) <- 8
poincare
# Names
# ^^^^^
c(apple = 1, banana = 2, "kiwi fruit" = 3, 4)
x <- 1:4
names(x) <- c("apple", "bananas", "kiwi fruit", "")
x
names(x)
names(1:4)
# Indexing Vectors
# ^^^^^^^^^^^^^^^^
(x <- (1:5) ^ 2)
x[c(1, 3, 5)]
x[c(-2, -4)]
x[c(TRUE, FALSE, TRUE, FALSE, TRUE)]
names(x) <- c("one", "four", "nine", "sixteen", "twenty five")
x[c("one", "nine", "twenty five")]
# The next line errors: positive and negative indices cannot be mixed.
x[c(1, -1)] #This doesn't make sense!
x[c(1, NA, 5)]
x[c(TRUE, FALSE, NA, FALSE, TRUE)]
# The next line also errors: NA cannot accompany negative indices.
x[c(-2, NA)] #This doesn't make sense either!
x[6]
x[1.9] #1.9 rounded to 1
x[-1.9] #-1.9 rounded to -1
x[]
which(x > 10)
which.min(x)
which.max(x)
# Vector Recycling and Repetition
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1:5 + 1
1 + 1:5
# Recycling is silent when the longer length is a multiple of the shorter
# (5 into 15), but warns otherwise (5 into 7).
1:5 + 1:15
1:5 + 1:7
rep(1:5, 3)
rep(1:5, each = 3)
rep(1:5, times = 1:5)
rep(1:5, length.out = 7)
rep.int(1:5, 3) #the same as rep(1:5, 3)
rep_len(1:5, 13)
# Matrices and Arrays
# ~~~~~~~~~~~~~~~~~~~
# Creating Arrays and Matrices
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(three_d_array <- array(
  1:24,
  dim = c(4, 3, 2),
  dimnames = list(
    c("one", "two", "three", "four"),
    c("ein", "zwei", "drei"),
    c("un", "deux")
  )
))
class(three_d_array)
(a_matrix <- matrix(
  1:12,
  nrow = 4, #ncol = 3 works the same
  dimnames = list(
    c("one", "two", "three", "four"),
    c("ein", "zwei", "drei")
  )
))
class(a_matrix)
(two_d_array <- array(
  1:12,
  dim = c(4, 3),
  dimnames = list(
    c("one", "two", "three", "four"),
    c("ein", "zwei", "drei")
  )
))
identical(two_d_array, a_matrix)
class(two_d_array)
matrix(
  1:12,
  nrow = 4,
  byrow = TRUE,
  dimnames = list(
    c("one", "two", "three", "four"),
    c("ein", "zwei", "drei")
  )
)
# Rows, Columns and Dimensions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
dim(three_d_array)
dim(a_matrix)
nrow(a_matrix)
ncol(a_matrix)
nrow(three_d_array)
ncol(three_d_array)
length(three_d_array)
length(a_matrix)
# NOTE(review): reshaping with dim<- below drops a_matrix's dimnames, which
# makes the name-based indexing in the "Indexing Arrays" section error if
# these lines are run strictly in order.
dim(a_matrix) <- c(6, 2)
a_matrix
identical(nrow(a_matrix), NROW(a_matrix))
identical(ncol(a_matrix), NCOL(a_matrix))
recaman <- c(0, 1, 3, 6, 2, 7, 13, 20) #See http://oeis.org/A005132
# NOTE(review): the calls below query x (a plain vector, so nrow/ncol/dim are
# NULL while NROW/NCOL treat it as a column); recaman appears unused here.
nrow(x)
NROW(x)
ncol(x)
NCOL(x)
dim(x) #There is no DIM(X)
# Row, Column and Dimension Names
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
rownames(a_matrix)
colnames(a_matrix)
dimnames(a_matrix)
rownames(three_d_array)
colnames(three_d_array)
dimnames(three_d_array)
# Indexing Arrays
# ^^^^^^^^^^^^^^^
a_matrix[1, c("zwei", "drei")] #elements in 1st row, 2nd and 3rd columns
a_matrix[1, ] #all the first row
a_matrix[, c("zwei", "drei")] #all the second and third columns
# Combining Matrices
# ^^^^^^^^^^^^^^^^^^
(another_matrix <- matrix(
  seq.int(2, 24, 2),
  nrow = 4,
  dimnames = list(
    c("five", "six", "seven", "eight"),
    c("vier", "funf", "sechs")
  )
))
# NOTE(review): if a_matrix was reshaped to 6x2 above, cbind/rbind with this
# 4x3 matrix error on non-matching dimensions; c() still works (flattens).
c(a_matrix, another_matrix)
cbind(a_matrix, another_matrix)
rbind(a_matrix, another_matrix)
# Array Arithmetic
# ^^^^^^^^^^^^^^^^
a_matrix + another_matrix
a_matrix * another_matrix
(another_matrix <- matrix(1:12, nrow = 2))
a_matrix + another_matrix #adding non-conformable matrices throws an error
t(a_matrix)
a_matrix %*% t(a_matrix) #inner multiplication
1:3 %o% 4:6 #outer multiplication
outer(1:3, 4:6) #same
(m <- matrix(c(1, 0, 1, 5, -3, 1, 2, 4, 7), nrow = 3))
# m ^ -1 is elementwise reciprocal, NOT the matrix inverse; solve() inverts.
m ^ -1
(inverse_of_m <- solve(m))
m %*% inverse_of_m
# Summary
# ~~~~~~~
# Test Your Knowledge: Quiz
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# Test Your Knowledge: Exercises
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| /XRPINPClassFiles/code_samples_rproj/Cotton_Learning_R_Chapter4_Vecmatarr_Code_Samples.r | no_license | CostelloTechnicalConsulting/XRPINPClassFiles | R | false | false | 4,582 | r | # Vectors, Matrices And Arrays
# ----------------------------
# Chapter Goals
# ~~~~~~~~~~~~~
# Vectors
# ~~~~~~~
8.5:4.5 #sequence of numbers from 8.5 down to 4.5
c(1, 1:3, c(5, 8), 13) #values concatenated into single vector
vector("numeric", 5)
vector("complex", 5)
vector("logical", 5)
vector("character", 5)
vector("list", 5)
numeric(5)
complex(5)
logical(5)
character(5)
# Sequences
# ^^^^^^^^^
seq.int(3, 12) #same as 3:12
seq.int(3, 12, 2)
seq.int(0.1, 0.01, -0.01)
n <- 0
1:n #not what you might expect!
seq_len(n)
pp <- c("Peter", "Piper", "picked", "a", "peck", "of", "pickled", "peppers")
for(i in seq_along(pp)) print(pp[i])
# Lengths
# ^^^^^^^
length(1:5)
length(c(TRUE, FALSE, NA))
sn <- c("Sheena", "leads", "Sheila", "needs")
length(sn)
nchar(sn)
poincare <- c(1, 0, 0, 0, 2, 0, 2, 0) #See http://oeis.org/A051629
length(poincare) <- 3
poincare
length(poincare) <- 8
poincare
# Names
# ^^^^^
c(apple = 1, banana = 2, "kiwi fruit" = 3, 4)
x <- 1:4
names(x) <- c("apple", "bananas", "kiwi fruit", "")
x
names(x)
names(1:4)
# Indexing Vectors
# ^^^^^^^^^^^^^^^^
(x <- (1:5) ^ 2)
x[c(1, 3, 5)]
x[c(-2, -4)]
x[c(TRUE, FALSE, TRUE, FALSE, TRUE)]
names(x) <- c("one", "four", "nine", "sixteen", "twenty five")
x[c("one", "nine", "twenty five")]
x[c(1, -1)] #This doesn't make sense!
x[c(1, NA, 5)]
x[c(TRUE, FALSE, NA, FALSE, TRUE)]
x[c(-2, NA)] #This doesn't make sense either!
x[6]
x[1.9] #1.9 rounded to 1
x[-1.9] #-1.9 rounded to -1
x[]
which(x > 10)
which.min(x)
which.max(x)
# Vector Recycling and Repetition
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
1:5 + 1
1 + 1:5
1:5 + 1:15
1:5 + 1:7
rep(1:5, 3)
rep(1:5, each = 3)
rep(1:5, times = 1:5)
rep(1:5, length.out = 7)
rep.int(1:5, 3) #the same as rep(1:5, 3)
rep_len(1:5, 13)
# Matrices and Arrays
# ~~~~~~~~~~~~~~~~~~~
# Creating Arrays and Matrices
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
(three_d_array <- array(
1:24,
dim = c(4, 3, 2),
dimnames = list(
c("one", "two", "three", "four"),
c("ein", "zwei", "drei"),
c("un", "deux")
)
))
class(three_d_array)
(a_matrix <- matrix(
1:12,
nrow = 4, #ncol = 3 works the same
dimnames = list(
c("one", "two", "three", "four"),
c("ein", "zwei", "drei")
)
))
class(a_matrix)
(two_d_array <- array(
1:12,
dim = c(4, 3),
dimnames = list(
c("one", "two", "three", "four"),
c("ein", "zwei", "drei")
)
))
identical(two_d_array, a_matrix)
class(two_d_array)
matrix(
1:12,
nrow = 4,
byrow = TRUE,
dimnames = list(
c("one", "two", "three", "four"),
c("ein", "zwei", "drei")
)
)
# Rows, Columns and Dimensions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
dim(three_d_array)
dim(a_matrix)
nrow(a_matrix)
ncol(a_matrix)
nrow(three_d_array)
ncol(three_d_array)
length(three_d_array)
length(a_matrix)
dim(a_matrix) <- c(6, 2)
a_matrix
identical(nrow(a_matrix), NROW(a_matrix))
identical(ncol(a_matrix), NCOL(a_matrix))
recaman <- c(0, 1, 3, 6, 2, 7, 13, 20) #See http://oeis.org/A005132
nrow(x)
NROW(x)
ncol(x)
NCOL(x)
dim(x) #There is no DIM(X)
# Row, Column and Dimension Names
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
rownames(a_matrix)
colnames(a_matrix)
dimnames(a_matrix)
rownames(three_d_array)
colnames(three_d_array)
dimnames(three_d_array)
# Indexing Arrays
# ^^^^^^^^^^^^^^^
a_matrix[1, c("zwei", "drei")] #elements in 1st row, 2nd and 3rd columns
a_matrix[1, ] #all the first row
a_matrix[, c("zwei", "drei")] #all the second and third columns
# Combining Matrices
# ^^^^^^^^^^^^^^^^^^
(another_matrix <- matrix(
seq.int(2, 24, 2),
nrow = 4,
dimnames = list(
c("five", "six", "seven", "eight"),
c("vier", "funf", "sechs")
)
))
c(a_matrix, another_matrix)
cbind(a_matrix, another_matrix)
rbind(a_matrix, another_matrix)
# Array Arithmetic
# ^^^^^^^^^^^^^^^^
a_matrix + another_matrix
a_matrix * another_matrix
(another_matrix <- matrix(1:12, nrow = 2))
a_matrix + another_matrix #adding non-conformable matrices throws an error
t(a_matrix)
a_matrix %*% t(a_matrix) #inner multiplication
1:3 %o% 4:6 #outer multiplication
outer(1:3, 4:6) #same
(m <- matrix(c(1, 0, 1, 5, -3, 1, 2, 4, 7), nrow = 3))
m ^ -1
(inverse_of_m <- solve(m))
m %*% inverse_of_m
# Summary
# ~~~~~~~
# Test Your Knowledge: Quiz
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# Test Your Knowledge: Exercises
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
# Detect mixture components whose hard-assignment count has dropped below
# smallestClN and, while any such collapsed/empty component exists, reseed its
# posteriors with randomly chosen individuals so the EM algorithm can recover.
#
# Args:
#   K            number of components
#   N            number of individuals (rows of FZY)
#   FZY          N x K matrix of posterior membership probabilities
#   smallestClN  minimum allowed hard-assignment count per component
#   EMiteration  current EM iteration (only used in the warning text)
#   crisp        if TRUE, posteriors are collapsed to 0/1 indicator rows
# Returns (invisibly): list(FZY, resetCl = indices of reset clusters,
#   iterationReset = whether any reset happened).
checkComponentsCollapsed <- function(K, N, FZY, smallestClN, EMiteration, crisp = FALSE)
{
  # Components whose hard (argmax) membership count is below the threshold.
  collapsed_clusters <- function(post) {
    hard_assign <- factor(apply(post, 1, which.max), levels = as.character(1:K))
    which(table(hard_assign) < smallestClN)
  }
  resetCl <- NULL
  tooSmall <- collapsed_clusters(FZY)
  while (length(tooSmall) > 0) {
    # Warn on the console and append the same text to a persistent log file.
    warn_msg <- c("\n Warning: A single/empty cluster occured in EM-iteration",
                  EMiteration, ", memberships and Sigma reset \n")
    cat(warn_msg)
    cat(warn_msg, file = "EMwarnings.txt", append = TRUE)
    resetCl <- unique(c(resetCl, tooSmall))
    # Give smallestClN randomly drawn individuals a posterior just above 1 in
    # each collapsed cluster so they dominate after renormalisation.
    for (clust in tooSmall) {
      FZY[sample(1:N, smallestClN, replace = FALSE), clust] <- 1.01
    }
    # Renormalise so every row of posteriors sums to 1 again.
    FZY <- t(scale(t(FZY), center = FALSE, scale = rowSums(FZY)))
    if (crisp) {
      # Crisp mode: replace each row with a 0/1 indicator of its argmax.
      classification <- apply(FZY, 1, which.max)
      for (indv in 1:N) {
        FZY[indv, ] <- rep(0, K)
        FZY[indv, classification[indv]] <- 1
      }
    }
    tooSmall <- collapsed_clusters(FZY)
  }
  invisible(list(FZY = FZY, resetCl = resetCl,
                 iterationReset = as.logical(length(resetCl))))
}
{
resetCl = NULL
ComponentColapsedOntoSinglePoint = which(table(factor(apply(FZY, 1, which.max), levels = as.character(1:K))) < smallestClN)
# ComponentColapsedOntoSinglePoint is true if cluster contains less than smallestClN of people
while (as.logical(length(ComponentColapsedOntoSinglePoint)))
{ # If one cluster is empty or contains less than smallestClN: ressample everyone
cat(c("\n Warning: A single/empty cluster occured in EM-iteration",
EMiteration, ", memberships and Sigma reset \n"))
cat(c("\n Warning: A single/empty cluster occured in EM-iteration",
EMiteration, ", memberships and Sigma reset \n"), file = "EMwarnings.txt", append = TRUE)
resetCl = unique(c(resetCl, ComponentColapsedOntoSinglePoint))
for (clust in ComponentColapsedOntoSinglePoint)
{
# FZY[order(FZY[ , clust], decreasing = TRUE)[1:smallestClN] , clust] = 1 + 1e-100 # the highest posteriors in the empty cluster are set to 1
FZY[sample(1:N, smallestClN, replace = FALSE), clust] = 1.01
}
FZY = t(scale(t(FZY), center = FALSE, scale = rowSums(FZY))) # Scale posteriors so they sum to 1 again
if (crisp)
{
classification = apply(FZY, 1, which.max)
for (indv in 1:N)
{
FZY[indv, ] = rep(0, K)
FZY[indv, classification[indv]] = 1
}
}
ComponentColapsedOntoSinglePoint = which(table(factor(apply(FZY, 1, which.max), levels = as.character(1:K))) < smallestClN)
}
invisible(list(FZY = FZY, resetCl = resetCl,
iterationReset = as.logical(length(resetCl))))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/struct.R
\name{structList}
\alias{structList}
\title{Constructor for a structList object}
\usage{
structList(...)
}
\arguments{
\item{...}{a list of a \code{\link{struct}} objects}
}
\description{
The structList class is a container for storing a collection
of struct objects.
}
| /man/structList.Rd | no_license | italo-granato/starmie | R | false | true | 359 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/struct.R
\name{structList}
\alias{structList}
\title{Constructor for a structList object}
\usage{
structList(...)
}
\arguments{
\item{...}{a list of a \code{\link{struct}} objects}
}
\description{
The structList class is a container for storing a collection
of struct objects.
}
|
\alias{gtkToggleButtonGetActive}
\name{gtkToggleButtonGetActive}
\title{gtkToggleButtonGetActive}
\description{Queries a \code{\link{GtkToggleButton}} and returns its current state. Returns \code{TRUE} if
the toggle button is pressed in and \code{FALSE} if it is raised.}
\usage{gtkToggleButtonGetActive(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkToggleButton}}] a \code{\link{GtkToggleButton}}.}}
\value{[logical] a \code{logical} value.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkToggleButtonGetActive.Rd | no_license | cran/RGtk2.10 | R | false | false | 525 | rd | \alias{gtkToggleButtonGetActive}
\name{gtkToggleButtonGetActive}
\title{gtkToggleButtonGetActive}
\description{Queries a \code{\link{GtkToggleButton}} and returns its current state. Returns \code{TRUE} if
the toggle button is pressed in and \code{FALSE} if it is raised.}
\usage{gtkToggleButtonGetActive(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkToggleButton}}] a \code{\link{GtkToggleButton}}.}}
\value{[logical] a \code{logical} value.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Choose marker genes for each cell type by one-vs-rest differential
# expression using limma (lmFit/eBayes/topTable).
#
# Args:
#   pure_all   expression matrix of pure samples (genes x samples)
#   CellType   named list; each element holds the column indices of pure_all
#              belonging to one cell type
#   nMarkCT    number of markers to select per cell type
#   chooseSig  if TRUE, prefer markers with P.Value < 0.05, falling back to
#              the top-ranked genes when none are significant
#   verbose    print progress messages?
# Returns: a named list (one element per cell type) of marker gene names.
#
# Fixes vs. the original:
#  * In the no-significant-markers fallback, deduplication matched candidate
#    names against the SelMarker *list* itself (which never matches), so the
#    same marker could be assigned to several cell types; it now matches
#    against unlist(SelMarker) like every other branch.
#  * head() replaces [1:n] subsetting so short candidate lists are returned
#    as-is instead of being padded with NA names.
#  * The fallback message had missing spaces around the marker count.
ChooseMarker <- function(pure_all, CellType,
                         nMarkCT = 10,
                         chooseSig = FALSE,
                         verbose = TRUE) {
    K <- length(CellType)
    SelMarker <- list()
    for(k in 1:K) {
        # One-vs-rest design: 1 for samples of cell type k, 0 for the rest.
        desn <- rep(0, ncol(pure_all))
        desn[CellType[[k]]] <- 1
        fit <- lmFit(pure_all, design = desn)
        fit <- eBayes(fit)
        res <- topTable(fit, number = nMarkCT*5)
        # Candidate markers are the up-regulated genes (positive logFC).
        bestRes2 <- res[res$logFC>0,]
        if(chooseSig) {
            # Significant candidates not already claimed by an earlier type.
            tmpMar <- row.names(bestRes2[which(bestRes2$P.Value<0.05),])
            tmpMar2 <- tmpMar[is.na(match(tmpMar, unlist(SelMarker)))]
            tt <- length(tmpMar2)
            if(tt == 0) {
                if(verbose) {
                    message(paste0("Cell type ", k, " has no significant markers."))
                    message(paste0("Switch to selecting top ", nMarkCT, " markers."))
                }
                # Fall back to the top-ranked genes regardless of significance.
                tmpMar <- head(row.names(bestRes2), 5*nMarkCT)
                # Deduplicate against the flattened set of already-selected
                # markers (bug fix: was match(tmpMar, SelMarker)).
                tmpMar2 <- tmpMar[is.na(match(tmpMar, unlist(SelMarker)))]
                SelMarker[[k]] <- head(tmpMar2, nMarkCT)
            } else {
                if(tt < nMarkCT) {
                    if(verbose) {
                        message(paste0("Cell type ", k, " has ", tt, " significant markers."))
                        message(paste0("Select all of them for cell type ", k, "."))
                    }
                    SelMarker[[k]] <- tmpMar2
                } else {
                    if(verbose) {
                        message(paste0("Cell type ", k, " has ", tt, " significant markers."))
                        message(paste0("Select the top ", nMarkCT,
                                       " markers for cell type ", k, "."))
                    }
                    SelMarker[[k]] <- tmpMar2[1:nMarkCT]
                }
            }
        } else if(!chooseSig) {
            # No significance filter: take the top candidates not yet claimed.
            tmpMar <- head(row.names(bestRes2), 5*nMarkCT)
            tmpMar2 <- tmpMar[is.na(match(tmpMar, unlist(SelMarker)))]
            SelMarker[[k]] <- head(tmpMar2, nMarkCT)
        }
    }
    names(SelMarker) <- names(CellType)
    return(SelMarker)
}
| /R/ChooseMarker.R | no_license | ziyili20/TOAST | R | false | false | 2,138 | r | ChooseMarker <- function(pure_all, CellType,
nMarkCT = 10,
chooseSig = FALSE,
verbose = TRUE) {
K <- length(CellType)
SelMarker <- list()
for(k in 1:K) {
desn <- rep(0, ncol(pure_all))
desn[CellType[[k]]] <- 1
fit <- lmFit(pure_all, design = desn)
fit <- eBayes(fit)
res <- topTable(fit, number = nMarkCT*5)
bestRes2 <- res[res$logFC>0,]
if(chooseSig) {
tmpMar <- row.names(bestRes2[which(bestRes2$P.Value<0.05),])
tmpMar2 <- tmpMar[is.na(match(tmpMar, unlist(SelMarker)))]
tt <- length(tmpMar2)
if(tt == 0) {
if(verbose) {
message(paste0("Cell type ", k, " has no significant markers."))
message(paste0("Switch to selecting top", nMarkCT, "markers."))
}
tmpMar <- row.names(bestRes2)[1:(5*nMarkCT)]
tmpMar2 <- tmpMar[is.na(match(tmpMar, SelMarker))]
SelMarker[[k]] <- tmpMar2[1:nMarkCT]
} else {
if(tt < nMarkCT) {
if(verbose) {
message(paste0("Cell type ", k, " has ", tt, " significant markers."))
message(paste0("Select all of them for cell type ", k, "."))
}
SelMarker[[k]] <- tmpMar2
} else {
if(verbose) {
message(paste0("Cell type ", k, " has ", tt, " significant markers."))
message(paste0("Select the top ", nMarkCT,
" markers for cell type ", k, "."))
}
SelMarker[[k]] <- tmpMar2[1:nMarkCT]
}
}
} else if(!chooseSig) {
tmpMar <- row.names(bestRes2)[1:(5*nMarkCT)]
tmpMar2 <- tmpMar[is.na(match(tmpMar, unlist(SelMarker)))]
SelMarker[[k]] <- tmpMar2[1:nMarkCT]
}
}
names(SelMarker) <- names(CellType)
return(SelMarker)
}
|
# Plot 4 of the Exploratory Data Analysis course project: a 2x2 panel of
# household power readings for 2007-02-01 and 2007-02-02, written to plot4.png.
# Note: for all scripts I'm assuming the data has already been downloaded,
# unzipped, and placed in a data folder locally. This is in line with the assignment.
# Fixes vs. the original: stray empty trailing argument in read.csv2 removed,
# `=` assignment replaced with `<-`, and the six copy-pasted factor->numeric
# conversions consolidated into one lapply over the value columns.

## Read in data (the file is semicolon-separated, which read.csv2 expects)
setwd("~/Documents/Helpful Docs/Coursera/ExploratoryDataAnalysis")
power_data <- read.csv2("./data/household_power_consumption.txt")

## Cut down data to 2007-02-01 and 2007-02-02
power_data$Date <- as.Date(power_data$Date, format = "%d/%m/%Y")
keep <- power_data$Date >= as.Date("2007-02-01") &
  power_data$Date <= as.Date("2007-02-02")
power_data_cut_down <- power_data[keep, ]

## Make a datetime field for use in plotting (paste coerces Date to character)
power_data_cut_down$DateTime <- strptime(
  paste(power_data_cut_down$Date, power_data_cut_down$Time),
  format = "%Y-%m-%d %H:%M:%S"
)

## Transform the value fields into numeric for plotting; as.character first
## so factor columns are converted by label rather than by level code
value_cols <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3",
                "Voltage", "Global_active_power", "Global_reactive_power")
power_data_cut_down[value_cols] <- lapply(
  power_data_cut_down[value_cols],
  function(col) as.numeric(as.character(col))
)

## Plot the data to PNG format
png(file = "plot4.png")
par(mfcol = c(2, 2))  # build 2 rows and 2 cols, filling cols first
# upper left plot: global active power over time
plot(power_data_cut_down$DateTime, power_data_cut_down$Global_active_power,
     ylab = "Global Active Power", xlab = "",
     main = "", type = "l")
# lower left plot: the three sub-metering series plus a legend
plot(power_data_cut_down$DateTime, power_data_cut_down$Sub_metering_1,
     ylab = "Energy sub metering", xlab = "",
     main = "", type = "l")
lines(power_data_cut_down$DateTime, power_data_cut_down$Sub_metering_2, col = "red")
lines(power_data_cut_down$DateTime, power_data_cut_down$Sub_metering_3, col = "blue")
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), bty = "n",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# upper right plot: voltage
plot(power_data_cut_down$DateTime, power_data_cut_down$Voltage,
     ylab = "Voltage", xlab = "datetime",
     main = "", type = "l")
# lower right plot: global reactive power
plot(power_data_cut_down$DateTime, power_data_cut_down$Global_reactive_power,
     ylab = "Global_reactive_power", xlab = "datetime",
     main = "", type = "l")
dev.off()
| /plot4.R | no_license | rlapointe/ExData_Plotting1 | R | false | false | 2,543 | r | # Note: for all scripts I'm assuming the data has already been downloaded,
# unzipped, and placed in a data folder locally. This is in line with the assignment.
## Read in data
setwd("~/Documents/Helpful Docs/Coursera/ExploratoryDataAnalysis")
power_data = read.csv2("./data/household_power_consumption.txt", )
# Cut down data to 2007-02-01 and 2007-02-02
power_data$Date = as.Date(power_data$Date, format = '%d/%m/%Y')
power_data_cut_down = power_data[power_data$Date >= as.Date('2007-02-01') & power_data$Date <= as.Date('2007-02-02'),]
# Make a datetime field for use in plotting
power_data_cut_down$DateTime = paste(power_data_cut_down$Date, power_data_cut_down$Time)
power_data_cut_down$DateTime = strptime(power_data_cut_down$DateTime, format = '%Y-%m-%d %H:%M:%S')
# Transform the value fields into numeric for plotting
power_data_cut_down$Sub_metering_1 = as.numeric(as.character(power_data_cut_down$Sub_metering_1))
power_data_cut_down$Sub_metering_2 = as.numeric(as.character(power_data_cut_down$Sub_metering_2))
power_data_cut_down$Sub_metering_3 = as.numeric(as.character(power_data_cut_down$Sub_metering_3))
power_data_cut_down$Voltage = as.numeric(as.character(power_data_cut_down$Voltage))
power_data_cut_down$Global_active_power = as.numeric(as.character(power_data_cut_down$Global_active_power))
power_data_cut_down$Global_reactive_power = as.numeric(as.character(power_data_cut_down$Global_reactive_power))
# Plot the data to PNG format
png(file = "plot4.png")
par(mfcol = c(2,2)) # build 2 rows and 2 cols, filling cols first
#upper left plot
plot(power_data_cut_down$DateTime, power_data_cut_down$Global_active_power,
ylab = "Global Active Power", xlab = "",
main = "", type="l")
#lower left plot
plot(power_data_cut_down$DateTime, power_data_cut_down$Sub_metering_1,
ylab = "Energy sub metering", xlab = "",
main = "", type="l")
lines(power_data_cut_down$DateTime, power_data_cut_down$Sub_metering_2, col = 'red')
lines(power_data_cut_down$DateTime, power_data_cut_down$Sub_metering_3, col = 'blue')
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), bty = "n",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#upper right plot
plot(power_data_cut_down$DateTime, power_data_cut_down$Voltage,
ylab = "Voltage", xlab = "datetime",
main = "", type="l")
#lower right plot
plot(power_data_cut_down$DateTime, power_data_cut_down$Global_reactive_power,
ylab = "Global_reactive_power", xlab = "datetime",
main = "", type="l")
dev.off()
|
#' Parses results from an xml object downloaded from clinicaltrials.gov
#'
#' Results of a clinical study are stored in a particular way. This reads and
#' organizes the information and returns it as a list of dataframes. Returns a
#' list of \code{NULL} elements if the xml has no \code{clinical_results} node.
#'
#' @param parsed A parsed XML object, as returned by \code{XML::xmlParse}
#' @keywords Internal
#'
#' @return A list of \code{data.frame}s, participant flow, baseline data,
#'   outcome results
#'
gather_results <- function(parsed){
  # Bail out early with an all-NULL result when the study has no
  # clinical_results section (the failed lookup is caught, not propagated).
  check <- tryCatch(parsed[["//clinical_results"]], error = function(e) {
    return(NULL)
  })
  if(is.null(check)) return(list(
    participant_flow = NULL,
    baseline_data = NULL,
    outcome_data = NULL
  ))
  this_nct_id <- XML::xmlValue(parsed[["//nct_id"]])
  ## participant flow
  # group_id -> arm-title lookup for the participant-flow section.
  gp_look <- get_group_lookup(parsed, "//participant_flow/group_list")
  period <- parsed["//period_list/period"]
  # One row per (period, milestone, group): the milestone title plus the
  # attributes of each <participants> node (presumably per-group counts --
  # schema assumed from clinicaltrials.gov result XML, confirm against feed).
  flow_table <- do.call(plyr::rbind.fill, XML::xmlApply(period, function(node){
    cbind(
      title = XML::xmlValue(node[["title"]]),
      do.call(plyr::rbind.fill, XML::xmlApply(node[["milestone_list"]], function(n0){
        cbind(status = XML::xmlValue(n0[["title"]]),
              data.frame(t(XML::xmlSApply(n0[["participants_list"]], XML::xmlAttrs)), stringsAsFactors = FALSE, row.names = 1:length(gp_look)))
      }))
    )
  }))
  flow_table$arm <- gp_look[flow_table$group_id]
  flow_table$nct_id <- this_nct_id
  ## baseline
  gp_look <- get_group_lookup(parsed, "//baseline/group_list")
  measures <- parsed[["//baseline/measure_list"]]
  # One block of rows per baseline measure; categories (or classes) become
  # "subtitle" rows, and measurement attributes become columns.
  baseline_table <- do.call(plyr::rbind.fill, XML::xmlApply(measures, function(node){
    #outer most level: titles and units
    lank <- XML::xmlSApply(node, function(n){
      # category_list -> return sub-titles
      if(XML::xmlName(n) == "category_list"){
        do.call(plyr::rbind.fill, XML::xmlApply(n, function(n0){
          tmpRes <- XML::xmlApply(n0[["measurement_list"]], function(x){
            as.data.frame(t(XML::xmlAttrs(x)), stringsAsFactors = FALSE)
          })
          ResAdd <- do.call(plyr::rbind.fill, tmpRes)
          data.frame(
            cbind(
              subtitle = XML::xmlValue(n0),
              ResAdd,
              stringsAsFactors = FALSE),
            row.names = NULL, stringsAsFactors = FALSE)
        }))
      } else if(XML::xmlName(n) == "class_list"){
        # class_list nests one level deeper: class title -> category ->
        # measurement_list; only the first category of each class is read here.
        do.call(plyr::rbind.fill, XML::xmlApply(n, function(n0){
          subtitle <- XML::xmlValue(n0[["title"]])
          tmpRes <- XML::xmlApply(n0[["category_list"]][["category"]][["measurement_list"]], function(x){
            as.data.frame(t(XML::xmlAttrs(x)), stringsAsFactors = FALSE)
          })
          ResAdd <- do.call(plyr::rbind.fill, tmpRes)
          data.frame(
            cbind(
              subtitle = subtitle,
              ResAdd,
              stringsAsFactors = FALSE),
            row.names = NULL, stringsAsFactors = FALSE)
        }))
      } else {
        XML::xmlValue(n)
      }
    })
    # Treat class_list results the same as category_list results, then bind
    # the scalar measure-level fields (title, units, ...) to every row.
    names(lank)[names(lank) == "class_list"] <- "category_list"
    target <- lank$category_list
    fillout <- lank[names(lank) != "category_list"]
    cbind(fillout, target)
  }))
  baseline_table$arm <- gp_look[baseline_table$group_id]
  baseline_table$nct_id <- this_nct_id
  ## outcomes
  #parsed_out <- xml2::xml_find_all(x, ".//outcome")
  # Each <outcome> gets its own group lookup and contributes a measures
  # table and/or an analysis table; the two are row-bound per outcome.
  all_results_list <- XML::xmlApply(parsed[["//clinical_results/outcome_list"]], function(parsed_out){
    gp_look <- get_group_lookup(parsed_out, "group_list")
    measures <- parsed_out[["measure_list"]]
    analysis <- parsed_out[["analysis_list"]]
    # Scalar outcome-level fields (title, description, ...); the three list
    # children are handled separately below.
    results_titles <- XML::xmlApply(parsed_out, function(node){
      if(XML::xmlName(node) %in% c("group_list", "measure_list", "analysis_list")) return(NULL) else {
        XML::xmlValue(node)
      }
    })
    if(!is.null(measures)) {
      results_table <- do.call(plyr::rbind.fill, XML::xmlApply(measures, function(node){
        #outer most level: titles and units
        lank <- XML::xmlSApply(node, function(n){
          # category_list -> return sub-titles
          if(XML::xmlName(n) == "category_list"){
            do.call(plyr::rbind.fill, XML::xmlApply(n, function(n0){
              data.frame(
                cbind(
                  subtitle = XML::xmlValue(n0),
                  t(XML::xmlSApply(n0[["measurement_list"]], XML::xmlAttrs)),
                  stringsAsFactors = FALSE),
                row.names = NULL, stringsAsFactors = FALSE)
            }))
          } else {
            XML::xmlValue(n)
          }
        })
        target <- lank$category_list
        fillout <- lank[names(lank) != "category_list"]
        cbind(fillout, target)
      }))
      results_table$arm <- gp_look[results_table$group_id]
      measures_table <- cbind(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")],
                              results_table)
    } else measures_table <- data.frame(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")])
    if(!is.null(analysis)){
      # Statistical analyses: one row per <analysis>, with group_id_list
      # expanded to one row per compared group.
      analysis_table <- do.call(plyr::rbind.fill, XML::xmlApply(analysis, function(node){
        lank <- as.data.frame(XML::xmlApply(node, function(n){
          if(XML::xmlName(n) == "group_id_list"){
            data.frame(group_id = XML::xmlSApply(n, XML::xmlValue), stringsAsFactors = FALSE)
          } else {
            tmp <- data.frame(XML::xmlValue(n), stringsAsFactors = FALSE)
            colnames(tmp) <- XML::xmlName(n)
            tmp
          }
        }), stringsAsFactors = FALSE)
      }))
      analysis_table$arm <- gp_look[analysis_table$group_id]
      analysis_table <- cbind(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")],
                              analysis_table)
    } else analysis_table <- data.frame(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")])
    # Return whichever tables exist for this outcome (both, if present).
    if(is.null(analysis)){
      measures_table
    } else if(is.null(measures)){
      analysis_table
    } else {
      plyr::rbind.fill(measures_table, analysis_table)
    }
  })
  final_outcome_table <- do.call(plyr::rbind.fill, all_results_list)
  final_outcome_table$nct_id <- this_nct_id
  list(
    participant_flow = flow_table,
    baseline_data = baseline_table,
    outcome_data = final_outcome_table
  )
}
## group labels are stored as key: values but only referred to in results as
## keys. This makes a lookup vector.
get_group_lookup <- function(parsed, xpath) {
  # Build a named character vector mapping each group_id key to its
  # human-readable title; returns NULL when the node is absent.
  groups <- tryCatch(parsed[[xpath]], error = function(e) NULL)
  if (is.null(groups)) {
    return(NULL)
  }
  # One column per group: its attributes followed by the <title> text.
  per_group <- XML::xmlSApply(groups, function(node) {
    c(XML::xmlAttrs(node), XML::xmlValue(XML::xmlChildren(node)$title))
  })
  lookup_df <- as.data.frame(t(per_group), stringsAsFactors = FALSE)
  lookup <- lookup_df[, 2]
  names(lookup) <- lookup_df$group_id
  lookup
}
## simple xml tables to dataframe
xmltodf <- function(parsed_xml, xpath) {
  # Convert each node matched by xpath into a one-row data.frame and
  # row-bind them, filling columns missing from some nodes with NA.
  node_frames <- lapply(parsed_xml[xpath], function(node) {
    as.data.frame(XML::xmlToList(node), stringsAsFactors = FALSE)
  })
  combined <- do.call(plyr::rbind.fill, node_frames)
  as.data.frame(combined, stringsAsFactors = FALSE)
}
| /R/gather_results.R | permissive | serayamaouche/rclinicaltrials | R | false | false | 7,499 | r | #' Parses results from an xml object downloaded from clinicaltrials.gov
#'
#' Results of a clinical study are stored in a particular way. This reads and
#' organizes the information and returns it as a list of dataframes. Throws an error if the xml has no \code{clinical_results} node.
#'
#' @param parsed A parsed XML object, as returned by \code{XML::xmlParse}
#' @keywords Internal
#'
#' @return A list of \code{data.frame}s, participant flow, baseline data,
#' outcome results
#'
gather_results <- function(parsed){
check <- tryCatch(parsed[["//clinical_results"]], error = function(e) {
return(NULL)
})
if(is.null(check)) return(list(
participant_flow = NULL,
baseline_data = NULL,
outcome_data = NULL
))
this_nct_id <- XML::xmlValue(parsed[["//nct_id"]])
## participant flow
gp_look <- get_group_lookup(parsed, "//participant_flow/group_list")
period <- parsed["//period_list/period"]
flow_table <- do.call(plyr::rbind.fill, XML::xmlApply(period, function(node){
cbind(
title = XML::xmlValue(node[["title"]]),
do.call(plyr::rbind.fill, XML::xmlApply(node[["milestone_list"]], function(n0){
cbind(status = XML::xmlValue(n0[["title"]]),
data.frame(t(XML::xmlSApply(n0[["participants_list"]], XML::xmlAttrs)), stringsAsFactors = FALSE, row.names = 1:length(gp_look)))
}))
)
}))
flow_table$arm <- gp_look[flow_table$group_id]
flow_table$nct_id <- this_nct_id
## baseline
gp_look <- get_group_lookup(parsed, "//baseline/group_list")
measures <- parsed[["//baseline/measure_list"]]
baseline_table <- do.call(plyr::rbind.fill, XML::xmlApply(measures, function(node){
#outer most level: titles and units
lank <- XML::xmlSApply(node, function(n){
# category_list -> return sub-titles
if(XML::xmlName(n) == "category_list"){
do.call(plyr::rbind.fill, XML::xmlApply(n, function(n0){
tmpRes <- XML::xmlApply(n0[["measurement_list"]], function(x){
as.data.frame(t(XML::xmlAttrs(x)), stringsAsFactors = FALSE)
})
ResAdd <- do.call(plyr::rbind.fill, tmpRes)
data.frame(
cbind(
subtitle = XML::xmlValue(n0),
ResAdd,
stringsAsFactors = FALSE),
row.names = NULL, stringsAsFactors = FALSE)
}))
} else if(XML::xmlName(n) == "class_list"){
do.call(plyr::rbind.fill, XML::xmlApply(n, function(n0){
subtitle <- XML::xmlValue(n0[["title"]])
tmpRes <- XML::xmlApply(n0[["category_list"]][["category"]][["measurement_list"]], function(x){
as.data.frame(t(XML::xmlAttrs(x)), stringsAsFactors = FALSE)
})
ResAdd <- do.call(plyr::rbind.fill, tmpRes)
data.frame(
cbind(
subtitle = subtitle,
ResAdd,
stringsAsFactors = FALSE),
row.names = NULL, stringsAsFactors = FALSE)
}))
} else {
XML::xmlValue(n)
}
})
names(lank)[names(lank) == "class_list"] <- "category_list"
target <- lank$category_list
fillout <- lank[names(lank) != "category_list"]
cbind(fillout, target)
}))
baseline_table$arm <- gp_look[baseline_table$group_id]
baseline_table$nct_id <- this_nct_id
## outcomes
#parsed_out <- xml2::xml_find_all(x, ".//outcome")
all_results_list <- XML::xmlApply(parsed[["//clinical_results/outcome_list"]], function(parsed_out){
gp_look <- get_group_lookup(parsed_out, "group_list")
measures <- parsed_out[["measure_list"]]
analysis <- parsed_out[["analysis_list"]]
results_titles <- XML::xmlApply(parsed_out, function(node){
if(XML::xmlName(node) %in% c("group_list", "measure_list", "analysis_list")) return(NULL) else {
XML::xmlValue(node)
}
})
if(!is.null(measures)) {
results_table <- do.call(plyr::rbind.fill, XML::xmlApply(measures, function(node){
#outer most level: titles and units
lank <- XML::xmlSApply(node, function(n){
# category_list -> return sub-titles
if(XML::xmlName(n) == "category_list"){
do.call(plyr::rbind.fill, XML::xmlApply(n, function(n0){
data.frame(
cbind(
subtitle = XML::xmlValue(n0),
t(XML::xmlSApply(n0[["measurement_list"]], XML::xmlAttrs)),
stringsAsFactors = FALSE),
row.names = NULL, stringsAsFactors = FALSE)
}))
} else {
XML::xmlValue(n)
}
})
target <- lank$category_list
fillout <- lank[names(lank) != "category_list"]
cbind(fillout, target)
}))
results_table$arm <- gp_look[results_table$group_id]
measures_table <- cbind(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")],
results_table)
} else measures_table <- data.frame(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")])
if(!is.null(analysis)){
analysis_table <- do.call(plyr::rbind.fill, XML::xmlApply(analysis, function(node){
lank <- as.data.frame(XML::xmlApply(node, function(n){
if(XML::xmlName(n) == "group_id_list"){
data.frame(group_id = XML::xmlSApply(n, XML::xmlValue), stringsAsFactors = FALSE)
} else {
tmp <- data.frame(XML::xmlValue(n), stringsAsFactors = FALSE)
colnames(tmp) <- XML::xmlName(n)
tmp
}
}), stringsAsFactors = FALSE)
}))
analysis_table$arm <- gp_look[analysis_table$group_id]
analysis_table <- cbind(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")],
analysis_table)
} else analysis_table <- data.frame(results_titles[!names(results_titles) %in% c("group_list", "measure_list", "analysis_list")])
if(is.null(analysis)){
measures_table
} else if(is.null(measures)){
analysis_table
} else {
plyr::rbind.fill(measures_table, analysis_table)
}
})
final_outcome_table <- do.call(plyr::rbind.fill, all_results_list)
final_outcome_table$nct_id <- this_nct_id
list(
participant_flow = flow_table,
baseline_data = baseline_table,
outcome_data = final_outcome_table
)
}
## group labels are stored as key: values but only referred to in results as
## keys. This makes a lookup vector.
## Group labels are stored in the XML as key/value pairs, but results refer
## to groups only by key (group_id). Build a named character vector mapping
## group_id -> group title for use as a lookup table.
##
## parsed : parsed XML document (XML package), indexable by XPath.
## xpath  : XPath string locating the relevant <group_list> node.
## Returns a named character vector (names = group_id, values = titles),
## or NULL when the document has no such node.
get_group_lookup <- function(parsed, xpath){
# indexing with an XPath that matches nothing raises an error; treat as absent
group_list <- tryCatch(parsed[[xpath]], error = function(e) NULL)
if(is.null(group_list)) return(NULL)
# one row per <group>: its attributes (including group_id) followed by the
# text of its <title> child
group_lookup <- as.data.frame(t(XML::xmlSApply(group_list,
function(node){
c(XML::xmlAttrs(node), XML::xmlValue(XML::xmlChildren(node)$title))
})), stringsAsFactors = FALSE)
# NOTE(review): column 2 is assumed to be the appended title -- this only
# holds when group_id is the sole attribute on <group>; confirm against data
group_look <- group_lookup[,2]
names(group_look) <- group_lookup$group_id
group_look
}
## Convert a simple, flat XML table to a data frame: each node matched by
## `xpath` becomes one row; rows with differing columns are NA-filled by
## plyr::rbind.fill.
xmltodf <- function(parsed_xml, xpath){
node_frames <- lapply(parsed_xml[xpath], function(node) {
as.data.frame(XML::xmlToList(node), stringsAsFactors = FALSE)
})
combined <- do.call(plyr::rbind.fill, node_frames)
as.data.frame(combined, stringsAsFactors = FALSE)
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#
# NOTE(review): every function below is an auto-generated thin R wrapper
# that forwards its arguments unchanged to the matching registered C++
# routine via .Call(). Behaviour and documentation live in the C++
# sources; this file is regenerated by Rcpp::compileAttributes(), so any
# hand edits here will be lost.
qatd_cpp_fcm <- function(texts_, n_types, weights_, boolean, ordered) {
.Call(`_quanteda_qatd_cpp_fcm`, texts_, n_types, weights_, boolean, ordered)
}
qatd_cpp_index <- function(texts_, types_, words_) {
.Call(`_quanteda_qatd_cpp_index`, texts_, types_, words_)
}
qatd_cpp_tokens_chunk <- function(texts_, types_, size, overlap) {
.Call(`_quanteda_qatd_cpp_tokens_chunk`, texts_, types_, size, overlap)
}
qatd_cpp_tokens_compound <- function(texts_, compounds_, types_, delim_, join, window_left, window_right) {
.Call(`_quanteda_qatd_cpp_tokens_compound`, texts_, compounds_, types_, delim_, join, window_left, window_right)
}
qatd_cpp_tokens_lookup <- function(texts_, types_, words_, keys_, overlap, nomatch) {
.Call(`_quanteda_qatd_cpp_tokens_lookup`, texts_, types_, words_, keys_, overlap, nomatch)
}
qatd_cpp_tokens_ngrams <- function(texts_, types_, delim_, ns_, skips_) {
.Call(`_quanteda_qatd_cpp_tokens_ngrams`, texts_, types_, delim_, ns_, skips_)
}
qatd_cpp_tokens_recompile <- function(texts_, types_, gap = TRUE, dup = TRUE) {
.Call(`_quanteda_qatd_cpp_tokens_recompile`, texts_, types_, gap, dup)
}
qatd_cpp_tokens_replace <- function(texts_, types_, patterns_, replacements_) {
.Call(`_quanteda_qatd_cpp_tokens_replace`, texts_, types_, patterns_, replacements_)
}
qatd_cpp_tokens_restore <- function(texts_, marks_left_, marks_right_, types_, delim_) {
.Call(`_quanteda_qatd_cpp_tokens_restore`, texts_, marks_left_, marks_right_, types_, delim_)
}
qatd_cpp_tokens_segment <- function(texts_, types_, patterns_, remove, position) {
.Call(`_quanteda_qatd_cpp_tokens_segment`, texts_, types_, patterns_, remove, position)
}
qatd_cpp_tokens_select <- function(texts_, types_, words_, mode, padding, window_left, window_right, pos_from_, pos_to_) {
.Call(`_quanteda_qatd_cpp_tokens_select`, texts_, types_, words_, mode, padding, window_left, window_right, pos_from_, pos_to_)
}
qatd_cpp_is_grouped_numeric <- function(values_, groups_) {
.Call(`_quanteda_qatd_cpp_is_grouped_numeric`, values_, groups_)
}
qatd_cpp_is_grouped_character <- function(values_, groups_) {
.Call(`_quanteda_qatd_cpp_is_grouped_character`, values_, groups_)
}
qatd_cpp_set_load_factor <- function(type, value) {
invisible(.Call(`_quanteda_qatd_cpp_set_load_factor`, type, value))
}
qatd_cpp_get_load_factor <- function() {
.Call(`_quanteda_qatd_cpp_get_load_factor`)
}
qatd_cpp_set_meta <- function(object_, meta_) {
invisible(.Call(`_quanteda_qatd_cpp_set_meta`, object_, meta_))
}
qatd_cpp_tbb_enabled <- function() {
.Call(`_quanteda_qatd_cpp_tbb_enabled`)
}
| /R/RcppExports.R | no_license | cran/quanteda | R | false | false | 2,765 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#
# NOTE(review): every function below is an auto-generated thin R wrapper
# that forwards its arguments unchanged to the matching registered C++
# routine via .Call(). Behaviour and documentation live in the C++
# sources; this file is regenerated by Rcpp::compileAttributes(), so any
# hand edits here will be lost.
qatd_cpp_fcm <- function(texts_, n_types, weights_, boolean, ordered) {
.Call(`_quanteda_qatd_cpp_fcm`, texts_, n_types, weights_, boolean, ordered)
}
qatd_cpp_index <- function(texts_, types_, words_) {
.Call(`_quanteda_qatd_cpp_index`, texts_, types_, words_)
}
qatd_cpp_tokens_chunk <- function(texts_, types_, size, overlap) {
.Call(`_quanteda_qatd_cpp_tokens_chunk`, texts_, types_, size, overlap)
}
qatd_cpp_tokens_compound <- function(texts_, compounds_, types_, delim_, join, window_left, window_right) {
.Call(`_quanteda_qatd_cpp_tokens_compound`, texts_, compounds_, types_, delim_, join, window_left, window_right)
}
qatd_cpp_tokens_lookup <- function(texts_, types_, words_, keys_, overlap, nomatch) {
.Call(`_quanteda_qatd_cpp_tokens_lookup`, texts_, types_, words_, keys_, overlap, nomatch)
}
qatd_cpp_tokens_ngrams <- function(texts_, types_, delim_, ns_, skips_) {
.Call(`_quanteda_qatd_cpp_tokens_ngrams`, texts_, types_, delim_, ns_, skips_)
}
qatd_cpp_tokens_recompile <- function(texts_, types_, gap = TRUE, dup = TRUE) {
.Call(`_quanteda_qatd_cpp_tokens_recompile`, texts_, types_, gap, dup)
}
qatd_cpp_tokens_replace <- function(texts_, types_, patterns_, replacements_) {
.Call(`_quanteda_qatd_cpp_tokens_replace`, texts_, types_, patterns_, replacements_)
}
qatd_cpp_tokens_restore <- function(texts_, marks_left_, marks_right_, types_, delim_) {
.Call(`_quanteda_qatd_cpp_tokens_restore`, texts_, marks_left_, marks_right_, types_, delim_)
}
qatd_cpp_tokens_segment <- function(texts_, types_, patterns_, remove, position) {
.Call(`_quanteda_qatd_cpp_tokens_segment`, texts_, types_, patterns_, remove, position)
}
qatd_cpp_tokens_select <- function(texts_, types_, words_, mode, padding, window_left, window_right, pos_from_, pos_to_) {
.Call(`_quanteda_qatd_cpp_tokens_select`, texts_, types_, words_, mode, padding, window_left, window_right, pos_from_, pos_to_)
}
qatd_cpp_is_grouped_numeric <- function(values_, groups_) {
.Call(`_quanteda_qatd_cpp_is_grouped_numeric`, values_, groups_)
}
qatd_cpp_is_grouped_character <- function(values_, groups_) {
.Call(`_quanteda_qatd_cpp_is_grouped_character`, values_, groups_)
}
qatd_cpp_set_load_factor <- function(type, value) {
invisible(.Call(`_quanteda_qatd_cpp_set_load_factor`, type, value))
}
qatd_cpp_get_load_factor <- function() {
.Call(`_quanteda_qatd_cpp_get_load_factor`)
}
qatd_cpp_set_meta <- function(object_, meta_) {
invisible(.Call(`_quanteda_qatd_cpp_set_meta`, object_, meta_))
}
qatd_cpp_tbb_enabled <- function() {
.Call(`_quanteda_qatd_cpp_tbb_enabled`)
}
|
# Prepare and save eQTL analysis inputs: load expression, sample info,
# expression/genotype PCs, genotype dosages and gene-SNP pairs; regress
# 25 expression PCs + 4 genotype PCs out of the expression matrix; save
# everything to a single .RData file.
library(data.table)
# normalised gene expression matrix
# NOTE(review): gsub removes EVERY "X" in a column name, not just the
# leading one read.delim prepends to numeric IDs -- confirm IDs contain no X
exprs <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/Normalised_GEX_data.txt")
colnames(exprs) <- gsub("X", "", colnames(exprs))
exprs <- as.matrix(exprs)
info <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/sample_info.txt")
rownames(info) <- info$SampleID
# gene-expression principal components
gex.pcs <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/GEX_PCs.txt")
gex.pcs <- as.matrix(gex.pcs)
# genotyping PCs from an .eigenvec file; column 2 becomes the row names
# (presumably sample IDs -- verify against the plink output format)
geno.pcs <- read.delim("~/Abu-Dhabi/Genotyping/AD_736_multi_ethnic_chip_updated_eQTL_inds_snps_removed_noLD_noLD_genotyping_pca_clean.eigenvec", sep="", row.names=2, header=F)
geno.pcs$V1 <- NULL
geno.pcs <- as.matrix(geno.pcs)
# genotype matrix from a .raw export: drop the six leading metadata
# columns, strip "X" prefixes, then drop the last two characters of each
# SNP name (allele suffix such as "_A" -- TODO confirm)
geno <- data.frame(fread("/well/jknight/AbuDhabiRNA/eQTL/Genotyping/AD_736_multi_ethnic_chip_eQTL_genotyping_b38.raw"))
rownames(geno) <- geno[, 1]
geno[, 1:6] <- NULL
colnames(geno) <- gsub("X", "", colnames(geno))
colnames(geno) <- substr(colnames(geno), 1, nchar(colnames(geno))-2)
geno <- as.matrix(geno)
# gene / SNP pairs to test; SNP names normalised to match `geno` columns
pairs <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/Gene_snp_pairs.txt")
pairs[, 1] <- as.character(pairs[, 1])
pairs[, 2] <- make.names(pairs[, 2])
pairs[, 2] <- gsub("X", "", pairs[, 2])
colnames(pairs) <- c("Gene", "SNP")
# covariates: first 25 expression PCs and first 4 genotype PCs
PCs <- cbind(gex.pcs[, 1:25], geno.pcs[, 1:4])
expression.set <- exprs
# transpose so rows = samples, columns = genes
expression.set <- t(expression.set)
expression.pc <- PCs
num.PC <- ncol(expression.pc)
# regress out expression
regressed.data <- matrix(nrow=nrow(expression.set), ncol=ncol(expression.set))
# per gene: fit expression ~ all PCs, subtract the PC contribution
# (the intercept is kept, so only the PC effects are removed)
for(i in 1:(dim(expression.set)[2])){
model <- lm(as.matrix(expression.set[, i]) ~ as.matrix(expression.pc[, 1:(num.PC)]))
# NOTE(review): the inner function(i) shadows the loop index i; it works,
# but renaming one of them would be clearer
regressed.data[, i] <- expression.set[, i] -
rowSums(sapply(1:(num.PC), function(i)model$coefficients[i+1]*expression.pc[, 1:(num.PC)][,i]))
}
rownames(regressed.data) <- rownames(expression.set)
colnames(regressed.data) <- colnames(expression.set)
# back to genes x samples
regressed.data <- t(regressed.data)
exprs <- regressed.data
save(list=c("exprs", "geno", "info", "gex.pcs", "geno.pcs", "pairs"),
file = "/well/jknight/AbuDhabiRNA/eQTL/eQTL.25PCs.RData")
| /eQTL/Make_rda.R | no_license | jknightlab/Abu-Dhabi | R | false | false | 1,964 | r | library(data.table)
# Prepare and save eQTL analysis inputs: load expression, sample info,
# expression/genotype PCs, genotype dosages and gene-SNP pairs; regress
# 25 expression PCs + 4 genotype PCs out of the expression matrix; save
# everything to a single .RData file.
# NOTE(review): gsub removes EVERY "X" in a column name, not just the
# leading one read.delim prepends to numeric IDs -- confirm IDs contain no X
exprs <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/Normalised_GEX_data.txt")
colnames(exprs) <- gsub("X", "", colnames(exprs))
exprs <- as.matrix(exprs)
info <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/sample_info.txt")
rownames(info) <- info$SampleID
# gene-expression principal components
gex.pcs <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/GEX_PCs.txt")
gex.pcs <- as.matrix(gex.pcs)
# genotyping PCs from an .eigenvec file; column 2 becomes the row names
# (presumably sample IDs -- verify against the plink output format)
geno.pcs <- read.delim("~/Abu-Dhabi/Genotyping/AD_736_multi_ethnic_chip_updated_eQTL_inds_snps_removed_noLD_noLD_genotyping_pca_clean.eigenvec", sep="", row.names=2, header=F)
geno.pcs$V1 <- NULL
geno.pcs <- as.matrix(geno.pcs)
# genotype matrix from a .raw export: drop the six leading metadata
# columns, strip "X" prefixes, then drop the last two characters of each
# SNP name (allele suffix such as "_A" -- TODO confirm)
geno <- data.frame(fread("/well/jknight/AbuDhabiRNA/eQTL/Genotyping/AD_736_multi_ethnic_chip_eQTL_genotyping_b38.raw"))
rownames(geno) <- geno[, 1]
geno[, 1:6] <- NULL
colnames(geno) <- gsub("X", "", colnames(geno))
colnames(geno) <- substr(colnames(geno), 1, nchar(colnames(geno))-2)
geno <- as.matrix(geno)
# gene / SNP pairs to test; SNP names normalised to match `geno` columns
pairs <- read.delim("~/Abu-Dhabi/RNASeq/eQTL/Gene_snp_pairs.txt")
pairs[, 1] <- as.character(pairs[, 1])
pairs[, 2] <- make.names(pairs[, 2])
pairs[, 2] <- gsub("X", "", pairs[, 2])
colnames(pairs) <- c("Gene", "SNP")
# covariates: first 25 expression PCs and first 4 genotype PCs
PCs <- cbind(gex.pcs[, 1:25], geno.pcs[, 1:4])
expression.set <- exprs
# transpose so rows = samples, columns = genes
expression.set <- t(expression.set)
expression.pc <- PCs
num.PC <- ncol(expression.pc)
# regress out expression
regressed.data <- matrix(nrow=nrow(expression.set), ncol=ncol(expression.set))
# per gene: fit expression ~ all PCs, subtract the PC contribution
# (the intercept is kept, so only the PC effects are removed)
for(i in 1:(dim(expression.set)[2])){
model <- lm(as.matrix(expression.set[, i]) ~ as.matrix(expression.pc[, 1:(num.PC)]))
# NOTE(review): the inner function(i) shadows the loop index i; it works,
# but renaming one of them would be clearer
regressed.data[, i] <- expression.set[, i] -
rowSums(sapply(1:(num.PC), function(i)model$coefficients[i+1]*expression.pc[, 1:(num.PC)][,i]))
}
rownames(regressed.data) <- rownames(expression.set)
colnames(regressed.data) <- colnames(expression.set)
# back to genes x samples
regressed.data <- t(regressed.data)
exprs <- regressed.data
save(list=c("exprs", "geno", "info", "gex.pcs", "geno.pcs", "pairs"),
file = "/well/jknight/AbuDhabiRNA/eQTL/eQTL.25PCs.RData")
|
# Bar charts with base R (tutorial script)
# https://youtu.be/8FEVt-qnZMs
# simplest form
dados<- 4:8
barplot(dados)
# adding labels and identification
names(dados)<- 1:5
barplot(dados)
names(dados)<- c("a","b","c","d","e")
names(dados)<- c("abacate","berinjela","cebola","dados","elefante")
barplot(dados)
barplot(dados, xlab= "legenda eixo x", ylab = "legenda eixo y", main = "título")
# colours and borders
barplot(dados,col = "blue") # col sets bar colours, by colour name
barplot(dados,col = c("blue","red","orange","white","black")) #
barplot(dados,border= "#FF00FF", col = "#FFFFFF") # border sets the border colour; RGB hex notation
# turning built-in datasets into charts quickly
data()
laranja<- Orange
porco<- ToothGrowth
# mean tooth growth by vitamin C supplement type
tapply(porco$len,porco$supp,mean)
barplot(tapply(porco$len,porco$supp,mean))
# orange-tree circumference by age
tapply(laranja$circumference,laranja$age,mean)
barplot(tapply(laranja$circumference,laranja$age,mean))
# more complete charts
barplot(tapply(laranja$circumference,laranja$age,mean),
col = "sienna1",border= "black" ,
xlab= "Idade da árvore",ylab= "Circunferência", main = "Circunferência de laranjeiras por idade")
barplot(tapply(porco$len,porco$supp,mean),
col = "slategray1",border= "blue" ,
xlab= "Vitamina C",ylab= "Tamanho do dente", main = "Dentes de porquinho-da-índia que consomem vitamina C")
| /barras_rbase.R | no_license | igoralmeidab/R_portugues | R | false | false | 1,417 | r | # Gráfico de barra com Rbase
# Bar charts with base R (tutorial script)
# https://youtu.be/8FEVt-qnZMs
# simplest form
dados<- 4:8
barplot(dados)
# adding labels and identification
names(dados)<- 1:5
barplot(dados)
names(dados)<- c("a","b","c","d","e")
names(dados)<- c("abacate","berinjela","cebola","dados","elefante")
barplot(dados)
barplot(dados, xlab= "legenda eixo x", ylab = "legenda eixo y", main = "título")
# colours and borders
barplot(dados,col = "blue") # col sets bar colours, by colour name
barplot(dados,col = c("blue","red","orange","white","black")) #
barplot(dados,border= "#FF00FF", col = "#FFFFFF") # border sets the border colour; RGB hex notation
# turning built-in datasets into charts quickly
data()
laranja<- Orange
porco<- ToothGrowth
# mean tooth growth by vitamin C supplement type
tapply(porco$len,porco$supp,mean)
barplot(tapply(porco$len,porco$supp,mean))
# orange-tree circumference by age
tapply(laranja$circumference,laranja$age,mean)
barplot(tapply(laranja$circumference,laranja$age,mean))
# more complete charts
barplot(tapply(laranja$circumference,laranja$age,mean),
col = "sienna1",border= "black" ,
xlab= "Idade da árvore",ylab= "Circunferência", main = "Circunferência de laranjeiras por idade")
barplot(tapply(porco$len,porco$supp,mean),
col = "slategray1",border= "blue" ,
xlab= "Vitamina C",ylab= "Tamanho do dente", main = "Dentes de porquinho-da-índia que consomem vitamina C")
|
### R code from vignette source 'rmhPoster.Rtex'
###################################################
### code chunk number 1: rmhPoster.Rtex:15-149
###################################################
## NOTE(review): vignette-extracted script. This section sets up
## Hmisc::latex options and draws the iris boxplot matrix to irisBW.pdf.
library(lattice)
library(latticeExtra)
library(microplot)
## options needed by Hmisc::latex
options(latexcmd='pdflatex')
options(dviExtension='pdf')
## pick a viewer command for Hmisc::dvi: "open" where available,
## otherwise the freedesktop "xdg-open"
if (nchar(Sys.which("open"))) {
options(xdvicmd="open") ## Macintosh, Windows, SMP linux
} else {
options(xdvicmd="xdg-open") ## ubuntu linux
}
## Hmisc::latex
## boxplot matrix of iris data: one panel per Species x Measurement
irisBW <-
bwplot( ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width | Species,
data=iris, outer=TRUE, as.table=TRUE,
scales=list(alternating=FALSE),
xlab=NULL,
par.strip.text=list(cex=1.5))
names(dimnames(irisBW))[[2]] <- "Measurement"
## pdf of boxplot matrix
pdf("irisBW.pdf", width=7, height=7) ## inch
useOuterStrips(irisBW)
suppress <- dev.off()
## twelve individual boxplots without axes
## (collapse all padding and axes so each panel is a bare boxplot strip;
## layout=c(1,1) prints one panel per page)
irisBW.update <-
update(irisBW,
xlab=NULL,
par.settings=list(
layout.heights=layoutHeightsCollapse(),
layout.widths=layoutWidthsCollapse(),
axis.line=list(col="transparent")),
layout=c(1,1)
)
## horizontal axis: copy of panel [1,1] keeping only the bottom axis
## (panel height 0), reused later under the table columns
irisBW.axis <-
update(irisBW.update[1,1],
scales=list(cex=.6),
par.settings=list(layout.heights=list(axis.bottom=1, panel=0),
axis.line=list(col="black")))
## create 13 pdf files, one per boxplot and one more for the horizontal axis
## (%03d makes the device write irisBW001.pdf ... irisBW012.pdf, one per page)
pdf("irisBW%03d.pdf", onefile=FALSE, height=.4, width=1.6) ## inch
irisBW.update ## 12 individual boxplots without axes
suppress <- dev.off()
pdf("irisBW013.pdf", height=.4, width=1.6) ## inch
irisBW.axis ## horizontal axis
suppress <- dev.off()
## construct names of pdf files (irisBW001.pdf ... irisBW013.pdf)
graphnames <- paste0("irisBW", sprintf("%03i", 1:13), ".pdf")
## matrix of latex \includegraphics{} macros for each boxplot's pdf file
## (filled 3 species x 4 measurements, then transposed: rows = measurements)
graphicsnames <- t(matrix(as.includegraphics(graphnames[1:12], height="2em", raise="-1.3ex"),
nrow=3, ncol=4,
dimnames=dimnames(irisBW)))
## Measurement by Species
BWMS.latex <- Hmisc::latex(graphicsnames, caption="\\Large Measurement by Species", where="!htbp",
label="BWMS", title="Measurement", file="BWMS.tex",
size="Large")
BWMS.latex$style <- "graphicx"
## BWMS.latex
## Hmisc::dvi(BWMS.latex, width=7, height=3)
## Measurement by Species with Axis (13th graphic appended as a final row)
graphicsnamesA <- rbind(graphicsnames, as.includegraphics(graphnames[13], height="2em", raise="-1.3ex"))
BWMSA.latex <- Hmisc::latex(graphicsnamesA, caption="\\Large Measurement by Species, with $x$-scale",
where="!htbp",
n.rgroup=c(4, 1),
rgroup=c("\\vspace*{-1em}", "\\vspace*{-1.25em}"),
label="BWMSA", title="Measurement", file="BWMSA.tex",
size="Large")
BWMSA.latex$style <- "graphicx"
## BWMSA.latex
## Hmisc::dvi(BWMSA.latex, width=7, height=3)
## Species by Measurement (transpose back: rows = species)
BWSM.latex <- Hmisc::latex(t(graphicsnames), caption="\\Large Species by Measurement", where="!htbp",
label="BWSM", title="Species", file="BWSM.tex", size="large")
BWSM.latex$style <- "graphicx"
## BWSM.latex
## Hmisc::dvi(BWSM.latex, width=7.5, height=2)
## Individual boxes embedded into a more interesting table
## five-number summary (min, Q1, median, Q3, max) per species:
## one matrix per species, measurements in columns
iris.fivenum <-
sapply(levels(iris$Species),
function(i) {
tmp <- sapply(iris[iris$Species==i, 1:4], fivenum)
dimnames(tmp)[[1]] <- c("min", "Q1", "med", "Q3", "max")
tmp
},
simplify=FALSE)
## Species and Measurement in separate columns
BW5num <-
rbind(
data.frame(t(iris.fivenum[[1]]), "Box Plots"=graphicsnames[,1], check.names=FALSE),
data.frame(t(iris.fivenum[[2]]), "Box Plots"=graphicsnames[,2], check.names=FALSE),
data.frame(t(iris.fivenum[[3]]), "Box Plots"=graphicsnames[,3], check.names=FALSE))
## the 4 measurement names are recycled over the 12 rows (3 species
## stacked in order), so row order above must match levels(iris$Species)
BW5num$Measurement=names(iris)[1:4]
BW5num <- BW5num[, c(7,1:6)]
BW5num.latex <-
Hmisc::latex(BW5num,
rowname=" ",
rowlabel="Species",
rgroup=levels(iris$Species),
n.rgroup=c(4,4,4),
cgroup=c("", "Five Number Summary", ""),
n.cgroup=c(1, 5, 1),
caption="\\Large Five Number Summary and Boxplots for each Species and Measurement",
label="irisBW5num",
where="!htbp")
BW5num.latex$style <- "graphicx"
## BW5num.latex ## this line requires latex in the path
## print.default(BW5num.latex) ## the content of the R variable is the filename of
## the file containing the latex table environment
| /inst/doc/rmhPoster.R | no_license | cran/microplot | R | false | false | 4,816 | r | ### R code from vignette source 'rmhPoster.Rtex'
###################################################
### code chunk number 1: rmhPoster.Rtex:15-149
###################################################
## NOTE(review): vignette-extracted script. This section sets up
## Hmisc::latex options and draws the iris boxplot matrix to irisBW.pdf.
library(lattice)
library(latticeExtra)
library(microplot)
## options needed by Hmisc::latex
options(latexcmd='pdflatex')
options(dviExtension='pdf')
## pick a viewer command for Hmisc::dvi: "open" where available,
## otherwise the freedesktop "xdg-open"
if (nchar(Sys.which("open"))) {
options(xdvicmd="open") ## Macintosh, Windows, SMP linux
} else {
options(xdvicmd="xdg-open") ## ubuntu linux
}
## Hmisc::latex
## boxplot matrix of iris data: one panel per Species x Measurement
irisBW <-
bwplot( ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width | Species,
data=iris, outer=TRUE, as.table=TRUE,
scales=list(alternating=FALSE),
xlab=NULL,
par.strip.text=list(cex=1.5))
names(dimnames(irisBW))[[2]] <- "Measurement"
## pdf of boxplot matrix
pdf("irisBW.pdf", width=7, height=7) ## inch
useOuterStrips(irisBW)
suppress <- dev.off()
## twelve individual boxplots without axes
## (collapse all padding and axes so each panel is a bare boxplot strip;
## layout=c(1,1) prints one panel per page)
irisBW.update <-
update(irisBW,
xlab=NULL,
par.settings=list(
layout.heights=layoutHeightsCollapse(),
layout.widths=layoutWidthsCollapse(),
axis.line=list(col="transparent")),
layout=c(1,1)
)
## horizontal axis: copy of panel [1,1] keeping only the bottom axis
## (panel height 0), reused later under the table columns
irisBW.axis <-
update(irisBW.update[1,1],
scales=list(cex=.6),
par.settings=list(layout.heights=list(axis.bottom=1, panel=0),
axis.line=list(col="black")))
## create 13 pdf files, one per boxplot and one more for the horizontal axis
## (%03d makes the device write irisBW001.pdf ... irisBW012.pdf, one per page)
pdf("irisBW%03d.pdf", onefile=FALSE, height=.4, width=1.6) ## inch
irisBW.update ## 12 individual boxplots without axes
suppress <- dev.off()
pdf("irisBW013.pdf", height=.4, width=1.6) ## inch
irisBW.axis ## horizontal axis
suppress <- dev.off()
## construct names of pdf files (irisBW001.pdf ... irisBW013.pdf)
graphnames <- paste0("irisBW", sprintf("%03i", 1:13), ".pdf")
## matrix of latex \includegraphics{} macros for each boxplot's pdf file
## (filled 3 species x 4 measurements, then transposed: rows = measurements)
graphicsnames <- t(matrix(as.includegraphics(graphnames[1:12], height="2em", raise="-1.3ex"),
nrow=3, ncol=4,
dimnames=dimnames(irisBW)))
## Measurement by Species
BWMS.latex <- Hmisc::latex(graphicsnames, caption="\\Large Measurement by Species", where="!htbp",
label="BWMS", title="Measurement", file="BWMS.tex",
size="Large")
BWMS.latex$style <- "graphicx"
## BWMS.latex
## Hmisc::dvi(BWMS.latex, width=7, height=3)
## Measurement by Species with Axis (13th graphic appended as a final row)
graphicsnamesA <- rbind(graphicsnames, as.includegraphics(graphnames[13], height="2em", raise="-1.3ex"))
BWMSA.latex <- Hmisc::latex(graphicsnamesA, caption="\\Large Measurement by Species, with $x$-scale",
where="!htbp",
n.rgroup=c(4, 1),
rgroup=c("\\vspace*{-1em}", "\\vspace*{-1.25em}"),
label="BWMSA", title="Measurement", file="BWMSA.tex",
size="Large")
BWMSA.latex$style <- "graphicx"
## BWMSA.latex
## Hmisc::dvi(BWMSA.latex, width=7, height=3)
## Species by Measurement (transpose back: rows = species)
BWSM.latex <- Hmisc::latex(t(graphicsnames), caption="\\Large Species by Measurement", where="!htbp",
label="BWSM", title="Species", file="BWSM.tex", size="large")
BWSM.latex$style <- "graphicx"
## BWSM.latex
## Hmisc::dvi(BWSM.latex, width=7.5, height=2)
## Individual boxes embedded into a more interesting table
## five-number summary (min, Q1, median, Q3, max) per species:
## one matrix per species, measurements in columns
iris.fivenum <-
sapply(levels(iris$Species),
function(i) {
tmp <- sapply(iris[iris$Species==i, 1:4], fivenum)
dimnames(tmp)[[1]] <- c("min", "Q1", "med", "Q3", "max")
tmp
},
simplify=FALSE)
## Species and Measurement in separate columns
BW5num <-
rbind(
data.frame(t(iris.fivenum[[1]]), "Box Plots"=graphicsnames[,1], check.names=FALSE),
data.frame(t(iris.fivenum[[2]]), "Box Plots"=graphicsnames[,2], check.names=FALSE),
data.frame(t(iris.fivenum[[3]]), "Box Plots"=graphicsnames[,3], check.names=FALSE))
## the 4 measurement names are recycled over the 12 rows (3 species
## stacked in order), so row order above must match levels(iris$Species)
BW5num$Measurement=names(iris)[1:4]
BW5num <- BW5num[, c(7,1:6)]
BW5num.latex <-
Hmisc::latex(BW5num,
rowname=" ",
rowlabel="Species",
rgroup=levels(iris$Species),
n.rgroup=c(4,4,4),
cgroup=c("", "Five Number Summary", ""),
n.cgroup=c(1, 5, 1),
caption="\\Large Five Number Summary and Boxplots for each Species and Measurement",
label="irisBW5num",
where="!htbp")
BW5num.latex$style <- "graphicx"
## BW5num.latex ## this line requires latex in the path
## print.default(BW5num.latex) ## the content of the R variable is the filename of
## the file containing the latex table environment
|
\name{plot.bgeva}
\alias{plot.bgeva}
\title{bgeva plotting}
\description{It takes a fitted \code{bgeva} object produced by \code{bgeva()} and plots the
component smooth functions that make it up on the scale of the linear predictor.
This function is based on \code{plot.gam()} in \code{mgcv}. Please see the documentation of \code{plot.gam()} for full details.
}
\usage{
\method{plot}{bgeva}(x, ...)
}
\arguments{
\item{x}{A fitted \code{bgeva} object as produced by \code{bgeva()}.}
\item{...}{Other graphics parameters to pass on to plotting commands, as described for \code{plot.gam} in \code{mgcv}.}
}
\details{
This function produces plots showing the smooth terms of a fitted \code{bgeva} model. For plots
of 1-D smooths, the x axis of each plot is labelled using the name of the regressor, while the y axis is labelled as \code{s(regr,edf)}
where \code{regr} is the regressor name, and \code{edf} the estimated degrees of freedom of the smooth. As for 2-D smooths, perspective plots are produced with the x-axes labelled with the first and second variable names and the y axis
is labelled as \code{s(var1,var2,edf)}, which indicates the variables of which the term is a function and the \code{edf} for the term.
If \code{seWithMean=TRUE}, then the confidence intervals include the uncertainty about the overall mean. That is,
although each smooth is shown centred, the confidence intervals are obtained as if every other term in the model was
constrained to have average 0 (average taken over the covariate values) except for the smooth being plotted. The theoretical arguments
and simulation study of Marra and Wood (2012) suggests that \code{seWithMean=TRUE} results in intervals with
close to nominal frequentist coverage probabilities. This option should not be used when fitting a random effect model.
}
\value{
The function generates plots.
}
\author{
Maintainer: Giampiero Marra \email{giampiero.marra@ucl.ac.uk}
}
\references{
Marra G. and Wood S.N. (2012), Coverage Properties of Confidence Intervals for Generalized Additive Model Components. \emph{Scandinavian Journal of Statistics}, 39(1), 53-74.
}
\section{WARNING}{
The function can not deal with smooths of more than 2 variables.
}
\seealso{
\code{\link{bgeva}}, \code{\link{summary.bgeva}}
}
\examples{
## see examples for bgeva
}
\keyword{smooth}
\keyword{regression}
\keyword{hplot}
| /man/plot.bgeva.Rd | no_license | cran/bgeva | R | false | false | 2,534 | rd | \name{plot.bgeva}
\alias{plot.bgeva}
\title{bgeva plotting}
\description{It takes a fitted \code{bgeva} object produced by \code{bgeva()} and plots the
component smooth functions that make it up on the scale of the linear predictor.
This function is based on \code{plot.gam()} in \code{mgcv}. Please see the documentation of \code{plot.gam()} for full details.
}
\usage{
\method{plot}{bgeva}(x, ...)
}
\arguments{
\item{x}{A fitted \code{bgeva} object as produced by \code{bgeva()}.}
\item{...}{Other graphics parameters to pass on to plotting commands, as described for \code{plot.gam} in \code{mgcv}.}
}
\details{
This function produces plots showing the smooth terms of a fitted \code{bgeva} model. For plots
of 1-D smooths, the x axis of each plot is labelled using the name of the regressor, while the y axis is labelled as \code{s(regr,edf)}
where \code{regr} is the regressor name, and \code{edf} the estimated degrees of freedom of the smooth. As for 2-D smooths, perspective plots are produced with the x-axes labelled with the first and second variable names and the y axis
is labelled as \code{s(var1,var2,edf)}, which indicates the variables of which the term is a function and the \code{edf} for the term.
If \code{seWithMean=TRUE}, then the confidence intervals include the uncertainty about the overall mean. That is,
although each smooth is shown centred, the confidence intervals are obtained as if every other term in the model was
constrained to have average 0 (average taken over the covariate values) except for the smooth being plotted. The theoretical arguments
and simulation study of Marra and Wood (2012) suggests that \code{seWithMean=TRUE} results in intervals with
close to nominal frequentist coverage probabilities. This option should not be used when fitting a random effect model.
}
\value{
The function generates plots.
}
\author{
Maintainer: Giampiero Marra \email{giampiero.marra@ucl.ac.uk}
}
\references{
Marra G. and Wood S.N. (2012), Coverage Properties of Confidence Intervals for Generalized Additive Model Components. \emph{Scandinavian Journal of Statistics}, 39(1), 53-74.
}
\section{WARNING}{
The function can not deal with smooths of more than 2 variables.
}
\seealso{
\code{\link{bgeva}}, \code{\link{summary.bgeva}}
}
\examples{
## see examples for bgeva
}
\keyword{smooth}
\keyword{regression}
\keyword{hplot}
|
# Requesting a read receipt must fail without a From: address, and must
# add both the standard Disposition-Notification-To header and the
# non-standard X-Confirm-Reading-To header pointing at the sender.
test_that("read receipt", {
# no sender set yet -> requesting a receipt should error
expect_error(envelope() %>% request_receipt_read())
msg <- envelope() %>%
from("olivia@google.com") %>%
request_receipt_read()
# " +" is a regex: one or more spaces between header name and address
expect_match(headers(msg), "Disposition-Notification-To: +olivia@google.com")
expect_match(headers(msg), "X-Confirm-Reading-To: +olivia@google.com")
})
# Requesting a delivery receipt must fail without a From: address, and
# must add a Return-Receipt-To header addressed to the sender.
test_that("delivery receipt", {
expect_error(envelope() %>% request_receipt_delivery())
msg <- envelope() %>%
from("olivia@google.com") %>%
request_receipt_delivery()
# " +" is a regex: one or more spaces between header name and address
expect_match(headers(msg), "Return-Receipt-To: +olivia@google.com")
})
| /tests/testthat/test-header-receipt.R | no_license | datawookie/emayili | R | false | false | 580 | r | test_that("read receipt", {
# no sender set yet -> requesting a receipt should error
expect_error(envelope() %>% request_receipt_read())
msg <- envelope() %>%
from("olivia@google.com") %>%
request_receipt_read()
# " +" is a regex: one or more spaces between header name and address
expect_match(headers(msg), "Disposition-Notification-To: +olivia@google.com")
expect_match(headers(msg), "X-Confirm-Reading-To: +olivia@google.com")
})
expect_error(envelope() %>% request_receipt_delivery())
msg <- envelope() %>%
from("olivia@google.com") %>%
request_receipt_delivery()
expect_match(headers(msg), "Return-Receipt-To: +olivia@google.com")
})
|
# nocov start
# Namespace load hook: runs when the SMRD namespace is loaded.
.onLoad <- function(libname, pkgname) {
# package-level environment used to hold shared state
# NOTE(review): `<<-` assigns outside the function's own frame; confirm
# the intended home for `.frame0` (namespace vs. global environment)
.frame0 <<- new.env()
# expose the bundled Shiny apps directory at the "smrd_apps" URL prefix
shiny::addResourcePath("smrd_apps",
system.file("smrd_apps", package = "SMRD"))
# default theme for the package's Shiny apps
shiny::shinyOptions('theme' = 'flatly')
}
# Namespace unload hook: release the package's compiled shared library so
# the DLL/.so is not left loaded after unloadNamespace("SMRD").
.onUnload <- function (libpath) {
library.dynam.unload("SMRD", libpath)
}
# Attach hook: runs when the package is attached to the search() path,
# e.g. via library() or require(). Prints an experimental-software banner
# with the installed version and the bug-report URL from DESCRIPTION.
# (Fixes: `=` assignment inconsistent with the sibling hooks above,
# unqualified utils::packageVersion, and a typo in the banner text.)
.onAttach <- function(libname, pkgname) {
# the banner is only useful at an interactive prompt; stay quiet in scripts
if (!interactive()) return()
v <- utils::packageVersion("SMRD")
# read the BugReports URL straight from DESCRIPTION so the message never
# goes stale if the repository moves
br <- read.dcf(system.file("DESCRIPTION", package = "SMRD"),
fields = c("BugReports"))
packageStartupMessage("SMRD (version ", v, ") is experimental software under active development\n\n",
"If you encounter unexpected errors or problems\n",
"please submit an issue at: ", br[1L],
"\n\nThe best way to start using SMRD is to check out the echapters",
"\n\nFor example: echapter(chapter = 1)")
}
# Look up strings describing the SMRD companion book and its chapters.
#
# info: a key such as 'authors', 'book', 'chapter7' (full "Chapter N - ..."
#       title), 'chap7' (bare chapter title), or 'appendixb'.
# ...:  ignored; retained for backward compatibility with existing callers.
#
# Returns the matching character string, or NULL for an unrecognised key
# (matching the behaviour of the original switch() with no default branch).
info <- function(info,...) {
# Bare chapter titles, defined exactly once.  Both the 'chapN' and the
# 'chapterN' keys are derived from this vector, so the two sets of
# titles can no longer drift apart (the original listed each title twice).
titles <- c(
'Reliability Concepts and Reliability Data',
'Models, Censoring, and Likelihood for Failure-Time Data',
'Nonparametric Estimation',
"Location-Scale-Based Parametric Distributions",
'Other Parametric Distributions',
'Probability Plotting',
'Parametric Likelihood Fitting Concepts: Exponential Distribution',
'Maximum Likelihood for Log-Location-Scale Distributions',
'Bootstrap Confidence Intervals',
'Planning Life Tests',
'Parametric Maximum Likelihood: Other Models',
'Prediction of Future Random Quantities',
'Degradation Data, Models and Data Analysis',
'Introduction to the Use of Bayesian Methods for Reliability Data',
'System Reliability Concepts and Methods',
'Analysis of Repairable System and Other Recurrence Data',
'Failure-Time Regression Analysis',
'Accelerated Test Models',
'Accelerated Life Tests',
'Planning Accelerated Life Tests',
'Accelerated Degradation Tests',
'Case Studies and Further Applications',
'Analysis of Accelerated Destructive Degradation Test (ADDT) Data',
'Accelerated Destructive Degradation Test (ADDT) Planning')
n <- seq_along(titles)
lookup <- c(
list('authors' = "W. Q. Meeker and L. A. Escobar",
'book' = 'Statistical Methods for Reliability Data',
'edition' = '1st ed.',
'work' = "Air Force Institute of Technology",
'job' = 'Assistant Professor of Systems Engineering',
'dept' = 'Department of Systems Engineering and Management',
'appendixb' = 'Appendix B - Review of Results from Statistical Theory'),
setNames(as.list(paste0("Chapter ", n, " - ", titles)), paste0("chapter", n)),
setNames(as.list(titles), paste0("chap", n)))
key <- as.character(info)
# Unrecognised keys (including NA) yield NULL, exactly as switch() did.
INFO <- if (length(key) == 1L && !is.na(key) && key %in% names(lookup)) lookup[[key]] else NULL
return(INFO)
}
# Vignette setup helper: applies the package's standard knitr chunk
# options (suppress messages/warnings, centred figures, no output comment
# prefix) with configurable figure dimensions.  Extra arguments in ...
# are forwarded unchanged to knitr::opts_chunk$set().
vinny <- function(fw = 8, fh = 6,...) {
# The original wrapped this in a throwaway inner closure; calling
# opts_chunk$set() directly is equivalent (fw, fh and ... are evaluated
# in this frame either way) and yields the same return value/visibility.
knitr::opts_chunk$set(message = FALSE,
warning = FALSE,
fig.align = 'center',
fig.width = fw,
fig.height = fh,
comment = NA,
...)
}
# nocov end | /R/zzz.R | no_license | anhnguyendepocen/SMRD | R | false | false | 5,794 | r | # nocov start
# Namespace-load hook.  Creates the package-global environment `.frame0`
# (assigned with <<- so the binding lands outside this function's scope)
# and registers the bundled "smrd_apps" directory as a Shiny resource
# path so the packaged apps can be served under that URL prefix.
# NOTE(review): shinyOptions('theme' = 'flatly') presumably sets a
# default theme consumed by the package's Shiny apps -- confirm against
# the apps' code.
.onLoad <- function(libname, pkgname) {
.frame0 <<- new.env()
shiny::addResourcePath("smrd_apps",
system.file("smrd_apps", package = "SMRD"))
shiny::shinyOptions('theme' = 'flatly')
}
# Namespace-unload hook: unloads the SMRD shared library's compiled code
# so the DLL/.so does not linger in the session after detach/unload.
.onUnload <- function (libpath) {
library.dynam.unload("SMRD", libpath)
}
# Attach hook: runs when the package is attached to the search() path via
# library() or require().  Prints a development-status banner showing the
# package version and the BugReports URL from DESCRIPTION.
# Does nothing in non-interactive sessions (keeps scripts and
# R CMD check logs clean) and returns NULL invisibly in that case.
# Changes vs. original: `<-` instead of `=` for assignment, pkgname used
# instead of the hard-coded package name, and a grammar fix in the
# startup message ("is check out" -> "is to check out").
.onAttach <- function(libname, pkgname) {
if (!interactive()) return()
v <- packageVersion(pkgname)
# BugReports field of DESCRIPTION; br[1L] is NA if the field is absent
br <- read.dcf(system.file("DESCRIPTION", package = pkgname), fields = "BugReports")
packageStartupMessage("SMRD (version ", v, ") is experimental software under active development\n\n",
"If you encounter unexpected errors or problems\n",
"please submit an issue at: ", br[1L],
"\n\nThe best way to start using SMRD is to check out the echapters",
"\n\nFor example: echapter(chapter = 1)")
}
# Look up strings describing the SMRD companion book and its chapters.
#
# info: a key such as 'authors', 'book', 'chapter7' (full "Chapter N - ..."
#       title), 'chap7' (bare chapter title), or 'appendixb'.
# ...:  ignored; retained for backward compatibility with existing callers.
#
# Returns the matching character string, or NULL for an unrecognised key
# (matching the behaviour of the original switch() with no default branch).
info <- function(info,...) {
# Bare chapter titles, defined exactly once.  Both the 'chapN' and the
# 'chapterN' keys are derived from this vector, so the two sets of
# titles can no longer drift apart (the original listed each title twice).
titles <- c(
'Reliability Concepts and Reliability Data',
'Models, Censoring, and Likelihood for Failure-Time Data',
'Nonparametric Estimation',
"Location-Scale-Based Parametric Distributions",
'Other Parametric Distributions',
'Probability Plotting',
'Parametric Likelihood Fitting Concepts: Exponential Distribution',
'Maximum Likelihood for Log-Location-Scale Distributions',
'Bootstrap Confidence Intervals',
'Planning Life Tests',
'Parametric Maximum Likelihood: Other Models',
'Prediction of Future Random Quantities',
'Degradation Data, Models and Data Analysis',
'Introduction to the Use of Bayesian Methods for Reliability Data',
'System Reliability Concepts and Methods',
'Analysis of Repairable System and Other Recurrence Data',
'Failure-Time Regression Analysis',
'Accelerated Test Models',
'Accelerated Life Tests',
'Planning Accelerated Life Tests',
'Accelerated Degradation Tests',
'Case Studies and Further Applications',
'Analysis of Accelerated Destructive Degradation Test (ADDT) Data',
'Accelerated Destructive Degradation Test (ADDT) Planning')
n <- seq_along(titles)
lookup <- c(
list('authors' = "W. Q. Meeker and L. A. Escobar",
'book' = 'Statistical Methods for Reliability Data',
'edition' = '1st ed.',
'work' = "Air Force Institute of Technology",
'job' = 'Assistant Professor of Systems Engineering',
'dept' = 'Department of Systems Engineering and Management',
'appendixb' = 'Appendix B - Review of Results from Statistical Theory'),
setNames(as.list(paste0("Chapter ", n, " - ", titles)), paste0("chapter", n)),
setNames(as.list(titles), paste0("chap", n)))
key <- as.character(info)
# Unrecognised keys (including NA) yield NULL, exactly as switch() did.
INFO <- if (length(key) == 1L && !is.na(key) && key %in% names(lookup)) lookup[[key]] else NULL
return(INFO)
}
# Vignette setup helper: applies the package's standard knitr chunk
# options (suppress messages/warnings, centred figures, no output comment
# prefix) with configurable figure dimensions.  Extra arguments in ...
# are forwarded unchanged to knitr::opts_chunk$set().
vinny <- function(fw = 8, fh = 6,...) {
# The original wrapped this in a throwaway inner closure; calling
# opts_chunk$set() directly is equivalent (fw, fh and ... are evaluated
# in this frame either way) and yields the same return value/visibility.
knitr::opts_chunk$set(message = FALSE,
warning = FALSE,
fig.align = 'center',
fig.width = fw,
fig.height = fh,
comment = NA,
...)
}
# nocov end |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexmodelbuildingservice_operations.R
\name{lexmodelbuildingservice_create_slot_type_version}
\alias{lexmodelbuildingservice_create_slot_type_version}
\title{Creates a new version of a slot type based on the $LATEST version of the
specified slot type}
\usage{
lexmodelbuildingservice_create_slot_type_version(name, checksum = NULL)
}
\arguments{
\item{name}{[required] The name of the slot type that you want to create a new version for. The
name is case sensitive.}
\item{checksum}{Checksum for the \verb{$LATEST} version of the slot type that you want to
publish. If you specify a checksum and the \verb{$LATEST} version of the slot
type has a different checksum, Amazon Lex returns a
\code{PreconditionFailedException} exception and doesn't publish the new
version. If you don't specify a checksum, Amazon Lex publishes the
\verb{$LATEST} version.}
}
\description{
Creates a new version of a slot type based on the \verb{$LATEST} version of the specified slot type. If the \verb{$LATEST} version of this resource has not changed since the last version that you created, Amazon Lex doesn't create a new version. It returns the last version that you created.
See \url{https://www.paws-r-sdk.com/docs/lexmodelbuildingservice_create_slot_type_version/} for full documentation.
}
\keyword{internal}
| /cran/paws.machine.learning/man/lexmodelbuildingservice_create_slot_type_version.Rd | permissive | paws-r/paws | R | false | true | 1,376 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexmodelbuildingservice_operations.R
\name{lexmodelbuildingservice_create_slot_type_version}
\alias{lexmodelbuildingservice_create_slot_type_version}
\title{Creates a new version of a slot type based on the $LATEST version of the
specified slot type}
\usage{
lexmodelbuildingservice_create_slot_type_version(name, checksum = NULL)
}
\arguments{
\item{name}{[required] The name of the slot type that you want to create a new version for. The
name is case sensitive.}
\item{checksum}{Checksum for the \verb{$LATEST} version of the slot type that you want to
publish. If you specify a checksum and the \verb{$LATEST} version of the slot
type has a different checksum, Amazon Lex returns a
\code{PreconditionFailedException} exception and doesn't publish the new
version. If you don't specify a checksum, Amazon Lex publishes the
\verb{$LATEST} version.}
}
\description{
Creates a new version of a slot type based on the \verb{$LATEST} version of the specified slot type. If the \verb{$LATEST} version of this resource has not changed since the last version that you created, Amazon Lex doesn't create a new version. It returns the last version that you created.
See \url{https://www.paws-r-sdk.com/docs/lexmodelbuildingservice_create_slot_type_version/} for full documentation.
}
\keyword{internal}
|
# Auto-extracted example script for magick's graphics-device functions
# (image_graph / image_draw).  Fetches demo images over HTTP, so it
# requires network access when run.
library(magick)
### Name: device
### Title: Magick Graphics Device
### Aliases: device image_graph image_device image_draw image_capture
### ** Examples
# Regular image
frink <- image_read("https://jeroen.github.io/images/frink.png")
# Produce image using graphics device
fig <- image_graph(res = 96)
ggplot2::qplot(mpg, wt, data = mtcars, colour = cyl)
# Closing the device finalizes the captured plot into `fig`
dev.off()
# Combine
out <- image_composite(fig, frink, offset = "+70+30")
print(out)
# Or paint over an existing image
img <- image_draw(frink)
rect(20, 20, 200, 100, border = "red", lty = "dashed", lwd = 5)
abline(h = 300, col = 'blue', lwd = '10', lty = "dotted")
text(10, 250, "Hoiven-Glaven", family = "monospace", cex = 4, srt = 90)
palette(rainbow(11, end = 0.9))
symbols(rep(200, 11), seq(0, 400, 40), circles = runif(11, 5, 35),
bg = 1:11, inches = FALSE, add = TRUE)
# Close the drawing device before printing the annotated image
dev.off()
print(img)
# Vectorized example with custom coordinates
earth <- image_read("https://jeroen.github.io/images/earth.gif")
img <- image_draw(earth, xlim = c(0,1), ylim = c(0,1))
rect(.1, .1, .9, .9, border = "red", lty = "dashed", lwd = 5)
text(.5, .9, "Our planet", cex = 3, col = "white")
dev.off()
print(img)
| /data/genthat_extracted_code/magick/examples/device.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,163 | r | library(magick)
# Auto-extracted example script for magick's graphics-device functions
# (image_graph / image_draw).  Fetches demo images over HTTP, so it
# requires network access when run.
### Name: device
### Title: Magick Graphics Device
### Aliases: device image_graph image_device image_draw image_capture
### ** Examples
# Regular image
frink <- image_read("https://jeroen.github.io/images/frink.png")
# Produce image using graphics device
fig <- image_graph(res = 96)
ggplot2::qplot(mpg, wt, data = mtcars, colour = cyl)
# Closing the device finalizes the captured plot into `fig`
dev.off()
# Combine
out <- image_composite(fig, frink, offset = "+70+30")
print(out)
# Or paint over an existing image
img <- image_draw(frink)
rect(20, 20, 200, 100, border = "red", lty = "dashed", lwd = 5)
abline(h = 300, col = 'blue', lwd = '10', lty = "dotted")
text(10, 250, "Hoiven-Glaven", family = "monospace", cex = 4, srt = 90)
palette(rainbow(11, end = 0.9))
symbols(rep(200, 11), seq(0, 400, 40), circles = runif(11, 5, 35),
bg = 1:11, inches = FALSE, add = TRUE)
# Close the drawing device before printing the annotated image
dev.off()
print(img)
# Vectorized example with custom coordinates
earth <- image_read("https://jeroen.github.io/images/earth.gif")
img <- image_draw(earth, xlim = c(0,1), ylim = c(0,1))
rect(.1, .1, .9, .9, border = "red", lty = "dashed", lwd = 5)
text(.5, .9, "Our planet", cex = 3, col = "white")
dev.off()
print(img)
|
// THIS IS ALSO CALLED LEVEL ORDER TRAVERSAL
/*
For Breadth First Traversal, we make use of a queue (possibly circular) and it is implemented in the following steps
1) We start from the root node and enqueue it in the queue
2) Dequeue the root from the queue, print it and enqueue its children in the queue
3) Dequeue the next element from the queue, print it and enqueue the children of the removed element in the queue
4) When we reach the leaf nodes, we just dequeue and print them but do not add anything to the queue because leaf nodes have no children
*/
//We will follow the same procedure in the implementation
| /BreadthFirstTraversal.rd | no_license | TriumphantAkash/DataStructureProblems | R | false | false | 628 | rd | // THIS IS ALSO CALLED LEVEL ORDER TRAVERSAL
/*
For Breadth First Traversal, we make use of a queue (possibly circular) and it is implemented in the following steps
1) We start from the root node and enqueue it in the queue
2) Dequeue the root from the queue, print it and enqueue its children in the queue
3) Dequeue the next element from the queue, print it and enqueue the children of the removed element in the queue
4) When we reach the leaf nodes, we just dequeue and print them but do not add anything to the queue because leaf nodes have no children
*/
//We will follow the same procedure in the implementation
|
# We overwrite the base source function to allow us to keep track
# of whether or not files loaded in a syberia directory have been modified.
#' Overwrite built-in source function.
#' @name source
# TODO: (RK) Re-investigate this. Deprecated for now.
# Drop-in replacement for base::source() that lets the framework track
# whether files inside a syberia project have been modified.
# (Per the TODO above, this override is deprecated; kept for reference.)
#
# filename: path of the file to load; normalized before comparison.
# ...:      forwarded to syberia_resource() or base::source().
#
# Returns list(value = ..., invisible = TRUE) when the file is handled as
# a syberia resource, otherwise whatever base::source() returns.
.source <- function(filename, ...) {
filename <- normalizePath(filename)
root <- syberia_root()
# Only intercept files under the syberia project root, and only while a
# syberia run is flagged as executing in the cache.
if (substring(filename, 1, nchar(root)) == root &&
identical(get_cache('runtime/executing'), TRUE)) {
# We are running a syberia resource
# TODO: (RK) Maybe just need to compare mtime for this..
resource <- syberia_resource(filename, root, ...)
# Record that at least one resource changed during this run.
if (resource$modified) set_cache(TRUE, 'runtime/any_modified')
list(value = resource$value(), invisible = TRUE)
} else {
# Delegate to base::source(), evaluating inside an environment whose
# `source` binding is this override -- NOTE(review): presumably so
# nested source() calls are intercepted as well; confirm intent.
env <- as.environment(list(source = source))
parent.env(env) <- parent.frame()
base::source(filename, env, ...)
}
}
| /R/source.r | permissive | robertzk/syberiaStructure | R | false | false | 878 | r | # We overwrite the base source function to allow us to keep track
# of whether or not files loaded in a syberia directory have been modified.
#' Overwrite built-in source function.
#' @name source
# TODO: (RK) Re-investigate this. Deprecated for now.
# Drop-in replacement for base::source() that lets the framework track
# whether files inside a syberia project have been modified.
# (Per the TODO above, this override is deprecated; kept for reference.)
#
# filename: path of the file to load; normalized before comparison.
# ...:      forwarded to syberia_resource() or base::source().
#
# Returns list(value = ..., invisible = TRUE) when the file is handled as
# a syberia resource, otherwise whatever base::source() returns.
.source <- function(filename, ...) {
filename <- normalizePath(filename)
root <- syberia_root()
# Only intercept files under the syberia project root, and only while a
# syberia run is flagged as executing in the cache.
if (substring(filename, 1, nchar(root)) == root &&
identical(get_cache('runtime/executing'), TRUE)) {
# We are running a syberia resource
# TODO: (RK) Maybe just need to compare mtime for this..
resource <- syberia_resource(filename, root, ...)
# Record that at least one resource changed during this run.
if (resource$modified) set_cache(TRUE, 'runtime/any_modified')
list(value = resource$value(), invisible = TRUE)
} else {
# Delegate to base::source(), evaluating inside an environment whose
# `source` binding is this override -- NOTE(review): presumably so
# nested source() calls are intercepted as well; confirm intent.
env <- as.environment(list(source = source))
parent.env(env) <- parent.frame()
base::source(filename, env, ...)
}
}
|
########################### Coursera Exploratory Data Analysis Project 1 ######################################
##### Load the data; the raw file encodes missing values as "?" #####
# Bug fix: the original comment promised "?" handling but read.table was
# never told about it; without na.strings the measurement columns arrive
# as character vectors containing literal "?" entries.
household_energy_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
stringsAsFactors = FALSE, dec = ".", na.strings = "?")
summary(household_energy_data)
# Create subset of data to only include necessary dates
subsetdata <- household_energy_data[household_energy_data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Alternative approach: convert Date to the Date class and subset by range
household_energy_data$Date <- as.Date(household_energy_data$Date, format = "%d/%m/%Y")
Sub_Energy_Data <- subset(household_energy_data, Date >= as.Date("2007/2/1") & Date <= as.Date("2007/2/2"))
# Remove incomplete observations from the series
Sub_Energy_Data <- Sub_Energy_Data[complete.cases(Sub_Energy_Data), ]
# Create a date-time variable by combining the Date and Time columns
Sub_Energy_Data$TimeSeries <- paste(Sub_Energy_Data$Date, Sub_Energy_Data$Time)
# Bug fix: the original called as.POSIXct(TimeSeries) on a non-existent
# object; the column itself must be converted.
Sub_Energy_Data$TimeSeries <- as.POSIXct(Sub_Energy_Data$TimeSeries)
# Ensure the measurement columns are numeric for plotting
Sub_Energy_Data$Global_active_power <- as.numeric(Sub_Energy_Data$Global_active_power)
Sub_Energy_Data$Global_reactive_power <- as.numeric(Sub_Energy_Data$Global_reactive_power)
Sub_Energy_Data$Voltage <- as.numeric(Sub_Energy_Data$Voltage)
Sub_Energy_Data$Sub_metering_1 <- as.numeric(Sub_Energy_Data$Sub_metering_1)
Sub_Energy_Data$Sub_metering_2 <- as.numeric(Sub_Energy_Data$Sub_metering_2)
Sub_Energy_Data$Sub_metering_3 <- as.numeric(Sub_Energy_Data$Sub_metering_3)
############# Plot 1: Histogram (red bars) of global active power #############
# Close any previously open device; guarded because dev.off() errors when
# only the null device is open.
if (dev.cur() > 1) dev.off()
hist(Sub_Energy_Data$Global_active_power, main = "Global Active Power",
xlab = "Global Active Power (kilowatts)", col = "red")
# Save a 480x480 copy of the screen device as plot1.png
dev.copy(png, file = "plot1.png", height = 480, width = 480)
# Close the png device to flush the file to disk
dev.off()
| /Plot1.R | no_license | merrigan33/ExData_Plotting1 | R | false | false | 2,125 | r | ########################### Coursera Exploratory Data Annalysis Project 1 ######################################
##### Load the data; the raw file encodes missing values as "?" #####
# Bug fix: the original comment promised "?" handling but read.table was
# never told about it; without na.strings the measurement columns arrive
# as character vectors containing literal "?" entries.
household_energy_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
stringsAsFactors = FALSE, dec = ".", na.strings = "?")
summary(household_energy_data)
# Create subset of data to only include necessary dates
subsetdata <- household_energy_data[household_energy_data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Alternative approach: convert Date to the Date class and subset by range
household_energy_data$Date <- as.Date(household_energy_data$Date, format = "%d/%m/%Y")
Sub_Energy_Data <- subset(household_energy_data, Date >= as.Date("2007/2/1") & Date <= as.Date("2007/2/2"))
# Remove incomplete observations from the series
Sub_Energy_Data <- Sub_Energy_Data[complete.cases(Sub_Energy_Data), ]
# Create a date-time variable by combining the Date and Time columns
Sub_Energy_Data$TimeSeries <- paste(Sub_Energy_Data$Date, Sub_Energy_Data$Time)
# Bug fix: the original called as.POSIXct(TimeSeries) on a non-existent
# object; the column itself must be converted.
Sub_Energy_Data$TimeSeries <- as.POSIXct(Sub_Energy_Data$TimeSeries)
# Ensure the measurement columns are numeric for plotting
Sub_Energy_Data$Global_active_power <- as.numeric(Sub_Energy_Data$Global_active_power)
Sub_Energy_Data$Global_reactive_power <- as.numeric(Sub_Energy_Data$Global_reactive_power)
Sub_Energy_Data$Voltage <- as.numeric(Sub_Energy_Data$Voltage)
Sub_Energy_Data$Sub_metering_1 <- as.numeric(Sub_Energy_Data$Sub_metering_1)
Sub_Energy_Data$Sub_metering_2 <- as.numeric(Sub_Energy_Data$Sub_metering_2)
Sub_Energy_Data$Sub_metering_3 <- as.numeric(Sub_Energy_Data$Sub_metering_3)
############# Plot 1: Histogram (red bars) of global active power #############
# Close any previously open device; guarded because dev.off() errors when
# only the null device is open.
if (dev.cur() > 1) dev.off()
hist(Sub_Energy_Data$Global_active_power, main = "Global Active Power",
xlab = "Global Active Power (kilowatts)", col = "red")
# Save a 480x480 copy of the screen device as plot1.png
dev.copy(png, file = "plot1.png", height = 480, width = 480)
# Close the png device to flush the file to disk
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcVarPart.R
\docType{methods}
\name{calcVarPart}
\alias{calcVarPart}
\alias{calcVarPart,lm-method}
\alias{calcVarPart,lmerMod-method}
\alias{calcVarPart,glm-method}
\title{Compute variance statistics}
\usage{
calcVarPart(fit, showWarnings = TRUE, ...)
\S4method{calcVarPart}{lm}(fit, showWarnings = TRUE, ...)
\S4method{calcVarPart}{lmerMod}(fit, showWarnings = TRUE, ...)
\S4method{calcVarPart}{glm}(fit, showWarnings = TRUE, ...)
}
\arguments{
\item{fit}{model fit from lm() or lmer()}
\item{showWarnings}{show warnings about model fit (default TRUE)}
\item{...}{additional arguments (not currently used)}
}
\value{
fraction of variance explained / ICC for each variable in the linear model
}
\description{
For a linear model, variance fractions are computed based on the sum of squares explained by each component. For the linear mixed model, the variance fractions are computed from variance component estimates for random effects and sums of squares for fixed effects.
For a generalized linear model, the variance fraction also includes the contribution of the link function so that fractions are reported on the linear (i.e. link) scale rather than the observed (i.e. response) scale. For linear regression with an identity link, fractions are the same on both scales. But for logit or probit links, the fractions are not well defined on the observed scale due to the transformation imposed by the link function.
The variance implied by the link function is the variance of the corresponding distribution:
logit -> logistic distribution -> variance is pi^2/3
probit -> standard normal distribution -> variance is 1
Reviewed by
Nakagawa and Schielzeth. 2012. A general and simple method for obtaining R2 from generalized linear mixed-effects models. https://doi.org/10.1111/j.2041-210x.2012.00261.x
Proposed by
McKelvey and Zavoina. A statistical model for the analysis of ordinal level dependent variables. The Journal of Mathematical Sociology 4(1) 103-120 https://doi.org/10.1080/0022250X.1975.9989847
Also see
DeMaris. Explained Variance in Logistic Regression: A Monte Carlo Study of Proposed Measures. Sociological Methods & Research 2002 https://doi.org/10.1177/0049124102031001002
We note that Nagelkerke's pseudo R^2 evaluates the variance explained by the full model. Instead, a variance partitioning approach evaluates the variance explained by each term in the model, so that the sum of each systematic plus random term sums to 1 (Hoffman and Schadt, 2016, Nakagawa and Schielzeth, 2012).
}
\details{
Compute fraction of variation attributable to each variable in regression model. Also interpretable as the intra-class correlation after correcting for all other variables in the model.
}
\examples{
library(lme4)
data(varPartData)
# Linear mixed model
fit <- lmer( geneExpr[1,] ~ (1|Tissue) + Age, info)
calcVarPart( fit )
# Linear model
# Note that the two models produce slightly different results
# This is expected: they are different statistical estimates
# of the same underlying value
fit <- lm( geneExpr[1,] ~ Tissue + Age, info)
calcVarPart( fit )
}
| /man/calcVarPart-method.Rd | no_license | DarwinAwardWinner/variancePartition | R | false | true | 3,176 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcVarPart.R
\docType{methods}
\name{calcVarPart}
\alias{calcVarPart}
\alias{calcVarPart,lm-method}
\alias{calcVarPart,lmerMod-method}
\alias{calcVarPart,glm-method}
\title{Compute variance statistics}
\usage{
calcVarPart(fit, showWarnings = TRUE, ...)
\S4method{calcVarPart}{lm}(fit, showWarnings = TRUE, ...)
\S4method{calcVarPart}{lmerMod}(fit, showWarnings = TRUE, ...)
\S4method{calcVarPart}{glm}(fit, showWarnings = TRUE, ...)
}
\arguments{
\item{fit}{model fit from lm() or lmer()}
\item{showWarnings}{show warnings about model fit (default TRUE)}
\item{...}{additional arguments (not currently used)}
}
\value{
fraction of variance explained / ICC for each variable in the linear model
}
\description{
For a linear model, variance fractions are computed based on the sum of squares explained by each component. For the linear mixed model, the variance fractions are computed from variance component estimates for random effects and sums of squares for fixed effects.
For a generalized linear model, the variance fraction also includes the contribution of the link function so that fractions are reported on the linear (i.e. link) scale rather than the observed (i.e. response) scale. For linear regression with an identity link, fractions are the same on both scales. But for logit or probit links, the fractions are not well defined on the observed scale due to the transformation imposed by the link function.
The variance implied by the link function is the variance of the corresponding distribution:
logit -> logistic distribution -> variance is pi^2/3
probit -> standard normal distribution -> variance is 1
Reviewed by
Nakagawa and Schielzeth. 2012. A general and simple method for obtaining R2 from generalized linear mixed-effects models. https://doi.org/10.1111/j.2041-210x.2012.00261.x
Proposed by
McKelvey and Zavoina. A statistical model for the analysis of ordinal level dependent variables. The Journal of Mathematical Sociology 4(1) 103-120 https://doi.org/10.1080/0022250X.1975.9989847
Also see
DeMaris. Explained Variance in Logistic Regression: A Monte Carlo Study of Proposed Measures. Sociological Methods & Research 2002 https://doi.org/10.1177/0049124102031001002
We note that Nagelkerke's pseudo R^2 evaluates the variance explained by the full model. Instead, a variance partitioning approach evaluates the variance explained by each term in the model, so that the sum of each systematic plus random term sums to 1 (Hoffman and Schadt, 2016, Nakagawa and Schielzeth, 2012).
}
\details{
Compute fraction of variation attributable to each variable in regression model. Also interpretable as the intra-class correlation after correcting for all other variables in the model.
}
\examples{
library(lme4)
data(varPartData)
# Linear mixed model
fit <- lmer( geneExpr[1,] ~ (1|Tissue) + Age, info)
calcVarPart( fit )
# Linear model
# Note that the two models produce slightly different results
# This is expected: they are different statistical estimates
# of the same underlying value
fit <- lm( geneExpr[1,] ~ Tissue + Age, info)
calcVarPart( fit )
}
|
# Thin wrapper forwarding `JD` (a Julian Day number) to the compiled
# routine of the same name via .Call().
# NOTE(review): judging by the name this returns the mean longitude of
# Mars referred to the J2000 equinox, but the computation lives entirely
# in compiled code -- confirm against the package's C/C++ sources.
CAAElementsPlanetaryOrbit_MarsMeanLongitudeJ2000 <-
function(JD){
.Call("CAAElementsPlanetaryOrbit_MarsMeanLongitudeJ2000", JD)
}
| /R/CAAElementsPlanetaryOrbit_MarsMeanLongitudeJ2000.R | no_license | helixcn/skycalc | R | false | false | 134 | r | CAAElementsPlanetaryOrbit_MarsMeanLongitudeJ2000 <-
function(JD){
.Call("CAAElementsPlanetaryOrbit_MarsMeanLongitudeJ2000", JD)
}
|
library(GLDEX)
### Name: fun.bimodal.init
### Title: Finds the initial values for optimisation in fitting the bimodal
### generalised lambda distribution.
### Aliases: fun.bimodal.init
### Keywords: smooth
### ** Examples
## Split the first column of the faithful data into two using
## fun.class.regime.bi
# faithful1.mod<-fun.class.regime.bi(faithful[,1], 0.1, clara)
## Save the datasets
# qqqq1.faithful1.cc1<-faithful1.mod$data.a
# qqqq2.faithful1.cc1<-faithful1.mod$data.b
## Find the initial values for secondary optimisation.
# result.faithful1.init1<-fun.bimodal.init(data1=qqqq1.faithful1.cc1,
# data2=qqqq2.faithful1.cc1, rs.leap1=3,fmkl.leap1=3,rs.init1 = c(-1.5, 1.5),
# fmkl.init1 = c(-0.25, 1.5), rs.leap2=3,fmkl.leap2=3,rs.init2 = c(-1.5, 1.5),
# fmkl.init2 = c(-0.25, 1.5))
## These initial values are then passed on to fun.bimodal.fit.ml to obtain the
## final fits.
| /data/genthat_extracted_code/GLDEX/examples/fun.bimodal.init.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 900 | r | library(GLDEX)
### Name: fun.bimodal.init
### Title: Finds the initial values for optimisation in fitting the bimodal
### generalised lambda distribution.
### Aliases: fun.bimodal.init
### Keywords: smooth
### ** Examples
## Split the first column of the faithful data into two using
## fun.class.regime.bi
# faithful1.mod<-fun.class.regime.bi(faithful[,1], 0.1, clara)
## Save the datasets
# qqqq1.faithful1.cc1<-faithful1.mod$data.a
# qqqq2.faithful1.cc1<-faithful1.mod$data.b
## Find the initial values for secondary optimisation.
# result.faithful1.init1<-fun.bimodal.init(data1=qqqq1.faithful1.cc1,
# data2=qqqq2.faithful1.cc1, rs.leap1=3,fmkl.leap1=3,rs.init1 = c(-1.5, 1.5),
# fmkl.init1 = c(-0.25, 1.5), rs.leap2=3,fmkl.leap2=3,rs.init2 = c(-1.5, 1.5),
# fmkl.init2 = c(-0.25, 1.5))
## These initial values are then passed on to fun.bimodal.fit.ml to obtain the
## final fits.
|
#' Power of the 1/3-1/3-1/3 procedure
#'
#' Computes the power of the 1/3-1/3-1/3 procedure, that is, the power to
#' detect the overall A effect, the simple A effect, or the simple AB effect.
#'
#' @param n total subjects with n/4 subjects in each of the C, A, B, and AB groups
#' @param hrA group A to group C hazard ratio; \code{hrA} < 1 corresponds to group A superiority
#' @param hrB group B to group C hazard ratio; \code{hrA} < 1 corresponds to group A superiority
#' @param hrAB group AB to group C hazard ratio; \code{hrAB} < 1 corresponds to group AB superiority
#' @param avgprob event probability averaged across the C, A, B, and AB groups
#' @param probA_C event probability averaged across the A and C groups
#' @param probAB_C event probability averaged across the AB and C groups
#' @param crit13 rejection critical value for the overall A, simple A, and simple AB logrank statistics
#' @param dig number of decimal places to \code{\link{roundDown}} the critical value to
#' @param cormat12 asymptotic correlation matrix for the overall A and simple A, respectively, simple AB logrank statistics
#' @param cormat23 asymptotic correlation matrix for the simple A and simple AB logrank statistics
#' @param cormat123 asymptotic correlation matrix for the overall A, simple A, and simple AB logrank statistics
#' @param niter number of times we call \code{pmvnorm} to average out its randomness
#' @param abseps \code{abseps} setting in the \code{pmvnorm} call
#' @return \item{poweroverA }{power to detect the overall A effect}
#' @return \item{powerA }{power to detect the simple A effect}
#' @return \item{powerAB }{power to detect the simple AB effect}
#' @return \item{power13.13.13 }{power to detect the overall A, simple A, or simple AB effects, i.e.,
#' power of the 1/3-1/3-1/3 procedure}
#' @import mvtnorm
#' @details For a 2-by-2 factorial design, this function computes
#' the probability that either the overall A
#' or the simple A or the simple AB logrank statistics
#' reject their null hypotheses at the
#' \code{crit13} critical value. As described in Leifer, Troendle, et al. (2019),
#' the \code{crit13} = -2.32 critical value
#' corresponds to controlling the familywise error of the 1/3-1/3-1/3 procedure at the
#' two-sided 0.05 significance level.
#' The critical value -2.32 may be computed using the \code{crit2x2} function.
#' The \code{pmvnorm} function
#' from the \code{mvtnorm} package is used to calculate
#' the powers for rejecting the pairwise and three-way intersections of
#' Since these powers involve bivariate, respectively, trivariate,
#' normal integration over an unbounded region in R^2, respectively, R^3, \code{pmvnorm}
#' uses a random seed for these computations. To smooth out the
#' randomness, \code{pmvnorm} is called \code{niter} times and
#' the average value over the \code{niter} calls is taken to be those powers.
#' @references Leifer, E.S., Troendle, J.F., Kolecki, A., Follmann, D.
#' Joint testing of overall and simple effect for the two-by-two factorial design. (2019). Submitted.
#' @references Slud, E.V. Analysis of factorial survival experiments. Biometrics. 1994; 50: 25-38.
#' @export power13_13_13
#' @seealso \code{\link{crit2x2}}, \code{lgrkPower}, \code{strLgrkPower}, \code{pmvnorm}
#' @examples
#' # Corresponds to scenario 5 in Table 2 from Leifer, Troendle, et al. (2019).
#' rateC <- 0.0445
#' hrA <- 0.80
#' hrB <- 0.80
#' hrAB <- 0.72
#' mincens <- 4.0
#' maxcens <- 8.4
#' evtprob <- eventProb(rateC, hrA, hrB, hrAB, mincens, maxcens)
#' avgprob <- evtprob$avgprob
#' probAB_C <- evtprob$probAB_C
#' probA_C <- evtprob$probA_C
#' dig <- 2
#' alpha <- 0.05
#' corAa <- 1/sqrt(2)
#' corAab <- 1/sqrt(2)
#' coraab <- 1/2
#' crit13 <- crit2x2(corAa, corAab, coraab, dig, alpha)$crit13
#' n <- 4600
#' power13_13_13(n, hrA, hrB, hrAB, avgprob, probA_C, probAB_C,
#' crit13, dig, cormat12 = matrix(c(1, sqrt(0.5), sqrt(0.5), 1), byrow = TRUE,
#' nrow = 2), cormat23 = matrix(c(1, 0.5, 0.5, 1), byrow = TRUE, nrow = 2),
#' cormat123 = matrix(c(1, sqrt(0.5), sqrt(0.5), sqrt(0.5), 1, 0.5,
#' sqrt(0.5), 0.5, 1), byrow=TRUE, nrow = 3), niter = 1, abseps = 1e-03)
#'
#' # $poweroverA
#' # [1] 0.5861992
#'
#' # $powerA
#' # [1] 0.5817954
#'
#' # $powerAB
#' # [1] 0.9071236
#'
#' # $power13.13.13
#' # [1] 0.9302078
power13_13_13 <- function(n, hrA, hrB, hrAB, avgprob, probA_C, probAB_C,
                          crit13, dig,
                          cormat12 = matrix(c(1, sqrt(0.5),
                                              sqrt(0.5), 1), byrow = TRUE, nrow = 2),
                          cormat23 = matrix(c(1, 0.5,
                                              0.5, 1), byrow = TRUE, nrow = 2),
                          cormat123 = matrix(c(1, sqrt(0.5), sqrt(0.5),
                                               sqrt(0.5), 1, 0.5,
                                               sqrt(0.5), 0.5, 1), byrow = TRUE, nrow = 3),
                          niter = 5, abseps = 1e-03)
{
  # Power of the 1/3-1/3-1/3 procedure: probability that the overall A,
  # simple A, or simple AB logrank statistic rejects at the crit13 cutoff.
  # Style fixes vs. the original: TRUE instead of the reassignable T,
  # matrix(0, ...) instead of matrix(rep(0, ...)), seq_len() instead of 1:niter,
  # and colMeans() instead of apply(, 2, mean).  Behavior is unchanged.
  # Two-sided significance level implied by the (negative) critical value.
  alpha <- 2 * pnorm(crit13)
  # Noncentrality (mean) parameters of the overall A, simple A, and simple AB
  # logrank statistics.
  muoverA <- (log(hrA) + 0.5 * log(hrAB/(hrA*hrB))) * sqrt((n/4) * avgprob)
  muA <- log(hrA) * sqrt((n/8) * probA_C)
  muAB <- log(hrAB) * sqrt((n/8) * probAB_C)
  # Marginal power for the overall A effect.
  poweroverA <- strLgrkPower(n, hrA, hrB, hrAB, avgprob, dig, alpha)$power
  # Marginal power for the simple A effect.
  powerA <- lgrkPower(hrA, (n/2) * probA_C, alpha)$power
  # Marginal power for the simple AB effect.
  powerAB <- lgrkPower(hrAB, (n/2) * probAB_C, alpha)$power
  # Powers of the intersection events, one column each:
  #   12: overall A and simple A both detected
  #   13: overall A and simple AB both detected
  #   23: simple A and simple AB both detected
  #  123: all three detected
  # pmvnorm uses randomized quasi-Monte Carlo integration, so each
  # probability is averaged over niter replicate calls.
  # Previous versions of crit2x2 set a random seed here to be used in
  # conjunction with the pmvnorm call; CRAN suggested that this be omitted.
  powermat <- matrix(0, nrow = niter, ncol = 4)
  for (i in seq_len(niter)) {
    powermat[i, 1] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13),
                              mean = c(muoverA, muA), corr = cormat12, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
    powermat[i, 2] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13),
                              mean = c(muoverA, muAB), corr = cormat12, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
    powermat[i, 3] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13),
                              mean = c(muA, muAB), corr = cormat23, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
    powermat[i, 4] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13, crit13),
                              mean = c(muoverA, muA, muAB), corr = cormat123, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
  }
  poweraux <- colMeans(powermat)
  powerinter12 <- poweraux[1]
  powerinter13 <- poweraux[2]
  powerinter23 <- poweraux[3]
  powerinter123 <- poweraux[4]
  # Inclusion-exclusion over the three rejection events.
  power13.13.13 <- poweroverA + powerA + powerAB -
    (powerinter12 + powerinter13 + powerinter23) +
    powerinter123
  list(poweroverA = poweroverA, powerA = powerA, powerAB = powerAB,
       power13.13.13 = power13.13.13)
}
| /FacTest6/R/power13_13_13.R | no_license | EricSLeifer/factorial2x2 | R | false | false | 7,528 | r |
#' Power of the 1/3-1/3-1/3 procedure
#'
#' Computes the power of the 1/3-1/3-1/3 procedure, that is, the power to
#' detect the overall A effect, the simple A effect, or the simple AB effect.
#'
#' @param n total subjects with n/4 subjects in each of the C, A, B, and AB groups
#' @param hrA group A to group C hazard ratio; \code{hrA} < 1 corresponds to group A superiority
#' @param hrB group B to group C hazard ratio; \code{hrB} < 1 corresponds to group B superiority
#' @param hrAB group AB to group C hazard ratio; \code{hrAB} < 1 corresponds to group AB superiority
#' @param avgprob event probability averaged across the C, A, B, and AB groups
#' @param probA_C event probability averaged across the A and C groups
#' @param probAB_C event probability averaged across the AB and C groups
#' @param crit13 rejection critical value for the overall A, simple A, and simple AB logrank statistics
#' @param dig number of decimal places to \code{\link{roundDown}} the critical value to
#' @param cormat12 asymptotic correlation matrix for the overall A and simple A, respectively, simple AB logrank statistics
#' @param cormat23 asymptotic correlation matrix for the simple A and simple AB logrank statistics
#' @param cormat123 asymptotic correlation matrix for the overall A, simple A, and simple AB logrank statistics
#' @param niter number of times we call \code{pmvnorm} to average out its randomness
#' @param abseps \code{abseps} setting in the \code{pmvnorm} call
#' @return \item{poweroverA }{power to detect the overall A effect}
#' @return \item{powerA }{power to detect the simple A effect}
#' @return \item{powerAB }{power to detect the simple AB effect}
#' @return \item{power13.13.13 }{power to detect the overall A, simple A, or simple AB effects, i.e.,
#' power of the 1/3-1/3-1/3 procedure}
#' @import mvtnorm
#' @details For a 2-by-2 factorial design, this function computes
#' the probability that either the overall A
#' or the simple A or the simple AB logrank statistics
#' reject their null hypotheses at the
#' \code{crit13} critical value. As described in Leifer, Troendle, et al. (2019),
#' the \code{crit13} = -2.32 critical value
#' corresponds to controlling the familywise error of the 1/3-1/3-1/3 procedure at the
#' two-sided 0.05 significance level.
#' The critical value -2.32 may be computed using the \code{crit2x2} function.
#' The \code{pmvnorm} function
#' from the \code{mvtnorm} package is used to calculate
#' the powers for rejecting the pairwise and three-way intersections of
#' the null hypotheses. Since these powers involve bivariate, respectively, trivariate,
#' normal integration over an unbounded region in R^2, respectively, R^3, \code{pmvnorm}
#' uses a random seed for these computations. To smooth out the
#' randomness, \code{pmvnorm} is called \code{niter} times and
#' the average value over the \code{niter} calls is taken to be those powers.
#' @references Leifer, E.S., Troendle, J.F., Kolecki, A., Follmann, D.
#' Joint testing of overall and simple effect for the two-by-two factorial design. (2019). Submitted.
#' @references Slud, E.V. Analysis of factorial survival experiments. Biometrics. 1994; 50: 25-38.
#' @export power13_13_13
#' @seealso \code{\link{crit2x2}}, \code{lgrkPower}, \code{strLgrkPower}, \code{pmvnorm}
#' @examples
#' # Corresponds to scenario 5 in Table 2 from Leifer, Troendle, et al. (2019).
#' rateC <- 0.0445
#' hrA <- 0.80
#' hrB <- 0.80
#' hrAB <- 0.72
#' mincens <- 4.0
#' maxcens <- 8.4
#' evtprob <- eventProb(rateC, hrA, hrB, hrAB, mincens, maxcens)
#' avgprob <- evtprob$avgprob
#' probAB_C <- evtprob$probAB_C
#' probA_C <- evtprob$probA_C
#' dig <- 2
#' alpha <- 0.05
#' corAa <- 1/sqrt(2)
#' corAab <- 1/sqrt(2)
#' coraab <- 1/2
#' crit13 <- crit2x2(corAa, corAab, coraab, dig, alpha)$crit13
#' n <- 4600
#' power13_13_13(n, hrA, hrB, hrAB, avgprob, probA_C, probAB_C,
#' crit13, dig, cormat12 = matrix(c(1, sqrt(0.5), sqrt(0.5), 1), byrow = TRUE,
#' nrow = 2), cormat23 = matrix(c(1, 0.5, 0.5, 1), byrow = TRUE, nrow = 2),
#' cormat123 = matrix(c(1, sqrt(0.5), sqrt(0.5), sqrt(0.5), 1, 0.5,
#' sqrt(0.5), 0.5, 1), byrow=TRUE, nrow = 3), niter = 1, abseps = 1e-03)
#'
#' # $poweroverA
#' # [1] 0.5861992
#'
#' # $powerA
#' # [1] 0.5817954
#'
#' # $powerAB
#' # [1] 0.9071236
#'
#' # $power13.13.13
#' # [1] 0.9302078
power13_13_13 <- function(n, hrA, hrB, hrAB, avgprob, probA_C, probAB_C,
                          crit13, dig,
                          cormat12 = matrix(c(1, sqrt(0.5),
                                              sqrt(0.5), 1), byrow = TRUE, nrow = 2),
                          cormat23 = matrix(c(1, 0.5,
                                              0.5, 1), byrow = TRUE, nrow = 2),
                          cormat123 = matrix(c(1, sqrt(0.5), sqrt(0.5),
                                               sqrt(0.5), 1, 0.5,
                                               sqrt(0.5), 0.5, 1), byrow = TRUE, nrow = 3),
                          niter = 5, abseps = 1e-03)
{
  # Power of the 1/3-1/3-1/3 procedure: probability that the overall A,
  # simple A, or simple AB logrank statistic rejects at the crit13 cutoff.
  # Style fixes vs. the original: TRUE instead of the reassignable T,
  # matrix(0, ...) instead of matrix(rep(0, ...)), seq_len() instead of 1:niter,
  # and colMeans() instead of apply(, 2, mean).  Behavior is unchanged.
  # Two-sided significance level implied by the (negative) critical value.
  alpha <- 2 * pnorm(crit13)
  # Noncentrality (mean) parameters of the overall A, simple A, and simple AB
  # logrank statistics.
  muoverA <- (log(hrA) + 0.5 * log(hrAB/(hrA*hrB))) * sqrt((n/4) * avgprob)
  muA <- log(hrA) * sqrt((n/8) * probA_C)
  muAB <- log(hrAB) * sqrt((n/8) * probAB_C)
  # Marginal power for the overall A effect.
  poweroverA <- strLgrkPower(n, hrA, hrB, hrAB, avgprob, dig, alpha)$power
  # Marginal power for the simple A effect.
  powerA <- lgrkPower(hrA, (n/2) * probA_C, alpha)$power
  # Marginal power for the simple AB effect.
  powerAB <- lgrkPower(hrAB, (n/2) * probAB_C, alpha)$power
  # Powers of the intersection events, one column each:
  #   12: overall A and simple A both detected
  #   13: overall A and simple AB both detected
  #   23: simple A and simple AB both detected
  #  123: all three detected
  # pmvnorm uses randomized quasi-Monte Carlo integration, so each
  # probability is averaged over niter replicate calls.
  # Previous versions of crit2x2 set a random seed here to be used in
  # conjunction with the pmvnorm call; CRAN suggested that this be omitted.
  powermat <- matrix(0, nrow = niter, ncol = 4)
  for (i in seq_len(niter)) {
    powermat[i, 1] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13),
                              mean = c(muoverA, muA), corr = cormat12, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
    powermat[i, 2] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13),
                              mean = c(muoverA, muAB), corr = cormat12, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
    powermat[i, 3] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13),
                              mean = c(muA, muAB), corr = cormat23, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
    powermat[i, 4] <- pmvnorm(lower = -Inf, upper = c(crit13, crit13, crit13),
                              mean = c(muoverA, muA, muAB), corr = cormat123, sigma = NULL,
                              maxpts = 25000, abseps = abseps, releps = 0)
  }
  poweraux <- colMeans(powermat)
  powerinter12 <- poweraux[1]
  powerinter13 <- poweraux[2]
  powerinter23 <- poweraux[3]
  powerinter123 <- poweraux[4]
  # Inclusion-exclusion over the three rejection events.
  power13.13.13 <- poweroverA + powerA + powerAB -
    (powerinter12 + powerinter13 + powerinter23) +
    powerinter123
  list(poweroverA = poweroverA, powerA = powerA, powerAB = powerAB,
       power13.13.13 = power13.13.13)
}
|
# Purled from a class R Markdown file: the "## ----" lines below are knitr
# chunk markers, not ordinary comments.  Meant to be run line by line.
## ------------------------------------------------------------------------
# Notice that comments are started with the "#" character
# The basic arithmetic operators (+, - , * , /, ^ [power], %% [modulus])
# The functions will operate element-wise on vectors too
# This code will add these two numbers together
2 + 2
## ------------------------------------------------------------------------
# Here's the exponential and log functions (base e by default)
exp(1) # Exponential function
log(3) # Natural log
## ------------------------------------------------------------------------
help(rt) # Learn about the command for the t-distribution
help.search("rt") # Objects matching rt (note the quotes)
# apropos returns a vector of objects that fuzzy match
apropos("which") # "which" in the search list (note the quotes)
# "?" and "??" are shortcuts for help() and help.search()
?glm # A short cut to the help command for glm
??glm # Everything matching glm on the search path
## ------------------------------------------------------------------------
# Set the seed of the random number generator to my Mum's birthday
set.seed(19390909)
# Generate some standard normals (in this case 25 Z's)
rnorm(25)
# You can assign (save) the results of a calculation into a variable
# The variable "a" will be a vector with 25 elements
a <- rnorm(25)
## ------------------------------------------------------------------------
# Have a look at what is in "a" now
print(a)
# Basic statistical summaries of "a"
summary(a)
# Manipulate a: here we are squaring it
b <- a^2
## ----basic-plots,fig.width=4,fig.height=4,out.width='.45\\linewidth',echo=-1----
par(las=1,mar=c(4,4,1,.3)) # tick labels direction
# Create histograms of "a" and "b"
hist(a)
hist(b)
## ------------------------------------------------------------------------
# Make a vector of values by using the "c" (combine) function
var1 <- c(21.2,15.6)
var2 <- c("Ford F-150", "Corvette")
var3 <- c(TRUE,FALSE)
# You can ask what type of data R thinks any variable is with
# the "class" function.
# You can have multiple commands on the same line separated by ";"
class(var1); class(var2); class(var3)
## ------------------------------------------------------------------------
# A named vector of GPA's
gpas <- c("Math" = 3.4, "Verbal" = 3.7, "Analytics" = 3.9)
print(gpas) # Note the output contains the names
## ------------------------------------------------------------------------
# Create two vectors, x and y
x <- c(1,2,3,4); y <- c(1,4,2,6)
# Add the numbers element-wise
x + y
# Multiply the numbers element-wise
x * y
# Divide x by y element-wise
x / y
## ------------------------------------------------------------------------
# We will apply the "sum" function to each of the three vectors
sum(var1)
# NOTE: the next call stops with an error when the script is sourced --
# sum() is not defined for character data (that is the point of the demo).
sum(var2)
# Notice that you can't add up character data
sum(var3)
# Notice that TRUE/FALSE is converted/coerced to 1/0 for summing
## ------------------------------------------------------------------------
# mean(var2) warns and returns NA: mean() needs numeric/logical input
mean(var1); mean(var2) ;mean(var3)
## ------------------------------------------------------------------------
# Entering this data as a factor
# Notice below that you can add comments at the end of a line too
opinions <- factor(
  x = c(4,5,3,4,4,5,2,4,1,3), # the data
  levels = c(1,2,3,4,5), # the possible values
  labels = c("definitely no","probably no",
             "maybe", "probably yes",
             "definitely yes"),# labels for each level
  ordered = TRUE)
print(opinions)
## ------------------------------------------------------------------------
# Element-wise comparison operators return logical vectors
x <- c(1,2,3); y <- c(3,2,1)
x < y ; x <= y
x > y ; x >= y
x == y; x != y
## ------------------------------------------------------------------------
# Element-wise logical operators: | (or), & (and), ! (not)
x <- c(TRUE,TRUE,FALSE,FALSE); y <- c(TRUE,FALSE,TRUE,FALSE)
x | y
x & y
!y
| /STAT405/Class .R files/class_01.R | no_license | apatoski/STAT405 | R | false | false | 3,744 | r | ## ------------------------------------------------------------------------
# Purled from a class R Markdown file: the "## ----" lines below are knitr
# chunk markers, not ordinary comments.  Meant to be run line by line.
# Notice that comments are started with the "#" character
# The basic arithmetic operators (+, - , * , /, ^ [power], %% [modulus])
# The functions will operate element-wise on vectors too
# This code will add these two numbers together
2 + 2
## ------------------------------------------------------------------------
# Here's the exponential and log functions (base e by default)
exp(1) # Exponential function
log(3) # Natural log
## ------------------------------------------------------------------------
help(rt) # Learn about the command for the t-distribution
help.search("rt") # Objects matching rt (note the quotes)
# apropos returns a vector of objects that fuzzy match
apropos("which") # "which" in the search list (note the quotes)
# "?" and "??" are shortcuts for help() and help.search()
?glm # A short cut to the help command for glm
??glm # Everything matching glm on the search path
## ------------------------------------------------------------------------
# Set the seed of the random number generator to my Mum's birthday
set.seed(19390909)
# Generate some standard normals (in this case 25 Z's)
rnorm(25)
# You can assign (save) the results of a calculation into a variable
# The variable "a" will be a vector with 25 elements
a <- rnorm(25)
## ------------------------------------------------------------------------
# Have a look at what is in "a" now
print(a)
# Basic statistical summaries of "a"
summary(a)
# Manipulate a: here we are squaring it
b <- a^2
## ----basic-plots,fig.width=4,fig.height=4,out.width='.45\\linewidth',echo=-1----
par(las=1,mar=c(4,4,1,.3)) # tick labels direction
# Create histograms of "a" and "b"
hist(a)
hist(b)
## ------------------------------------------------------------------------
# Make a vector of values by using the "c" (combine) function
var1 <- c(21.2,15.6)
var2 <- c("Ford F-150", "Corvette")
var3 <- c(TRUE,FALSE)
# You can ask what type of data R thinks any variable is with
# the "class" function.
# You can have multiple commands on the same line separated by ";"
class(var1); class(var2); class(var3)
## ------------------------------------------------------------------------
# A named vector of GPA's
gpas <- c("Math" = 3.4, "Verbal" = 3.7, "Analytics" = 3.9)
print(gpas) # Note the output contains the names
## ------------------------------------------------------------------------
# Create two vectors, x and y
x <- c(1,2,3,4); y <- c(1,4,2,6)
# Add the numbers element-wise
x + y
# Multiply the numbers element-wise
x * y
# Divide x by y element-wise
x / y
## ------------------------------------------------------------------------
# We will apply the "sum" function to each of the three vectors
sum(var1)
# NOTE: the next call stops with an error when the script is sourced --
# sum() is not defined for character data (that is the point of the demo).
sum(var2)
# Notice that you can't add up character data
sum(var3)
# Notice that TRUE/FALSE is converted/coerced to 1/0 for summing
## ------------------------------------------------------------------------
# mean(var2) warns and returns NA: mean() needs numeric/logical input
mean(var1); mean(var2) ;mean(var3)
## ------------------------------------------------------------------------
# Entering this data as a factor
# Notice below that you can add comments at the end of a line too
opinions <- factor(
  x = c(4,5,3,4,4,5,2,4,1,3), # the data
  levels = c(1,2,3,4,5), # the possible values
  labels = c("definitely no","probably no",
             "maybe", "probably yes",
             "definitely yes"),# labels for each level
  ordered = TRUE)
print(opinions)
## ------------------------------------------------------------------------
# Element-wise comparison operators return logical vectors
x <- c(1,2,3); y <- c(3,2,1)
x < y ; x <= y
x > y ; x >= y
x == y; x != y
## ------------------------------------------------------------------------
# Element-wise logical operators: | (or), & (and), ! (not)
x <- c(TRUE,TRUE,FALSE,FALSE); y <- c(TRUE,FALSE,TRUE,FALSE)
x | y
x & y
!y
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{min_mp_idx}
\alias{min_mp_idx}
\title{Get index of the minimum value from a matrix profile and its nearest neighbor}
\usage{
min_mp_idx(.mp, n_dim = NULL, valid = TRUE)
}
\arguments{
\item{.mp}{a \code{MatrixProfile} object.}
\item{n_dim}{number of dimensions of the matrix profile}
\item{valid}{check for valid numbers}
}
\value{
returns a \code{matrix} with two columns: the minimum and the nearest neighbor
}
\description{
Get index of the minimum value from a matrix profile and its nearest neighbor
}
\examples{
w <- 50
data <- mp_gait_data
mp <- tsmp(data, window_size = w, exclusion_zone = 1 / 4, verbose = 0)
min_val <- min_mp_idx(mp)
}
| /man/min_mp_idx.Rd | permissive | franzbischoff/tsmp | R | false | true | 738 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{min_mp_idx}
\alias{min_mp_idx}
\title{Get index of the minimum value from a matrix profile and its nearest neighbor}
\usage{
min_mp_idx(.mp, n_dim = NULL, valid = TRUE)
}
\arguments{
\item{.mp}{a \code{MatrixProfile} object.}
\item{n_dim}{number of dimensions of the matrix profile}
\item{valid}{check for valid numbers}
}
\value{
returns a \code{matrix} with two columns: the minimum and the nearest neighbor
}
\description{
Get index of the minimum value from a matrix profile and its nearest neighbor
}
\examples{
w <- 50
data <- mp_gait_data
mp <- tsmp(data, window_size = w, exclusion_zone = 1 / 4, verbose = 0)
min_val <- min_mp_idx(mp)
}
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory': character scalar naming the folder of monitor CSV files,
  ##              which follow the zero-padded naming convention "001.csv".
  ## 'pollutant': either "sulfate" or "nitrate".
  ## 'id': integer vector of monitor ID numbers to include.
  ## Returns the mean of 'pollutant' across all monitors in 'id',
  ## ignoring NA values.
  # sprintf("%03d", ...) zero-pads every ID in one step, replacing the
  # original three-branch paste() construction (which also reordered the
  # IDs by number of digits; the mean is unaffected, but order is now kept).
  files <- file.path(directory, sprintf("%03d.csv", id))
  # Read every monitor file and stack them into one data frame.
  all_data <- do.call(rbind, lapply(files, read.csv))
  # [[ extracts the pollutant column as a plain vector; na.rm must be the
  # logical TRUE rather than the numeric 1.
  mean(all_data[[pollutant]], na.rm = TRUE)
} | /pollutantmean.R | no_license | NRJA/rprogrammingcoursera | R | false | false | 964 | r | pollutantmean <- function(directory,pollutant, id=1:332){
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'pollutant' is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate".
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return the mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
char_id <- character(0)
if (length(id[id<10]) > 0) char_id <- c(char_id, paste("00", id[id < 10],sep=""))
if (length(id[id >= 10 & id < 100] > 0)) char_id <- c(char_id, paste("0", id[id >= 10 & id < 100],sep=""))
char_id <- c(char_id, id[id >= 100])
files <- paste(directory,"/",char_id,".csv",sep="")
all_data <- lapply(files, read.csv)
all_data <- do.call(rbind, all_data)
return (mean(all_data[pollutant][,],na.rm=1))
} |
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##The function 'makeCacheMatrix' creates a special 'matrix' object and is a list of
#functions to first set the value of the matrix, get the value of the matrix, and then
# set the value of the inverse, get the value of the inverse. Also the
#matrix object can cache its inverse.
##the <<- operator which can be used to assign a value to an object in
#an environment that is different from the current environment.
makeCacheMatrix <- function(x = matrix()) {
  ## Create a special "matrix" object that can cache its inverse.
  ## Returns a list of four accessor functions:
  ##   set_Mat(y)        - replace the stored matrix (and clear the cache)
  ##   get_Mat()         - return the stored matrix
  ##   set_Invs(inverse) - cache the inverse
  ##   get_Invs()        - return the cached inverse (NULL until computed)
  invs_Mat <- NULL
  set_Mat <- function(y) {
    x <<- y
    # Bug fix: use <<- so the cached inverse in the enclosing environment
    # is invalidated.  The original used <-, which only created a local
    # variable and left a stale inverse behind after the matrix changed.
    invs_Mat <<- NULL
  }
  get_Mat <- function() x
  set_Invs <- function(inverse) invs_Mat <<- inverse
  get_Invs <- function() invs_Mat
  list(set_Mat = set_Mat, get_Mat = get_Mat,
       set_Invs = set_Invs, get_Invs = get_Invs)
}
## Write a short comment describing this function
##The function 'cacheSolve' takes the output returned by 'makeCacheMatrix' as an
#input and computes the inverse. However, it firstly checks whether inverse has
#been computed or not. If the inverse matrix obtained from makeCachematrix(matrix)
#is empty, it gets the original matrix from data and use solve function to compute
#inverse. Otherwise, returns a message saying, "GETTING CACHED INVERTIBLE MATRIX!".
#retrieve inverse from cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" object 'x' created by
  ## makeCacheMatrix().  If the inverse is already cached, return the
  ## cached copy instead of recomputing it; otherwise compute it with
  ## solve(), store it in the cache, and return it.
  cached <- x$get_Invs()
  if (!is.null(cached)) {
    message("GETTING CACHED INVERTIBLE MATRIX!")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get_Mat(), ...)
  x$set_Invs(inverse)
  inverse
}
| /cachematrix.R | no_license | LubanaTanvia-new/ProgrammingAssignment2 | R | false | false | 2,190 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##The function 'makeCacheMatrix' creates a special 'matrix' object and is a list of
#functions to first set the value of the matrix, get the value of the matrix, and then
# set the value of the inverse, get the value of the inverse. Also the
#matrix object can cache its inverse.
##the <<- operator which can be used to assign a value to an object in
#an environment that is different from the current environment.
makeCacheMatrix <- function(x = matrix()) {
  ## Create a special "matrix" object that can cache its inverse.
  ## Returns a list of four accessor functions:
  ##   set_Mat(y)        - replace the stored matrix (and clear the cache)
  ##   get_Mat()         - return the stored matrix
  ##   set_Invs(inverse) - cache the inverse
  ##   get_Invs()        - return the cached inverse (NULL until computed)
  invs_Mat <- NULL
  set_Mat <- function(y) {
    x <<- y
    # Bug fix: use <<- so the cached inverse in the enclosing environment
    # is invalidated.  The original used <-, which only created a local
    # variable and left a stale inverse behind after the matrix changed.
    invs_Mat <<- NULL
  }
  get_Mat <- function() x
  set_Invs <- function(inverse) invs_Mat <<- inverse
  get_Invs <- function() invs_Mat
  list(set_Mat = set_Mat, get_Mat = get_Mat,
       set_Invs = set_Invs, get_Invs = get_Invs)
}
## Write a short comment describing this function
##The function 'cacheSolve' takes the output returned by 'makeCacheMatrix' as an
#input and computes the inverse. However, it firstly checks whether inverse has
#been computed or not. If the inverse matrix obtained from makeCachematrix(matrix)
#is empty, it gets the original matrix from data and use solve function to compute
#inverse. Otherwise, returns a message saying, "GETTING CACHED INVERTIBLE MATRIX!".
#retrieve inverse from cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" object 'x' created by
  ## makeCacheMatrix().  If the inverse is already cached, return the
  ## cached copy instead of recomputing it; otherwise compute it with
  ## solve(), store it in the cache, and return it.
  cached <- x$get_Invs()
  if (!is.null(cached)) {
    message("GETTING CACHED INVERTIBLE MATRIX!")
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get_Mat(), ...)
  x$set_Invs(inverse)
  inverse
}
|
# List the downloaded RefSeq FASTA files and extract the organism IDs from
# the "<ID>_aa.fasta" file names (non-matching names become NA and are dropped).
files <- list.files(paste0(.properties$SOURCE_DIR, "RefSeq/sequences"))
files_splitted_names <- stri_match_first_regex(files, "(.*)(?>_aa\\.fasta)")[, 2] %>% .[!is.na(.)]
rm(files)
#####
c.seq_ext <- function(...) {
  # Concatenate any number of "seq_ext" objects: the parallel 'seqs' and
  # 'type' fields are combined pairwise (c() dispatches on the element
  # class, so XStringSet sequences concatenate correctly) and the result
  # keeps the "seq_ext" class.
  parts <- list(...)
  combined <- list(
    seqs = Reduce(c, lapply(parts, `[[`, "seqs")),
    type = Reduce(c, lapply(parts, `[[`, "type"))
  )
  class(combined) <- "seq_ext"
  combined
}
read_sequences <- function(NCBIOrganismID, type) {
  # Read one FASTA file ("<ID>_<type>.fasta") from the configured RefSeq
  # directory and wrap it as a "seq_ext" object: the sequences plus a
  # parallel character vector repeating the type code ("aa", "nucl", ...).
  # NOTE(review): every type is read with readAAStringSet, including the
  # nucleotide files -- presumably intentional upstream; verify.
  fasta_path <- paste0(.properties$SOURCE_DIR, "RefSeq/sequences/",
                       NCBIOrganismID, "_", type, ".fasta")
  seqs <- readAAStringSet(fasta_path)
  out <- list(seqs = seqs, type = rep(type, length(seqs)))
  class(out) <- "seq_ext"
  out
}
# Read every sequence type for every organism and concatenate the resulting
# "seq_ext" objects (c() dispatches to c.seq_ext defined above).
sequences <- do.call(c, lapply(files_splitted_names, function(NCBIOrganismID)
  c(
    read_sequences(NCBIOrganismID, "aa"),
    read_sequences(NCBIOrganismID, "nucl"),
    read_sequences(NCBIOrganismID, "rrna"),
    read_sequences(NCBIOrganismID, "trna")
  )
))
# Attach the sequence strings to the Sequence table by NCBI code and map the
# short type codes to their database labels.  case_when replaces the
# original nested if_else() calls; unrecognised types still yield NA.
Sequence <- Sequence %>%
  left_join(
    tibble(value = as.character(sequences$seqs),
           code = names(sequences$seqs),
           type = sequences$type) %>%
      distinct(code, .keep_all = TRUE),
    by = c("NCBICode" = "code")) %>%
  mutate(Type = case_when(
    type == "aa"   ~ "AA_sequence",
    type == "nucl" ~ "nucl_sequence",
    type == "trna" ~ "tRNA_sequence",
    type == "rrna" ~ "rRNA_sequence",
    TRUE           ~ NA_character_
  )) %>%
  select(ID,
         Value = value,
         NCBICode,
         Type)
# Drop the helpers and intermediates; only the updated Sequence table remains.
rm(c.seq_ext,
   read_sequences,
   files_splitted_names,
   sequences)
| /R/load_sequences.R | no_license | DominikRafacz/PhyloPlastDB-initialization | R | false | false | 1,641 | r | files <- list.files(paste0(.properties$SOURCE_DIR, "RefSeq/sequences"))
# Extract the organism IDs from the "<ID>_aa.fasta" file names listed above
# (non-matching names become NA and are dropped).
files_splitted_names <- stri_match_first_regex(files, "(.*)(?>_aa\\.fasta)")[, 2] %>% .[!is.na(.)]
rm(files)
#####
c.seq_ext <- function(...) {
  # Concatenate any number of "seq_ext" objects: the parallel 'seqs' and
  # 'type' fields are combined pairwise (c() dispatches on the element
  # class, so XStringSet sequences concatenate correctly) and the result
  # keeps the "seq_ext" class.
  parts <- list(...)
  combined <- list(
    seqs = Reduce(c, lapply(parts, `[[`, "seqs")),
    type = Reduce(c, lapply(parts, `[[`, "type"))
  )
  class(combined) <- "seq_ext"
  combined
}
read_sequences <- function(NCBIOrganismID, type) {
  # Read one FASTA file ("<ID>_<type>.fasta") from the configured RefSeq
  # directory and wrap it as a "seq_ext" object: the sequences plus a
  # parallel character vector repeating the type code ("aa", "nucl", ...).
  # NOTE(review): every type is read with readAAStringSet, including the
  # nucleotide files -- presumably intentional upstream; verify.
  fasta_path <- paste0(.properties$SOURCE_DIR, "RefSeq/sequences/",
                       NCBIOrganismID, "_", type, ".fasta")
  seqs <- readAAStringSet(fasta_path)
  out <- list(seqs = seqs, type = rep(type, length(seqs)))
  class(out) <- "seq_ext"
  out
}
# Read every sequence type for every organism and concatenate the resulting
# "seq_ext" objects (c() dispatches to c.seq_ext defined above).
sequences <- do.call(c, lapply(files_splitted_names, function(NCBIOrganismID)
  c(
    read_sequences(NCBIOrganismID, "aa"),
    read_sequences(NCBIOrganismID, "nucl"),
    read_sequences(NCBIOrganismID, "rrna"),
    read_sequences(NCBIOrganismID, "trna")
  )
))
# Attach the sequence strings to the Sequence table by NCBI code and map the
# short type codes to their database labels.  case_when replaces the
# original nested if_else() calls; unrecognised types still yield NA.
Sequence <- Sequence %>%
  left_join(
    tibble(value = as.character(sequences$seqs),
           code = names(sequences$seqs),
           type = sequences$type) %>%
      distinct(code, .keep_all = TRUE),
    by = c("NCBICode" = "code")) %>%
  mutate(Type = case_when(
    type == "aa"   ~ "AA_sequence",
    type == "nucl" ~ "nucl_sequence",
    type == "trna" ~ "tRNA_sequence",
    type == "rrna" ~ "rRNA_sequence",
    TRUE           ~ NA_character_
  )) %>%
  select(ID,
         Value = value,
         NCBICode,
         Type)
# Drop the helpers and intermediates; only the updated Sequence table remains.
rm(c.seq_ext,
   read_sequences,
   files_splitted_names,
   sequences)
|
# Lab 9: LDA classification of lung-cancer gene-expression samples.
# NOTE(review): the original file was a pasted console transcript; the "> "
# prompts made it unparseable as R and have been removed.  Requires MASS
# to be attached for lda().
FilePath <- "C:/Users/fermi/Documents/Fall 2020/Lab 9/lung_cancer.txt"
# 1) Read the expression matrix (rows = probes, columns = samples).
dat <- read.table(FilePath, header = TRUE, row.names = 1)
# 2) Sample class labels (10 adeno, 9 SCLC, 5 Normal); transpose so that
#    rows = samples, then prepend the label column (named "class").
class <- c("adeno","adeno","adeno","adeno","adeno","adeno","adeno","adeno","adeno","adeno","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","Normal","Normal","Normal","Normal","Normal")
clas <- names(dat)  # sample names, kept for reference only
datx <- as.data.frame(t(dat))
datx <- data.frame(class, datx)
# 3) Train/test split by sample index (single indexing call replaces the
#    original rbind()-in-sequence construction; same rows, same order).
traindat <- datx[c(1:6, 11:16, 20:22), ]
testdat <- datx[c(7:10, 17:19, 23:24), ]
testclass <- testdat[, 1]
testdat <- testdat[, -1]
# 4) LDA on two probes.  Bug fix: the formula must use the label column
#    'class', not 'clas' (a character vector of sample names whose length
#    does not even match the training data).
traindat.lda <- lda(class ~ X1007_s_at + X1053_at, traindat)
testdat.pred <- predict(traindat.lda, testdat[, 1:2])
# 5) Plot the two discriminant scores of the test samples.
plot(testdat.pred$x[, 1], testdat.pred$x[, 2], main = "Discriminant Functions",
     xlab = "Discriminant Function 1", ylab = "Discriminant Function 2",
     col = seq_along(rownames(testdat.pred$x)), lwd = 3)
legend("topleft", legend = rownames(testdat.pred$x),
       fill = seq_along(rownames(testdat.pred$x)))
# 6) LDA on all probes (same 'clas' -> 'class' fix).
traindat.all.lda <- lda(class ~ ., traindat)
testdat.all.pred <- predict(traindat.all.lda, testdat)
# 7) Plot the all-probe discriminant scores.
plot(testdat.all.pred$x[, 1], testdat.all.pred$x[, 2], main = "Discriminant Functions",
     xlab = "Discriminant Function 1", ylab = "Discriminant Function 2",
     col = seq_along(rownames(testdat.all.pred$x)), lwd = 3)
legend(0, 0, legend = rownames(testdat.all.pred$x),
       fill = seq_along(rownames(testdat.all.pred$x)))
| /Gene Expression Analysis/Data/Classification.r | no_license | fermingc/Bioinformatics | R | false | false | 1,631 | r | >FilePath <- "C:/Users/fermi/Documents/Fall 2020/Lab 9/lung_cancer.txt"
# Lab 9 (continued): LDA classification of lung-cancer samples.  Uses the
# FilePath defined above.  NOTE(review): the original was a pasted console
# transcript; the "> " prompts made it unparseable and have been removed.
# Requires MASS to be attached for lda().
# 1) Read the expression matrix (rows = probes, columns = samples).
dat <- read.table(FilePath, header = TRUE, row.names = 1)
# 2) Sample class labels (10 adeno, 9 SCLC, 5 Normal); transpose so that
#    rows = samples, then prepend the label column (named "class").
class <- c("adeno","adeno","adeno","adeno","adeno","adeno","adeno","adeno","adeno","adeno","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","SCLC","Normal","Normal","Normal","Normal","Normal")
clas <- names(dat)  # sample names, kept for reference only
datx <- as.data.frame(t(dat))
datx <- data.frame(class, datx)
# 3) Train/test split by sample index (single indexing call replaces the
#    original rbind()-in-sequence construction; same rows, same order).
traindat <- datx[c(1:6, 11:16, 20:22), ]
testdat <- datx[c(7:10, 17:19, 23:24), ]
testclass <- testdat[, 1]
testdat <- testdat[, -1]
# 4) LDA on two probes.  Bug fix: the formula must use the label column
#    'class', not 'clas' (a character vector of sample names whose length
#    does not even match the training data).
traindat.lda <- lda(class ~ X1007_s_at + X1053_at, traindat)
testdat.pred <- predict(traindat.lda, testdat[, 1:2])
# 5) Plot the two discriminant scores of the test samples.
plot(testdat.pred$x[, 1], testdat.pred$x[, 2], main = "Discriminant Functions",
     xlab = "Discriminant Function 1", ylab = "Discriminant Function 2",
     col = seq_along(rownames(testdat.pred$x)), lwd = 3)
legend("topleft", legend = rownames(testdat.pred$x),
       fill = seq_along(rownames(testdat.pred$x)))
# 6) LDA on all probes (same 'clas' -> 'class' fix).
traindat.all.lda <- lda(class ~ ., traindat)
testdat.all.pred <- predict(traindat.all.lda, testdat)
# 7) Plot the all-probe discriminant scores.
plot(testdat.all.pred$x[, 1], testdat.all.pred$x[, 2], main = "Discriminant Functions",
     xlab = "Discriminant Function 1", ylab = "Discriminant Function 2",
     col = seq_along(rownames(testdat.all.pred$x)), lwd = 3)
legend(0, 0, legend = rownames(testdat.all.pred$x),
       fill = seq_along(rownames(testdat.all.pred$x)))
|
#' create simulated data for demonstrating LDA analysis
#' 2 topics
#'
#' @param nspecies = number of species in all topic groups
#' @param tsteps = number of [monthly] time steps
#'
#' @return
#' beta = matrix of species composition of the groups
#' gamma = matrix of topic composition over time
#' 3 simulations of gamma: uniform, slow transition, and fast transition
#' @export
create_sim_data_2topic = function(nspecies=24,tsteps=400) {
  topics <- 2
  half_sp <- nspecies / 2
  # guard: the two topics split the species pool evenly, and the series must
  # be long enough for the 50-step plateaus + ramp and the midpoint hand-off
  stopifnot(nspecies %% 2 == 0, tsteps %% 2 == 0, tsteps >= 112)
  # beta: species composition of topics -- uniform distribution,
  # non-overlapping species composition (each topic owns one half)
  beta <- matrix(0, nrow = topics, ncol = nspecies)
  beta[1, ] <- c(rep(1 / half_sp, half_sp), rep(0, half_sp))
  beta[2, ] <- c(rep(0, half_sp), rep(1 / half_sp, half_sp))
  # gamma_constant: topic 1 holds 100% of the community for the whole series
  gamma_constant <- matrix(0, nrow = tsteps, ncol = topics)
  gamma_constant[, 1] <- 1
  # gamma_fast: linear hand-off from topic 1 to topic 2 over 12 steps,
  # starting at the series midpoint
  # (was hardcoded to t = 200-212, valid only for tsteps = 400)
  mid <- tsteps / 2
  gamma_fast <- matrix(0, nrow = tsteps, ncol = topics)
  gamma_fast[1:mid, 1] <- 1
  gamma_fast[(mid + 1):(mid + 12), 1] <- seq(12) * (-1 / 12) + 1
  gamma_fast[(mid + 1):(mid + 12), 2] <- seq(12) * (1 / 12)
  gamma_fast[(mid + 13):tsteps, 2] <- 1
  # gamma_slow: 50-step plateaus at both ends, linear ramp in between
  # (was hardcoded to indices 51:350 / 351:400, valid only for tsteps = 400)
  ramp_len <- tsteps - 100
  gamma_slow <- matrix(0, nrow = tsteps, ncol = topics)
  gamma_slow[1:50, 1] <- 1
  gamma_slow[(tsteps - 49):tsteps, 2] <- 1
  gamma_slow[51:(tsteps - 50), 1] <- seq(ramp_len) * (-1 / ramp_len) + 1
  gamma_slow[51:(tsteps - 50), 2] <- seq(ramp_len) * (1 / ramp_len)
  return(list(beta, gamma_constant, gamma_fast, gamma_slow))
}
#' variation: create simulated data for demonstrating LDA analysis
#' 2 topics, nonuniform distribution of species in two community-types
#'
#' @param tsteps = number of [monthly] time steps
#'
#' @return
#' beta = matrix of species composition of the groups
#' gamma = matrix of topic composition over time
#' 3 simulations of gamma: uniform, slow transition, and fast transition
#' @export
create_sim_data_2topic_nonuniform = function(tsteps=400) {
  topics <- 2
  nspecies <- 12
  # guard: the series must be long enough for the plateaus/ramp and the
  # midpoint hand-off used below
  stopifnot(tsteps %% 2 == 0, tsteps >= 112)
  # beta: species composition of topics
  # distribution: average of the Portal sampling species distributions
  # (periods 1:436); distribution2 is a simple permutation of it
  distribution <- c(27, 13, 7, 5, 3, 2, 1, 1, 1, 0, 0, 0)
  distribution2 <- c(3, 1, 0, 1, 0, 13, 2, 0, 1, 27, 5, 7)
  beta <- matrix(0, nrow = topics, ncol = nspecies)
  beta[1, ] <- distribution / sum(distribution)
  beta[2, ] <- distribution2 / sum(distribution2)
  # gamma_constant: topic 1 holds 100% of the community for the whole series
  gamma_constant <- matrix(0, nrow = tsteps, ncol = topics)
  gamma_constant[, 1] <- 1
  # gamma_fast: linear hand-off from topic 1 to topic 2 over 12 steps,
  # starting at the series midpoint
  # (was hardcoded to t = 200-212, valid only for tsteps = 400)
  mid <- tsteps / 2
  gamma_fast <- matrix(0, nrow = tsteps, ncol = topics)
  gamma_fast[1:mid, 1] <- 1
  gamma_fast[(mid + 1):(mid + 12), 1] <- seq(12) * (-1 / 12) + 1
  gamma_fast[(mid + 1):(mid + 12), 2] <- seq(12) * (1 / 12)
  gamma_fast[(mid + 13):tsteps, 2] <- 1
  # gamma_slow: 50-step plateaus at both ends, linear ramp in between
  # (was hardcoded to indices 51:350 / 351:400, valid only for tsteps = 400)
  ramp_len <- tsteps - 100
  gamma_slow <- matrix(0, nrow = tsteps, ncol = topics)
  gamma_slow[1:50, 1] <- 1
  gamma_slow[(tsteps - 49):tsteps, 2] <- 1
  gamma_slow[51:(tsteps - 50), 1] <- seq(ramp_len) * (-1 / ramp_len) + 1
  gamma_slow[51:(tsteps - 50), 2] <- seq(ramp_len) * (1 / ramp_len)
  return(list(beta, gamma_constant, gamma_fast, gamma_slow))
}
#' create simulated data for demonstrating LDA analysis
#' 3 topics
#'
#' @param nspecies = number of species in all topic groups
#' @param tsteps = number of [monthly] time steps
#'
#' @export
create_sim_data_3topic = function(nspecies=24,tsteps=400) {
# Simulated LDA demo data with 3 topics: uniform, non-overlapping species
# compositions (each topic owns one third of the species pool) plus three
# topic-proportion (gamma) scenarios: constant, two fast 12-step hand-offs,
# and two overlapping slow linear hand-offs.
# Returns list(beta, gamma_constant, gamma_fast, gamma_slow).
# NOTE(review): assumes nspecies is divisible by 3 and tsteps is even; the
# gamma_fast indices (1:150, 151:162, 251:262, 263:400) are hardcoded for
# tsteps = 400 -- confirm before calling with a different tsteps.
topics = 3
beta = matrix(rep(0,topics*nspecies),nrow=topics,ncol=nspecies)
evencomp = 1/(nspecies/3)
# each topic puts equal weight on its own third of the species pool
beta[1,] = c(rep(evencomp,nspecies/3),rep(0,nspecies/3),rep(0,nspecies/3))
beta[2,] = c(rep(0,nspecies/3),rep(evencomp,nspecies/3),rep(0,nspecies/3))
beta[3,] = c(rep(0,nspecies/3),rep(0,nspecies/3),rep(evencomp,nspecies/3))
# gamma for a constant topic prevalence through time
gamma_constant = matrix(rep(0,tsteps*topics),nrow=tsteps,ncol=topics)
gamma_constant[,1] = rep(.7,tsteps)
gamma_constant[,2] = rep(.2,tsteps)
gamma_constant[,3] = rep(.1,tsteps)
# gamma for a fast transition from topic1 to topic2 (one year/12 time steps)
gamma_fast = matrix(rep(0,tsteps*topics),nrow=tsteps,ncol=topics)
# proportions are constant for first 1/3 of the series
gamma_fast[1:150,1] = rep(1)
# fast transition from tstep 151-163
gamma_fast[151:162,1] = seq(12)*(-1/12)+1
gamma_fast[151:162,2] = seq(12)*(1/12)
# topic 2 prevails for middle
gamma_fast[163:250,2] = rep(1)
# fast transition from 251-263
gamma_fast[251:262,2] = seq(12)*(-1/12)+1
gamma_fast[251:262,3] = seq(12)*(1/12)
# proportions are constant for rest of time series
gamma_fast[263:400,3] = rep(1)
# gamma for a slow transition from topic1 to topic2
# (topic1 -> topic2 over the first half, topic2 -> topic3 over the second)
gamma_slow = matrix(rep(0,tsteps*topics),nrow=tsteps,ncol=topics)
gamma_slow[,1] = c(seq(tsteps/2)*(-1/(tsteps/2))+1,rep(0,tsteps/2))
gamma_slow[,2] = c(seq(tsteps/2)*(1/(tsteps/2)),seq(tsteps/2)*(-1/(tsteps/2))+1)
gamma_slow[,3] = c(rep(0,tsteps/2),seq(tsteps/2)*(1/(tsteps/2)))
return(list(beta,gamma_constant,gamma_fast,gamma_slow))
} | /previous_work/paper/create_sim_data.R | no_license | ethanwhite/LDATS | R | false | false | 6,148 | r |
#' create simulated data for demonstrating LDA analysis
#' 2 topics
#'
#' @param nspecies = number of species in all topic groups
#' @param tsteps = number of [monthly] time steps
#'
#' @return
#' beta = matrix of species composition of the groups
#' gamma = matrix of topic composition over time
#' 3 simulations of gamma: uniform, slow transition, and fast transition
#' @export
create_sim_data_2topic = function(nspecies=24,tsteps=400) {
  # Simulated LDA demo data: two topics with uniform, non-overlapping
  # species compositions, plus three topic-proportion (gamma) scenarios:
  # constant, a fast (12-step) switch, and a slow linear switch.
  # Returns list(beta, gamma_constant, gamma_fast, gamma_slow).
  half <- nspecies / 2
  # beta: each topic puts equal weight on its own half of the species pool
  beta <- rbind(c(rep(1 / half, half), rep(0, half)),
                c(rep(0, half), rep(1 / half, half)))
  # scenario 1: topic 1 holds 100% of the community for the whole series
  gamma_constant <- cbind(rep(1, tsteps), rep(0, tsteps))
  # scenario 2: abrupt hand-off from topic 1 to topic 2 during steps 201-212
  ramp <- seq(12)
  gamma_fast <- matrix(0, nrow = tsteps, ncol = 2)
  gamma_fast[1:200, 1] <- 1
  gamma_fast[201:212, 1] <- ramp * (-1/12) + 1
  gamma_fast[201:212, 2] <- ramp * (1/12)
  gamma_fast[213:tsteps, 2] <- 1
  # scenario 3: 50-step plateaus at both ends, linear ramp over steps 51-350
  slope <- seq(300)
  gamma_slow <- matrix(0, nrow = tsteps, ncol = 2)
  gamma_slow[1:50, 1] <- 1
  gamma_slow[351:400, 2] <- 1
  gamma_slow[51:350, 1] <- slope * (-1/(tsteps - 100)) + 1
  gamma_slow[51:350, 2] <- slope * (1/(tsteps - 100))
  return(list(beta, gamma_constant, gamma_fast, gamma_slow))
}
#' variation: create simulated data for demonstrating LDA analysis
#' 2 topics, nonuniform distribution of species in two community-types
#'
#' @param tsteps = number of [monthly] time steps
#'
#' @return
#' beta = matrix of species composition of the groups
#' gamma = matrix of topic composition over time
#' 3 simulations of gamma: uniform, slow transition, and fast transition
#' @export
create_sim_data_2topic_nonuniform = function(tsteps=400) {
  # Simulated LDA demo data: two topics with NON-uniform species
  # compositions (abundance profile from average Portal sampling, periods
  # 1:436, and a permutation of it), plus three gamma scenarios matching
  # create_sim_data_2topic: constant, fast switch, slow switch.
  # Returns list(beta, gamma_constant, gamma_fast, gamma_slow).
  d1 <- c(27, 13, 7, 5, 3, 2, 1, 1, 1, 0, 0, 0)
  d2 <- c(3, 1, 0, 1, 0, 13, 2, 0, 1, 27, 5, 7)  # permutation of d1
  # beta: normalise each abundance profile to a probability distribution
  beta <- rbind(d1 / sum(d1), d2 / sum(d2))
  # scenario 1: topic 1 holds 100% of the community for the whole series
  gamma_constant <- cbind(rep(1, tsteps), rep(0, tsteps))
  # scenario 2: abrupt hand-off from topic 1 to topic 2 during steps 201-212
  ramp <- seq(12)
  gamma_fast <- matrix(0, nrow = tsteps, ncol = 2)
  gamma_fast[1:200, 1] <- 1
  gamma_fast[201:212, 1] <- ramp * (-1/12) + 1
  gamma_fast[201:212, 2] <- ramp * (1/12)
  gamma_fast[213:tsteps, 2] <- 1
  # scenario 3: 50-step plateaus at both ends, linear ramp over steps 51-350
  slope <- seq(300)
  gamma_slow <- matrix(0, nrow = tsteps, ncol = 2)
  gamma_slow[1:50, 1] <- 1
  gamma_slow[351:400, 2] <- 1
  gamma_slow[51:350, 1] <- slope * (-1/(tsteps - 100)) + 1
  gamma_slow[51:350, 2] <- slope * (1/(tsteps - 100))
  return(list(beta, gamma_constant, gamma_fast, gamma_slow))
}
#' create simulated data for demonstrating LDA analysis
#' 3 topics
#'
#' @param nspecies = number of species in all topic groups
#' @param tsteps = number of [monthly] time steps
#'
#' @export
create_sim_data_3topic = function(nspecies=24,tsteps=400) {
  # Simulated LDA demo data: three topics with uniform, non-overlapping
  # species compositions, plus three gamma scenarios: constant (.7/.2/.1),
  # two abrupt 12-step hand-offs, and two overlapping slow hand-offs.
  # Returns list(beta, gamma_constant, gamma_fast, gamma_slow).
  n_grp <- nspecies / 3
  comp <- rep(1 / (nspecies / 3), n_grp)
  zeros <- rep(0, n_grp)
  # beta: each topic puts equal weight on its own third of the species pool
  beta <- rbind(c(comp, zeros, zeros),
                c(zeros, comp, zeros),
                c(zeros, zeros, comp))
  # scenario 1: constant topic proportions through time
  gamma_constant <- cbind(rep(.7, tsteps), rep(.2, tsteps), rep(.1, tsteps))
  # scenario 2: two abrupt hand-offs, topic1 -> topic2 -> topic3
  down <- seq(12) * (-1/12) + 1
  up <- seq(12) * (1/12)
  gamma_fast <- matrix(0, nrow = tsteps, ncol = 3)
  gamma_fast[1:150, 1] <- 1
  gamma_fast[151:162, 1] <- down
  gamma_fast[151:162, 2] <- up
  gamma_fast[163:250, 2] <- 1
  gamma_fast[251:262, 2] <- down
  gamma_fast[251:262, 3] <- up
  gamma_fast[263:400, 3] <- 1
  # scenario 3: gradual hand-offs spanning each half of the series
  half <- tsteps / 2
  rise <- seq(half) * (1 / half)
  fall <- seq(half) * (-1 / half) + 1
  gamma_slow <- matrix(0, nrow = tsteps, ncol = 3)
  gamma_slow[, 1] <- c(fall, rep(0, half))
  gamma_slow[, 2] <- c(rise, fall)
  gamma_slow[, 3] <- c(rep(0, half), rise)
  return(list(beta, gamma_constant, gamma_fast, gamma_slow))
}
#EQC-3-rain-import
openandsave <- function(ncname) {
  ######
  # Read one annual VCSN rainfall netCDF file ("<ncname>.nc") and write it
  # back out as "<ncname>.csv": one row per (lon, lat) grid point, one
  # rainfall column per day, with netCDF fill values converted to NA.
  #
  # ncname: path to the netCDF file, WITHOUT the ".nc" extension.
  # Side effects: writes "<ncname>.csv" and prints the file summary.
  library(ncdf4)
  ncfname <- paste(ncname, ".nc", sep="")
  dname <- "precipitation_amount"
  # note: rain = precipitation amount in kg m-2 -
  # full description: "virtual climate station rainfall in mm/day from 9am to 9 am recorded against day of start of period"
  ncin <- nc_open(ncfname)
  # release the file handle even if reshaping fails
  # (the original never called nc_close, leaking one handle per file)
  on.exit(nc_close(ncin), add = TRUE)
  print(ncin)
  # The file is a raster "brick" organised by longitude, latitude, time,
  # so first read the metadata for each of those dimensions.
  lon <- ncvar_get(ncin, "longitude")
  nlon <- dim(lon)
  lat <- ncvar_get(ncin, "latitude")
  nlat <- dim(lat)
  time <- ncvar_get(ncin, "time")
  nt <- dim(time)
  tunits <- ncatt_get(ncin, "time", "units")  # e.g. "days since <origin> ..."
  # now read the actual observations: a lon x lat x time array
  rain_array <- ncvar_get(ncin, dname)
  fillvalue <- ncatt_get(ncin, dname, "_FillValue")
  ######
  # Reshaping the data (with a bit of cleaning along the way).
  # (Exploratory no-op lines and the unused lattice/RColorBrewer/raster
  # loads from the original have been dropped.)
  # Convert time: the third field of the "days since <origin>" units string
  # is the origin date; offsets then become calendar dates.
  tustr <- strsplit(tunits$value, " ")
  time_values <- as.Date(time, origin = as.Date(unlist(tustr)[3]))
  time_values_df <- as.data.frame(as.character(time_values))
  # Replace netCDF fill values with NA's
  rain_array[rain_array == fillvalue$value] <- NA
  # matrix (nlon*nlat rows by 2 cols) of all lon/lat grid combinations
  lonlat <- as.matrix(expand.grid(lon, lat))
  # flatten the 3-d array into a (grid point) x (day) matrix; row order
  # lines up with expand.grid (first dimension varying fastest, as in the
  # original code's pairing)
  rain_mat <- matrix(as.vector(rain_array), nrow = nlon * nlat, ncol = nt)
  # one data frame: lon, lat, then one rainfall column per day;
  # rows that are all-NA (ocean grid points) are dropped by na.omit
  rain_df02 <- na.omit(data.frame(cbind(lonlat, rain_mat)))
  names(rain_df02) <- c("lon", "lat")
  names(rain_df02)[3:ncol(rain_df02)] <- t(time_values_df)
  # write out the dataframe as a .csv file
  csvfile <- paste(ncname, ".csv", sep="")
  write.table(rain_df02, csvfile, row.names = FALSE, sep = ",")
  # Adapted from:
  # http://geog.uoregon.edu/bartlein/courses/geog490/week04-netCDF.html
}
# set path and filename
# Convert each annual VCSN rainfall netCDF (1999-2018) to csv in turn,
# instead of repeating the two-line call twenty times.
# After the loop, ncname holds the 2018 path, exactly as the original
# sequence of assignments did (a later rm(ncname) relies on it existing).
for (vcsn_year in 1999:2018) {
  ncname <- paste0("Data/VCSN_Rain5k_", vcsn_year)
  openandsave(ncname)
}
######
# Now adding them all together: read each annual csv back in and merge the
# twenty tables into one wide data frame keyed on grid point (lon, lat).
vcsn_years <- 1999:2018
annual_tables <- lapply(vcsn_years, function(y) {
  read.csv(sprintf("Data/VCSN_Rain5k_%d.csv", y), stringsAsFactors = FALSE)
})
# successive inner merges on (lon, lat): one row per grid point, with the
# daily-rainfall columns from every year side by side (same result as the
# original chain of nineteen pairwise merge() calls)
VCSN_Rain5k_working <- Reduce(function(acc, yr_table) {
  merge(acc, yr_table, by = c("lon", "lat"))
}, annual_tables)
# write out the dataframe as a .csv file
csvfile <- paste("Data/VCSN_Rain5k_1999_2018", ".csv", sep="")
write.table(VCSN_Rain5k_working, csvfile, row.names=FALSE, sep=",")
# clean up workspace: the per-year tables are no longer needed
rm(annual_tables, vcsn_years)
# setwd("~/EQC-climate-change-part-two")
# load the ncdf4 package
# (comment above is stale: this section loads sf and reshape2, not ncdf4)
library(sf)
# Inputting:
# Attempt at full set: note this won't compile to claims for the whole country (uses all 30GB)
# Read the combined 1999-2018 wide table: lon, lat, then one rainfall
# column per day (read.csv prefixes the numeric date names with "X").
precip_table <- read.csv("Data/VCSN_Rain5k_1999_2018.csv", sep=",", stringsAsFactors = FALSE)
head(names(precip_table))
# rename "X1999.01.01"-style columns to "precip1999.01.01"
# NOTE(review): gsub replaces EVERY "X" in a name, not just the leading one
# added by read.csv -- fine for these date columns, but fragile; sub("^X", ...)
# would be safer. Confirm no other column names contain "X".
names(precip_table) <- gsub("X", "precip", names(precip_table))
head(names(precip_table))
#names(precip_table) <- gsub(".", "", names(precip_table))
# This has columns for lat & lon, then a column for each day, containing rainfall
# Note the "centroid" is not actually a centroid (just NIWA's coordinates for each grid)
precipWorking <- as.data.frame(precip_table)
rm(precip_table)
head(names(precipWorking))
# prefer a "point / day / rain" column format for processing later on, with
# one row per (grid point, day): melt from wide to long
library(reshape2)
## If you're on an old machine, may be a problem with below - slow
precipWorking <- melt(precipWorking, id=c("lon","lat"))
head(precipWorking)
names(precipWorking) <- c("longitude", "latitude", "day", "rain")
head(precipWorking)
sapply(precipWorking, class)
# strip the "precip" prefix so the day column can be parsed as a date
precipWorking$day <- gsub("precip", "", precipWorking$day)
head(precipWorking)
precipWorking$day <- as.Date(precipWorking$day, format = "%Y.%m.%d")
sapply(precipWorking,class)
rm(csvfile)
rm(ncname)
# re-cast back to wide (one row per grid point, one column per day), then
# convert to an sf object with point geometry (EPSG:4326 = WGS84 lon/lat)
vcsn <- precipWorking
names(vcsn) <- c("vcsnLongitude", "vcsnLatitude", "vcsnDay", "rain")
vcsnWide <- dcast(vcsn, vcsnLatitude + vcsnLongitude ~ vcsnDay, value.var="rain")
vcsnWide$long <- vcsnWide$vcsnLongitude
vcsnWide$lat <- vcsnWide$vcsnLatitude
vcsnWide <- st_as_sf(vcsnWide, coords = c("long", "lat"), crs = 4326) #note crs code let's the function know the latlons are wgs84
#vcsnWorking <- melt(vcsnWide, id=c("vcsnLatitude", "vcsnLongitude"))
rm(precipWorking)
| /Archive/first histograms/EQC-03-rain-import.R | no_license | SallyFreanOwen/insurance-and-climate | R | false | false | 10,369 | r | #EQC-3-rain-import
openandsave <- function(ncname) {
# Read one annual VCSN rainfall netCDF file ("<ncname>.nc"), reshape the
# lon x lat x time array into one row per (lon, lat) grid point with one
# rainfall column per day, and write the result to "<ncname>.csv".
# ncname: path to the netCDF file, without the ".nc" extension.
# NOTE(review): ncin is never closed -- consider on.exit(nc_close(ncin)).
######
#Reading in the data
library(ncdf4)
ncfname <- paste(ncname, ".nc", sep="")
dname <- "precipitation_amount"
# note: rain = precipitation amount in kg m-2 -
# or full description: "virtual climate station rainfall in mm/day from 9am to 9 am recorded against day of start of period"
ncin <- nc_open(ncfname)
print(ncin)
# These files are raster "bricks" organised by longitude, latitude, time
# So, first we read in the metadata for each of those dimensions
## get longitude and latitude
lon <- ncvar_get(ncin,"longitude")
nlon <- dim(lon)
head(lon)
lat <- ncvar_get(ncin,"latitude")
nlat <- dim(lat)
head(lat)
print(c(nlon,nlat))
# get time
time <- ncvar_get(ncin,"time")
head(time)
tunits <- ncatt_get(ncin,"time","units")
nt <- dim(time)
nt
# Print the time units string. Note the structure of the time units attribute: The object tunits has two components hasatt (a logical variable), and tunits$value, the actual "time since" string.
tunits
# (bare expressions such as nt / tunits / head(...) above are no-ops inside
# a function: auto-printing only happens at top level)
# Now that that is under control, we can collect the actual observations we're interested in (while being confident we can trace back against the metadata to know what we're looking at)
# get rain
rain_array <- ncvar_get(ncin,dname)
dlname <- ncatt_get(ncin,dname,"long_name")
dunits <- ncatt_get(ncin,dname,"units")
fillvalue <- ncatt_get(ncin,dname,"_FillValue")
dim(rain_array)
# get global attributes
CDO <- ncatt_get(ncin,0,"CDO")
description <- ncatt_get(ncin,0,"description")
# also may be a third - updates info - ignored in this case
#Check you got them all (print current workspace):
ls()
######
#Reshaping the data (with a bit of cleaning along the way)
# this piece first saving only one day against lat longs for each grid
# load some necessary packages
# NOTE(review): lattice / RColorBrewer / raster are not used below
library(lattice)
library(RColorBrewer)
library(raster)
# Convert time -- split the time units string into fields
tustr <- strsplit(tunits$value, " ")
# the third field of the "days since YYYY-MM-DD ..." string is the origin
time_values <- as.Date(time,origin=as.Date(unlist(tustr)[3]))
time_values_c <- as.character(time_values)
time_values_df<-as.data.frame(time_values_c)
# Replace netCDF fill values with NA's
rain_array[rain_array==fillvalue$value] <- NA
# create dataframe -- reshape data
# matrix (nlon*nlat rows by 2 cols) of lons and lats
lonlat <- as.matrix(expand.grid(lon,lat))
dim(lonlat)
# reshape the array into vector
rain_vec_long <- as.vector(rain_array)
length(rain_vec_long)
# reshape the vector into a matrix
rain_mat <- matrix(rain_vec_long, nrow=nlon*nlat, ncol=nt)
dim(rain_mat)
#head(na.omit(rain_mat)) #<- this has a look at the data
# create a dataframe
# (lonlat below recomputes the same matrix built a few lines up)
lonlat <- as.matrix(expand.grid(lon,lat))
rain_df02 <- na.omit(data.frame(cbind(lonlat,rain_mat)))
names(rain_df02) <- c("lon","lat") # could rename variables to be rain on days 1-365
# (the assignment above leaves columns 3+ named NA; the next line fills
# them in with the calendar dates)
names(rain_df02)[3:ncol(rain_df02)]<- t(time_values_df)
#head(na.omit(rain_df02, 10))
#At this point we could add a variable containing summary statistics to each grid if we wanted
# write out the dataframe as a .csv file
csvfile <- paste(ncname, ".csv", sep="")
write.table(rain_df02,csvfile, row.names=FALSE, sep=",")
# This was the nicest example I found to work from:
# http://geog.uoregon.edu/bartlein/courses/geog490/week04-netCDF.html
}
# set path and filename
# Convert each annual VCSN rainfall netCDF (1999-2018) to csv in turn,
# instead of repeating the two-line call twenty times.
# After the loop, ncname holds the 2018 path, exactly as the original
# sequence of assignments did (a later rm(ncname) relies on it existing).
for (vcsn_year in 1999:2018) {
  ncname <- paste0("Data/VCSN_Rain5k_", vcsn_year)
  openandsave(ncname)
}
######
# Now adding them all together: read each annual csv back in and merge the
# twenty tables into one wide data frame keyed on grid point (lon, lat).
vcsn_years <- 1999:2018
annual_tables <- lapply(vcsn_years, function(y) {
  read.csv(sprintf("Data/VCSN_Rain5k_%d.csv", y), stringsAsFactors = FALSE)
})
# successive inner merges on (lon, lat): one row per grid point, with the
# daily-rainfall columns from every year side by side (same result as the
# original chain of nineteen pairwise merge() calls)
VCSN_Rain5k_working <- Reduce(function(acc, yr_table) {
  merge(acc, yr_table, by = c("lon", "lat"))
}, annual_tables)
# write out the dataframe as a .csv file
csvfile <- paste("Data/VCSN_Rain5k_1999_2018", ".csv", sep="")
write.table(VCSN_Rain5k_working, csvfile, row.names=FALSE, sep=",")
# clean up workspace: the per-year tables are no longer needed
rm(annual_tables, vcsn_years)
# setwd("~/EQC-climate-change-part-two")
# load the ncdf4 package
# (comment above is stale: this section loads sf and reshape2, not ncdf4)
library(sf)
# Inputting:
# Attempt at full set: note this won't compile to claims for the whole country (uses all 30GB)
# Read the combined 1999-2018 wide table: lon, lat, then one rainfall
# column per day (read.csv prefixes the numeric date names with "X").
precip_table <- read.csv("Data/VCSN_Rain5k_1999_2018.csv", sep=",", stringsAsFactors = FALSE)
head(names(precip_table))
# rename "X1999.01.01"-style columns to "precip1999.01.01"
# NOTE(review): gsub replaces EVERY "X" in a name, not just the leading one
# added by read.csv -- fine for these date columns, but fragile; sub("^X", ...)
# would be safer. Confirm no other column names contain "X".
names(precip_table) <- gsub("X", "precip", names(precip_table))
head(names(precip_table))
#names(precip_table) <- gsub(".", "", names(precip_table))
# This has columns for lat & lon, then a column for each day, containing rainfall
# Note the "centroid" is not actually a centroid (just NIWA's coordinates for each grid)
precipWorking <- as.data.frame(precip_table)
rm(precip_table)
head(names(precipWorking))
# prefer a "point / day / rain" column format for processing later on, with
# one row per (grid point, day): melt from wide to long
library(reshape2)
## If you're on an old machine, may be a problem with below - slow
precipWorking <- melt(precipWorking, id=c("lon","lat"))
head(precipWorking)
names(precipWorking) <- c("longitude", "latitude", "day", "rain")
head(precipWorking)
sapply(precipWorking, class)
# strip the "precip" prefix so the day column can be parsed as a date
precipWorking$day <- gsub("precip", "", precipWorking$day)
head(precipWorking)
precipWorking$day <- as.Date(precipWorking$day, format = "%Y.%m.%d")
sapply(precipWorking,class)
rm(csvfile)
rm(ncname)
# re-cast back to wide (one row per grid point, one column per day), then
# convert to an sf object with point geometry (EPSG:4326 = WGS84 lon/lat)
vcsn <- precipWorking
names(vcsn) <- c("vcsnLongitude", "vcsnLatitude", "vcsnDay", "rain")
vcsnWide <- dcast(vcsn, vcsnLatitude + vcsnLongitude ~ vcsnDay, value.var="rain")
vcsnWide$long <- vcsnWide$vcsnLongitude
vcsnWide$lat <- vcsnWide$vcsnLatitude
vcsnWide <- st_as_sf(vcsnWide, coords = c("long", "lat"), crs = 4326) #note crs code let's the function know the latlons are wgs84
#vcsnWorking <- melt(vcsnWide, id=c("vcsnLatitude", "vcsnLongitude"))
rm(precipWorking)
|
#***************************************************
# VGH MDC - Reading in volumes and durations data
# 2019-03-21
# Nayef
#***************************************************
# Reads the cleaned historical treatment volume and duration CSVs, parses the
# dates, drops the IsWeekend flag, and reshapes each table to long format
# (one row per Date x treatment) for downstream analysis.
library(tidyverse)
library(magrittr)  # provides the compound-assignment pipe %<>% used below
library(lubridate) # mdy() date parsing
# 1. Read in data: ------------------------------
# Make readr interpret date-times in Pacific time by default.
options(readr.default_locale=readr::locale(tz="America/Los_Angeles"))
# 1.1 > volumes: ---------
df1.volumes <-
  read_csv(here::here("results",
                      "clean data",
                      "2019-03-21_vgh_mdc-historical-treatment-volumes.csv"))
# Wide -> long: Date stays; every other column becomes a (treatment, volume) pair.
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
df1.volumes %<>%
  mutate(Date = mdy(Date)) %>%   # dates arrive as "m/d/Y" strings
  select(-IsWeekend) %>%         # weekend flag can be re-derived from Date
  gather(key = treatment,
         value = volume,
         -Date) %>%
  mutate(treatment = as.factor(treatment))
# Quick interactive sanity checks:
head(df1.volumes)
str(df1.volumes)
summary(df1.volumes)
# 1.2 > durations: ------
# Same reshaping as the volumes table, but the values are durations.
df2.durations <-
  read_csv(here::here("results",
                      "clean data",
                      "2019-03-21_vgh_mdc-historical-treatment-durations.csv"))
df2.durations %<>%
  mutate(Date = mdy(Date)) %>%
  select(-IsWeekend) %>%
  gather(key = treatment,
         value = duration,
         -Date) %>%
  mutate(treatment = as.factor(treatment))
head(df2.durations)
str(df2.durations)
summary(df2.durations)
| /src/2019-03-21_vgh_mdc_read-data.R | no_license | nayefahmad/2019-13-15_vgh_mdc-capacity-planning-including-OPAT | R | false | false | 1,275 | r |
#***************************************************
# VGH MDC - Reading in volumes and durations data
# 2019-03-21
# Nayef
#***************************************************
# Loads the cleaned treatment volume and duration CSVs, parses dates, drops
# the IsWeekend column, and converts each table to long (Date x treatment) form.
library(tidyverse)
library(magrittr)  # %<>% compound-assignment pipe
library(lubridate) # mdy() for "m/d/Y" date strings
# 1. Read in data: ------------------------------
# Default readr locale: interpret timestamps as Pacific time.
options(readr.default_locale=readr::locale(tz="America/Los_Angeles"))
# 1.1 > volumes: ---------
df1.volumes <-
  read_csv(here::here("results",
                      "clean data",
                      "2019-03-21_vgh_mdc-historical-treatment-volumes.csv"))
# Reshape wide -> long; treatment becomes a factor column.
# NOTE(review): gather() is superseded -- pivot_longer() is the modern form.
df1.volumes %<>%
  mutate(Date = mdy(Date)) %>%
  select(-IsWeekend) %>%   # derivable from Date, so dropped
  gather(key = treatment,
         value = volume,
         -Date) %>%
  mutate(treatment = as.factor(treatment))
# Interactive sanity checks:
head(df1.volumes)
str(df1.volumes)
summary(df1.volumes)
# 1.2 > durations: ------
# Identical processing, values are durations instead of volumes.
df2.durations <-
  read_csv(here::here("results",
                      "clean data",
                      "2019-03-21_vgh_mdc-historical-treatment-durations.csv"))
df2.durations %<>%
  mutate(Date = mdy(Date)) %>%
  select(-IsWeekend) %>%
  gather(key = treatment,
         value = duration,
         -Date) %>%
  mutate(treatment = as.factor(treatment))
head(df2.durations)
str(df2.durations)
summary(df2.durations)
|
# Auto-generated fuzz/regression fixture: build a 5x1 double matrix A (with
# extreme magnitudes and a zero) and a 1x1 zero matrix B, then call the
# package-internal multivariance:::match_rows on them via do.call().
# NOTE(review): ::: reaches an unexported function -- behaviour depends on the
# installed multivariance version.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810619095752e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613114374-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 251 | r | testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810619095752e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/purrit.R
\name{purrit}
\alias{purrit}
\title{helper function for comp data}
\usage{
purrit(obs, pred = NULL, rec_age, plus_age, comp = "length", lenbins = NULL)
}
\arguments{
\item{obs}{observed data from .rep file}
\item{pred}{predicted data from .rep file (if used)}
\item{rec_age}{recruitment age}
\item{plus_age}{plus age group}
\item{comp}{`age` or `length` - default is length}
\item{lenbins}{set to base unless using alt, in which case the file should be in the `user_input` folder and the name needs to be provided, e.g., `lengthbins.csv` - the column must be named `len_bin`}
}
\value{
}
\description{
helper function for comp data
}
\examples{
purrit(obs, pred = NULL, rec_age, plus_age, comp = "length", lenbins = "lengthbins.csv")
}
| /man/purrit.Rd | no_license | BenWilliams-NOAA/rockfishr | R | false | true | 829 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/purrit.R
\name{purrit}
\alias{purrit}
\title{helper function for comp data}
\usage{
purrit(obs, pred = NULL, rec_age, plus_age, comp = "length", lenbins = NULL)
}
\arguments{
\item{obs}{observed data from .rep file}
\item{pred}{predicted data from .rep file (if used)}
\item{rec_age}{recruitment age}
\item{plus_age}{plus age group}
\item{comp}{`age` or `length` - default is length}
\item{lenbins}{set to base unless using alt, in which case the file should be in the `user_input` folder and the name needs to be provided, e.g., `lengthbins.csv` - the column must be named `len_bin`}
}
\value{
}
\description{
helper function for comp data
}
\examples{
purrit(obs, pred = NULL, rec_age, plus_age, comp = "length", lenbins = "lengthbins.csv")
}
|
# repfdr_clusters: cluster-wise repeated-fdr analysis.
#
# Splits the studies into the user-supplied clusters, runs repfdr::repfdr()
# independently inside each cluster, and then aggregates the per-cluster
# results across clusters (via the compiled helper rcpp_main) into per-SNP
# local fdr and cumulative Fdr estimates.
#
# Arguments:
#   pdf.binned.z          3D array [study, bin, association status (2 or 3)]
#                         of binned z-score densities.
#   binned.z.mat          matrix [SNP, study] of bin memberships.
#   clusters              integer vector, one entry per study; values must
#                         cover 1..max(clusters) with every cluster non-empty.
#   non.null              'replication' or 'meta-analysis' (plain repfdr's
#                         user-defined option is not supported here).
#   Pi.previous.result    optional starting Pi values, subset per cluster.
#   control               EM control list, forwarded to repfdr::repfdr().
#   clustering.ldr.report NULL (no ldr report), "ALL", or a vector of SNP
#                         indices whose local-fdr breakdown should be returned.
#   clustering.verbose    print per-cluster / per-SNP progress.
#
# Returns a list with $repfdr.mat.percluster, $repfdr.Pi.percluster,
# $mat (data.frame of fdr and Fdr per SNP), optionally $ldr, and $Pi.
repfdr_clusters <- function(pdf.binned.z, binned.z.mat, clusters, non.null = c('replication','meta-analysis'),
                            Pi.previous.result = NULL, control = em.control(), clustering.ldr.report = NULL, clustering.verbose = FALSE)
{
  # BUG FIX: normalise non.null to a single string before testing it. The
  # default is a length-2 vector, and `if` with a length > 1 condition is an
  # error in R >= 4.2 (and relied on silent first-element use before that).
  non.null <- non.null[1]
  if (!(non.null %in% c('replication', 'meta-analysis'))) {
    stop("for Cluster Analysis only 'replication' and 'meta-analysis' are allowed (no option user defined)")
  }
  nr_studies <- dim(pdf.binned.z)[1]            # total number of studies in analysis
  n_association_status <- dim(pdf.binned.z)[3]  # association states used, 2 or 3
  n_bins <- dim(pdf.binned.z)[2]                # number of bins in discretization
  nr_clusters <- max(clusters)                  # number of clusters
  # Check that the vector of cluster assignments is legal.
  CLUSTERS_STOP_MSG <- "Argument Clusters must be a vector of integers, covering all values between 1 and the chosen number of clusters. Use NULL for single cluster analysis."
  if (sum(clusters < 1) > 0) { stop(CLUSTERS_STOP_MSG) }
  for (i in seq_along(clusters)) { if (!is.integer(clusters[i])) { stop(CLUSTERS_STOP_MSG) } }
  for (i in seq_len(nr_clusters)) { if (sum(clusters == i) < 1) { stop(CLUSTERS_STOP_MSG) } }
  # Holders for the current cluster's inputs while iterating.
  current_pdf.binned.z <- NULL
  current_binned.z.mat <- NULL
  current_Pi.previous.result <- NULL
  # Per-cluster inputs and results.
  cluster.ind.list <- list()
  pdf.binned.z.list <- list()
  pdf.binned.z.list.index0 <- list()
  pdf.binned.z.list.index1 <- list()
  pdf.binned.z.list.index2 <- list()
  binned.z.mat.list <- list()
  repfdr.res.list <- list()
  repfdr.mat.list <- list()
  repfdr.Pi.list <- list()
  repfdr.Pi.list.NA.corrected <- list()
  # Run repfdr separately within each cluster of studies.
  for (i in seq_len(nr_clusters)) {
    cluster_ind <- which(clusters == i)
    cluster.ind.list[[i]] <- cluster_ind
    current_Pi.previous.result <- Pi.previous.result[cluster_ind]
    if (length(cluster_ind) > 1) {
      current_pdf.binned.z <- pdf.binned.z[cluster_ind, , ]
      current_binned.z.mat <- binned.z.mat[, cluster_ind]
    } else {
      # Single-study cluster: keep the study dimension explicit.
      current_pdf.binned.z <- array(pdf.binned.z[cluster_ind, , ], dim = c(1, dim(pdf.binned.z[cluster_ind, , ])))
      current_binned.z.mat <- matrix(binned.z.mat[, cluster_ind], ncol = 1)
    }
    pdf.binned.z.list[[i]] <- current_pdf.binned.z
    # Split the density array into one matrix per association status, as
    # expected by rcpp_main below.
    pdf.binned.z.list.index0[[i]] <- matrix(current_pdf.binned.z[, , 1], ncol = dim(current_pdf.binned.z)[2], nrow = length(cluster_ind))
    pdf.binned.z.list.index1[[i]] <- matrix(current_pdf.binned.z[, , 2], ncol = dim(current_pdf.binned.z)[2], nrow = length(cluster_ind))
    if (n_association_status == 3) {
      pdf.binned.z.list.index2[[i]] <- matrix(current_pdf.binned.z[, , 3], ncol = dim(current_pdf.binned.z)[2], nrow = length(cluster_ind))
    }
    binned.z.mat.list[[i]] <- current_binned.z.mat
    if (clustering.verbose) { cat(paste0("repfdr cluster :", i, "\n")) }
    repfdr.res.list[[i]] <- repfdr::repfdr(current_pdf.binned.z,
                                           current_binned.z.mat,
                                           non.null,
                                           Pi.previous.result = current_Pi.previous.result,
                                           control = control)
    if (clustering.verbose) { cat(paste0("\n")) }
    repfdr.mat.list[[i]] <- repfdr.res.list[[i]]$mat
    repfdr.Pi.list[[i]] <- repfdr.res.list[[i]]$Pi
    # Replace NA / NaN entries by 0 so the C++ aggregation never sees them.
    repfdr.Pi.list.NA.corrected[[i]] <- repfdr.Pi.list[[i]]
    repfdr.Pi.list.NA.corrected[[i]][is.na(repfdr.Pi.list.NA.corrected[[i]])] <- 0
    pdf.binned.z.list.index0[[i]][is.na(pdf.binned.z.list.index0[[i]])] <- 0
    pdf.binned.z.list.index1[[i]][is.na(pdf.binned.z.list.index1[[i]])] <- 0
    if (n_association_status == 3) {
      pdf.binned.z.list.index2[[i]][is.na(pdf.binned.z.list.index2[[i]])] <- 0
    }
  }
  Rcpp_res <- NULL
  # Number of distinct "how many studies in each state" configurations
  # (a multiset coefficient) -- the rows of the aggregated local-fdr table.
  lfdr_mat_rows <- choose(3 + nr_studies - 1, 3 - 1)
  if (n_association_status == 2) {
    lfdr_mat_rows <- choose(2 + nr_studies - 1, 2 - 1)
  }
  # Threshold on the number of non-null studies that makes a configuration
  # count as non-null: replication needs at least 2, meta-analysis at least 1.
  non.null.trans <- NULL
  non.null.u <- 2
  if (non.null == 'replication') { non.null.trans <- 0; non.null.u <- 2 }
  if (non.null == 'meta-analysis') { non.null.trans <- 1; non.null.u <- 1 }
  # ldr reporting mode: 0 = none, 1 = all SNPs, 2 = selected SNPs.
  ldr_report_code <- 0
  lfdr_ncol <- 1
  if (!is.null(clustering.ldr.report)) {
    if (clustering.ldr.report[1] == "ALL") {
      ldr_report_code <- 1
      lfdr_ncol <- (dim(binned.z.mat)[1])
    } else {
      ldr_report_code <- 2
      lfdr_ncol <- length(clustering.ldr.report)
    }
  }
  lfdr_mat <- matrix(NA, nrow = lfdr_mat_rows, ncol = lfdr_ncol)
  fdr_vec <- rep(NA, (dim(binned.z.mat)[1]))
  Fdr_vec <- rep(NA, (dim(binned.z.mat)[1]))
  # BUG FIX: progress is printed roughly every 1% of SNPs; guard the step
  # against zero -- round(n/100) == 0 for n < 50 made `i %% 0` NaN, and
  # `if (NaN == 1)` is an error.
  progress_step <- max(1, round(dim(binned.z.mat)[1] / 100))
  # Iterate over SNPs and aggregate the per-cluster results.
  for (i in seq_len(dim(binned.z.mat)[1])) {
    current_SNP <- as.integer(i)  # index of the current SNP
    if (clustering.verbose) {
      if (i %% progress_step == 1)
        cat(paste0('Doing SNP: ', current_SNP, '\n\r'))
    }
    # Per-SNP aggregation of lfdr; Pi is computed only on the last iteration.
    i_is_last <- (i == dim(binned.z.mat)[1])
    Rcpp_res <- rcpp_main(Sizes = c(nr_studies, n_bins, n_association_status,
                                    nr_clusters, non.null.trans, non.null.u, current_SNP, 0, 1 * i_is_last), # 0 is the debug value
                          pdf.binned.z.list.index0,
                          pdf.binned.z.list.index1,
                          pdf.binned.z.list.index2,
                          binned.z.mat.list,
                          cluster.ind.list,
                          repfdr.Pi.list.NA.corrected
                          )
    # Rows of the configuration table counted as non-null: at least non.null.u
    # studies with a -1/+1 state. (The original code had two byte-identical
    # branches for 'replication' and 'meta-analysis'; the threshold non.null.u
    # already encodes the difference, so they are merged here.)
    if (n_association_status == 2) {
      h1_rows <- which(Rcpp_res[[1]][, 2] >= non.null.u)
    } else {
      h1_rows <- which(Rcpp_res[[1]][, 1] >= non.null.u | Rcpp_res[[1]][, 3] >= non.null.u)
    }
    lfdr <- (Rcpp_res[[2]]) / sum(Rcpp_res[[2]])  # normalised aggregated local fdr
    if (ldr_report_code > 0) {
      if (ldr_report_code == 1) {
        lfdr_mat[, i] <- lfdr
      } else if (ldr_report_code == 2) {
        col_to_report <- which(clustering.ldr.report == i)
        if (length(col_to_report) > 0) {
          lfdr_mat[, col_to_report[1]] <- lfdr
        }
      }
    }
    # The SNP's fdr is the posterior mass on null configurations.
    # BUG FIX: when h1_rows is empty, lfdr[-integer(0)] selects NOTHING, so the
    # original summed to 0 instead of the full (all-null) mass.
    if (length(h1_rows) > 0) {
      fdr_vec[i] <- sum(lfdr[-h1_rows])
    } else {
      fdr_vec[i] <- sum(lfdr)
    }
  }
  # Cumulative Fdr: running mean of the sorted fdr values, mapped back to the
  # original SNP order.
  o <- order(fdr_vec)
  ro <- order(o)
  Fdr_vec <- (cumsum(fdr_vec[o]) / seq_along(fdr_vec))[ro]
  ret <- list(repfdr.mat.percluster = repfdr.mat.list,
              repfdr.Pi.percluster = repfdr.Pi.list,
              mat = data.frame(fdr = fdr_vec, Fdr = Fdr_vec))
  # Column names for the association-status configuration matrix.
  comb_mat <- Rcpp_res[[1]]
  if (n_association_status == 2) {
    comb_mat <- comb_mat[, -c(3)]
    colnames(comb_mat) <- c("H:0", "H:1")
  }
  if (n_association_status == 3) {
    colnames(comb_mat) <- c("H:-1", "H:0", "H:1")
  }
  # Attach the per-SNP local-fdr report, if one was requested.
  if (ldr_report_code > 0) {
    if (ldr_report_code == 1) {
      colnames(lfdr_mat) <- paste0("SNP ", seq_len(ncol(lfdr_mat)))
    } else if (ldr_report_code == 2) {
      colnames(lfdr_mat) <- paste0("SNP ", clustering.ldr.report)
    }
    ldr <- cbind(comb_mat, lfdr_mat)
    ret$ldr <- ldr
  }
  PI <- cbind(comb_mat, Rcpp_res[[4]])
  colnames(PI) <- c(colnames(comb_mat), 'PI')
  ret$Pi <- PI
  return(ret)
}
| /R/repfdr_clusters.R | no_license | cran/repfdr | R | false | false | 8,138 | r | repfdr_clusters <- function(pdf.binned.z, binned.z.mat,clusters, non.null = c('replication','meta-analysis'),
Pi.previous.result=NULL, control = em.control(),clustering.ldr.report = NULL,clustering.verbose = F)
{
if(!(non.null %in% c('replication','meta-analysis'))){stop('for Cluster Analysis only replication and meta-analysis are allowd, (no option user defined)')}
nr_studies = dim(pdf.binned.z)[1] #total number of studies in analysis
n_association_status = dim(pdf.binned.z)[3] #association status used, 2 or 3
n_bins = dim(pdf.binned.z)[2] #number of bins in discretization
Pi_list = list() # list of pi results from running repfdr in each cluster
nr_clusters = max(clusters) # number of clusters
# we now check that the vector of cluster partitions is legal:
CLUSTERS_STOP_MSG = "Argument Clusters must be a vector of integers, covering all values between 1 and the chosen number of clusters. Use NULL for single cluster analysis."
if(sum(clusters<1 )>0){stop(CLUSTERS_STOP_MSG)}
for(i in 1:length(clusters)){if(!is.integer(clusters[i])){stop(CLUSTERS_STOP_MSG)}}
for(i in 1:nr_clusters){if(sum(clusters ==i)<1){stop(CLUSTERS_STOP_MSG)}}
#holders for the current cluster parameters, when doing the per cluster repfdr
current_pdf.binned.z = NULL
current_binned.z.mat = NULL
current_Pi.previous.result = NULL
# these are lists of the parameters and results for the per cluster repfdrs
cluster.ind.list = list()
pdf.binned.z.list = list()
pdf.binned.z.list.index0 = list()
pdf.binned.z.list.index1 = list()
pdf.binned.z.list.index2 = list()
binned.z.mat.list = list()
repfdr.res.list = list()
repfdr.mat.list = list()
repfdr.Pi.list = list()
repfdr.Pi.list.NA.corrected = list()
#actual iteration over the clusters
for(i in 1:nr_clusters){
#getting cluster parameters
cluster_ind = which(clusters == i)
cluster.ind.list [[ i ]] = cluster_ind
current_Pi.previous.result = Pi.previous.result[cluster_ind]
if(length(cluster_ind)>1){
current_pdf.binned.z = pdf.binned.z[cluster_ind,,]
current_binned.z.mat = binned.z.mat[,cluster_ind]
}else{
current_pdf.binned.z = array(pdf.binned.z[cluster_ind,,],dim = c(1,dim(pdf.binned.z[cluster_ind,,])))
current_binned.z.mat = matrix(binned.z.mat[,cluster_ind],ncol = 1)
}
pdf.binned.z.list[[i]] = current_pdf.binned.z
pdf.binned.z.list.index0 [[ i ]] = matrix(current_pdf.binned.z[,,1],ncol = dim(current_pdf.binned.z)[2] ,nrow = length(cluster_ind))
pdf.binned.z.list.index1 [[ i ]] = matrix(current_pdf.binned.z[,,2],ncol = dim(current_pdf.binned.z)[2] ,nrow = length(cluster_ind))
if(n_association_status==3){
pdf.binned.z.list.index2 [[ i ]] = matrix(current_pdf.binned.z[,,3],ncol = dim(current_pdf.binned.z)[2] ,nrow = length(cluster_ind))
}
binned.z.mat.list[[i]] = current_binned.z.mat
if(clustering.verbose){cat(paste0("repfdr cluster :",i,"\n"))}
repfdr.res.list[[i]] = repfdr::repfdr(current_pdf.binned.z,
current_binned.z.mat,
non.null[1],
Pi.previous.result = current_Pi.previous.result,
control = control)
if(clustering.verbose){cat(paste0("\n"))}
repfdr.mat.list[[i]] = repfdr.res.list[[i]]$mat
repfdr.Pi.list[[i]] = repfdr.res.list[[i]]$Pi
#handling NAs and NaNs
repfdr.Pi.list.NA.corrected[[i]] = repfdr.Pi.list[[i]]
repfdr.Pi.list.NA.corrected[[i]][is.na(repfdr.Pi.list.NA.corrected[[i]])] = 0
pdf.binned.z.list.index0 [[ i ]][is.na(pdf.binned.z.list.index0 [[ i ]])] = 0
pdf.binned.z.list.index1 [[ i ]][is.na(pdf.binned.z.list.index1 [[ i ]])] = 0
if(n_association_status == 3){
pdf.binned.z.list.index2 [[ i ]][is.na(pdf.binned.z.list.index2 [[ i ]])] = 0
}
}
Rcpp_res = NULL
non.null.trans = NULL
non.null.u=2
#number of rows in ldr matrix
lfdr_mat_rows = choose(3+nr_studies-1,3-1)
if(n_association_status == 2){
lfdr_mat_rows = choose(2+nr_studies-1,2-1)
}
#thresholding on number of non null hypothesis for the aggregated local fdr
if(non.null == 'replication'){non.null.trans=0 ; non.null.u = 2}
if(non.null == 'meta-analysis'){non.null.trans=1 ; non.null.u = 1}
#ldr reports
ldr_report_code = 0
lfdr_ncol = 1
if(!is.null(clustering.ldr.report)){
if(clustering.ldr.report[1] == "ALL"){
ldr_report_code = 1
lfdr_ncol = (dim(binned.z.mat)[1])
}else{
ldr_report_code = 2
lfdr_ncol = length(clustering.ldr.report)
}
}
lfdr_mat = matrix(NA,nrow = lfdr_mat_rows,ncol = lfdr_ncol)
fdr_vec = rep(NA,(dim(binned.z.mat)[1]))
Fdr_vec = rep(NA,(dim(binned.z.mat)[1]))
#we now iterate over SNPs and aggregate the results
for(i in 1:(dim(binned.z.mat)[1])){
#index of the current SNP
current_SNP=as.integer(i)
if(clustering.verbose){
if(i%%round((dim(binned.z.mat)[1])/100) == 1)
cat(paste0('Doing SNP: ',current_SNP,'\n\r'))
}
#performing the per SNP aggregation of lfdr
i_is_last = (i==dim(binned.z.mat)[1]) #PI is computed only for the last i
Rcpp_res = rcpp_main(Sizes = c(nr_studies,n_bins,n_association_status,
nr_clusters,non.null.trans,non.null.u,current_SNP,0,1*i_is_last), #0 is for the debug value
pdf.binned.z.list.index0,
pdf.binned.z.list.index1,
pdf.binned.z.list.index2,
binned.z.mat.list,
cluster.ind.list,
repfdr.Pi.list.NA.corrected
)
#under this formulation, do we really need a different analysis for meta & rep?
if(non.null == 'replication'){
if(n_association_status == 2)
h1_rows = which(Rcpp_res[[1]][,2] >= non.null.u)
if(n_association_status == 3)
h1_rows = which(Rcpp_res[[1]][,1] >= non.null.u | Rcpp_res[[1]][,3] >= non.null.u)
}
if(non.null == 'meta-analysis'){
if(n_association_status == 2)
h1_rows = which(Rcpp_res[[1]][,2] >= non.null.u)
if(n_association_status == 3)
h1_rows = which(Rcpp_res[[1]][,1] >= non.null.u | Rcpp_res[[1]][,3] >= non.null.u)
}
lfdr = (Rcpp_res[[2]]) / sum(Rcpp_res[[2]]) #computing the aggregated local fdr
if(ldr_report_code>0){
if(ldr_report_code == 1){
lfdr_mat[,i] = lfdr
}else if(ldr_report_code == 2){
col_to_report = which(clustering.ldr.report == i)
if(length(col_to_report)>0){
lfdr_mat[,col_to_report[1]] = lfdr
}
}
}
fdr = sum(lfdr[-h1_rows])
fdr_vec[i] = fdr
}
o <- order(fdr_vec)
ro <- order(o)
Fdr_vec <- (cumsum(fdr_vec[o])/(1:length(fdr_vec)))[ro]
ret = list(repfdr.mat.percluster = repfdr.mat.list,
repfdr.Pi.percluster = repfdr.Pi.list,
mat = data.frame(fdr = fdr_vec,Fdr = Fdr_vec))
#add col names to association values (0,1) or (-1,0,1)
comb_mat = Rcpp_res[[1]]
if(n_association_status == 2){
comb_mat = comb_mat[,-c(3)]
colnames(comb_mat) = c("H:0","H:1")
}
if(n_association_status == 3){
colnames(comb_mat) = c("H:-1","H:0","H:1")
}
#handle ldr reporting
if(ldr_report_code>0){
# add col names to SNP LFDRs
if(ldr_report_code == 1){
colnames(lfdr_mat) = paste0("SNP ", 1:ncol(lfdr_mat))
}else if(ldr_report_code == 2){
colnames(lfdr_mat) = paste0("SNP ", clustering.ldr.report)
}
ldr = cbind(comb_mat,lfdr_mat)
ret$ldr = ldr
}
PI = cbind(comb_mat,Rcpp_res[[4]])
colnames(PI) = c(colnames(comb_mat),'PI')
ret$Pi =PI
return (ret)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colour_clusters.R
\name{set_leaf_colours}
\alias{set_leaf_colours}
\alias{set_leaf_colors}
\alias{set_leaf_colors}
\title{Set the leaf colours of a dendrogram}
\usage{
set_leaf_colours(d, col, col_to_set = c("edge", "node", "label"))
set_leaf_colors(d, col, col_to_set = c("edge", "node", "label"))
}
\arguments{
\item{d}{the dendrogram}
\item{col}{Single colour or named character vector of colours. When NA no
colour will be set.}
\item{col_to_set}{Character scalar - kind of colour attribute to set}
}
\description{
Set the leaf colours of a dendrogram
}
\examples{
d5=colour_clusters(hclust(dist(USArrests), "ave"),5)
dred=set_leaf_colours(d5,'red','edge')
stopifnot(isTRUE(all(leaf_colours(dred)=='red')))
d52=set_leaf_colours(d5,leaf_colours(d5),'edge')
stopifnot(all.equal(d5,d52))
}
\seealso{
\code{\link{slice},\link{colour_clusters}}
}
\author{
jefferis
}
| /man/set_leaf_colours.Rd | no_license | cran/dendroextras | R | false | true | 947 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colour_clusters.R
\name{set_leaf_colours}
\alias{set_leaf_colours}
\alias{set_leaf_colors}
\alias{set_leaf_colors}
\title{Set the leaf colours of a dendrogram}
\usage{
set_leaf_colours(d, col, col_to_set = c("edge", "node", "label"))
set_leaf_colors(d, col, col_to_set = c("edge", "node", "label"))
}
\arguments{
\item{d}{the dendrogram}
\item{col}{Single colour or named character vector of colours. When NA no
colour will be set.}
\item{col_to_set}{Character scalar - kind of colour attribute to set}
}
\description{
Set the leaf colours of a dendrogram
}
\examples{
d5=colour_clusters(hclust(dist(USArrests), "ave"),5)
dred=set_leaf_colours(d5,'red','edge')
stopifnot(isTRUE(all(leaf_colours(dred)=='red')))
d52=set_leaf_colours(d5,leaf_colours(d5),'edge')
stopifnot(all.equal(d5,d52))
}
\seealso{
\code{\link{slice},\link{colour_clusters}}
}
\author{
jefferis
}
|
################################
#### Download CMIP5 historical transpiration (tran) and soil evaporation
#### (evspsblsoi), keep the last 56 years of each monthly series, and average
#### each year's 12 months into annual means. Results are assigned into the
#### global environment as "<var>_<model>_year" arrays and finally saved to
#### per-model .RData files.
#### NOTE(review): relies on nc_open()/ncvar_get()/nc_close() from the ncdf4
#### package, which must already be attached by the calling environment.
#### List of models for which we have tran:
list_models_tran_PCMDI <- c("bcc-csm1-1","bcc-csm1-1-m","BNU-ESM","CCSM4","CESM1-BGC","CESM1-CAM5", "CESM1-FASTCHEM","CESM1-WACCM","CMCC-CESM","CNRM-CM5","CNRM-CM5-2","CanESM2","FGOALS-g2", "FGOALS-s2","FIO-ESM","GFDL-CM3","GFDL-ESM2G","GFDL-ESM2M","GISS-E2-H","GISS-E2-H-CC","GISS-E2-R","GISS-E2-R-CC","HadGEM2-AO","inmcm4","IPSL-CM5A-LR","IPSL-CM5A-MR","IPSL-CM5B-LR","MIROC-ESM","MIROC-ESM-CHEM","MIROC4h","MIROC5","MPI-ESM-LR","MPI-ESM-MR","MPI-ESM-P","MRI-CGCM3","MRI-ESM1","NorESM1-M","NorESM1-ME")
list_models_tran_PCMDI2 <- c("bcc_csm1_1","bcc_csm1_1_m","BNU_ESM","CCSM4","CESM1_BGC","CESM1_CAM5", "CESM1_FASTCHEM","CESM1_WACCM","CMCC_CESM","CNRM_CM5","CNRM_CM5_2","CanESM2","FGOALS_g2", "FGOALS_s2","FIO_ESM","GFDL_CM3","GFDL_ESM2G","GFDL_ESM2M","GISS_E2_H","GISS_E2_H_CC","GISS_E2_R","GISS_E2_R_CC","HadGEM2_AO","inmcm4","IPSL_CM5A_LR","IPSL_CM5A_MR","IPSL_CM5B_LR","MIROC_ESM","MIROC_ESM_CHEM","MIROC4h","MIROC5","MPI_ESM_LR","MPI_ESM_MR","MPI_ESM_P","MRI_CGCM3","MRI_ESM1","NorESM1_M","NorESM1_ME")
list_var_evap <- c("evspsbl", "tran", "evspsblsoi", "evspsblveg")
# Collapse a lon x lat x (12 * nyears months) array into annual means:
# one slice per year, each the mean of its 12 months (NAs removed).
monthly_to_annual <- function(monthly, nyears = 56) {
  annual <- array(NA, dim = c(dim(monthly)[1], dim(monthly)[2], nyears))
  for (t in seq_len(nyears)) {
    annual[, , t] <- apply(monthly[, , (12 * (t - 1) + 1):(12 * (t - 1) + 12)],
                           c(1, 2), mean, na.rm = TRUE)
  }
  annual
}
########################################################### Getting Tran #############################
v <- 2; print(list_var_evap[v])
names_models <- get(paste("list_models_", list_var_evap[v], "_PCMDI", sep = ""))
names_models2 <- get(paste("list_models_", list_var_evap[v], "_PCMDI2", sep = ""))
for (m in seq_along(names_models)) { # models, streamed from the LDEO OPeNDAP server
  print(names_models[m])
  data <- nc_open(paste("http://strega.ldeo.columbia.edu:81/CMIP5/.byScenario/.historical/.land/.mon/.", list_var_evap[v], "/.", names_models[m], "/.r1i1p1/.", list_var_evap[v], "/dods", sep = ""))
  lat <- ncvar_get(data, "lat")
  lon <- ncvar_get(data, "lon")
  assign(paste("lat_", names_models2[m], sep = ""), lat)
  assign(paste("lon_", names_models2[m], sep = ""), lon)
  # Keep only the last 56 years (the OPeNDAP time dimension is named "T").
  bob <- ncvar_get(data, list_var_evap[v])[, , (data$dim$T$len - 56 * 12 + 1):data$dim$T$len]
  nc_close(data)
  assign(paste(list_var_evap[v], "_", names_models2[m], "_year", sep = ""), monthly_to_annual(bob))
  rm(bob)
}
## For CESM1-CAM5, Tran is the same as Esoil... You'll have to download one from
## PCMDI, but it is wrong: it is actually Esoil + Tran (see correction below).
v <- 2; print(list_var_evap[v])
names_models <- get(paste("list_models_", list_var_evap[v], "_PCMDI", sep = ""))
names_models2 <- get(paste("list_models_", list_var_evap[v], "_PCMDI2", sep = ""))
m <- 6; print(names_models[m])
data <- nc_open(paste("/home/air3/ab5/CMIP5_data/", list_var_evap[v], "/", names_models[m], "/tran_Lmon_", names_models[m], "_historical_r1i1p1_185001-200512.nc", sep = ""))
lat <- ncvar_get(data, "lat")
lon <- ncvar_get(data, "lon")
assign(paste("lat_", names_models2[m], sep = ""), lat)
assign(paste("lon_", names_models2[m], sep = ""), lon)
# Local netCDF files use the standard "time" dimension name.
bob <- ncvar_get(data, list_var_evap[v])[, , (data$dim$time$len - 56 * 12 + 1):data$dim$time$len]
nc_close(data)
assign(paste(list_var_evap[v], "_", names_models2[m], "_year", sep = ""), monthly_to_annual(bob))
rm(bob)
# BUG FIX: the original had a stray "}" here that matched no opening brace.
plus_models <- c("CNRM-CM5", "CNRM-CM5-2", "IPSL-CM5A-LR", "IPSL-CM5A-MR", "IPSL-CM5B-LR")
plus_models2 <- c("CNRM_CM5", "CNRM_CM5_2", "IPSL_CM5A_LR", "IPSL_CM5A_MR", "IPSL_CM5B_LR")
for (m in seq_along(plus_models)) { # models with locally concatenated files
  print(plus_models[m])
  data <- nc_open(paste("/home/air3/ab5/CMIP5_data/", list_var_evap[v], "/", plus_models[m], "/tran_Lmon_", plus_models[m], "_historical_r1i1p1_allmonths.nc", sep = ""))
  lat <- ncvar_get(data, "lat")
  lon <- ncvar_get(data, "lon")
  assign(paste("lat_", plus_models2[m], sep = ""), lat)
  assign(paste("lon_", plus_models2[m], sep = ""), lon)
  bob <- ncvar_get(data, list_var_evap[v])[, , (data$dim$time$len - 56 * 12 + 1):data$dim$time$len]
  nc_close(data)
  # BUG FIX: the original indexed names_models2[m] here, storing these five
  # models' data under the wrong (tran-list position) names; use plus_models2.
  assign(paste(list_var_evap[v], "_", plus_models2[m], "_year", sep = ""), monthly_to_annual(bob))
  rm(bob)
}
########################################################## Evspsblsoi #############################################
### Download the data from PCMDI
### We take Esoil to correct some NCAR models:
list_models_evspsblsoi_PCMDI <- c("ACCESS1-0","ACCESS1-3","bcc-csm1-1","bcc-csm1-1-m","BNU-ESM","CCSM4","CESM1-BGC","CESM1-CAM5","CESM1-FASTCHEM","CESM1-WACCM","CMCC-CESM","CMCC-CM","CNRM-CM5","CNRM-CM5-2","CanESM2","FGOALS-g2","FGOALS-s2","FIO-ESM","GFDL-ESM2G","GFDL-ESM2M","GISS-E2-H","GISS-E2-H-CC","GISS-E2-R","GISS-E2-R-CC","HadGEM2-AO","inmcm4","IPSL-CM5A-LR","IPSL-CM5A-MR","IPSL-CM5B-LR","MIROC-ESM","MIROC-ESM-CHEM","MIROC4h","MIROC5","MRI-CGCM3","MRI-ESM1","NorESM1-M","NorESM1-ME")
list_models_evspsblsoi_PCMDI2 <- c("ACCESS1_0","ACCESS1_3","bcc_csm1_1","bcc_csm1_1_m","BNU_ESM","CCSM4","CESM1_BGC","CESM1_CAM5","CESM1_FASTCHEM","CESM1_WACCM","CMCC_CESM","CMCC_CM","CNRM_CM5","CNRM_CM5_2","CanESM2","FGOALS_g2","FGOALS_s2","FIO_ESM","GFDL_ESM2G","GFDL_ESM2M","GISS_E2_H","GISS_E2_H_CC","GISS_E2_R","GISS_E2_R_CC","HadGEM2_AO","inmcm4","IPSL_CM5A_LR","IPSL_CM5A_MR","IPSL_CM5B_LR","MIROC_ESM","MIROC_ESM_CHEM","MIROC4h","MIROC5","MRI_CGCM3","MRI_ESM1","NorESM1_M","NorESM1_ME")
v <- 3
print(list_var_evap[v])
for (m in seq_along(list_models_evspsblsoi_PCMDI)) { # models
  data <- nc_open(paste("/home/air3/ab5/CMIP5_data/", list_var_evap[v], "/", list_models_evspsblsoi_PCMDI[m], "/", list_var_evap[v], "_Lmon_", list_models_evspsblsoi_PCMDI[m], "_historical_r1i1p1_allmonths.nc", sep = ""))
  bob <- ncvar_get(data, list_var_evap[v])[, , (data$dim$time$len - 56 * 12 + 1):data$dim$time$len]
  nc_close(data)
  # BUG FIX: the original used names_models2[m] (the tran model list, still
  # bound from above) -- index the evspsblsoi model list instead.
  assign(paste(list_var_evap[v], "_", list_models_evspsblsoi_PCMDI2[m], "_year", sep = ""), monthly_to_annual(bob))
  rm(bob)
}
## We have to correct GFDL-ESM2M - we take it from LDEO - also FGOALS-g2:
v <- 3
for (m in c(16, 20)) {
  print(list_models_evspsblsoi_PCMDI[m])
  ## We need to get that model:
  data <- nc_open(paste("http://strega.ldeo.columbia.edu:81/CMIP5/.byScenario/.historical/.land/.mon/.", list_var_evap[v], "/.", list_models_evspsblsoi_PCMDI[m], "/.r1i1p1/.", list_var_evap[v], "/dods", sep = ""))
  bob <- ncvar_get(data, list_var_evap[v])[, , (data$dim$T$len - 56 * 12 + 1):data$dim$T$len]
  nc_close(data)
  assign(paste(list_var_evap[v], "_", list_models_evspsblsoi_PCMDI2[m], "_year", sep = ""), monthly_to_annual(bob))
  rm(bob)
}  # BUG FIX: this loop was never closed in the original (its "}" was missing)
########################################################## Evspsblveg #############################################
### Not needed here.
#################################################################################################
### Correct some NCAR models: what we have is TRAN + EVSPSBLSOI; subtract EVSPSBLSOI.
### BUG FIX: the annualised objects carry a "_year" suffix (see the assign()
### calls above); the original referenced suffix-less names that are never created here.
tran_CCSM4_year <- tran_CCSM4_year - evspsblsoi_CCSM4_year
tran_CESM1_BGC_year <- tran_CESM1_BGC_year - evspsblsoi_CESM1_BGC_year
tran_CESM1_FASTCHEM_year <- tran_CESM1_FASTCHEM_year - evspsblsoi_CESM1_FASTCHEM_year
tran_CESM1_WACCM_year <- tran_CESM1_WACCM_year - evspsblsoi_CESM1_WACCM_year
tran_CESM1_CAM5_year <- tran_CESM1_CAM5_year - evspsblsoi_CESM1_CAM5_year
tran_CESM1_CAM5_year[which(tran_CESM1_CAM5_year < 0)] <- 0  # clamp negative residuals
# Save each model's annual transpiration array to its own .RData file.
for (m in seq_along(list_models_tran_PCMDI)) {
  print(list_models_tran_PCMDI[m])
  tran_year <- get(paste("tran_", list_models_tran_PCMDI2[m], "_year", sep = ""))
  # BUG FIX: the original wrote list_models_tran_PCMDI_fut2 (undefined); use
  # the existing "_PCMDI2" underscore-name vector.
  save(tran_year, file = paste("tran_", list_models_tran_PCMDI2[m], "_year.RData", sep = ""))
}
| /get_tran.R | no_license | alexismberg/Berg_McColl_2021_drylands | R | false | false | 7,736 | r | ################################
#### List of models for which we have tran:
list_models_tran_PCMDI <- c("bcc-csm1-1","bcc-csm1-1-m","BNU-ESM","CCSM4","CESM1-BGC","CESM1-CAM5", "CESM1-FASTCHEM","CESM1-WACCM","CMCC-CESM","CNRM-CM5","CNRM-CM5-2","CanESM2","FGOALS-g2", "FGOALS-s2","FIO-ESM","GFDL-CM3","GFDL-ESM2G","GFDL-ESM2M","GISS-E2-H","GISS-E2-H-CC","GISS-E2-R","GISS-E2-R-CC","HadGEM2-AO","inmcm4","IPSL-CM5A-LR","IPSL-CM5A-MR","IPSL-CM5B-LR","MIROC-ESM","MIROC-ESM-CHEM","MIROC4h","MIROC5","MPI-ESM-LR","MPI-ESM-MR","MPI-ESM-P","MRI-CGCM3","MRI-ESM1","NorESM1-M","NorESM1-ME")
list_models_tran_PCMDI2 <- c("bcc_csm1_1","bcc_csm1_1_m","BNU_ESM","CCSM4","CESM1_BGC","CESM1_CAM5", "CESM1_FASTCHEM","CESM1_WACCM","CMCC_CESM","CNRM_CM5","CNRM_CM5_2","CanESM2","FGOALS_g2", "FGOALS_s2","FIO_ESM","GFDL_CM3","GFDL_ESM2G","GFDL_ESM2M","GISS_E2_H","GISS_E2_H_CC","GISS_E2_R","GISS_E2_R_CC","HadGEM2_AO","inmcm4","IPSL_CM5A_LR","IPSL_CM5A_MR","IPSL_CM5B_LR","MIROC_ESM","MIROC_ESM_CHEM","MIROC4h","MIROC5","MPI_ESM_LR","MPI_ESM_MR","MPI_ESM_P","MRI_CGCM3","MRI_ESM1","NorESM1_M","NorESM1_ME")
list_var_evap <- c("evspsbl", "tran", "evspsblsoi", "evspsblveg")
########################################################### Getting Tran #############################
v=2; print(list_var_evap[v])
names_models <- get(paste("list_models_",list_var_evap[v], "_PCMDI", sep=""))
names_models2 <- get(paste("list_models_",list_var_evap[v], "_PCMDI2", sep=""))
for (m in 1:length(names_models)){ #models
print(names_models[m])
data <- nc_open(paste("http://strega.ldeo.columbia.edu:81/CMIP5/.byScenario/.historical/.land/.mon/.", list_var_evap[v], "/.", names_models[m],"/.r1i1p1/.",list_var_evap[v],"/dods", sep=""))
lat <- ncvar_get(data, "lat")
lon <- ncvar_get(data, "lon")
assign(paste("lat_",names_models2[m], sep=""), lat)
assign(paste("lon_",names_models2[m], sep=""), lon)
bob <- ncvar_get(data, list_var_evap[v])[,,(data$dim$T$len-56*12+1):data$dim$T$len]
nc_close(data)
buff <- array(NA, dim=c(dim(bob)[1],dim(bob)[2], 56))
for (t in 1:56){buff[,,t] <- apply(bob[ ,, (12*(t-1)+1):(12*(t-1)+12)], c(1,2), mean, na.rm=T)}
assign(paste(list_var_evap[v],"_",names_models2[m],"_year" , sep=""), buff )
rm(bob); rm(buff) }
## For CESM1-CAM5, Tran is the same as Esoil... You'll have to download one from PCMDI, but it is wrong it is actually Esoil + Tran (see correction below).
v=2; print(list_var_evap[v])
names_models <- get(paste("list_models_",list_var_evap[v], "_PCMDI", sep=""))
names_models2 <- get(paste("list_models_",list_var_evap[v], "_PCMDI2",sep=""))
m=6; print(names_models[m])
data <- nc_open(paste("/home/air3/ab5/CMIP5_data/", list_var_evap[v],"/", names_models[m],"/tran_Lmon_", names_models[m], "_historical_r1i1p1_185001-200512.nc", sep=""))
lat <- ncvar_get(data, "lat")
lon <- ncvar_get(data, "lon")
assign(paste("lat_",names_models2[m], sep=""), lat)
assign(paste("lon_",names_models2[m], sep=""), lon)
bob <- ncvar_get(data, list_var_evap[v])[,,(data$dim$time$len-56*12+1):data$dim$time$len]
nc_close(data)
buff <- array(NA, dim=c(dim(bob)[1],dim(bob)[2], 56))
for (t in 1:56){buff[,,t] <- apply(bob[ ,, (12*(t-1)+1):(12*(t-1)+12)], c(1,2), mean, na.rm=T)}
assign(paste(list_var_evap[v],"_",names_models2[m],"_year" , sep=""), buff )
rm(bob); rm(buff) }
# Extra 'tran' models whose files are stored under the *_allmonths.nc name.
plus_models <- c("CNRM-CM5", "CNRM-CM5-2","IPSL-CM5A-LR", "IPSL-CM5A-MR", "IPSL-CM5B-LR")
plus_models2 <- c("CNRM_CM5" , "CNRM_CM5_2", "IPSL_CM5A_LR", "IPSL_CM5A_MR", "IPSL_CM5B_LR")
for (m in seq_along(plus_models)){ #models
	print(plus_models[m])
	data <- nc_open(paste("/home/air3/ab5/CMIP5_data/", list_var_evap[v],"/", plus_models[m],"/tran_Lmon_", plus_models[m], "_historical_r1i1p1_allmonths.nc", sep=""))
	lat <- ncvar_get(data, "lat")
	lon <- ncvar_get(data, "lon")
	assign(paste("lat_",plus_models2[m], sep=""), lat)
	assign(paste("lon_",plus_models2[m], sep=""), lon)
	# keep only the last 56 years (56*12 monthly slices) of the record
	bob <- ncvar_get(data, list_var_evap[v])[,,(data$dim$time$len-56*12+1):data$dim$time$len]
	nc_close(data)
	buff <- array(NA, dim=c(dim(bob)[1],dim(bob)[2], 56))
	for (t in 1:56){buff[,,t] <- apply(bob[ ,, (12*(t-1)+1):(12*(t-1)+12)], c(1,2), mean, na.rm=TRUE)}
	# bug fix: was names_models2[m] (a different model list), so results were
	# stored under the wrong model names; index plus_models2 instead
	assign(paste(list_var_evap[v],"_",plus_models2[m],"_year" , sep=""), buff )
	rm(bob); rm(buff) }
########################################################## Evspsblsoi #############################################
### Download the data from PCMDI
### We take Esoil to correct some NCAR models:
list_models_evspsblsoi_PCMDI<-c("ACCESS1-0","ACCESS1-3","bcc-csm1-1","bcc-csm1-1-m","BNU-ESM","CCSM4","CESM1-BGC","CESM1-CAM5","CESM1-FASTCHEM","CESM1-WACCM","CMCC-CESM","CMCC-CM","CNRM-CM5","CNRM-CM5-2","CanESM2","FGOALS-g2","FGOALS-s2","FIO-ESM","GFDL-ESM2G","GFDL-ESM2M","GISS-E2-H","GISS-E2-H-CC","GISS-E2-R","GISS-E2-R-CC","HadGEM2-AO","inmcm4","IPSL-CM5A-LR","IPSL-CM5A-MR","IPSL-CM5B-LR","MIROC-ESM","MIROC-ESM-CHEM","MIROC4h","MIROC5","MRI-CGCM3","MRI-ESM1","NorESM1-M","NorESM1-ME")
list_models_evspsblsoi_PCMDI2<-c("ACCESS1_0","ACCESS1_3","bcc_csm1_1","bcc_csm1_1_m","BNU_ESM","CCSM4","CESM1_BGC","CESM1_CAM5","CESM1_FASTCHEM","CESM1_WACCM","CMCC_CESM","CMCC_CM","CNRM_CM5","CNRM_CM5_2","CanESM2","FGOALS_g2","FGOALS_s2","FIO_ESM","GFDL_ESM2G","GFDL_ESM2M","GISS_E2_H","GISS_E2_H_CC","GISS_E2_R","GISS_E2_R_CC","HadGEM2_AO","inmcm4","IPSL_CM5A_LR","IPSL_CM5A_MR","IPSL_CM5B_LR","MIROC_ESM","MIROC_ESM_CHEM","MIROC4h","MIROC5","MRI_CGCM3","MRI_ESM1","NorESM1_M","NorESM1_ME")
v <- 3
print(list_var_evap[v])
for (m in seq_along(list_models_evspsblsoi_PCMDI)){ #models
	data <- nc_open(paste("/home/air3/ab5/CMIP5_data/",list_var_evap[v], "/", list_models_evspsblsoi_PCMDI[m],"/",list_var_evap[v],"_Lmon_",list_models_evspsblsoi_PCMDI[m],"_historical_r1i1p1_allmonths.nc", sep=""))
	# keep only the last 56 years (56*12 monthly slices) of the record
	bob <- ncvar_get(data, list_var_evap[v])[,,(data$dim$time$len-56*12+1):data$dim$time$len]
	nc_close(data)
	buff <- array(NA, dim=c(dim(bob)[1],dim(bob)[2], 56))
	for (t in 1:56){buff[,,t] <- apply(bob[ ,, (12*(t-1)+1):(12*(t-1)+12)], c(1,2), mean, na.rm=TRUE)}
	# bug fix: was names_models2[m] (the 'tran' model list left over from the
	# previous section), which stored results under wrong/mismatched names;
	# index the evspsblsoi list instead
	assign(paste(list_var_evap[v],"_",list_models_evspsblsoi_PCMDI2[m],"_year" , sep=""), buff )
	rm(bob); rm(buff) }
## We have to correct GFDL-ESM2M - We take it from LDEO - also FGOALS-g2:
# Indices 16 and 20 are FGOALS-g2 and GFDL-ESM2M in list_models_evspsblsoi_PCMDI.
v <- 3
for (m in c(16, 20)){
	print(list_models_evspsblsoi_PCMDI[m])
	## We need to get that model:
	# note: the LDEO OPeNDAP server names the time dimension 'T' (capital)
	data <- nc_open(paste("http://strega.ldeo.columbia.edu:81/CMIP5/.byScenario/.historical/.land/.mon/.", list_var_evap[v], "/.", list_models_evspsblsoi_PCMDI[m],"/.r1i1p1/.",list_var_evap[v],"/dods", sep=""))
	bob <- ncvar_get(data, list_var_evap[v])[,,(data$dim$T$len-56*12+1):data$dim$T$len]
	nc_close(data)
	buff <- array(NA, dim=c(dim(bob)[1],dim(bob)[2], 56))
	for (t in 1:56){buff[,,t] <- apply(bob[ ,, (12*(t-1)+1):(12*(t-1)+12)], c(1,2), mean, na.rm=TRUE)}
	# bug fix: was names_models2[m] (the 'tran' model list); index the
	# evspsblsoi list so the corrected data overwrite the right objects
	assign(paste(list_var_evap[v],"_",list_models_evspsblsoi_PCMDI2[m],"_year" , sep=""), buff )
	rm(bob); rm(buff)
}  # bug fix: this closing brace was missing, leaving the for loop unterminated
########################################################## Evspsblveg #############################################
### Not needed here.
#################################################################################################
### We have to correct some NCAR models: what we have is TRAN+EVSPSBLSOI. Substracting EVSPSBLSOI
# NOTE(review): the arrays built above were assigned with a '_year' suffix
# (e.g. tran_CCSM4_year, evspsblsoi_CCSM4_year), but these corrections use
# unsuffixed names (tran_CCSM4, evspsblsoi_CCSM4). Confirm those unsuffixed
# objects are created earlier in the file; otherwise these lines fail or
# correct the wrong (monthly) objects.
tran_CCSM4 <- tran_CCSM4 - evspsblsoi_CCSM4
tran_CESM1_BGC <- tran_CESM1_BGC - evspsblsoi_CESM1_BGC
tran_CESM1_FASTCHEM <- tran_CESM1_FASTCHEM - evspsblsoi_CESM1_FASTCHEM
tran_CESM1_WACCM <- tran_CESM1_WACCM - evspsblsoi_CESM1_WACCM
tran_CESM1_CAM5 <- tran_CESM1_CAM5 - evspsblsoi_CESM1_CAM5
# clamp negative transpiration produced by the subtraction to zero
tran_CESM1_CAM5[which(tran_CESM1_CAM5 < 0 )] <- 0
# Save one RData file of annual transpiration per model.
# NOTE(review): objects are read with list_models_tran_PCMDI2 but files are
# named with list_models_tran_PCMDI_fut2 -- confirm the two vectors are
# aligned element-by-element, or the files get mismatched model names.
for (m in 1:length(list_models_tran_PCMDI)){
print(list_models_tran_PCMDI[m])
tran_year <- get(paste("tran_",list_models_tran_PCMDI2[m],"_year" , sep=""))
save(tran_year, file=paste("tran_", list_models_tran_PCMDI_fut2[m],"_year.RData", sep="")) }
|
# Author: Robert J. Hijmans, r.hijmans@gmail.com
# Date: December 2009
# Version 0.1
# Licence GPL v3
# S4 class for a single fitted MaxEnt model (extends DistModel).
# lambdas: contents of the Maxent .lambdas file (model coefficients)
# results: transposed maxentResults.csv
# path:    output directory used by the java program
# html:    path to the generated html report
setClass('MaxEnt',
	contains = 'DistModel',
	representation (
		lambdas = 'vector',
		results = 'matrix',
		path = 'character',
		html = 'character'
	),
	prototype (
		lambdas = as.vector(NA),
		results = as.matrix(NA),
		path = '',
		html = ''
	)
	# removed the trailing comma after prototype(): it passed an empty
	# positional argument into setClass()
)
# S4 class holding the result of a replicated MaxEnt run.
# models:  list of 'MaxEnt' objects, one per replicate
# results: per-replicate results matrix (from maxentResults.csv)
# html:    path to the combined html report
setClass('MaxEntReplicates',
	representation (
		models = 'list',
		results = 'matrix',
		html = 'character'
	),
	prototype (
		models = list(),
		results = as.matrix(NA),
		html = ''
	)
	# removed the trailing comma after prototype(): it passed an empty
	# positional argument into setClass()
)
# show() for MaxEntReplicates: print a one-line summary and, when the
# combined html report still exists on disk, open it in the browser.
setMethod ('show' , 'MaxEntReplicates',
	function(object) {
		cat('class :' , class(object), '\n')
		cat('replicates:', length(object@models), '\n')
		if (!file.exists(object@html)) {
			cat('output html file no longer exists\n')
		} else {
			browseURL( paste("file:///", object@html, sep='') )
		}
	}
)
# show() for MaxEnt: print class and predictor names, then open the html
# report in the browser when it still exists on disk.
# (A long block of commented-out verbose printing of presence/absence
# points and model fit was dropped; only the summary + report remain.)
setMethod ('show' , 'MaxEnt',
	function(object) {
		cat('class :' , class(object), '\n')
		cat('variables:', colnames(object@presence), '\n')
		if (!file.exists(object@html)) {
			cat('output html file no longer exists\n')
		} else {
			browseURL( paste("file:///", object@html, sep='') )
		}
	}
)
# Define the 'maxent' S4 generic, unless another package already did.
if (!isGeneric("maxent")) {
setGeneric("maxent", function(x, p, ...)
standardGeneric("maxent"))
}
# Load rJava and attach the dismo jar, at most once per session.
# The option 'dismo_rJavaLoaded' acts as the session-level guard.
.rJava <- function() {
	if (!is.null(getOption('dismo_rJavaLoaded'))) {
		return(invisible(NULL))
	}
	# to avoid trouble on macs
	Sys.setenv(NOAWT=TRUE)
	if (!requireNamespace('rJava')) {
		stop('rJava cannot be loaded')
	}
	rJava::.jpackage('dismo')
	options(dismo_rJavaLoaded=TRUE)
}
# Locate the bundled maxent.jar and query its version through rJava.
# Stops with a download hint when the jar is missing or older than 3.3.3b.
# @return the Maxent version string
.getMeVersion <- function() {
	jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
	if (!file.exists(jar)) {
		stop('file missing:\n', jar, '.\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/')
	}
	.rJava()
	mxe <- rJava::.jnew("meversion")
	v <- try(rJava::.jcall(mxe, "S", "meversion") )
	# bug fix: inherits() instead of class(v) == 'try-error'; class() can
	# return a vector, making '==' unreliable inside if()
	if (inherits(v, 'try-error')) {
		stop('"dismo" needs a more recent version of Maxent (3.3.3b or later) \nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/
\n and put it in this folder:\n',
		system.file("java", package="dismo"))
	} else if (v == '3.3.3a') {
		stop("please update your maxent program to version 3.3.3b or later. This version is no longer supported. \nYou can download it here: http://www.cs.princeton.edu/~schapire/maxent/'")
	}
	return(v)
}
# maxent() called with no arguments: check that the Maxent java program is
# available/recent enough and report its version. Returns TRUE, invisibly.
setMethod('maxent', signature(x='missing', p='missing'),
function(x, p, silent=FALSE, ...) {
v <- .getMeVersion()
if (!silent) {
cat('This is MaxEnt version', v, '\n' )
}
invisible(TRUE)
}
)
# maxent for SpatialGridDataFrame predictors: record which columns are
# categorical (factor/character), convert the grid to a RasterBrick, and
# dispatch to the Raster method.
setMethod('maxent', signature(x='SpatialGridDataFrame', p='ANY'),
	function(x, p, a=NULL,...) {
		factors <- NULL
		for (i in seq_len(ncol(x@data))) {
			# '||' (scalar, short-circuit) instead of '|': both operands are
			# length-1 logicals here
			if (is.factor(x@data[,i]) || is.character(x@data[,i])) {
				factors <- c(factors, colnames(x@data)[i])
			}
		}
		x <- brick(x)
		p <- .getMatrix(p)
		if (! is.null(a) ) {
			a <- .getMatrix(a)
		}
		# Signature = raster, ANY
		maxent(x, p, a, factors=factors, ...)
	}
)
# Coerce presence/absence coordinates to a two-column data.frame with
# columns 'x' and 'y'. Accepts SpatialPoints*, matrix, or data.frame input;
# stops otherwise, or when there are not exactly 2 columns.
.getMatrix <- function(x) {
	if (inherits(x, 'SpatialPoints')) {
		x <- data.frame(coordinates(x))
	} else if (inherits(x, 'matrix')) {
		x <- data.frame(x)
	}
	# bug fix: inherits() instead of '! class(x) == "data.frame"'; class()
	# can return a vector (e.g. tbl_df), which breaks the '==' comparison
	if (!inherits(x, 'data.frame')) {
		stop('data should be a matrix, data.frame, or SpatialPoints* object')
	}
	if (ncol(x) != 2) {
		stop('presence or absence coordinates data should be a matrix or data.frame with 2 columns' )
	}
	colnames(x) <- c('x', 'y')
	return(x)
}
# maxent for Raster predictors: extract predictor values at the presence
# points and at the absence points (or at random background points when
# 'a' is NULL), then dispatch to the data.frame method.
# removeDuplicates: keep at most one presence record per raster cell.
# nbg: number of random background points when no absence data is given.
setMethod('maxent', signature(x='Raster', p='ANY'),
	function(x, p, a=NULL, factors=NULL, removeDuplicates=TRUE, nbg=10000, ...) {
		p <- .getMatrix(p)
		if (removeDuplicates) {
			cells <- unique(cellFromXY(x, p))
			pv <- data.frame(extract(x, cells))
		} else {
			pv <- data.frame(extract(x, p))
		}
		# drop presence records with any NA predictor; complain if many
		lpv <- nrow(pv)
		pv <- stats::na.omit(pv)
		nas <- lpv - nrow(pv)
		if (nas > 0) {
			if (nas >= 0.5 * lpv) {
				stop('more than half of the presence points have NA predictor values')
			} else {
				warning(nas, ' (', round(100*nas/lpv,2), '%) of the presence points have NA predictor values')
			}
		}
		if (! is.null(a) ) {
			a <- .getMatrix(a)
			av <- data.frame(extract(x, a))
			avr <- nrow(av)
			av <- stats::na.omit(av)
			nas <- length(as.vector(attr(av, "na.action")))
			if (nas > 0) {
				if (nas >= 0.5 * avr) {
					stop('more than half of the absence points have NA predictor values')
				} else {
					# bug fix: this warning said 'presence points' but it refers
					# to the absence data
					warning(nas, ' (', round(100*nas/avr, 2), '%) of the absence points have NA predictor values')
				}
			}
		} else {
			# no absence data: sample random background points instead
			if (is.null(nbg)) {
				nbg <- 10000
			} else {
				if (nbg < 100) {
					stop('number of background points is very low')
				} else if (nbg < 1000) {
					warning('number of background points is very low')
				}
			}
			if (nlayers(x) > 1) {
				xy <- randomPoints( raster(x,1), nbg, p, warn=0 )
			} else {
				xy <- randomPoints(x, nbg, p, warn=0 )
			}
			av <- data.frame(extract(x, xy))
			av <- stats::na.omit(av)
			if (nrow(av) == 0) {
				stop('could not get valid background point values; is there a layer with only NA values?')
			}
			if (nrow(av) < 100) {
				stop('only got:', nrow(av), 'random background point values; is there a layer with many NA values?')
			}
			if (nrow(av) < 1000) {
				# typo fix in user-facing message: 'exent' -> 'extent'
				warning('only got:', nrow(av), 'random background point values; Small extent? Or is there a layer with many NA values?')
			}
		}
		# Signature = data.frame, vector
		x <- rbind(pv, av)
		if (!is.null(factors)) {
			for (f in factors) {
				x[,f] <- factor(x[,f])
			}
		}
		p <- c(rep(1, nrow(pv)), rep(0, nrow(av)))
		maxent(x, p, ...)
	}
)
# Parse the number of Maxent replicates from the 'args' character vector.
# Looks for an entry starting with 'replicates' (i.e. 'replicates=<k>');
# defaults to 1 when args is NULL or no such entry exists.
.getreps <- function(args) {
	if (is.null(args)) {
		return(1)
	}
	args <- trim(args)
	hit <- which(substr(args, 1, 10) == 'replicates')
	# isTRUE() also guards against zero or multiple matches
	if (!isTRUE(hit > 0)) {
		return(1)
	}
	value <- strsplit(args[hit], '=')[[1]][[2]]
	as.integer(value)
}
# maxent for a predictor data.frame 'x' and a 0/1 presence vector 'p':
# write SWD-format input files, run the bundled maxent.jar via rJava, and
# read the results back. Returns a 'MaxEnt' object, or a 'MaxEntReplicates'
# object when args include 'replicates=<k>' with k > 1.
# args:   character vector of extra Maxent command-line arguments
# path:   output directory; a random temp subdirectory when missing
setMethod('maxent', signature(x='data.frame', p='vector'),
	function(x, p, args=NULL, path, silent=FALSE, ...) {
		MEversion <- .getMeVersion()
		x <- cbind(p, x)
		x <- stats::na.omit(x)
		x[is.na(x)] <- -9999 # maxent flag for NA, unless changed with args(nodata= ), so we should check for that rather than use this fixed value.
		p <- x[,1]
		x <- x[, -1 ,drop=FALSE]
		factors <- NULL
		for (i in seq_len(ncol(x))) {
			# is.factor() instead of class(x[,i]) == 'factor' (robust when a
			# column carries more than one class)
			if (is.factor(x[,i])) {
				factors <- c(factors, colnames(x)[i])
			}
		}
		if (!missing(path)) {
			path <- trim(path)
			dir.create(path, recursive=TRUE, showWarnings=FALSE)
			if (!file.exists(path)) {
				stop('cannot create output directory: ', path)
			}
			dirout <- path
		} else {
			# no path given: write to a random subdirectory of the maxent temp dir
			dirout <- .meTmpDir()
			f <- paste(round(runif(10)*10), collapse="")
			dirout <- paste(dirout, '/', f, sep='')
			dir.create(dirout, recursive=TRUE, showWarnings=FALSE)
			if (! file.exists(dirout)) {
				stop('cannot create output directory: ', f)
			}
		}
		pv <- x[p==1, ,drop=FALSE]
		av <- x[p==0, ,drop=FALSE]
		me <- new('MaxEnt')
		me@presence <- pv
		me@absence <- av
		me@hasabsence <- TRUE
		me@path <- dirout
		# Maxent's SWD files need species/x/y columns; coordinates are dummies
		pv <- cbind(data.frame(species='species'), x=1:nrow(pv), y=1:nrow(pv), pv)
		av <- cbind(data.frame(species='background'), x=1:nrow(av), y=1:nrow(av), av)
		pfn <- paste(dirout, '/presence', sep="")
		afn <- paste(dirout, '/absence', sep="")
		write.table(pv, file=pfn, sep=',', row.names=FALSE)
		write.table(av, file=afn, sep=',', row.names=FALSE)
		mxe <- rJava::.jnew("mebridge")
		replicates <- .getreps(args)
		args <- c("-z", args)
		if (is.null(factors)) {
			str <- rJava::.jcall(mxe, "S", "fit", c("autorun", "-e", afn, "-o", dirout, "-s", pfn, args))
		} else {
			str <- rJava::.jcall(mxe, "S", "fit", c("autorun", "-e", afn, "-o", dirout, "-s", pfn, args), rJava::.jarray(factors))
		}
		if (!is.null(str)) {
			stop("args not understood:\n", str)
		}
		# rewrite a Maxent html report (title, header, version line) from
		# 'infile' into 'outfile' and return the output path; this was
		# copy-pasted three times below
		patchHtml <- function(infile, outfile) {
			html <- readLines(infile)
			html[1] <- "<title>Maxent model</title>"
			html[2] <- "<CENTER><H1>Maxent model</H1></CENTER>"
			html[3] <- sub("model for species", "model result", html[3])
			newtext <- paste("using 'dismo' version ", packageDescription('dismo')$Version, "& Maxent version")
			html[3] <- sub("using Maxent version", newtext, html[3])
			writeLines(html, outfile)
			outfile
		}
		if (replicates > 1) {
			mer <- new('MaxEntReplicates')
			# maxentResults.csv: one row per replicate (plus summary); transpose
			# so columns correspond to replicates
			d <- t(read.csv(paste(dirout, '/maxentResults.csv', sep='') ))
			d1 <- d[1,]
			d <- d[-1, ,drop=FALSE]
			dd <- matrix(as.numeric(d), ncol=ncol(d))
			rownames(dd) <- rownames(d)
			colnames(dd) <- d1
			mer@results <- dd
			mer@html <- patchHtml(paste(dirout, "/species.html", sep=''),
			                      paste(dirout, "/maxent.html", sep=''))
			for (i in 0:(replicates-1)) {
				mex <- me
				mex@lambdas <- unlist( readLines( paste(dirout, '/species_', i, '.lambdas', sep='') ) )
				mex@html <- patchHtml(paste(mex@path, "/species_", i, ".html", sep=''),
				                      paste(mex@path, "/maxent_", i, ".html", sep=''))
				mer@models[[i+1]] <- mex
				# bug fix: was dd[, 1+1, drop=FALSE], so every replicate got
				# column 2; replicate i must get its own results column i+1
				mer@models[[i+1]]@results <- dd[, i+1, drop=FALSE]
			}
			return(mer)
		} else {
			me@lambdas <- unlist( readLines( paste(dirout, '/species.lambdas', sep='') ) )
			d <- t(read.csv(paste(dirout, '/maxentResults.csv', sep='') ))
			d <- d[-1, ,drop=FALSE]
			dd <- matrix(as.numeric(d))
			rownames(dd) <- rownames(d)
			me@results <- dd
			me@html <- patchHtml(paste(me@path, "/species.html", sep=''),
			                     paste(me@path, "/maxent.html", sep=''))
		}
		me
	}
)
# Directory used for maxent temporary output, inside the raster temp dir.
.meTmpDir <- function() {
	paste0(raster::tmpDir(), 'maxent')
}
# Delete everything inside the maxent temporary directory.
.maxentRemoveTmpFiles <- function() {
	d <- .meTmpDir()
	# dir.exists(): we are testing for a directory, not a plain file
	if (dir.exists(d)) {
		unlink(paste0(d, "/*"), recursive = TRUE)
	}
}
setMethod("plot", signature(x='MaxEnt', y='missing'),
function(x, sort=TRUE, main='Variable contribution', xlab='Percentage', ...) {
r <- x@results
rnames <- rownames(r)
i <- grep('.contribution', rnames)
r <- r[i, ]
names(r) <- gsub('.contribution', '', names(r))
if (sort) {
r <- sort(r)
}
dotchart(r, main=main, xlab=xlab, ...)
invisible(r)
}
)
| /dismo/R/maxent.R | no_license | ingted/R-Examples | R | false | false | 11,678 | r | # Author: Robert J. Hijmans, r.hijmans@gmail.com
# Date: December 2009
# Version 0.1
# Licence GPL v3
# S4 class for a single fitted MaxEnt model (extends DistModel).
# lambdas: contents of the Maxent .lambdas file (model coefficients)
# results: transposed maxentResults.csv
# path:    output directory used by the java program
# html:    path to the generated html report
setClass('MaxEnt',
	contains = 'DistModel',
	representation (
		lambdas = 'vector',
		results = 'matrix',
		path = 'character',
		html = 'character'
	),
	prototype (
		lambdas = as.vector(NA),
		results = as.matrix(NA),
		path = '',
		html = ''
	)
	# removed the trailing comma after prototype(): it passed an empty
	# positional argument into setClass()
)
# S4 class holding the result of a replicated MaxEnt run.
# models:  list of 'MaxEnt' objects, one per replicate
# results: per-replicate results matrix (from maxentResults.csv)
# html:    path to the combined html report
setClass('MaxEntReplicates',
	representation (
		models = 'list',
		results = 'matrix',
		html = 'character'
	),
	prototype (
		models = list(),
		results = as.matrix(NA),
		html = ''
	)
	# removed the trailing comma after prototype(): it passed an empty
	# positional argument into setClass()
)
setMethod ('show' , 'MaxEntReplicates',
function(object) {
cat('class :' , class(object), '\n')
cat('replicates:', length(object@models), '\n')
if (file.exists(object@html)) {
browseURL( paste("file:///", object@html, sep='') )
} else {
cat('output html file no longer exists\n')
}
}
)
setMethod ('show' , 'MaxEnt',
function(object) {
cat('class :' , class(object), '\n')
cat('variables:', colnames(object@presence), '\n')
# cat('lambdas\n')
# print(object@lambdas)
# pp <- nrow(object@presence)
# cat('\npresence points:', pp, '\n')
# if (pp < 5) {
# print(object@presence)
# } else {
# print(object@presence[1:5,])
# cat(' (... ... ...)\n')
# cat('\n')
# }
# pp <- nrow(object@absence)
# cat('\nabsence points:', pp, '\n')
# if (pp < 5) {
# print(object@absence)
# } else {
# print(object@absence[1:5,])
# cat(' (... ... ...)\n')
# cat('\n')
# }
# cat('\nmodel fit\n')
# print(object@results)
# cat('\n')
if (file.exists(object@html)) {
browseURL( paste("file:///", object@html, sep='') )
} else {
cat('output html file no longer exists\n')
}
}
)
if (!isGeneric("maxent")) {
setGeneric("maxent", function(x, p, ...)
standardGeneric("maxent"))
}
.rJava <- function() {
if (is.null(getOption('dismo_rJavaLoaded'))) {
# to avoid trouble on macs
Sys.setenv(NOAWT=TRUE)
if ( requireNamespace('rJava') ) {
rJava::.jpackage('dismo')
options(dismo_rJavaLoaded=TRUE)
} else {
stop('rJava cannot be loaded')
}
}
}
# Locate the bundled maxent.jar and query its version through rJava.
# Stops with a download hint when the jar is missing or older than 3.3.3b.
# @return the Maxent version string
.getMeVersion <- function() {
	jar <- paste(system.file(package="dismo"), "/java/maxent.jar", sep='')
	if (!file.exists(jar)) {
		stop('file missing:\n', jar, '.\nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/')
	}
	.rJava()
	mxe <- rJava::.jnew("meversion")
	v <- try(rJava::.jcall(mxe, "S", "meversion") )
	# bug fix: inherits() instead of class(v) == 'try-error'; class() can
	# return a vector, making '==' unreliable inside if()
	if (inherits(v, 'try-error')) {
		stop('"dismo" needs a more recent version of Maxent (3.3.3b or later) \nPlease download it here: http://www.cs.princeton.edu/~schapire/maxent/
\n and put it in this folder:\n',
		system.file("java", package="dismo"))
	} else if (v == '3.3.3a') {
		stop("please update your maxent program to version 3.3.3b or later. This version is no longer supported. \nYou can download it here: http://www.cs.princeton.edu/~schapire/maxent/'")
	}
	return(v)
}
setMethod('maxent', signature(x='missing', p='missing'),
function(x, p, silent=FALSE, ...) {
v <- .getMeVersion()
if (!silent) {
cat('This is MaxEnt version', v, '\n' )
}
invisible(TRUE)
}
)
setMethod('maxent', signature(x='SpatialGridDataFrame', p='ANY'),
function(x, p, a=NULL,...) {
factors = NULL
for (i in 1:ncol(x@data)) {
if (is.factor(x@data[,i]) | is.character(x@data[,i])) {
factors = c(factors, colnames(x@data)[i])
}
}
x <- brick(x)
p <- .getMatrix(p)
if (! is.null(a) ) {
a <- .getMatrix(a)
}
# Signature = raster, ANY
maxent(x, p, a, factors=factors, ...)
}
)
# Coerce presence/absence coordinates to a two-column data.frame with
# columns 'x' and 'y'. Accepts SpatialPoints*, matrix, or data.frame input;
# stops otherwise, or when there are not exactly 2 columns.
.getMatrix <- function(x) {
	if (inherits(x, 'SpatialPoints')) {
		x <- data.frame(coordinates(x))
	} else if (inherits(x, 'matrix')) {
		x <- data.frame(x)
	}
	# bug fix: inherits() instead of '! class(x) == "data.frame"'; class()
	# can return a vector (e.g. tbl_df), which breaks the '==' comparison
	if (!inherits(x, 'data.frame')) {
		stop('data should be a matrix, data.frame, or SpatialPoints* object')
	}
	if (ncol(x) != 2) {
		stop('presence or absence coordinates data should be a matrix or data.frame with 2 columns' )
	}
	colnames(x) <- c('x', 'y')
	return(x)
}
setMethod('maxent', signature(x='Raster', p='ANY'),
function(x, p, a=NULL, factors=NULL, removeDuplicates=TRUE, nbg=10000, ...) {
p <- .getMatrix(p)
if (removeDuplicates) {
cells <- unique(cellFromXY(x, p))
pv <- data.frame(extract(x, cells))
} else {
pv <- data.frame(extract(x, p))
}
lpv <- nrow(pv)
pv <- stats::na.omit(pv)
nas <- lpv - nrow(pv)
if (nas > 0) {
if (nas >= 0.5 * lpv) {
stop('more than half of the presence points have NA predictor values')
} else {
warning(nas, ' (', round(100*nas/lpv,2), '%) of the presence points have NA predictor values')
}
}
if (! is.null(a) ) {
a <- .getMatrix(a)
av <- data.frame(extract(x, a))
avr <- nrow(av)
av <- stats::na.omit(av)
nas <- length(as.vector(attr(av, "na.action")))
if (nas > 0) {
if (nas >= 0.5 * avr) {
stop('more than half of the absence points have NA predictor values')
} else {
warning(nas, ' (', round(100*nas/avr, 2), '%) of the presence points have NA predictor values')
}
}
} else {
# random absence
if (is.null(nbg)) {
nbg <- 10000
} else {
if (nbg < 100) {
stop('number of background points is very low')
} else if (nbg < 1000) {
warning('number of background points is very low')
}
}
if (nlayers(x) > 1) {
xy <- randomPoints( raster(x,1), nbg, p, warn=0 )
} else {
xy <- randomPoints(x, nbg, p, warn=0 )
}
av <- data.frame(extract(x, xy))
av <- stats::na.omit(av)
if (nrow(av) == 0) {
stop('could not get valid background point values; is there a layer with only NA values?')
}
if (nrow(av) < 100) {
stop('only got:', nrow(av), 'random background point values; is there a layer with many NA values?')
}
if (nrow(av) < 1000) {
warning('only got:', nrow(av), 'random background point values; Small exent? Or is there a layer with many NA values?')
}
}
# Signature = data.frame, missing
x <- rbind(pv, av)
if (!is.null(factors)) {
for (f in factors) {
x[,f] <- factor(x[,f])
}
}
p <- c(rep(1, nrow(pv)), rep(0, nrow(av)))
maxent(x, p, ...)
}
)
# Parse the number of Maxent replicates from the 'args' character vector.
# Looks for an entry starting with 'replicates' (i.e. 'replicates=<k>');
# defaults to 1 when args is NULL or no such entry exists.
.getreps <- function(args) {
	if (is.null(args)) {
		return(1)
	}
	args <- trim(args)
	hit <- which(substr(args, 1, 10) == 'replicates')
	# isTRUE() also guards against zero or multiple matches
	if (!isTRUE(hit > 0)) {
		return(1)
	}
	value <- strsplit(args[hit], '=')[[1]][[2]]
	as.integer(value)
}
# maxent for a predictor data.frame 'x' and a 0/1 presence vector 'p':
# write SWD-format input files, run the bundled maxent.jar via rJava, and
# read the results back. Returns a 'MaxEnt' object, or a 'MaxEntReplicates'
# object when args include 'replicates=<k>' with k > 1.
# args:   character vector of extra Maxent command-line arguments
# path:   output directory; a random temp subdirectory when missing
setMethod('maxent', signature(x='data.frame', p='vector'),
	function(x, p, args=NULL, path, silent=FALSE, ...) {
		MEversion <- .getMeVersion()
		x <- cbind(p, x)
		x <- stats::na.omit(x)
		x[is.na(x)] <- -9999 # maxent flag for NA, unless changed with args(nodata= ), so we should check for that rather than use this fixed value.
		p <- x[,1]
		x <- x[, -1 ,drop=FALSE]
		factors <- NULL
		for (i in seq_len(ncol(x))) {
			# is.factor() instead of class(x[,i]) == 'factor' (robust when a
			# column carries more than one class)
			if (is.factor(x[,i])) {
				factors <- c(factors, colnames(x)[i])
			}
		}
		if (!missing(path)) {
			path <- trim(path)
			dir.create(path, recursive=TRUE, showWarnings=FALSE)
			if (!file.exists(path)) {
				stop('cannot create output directory: ', path)
			}
			dirout <- path
		} else {
			# no path given: write to a random subdirectory of the maxent temp dir
			dirout <- .meTmpDir()
			f <- paste(round(runif(10)*10), collapse="")
			dirout <- paste(dirout, '/', f, sep='')
			dir.create(dirout, recursive=TRUE, showWarnings=FALSE)
			if (! file.exists(dirout)) {
				stop('cannot create output directory: ', f)
			}
		}
		pv <- x[p==1, ,drop=FALSE]
		av <- x[p==0, ,drop=FALSE]
		me <- new('MaxEnt')
		me@presence <- pv
		me@absence <- av
		me@hasabsence <- TRUE
		me@path <- dirout
		# Maxent's SWD files need species/x/y columns; coordinates are dummies
		pv <- cbind(data.frame(species='species'), x=1:nrow(pv), y=1:nrow(pv), pv)
		av <- cbind(data.frame(species='background'), x=1:nrow(av), y=1:nrow(av), av)
		pfn <- paste(dirout, '/presence', sep="")
		afn <- paste(dirout, '/absence', sep="")
		write.table(pv, file=pfn, sep=',', row.names=FALSE)
		write.table(av, file=afn, sep=',', row.names=FALSE)
		mxe <- rJava::.jnew("mebridge")
		replicates <- .getreps(args)
		args <- c("-z", args)
		if (is.null(factors)) {
			str <- rJava::.jcall(mxe, "S", "fit", c("autorun", "-e", afn, "-o", dirout, "-s", pfn, args))
		} else {
			str <- rJava::.jcall(mxe, "S", "fit", c("autorun", "-e", afn, "-o", dirout, "-s", pfn, args), rJava::.jarray(factors))
		}
		if (!is.null(str)) {
			stop("args not understood:\n", str)
		}
		# rewrite a Maxent html report (title, header, version line) from
		# 'infile' into 'outfile' and return the output path; this was
		# copy-pasted three times below
		patchHtml <- function(infile, outfile) {
			html <- readLines(infile)
			html[1] <- "<title>Maxent model</title>"
			html[2] <- "<CENTER><H1>Maxent model</H1></CENTER>"
			html[3] <- sub("model for species", "model result", html[3])
			newtext <- paste("using 'dismo' version ", packageDescription('dismo')$Version, "& Maxent version")
			html[3] <- sub("using Maxent version", newtext, html[3])
			writeLines(html, outfile)
			outfile
		}
		if (replicates > 1) {
			mer <- new('MaxEntReplicates')
			# maxentResults.csv: one row per replicate (plus summary); transpose
			# so columns correspond to replicates
			d <- t(read.csv(paste(dirout, '/maxentResults.csv', sep='') ))
			d1 <- d[1,]
			d <- d[-1, ,drop=FALSE]
			dd <- matrix(as.numeric(d), ncol=ncol(d))
			rownames(dd) <- rownames(d)
			colnames(dd) <- d1
			mer@results <- dd
			mer@html <- patchHtml(paste(dirout, "/species.html", sep=''),
			                      paste(dirout, "/maxent.html", sep=''))
			for (i in 0:(replicates-1)) {
				mex <- me
				mex@lambdas <- unlist( readLines( paste(dirout, '/species_', i, '.lambdas', sep='') ) )
				mex@html <- patchHtml(paste(mex@path, "/species_", i, ".html", sep=''),
				                      paste(mex@path, "/maxent_", i, ".html", sep=''))
				mer@models[[i+1]] <- mex
				# bug fix: was dd[, 1+1, drop=FALSE], so every replicate got
				# column 2; replicate i must get its own results column i+1
				mer@models[[i+1]]@results <- dd[, i+1, drop=FALSE]
			}
			return(mer)
		} else {
			me@lambdas <- unlist( readLines( paste(dirout, '/species.lambdas', sep='') ) )
			d <- t(read.csv(paste(dirout, '/maxentResults.csv', sep='') ))
			d <- d[-1, ,drop=FALSE]
			dd <- matrix(as.numeric(d))
			rownames(dd) <- rownames(d)
			me@results <- dd
			me@html <- patchHtml(paste(me@path, "/species.html", sep=''),
			                     paste(me@path, "/maxent.html", sep=''))
		}
		me
	}
)
# Directory used for maxent temporary output, inside the raster temp dir.
.meTmpDir <- function() {
	paste0(raster::tmpDir(), 'maxent')
}
# Delete everything inside the maxent temporary directory.
.maxentRemoveTmpFiles <- function() {
	d <- .meTmpDir()
	# dir.exists(): we are testing for a directory, not a plain file
	if (dir.exists(d)) {
		unlink(paste0(d, "/*"), recursive = TRUE)
	}
}
setMethod("plot", signature(x='MaxEnt', y='missing'),
function(x, sort=TRUE, main='Variable contribution', xlab='Percentage', ...) {
r <- x@results
rnames <- rownames(r)
i <- grep('.contribution', rnames)
r <- r[i, ]
names(r) <- gsub('.contribution', '', names(r))
if (sort) {
r <- sort(r)
}
dotchart(r, main=main, xlab=xlab, ...)
invisible(r)
}
)
|
########################################################
# 1. Load Libraries and Data
########################################################
library(raster)
library(lme4)
########################################################
# 2. Set Working Directory or Cluster Info
########################################################
if(Sys.info()["nodename"] == "IDIVNB193"){
setwd("C:\\restore2\\hp39wasi\\sWorm\\EarthwormAnalysis\\")
GLs_folder <- "I:\\sWorm\\ProcessedGLs\\Same_resolution\\regions"
models <- "Models"
}else{ ## i.e. cluster
args <- commandArgs(trailingOnly = TRUE)
GLs_folder <- args[1] # GLs_dir
models <- args[2] # models_dir
savefolder <- args[3] # output_dir
reg <- args[4] ## Which continent
print(GLs_folder)
print(models)
print(savefolder)
print(reg)
rasterOptions(tmpdir = "/work/phillips", chunksize = 524288, maxmemory = 134217728)
}
#################################################
# 3. Load in models
#################################################
print("Loading in the biodiversity models")
load(file.path(models, "richnessmodel_revised.rds"))
# load(file.path(models, "richnessmodel.rds"))
if(!dir.exists(file.path(savefolder, reg))){
dir.create(file.path(savefolder, reg))
}
# data_out <- file.path(savefolder, reg)
#################################################
# 4. Rerun model with different factor levels for ESA
#################################################
if(file.exists(file.path(models, "richnessmodel_revised_ESA.rds"))){
print("Model already exists")
load(file.path(models, "richnessmodel_revised_ESA.rds"))
}else{print("Re-running model with new ESA values....")
data <- richness_model@frame
levels(data$ESA)[levels(data$ESA) == 'Broadleaf deciduous forest'] <- "60"
levels(data$ESA)[levels(data$ESA) == 'Broadleaf evergreen forest'] <- "50"
levels(data$ESA)[levels(data$ESA) == 'Needleleaf evergreen forest'] <- "70"
levels(data$ESA)[levels(data$ESA) == 'Mixed forest'] <- "90"
levels(data$ESA)[levels(data$ESA) == 'Herbaceous with spare tree/shrub'] <- "110"
levels(data$ESA)[levels(data$ESA) == 'Shrub'] <- "120"
levels(data$ESA)[levels(data$ESA) == 'Herbaceous'] <- "130"
levels(data$ESA)[levels(data$ESA) == 'Production - Herbaceous'] <- "10"
levels(data$ESA)[levels(data$ESA) == 'Production - Plantation'] <- "12"
# levels(data$ESA)[levels(data$ESA) == 'Cropland/Other vegetation mosaic'] <- "30"
mod <- glmer(formula = richness_model@call$formula, data = data, family = "poisson",
control = glmerControl(optimizer = "bobyqa",optCtrl=list(maxfun=2e5)))
save(mod, file = file.path(models, "richnessmodel_revised_ESA.rds"))
}
#################################################
# 5. RICHNESS
#################################################
print("Creating richness raster")
print("Loading all rasters")
bio10_7_scaled <- raster(file.path(GLs_folder,reg, "scaled_Richness_bio10_7_.tif"))
dimensions <- dim(bio10_7_scaled)
resol <-res(bio10_7_scaled)
coordred <- crs(bio10_7_scaled)
exten <- extent(bio10_7_scaled)
bio10_7_scaled <- as.vector(bio10_7_scaled)
bio10_15_scaled <- raster(file.path(GLs_folder,reg, "scaled_Richness_bio10_15_.tif"))
bio10_15_scaled <- as.vector(bio10_15_scaled)
SnowMonths_cat <- raster(file.path(GLs_folder,reg, "Snow_newValues_WGS84.tif"))
SnowMonths_cat <- as.vector(SnowMonths_cat)
SnowMonths_cat <- as.factor(SnowMonths_cat)
levels(SnowMonths_cat)[levels(SnowMonths_cat) == "4"] <- "4plus"
scaleAridity <- raster(file.path(GLs_folder,reg, "scaled_Richness_ai_.tif"))
scaleAridity <- as.vector(scaleAridity)
ScalePET <- raster(file.path(GLs_folder,reg, "scaled_Richness_pet_.tif"))
ScalePET <- as.vector(ScalePET)
scalePH <- raster(file.path(GLs_folder,reg,"scaled_Richness_ph_.tif"))
scalePH <- as.vector(scalePH)
scaleElevation <- raster(file.path(GLs_folder,reg,"scaled_Richness_elevation_.tif"))
scaleElevation <- as.vector(scaleElevation)
scaleCLYPPT <- raster(file.path(GLs_folder,reg,"scaled_Richness_clay_.tif"))
scaleCLYPPT <- as.vector(scaleCLYPPT)
scaleSLTPPT <- raster(file.path(GLs_folder,reg,"scaled_Richness_silt_.tif"))
scaleSLTPPT <- as.vector(scaleSLTPPT)
scaleCECSOL <- raster(file.path(GLs_folder,reg,"scaled_Richness_cation_.tif"))
scaleCECSOL <- as.vector(scaleCECSOL)
scaleORCDRC <- raster(file.path(GLs_folder,reg,"scaled_Richness_carbon_.tif"))
scaleORCDRC <- as.vector(scaleORCDRC)
ESA <- raster(file.path(GLs_folder, reg, "ESA_newValuesCropped.tif"))
ESA <- as.vector(ESA)
keep <- c(60, 50, 70, 90, 110, 120, 130, 10, 12)
ESA <- ifelse(ESA %in% keep, ESA, NA)
ESA <- as.factor(ESA)
newdat <- data.frame(ESA = ESA,
scaleORCDRC = scaleORCDRC,
scaleCECSOL = scaleCECSOL,
scaleSLTPPT = scaleSLTPPT,
scaleCLYPPT = scaleCLYPPT,
scalePH = scalePH,
ScalePET = ScalePET,
scaleAridity = scaleAridity,
SnowMonths_cat = SnowMonths_cat,
bio10_15_scaled = bio10_15_scaled,
bio10_7_scaled = bio10_7_scaled,
scaleElevation = scaleElevation)
rm(list=c("bio10_7_scaled", "bio10_15_scaled", "SnowMonths_cat", "scaleAridity", "ScalePET",
"scalePH", "scaleCLYPPT", "scaleSLTPPT", "scaleCECSOL", "scaleORCDRC", "ESA", "scaleElevation"))
#############################################################
print("Splitting dataframe...")
library(data.table)
n <- 3000
# Generate n spreadsheet-style labels: "A".."Z", then "AA".."ZZ", and so on,
# recursing into longer labels until n labels have been produced.
letterwrap <- function(n, depth = 1) {
	pools <- replicate(depth, LETTERS, simplify = FALSE)
	grid <- do.call(expand.grid, args = list(pools, stringsAsFactors = FALSE))
	# reverse the columns so the leftmost letter varies slowest ("AA","AB",...)
	grid <- grid[, rev(names(grid)), drop = FALSE]
	labels <- do.call(paste0, grid)
	if (n <= length(labels)) {
		return(labels[1:n])
	}
	c(labels, letterwrap(n - length(labels), depth = depth + 1))
}
t <- nrow(newdat) %/% n
alp <- letterwrap(t, depth = ceiling(log(t, base = 26)))
last <- alp[length(alp)]
print("1")
t <- rep(alp, each = n)
rm(alp)
# more <- letterwrap(1, depth = nchar(last) + 1)
more <- rep("Z", length = nchar(last) + 1)
# Collapse the elements of a character vector into a single string,
# joined by 'sep' (note: paste's 'collapse', not its 'sep', is used).
implode <- function(..., sep='') {
paste(..., collapse=sep)
}
more <- implode(more)
print("2")
newdat$z <- c(t, rep(more, times = (nrow(newdat) - length(t))))
rm(more)
rm(t)
rm(n)
print("3")
newdat_t = as.data.table(newdat)
rm(newdat)
gc()
print("4")
#system.time(
x <- split(newdat_t, f = newdat_t$z)
#)
rm(newdat_t)
print("Predicting values...")
# x <- split(newdat, (0:nrow(newdat) %/% 10000)) # modulo division
for(l in 1:length(x)){
print(paste(l, "in", length(x), "iterations.."))
res <- predict(mod, x[[l]], re.form = NA)
write.table(res, file= file.path(savefolder, reg, "predictedValues.csv"),
append=TRUE, row.names = FALSE,
col.names = FALSE,
sep = ',')
}
res <- NULL
x <- NULL
# length(res) == nrow(newdat)
# need number of rows of the original raster
# The resolution
# the extent
# the coord.ref
# dimensions
# resol
print("Loading csv of predicted values and converting to vector....")
predValues <- read.csv(file.path(savefolder, reg, "predictedValues.csv"), header = FALSE)
predValues <- as.vector(predValues$V1)
print("Converting to raster...")
print(dimensions[1])
print(dimensions[2])
print(dimensions[1] * dimensions[2])
# dimensions <- c(3032, 3074)
r <- matrix(predValues, nrow = dimensions[1], ncol = dimensions[2], byrow = TRUE)
r <- raster(r)
print("Adding in the raster information")
extent(r) <- exten
# ... and assign a projection
projection(r) <- coordred
# Save raster
print("Saving raster...")
r <- writeRaster(r, filename= file.path(savefolder, reg, "spRFinalRaster.tif"), format="GTiff", overwrite=TRUE)
print("Done!")
| /10.1_MapCoefficients_spRichness.R | permissive | MaximilianPi/GlobalEWDiversity | R | false | false | 7,759 | r |
########################################################
# 1. Load Libraries and Data
########################################################
# Script: predict earthworm species richness for one region from pre-scaled
# environmental rasters and write the predictions out as a GeoTIFF.
library(raster)
library(lme4)
########################################################
# 2. Set Working Directory or Cluster Info
########################################################
# Local developer machine vs. HPC cluster run driven by CLI arguments.
if(Sys.info()["nodename"] == "IDIVNB193"){
setwd("C:\\restore2\\hp39wasi\\sWorm\\EarthwormAnalysis\\")
GLs_folder <- "I:\\sWorm\\ProcessedGLs\\Same_resolution\\regions"
models <- "Models"
# NOTE(review): `savefolder` and `reg` are only defined in the cluster
# branch below; the local branch would fail further down -- confirm intended.
}else{ ## i.e. cluster
args <- commandArgs(trailingOnly = TRUE)
GLs_folder <- args[1] # GLs_dir
models <- args[2] # models_dir
savefolder <- args[3] # output_dir
reg <- args[4] ## Which continent
print(GLs_folder)
print(models)
print(savefolder)
print(reg)
# Keep raster's temporary files on the scratch volume and cap memory usage.
rasterOptions(tmpdir = "/work/phillips", chunksize = 524288, maxmemory = 134217728)
}
#################################################
# 3. Load in models
#################################################
print("Loading in the biodiversity models")
# NOTE(review): load() expects an image written with save() despite the .rds
# extension; it must provide the `richness_model` object used below -- confirm.
load(file.path(models, "richnessmodel_revised.rds"))
# load(file.path(models, "richnessmodel.rds"))
if(!dir.exists(file.path(savefolder, reg))){
dir.create(file.path(savefolder, reg))
}
# data_out <- file.path(savefolder, reg)
#################################################
# 4. Rerun model with different factor levels for ESA
#################################################
# The prediction rasters code land cover numerically, so the model's
# descriptive ESA factor levels are remapped to those numeric codes and the
# model is refit once; the refit is cached on disk for later runs.
if(file.exists(file.path(models, "richnessmodel_revised_ESA.rds"))){
print("Model already exists")
load(file.path(models, "richnessmodel_revised_ESA.rds"))
}else{print("Re-running model with new ESA values....")
data <- richness_model@frame
levels(data$ESA)[levels(data$ESA) == 'Broadleaf deciduous forest'] <- "60"
levels(data$ESA)[levels(data$ESA) == 'Broadleaf evergreen forest'] <- "50"
levels(data$ESA)[levels(data$ESA) == 'Needleleaf evergreen forest'] <- "70"
levels(data$ESA)[levels(data$ESA) == 'Mixed forest'] <- "90"
levels(data$ESA)[levels(data$ESA) == 'Herbaceous with spare tree/shrub'] <- "110"
levels(data$ESA)[levels(data$ESA) == 'Shrub'] <- "120"
levels(data$ESA)[levels(data$ESA) == 'Herbaceous'] <- "130"
levels(data$ESA)[levels(data$ESA) == 'Production - Herbaceous'] <- "10"
levels(data$ESA)[levels(data$ESA) == 'Production - Plantation'] <- "12"
# levels(data$ESA)[levels(data$ESA) == 'Cropland/Other vegetation mosaic'] <- "30"
# Refit with the original formula on the relabelled frame.
mod <- glmer(formula = richness_model@call$formula, data = data, family = "poisson",
control = glmerControl(optimizer = "bobyqa",optCtrl=list(maxfun=2e5)))
save(mod, file = file.path(models, "richnessmodel_revised_ESA.rds"))
}
#################################################
# 5. RICHNESS
#################################################
print("Creating richness raster")
print("Loading all rasters")
bio10_7_scaled <- raster(file.path(GLs_folder,reg, "scaled_Richness_bio10_7_.tif"))
# Record the geometry of the first raster; it is reused at the end to turn
# the flat prediction vector back into a georeferenced raster.
dimensions <- dim(bio10_7_scaled)
resol <-res(bio10_7_scaled)
coordred <- crs(bio10_7_scaled)
exten <- extent(bio10_7_scaled)
# Each raster is collapsed to a plain vector of cell values to build one
# prediction row per cell.
bio10_7_scaled <- as.vector(bio10_7_scaled)
bio10_15_scaled <- raster(file.path(GLs_folder,reg, "scaled_Richness_bio10_15_.tif"))
bio10_15_scaled <- as.vector(bio10_15_scaled)
SnowMonths_cat <- raster(file.path(GLs_folder,reg, "Snow_newValues_WGS84.tif"))
SnowMonths_cat <- as.vector(SnowMonths_cat)
SnowMonths_cat <- as.factor(SnowMonths_cat)
# Relabel "4" to match the model's factor level. NOTE(review): values above
# 4, if present, keep their own levels -- confirm upstream recoding caps at 4.
levels(SnowMonths_cat)[levels(SnowMonths_cat) == "4"] <- "4plus"
scaleAridity <- raster(file.path(GLs_folder,reg, "scaled_Richness_ai_.tif"))
scaleAridity <- as.vector(scaleAridity)
ScalePET <- raster(file.path(GLs_folder,reg, "scaled_Richness_pet_.tif"))
ScalePET <- as.vector(ScalePET)
scalePH <- raster(file.path(GLs_folder,reg,"scaled_Richness_ph_.tif"))
scalePH <- as.vector(scalePH)
scaleElevation <- raster(file.path(GLs_folder,reg,"scaled_Richness_elevation_.tif"))
scaleElevation <- as.vector(scaleElevation)
scaleCLYPPT <- raster(file.path(GLs_folder,reg,"scaled_Richness_clay_.tif"))
scaleCLYPPT <- as.vector(scaleCLYPPT)
scaleSLTPPT <- raster(file.path(GLs_folder,reg,"scaled_Richness_silt_.tif"))
scaleSLTPPT <- as.vector(scaleSLTPPT)
scaleCECSOL <- raster(file.path(GLs_folder,reg,"scaled_Richness_cation_.tif"))
scaleCECSOL <- as.vector(scaleCECSOL)
scaleORCDRC <- raster(file.path(GLs_folder,reg,"scaled_Richness_carbon_.tif"))
scaleORCDRC <- as.vector(scaleORCDRC)
ESA <- raster(file.path(GLs_folder, reg, "ESA_newValuesCropped.tif"))
ESA <- as.vector(ESA)
# Keep only the land-cover codes present in the refit model; everything else
# becomes NA (and so will predict to NA).
keep <- c(60, 50, 70, 90, 110, 120, 130, 10, 12)
ESA <- ifelse(ESA %in% keep, ESA, NA)
ESA <- as.factor(ESA)
# One row per raster cell, columns named to match the model's predictors.
newdat <- data.frame(ESA = ESA,
scaleORCDRC = scaleORCDRC,
scaleCECSOL = scaleCECSOL,
scaleSLTPPT = scaleSLTPPT,
scaleCLYPPT = scaleCLYPPT,
scalePH = scalePH,
ScalePET = ScalePET,
scaleAridity = scaleAridity,
SnowMonths_cat = SnowMonths_cat,
bio10_15_scaled = bio10_15_scaled,
bio10_7_scaled = bio10_7_scaled,
scaleElevation = scaleElevation)
# Free the per-variable vectors now that they are copied into newdat.
rm(list=c("bio10_7_scaled", "bio10_15_scaled", "SnowMonths_cat", "scaleAridity", "ScalePET",
"scalePH", "scaleCLYPPT", "scaleSLTPPT", "scaleCECSOL", "scaleORCDRC", "ESA", "scaleElevation"))
#############################################################
print("Splitting dataframe...")
library(data.table)
# Chunk size (rows per prediction batch) to limit peak memory.
n <- 3000
letterwrap <- function(n, depth = 1) {
    # Generate `n` Excel-style labels (A..Z, then AA, AB, ...), `depth`
    # letters wide; recurses one letter wider when n exceeds 26^depth.
    alphabet_sets <- replicate(depth, LETTERS, simplify = FALSE)
    grid <- expand.grid(alphabet_sets, stringsAsFactors = FALSE)
    # Reverse the columns so the leftmost letter varies slowest.
    grid <- grid[, rev(names(grid)), drop = FALSE]
    labels <- do.call(paste0, grid)
    if (n <= length(labels)) {
        labels[1:n]
    } else {
        c(labels, letterwrap(n - length(labels), depth = depth + 1))
    }
}
# Number of complete n-row chunks in newdat.
t <- nrow(newdat) %/% n
# One alphabetic label per chunk, wide enough for t labels.
alp <- letterwrap(t, depth = ceiling(log(t, base = 26)))
last <- alp[length(alp)]
print("1")
# Expand to one label per row (each chunk label repeated n times).
t <- rep(alp, each = n)
rm(alp)
# more <- letterwrap(1, depth = nchar(last) + 1)
# Sentinel label for the leftover rows: all-"Z" string one character longer
# than the last chunk label, so it cannot collide with any chunk label.
more <- rep("Z", length = nchar(last) + 1)
implode <- function(..., sep = '') {
    # Collapse the input vector(s) into a single string joined by `sep`.
    collapsed <- paste(..., collapse = sep)
    collapsed
}
# Join the "Z" characters into a single sentinel label string.
more <- implode(more)
print("2")
# Chunk id per row: chunk labels for the complete chunks, the sentinel label
# for the remaining nrow(newdat) - length(t) rows.
newdat$z <- c(t, rep(more, times = (nrow(newdat) - length(t))))
rm(more)
rm(t)
rm(n)
print("3")
newdat_t = as.data.table(newdat)
rm(newdat)
gc()
print("4")
#system.time(
# Split into a list of per-chunk data.tables keyed by the z label.
x <- split(newdat_t, f = newdat_t$z)
#)
rm(newdat_t)
print("Predicting values...")
# x <- split(newdat, (0:nrow(newdat) %/% 10000)) # modulo division
# Predict chunk by chunk and append each batch to a CSV on disk, keeping
# memory bounded. re.form = NA requests fixed-effects-only predictions.
# NOTE(review): append=TRUE means rerunning without deleting the CSV first
# duplicates rows -- confirm the output dir is fresh per run.
for(l in 1:length(x)){
print(paste(l, "in", length(x), "iterations.."))
res <- predict(mod, x[[l]], re.form = NA)
write.table(res, file= file.path(savefolder, reg, "predictedValues.csv"),
append=TRUE, row.names = FALSE,
col.names = FALSE,
sep = ',')
}
res <- NULL
x <- NULL
# length(res) == nrow(newdat)
# need number of rows of the original raster
# The resolution
# the extent
# the coord.ref
# dimensions
# resol
print("Loading csv of predicted values and converting to vector....")
predValues <- read.csv(file.path(savefolder, reg, "predictedValues.csv"), header = FALSE)
predValues <- as.vector(predValues$V1)
print("Converting to raster...")
print(dimensions[1])
print(dimensions[2])
print(dimensions[1] * dimensions[2])
# dimensions <- c(3032, 3074)
# Rebuild the raster grid from the flat prediction vector; byrow = TRUE
# matches the row-major cell order of the input rasters.
r <- matrix(predValues, nrow = dimensions[1], ncol = dimensions[2], byrow = TRUE)
r <- raster(r)
print("Adding in the raster information")
# Restore the geometry captured from the first input raster.
extent(r) <- exten
# ... and assign a projection
projection(r) <- coordred
# Save raster
print("Saving raster...")
r <- writeRaster(r, filename= file.path(savefolder, reg, "spRFinalRaster.tif"), format="GTiff", overwrite=TRUE)
print("Done!")
|
#' The application User-Interface
#'
#' Builds the top-level UI: external resources plus a two-pane
#' (left/right module) fluid page.
#'
#' @param request Internal parameter for `{shiny}`.
#' DO NOT REMOVE.
#' @return A `tagList` containing the full UI definition.
#' @import shiny
#' @noRd
app_ui <- function(request) {
tagList(
# Leave this function for adding external resources
golem_add_external_resources(),
# List the first level UI elements here
fluidPage(
class = "split",
mod_left_ui("left_ui_1"),
mod_right_ui("right_ui_1")
)
)
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application.
#'
#' @return An HTML `head` tag bundling the favicon and the app's www assets.
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
golem_add_external_resources <- function(){
# Serve the package's app/www directory under the /www URL prefix.
add_resource_path(
'www', app_sys('app/www')
)
tags$head(
favicon(),
bundle_resources(
path = app_sys('app/www'),
app_title = 'minifying'
)
# Add here other external resources
# for example, you can add shinyalert::useShinyalert()
)
}
| /step-3-build/R/app_ui.R | permissive | ColinFay/minifying | R | false | false | 1,015 | r | #' The application User-Interface
#' The application User-Interface
#'
#' Builds the top-level UI: external resources plus a two-pane
#' (left/right module) fluid page.
#'
#' @param request Internal parameter for `{shiny}`.
#' DO NOT REMOVE.
#' @return A `tagList` containing the full UI definition.
#' @import shiny
#' @noRd
app_ui <- function(request) {
tagList(
# Leave this function for adding external resources
golem_add_external_resources(),
# List the first level UI elements here
fluidPage(
class = "split",
mod_left_ui("left_ui_1"),
mod_right_ui("right_ui_1")
)
)
}
#' Add external Resources to the Application
#'
#' This function is internally used to add external
#' resources inside the Shiny application.
#'
#' @return An HTML `head` tag bundling the favicon and the app's www assets.
#' @import shiny
#' @importFrom golem add_resource_path activate_js favicon bundle_resources
#' @noRd
golem_add_external_resources <- function(){
# Serve the package's app/www directory under the /www URL prefix.
add_resource_path(
'www', app_sys('app/www')
)
tags$head(
favicon(),
bundle_resources(
path = app_sys('app/www'),
app_title = 'minifying'
)
# Add here other external resources
# for example, you can add shinyalert::useShinyalert()
)
}
|
# Unit tests for the bizdays calendar register: listing, retrieving,
# calling API functions by calendar name, and setting the default calendar.
context('Calendar register')
test_that('it should list calendars thru register', {
# expect_output(calendars(), 'actual/365')
# expect_equal(length(calendars()), 1)
l <- length(calendars())
cal <- Calendar_()
# Creating an unregistered Calendar object must not change the register.
expect_equal(length(calendars()), l)
cal <- create.calendar('try-ANBIMA', holidaysANBIMA, weekdays=c('saturday', 'sunday'))
# create.calendar() registers the new calendar under its name.
expect_equal(length(calendars()), l+1)
expect_output(calendars(), 'try-ANBIMA')
})
test_that('it should retrieve registered calendars', {
expect_is(calendars()[['actual']], 'Calendar')
# Unknown names yield NULL rather than an error.
expect_null(calendars()[['blá']])
})
test_that('it should call calendar\'s methods with calendar\'s name', {
# An unregistered name is an error...
expect_error(bizdays('2016-02-01', '2016-02-02', 'actual/365'), 'Invalid calendar')
# ...while every API function accepts a registered calendar by name.
expect_equal(bizdays('2016-02-01', '2016-02-02', 'actual'), 1)
# expect_equal(bizyears('2016-02-01', '2016-02-02', 'actual'), 1/365)
expect_equal(is.bizday('2016-02-01', 'actual'), TRUE)
expect_equal(offset('2016-02-01', 1, 'actual'), as.Date('2016-02-02'))
expect_equal(bizseq('2016-02-01', '2016-02-02', 'actual'), as.Date(c('2016-02-01', '2016-02-02')))
expect_equal(modified.following('2013-01-01', 'actual'), as.Date('2013-01-01'))
expect_equal(modified.preceding('2013-01-01', 'actual'), as.Date('2013-01-01'))
expect_equal(following('2013-01-01', 'actual'), as.Date('2013-01-01'))
expect_equal(preceding('2013-01-01', 'actual'), as.Date('2013-01-01'))
})
test_that('it should set default calendar with calendar\'s name', {
cal <- create.calendar("actual-calendar")
bizdays.options$set(default.calendar='actual-calendar')
expect_is(bizdays.options$get('default.calendar'), 'character')
expect_output(bizdays.options$get('default.calendar'), 'actual-calendar')
})
test_that('it should remove a calendar', {
cal <- create.calendar("actual")
expect_false( is.null(calendars()[["actual"]]) )
remove.calendars("actual")
expect_true( is.null(calendars()[["actual"]]) )
}) | /inst/tests/test-register.R | no_license | miceli/R-bizdays | R | false | false | 1,945 | r |
# Unit tests for the bizdays calendar register: listing, retrieving,
# calling API functions by calendar name, and setting the default calendar.
context('Calendar register')
test_that('it should list calendars thru register', {
# expect_output(calendars(), 'actual/365')
# expect_equal(length(calendars()), 1)
l <- length(calendars())
cal <- Calendar_()
# Creating an unregistered Calendar object must not change the register.
expect_equal(length(calendars()), l)
cal <- create.calendar('try-ANBIMA', holidaysANBIMA, weekdays=c('saturday', 'sunday'))
# create.calendar() registers the new calendar under its name.
expect_equal(length(calendars()), l+1)
expect_output(calendars(), 'try-ANBIMA')
})
test_that('it should retrieve registered calendars', {
expect_is(calendars()[['actual']], 'Calendar')
# Unknown names yield NULL rather than an error.
expect_null(calendars()[['blá']])
})
test_that('it should call calendar\'s methods with calendar\'s name', {
# An unregistered name is an error...
expect_error(bizdays('2016-02-01', '2016-02-02', 'actual/365'), 'Invalid calendar')
# ...while every API function accepts a registered calendar by name.
expect_equal(bizdays('2016-02-01', '2016-02-02', 'actual'), 1)
# expect_equal(bizyears('2016-02-01', '2016-02-02', 'actual'), 1/365)
expect_equal(is.bizday('2016-02-01', 'actual'), TRUE)
expect_equal(offset('2016-02-01', 1, 'actual'), as.Date('2016-02-02'))
expect_equal(bizseq('2016-02-01', '2016-02-02', 'actual'), as.Date(c('2016-02-01', '2016-02-02')))
expect_equal(modified.following('2013-01-01', 'actual'), as.Date('2013-01-01'))
expect_equal(modified.preceding('2013-01-01', 'actual'), as.Date('2013-01-01'))
expect_equal(following('2013-01-01', 'actual'), as.Date('2013-01-01'))
expect_equal(preceding('2013-01-01', 'actual'), as.Date('2013-01-01'))
})
test_that('it should set default calendar with calendar\'s name', {
cal <- create.calendar("actual-calendar")
bizdays.options$set(default.calendar='actual-calendar')
expect_is(bizdays.options$get('default.calendar'), 'character')
expect_output(bizdays.options$get('default.calendar'), 'actual-calendar')
})
test_that('it should remove a calendar', {
cal <- create.calendar("actual")
expect_false( is.null(calendars()[["actual"]]) )
remove.calendars("actual")
expect_true( is.null(calendars()[["actual"]]) )
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{compile4float32}
\alias{compile4float32}
\title{Enable compiling of user-defined operators using float 32bits precision.}
\usage{
compile4float32()
}
\value{
None
}
\description{
Set up \code{rkeops} compile options to compile user-defined operators that use
float 32bits precision in computation.
}
\details{
\strong{Note:} Default behavior is to compile operators that use
float 32bits precision in computation. Hence, if you do not modify \code{rkeops}
options, you do not have to call the function \code{compile4float32} to
compile operators using float 32bits precision.
Since R only manages float 64bits or double numbers, the input and output
are casted to float 32bits before and after computations respectively.
}
\examples{
library(rkeops)
compile4float32()
}
\seealso{
\code{\link[rkeops:compile4float64]{rkeops::compile4float64()}}
}
\author{
Ghislain Durif
}
| /rkeops/man/compile4float32.Rd | permissive | dvolgyes/keops | R | false | true | 975 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{compile4float32}
\alias{compile4float32}
\title{Enable compiling of user-defined operators using float 32bits precision.}
\usage{
compile4float32()
}
\value{
None
}
\description{
Set up \code{rkeops} compile options to compile user-defined operators that use
float 32bits precision in computation.
}
\details{
\strong{Note:} Default behavior is to compile operators that use
float 32bits precision in computation. Hence, if you do not modify \code{rkeops}
options, you do not have to call the function \code{compile4float32} to
compile operators using float 32bits precision.
Since R only manages float 64bits or double numbers, the input and output
are casted to float 32bits before and after computations respectively.
}
\examples{
library(rkeops)
compile4float32()
}
\seealso{
\code{\link[rkeops:compile4float64]{rkeops::compile4float64()}}
}
\author{
Ghislain Durif
}
|
# Extracted example: mean number of customers per node (Lk) in a
# Closed Jackson Network built from two M/M/1 nodes.
library(queueing)
### Name: Lk.o_CJN
### Title: Returns the vector with the mean number of customers in each
### node (server) of a Closed Jackson Network
### Aliases: Lk.o_CJN
### Keywords: Closed Jackson Network
### ** Examples
## See example 11.13 in reference [Sixto2004] for more details.
## create the nodes
# n is passed to NewInput.CJN below -- presumably the customer population;
# confirm against the queueing package documentation.
n <- 2
n1 <- NewInput.MM1(lambda=0, mu=1/0.2, n=0)   # service rate 1/0.2 = 5
n2 <- NewInput.MM1(lambda=0, mu=1/0.4, n=0)   # service rate 1/0.4 = 2.5
# think time = 0
z <- 0
# operational value
operational <- FALSE
# definition of the transition probabilities
# Customers leave each node for either node with probability 0.5.
prob <- matrix(data=c(0.5, 0.5, 0.5, 0.5), nrow=2, ncol=2, byrow=TRUE)
# Define a new input
cjn1 <- NewInput.CJN(prob, n, z, operational, 0, 0.001, n1, n2)
# Check the inputs and build the model
m_cjn1 <- QueueingModel(cjn1)
# Mean number of customers at each node.
Lk(m_cjn1)
| /data/genthat_extracted_code/queueing/examples/Lk.o_CJN.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 771 | r | library(queueing)
# Extracted example: mean number of customers per node (Lk) in a
# Closed Jackson Network built from two M/M/1 nodes.
### Name: Lk.o_CJN
### Title: Returns the vector with the mean number of customers in each
### node (server) of a Closed Jackson Network
### Aliases: Lk.o_CJN
### Keywords: Closed Jackson Network
### ** Examples
## See example 11.13 in reference [Sixto2004] for more details.
## create the nodes
# n is passed to NewInput.CJN below -- presumably the customer population;
# confirm against the queueing package documentation.
n <- 2
n1 <- NewInput.MM1(lambda=0, mu=1/0.2, n=0)   # service rate 1/0.2 = 5
n2 <- NewInput.MM1(lambda=0, mu=1/0.4, n=0)   # service rate 1/0.4 = 2.5
# think time = 0
z <- 0
# operational value
operational <- FALSE
# definition of the transition probabilities
# Customers leave each node for either node with probability 0.5.
prob <- matrix(data=c(0.5, 0.5, 0.5, 0.5), nrow=2, ncol=2, byrow=TRUE)
# Define a new input
cjn1 <- NewInput.CJN(prob, n, z, operational, 0, 0.001, n1, n2)
# Check the inputs and build the model
m_cjn1 <- QueueingModel(cjn1)
# Mean number of customers at each node.
Lk(m_cjn1)
|
# Shiny server: draws `n_obs` samples from a bivariate normal whose
# correlation is set by the `corr` input, then renders a histogram of the
# first coordinate and a scatterplot of both coordinates.
library("ggplot2")
library("mvtnorm")
shinyServer( function(input, output) {
# Reactive sample: regenerated whenever input$corr or input$n_obs changes.
update_x <- reactive({
mu <- c(10, 15)
# Covariance matrix: variances 4 and 9; off-diagonal corr * 6
# (6 = sqrt(4 * 9), so input$corr acts as the correlation coefficient).
A <- matrix(c(4, input$corr * 6,
input$corr * 6, 9),
nrow = 2)
X <- rmvnorm(input$n_obs, mean = mu,
sigma = A)
X
})
output$histogram <- renderPlot({
X <- update_x()
qplot(X[, 1])
})
output$scatter <- renderPlot({
X <- update_x()
qplot(x = X[, 1], y = X[, 2])
})
})
# n_obs <- 100
# corr <- 0.5
| /2015/mtv_shiny/server.R | no_license | bdemeshev/pr201 | R | false | false | 516 | r | library("ggplot2")
# Shiny server: draws `n_obs` samples from a bivariate normal whose
# correlation is set by the `corr` input, then renders a histogram of the
# first coordinate and a scatterplot of both coordinates.
library("mvtnorm")
shinyServer( function(input, output) {
# Reactive sample: regenerated whenever input$corr or input$n_obs changes.
update_x <- reactive({
mu <- c(10, 15)
# Covariance matrix: variances 4 and 9; off-diagonal corr * 6
# (6 = sqrt(4 * 9), so input$corr acts as the correlation coefficient).
A <- matrix(c(4, input$corr * 6,
input$corr * 6, 9),
nrow = 2)
X <- rmvnorm(input$n_obs, mean = mu,
sigma = A)
X
})
output$histogram <- renderPlot({
X <- update_x()
qplot(X[, 1])
})
output$scatter <- renderPlot({
X <- update_x()
qplot(x = X[, 1], y = X[, 2])
})
})
# n_obs <- 100
# corr <- 0.5
|
#install.packages("pnn")
#install.packages("neuralnet")
library(png)
library(imager)
library(radiomics)
library(pnn)
library(neuralnet)
normlinha <- function(vetor){
    # Min-max normalisation: map a numeric vector linearly onto [0, 1].
    # NOTE(review): a constant vector has zero range and yields NaN.
    rng = range(vetor)
    (vetor - rng[1]) / (rng[2] - rng[1])
}
# Apply min-max scaling (normlinha) to every column of a matrix/data frame;
# returns a matrix with one normalised column per input column.
normset <- function(dados){
return(apply(dados, 2, normlinha))
}
### Features fed to the network (GLCM texture statistics)
feat = c("glcm_mean","glcm_variance","glcm_energy","glcm_contrast","glcm_entropy","glcm_homogeneity1","glcm_correlation","glcm_IDMN")
### Read the image files ("healthy" vs. "triangle" classes)
healthybase = paste(getwd(), "/testimgs/saudavel", sep="")
tribase = paste(getwd(), "/testimgs/triangulo", sep="")
healthyfiles = list.files(healthybase)
trifiles = list.files(tribase)
# To view the PNGs:
#a = readPNG(paste(healthybase,"s1.png",sep="/"))[,,1]
#image(a, col=grey(0:64*(max(a))/64), axes=FALSE, ylab="")
#display(a)
### Build the feature matrices for the healthy and triangle image sets
featmatrix = c()
for (arq in healthyfiles){
a = readPNG(paste(healthybase,arq,sep="/"))[,,1]   # first channel only
m = radiomics::glcm(a,angle=0, d=1)   # grey-level co-occurrence matrix
f = calc_features(m) # which features are used depends on the neural net
f = f[names(f)%in% feat]
featmatrix = rbind(featmatrix,f)
}
trimatrix = c()
for (arq in trifiles){
a = readPNG(paste(tribase,arq,sep="/"))[,,1]
m = glcm(a, angle=0,d=1)
f = calc_features(m) # which features are used depends on the neural net
f = f[names(f)%in% feat]
trimatrix = rbind(trimatrix,f)
}
#apply(featmatrix, 2, mean)
#apply(trimatrix, 2, mean)
#plot(c(featmatrix[,1],trimatrix[,1]),c(featmatrix[,2],trimatrix[,2]),col = c(rep("blue",6),rep("red",6)),pch = 16)
#### Split each class into train and test sets (75% train)
h_n = dim(featmatrix)[1]
tri_n = dim(trimatrix)[1]
trainp = 0.75
h_train = floor(h_n*trainp)
train_h_set = sample(1:h_n, h_train)
test_h_set = (1:h_n)[-train_h_set]
healthytrain = featmatrix[train_h_set,]
healthytest = featmatrix[test_h_set,]
tri_n = dim(trimatrix)[1]   # NOTE(review): recomputed; already set above
tri_train = floor(tri_n*trainp)
train_t_set = sample(1:tri_n, tri_train)
test_t_set = (1:tri_n)[-train_t_set]
tritrain = trimatrix[train_t_set,]
tritest = trimatrix[test_t_set,]
### Standardise all matrices with the training set's mean/sd (z-scores)
treino = rbind(healthytrain,tritrain)
medias = apply(treino, 2, mean)
desvios = apply(treino, 2, sd)
for( i in 1:length(feat) ){
treino[,i] = (treino[,i] - medias[i])/desvios[i]
}
# Test set is scaled with the *training* statistics (avoids leakage).
teste=rbind(healthytest,tritest)
for( i in 1:length(feat)){
teste[,i] = (teste[,i] - medias[i])/desvios[i]
}
#d = rbind(featmatrix,trimatrix)
#nd = normset(d)
#nd_h = nd[1:h_n,]
#nd_t = nd[ (h_n+1):(h_n+tri_n),]
##### Train the network #####
# Class label: 0 = healthy, 1 = triangle; the formula is built from the
# feature column names.
train = cbind(c(rep(0,h_train),rep(1,tri_train)), treino)
colnames(train)[1] = c("class")
x = paste(colnames(train)[-1],collapse="+")
net.d = neuralnet(data = train, formula = paste('class ~ ' ,x) , rep=5, hidden=5, linear.output=FALSE, threshold = 0.001,act.fct="tanh")
min(net.d$result.matrix[1,])   # best error across the 5 training repetitions
plot(net.d,rep="best")
compute(net.d,train[,-1])$net.result   # predictions on the training set
##### Test #####
compute(net.d,teste)$net.result   # predictions on the held-out test set
#### END ####
#### FIM #### | /modelo.R | no_license | Cicconella/AI | R | false | false | 2,983 | r | #install.packages("pnn")
#install.packages("neuralnet")
library(png)
library(imager)
library(radiomics)
library(pnn)
library(neuralnet)
normlinha <- function(vetor){
minimo = min(vetor)
maximo = max(vetor)
d = maximo-minimo
vetor = (vetor - minimo)/d
return(vetor)
}
normset <- function(dados){
return(apply(dados, 2, normlinha))
}
### Features para a rede
feat = c("glcm_mean","glcm_variance","glcm_energy","glcm_contrast","glcm_entropy","glcm_homogeneity1","glcm_correlation","glcm_IDMN")
### Leitura dos arquivos
healthybase = paste(getwd(), "/testimgs/saudavel", sep="")
tribase = paste(getwd(), "/testimgs/triangulo", sep="")
healthyfiles = list.files(healthybase)
trifiles = list.files(tribase)
# Para ver os PNGs
#a = readPNG(paste(healthybase,"s1.png",sep="/"))[,,1]
#image(a, col=grey(0:64*(max(a))/64), axes=FALSE, ylab="")
#display(a)
### Gera as matrizes de features saudaveis e triangulo
featmatrix = c()
for (arq in healthyfiles){
a = readPNG(paste(healthybase,arq,sep="/"))[,,1]
m = radiomics::glcm(a,angle=0, d=1)
f = calc_features(m) #quais features usaremos depende da rede neural
f = f[names(f)%in% feat]
featmatrix = rbind(featmatrix,f)
}
trimatrix = c()
for (arq in trifiles){
a = readPNG(paste(tribase,arq,sep="/"))[,,1]
m = glcm(a, angle=0,d=1)
f = calc_features(m) #quais features usaremos depende da rede neural
f = f[names(f)%in% feat]
trimatrix = rbind(trimatrix,f)
}
#apply(featmatrix, 2, mean)
#apply(trimatrix, 2, mean)
#plot(c(featmatrix[,1],trimatrix[,1]),c(featmatrix[,2],trimatrix[,2]),col = c(rep("blue",6),rep("red",6)),pch = 16)
#### Separa treino e test saudavel e tri
h_n = dim(featmatrix)[1]
tri_n = dim(trimatrix)[1]
trainp = 0.75
h_train = floor(h_n*trainp)
train_h_set = sample(1:h_n, h_train)
test_h_set = (1:h_n)[-train_h_set]
healthytrain = featmatrix[train_h_set,]
healthytest = featmatrix[test_h_set,]
tri_n = dim(trimatrix)[1]
tri_train = floor(tri_n*trainp)
train_t_set = sample(1:tri_n, tri_train)
test_t_set = (1:tri_n)[-train_t_set]
tritrain = trimatrix[train_t_set,]
tritest = trimatrix[test_t_set,]
### Normaliza todas matrizes
treino = rbind(healthytrain,tritrain)
medias = apply(treino, 2, mean)
desvios = apply(treino, 2, sd)
for( i in 1:length(feat) ){
treino[,i] = (treino[,i] - medias[i])/desvios[i]
}
teste=rbind(healthytest,tritest)
for( i in 1:length(feat)){
teste[,i] = (teste[,i] - medias[i])/desvios[i]
}
#d = rbind(featmatrix,trimatrix)
#nd = normset(d)
#nd_h = nd[1:h_n,]
#nd_t = nd[ (h_n+1):(h_n+tri_n),]
##### Treina a rede #####
train = cbind(c(rep(0,h_train),rep(1,tri_train)), treino)
colnames(train)[1] = c("class")
x = paste(colnames(train)[-1],collapse="+")
net.d = neuralnet(data = train, formula = paste('class ~ ' ,x) , rep=5, hidden=5, linear.output=FALSE, threshold = 0.001,act.fct="tanh")
min(net.d$result.matrix[1,])
plot(net.d,rep="best")
compute(net.d,train[,-1])$net.result
##### Teste #####
compute(net.d,teste)$net.result
#### FIM #### |
# Note that this assumes `household_power_consumption.txt` is in your current directory.
# I'm not adding a 127MB file to my repo.
# This can be found at: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Read the full dataset; missing values are coded as "?" in the raw file.
d <- read.csv("./household_power_consumption.txt", header=TRUE, sep=";", na.strings="?")
d$Date <- as.Date(d$Date, "%d/%m/%Y")
# Combine date and time into a single timestamp column.
d$Time <- strptime(paste(d$Date, d$Time), format="%Y-%m-%d %H:%M:%S")
# Keep only the two days of interest.
dates <- as.Date(c("2007-02-01", "2007-02-02"))
ds <- d[d$Date %in% dates,]
# Plot 1: histogram of global active power, written to plot1.png.
png("./plot1.png")
hist(ds$Global_active_power,
main="Global Active Power",
col="red",
ylab="Frequency",
xlab="Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | slpsys/ExData_Plotting1 | R | false | false | 680 | r | # Note that this assumes `household_power_consumption.txt` is in your current directory.
# I'm not adding a 127MB file to my repo.
# This can be found at: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Read the full dataset; missing values are coded as "?" in the raw file.
d <- read.csv("./household_power_consumption.txt", header=TRUE, sep=";", na.strings="?")
d$Date <- as.Date(d$Date, "%d/%m/%Y")
# Combine date and time into a single timestamp column.
d$Time <- strptime(paste(d$Date, d$Time), format="%Y-%m-%d %H:%M:%S")
# Keep only the two days of interest.
dates <- as.Date(c("2007-02-01", "2007-02-02"))
ds <- d[d$Date %in% dates,]
# Plot 1: histogram of global active power, written to plot1.png.
png("./plot1.png")
hist(ds$Global_active_power,
main="Global Active Power",
col="red",
ylab="Frequency",
xlab="Global Active Power (kilowatts)")
dev.off()
|
# Exploratory analysis of machine utilisation: derive Percent.util from
# Percent.Idle, summarise machine RL1, and plot utilisation per machine.
getwd()
util<-read.csv("P3-Machine-Utilization.csv")
#?as.POSIXct
# Parse the timestamp column (day/month/year hour:minute).
util$posixtime<-as.POSIXct(util$Timestamp,format="%d/%m/%Y %H:%M")
util$Percent.util<-1-util$Percent.Idle
# Per-machine subsets.
rl1<-util[util$Machine=="RL1",]
rl2<-util[util$Machine=="RL2",]
head(rl1,15)
# Reorder columns, then drop the raw Timestamp and Percent.Idle columns.
rl1<-rl1[,c(4,2,5,1,3)]
head(rl1,15)
rl1$Timestamp<-NULL
rl1$Percent.Idle<-NULL
# Rows where utilisation is unknown (NA).
unknown_hours<-rl1[is.na(rl1$Percent.util),]
head(rl1,15)
m1<-max(rl1$Percent.util,na.rm = TRUE)
m1
maxutil<-rl1[which(rl1$Percent.util==m1),]
maxutil
m2<-min(rl1$Percent.util,na.rm = TRUE)
m2
minutil<-rl1[which(rl1$Percent.util==m2),]
minutil
# Did utilisation ever drop below 90%?
vec<-rl1$Percent.util<0.9
vec
# NOTE(review): `vec` contains NA for unknown hours, so rl1[vec,] also picks
# up NA rows, and `> 1` means a single drop is not flagged -- confirm intent.
util_check<-nrow(rl1[vec,])>1
util_check
library(ggplot2)
# Utilisation over time per machine, with the 90% threshold drawn in.
p<-ggplot(data=util,aes(x=posixtime,y=Percent.util,colour=Machine))
q<-p+geom_line(size=1.0)+facet_grid(Machine~.,scales="free")+geom_hline(yintercept=0.9,size=1)
q<-q+ylab("Percentage Utilization")+xlab("Time")+ggtitle("Plot for Machine Utilization")
plot<-q
# Bundle the RL1 summary into a named list report.
listRl1<-list("DATA"=rl1,"MACHINE"="RL1","UNKNOWN HOURS"=unknown_hours,"MAX UTIL"=maxutil,"MIN UTIL"=minutil,"DROP BELOW 90%"=util_check,"PLOT"=plot)
listRl1
| /Machine Utilization.R | no_license | shashwatb10/Machine-Utilization | R | false | false | 1,119 | r | getwd()
# Exploratory analysis of machine utilisation: derive Percent.util from
# Percent.Idle, summarise machine RL1, and plot utilisation per machine.
util<-read.csv("P3-Machine-Utilization.csv")
#?as.POSIXct
# Parse the timestamp column (day/month/year hour:minute).
util$posixtime<-as.POSIXct(util$Timestamp,format="%d/%m/%Y %H:%M")
util$Percent.util<-1-util$Percent.Idle
# Per-machine subsets.
rl1<-util[util$Machine=="RL1",]
rl2<-util[util$Machine=="RL2",]
head(rl1,15)
# Reorder columns, then drop the raw Timestamp and Percent.Idle columns.
rl1<-rl1[,c(4,2,5,1,3)]
head(rl1,15)
rl1$Timestamp<-NULL
rl1$Percent.Idle<-NULL
# Rows where utilisation is unknown (NA).
unknown_hours<-rl1[is.na(rl1$Percent.util),]
head(rl1,15)
m1<-max(rl1$Percent.util,na.rm = TRUE)
m1
maxutil<-rl1[which(rl1$Percent.util==m1),]
maxutil
m2<-min(rl1$Percent.util,na.rm = TRUE)
m2
minutil<-rl1[which(rl1$Percent.util==m2),]
minutil
# Did utilisation ever drop below 90%?
vec<-rl1$Percent.util<0.9
vec
# NOTE(review): `vec` contains NA for unknown hours, so rl1[vec,] also picks
# up NA rows, and `> 1` means a single drop is not flagged -- confirm intent.
util_check<-nrow(rl1[vec,])>1
util_check
library(ggplot2)
# Utilisation over time per machine, with the 90% threshold drawn in.
p<-ggplot(data=util,aes(x=posixtime,y=Percent.util,colour=Machine))
q<-p+geom_line(size=1.0)+facet_grid(Machine~.,scales="free")+geom_hline(yintercept=0.9,size=1)
q<-q+ylab("Percentage Utilization")+xlab("Time")+ggtitle("Plot for Machine Utilization")
plot<-q
# Bundle the RL1 summary into a named list report.
listRl1<-list("DATA"=rl1,"MACHINE"="RL1","UNKNOWN HOURS"=unknown_hours,"MAX UTIL"=maxutil,"MIN UTIL"=minutil,"DROP BELOW 90%"=util_check,"PLOT"=plot)
listRl1
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_funs.R
\name{open_grass_help}
\alias{open_grass_help}
\title{Open the GRASS online help}
\usage{
open_grass_help(alg)
}
\arguments{
\item{alg}{The name of the algorithm for which you wish to retrieve arguments
and default values.}
}
\description{
\code{open_grass_help} opens the GRASS online help for a
specified GRASS geoalgorithm.
}
\examples{
\dontrun{
open_grass_help("grass7:r.sunmask")
}
}
\author{
Jannes Muenchow
}
| /man/open_grass_help.Rd | no_license | rededsky/RQGIS | R | false | true | 512 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_funs.R
\name{open_grass_help}
\alias{open_grass_help}
\title{Open the GRASS online help}
\usage{
open_grass_help(alg)
}
\arguments{
\item{alg}{The name of the algorithm for which you wish to retrieve arguments
and default values.}
}
\description{
\code{open_grass_help} opens the GRASS online help for a
specified GRASS geoalgorithm.
}
\examples{
\dontrun{
open_grass_help("grass7:r.sunmask")
}
}
\author{
Jannes Muenchow
}
|
# prepare_ea.r
# Filters and prepare R's version of Ethnographic Atlas
# 1. Fixes some errors in EA coding
# 2. Modify slavery variable
# 3. selection of variables of interest
library("argparser")
import::from("src/lib/prepare.r", "modify_slavery")
import::from("src/lib/prepare.r", "select_variables")
import::from("src/lib/prepare.r", "get_residence")
import::from("src/lib/prepare.r", "remove_PMR")
import::from("src/lib/utils.r", "write_named_vector")
# args_parser
# Build and parse the command-line interface for this script.
# Returns the parsed arguments: input path, output path, --residence flag.
args_parser = function(){
    parser = arg_parser(
        "Parse and filter R's EA.Rdata obtained from pydplace API"
    )
    parser = add_argument(
        parser, "input", type="character",
        # Fixed: a stray trailing comma after `help` passed an extra empty
        # argument to add_argument().
        help = "path to dplace csv file"
    )
    parser = add_argument(
        parser, "output", type="character",
        help = "output file"
    )
    parser = add_argument(
        parser, "--residence", flag=TRUE,
        help = "if specified, only residence information is parsed"
    )
    args = parse_args(parser)
    args
}
# main
# Load the EA data (an .Rdata image providing an `EA` data frame), label its
# rows by society, then either export residence information (--residence) or
# run the full cleaning pipeline and save the result as RDS.
# NOTE(review): the CLI help says "path to dplace csv file" but load() reads
# an .Rdata image -- confirm which is correct.
main = function(input, output, residence){
load(input)   # brings `EA` into this function's environment
names = EA$society
rownames(EA) = names
if(residence){
residences = get_residence(EA)
write_named_vector(residences, output)
} else {
# Cleaning pipeline: fix miscoded NAs, recode slavery, subset columns,
# drop PMR societies.
EA = remove_wrong_na(EA)
EA = modify_slavery(EA)
EA = select_variables(EA)
EA = remove_PMR(EA)
saveRDS(EA, file=output)
}
}
# remove_wrong_na
# Some EA variables are incorrectly coded as 0 instead of NA: recode those
# zeros to NA and return the modified data frame.
# Bug fix: the original line used `==` instead of `<-`, so the comparison
# result was discarded and nothing was ever recoded.
remove_wrong_na = function(EA){
    wrong_na = c("v34","v81","v86","v90","v94","v95","v96")
    for(v in wrong_na){
        # Exclude existing NAs so the logical index contains no NA.
        EA[[v]][!is.na(EA[[v]]) & EA[[v]] == 0] = NA
    }
    EA
}
# Run the CLI entry point only when executed as a script (not when sourced
# into an interactive session).
if(!interactive()){
args = args_parser()
main(args$input, args$output, args$residence)
}
| /src/prepare_ea.r | no_license | J-Moravec/clustering_ethnographic_atlas | R | false | false | 1,738 | r | # prepare_ea.r
# Filters and prepare R's version of Ethnographic Atlas
# 1. Fixes some errors in EA coding
# 2. Modify slavery variable
# 3. selection of variables of interest
library("argparser")
import::from("src/lib/prepare.r", "modify_slavery")
import::from("src/lib/prepare.r", "select_variables")
import::from("src/lib/prepare.r", "get_residence")
import::from("src/lib/prepare.r", "remove_PMR")
import::from("src/lib/utils.r", "write_named_vector")
# args_parser
# Build and parse the command-line interface for this script.
# Returns the parsed arguments: input path, output path, --residence flag.
args_parser = function(){
    parser = arg_parser(
        "Parse and filter R's EA.Rdata obtained from pydplace API"
    )
    parser = add_argument(
        parser, "input", type="character",
        # Fixed: a stray trailing comma after `help` passed an extra empty
        # argument to add_argument().
        help = "path to dplace csv file"
    )
    parser = add_argument(
        parser, "output", type="character",
        help = "output file"
    )
    parser = add_argument(
        parser, "--residence", flag=TRUE,
        help = "if specified, only residence information is parsed"
    )
    args = parse_args(parser)
    args
}
# main
# Entry point of the script.
#
# Loads the EA data frame from <input> -- an .Rdata file expected to define
# an object named `EA` -- keys its rows by society name, and then either
# writes residence information only, or runs the full filtering pipeline
# and serialises the result as RDS.
#
# input     -- path to the EA.Rdata file
# output    -- path of the output file
# residence -- logical; if TRUE only residence information is written
main = function(input, output, residence){
    load(input)  # expected to create `EA` in this function's environment
    names = EA$society
    rownames(EA) = names
    if(residence){
        residences = get_residence(EA)
        write_named_vector(residences, output)
    } else {
        # Full pipeline: fix miscoded NAs, recode the slavery variable,
        # keep only the variables of interest, then drop PMR (presumably
        # post-marital residence; see src/lib/prepare.r -- TODO confirm).
        EA = remove_wrong_na(EA)
        EA = modify_slavery(EA)
        EA = select_variables(EA)
        EA = remove_PMR(EA)
        saveRDS(EA, file=output)
    }
}
# remove_wrong_na
# Some EA variables are incorrectly coded as 0 instead of NA; recode every
# zero in those columns as NA.
#
# EA -- data frame holding the Ethnographic Atlas variables
# returns: EA with zeros in the affected columns replaced by NA
remove_wrong_na = function(EA){
    wrong_na = c("v34","v81","v86","v90","v94","v95","v96")
    # BUG FIX: the original line used `==` (a comparison whose result was
    # discarded) instead of `<-`, so the zeros were never actually recoded.
    # The mask also guards existing NAs, which are not allowed as indices
    # in subscripted assignment.
    mask = !is.na(EA[wrong_na]) & EA[wrong_na] == 0
    EA[wrong_na][mask] <- NA
    EA
}
# Run the CLI only when this file is executed non-interactively
# (e.g. via Rscript), not when it is source()d from an interactive session.
if(!interactive()){
    args = args_parser()
    main(args$input, args$output, args$residence)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter.R
\name{filter.univariate}
\alias{filter.univariate}
\title{Univariate Filtering}
\usage{
filter.univariate(
data,
type,
yvar,
xvars,
censorvar,
trtvar,
trtref = 1,
pre.filter = length(xvars)
)
}
\arguments{
\item{data}{input data frame}
\item{type}{"c" continuous; "s" survival; "b" binary}
\item{yvar}{response variable name}
\item{xvars}{covariates variable name}
\item{censorvar}{censoring variable name 1:event; 0: censor.}
\item{trtvar}{treatment variable name}
\item{trtref}{code for treatment arm}
\item{pre.filter}{NULL: no prefiltering is conducted; "opt": an optimized number of predictors is selected; an integer: min(opt, integer) predictors are selected}
}
\value{
covariate names after univariate filtering.
}
\description{
Univariate Filtering
}
| /man/filter.univariate.Rd | no_license | xhuang4/optaucx | R | false | true | 859 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter.R
\name{filter.univariate}
\alias{filter.univariate}
\title{Univariate Filtering}
\usage{
filter.univariate(
data,
type,
yvar,
xvars,
censorvar,
trtvar,
trtref = 1,
pre.filter = length(xvars)
)
}
\arguments{
\item{data}{input data frame}
\item{type}{"c" continuous; "s" survival; "b" binary}
\item{yvar}{response variable name}
\item{xvars}{covariates variable name}
\item{censorvar}{censoring variable name 1:event; 0: censor.}
\item{trtvar}{treatment variable name}
\item{trtref}{code for treatment arm}
\item{pre.filter}{NULL: no prefiltering is conducted; "opt": an optimized number of predictors is selected; an integer: min(opt, integer) predictors are selected}
}
\value{
covariate names after univariate filtering.
}
\description{
Univariate Filtering
}
|
#' Filter the river data with a filter
#'
#' \code{datafilter.riv} flags the rivers whose column maximum in \code{x}
#' exceeds a per-river threshold. By default the threshold is the river-type
#' depth plus the RIV_DPTH calibration term read from the model files.
#' @param x Input data; columns are rivers, rows are time steps
#'   (assumed zoo/matrix-like -- TODO confirm against callers)
#' @param filter Per-river numeric threshold; defaults to river-type depth
#'   plus the RIV_DPTH calibration value
#' @param plot Whether plot the data
#' @importFrom grDevices dev.off graphics.off png rgb topo.colors
#' @importFrom graphics grid hist lines par plot points
#' @importFrom methods as
#' @importFrom stats dist rnorm time
#' @importFrom utils read.table
#' @return data.frame of the flagged rivers, columns c('ID','Vmin','Vmax', 'Filter')
#' @export
datafilter.riv <-function(x, filter=NULL, plot=TRUE){
  msg=paste0('datafilter.riv::')
  # y=x[['YRivstage']]
  y = x
  # plot(y)
  pr=readriv()             # river attributes from the model input files
  cb = readcalib()         # calibration parameters
  tid = pr@river[,'Type']  # river-type index of every river
  uid = sort(unique(tid))
  # Default threshold: per-type depth plus the calibrated extra depth.
  st=pr@rivertype[tid, 'Depth'] + cb['RIV_DPTH']
  if( is.null(filter) ){
    filter = st
  }
  ymax = apply(y, 2, max, na.rm=T)
  ymin = apply(y, 2, min, na.rm=T)
  id = which(ymax > filter)   # rivers whose maximum exceeds the threshold
  ret = data.frame(id, ymin[id], ymax[id], filter[id])
  colnames(ret) = c('ID','Vmin','Vmax', 'Filter')
  rownames(ret) = id
  ylim=range(c(filter, y))
  if(plot ){
    if(length(id) > 0){
      id = id   # plot only the flagged rivers
      message(msg, length(id), ' rivers are filtered.')
    }else{
      id = 1:ncol(x)   # nothing flagged: plot every river instead
    }
    yv = sort(( unique(filter) ))
    ny = length(yv)   # NOTE(review): computed but never used
    col = uid         # one colour index per river type
    zoo::plot.zoo(y[,id], col=col[tid[id]], ylim=ylim, screen=1)
    # Dashed horizontal lines at the (unique) threshold values.
    graphics::abline( h=yv, col=col, lwd=3, lty=2)
  }
  ret
}
| /R/DataFilter.R | permissive | SHUD-System/rSHUD | R | false | false | 1,355 | r |
#' Filter the river data with a filter
#' \code{datafilter.riv}
#' @param x Input data
#' @param filter Data filter
#' @param plot Whether plot the data
#' @importFrom grDevices dev.off graphics.off png rgb topo.colors
#' @importFrom graphics grid hist lines par plot points
#' @importFrom methods as
#' @importFrom stats dist rnorm time
#' @importFrom utils read.table
#' @return Matrix information, c('ID','Vmin','Vmax', 'Filter')
#' @export
datafilter.riv <-function(x, filter=NULL, plot=TRUE){
msg=paste0('datafilter.riv::')
# y=x[['YRivstage']]
y = x
# plot(y)
pr=readriv()
cb = readcalib()
tid = pr@river[,'Type']
uid = sort(unique(tid))
st=pr@rivertype[tid, 'Depth'] + cb['RIV_DPTH']
if( is.null(filter) ){
filter = st
}
ymax = apply(y, 2, max, na.rm=T)
ymin = apply(y, 2, min, na.rm=T)
id = which(ymax > filter)
ret = data.frame(id, ymin[id], ymax[id], filter[id])
colnames(ret) = c('ID','Vmin','Vmax', 'Filter')
rownames(ret) = id
ylim=range(c(filter, y))
if(plot ){
if(length(id) > 0){
id = id
message(msg, length(id), ' rivers are filtered.')
}else{
id = 1:ncol(x)
}
yv = sort(( unique(filter) ))
ny = length(yv)
col = uid
zoo::plot.zoo(y[,id], col=col[tid[id]], ylim=ylim, screen=1)
graphics::abline( h=yv, col=col, lwd=3, lty=2)
}
ret
}
|
# rgee (Google Earth Engine bindings) example: replace the cloudy pixels of
# one Landsat 8 scene with pixels from an earlier, clearer scene.
library(rgee)
# ee_reattach() # reattach ee as a reserved word
ee_Initialize()
# Load a cloudy Landsat 8 image.
image <- ee$Image("LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603")
# Show the original scene as a false-colour composite (B5/B4/B3).
Map$addLayer(
  eeObject = image,
  visParams = list(bands = c("B5", "B4", "B3"), min = 0, max = 0.5),
  name = "original image"
)
# Load another image to replace the cloudy pixels.
replacement <- ee$Image("LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416")
# Compute a cloud score band.
cloud <- ee$Algorithms$Landsat$simpleCloudScore(image)$select("cloud")
# Where the cloud score exceeds 10, take the pixel from the other image.
replaced <- image$where(cloud$gt(10), replacement)
# Display the result.
Map$centerObject(image, zoom = 9)
Map$addLayer(
  eeObject = replaced,
  visParams = list(
    bands = c("B5", "B4", "B3"),
    min = 0,
    max = 0.5
  ),
  name = "clouds replaced"
)
| /examples/image/where_operators.R | permissive | benardonyango/rgee | R | false | false | 837 | r | library(rgee)
# ee_reattach() # reattach ee as a reserved word
ee_Initialize()
# Load a cloudy Landsat 8 image.
image <- ee$Image("LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130603")
Map$addLayer(
eeObject = image,
visParams = list(bands = c("B5", "B4", "B3"), min = 0, max = 0.5),
name = "original image"
)
# Load another image to replace the cloudy pixels.
replacement <- ee$Image("LANDSAT/LC08/C01/T1_TOA/LC08_044034_20130416")
# Compute a cloud score band$
cloud <- ee$Algorithms$Landsat$simpleCloudScore(image)$select("cloud")
# Set cloudy pixels to the other image.
replaced <- image$where(cloud$gt(10), replacement)
# Display the result.
Map$centerObject(image, zoom = 9)
Map$addLayer(
eeObject = replaced,
visParams = list(
bands = c("B5", "B4", "B3"),
min = 0,
max = 0.5
),
name = "clouds replaced"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomics_functions.R
\name{datasets.list}
\alias{datasets.list}
\title{Lists datasets within a project. For the definitions of datasets and other genomics resources, see [Fundamentals of Google Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)}
\usage{
datasets.list(projectId = NULL, pageSize = NULL, pageToken = NULL)
}
\arguments{
\item{projectId}{Required}
\item{pageSize}{The maximum number of results to return in a single page}
\item{pageToken}{The continuation token, which is used to page through large result sets}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/genomics
\item https://www.googleapis.com/auth/genomics.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics, https://www.googleapis.com/auth/genomics.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/genomics/}{Google Documentation}
}
| /googlegenomicsv1.auto/man/datasets.list.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,330 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomics_functions.R
\name{datasets.list}
\alias{datasets.list}
\title{Lists datasets within a project. For the definitions of datasets and other genomics resources, see [Fundamentals of Google Genomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)}
\usage{
datasets.list(projectId = NULL, pageSize = NULL, pageToken = NULL)
}
\arguments{
\item{projectId}{Required}
\item{pageSize}{The maximum number of results to return in a single page}
\item{pageToken}{The continuation token, which is used to page through large result sets}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/genomics
\item https://www.googleapis.com/auth/genomics.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics, https://www.googleapis.com/auth/genomics.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/genomics/}{Google Documentation}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{coxprocess_logprior}
\alias{coxprocess_logprior}
\title{Evaluate multivariate Gaussian prior density}
\usage{
coxprocess_logprior(x)
}
\arguments{
\item{x}{evaluation points}
}
\value{
density values
}
\description{
Evaluate multivariate Gaussian prior density
}
| /man/coxprocess_logprior.Rd | no_license | jeremyhengjm/GibbsFlow | R | false | true | 361 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{coxprocess_logprior}
\alias{coxprocess_logprior}
\title{Evaluate multivariate Gaussian prior density}
\usage{
coxprocess_logprior(x)
}
\arguments{
\item{x}{evaluation points}
}
\value{
density values
}
\description{
Evaluate multivariate Gaussian prior density
}
|
## Coursera: Exploratory Data Analysis
## Johns Hopkins University
## Making Plot 2: Global Active Power over time for 1-2 Feb 2007
# Download and read the household power consumption data ('?' encodes NA).
downloadURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url = downloadURL, destfile = "./EDAWeek1data.zip", method = "curl")
rawData <- read.table(unz("EDAWeek1data.zip", "household_power_consumption.txt" ), header = TRUE, sep = ";", dec = ".", na.strings = "?")
# Keep only the two days of interest (dates are in d/m/Y format).
RelevantDates <- subset(rawData, Date == "1/2/2007" | Date == "2/2/2007")
rm(rawData, downloadURL)  # drop the full dataset; no longer needed
# Convert date-column to actual dates & add column with short form of weekday
RelevantDates$DateTime <- as.POSIXct(strptime(paste(RelevantDates$Date, RelevantDates$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S"))
RelevantDates$Weekday <- format(RelevantDates$DateTime, "%a") # unused by the plot; kept from earlier exploration of the x-axis labels
# Draw the line plot straight to a 480x480 PNG device.
png("Plot2.png", width=480, height= 480)
plot(RelevantDates$DateTime, RelevantDates$Global_active_power, lwd=1, ylab = "Global Active Power Output (kilowatts)", xlab="", type="l")
dev.off()
dev.off() | /Plot2.R | no_license | SandervdBelt/ExData_Plotting1 | R | false | false | 1,085 | r | ## Coursera: Exploratory Data Analysis
## John Hopkins University
## Making Plot 2
downloadURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url = downloadURL, destfile = "./EDAWeek1data.zip", method = "curl")
rawData <- read.table(unz("EDAWeek1data.zip", "household_power_consumption.txt" ), header = TRUE, sep = ";", dec = ".", na.strings = "?")
RelevantDates <- subset(rawData, Date == "1/2/2007" | Date == "2/2/2007")
rm(rawData, downloadURL)
#Convert date-column to actual dates & add column with short form of weekday
RelevantDates$DateTime <- as.POSIXct(strptime(paste(RelevantDates$Date, RelevantDates$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S"))
RelevantDates$Weekday <- format(RelevantDates$DateTime, "%a") #this turned out not to be relevant, but I got confused with the x-axis labeling
#make the plot
png("Plot2.png", width=480, height= 480)
plot(RelevantDates$DateTime, RelevantDates$Global_active_power, lwd=1, ylab = "Global Active Power Output (kilowatts)", xlab="", type="l")
dev.off() |
# Activate the R virtualenv (renv) for this project when the session starts.
source(paste("renv", "activate.R", sep = .Platform$file.sep))
# Absolute path to project directory
project_path = function(project_dir="PPP-Table"){
    #' Return the absolute path of the project root as a string.
    #' args: project_dir -- name of the project directory to look for.
    #' raises: signals a warning (and returns its message) when
    #'         <project_dir> is not a component of the current working
    #'         directory's absolute path.
    #'
    #' Author: ck
    path_sep = .Platform$file.sep
    components = strsplit(getwd(), path_sep)[[1]]
    idx = which(components == project_dir)
    if (length(idx) == 0) {
        # Not inside the project tree: keep the original contract of
        # warning and handing the message back to the caller.
        return(warning(paste(
            "Current working directory is not within the project path.",
            "The function 'project_path()' not defined.",
            sep = "\n")))
    }
    # Truncate the working directory at the first occurrence of the
    # project directory and glue the components back together.
    paste(components[seq_len(idx[1])], collapse = path_sep)
}
# Add environments created for project:
# load project-specific environment variables from 'configs/.Renviron'.
# readRenviron() returns FALSE when the file cannot be read; warnings
# raised by project_path() (cwd outside the project tree) are silenced.
if (!suppressWarnings(readRenviron(paste(project_path(),
                                         "configs",
                                         ".Renviron",
                                         sep=.Platform$file.sep)))) {
  warning(paste("Could not read 'configs/.Renviron'.",
                "There may be missing environment variables.",
                sep = "\n"
  ))
}
# Create a database connection to a specific schema.
connection = function(username,
                      password,
                      schema,
                      host="192.168.2.12",
                      port=3306,
                      ssl_ca=NULL){
    #' Open a MariaDB connection and return the connection object.
    #' args: user credentials, schema name, server host/port, and an
    #'       optional path to an SSL CA certificate.
    #' raises: errors from dbConnect() when the connection fails.
    #'
    #' Author: ck
    # NOTE(review): require() only warns when RMariaDB is missing;
    # library() would fail louder here.
    require(RMariaDB)
    con = dbConnect(MariaDB(),
                    user = username,
                    host = host,
                    port = port,
                    password = password,
                    dbname = schema,
                    ssl.ca = ssl_ca
    )
}
# # Add functions above and project path to '.env' list
# .env = new.env()
# .env$project_dir = project_path()
# attach(.env)
| /.Rprofile | no_license | cjkeyes/PPP-Table | R | false | false | 2,112 | rprofile | # Activate the R virtualenv
source(paste("renv", "activate.R", sep = .Platform$file.sep))
# Absolute path to project directory
project_path = function(project_dir="PPP-Table"){
#' Returns the absolute project path string.
#' args: none
#' raises: error if <project_dir> is not within current wd absolute path.
#'
#' Author: ck
current = getwd()
path_sep = .Platform$file.sep
dirs = strsplit(current, path_sep)[[1]]
if (project_dir %in% dirs){
i = which(dirs == project_dir)
outpath = paste(dirs[1:i], collapse = path_sep)
# TODO: add argument for appending sub-dirs
} else {
return(warning(paste(
"Current working directory is not within the project path.",
"The function 'project_path()' not defined.",
sep = "\n")))
}
return(outpath)
}
# Add environments created for project
if (!suppressWarnings(readRenviron(paste(project_path(),
"configs",
".Renviron",
sep=.Platform$file.sep)))) {
warning(paste("Could not read 'configs/.Renviron'.",
"There may be missing environment variables.",
sep = "\n"
))
}
# Create a database connection to a specific schema.
connection = function(username,
password,
schema,
host="192.168.2.12",
port=3306,
ssl_ca=NULL){
#' Create database connection.
#' args: schema name, path to SSL cert, and user credentials
#' raises: none
#'
#' Author: ck
require(RMariaDB)
con = dbConnect(MariaDB(),
user = username,
host = host,
port = port,
password = password,
dbname = schema,
ssl.ca = ssl_ca
)
}
# # Add functions above and project path to '.env' list
# .env = new.env()
# .env$project_dir = project_path()
# attach(.env)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{getProteinFastaUrlFromUCSC}
\alias{getProteinFastaUrlFromUCSC}
\title{Get URL to download protein sequence FASTA from UCSC genome browser for a given dbkey.}
\usage{
getProteinFastaUrlFromUCSC(dbkey)
}
\arguments{
\item{dbkey}{The UCSC dbkey to get protein sequences for, e.g. hg19, hg38, mm10.}
}
\value{
A URL which can be downloaded with \code{\link{download.file}}
}
\description{
Get URL to download protein sequence FASTA from UCSC genome browser for a given dbkey.
}
\examples{
getProteinFastaUrlFromUCSC("hg38")
}
| /man/getProteinFastaUrlFromUCSC.Rd | no_license | liangdp1984/customProDB | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{getProteinFastaUrlFromUCSC}
\alias{getProteinFastaUrlFromUCSC}
\title{Get URL to download protein sequence FASTA from UCSC genome browser for a given dbkey.}
\usage{
getProteinFastaUrlFromUCSC(dbkey)
}
\arguments{
\item{dbkey}{The UCSC dbkey to get protein sequences for, e.g. hg19, hg38, mm10.}
}
\value{
A URL which can be downloaded with \code{\link{download.file}}
}
\description{
Get URL to download protein sequence FASTA from UCSC genome browser for a given dbkey.
}
\examples{
getProteinFastaUrlFromUCSC("hg38")
}
|
# Project Euler 47.
# The first two consecutive numbers to have two distinct prime factors are:
# 14 = 2 × 7
# 15 = 3 × 5
# The first three consecutive numbers to have three distinct prime factors are:
# 644 = 2^2 × 7 × 23
# 645 = 3 × 5 × 43
# 646 = 2 × 17 × 19.
# Find the first four consecutive integers to have four distinct prime factors. What is the first
# of these numbers?
# SOLVES IN ABOUT 8-9 SECONDS, USES GMP FOR EFFICIENT FACTORIZATION FUNCTION
library("gmp") # Use for factorize function
num_primes <- 4 # Number of prime factors to find
consecutive <- 0 # length of the current run of qualifying numbers
first_term <- 0  # first number of the current run (0 = no run in progress)
n <- 4
# Scan upwards, tracking how many consecutive n have exactly num_primes
# distinct prime factors; stop once the run reaches num_primes.
while(consecutive < num_primes){
  #hasN <- hasn_factors(n,num_primes)
  hasN <- length(unique(factorize(n))) == num_primes # distinct prime factors of n
  if(hasN & first_term == 0){
    first_term <- n                   # a new run starts at n
    consecutive <- 1
  }else if(hasN){
    consecutive <- consecutive + 1    # the run continues
  }else{
    first_term <- 0                   # run broken: reset
    consecutive <- 0
  }
  n <- n + 1
}
print(first_term)
print(first_term) | /Problems_26_to_50/Euler047.R | permissive | lawphill/ProjectEuler | R | false | false | 929 | r | # The first two consecutive numbers to have two distinct prime factors are:
# 14 = 2 × 7
# 15 = 3 × 5
#The first three consecutive numbers to have three distinct prime factors are:
# 644 = 2^2 × 7 × 23
# 645 = 3 × 5 × 43
# 646 = 2 × 17 × 19.
# Find the first four consecutive integers to have four distinct prime factors. What is the first
# of these numbers?
# SOLVES IN ABOUT 8-9 SECONDS, USES GMP FOR EFFICIENT FACTORIZATION FUNCTION
library("gmp") # Use for factorize function
num_primes <- 4 # Number of prime factors to find
consecutive <- 0
first_term <- 0
n <- 4
while(consecutive < num_primes){
#hasN <- hasn_factors(n,num_primes)
hasN <- length(unique(factorize(n))) == num_primes
if(hasN & first_term == 0){
first_term <- n
consecutive <- 1
}else if(hasN){
consecutive <- consecutive + 1
}else{
first_term <- 0
consecutive <- 0
}
n <- n + 1
}
print(first_term) |
# Kaggle Santander 2
# predictions without models, purely based on a priori probabilities
# Scores 0.0183025 on LB using all months
# ... using May and June
# https://www.kaggle.com/operdeck/santander-product-recommendation/predictions-without-models
library(data.table)
library(fasttime)
# Read data
data_folder <- "../data"
data_colClasses <- list(character=c("ult_fec_cli_1t","indrel_1mes","conyuemp"))
train <- fread(paste(data_folder,"train_ver2.csv",sep="/"), colClasses = data_colClasses)
test <- fread(paste(data_folder,"test_ver2.csv",sep="/"), colClasses = data_colClasses)
productFlds <- names(train)[grepl("^ind_.*ult1$",names(train))] # products purchased
# Keep only the month pairs used for a-priori estimation and for scoring.
train <- train[fecha_dato %in% c("2015-05-28","2015-06-28","2016-04-28","2016-05-28"),
               c("ncodpers","fecha_dato",productFlds), with=F]
train$fecha_dato <- fastPOSIXct(train$fecha_dato)
test$fecha_dato <- fastPOSIXct(test$fecha_dato)
# Encode each date as a single running month number so that consecutive
# calendar months differ by exactly 1.
train$monthnr <- month(train$fecha_dato)+ 12*year(train$fecha_dato)-1
test$monthnr <- month(test$fecha_dato)+ 12*year(test$fecha_dato)-1
# Self-merge so previous month is next to current month
train$nextmonthnr <- 1+train$monthnr
train <- merge(train, train, by.x=c("ncodpers","monthnr"), by.y=c("ncodpers","nextmonthnr"))
# Outcomes are products in portfolio this month but not in previous
# (.x = current month columns, .y = previous month columns after the merge)
d1 <- as.matrix( train[, paste(productFlds, "x", sep="."), with=F])
d2 <- as.matrix( train[, paste(productFlds, "y", sep="."), with=F])
# A-priori probability of newly acquiring each product in a month.
aPrioris <- colSums((d1 == 1) & (is.na(d2) | (d2 == 0)), na.rm = T) / colSums(!is.na(d1) & !is.na(d2))
names(aPrioris) <- productFlds
print(aPrioris)
# Merge the test set with the last month from the train set so we can null out the
# probabilities for products already owned, otherwise set them to the a priori probabilities
test <- merge(test[, c("ncodpers","monthnr"), with=F],
              train[, c("ncodpers","nextmonthnr",paste(productFlds, "x", sep=".")), with=F],
              by.x=c("ncodpers","monthnr"), by.y=c("ncodpers","nextmonthnr"),
              all.x = T, all.y = F)
setnames(test, paste(productFlds, "x", sep="."), productFlds)
# probs: one column per customer, one row per product (owned products get 0).
probs <- apply( 1-as.matrix(test[, productFlds, with=F]), 1, "*", aPrioris)
# Just for verification, check the resulting probabilities
aPosterioris <- rowSums(apply(-probs, 2, rank, ties.method = "first") <= 7) / ncol(probs)
print(cor(aPosterioris, aPrioris))
# Create the submission file. Take only the first 7 predictions because of the map@7 evaluation
testResults <- data.frame(ncodpers = test[, ncodpers])
testResults$added_products <- apply(probs, 2, function(col) {
  paste(names(sort(rank(-col, ties.method = "first")))[1:7], collapse=" ") })
submFile <- paste(data_folder,"mysubmission.csv",sep="/")
write.csv(testResults, submFile,row.names = F, quote=F)
| /apriori/apriori.R | no_license | operdeck/santa2 | R | false | false | 2,843 | r | # Kaggle Santander 2
# predictions without models, purely based on a priori probabilities
# Scores 0.0183025 on LB using all months
# ... using May and June
# https://www.kaggle.com/operdeck/santander-product-recommendation/predictions-without-models
library(data.table)
library(fasttime)
# Read data
data_folder <- "../data"
data_colClasses <- list(character=c("ult_fec_cli_1t","indrel_1mes","conyuemp"))
train <- fread(paste(data_folder,"train_ver2.csv",sep="/"), colClasses = data_colClasses)
test <- fread(paste(data_folder,"test_ver2.csv",sep="/"), colClasses = data_colClasses)
productFlds <- names(train)[grepl("^ind_.*ult1$",names(train))] # products purchased
train <- train[fecha_dato %in% c("2015-05-28","2015-06-28","2016-04-28","2016-05-28"),
c("ncodpers","fecha_dato",productFlds), with=F]
train$fecha_dato <- fastPOSIXct(train$fecha_dato)
test$fecha_dato <- fastPOSIXct(test$fecha_dato)
train$monthnr <- month(train$fecha_dato)+ 12*year(train$fecha_dato)-1
test$monthnr <- month(test$fecha_dato)+ 12*year(test$fecha_dato)-1
# Self-merge so previous month is next to current month
train$nextmonthnr <- 1+train$monthnr
train <- merge(train, train, by.x=c("ncodpers","monthnr"), by.y=c("ncodpers","nextmonthnr"))
# Outcomes are products in portfolio this month but not in previous
d1 <- as.matrix( train[, paste(productFlds, "x", sep="."), with=F])
d2 <- as.matrix( train[, paste(productFlds, "y", sep="."), with=F])
aPrioris <- colSums((d1 == 1) & (is.na(d2) | (d2 == 0)), na.rm = T) / colSums(!is.na(d1) & !is.na(d2))
names(aPrioris) <- productFlds
print(aPrioris)
# Merge the test set with the last month from the train set so we can null out the
# probabilities for products already owned, otherwise set them to the a priori probabilities
test <- merge(test[, c("ncodpers","monthnr"), with=F],
train[, c("ncodpers","nextmonthnr",paste(productFlds, "x", sep=".")), with=F],
by.x=c("ncodpers","monthnr"), by.y=c("ncodpers","nextmonthnr"),
all.x = T, all.y = F)
setnames(test, paste(productFlds, "x", sep="."), productFlds)
probs <- apply( 1-as.matrix(test[, productFlds, with=F]), 1, "*", aPrioris)
# Just for verification, check the resulting probabilities
aPosterioris <- rowSums(apply(-probs, 2, rank, ties.method = "first") <= 7) / ncol(probs)
print(cor(aPosterioris, aPrioris))
# Create the submission file. Take only the first 7 predictions because of the map@7 evaluation
testResults <- data.frame(ncodpers = test[, ncodpers])
testResults$added_products <- apply(probs, 2, function(col) {
paste(names(sort(rank(-col, ties.method = "first")))[1:7], collapse=" ") })
submFile <- paste(data_folder,"mysubmission.csv",sep="/")
write.csv(testResults, submFile,row.names = F, quote=F)
|
\name{probNonEquiv}
\alias{probNonEquiv}
\alias{probNonEquiv,ExpressionSet-method}
\alias{probNonEquiv,list-method}
\alias{pvalTreat}
\alias{pvalTreat,ExpressionSet-method}
\alias{pvalTreat,list-method}
\title{
\code{probNonEquiv} performs a Bayesian hypothesis test for equivalence between group means.
It returns the posterior probability that |mu1-mu2|>logfc.
\code{pvalTreat} is a wrapper to \code{treat} in package \code{limma},
which returns P-values for the same hypothesis test.
}
\description{
\code{probNonEquiv} computes v_i=P(|theta_i| > logfc | data), where theta_i is
the difference between group means for gene i. This posterior
probability is based on the NNGCV model from package EBarrays, which
has a formulation similar to limma in an empirical Bayes framework.
Notice that the null hypothesis here is that |theta_i|<logfc,
e.g. isoforms with small fold changes are regarded as uninteresting.
Subsequent differential expression calls are based on selecting large
v_i. For instance, selecting v_i >= 0.95 guarantees that the posterior
expected false discovery proportion (a Bayesian FDR analog) is below 0.05.
}
\usage{
probNonEquiv(x, groups, logfc = log(2), minCount, method = "plugin", mc.cores=1)
pvalTreat(x, groups, logfc = log(2), minCount, p.adjust.method='none', mc.cores = 1)
}
\arguments{
\item{x}{ExpressionSet containing expression levels, or list of ExpressionSets}
\item{groups}{Variable in fData(x) indicating the two groups to
compare (the case with more than 2 groups is not implemented).}
\item{logfc}{Biologically relevant threshold for the log fold change,
i.e. difference between groups means in log-scale}
\item{minCount}{ If specified, probabilities are only computed for rows with \code{fData(x)$readCount >= minCount}}
\item{method}{ Set to \code{'exact'} for exact posterior probabilities
(slower), \code{'plugin'} for plug-in approximation (much faster).
Typically both give very similar results.}
\item{mc.cores}{Number of parallel processors to use. Ignored unless
\code{x} is a list.}
\item{p.adjust.method}{P-value adjustment method, passed on to \code{p.adjust}}
}
\value{
If \code{x} is a single \code{ExpressionSet}, \code{probNonEquiv}
returns a vector with posterior probabilities
(NA for rows with less than \code{minCount} reads).
\code{pvalTreat} returns TREAT P-values instead.
If \code{x} is a list of \code{ExpressionSet}, the function is applied
to each element separately and results are returned as columns in the
output matrix.
}
\seealso{
\code{treat} in package \code{limma}, \code{p.adjust}
}
\references{
Rossell D, Stephan-Otto Attolini C, Kroiss M, Stocker A. Quantifying
Alternative Splicing from Paired-End RNA-sequencing data. Annals of
Applied Statistics, 8(1):309-330
McCarthy DJ, Smyth GK. Testing significance relative to a fold-change
threshold is a TREAT. Bioinformatics, 25(6):765-771
}
\author{
Victor Pena, David Rossell
}
\examples{
#Simulate toy data
p <- 50; n <- 10
x <- matrix(rnorm(p*2*n),nrow=p)
x[(p-10):p,1:n] <- x[(p-10):p,1:n] + 1.5
x <- new("ExpressionSet",exprs=x)
x$group <- rep(c('group1','group2'),each=n)
#Posterior probabilities
pp <- probNonEquiv(x, groups='group', logfc=0.5)
d <- rowMeans(exprs(x[,1:n])) - rowMeans(exprs(x[,-1:-n]))
plot(d,pp,xlab='Observed log-FC')
abline(v=c(-.5,.5))
#Check false positives
truth <- rep(c(FALSE,TRUE),c(p-11,11))
getRoc(truth, pp>.9)
getRoc(truth, pp>.5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }% __ONLY ONE__ keyword per line
\keyword{ htest }
| /man/probNonEquiv.Rd | no_license | davidrusi/casper | R | false | false | 3,663 | rd | \name{probNonEquiv}
\alias{probNonEquiv}
\alias{probNonEquiv,ExpressionSet-method}
\alias{probNonEquiv,list-method}
\alias{pvalTreat}
\alias{pvalTreat,ExpressionSet-method}
\alias{pvalTreat,list-method}
\title{
\code{probNonEquiv} performs a Bayesian hypothesis test for equivalence between group means.
It returns the posterior probability that |mu1-mu2|>logfc.
\code{pvalTreat} is a wrapper to \code{treat} in package \code{limma},
which returns P-values for the same hypothesis test.
}
\description{
\code{probNonEquiv} computes v_i=P(|theta_i| > logfc | data), where theta_i is
the difference between group means for gene i. This posterior
probability is based on the NNGCV model from package EBarrays, which
has a formulation similar to limma in an empirical Bayes framework.
Notice that the null hypothesis here is that |theta_i|<logfc,
e.g. isoforms with small fold changes are regarded as uninteresting.
Subsequent differential expression calls are based on selecting large
v_i. For instance, selecting v_i >= 0.95 guarantees that the posterior
expected false discovery proportion (a Bayesian FDR analog) is below 0.05.
}
\usage{
probNonEquiv(x, groups, logfc = log(2), minCount, method = "plugin", mc.cores=1)
pvalTreat(x, groups, logfc = log(2), minCount, p.adjust.method='none', mc.cores = 1)
}
\arguments{
\item{x}{ExpressionSet containing expression levels, or list of ExpressionSets}
\item{groups}{Variable in fData(x) indicating the two groups to
compare (the case with more than 2 groups is not implemented).}
\item{logfc}{Biologically relevant threshold for the log fold change,
i.e. difference between groups means in log-scale}
\item{minCount}{ If specified, probabilities are only computed for rows with \code{fData(x)$readCount >= minCount}}
\item{method}{ Set to \code{'exact'} for exact posterior probabilities
(slower), \code{'plugin'} for plug-in approximation (much faster).
Typically both give very similar results.}
\item{mc.cores}{Number of parallel processors to use. Ignored unless
\code{x} is a list.}
\item{p.adjust.method}{P-value adjustment method, passed on to \code{p.adjust}}
}
\value{
If \code{x} is a single \code{ExpressionSet}, \code{probNonEquiv}
returns a vector with posterior probabilities
(NA for rows with less than \code{minCount} reads).
\code{pvalTreat} returns TREAT P-values instead.
If \code{x} is a list of \code{ExpressionSet}, the function is applied
to each element separately and results are returned as columns in the
output matrix.
}
\seealso{
\code{treat} in package \code{limma}, \code{p.adjust}
}
\references{
Rossell D, Stephan-Otto Attolini C, Kroiss M, Stocker A. Quantifying
Alternative Splicing from Paired-End RNA-sequencing data. Annals of
Applied Statistics, 8(1):309-330
McCarthy DJ, Smyth GK. Testing significance relative to a fold-change
threshold is a TREAT. Bioinformatics, 25(6):765-771
}
\author{
Victor Pena, David Rossell
}
\examples{
#Simulate toy data
p <- 50; n <- 10
x <- matrix(rnorm(p*2*n),nrow=p)
x[(p-10):p,1:n] <- x[(p-10):p,1:n] + 1.5
x <- new("ExpressionSet",exprs=x)
x$group <- rep(c('group1','group2'),each=n)
#Posterior probabilities
pp <- probNonEquiv(x, groups='group', logfc=0.5)
d <- rowMeans(exprs(x[,1:n])) - rowMeans(exprs(x[,-1:-n]))
plot(d,pp,xlab='Observed log-FC')
abline(v=c(-.5,.5))
#Check false positives
truth <- rep(c(FALSE,TRUE),c(p-11,11))
getRoc(truth, pp>.9)
getRoc(truth, pp>.5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ models }% __ONLY ONE__ keyword per line
\keyword{ htest }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_plots.R
\name{save_plots}
\alias{save_plots}
\title{Save multiple plots in one PDF.}
\usage{
save_plots(.data, ..., files, width = 8, height = 6,
bookmarks = NULL, gs.exec = "gs")
}
\arguments{
\item{.data}{a tbl.}
\item{...}{one or more list-columns where plots are stored.}
\item{files}{character vector. One file path for each column.}
\item{width}{width of the plots.}
\item{height}{height of the plots.}
\item{bookmarks}{Bookmarks to be added to the PDF. A list of columns generated by vars().
Columns will be interpreted as hierarchical groups and order matters.
Plots will be reordered according to bookmarks in the PDF.
If \code{NULL} (default), no bookmarks are added to the PDF.}
\item{gs.exec}{a path to your Ghostscript executable
(necessary to add bookmarks).}
}
\description{
Convenient function for saving multiple plots stored in a list. The function
can also add bookmarks to the created pdf files.
}
\details{
Bookmarks are added to pdf using Ghostscript, a third party program which
must be installed manually by the user. Tested on Linux only, probably not working
on Windows.
}
| /man/save_plots.Rd | no_license | fkeck/xplots | R | false | true | 1,189 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_plots.R
\name{save_plots}
\alias{save_plots}
\title{Save multiple plots in one PDF.}
\usage{
save_plots(.data, ..., files, width = 8, height = 6,
bookmarks = NULL, gs.exec = "gs")
}
\arguments{
\item{.data}{a tbl.}
\item{...}{one or more list-columns where plots are stored.}
\item{files}{character vector. One file path for each column.}
\item{width}{width of the plots.}
\item{height}{height of the plots.}
\item{bookmarks}{Bookmarks to be added to the PDF. A list of columns generated by vars().
Columns will be interpreted as hierarchical groups and order matters.
Plots will be reordered according to bookmarks in the PDF.
If \code{NULL} (default), no bookmarks are added to the PDF.}
\item{gs.exec}{a path to your Ghostscript executable
(necessary to add bookmarks).}
}
\description{
Convenient function for saving multiple plots stored in a list. The function
can also add bookmarks to the created pdf files.
}
\details{
Bookmarks are added to pdf using Ghostscript, a third party program which
must be installed manually by the user. Tested on Linux only, probably not working
on Windows.
}
|
# utilities for parsing mplus output
# Levels of nesting:
# level 1: sections
# level 2: classes
# level 3: line types
# section parser only needs to understand alternative parameterizations
# define section headers
# NOTE: these strings must match Mplus section-title lines EXACTLY (output is
# lower-cased in convert_mplus before matching); fuzzy matching was
# deliberately rejected in mplus_section_parser.
section_headers <- c("alternative parameterizations for the categorical latent variable regression",
                     "odds ratio for the alternative parameterizations for the categorical latent variable regression",
                     "quality of numerical results")
mplus_section_parser <- function(mplustxt, chunknames) {
  # Label each line of Mplus output with the section header it matches.
  #
  # mplustxt:   character vector, one element per output line.
  # chunknames: character vector of exact section-header strings.
  # Returns a character vector the same length as `mplustxt`: the header
  # string on lines that equal a header, NA_character_ everywhere else
  # (callers forward-fill the NAs with tidyr::fill()).
  #
  # Exact equality is used on purpose; str_detect() proved too fuzzy here.
  sectioncol <- rep(NA_character_, length(mplustxt))
  for (chunk in chunknames) {
    sectioncol[mplustxt == chunk] <- chunk
  }
  sectioncol
}
# takes file, converts, creates sections
# Reads an Mplus output file, labels its sections, and returns a tidy tibble
# of parsed regression parameters (with odds ratios) per reference class.
convert_mplus <- function(file, varnames) {
  # Read raw Mplus output (one text line per row), lower-cased for matching.
  out <- read.delim(file, stringsAsFactors = FALSE)
  names(out) <- "output"
  out <- tibble(output = tolower(out$output)) %>% mutate(linenumber = row_number())
  # generate section header column
  out$section <- mplus_section_parser(out$output, section_headers)
  #fill all section rows with corresponding section
  out <- out %>% tidyr::fill(section, .direction = "down")
  # discard sections which are not yet coded, create dataframe holding each of the sections (7/22/2020: odds ratios, normal coefficients)
  out <- out %>% filter(section != 'quality of numerical results')
  out_odds <- out %>% filter(section == 'odds ratio for the alternative parameterizations for the categorical latent variable regression')
  out_coef <- out %>% filter(section == 'alternative parameterizations for the categorical latent variable regression')
  # because tidytext is unsuitable for mplus output, define another chain of string splits and trimmings
  out_odds$output <- map(out_odds$output, ~mplus_line_parser(.x))
  out_coef$output <- map(out_coef$output, ~mplus_line_parser(.x))
  line_types_list <- line_classifier_options(varnames)
  out_odds <-
    out_odds %>% mutate(line_type = map_chr(out_odds$output, ~ mplus_line_classifier(.x, line_types_list))) %>% filter(line_type != "unclassified")
  out_coef <-
    out_coef %>% mutate(line_type = map_chr(out_coef$output, ~ mplus_line_classifier(.x, line_types_list))) %>% filter(line_type != "unclassified")
  # Parse the coefficient lines (attaching the odds-ratio section), then
  # convert the class columns to numeric for downstream use.
  out <- mplus_parameters_parser(out_coef, odds = out_odds) %>%
    mutate(ref_class = as.numeric(ref_class), y_class = as.numeric(str_extract(y_class, "\\d")))
  return(out)
}
# Split one raw output line into trimmed word-level tokens.
mplus_line_parser <- function(line) {
  tokens <- stringi::stri_split_boundaries(line)
  tokens <- flatten(tokens)
  str_trim(tokens, side = "both")
}
# line classifier
## define line types
# Build the line-type lookup: one regex per recognized Mplus line kind.
line_classifier_options <- function(varnames) {
  # Wrap each variable name in a regex group so they can be OR-ed together.
  grouped <- str_c("(", varnames, ")")
  type_names <- c("class_regressed", "parameters", "refclass")
  patterns <- c(
    "\\bon\\b",
    str_c(grouped, collapse = "|"),
    "parameterization using reference class.\\d"
  )
  tibble(type = type_names, regexpr = patterns)
}
## classifies lines function
# Assign a line type by testing the (re-joined) tokenized line against every
# pattern in line_types_list; "unclassified" when nothing matches.
mplus_line_classifier <- function(line, line_types_list) {
  collapsed <- str_c(line, collapse = " ")
  hits <- map2_chr(line_types_list$type, line_types_list$regexpr,
                   function(type, pattern) {
                     if (any(str_detect(collapsed, pattern))) type else NA_character_
                   })
  hits <- hits[!is.na(hits)]  # keep only the matching type(s)
  if (is_empty(hits)) {
    return("unclassified")    # character(0) means no pattern matched
  }
  hits
}
# parses input lines line_type-specific
# Walks the classified output lines in order, tracking the current reference
# class ("refclass" lines) and regressed class ("class_regressed" lines), and
# emits one result row per "parameters" line.
#
# lines_df: tibble with list-column `output` (tokenized lines) and `line_type`.
# filter:   if TRUE, drop parameterizations duplicated across reference classes.
# odds:     optional tibble of the odds-ratio section, assumed to be aligned
#           row-for-row with lines_df -- TODO confirm alignment upstream.
# Returns a tibble with ref_class/y_class/param/estimate/or/se/est_se/pval.
mplus_parameters_parser <- function(lines_df, filter = TRUE, odds = NULL) {
  # precreate df
  lines <- lines_df$output
  line_type <- lines_df$line_type
  # if Odds Ratios are wanted, include.
  if (!is.null(odds)) {
    odd_logical = TRUE
    odds_lines <- odds$output
  } else { odd_logical = FALSE }
  # one output row per "parameters" line, all columns as character for now
  df <- tibble(ref_class = character(1), y_class = character(1),
               param = character(1), estimate = character(1), or = character(1), se = character(1), est_se = character(1), pval = character(1),
               .rows = sum(line_type == "parameters"))
  p <- 1 #holds the current row for passing of parameter to df, which is unequal the current text line
  # go through line by line
  for(l in 1:length(line_type)) {
    if(line_type[l] == "refclass") {
      line_c <- lines[[l]] %>% str_c(collapse = " ")
      ref_class = str_extract(line_c, "\\d")
    }
    if(line_type[l] == "class_regressed") {
      line_c <- lines[[l]] %>% str_c(collapse = " ")
      # NOTE(review): `clustervar` is not an argument -- it must exist in the
      # calling environment; confirm it is always defined upstream.
      y_class = str_extract(line_c, str_c(clustervar, "#\\d"))
    }
    if(line_type[l] == "parameters") {
      line <- stringi::stri_remove_empty(lines[[l]])
      if (odd_logical) { odds_line <- stringi::stri_remove_empty(odds_lines[[l]]) }
      df[p,] <- tibble(ref_class = ref_class, y_class = y_class,
                       param = line[1], estimate = line[2], or = ifelse(odd_logical, odds_line[2], NA),
                       se = line[3], est_se = line[4], pval = line[5])
      p <- p+1
    }
  }
  df <- df %>% mutate_at(vars(estimate, se, est_se, pval), list(~as.numeric(.))) #convert some columns
  # reduce columns so that they do not appear twice
  if (filter == TRUE) {
    # NOTE(review): `ref_class` here is still a character scalar (e.g. "2");
    # vector() coerces it to a length -- works, but fragile.
    list_filtered <- vector(mode = "list", length = ref_class)
    list_filtered[[1]] <- filter(df, ref_class == 1)
    for (ref in seq_len(max(df$ref_class))) {
      if (ref > 1) {
        # NOTE(review): `1:ref - 1` parses as (1:ref) - 1, i.e. 0:(ref-1), so
        # the pattern also contains "c#0" -- harmless if class labels start at
        # 1, but confirm `1:(ref - 1)` was not intended.
        list_filtered[[ref]] <- filter(df, ref_class == ref) %>%
          filter(!str_detect(.$y_class, str_c("c#", 1:ref - 1, collapse = "|")))
      }
    }
    df <- list_filtered %>% bind_rows()
  }
  return(df)
}
| /mplus_parsing_utils.R | permissive | franciscowilhelm/r-collection | R | false | false | 6,018 | r | # utilities for parsing mplus output
# Levels of nesting:
# level 1: sections
# level 2: classes
# level 3: line types
# section parser only needs to understand alternative parameterizations
# define section headers
section_headers <- c("alternative parameterizations for the categorical latent variable regression",
"odds ratio for the alternative parameterizations for the categorical latent variable regression",
"quality of numerical results")
mplus_section_parser <- function(mplustxt, chunknames) {
# chunkpositions <- map(chunknames, ~str_detect(mplustxt, .x)) #too fuzzy, look for exact matches
chunkpositions <- map(chunknames, function(x) mplustxt == x) # exact matches
sectioncol <- vector(mode = "character", length = length(mplustxt))
sectioncol[sectioncol == ""] <- NA
for(chunk in seq_along(chunknames)) {
sectioncol[chunkpositions[[chunk]] == TRUE] <- chunknames[chunk]
}
return(sectioncol)
}
# takes file, converts, creates sections
convert_mplus <- function(file, varnames) {
out <- read.delim(file, stringsAsFactors = FALSE)
names(out) <- "output"
out <- tibble(output = tolower(out$output)) %>% mutate(linenumber = row_number())
# generate section header column
out$section <- mplus_section_parser(out$output, section_headers)
#fill all section rows with corresponding section
out <- out %>% tidyr::fill(section, .direction = "down")
# discard sections which are not yet coded, create dataframe holding each of the sections (7/22/2020: odds ratios, normal coefficients)
out <- out %>% filter(section != 'quality of numerical results')
out_odds <- out %>% filter(section == 'odds ratio for the alternative parameterizations for the categorical latent variable regression')
out_coef <- out %>% filter(section == 'alternative parameterizations for the categorical latent variable regression')
# because tidytext is unsutaible for mplus output, define another chain of string splits and trimmings
out_odds$output <- map(out_odds$output, ~mplus_line_parser(.x))
out_coef$output <- map(out_coef$output, ~mplus_line_parser(.x))
line_types_list <- line_classifier_options(varnames)
out_odds <-
out_odds %>% mutate(line_type = map_chr(out_odds$output, ~ mplus_line_classifier(.x, line_types_list))) %>% filter(line_type != "unclassified")
out_coef <-
out_coef %>% mutate(line_type = map_chr(out_coef$output, ~ mplus_line_classifier(.x, line_types_list))) %>% filter(line_type != "unclassified")
# leads to a weird structure of output column but ok ...
out <- list(out_coef, out_odds)
out <- mplus_parameters_parser(out[[1]], odds = out[[2]]) %>%
mutate(ref_class = as.numeric(ref_class), y_class = as.numeric(str_extract(y_class, "\\d")))
return(out)
}
# parses lines, splitting them into words/elements
mplus_line_parser <- function(line) {
stringi::stri_split_boundaries(line) %>% flatten() %>% str_trim(side = "both")
}
# line classifier
## define line types
line_classifier_options <- function(varnames) {
varnames_grouping <- str_c("(", varnames, ")")
tibble(
type = c("class_regressed",
"parameters",
"refclass"),
regexpr = c(
"\\bon\\b",
str_c(varnames_grouping, collapse = "|"),
"parameterization using reference class.\\d"
)
)
}
## classifies lines function
mplus_line_classifier <- function(line, line_types_list) {
line_c <- str_c(line, collapse = " ")
classified <- map2_chr(line_types_list$type, line_types_list$regexpr,
function(x, y) {
# om <- "om"
return(ifelse(any(str_detect(line_c, y)), x, NA))
})
classified <- classified[!is.na(classified)] #insert the one which is not NA
if(is_empty(classified)) {
classified <- "unclassified"
} #character(0) to unclassified
return(classified)
}
# parses input lines line_type-specific
mplus_parameters_parser <- function(lines_df, filter = TRUE, odds = NULL) {
# precreate df
lines <- lines_df$output
line_type <- lines_df$line_type
# if Odds Ratios are wanted, include.
if (!is.null(odds)) {
odd_logical = TRUE
odds_lines <- odds$output
} else { odd_logical = FALSE }
df <- tibble(ref_class = character(1), y_class = character(1),
param = character(1), estimate = character(1), or = character(1), se = character(1), est_se = character(1), pval = character(1),
.rows = sum(line_type == "parameters"))
p <- 1 #holds the current row for passing of parameter to df, which is unequal the current text line
# go thourhg line by line
for(l in 1:length(line_type)) {
if(line_type[l] == "refclass") {
line_c <- lines[[l]] %>% str_c(collapse = " ")
ref_class = str_extract(line_c, "\\d")
}
if(line_type[l] == "class_regressed") {
line_c <- lines[[l]] %>% str_c(collapse = " ")
y_class = str_extract(line_c, str_c(clustervar, "#\\d"))
}
if(line_type[l] == "parameters") {
line <- stringi::stri_remove_empty(lines[[l]])
if (odd_logical) { odds_line <- stringi::stri_remove_empty(odds_lines[[l]]) }
df[p,] <- tibble(ref_class = ref_class, y_class = y_class,
param = line[1], estimate = line[2], or = ifelse(odd_logical, odds_line[2], NA),
se = line[3], est_se = line[4], pval = line[5])
p <- p+1
}
}
df <- df %>% mutate_at(vars(estimate, se, est_se, pval), list(~as.numeric(.))) #convert some columns
# reduce columns so that they do not appear twice
if (filter == TRUE) {
list_filtered <- vector(mode = "list", length = ref_class)
list_filtered[[1]] <- filter(df, ref_class == 1)
for (ref in seq_len(max(df$ref_class))) {
if (ref > 1) {
list_filtered[[ref]] <- filter(df, ref_class == ref) %>%
filter(!str_detect(.$y_class, str_c("c#", 1:ref - 1, collapse = "|")))
}
}
df <- list_filtered %>% bind_rows()
}
return(df)
}
|
##BIONET ALGORITHM###
# Builds a BioNet functional module from a gene-signature file and renders it.
#
# File: optional path to a tab-separated signature with (at least) columns
#       GeneID, Pvals, coefficients; defaults to a bundled example file.
# Side effects: writes wor.pdf and d3.pdf in the working directory, and
#       assigns the node table to the global `statNet` via <<-.
# NOTE(review): simpleNetwork() (from networkD3, presumably) is called but its
#       package is never attached here -- confirm it is loaded by the caller.
D3bioNetwork<-function(File=NULL){
library(igraph)
library(BioNet)
library(DLBCL)
data(interactome)
source("geneInfoFromPortals.R")
source("sortNetwork.R")
source("rashidplotmodule.R")
if(!is.null(File))
{
logic<-read.csv(file=File,sep='\t')
}
else{
logic<-read.csv(file="files/sig_try3.tsv",sep='\t')
}
# (geneInfoFromPortals.R is sourced a second time here -- redundant but harmless)
source("geneInfoFromPortals.R")
geninfo<-geneInfoFromPortals(geneList=as.character(logic$GeneID),symbol=T,names=F)
# node labels of the form "SYMBOL(geneID)"
geneLabels<-apply(geninfo,1,function(x) paste(x[2],"(",as.integer(x[1]),")",sep=""))
pval<-as.numeric(logic$Pvals)
names(pval)<-geneLabels
logFC<-as.numeric(logic$coefficients)
names(logFC)<-geneLabels
# restrict the interactome to the signature genes and drop self-loops
subnet <- subNetwork(geneLabels, interactome)
subnet <- rmSelfLoops(subnet)
# fit a beta-uniform mixture model to the p-values, score nodes at FDR 0.1,
# and extract the highest-scoring module with the fast heuristic
system.time( fb <- fitBumModel(pval, plot = FALSE))
#err2<<-try(scoreNodes(subnet, fb, fdr = 0.1),silent=TRUE)
#if(class(err2)=="try-error"){
# output$input_error=renderText("No significant subnetwork generated.Please upload another Signature.")
# }
#else{
#output$input_error=renderText("")
system.time(scores <- scoreNodes(subnet, fb, fdr = 0.1))
#err<<-try(runFastHeinz(subnet, scores),silent=TRUE)
# if(class(err) == "try-error"){
#
#
# output$input_error=renderText("No significant subnetwork generated.Please upload another Signature.")
# stopifnot(class(err) == "try-error")
#
# }
# else{
#output$input_error=renderText("")
system.time(module <- runFastHeinz(subnet, scores))
source("rashidplotmodule.R")
# static rendering of the module (colors/shapes computed by rashidplotmodule)
pdf("wor.pdf")
colorNet<-rashidplotmodule(module, scores = scores, diff.expr = logFC)
dev.off()
library(rcytoscapejs)
# build the node table expected by the JS renderers
id <- nodes(module)
name <- id
nodeData <- data.frame(id, name, stringsAsFactors=FALSE)
nodeData$color<- rep("#00FF0F",nrow(nodeData)) #changed color of nodes
nodeData$shape <- "none" #default shape
# link each node to its NCBI gene page using the ID embedded in "SYMBOL(id)"
nodeData$href <- paste0("http://www.ncbi.nlm.nih.gov/gene/",gsub("[\\(\\)]", "", regmatches(nodeData$name, gregexpr("\\(.*?\\)", nodeData$name))))
nodeData$geneID<-gsub("[\\(\\)]", "", regmatches(nodeData$name, gregexpr("\\(.*?\\)", nodeData$name)))
nodeNameEntrez<-nodeData$name
nodeData$name<-sub(" *\\(.*", "", nodeData$name)
nodeData$Diff_Exp="none"
nodeData$score="none"
# copy per-node color, differential expression, and score from rashidplotmodule
for(i in 1:length(name)){
nodeData[i,3]<-colorNet$c[i];
nodeData[i,7]<-colorNet$d[i]
nodeData[i,8]<-colorNet$sc[i]
}
# map igraph shape names to cytoscape.js shapes (currently all ellipse)
for(i in 1:length(name)){
if(colorNet$s[i]=="csquare")
#colorNet$s[i]<-"rectangle"
colorNet$s[i]<-"ellipse"
else
colorNet$s[i]<-"ellipse"
nodeData[i,4]<-colorNet$s[i];
}
statNet<<-nodeData  # NOTE(review): global assignment; consumed elsewhere -- verify
# rebuild the edge list (source/target pairs) from the graphNEL edge list
ltn<-unlist(lapply(edgeL(module),function(x) length(x[[1]])))
source<-unlist(lapply(1:length(ltn),function(x) rep(id[x],ltn[x])))
target<-unlist(lapply(edgeL(module), function(x) id[unlist(x)]))
networkData<-data.frame(source,target)
pdf("d3.pdf")
simpleNetwork(networkData)
dev.off()
print(simpleNetwork(networkData))
} #end of bionet algorithm
D3bioNetwork<-function(File=NULL){
library(igraph)
library(BioNet)
library(DLBCL)
data(interactome)
source("geneInfoFromPortals.R")
source("sortNetwork.R")
source("rashidplotmodule.R")
if(!is.null(File))
{
logic<-read.csv(file=File,sep='\t')
}
else{
logic<-read.csv(file="files/sig_try3.tsv",sep='\t')
}
source("geneInfoFromPortals.R")
geninfo<-geneInfoFromPortals(geneList=as.character(logic$GeneID),symbol=T,names=F)
geneLabels<-apply(geninfo,1,function(x) paste(x[2],"(",as.integer(x[1]),")",sep=""))
pval<-as.numeric(logic$Pvals)
names(pval)<-geneLabels
logFC<-as.numeric(logic$coefficients)
names(logFC)<-geneLabels
subnet <- subNetwork(geneLabels, interactome)
subnet <- rmSelfLoops(subnet)
system.time( fb <- fitBumModel(pval, plot = FALSE))
#err2<<-try(scoreNodes(subnet, fb, fdr = 0.1),silent=TRUE)
#if(class(err2)=="try-error"){
# output$input_error=renderText("No significant subnetwork generated.Please upload another Signature.")
# }
#else{
#output$input_error=renderText("")
system.time(scores <- scoreNodes(subnet, fb, fdr = 0.1))
#err<<-try(runFastHeinz(subnet, scores),silent=TRUE)
# if(class(err) == "try-error"){
#
#
# output$input_error=renderText("No significant subnetwork generated.Please upload another Signature.")
# stopifnot(class(err) == "try-error")
#
# }
# else{
#output$input_error=renderText("")
system.time(module <- runFastHeinz(subnet, scores))
source("rashidplotmodule.R")
pdf("wor.pdf")
colorNet<-rashidplotmodule(module, scores = scores, diff.expr = logFC)
dev.off()
library(rcytoscapejs)
id <- nodes(module)
name <- id
nodeData <- data.frame(id, name, stringsAsFactors=FALSE)
nodeData$color<- rep("#00FF0F",nrow(nodeData)) #changed color of nodes
nodeData$shape <- "none" #default shape
nodeData$href <- paste0("http://www.ncbi.nlm.nih.gov/gene/",gsub("[\\(\\)]", "", regmatches(nodeData$name, gregexpr("\\(.*?\\)", nodeData$name))))
nodeData$geneID<-gsub("[\\(\\)]", "", regmatches(nodeData$name, gregexpr("\\(.*?\\)", nodeData$name)))
nodeNameEntrez<-nodeData$name
nodeData$name<-sub(" *\\(.*", "", nodeData$name)
nodeData$Diff_Exp="none"
nodeData$score="none"
for(i in 1:length(name)){
nodeData[i,3]<-colorNet$c[i];
nodeData[i,7]<-colorNet$d[i]
nodeData[i,8]<-colorNet$sc[i]
}
for(i in 1:length(name)){
if(colorNet$s[i]=="csquare")
#colorNet$s[i]<-"rectangle"
colorNet$s[i]<-"ellipse"
else
colorNet$s[i]<-"ellipse"
nodeData[i,4]<-colorNet$s[i];
}
statNet<<-nodeData
ltn<-unlist(lapply(edgeL(module),function(x) length(x[[1]])))
source<-unlist(lapply(1:length(ltn),function(x) rep(id[x],ltn[x])))
target<-unlist(lapply(edgeL(module), function(x) id[unlist(x)]))
networkData<-data.frame(source,target)
pdf("d3.pdf")
simpleNetwork(networkData)
dev.off()
print(simpleNetwork(networkData))
} #end of bionet algorithm |
\name{writePeaklist}
\alias{writePeaklist}
\title{
Export a .csv peak table from an MSlist object
}
\description{
Given an MSlist object containing peak picking results from \code{\link[enviPick]{mzpick}}, export a peak table.csv.
}
\usage{
writePeaklist(MSlist, directory, filename, overwrite = FALSE)
}
\arguments{
\item{MSlist}{A MSlist object generated by \code{\link[enviPick]{enviPickwrap}} or \code{\link[enviPick]{mzpick}}}
\item{directory}{Character string with the directory to write to}
\item{filename}{Name of the .csv file to create}
\item{overwrite}{TRUE/FALSE}
}
\value{
.csv table, with columns:
m/z (mean m/z of peak measurements),
var_m/z (m/z variation of peak measurements),
max_int (base-line corrected maximum intensity),
sum_int (sum of all base-line corrected peak measurement intensities),
RT (retention time at maximum intensity),
minRT (start peak RT),
maxRT (end peak RT),
peak# (peak ID number),
EIC# (EIC ID number),
Score (not yet implemented)
}
\author{Martin Loos}
| /man/writePeaklist.Rd | no_license | cran/enviPick | R | false | false | 1,072 | rd | \name{writePeaklist}
\alias{writePeaklist}
\title{
Export a .csv peak table from an MSlist object
}
\description{
Given an MSlist object containing peak picking results from \code{\link[enviPick]{mzpick}}, export a peak table.csv.
}
\usage{
writePeaklist(MSlist, directory, filename, overwrite = FALSE)
}
\arguments{
\item{MSlist}{A MSlist object generated by \code{\link[enviPick]{enviPickwrap}} or \code{\link[enviPick]{mzpick}}}
\item{directory}{Character string with the directory to write to}
\item{filename}{Name of the .csv file to create}
\item{overwrite}{TRUE/FALSE}
}
\value{
.csv table, with columns:
m/z (mean m/z of peak measurements),
var_m/z (m/z variation of peak measurements),
max_int (base-line corrected maximum intensity),
sum_int (sum of all base-line corrected peak measurement intensities),
RT (retention time at maximum intensity),
minRT (start peak RT),
maxRT (end peak RT),
peak# (peak ID number),
EIC# (EIC ID number),
Score (not yet implemented)
}
\author{Martin Loos}
|
# Roxygen documentation stub: the `hud2015` object itself is presumably stored
# under the package's data/ directory (standard R package layout) -- verify.
#' 2015 Point-in-Time (PIT) homeless counts by CoC
#'
#' This is a raw data set that contains Point-in-Time (PIT) estimates of homelessness by CoC.
#'
#' @source https://www.hudexchange.info/resource/3031/pit-and-hic-data-since-2007/
"hud2015"
| /R/hud2015.R | no_license | erinyunyou/USHomeless | R | false | false | 244 | r | #' 2015 Point-in-Time (PIT) homeless counts by CoC
#'
#' This is a raw data set that contains Point-in-Time (PIT) estimates of homelessness by CoC.
#'
#' @source https://www.hudexchange.info/resource/3031/pit-and-hic-data-since-2007/
"hud2015"
|
# Downloading and extracting the data.
# Script: downloads the household power consumption data set (once), pulls the
# rows for 1-2 Feb 2007 through a temporary SQLite table, and draws plot2.png
# (Global Active Power vs time).
if (!file.exists ("Project1_Data")) {
dir.create ("Project1_Data")
download.file ("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="Project1_Data/exdata-data-household_power_consumption.zip",
method="auto")
# NOTE(review): unzip() extracts into the current working directory, not into
# Project1_Data/ -- the later read of household_power_consumption.txt relies
# on that; confirm it is intended.
unzip ("Project1_Data/exdata-data-household_power_consumption.zip")
dateDownloaded <- date() # Saves the date the download was done.
}
# Read only 1st and 2nd Feb, 2007 data points into R.
library (RSQLite)
# NOTE(review): dbConnect("SQLite", ...) is the legacy string-driver call;
# current RSQLite expects dbConnect(RSQLite::SQLite(), dbname = ...) -- verify
# against the installed RSQLite version.
con <- dbConnect ("SQLite", dbname="household_data")
dbWriteTable (con, name="data_table", value="household_power_consumption.txt",
row.names=F, header=T, sep=";")
finalData <- dbGetQuery (con,
"SELECT * FROM data_table WHERE Date='1/2/2007' OR Date='2/2/2007'")
dbDisconnect(con)
# Convert character to date and time
finalData$Date <- strptime(paste(finalData$Date,finalData$Time), format="%d/%m/%Y %H:%M:%S")
# Delete the Time column (combined with Date now).
finalData <- finalData[,-2]
colnames(finalData)[1] <- "datetime"
## Plot 2
############################################################################
#
png (filename="plot2.png") #
plot(finalData$datetime, finalData$Global_active_power, type="l", xlab="", #
ylab="Global Active Power (kilowatts)") #
dev.off() #
#
############################################################################
# Deletes the temporary folder used to store the data.
unlink("Project1_Data", recursive=TRUE)
unlink(c("household_data.sql", "household_power_consumption.txt")) | /plot2.R | no_license | Vaskoman/ExData_Plotting1 | R | false | false | 1,950 | r | # Downloading and extracting the data.
if (!file.exists ("Project1_Data")) {
dir.create ("Project1_Data")
download.file ("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
destfile="Project1_Data/exdata-data-household_power_consumption.zip",
method="auto")
unzip ("Project1_Data/exdata-data-household_power_consumption.zip")
dateDownloaded <- date() # Saves the date the download was done.
}
# Read only 1st and 2nd Feb, 2007 data points into R.
library (RSQLite)
con <- dbConnect ("SQLite", dbname="household_data")
dbWriteTable (con, name="data_table", value="household_power_consumption.txt",
row.names=F, header=T, sep=";")
finalData <- dbGetQuery (con,
"SELECT * FROM data_table WHERE Date='1/2/2007' OR Date='2/2/2007'")
dbDisconnect(con)
# Convert character to date and time
finalData$Date <- strptime(paste(finalData$Date,finalData$Time), format="%d/%m/%Y %H:%M:%S")
# Delete the Time column (combined with Date now).
finalData <- finalData[,-2]
colnames(finalData)[1] <- "datetime"
## Plot 2
############################################################################
#
png (filename="plot2.png") #
plot(finalData$datetime, finalData$Global_active_power, type="l", xlab="", #
ylab="Global Active Power (kilowatts)") #
dev.off() #
#
############################################################################
# Deletes the temporary folder used to store the data.
unlink("Project1_Data", recursive=TRUE)
unlink(c("household_data.sql", "household_power_consumption.txt")) |
#' @include draws-class.R
NULL
#' Simulate from a model.
#'
#' Given a reference to a \code{\linkS4class{Model}} object, this function calls the
#' model's \code{simulate} function on its \code{params}. It repeats this
#' \code{nsim} times. For example, when simulating regression with a fixed
#' design, this function would generate \code{nsim} response vectors \code{y}.
#'
#' This function creates objects of class \code{\linkS4class{Draws}} and saves each to
#' file (at dir/files/model_name/r<index>.Rdata). Note: while "files" is the
#' default, the name of this directory is from getOption("simulator.files"),
#' which is the value of getOption("simulator.files") when the model was
#' created.
#'
#' If parallel is not NULL, then it must be a list containing
#' \code{socket_names}, which can either be a positive integer specifying the
#' number of copies to run on localhost or else a character vector of machine
#' names (e.g., "mycluster-0-0"). The list \code{parallel} can also contain
#' \code{libraries}, a character vector of R packages that will be needed on the
#' slaves and \code{save_locally}, a logical that indicates whether the files
#' generated should be saved on the slaves (i.e., locally) or on the master.
#'
#' @export
#' @param object an object of class \code{\linkS4class{ModelRef}} as returned by
#' \code{link{generate_model}}. Or a list of such objects. If
#' \code{object} is a \code{Simulation}, then function is applied to the
#' referenced models in that simulation and returns the same
#' \code{Simulation} object but with references added to the new draws
#' created.
#' @param nsim number of simulations to be conducted. If a scalar, then
#' value repeated for each index. Otherwise can be a vector of length
#' \code{length(index)}
#' @param index a vector of positive integer indices. Allows simulations to be
#' carried out in chunks. Each chunk gets a separate RNG stream,
#' meaning that the results will be identical whether we run these in
#' parallel or sequentially.
#' @param parallel either \code{NULL} or a list containing \code{socket_names}
#' and (optionally) \code{libraries} and \code{save_locally}
#' (see Details for more information)
#' @seealso \code{\link{load_draws}} \code{\link{generate_model}} \code{\link{run_method}}
#' @examples
#' \dontrun{
#' mref <- generate_model(".", make_my_model)
#' dref1 <- simulate_from_model(mref, nsim = 50, index = 1:2)
#' dref2 <- simulate_from_model(mref, nsim = 50, index = 3:5,
#' parallel = list(socket_names = 3))
#' }
simulate_from_model <- function(object, nsim,
                                index = 1, parallel = NULL) {
  # Resolve `object`: a Simulation contributes its model references; a
  # ModelRef (or list of them) is used directly.
  # (inherits() instead of class(x) == "..." -- robust to subclassing and to
  # objects with more than one class.)
  if (inherits(object, "Simulation"))
    model_ref <- model(object, reference = TRUE)
  else
    model_ref <- object
  if (inherits(model_ref, "list")) {
    # A list of models: recurse on each; for a Simulation, record the new
    # draw references on the simulation object.
    dref <- lapply(model_ref, simulate_from_model, nsim = nsim, index = index,
                   parallel = parallel)
    if (inherits(object, "Simulation"))
      return(invisible(add(object, dref)))
    return(invisible(dref))
  }
  # Validate index/nsim; recycle a scalar nsim across all indices, otherwise
  # sort both together so chunks are processed in index order.
  stopifnot(index == round(index), index > 0)
  stopifnot(nsim == round(nsim), nsim > 0)
  if (length(nsim) == 1) {
    nsim <- rep(nsim, length(index))
  } else {
    stopifnot(length(nsim) == length(index))
    o <- order(index)
    index <- index[o]; nsim <- nsim[o]
  }
  dir <- model_ref@dir
  model_name <- model_ref@name
  if (model_ref@simulator.files != getOption("simulator.files"))
    stop("model_ref@simulator.files must match getOption(\"simulator.files\")")
  md <- get_model_dir_and_file(dir, model_name,
                               simulator.files = model_ref@simulator.files)
  # generate L'Ecuyer seeds based on model's seed so results are identical
  # whether chunks are run sequentially or in parallel
  m <- load_model(dir, model_name, more_info = TRUE,
                  simulator.files = model_ref@simulator.files)
  model_seed <- m$rng$rng_seed # seed used to generate m$model
  seeds <- get_seeds_for_draws(model_seed, index)
  dref <- list() # a list of DrawsRef objects
  if (is.null(parallel) || length(index) == 1) {
    # simulate sequentially, one chunk per index
    for (i in seq_along(index)) {
      d <- simulate_from_model_single(m$model, nsim = nsim[i],
                                      index = index[i], seed = seeds[[i]])
      dref[[i]] <- save_draws_to_file(md$dir, model_ref, index[i], nsim[i],
                                      d$draws, d$rng, d$time[1])
    }
  } else {
    check_parallel_list(parallel)
    if (is.null(parallel$save_locally)) parallel$save_locally <- FALSE
    dref <- simulate_parallel(model_ref, nsim, index, seeds = seeds,
                              socket_names = parallel$socket_names,
                              libraries = parallel$libraries,
                              save_locally = parallel$save_locally)
  }
  if (inherits(object, "Simulation"))
    return(invisible(add(object, dref)))
  invisible(dref)
}
# Persist one chunk of draws (plus its RNG state) to r<index>.Rdata, report
# progress via catsim, and return a DrawsRef pointing at the saved file.
save_draws_to_file <- function(out_dir, model_ref, index, nsim, draws, rng,
                               time) {
  out_file <- sprintf("%s/r%s.Rdata", out_dir, index)
  save(draws, rng, file = out_file)
  rel_path <- sprintf("%s/r%s.Rdata", model_ref@name, index)
  msg <- sprintf("..Simulated %s draws in %s sec and saved in %s",
                 nsim, round(time, 2), rel_path)
  catsim(msg, fill = TRUE)
  new("DrawsRef", dir = model_ref@dir, model_name = model_ref@name,
      index = index, simulator.files = getOption("simulator.files"))
}
# Derive one L'Ecuyer-CMRG seed per requested index: stream k is the model's
# stream advanced k times with parallel::nextRNGStream, so chunk results are
# reproducible regardless of execution order.
get_seeds_for_draws <- function(model_seed, index) {
  RNGkind("L'Ecuyer-CMRG")
  streams <- vector("list", max(index))
  current <- model_seed
  for (k in seq_along(streams)) {
    current <- parallel::nextRNGStream(current)
    streams[[k]] <- current
  }
  # return only the streams that were asked for
  streams[index]
}
#' Simulate from a model.
#'
#' This is an internal function. Users should call the wrapper function
#' \code{\link{simulate_from_model}}.
#'
#' @param model a Model object
#' @param nsim number of simulations to be conducted.
#' @param index a positive integer index.
#' @param seed this is the 7 digit seed used by L'Ecuyer RNG
simulate_from_model_single <- function(model, nsim, index, seed) {
  # Draw `nsim` simulations for one chunk (`index`), seeding this chunk's
  # dedicated L'Ecuyer RNG stream first so results are reproducible.
  stopifnot(length(nsim) == 1, length(index) == 1)
  RNGkind("L'Ecuyer-CMRG")
  .Random.seed <<- seed
  # pass only the params that the model's simulate function accepts
  args <- setdiff(names(formals(model@simulate)), "nsim")
  time <- system.time({
    sims <- do.call(model@simulate, c(model@params[args], nsim = nsim))
  })
  if (length(sims) != nsim)
    stop("model's simulate function must return list of length nsim.")
  # record the stream state before and after simulating
  rng <- list(rng_seed = seed, rng_end_seed = .Random.seed)
  # name the draws "r<index>.<i>" in one vectorized step rather than copying
  # them one by one into a growing list
  names(sims) <- sprintf("r%s.%s", index, seq_len(nsim))
  # create object of class Draws
  draws <- new("Draws", name = model@name,
               label = sprintf("(Block %s:) %s draws from %s", index, nsim,
                               model@label),
               draws = sims,
               index = as.integer(index))
  validObject(draws)
  return(list(draws = draws, rng = rng, time = time))
}
#' Load one or more draws objects from file.
#'
#' After \code{\link{simulate_from_model}} has been called, this function can
#' be used to load one or more of the saved \code{\linkS4class{Draws}} object(s)
#' (along with RNG information). If multiple indices are provided, these will be combined
#' into a new single \code{\linkS4class{Draws}} object.
#'
#' @export
#' @param dir the directory passed to \code{\link{generate_model}})
#' @param model_name the Model object's \code{name} attribute
#' @param index a vector of positive integers.
#' @param more_info if TRUE, then returns additional information such as
#' state of RNG after calling \code{\link{generate_model}}
#' @param simulator.files if NULL, then \code{getOption("simulator.files")}
#' will be used.
#' @seealso \code{\link{simulate_from_model}} \code{\link{load_model}}
#' @examples
#' \dontrun{
#' # see example ?generate_model for make_my_model definition
#' mref <- generate_model(make_my_model, dir = ".")
#' dref <- simulate_from_model(mref, nsim = 50, index = 1:2)
#' load(dref) # loads Draws object with 100 entries
#' }
load_draws <- function(dir, model_name, index, more_info = FALSE,
                       simulator.files = NULL) {
  # Locate the model's directory, where the r<index>.Rdata files live.
  md <- get_model_dir_and_file(dir, model_name,
                               simulator.files = simulator.files)
  index <- sort(unique(index))
  draws_files <- sprintf("%s/r%s.Rdata", md$dir, index)
  if (length(index) == 1) {
    # Single chunk: load it and return directly (optionally with RNG info).
    env <- new.env()
    tryCatch(load(draws_files, envir = env),
             warning=function(w) stop(sprintf("Could not find draws file at %s.",
                                              draws_files)))
    draws <- env$draws
    if (more_info) return(list(draws = draws, rng = env$rng))
    else return(draws)
  }
  # Multiple chunks: concatenate their draws into one combined Draws object.
  newdraws <- rnglist <- list()
  env <- new.env()
  for (i in seq_along(index)) {
    tryCatch(load(draws_files[i], envir = env),
             warning=function(w) stop(sprintf("Could not find draws file at %s.",
                                              draws_files[i])))
    newdraws <- c(newdraws, env$draws@draws)
    rnglist[[i]] <- env$rng
  }
  indices <- paste(index, collapse = ", ")
  nsim <- length(newdraws)
  model <- load_model(dir, model_name, more_info = FALSE)
  # Coerce index to integer for the Draws object's index slot, matching how
  # simulate_from_model_single constructs Draws (callers may pass doubles,
  # e.g. c(1, 2)).
  draws <- new("Draws", name = model_name,
               label = sprintf("(Blocks %s:) %s draws from %s", indices, nsim,
                               model@label), index = as.integer(index),
               draws = newdraws)
  if (more_info)
    return(list(draws = draws, rng = rnglist))
  else
    return(draws)
}
| /R/draws.R | no_license | jasonabr/simulator | R | false | false | 9,662 | r | #' @include draws-class.R
NULL
#' Simulate from a model.
#'
#' Given a reference to a \code{\linkS4class{Model}} object, this function calls the
#' model's \code{simulate} function on its \code{params}. It repeats this
#' \code{nsim} times. For example, when simulating regression with a fixed
#' design, this function would generate \code{nsim} response vectors \code{y}.
#'
#' This function creates objects of class \code{\linkS4class{Draws}} and saves each to
#' file (at dir/files/model_name/r<index>.Rdata). Note: while "files" is the
#' default, the name of this directory is from getOption("simulator.files"),
#' which is the value of getOption("simulator.files") when the model was
#' created.
#'
#' If parallel is not NULL, then it must be a list containing
#' \code{socket_names}, which can either be a positive integer specifying the
#' number of copies to run on localhost or else a character vector of machine
#' names (e.g., "mycluster-0-0"). The list \code{parallel} can also contain
#' \code{libraries}, a character vector of R packages that will be needed on the
#' slaves and \code{save_locally}, a logical that indicates whether the files
#' generated should be saved on the slaves (i.e., locally) or on the master.
#'
#' @export
#' @param object an object of class \code{\linkS4class{ModelRef}} as returned by
#' \code{link{generate_model}}. Or a list of such objects. If
#' \code{object} is a \code{Simulation}, then function is applied to the
#' referenced models in that simulation and returns the same
#' \code{Simulation} object but with references added to the new draws
#' created.
#' @param nsim number of simulations to be conducted. If a scalar, then
#' value repeated for each index. Otherwise can be a vector of length
#' \code{length(index)}
#' @param index a vector of positive integer indices. Allows simulations to be
#' carried out in chunks. Each chunk gets a separate RNG stream,
#' meaning that the results will be identical whether we run these in
#' parallel or sequentially.
#' @param parallel either \code{NULL} or a list containing \code{socket_names}
#' and (optionally) \code{libraries} and \code{save_locally}
#' (see Details for more information)
#' @seealso \code{\link{load_draws}} \code{\link{generate_model}} \code{\link{run_method}}
#' @examples
#' \dontrun{
#' mref <- generate_model(make_my_model, dir = ".")
#' dref1 <- simulate_from_model(mref, nsim = 50, index = 1:2)
#' dref2 <- simulate_from_model(mref, nsim = 50, index = 3:5,
#' parallel = list(socket_names = 3))
#' }
simulate_from_model <- function(object, nsim,
                                index = 1, parallel = NULL) {
  # A Simulation carries references to its models; extract them so the rest
  # of the function only deals with ModelRef objects (or lists thereof).
  # inherits() is preferred over class(x) == "..." so subclasses are
  # recognized as well.
  if (inherits(object, "Simulation"))
    model_ref <- model(object, reference = TRUE)
  else
    model_ref <- object
  if (inherits(model_ref, "list")) {
    # A list of model references: recurse over each element.
    dref <- lapply(model_ref, simulate_from_model, nsim = nsim, index = index,
                   parallel = parallel)
    if (inherits(object, "Simulation"))
      return(invisible(add(object, dref)))
    return(invisible(dref))
  }
  stopifnot(index == round(index), index > 0)
  stopifnot(nsim == round(nsim), nsim > 0)
  # Recycle a scalar nsim across all indices; otherwise keep nsim aligned
  # with the (sorted) index vector.
  if (length(nsim) == 1) {
    nsim <- rep(nsim, length(index))
  } else {
    stopifnot(length(nsim) == length(index))
    o <- order(index)
    index <- index[o]; nsim <- nsim[o]
  }
  dir <- model_ref@dir
  model_name <- model_ref@name
  if (model_ref@simulator.files != getOption("simulator.files"))
    stop("model_ref@simulator.files must match getOption(\"simulator.files\")")
  md <- get_model_dir_and_file(dir, model_name,
                               simulator.files = model_ref@simulator.files)
  # generate L'Ecuyer seeds based on model's seed, one RNG stream per index,
  # so results match whether chunks run sequentially or in parallel
  m <- load_model(dir, model_name, more_info = TRUE,
                  simulator.files = model_ref@simulator.files)
  model_seed <- m$rng$rng_seed # seed used to generate m$model
  seeds <- get_seeds_for_draws(model_seed, index)
  dref <- list() # a list of DrawsRef objects
  if (is.null(parallel) || length(index) == 1) {
    # simulate sequentially, one chunk per index; seq_along() is the safe
    # form of seq(length(index))
    for (i in seq_along(index)) {
      d <- simulate_from_model_single(m$model, nsim = nsim[i],
                                      index = index[i], seed = seeds[[i]])
      dref[[i]] <- save_draws_to_file(md$dir, model_ref, index[i], nsim[i],
                                      d$draws, d$rng, d$time[1])
    }
  } else {
    check_parallel_list(parallel)
    if (is.null(parallel$save_locally)) parallel$save_locally <- FALSE
    dref <- simulate_parallel(model_ref, nsim, index, seeds = seeds,
                              socket_names = parallel$socket_names,
                              libraries = parallel$libraries,
                              save_locally = parallel$save_locally)
  }
  if (inherits(object, "Simulation"))
    return(invisible(add(object, dref)))
  invisible(dref)
}
# Persist one chunk of draws (plus its RNG state) to r<index>.Rdata under
# out_dir, report progress via catsim, and return a DrawsRef handle.
save_draws_to_file <- function(out_dir, model_ref, index, nsim, draws, rng,
                               time) {
  save(draws, rng, file = sprintf("%s/r%s.Rdata", out_dir, index))
  # Relative path shown to the user in the progress message.
  rel_path <- sprintf("%s/r%s.Rdata", model_ref@name, index)
  catsim(sprintf("..Simulated %s draws in %s sec and saved in %s", nsim,
                 round(time, 2), rel_path), fill = TRUE)
  # Hand back a lightweight reference to the saved draws.
  new("DrawsRef", dir = model_ref@dir, model_name = model_ref@name,
      index = index, simulator.files = getOption("simulator.files"))
}
# Derive one L'Ecuyer-CMRG RNG stream per requested chunk index, relative to
# the stream that generated the model.  Stream i is obtained by applying
# parallel::nextRNGStream() i times to model_seed.
get_seeds_for_draws <- function(model_seed, index) {
  RNGkind("L'Ecuyer-CMRG")
  # Walk the stream sequence far enough to cover the largest chunk index.
  num_streams <- max(index)
  streams <- vector("list", num_streams)
  current <- model_seed
  for (i in seq_len(num_streams)) {
    current <- parallel::nextRNGStream(current)
    streams[[i]] <- current
  }
  # Keep only the streams for the requested chunk indices (order preserved).
  streams[index]
}
#' Simulate from a model.
#'
#' This is an internal function. Users should call the wrapper function
#' \code{\link{simulate_from_model}}.
#'
#' @param model a Model object
#' @param nsim number of simulations to be conducted.
#' @param index a positive integer index.
#' @param seed the 7-integer \code{.Random.seed} vector (as produced by the
#'        L'Ecuyer-CMRG RNG via \code{parallel::nextRNGStream})
simulate_from_model_single <- function(model, nsim, index, seed) {
  stopifnot(length(nsim) == 1, length(index) == 1)
  # Restore the L'Ecuyer stream assigned to this chunk so results are
  # reproducible regardless of execution order (sequential or parallel).
  RNGkind("L'Ecuyer-CMRG")
  .Random.seed <<- seed
  # Pass only the model params that the model's simulate function declares
  # (everything except the nsim argument, which we supply explicitly).
  args <- setdiff(names(formals(model@simulate)), "nsim")
  time <- system.time({
    sims1 <- do.call(model@simulate, c(model@params[args], nsim = nsim))
  })
  if (length(sims1) != nsim)
    stop("model's simulate function must return list of length nsim.")
  # Record the RNG state before and after, so these draws can be reproduced
  # or extended later.
  rng <- list(rng_seed = seed, rng_end_seed = .Random.seed)
  sims <- list()
  # Name each draw r<index>.<i>; seq_len() (rather than seq()) is safe even
  # in the nsim == 0 edge case.
  for (i in seq_len(nsim))
    sims[[sprintf("r%s.%s", index, i)]] <- sims1[[i]]
  rm(sims1)
  # create object of class Draws
  draws <- new("Draws", name = model@name,
               label = sprintf("(Block %s:) %s draws from %s", index, nsim,
                               model@label),
               draws = sims,
               index = as.integer(index))
  validObject(draws)
  return(list(draws = draws, rng = rng, time = time))
}
#' Load one or more draws objects from file.
#'
#' After \code{\link{simulate_from_model}} has been called, this function can
#' be used to load one or more of the saved \code{\linkS4class{Draws}} object(s)
#' (along with RNG information). If multiple indices are provided, these will be combined
#' into a new single \code{\linkS4class{Draws}} object.
#'
#' @export
#' @param dir the directory passed to \code{\link{generate_model}}
#' @param model_name the Model object's \code{name} attribute
#' @param index a vector of positive integers.
#' @param more_info if TRUE, then returns additional information such as
#' state of RNG after calling \code{\link{generate_model}}
#' @param simulator.files if NULL, then \code{getOption("simulator.files")}
#' will be used.
#' @seealso \code{\link{simulate_from_model}} \code{\link{load_model}}
#' @examples
#' \dontrun{
#' # see example ?generate_model for make_my_model definition
#' mref <- generate_model(make_my_model, dir = ".")
#' dref <- simulate_from_model(mref, nsim = 50, index = 1:2)
#' load(dref) # loads Draws object with 100 entries
#' }
load_draws <- function(dir, model_name, index, more_info = FALSE,
                       simulator.files = NULL) {
  # Locate the model's directory, where the r<index>.Rdata files live.
  md <- get_model_dir_and_file(dir, model_name,
                               simulator.files = simulator.files)
  index <- sort(unique(index))
  draws_files <- sprintf("%s/r%s.Rdata", md$dir, index)
  if (length(index) == 1) {
    # Single chunk: load it and return directly (optionally with RNG info).
    env <- new.env()
    tryCatch(load(draws_files, envir = env),
             warning=function(w) stop(sprintf("Could not find draws file at %s.",
                                              draws_files)))
    draws <- env$draws
    if (more_info) return(list(draws = draws, rng = env$rng))
    else return(draws)
  }
  # Multiple chunks: concatenate their draws into one combined Draws object.
  newdraws <- rnglist <- list()
  env <- new.env()
  for (i in seq_along(index)) {
    tryCatch(load(draws_files[i], envir = env),
             warning=function(w) stop(sprintf("Could not find draws file at %s.",
                                              draws_files[i])))
    newdraws <- c(newdraws, env$draws@draws)
    rnglist[[i]] <- env$rng
  }
  indices <- paste(index, collapse = ", ")
  nsim <- length(newdraws)
  model <- load_model(dir, model_name, more_info = FALSE)
  # Coerce index to integer for the Draws object's index slot, matching how
  # simulate_from_model_single constructs Draws (callers may pass doubles,
  # e.g. c(1, 2)).
  draws <- new("Draws", name = model_name,
               label = sprintf("(Blocks %s:) %s draws from %s", indices, nsim,
                               model@label), index = as.integer(index),
               draws = newdraws)
  if (more_info)
    return(list(draws = draws, rng = rnglist))
  else
    return(draws)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AEI.R
\name{AEI}
\alias{AEI}
\title{Augmented Expected Improvement}
\usage{
AEI(x, model, new.noise.var = 0, y.min = NULL, type = "UK", envir = NULL)
}
\arguments{
\item{x}{the input vector at which one wants to evaluate the criterion}
\item{model}{a Kriging model of "km" class}
\item{new.noise.var}{the (scalar) noise variance of the future observation.}
\item{y.min}{The kriging predictor at the current best point (point with
smallest kriging quantile). If not provided, this quantity is evaluated.}
\item{type}{Kriging type: "SK" or "UK"}
\item{envir}{environment for saving intermediate calculations and reusing
them within AEI.grad}
}
\value{
Augmented Expected Improvement
}
\description{
Evaluation of the Augmented Expected Improvement (AEI) criterion, which is
a modification of the classical EI criterion for noisy functions. The AEI
consists of the regular EI multiplied by a penalization function that
accounts for the diminishing payoff of observation replicates. The current
minimum y.min is chosen as the kriging predictor of the observation with
smallest kriging quantile.
}
\examples{
##########################################################################
### AEI SURFACE ASSOCIATED WITH AN ORDINARY KRIGING MODEL ####
### OF THE BRANIN FUNCTION KNOWN AT A 12-POINT LATIN HYPERCUBE DESIGN ####
##########################################################################
set.seed(421)
# Set test problem parameters
doe.size <- 12
dim <- 2
test.function <- get("branin2")
lower <- rep(0,1,dim)
upper <- rep(1,1,dim)
noise.var <- 0.2
# Generate DOE and response
doe <- as.data.frame(matrix(runif(doe.size*dim),doe.size))
y.tilde <- rep(0, 1, doe.size)
for (i in 1:doe.size) {
y.tilde[i] <- test.function(doe[i,]) + sqrt(noise.var)*rnorm(n=1)
}
y.tilde <- as.numeric(y.tilde)
# Create kriging model
model <- km(y~1, design=doe, response=data.frame(y=y.tilde),
covtype="gauss", noise.var=rep(noise.var,1,doe.size),
lower=rep(.1,dim), upper=rep(1,dim), control=list(trace=FALSE))
# Compute actual function and criterion on a grid
n.grid <- 12 # Change to 21 for a nicer picture
x.grid <- y.grid <- seq(0,1,length=n.grid)
design.grid <- expand.grid(x.grid, y.grid)
nt <- nrow(design.grid)
crit.grid <- rep(0,1,nt)
func.grid <- rep(0,1,nt)
crit.grid <- apply(design.grid, 1, AEI, model=model, new.noise.var=noise.var)
func.grid <- apply(design.grid, 1, test.function)
# Compute kriging mean and variance on a grid
names(design.grid) <- c("V1","V2")
pred <- predict.km(model, newdata=design.grid, type="UK")
mk.grid <- pred$m
sk.grid <- pred$sd
# Plot actual function
z.grid <- matrix(func.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Actual function");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot Kriging mean
z.grid <- matrix(mk.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Kriging mean");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot Kriging variance
z.grid <- matrix(sk.grid^2, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Kriging variance");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot AEI criterion
z.grid <- matrix(crit.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("AEI");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
}
\references{
D. Huang, T.T. Allen, W.I. Notz, and N. Zeng (2006), Global Optimization of
Stochastic Black-Box Systems via Sequential Kriging Meta-Models,
\emph{Journal of Global Optimization}, 34, 441-466.
}
\author{
Victor Picheny
David Ginsbourger
}
\keyword{models}
| /man/AEI.Rd | no_license | ProgramMonkey-soso/DiceOptim | R | false | true | 4,039 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AEI.R
\name{AEI}
\alias{AEI}
\title{Augmented Expected Improvement}
\usage{
AEI(x, model, new.noise.var = 0, y.min = NULL, type = "UK", envir = NULL)
}
\arguments{
\item{x}{the input vector at which one wants to evaluate the criterion}
\item{model}{a Kriging model of "km" class}
\item{new.noise.var}{the (scalar) noise variance of the future observation.}
\item{y.min}{The kriging predictor at the current best point (point with
smallest kriging quantile). If not provided, this quantity is evaluated.}
\item{type}{Kriging type: "SK" or "UK"}
\item{envir}{environment for saving intermediate calculations and reusing
them within AEI.grad}
}
\value{
Augmented Expected Improvement
}
\description{
Evaluation of the Augmented Expected Improvement (AEI) criterion, which is
a modification of the classical EI criterion for noisy functions. The AEI
consists of the regular EI multiplied by a penalization function that
accounts for the diminishing payoff of observation replicates. The current
minimum y.min is chosen as the kriging predictor of the observation with
smallest kriging quantile.
}
\examples{
##########################################################################
### AEI SURFACE ASSOCIATED WITH AN ORDINARY KRIGING MODEL ####
### OF THE BRANIN FUNCTION KNOWN AT A 12-POINT LATIN HYPERCUBE DESIGN ####
##########################################################################
set.seed(421)
# Set test problem parameters
doe.size <- 12
dim <- 2
test.function <- get("branin2")
lower <- rep(0,1,dim)
upper <- rep(1,1,dim)
noise.var <- 0.2
# Generate DOE and response
doe <- as.data.frame(matrix(runif(doe.size*dim),doe.size))
y.tilde <- rep(0, 1, doe.size)
for (i in 1:doe.size) {
y.tilde[i] <- test.function(doe[i,]) + sqrt(noise.var)*rnorm(n=1)
}
y.tilde <- as.numeric(y.tilde)
# Create kriging model
model <- km(y~1, design=doe, response=data.frame(y=y.tilde),
covtype="gauss", noise.var=rep(noise.var,1,doe.size),
lower=rep(.1,dim), upper=rep(1,dim), control=list(trace=FALSE))
# Compute actual function and criterion on a grid
n.grid <- 12 # Change to 21 for a nicer picture
x.grid <- y.grid <- seq(0,1,length=n.grid)
design.grid <- expand.grid(x.grid, y.grid)
nt <- nrow(design.grid)
crit.grid <- rep(0,1,nt)
func.grid <- rep(0,1,nt)
crit.grid <- apply(design.grid, 1, AEI, model=model, new.noise.var=noise.var)
func.grid <- apply(design.grid, 1, test.function)
# Compute kriging mean and variance on a grid
names(design.grid) <- c("V1","V2")
pred <- predict.km(model, newdata=design.grid, type="UK")
mk.grid <- pred$m
sk.grid <- pred$sd
# Plot actual function
z.grid <- matrix(func.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Actual function");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot Kriging mean
z.grid <- matrix(mk.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Kriging mean");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot Kriging variance
z.grid <- matrix(sk.grid^2, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("Kriging variance");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
# Plot AEI criterion
z.grid <- matrix(crit.grid, n.grid, n.grid)
filled.contour(x.grid,y.grid, z.grid, nlevels=50, color = rainbow,
plot.axes = {title("AEI");
points(model@X[,1],model@X[,2],pch=17,col="blue");
axis(1); axis(2)})
}
\references{
D. Huang, T.T. Allen, W.I. Notz, and N. Zeng (2006), Global Optimization of
Stochastic Black-Box Systems via Sequential Kriging Meta-Models,
\emph{Journal of Global Optimization}, 34, 441-466.
}
\author{
Victor Picheny
David Ginsbourger
}
\keyword{models}
|
### prepare data for treemap

## set your own working dir
setwd('/home/jc/Bureau/GreenTech_Challenge/')

# Load the groundwater pesticide measurements and the pesticide catalogue.
# (The paths contain literal spaces; the previous shell-style "\ " escapes
# are not valid R string escapes and made the script unparseable.)
ma <- read.csv('DATA/Moyennes_analyses_pesticides dans eaux souterraines_HISTORIQUE/fichiers csv/ma_qp_fm_ttres_pesteso_2012_utf.csv',
               sep=';', header=TRUE, na.strings=c("–", "-",""))
str(ma)
pests <- read.csv('DATA/Pesticides/pesticides_utf.csv', sep=';', header=TRUE,
                  na.strings=c("–", "-",""))
str(pests)

# Convert the comma-decimal MA_MOY column to numeric.  The previous trailing
# index `[ma$MA_MOY]` re-indexed the converted vector by the factor's integer
# codes, scrambling values across rows; converting the character
# representation directly fixes that.
ma$MA_MOY <- as.numeric(sub(",", ".", as.character(ma$MA_MOY)))

## aggregate values: total mean concentration per pesticide
agg <- aggregate(ma[, "MA_MOY"], by = list(ma$LB_PARAMETRE), "sum")
names(agg) <- c("LB_PARAMETRE", "ma_tot")

# Attach family/function codes for each matched pesticide.
# NOTE(review): cbind assumes pests[matchvec, ] rows line up one-to-one and
# in the same order as agg rows; a merge() on LB_PARAMETRE would be safer --
# confirm against the data.
matchvec <- which(pests$LB_PARAMETRE %in% agg$LB_PARAMETRE)
agg <- cbind(agg, pests[matchvec, c("CODE_FAMILLE", "CODE_FONCTION")])

write.csv(agg, "treemap/ma_tot_bypests_2012.csv")
| /treemap/treemap_prep_data.r | no_license | KirosG/GreenTech-Challenge | R | false | false | 834 | r | ### prepare data for treemap
## set your own working dir
setwd('/home/jc/Bureau/GreenTech_Challenge/')

# Load the groundwater pesticide measurements and the pesticide catalogue.
# (The paths contain literal spaces; the previous shell-style "\ " escapes
# are not valid R string escapes and made the script unparseable.)
ma <- read.csv('DATA/Moyennes_analyses_pesticides dans eaux souterraines_HISTORIQUE/fichiers csv/ma_qp_fm_ttres_pesteso_2012_utf.csv',
               sep=';', header=TRUE, na.strings=c("–", "-",""))
str(ma)
pests <- read.csv('DATA/Pesticides/pesticides_utf.csv', sep=';', header=TRUE,
                  na.strings=c("–", "-",""))
str(pests)

# Convert the comma-decimal MA_MOY column to numeric.  The previous trailing
# index `[ma$MA_MOY]` re-indexed the converted vector by the factor's integer
# codes, scrambling values across rows; converting the character
# representation directly fixes that.
ma$MA_MOY <- as.numeric(sub(",", ".", as.character(ma$MA_MOY)))

## aggregate values: total mean concentration per pesticide
agg <- aggregate(ma[, "MA_MOY"], by = list(ma$LB_PARAMETRE), "sum")
names(agg) <- c("LB_PARAMETRE", "ma_tot")

# Attach family/function codes for each matched pesticide.
# NOTE(review): cbind assumes pests[matchvec, ] rows line up one-to-one and
# in the same order as agg rows; a merge() on LB_PARAMETRE would be safer --
# confirm against the data.
matchvec <- which(pests$LB_PARAMETRE %in% agg$LB_PARAMETRE)
agg <- cbind(agg, pests[matchvec, c("CODE_FAMILLE", "CODE_FONCTION")])

write.csv(agg, "treemap/ma_tot_bypests_2012.csv")
\name{column_anno_barplot}
\alias{column_anno_barplot}
\title{
Column annotation which is represented as barplots
}
\description{
Column annotation which is represented as barplots
}
\usage{
column_anno_barplot(...)}
\arguments{
\item{...}{pass to \code{\link{anno_barplot}}}
}
\details{
A wrapper around \code{\link{anno_barplot}} with \code{which} pre-defined as \code{column}.
}
| /man/column_anno_barplot.rd | no_license | Yixf-Self/ComplexHeatmap | R | false | false | 381 | rd | \name{column_anno_barplot}
\alias{column_anno_barplot}
\title{
Column annotation which is represented as barplots
}
\description{
Column annotation which is represented as barplots
}
\usage{
column_anno_barplot(...)}
\arguments{
\item{...}{pass to \code{\link{anno_barplot}}}
}
\details{
A wrapper around \code{\link{anno_barplot}} with \code{which} pre-defined as \code{column}.
}
|
#
#
# Use gradient boosted tree's
#
# Rev1 - Try to use a small subset of data
#
#
#
# Rev2 - Try to use all data and train over the set
#
#
# Rev3 - predict the sum
#
# Rev4 - Use the time series to predict the output and % change.
#
# Submission Info:
# Fri, 01 Jun 2012 05:13:23
# GBM + no negatives
# RMLSE = 0.76373
#
# Note, we need 6000 tree's to drive down, but can use far less to get a first hand estimate
# The predicted RMLSE from "computeRMSLE" is [1] 0.7337036
# So that is pretty accurate.
#
# When using 1000 tree (earliest convergence) the computedRMLSE is
# 1.112594
#
# When using more inputs,
#
# Start from a clean workspace; this script is meant to be run standalone.
rm(list=ls())
# Load the gradient-boosted-trees package.
# NOTE(review): require() returns FALSE instead of erroring when gbm is
# missing; library(gbm) would fail fast -- confirm preferred behavior.
require(gbm)
# Root mean squared log error (RMSLE) between predictions and actuals, for
# evaluating on the training set.  Negative entries in either vector are
# clamped to zero before the log(1 + x) transform.
computeRMSLE <- function(Ysimulated, Yreal) {
  # clamp negatives to zero so the log transform is defined
  pred <- pmax(Ysimulated, 0)
  actual <- pmax(Yreal, 0)
  # squared differences of log(1 + x), averaged, then rooted
  diffs <- log(pred + 1) - log(actual + 1)
  sqrt(sum(diffs^2) / length(actual))
}
# Mean negative Bernoulli log-likelihood (log loss) of predicted
# probabilities Ysimulated against 0/1 outcomes Yreal.  Probabilities of
# exactly 0 or 1 produce infinite/NaN terms (no clipping is applied).
computeLogisticalError <- function(Ysimulated, Yreal) {
  ll <- Yreal * log(Ysimulated) + (1 - Yreal) * log(1 - Ysimulated)
  -sum(ll) / length(Yreal)
}
### Clean and make right category
#
# Impute and retype the columns of a data frame for gbm, driven by the
# column-name prefix:
#   "Cat"  -> NAs become the level "Unknown", column coerced to factor
#   "Quan" -> NAs become the column mean, column coerced to numeric
#   "Date" -> NAs become the column mean, column coerced to numeric
# With forceQuan = TRUE every column is treated as "Quan".
# Progress (column name and whether the result is a factor) is printed.
# NOTE(review): the prefix match is case-sensitive, so a column such as
# "CAT_ticket" matches no branch and is left untouched -- confirm intended.
cleanInputDataForGBM <- function(X, forceQuan = FALSE) {
  # seq_along() (rather than 1:length(X)) is safe for zero-column input
  for (i in seq_along(X)) {
    name <- names(X)[i]
    print(name)
    col <- X[, i]
    index <- which(is.na(col))
    if (substr(name, 1, 3) == 'Cat' && forceQuan != TRUE) {
      col[index] <- "Unknown"
      X[, i] <- as.factor(col)
    }
    if (substr(name, 1, 4) == 'Quan' || forceQuan == TRUE) {
      column_mean <- mean(col, na.rm = TRUE)
      col[index] <- column_mean
      X[, i] <- as.numeric(col)
    }
    if (substr(name, 1, 4) == 'Date' && forceQuan != TRUE) {
      column_mean <- mean(col, na.rm = TRUE)
      col[index] <- column_mean
      X[, i] <- as.numeric(col)
    }
    result <- is.factor(X[, i])
    print(result)
  }
  return(X)
}
# Coerce every column of X to numeric, printing each column name and whether
# the result is a factor (always FALSE after coercion).  Factor columns
# become their integer level codes; character columns are parsed as numbers
# (NAs, with a warning, where parsing fails).
cleanInputAsNumeric <- function(X) {
  # seq_along() (rather than 1:length(X)) is safe for zero-column input
  for (i in seq_along(X)) {
    name <- names(X)[i]
    print(name)
    col <- X[, i]
    X[, i] <- as.numeric(col)
    result <- is.factor(X[, i])
    print(result)
  }
  return(X)
}
#idxCat <- c(13,558)
# Predictor columns are 2..11 of the csv; column 1 (Cat_survived) is the label.
idxCat <- c(2,11) #31st column is messed,
#col = c("Cat_survived","Cat_pclass","Cat_name","Cat_sex","Quant_age","Cat_sibsp","Cat_parch","CAT_ticket","Quant_fare","Cat_cabin","Cat_embarked")
col = c("Cat_survived","Cat_pclass","Cat_name","Cat_sex","Quant_age","Cat_sibsp","Quant_parch","CAT_ticket","Quant_fare","Cat_cabin","Cat_embarked")
# Load the training data and impute/retype columns by their name prefix.
training <- read.csv(file="train.csv",header=TRUE, sep=",", col.names=col)
Xtrain <- training[, idxCat[1] : idxCat[2] ]
XtrainClean = cleanInputDataForGBM(Xtrain)
## Create levelsets for the NA's that are factors. If numeric then abort if there is an NA
## Now run Test Data set, clean and continue.
test <- read.csv(file="test.csv",header=TRUE, sep=",", col.names=col[idxCat[1] : idxCat[2]])
Xtest <- test
XtestClean = cleanInputDataForGBM(Xtest)
## GBM Parameters
ntrees <- 6000
depth <- 5
minObs <- 10
shrink <- 0.0005
folds <- 5
Ynames <- c(names(training)[1])
## Setup variables.
ntestrows = nrow(XtestClean)
ntrainrows = nrow(XtrainClean)
# NOTE(review): ncol = 13 looks like a leftover from another dataset; only
# the single column named by Ynames is ever used -- confirm.
Yhattest = matrix(nrow = ntestrows , ncol = 13, dimnames = list (1:ntestrows,Ynames ) )
Yhattrain = matrix(nrow = ntrainrows , ncol = 13, dimnames = list (1:ntrainrows,Ynames ) )
## Density
#Y <- training[,1:12]
#Ysum <- rowSums ( Y, na.rm=TRUE)
#plot(1:12, Y[2,] )
#
# Correlations
# This is as we expected, the top category is male/female
# followed by cabin class. The least correlated is name parch?
ytraincorr <- training[,1]
ytraincorr[is.na(ytraincorr)] <- 0.0
xtraincorrIn <- training[, idxCat[1] : idxCat[2] ]
# forceQuan = TRUE: coerce every predictor to numeric so cor() works.
xtraincorr = cleanInputDataForGBM(xtraincorrIn, TRUE)
C2 = cor(xtraincorr, ytraincorr)
C2 [ is.na(C2)] <- 0.0
sort(C2)
print(C2)
maxV = max(abs(C2))
which( C2 == maxV, arr.ind = TRUE )
which( C2 == -1*maxV, arr.ind = TRUE )
start=date()
start
# Columns of XtrainClean used as predictors: pclass, sex, age, sibsp,
# parch, fare, embarked (indices into the cleaned predictor frame).
trainCols = c(1,3:6,8,10)
X = cbind(XtrainClean[trainCols] )
nColsOutput = 12
Y <- as.numeric(training[,1])
gdata <- cbind(Y,X)
# Fit a Bernoulli (0/1 survival) gradient boosted model with CV folds.
mo1gbm <- gbm(Y~.,
data=gdata,
distribution = "bernoulli",
n.trees = ntrees,
shrinkage = shrink,
cv.folds = folds,
verbose = TRUE)
gbm.perf(mo1gbm,method="cv")
sqrt(min(mo1gbm$cv.error))
which.min(mo1gbm$cv.error)
# Predicted survival probabilities for the train and test sets.
Yhattrain <- predict.gbm(mo1gbm, newdata=XtrainClean[trainCols], n.trees = ntrees, type="response")
Yhattest <- predict.gbm(mo1gbm, newdata=XtestClean[trainCols], n.trees = ntrees, type="response")
gc()
end = date()
end
## Calculate total training error
YhattrainRMLSE <- Yhattrain
YtrainRMLSE <- as.matrix(training[,1])
loge <- computeLogisticalError(YhattrainRMLSE, YtrainRMLSE)
loge
# Calculate how many correct % (leaders are 98%)
# Binarize the predicted probabilities at the 0.5 threshold.
YhattrainBool <- as.numeric(YhattrainRMLSE)
levelT <- 0.50
YhattrainBool[ which(YhattrainBool <= levelT) ] <- 0
YhattrainBool[ which(YhattrainBool >= levelT) ] <- 1
total <- length (YhattrainBool)
length ( which(YhattrainBool == 1) )
length ( which(YhattrainBool == 0) )
correct <- length ( which(YhattrainBool == Y) )
#.787 correlations
precentCorr <-correct/total
precentCorr
write.csv(YhattrainBool, "titanic_1_gbm_train.csv", row.names=FALSE)
Yhattest
YhattestBool = as.numeric(Yhattest)
YhattestBool[ which(YhattestBool <= levelT) ] <- 0
YhattestBool[ which(YhattestBool >= levelT) ] <- 1
write.csv(YhattestBool, "titanic_1_gbm_test.csv", row.names=FALSE)
#########################################################
# Extra's
########################################################
# 1. Which columns look like other columns
# Take the correlatoin, and find where its greater that 0.9999
# Of course remove the 1 correlaion
# You must set EACH column to a numeric one
# Finally the 'diff' returns where its not a diagonol
# TODO return the exact columnnames
trainingMatrix = as.matrix( training )
trainingMatrix = cleanInputAsNumeric( training)
trainingMatrix[is.na(trainingMatrix)] <- 0.0
corr <- cor(trainingMatrix)
idx <- which(corr > 0.9999, arr.ind = TRUE)
idxCopy <- idx[ apply(idx, 1, diff) > 0, ]
# 2.
#
#
#
| /src/Titanic/titanic_gbm_1.r | no_license | jsrawan-mobo/mrbigdata | R | false | false | 6,401 | r | #
#
# Use gradient boosted tree's
#
# Rev1 - Try to use a small subset of data
#
#
#
# Rev2 - Try to use all data and train over the set
#
#
# Rev3 - predict the sum
#
# Rev4 - Use the time series to predict the output and % change.
#
# Submission Info:
# Fri, 01 Jun 2012 05:13:23
# GBM + no negatives
# RMLSE = 0.76373
#
# Note, we need 6000 tree's to drive down, but can use far less to get a first hand estimate
# The predicted RMLSE from "computeRMSLE" is [1] 0.7337036
# So that is pretty accurate.
#
# When using 1000 tree (earliest convergence) the computedRMLSE is
# 1.112594
#
# When using more inputs,
#
# Start from a clean workspace; this script is meant to be run standalone.
rm(list=ls())
# Load the gradient-boosted-trees package.
# NOTE(review): require() returns FALSE instead of erroring when gbm is
# missing; library(gbm) would fail fast -- confirm preferred behavior.
require(gbm)
# Root mean squared log error (RMSLE) between predictions and actuals, for
# evaluating on the training set.  Negative entries in either vector are
# clamped to zero before the log(1 + x) transform.
computeRMSLE <- function(Ysimulated, Yreal) {
  # clamp negatives to zero so the log transform is defined
  pred <- pmax(Ysimulated, 0)
  actual <- pmax(Yreal, 0)
  # squared differences of log(1 + x), averaged, then rooted
  diffs <- log(pred + 1) - log(actual + 1)
  sqrt(sum(diffs^2) / length(actual))
}
# Mean negative Bernoulli log-likelihood (log loss) of predicted
# probabilities Ysimulated against 0/1 outcomes Yreal.  Probabilities of
# exactly 0 or 1 produce infinite/NaN terms (no clipping is applied).
computeLogisticalError <- function(Ysimulated, Yreal) {
  ll <- Yreal * log(Ysimulated) + (1 - Yreal) * log(1 - Ysimulated)
  -sum(ll) / length(Yreal)
}
### Clean and make right category
#
# Impute and retype the columns of a data frame for gbm, driven by the
# column-name prefix:
#   "Cat"  -> NAs become the level "Unknown", column coerced to factor
#   "Quan" -> NAs become the column mean, column coerced to numeric
#   "Date" -> NAs become the column mean, column coerced to numeric
# With forceQuan = TRUE every column is treated as "Quan".
# Progress (column name and whether the result is a factor) is printed.
# NOTE(review): the prefix match is case-sensitive, so a column such as
# "CAT_ticket" matches no branch and is left untouched -- confirm intended.
cleanInputDataForGBM <- function(X, forceQuan = FALSE) {
  # seq_along() (rather than 1:length(X)) is safe for zero-column input
  for (i in seq_along(X)) {
    name <- names(X)[i]
    print(name)
    col <- X[, i]
    index <- which(is.na(col))
    if (substr(name, 1, 3) == 'Cat' && forceQuan != TRUE) {
      col[index] <- "Unknown"
      X[, i] <- as.factor(col)
    }
    if (substr(name, 1, 4) == 'Quan' || forceQuan == TRUE) {
      column_mean <- mean(col, na.rm = TRUE)
      col[index] <- column_mean
      X[, i] <- as.numeric(col)
    }
    if (substr(name, 1, 4) == 'Date' && forceQuan != TRUE) {
      column_mean <- mean(col, na.rm = TRUE)
      col[index] <- column_mean
      X[, i] <- as.numeric(col)
    }
    result <- is.factor(X[, i])
    print(result)
  }
  return(X)
}
# Coerce every column of X to numeric, printing each column name and whether
# the result is a factor (always FALSE after coercion).  Factor columns
# become their integer level codes; character columns are parsed as numbers
# (NAs, with a warning, where parsing fails).
cleanInputAsNumeric <- function(X) {
  # seq_along() (rather than 1:length(X)) is safe for zero-column input
  for (i in seq_along(X)) {
    name <- names(X)[i]
    print(name)
    col <- X[, i]
    X[, i] <- as.numeric(col)
    result <- is.factor(X[, i])
    print(result)
  }
  return(X)
}
#idxCat <- c(13,558)
# Predictor columns are 2..11 of the csv; column 1 (Cat_survived) is the label.
idxCat <- c(2,11) #31st column is messed,
#col = c("Cat_survived","Cat_pclass","Cat_name","Cat_sex","Quant_age","Cat_sibsp","Cat_parch","CAT_ticket","Quant_fare","Cat_cabin","Cat_embarked")
col = c("Cat_survived","Cat_pclass","Cat_name","Cat_sex","Quant_age","Cat_sibsp","Quant_parch","CAT_ticket","Quant_fare","Cat_cabin","Cat_embarked")
# Load the training data and impute/retype columns by their name prefix.
training <- read.csv(file="train.csv",header=TRUE, sep=",", col.names=col)
Xtrain <- training[, idxCat[1] : idxCat[2] ]
XtrainClean = cleanInputDataForGBM(Xtrain)
## Create levelsets for the NA's that are factors. If numeric then abort if there is an NA
## Now run Test Data set, clean and continue.
test <- read.csv(file="test.csv",header=TRUE, sep=",", col.names=col[idxCat[1] : idxCat[2]])
Xtest <- test
XtestClean = cleanInputDataForGBM(Xtest)
## GBM Parameters
ntrees <- 6000
depth <- 5
minObs <- 10
shrink <- 0.0005
folds <- 5
Ynames <- c(names(training)[1])
## Setup variables.
ntestrows = nrow(XtestClean)
ntrainrows = nrow(XtrainClean)
# NOTE(review): ncol = 13 looks like a leftover from another dataset; only
# the single column named by Ynames is ever used -- confirm.
Yhattest = matrix(nrow = ntestrows , ncol = 13, dimnames = list (1:ntestrows,Ynames ) )
Yhattrain = matrix(nrow = ntrainrows , ncol = 13, dimnames = list (1:ntrainrows,Ynames ) )
## Density
#Y <- training[,1:12]
#Ysum <- rowSums ( Y, na.rm=TRUE)
#plot(1:12, Y[2,] )
#
# Correlations
# This is as we expected, the top category is male/female
# followed by cabin class. The least correlated is name parch?
ytraincorr <- training[,1]
ytraincorr[is.na(ytraincorr)] <- 0.0
xtraincorrIn <- training[, idxCat[1] : idxCat[2] ]
# forceQuan = TRUE: coerce every predictor to numeric so cor() works.
xtraincorr = cleanInputDataForGBM(xtraincorrIn, TRUE)
C2 = cor(xtraincorr, ytraincorr)
C2 [ is.na(C2)] <- 0.0
sort(C2)
print(C2)
maxV = max(abs(C2))
which( C2 == maxV, arr.ind = TRUE )
which( C2 == -1*maxV, arr.ind = TRUE )
start=date()
start
# Columns of XtrainClean used as predictors: pclass, sex, age, sibsp,
# parch, fare, embarked (indices into the cleaned predictor frame).
trainCols = c(1,3:6,8,10)
X = cbind(XtrainClean[trainCols] )
nColsOutput = 12
Y <- as.numeric(training[,1])
gdata <- cbind(Y,X)
# Fit a Bernoulli (0/1 survival) gradient boosted model with CV folds.
mo1gbm <- gbm(Y~.,
data=gdata,
distribution = "bernoulli",
n.trees = ntrees,
shrinkage = shrink,
cv.folds = folds,
verbose = TRUE)
gbm.perf(mo1gbm,method="cv")
sqrt(min(mo1gbm$cv.error))
which.min(mo1gbm$cv.error)
# Predicted survival probabilities for the train and test sets.
Yhattrain <- predict.gbm(mo1gbm, newdata=XtrainClean[trainCols], n.trees = ntrees, type="response")
Yhattest <- predict.gbm(mo1gbm, newdata=XtestClean[trainCols], n.trees = ntrees, type="response")
gc()
end = date()
end
## Calculate total training error
YhattrainRMLSE <- Yhattrain
YtrainRMLSE <- as.matrix(training[,1])
loge <- computeLogisticalError(YhattrainRMLSE, YtrainRMLSE)
loge
# Calculate how many correct % (leaders are 98%)
# Binarize the predicted probabilities at the 0.5 threshold.
YhattrainBool <- as.numeric(YhattrainRMLSE)
levelT <- 0.50
YhattrainBool[ which(YhattrainBool <= levelT) ] <- 0
YhattrainBool[ which(YhattrainBool >= levelT) ] <- 1
total <- length (YhattrainBool)
length ( which(YhattrainBool == 1) )
length ( which(YhattrainBool == 0) )
correct <- length ( which(YhattrainBool == Y) )
#.787 correlations
precentCorr <-correct/total
precentCorr
write.csv(YhattrainBool, "titanic_1_gbm_train.csv", row.names=FALSE)
Yhattest
YhattestBool = as.numeric(Yhattest)
YhattestBool[ which(YhattestBool <= levelT) ] <- 0
YhattestBool[ which(YhattestBool >= levelT) ] <- 1
write.csv(YhattestBool, "titanic_1_gbm_test.csv", row.names=FALSE)
#########################################################
# Extra's
########################################################
# 1. Which columns look like other columns
# Take the correlatoin, and find where its greater that 0.9999
# Of course remove the 1 correlaion
# You must set EACH column to a numeric one
# Finally the 'diff' returns where its not a diagonol
# TODO return the exact columnnames
trainingMatrix = as.matrix( training )
trainingMatrix = cleanInputAsNumeric( training)
trainingMatrix[is.na(trainingMatrix)] <- 0.0
corr <- cor(trainingMatrix)
idx <- which(corr > 0.9999, arr.ind = TRUE)
idxCopy <- idx[ apply(idx, 1, diff) > 0, ]
# 2.
#
#
#
# Auto-extracted usage examples for qualityTools::rsmDesign (central
# composite response-surface designs).
library(qualityTools)
### Name: rsmDesign
### Title: Generate a response surface design (i.e. central composite
### design)
### Aliases: rsmDesign
### Keywords: Design of Experiments Six Sigma
### ** Examples
#central composite design for 2 factors with 2 blocks, alpha = 1.41,
#5 centerpoints in the cube portion and 3 centerpoints in the star portion:
rsmDesign(k = 2, blocks = 2, alpha = sqrt(2),cc = 5, cs = 3)
#central composite design with both, orthogonality and near rotatability
rsmDesign(k = 2, blocks = 2, alpha = "both")
#central composite design with
#2 centerpoints in the factorial portion of the design i.e 2
#1 centerpoint int the star portion of the design i.e. 1
#2 replications per factorial point i.e. 2^3*2 = 16
#3 replications per star points 3*2*3 = 18
#makes a total of 37 factor combinations
rsdo = rsmDesign(k = 3, blocks = 1, alpha = 2, cc = 2, cs = 1, fp = 2, sp = 3)
nrow(rsdo) #37
| /data/genthat_extracted_code/qualityTools/examples/rsmDesign.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 925 | r | library(qualityTools)
### Name: rsmDesign
### Title: Generate a response surface design (i.e. central composite
### design)
### Aliases: rsmDesign
### Keywords: Design of Experiments Six Sigma
### ** Examples
#central composite design for 2 factors with 2 blocks, alpha = 1.41,
#5 centerpoints in the cube portion and 3 centerpoints in the star portion:
rsmDesign(k = 2, blocks = 2, alpha = sqrt(2),cc = 5, cs = 3)
#central composite design with both, orthogonality and near rotatability
rsmDesign(k = 2, blocks = 2, alpha = "both")
#central composite design with
#2 centerpoints in the factorial portion of the design i.e 2
#1 centerpoint int the star portion of the design i.e. 1
#2 replications per factorial point i.e. 2^3*2 = 16
#3 replications per star points 3*2*3 = 18
#makes a total of 37 factor combinations
rsdo = rsmDesign(k = 3, blocks = 1, alpha = 2, cc = 2, cs = 1, fp = 2, sp = 3)
nrow(rsdo) #37
|
# setwd("/Users/loey/Desktop/Research/RationalLying/analysis")
expt.S <- expt.S.full %>%
group_by(util, p, k, ksay) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(util, p, k, ksay, fill=list(n=0)) %>%
group_by(util, p, k) %>%
mutate(probability = n / sum(n),
probTxt = paste0(round(probability*100), "%"))
# functions
# Reshape a ToM-model prediction matrix into a long tibble.
#
# `df` is expected to be a matrix/data frame with 11 rows (one per reported
# value ksay = 0..10) and default "V<i>" column names, one column per model
# condition.  Returns a tibble with columns util, p, k, ksay, probability,
# probTxt, sorted by (util, p, k, ksay).
#
# NOTE(review): the reshaping hard-codes 2 utils x 3 p-levels x 11 truth
# values (121 = 11*11 rows per (util, p) cell) -- confirm this matches the
# model output shape before reusing elsewhere.
ToMToTibble <- function(df){
  df %>%
    as_tibble() %>%
    # one row per reported value ksay
    mutate(ksay = 0:10) %>%
    pivot_longer(-ksay, names_to = 'k', values_to='probability') %>%
    # column names are "V1".."Vn": strip the leading "V" to get a 0-based index
    mutate(k = as.numeric(substr(k, 2, 10))-1,
           # first half of the condition indices -> "red", second half -> "blue"
           util = ifelse(k < ceiling(max(k)/2), "red", "blue"),
           util = factor(util, levels=c("red","blue"))) %>%
    relocate(k, .before = ksay) %>%
    arrange(k, ksay) %>%
    # attach the three p-levels (121 rows each), repeated for both utils,
    # then fold the condition index back into a truth value k in 0..10
    mutate(p = rep(rep(c(0.2, 0.5, 0.8), each=121),2),
           p = as.factor(p),
           k = k %% 11) %>%
    relocate(c(util,p), .before = k) %>%
    arrange(util, p, k, ksay) %>%
    # human-readable percentage label for plotting
    mutate(probTxt = paste0(round(probability*100),"%"))
}
# Reshape a heuristic-model prediction matrix into a long tibble with the
# same column layout as ToMToTibble(): util, p, k, ksay, probability, probTxt.
#
# `df` is expected to have 121 rows (11 truth values k x 11 reported values
# ksay) and one "V<i>" column per (util, p) condition.
#
# NOTE(review): the constants 66 (= 11 ksay x 6 conditions) and `%% 3`
# hard-code 2 utils x 3 p-levels -- confirm against the model before reuse.
# Also note probTxt is computed twice (here and in the final mutate) with the
# identical expression; the second assignment is redundant but harmless.
heurToTibble <- function(df){
  df %>%
    as_tibble() %>%
    # ksay cycles 0..10 within each truth value k
    mutate(ksay = rep(0:10, 11)) %>%
    pivot_longer(-ksay, names_to = 'condition', values_to='probability') %>%
    # "V1".."Vn" column names -> 0-based condition index
    mutate(condition = as.numeric(substr(condition, 2, 10))-1,
           # first half of conditions -> "red", second half -> "blue"
           util = ifelse(condition < ceiling(max(condition)/2), "red", "blue"),
           util = factor(util, levels=c("red","blue")),
           # condition index mod 3 selects the p-level: 0.2, 0.5, 0.8
           p = condition %% 3,
           p = as.factor(0.2 + 0.3*p),
           # truth value k: 11 values, 66 rows apiece
           k = rep(0:10, each=66),
           probTxt = paste0(round(probability*100), "%")) %>%
    select(-condition) %>%
    relocate(c(util, p, k), .before = ksay) %>%
    arrange(util, p, k, ksay) %>%
    mutate(probTxt = paste0(round(probability*100),"%"))
}
# models
recurse.S.pred.df <- recurseToM.pred(
0.5, # recurseToMeval@coef['alph','Estimate'], #
0, # recurseToMeval@coef['eta.S','Estimate'], #
recurseToMeval@coef['eta.R','Estimate'],
recurseToMeval@coef['lambda','Estimate'],
recurseToMeval@coef['weight','Estimate'])[[2]] %>%
ToMToTibble()
noToM.S.pred.df <- noToM.s.pred(
0.5, # noToMeval@coef['alph','Estimate'], #
0, # noToMeval@coef['eta.S','Estimate'], #
probToLogit(0.99)) %>% # noToMeval@coef['weight','Estimate']) %>%
ToMToTibble()
everybodyLies.S.pred.df <- everybodyLies.pred(
everybodyLiesEval@coef['lambda','Estimate'],
everybodyLiesEval@coef['weight','Estimate']
) %>%
heurToTibble()
someLies.S.pred.df <- someLies.pred(
somePeopleLieEval@coef['pTrue','Estimate'],
somePeopleLieEval@coef['lambda','Estimate'],
somePeopleLieEval@coef['weight','Estimate']
) %>%
heurToTibble()
# Combine model predictions + expt results
all.sender <- expt.S %>%
select(util, p, k, ksay, probability, probTxt) %>%
mutate(type="human results",
p = as.factor(p)) %>%
bind_rows(mutate(recurse.S.pred.df, type="recursive ToM"),
mutate(noToM.S.pred.df, type="0th order ToM"),
mutate(everybodyLies.S.pred.df, type="everybody lies"),
mutate(someLies.S.pred.df, type="some people lie")) %>%
mutate(type = factor(type,
levels=c("everybody lies","some people lie","0th order ToM","recursive ToM","human results")))
model_labels <- setNames(c("'everybody lies'","'some people lie'","0^th*' order ToM'","'recursive ToM'","'human results'"),
levels(all.sender$type))
row1 <- all.sender %>%
filter(type != "human results") %>%
filter(util=="red" & p=="0.5") %>%
mutate(type = as.character(type),
type = case_when(
type == "everybody lies" ~ "eq. intrin. avers.",
type == "some people lie" ~ "uneq. intrin. avers.",
TRUE ~ type),
type = factor(type,
c("eq. intrin. avers.",
"uneq. intrin. avers.",
"0th order ToM",
"recursive ToM"))
) %>%
ggplot(aes(x=k, y=ksay, fill=probability, label=probTxt)) +
geom_tile() +
#geom_text(size=2) +
scale_x_continuous("", expand=c(0,0), breaks=seq(0,10,2)) +
scale_y_continuous("Reported", expand=c(0,0), breaks=seq(0,10,2)) +
scale_fill_gradient2("Prob. Report\nGiven Truth", low="white", mid="darkorchid", high="blue", midpoint=0.5, limits=c(0,1), labels=c("0%","25%","50%","75%","100%")) +
facet_grid(.~type) +
guides(fill = FALSE) +
theme_bw() +
theme(strip.background = element_rect(fill="snow2"),
strip.text = element_text(size=11, vjust=0),
legend.title = element_text(size=8),
legend.text = element_text(size=8))
row2 <- all.sender %>%
filter(type == "human results") %>%
filter(util=="red" & p=="0.5") %>%
ggplot(aes(x=k, y=ksay, fill=probability, label=probTxt)) +
geom_tile() +
geom_text(size=4) +
scale_x_continuous("Truth", expand=c(0,0), breaks=seq(0,10,2)) +
scale_y_continuous("Reported", expand=c(0,0), breaks=seq(0,10,2)) +
scale_fill_gradient2("Prob. Report\nGiven Truth", low="white", mid="darkorchid", high="blue", midpoint=0.5, limits=c(0,1), labels=c("0%","25%","50%","75%","100%")) +
facet_grid(.~type) +
theme_bw() +
theme(strip.background = element_rect(fill="snow2"),
strip.text = element_text(size=11, vjust=0),
legend.title = element_text(size=8),
legend.text = element_text(size=8))
tileLegend <- get_legend(row2)
modelLabel1 <- ggdraw() +
draw_label("Models", size=12, x=0.2, y=0.54, hjust=0) +
draw_line(x=c(0.1,0.1), y=c(0.25,0.79), size=1)
full_tile <- plot_grid(row1, modelLabel1,
row2 + guides(fill=FALSE), tileLegend,
nrow=2, ncol=2,
rel_heights=c(30, 70), rel_widths=c(85, 15))
ggsave("img/allpredictions.pdf", full_tile, width=7, height=7)
| /analysis/supplmodels.R | no_license | la-oey/RationalLying | R | false | false | 5,714 | r | # setwd("/Users/loey/Desktop/Research/RationalLying/analysis")
expt.S <- expt.S.full %>%
group_by(util, p, k, ksay) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(util, p, k, ksay, fill=list(n=0)) %>%
group_by(util, p, k) %>%
mutate(probability = n / sum(n),
probTxt = paste0(round(probability*100), "%"))
# functions
ToMToTibble <- function(df){
df %>%
as_tibble() %>%
mutate(ksay = 0:10) %>%
pivot_longer(-ksay, names_to = 'k', values_to='probability') %>%
mutate(k = as.numeric(substr(k, 2, 10))-1,
util = ifelse(k < ceiling(max(k)/2), "red", "blue"),
util = factor(util, levels=c("red","blue"))) %>%
relocate(k, .before = ksay) %>%
arrange(k, ksay) %>%
mutate(p = rep(rep(c(0.2, 0.5, 0.8), each=121),2),
p = as.factor(p),
k = k %% 11) %>%
relocate(c(util,p), .before = k) %>%
arrange(util, p, k, ksay) %>%
mutate(probTxt = paste0(round(probability*100),"%"))
}
heurToTibble <- function(df){
df %>%
as_tibble() %>%
mutate(ksay = rep(0:10, 11)) %>%
pivot_longer(-ksay, names_to = 'condition', values_to='probability') %>%
mutate(condition = as.numeric(substr(condition, 2, 10))-1,
util = ifelse(condition < ceiling(max(condition)/2), "red", "blue"),
util = factor(util, levels=c("red","blue")),
p = condition %% 3,
p = as.factor(0.2 + 0.3*p),
k = rep(0:10, each=66),
probTxt = paste0(round(probability*100), "%")) %>%
select(-condition) %>%
relocate(c(util, p, k), .before = ksay) %>%
arrange(util, p, k, ksay) %>%
mutate(probTxt = paste0(round(probability*100),"%"))
}
# models
recurse.S.pred.df <- recurseToM.pred(
0.5, # recurseToMeval@coef['alph','Estimate'], #
0, # recurseToMeval@coef['eta.S','Estimate'], #
recurseToMeval@coef['eta.R','Estimate'],
recurseToMeval@coef['lambda','Estimate'],
recurseToMeval@coef['weight','Estimate'])[[2]] %>%
ToMToTibble()
noToM.S.pred.df <- noToM.s.pred(
0.5, # noToMeval@coef['alph','Estimate'], #
0, # noToMeval@coef['eta.S','Estimate'], #
probToLogit(0.99)) %>% # noToMeval@coef['weight','Estimate']) %>%
ToMToTibble()
everybodyLies.S.pred.df <- everybodyLies.pred(
everybodyLiesEval@coef['lambda','Estimate'],
everybodyLiesEval@coef['weight','Estimate']
) %>%
heurToTibble()
someLies.S.pred.df <- someLies.pred(
somePeopleLieEval@coef['pTrue','Estimate'],
somePeopleLieEval@coef['lambda','Estimate'],
somePeopleLieEval@coef['weight','Estimate']
) %>%
heurToTibble()
# Combine model predictions + expt results
all.sender <- expt.S %>%
select(util, p, k, ksay, probability, probTxt) %>%
mutate(type="human results",
p = as.factor(p)) %>%
bind_rows(mutate(recurse.S.pred.df, type="recursive ToM"),
mutate(noToM.S.pred.df, type="0th order ToM"),
mutate(everybodyLies.S.pred.df, type="everybody lies"),
mutate(someLies.S.pred.df, type="some people lie")) %>%
mutate(type = factor(type,
levels=c("everybody lies","some people lie","0th order ToM","recursive ToM","human results")))
model_labels <- setNames(c("'everybody lies'","'some people lie'","0^th*' order ToM'","'recursive ToM'","'human results'"),
levels(all.sender$type))
row1 <- all.sender %>%
filter(type != "human results") %>%
filter(util=="red" & p=="0.5") %>%
mutate(type = as.character(type),
type = case_when(
type == "everybody lies" ~ "eq. intrin. avers.",
type == "some people lie" ~ "uneq. intrin. avers.",
TRUE ~ type),
type = factor(type,
c("eq. intrin. avers.",
"uneq. intrin. avers.",
"0th order ToM",
"recursive ToM"))
) %>%
ggplot(aes(x=k, y=ksay, fill=probability, label=probTxt)) +
geom_tile() +
#geom_text(size=2) +
scale_x_continuous("", expand=c(0,0), breaks=seq(0,10,2)) +
scale_y_continuous("Reported", expand=c(0,0), breaks=seq(0,10,2)) +
scale_fill_gradient2("Prob. Report\nGiven Truth", low="white", mid="darkorchid", high="blue", midpoint=0.5, limits=c(0,1), labels=c("0%","25%","50%","75%","100%")) +
facet_grid(.~type) +
guides(fill = FALSE) +
theme_bw() +
theme(strip.background = element_rect(fill="snow2"),
strip.text = element_text(size=11, vjust=0),
legend.title = element_text(size=8),
legend.text = element_text(size=8))
row2 <- all.sender %>%
filter(type == "human results") %>%
filter(util=="red" & p=="0.5") %>%
ggplot(aes(x=k, y=ksay, fill=probability, label=probTxt)) +
geom_tile() +
geom_text(size=4) +
scale_x_continuous("Truth", expand=c(0,0), breaks=seq(0,10,2)) +
scale_y_continuous("Reported", expand=c(0,0), breaks=seq(0,10,2)) +
scale_fill_gradient2("Prob. Report\nGiven Truth", low="white", mid="darkorchid", high="blue", midpoint=0.5, limits=c(0,1), labels=c("0%","25%","50%","75%","100%")) +
facet_grid(.~type) +
theme_bw() +
theme(strip.background = element_rect(fill="snow2"),
strip.text = element_text(size=11, vjust=0),
legend.title = element_text(size=8),
legend.text = element_text(size=8))
tileLegend <- get_legend(row2)
modelLabel1 <- ggdraw() +
draw_label("Models", size=12, x=0.2, y=0.54, hjust=0) +
draw_line(x=c(0.1,0.1), y=c(0.25,0.79), size=1)
full_tile <- plot_grid(row1, modelLabel1,
row2 + guides(fill=FALSE), tileLegend,
nrow=2, ncol=2,
rel_heights=c(30, 70), rel_widths=c(85, 15))
ggsave("img/allpredictions.pdf", full_tile, width=7, height=7)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post_theme.R
\docType{data}
\name{sf_palettes}
\alias{sf_palettes}
\title{List of sf main palettes}
\format{
An object of class \code{list} of length 5.
}
\usage{
sf_palettes
}
\description{
List of sf main palettes
}
\keyword{datasets}
| /man/sf_palettes.Rd | permissive | signaux-faibles/rsignauxfaibles | R | false | true | 315 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post_theme.R
\docType{data}
\name{sf_palettes}
\alias{sf_palettes}
\title{List of sf main palettes}
\format{
An object of class \code{list} of length 5.
}
\usage{
sf_palettes
}
\description{
List of sf main palettes
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-qq.r
\name{stat_qq}
\alias{stat_qq}
\alias{geom_qq}
\title{Calculation for quantile-quantile plot.}
\usage{
stat_qq(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., distribution = stats::qnorm,
dparams = list(), na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE)
geom_qq(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., distribution = stats::qnorm,
dparams = list(), na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link{aes}} or
\code{\link{aes_}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link{ggplot}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link{fortify}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame.}, and
will be used as the layer data.}
\item{geom}{The geometric object used to display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{other arguments passed on to \code{\link{layer}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{color = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{distribution}{Distribution function to use, if x not specified}
\item{dparams}{Additional parameters passed on to \code{distribution}
function.}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
}
\description{
Calculation for quantile-quantile plot.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{animint2:::rd_aesthetics("stat", "qq")}
}
\section{Computed variables}{
\describe{
\item{sample}{sample quantiles}
\item{theoretical}{theoretical quantiles}
}
}
\examples{
\donttest{
df <- data.frame(y = rt(200, df = 5))
p <- ggplot(df, aes(sample = y))
p + stat_qq()
p + geom_point(stat = "qq")
# Use fitdistr from MASS to estimate distribution params
params <- as.list(MASS::fitdistr(df$y, "t")$estimate)
ggplot(df, aes(sample = y)) +
stat_qq(distribution = qt, dparams = params["df"])
# Using to explore the distribution of a variable
ggplot(mtcars) +
stat_qq(aes(sample = mpg))
ggplot(mtcars) +
stat_qq(aes(sample = mpg, colour = factor(cyl)))
}
}
| /man/stat_qq.Rd | no_license | lazycipher/animint2 | R | false | true | 3,388 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-qq.r
\name{stat_qq}
\alias{stat_qq}
\alias{geom_qq}
\title{Calculation for quantile-quantile plot.}
\usage{
stat_qq(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., distribution = stats::qnorm,
dparams = list(), na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE)
geom_qq(mapping = NULL, data = NULL, geom = "point",
position = "identity", ..., distribution = stats::qnorm,
dparams = list(), na.rm = FALSE, show.legend = NA,
inherit.aes = TRUE)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link{aes}} or
\code{\link{aes_}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link{ggplot}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link{fortify}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame.}, and
will be used as the layer data.}
\item{geom}{The geometric object used to display the data}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{other arguments passed on to \code{\link{layer}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{color = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{distribution}{Distribution function to use, if x not specified}
\item{dparams}{Additional parameters passed on to \code{distribution}
function.}
\item{na.rm}{If \code{FALSE} (the default), removes missing values with
a warning. If \code{TRUE} silently removes missing values.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link{borders}}.}
}
\description{
Calculation for quantile-quantile plot.
}
\section{Aesthetics}{
\Sexpr[results=rd,stage=build]{animint2:::rd_aesthetics("stat", "qq")}
}
\section{Computed variables}{
\describe{
\item{sample}{sample quantiles}
\item{theoretical}{theoretical quantiles}
}
}
\examples{
\donttest{
df <- data.frame(y = rt(200, df = 5))
p <- ggplot(df, aes(sample = y))
p + stat_qq()
p + geom_point(stat = "qq")
# Use fitdistr from MASS to estimate distribution params
params <- as.list(MASS::fitdistr(df$y, "t")$estimate)
ggplot(df, aes(sample = y)) +
stat_qq(distribution = qt, dparams = params["df"])
# Using to explore the distribution of a variable
ggplot(mtcars) +
stat_qq(aes(sample = mpg))
ggplot(mtcars) +
stat_qq(aes(sample = mpg, colour = factor(cyl)))
}
}
|
# # ============ start test script ================
# Test <- as.numeric(c(0,1,2,3,0,1,2,3,0,2))
# ID <- c("a","a","a","a","b","b","b","b","c","c")
# DF <- data.frame(ID, Test)
#
#
# library(plyr)
# library(foreach)
# library(pracma)
# library(data.table)
#
# # ---- TRAPEZOIDAL RULE ----
# #cumtrapz
# RATE <- as.numeric(c(-1,-.5,-.3,-.1,-.05,-.01))
# TTIME <- as.numeric(c(5,8,10,15,23,30))
# Trap <- trapz(RATE,TTIME)
#
# TDIF <- diff(TTIME,lag=1)
# RDIF <- -diff(-RATE,lag=1)
# RSUM <- cumsum(RATE)
# #INTEGRATE <- 0.5*diff(TTIME, lag = 1)*diffinv(RATE,lag = 1)
# INTEGRATE <- 0.5*diffinv(RATE,lag = 1)
#
#
# SUMOUT <- cbind(DF, SUM = c(lapply(split(DF, DF$ID), function(x) cumtrapz(x$Test)), recursive = T))
#
# # --------------------------------------------
# PLY.Test <- ddply(DF,"ID", function(a) cumsum(a$Test))
#
# # --------------------------------------------
# out <-ifelse(cbind(Test>0, Test>0, Test>0),
# {C1 <- Test +1
# C2 <- Test +2
# C3 <- Test +3
# cbind(C1, C2, C3)},
# {C1 <- Test +1
# C2 <- Test +2
# C3 <- Test +3
# cbind(C1, C2, C3)})
#
library(data.table)
DAT <- data.table(DATA.NZ)
setkey(DAT, ITEST) # SETS DATA TABLE KEY = ITEST
DAT.TEST <- DAT[,cumtrapz(TIME, EAR ), by = ITEST]
DAT.TRAP <- data.frame(DAT.TEST, DAT[,EAC])
# # ============= end test script ================
# KAP0.INP <- 2.09011272735447
# KAP1.INP <- 1.32951908075078
# DDT.INP <- 0.900012234998625
# NK.INP <- 2.75382483799085
# KAP2.INP <- 1 # CONSTANT
KAP0.INP <- 10.119
KAP1.INP <- 1.005
DDT.INP <- 0.8963
NK.INP <- 1.3312
KAP2.INP <- 1
FLOW.INP<- cbind(KAP0.INP, KAP1.INP, KAP2.INP,
DDT.INP, NK.INP) # values for fiting parameters
# ==== INPUT INITIAL VALUES FOR CREEP PARAMETERS ====
# ---- Creep parameters, results from Callahan fits - shear tests only ----
ETA0 <- 0.102854 # -
ETA1 <- 3.9387 # -
ETA2 <- 1 # constant -
NF <- 3.5122 # -
AA1 <- 0.3147 # -
PP <- 1.6332 # -
NSP <- 0.557621 # -
# R1 <- 8.69760
R1 <- 1.041 * 10 ^ -6 # [K/(MPa-sec)]
# R1 <- 0.0194 # [K/(MPa-sec)]
R3 <- 15.1281 # -
R4 <- 0.1677765 # -
QSR <- 1077.46 # [K]
#QSR <- 2897.09
CREEP.INP <- cbind(ETA0, ETA1, ETA2, NF, AA1, PP,
NSP, R1, R3, R4, QSR) # INITIAL CREEP PARAMETER VALUES
CPar <- CREEP.INP
FPar <- FLOW.INP
# ---- use subset of full data set for debugging ----
TestData <- DATA.NZ[which(DATA.NZ$ITEST == "SC1B"),] # SUBSET OF DATA FOR ANALYSIS
# colnames(TestData) <- c("ICASE", "ITEST", "TIME", "DT", "TF", "TEMP", "AS",
# "LS", "EVT", "EVC", "EAT", "EAC", "RHO", "D", "RHO0",
# "RHOI", "DD", "W", "EVR", "EAR", "ELR", "RAT", "ELC")
# ---- Flow Potential Parameters (5) *KAP2 HELD CONST. ----
KAP0 <- as.numeric(FPar[1])
KAP1 <- as.numeric(FPar[2])
KAP2 <- as.numeric(FPar[3]) # Constant = 1
DDT <- as.numeric(FPar[4])
NK <- as.numeric(FPar[5])
# ---- Creep Consolidation Parameters (11) *ETA2 HELD CONST
ETA0 <- as.numeric(CPar[1])
ETA1 <- as.numeric(CPar[2])
ETA2 <- as.numeric(CPar[3]) # Constant = 1
NF <- as.numeric(CPar[4]) # callahn used NA as variable name
AA1 <- as.numeric(CPar[5])
PP <- as.numeric(CPar[6])
NSP <- as.numeric(CPar[7])
R1 <- as.numeric(CPar[8])
R3 <- as.numeric(CPar[9])
R4 <- as.numeric(CPar[10])
QSR <- as.numeric(CPar[11])
# ---- Munson-Dawson Creep Parameters (17) ----
A1 <- 8.386e22
A2 <- 9.672e12
Q1R <- 12581
Q2R <- 5033
N1 <- 5.5
N2 <- 5.0
B1 <- 6.0856e6
B2 <- 3.034e-2
Q <- 5335
S0 <- 20.57
M <- 3
K0 <- 6.275e5
C <- 9.198e-3
ALPHA <- -17.37
BETA <- -7.738
DELTA <- 0.58
MU <- 12400
# ---- fitting assumptions ----
RHOIS <- 2160.0 # ASSUMED IN SITU SALT DENSITY
NTIME <- 10^6 # NORMALIZING TIME
DSP <- 0.64 # FRACTIONAL DENSITY OF RANDOM DENSE SPHERICAL PARTICLES
# ---- Values input into function (18)----
#ICASE <- as.numeric(TestData[,1]) # TEST TYPE (1:Hyd Cons, 2:Shear Cons, 3:compaction)
ITEST <- as.character(TestData[,2]) # TEST ID
TIME <- as.numeric(TestData[,3]) # TIME [SEC]
DT <- as.numeric(TestData[,4]) # DELTA TIME [SEC]
#TF <- as.numeric(TestData[,5]) # TOTAL TEST TIME [SEC]
TEMP <- as.numeric(TestData[,6]) # TEMP [K]
AS <- as.numeric(TestData[,7]) # AXIAL STRESS [MPA]
LS <- as.numeric(TestData[,8]) # LATERAL STRESS [MPA]
#EVT <- as.numeric(TestData[,9]) # TOTAL TRUE VOLUMETRIC STRAIN
EVC <- as.numeric(TestData[,10]) # CREEP TRUE VOLUMETRIC STRAIN
#EAT <- as.numeric(TestData[,11]) # TOTAL TRUE AXIAL STRAIN
EAC <- as.numeric(TestData[,12]) # CREEP TRUE AXIAL STRAIN
RHO <- as.numeric(TestData[,13]) # CURRENT DENSITY [KG/M3]
D <- as.numeric(TestData[,14]) # FRACTIONAL DENSITY
RHO0 <- as.numeric(TestData[,15]) # DENSITY AT THE START OF CONSOLIDATION (<RHOI)
RHOI <- as.numeric(TestData[,16]) # DENSITY AT THE START OF CREEP
DD <- as.numeric(TestData[,17]) # AVERAGE GRAIN SIZE [MM]
W <- as.numeric(TestData[,18]) # WATER CONENT BY PERCENT WEIGHT
# ---- calculate variables ----
MS <- (2.0 * LS + AS) / 3 # MEAN STRESS
DS <- LS - AS # STRESS DIFFERENCE
ELC <- (EVC - EAC) / 2 # CREEP TRUE LATERAL STRAIN
D0 <- 1382.4 / RHOIS # EMPLACED FRACTIONAL DENSITY ( NOT SURE WHERE 1382.4 CAME FROM?)
DI <- RHOI / RHOIS # INITIAL FRACTIONAL DENSITY
WT1 <- DT / NTIME # WEIGHTING FUNCTION FOR CREEP CONSOLIDATION PARAMETERS
WT <- 1 # WEIGHTING FUNCTION FOR FLOW PARAMETERS
#DC <- DD # SET GRAIN SIZE FOR DCCS TESTS
Z1 <- EAC # Predicted axial strain (initial values)
Z2 <- ELC # Predicted lateral strain (initial values)
Z3 <- 0
# ==== define the differential equation ====
# ---- only calculate strain rates at TIME > 0 ----
# browser()
ERATE.OUT <- data.frame(ifelse(cbind(TIME > 0, TIME > 0, TIME > 0),
{
VOL <- Z1 + 2*Z2 # VOLUMETRIC STRAIN
VOLT <- VOL + log(DSP/DI) # USED FOR INITIAL ESTIMATE OF VOLUMETRIC STRAIN
DEN <- DI/exp(VOL) # CURRENT FRACTIONAL DENSITY
# DEN <- D # CURRENT FRACTIONAL DENSITY
ifelse(D >= 1,
{
MD <- 0 # if fractional density is 1, disclocation creep = 0
SP <- 0},# if fractional density is 1, pressure solutioning = 0
{
VAR <- ifelse(DEN <= DDT, DDT, DEN) # DEFINE DENSITY CEILING ISH
# ---- Equivalent Stress ----
OMEGAA <- ((1 - DEN) * NF / (1 - (1 - DEN)^(1 / NF)) ^ NF) ^ (2 / (NF + 1))
OMEGAK <- ((1 - VAR) * NK / (1 - (1 - VAR)^(1 / NK)) ^ NK) ^ (2 / (NK + 1))
ETA <- ETA0 * OMEGAA ^ ETA1
KAP <- KAP0 * OMEGAK ^ KAP1
TERMA <- ((2 - DEN) / DEN) ^ ((2 * NF) / (NF + 1))
TERMK <- ((2 - DEN) / DEN) ^ ((2 * NK) / (NK + 1))
# ---- Eqn. 2-3 (SAND97-2601) ----
SEQF <- sqrt(ETA * MS ^ 2 + ETA2 * TERMA * DS ^ 2) # Equivalent stress measure for Disc. Creep and Press Sol'ing
SEQ <- sqrt(KAP * MS ^ 2 + KAP2 * TERMK * DS ^ 2) # Equivalent stress measure for Flow Potential
# ---- Eqn. 2-17 (SAND97-2601) ----
ALPHA2 <- KAP * MS / 3
BETA2 <- KAP2 * TERMK * DS
# ---- Eqn. 2-20 divided by equivalent stress (for later calculation) ----
F2A <- (ALPHA2 - BETA2) / SEQ
F2L <- (ALPHA2 + 0.5 * BETA2) / SEQ
# ==== START: equivalent inelastic strain rate form for dislocation creep ====
# ---- Steady State Strain Rate Calc ----
ES1 <- A1 * (SEQF / MU) ^ N1 * exp(-Q1R / TEMP) # Dislocation climb - Eqn. 2-30
ES2 <- A2 * (SEQF / MU) ^ N2 * exp(-Q2R / TEMP) # Undefined Mechanism - Eqn. 2-31
# Slip - Eqn. 2-32 (SAND98-2601)
H <- SEQF - S0 # HEAVISIDE FUNCTION
ARG <- Q * (SEQF - S0) / MU
ES3 <- ifelse(H > 0, 0.5 * (B1 * exp(-Q1R / TEMP) +
(B2 * exp(-Q2R / TEMP)) *
(exp(ARG) - exp(-ARG))),0)
ESS = ES1 + ES2 + ES3 # Steady-state strain rate, Eqn. 2-29 (SAND97-2601)
# ---- EVALUATE TRANSIENT FUNCTION, 3 branches: work hardening, equilibrium, recovery
EFT <- K0 * exp(C * TEMP) * (SEQF / MU) ^ M # Transient Strain Limit, Eqn. 2-28
BIGD <- ALPHA + BETA * log10(SEQF / MU) # Work-Hardening parameter, Eqn 2-28
FU <- ifelse(Z3 == EFT, 1, ifelse(Z3 < EFT, exp(BIGD * (1 - Z3 / EFT) ^ 2),
exp(-DELTA * (1 - Z3 / EFT) ^ 2)))
MD <- FU * ESS # equivalent inelastic strain rate form for dislocation creep, Eqn 2-23
# ==== START: Equivalent Inelastic Strain Rate Form for Pressure Solutioning ====
# ---- Calculate initial volumetric strain - Based on spherical packing ----
CR <- abs(exp(VOLT) - 1)
# ---- Determine functional form - either large or small strains, Eqn 2-34 ----
GAMMA <- ifelse(CR <= 0.15, 1, abs((D0 - exp(VOLT)) / ((1 - D0) * exp(VOLT))) ^ NSP)
# Small Strains (Vol Strain > - 15%)
# Large Strains (Vol Strain < - 15%)
# ---- component of eqn 2-35 ---
X3 <- exp((R3 - 1) * VOLT) / (abs(1 - exp(VOLT))) ^ R4
# ---- determine value of moisture function (w) ----
M2 <- ifelse (W == 0, 0, W ^ AA1) # moisture content = 0
# moisture content > 0
G2 <- 1 / DD ^ PP # calculate grain size function
T2 <- exp(-QSR / TEMP) / TEMP
# ---- Equivalent Inelastic Strain Rate Form for Pressure Solutioning, Eqn 2-35
SP <- R1 * M2 * G2 * T2 * X3 * GAMMA * SEQF}) # end check for D < 1
DZ1 <- (MD + SP) * F2A # Predicted axial strain rate / derivative of strain
DZ2 <- (MD + SP) * F2L # Predicted lateral strain rate / derivative of strain
DZ3 <- (FU - 1) * ESS # Predicted Steady-State Creep Rate
c(DZ1, DZ2, DZ3)},{c(0,0,0)}))
colnames(ERATE.OUT) <- c("FEAR", "FELR", "FEVR") # column names
DATA.FIT <- cbind(TestData, ERATE.OUT)
# # ---- plot fit comparison (axial strain rate)----
library(ggplot2)
# ggSUB.EAR <- ggplot(data = DATA.FIT, aes(x=TIME, y=EAR))
# ggSUB.EAR <- ggSUB.EAR + geom_line()
# ggSUB.EAR <- ggSUB.EAR + geom_point(aes(y=FEAR))
# # ggSUB.EAR <- ggSUB.EAR + facet_wrap(~ITEST, ncol=3, scales = "free")
# ggSUB.EAR <- ggSUB.EAR + xlim(0,6e6) + ylim(-7.5e-6,0)
# ggSUB.EAR <- ggSUB.EAR + ylab("Axial Strain Rate: Calculated (dot) Vs. Measured (line)") + xlab("Time [sec]")
# ggSUB.EAR
# ---- integrate strain rate ----
FEA <- cumtrapz(DATA.FIT$TIME, DATA.FIT$FEAR)
FEL <- cumtrapz(DATA.FIT$TIME, DATA.FIT$FELR)
DATA.FIT <- cbind(DATA.FIT, FEA, FEL)
DT.DATA.FIT <- data.table(DATA.FIT)
setkey(DT.DATA.FIT, ITEST)
DT.FE <- DT.DATA.FIT[, c("IFEAR", "IFELR"):=list(as.vector(cumtrapz(TIME, FEAR)), as.vector(cumtrapz(TIME, FELR))), by = ITEST][]
# # ---- plot fit comparison (axial strain )----
# ggSUB.EA <- ggplot(data = DATA.FIT, aes(x=TIME, y=EAC))
# ggSUB.EA <- ggSUB.EA + geom_line()
# ggSUB.EA <- ggSUB.EA + geom_point(aes(y=FEA))
# # ggSUB.EA <- ggSUB.EA + facet_wrap(~ITEST, ncol=3, scales = "free")
# ggSUB.EA <- ggSUB.EA + xlim(0,6e6) + ylim(-0.25,0)
# ggSUB.EA <- ggSUB.EA + ylab("Axial Strain: Calculated (dot) Vs. Measured (line)") + xlab("Time [sec]")
# ggSUB.EA
ggSUB.E <- ggplot(data = DT.FE, aes(x=TIME, y=EAC, color = "SC1B"))
ggSUB.E <- ggSUB.E + geom_line(aes(color = "Axial Strain"))
ggSUB.E <- ggSUB.E + geom_point(aes(y=IFEAR, color = "Axial Strain - Fit"))
ggSUB.E <- ggSUB.E + geom_line(aes(y=ELC, color = "Lateral Strain"))
ggSUB.E <- ggSUB.E + geom_point(aes(y=IFELR, color = "Lateral Strain - Fit"))
# ggSUB.E <- ggSUB.E + facet_wrap(~ITEST, ncol=3, scales = "free")
ggSUB.E <- ggSUB.E + xlim(0,6e6) + ylim(-0.25,0)
ggSUB.E <- ggSUB.E + ylab("Axial Strain: Calculated (dot) Vs. Measured (line)") + xlab("Time [sec]")
ggSUB.E
# ---- plot measured (lines) vs. fitted (points) strains from DATA.FIT ----
# BUGFIX(review): the original wrote `color = SC1B` (unquoted).  SC1B is
# neither a column of DATA.FIT nor a defined object (it is only the ITEST
# value used to subset TestData), so printing the plot would fail with
# "object 'SC1B' not found".  The parallel ggSUB.E plot above uses the
# string "SC1B"; the same literal is used here.  The per-layer aes() colour
# mappings below override it for every drawn layer, as before.
ggDT.E <- ggplot(data = DATA.FIT, aes(x=TIME, y=EAC, color = "SC1B"))
ggDT.E <- ggDT.E + geom_line(aes(color = "Axial Strain"))
ggDT.E <- ggDT.E + geom_point(aes(y=FEA, color = "Axial Strain - Fit"))
ggDT.E <- ggDT.E + geom_line(aes(y=ELC, color = "Lateral Strain"))
ggDT.E <- ggDT.E + geom_point(aes(y=FEL, color = "Lateral Strain - Fit"))
# ggDT.E <- ggDT.E + facet_wrap(~ITEST, ncol=3, scales = "free")
ggDT.E <- ggDT.E + xlim(0,6e6) + ylim(-0.25,0)
ggDT.E <- ggDT.E + ylab("True Strain") + xlab("Time [sec]")
ggDT.E
| /CreepScript_SUBTEST.R | no_license | brandonlampe/R_CS-MatParFit | R | false | false | 12,201 | r | # # ============ start test script ================
# Test <- as.numeric(c(0,1,2,3,0,1,2,3,0,2))
# ID <- c("a","a","a","a","b","b","b","b","c","c")
# DF <- data.frame(ID, Test)
#
#
# library(plyr)
# library(foreach)
# library(pracma)
# library(data.table)
#
# # ---- TRAPEZOIDAL RULE ----
# #cumtrapz
# RATE <- as.numeric(c(-1,-.5,-.3,-.1,-.05,-.01))
# TTIME <- as.numeric(c(5,8,10,15,23,30))
# Trap <- trapz(RATE,TTIME)
#
# TDIF <- diff(TTIME,lag=1)
# RDIF <- -diff(-RATE,lag=1)
# RSUM <- cumsum(RATE)
# #INTEGRATE <- 0.5*diff(TTIME, lag = 1)*diffinv(RATE,lag = 1)
# INTEGRATE <- 0.5*diffinv(RATE,lag = 1)
#
#
# SUMOUT <- cbind(DF, SUM = c(lapply(split(DF, DF$ID), function(x) cumtrapz(x$Test)), recursive = T))
#
# # --------------------------------------------
# PLY.Test <- ddply(DF,"ID", function(a) cumsum(a$Test))
#
# # --------------------------------------------
# out <-ifelse(cbind(Test>0, Test>0, Test>0),
# {C1 <- Test +1
# C2 <- Test +2
# C3 <- Test +3
# cbind(C1, C2, C3)},
# {C1 <- Test +1
# C2 <- Test +2
# C3 <- Test +3
# cbind(C1, C2, C3)})
#
library(data.table)
DAT <- data.table(DATA.NZ)
setkey(DAT, ITEST) # SETS DATA TABLE KEY = ITEST
DAT.TEST <- DAT[,cumtrapz(TIME, EAR ), by = ITEST]
DAT.TRAP <- data.frame(DAT.TEST, DAT[,EAC])
# # ============= end test script ================
# ==== Flow-potential fitting parameters: initial values ====
# A previous calibration, kept for reference:
#   KAP0 = 2.09011272735447, KAP1 = 1.32951908075078,
#   DDT  = 0.900012234998625, NK = 2.75382483799085, KAP2 = 1
KAP0.INP <- 10.119
KAP1.INP <- 1.005
DDT.INP  <- 0.8963
NK.INP   <- 1.3312
KAP2.INP <- 1          # held constant during fitting
# 1 x 5 matrix of starting values for the flow-potential fit; cbind() of
# named scalars keeps the parameter names as column names.
FLOW.INP <- cbind(KAP0.INP, KAP1.INP, KAP2.INP, DDT.INP, NK.INP)
# ==== Creep-consolidation parameters: initial values ====
# Results from Callahan fits (shear tests only).  ETA2 is held constant.
ETA0 <- 0.102854
ETA1 <- 3.9387
ETA2 <- 1              # held constant during fitting
NF   <- 3.5122
AA1  <- 0.3147
PP   <- 1.6332
NSP  <- 0.557621
R1   <- 1.041 * 10^-6  # [K/(MPa-sec)]; alternatives tried: 8.69760, 0.0194
R3   <- 15.1281
R4   <- 0.1677765
QSR  <- 1077.46        # [K]; alternative tried: 2897.09
# 1 x 11 matrix of starting values for the creep fit.
CREEP.INP <- cbind(ETA0, ETA1, ETA2, NF, AA1, PP, NSP, R1, R3, R4, QSR)
# Working aliases consumed by the parameter-unpacking section below.
CPar <- CREEP.INP
FPar <- FLOW.INP
# ---- use subset of full data set for debugging ----
# Restrict the fit to a single test record (ITEST == "SC1B").
TestData <- DATA.NZ[which(DATA.NZ$ITEST == "SC1B"),] # SUBSET OF DATA FOR ANALYSIS
# colnames(TestData) <- c("ICASE", "ITEST", "TIME", "DT", "TF", "TEMP", "AS",
#                         "LS", "EVT", "EVC", "EAT", "EAC", "RHO", "D", "RHO0",
#                         "RHOI", "DD", "W", "EVR", "EAR", "ELR", "RAT", "ELC")
# Unpack the 1-row parameter matrices (FPar/CPar, built above) into plain
# scalars used throughout the strain-rate calculation below.
# ---- Flow Potential Parameters (5) *KAP2 HELD CONST. ----
KAP0 <- as.numeric(FPar[1])
KAP1 <- as.numeric(FPar[2])
KAP2 <- as.numeric(FPar[3]) # Constant = 1
DDT <- as.numeric(FPar[4])
NK <- as.numeric(FPar[5])
# ---- Creep Consolidation Parameters (11) *ETA2 HELD CONST
ETA0 <- as.numeric(CPar[1])
ETA1 <- as.numeric(CPar[2])
ETA2 <- as.numeric(CPar[3]) # Constant = 1
NF <- as.numeric(CPar[4]) # callahn used NA as variable name
AA1 <- as.numeric(CPar[5])
PP <- as.numeric(CPar[6])
NSP <- as.numeric(CPar[7])
R1 <- as.numeric(CPar[8])
R3 <- as.numeric(CPar[9])
R4 <- as.numeric(CPar[10])
QSR <- as.numeric(CPar[11])
# ---- Munson-Dawson creep constants (17) ----
# Fixed constants of the Munson-Dawson salt-creep model; unlike FLOW.INP /
# CREEP.INP these are not adjusted by the fit.
A1    <- 8.386e22   # prefactor, steady-state rate ES1 (dislocation climb)
A2    <- 9.672e12   # prefactor, steady-state rate ES2
Q1R   <- 12581      # [K] -- appears as exp(-Q1R / TEMP) with TEMP in K
Q2R   <- 5033       # [K]
N1    <- 5.5        # stress exponent for ES1
N2    <- 5.0        # stress exponent for ES2
B1    <- 6.0856e6   # slip-term prefactor (ES3)
B2    <- 3.034e-2   # slip-term prefactor (ES3)
Q     <- 5335       # slip-term stress scaling (ARG below)
S0    <- 20.57      # threshold stress in the Heaviside slip term
M     <- 3          # exponent in the transient strain limit EFT
K0    <- 6.275e5    # prefactor in the transient strain limit EFT
C     <- 9.198e-3   # temperature coefficient in EFT
ALPHA <- -17.37     # work-hardening parameter (BIGD)
BETA  <- -7.738     # work-hardening parameter (BIGD)
DELTA <- 0.58       # recovery-branch parameter
MU    <- 12400      # normalizing modulus in the (SEQF / MU) terms
# ---- fitting assumptions ----
RHOIS <- 2160.0     # assumed in-situ salt density
NTIME <- 10^6       # normalizing time
DSP   <- 0.64       # fractional density of random dense spherical packing
# ---- Values input into function (18)----
# Pull the test record apart by column POSITION; the mapping relies on the
# DATA.NZ column layout listed in the commented colnames() call above --
# TODO confirm that layout has not changed.
#ICASE <- as.numeric(TestData[,1]) # TEST TYPE (1:Hyd Cons, 2:Shear Cons, 3:compaction)
ITEST <- as.character(TestData[,2]) # TEST ID
TIME <- as.numeric(TestData[,3]) # TIME [SEC]
DT <- as.numeric(TestData[,4]) # DELTA TIME [SEC]
#TF <- as.numeric(TestData[,5]) # TOTAL TEST TIME [SEC]
TEMP <- as.numeric(TestData[,6]) # TEMP [K]
AS <- as.numeric(TestData[,7]) # AXIAL STRESS [MPA]
LS <- as.numeric(TestData[,8]) # LATERAL STRESS [MPA]
#EVT <- as.numeric(TestData[,9]) # TOTAL TRUE VOLUMETRIC STRAIN
EVC <- as.numeric(TestData[,10]) # CREEP TRUE VOLUMETRIC STRAIN
#EAT <- as.numeric(TestData[,11]) # TOTAL TRUE AXIAL STRAIN
EAC <- as.numeric(TestData[,12]) # CREEP TRUE AXIAL STRAIN
RHO <- as.numeric(TestData[,13]) # CURRENT DENSITY [KG/M3]
D <- as.numeric(TestData[,14]) # FRACTIONAL DENSITY
RHO0 <- as.numeric(TestData[,15]) # DENSITY AT THE START OF CONSOLIDATION (<RHOI)
RHOI <- as.numeric(TestData[,16]) # DENSITY AT THE START OF CREEP
DD <- as.numeric(TestData[,17]) # AVERAGE GRAIN SIZE [MM]
W <- as.numeric(TestData[,18]) # WATER CONENT BY PERCENT WEIGHT
# ---- calculate variables ----
# Derived per-row quantities; all are vectors the same length as TIME.
MS <- (2.0 * LS + AS) / 3 # MEAN STRESS
DS <- LS - AS # STRESS DIFFERENCE
ELC <- (EVC - EAC) / 2 # CREEP TRUE LATERAL STRAIN
D0 <- 1382.4 / RHOIS # EMPLACED FRACTIONAL DENSITY ( NOT SURE WHERE 1382.4 CAME FROM?)
DI <- RHOI / RHOIS # INITIAL FRACTIONAL DENSITY
WT1 <- DT / NTIME # WEIGHTING FUNCTION FOR CREEP CONSOLIDATION PARAMETERS
WT <- 1 # WEIGHTING FUNCTION FOR FLOW PARAMETERS
#DC <- DD # SET GRAIN SIZE FOR DCCS TESTS
# Z1/Z2/Z3 seed the strain-rate calculation below with the measured strains.
Z1 <- EAC # Predicted axial strain (initial values)
Z2 <- ELC # Predicted lateral strain (initial values)
Z3 <- 0
# ==== define the differential equation ====
# ---- only calculate strain rates at TIME > 0 ----
# NOTE(review): this block leans on ifelse() semantics in an unusual way.
# ifelse() evaluates BOTH its yes- and no-expressions fully, once, in the
# calling frame, then selects elementwise.  So the big `{...}` below runs
# exactly once (vectorized over the whole record), its internal assignments
# (VOL, SEQF, FU, ESS, F2A, ...) leak into the script environment, and its
# value c(DZ1, DZ2, DZ3) is recycled against the 3-column logical matrix
# cbind(TIME > 0, TIME > 0, TIME > 0).  The same applies to the nested
# ifelse(D >= 1, ...): DZ3 uses FU/ESS, which are only assigned inside the
# D < 1 branch, and that works only because both branches always evaluate.
# This is fragile; confirm behavior before restructuring.
# browser()
ERATE.OUT <- data.frame(ifelse(cbind(TIME > 0, TIME > 0, TIME > 0),
{
VOL <- Z1 + 2*Z2 # VOLUMETRIC STRAIN
VOLT <- VOL + log(DSP/DI) # USED FOR INITIAL ESTIMATE OF VOLUMETRIC STRAIN
DEN <- DI/exp(VOL) # CURRENT FRACTIONAL DENSITY
# DEN <- D # CURRENT FRACTIONAL DENSITY
ifelse(D >= 1,
{
MD <- 0 # if fractional density is 1, disclocation creep = 0
SP <- 0},# if fractional density is 1, pressure solutioning = 0
{
# Clamp the density used in OMEGAK from below at DDT.
VAR <- ifelse(DEN <= DDT, DDT, DEN) # DEFINE DENSITY CEILING ISH
# ---- Equivalent Stress ----
OMEGAA <- ((1 - DEN) * NF / (1 - (1 - DEN)^(1 / NF)) ^ NF) ^ (2 / (NF + 1))
OMEGAK <- ((1 - VAR) * NK / (1 - (1 - VAR)^(1 / NK)) ^ NK) ^ (2 / (NK + 1))
ETA <- ETA0 * OMEGAA ^ ETA1
KAP <- KAP0 * OMEGAK ^ KAP1
TERMA <- ((2 - DEN) / DEN) ^ ((2 * NF) / (NF + 1))
TERMK <- ((2 - DEN) / DEN) ^ ((2 * NK) / (NK + 1))
# ---- Eqn. 2-3 (SAND97-2601) ----
SEQF <- sqrt(ETA * MS ^ 2 + ETA2 * TERMA * DS ^ 2) # Equivalent stress measure for Disc. Creep and Press Sol'ing
SEQ <- sqrt(KAP * MS ^ 2 + KAP2 * TERMK * DS ^ 2) # Equivalent stress measure for Flow Potential
# ---- Eqn. 2-17 (SAND97-2601) ----
ALPHA2 <- KAP * MS / 3
BETA2 <- KAP2 * TERMK * DS
# ---- Eqn. 2-20 divided by equivalent stress (for later calculation) ----
F2A <- (ALPHA2 - BETA2) / SEQ
F2L <- (ALPHA2 + 0.5 * BETA2) / SEQ
# ==== START: equivalent inelastic strain rate form for dislocation creep ====
# ---- Steady State Strain Rate Calc ----
ES1 <- A1 * (SEQF / MU) ^ N1 * exp(-Q1R / TEMP) # Dislocation climb - Eqn. 2-30
ES2 <- A2 * (SEQF / MU) ^ N2 * exp(-Q2R / TEMP) # Undefined Mechanism - Eqn. 2-31
# Slip - Eqn. 2-32 (SAND98-2601)
H <- SEQF - S0 # HEAVISIDE FUNCTION
ARG <- Q * (SEQF - S0) / MU
# Slip term is active only above the threshold stress S0.
ES3 <- ifelse(H > 0, 0.5 * (B1 * exp(-Q1R / TEMP) +
(B2 * exp(-Q2R / TEMP)) *
(exp(ARG) - exp(-ARG))),0)
ESS = ES1 + ES2 + ES3 # Steady-state strain rate, Eqn. 2-29 (SAND97-2601)
# ---- EVALUATE TRANSIENT FUNCTION, 3 branches: work hardening, equilibrium, recovery
EFT <- K0 * exp(C * TEMP) * (SEQF / MU) ^ M # Transient Strain Limit, Eqn. 2-28
BIGD <- ALPHA + BETA * log10(SEQF / MU) # Work-Hardening parameter, Eqn 2-28
# FU: 1 at equilibrium (Z3 == EFT), hardening below, recovery above.
FU <- ifelse(Z3 == EFT, 1, ifelse(Z3 < EFT, exp(BIGD * (1 - Z3 / EFT) ^ 2),
exp(-DELTA * (1 - Z3 / EFT) ^ 2)))
MD <- FU * ESS # equivalent inelastic strain rate form for dislocation creep, Eqn 2-23
# ==== START: Equivalent Inelastic Strain Rate Form for Pressure Solutioning ====
# ---- Calculate initial volumetric strain - Based on spherical packing ----
CR <- abs(exp(VOLT) - 1)
# ---- Determine functional form - either large or small strains, Eqn 2-34 ----
GAMMA <- ifelse(CR <= 0.15, 1, abs((D0 - exp(VOLT)) / ((1 - D0) * exp(VOLT))) ^ NSP)
# Small Strains (Vol Strain > - 15%)
# Large Strains (Vol Strain < - 15%)
# ---- component of eqn 2-35 ---
X3 <- exp((R3 - 1) * VOLT) / (abs(1 - exp(VOLT))) ^ R4
# ---- determine value of moisture function (w) ----
M2 <- ifelse (W == 0, 0, W ^ AA1) # moisture content = 0
# moisture content > 0
G2 <- 1 / DD ^ PP # calculate grain size function
T2 <- exp(-QSR / TEMP) / TEMP
# ---- Equivalent Inelastic Strain Rate Form for Pressure Solutioning, Eqn 2-35
SP <- R1 * M2 * G2 * T2 * X3 * GAMMA * SEQF}) # end check for D < 1
DZ1 <- (MD + SP) * F2A # Predicted axial strain rate / derivative of strain
DZ2 <- (MD + SP) * F2L # Predicted lateral strain rate / derivative of strain
DZ3 <- (FU - 1) * ESS # Predicted Steady-State Creep Rate
c(DZ1, DZ2, DZ3)},{c(0,0,0)}))
# Predicted axial / lateral / volumetric strain-rate columns.
colnames(ERATE.OUT) <- c("FEAR", "FELR", "FEVR") # column names
DATA.FIT <- cbind(TestData, ERATE.OUT)
# # ---- plot fit comparison (axial strain rate)----
library(ggplot2)
# ggSUB.EAR <- ggplot(data = DATA.FIT, aes(x=TIME, y=EAR))
# ggSUB.EAR <- ggSUB.EAR + geom_line()
# ggSUB.EAR <- ggSUB.EAR + geom_point(aes(y=FEAR))
# # ggSUB.EAR <- ggSUB.EAR + facet_wrap(~ITEST, ncol=3, scales = "free")
# ggSUB.EAR <- ggSUB.EAR + xlim(0,6e6) + ylim(-7.5e-6,0)
# ggSUB.EAR <- ggSUB.EAR + ylab("Axial Strain Rate: Calculated (dot) Vs. Measured (line)") + xlab("Time [sec]")
# ggSUB.EAR
# ---- integrate strain rate ----
# FEA/FEL integrate across ALL rows of DATA.FIT (fine while it holds a
# single test); DT.FE below integrates per ITEST and is the general form.
FEA <- cumtrapz(DATA.FIT$TIME, DATA.FIT$FEAR)
FEL <- cumtrapz(DATA.FIT$TIME, DATA.FIT$FELR)
DATA.FIT <- cbind(DATA.FIT, FEA, FEL)
DT.DATA.FIT <- data.table(DATA.FIT)
setkey(DT.DATA.FIT, ITEST)
# Add per-test integrated strains by reference; trailing [] forces a print-able copy.
DT.FE <- DT.DATA.FIT[, c("IFEAR", "IFELR"):=list(as.vector(cumtrapz(TIME, FEAR)), as.vector(cumtrapz(TIME, FELR))), by = ITEST][]
# # ---- plot fit comparison (axial strain )----
# ggSUB.EA <- ggplot(data = DATA.FIT, aes(x=TIME, y=EAC))
# ggSUB.EA <- ggSUB.EA + geom_line()
# ggSUB.EA <- ggSUB.EA + geom_point(aes(y=FEA))
# # ggSUB.EA <- ggSUB.EA + facet_wrap(~ITEST, ncol=3, scales = "free")
# ggSUB.EA <- ggSUB.EA + xlim(0,6e6) + ylim(-0.25,0)
# ggSUB.EA <- ggSUB.EA + ylab("Axial Strain: Calculated (dot) Vs. Measured (line)") + xlab("Time [sec]")
# ggSUB.EA
# Measured (lines) vs fitted (points) strains from the data.table result.
# NOTE(review): aes(y=ELC) resolves ELC from the script environment unless
# DT.FE carries an ELC column -- confirm which one is intended.
ggSUB.E <- ggplot(data = DT.FE, aes(x=TIME, y=EAC, color = "SC1B"))
ggSUB.E <- ggSUB.E + geom_line(aes(color = "Axial Strain"))
ggSUB.E <- ggSUB.E + geom_point(aes(y=IFEAR, color = "Axial Strain - Fit"))
ggSUB.E <- ggSUB.E + geom_line(aes(y=ELC, color = "Lateral Strain"))
ggSUB.E <- ggSUB.E + geom_point(aes(y=IFELR, color = "Lateral Strain - Fit"))
# ggSUB.E <- ggSUB.E + facet_wrap(~ITEST, ncol=3, scales = "free")
ggSUB.E <- ggSUB.E + xlim(0,6e6) + ylim(-0.25,0)
ggSUB.E <- ggSUB.E + ylab("Axial Strain: Calculated (dot) Vs. Measured (line)") + xlab("Time [sec]")
ggSUB.E
# ---- plot fit comparison using the plain data.frame results (DATA.FIT) ----
# Measured strains as lines, integrated fitted strains as points.  The
# default aes colour is just a constant label; every layer overrides it.
# Bug fix: the original wrote `color = SC1B` with a bare symbol.  SC1B is
# neither a column of DATA.FIT nor an object in this script, so printing the
# plot errors with "object 'SC1B' not found".  The companion plot above uses
# the string literal "SC1B"; matched here.
ggDT.E <- ggplot(data = DATA.FIT, aes(x=TIME, y=EAC, color = "SC1B"))
ggDT.E <- ggDT.E + geom_line(aes(color = "Axial Strain"))
ggDT.E <- ggDT.E + geom_point(aes(y=FEA, color = "Axial Strain - Fit"))
ggDT.E <- ggDT.E + geom_line(aes(y=ELC, color = "Lateral Strain"))
ggDT.E <- ggDT.E + geom_point(aes(y=FEL, color = "Lateral Strain - Fit"))
# ggDT.E <- ggDT.E + facet_wrap(~ITEST, ncol=3, scales = "free")
ggDT.E <- ggDT.E + xlim(0,6e6) + ylim(-0.25,0)
ggDT.E <- ggDT.E + ylab("True Strain") + xlab("Time [sec]")
ggDT.E
|
# Problem 3 ====
# Grid approximation of a Bayesian posterior for a proportion theta:
# discrete uniform prior over a 20-point grid, binomial likelihood kernel
# theta^k * (1 - theta)^(n - k).  The choose(n, k) constant is omitted
# because it cancels when the posterior is normalized.
theta      <- seq(from = 0.025, to = 0.975, length.out = 20)  # candidate values
priorDist  <- rep(1 / 20, 20)                                 # uniform prior mass
numTook    <- 15                                              # n: number of trials
numConcern <- 12                                              # k: number of "successes"
# Unnormalized posterior = prior * likelihood kernel, then normalize to sum to 1.
postDist <- priorDist * theta^numConcern * (1 - theta)^(numTook - numConcern)
postDist <- postDist / sum(postDist)
# Collect grid, prior, and posterior in one data frame for plotting
# (replaces growing a NULL list element-by-element).
df.p3 <- data.frame(theta = theta, priorDist = priorDist, postDist = postDist)
# Posterior mass at each grid point; geom_col() is the idiomatic
# equivalent of geom_bar(stat = "identity").
p3.plot <-
  df.p3 %>%
  ggplot(aes(x = theta, y = postDist)) +
  geom_col(fill = 'lightblue') +
  theme_bw() +
  labs(x = 'Q',
       y = 'Posterior Probability')
| /01/problem_3.R | no_license | tjwhalenUVA/664-Homework | R | false | false | 552 | r | #Problem 3====
# Grid-approximation posterior: uniform prior over 20 theta values,
# binomial likelihood kernel theta^k * (1-theta)^(n-k), then normalize
# (the choose(n, k) constant cancels on normalization).
df.p3 <- NULL # grown as a list below, converted to a data frame afterwards
theta <- seq(from = 0.025, to = 0.975, length.out = 20) # grid of candidate proportions
priorDist <- array(1/20, 20) # discrete uniform prior
numTook <- 15 # n: number of trials
numConcern <- 12 # k: number of "successes"
# Unnormalized posterior = prior * likelihood kernel.
postDist <- priorDist * theta^numConcern*(1-theta)^(numTook-numConcern)
postDist <- postDist/sum(postDist) # normalize to sum to 1
df.p3$theta <- theta
df.p3$priorDist <- priorDist
df.p3$postDist <- postDist
df.p3 <- as.data.frame(df.p3)
# Bar chart of the posterior mass at each grid point.
p3.plot <-
  df.p3 %>%
  ggplot(aes(x=theta, y=postDist)) +
  geom_bar(stat = 'identity', fill='lightblue') +
  theme_bw() +
  labs(x='Q',
       y='Posterior Probability')
#' Filled 2d contours of a 3d surface
#'
#' While ggplot2's \code{\link[ggplot2]{geom_contour}} can plot nice contours, it
#' doesn't work with the polygon geom. This stat makes some small manipulation
#' of the data to ensure that all contours are closed and also computes a new
#' aesthetic \code{int.level}, which differs from \code{level} (computed by
#' [ggplot2::geom_contour]) in that represents
#' the value of the \code{z} aesthetic *inside* the contour instead of at the edge.
#'
#' @inheritParams ggplot2::geom_contour
#' @param breaks numeric vector of breaks
#' @param bins Number of evenly spaced breaks.
#' @param binwidth Distance between breaks.
#' @param circular either NULL, "x" or "y" indicating which dimension is circular,
#' if any.
#'
#' @section Aesthetics:
#' \code{geom_contour_fill} understands the following aesthetics (required aesthetics are in bold):
#'
#' \itemize{
#' \item \strong{x}
#' \item \strong{y}
#' \item \code{alpha}
#' \item \code{colour}
#' \item \code{group}
#' \item \code{linetype}
#' \item \code{size}
#' \item \code{weight}
#'}
#'
#'
#' @section Computed variables:
#' \describe{
#' \item{int.level}{value of the interior contour}
#' }
#'
#' @examples
#' library(ggplot2)
#' surface <- reshape2::melt(volcano)
#' ggplot(surface, aes(Var1, Var2, z = value)) +
#' geom_contour_fill() +
#' geom_contour(color = "black", size = 0.1)
#'
#' # Plots only deviations from the mean.
#' ggplot(surface, aes(Var1, Var2, z = as.numeric(scale(value)))) +
#' geom_contour_fill(complete = FALSE, exclude = 0)
#'
#' # If one uses level instead of int.level, one of the small
#' # contours near the crater disappears
#' ggplot(surface, aes(Var1, Var2, z = value)) +
#' geom_contour_fill(aes(fill = ..level..))
#'
#'
#'
#' @family ggplot2 helpers
#' @export
#' @import sp
#' @import ggplot2
geom_contour_fill <- function(mapping = NULL, data = NULL,
                              stat = "ContourFill", position = "identity",
                              ...,
                              breaks = NULL,
                              bins = NULL,
                              binwidth = NULL,
                              na.rm = FALSE,
                              circular = NULL,
                              show.legend = NA,
                              inherit.aes = TRUE) {
  # Everything that is not a layer-level argument is forwarded to the stat
  # through the params list, including any extra arguments captured by `...`.
  stat_params <- list(breaks = breaks,
                      bins = bins,
                      binwidth = binwidth,
                      na.rm = na.rm,
                      circular = circular,
                      ...)
  # Pair the "ContourFill" stat with ggplot2's polygon geom so the computed
  # contours are drawn as filled polygons.
  ggplot2::layer(geom = GeomPolygon,
                 stat = stat,
                 data = data,
                 mapping = mapping,
                 position = position,
                 show.legend = show.legend,
                 inherit.aes = inherit.aes,
                 params = stat_params)
}
| /R/geom_contour_fill.R | no_license | brodieG/metR | R | false | false | 2,711 | r | #' Filled 2d contours of a 3d surface
#'
#' While ggplot2's \code{\link[ggplot2]{geom_contour}} can plot nice contours, it
#' doesn't work with the polygon geom. This stat makes some small manipulation
#' of the data to ensure that all contours are closed and also computes a new
#' aesthetic \code{int.level}, which differs from \code{level} (computed by
#' [ggplot2::geom_contour]) in that represents
#' the value of the \code{z} aesthetic *inside* the contour instead of at the edge.
#'
#' @inheritParams ggplot2::geom_contour
#' @param breaks numeric vector of breaks
#' @param bins Number of evenly spaced breaks.
#' @param binwidth Distance between breaks.
#' @param circular either NULL, "x" or "y" indicating which dimension is circular,
#' if any.
#'
#' @section Aesthetics:
#' \code{geom_contour_fill} understands the following aesthetics (required aesthetics are in bold):
#'
#' \itemize{
#' \item \strong{x}
#' \item \strong{y}
#' \item \code{alpha}
#' \item \code{colour}
#' \item \code{group}
#' \item \code{linetype}
#' \item \code{size}
#' \item \code{weight}
#'}
#'
#'
#' @section Computed variables:
#' \describe{
#' \item{int.level}{value of the interior contour}
#' }
#'
#' @examples
#' library(ggplot2)
#' surface <- reshape2::melt(volcano)
#' ggplot(surface, aes(Var1, Var2, z = value)) +
#' geom_contour_fill() +
#' geom_contour(color = "black", size = 0.1)
#'
#' # Plots only deviations from the mean.
#' ggplot(surface, aes(Var1, Var2, z = as.numeric(scale(value)))) +
#' geom_contour_fill(complete = FALSE, exclude = 0)
#'
#' # If one uses level instead of int.level, one of the small
#' # contours near the crater disappears
#' ggplot(surface, aes(Var1, Var2, z = value)) +
#' geom_contour_fill(aes(fill = ..level..))
#'
#'
#'
#' @family ggplot2 helpers
#' @export
#' @import sp
#' @import ggplot2
geom_contour_fill <- function(mapping = NULL, data = NULL,
                              stat = "ContourFill", position = "identity",
                              ...,
                              breaks = NULL,
                              bins = NULL,
                              binwidth = NULL,
                              na.rm = FALSE,
                              circular = NULL,
                              show.legend = NA,
                              inherit.aes = TRUE) {
  # Build a ggplot2 layer that pairs the "ContourFill" stat with the polygon
  # geom, so computed contours are drawn as filled (closed) polygons.
  ggplot2::layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomPolygon,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    # Non-layer arguments (and anything captured by `...`) are forwarded to
    # the stat via params.
    params = list(
      breaks = breaks,
      bins = bins,
      binwidth = binwidth,
      na.rm = na.rm,
      circular = circular,
      ...
    )
  )
}
|
# Shiny server for the 311 forecast dashboard.
# Globals read here (defined elsewhere in the app): forecasts_daily,
# complaint_types, yest_data, base_map, best_models, plot_ts().
server <- function(input, output, session) {
  # When the agency changes, grey out (disable) complaint types the selected
  # agency has no forecasts for, instead of removing them from the picker.
  observeEvent(input$agency, {
    new_complaint_types = forecasts_daily %>%
      filter(agency == input$agency) %>%
      pull(complaint_type) %>%
      unique() %>%
      sort()
    disabled_choices <- !complaint_types %in% new_complaint_types
    #
    updatePickerInput(session = session,
                      inputId = "complaint_type",
                      choices = complaint_types,
                      choicesOpt = list(
                        disabled = disabled_choices,
                        style = ifelse(disabled_choices,
                                       yes = "color: rgba(119, 119, 119, 0.5);",
                                       no = "")))
  })
  # Base leaflet map; markers are layered on via leafletProxy below.
  output$dailymap <- renderLeaflet(base_map)
  observe({
    # Local copy shadows the global yest_data; the global is untouched.
    yest_data <- yest_data %>%
      filter(agency == input$agency,
             complaint_type == input$complaint_type)
    # Redraw clustered markers: blue when closed_date is missing, red
    # otherwise.  jitter() separates points at identical coordinates --
    # presumably repeat incidents at the same address; confirm.
    leafletProxy("dailymap", session) %>%
      clearMarkerClusters() %>%
      addCircleMarkers(
        clusterOptions = markerClusterOptions(),
        lng = jitter(yest_data$longitude, factor = 2),
        lat = jitter(yest_data$latitude, factor = 2),
        radius = 3,
        color = ifelse(is.na(yest_data$closed_date), 'blue', 'red'),
        stroke = TRUE,
        fillOpacity = 1,
        popup = paste0(
          "<b> Incident Description: </b> <br>", yest_data$descriptor, "<br>",
          "<b> Community Board: </b>", as.character(yest_data$community_board), "<br>",
          "<b> Date: </b>", as.character(yest_data$created_date), "<br>",
          "<b> Incident Address: </b>", as.character(yest_data$incident_address)))
    # Taking a reactive dependency on the reset button re-centers the map to
    # the default view whenever it is pressed (and on any rerun of this
    # observer).
    input$reset_button
    leafletProxy("dailymap") %>%
      setView(lng = -73.98928, lat = 40.75042, zoom = 10)
  })
  # Forecast time-series plot for the current agency/complaint selection.
  output$tsplot <- plotly::renderPlotly({
    plot_ts(forecasts_daily, input$agency, input$complaint_type, best_models)
  })
  output$table <- DT::renderDataTable(yest_data,
                                      rownames = FALSE,
                                      options = list(
                                        pageLength = 5, # sets n observations shown
                                        lengthChange = FALSE, # removes option to change n observations shown
                                        sDom = '<"top">lrt<"bottom">ip', # removes the search bar
                                        scrollX = TRUE # enable side scroll so table doesn't overflow
                                      ))
  # Narrative summary of yesterday's totals and the current forecast.
  output$summary <- renderUI({
    # Day before the earliest non-NA forecast date for this selection.
    # NOTE: `min()-1` only works because %>% binds tighter than `-`
    # (parsed as (... %>% min()) - 1); fragile but correct.
    .yest_date <- forecasts_daily %>%
      filter(complaint_type == input$complaint_type, agency == input$agency) %>%
      na.omit() %>%
      pull(date) %>%
      min()-1
    # NOTE(review): sums forecast means across ALL agencies/complaints on
    # that date to represent "the City" -- confirm intended.  Also prefer
    # na.rm = TRUE over the abbreviation T.
    .yest_total_calls <- forecasts_daily %>%
      filter(date == .yest_date) %>%
      summarize(n = round(sum(.mean, na.rm = T),0)) %>%
      pull(n)
    .yest_agency_total_calls <- forecasts_daily %>%
      filter(agency == input$agency, date == .yest_date) %>%
      summarize(n = round(sum(.mean),0)) %>%
      pull(n)
    # Local copy restricted to the current selection.
    forecasts_daily <- forecasts_daily %>%
      filter(agency == input$agency, complaint_type == input$complaint_type)
    # One-step-ahead forecast: mean at the earliest forecasted date.
    one_step_fcst <- forecasts_daily %>%
      na.omit() %>%
      filter(date == min(date)) %>%
      pull(.mean)
    # Average forecast over the remaining horizon.
    weekly_avg <- forecasts_daily %>%
      na.omit() %>%
      summarise(mean = mean(.mean)) %>%
      pull(mean)
    text_string <- HTML(paste0("<br> <ul> <li>Yesterday, the <b> City received a total of ",
                               scales::comma_format()(.yest_total_calls)," service calls </b> and <b>",
                               input$agency, " received ", scales::comma_format()(.yest_agency_total_calls),
                               " service calls. </b> </li> <li> Today, <b>",input$agency," can expect ",
                               scales::comma_format()(one_step_fcst)," service calls related to '",
                               input$complaint_type, "'. </b> </li>",
                               "<li> On average, there will be ", scales::comma_format()(weekly_avg),
                               " service calls daily for ", "'", input$complaint_type,"'",
                               " over the next week.</li></ul> <br> <br>"))
    return(text_string)
  })
  # shut down R after closing browser
  session$onSessionEnded(function() {
    stopApp()
  })
} | /311_calls/server.R | no_license | bwaheed22/311-analysis | R | false | false | 4,455 | r | server <- function(input, output, session) {
  # Disable complaint types the selected agency has no forecasts for.
  observeEvent(input$agency, {
    new_complaint_types = forecasts_daily %>%
      filter(agency == input$agency) %>%
      pull(complaint_type) %>%
      unique() %>%
      sort()
    disabled_choices <- !complaint_types %in% new_complaint_types
    #
    updatePickerInput(session = session,
                      inputId = "complaint_type",
                      choices = complaint_types,
                      choicesOpt = list(
                        disabled = disabled_choices,
                        style = ifelse(disabled_choices,
                                       yes = "color: rgba(119, 119, 119, 0.5);",
                                       no = "")))
  })
  # Base leaflet map; markers are layered on via leafletProxy below.
  output$dailymap <- renderLeaflet(base_map)
  observe({
    # Local copy shadows the global yest_data.
    yest_data <- yest_data %>%
      filter(agency == input$agency,
             complaint_type == input$complaint_type)
    # Blue = closed_date missing, red = closed; jitter() separates
    # coincident points.
    leafletProxy("dailymap", session) %>%
      clearMarkerClusters() %>%
      addCircleMarkers(
        clusterOptions = markerClusterOptions(),
        lng = jitter(yest_data$longitude, factor = 2),
        lat = jitter(yest_data$latitude, factor = 2),
        radius = 3,
        color = ifelse(is.na(yest_data$closed_date), 'blue', 'red'),
        stroke = TRUE,
        fillOpacity = 1,
        popup = paste0(
          "<b> Incident Description: </b> <br>", yest_data$descriptor, "<br>",
          "<b> Community Board: </b>", as.character(yest_data$community_board), "<br>",
          "<b> Date: </b>", as.character(yest_data$created_date), "<br>",
          "<b> Incident Address: </b>", as.character(yest_data$incident_address)))
    # Dependency on the reset button re-centers the map to the default view.
    input$reset_button
    leafletProxy("dailymap") %>%
      setView(lng = -73.98928, lat = 40.75042, zoom = 10)
  })
  output$tsplot <- plotly::renderPlotly({
    plot_ts(forecasts_daily, input$agency, input$complaint_type, best_models)
  })
  output$table <- DT::renderDataTable(yest_data,
                                      rownames = FALSE,
                                      options = list(
                                        pageLength = 5, # sets n observations shown
                                        lengthChange = FALSE, # removes option to change n observations shown
                                        sDom = '<"top">lrt<"bottom">ip', # removes the search bar
                                        scrollX = TRUE # enable side scroll so table doesn't overflow
                                      ))
  # Narrative summary of yesterday's totals and the current forecast.
  output$summary <- renderUI({
    # NOTE: `min()-1` relies on %>% binding tighter than `-`.
    .yest_date <- forecasts_daily %>%
      filter(complaint_type == input$complaint_type, agency == input$agency) %>%
      na.omit() %>%
      pull(date) %>%
      min()-1
    # NOTE(review): totals below sum forecast means across ALL
    # agencies/complaints on that date -- confirm intended.
    .yest_total_calls <- forecasts_daily %>%
      filter(date == .yest_date) %>%
      summarize(n = round(sum(.mean, na.rm = T),0)) %>%
      pull(n)
    .yest_agency_total_calls <- forecasts_daily %>%
      filter(agency == input$agency, date == .yest_date) %>%
      summarize(n = round(sum(.mean),0)) %>%
      pull(n)
    # Local copy restricted to the current selection.
    forecasts_daily <- forecasts_daily %>%
      filter(agency == input$agency, complaint_type == input$complaint_type)
    one_step_fcst <- forecasts_daily %>%
      na.omit() %>%
      filter(date == min(date)) %>%
      pull(.mean)
    weekly_avg <- forecasts_daily %>%
      na.omit() %>%
      summarise(mean = mean(.mean)) %>%
      pull(mean)
    text_string <- HTML(paste0("<br> <ul> <li>Yesterday, the <b> City received a total of ",
                               scales::comma_format()(.yest_total_calls)," service calls </b> and <b>",
                               input$agency, " received ", scales::comma_format()(.yest_agency_total_calls),
                               " service calls. </b> </li> <li> Today, <b>",input$agency," can expect ",
                               scales::comma_format()(one_step_fcst)," service calls related to '",
                               input$complaint_type, "'. </b> </li>",
                               "<li> On average, there will be ", scales::comma_format()(weekly_avg),
                               " service calls daily for ", "'", input$complaint_type,"'",
                               " over the next week.</li></ul> <br> <br>"))
    return(text_string)
  })
  # shut down R after closing browser
  session$onSessionEnded(function() {
    stopApp()
  })
} |
# Build the standard set of prevalence and QALY plots for the chosen
# policy scenarios.
#
# chosen  : character vector of scenario codes (a subset of ALL_SCENARIOS)
# df_prop : prevalence data, passed through to plotPrev()
# df_LYL  : life-years-lost data, passed through to plotLYL()
#
# Returns a named list of plot objects.
#
# NOTE(review): plotPrev() and plotLYL() are defined by the two
# source(..., local = TRUE) calls below.  The styling variables defined
# here (panelCol, COLZ, FILZ, LINETYPES, SHAPES, LABELS, CHOSEN_TIMES,
# *TextSize, ...) are unused in the visible body, so they are presumably
# consumed by those sourced scripts via this function's environment --
# confirm before renaming or removing any of them.
getPlots <- function(chosen, df_prop, df_LYL){
  # Plot settings
  panelCol <- "white"
  backgroundCol <- "white"
  foregroundCol <- "black"
  #
  # Master list of scenario codes; the per-scenario aesthetics below are
  # subset to the requested scenarios via selected_idx.
  ALL_SCENARIOS <- c("SQ", "MLA21", "MLA25", "SFG", "TAX5", "TAX2", "ELF", "E25",
                     "EP", "MLATax5", "MLAEP", "Tax5EP", "MLATax5EP", "SFGELF")
  selected_idx <- match(chosen, ALL_SCENARIOS)
  SCENARIOS <- ALL_SCENARIOS[selected_idx]
  # Line colours and point fills per scenario.
  COLZ <- c(SQ=foregroundCol, MLA21='brown', MLA25='brown', SFG='violetred', TAX5='red', TAX2='red',
            ELF='darkblue', E25='seagreen1', EP='deepskyblue',
            MLATax5='darkviolet', MLAEP='orchid3', Tax5EP='deepskyblue', MLATax5EP='darkorange',
            SFGELF='darkblue')[selected_idx]
  FILZ <- c(SQ=foregroundCol, MLA21='white', MLA25='brown', SFG='violetred', TAX5='white', TAX2='red',
            ELF='darkblue', E25='seagreen1', EP='white',
            MLATax5='darkviolet', MLAEP='white', Tax5EP='white', MLATax5EP='darkorange',
            SFGELF='white')[selected_idx]
  # Linetypes: 1:solid, 2:dashed
  LINETYPES <- c(SQ=1, MLA21=1, MLA25=1, SFG=1, TAX5=1, TAX2=1, ELF=1, E25=2, EP=1,
                 MLATax5=2, MLAEP=2, Tax5EP=2, MLATax5EP=2, SFGELF=2)[selected_idx]
  # Shapes: 4:cross, 21:circle, 22:square, 23:diamond, 24:triangle
  SHAPES <- c(SQ=4, MLA21=21, MLA25=21, SFG=21, TAX5=22, TAX2=22, ELF=24, E25=24, EP=24,
              MLATax5=21, MLAEP=24, Tax5EP=22, MLATax5EP=22, SFGELF=23)[selected_idx]
  # Human-readable legend labels, parallel to ALL_SCENARIOS.
  LABELS <- c("Status Quo (SQ)", "Minimum Legal Age (MLA)", "Minimum Legal Age 25 (MLA25)",
              "Smoke Free Generation (SFG)", "TAX5", "TAX2",
              "E-cigarette Laissez-Faire (ELF)", "E-cigarette 25 (E25)",
              "E-cigarette Prescription (EP)",
              "MLA + TAX5", "MLA + EP", "TAX5 + EP", "MLA + TAX5 + EP", "SFG + ELF")[selected_idx]
  CHOSEN_TIMES <- seq(2027, 2067, by=10)
  #
  axisTextSize <- 8
  legendTextSize <- 8
  annoteTextSize <- 8
  # Define plotLYL()/plotPrev() inside this function's environment.
  source(file.path(codeDir, "plotLYL.R"), local=TRUE)
  source(file.path(codeDir, "plotPrevalence.R"), local=TRUE)
  # Prevalence
  ylab="Prevalence (%)"
  pN <- plotPrev(chosen, df_prop, "N", ylab, ylim = c(0, 100), text = "NEVER SMOKERS")
  pC <- plotPrev(chosen, df_prop, "C", ylab, ylim = c(0, 30), text = "CIGARETTE ONLY USERS")
  pQ <- plotPrev(chosen, df_prop, "Q", ylab, ylim = c(0, 30), text = "EX-SMOKERS")
  pD <- plotPrev(chosen, df_prop, "D", ylab, ylim = c(0, 5), text = "DUAL USERS")
  pE <- plotPrev(chosen, df_prop, "E", ylab, ylim = c(0, 5), text = "E-CIGARETTE ONLY USERS")
  pNandQ <- plotPrev(chosen, df_prop, "NandQ", ylab="Prevalence of Never Smokers and Ex-Smokers (%)", ylim = c(0, 100), text = " ")
  pCandD <- plotPrev(chosen, df_prop, "CandD", ylab="Prevalence of Cigarette and Dual Users (%)", ylim = c(0, 15.5), text = " ")
  pCDE <- plotPrev(chosen, df_prop, "CDE", ylab="Prevalence of e/Cigarette Users (%)", ylim = c(0, 16.5), text = " ")
  # "leg" variants include a legend.
  pCandDleg <- plotPrev(chosen, df_prop, "CandD", ylab="Prevalence of Cigarette and Dual Users (%)", ylim = c(0, 15.5), text = " ", leg=TRUE)
  pCDEleg <- plotPrev(chosen, df_prop, "CDE", ylab="Prevalence of e/Cigarette Users (%)", ylim = c(0, 16.5), text = " ", leg=TRUE)
  ####
  # Life Years Lost
  pAnnualLYL <- plotLYL(chosen, df_LYL, 'QALY', 1/1000,
                        ylab = "Annual QALYs Gained (000s)", text = " ",
                        leg=FALSE, ylim = c(-10, 52), y_interval = 0.1e2)
  pAnnualLYLdis <- plotLYL(chosen, df_LYL, 'QALY', 1/1000,
                           ylab = "Annual QALYs Gained (000s)", text = " ",
                           leg=FALSE, ylim = c(-5, 15), y_interval = 0.1e2)
  ####
  # Named list of all plots; this is the function's return value.
  plotsSet <- list(pN=pN, pC=pC, pQ=pQ, pD=pD, pE=pE,
                   pNandQ=pNandQ, pCandD=pCandD, pCDE=pCDE,
                   pCandDleg=pCandDleg, pCDEleg=pCDEleg,
                   pAnnualLYL=pAnnualLYL,
                   pAnnualLYLdis=pAnnualLYLdis)
  plotsSet
} | /code/figures/microscenario_plots/plots/sens_plots/getPlotSet.R | no_license | KateDoan/gice | R | false | false | 3,902 | r | getPlots <- function(chosen, df_prop, df_LYL){
  # Plot settings
  panelCol <- "white"
  backgroundCol <- "white"
  foregroundCol <- "black"
  #
  # Master list of scenario codes; per-scenario aesthetics below are
  # subset to the requested scenarios via selected_idx.
  # NOTE(review): the styling variables here are unused in the visible
  # body and are presumably read by the scripts sourced with local=TRUE
  # below -- confirm before renaming or removing any of them.
  ALL_SCENARIOS <- c("SQ", "MLA21", "MLA25", "SFG", "TAX5", "TAX2", "ELF", "E25",
                     "EP", "MLATax5", "MLAEP", "Tax5EP", "MLATax5EP", "SFGELF")
  selected_idx <- match(chosen, ALL_SCENARIOS)
  SCENARIOS <- ALL_SCENARIOS[selected_idx]
  # Line colours and point fills per scenario.
  COLZ <- c(SQ=foregroundCol, MLA21='brown', MLA25='brown', SFG='violetred', TAX5='red', TAX2='red',
            ELF='darkblue', E25='seagreen1', EP='deepskyblue',
            MLATax5='darkviolet', MLAEP='orchid3', Tax5EP='deepskyblue', MLATax5EP='darkorange',
            SFGELF='darkblue')[selected_idx]
  FILZ <- c(SQ=foregroundCol, MLA21='white', MLA25='brown', SFG='violetred', TAX5='white', TAX2='red',
            ELF='darkblue', E25='seagreen1', EP='white',
            MLATax5='darkviolet', MLAEP='white', Tax5EP='white', MLATax5EP='darkorange',
            SFGELF='white')[selected_idx]
  # Linetypes: 1:solid, 2:dashed
  LINETYPES <- c(SQ=1, MLA21=1, MLA25=1, SFG=1, TAX5=1, TAX2=1, ELF=1, E25=2, EP=1,
                 MLATax5=2, MLAEP=2, Tax5EP=2, MLATax5EP=2, SFGELF=2)[selected_idx]
  # Shapes: 4:cross, 21:circle, 22:square, 23:diamond, 24:triangle
  SHAPES <- c(SQ=4, MLA21=21, MLA25=21, SFG=21, TAX5=22, TAX2=22, ELF=24, E25=24, EP=24,
              MLATax5=21, MLAEP=24, Tax5EP=22, MLATax5EP=22, SFGELF=23)[selected_idx]
  # Human-readable legend labels, parallel to ALL_SCENARIOS.
  LABELS <- c("Status Quo (SQ)", "Minimum Legal Age (MLA)", "Minimum Legal Age 25 (MLA25)",
              "Smoke Free Generation (SFG)", "TAX5", "TAX2",
              "E-cigarette Laissez-Faire (ELF)", "E-cigarette 25 (E25)",
              "E-cigarette Prescription (EP)",
              "MLA + TAX5", "MLA + EP", "TAX5 + EP", "MLA + TAX5 + EP", "SFG + ELF")[selected_idx]
  CHOSEN_TIMES <- seq(2027, 2067, by=10)
  #
  axisTextSize <- 8
  legendTextSize <- 8
  annoteTextSize <- 8
  # Define plotLYL()/plotPrev() inside this function's environment.
  source(file.path(codeDir, "plotLYL.R"), local=TRUE)
  source(file.path(codeDir, "plotPrevalence.R"), local=TRUE)
  # Prevalence
  ylab="Prevalence (%)"
  pN <- plotPrev(chosen, df_prop, "N", ylab, ylim = c(0, 100), text = "NEVER SMOKERS")
  pC <- plotPrev(chosen, df_prop, "C", ylab, ylim = c(0, 30), text = "CIGARETTE ONLY USERS")
  pQ <- plotPrev(chosen, df_prop, "Q", ylab, ylim = c(0, 30), text = "EX-SMOKERS")
  pD <- plotPrev(chosen, df_prop, "D", ylab, ylim = c(0, 5), text = "DUAL USERS")
  pE <- plotPrev(chosen, df_prop, "E", ylab, ylim = c(0, 5), text = "E-CIGARETTE ONLY USERS")
  pNandQ <- plotPrev(chosen, df_prop, "NandQ", ylab="Prevalence of Never Smokers and Ex-Smokers (%)", ylim = c(0, 100), text = " ")
  pCandD <- plotPrev(chosen, df_prop, "CandD", ylab="Prevalence of Cigarette and Dual Users (%)", ylim = c(0, 15.5), text = " ")
  pCDE <- plotPrev(chosen, df_prop, "CDE", ylab="Prevalence of e/Cigarette Users (%)", ylim = c(0, 16.5), text = " ")
  # "leg" variants include a legend.
  pCandDleg <- plotPrev(chosen, df_prop, "CandD", ylab="Prevalence of Cigarette and Dual Users (%)", ylim = c(0, 15.5), text = " ", leg=TRUE)
  pCDEleg <- plotPrev(chosen, df_prop, "CDE", ylab="Prevalence of e/Cigarette Users (%)", ylim = c(0, 16.5), text = " ", leg=TRUE)
  ####
  # Life Years Lost
  pAnnualLYL <- plotLYL(chosen, df_LYL, 'QALY', 1/1000,
                        ylab = "Annual QALYs Gained (000s)", text = " ",
                        leg=FALSE, ylim = c(-10, 52), y_interval = 0.1e2)
  pAnnualLYLdis <- plotLYL(chosen, df_LYL, 'QALY', 1/1000,
                           ylab = "Annual QALYs Gained (000s)", text = " ",
                           leg=FALSE, ylim = c(-5, 15), y_interval = 0.1e2)
  ####
  # Named list of all plots; this is the function's return value.
  plotsSet <- list(pN=pN, pC=pC, pQ=pQ, pD=pD, pE=pE,
                   pNandQ=pNandQ, pCandD=pCandD, pCDE=pCDE,
                   pCandDleg=pCandDleg, pCDEleg=pCDEleg,
                   pAnnualLYL=pAnnualLYL,
                   pAnnualLYLdis=pAnnualLYLdis)
  plotsSet
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.