content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## run_analysis.R -- tidies the UCI HAR smartphone accelerometer dataset:
## download + unzip the raw data, merge the test/train sets, keep the
## mean/std columns, attach readable activity labels, then average every
## variable per activity and subject, writing the result to "tidied_data.txt".
## NOTE(review): paths are hard-coded to a Windows drive ("C:/...") and the
## script changes the working directory -- confirm this is acceptable before
## running anywhere else.
## Check that required library 'reshape2' is installed and
## install if not
if (!("reshape2" %in% rownames(installed.packages())) ) {
install.packages("reshape2")
}
## Load required library 'reshape2'
library(reshape2)
## Download and unzip raw data
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
dir.create("C:/Clean_Data_Assignment", showWarnings = FALSE)
download.file(fileurl, destfile = "C:/Clean_Data_Assignment/zipdata.zip")
setwd("C:/Clean_Data_Assignment")
unzip("C:/Clean_Data_Assignment/zipdata.zip")
setwd("C:/Clean_Data_Assignment/UCI HAR Dataset")
## Read in all text files:
## Obtain activity labels based on activity ids
activity_labels <- read.table("activity_labels.txt", col.names = c("activity_id", "activity_name"))
## Obtain data column names
## features.txt has one row per measured variable (col 1 = index,
## col 2 = variable name); transposing lets us pull row 2 out as a vector.
features <- read.table("features.txt")
transposed <- t(features)
colNames <- transposed[2,]
## Obtain test data and label column names
x_test <- read.table("./test/X_test.txt", col.names = colNames)
y_test <- read.table("./test/y_test.txt", col.names = "activity_id")
subject_test <- read.table("./test/subject_test.txt", col.names = "subject_id")
testdat <- cbind(subject_test, y_test, x_test)
## Obtain training data and label column names
x_train <- read.table("./train/X_train.txt", col.names = colNames)
y_train <- read.table("./train/y_train.txt", col.names = "activity_id")
subject_train <- read.table("./train/subject_train.txt", col.names = "subject_id")
traindat <- cbind(subject_train, y_train, x_train)
## Combine all data (test and training) into a single data set
alldat <- rbind(testdat, traindat)
## Subset the data relating to means and standard deviations
## from the single data set
## Create index of columns relating to means and standard deviations
## NOTE(review): grep("mean", ..., ignore.case=TRUE) also matches the
## meanFreq and angle(...Mean) variables -- confirm they belong in the tidy
## output. Also, concatenating mean then std indices means the subset's
## columns are grouped by statistic rather than in original feature order.
meanindex <- grep("mean",names(alldat),ignore.case=TRUE)
stdindex <- grep("std",names(alldat),ignore.case=TRUE)
index <- c(meanindex, stdindex)
index_names <- names(alldat)[index]
## Subset data from alldat using index
meanstddat <- alldat[, index_names]
## Re-attach the id columns (subject_id, activity_id live in columns 1-2).
ids <- alldat[, 1:2]
meanstddat <- cbind(ids, meanstddat)
## Apply activity labels to subset data
activitydat <- merge(activity_labels, meanstddat, by.x = "activity_id", by.y = "activity_id", all = TRUE)
## Final step is to create a new data frame with
## the average of each variable for each activity and each subject
## Reshape subset data based on activity_id, activity_name and subject_id
melted <- melt(activitydat,id=c("activity_id","activity_name","subject_id"))
## Produce data frame with averages of each variable
recast <- dcast(melted, activity_id + activity_name + subject_id ~ variable, mean)
## Output dataframe containing the variable averages
## as file "tidied_data.txt"
write.table(recast, file = "./tidied_data.txt", row.names = FALSE)
| /run_analysis.R | no_license | DaveyMBS/Getting-and-Cleaning-Data-Assignment | R | false | false | 2,824 | r | ## Check that required library 'reshape2' is installed and
## install if not
if (!("reshape2" %in% rownames(installed.packages())) ) {
install.packages("reshape2")
}
## Load required library 'reshape2'
library(reshape2)
## Download and unzip raw data
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
dir.create("C:/Clean_Data_Assignment", showWarnings = FALSE)
download.file(fileurl, destfile = "C:/Clean_Data_Assignment/zipdata.zip")
setwd("C:/Clean_Data_Assignment")
unzip("C:/Clean_Data_Assignment/zipdata.zip")
setwd("C:/Clean_Data_Assignment/UCI HAR Dataset")
## Read in all text files:
## Obtain activity labels based on activity ids
activity_labels <- read.table("activity_labels.txt", col.names = c("activity_id", "activity_name"))
## Obtain data column names
features <- read.table("features.txt")
transposed <- t(features)
colNames <- transposed[2,]
## Obtain test data and label column names
x_test <- read.table("./test/X_test.txt", col.names = colNames)
y_test <- read.table("./test/y_test.txt", col.names = "activity_id")
subject_test <- read.table("./test/subject_test.txt", col.names = "subject_id")
testdat <- cbind(subject_test, y_test, x_test)
## Obtain training data and label column names
x_train <- read.table("./train/X_train.txt", col.names = colNames)
y_train <- read.table("./train/y_train.txt", col.names = "activity_id")
subject_train <- read.table("./train/subject_train.txt", col.names = "subject_id")
traindat <- cbind(subject_train, y_train, x_train)
## Combine all data (test and training) into a single data set
alldat <- rbind(testdat, traindat)
## Subset the data relating to means and standard deviations
## from the single data set
## Create index of columns relating to means and standard deviations
meanindex <- grep("mean",names(alldat),ignore.case=TRUE)
stdindex <- grep("std",names(alldat),ignore.case=TRUE)
index <- c(meanindex, stdindex)
index_names <- names(alldat)[index]
## Subset data from alldat using index
meanstddat <- alldat[, index_names]
ids <- alldat[, 1:2]
meanstddat <- cbind(ids, meanstddat)
## Apply activity labels to subset data
activitydat <- merge(activity_labels, meanstddat, by.x = "activity_id", by.y = "activity_id", all = TRUE)
## Final step is to create a new data frame with
## the average of each variable for each activity and each subject
## Reshape subset data based on activity_id, activity_name and subject_id
melted <- melt(activitydat,id=c("activity_id","activity_name","subject_id"))
## Produce data frame with averages of each variable
recast <- dcast(melted, activity_id + activity_name + subject_id ~ variable, mean)
## Output dataframe containing the variable averages
## as file "tidied_data.txt"
write.table(recast, file = "./tidied_data.txt", row.names = FALSE)
|
# Coded by Rasmus Bååth
# rasmus.baath@lucs.lu.se
# www.sumsar.net
# If you modify the code, please keep this header. Thanks!
plot_dist <- function(dist, labels=c(), scale = 1, color="skyblue", plot_dist_name=TRUE) {
  # Render one distribution "icon": a small, axis-free density/mass plot with
  # an optional distribution name and parameter labels.
  #
  # dist: one element of the `dists` catalogue (x grid, ddist function and
  #   params, plot type, name/label positions, top_space).
  # labels: character vector of parameter labels. If it has names they are
  #   matched against dist$labels; otherwise labels are placed by position.
  # scale: multiplier applied to line widths and text sizes.
  # color: colour used for the density curve / bars.
  # plot_dist_name: should the distribution's name be drawn inside the plot?
  old_par <- par(mar = c(0.3, 0, 0, 0), xaxt = "n", yaxt = "n", ann = FALSE, bty = "n", xpd = NA)
  x <- dist$x
  y <- do.call(dist$ddist, c(list(x = x), dist$ddist_params))
  # To always anchor the plot at zero and give some extra top space if necessary.
  plot(c(x[1:2], x), c(0, max(y) / (1 - dist$top_space), y), type = "l", col = "transparent")
  # Only draw where the distribution is non-zero: a 3-point moving sum of the
  # zero-padded y that equals 0 marks runs of zeros (and their neighbours).
  # stats::filter is called explicitly so dplyr::filter cannot mask it.
  points_to_NA <- stats::filter(c(0, y, 0), filter = c(1, 1, 1)) == 0
  points_to_NA <- points_to_NA[-c(1, length(points_to_NA))]
  y[points_to_NA] <- NA
  if ("bar" %in% dist$plot_type) {
    lines(x, y, type = "h", col = color, lwd = 6, lend = 1)
    # Using legend to draw a white transparent box behind the text.
    if (plot_dist_name) {
      legend(grconvertX(dist$name_pos[1], from = "npc"), grconvertY(dist$name_pos[2], from = "npc"),
        dist$name, cex = 1.5 * scale, xjust = 0.5, yjust = 0.5, bty = "o", box.lwd = 0,
        box.col = "transparent", bg = rgb(1, 1, 1, 0.5), x.intersp = -1, y.intersp = 0,
        text.col = "transparent")
    }
  }
  if ("line" %in% dist$plot_type) {
    lines(x, y, type = "l", col = color, lwd = 3 * scale)
  }
  # Baseline drawn just below the plotted distribution.
  lines(grconvertX(c(0.037, (1 - 0.037)), from = "npc"), grconvertY(c(-0.02, -0.02), from = "npc"), lwd = 2 * scale)
  if (plot_dist_name) {
    text(grconvertX(dist$name_pos[1], from = "npc"), grconvertY(dist$name_pos[2], from = "npc"), dist$name, cex = 1.5 * scale)
  }
  if (is.character(names(labels))) {
    for (label_name in names(labels)) {
      xpos <- dist$labels[[label_name]][1]
      ypos <- dist$labels[[label_name]][2]
      label <- labels[label_name]
      text(grconvertX(xpos, from = "npc"), grconvertY(ypos, from = "npc"), label, cex = 2 * scale)
    }
  } else {
    for (i in seq_along(labels)) {
      xpos <- dist$labels[[i]][1]
      ypos <- dist$labels[[i]][2]
      label <- labels[i]
      # BUGFIX: this branch previously used cex = 2, ignoring `scale`,
      # unlike the named-labels branch above.
      text(grconvertX(xpos, from = "npc"), grconvertY(ypos, from = "npc"), label, cex = 2 * scale)
    }
  }
  par(old_par)
}
# Catalogue of distribution "icons": each entry bundles everything plot_dist()
# needs to draw one small density/mass plot. Several entries deliberately use
# a related density as a visual stand-in (e.g. half/folded t reuse dt over
# x >= 0 without doubling, beta-binomial reuses dbeta); that is fine for icon
# graphics but these are NOT numerically correct pmfs/pdfs.
dists <- list(
normal = list(
# Name of the distribution to be displayed in the plot
name = "normal",
# Position of the name in the plot
name_pos = c(0.5, 0.1),
# Plot type, "line" for line plots and "bar" for bar plots.
plot_type = "line",
# The values of the x-axis.
x = seq(-3.3, 3.3, 0.01),
# If top_space = 0 the distribution extends to the top of the graph, if
# 0 < top_space < 1 then that proportion of space is left at the top.
top_space = 0,
# The function defining the probability density function
ddist = dnorm,
# The arguments given to the probability density function (has to be named)
ddist_params = list(mean=0, sd=1),
# Coordinates and names for the parameter labels
labels = list(mean = c(0.5, 0.3), right_sd = c(0.80, 0.5), left_sd = c(0.20, 0.5))
),
beta = list(
name = "beta",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(0, 1, 0.01),
top_space = 0,
ddist = dbeta,
ddist_params = list(shape1=2, shape2=2),
labels = list(params=c(0.5, 0.4))
),
gamma = list(
name = "gamma",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0, 2, 0.01),
top_space = 0,
ddist = dgamma,
ddist_params = list(shape=1.3, rate=2.5),
labels = list(params = c(0.60, 0.5))
),
inv_gamma = list(
name = "inv-gamma",
name_pos = c(0.42, 0.1),
plot_type = "line",
x = seq(0, 1.1, 0.01),
top_space = 0,
# Inverse-gamma density written out by hand (not in base R).
ddist = function(x, shape, scale) {scale^shape / gamma(shape) * x^(-shape-1)*exp(-scale/x)},
ddist_params = list(shape=3, scale=1),
labels = list(params = c(0.65, 0.5))
),
t = list(
name = "t distrib.",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-3.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=3),
labels = list(mean = c(0.5, 0.3), right_scale = c(0.75, 0.65), left_scale = c(0.25, 0.65),
right_df = c(0.90, 0.35), left_df = c(0.10, 0.35))
),
uniform = list(
name = "uniform",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(0, 1, 0.001),
top_space = 0.6,
ddist = dunif,
ddist_params = list(min=0.15, max=0.85),
labels = list(min=c(0.18,0.55), max=c(0.82,0.55))
),
bernouli = list(
name = "Bernouli",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = round(seq(-0.4, 1.4, 0.1), 1),
top_space = 0.0,
# Bernoulli pmf on {0, 1}; zero everywhere else on the padded x grid.
ddist = function(x, p) {ifelse(x == 1, p, ifelse(x==0, 1-p, 0))},
ddist_params = list(p=0.7),
labels = list(p = c(0.5, 0.6))
),
binomial = list(
name = "binomial",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = -2:10,
top_space = 0.2,
ddist = dbinom,
ddist_params = list(size=8, prob = 0.45),
labels = list(params = c(0.7, 0.68))
),
folded_t = list(
name = "folded t",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
# NOTE(review): uses the plain t density on x >= 0 as a visual stand-in;
# a true folded t doubles the density. Fine for the icon, not for math.
ddist = dt,
ddist_params = list(ncp=0, df=3),
labels = list(mean = c(0.15, 0.5), scale = c(0.43, 0.62), df = c(0.65, 0.4))
),
poisson = list(
name = "Poisson",
name_pos = c(0.3, 0.1),
plot_type = "bar",
x = seq(-1, 10.0, 1),
top_space = 0.0,
ddist = dpois,
ddist_params = list(lambda=2.5),
labels = list(lambda=c(0.60, 0.65))
),
chi_squared = list(
name = "chi-square",
name_pos = c(0.45, 0.1),
plot_type = "line",
x = seq(-1, 8, 0.01),
top_space = 0.0,
ddist = dchisq,
ddist_params = list(df=3),
labels = list(df=c(0.65, 0.60))
),
double_exponential = list(
name = "double exp.",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-3, 3, 0.01),
top_space = 0,
# Laplace (double exponential) density written out by hand.
ddist = function(x, mu, diversity) {diversity * exp(-diversity * abs(x - mu))/2},
ddist_params = list(mu=0, diversity=1),
labels = list(mean = c(0.5, 0.3), right_diversity = c(0.75, 0.5), left_diversity = c(0.25, 0.5))
),
exponential = list(
name = "exponential",
name_pos = c(0.37, 0.1),
plot_type = "line",
x = seq(0, 1.4, 0.01),
top_space = 0.1,
ddist = dexp,
ddist_params = list(rate=1.5),
labels = list(rate = c(0.55, 0.55))
),
F = list(
name = "F dist.",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0, 5, 0.01),
top_space = 0,
ddist = df,
ddist_params = list(df1=5, df2=5),
labels = list(params = c(0.60, 0.5))
),
generalized_gamma = list(
name = "gen. gamma",
name_pos = c(0.45, 0.1),
plot_type = "line",
x = seq(0, 5, 0.01),
top_space = 0,
# Generalized gamma density written out by hand (not in base R).
ddist = function(x, r, lambda, b) {(b*lambda^(b*r)*x^(b*r-1) * exp(-(lambda*x)^b ))/gamma(r)},
ddist_params = list(r=3, lambda=1, b=1.4),
labels = list(params = c(0.75, 0.75))
),
logistic = list(
name = "logistic",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-4.5, 4.5, 0.01),
top_space = 0,
ddist = dlogis,
ddist_params = list(location=0, scale=1),
labels = list(location = c(0.5, 0.3), right_scale = c(0.80, 0.5), left_scale = c(0.20, 0.5))
),
log_normal = list(
name = "log-normal",
name_pos = c(0.48, 0.1),
plot_type = "line",
x = seq(0, 1.7, 0.01),
top_space = 0,
ddist = dlnorm,
ddist_params = list(meanlog=-0.3, sdlog=0.4),
labels = list(meanlog = c(0.43, 0.3), sdlog = c(0.70, 0.5))
),
noncentral_chi_squared = list(
name = "noncentral\nchi-square",
name_pos = c(0.45, 0.2),
plot_type = "line",
x = seq(0, 8, 0.01),
top_space = 0.3,
ddist = dchisq,
ddist_params = list(df=2, ncp=3),
labels = list(params=c(0.70, 0.65))
),
pareto = list(
name = "Pareto",
name_pos = c(0.65, 0.2),
plot_type = "line",
x = seq(0.1, 1, 0.01),
top_space = 0,
# Pareto density written out by hand (not in base R).
ddist = function(x, alpha, c) {alpha*c^alpha*x^-(alpha+1)},
ddist_params = list(alpha=2, c=1),
labels = list(params=c(0.30, 0.65))
),
weibull = list(
name = "Weibull",
name_pos = c(0.35, 0.1),
plot_type = "line",
x = seq(0, 2.5, 0.01),
top_space = 0,
ddist = dweibull,
ddist_params = list(shape=2.1, scale=1),
labels = list(params = c(0.70, 0.60))
),
beta_binomial = list(
name = "beta-binomial",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = seq(0, 1, 0.1),
top_space = 0,
# NOTE(review): visual stand-in -- this evaluates the beta *density* at a
# grid of points rather than a true beta-binomial pmf.
ddist = dbeta,
ddist_params = list(shape1=2.7, shape2=2.7),
labels = list(params=c(0.5, 0.6))
),
categorical = list(
name = "categorical",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = 0:5,
top_space = 0.2,
# Normalizes the given category weights into probabilities at x = 1..K.
ddist = function(x, p_cat) {
pd <- rep(0, length(x))
pd[x %in% seq_along(p_cat)] <- p_cat[x[x %in% seq_along(p_cat)]]/sum(p_cat)
pd
},
ddist_params = list(p_cat = c(1.5, 3.3, 2, 3)),
labels = list(params=c(0.5, 0.5))
),
noncentral_hypergeometric = list(
name = "noncentral\nhypergeom.",
name_pos = c(0.5, 0.2),
plot_type = "bar",
x = 0:12,
top_space = 0.0,
# NOTE(review): dhyper is the *central* hypergeometric pmf, used here as a
# visual stand-in for the noncentral icon.
ddist = dhyper,
ddist_params = list(m=50, n=50, k=12),
labels = list(params=c(0.5, 0.6))
),
negative_binomial = list(
name = "neg. binomial",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = -2:10,
top_space = 0.1,
ddist = dnbinom,
ddist_params = list(size=25, prob = 0.90),
labels = list(p = c(0.65, 0.65))
),
shifted_exponential = list(
name = "shifted exp.",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(0, 7, 0.01),
top_space = 0.2,
ddist = function(x, rate, shift) {dexp(x - shift, rate)},
ddist_params = list(rate=0.35, shift = 1),
labels = list(params = c(0.6, 0.55))
),
right_censored_normal= list(
name = "r-cens.\nnormal",
name_pos = c(0.5, 0.2),
plot_type = "line",
x = seq(-3.3, 3.3, 0.01),
top_space = 0,
# Normal density truncated to zero at and beyond the right censoring limit.
ddist = function(x, mean, sd, right_limit) {ifelse(x < right_limit, dnorm(x, mean, sd), 0)},
ddist_params = list(mean=0, sd=1, right_limit=1.75),
labels = list(mean = c(0.5, 0.45), right_sd = c(0.77, 0.60), right_limit=c(0.83,0.175),
left_sd = c(0.23, 0.60))
),
left_censored_normal= list(
name = "l-cens.\nnormal",
name_pos = c(0.5, 0.2),
plot_type = "line",
x = seq(-3.3, 3.3, 0.01),
top_space = 0,
# Normal density truncated to zero at and below the left censoring limit.
ddist = function(x, mean, sd, left_limit) {ifelse(x > left_limit, dnorm(x, mean, sd), 0)},
ddist_params = list(mean=0, sd=1, left_limit=-1.75),
labels = list(mean = c(0.5, 0.45), right_sd = c(0.77, 0.60), left_limit=c(0.17,0.175),
left_sd = c(0.23, 0.60))
),
cauchy = list(
name = "Cauchy",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-3.0, 3.0, 0.01),
top_space = 0,
# Cauchy == t distribution with df = 1.
ddist = dt,
ddist_params = list(ncp=0, df=1),
labels = list(location = c(0.5, 0.3), right_scale = c(0.77, 0.55), left_scale = c(0.23, 0.55))
),
half_t = list(
name = "half-t",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=3),
labels = list(scale = c(0.43, 0.62), df = c(0.65, 0.4))
),
half_cauchy = list(
name = "half-Cauchy",
name_pos = c(0.36, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=1),
labels = list(scale = c(0.53, 0.5))
),
half_normal = list(
name = "half-normal",
name_pos = c(0.36, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
ddist = dnorm,
ddist_params = list(mean=0, sd=1),
labels = list(sd = c(0.53, 0.5))
)
)
plot_dist_svg <- function(dist, labels=c(), fname="", color="skyblue", plot_dist_name=TRUE) {
  # Render `dist` with plot_dist() into an SVG file sized for the icon sheet.
  # If `fname` is empty it is derived from the distribution's name
  # (whitespace -> underscores, remaining non-word characters dropped).
  if (fname == "") {
    fname <- paste0(gsub("\\W", "", gsub("\\s", "_", dist$name)), ".svg")
  }
  svg(fname, width = 2.25, height = 1.688, bg = "transparent")
  # Close the device even if plot_dist() errors, so later plots are not
  # silently written into a leaked device.
  on.exit(dev.off(), add = TRUE)
  plot_dist(dist, labels, color = color, plot_dist_name = plot_dist_name)
}
plot_dist_png <- function(dist, labels=c(), fname="", color="skyblue", plot_dist_name=TRUE) {
  # Render `dist` with plot_dist() into a PNG file (165x123 px, matching the
  # SVG icon proportions). If `fname` is empty it is derived from the
  # distribution's name (whitespace -> underscores, other non-word chars dropped).
  if (fname == "") {
    fname <- paste0(gsub("\\W", "", gsub("\\s", "_", dist$name)), ".png")
  }
  # Dropped the stray trailing empty argument that was in the original call.
  png(fname, width = 165, height = 123, bg = "transparent", res = 72)
  # Close the device even if plot_dist() errors.
  on.exit(dev.off(), add = TRUE)
  plot_dist(dist, labels, color = color, plot_dist_name = plot_dist_name)
}
# Function that renders text as an image. Useful for constructing images of equations.
# See ?plotmath for examples and documentation
plot_text_svg <- function(expr, fname) {
  # Render a plotmath expression (or plain string) centred on a blank canvas
  # and save it to `fname` as an SVG. See ?plotmath for expression syntax.
  svg(fname, bg = "transparent")
  plot.new()
  text(x = 0.5, y = 0.5, labels = expr)
  dev.off()
}
plot_text_png <- function(expr, fname, pointsize=32, width=640, height=480 ) {
  # Render a plotmath expression (or plain string) centred on a blank canvas
  # and save it to `fname` as a PNG of the requested size and point size.
  png(fname, bg = "transparent", width = width, height = height, pointsize = pointsize)
  plot.new()
  text(x = 0.5, y = 0.5, labels = expr)
  dev.off()
}
| /bugs/distrograms/plot_dist.R | no_license | balima78/Markdowns | R | false | false | 12,904 | r | # Coded by Rasmus Bååth
# rasmus.baath@lucs.lu.se
# www.sumsar.net
# If you modify the code, please keep this header. Thanks!
plot_dist <- function(dist, labels=c(), scale = 1, color="skyblue", plot_dist_name=T) {
old_par <- par(mar = c(0.3, 0, 0, 0), xaxt='n', yaxt='n',ann=FALSE, bty="n", xpd=NA)
x <- dist$x
y <- do.call(dist$ddist, c(list(x=x), dist$ddist_params))
# To always anchor the plot at zero and give some extra top space if neccecary.
plot(c(x[1:2], x), c(0, max(y) / (1- dist$top_space), y), type="l", col="transparent")
# only draw where the distribution is not zero
points_to_NA <- filter(c(0, y, 0), filter=c(1,1, 1)) == 0
points_to_NA <- points_to_NA[-c(1, length(points_to_NA))]
y[points_to_NA] <- NA
if("bar" %in% dist$plot_type) {
lines(x, y, type="h", col=color, lwd=6, lend=1)
# Using legend to draw a white transparent box behind the text
if(plot_dist_name) {
legend(grconvertX(dist$name_pos[1], from="npc"), grconvertY(dist$name_pos[2], from="npc"),
dist$name, cex=1.5 * scale, xjust=0.5, yjust=0.5, bty="o", box.lwd = 0, box.col="transparent",
bg=rgb(1,1, 1,0.5),x.intersp=-1, y.intersp=0 , text.col="transparent")
}
}
if("line" %in% dist$plot_type) {
lines(x, y, type="l", col=color, lwd=3 * scale)
}
lines(grconvertX(c(0.037, (1 - 0.037)), from="npc"), grconvertY(c(-0.02,-0.02), from="npc"), lwd=2 * scale)
if(plot_dist_name) {
text(grconvertX(dist$name_pos[1], from="npc"), grconvertY(dist$name_pos[2], from="npc"), dist$name, cex=1.5 * scale)
}
if(is.character(names(labels))) {
for(label_name in names(labels)) {
xpos <- dist$labels[[label_name]][1]
ypos <- dist$labels[[label_name]][2]
label <- labels[label_name]
text(grconvertX(xpos, from="npc"), grconvertY(ypos, from="npc"), label, cex=2 * scale)
}
} else {
for(i in seq_along(labels)) {
xpos <- dist$labels[[i]][1]
ypos <- dist$labels[[i]][2]
label <- labels[i]
text(grconvertX(xpos, from="npc"), grconvertY(ypos, from="npc"), label, cex=2)
}
}
par(old_par)
}
dists <- list(
normal = list(
# Name of the distribution to be displayed in the plot
name = "normal",
# Position of the name in the plot
name_pos = c(0.5, 0.1),
# Plot type, "line" for a line plots and "bar" for bar plots.
plot_type = "line",
# The values of the x-axis.
x = seq(-3.3, 3.3, 0.01),
# If top_space = 0 the distribution extends to the top of the graph, if
# 0 > top_space < 1 then that proportion of space is left at the top.
top_space = 0,
# The function defining the probability density function
ddist = dnorm,
# The arguments given to the probability density function (has to be named)
ddist_params = list(mean=0, sd=1),
# Coordinates and names for the parameter labels
labels = list(mean = c(0.5, 0.3), right_sd = c(0.80, 0.5), left_sd = c(0.20, 0.5))
),
beta = list(
name = "beta",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(0, 1, 0.01),
top_space = 0,
ddist = dbeta,
ddist_params = list(shape1=2, shape2=2),
labels = list(params=c(0.5, 0.4))
),
gamma = list(
name = "gamma",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0, 2, 0.01),
top_space = 0,
ddist = dgamma,
ddist_params = list(shape=1.3, rate=2.5),
labels = list(params = c(0.60, 0.5))
),
inv_gamma = list(
name = "inv-gamma",
name_pos = c(0.42, 0.1),
plot_type = "line",
x = seq(0, 1.1, 0.01),
top_space = 0,
ddist = function(x, shape, scale) {scale^shape / gamma(shape) * x^(-shape-1)*exp(-scale/x)},
ddist_params = list(shape=3, scale=1),
labels = list(params = c(0.65, 0.5))
),
t = list(
name = "t distrib.",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-3.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=3),
labels = list(mean = c(0.5, 0.3), right_scale = c(0.75, 0.65), left_scale = c(0.25, 0.65),
right_df = c(0.90, 0.35), left_df = c(0.10, 0.35))
),
uniform = list(
name = "uniform",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(0, 1, 0.001),
top_space = 0.6,
ddist = dunif,
ddist_params = list(min=0.15, max=0.85),
labels = list(min=c(0.18,0.55), max=c(0.82,0.55))
),
bernouli = list(
name = "Bernouli",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = round(seq(-0.4, 1.4, 0.1), 1),
top_space = 0.0,
ddist = function(x, p) {ifelse(x == 1, p, ifelse(x==0, 1-p, 0))},
ddist_params = list(p=0.7),
labels = list(p = c(0.5, 0.6))
),
binomial = list(
name = "binomial",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = -2:10,
top_space = 0.2,
ddist = dbinom,
ddist_params = list(size=8, prob = 0.45),
labels = list(params = c(0.7, 0.68))
),
folded_t = list(
name = "folded t",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=3),
labels = list(mean = c(0.15, 0.5), scale = c(0.43, 0.62), df = c(0.65, 0.4))
),
poisson = list(
name = "Poisson",
name_pos = c(0.3, 0.1),
plot_type = "bar",
x = seq(-1, 10.0, 1),
top_space = 0.0,
ddist = dpois,
ddist_params = list(lambda=2.5),
labels = list(lambda=c(0.60, 0.65))
),
chi_squared = list(
name = "chi-square",
name_pos = c(0.45, 0.1),
plot_type = "line",
x = seq(-1, 8, 0.01),
top_space = 0.0,
ddist = dchisq,
ddist_params = list(df=3),
labels = list(df=c(0.65, 0.60))
),
double_exponential = list(
name = "double exp.",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-3, 3, 0.01),
top_space = 0,
ddist = function(x, mu, diversity) {diversity * exp(-diversity * abs(x - mu))/2},
ddist_params = list(mu=0, diversity=1),
labels = list(mean = c(0.5, 0.3), right_diversity = c(0.75, 0.5), left_diversity = c(0.25, 0.5))
),
exponential = list(
name = "exponential",
name_pos = c(0.37, 0.1),
plot_type = "line",
x = seq(0, 1.4, 0.01),
top_space = 0.1,
ddist = dexp,
ddist_params = list(rate=1.5),
labels = list(rate = c(0.55, 0.55))
),
F = list(
name = "F dist.",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0, 5, 0.01),
top_space = 0,
ddist = df,
ddist_params = list(df1=5, df2=5),
labels = list(params = c(0.60, 0.5))
),
generalized_gamma = list(
name = "gen. gamma",
name_pos = c(0.45, 0.1),
plot_type = "line",
x = seq(0, 5, 0.01),
top_space = 0,
ddist = function(x, r, lambda, b) {(b*lambda^(b*r)*x^(b*r-1) * exp(-(lambda*x)^b ))/gamma(r)},
ddist_params = list(r=3, lambda=1, b=1.4),
labels = list(params = c(0.75, 0.75))
),
logistic = list(
name = "logistic",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-4.5, 4.5, 0.01),
top_space = 0,
ddist = dlogis,
ddist_params = list(location=0, scale=1),
labels = list(location = c(0.5, 0.3), right_scale = c(0.80, 0.5), left_scale = c(0.20, 0.5))
),
log_normal = list(
name = "log-normal",
name_pos = c(0.48, 0.1),
plot_type = "line",
x = seq(0, 1.7, 0.01),
top_space = 0,
ddist = dlnorm,
ddist_params = list(meanlog=-0.3, sdlog=0.4),
labels = list(meanlog = c(0.43, 0.3), sdlog = c(0.70, 0.5))
),
noncentral_chi_squared = list(
name = "noncentral\nchi-square",
name_pos = c(0.45, 0.2),
plot_type = "line",
x = seq(0, 8, 0.01),
top_space = 0.3,
ddist = dchisq,
ddist_params = list(df=2, ncp=3),
labels = list(params=c(0.70, 0.65))
),
pareto = list(
name = "Pareto",
name_pos = c(0.65, 0.2),
plot_type = "line",
x = seq(0.1, 1, 0.01),
top_space = 0,
ddist = function(x, alpha, c) {alpha*c^alpha*x^-(alpha+1)},
ddist_params = list(alpha=2, c=1),
labels = list(params=c(0.30, 0.65))
),
weibull = list(
name = "Weibull",
name_pos = c(0.35, 0.1),
plot_type = "line",
x = seq(0, 2.5, 0.01),
top_space = 0,
ddist = dweibull,
ddist_params = list(shape=2.1, scale=1),
labels = list(params = c(0.70, 0.60))
),
beta_binomial = list(
name = "beta-binomial",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = seq(0, 1, 0.1),
top_space = 0,
ddist = dbeta,
ddist_params = list(shape1=2.7, shape2=2.7),
labels = list(params=c(0.5, 0.6))
),
categorical = list(
name = "categorical",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = 0:5,
top_space = 0.2,
ddist = function(x, p_cat) {
pd <- rep(0, length(x))
pd[x %in% seq_along(p_cat)] <- p_cat[x[x %in% seq_along(p_cat)]]/sum(p_cat)
pd
},
ddist_params = list(p_cat = c(1.5, 3.3, 2, 3)),
labels = list(params=c(0.5, 0.5))
),
noncentral_hypergeometric = list(
name = "noncentral\nhypergeom.",
name_pos = c(0.5, 0.2),
plot_type = "bar",
x = 0:12,
top_space = 0.0,
ddist = dhyper,
ddist_params = list(m=50, n=50, k=12),
labels = list(params=c(0.5, 0.6))
),
negative_binomial = list(
name = "neg. binomial",
name_pos = c(0.5, 0.1),
plot_type = "bar",
x = -2:10,
top_space = 0.1,
ddist = dnbinom,
ddist_params = list(size=25, prob = 0.90),
labels = list(p = c(0.65, 0.65))
),
shifted_exponential = list(
name = "shifted exp.",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(0, 7, 0.01),
top_space = 0.2,
ddist = function(x, rate, shift) {dexp(x - shift, rate)},
ddist_params = list(rate=0.35, shift = 1),
labels = list(params = c(0.6, 0.55))
),
right_censored_normal= list(
name = "r-cens.\nnormal",
name_pos = c(0.5, 0.2),
plot_type = "line",
x = seq(-3.3, 3.3, 0.01),
top_space = 0,
ddist = function(x, mean, sd, right_limit) {ifelse(x < right_limit, dnorm(x, mean, sd), 0)},
ddist_params = list(mean=0, sd=1, right_limit=1.75),
labels = list(mean = c(0.5, 0.45), right_sd = c(0.77, 0.60), right_limit=c(0.83,0.175),
left_sd = c(0.23, 0.60))
),
left_censored_normal= list(
name = "l-cens.\nnormal",
name_pos = c(0.5, 0.2),
plot_type = "line",
x = seq(-3.3, 3.3, 0.01),
top_space = 0,
ddist = function(x, mean, sd, left_limit) {ifelse(x > left_limit, dnorm(x, mean, sd), 0)},
ddist_params = list(mean=0, sd=1, left_limit=-1.75),
labels = list(mean = c(0.5, 0.45), right_sd = c(0.77, 0.60), left_limit=c(0.17,0.175),
left_sd = c(0.23, 0.60))
),
cauchy = list(
name = "Cauchy",
name_pos = c(0.5, 0.1),
plot_type = "line",
x = seq(-3.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=1),
labels = list(location = c(0.5, 0.3), right_scale = c(0.77, 0.55), left_scale = c(0.23, 0.55))
),
half_t = list(
name = "half-t",
name_pos = c(0.3, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=3),
labels = list(scale = c(0.43, 0.62), df = c(0.65, 0.4))
),
half_cauchy = list(
name = "half-Cauchy",
name_pos = c(0.36, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
ddist = dt,
ddist_params = list(ncp=0, df=1),
labels = list(scale = c(0.53, 0.5))
),
half_normal = list(
name = "half-normal",
name_pos = c(0.36, 0.1),
plot_type = "line",
x = seq(0.0, 3.0, 0.01),
top_space = 0,
ddist = dnorm,
ddist_params = list(mean=0, sd=1),
labels = list(sd = c(0.53, 0.5))
)
)
plot_dist_svg <- function(dist, labels=c(), fname="", color="skyblue", plot_dist_name=T) {
if(fname == "") {
fname = paste(gsub("\\W", "", gsub("\\s", "_", dist$name)), ".svg", sep="")
}
svg(fname, width=2.25, height=1.688, bg="transparent")
plot_dist(dist, labels, color=color, plot_dist_name=plot_dist_name)
dev.off()
}
plot_dist_png <- function(dist, labels=c(), fname="", color="skyblue", plot_dist_name=T) {
if(fname == "") {
fname = paste(gsub("\\W", "", gsub("\\s", "_", dist$name)), ".png", sep="")
}
png(fname, width=165, height=123, bg="transparent", res=72, )
plot_dist(dist, labels, color=color, plot_dist_name=plot_dist_name)
dev.off()
}
# Function that renders text as an image. Useful for constructing images of equations.
# See ?plotmath for examples and documentation
plot_text_svg <- function(expr, fname) {
svg(fname, bg="transparent")
plot.new()
text(0.5, 0.5, expr)
dev.off()
}
plot_text_png <- function(expr, fname, pointsize=32, width=640, height=480 ) {
png(fname, bg="transparent", width=width, height=height, pointsize=pointsize)
plot.new()
text(0.5, 0.5, expr)
dev.off()
}
|
#' Add a visual parameter box for dot plots
#'
#' Create a visual parameter box for row- or column-based dot plots, i.e., where each feature or sample is a point.
#'
#' @param x A DataFrame with one row, containing the parameter choices for the current plot.
#' @param select_info A list of character vectors named \code{row} and \code{column} which specifies the names of panels available for transmitting single selections on the rows/columns.
#' @param se A \linkS4class{SummarizedExperiment} object after running \code{\link{.cacheCommonInfo}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for row- or column-based plots.
#'
#' @details
#' Column-based plots can be coloured by nothing, by column metadata, by the expression of a single feature or by the identity of a single sample.
#' This function creates a collapsible box that contains all of these options, initialized with the choices in \code{memory}.
#' The box will also contain options for font size, point size and opacity, and legend placement.
#'
#' Each option, once selected, yields a further subset of nested options.
#' For example, choosing to colour by column metadata will open up a \code{selectInput} to specify the metadata field to use.
#' Choosing to colour by feature name will open up a \code{selectizeInput}.
#' However, the values are filled on the server-side, rather than being sent to the client; this avoids long start times during re-rendering.
#'
#' Note that some options will be disabled depending on the nature of the input, namely:
#' \itemize{
#' \item If there are no column metadata fields, users will not be allowed to colour by column metadata, obviously.
#' \item If there are no features, users cannot colour by features.
#' \item If there are no categorical column metadata fields, users will not be allowed to view the faceting options.
#' }
#'
#' The same logic applies for row-based plots where we swap features with samples (i.e., coloring by feature will highlight a single feature, while coloring by sample will color by the expression of all features in that sample).
#' Similarly, the row metadata is used in place of the column metadata.
#'
#' @author Aaron Lun
#' @seealso
#' \code{\link{.defineInterface}}, where this function is typically called.
#'
#' @importFrom shiny radioButtons tagList selectInput selectizeInput checkboxGroupInput
#' @importFrom colourpicker colourInput
#' @importFrom stats setNames
#'
#' @rdname INTERNAL_create_visual_box
.create_visual_box <- function(x, se, select_info) {
    # Build the collapsible "Visual parameters" box for a dot plot panel.
    # Each visual aspect (color, shape, size, point, facet, text, other)
    # contributes one UI chunk; a chunk is only rendered while its title is
    # ticked in the leading checkbox group.
    aspect_ui <- list(
        .defineVisualColorInterface(x, se, select_info),
        .defineVisualShapeInterface(x, se),
        .defineVisualSizeInterface(x, se),
        .defineVisualPointInterface(x, se),
        .defineVisualFacetInterface(x, se),
        .defineVisualTextInterface(x, se),
        .defineVisualOtherInterface(x)
    )
    names(aspect_ui) <- c(
        .visualParamChoiceColorTitle,
        .visualParamChoiceShapeTitle,
        .visualParamChoiceSizeTitle,
        .visualParamChoicePointTitle,
        .visualParamChoiceFacetTitle,
        .visualParamChoiceTextTitle,
        .visualParamChoiceOtherTitle
    )
    stopifnot(all(names(aspect_ui) != ""))

    # Discard aspects whose interface generator returned NULL (not applicable
    # for this panel type).
    aspect_ui <- aspect_ui[!vapply(aspect_ui, is.null, logical(1))]

    plot_name <- .getEncodedName(x)
    pchoice_field <- paste0(plot_name, "_", .visualParamChoice)

    # Wrap each remaining chunk in a conditional panel keyed on its title.
    conditional_ui <- lapply(names(aspect_ui), function(title) {
        .conditionalOnCheckGroup(pchoice_field, title, aspect_ui[[title]])
    })

    collapseBox(
        id=paste0(plot_name, "_", .visualParamBoxOpen),
        title="Visual parameters",
        open=slot(x, .visualParamBoxOpen),
        checkboxGroupInput(
            inputId=pchoice_field, label=NULL, inline=TRUE,
            selected=slot(x, .visualParamChoice),
            choices=names(aspect_ui)
        ),
        do.call(tagList, conditional_ui)
    )
}
#' Define colouring options
#'
#' Define the available colouring options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A \linkS4class{SummarizedExperiment} object.
#' @param covariates Character vector of available covariates to use for coloring.
#' @param assay_names Character vector of available assay names to use for coloring.
#'
#' @details
#' Colouring by column data is not available if no column data exists in \code{se} - same for the row data.
#' Colouring by feature names is not available if there are no features in \code{se}.
#' There must also be assays in \code{se} to colour by features (in column-based plots) or samples (in row-based plots).
#'
#' @return A character vector of available colouring modes, i.e., nothing, by column/row data or by feature name.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_define_color_options
.define_color_options_for_column_plots <- function(se, covariates, assay_names) {
    # Build the choices in display order. An `if` without `else` evaluates to
    # an invisible NULL when the mode is unavailable, and c() drops NULLs, so
    # only the applicable coloring modes survive.
    c(
        .colorByNothingTitle,
        if (length(covariates)) .colorByColDataTitle,
        if (nrow(se) && length(assay_names)) .colorByFeatNameTitle,
        if (ncol(se)) .colorBySampNameTitle,
        .colorByColSelectionsTitle
    )
}
#' @rdname INTERNAL_define_color_options
.define_color_options_for_row_plots <- function(se, covariates, assay_names) {
    # Mirror of the column-plot version with rows and columns swapped:
    # unavailable modes collapse to NULL and are dropped by c().
    c(
        .colorByNothingTitle,
        if (length(covariates)) .colorByRowDataTitle,
        if (nrow(se)) .colorByFeatNameTitle,
        if (ncol(se) && length(assay_names)) .colorBySampNameTitle,
        .colorByRowSelectionsTitle
    )
}
#' Add a visual parameter box for heatmap plots
#'
#' Create a visual parameter box for heatmap plots, i.e., where features are rows and samples are columns.
#'
#' @param x A DataFrame with one row, containing the parameter choices for the current plot.
#' @param se A \linkS4class{SummarizedExperiment} object after running \code{\link{.cacheCommonInfo}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for heatmap plots.
#'
#' @details
#' Heatmap plots can be annotated by row and column metadata.
#' Rows or the heatmap matrix can be transformed using centering and scaling.
#' This function creates a collapsible box that contains all of these options, initialized with the choices in \code{memory}.
#' The box will also contain options for color scales and limits, visibility of row and column names, and legend placement and direction.
#'
#' Each option, once selected, yields a further subset of nested options.
#' For example, choosing to center the heatmap rows will open a \code{selectInput} to specify the divergent colorscale to use.
#'
#' @author Kevin Rue-Albrecht
#' @seealso
#' \code{\link{.defineInterface}}, where this function is typically called.
#'
#' @importFrom shiny checkboxGroupInput selectizeInput checkboxInput numericInput radioButtons
#' @importFrom shinyjs disabled
#'
#' @rdname INTERNAL_create_visual_box_for_complexheatmap
.create_visual_box_for_complexheatmap <- function(x, se) {
# Encoded panel name; used to namespace every Shiny input ID for this panel.
plot_name <- .getEncodedName(x)
# Metadata fields validated by .cacheCommonInfo() for this panel class.
all_coldata <- .getCachedCommonInfo(se, "ComplexHeatmapPlot")$valid.colData.names
all_rowdata <- .getCachedCommonInfo(se, "ComplexHeatmapPlot")$valid.rowData.names
assay_name <- slot(x, .heatMapAssay)
# TRUE when the selected assay holds discrete values; continuous-only
# controls (centering/scaling, custom color bounds) are then disabled.
assay_discrete <- assay_name %in% .getCachedCommonInfo(se, "ComplexHeatmapPlot")$discrete.assay.names
# Helper building namespaced input IDs of the form "<plot_name>_<field>".
.input_FUN <- function(field) paste0(plot_name, "_", field)
pchoice_field <- .input_FUN(.visualParamChoice)
# Wrapper for continuous-only widgets: shinyjs::disabled() greys them out
# for discrete assays, otherwise identity leaves them untouched.
ABLEFUN <- if (assay_discrete) {
disabled
} else {
identity
}
# The .addSpecificTour() calls below register one-step interactive help
# tours for each widget in this box; they add no UI elements themselves.
# Each returns a data.frame with an 'element' CSS selector and 'intro' text.
.addSpecificTour(class(x)[1], .heatMapColData, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapColData, " + .selectize-control"),
intro = "Here, we can select column annotations to show as color bars above the heat map.
This will also order the columns of the heat map by the values of the selected annotations (in the specified order, if multiple annotations are specified). This is useful for providing some structure to the heatmap."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapRowData, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapRowData, " + .selectize-control"),
intro = "Here, we can select row annotations to show as color bars on the left of the heat map.
<br/><br/>
This will <em>not</em> affect the order of rows in the heat map, as this is controlled in the <i>Data parameters</i> box."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapShowSelection, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapShowSelection),
intro = "Ticked, this checkbox displays a color bar above the heat map, indicating data points received from an incoming multiple column selection.
<br/><br/>
It also reveals another checkbox that can be used to order columns of the heat maps using the selection, with selected points on the left and unselected point on the right."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapOrderSelection, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapOrderSelection),
intro = "Ticked, this checkbox orders columns of the heat map using the incoming selection, if any, with selected points on the left and unselected point on the right.
<br/><br/>
This ordering takes precedence over the ordering by column annotations.
This is useful to compare features of selected data points to the rest of the data set."
)
)
)
})
.addSpecificTour(class(x)[1], .assayCenterRows, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayCenterRows),
intro = "Here, we can dynamically center the values for each row shown in the heat map, i.e. to a mean value of 0.
It also reveals another checkbox that can be used to scale values for each row.
<br/><br/>
This does not alter any value in the data set; centered values are only computed on the fly for the purpose of the heat map."
)
)
)
})
.addSpecificTour(class(x)[1], .assayScaleRows, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayScaleRows),
intro = "Here, we can dynamically scale the values for each row shown in the heat map, i.e. to a standard deviation of 1.
This row transformation is only available when row values are centered using the checkbox above.
<br/><br/>
This does not alter any value in the data set; scaled values are only computed on the fly for the purpose of the heat map."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapCenteredColormap, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapCenteredColormap, " + .selectize-control"),
intro = "Here, we can select from a choice of diverging color maps, when row values are centered using the checkbox above.
<br/><br/>
This is useful to visualize deviations from the mean, in particular when row values are also scaled using the second checkbox above."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapCustomAssayBounds, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapCustomAssayBounds),
intro = "Ticked, this checkbox reveals numeric fields that let us manually set custom lower and upper bounds for the color scale of the heat map.
<br/><br/>
This is useful to override the default range of the color scale, which is automatically fit to the range of values observed in the heat map."
)
)
)
})
.addSpecificTour(class(x)[1], .assayLowerBound, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayLowerBound),
intro = "Here, we can manually override the lower bound of the heat map color scale."
)
)
)
})
.addSpecificTour(class(x)[1], .assayUpperBound, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayUpperBound),
intro = "Here, we can manually override the upper bound of the heat map color scale."
)
)
)
})
.addSpecificTour(class(x)[1], .showDimnames, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .showDimnames),
intro = "Here, we can control whether to show row names or column names."
)
)
)
})
.addSpecificTour(class(x)[1], .namesRowFontSize, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .namesRowFontSize),
intro = "Here, we can control the font size of the row names."
)
)
)
})
.addSpecificTour(class(x)[1], .namesColumnFontSize, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .namesColumnFontSize),
intro = "Here, we can control the font size of the column names."
)
)
)
})
.addSpecificTour(class(x)[1], .plotLegendPosition, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .plotLegendPosition),
intro = "Changes the position of the legend on the plot, if any legend exists.
On the bottom, on the right; the choice is yours."
)
)
)
})
.addSpecificTour(class(x)[1], .plotLegendDirection, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .plotLegendDirection),
intro = "Changes the orientation of the legend on the plot, if any legend exists.
Horizontal, vertical; the choice is yours."
)
)
)
})
# Assemble the collapsible box: a checkbox group chooses which of the five
# parameter sections (Metadata/Transform/Color/Labels/Legend) are visible,
# and each section is wrapped in .conditionalOnCheckGroup() accordingly.
collapseBox(
id=paste0(plot_name, "_", .visualParamBoxOpen),
title="Visual parameters",
open=slot(x, .visualParamBoxOpen),
checkboxGroupInput(
inputId=pchoice_field, label=NULL, inline=TRUE,
selected=slot(x, .visualParamChoice),
choices=c(.visualParamChoiceMetadataTitle, .visualParamChoiceTransformTitle, .visualParamChoiceColorTitle,
.visualParamChoiceLabelsTitle, .visualParamChoiceLegendTitle)),
# Metadata section: annotation color bars plus the selection bar toggles.
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceMetadataTitle,
hr(),
.selectizeInput.iSEE(x, .heatMapColData,
label = "Column annotations:",
selected = slot(x, .heatMapColData),
choices = all_coldata,
multiple=TRUE,
options=list(plugins=list('remove_button', 'drag_drop')),
help = TRUE),
.selectizeInput.iSEE(x, .heatMapRowData,
label = "Row annotations:",
selected = slot(x, .heatMapRowData),
choices = all_rowdata,
multiple=TRUE,
options=list(plugins=list('remove_button', 'drag_drop')),
help = TRUE),
.checkboxInput.iSEE(x, .heatMapShowSelection,
label = "Show column selection",
value=slot(x, .heatMapShowSelection),
help = TRUE),
# "Order by selection" is only shown while "Show column selection" is on.
.conditionalOnCheckSolo(.input_FUN(.heatMapShowSelection), on_select = TRUE,
.checkboxInput.iSEE(x, .heatMapOrderSelection,
label = "Order by column selection",
value=slot(x, .heatMapOrderSelection),
help = TRUE),
# NOTE(review): the trailing comma above passes an empty trailing argument
# to .conditionalOnCheckSolo -- confirm its handling of ... tolerates this.
)
),
# Transform section: row centering/scaling, disabled for discrete assays.
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceTransformTitle,
hr(),
strong("Row transformations:"),
ABLEFUN(.checkboxInput.iSEE(x, .assayCenterRows,
label = "Center",
value=slot(x, .assayCenterRows),
help = TRUE)),
# Scaling and the centered colormap only make sense once rows are centered.
.conditionalOnCheckSolo(.input_FUN(.assayCenterRows), on_select = TRUE,
ABLEFUN(
.checkboxInput.iSEE(x, .assayScaleRows,
label = "Scale",
value=slot(x, .assayScaleRows),
help = TRUE)
),
ABLEFUN(
.selectizeInput.iSEE(x, .heatMapCenteredColormap,
label = "Centered assay colormap:",
selected=slot(x, .heatMapCenteredColormap),
choices=c(.colormapPurpleBlackYellow, .colormapBlueWhiteOrange, .colormapBlueWhiteRed, .colormapGreenWhiteRed),
help = TRUE)
))
),
# Color section: optional manual bounds for the color scale.
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceColorTitle,
hr(),
ABLEFUN(
.checkboxInput.iSEE(x, .heatMapCustomAssayBounds,
label = "Use custom colorscale bounds",
value = slot(x, .heatMapCustomAssayBounds),
help = TRUE)),
.conditionalOnCheckSolo(.input_FUN(.heatMapCustomAssayBounds), on_select = TRUE,
.numericInput.iSEE(x, .assayLowerBound,
label = "Lower bound",
value=slot(x, .assayLowerBound), min = -Inf, max = Inf,
help = TRUE),
.numericInput.iSEE(x, .assayUpperBound,
label = "Upper bound",
value=slot(x, .assayUpperBound), min = -Inf, max = Inf,
help = TRUE))
),
# Labels section: toggling row/column names reveals their font-size fields.
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceLabelsTitle,
hr(),
.checkboxGroupInput.iSEE(x, .showDimnames,
label = "Show names:",
inline=TRUE,
selected=slot(x, .showDimnames),
choices=c(.showNamesRowTitle, .showNamesColumnTitle),
help = TRUE),
.conditionalOnCheckGroup(
.input_FUN(.showDimnames), .showNamesRowTitle,
.numericInput.iSEE(x, .namesRowFontSize,
label="Row names fontsize",
value=slot(x, .namesRowFontSize),
help=TRUE)),
.conditionalOnCheckGroup(
.input_FUN(.showDimnames), .showNamesColumnTitle,
.numericInput.iSEE(x, .namesColumnFontSize,
label="Column names fontsize",
value=slot(x, .namesColumnFontSize),
help=TRUE)),
# NOTE(review): the trailing comma above leaves an empty trailing argument
# in this .conditionalOnCheckGroup call -- confirm the helper tolerates it.
),
# Legend section: placement and orientation of the heat map legend.
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceLegendTitle,
hr(),
.radioButtons.iSEE(x, .plotLegendPosition,
label = "Legend position:",
inline=TRUE,
selected=slot(x, .plotLegendPosition),
choices=c(.plotLegendBottomTitle, .plotLegendRightTitle),
help = TRUE),
.radioButtons.iSEE(x, .plotLegendDirection,
label = "Legend direction:",
inline=TRUE,
selected=slot(x, .plotLegendDirection),
choices=c(.plotLegendHorizontalTitle, .plotLegendVerticalTitle),
help = TRUE)
)
)
}
| /R/interface_visual.R | permissive | andreagrioni/iSEE | R | false | false | 20,828 | r | #' Add a visual parameter box for dot plots
#'
#' Create a visual parameter box for row- or column-based dot plots, i.e., where each feature or sample is a point.
#'
#' @param x A DataFrame with one row, containing the parameter choices for the current plot.
#' @param select_info A list of character vectors named \code{row} and \code{column} which specifies the names of panels available for transmitting single selections on the rows/columns.
#' @param se A \linkS4class{SummarizedExperiment} object after running \code{\link{.cacheCommonInfo}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for row- or column-based plots.
#'
#' @details
#' Column-based plots can be coloured by nothing, by column metadata, by the expression of a single feature or by the identity of a single sample.
#' This function creates a collapsible box that contains all of these options, initialized with the choices in \code{memory}.
#' The box will also contain options for font size, point size and opacity, and legend placement.
#'
#' Each option, once selected, yields a further subset of nested options.
#' For example, choosing to colour by column metadata will open up a \code{selectInput} to specify the metadata field to use.
#' Choosing to colour by feature name will open up a \code{selectizeInput}.
#' However, the values are filled on the server-side, rather than being sent to the client; this avoids long start times during re-rendering.
#'
#' Note that some options will be disabled depending on the nature of the input, namely:
#' \itemize{
#' \item If there are no column metadata fields, users will not be allowed to colour by column metadata, obviously.
#' \item If there are no features, users cannot colour by features.
#' \item If there are no categorical column metadata fields, users will not be allowed to view the faceting options.
#' }
#'
#' The same logic applies for row-based plots where we swap features with samples (i.e., coloring by feature will highlight a single feature, while coloring by sample will color by the expression of all features in that sample).
#' Similarly, the row metadata is used in place of the column metadata.
#'
#' @author Aaron Lun
#' @seealso
#' \code{\link{.defineInterface}}, where this function is typically called.
#'
#' @importFrom shiny radioButtons tagList selectInput selectizeInput checkboxGroupInput
#' @importFrom colourpicker colourInput
#' @importFrom stats setNames
#'
#' @rdname INTERNAL_create_visual_box
.create_visual_box <- function(x, se, select_info) {
ui <- list(
.defineVisualColorInterface(x, se, select_info),
.defineVisualShapeInterface(x, se),
.defineVisualSizeInterface(x, se),
.defineVisualPointInterface(x, se),
.defineVisualFacetInterface(x, se),
.defineVisualTextInterface(x, se),
.defineVisualOtherInterface(x)
)
names(ui) <- c(
.visualParamChoiceColorTitle,
.visualParamChoiceShapeTitle,
.visualParamChoiceSizeTitle,
.visualParamChoicePointTitle,
.visualParamChoiceFacetTitle,
.visualParamChoiceTextTitle,
.visualParamChoiceOtherTitle
)
stopifnot(all(names(ui)!=""))
keep <- !vapply(ui, is.null, logical(1))
ui <- ui[keep]
plot_name <- .getEncodedName(x)
pchoice_field <- paste0(plot_name, "_", .visualParamChoice)
collected <- lapply(names(ui), function(title)
.conditionalOnCheckGroup(pchoice_field, title, ui[[title]])
)
collapseBox(
id=paste0(plot_name, "_", .visualParamBoxOpen),
title="Visual parameters",
open=slot(x, .visualParamBoxOpen),
checkboxGroupInput(
inputId=pchoice_field, label=NULL, inline=TRUE,
selected=slot(x, .visualParamChoice),
choices=names(ui)
),
do.call(tagList, collected)
)
}
#' Define colouring options
#'
#' Define the available colouring options for row- or column-based plots,
#' where availability is defined on the presence of the appropriate data in a SingleCellExperiment object.
#'
#' @param se A \linkS4class{SummarizedExperiment} object.
#' @param covariates Character vector of available covariates to use for coloring.
#' @param assay_names Character vector of available assay names to use for coloring.
#'
#' @details
#' Colouring by column data is not available if no column data exists in \code{se} - same for the row data.
#' Colouring by feature names is not available if there are no features in \code{se}.
#' There must also be assays in \code{se} to colour by features (in column-based plots) or samples (in row-based plots).
#'
#' @return A character vector of available colouring modes, i.e., nothing, by column/row data or by feature name.
#'
#' @author Aaron Lun
#' @rdname INTERNAL_define_color_options
.define_color_options_for_column_plots <- function(se, covariates, assay_names) {
color_choices <- .colorByNothingTitle
if (length(covariates)) {
color_choices <- c(color_choices, .colorByColDataTitle)
}
if (nrow(se) && length(assay_names)) {
color_choices <- c(color_choices, .colorByFeatNameTitle)
}
if (ncol(se)) {
color_choices <- c(color_choices, .colorBySampNameTitle)
}
c(color_choices, .colorByColSelectionsTitle)
}
#' @rdname INTERNAL_define_color_options
.define_color_options_for_row_plots <- function(se, covariates, assay_names) {
color_choices <- .colorByNothingTitle
if (length(covariates)) {
color_choices <- c(color_choices, .colorByRowDataTitle)
}
if (nrow(se)) {
color_choices <- c(color_choices, .colorByFeatNameTitle)
}
if (ncol(se) && length(assay_names)) {
color_choices <- c(color_choices, .colorBySampNameTitle)
}
c(color_choices, .colorByRowSelectionsTitle)
}
#' Add a visual parameter box for heatmap plots
#'
#' Create a visual parameter box for heatmap plots, i.e., where features are rows and samples are columns.
#'
#' @param x A DataFrame with one row, containing the parameter choices for the current plot.
#' @param se A \linkS4class{SummarizedExperiment} object after running \code{\link{.cacheCommonInfo}}.
#'
#' @return
#' A HTML tag object containing a \code{\link{collapseBox}} with visual parameters for heatmap plots.
#'
#' @details
#' Heatmap plots can be annotated by row and column metadata.
#' Rows or the heatmap matrix can be transformed using centering and scaling.
#' This function creates a collapsible box that contains all of these options, initialized with the choices in \code{memory}.
#' The box will also contain options for color scales and limits, visibility of row and column names, and legend placement and direction.
#'
#' Each option, once selected, yields a further subset of nested options.
#' For example, choosing to center the heatmap rows will open a \code{selectInput} to specify the divergent colorscale to use.
#'
#' @author Kevin Rue-Albrecht
#' @seealso
#' \code{\link{.defineInterface}}, where this function is typically called.
#'
#' @importFrom shiny checkboxGroupInput selectizeInput checkboxInput numericInput radioButtons
#' @importFrom shinyjs disabled
#'
#' @rdname INTERNAL_create_visual_box_for_complexheatmap
.create_visual_box_for_complexheatmap <- function(x, se) {
plot_name <- .getEncodedName(x)
all_coldata <- .getCachedCommonInfo(se, "ComplexHeatmapPlot")$valid.colData.names
all_rowdata <- .getCachedCommonInfo(se, "ComplexHeatmapPlot")$valid.rowData.names
assay_name <- slot(x, .heatMapAssay)
assay_discrete <- assay_name %in% .getCachedCommonInfo(se, "ComplexHeatmapPlot")$discrete.assay.names
.input_FUN <- function(field) paste0(plot_name, "_", field)
pchoice_field <- .input_FUN(.visualParamChoice)
ABLEFUN <- if (assay_discrete) {
disabled
} else {
identity
}
.addSpecificTour(class(x)[1], .heatMapColData, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapColData, " + .selectize-control"),
intro = "Here, we can select column annotations to show as color bars above the heat map.
This will also order the columns of the heat map by the values of the selected annotations (in the specified order, if multiple annotations are specified). This is useful for providing some structure to the heatmap."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapRowData, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapRowData, " + .selectize-control"),
intro = "Here, we can select row annotations to show as color bars on the left of the heat map.
<br/><br/>
This will <em>not</em> affect the order of rows in the heat map, as this is controlled in the <i>Data parameters</i> box."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapShowSelection, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapShowSelection),
intro = "Ticked, this checkbox displays a color bar above the heat map, indicating data points received from an incoming multiple column selection.
<br/><br/>
It also reveals another checkbox that can be used to order columns of the heat maps using the selection, with selected points on the left and unselected point on the right."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapOrderSelection, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapOrderSelection),
intro = "Ticked, this checkbox orders columns of the heat map using the incoming selection, if any, with selected points on the left and unselected point on the right.
<br/><br/>
This ordering takes precedence over the ordering by column annotations.
This is useful to compare features of selected data points to the rest of the data set."
)
)
)
})
.addSpecificTour(class(x)[1], .assayCenterRows, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayCenterRows),
intro = "Here, we can dynamically center the values for each row shown in the heat map, i.e. to a mean value of 0.
It also reveals another checkbox that can be used to scale values for each row.
<br/><br/>
This does not alter any value in the data set; centered values are only computed on the fly for the purpose of the heat map."
)
)
)
})
.addSpecificTour(class(x)[1], .assayScaleRows, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayScaleRows),
intro = "Here, we can dynamically scale the values for each row shown in the heat map, i.e. to a standard deviation of 1.
This row transformation is only available when row values are centered using the checkbox above.
<br/><br/>
This does not alter any value in the data set; scaled values are only computed on the fly for the purpose of the heat map."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapCenteredColormap, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapCenteredColormap, " + .selectize-control"),
intro = "Here, we can select from a choice of diverging color maps, when row values are centered using the checkbox above.
<br/><br/>
This is useful to visualize deviations from the mean, in particular when row values are also scaled using the second checkbox above."
)
)
)
})
.addSpecificTour(class(x)[1], .heatMapCustomAssayBounds, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .heatMapCustomAssayBounds),
intro = "Ticked, this checkbox reveals numeric fields that let us manually set custom lower and upper bounds for the color scale of the heat map.
<br/><br/>
This is useful to override the default range of the color scale, which is automatically fit to the range of values observed in the heat map."
)
)
)
})
.addSpecificTour(class(x)[1], .assayLowerBound, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayLowerBound),
intro = "Here, we can manually override the lower bound of the heat map color scale."
)
)
)
})
.addSpecificTour(class(x)[1], .assayUpperBound, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .assayUpperBound),
intro = "Here, we can manually override the upper bound of the heat map color scale."
)
)
)
})
.addSpecificTour(class(x)[1], .showDimnames, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .showDimnames),
intro = "Here, we can control whether to show row names or column names."
)
)
)
})
.addSpecificTour(class(x)[1], .namesRowFontSize, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .namesRowFontSize),
intro = "Here, we can control the font size of the row names."
)
)
)
})
.addSpecificTour(class(x)[1], .namesColumnFontSize, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .namesColumnFontSize),
intro = "Here, we can control the font size of the column names."
)
)
)
})
.addSpecificTour(class(x)[1], .plotLegendPosition, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .plotLegendPosition),
intro = "Changes the position of the legend on the plot, if any legend exists.
On the bottom, on the right; the choice is yours."
)
)
)
})
.addSpecificTour(class(x)[1], .plotLegendDirection, function(plot_name) {
data.frame(
rbind(
c(
element = paste0("#", plot_name, "_", .plotLegendDirection),
intro = "Changes the orientation of the legend on the plot, if any legend exists.
Horizontal, vertical; the choice is yours."
)
)
)
})
collapseBox(
id=paste0(plot_name, "_", .visualParamBoxOpen),
title="Visual parameters",
open=slot(x, .visualParamBoxOpen),
checkboxGroupInput(
inputId=pchoice_field, label=NULL, inline=TRUE,
selected=slot(x, .visualParamChoice),
choices=c(.visualParamChoiceMetadataTitle, .visualParamChoiceTransformTitle, .visualParamChoiceColorTitle,
.visualParamChoiceLabelsTitle, .visualParamChoiceLegendTitle)),
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceMetadataTitle,
hr(),
.selectizeInput.iSEE(x, .heatMapColData,
label = "Column annotations:",
selected = slot(x, .heatMapColData),
choices = all_coldata,
multiple=TRUE,
options=list(plugins=list('remove_button', 'drag_drop')),
help = TRUE),
.selectizeInput.iSEE(x, .heatMapRowData,
label = "Row annotations:",
selected = slot(x, .heatMapRowData),
choices = all_rowdata,
multiple=TRUE,
options=list(plugins=list('remove_button', 'drag_drop')),
help = TRUE),
.checkboxInput.iSEE(x, .heatMapShowSelection,
label = "Show column selection",
value=slot(x, .heatMapShowSelection),
help = TRUE),
.conditionalOnCheckSolo(.input_FUN(.heatMapShowSelection), on_select = TRUE,
.checkboxInput.iSEE(x, .heatMapOrderSelection,
label = "Order by column selection",
value=slot(x, .heatMapOrderSelection),
help = TRUE),
)
),
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceTransformTitle,
hr(),
strong("Row transformations:"),
ABLEFUN(.checkboxInput.iSEE(x, .assayCenterRows,
label = "Center",
value=slot(x, .assayCenterRows),
help = TRUE)),
.conditionalOnCheckSolo(.input_FUN(.assayCenterRows), on_select = TRUE,
ABLEFUN(
.checkboxInput.iSEE(x, .assayScaleRows,
label = "Scale",
value=slot(x, .assayScaleRows),
help = TRUE)
),
ABLEFUN(
.selectizeInput.iSEE(x, .heatMapCenteredColormap,
label = "Centered assay colormap:",
selected=slot(x, .heatMapCenteredColormap),
choices=c(.colormapPurpleBlackYellow, .colormapBlueWhiteOrange, .colormapBlueWhiteRed, .colormapGreenWhiteRed),
help = TRUE)
))
),
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceColorTitle,
hr(),
ABLEFUN(
.checkboxInput.iSEE(x, .heatMapCustomAssayBounds,
label = "Use custom colorscale bounds",
value = slot(x, .heatMapCustomAssayBounds),
help = TRUE)),
.conditionalOnCheckSolo(.input_FUN(.heatMapCustomAssayBounds), on_select = TRUE,
.numericInput.iSEE(x, .assayLowerBound,
label = "Lower bound",
value=slot(x, .assayLowerBound), min = -Inf, max = Inf,
help = TRUE),
.numericInput.iSEE(x, .assayUpperBound,
label = "Upper bound",
value=slot(x, .assayUpperBound), min = -Inf, max = Inf,
help = TRUE))
),
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceLabelsTitle,
hr(),
.checkboxGroupInput.iSEE(x, .showDimnames,
label = "Show names:",
inline=TRUE,
selected=slot(x, .showDimnames),
choices=c(.showNamesRowTitle, .showNamesColumnTitle),
help = TRUE),
.conditionalOnCheckGroup(
.input_FUN(.showDimnames), .showNamesRowTitle,
.numericInput.iSEE(x, .namesRowFontSize,
label="Row names fontsize",
value=slot(x, .namesRowFontSize),
help=TRUE)),
.conditionalOnCheckGroup(
.input_FUN(.showDimnames), .showNamesColumnTitle,
.numericInput.iSEE(x, .namesColumnFontSize,
label="Column names fontsize",
value=slot(x, .namesColumnFontSize),
help=TRUE)),
),
.conditionalOnCheckGroup(
pchoice_field, .visualParamChoiceLegendTitle,
hr(),
.radioButtons.iSEE(x, .plotLegendPosition,
label = "Legend position:",
inline=TRUE,
selected=slot(x, .plotLegendPosition),
choices=c(.plotLegendBottomTitle, .plotLegendRightTitle),
help = TRUE),
.radioButtons.iSEE(x, .plotLegendDirection,
label = "Legend direction:",
inline=TRUE,
selected=slot(x, .plotLegendDirection),
choices=c(.plotLegendHorizontalTitle, .plotLegendVerticalTitle),
help = TRUE)
)
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_objects.R
\name{ChannelStatistics}
\alias{ChannelStatistics}
\title{ChannelStatistics Object}
\usage{
ChannelStatistics(commentCount = NULL, hiddenSubscriberCount = NULL,
subscriberCount = NULL, videoCount = NULL, viewCount = NULL)
}
\arguments{
\item{commentCount}{The number of comments for the channel}
\item{hiddenSubscriberCount}{Whether or not the number of subscribers is shown for this user}
\item{subscriberCount}{The number of subscribers that the channel has}
\item{videoCount}{The number of videos uploaded to the channel}
\item{viewCount}{The number of times the channel has been viewed}
}
\value{
ChannelStatistics object
}
\description{
ChannelStatistics Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Statistics about a channel: number of subscribers, number of videos in the channel, etc.
}
| /googleyoutubev3.auto/man/ChannelStatistics.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 940 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/youtube_objects.R
\name{ChannelStatistics}
\alias{ChannelStatistics}
\title{ChannelStatistics Object}
\usage{
ChannelStatistics(commentCount = NULL, hiddenSubscriberCount = NULL,
subscriberCount = NULL, videoCount = NULL, viewCount = NULL)
}
\arguments{
\item{commentCount}{The number of comments for the channel}
\item{hiddenSubscriberCount}{Whether or not the number of subscribers is shown for this user}
\item{subscriberCount}{The number of subscribers that the channel has}
\item{videoCount}{The number of videos uploaded to the channel}
\item{viewCount}{The number of times the channel has been viewed}
}
\value{
ChannelStatistics object
}
\description{
ChannelStatistics Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Statistics about a channel: number of subscribers, number of videos in the channel, etc.
}
|
#' Estimate the baseline hazard function in the Cox model as a step function
#'
#' The follow-up time axis is partitioned into windows whose upper bounds sit
#' just above (+0.001) each distinct event time, so every event falls strictly
#' inside its own window. Within each window the baseline hazard is assumed
#' constant and estimated as (events in window) / (risk-weighted exposure).
#'
#' @param surv.data Data frame of survival data; must contain the columns named
#'   by \code{survObject$response} (event/censoring time) and
#'   \code{survObject$event} (event indicator), plus any covariates referenced
#'   by the regression equation.
#' @param survObject List describing the survival submodel with elements
#'   \code{response}, \code{event}, and \code{reg_equation} (character string
#'   for the linear predictor, evaluated against \code{surv.data},
#'   \code{par_val}, and \code{Bi}).
#' @param par_val Named vector (or list) of parameter values referenced by
#'   \code{reg_equation}.
#' @param Bi Data frame of subject-level random effects referenced by
#'   \code{reg_equation}; one row per row of \code{surv.data}.
#' @return A list with \code{surv.data} (input augmented with per-subject
#'   \code{hazard0} and \code{cum_hazard0} columns) and \code{hazard_est}
#'   (the estimated step function, one row per window).
est_base_hazard <- function(surv.data, survObject, par_val, Bi){
  par_val <- as.list(par_val)
  Etime <- surv.data[, survObject$response]
  nblock <- length(unique(Etime)) + 2
  ## Window boundaries: 0, each distinct time + 0.001, Inf
  Twindow <- c(0, sort(unique(Etime)) + 0.001, Inf)
  delta1 <- Twindow[1:(nblock - 1)]  # lower window bounds
  delta2 <- Twindow[2:nblock]        # upper window bounds
  hazard_est <- data.frame(delta1, delta2, hazard0 = 0, cum_hazard0 = 0,
                           T_len = delta2 - delta1,
                           time = c(sort(unique(Etime)), Inf))
  linear <- parse(text = survObject$reg_equation)
  for (i in seq_len(nrow(hazard_est))) {
    ## Events occurring inside window i
    subdat <- surv.data[Etime < delta2[i] & Etime >= delta1[i], ]
    ## Risk set: participants who survived up to delta1[i]
    indx <- which(Etime >= delta1[i])
    subdat2 <- surv.data[indx, ]
    ## drop = FALSE keeps subBi a data frame even when Bi has a single
    ## column; with() below requires a list-like object
    subBi <- Bi[indx, , drop = FALSE]
    numer <- sum(subdat[, survObject$event])
    ## Relative risk exp(linear predictor): covariates come from subdat2,
    ## fixed parameters from par_val, random effects from subBi
    exp_term <- with(subdat2, with(par_val, with(subBi, exp(eval(linear)))))
    ## Exposure of each at-risk subject within window i
    T_len <- pmin(Etime[indx] - delta1[i], delta2[i] - delta1[i])
    denom <- sum(exp_term*T_len)
    h0 <- numer/denom
    ## Guard against 0/0 (empty risk set) and clamp at a small positive value
    if (is.nan(h0)) {
      hazard_est$hazard0[i] <- 1e-16
    } else {
      hazard_est$hazard0[i] <- max(1e-16, h0)
    }
    ## Cumulative hazard at the event time of window i: full contribution of
    ## all earlier windows plus the partial contribution of the current one
    if (i == 1) {
      hazard_est$cum_hazard0[i] <- hazard_est$hazard0[i] * (hazard_est$time[i] - hazard_est$delta1[i])
    } else {
      hazard_est$cum_hazard0[i] <- sum((hazard_est$hazard0 * hazard_est$T_len)[1:(i - 1)]) +
        hazard_est$hazard0[i]*(hazard_est$time[i] - hazard_est$delta1[i])
    }
  }
  ## Map each subject's time to its window (delta1[k] <= t < delta2[k]);
  ## findInterval replaces the original O(n * windows) per-row lookup loop
  win <- findInterval(Etime, delta1)
  surv.data$hazard0 <- hazard_est$hazard0[win]
  surv.data$cum_hazard0 <- hazard_est$cum_hazard0[win]
  list(surv.data = surv.data, hazard_est = hazard_est)
}
| /R/est_base_hazard.r | no_license | oliviayu/robust-HHJMs | R | false | false | 1,837 | r | #' Estimate the baseline hazard function in the Cox model as a step function
#'
est_base_hazard <- function(surv.data, survObject, par_val, Bi){
par_val <- as.list(par_val)
Etime <- surv.data[, survObject$response]
nblock <- length(unique(Etime)) + 2
Twindow <- c(0, sort(unique(Etime)) + 0.001, Inf)
delta1 <- Twindow[1:(nblock - 1)] # lower window
delta2 <- Twindow[2:nblock] # upper window
hazard_est <- data.frame(delta1, delta2, hazard0 = 0, cum_hazard0 = 0,
T_len = delta2 - delta1,
time = c(sort(unique(Etime)), Inf))
linear <- parse(text= survObject$reg_equation)
for(i in 1:nrow(hazard_est)){
subdat <- surv.data[Etime < delta2[i] & Etime >= delta1[i], ]
# subdat of participants who survived up to delta1[i]
indx <- which(Etime >= delta1[i])
subdat2 <- surv.data[indx, ]
subBi <- Bi[indx, ]
numer <- sum(subdat[, survObject$event])
exp_term <- with(subdat2, with(par_val, with(subBi, exp(eval(linear)))))
T_len <- pmin(Etime[indx] - delta1[i], delta2[i] - delta1[i])
denom <- sum(exp_term*T_len)
h0 <- numer/denom
if(is.nan(h0)){
hazard_est$hazard0[i] <- 1e-16
} else {
hazard_est$hazard0[i] <- max(1e-16, h0)
}
if(i ==1 ){
hazard_est$cum_hazard0[i] <- hazard_est$hazard0[i] * (hazard_est$time[i] - hazard_est$delta1[i])
} else{
hazard_est$cum_hazard0[i] <- sum((hazard_est$hazard0 * hazard_est$T_len)[1:(i - 1)]) +
hazard_est$hazard0[i]*(hazard_est$time[i] - hazard_est$delta1[i])
}
}
for(i in 1:nrow(surv.data)){
kk <- which(delta1 <= Etime[i] & delta2 > Etime[i])
surv.data$hazard0[i] <- hazard_est$hazard0[kk]
surv.data$cum_hazard0[i] <- hazard_est$cum_hazard0[kk]
}
list(surv.data = surv.data, hazard_est = hazard_est)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric_residual_plot.R
\name{metric_residual_plot}
\alias{metric_residual_plot}
\title{Residual Plot}
\usage{
metric_residual_plot(metric_dat, var, filename = NA,
draw.plot = is.na(filename))
}
\arguments{
\item{draw.plot}{logical; if \code{TRUE}, draw the plot to the current device. Defaults to \code{is.na(filename)}.}
}
\description{
Residual Plot
}
\author{
Betsy Cowdery
}
| /modules/benchmark/man/metric_residual_plot.Rd | permissive | araiho/pecan | R | false | true | 362 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric_residual_plot.R
\name{metric_residual_plot}
\alias{metric_residual_plot}
\title{Residual Plot}
\usage{
metric_residual_plot(metric_dat, var, filename = NA,
draw.plot = is.na(filename))
}
\arguments{
\item{draw.plot}{}
}
\description{
Residual Plot
}
\author{
Betsy Cowdery
}
|
#SimpReg lapply
#test simple regression on parallel
# Fits a simple linear regression over 12 monthly NYC taxi trip/fare CSV
# pairs.  Each file pair is reduced to sufficient statistics
# (sum x, sum y, sum xy, sum x^2, n) by a compiled C routine; the per-file
# statistics are then pooled and turned into the OLS slope (beta) and
# intercept (alpha).
# NOTE(review): setwd() and rm(list = ls()) are fragile side effects in a
# script -- consider removing them and running from the intended directory.
setwd("/home/sxt999/run")
rm(list = ls())
# Compile the C helper regsimp.c into a shared object loadable via dyn.load()
cname = "regsimp"
compile = paste0("R CMD SHLIB ", cname,".c")
ccode = paste0(cname,".so")
system(compile)
# Column indices handed to the C routine; y carries two indices and x one --
# presumably response and predictor column positions (TODO confirm against
# the reg() signature in regsimp.c)
y = c(10,11)
x = 9
# Run the compiled routine "reg" on one trip/fare file pair and return the
# named sufficient statistics: sum x, sum y, sum xy, sum x^2, and count n.
regsimp = function(c, f1, f2, yindex, xindex) {
  dyn.load(c)
  sum = rep(0,4)
  result = .C("reg",as.character(f1),as.character(f2),as.integer(yindex), as.integer(xindex),
              as.numeric(sum),as.integer(0), as.numeric(0), as.numeric(0))
  # Positions 1-2 of 'out' come from the length-4 accumulator (result[[5]]);
  # positions 3-5 are filled from the scalar outputs of the C call.
  out = result[[5]]
  out[3:5] = c(result[[7]], result[[8]],result[[6]])
  names(out) = c("x", "y","xy","x^2","n")
  dyn.unload(c)
  return(out)
}
# Paths for the 12 monthly trip-data / trip-fare file pairs
file1 = sapply(1:12, function(i) paste0("/home/data/NYCTaxis/trip_data_", i, ".csv"))
file2 = sapply(1:12, function(i) paste0("/home/data/NYCTaxis/trip_fare_", i, ".csv"))
# Process all 12 months sequentially with lapply, timing the whole run
t = system.time(
  result <-lapply(1:12, function(i) regsimp(ccode, file1[i], file2[i], y, x) ) )
# Pool the per-file statistics: summ = c(sum x, sum y, sum xy, sum x^2, n)
summ = sapply(1:5 , function(i) sum(unlist(lapply(result,"[[",i ))))
# Closed-form OLS estimates from the pooled sufficient statistics
beta = (summ[3]- summ[1]*summ[2]/summ[5]) / ( summ[4] - (summ[1])*(summ[1])/summ[5])
alpha = summ[2]/summ[5] - beta * summ[1]/summ[5]
save.image("resultLapply.rda")
| /Assignment5/run/TestSimpRegLapply.R | no_license | mokiss/sxshao | R | false | false | 1,127 | r | #SimpReg lapply
#test simple regression on parallel
setwd("/home/sxt999/run")
rm(list = ls())
cname = "regsimp"
compile = paste0("R CMD SHLIB ", cname,".c")
ccode = paste0(cname,".so")
system(compile)
y = c(10,11)
x = 9
regsimp = function(c, f1, f2, yindex, xindex) {
dyn.load(c)
sum = rep(0,4)
result = .C("reg",as.character(f1),as.character(f2),as.integer(yindex), as.integer(xindex),
as.numeric(sum),as.integer(0), as.numeric(0), as.numeric(0))
out = result[[5]]
out[3:5] = c(result[[7]], result[[8]],result[[6]])
names(out) = c("x", "y","xy","x^2","n")
dyn.unload(c)
return(out)
}
file1 = sapply(1:12, function(i) paste0("/home/data/NYCTaxis/trip_data_", i, ".csv"))
file2 = sapply(1:12, function(i) paste0("/home/data/NYCTaxis/trip_fare_", i, ".csv"))
t = system.time(
result <-lapply(1:12, function(i) regsimp(ccode, file1[i], file2[i], y, x) ) )
summ = sapply(1:5 , function(i) sum(unlist(lapply(result,"[[",i ))))
beta = (summ[3]- summ[1]*summ[2]/summ[5]) / ( summ[4] - (summ[1])*(summ[1])/summ[5])
alpha = summ[2]/summ[5] - beta * summ[1]/summ[5]
save.image("resultLapply.rda")
|
## Install the CRAN packages required for this analysis
required_pkgs <- c(
  "data.table",
  "devtools",
  "forcats",
  "ggplot2",
  "pander",
  "readxl",
  "stringr",
  "tableone"
)
install.packages(required_pkgs)

## philsfmisc is distributed on GitHub only
devtools::install_github("philsf/philsfmisc")
| /scripts/setup.R | no_license | philsf-biostat/SAR-2019-001-RG | R | false | false | 227 | r | ## Instalar pacotes requeridos para esta análise
install.packages(c(
"data.table",
"devtools",
"forcats",
"ggplot2",
"pander",
"readxl",
"stringr",
"tableone"
))
devtools::install_github("philsf/philsfmisc")
|
## Compute a resource surface over a habitat mask.
##
## For every mask point this calls the compiled routine 'getdenomext' to
## evaluate a per-point denominator (optionally weighted by
## exp(alpha2 * covariate)) and stores either that value or its reciprocal
## as a 'Resource' covariate on the mask.
##
## Arguments:
##   mask     habitat mask (single-session only; multisession masks error)
##   sigma    spatial scale parameter passed to the C routine
##   usecov   optional name of a mask covariate used as a weight
##   alpha2   coefficient applied to the covariate inside exp()
##   detectfn detection function code or name (validated to one of 4, 14:18)
##   z        detection-function shape parameter passed to the C routine
##   inverse  if TRUE store the raw denominator; otherwise its reciprocal
##   scale    flag forwarded to the C routine via miscparm[3]
## Returns the mask with class c('Rsurface','mask','data.frame'), a
## 'Resource' covariate (non-finite values set to NA), and a 'scale'
## attribute returned by the C routine.
Rsurface <- function (mask, sigma, usecov = NULL, alpha2 = 1, detectfn = "HHN", z = 1,
                      inverse = FALSE, scale = TRUE) {
    if (ms(mask)) stop ("not ready for multisession masks")
    mm <- nrow(mask)
    detectfn <- valid.detectfn(detectfn, c(4,14:18)) ## converts from character
    ## Mask coordinates plus a constant column of ones
    tmpmask <- cbind(mask, rep(1,mm))
    ## Flags passed to the C routine:
    ## [1] always 1; [2] 1 when a weighting covariate is supplied;
    ## [3] the 'scale' flag; [4] unused here
    miscparm <- c(1,0,0,0)
    if (!is.null(usecov)) {
        miscparm[2] <- 1
        ## Append the exponential covariate weight as an extra column
        tmpmask <- cbind(tmpmask, exp(alpha2 * covariates(mask)[,usecov]))
    }
    miscparm[3] <- scale
    ## C routine fills 'invdenom' (one value per mask point) and a scalar
    ## 'scale'; argument order must match the C signature exactly
    temp <- .C ( "getdenomext",
        as.integer(detectfn),
        as.double (miscparm),
        as.double(unlist(tmpmask)),
        as.integer(mm),
        as.double (sigma),
        as.double (z),
        invdenom = double(mm),
        scale = double(1))
    ## Ensure the mask has a covariates data frame to write into
    if (is.null(covariates(mask)))
        covariates(mask) <- data.frame(matrix(nrow = mm, ncol = 0))
    ## Store the denominator itself (inverse = TRUE) or its reciprocal
    covariates(mask)[,"Resource"] <-
        if (inverse) temp$invdenom
        else 1/temp$invdenom
    ## Replace non-finite values (e.g. division by zero) with NA
    OK <- is.finite(covariates(mask)$Resource)
    covariates(mask)$Resource[!OK] <- NA
    class(mask) <- c('Rsurface', 'mask', 'data.frame') ## need data.frame to guide ms()
    attr(mask, 'scale') <- temp$scale
    mask
}
############################################################################################
Rsurface.as.data.frame <- function (x) {
    ## Flatten an Rsurface into a plain data frame with columns x, y and the
    ## 'Resource' covariate (only one covariate column is handled for now).
    covnames <- names(covariates(x))
    resource_name <- covnames[match('Resource', covnames)]
    out <- cbind(x, covariates(x)[, resource_name])
    names(out) <- c('x', 'y', resource_name)
    out
}
############################################################################################
print.Rsurface <- function (x, ...) {
    ## Print the Rsurface as a plain data frame (x, y, Resource) and return
    ## that data frame invisibly so the result can be captured.
    flat <- Rsurface.as.data.frame(x)
    print(flat, ...)
    invisible(flat)
}
############################################################################################
plot.Rsurface <- function (x, covariate = 'Resource', plottype = 'shaded',
                           scale = 1, ...) {
    ## Multi-session surface: plot each session and collect the break lists
    if (ms(x)) {
        breaklist <- lapply(x, plot, covariate, plottype, ...)
        return(invisible(breaklist))
    }
    if (length(covariate) > 1)
        stop ("whoa... just one at a time")
    if (!(covariate %in% names(covariates(x))))
        stop ("covariate ", covariate, " not found")
    ## Apply the display scaling before any plotting
    covariates(x)[, covariate] <- covariates(x)[, covariate] * scale
    if (plottype %in% c('contour', 'persp')) {
        ## contour()/persp() require values on a complete rectangular grid
        xval <- sort(unique(x$x))
        yval <- sort(unique(x$y))
        if (nrow(x) != length(xval) * length(yval)) {
            x <- rectangularMask(x)
            if (nrow(x) != length(xval) * length(yval))
                stop ("failed to convert irregular mask to rectangle")
        }
        zmat <- matrix(covariates(x)[, covariate], nrow = length(xval))
        if (plottype == 'contour') {
            contour(x = xval, y = yval, z = zmat, ...)
        } else {
            persp(x = xval, y = yval, z = zmat, ...)
        }
    } else {
        ## Fall back to the ordinary mask plot method ('shaded' or 'dots')
        class(x) <- c('mask', 'data.frame')
        covlevels <- plot(x, covariate = covariate, dots = (plottype == 'dots'), ...)
        if (!is.null(covlevels)) invisible(covlevels)
    }
}
############################################################################################
| /secr/R/Rsurface.R | no_license | ingted/R-Examples | R | false | false | 3,811 | r | Rsurface <- function (mask, sigma, usecov = NULL, alpha2 = 1, detectfn = "HHN", z = 1,
inverse = FALSE, scale = TRUE) {
if (ms(mask)) stop ("not ready for multisession masks")
mm <- nrow(mask)
detectfn <- valid.detectfn(detectfn, c(4,14:18)) ## converts from character
tmpmask <- cbind(mask, rep(1,mm))
miscparm <- c(1,0,0,0)
if (!is.null(usecov)) {
miscparm[2] <- 1
tmpmask <- cbind(tmpmask, exp(alpha2 * covariates(mask)[,usecov]))
}
miscparm[3] <- scale
temp <- .C ( "getdenomext",
as.integer(detectfn),
as.double (miscparm),
as.double(unlist(tmpmask)),
as.integer(mm),
as.double (sigma),
as.double (z),
invdenom = double(mm),
scale = double(1))
if (is.null(covariates(mask)))
covariates(mask) <- data.frame(matrix(nrow = mm, ncol = 0))
covariates(mask)[,"Resource"] <-
if (inverse) temp$invdenom
else 1/temp$invdenom
OK <- is.finite(covariates(mask)$Resource)
covariates(mask)$Resource[!OK] <- NA
class(mask) <- c('Rsurface', 'mask', 'data.frame') ## need data.frame to guide ms()
attr(mask, 'scale') <- temp$scale
mask
}
############################################################################################
Rsurface.as.data.frame <- function (x) {
covnames <- names(covariates(x))
OK <- match('Resource', covnames) ## just one col for now
covnames <- covnames[OK]
resources <- covariates(x)[,covnames]
df <- cbind(x, resources)
names(df) <- c('x','y',covnames)
df
}
############################################################################################
print.Rsurface <- function (x, ...) {
# if (ms(x)) { ## no need yet for ms()
# out <- vector('list')
# for (session in names(x)) {
# cat ('Session ', session, '\n')
# print(x[[session]], ...)
# out[[session]] <- x[[session]]
# }
# names(out) <- names(x)
# out
# }
# else {
df <- Rsurface.as.data.frame(x)
print(df, ...)
# }
invisible(df)
}
############################################################################################
plot.Rsurface <- function (x, covariate = 'Resource', plottype = 'shaded',
scale = 1, ...) {
if (ms(x)) {
breaklist <- lapply(x, plot, covariate, plottype, ...)
invisible(breaklist)
}
else {
if (length(covariate)>1)
stop ("whoa... just one at a time")
if (!(covariate %in% names(covariates(x))))
stop ("covariate ", covariate, " not found")
covariates(x)[,covariate] <- covariates(x)[,covariate] * scale
if (plottype %in% c('contour','persp')) {
xval <- sort(unique(x$x))
yval <- sort(unique(x$y))
if (nrow(x) != length(xval)*length(yval)) {
x <- rectangularMask(x)
if(nrow(x) != length(xval)*length(yval))
stop ("failed to convert irregular mask to rectangle")
}
zmat <- matrix(covariates(x)[,covariate], nrow = length(xval))
if (plottype == 'contour')
contour(x=xval, y=yval, z=zmat, ...)
else
persp(x=xval, y=yval, z=zmat, ...)
}
else {
class(x) <- c('mask','data.frame')
covlevels <- plot(x, covariate = covariate, dots = (plottype == 'dots'), ...)
if (!is.null(covlevels)) invisible(covlevels)
}
}
}
############################################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{runGSEA}
\alias{runGSEA}
\title{Run gene-set enrichment analysis (GSEA)}
\usage{
runGSEA(
gene,
value,
species,
db = "GO",
my.entrez = NULL,
my.pathway = NULL,
min.size = 3,
max.size = 300,
do.plot = T,
plot.top.n = 10,
path.name.wrap.width = 40,
e2s = F
)
}
\arguments{
\item{gene}{Character vector of gene names}
\item{value}{Numeric vector of values used to rank genes (e.g., logFC, expression, etc.). Must be same length as gene argument.}
\item{species}{Species. One of "Mm" (mouse) or "Hs" (human)}
\item{my.entrez}{Data.frame with SYMBOL and ENTREZID columns. Used to relate gene symbols to Entrez IDs. Retrieved internally unless explicitly provided.}
\item{my.pathway}{Named list of pathways, with each entry containing vector of Entrez IDs. Retrieved internally unless explicitly provided.}
\item{min.size}{Minimum gene set size. Default is 3.}
\item{max.size}{Maximum gene set size. Default is 300.}
\item{do.plot}{Logical to return dotplot visualizing top GSEA ranked pathways. Default is T.}
\item{plot.top.n}{Numeric specifying how many top pathways to visualize. Default is 10.}
\item{path.name.wrap.width}{Numeric specifying width of pathway names to wrap around. Argument passed to stringr::str_wrap(..., width = path.name.wrap.width)}
\item{e2s}{entrez to symbol mapping (computationally demanding). Default False.}
}
\value{
list of enrichment results
}
\description{
Run gene-set enrichment analysis (GSEA)
}
\author{
Nicholas Mikolajewicz
}
| /man/runGSEA.Rd | permissive | NMikolajewicz/scMiko | R | false | true | 1,600 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{runGSEA}
\alias{runGSEA}
\title{Run gene-set enrichment analysis (GSEA)}
\usage{
runGSEA(
gene,
value,
species,
db = "GO",
my.entrez = NULL,
my.pathway = NULL,
min.size = 3,
max.size = 300,
do.plot = T,
plot.top.n = 10,
path.name.wrap.width = 40,
e2s = F
)
}
\arguments{
\item{gene}{Character vector of gene names}
\item{value}{Numeric vector of values used to rank genes (e.g., logFC, expression, etc.). Must be same length as gene argument.}
\item{species}{Species. One of "Mm" (mouse) or "Hs" (human)}
\item{my.entrez}{Data.frame with SYMBOL and ENTREZID columns. Used to relate gene symbols to Entrez IDs. Retrieved internally unless explicitly provided.}
\item{my.pathway}{Named list of pathways, with each entry containing vector of Entrez IDs. Retrieved internally unless explicitly provided.}
\item{min.size}{Minimum gene set size. Default is 3.}
\item{max.size}{Minimum gene set size. Default is 300.}
\item{do.plot}{Logical to return dotplot visualizing top GSEA ranked pathways. Default is T.}
\item{plot.top.n}{Numeric specifying how many top pathways to visualize. Default is 10.}
\item{path.name.wrap.width}{Numeric specifying width of pathway names to wrap around. Argument passed to stringr::str_wrap(..., width = path.name.wrap.width)}
\item{e2s}{entrez to symbol mapping (computationally demanding). Default False.}
}
\value{
list of enrichment results
}
\description{
Run gene-set enrichment analysis (GSEA)
}
\author{
Nicholas Mikolajewicz
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICSKATPO.R
\name{ICskatPO}
\alias{ICskatPO}
\title{ICSKATPO.R}
\usage{
ICskatPO(
left_dmat,
right_dmat,
lt,
rt,
obs_ind,
tpos_ind,
gMat,
null_beta,
Itt
)
}
\arguments{
\item{left_dmat}{n*(p+nknots+2) design matrix for left end of interval.}
\item{right_dmat}{n*(p+nknots+2) design matrix for right end of interval.}
\item{lt}{n*1 vector of left side of interval times.}
\item{rt}{n*1 vector of right side of interval times.}
\item{obs_ind}{n*1 vector of whether the event was observed before last follow-up.}
\item{tpos_ind}{n*1 vector of whether the event was observed after follow-up started (t>0).}
\item{gMat}{n*q genotype matrix.}
\item{null_beta}{(p+nknots+2)*1 vector of coefficients for null model.}
\item{Itt}{(p+nknots+2)*(p+nknots+2) Fisher information matrix for null model coefficients.}
}
\value{
A list with the elements:
\item{p_SKAT}{ICSKAT p-value for PO model.}
\item{p_burden}{IC burden test p-value for PO model.}
\item{complex}{Indicator of whether the SKAT variance matrix was positive definite}
\item{sig_mat}{The covariance matrix of the score equations for genetic effects when treated as fixed effects}
\item{skatQ}{SKAT test statistic.}
\item{burdenQ}{Burden test statistic.}
\item{err}{err=1 for a bad null fit.}
\item{errMsg}{Describes the error.}
}
\description{
Calculate the test statistic and p-value for interval-censored skat with PO model.
}
\examples{
set.seed(0)
gMat <- matrix(data=rbinom(n=2000, size=2, prob=0.3), nrow=100)
xMat <- matrix(data=rnorm(200), nrow=100)
bhFunInv <- function(x) {x}
obsTimes <- 1:5
etaVec <- rep(0, 100)
outcomeDat <- gen_IC_data(bhFunInv = bhFunInv, obsTimes = obsTimes, windowHalf = 0.1,
probMiss = 0.1, etaVec = etaVec)
lt <- outcomeDat$leftTimes
rt <- outcomeDat$rightTimes
tpos_ind <- as.numeric(lt > 0)
obs_ind <- as.numeric(rt != Inf)
dmats <- make_IC_dmat(xMat, lt, rt, obs_ind, tpos_ind)
nullFit <- ICSKAT_fit_null(init_beta = rep(0.1, 5), left_dmat = dmats$left_dmat,
right_dmat=dmats$right_dmat, obs_ind = obs_ind, tpos_ind = tpos_ind, lt = lt, rt = rt)
ICskatPO(left_dmat = dmats$left_dmat, right_dmat=dmats$right_dmat, lt = lt, rt = rt,
obs_ind = obs_ind, tpos_ind = tpos_ind, gMat = gMat, null_beta = nullFit$beta_fit,
Itt = nullFit$Itt)
}
| /man/ICskatPO.Rd | no_license | ryanrsun/ICSKAT | R | false | true | 2,329 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICSKATPO.R
\name{ICskatPO}
\alias{ICskatPO}
\title{ICSKATPO.R}
\usage{
ICskatPO(
left_dmat,
right_dmat,
lt,
rt,
obs_ind,
tpos_ind,
gMat,
null_beta,
Itt
)
}
\arguments{
\item{left_dmat}{n*(p+nknots+2) design matrix for left end of interval.}
\item{right_dmat}{n*(p+nknots+2) design matrix for right end of interval.}
\item{lt}{n*1 vector of left side of interval times.}
\item{rt}{n*1 vector of right side of interval times.}
\item{obs_ind}{n*1 vector of whether the event was observed before last follow-up.}
\item{tpos_ind}{n*1 vector of whether the event was observed after follow-up started (t>0).}
\item{gMat}{n*q genotype matrix.}
\item{null_beta}{(p+nknots+2)*1 vector of coefficients for null model.}
\item{Itt}{(p+nknots+2)*(p+nknots+2) Fisher information matrix for null model coefficients.}
}
\value{
A list with the elements:
\item{p_SKAT}{ICSKAT p-value for PO model.}
\item{p_burden}{IC burden test p-value for PO model.}
\item{complex}{Indicator of whether the SKAT variance matrix was positive definite}
\item{sig_mat}{The covariance matrix of the score equations for genetic effects when treated as fixed effects}
\item{skatQ}{SKAT test statistic.}
\item{burdenQ}{Burden test statistic.}
\item{err}{err=1 for a bad null fit.}
\item{errMsg}{Describes the error.}
}
\description{
Calculate the test statistic and p-value for interval-censored skat with PO model.
}
\examples{
set.seed(0)
gMat <- matrix(data=rbinom(n=2000, size=2, prob=0.3), nrow=100)
xMat <- matrix(data=rnorm(200), nrow=100)
bhFunInv <- function(x) {x}
obsTimes <- 1:5
etaVec <- rep(0, 100)
outcomeDat <- gen_IC_data(bhFunInv = bhFunInv, obsTimes = obsTimes, windowHalf = 0.1,
probMiss = 0.1, etaVec = etaVec)
lt <- outcomeDat$leftTimes
rt <- outcomeDat$rightTimes
tpos_ind <- as.numeric(lt > 0)
obs_ind <- as.numeric(rt != Inf)
dmats <- make_IC_dmat(xMat, lt, rt, obs_ind, tpos_ind)
nullFit <- ICSKAT_fit_null(init_beta = rep(0.1, 5), left_dmat = dmats$left_dmat,
right_dmat=dmats$right_dmat, obs_ind = obs_ind, tpos_ind = tpos_ind, lt = lt, rt = rt)
ICskatPO(left_dmat = dmats$left_dmat, right_dmat=dmats$right_dmat, lt = lt, rt = rt,
obs_ind = obs_ind, tpos_ind = tpos_ind, gMat = gMat, null_beta = nullFit$beta_fit,
Itt = nullFit$Itt)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tokenize.R
\name{segment}
\alias{segment}
\alias{segment.character}
\alias{segment.corpus}
\title{segment texts into component elements}
\usage{
segment(x, ...)
\method{segment}{character}(x, what = c("tokens", "sentences", "paragraphs",
"tags", "other"), delimiter = ifelse(what == "tokens", " ", ifelse(what ==
"sentences", "[.!?:;]", ifelse(what == "paragraphs", "\\\\n{2}", ifelse(what
== "tags", "##\\\\w+\\\\b", NULL)))), perl = FALSE, ...)
\method{segment}{corpus}(x, what = c("tokens", "sentences", "paragraphs",
"tags", "other"), delimiter = ifelse(what == "tokens", " ", ifelse(what ==
"sentences", "[.!?:;]", ifelse(what == "paragraphs", "\\\\n{2}", ifelse(what
== "tags", "##\\\\w+\\\\b", NULL)))), perl = FALSE, ...)
}
\arguments{
\item{x}{text or corpus object to be segmented}
\item{...}{provides additional arguments passed to \code{\link{tokenize}}, if
\code{what = "tokens"} is used}
\item{what}{unit of segmentation. Current options are tokens, sentences,
paragraphs, and other. Segmenting on \code{other} allows segmentation of a
text on any user-defined value, and must be accompanied by the
\code{delimiter} argument.}
\item{delimiter}{delimiter defined as a \code{\link{regex}} for segmentation. Each
type has its own default, except \code{other}, which requires a value to be
specified.}
\item{perl}{logical. Should Perl-compatible regular expressions be used?}
}
\value{
A list of segmented texts, with each element of the list corresponding
to one of the original texts.
}
\description{
Segment text(s) into tokens, sentences, paragraphs, or other sections.
\code{segment} works on a character vector or corpus object, and allows the
delimiters to be defined. See details.
}
\details{
Tokens are delimited by Separators. For sentences, the delimiter
can be defined by the user. The default for sentences includes \code{.},
\code{!}, \code{?}, plus \code{;} and \code{:}.
For paragraphs, the default is two carriage returns, although this could be
changed to a single carriage return by changing the value of
\code{delimiter} to \code{"\\\n{1}"} which is the R version of the
\code{\link{regex}} for one newline character. (You might need this if the
document was created in a word processor, for instance, and the lines were
wrapped in the window rather than being hard-wrapped with a newline
character.)
}
\note{
Does not currently record document segments if segmenting a multi-text corpus
into smaller units. For this, use \link{changeunits} instead.
}
\examples{
# same as tokenize()
identical(tokenize(ukimmigTexts), segment(ukimmigTexts))
# segment into paragraphs
segment(ukimmigTexts[3:4], "paragraphs")
# segment a text into sentences
segmentedChar <- segment(ukimmigTexts, "sentences")
segmentedChar[2]
testCorpus <- corpus("##INTRO This is the introduction.
##DOC1 This is the first document.
Second sentence in Doc 1.
##DOC3 Third document starts here.
End of third document.")
testCorpusSeg <- segment(testCorpus, "tags")
summary(testCorpusSeg)
texts(testCorpusSeg)
# segment a corpus into sentences
segmentedCorpus <- segment(corpus(ukimmigTexts), "sentences")
identical(ndoc(segmentedCorpus), length(unlist(segmentedChar)))
}
| /man/segment.Rd | no_license | schinria/quanteda | R | false | true | 3,398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tokenize.R
\name{segment}
\alias{segment}
\alias{segment.character}
\alias{segment.corpus}
\title{segment texts into component elements}
\usage{
segment(x, ...)
\method{segment}{character}(x, what = c("tokens", "sentences", "paragraphs",
"tags", "other"), delimiter = ifelse(what == "tokens", " ", ifelse(what ==
"sentences", "[.!?:;]", ifelse(what == "paragraphs", "\\\\n{2}", ifelse(what
== "tags", "##\\\\w+\\\\b", NULL)))), perl = FALSE, ...)
\method{segment}{corpus}(x, what = c("tokens", "sentences", "paragraphs",
"tags", "other"), delimiter = ifelse(what == "tokens", " ", ifelse(what ==
"sentences", "[.!?:;]", ifelse(what == "paragraphs", "\\\\n{2}", ifelse(what
== "tags", "##\\\\w+\\\\b", NULL)))), perl = FALSE, ...)
}
\arguments{
\item{x}{text or corpus object to be segmented}
\item{...}{provides additional arguments passed to \code{\link{tokenize}}, if
\code{what = "tokens"} is used}
\item{what}{unit of segmentation. Current options are tokens, sentences,
paragraphs, and other. Segmenting on \code{other} allows segmentation of a
text on any user-defined value, and must be accompanied by the
\code{delimiter} argument.}
\item{delimiter}{delimiter defined as a \code{\link{regex}} for segmentation. Each
type has its own default, except \code{other}, which requires a value to be
specified.}
\item{perl}{logical. Should Perl-compatible regular expressions be used?}
}
\value{
A list of segmented texts, with each element of the list correponding
to one of the original texts.
}
\description{
Segment text(s) into tokens, sentences, paragraphs, or other sections.
\code{segment} works on a character vector or corpus object, and allows the
delimiters to be defined. See details.
}
\details{
Tokens are delimited by Separators. For sentences, the delimiter
can be defined by the user. The default for sentences includes \code{.},
\code{!}, \code{?}, plus \code{;} and \code{:}.
For paragraphs, the default is two carriage returns, although this could be
changed to a single carriage return by changing the value of
\code{delimiter} to \code{"\\\n{1}"} which is the R version of the
\code{\link{regex}} for one newline character. (You might need this if the
document was created in a word processor, for instance, and the lines were
wrapped in the window rather than being hard-wrapped with a newline
character.)
}
\note{
Does not currently record document segments if segmenting a multi-text corpus
into smaller units. For this, use \link{changeunits} instead.
}
\examples{
# same as tokenize()
identical(tokenize(ukimmigTexts), segment(ukimmigTexts))
# segment into paragraphs
segment(ukimmigTexts[3:4], "paragraphs")
# segment a text into sentences
segmentedChar <- segment(ukimmigTexts, "sentences")
segmentedChar[2]
testCorpus <- corpus("##INTRO This is the introduction.
##DOC1 This is the first document.
Second sentence in Doc 1.
##DOC3 Third document starts here.
End of third document.")
testCorpusSeg <- segment(testCorpus, "tags")
summary(testCorpusSeg)
texts(testCorpusSeg)
# segment a corpus into sentences
segmentedCorpus <- segment(corpus(ukimmigTexts), "sentences")
identical(ndoc(segmentedCorpus), length(unlist(segmentedChar)))
}
|
# Analysis of NYT ad-impression logs: click-through rate (CTR) by gender
# and age category for one day of data, with a faceted histogram output.
library(doBy)
library(plotrix)
library(ggplot2)
# Read the data for a single day or 10 days
day1<-read.csv("Datasets/nyt1.csv")
# Bucket ages: non-positive ages are flagged invalid; the remaining split is
# at the 19/20 boundary even though the labels read "<18"/"18+" --
# NOTE(review): confirm the intended cut point.
day1$age_category <- cut(
  day1$Age,
  c(-Inf,0,19,Inf),
  labels = c("Invalid age","<18","18+")
)
# Recode gender from 0/1 codes to labels
day1$Gender <- ifelse(day1$Gender == 0, 'Male', 'Female')
# Session code: later assignments overwrite earlier ones, so rows with
# clicks end up "Clicks" even though they also have impressions
day1$scode[day1$Impressions==0] <- "NoImps"
day1$scode[day1$Impressions >0] <- "Imps"
day1$scode[day1$Clicks >0] <- "Clicks"
day1$scode <- factor(day1$scode)
# Keep only rows with a valid (positive) age
temp <- subset(day1, Age > 0 & age_category != "Invalid age")
# Helper returning an element count (not used further in this script)
clen <- function(x){c(length(x))}
# Mean impressions and clicks per gender/age group
impTable <-summaryBy(Impressions~Gender+age_category,data=temp, FUN=mean)
clickTable <-summaryBy(Clicks~Gender+age_category,data=temp, FUN=mean)
# CTR (clicks / impressions) is only defined where impressions occurred
temp2 <- subset(temp, Impressions!= 0)
temp2$CTR <- temp2$Clicks / temp2$Impressions
ctrTable <-summaryBy(CTR~Gender+age_category,data=temp2, FUN=mean)
# Dodged CTR histogram by gender, faceted by age category, saved to comp.png
png(file = "comp.png")
ggplot(data= temp2, aes(x=CTR, fill=Gender, color= Gender))+ geom_histogram(position = "dodge", binwidth=0.1)+ labs(title = "Comparison of CTR")+ facet_grid(.~ age_category)
dev.off()
| /CSE313/Assignment 1/part-B.R | no_license | wjxhhhhh/XJTLU | R | false | false | 1,053 | r | library(doBy)
library(plotrix)
library(ggplot2)
# Read the data for a single day or 10 days
day1<-read.csv("Datasets/nyt1.csv")
day1$age_category <- cut(
day1$Age,
c(-Inf,0,19,Inf),
labels = c("Invalid age","<18","18+")
)
day1$Gender <- ifelse(day1$Gender == 0, 'Male', 'Female')
day1$scode[day1$Impressions==0] <- "NoImps"
day1$scode[day1$Impressions >0] <- "Imps"
day1$scode[day1$Clicks >0] <- "Clicks"
day1$scode <- factor(day1$scode)
temp <- subset(day1, Age > 0 & age_category != "Invalid age")
clen <- function(x){c(length(x))}
impTable <-summaryBy(Impressions~Gender+age_category,data=temp, FUN=mean)
clickTable <-summaryBy(Clicks~Gender+age_category,data=temp, FUN=mean)
temp2 <- subset(temp, Impressions!= 0)
temp2$CTR <- temp2$Clicks / temp2$Impressions
ctrTable <-summaryBy(CTR~Gender+age_category,data=temp2, FUN=mean)
png(file = "comp.png")
ggplot(data= temp2, aes(x=CTR, fill=Gender, color= Gender))+ geom_histogram(position = "dodge", binwidth=0.1)+ labs(title = "Comparison of CTR")+ facet_grid(.~ age_category)
dev.off()
|
#' Estimate photosynthesis parameters for C4 species using Yin's fitting procedure
#'
#' Using the gas exchange measurement (A_Ci curve), C4 photosynthesis model without
#' carbonic anhydrase and fitting procedure of Yin et al. (2011) to do nonlinear curve
#' fitting (using nlminb package) for estimating photosynthesis parameters (Vcmax,J,
#' Rd,gm and Vpmax) for C4 species using Yin's explicit equations. Make sure to load
#'the "stats" package before intstalling and using the "C4Estimation" package.
#' @param ACi Gas exchange measurement from Li6400 or other equipment.
#' It is a dataframe iput. Ci with the unit of ppm. You can prepare the data in Excel
#' file like the given example and save it as "tab delimited text". Then import data
#' by ACi <- read.table(file = "/Users/haoranzhou/Desktop/Learn R/ACi curve.txt",
#' header = TRUE)
#' @param Tleaf Leaf temperature when A_Ci curve is measured.
#' @param Patm Atmosphere pressure when A_Ci curve is measured.
#' @param alpha1 The fraction of O2 evolution occurring in the bundle sheath.
#' Unless you have enough information, input it as the 0.15.
#' @param x the fraction of total electron transport that are confined to be
#' used for the PEP regeneration out of J, which is the total electron transport.
#' @param CaBreakL Higher bound of Ci below which A is thought to be controled by
#' Rubisco Carboxylation (Ac). Start with 10.
#' @param CaBreakH Lower bound of Ci above which A is thought to be controled by
#' RuBP regeneration (Aj). Start with 50. If the estimation results showed
#' "inadmissible fits", change the CaBreakL and CaBreakH until "inadmissible fits"
#' disappear.
#'
#' @param startp A vector (c(Vcmax,J,Rd,gm and Vpmax)) that gives the start
#' points for the estimation. You can try with c(30, 150, 3, 10, 50).
#' @return This package will return a dataframe that contains the following values.
#' @return Parameter at leaf temperature: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters at leaf temperature.
#' @return Parameter at 25°C: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters standardized to 25°C.
#' @return Objective: The final objective value based on
#' the estimation results.
#' @return Convergence: An integer code. 0 indicates successful
#' convergence.
#' @return Message: A character string giving any additional
#' information returned by the optimizer, or NULL. For details, see PORT documentation.
#' @return Iterations: Number of iterations performed.
#' @return Evaluations: Number of objective function and gradient
#' function evaluations.
#' @export
C4EstimateWithoutCAYin <- function(ACi,Tleaf,Patm,alpha1,x,CaBreakL,CaBreakH,startp)
{
# Observed net assimilation, and intercellular CO2 converted from ppm to Pa.
A.obs <- ACi$A
Ci.obs <- ACi$Ci*Patm*0.001
# O2 partial pressure derived from atmospheric pressure (21% O2 fraction).
O2 <- Patm*0.21*1000
#Temperature adjustment for Kc,Ko,gammastar,Kp from 25°C to Tleaf
# (Arrhenius scaling; 0.008314 is the gas constant in kJ mol^-1 K^-1 and
# 298.15 K corresponds to 25°C).
Kc<-75.06*exp(36.5*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Ko<-35.82*exp(12.8*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
gammastar<-0.000244*exp(24.82*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Kp <- 8.5455*exp(52.2*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
# gbs (bundle-sheath conductance) uses a peaked response: an Arrhenius term
# multiplied by a deactivation ratio.
gbs <-0.029455*exp(116.77*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*0.86-264.6)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*0.86-264.6)/(273.15+Tleaf)/0.008314))
# Objective function minimised by nlminb(): given a candidate parameter
# vector (Vcmax, J, Rd, gm, Vpmax), predict A for every observed Ci and
# return the sum of squared residuals. Which limiting process applies to
# each point is decided by Ci relative to CaBreakL/CaBreakH.
fn<-function(Param){
Vcmax <- Param[1]
J <- Param[2]
Rd <- Param[3]
gm <- Param[4]
Vpmax <- Param[5]
# Mesophyll respiration is taken as half of day respiration.
Rm <- Rd/2
#Useful intermediate
# *_ac terms: Rubisco-(enzyme-)limited case (Ac); *_aj terms:
# electron-transport-limited case (Aj).
x1_ac <- Vcmax
x2_ac <- Kc/Ko/1000
x3_ac <- Kc
deno_ac <- gm+gbs-x2_ac*gm*alpha1/0.047
x1_aj <- (1-x)*J/4
x2_aj <- 2*gammastar
x3_aj <- 0
deno_aj <- gm+gbs-x2_aj*gm*alpha1/0.047
#Explicit calculation of AEE and AET
# d, f, k are shared polynomial coefficients (vectors over Ci.obs); the
# assimilation rate is then a root of a cubic with coefficients p, q, r.
d <- gm*(Rm-Vpmax-Ci.obs*(gm+2*gbs)-Kp*(gm + gbs))
f <- gm*gm*(Ci.obs*Vpmax+(Ci.obs+Kp)*(gbs*Ci.obs-Rm))
k <- gm*gm*gbs*(Ci.obs+Kp)
RcPc_p <- (d-(x3_ac+x2_ac*O2)*gm*gbs+(Rd-x1_ac)*(gm+gbs)-
(x1_ac*gammastar*gm+x2_ac*Rd*gm-x2_ac*k/gbs)*alpha1/0.047)/deno_ac
RrPc_p <- (d-(x3_aj+x2_aj*O2)*gm*gbs+(Rd-x1_aj)*(gm+gbs)-
(x1_aj*gammastar*gm+x2_aj*Rd*gm-x2_aj*k/gbs)*alpha1/0.047)/deno_aj
RcPc_q <- (f+(x3_ac+x2_ac*O2)*k+d*(Rd-x1_ac)-
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))+
(x1_ac*gammastar+x2_ac*Rd)*k*alpha1/0.047/gbs)/deno_ac
RrPc_q <- (f+(x3_aj+x2_aj*O2)*k+d*(Rd-x1_aj)-
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))+
(x1_aj*gammastar+x2_aj*Rd)*k*alpha1/0.047/gbs)/deno_aj
RcPc_r <- (Rd*(f+(x3_ac+x2_ac*O2)*k)-x1_ac*(f-k*gammastar*O2))/deno_ac
RrPc_r <- (Rd*(f+(x3_aj+x2_aj*O2)*k)-x1_aj*(f-k*gammastar*O2))/deno_aj
# Solve the cubic analytically via the trigonometric (three-real-roots)
# method; acos() yields NaN when |U| > Q^(3/2).
RcPc_Q <- (RcPc_p*RcPc_p-3*RcPc_q)/9
RrPc_Q <- (RrPc_p*RrPc_p-3*RrPc_q)/9
RcPc_U <- (2*RcPc_p^3-9*RcPc_p*RcPc_q+27*RcPc_r)/54
RrPc_U <- (2*RrPc_p^3-9*RrPc_p*RrPc_q+27*RrPc_r)/54
RcPc_PHI <- acos(RcPc_U/(RcPc_Q^3)^0.5)
RrPc_PHI <- acos(RrPc_U/(RrPc_Q^3)^0.5)
RcPc <- -2*RcPc_Q^0.5*cos(RcPc_PHI/3)-RcPc_p/3
RrPc <- -2*RrPc_Q^0.5*cos(RrPc_PHI/3)-RrPc_p/3
##Explicit calculation of ATE and ATT
# Vpr: PEP-regeneration-limited PEP carboxylation rate (x is the fraction
# of total electron transport J reserved for PEP regeneration).
Vpr <- x*J/2
a_ac <- x2_ac*gm*alpha1/0.047-gm-gbs
a_aj <- x2_aj*gm*alpha1/0.047-gm-gbs
b_ac <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_ac+x2_ac*O2)*gm*gbs+
(x1_ac*gammastar+x2_ac*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_ac-Rd)
b_aj <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_aj+x2_aj*O2)*gm*gbs+
(x1_aj*gammastar+x2_aj*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_aj-Rd)
c_ac <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_ac-Rd)+
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))
c_aj <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_aj-Rd)+
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))
# Roots of the quadratic for the PEP-regeneration-limited case.
RcPr <- (-b_ac+(b_ac^2-4*a_ac*c_ac)^0.5)/2/a_ac
RrPr <- (-b_aj+(b_aj^2-4*a_aj*c_aj)^0.5)/2/a_aj
#Objective function
# Vpc: Michaelis-Menten PEP carboxylation rate at the mesophyll CO2 level
# implied by each candidate assimilation rate (Cm = Ci - A/gm).
Vpc_RcPc <- Vpmax*(Ci.obs-RcPc/gm)/((Ci.obs-RcPc/gm)+Kp)
Vpc_RrPc <- Vpmax*(Ci.obs-RrPc/gm)/((Ci.obs-RrPc/gm)+Kp)
# Pick the enzyme- or regeneration-limited branch per observation.
# NOTE(review): the strict '<' here differs from the '<=' used in the
# post-fit recomputation after the optimisation -- confirm the intended
# tie-breaking at Vpc == Vpr.
Ac <- (Vpc_RcPc<Vpr)*RcPc+(Vpc_RcPc>=Vpr)*RcPr
Aj <- (Vpc_RrPc<Vpr)*RrPc+(Vpc_RrPc>=Vpr)*RrPr
# SSR: use Ac below CaBreakL, Aj above CaBreakH, and min(Ac, Aj) in between.
sum((A.obs-(Ci.obs<=CaBreakL)*Ac-(Ci.obs>=CaBreakH)*Aj-(Ci.obs>CaBreakL)*
(Ci.obs<CaBreakH)*((Ac<Aj)*Ac+(Ac>=Aj)*Aj))^2)
}
# Estimate Vcmax, J, Rd, gm and Vpmax by box-constrained minimisation of fn.
# Note: despite the original comment mentioning "nloptr", this actually uses
# stats::nlminb (PORT routines) with fixed lower/upper bounds per parameter.
Est.model <- nlminb(c(startp[1],startp[2] , startp[3], startp[4], startp[5]),
fn, lower=c(0,0,0,0,0), upper=c(200, 600, 20, 30, 200))
Parameters<-Est.model$par
Vcmax <- Parameters[1]
J <- Parameters[2]
Rd <- Parameters[3]
gm <- Parameters[4]
Vpmax <- Parameters[5]
Rm <- Rd/2
#Temperature adjustment for Vcmax,J,Rd,gm and Vpmax from Tleaf to 25°C
# (inverse of the Arrhenius / peaked-Arrhenius scalings used above).
Vcmax25<-Parameters[1]/(exp(51.89*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
J25<-Parameters[2]/(exp(69.246*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*0.609-188.502)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*0.609-188.502)/(273.15+Tleaf)/0.008314)))
Rd25<-Parameters[3]/(exp(41.85*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
gm25<-Parameters[4]/(exp(46.533*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*1.2-366.8)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*1.2-366.8)/(273.15+Tleaf)/0.008314)))
Vpmax25<-Parameters[5]/(exp(65.6905*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*0.47-147.69375)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*0.47-147.69375)/(273.15+Tleaf)/
0.008314)))
para25<-c(Vcmax25,J25,Rd25,gm25,Vpmax25)
#Calculate the estimation results
# Recompute Ac and Aj at the fitted parameters (same algebra as inside fn)
# so they can be reported, classified and plotted.
#Useful intermediate
x1_ac <- Vcmax
x2_ac <- Kc/Ko/1000
x3_ac <- Kc
deno_ac <- gm+gbs-x2_ac*gm*alpha1/0.047
x1_aj <- (1-x)*J/4
x2_aj <- 2*gammastar
x3_aj <- 0
deno_aj <- gm+gbs-x2_aj*gm*alpha1/0.047
#Explicit calculation of AEE and AET
d <- gm*(Rm-Vpmax-Ci.obs*(gm+2*gbs)-Kp*(gm + gbs))
f <- gm*gm*(Ci.obs*Vpmax+(Ci.obs+Kp)*(gbs*Ci.obs-Rm))
k <- gm*gm*gbs*(Ci.obs+Kp)
RcPc_p <- (d-(x3_ac+x2_ac*O2)*gm*gbs+(Rd-x1_ac)*(gm+gbs)-
(x1_ac*gammastar*gm+x2_ac*Rd*gm-x2_ac*k/gbs)*alpha1/0.047)/deno_ac
RrPc_p <- (d-(x3_aj+x2_aj*O2)*gm*gbs+(Rd-x1_aj)*(gm+gbs)-
(x1_aj*gammastar*gm+x2_aj*Rd*gm-x2_aj*k/gbs)*alpha1/0.047)/deno_aj
RcPc_q <- (f+(x3_ac+x2_ac*O2)*k+d*(Rd-x1_ac)-
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))+
(x1_ac*gammastar+x2_ac*Rd)*k*alpha1/0.047/gbs)/deno_ac
RrPc_q <- (f+(x3_aj+x2_aj*O2)*k+d*(Rd-x1_aj)-
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))+
(x1_aj*gammastar+x2_aj*Rd)*k*alpha1/0.047/gbs)/deno_aj
RcPc_r <- (Rd*(f+(x3_ac+x2_ac*O2)*k)-x1_ac*(f-k*gammastar*O2))/deno_ac
RrPc_r <- (Rd*(f+(x3_aj+x2_aj*O2)*k)-x1_aj*(f-k*gammastar*O2))/deno_aj
RcPc_Q <- (RcPc_p*RcPc_p-3*RcPc_q)/9
RrPc_Q <- (RrPc_p*RrPc_p-3*RrPc_q)/9
RcPc_U <- (2*RcPc_p^3-9*RcPc_p*RcPc_q+27*RcPc_r)/54
RrPc_U <- (2*RrPc_p^3-9*RrPc_p*RrPc_q+27*RrPc_r)/54
RcPc_PHI <- acos(RcPc_U/(RcPc_Q^3)^0.5)
RrPc_PHI <- acos(RrPc_U/(RrPc_Q^3)^0.5)
RcPc <- -2*RcPc_Q^0.5*cos(RcPc_PHI/3)-RcPc_p/3
RrPc <- -2*RrPc_Q^0.5*cos(RrPc_PHI/3)-RrPc_p/3
##Explicit calculation of ATE and ATT
Vpr <- x*J/2
a_ac <- x2_ac*gm*alpha1/0.047-gm-gbs
a_aj <- x2_aj*gm*alpha1/0.047-gm-gbs
b_ac <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_ac+x2_ac*O2)*gm*gbs+
(x1_ac*gammastar+x2_ac*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_ac-Rd)
b_aj <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_aj+x2_aj*O2)*gm*gbs+
(x1_aj*gammastar+x2_aj*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_aj-Rd)
c_ac <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_ac-Rd)+
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))
c_aj <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_aj-Rd)+
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))
RcPr <- (-b_ac+(b_ac^2-4*a_ac*c_ac)^0.5)/2/a_ac
RrPr <- (-b_aj+(b_aj^2-4*a_aj*c_aj)^0.5)/2/a_aj
Vpc_RcPc <- Vpmax*(Ci.obs-RcPc/gm)/((Ci.obs-RcPc/gm)+Kp)
Vpc_RrPc <- Vpmax*(Ci.obs-RrPc/gm)/((Ci.obs-RrPc/gm)+Kp)
# NOTE(review): '<='/'>' here versus '<'/'>=' inside fn -- confirm the
# intended tie-breaking at Vpc == Vpr.
Ac <- (Vpc_RcPc<=Vpr)*RcPc+(Vpc_RcPc>Vpr)*RcPr
Aj <- (Vpc_RrPc<=Vpr)*RrPc+(Vpc_RrPc>Vpr)*RrPr
#Write a while loop to compare Ac and Aj
# limitation code per observation: 1 if Ac < Aj, 2 if Ac >= Aj (the
# smaller of the two candidate rates is the one that limits A).
Count<-length(Ci.obs)
limitation <- rep(0,Count)
i <- 1
while (i<=Count){
limitation[i] <- 2*(Ac[i]>=Aj[i])+1*(Ac[i]<Aj[i])
i=i+1
}
#Print out to see whether there is inadmissible fit
print("Print out to see whether there is inadmissible fit")
Ci_name <- "Ci"
limitation_name <- "limitation_state"
df <- data.frame(Ci.obs,limitation)
colnames(df)<-c(Ci_name,limitation_name)
print(df)
#Plot the estimation and observation
print("Plot the observed A and estimated Ac and Aj")
xrange<-max(Ci.obs)
yrange<-max(A.obs)
plot(Ci.obs,A.obs, type="p",col="blue",xlim=range(0,xrange),ylim=range(0,yrange),
pch=20, xlab="Ci(Pa)",ylab="A")
lines(Ci.obs,Ac, type="l",col="red",lwd=2)
lines(Ci.obs,Aj,type="l",col="green",lwd=2)
leg.text<-c("Obs A", "Cal Ac", "Cal Aj")
xrange<-max(Ci.obs)
legend(xrange-20,4,leg.text,col=c("blue","red","green"),pch=c(20,NA,NA),
lty=c(0,1,1),cex=0.5,lwd=c(0,2,2))
#Return the estimation results
# Named list combining fitted parameters (at Tleaf and at 25°C), the final
# SSR, and nlminb's convergence diagnostics.
EstF<-list(Est.model$par,para25,Est.model$objective,Est.model$convergence,
Est.model$iterations,Est.model$evaluations,Est.model$message)
EstFinal<-setNames(EstF,c("Parameter at leaf temperature","Parameter at 25°C",
"Objective","Convergence","Iterations","Evaluations",
"Message"))
return(EstFinal)
}
| /R/C4EstimateWithoutCAYin.R | no_license | zhouhaoran06/C4-Parameter-Estimation | R | false | false | 11,708 | r | #' Estimate photosynthesis parameters for C4 species using Yin's fitting procedure
#'
#' Using the gas exchange measurement (A_Ci curve), C4 photosynthesis model without
#' carbonic anhydrase and fitting procedure of Yin et al. (2011) to do nonlinear curve
#' fitting (using nlminb package) for estimating photosynthesis parameters (Vcmax,J,
#' Rd,gm and Vpmax) for C4 species using Yin's explicit equations. Make sure to load
#' the "stats" package before installing and using the "C4Estimation" package.
#' @param ACi Gas exchange measurement from Li6400 or other equipment.
#' It is a dataframe input. Ci with the unit of ppm. You can prepare the data in Excel
#' file like the given example and save it as "tab delimited text". Then import data
#' by ACi <- read.table(file = "/Users/haoranzhou/Desktop/Learn R/ACi curve.txt",
#' header = TRUE)
#' @param Tleaf Leaf temperature when A_Ci curve is measured.
#' @param Patm Atmosphere pressure when A_Ci curve is measured.
#' @param alpha1 The fraction of O2 evolution occurring in the bundle sheath.
#' Unless you have enough information, input it as the 0.15.
#' @param x the fraction of total electron transport that are confined to be
#' used for the PEP regeneration out of J, which is the total electron transport.
#' @param CaBreakL Upper bound of Ci below which A is thought to be controlled by
#' Rubisco Carboxylation (Ac). Start with 10.
#' @param CaBreakH Lower bound of Ci above which A is thought to be controlled by
#' RuBP regeneration (Aj). Start with 50. If the estimation results show
#' "inadmissible fits", change CaBreakL and CaBreakH until the "inadmissible fits"
#' disappear.
#'
#' @param startp A vector that gives the start points for the estimation of
#' the five parameters (c(Vcmax, J, Rd, gm and Vpmax)). You can try with
#' c(30, 150, 3, 10, 50).
#' @return This function returns a named list that contains the following values.
#' @return Parameter at leaf temperature: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters at leaf temperature.
#' @return Parameter at 25°C: A vector (c(Vcmax,J,Rd,gm and Vpmax))
#' returns the estimation parameters adjusted to 25°C.
#' @return Objective: The final objective value based on
#' the estimation results.
#' @return Convergence: An integer code. 0 indicates successful
#' convergence.
#' @return Message: A character string giving any additional
#' information returned by the optimizer, or NULL. For details, see PORT documentation.
#' @return Iterations: Number of iterations performed.
#' @return Evaluations: Number of objective function and gradient
#' function evaluations.
#' @export
C4EstimateWithoutCAYin <- function(ACi,Tleaf,Patm,alpha1,x,CaBreakL,CaBreakH,startp)
{
# Observed net assimilation, and intercellular CO2 converted from ppm to Pa.
A.obs <- ACi$A
Ci.obs <- ACi$Ci*Patm*0.001
# O2 partial pressure derived from atmospheric pressure (21% O2 fraction).
O2 <- Patm*0.21*1000
#Temperature adjustment for Kc,Ko,gammastar,Kp from 25°C to Tleaf
# (Arrhenius scaling; 0.008314 is the gas constant in kJ mol^-1 K^-1 and
# 298.15 K corresponds to 25°C).
Kc<-75.06*exp(36.5*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Ko<-35.82*exp(12.8*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
gammastar<-0.000244*exp(24.82*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
Kp <- 8.5455*exp(52.2*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))
# gbs (bundle-sheath conductance) uses a peaked response: an Arrhenius term
# multiplied by a deactivation ratio.
gbs <-0.029455*exp(116.77*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*0.86-264.6)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*0.86-264.6)/(273.15+Tleaf)/0.008314))
# Objective function minimised by nlminb(): given a candidate parameter
# vector (Vcmax, J, Rd, gm, Vpmax), predict A for every observed Ci and
# return the sum of squared residuals. Which limiting process applies to
# each point is decided by Ci relative to CaBreakL/CaBreakH.
fn<-function(Param){
Vcmax <- Param[1]
J <- Param[2]
Rd <- Param[3]
gm <- Param[4]
Vpmax <- Param[5]
# Mesophyll respiration is taken as half of day respiration.
Rm <- Rd/2
#Useful intermediate
# *_ac terms: Rubisco-(enzyme-)limited case (Ac); *_aj terms:
# electron-transport-limited case (Aj).
x1_ac <- Vcmax
x2_ac <- Kc/Ko/1000
x3_ac <- Kc
deno_ac <- gm+gbs-x2_ac*gm*alpha1/0.047
x1_aj <- (1-x)*J/4
x2_aj <- 2*gammastar
x3_aj <- 0
deno_aj <- gm+gbs-x2_aj*gm*alpha1/0.047
#Explicit calculation of AEE and AET
# d, f, k are shared polynomial coefficients (vectors over Ci.obs); the
# assimilation rate is then a root of a cubic with coefficients p, q, r.
d <- gm*(Rm-Vpmax-Ci.obs*(gm+2*gbs)-Kp*(gm + gbs))
f <- gm*gm*(Ci.obs*Vpmax+(Ci.obs+Kp)*(gbs*Ci.obs-Rm))
k <- gm*gm*gbs*(Ci.obs+Kp)
RcPc_p <- (d-(x3_ac+x2_ac*O2)*gm*gbs+(Rd-x1_ac)*(gm+gbs)-
(x1_ac*gammastar*gm+x2_ac*Rd*gm-x2_ac*k/gbs)*alpha1/0.047)/deno_ac
RrPc_p <- (d-(x3_aj+x2_aj*O2)*gm*gbs+(Rd-x1_aj)*(gm+gbs)-
(x1_aj*gammastar*gm+x2_aj*Rd*gm-x2_aj*k/gbs)*alpha1/0.047)/deno_aj
RcPc_q <- (f+(x3_ac+x2_ac*O2)*k+d*(Rd-x1_ac)-
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))+
(x1_ac*gammastar+x2_ac*Rd)*k*alpha1/0.047/gbs)/deno_ac
RrPc_q <- (f+(x3_aj+x2_aj*O2)*k+d*(Rd-x1_aj)-
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))+
(x1_aj*gammastar+x2_aj*Rd)*k*alpha1/0.047/gbs)/deno_aj
RcPc_r <- (Rd*(f+(x3_ac+x2_ac*O2)*k)-x1_ac*(f-k*gammastar*O2))/deno_ac
RrPc_r <- (Rd*(f+(x3_aj+x2_aj*O2)*k)-x1_aj*(f-k*gammastar*O2))/deno_aj
# Solve the cubic analytically via the trigonometric (three-real-roots)
# method; acos() yields NaN when |U| > Q^(3/2).
RcPc_Q <- (RcPc_p*RcPc_p-3*RcPc_q)/9
RrPc_Q <- (RrPc_p*RrPc_p-3*RrPc_q)/9
RcPc_U <- (2*RcPc_p^3-9*RcPc_p*RcPc_q+27*RcPc_r)/54
RrPc_U <- (2*RrPc_p^3-9*RrPc_p*RrPc_q+27*RrPc_r)/54
RcPc_PHI <- acos(RcPc_U/(RcPc_Q^3)^0.5)
RrPc_PHI <- acos(RrPc_U/(RrPc_Q^3)^0.5)
RcPc <- -2*RcPc_Q^0.5*cos(RcPc_PHI/3)-RcPc_p/3
RrPc <- -2*RrPc_Q^0.5*cos(RrPc_PHI/3)-RrPc_p/3
##Explicit calculation of ATE and ATT
# Vpr: PEP-regeneration-limited PEP carboxylation rate (x is the fraction
# of total electron transport J reserved for PEP regeneration).
Vpr <- x*J/2
a_ac <- x2_ac*gm*alpha1/0.047-gm-gbs
a_aj <- x2_aj*gm*alpha1/0.047-gm-gbs
b_ac <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_ac+x2_ac*O2)*gm*gbs+
(x1_ac*gammastar+x2_ac*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_ac-Rd)
b_aj <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_aj+x2_aj*O2)*gm*gbs+
(x1_aj*gammastar+x2_aj*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_aj-Rd)
c_ac <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_ac-Rd)+
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))
c_aj <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_aj-Rd)+
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))
# Roots of the quadratic for the PEP-regeneration-limited case.
RcPr <- (-b_ac+(b_ac^2-4*a_ac*c_ac)^0.5)/2/a_ac
RrPr <- (-b_aj+(b_aj^2-4*a_aj*c_aj)^0.5)/2/a_aj
#Objective function
# Vpc: Michaelis-Menten PEP carboxylation rate at the mesophyll CO2 level
# implied by each candidate assimilation rate (Cm = Ci - A/gm).
Vpc_RcPc <- Vpmax*(Ci.obs-RcPc/gm)/((Ci.obs-RcPc/gm)+Kp)
Vpc_RrPc <- Vpmax*(Ci.obs-RrPc/gm)/((Ci.obs-RrPc/gm)+Kp)
# Pick the enzyme- or regeneration-limited branch per observation.
# NOTE(review): the strict '<' here differs from the '<=' used in the
# post-fit recomputation after the optimisation -- confirm the intended
# tie-breaking at Vpc == Vpr.
Ac <- (Vpc_RcPc<Vpr)*RcPc+(Vpc_RcPc>=Vpr)*RcPr
Aj <- (Vpc_RrPc<Vpr)*RrPc+(Vpc_RrPc>=Vpr)*RrPr
# SSR: use Ac below CaBreakL, Aj above CaBreakH, and min(Ac, Aj) in between.
sum((A.obs-(Ci.obs<=CaBreakL)*Ac-(Ci.obs>=CaBreakH)*Aj-(Ci.obs>CaBreakL)*
(Ci.obs<CaBreakH)*((Ac<Aj)*Ac+(Ac>=Aj)*Aj))^2)
}
# Estimate Vcmax, J, Rd, gm and Vpmax by box-constrained minimisation of fn.
# Note: despite the original comment mentioning "nloptr", this actually uses
# stats::nlminb (PORT routines) with fixed lower/upper bounds per parameter.
Est.model <- nlminb(c(startp[1],startp[2] , startp[3], startp[4], startp[5]),
fn, lower=c(0,0,0,0,0), upper=c(200, 600, 20, 30, 200))
Parameters<-Est.model$par
Vcmax <- Parameters[1]
J <- Parameters[2]
Rd <- Parameters[3]
gm <- Parameters[4]
Vpmax <- Parameters[5]
Rm <- Rd/2
#Temperature adjustment for Vcmax,J,Rd,gm and Vpmax from Tleaf to 25°C
# (inverse of the Arrhenius / peaked-Arrhenius scalings used above).
Vcmax25<-Parameters[1]/(exp(51.89*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
J25<-Parameters[2]/(exp(69.246*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*0.609-188.502)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*0.609-188.502)/(273.15+Tleaf)/0.008314)))
Rd25<-Parameters[3]/(exp(41.85*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf))))
gm25<-Parameters[4]/(exp(46.533*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*1.2-366.8)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*1.2-366.8)/(273.15+Tleaf)/0.008314)))
Vpmax25<-Parameters[5]/(exp(65.6905*(Tleaf-25)/(298.15*0.008314*(273.15+Tleaf)))*
(1+exp((298.15*0.47-147.69375)/298.15/0.008314))/
(1+exp(((273.15+Tleaf)*0.47-147.69375)/(273.15+Tleaf)/
0.008314)))
para25<-c(Vcmax25,J25,Rd25,gm25,Vpmax25)
#Calculate the estimation results
# Recompute Ac and Aj at the fitted parameters (same algebra as inside fn)
# so they can be reported, classified and plotted.
#Useful intermediate
x1_ac <- Vcmax
x2_ac <- Kc/Ko/1000
x3_ac <- Kc
deno_ac <- gm+gbs-x2_ac*gm*alpha1/0.047
x1_aj <- (1-x)*J/4
x2_aj <- 2*gammastar
x3_aj <- 0
deno_aj <- gm+gbs-x2_aj*gm*alpha1/0.047
#Explicit calculation of AEE and AET
d <- gm*(Rm-Vpmax-Ci.obs*(gm+2*gbs)-Kp*(gm + gbs))
f <- gm*gm*(Ci.obs*Vpmax+(Ci.obs+Kp)*(gbs*Ci.obs-Rm))
k <- gm*gm*gbs*(Ci.obs+Kp)
RcPc_p <- (d-(x3_ac+x2_ac*O2)*gm*gbs+(Rd-x1_ac)*(gm+gbs)-
(x1_ac*gammastar*gm+x2_ac*Rd*gm-x2_ac*k/gbs)*alpha1/0.047)/deno_ac
RrPc_p <- (d-(x3_aj+x2_aj*O2)*gm*gbs+(Rd-x1_aj)*(gm+gbs)-
(x1_aj*gammastar*gm+x2_aj*Rd*gm-x2_aj*k/gbs)*alpha1/0.047)/deno_aj
RcPc_q <- (f+(x3_ac+x2_ac*O2)*k+d*(Rd-x1_ac)-
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))+
(x1_ac*gammastar+x2_ac*Rd)*k*alpha1/0.047/gbs)/deno_ac
RrPc_q <- (f+(x3_aj+x2_aj*O2)*k+d*(Rd-x1_aj)-
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))+
(x1_aj*gammastar+x2_aj*Rd)*k*alpha1/0.047/gbs)/deno_aj
RcPc_r <- (Rd*(f+(x3_ac+x2_ac*O2)*k)-x1_ac*(f-k*gammastar*O2))/deno_ac
RrPc_r <- (Rd*(f+(x3_aj+x2_aj*O2)*k)-x1_aj*(f-k*gammastar*O2))/deno_aj
RcPc_Q <- (RcPc_p*RcPc_p-3*RcPc_q)/9
RrPc_Q <- (RrPc_p*RrPc_p-3*RrPc_q)/9
RcPc_U <- (2*RcPc_p^3-9*RcPc_p*RcPc_q+27*RcPc_r)/54
RrPc_U <- (2*RrPc_p^3-9*RrPc_p*RrPc_q+27*RrPc_r)/54
RcPc_PHI <- acos(RcPc_U/(RcPc_Q^3)^0.5)
RrPc_PHI <- acos(RrPc_U/(RrPc_Q^3)^0.5)
RcPc <- -2*RcPc_Q^0.5*cos(RcPc_PHI/3)-RcPc_p/3
RrPc <- -2*RrPc_Q^0.5*cos(RrPc_PHI/3)-RrPc_p/3
##Explicit calculation of ATE and ATT
Vpr <- x*J/2
a_ac <- x2_ac*gm*alpha1/0.047-gm-gbs
a_aj <- x2_aj*gm*alpha1/0.047-gm-gbs
b_ac <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_ac+x2_ac*O2)*gm*gbs+
(x1_ac*gammastar+x2_ac*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_ac-Rd)
b_aj <- gm*(Ci.obs*gbs+Vpr-Rm)+(x3_aj+x2_aj*O2)*gm*gbs+
(x1_aj*gammastar+x2_aj*Rd)*gm*alpha1/0.047+(gm+gbs)*(x1_aj-Rd)
c_ac <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_ac-Rd)+
gm*gbs*(x1_ac*gammastar*O2+Rd*(x3_ac+x2_ac*O2))
c_aj <- -gm*(Ci.obs*gbs+Vpr-Rm)*(x1_aj-Rd)+
gm*gbs*(x1_aj*gammastar*O2+Rd*(x3_aj+x2_aj*O2))
RcPr <- (-b_ac+(b_ac^2-4*a_ac*c_ac)^0.5)/2/a_ac
RrPr <- (-b_aj+(b_aj^2-4*a_aj*c_aj)^0.5)/2/a_aj
Vpc_RcPc <- Vpmax*(Ci.obs-RcPc/gm)/((Ci.obs-RcPc/gm)+Kp)
Vpc_RrPc <- Vpmax*(Ci.obs-RrPc/gm)/((Ci.obs-RrPc/gm)+Kp)
# NOTE(review): '<='/'>' here versus '<'/'>=' inside fn -- confirm the
# intended tie-breaking at Vpc == Vpr.
Ac <- (Vpc_RcPc<=Vpr)*RcPc+(Vpc_RcPc>Vpr)*RcPr
Aj <- (Vpc_RrPc<=Vpr)*RrPc+(Vpc_RrPc>Vpr)*RrPr
#Write a while loop to compare Ac and Aj
# limitation code per observation: 1 if Ac < Aj, 2 if Ac >= Aj (the
# smaller of the two candidate rates is the one that limits A).
Count<-length(Ci.obs)
limitation <- rep(0,Count)
i <- 1
while (i<=Count){
limitation[i] <- 2*(Ac[i]>=Aj[i])+1*(Ac[i]<Aj[i])
i=i+1
}
#Print out to see whether there is inadmissible fit
print("Print out to see whether there is inadmissible fit")
Ci_name <- "Ci"
limitation_name <- "limitation_state"
df <- data.frame(Ci.obs,limitation)
colnames(df)<-c(Ci_name,limitation_name)
print(df)
#Plot the estimation and observation
print("Plot the observed A and estimated Ac and Aj")
xrange<-max(Ci.obs)
yrange<-max(A.obs)
plot(Ci.obs,A.obs, type="p",col="blue",xlim=range(0,xrange),ylim=range(0,yrange),
pch=20, xlab="Ci(Pa)",ylab="A")
lines(Ci.obs,Ac, type="l",col="red",lwd=2)
lines(Ci.obs,Aj,type="l",col="green",lwd=2)
leg.text<-c("Obs A", "Cal Ac", "Cal Aj")
xrange<-max(Ci.obs)
legend(xrange-20,4,leg.text,col=c("blue","red","green"),pch=c(20,NA,NA),
lty=c(0,1,1),cex=0.5,lwd=c(0,2,2))
#Return the estimation results
# Named list combining fitted parameters (at Tleaf and at 25°C), the final
# SSR, and nlminb's convergence diagnostics.
EstF<-list(Est.model$par,para25,Est.model$objective,Est.model$convergence,
Est.model$iterations,Est.model$evaluations,Est.model$message)
EstFinal<-setNames(EstF,c("Parameter at leaf temperature","Parameter at 25°C",
"Objective","Convergence","Iterations","Evaluations",
"Message"))
return(EstFinal)
}
|
# Geocode Post-Katrina damage-assessment records to census tracts and
# summarise damage per tract.
#
# Steps:
#   1. Load the damage CSV and keep only rows with a real damage
#      percentage (Percentage == -1.0 appears to be a missing-data
#      sentinel -- TODO confirm against the source data dictionary).
#   2. Write the addresses out in 1000-row chunks (batch-geocoding input).
#   3. Re-run append_geoid() (tigris) over specific 100-row slices that
#      failed in earlier runs, then retry remaining NA rows one by one.
#   4. Aggregate damage statistics by census-tract geoid.
library(data.table)
damage <- fread("data/Post-Katrina_Damage_Assessment.csv")[Percentage > -1.0]
rows <- nrow(damage)
# Export address chunks of (at most) 1000 rows each.
for (i in 0:100) {
  start <- (1000 * i) + 1
  end <- min(1000 * (i + 1), rows)
  if (start > rows) break  # no rows left for this chunk
  write.csv(
    damage[start:end, .(Address, "New Orleans", "LA")],
    paste0("data/damage_address_", i, ".csv")
  )
}
# Manual geocoding retries for slices that previously failed. Each call
# appends the census-tract ("tr") geoid for the given 100-row window.
damage[95101:95200, geoid := append_geoid(damage[95101:95200, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
damage[97401:97500, geoid := append_geoid(damage[97401:97500, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
damage[98301:98400, geoid := append_geoid(damage[98301:98400, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
damage[99001:99100, geoid := append_geoid(damage[99001:99100, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
damage[99501:99600, geoid := append_geoid(damage[99501:99600, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
damage[99901:100000, geoid := append_geoid(damage[99901:100000, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
# BUG FIX: the original line assigned to the reversed range 100101:100100
# while reading addresses from 99901:100000 (a stale copy-paste); the
# intended window is 100001:100100 on both sides.
damage[100001:100100, geoid := append_geoid(damage[100001:100100, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
# Report how many records are still missing a geoid.
sum(is.na(damage$geoid))
write.csv(damage, "data/damage.csv")
# Per-tract summary: mean damage percentage, worst flood depth/duration.
tract_damage <- damage[, .(
  "pct_damage" = mean(Percentage),
  "max_flood_depth" = max(FloodDepth),
  "max_flood_duration" = max(FloodDuration)), by = geoid]
# Row-by-row retry for any tail rows still lacking a geoid. The range
# 88434:100193 is hard-coded from a previous run -- TODO confirm it still
# matches nrow(damage) before re-running.
for (i in 88434:100193) {
  if (is.na(damage[i]$geoid)) {
    print(i)
    damage[i, geoid := append_geoid(damage[i, .("street" = Address, "city" = "New Orleans", "state" = "LA")], geoid_type = "tr")$geoid]
  }
}
#' @importFrom utils packageVersion contrib.url head
#' installed.packages sessionInfo tail
NULL
#' Install or update Bioconductor, CRAN, or GitHub packages
#'
#' This package provides tools for managing _Bioconductor_ and other
#' packages in a manner consistent with _Bioconductor_'s package
#' versioning and release system.
#'
#' @details
#'
#' Main functions are as follows; additional help is available for
#' each function, e.g., `?BiocManager::version`.
#'
#' - `BiocManager::install()`: Install or update packages from
#' _Bioconductor_, CRAN, and GitHub.
#'
#' - `BiocManager::version()`: Report the version of _Bioconductor_ in
#' use.
#'
#' - `BiocManager::available()`: Return a `character()` vector of
#' package names available (at `BiocManager::repositories()`) for
#' installation.
#'
#' - `BiocManager::valid()`: Determine whether installed packages are
#' from the same version of _Bioconductor_.
#'
#' - `BiocManager::repositories()`: _Bioconductor_ and other
#' repository URLs to discover packages for installation.
#'
#' The version of _Bioconductor_ in use is determined by the installed
#' version of a second package, BiocVersion. BiocVersion is installed
#' automatically during first use of `BiocManager::install()`. If
#' BiocVersion has not yet been installed, the version is determined
#' by code in base R.
#'
#' Options influencing package behavior (see `?options`, `?getOption`)
#' include:
#'
#' - `"repos"`, `"BiocManager.check_repositories"`: URLs of additional
#' repositories for use by `BiocManger::install()`. See `?repositories`.
#'
#' - `"pkgType"`: The default type of packages to be downloaded and
#' installed; see `?install.packages`.
#'
#' - `"timeout"`: The maximum time allowed for download of a single
#' package, in seconds. _BiocManager_ increases this to 300 seconds
#' to accommodate download of large BSgenome and other packages.
#'
#' System environment variables influencing package behavior include:
#'
#' - \env{BIOCONDUCTOR_ONLINE_VERSION_DIAGNOSIS} advanced
#' configuration to avoid _Bioconductor_ version checks. See
#' `?install`.
#'
#' - \env{BIOCONDUCTOR_CONFIG_FILE} for offline use of BiocManager
#' versioning functionality. See `?install`.
#'
#' - \env{BIOCONDUCTOR_USE_CONTAINER_REPOSITORY} opt out of binary package
#' installations. See `?containerRepository`.
#'
#' - \env{BIOCMANAGER_CHECK_REPOSITORIES} silence messages regarding
#' non-standard CRAN or Bioconductor repositories. See `?repositories`.
#'
#' - \env{BIOCMANAGER_SITE_REPOSITORY} configure a more permanent
#' `site_repository` input to `repositories()`. See `?repositories`.
#'
#' @md
#' @aliases BiocManager
#'
#' @examples
#' R.version.string
#' packageVersion("BiocManager")
#' if (requireNamespace("BiocVersion", quietly = TRUE))
#' packageVersion("BiocVersion")
#' BiocManager::version()
"_PACKAGE"
| /R/BiocManager-package.R | no_license | Bioconductor/BiocManager | R | false | false | 2,892 | r | #' @importFrom utils packageVersion contrib.url head
#' installed.packages sessionInfo tail
NULL
#' Install or update Bioconductor, CRAN, or GitHub packages
#'
#' This package provides tools for managing _Bioconductor_ and other
#' packages in a manner consistent with _Bioconductor_'s package
#' versioning and release system.
#'
#' @details
#'
#' Main functions are as follows; additional help is available for
#' each function, e.g., `?BiocManager::version`.
#'
#' - `BiocManager::install()`: Install or update packages from
#' _Bioconductor_, CRAN, and GitHub.
#'
#' - `BiocManager::version()`: Report the version of _Bioconductor_ in
#' use.
#'
#' - `BiocManager::available()`: Return a `character()` vector of
#' package names available (at `BiocManager::repositories()`) for
#' installation.
#'
#' - `BiocManager::valid()`: Determine whether installed packages are
#' from the same version of _Bioconductor_.
#'
#' - `BiocManager::repositories()`: _Bioconductor_ and other
#' repository URLs to discover packages for installation.
#'
#' The version of _Bioconductor_ in use is determined by the installed
#' version of a second package, BiocVersion. BiocVersion is installed
#' automatically during first use of `BiocManager::install()`. If
#' BiocVersion has not yet been installed, the version is determined
#' by code in base R.
#'
#' Options influencing package behavior (see `?options`, `?getOption`)
#' include:
#'
#' - `"repos"`, `"BiocManager.check_repositories"`: URLs of additional
#' repositories for use by `BiocManger::install()`. See `?repositories`.
#'
#' - `"pkgType"`: The default type of packages to be downloaded and
#' installed; see `?install.packages`.
#'
#' - `"timeout"`: The maximum time allowed for download of a single
#' package, in seconds. _BiocManager_ increases this to 300 seconds
#' to accommodate download of large BSgenome and other packages.
#'
#' System environment variables influencing package behavior include:
#'
#' - \env{BIOCONDUCTOR_ONLINE_VERSION_DIAGNOSIS} advanced
#' configuration to avoid _Bioconductor_ version checks. See
#' `?install`.
#'
#' - \env{BIOCONDUCTOR_CONFIG_FILE} for offline use of BiocManager
#' versioning functionality. See `?install`.
#'
#' - \env{BIOCONDUCTOR_USE_CONTAINER_REPOSITORY} opt out of binary package
#' installations. See `?containerRepository`.
#'
#' - \env{BIOCMANAGER_CHECK_REPOSITORIES} silence messages regarding
#' non-standard CRAN or Bioconductor repositories. See `?repositories`.
#'
#' - \env{BIOCMANAGER_SITE_REPOSITORY} configure a more permanent
#' `site_repository` input to `repositories()`. See `?repositories`.
#'
#' @md
#' @aliases BiocManager
#'
#' @examples
#' R.version.string
#' packageVersion("BiocManager")
#' if (requireNamespace("BiocVersion", quietly = TRUE))
#' packageVersion("BiocVersion")
#' BiocManager::version()
"_PACKAGE"
|
# TODO
# Map features: remove marker clustering, use filtering of PG from top menu
# Plots features: remove percentages from "Frequency of highly aggressive isolates" plot
function(input, output){
# load("./data/patho_objects.RData")
# load("./data/map_objects.RData") # load map objects created by samples_interactive_map
# Reactive material filtration ####
# react_map_samples <- reactiveValues()
# react_map_samples$samples <- reactive({
# # show_samples <- vector(mode = "list", length = 2L) %>% set_names("samples", "icons")
# show_samples <- map_samples %>%
# filter(between(Path_rating,input$PG[1], input$PG[2]))
# if (input$PGna) {
# show_samples <- map_samples %>%
# filter(is.na(Path_rating)) %>% bind_rows(show_samples)
# }
# # show_samples$icons <- awesomeIcons(
# # icon = 'leaf',
# # iconColor = ifelse(show_samples$samples$Genotyped=="Y",'black','white' ),
# # library = 'ion',
# # markerColor = show_samples$samples$marker_cols)
# return(show_samples)
# })
# react_map_samples$map_icons <- reactive({
# awesomeIcons(
# icon = 'leaf',
# iconColor = ifelse(react_map_samples$samples()$Genotyped=="Y",'black','white' ),
# library = 'ion',
# markerColor = react_map_samples$samples()$marker_cols
# #markerColor = ifelse(is.na(map_samples$Path_rating), 'lightgray', 'white') # make this dependant on the pathogenicity
# )
# })
#
# # isolate_map <- reactive({
# #
# # })
# # print list of input events (very useful for debugging the map/app)
output$inputs <-
renderPrint({reactiveValuesToList(input)})
# # Isolate map ####
# output$isolateMap <- renderLeaflet({
# leaflet(agro_zones_sf) %>%
# addProviderTiles("Esri.WorldImagery", group="Satellite") %>%
# addProviderTiles("Esri.WorldTopoMap", group="Topo") %>%
# addProviderTiles("Esri.WorldPhysical", group="Physical") %>% # causes issues with the labels
# addProviderTiles("Esri.NatGeoWorldMap", group="National Geographic") %>%
# setView(146.5, -30, zoom = 5) %>% # -30.155307082559347, 146.48388609655257 ; central Australia: 133.7751, -27
# addProviderTiles("CartoDB.PositronOnlyLabels", group = "labels") %>%
# addAwesomeMarkers(
# lng = map_samples$lon,
# lat = map_samples$lat,
# popup = map_samples$popup_text,
# icon = icons,
# clusterOptions = markerClusterOptions(), group = "markers"
# ) %>%
# addScaleBar(position = "bottomright",
# options = scaleBarOptions(imperial = FALSE)) %>%
# addEasyButton(easyButton(
# icon = "fa-globe",
# title = "Reset Map Zoom",
# onClick = JS("function(btn, map){ map.setView([-30, 146.5], 5); }")
# )) %>%
# add_agro_layer(.) %>%
# addLayersControl(
# position = "bottomright",
# baseGroups = c("Satellite", "Physical", "Topo", "National Geographic"),
# overlayGroups = c("Agroecozones"), # , "labels"
# # "May Rain",
# # "June Rain",
# # "July Rain",
# # "August Rain"),
# options = layersControlOptions(collapsed = TRUE)) %>%
# hideGroup("Agroecozones")
# })
# # observe changes in the map
# # observe({
# # LogMsg("Showing", input$PG, "PG filtering\n")
# # })
# observe({
#
#
# proxy <- leafletProxy("isolateMap")
# if (any(c("Satellite", "Physical") %in% input$isolateMap_groups)){
# # c("Satellite", "Physical") %in% c("Satellite", "labels", "markers")
# proxy %>% showGroup("labels")
# } else {
# proxy %>% hideGroup("labels")
# }
# proxy %>% clearGroup("markers") %>%
# addAwesomeMarkers(
# lng = react_map_samples$samples()$lon,
# lat = react_map_samples$samples()$lat,
# popup = react_map_samples$samples()$popup_text,
# icon = react_map_samples$map_icons(),
# clusterOptions = markerClusterOptions(), group = "markers"
# )
# # Remove any existing legend, and only if the legend is
# # enabled, create a new one.
# # proxy %>% clearShapes() %>% removeTiles("agrozones") #%>% clearMarkers()
# # # proxy %>%
# # # addAwesomeMarkers(
# # # lng = react_map_samples()$lon,
# # # lat = react_map_samples()$lat,
# # # popup = ~ react_map_samples()$popup_text,
# # # icon = proxy_icons,
# # # clusterOptions = markerClusterOptions()
# # # )
# # if ("zones" %in% input$checkmaplayers) {
# # add_agro_layer(proxy)
# # }
# })
# # Zone summary table ####
# output$zoneSummary <- function() {
# zone_summary <- react_map_samples$samples() %>%
# group_by(lat, lon) %>%
# summarise(
# site_samples = n(),
# Genotyped = sum(Genotyped == "Y", na.rm=TRUE),
# Phenotyped = sum(Path_rating>=0, na.rm =TRUE)
# ) %>%
# ungroup() %>%
# inner_join(site_summary %>% dplyr::select(lat,lon,Zone)) %>%
# group_by(Zone) %>%
# summarise(Collected = sum(site_samples, na.rm = TRUE),
# Genotyped = sum(Genotyped, na.rm = TRUE),
# Phenotyped = sum(Phenotyped, na.rm = TRUE)
# ) %>% adorn_totals()
# kableExtra::kbl(zone_summary) %>% # rename(Genotyped = To_Genotype)) %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# font_size = 13) %>%
# row_spec(nrow(zone_summary), bold = T)
#
# }
#
# # Host frequency plot ####
# host_freq_plot <- function(){
# # define labels
# exlabel <- c("ICC3996", bquote("Genesis"^TM~"090")) # , "PBA~HatTrick~(PBR)", "PBA~Seamer~(PBR)")
# PBA_labels <- c("PBA HatTrick\n(PBR)", "PBA Seamer\n(PBR)")
# ggplot(data=host_response_data %>% filter(Diff_rating=="High"),
# aes(x=Year, y=freq, group = Diff_host,
# colour=factor(Diff_host, levels = c("ICC3996", "Genesis090", "HatTrick", "Seamer")))) +
# geom_line(size=1) + labs(y="Percentage of highly aggressive isolates", colour="Host") +
# scale_y_continuous(breaks = breaks_width(0.1),
# labels = label_percent(accuracy = 1, suffix = ""),
# expand = c(0,0), limits = c(0,1)) +
# # plot_theme(baseSize = 20) +
# scale_x_continuous(breaks=unique(host_response_data$Year)) +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year!=2014,
# Year<2018,freq>0),
# aes(label=sprintf("%.1f%%", freq*100)), size=4, nudge_y = 0.015,
# nudge_x = -0.15, colour="black") +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year==2014,freq>0),
# aes(label=sprintf("%.1f%%", freq*100)), size=4, nudge_y = 0.03,
# nudge_x = -0.15, colour="black") +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year==2018),
# aes(label=sprintf("%.1f%%", freq*100)), size=4, nudge_y = -0.015,
# nudge_x = 0.2, colour="black") +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year==2020),
# aes(label=sprintf("%.1f%%", freq*100)),
# size=4, nudge_x = 0.3, colour="black") +
# scale_colour_paletteer_d("ggthemes::excel_Office_2007_2010", name = "",
# labels= c(bquote(.(exlabel)), PBA_labels)) +
# clean_theme$theme +
# theme(axis.title.y = element_text(size = rel(0.85), face = "plain"),
# legend.spacing.y = unit(1,"cm"), panel.grid.major = element_blank())
# }
#
# output$host_freq <- renderPlot(height = 400,host_freq_plot())
# # ggsave(filedate("A_rabiei_highly_aggressive_isolates_2013-2020", ext = ".pdf",
# # outdir = "../output/", dateformat = FALSE), width = 10, height = 6)
#
#
# # Create the button to download the scatterplot as PDF
# output$downloadPlot1 <- downloadHandler(
# filename = function(plot_format=input$plotformat) {
# glue::glue('A_rabiei_isolate_frequency_{Sys.Date()}.{str_to_lower(plot_format)}')
# },
# content = function(file, plot_width = input$plotWidth, plot_height = input$plotHeight) {
# ggsave(file, host_freq_plot() +
# theme(plot.margin=margin(t = 12, r=6, b=6, l=6, unit = "pt")),
# # theme(plot.margin=margin(t = 1, r = 0.5, b = 0.5, l = 0.5, unit = "cm")),
# width = plot_width, height = plot_height,
# dpi = 300, units = "in")
# }
# )
#
# materials table ####
# tab_caption <- glue::glue("<i>Ascochyta rabiei</i> isolates collected as part of the GRDC projects GRI2007-001RTX and UM00052")
# samples %>% dplyr::select(Isolate=ISOLATE_ID, Sample=SAMPLE_ID, STATE, LOCATION, Farm=FARM_ID,
# Host=GENOTYPE) %>% rename_with(stringr::str_to_title) %>%
# left_join(patho_table %>% dplyr::select(-one_of(c("Pathotype", "Rating")))) %>%
# Load the proof-of-concept pricing data (one row per supplier quote).
PoC_data <- readxl::read_excel("data/PoC_Data.xlsx")
# One-off geocoding of supplier locations, kept for reference; its output was
# written to the "supp_coordinates" sheet, which is now read back directly.
# geodetails <- geocode(PoC_data$Supplier_Location , output = "latlon", messaging=TRUE) %>%
#   mutate(Supplier_Location = PoC_data$Supplier_Location) %>%
#   write_xlsx(., "data/PoC_Data.xlsx", sheet = "supp_coordinates")
geodetails <- readxl::read_excel("data/PoC_Data.xlsx", sheet = "supp_coordinates")
# Per-item summary used by the materials table: number of supplier quotes and
# mean quoted price in AUD (missing prices ignored).
material_table <- PoC_data %>% group_by(Item_ID, Item_Description) %>%
  summarise(Number_of_Suppliers = n(), Mean_Price_AUD = mean(Item_Price_AUD, na.rm=TRUE)) %>%
  ungroup()
# Interactive summary table of materials. server = FALSE ships the whole
# table to the client so the export buttons (copy/csv/excel/pdf/print) act
# on all rows, not just the visible page.
output$materialDT <- renderDT(server = FALSE, {
  display_names <- gsub("_", " ", names(material_table))
  DT::datatable(
    material_table,
    colnames = display_names,
    rownames = FALSE,
    style = 'bootstrap',
    class = 'table-bordered table-condensed',
    extensions = c('Buttons', 'Scroller'),
    options = list(
      dom = 'ftSiB',
      buttons = c('copy', 'csv', 'excel', 'pdf', 'print'),
      deferRender = TRUE, scrollY = 500, scroller = TRUE
    )
  )
})
# material prices plot ####
# Reactive subset of PoC_data for the item(s) currently selected in the
# materials table. With no selection, input$materialDT_rows_selected is
# NULL, the lookup yields character(0), and the filter returns zero rows.
material_plot_data <- reactive({
  chosen_rows <- input$materialDT_rows_selected
  chosen_items <- material_table$Item_Description[chosen_rows]
  PoC_data %>% filter(Item_Description %in% chosen_items)
})
# Histogram of quoted prices for the selected item(s), one facet per item
# description. Returns NULL when no material is selected (zero rows).
material_price_dist <- function(){
  plot_df <- material_plot_data()
  if (nrow(plot_df) == 0) return(NULL)
  ggplot(plot_df, aes(x = Item_Price_AUD)) +
    geom_histogram(fill = "lightskyblue") +
    facet_wrap(~Item_Description, ncol = 1) +
    labs(y="Count", x="Item Price (AUD)") +
    guides(fill="none") +
    clean_theme$theme
}
# Render the price-distribution plot for the selected material (fixed
# 300px height). material_price_dist() returns NULL when no table row is
# selected; adding theme() to NULL raises "non-numeric argument to binary
# operator" inside renderPlot, so guard and return NULL (blank plot area).
output$price_plot <- renderPlot(height = 300,{
  p <- material_price_dist()
  if (is.null(p)) return(NULL)
  p + theme(aspect.ratio = 1/2, plot.margin=margin(0,0,0,0, "mm"))
})
# # Pathogenicity Group frequency plot ####
# yearly_patho_groups <- function(){
# plot_data <- patho_sum %>% mutate(Patho_group=sub("Group", "", Pathotype))
# patho_labs <- plot_data %>% filter(Pathotype=="Group0") %>% mutate(label_pos=1-freq)
# ggplot(plot_data, aes(x=Year, y=freq, fill=Patho_group)) +
# geom_bar(stat = 'identity', width = 0.7) + labs(fill="Pathog. Group") +
# scale_fill_brewer(palette = "Spectral", direction = -1) +
# scale_x_continuous(breaks= as.integer(unique(plot_data$Year))) +
# geom_text(data =patho_labs, aes(x=Year, y=0.94, label=glue::glue("n={pop}")),
# size=5, colour="white") +
# # geom_text(data =patho_labs, aes(x=Year, y=label_pos, label=label), size=4.5, nudge_y = 0.065) +
# labs(y="Frequency") +
# coord_flip() + clean_theme$theme
# }
# output$patho_plot <- renderPlot(height = 400,{
# yearly_patho_groups() +
# theme(aspect.ratio = 1/2, plot.margin=margin(0,0,0,0, "mm"))
# # ggsave(filedate("A_rabiei_pathogen_groups_2013-2020_horiz", ext = ".pdf",
# # outdir = "../output/", dateformat = FALSE),
# # width =10, height = 8)
# })
#
# # Create the button to download the scatterplot as PDF
# output$downloadPlot2 <- downloadHandler(
# filename = function(plot_format=input$plot2format) {
# glue::glue('A_rabiei_yearly_PG_frequency_{Sys.Date()}.{str_to_lower(plot_format)}')
# },
# content = function(file, plot_width = input$plot2Width, plot_height = input$plot2Height) {
# ggsave(file, yearly_patho_groups() +
# theme(plot.margin=margin(t = 12, r=6, b=6, l=6, unit = "pt")),
# width = plot_width, height = plot_height,
# dpi = 300, units = "in")
# }
# )
} | /server.R | no_license | IdoBar/material_dash | R | false | false | 13,332 | r | # TODO
# Map features: remove marker clustering, use filtering of PG from top menu
# Plots features: remove percentages from "Frequency of highly aggressive isolates" plot
function(input, output){
# load("./data/patho_objects.RData")
# load("./data/map_objects.RData") # load map objects created by samples_interactive_map
# Reactive material filtration ####
# react_map_samples <- reactiveValues()
# react_map_samples$samples <- reactive({
# # show_samples <- vector(mode = "list", length = 2L) %>% set_names("samples", "icons")
# show_samples <- map_samples %>%
# filter(between(Path_rating,input$PG[1], input$PG[2]))
# if (input$PGna) {
# show_samples <- map_samples %>%
# filter(is.na(Path_rating)) %>% bind_rows(show_samples)
# }
# # show_samples$icons <- awesomeIcons(
# # icon = 'leaf',
# # iconColor = ifelse(show_samples$samples$Genotyped=="Y",'black','white' ),
# # library = 'ion',
# # markerColor = show_samples$samples$marker_cols)
# return(show_samples)
# })
# react_map_samples$map_icons <- reactive({
# awesomeIcons(
# icon = 'leaf',
# iconColor = ifelse(react_map_samples$samples()$Genotyped=="Y",'black','white' ),
# library = 'ion',
# markerColor = react_map_samples$samples()$marker_cols
# #markerColor = ifelse(is.na(map_samples$Path_rating), 'lightgray', 'white') # make this dependant on the pathogenicity
# )
# })
#
# # isolate_map <- reactive({
# #
# # })
# # print list of input events (very useful for debugging the map/app)
# Debug helper: dump the full list of current input values; useful for
# watching which input events fire while developing the app.
output$inputs <- renderPrint({
  reactiveValuesToList(input)
})
# # Isolate map ####
# output$isolateMap <- renderLeaflet({
# leaflet(agro_zones_sf) %>%
# addProviderTiles("Esri.WorldImagery", group="Satellite") %>%
# addProviderTiles("Esri.WorldTopoMap", group="Topo") %>%
# addProviderTiles("Esri.WorldPhysical", group="Physical") %>% # causes issues with the labels
# addProviderTiles("Esri.NatGeoWorldMap", group="National Geographic") %>%
# setView(146.5, -30, zoom = 5) %>% # -30.155307082559347, 146.48388609655257 ; central Australia: 133.7751, -27
# addProviderTiles("CartoDB.PositronOnlyLabels", group = "labels") %>%
# addAwesomeMarkers(
# lng = map_samples$lon,
# lat = map_samples$lat,
# popup = map_samples$popup_text,
# icon = icons,
# clusterOptions = markerClusterOptions(), group = "markers"
# ) %>%
# addScaleBar(position = "bottomright",
# options = scaleBarOptions(imperial = FALSE)) %>%
# addEasyButton(easyButton(
# icon = "fa-globe",
# title = "Reset Map Zoom",
# onClick = JS("function(btn, map){ map.setView([-30, 146.5], 5); }")
# )) %>%
# add_agro_layer(.) %>%
# addLayersControl(
# position = "bottomright",
# baseGroups = c("Satellite", "Physical", "Topo", "National Geographic"),
# overlayGroups = c("Agroecozones"), # , "labels"
# # "May Rain",
# # "June Rain",
# # "July Rain",
# # "August Rain"),
# options = layersControlOptions(collapsed = TRUE)) %>%
# hideGroup("Agroecozones")
# })
# # observe changes in the map
# # observe({
# # LogMsg("Showing", input$PG, "PG filtering\n")
# # })
# observe({
#
#
# proxy <- leafletProxy("isolateMap")
# if (any(c("Satellite", "Physical") %in% input$isolateMap_groups)){
# # c("Satellite", "Physical") %in% c("Satellite", "labels", "markers")
# proxy %>% showGroup("labels")
# } else {
# proxy %>% hideGroup("labels")
# }
# proxy %>% clearGroup("markers") %>%
# addAwesomeMarkers(
# lng = react_map_samples$samples()$lon,
# lat = react_map_samples$samples()$lat,
# popup = react_map_samples$samples()$popup_text,
# icon = react_map_samples$map_icons(),
# clusterOptions = markerClusterOptions(), group = "markers"
# )
# # Remove any existing legend, and only if the legend is
# # enabled, create a new one.
# # proxy %>% clearShapes() %>% removeTiles("agrozones") #%>% clearMarkers()
# # # proxy %>%
# # # addAwesomeMarkers(
# # # lng = react_map_samples()$lon,
# # # lat = react_map_samples()$lat,
# # # popup = ~ react_map_samples()$popup_text,
# # # icon = proxy_icons,
# # # clusterOptions = markerClusterOptions()
# # # )
# # if ("zones" %in% input$checkmaplayers) {
# # add_agro_layer(proxy)
# # }
# })
# # Zone summary table ####
# output$zoneSummary <- function() {
# zone_summary <- react_map_samples$samples() %>%
# group_by(lat, lon) %>%
# summarise(
# site_samples = n(),
# Genotyped = sum(Genotyped == "Y", na.rm=TRUE),
# Phenotyped = sum(Path_rating>=0, na.rm =TRUE)
# ) %>%
# ungroup() %>%
# inner_join(site_summary %>% dplyr::select(lat,lon,Zone)) %>%
# group_by(Zone) %>%
# summarise(Collected = sum(site_samples, na.rm = TRUE),
# Genotyped = sum(Genotyped, na.rm = TRUE),
# Phenotyped = sum(Phenotyped, na.rm = TRUE)
# ) %>% adorn_totals()
# kableExtra::kbl(zone_summary) %>% # rename(Genotyped = To_Genotype)) %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# font_size = 13) %>%
# row_spec(nrow(zone_summary), bold = T)
#
# }
#
# # Host frequency plot ####
# host_freq_plot <- function(){
# # define labels
# exlabel <- c("ICC3996", bquote("Genesis"^TM~"090")) # , "PBA~HatTrick~(PBR)", "PBA~Seamer~(PBR)")
# PBA_labels <- c("PBA HatTrick\n(PBR)", "PBA Seamer\n(PBR)")
# ggplot(data=host_response_data %>% filter(Diff_rating=="High"),
# aes(x=Year, y=freq, group = Diff_host,
# colour=factor(Diff_host, levels = c("ICC3996", "Genesis090", "HatTrick", "Seamer")))) +
# geom_line(size=1) + labs(y="Percentage of highly aggressive isolates", colour="Host") +
# scale_y_continuous(breaks = breaks_width(0.1),
# labels = label_percent(accuracy = 1, suffix = ""),
# expand = c(0,0), limits = c(0,1)) +
# # plot_theme(baseSize = 20) +
# scale_x_continuous(breaks=unique(host_response_data$Year)) +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year!=2014,
# Year<2018,freq>0),
# aes(label=sprintf("%.1f%%", freq*100)), size=4, nudge_y = 0.015,
# nudge_x = -0.15, colour="black") +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year==2014,freq>0),
# aes(label=sprintf("%.1f%%", freq*100)), size=4, nudge_y = 0.03,
# nudge_x = -0.15, colour="black") +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year==2018),
# aes(label=sprintf("%.1f%%", freq*100)), size=4, nudge_y = -0.015,
# nudge_x = 0.2, colour="black") +
# geom_text(data=host_response_data %>% filter(Diff_rating=="High", Year==2020),
# aes(label=sprintf("%.1f%%", freq*100)),
# size=4, nudge_x = 0.3, colour="black") +
# scale_colour_paletteer_d("ggthemes::excel_Office_2007_2010", name = "",
# labels= c(bquote(.(exlabel)), PBA_labels)) +
# clean_theme$theme +
# theme(axis.title.y = element_text(size = rel(0.85), face = "plain"),
# legend.spacing.y = unit(1,"cm"), panel.grid.major = element_blank())
# }
#
# output$host_freq <- renderPlot(height = 400,host_freq_plot())
# # ggsave(filedate("A_rabiei_highly_aggressive_isolates_2013-2020", ext = ".pdf",
# # outdir = "../output/", dateformat = FALSE), width = 10, height = 6)
#
#
# # Create the button to download the scatterplot as PDF
# output$downloadPlot1 <- downloadHandler(
# filename = function(plot_format=input$plotformat) {
# glue::glue('A_rabiei_isolate_frequency_{Sys.Date()}.{str_to_lower(plot_format)}')
# },
# content = function(file, plot_width = input$plotWidth, plot_height = input$plotHeight) {
# ggsave(file, host_freq_plot() +
# theme(plot.margin=margin(t = 12, r=6, b=6, l=6, unit = "pt")),
# # theme(plot.margin=margin(t = 1, r = 0.5, b = 0.5, l = 0.5, unit = "cm")),
# width = plot_width, height = plot_height,
# dpi = 300, units = "in")
# }
# )
#
# materials table ####
# tab_caption <- glue::glue("<i>Ascochyta rabiei</i> isolates collected as part of the GRDC projects GRI2007-001RTX and UM00052")
# samples %>% dplyr::select(Isolate=ISOLATE_ID, Sample=SAMPLE_ID, STATE, LOCATION, Farm=FARM_ID,
# Host=GENOTYPE) %>% rename_with(stringr::str_to_title) %>%
# left_join(patho_table %>% dplyr::select(-one_of(c("Pathotype", "Rating")))) %>%
# Load the proof-of-concept pricing data (one row per supplier quote).
PoC_data <- readxl::read_excel("data/PoC_Data.xlsx")
# One-off geocoding of supplier locations, kept for reference; its output was
# written to the "supp_coordinates" sheet, which is now read back directly.
# geodetails <- geocode(PoC_data$Supplier_Location , output = "latlon", messaging=TRUE) %>%
#   mutate(Supplier_Location = PoC_data$Supplier_Location) %>%
#   write_xlsx(., "data/PoC_Data.xlsx", sheet = "supp_coordinates")
geodetails <- readxl::read_excel("data/PoC_Data.xlsx", sheet = "supp_coordinates")
# Per-item summary used by the materials table: number of supplier quotes and
# mean quoted price in AUD (missing prices ignored).
material_table <- PoC_data %>% group_by(Item_ID, Item_Description) %>%
  summarise(Number_of_Suppliers = n(), Mean_Price_AUD = mean(Item_Price_AUD, na.rm=TRUE)) %>%
  ungroup()
# Interactive summary table of materials. server = FALSE ships the whole
# table to the client so the export buttons (copy/csv/excel/pdf/print) act
# on all rows, not just the visible page.
output$materialDT <- renderDT(server = FALSE, {
  display_names <- gsub("_", " ", names(material_table))
  DT::datatable(
    material_table,
    colnames = display_names,
    rownames = FALSE,
    style = 'bootstrap',
    class = 'table-bordered table-condensed',
    extensions = c('Buttons', 'Scroller'),
    options = list(
      dom = 'ftSiB',
      buttons = c('copy', 'csv', 'excel', 'pdf', 'print'),
      deferRender = TRUE, scrollY = 500, scroller = TRUE
    )
  )
})
# material prices plot ####
# Reactive subset of PoC_data for the item(s) currently selected in the
# materials table. With no selection, input$materialDT_rows_selected is
# NULL, the lookup yields character(0), and the filter returns zero rows.
material_plot_data <- reactive({
  chosen_rows <- input$materialDT_rows_selected
  chosen_items <- material_table$Item_Description[chosen_rows]
  PoC_data %>% filter(Item_Description %in% chosen_items)
})
# Histogram of quoted prices for the selected item(s), one facet per item
# description. Returns NULL when no material is selected (zero rows).
material_price_dist <- function(){
  plot_df <- material_plot_data()
  if (nrow(plot_df) == 0) return(NULL)
  ggplot(plot_df, aes(x = Item_Price_AUD)) +
    geom_histogram(fill = "lightskyblue") +
    facet_wrap(~Item_Description, ncol = 1) +
    labs(y="Count", x="Item Price (AUD)") +
    guides(fill="none") +
    clean_theme$theme
}
# Render the price-distribution plot for the selected material (fixed
# 300px height). material_price_dist() returns NULL when no table row is
# selected; adding theme() to NULL raises "non-numeric argument to binary
# operator" inside renderPlot, so guard and return NULL (blank plot area).
output$price_plot <- renderPlot(height = 300,{
  p <- material_price_dist()
  if (is.null(p)) return(NULL)
  p + theme(aspect.ratio = 1/2, plot.margin=margin(0,0,0,0, "mm"))
})
# # Pathogenicity Group frequency plot ####
# yearly_patho_groups <- function(){
# plot_data <- patho_sum %>% mutate(Patho_group=sub("Group", "", Pathotype))
# patho_labs <- plot_data %>% filter(Pathotype=="Group0") %>% mutate(label_pos=1-freq)
# ggplot(plot_data, aes(x=Year, y=freq, fill=Patho_group)) +
# geom_bar(stat = 'identity', width = 0.7) + labs(fill="Pathog. Group") +
# scale_fill_brewer(palette = "Spectral", direction = -1) +
# scale_x_continuous(breaks= as.integer(unique(plot_data$Year))) +
# geom_text(data =patho_labs, aes(x=Year, y=0.94, label=glue::glue("n={pop}")),
# size=5, colour="white") +
# # geom_text(data =patho_labs, aes(x=Year, y=label_pos, label=label), size=4.5, nudge_y = 0.065) +
# labs(y="Frequency") +
# coord_flip() + clean_theme$theme
# }
# output$patho_plot <- renderPlot(height = 400,{
# yearly_patho_groups() +
# theme(aspect.ratio = 1/2, plot.margin=margin(0,0,0,0, "mm"))
# # ggsave(filedate("A_rabiei_pathogen_groups_2013-2020_horiz", ext = ".pdf",
# # outdir = "../output/", dateformat = FALSE),
# # width =10, height = 8)
# })
#
# # Create the button to download the scatterplot as PDF
# output$downloadPlot2 <- downloadHandler(
# filename = function(plot_format=input$plot2format) {
# glue::glue('A_rabiei_yearly_PG_frequency_{Sys.Date()}.{str_to_lower(plot_format)}')
# },
# content = function(file, plot_width = input$plot2Width, plot_height = input$plot2Height) {
# ggsave(file, yearly_patho_groups() +
# theme(plot.margin=margin(t = 12, r=6, b=6, l=6, unit = "pt")),
# width = plot_width, height = plot_height,
# dpi = 300, units = "in")
# }
# )
} |
library(quhomology)
### Name: up_action
### Title: The up action for a birack or biquandle.
### Aliases: up_action
### ** Examples
## Example for version with function (for a dihedral quandle)
# Up action of a dihedral quandle: the element b acting on a, modulo k.
# Returns an integer in 0..(k-1).
up_action <- function (a, b, k){
  reflected <- (2 * b - a) %% k
  as.integer(reflected)
}
##Example for matrix lookup (for commutative quandle over S_3, in which case k = 6)
# Up action for the commutative quandle over S_3 (k is fixed at 6): the
# result is read from a precomputed 6x6 action table, indexing (a, b)
# shifted to 1-based rows/columns. The k argument is kept only for
# interface compatibility with the functional version.
up_action <- function (a, b, k){
  lookup <- rbind(
    c(0, 0, 0, 0, 0, 0),
    c(1, 1, 5, 5, 2, 2),
    c(2, 5, 2, 1, 5, 1),
    c(3, 4, 4, 3, 4, 4),
    c(4, 3, 3, 3, 4, 3),
    c(5, 2, 1, 2, 1, 5)
  )
  as.integer(lookup[a + 1, b + 1])
}
| /data/genthat_extracted_code/quhomology/examples/up_action.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 650 | r | library(quhomology)
### Name: up_action
### Title: The up action for a birack or biquandle.
### Aliases: up_action
### ** Examples
## Example for version with function (for a dihedral quandle)
# Up action of a dihedral quandle: the element b acting on a, modulo k.
# Returns an integer in 0..(k-1).
up_action <- function (a, b, k){
  reflected <- (2 * b - a) %% k
  as.integer(reflected)
}
##Example for matrix lookup (for commutative quandle over S_3, in which case k = 6)
# Up action for the commutative quandle over S_3 (k is fixed at 6): the
# result is read from a precomputed 6x6 action table, indexing (a, b)
# shifted to 1-based rows/columns. The k argument is kept only for
# interface compatibility with the functional version.
up_action <- function (a, b, k){
  lookup <- rbind(
    c(0, 0, 0, 0, 0, 0),
    c(1, 1, 5, 5, 2, 2),
    c(2, 5, 2, 1, 5, 1),
    c(3, 4, 4, 3, 4, 4),
    c(4, 3, 3, 3, 4, 3),
    c(5, 2, 1, 2, 1, 5)
  )
  as.integer(lookup[a + 1, b + 1])
}
|
library(CEC)
### Name: mixShapes
### Title: mixShapes
### Aliases: mixShapes
### Keywords: datasets
### ** Examples
# Load the bundled mixShapes example dataset from the CEC package and draw
# a quick scatterplot of the points (small filled circles).
data(mixShapes)
plot(mixShapes, cex = 0.5, pch = 19);
| /data/genthat_extracted_code/CEC/examples/mixShapes.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 177 | r | library(CEC)
### Name: mixShapes
### Title: mixShapes
### Aliases: mixShapes
### Keywords: datasets
### ** Examples
# Load the bundled mixShapes example dataset from the CEC package and draw
# a quick scatterplot of the points (small filled circles).
data(mixShapes)
plot(mixShapes, cex = 0.5, pch = 19);
|
#考虑过整体设限的风险控制方案,比如说将现有持仓的向量乘以相关性矩阵,然后就可以判断整个持仓有没有超过风险限制,
#但是这么做有一个问题,那就是判断出持仓风险之后无法有效地设定反向建仓的规则,同时,这种办法无法有效地识别“
#同向开仓不能超过12个Unit“的问题。
#一个新的思路是当判定出爆仓之后,从holding的记录上面逐条删除重新判定,也就是说一共有两套方程:
#1. 加仓的时候用循环判定加仓是在范围内的,一旦超出范围,撤销上一个加仓并且转向下一个(暂时不考虑优先加信号强的仓)。
#2. 每回合开始的资金控制,一旦判定不及格就以倒叙的形式减仓,那就是说“standing_contract”的顺序往回减并计入损失。
library(rlist) #list.append用到了
#建仓判定:
enter_date = NA #中转日期
direction = NA #中转合约方向
enter_price = NA #中转入场价
cut_point = NA #中转止损价
no_contract = NA #中转合约数量
#1.生成交易单
long_plan <- sig_long * units #The aggregate plan of how many *contracts(not tons)* should be add
t_position = copy(position) #建立测试仓位用的向量
for (j in 1:length(product_ids)){
if (long_plan[j] == 0) next #节省运算时间,跳过没有买入计划的产品
t_position[j] = floor((t_position[j] + long_plan[j])/units[j]) #计算
#test 1: any direction ,single holding should be less than 4
if (any(abs(t_position) > 4)) {
#test 2: any direction, combination of strong corr assets should be less than 6
}else if (any(abs(t_position %*% corr_mat$clscorr) > 6)){
#test 3: any direction, combination of losely corr assets should be less than 10
}else if (any(abs(t_position %*% corr_mat$lslcorr) > 10)){
#test 4: any direction, total holding should be less than 12
}else if (abs(sum(t_position)) > 12){
}else {
position[j] <- t_position[j]
holding[j] <- holding[j] + long_plan[j]
enter_date <- cdt[[1]][ptr]
direction <- 1L
enter_price <- cdt[[15 + (j-1) * 15]][ptr] + slippage[j]
fee <- fee + enter_price * vm[j] * fee.rate[j]
cut <- enter_price - 2 * cdt[[9+(j-1)*15]][ptr]
contract <- list(enter_date = enter_date,
direction = direction,
enter_price = enter_price,
cut_point = cut,
no_contract = long_plan[j]
)
standing_contract = list.append(standing_contract,contract)
cash <- cash - enter_price - fee
}
}#开仓loop
| /long_algorythm.r | no_license | SidGor/turtle_project | R | false | false | 2,686 | r |
#考虑过整体设限的风险控制方案,比如说将现有持仓的向量乘以相关性矩阵,然后就可以判断整个持仓有没有超过风险限制,
#但是这么做有一个问题,那就是判断出持仓风险之后无法有效地设定反向建仓的规则,同时,这种办法无法有效地识别“
#同向开仓不能超过12个Unit“的问题。
#一个新的思路是当判定出爆仓之后,从holding的记录上面逐条删除重新判定,也就是说一共有两套方程:
#1. 加仓的时候用循环判定加仓是在范围内的,一旦超出范围,撤销上一个加仓并且转向下一个(暂时不考虑优先加信号强的仓)。
#2. 每回合开始的资金控制,一旦判定不及格就以倒叙的形式减仓,那就是说“standing_contract”的顺序往回减并计入损失。
library(rlist) #list.append用到了
#建仓判定:
enter_date = NA #中转日期
direction = NA #中转合约方向
enter_price = NA #中转入场价
cut_point = NA #中转止损价
no_contract = NA #中转合约数量
#1.生成交易单
long_plan <- sig_long * units #The aggregate plan of how many *contracts(not tons)* should be add
t_position = copy(position) #建立测试仓位用的向量
for (j in 1:length(product_ids)){
if (long_plan[j] == 0) next #节省运算时间,跳过没有买入计划的产品
t_position[j] = floor((t_position[j] + long_plan[j])/units[j]) #计算
#test 1: any direction ,single holding should be less than 4
if (any(abs(t_position) > 4)) {
#test 2: any direction, combination of strong corr assets should be less than 6
}else if (any(abs(t_position %*% corr_mat$clscorr) > 6)){
#test 3: any direction, combination of losely corr assets should be less than 10
}else if (any(abs(t_position %*% corr_mat$lslcorr) > 10)){
#test 4: any direction, total holding should be less than 12
}else if (abs(sum(t_position)) > 12){
}else {
position[j] <- t_position[j]
holding[j] <- holding[j] + long_plan[j]
enter_date <- cdt[[1]][ptr]
direction <- 1L
enter_price <- cdt[[15 + (j-1) * 15]][ptr] + slippage[j]
fee <- fee + enter_price * vm[j] * fee.rate[j]
cut <- enter_price - 2 * cdt[[9+(j-1)*15]][ptr]
contract <- list(enter_date = enter_date,
direction = direction,
enter_price = enter_price,
cut_point = cut,
no_contract = long_plan[j]
)
standing_contract = list.append(standing_contract,contract)
cash <- cash - enter_price - fee
}
}#开仓loop
|
## Functions needed to implement the ML approach:
# Probability that individual A beats B given their Elo scores
# (standard logistic win probability).
pAfun <- function(EloA, EloB){
  score_diff <- EloA - EloB
  1/(1 + exp(-score_diff))
}
# Run the Elo update over a sequence of dyadic interactions.
#
# EloStart_logk: numeric vector whose last element is log(k); the remaining
#   elements are the starting Elo scores (centred to mean zero before use).
# X: one row per interaction; the winner's column holds 1, the loser's -1.
# show_k: if TRUE, print the current k to the console (overwriting in place).
#
# Returns a list with pA (the winner's win probability before each
# interaction) and Elo (a matrix of scores after each interaction).
Elo_pA <- function(EloStart_logk, X, show_k = FALSE){
  n_obs <- nrow(X)
  n_ids <- length(EloStart_logk) - 1L
  # Split the parameter vector: scores first, log(k) last.
  scores <- EloStart_logk[seq_len(n_ids)]
  scores <- scores - mean(scores)
  k <- exp(EloStart_logk[n_ids + 1L])
  if (show_k) {
    cat(paste0(round(k, 3), paste(rep(" ", 20), collapse = " ")))
    cat("\r")
  }
  Elo <- matrix(0, nrow = n_obs, ncol = n_ids)
  colnames(Elo) <- colnames(X)
  pA <- rep(0, n_obs)
  for (row in 1:n_obs) {
    winner <- which(X[row, ] == 1)
    loser <- which(X[row, ] == -1)
    # Logistic win probability of the winner given current scores
    # (inlined pAfun).
    pA[row] <- 1/(1 + exp(scores[loser] - scores[winner]))
    delta <- (1 - pA[row]) * k
    scores[winner] <- scores[winner] + delta
    scores[loser] <- scores[loser] - delta
    Elo[row, ] <- scores
  }
  list(pA = pA, Elo = Elo)
}
# Negative log-likelihood of the observed outcomes under the Elo model
# parameterised by EloStart_logk (starting scores plus log(k)).
# NOTE(review): this masks the base stats::logLik generic.
logLik <- function(EloStart_logk, X, show_k = FALSE){
  fit <- Elo_pA(EloStart_logk = EloStart_logk, X = X, show_k = show_k)
  -sum(log(fit$pA))
}
# Negative log-likelihood when all starting scores are fixed at zero and
# only log(k) is estimated (model 1).
logLik_model1 <- function(logk, X, show_k = FALSE){
  params <- c(rep(0, ncol(X)), logk)
  fit <- Elo_pA(EloStart_logk = params, X = X, show_k = show_k)
  -sum(log(fit$pA))
}
# Win probability with the Elo difference scaled by 0.01 (flatter
# logistic curve than pAfun).
pAfun_001factor <- function(EloA, EloB){
  scaled_diff <- 0.01 * (EloB - EloA)
  1/(1 + exp(scaled_diff))
}
# Same sequential Elo update as Elo_pA, but the win probability uses Elo
# differences scaled by 0.01 (see pAfun_001factor). Inputs and outputs
# match Elo_pA: EloStart_logk = starting scores plus log(k) (scores are
# centred); X has one row per interaction with winner 1 / loser -1.
Elo_pA_001factor <- function(EloStart_logk, X, show_k = FALSE){
  n_obs <- nrow(X)
  n_ids <- length(EloStart_logk) - 1L
  scores <- EloStart_logk[seq_len(n_ids)]
  scores <- scores - mean(scores)
  k <- exp(EloStart_logk[n_ids + 1L])
  if (show_k) {
    cat(paste0(round(k, 3), paste(rep(" ", 20), collapse = " ")))
    cat("\r")
  }
  Elo <- matrix(0, nrow = n_obs, ncol = n_ids)
  colnames(Elo) <- colnames(X)
  pA <- rep(0, n_obs)
  for (row in 1:n_obs) {
    winner <- which(X[row, ] == 1)
    loser <- which(X[row, ] == -1)
    # Scaled logistic win probability (inlined pAfun_001factor).
    pA[row] <- 1/(1 + exp(0.01*(scores[loser] - scores[winner])))
    delta <- (1 - pA[row]) * k
    scores[winner] <- scores[winner] + delta
    scores[loser] <- scores[loser] - delta
    Elo[row, ] <- scores
  }
  list(pA = pA, Elo = Elo)
}
# Negative log-likelihood under the 0.01-scaled Elo model.
logLik_001factor <- function(EloStart_logk, X, show_k = FALSE){
  fit <- Elo_pA_001factor(EloStart_logk = EloStart_logk, X = X, show_k = show_k)
  -sum(log(fit$pA))
}
# Probability that individual A beats B given their Elo scores
# (standard logistic win probability).
pAfun <- function(EloA, EloB){
  score_diff <- EloA - EloB
  1/(1 + exp(-score_diff))
}
# Run the Elo update over a sequence of dyadic interactions.
#
# EloStart_logk: numeric vector whose last element is log(k); the remaining
#   elements are the starting Elo scores (centred to mean zero before use).
# X: one row per interaction; the winner's column holds 1, the loser's -1.
# show_k: if TRUE, print the current k to the console (overwriting in place).
#
# Returns a list with pA (the winner's win probability before each
# interaction) and Elo (a matrix of scores after each interaction).
Elo_pA <- function(EloStart_logk, X, show_k = FALSE){
  n_obs <- nrow(X)
  n_ids <- length(EloStart_logk) - 1L
  # Split the parameter vector: scores first, log(k) last.
  scores <- EloStart_logk[seq_len(n_ids)]
  scores <- scores - mean(scores)
  k <- exp(EloStart_logk[n_ids + 1L])
  if (show_k) {
    cat(paste0(round(k, 3), paste(rep(" ", 20), collapse = " ")))
    cat("\r")
  }
  Elo <- matrix(0, nrow = n_obs, ncol = n_ids)
  colnames(Elo) <- colnames(X)
  pA <- rep(0, n_obs)
  for (row in 1:n_obs) {
    winner <- which(X[row, ] == 1)
    loser <- which(X[row, ] == -1)
    # Logistic win probability of the winner given current scores
    # (inlined pAfun).
    pA[row] <- 1/(1 + exp(scores[loser] - scores[winner]))
    delta <- (1 - pA[row]) * k
    scores[winner] <- scores[winner] + delta
    scores[loser] <- scores[loser] - delta
    Elo[row, ] <- scores
  }
  list(pA = pA, Elo = Elo)
}
# Negative log-likelihood of the observed outcomes under the Elo model
# parameterised by EloStart_logk (starting scores plus log(k)).
# NOTE(review): this masks the base stats::logLik generic.
logLik <- function(EloStart_logk, X, show_k = FALSE){
  fit <- Elo_pA(EloStart_logk = EloStart_logk, X = X, show_k = show_k)
  -sum(log(fit$pA))
}
# Negative log-likelihood when all starting scores are fixed at zero and
# only log(k) is estimated (model 1).
logLik_model1 <- function(logk, X, show_k = FALSE){
  params <- c(rep(0, ncol(X)), logk)
  fit <- Elo_pA(EloStart_logk = params, X = X, show_k = show_k)
  -sum(log(fit$pA))
}
# Win probability with the Elo difference scaled by 0.01 (flatter
# logistic curve than pAfun).
pAfun_001factor <- function(EloA, EloB){
  scaled_diff <- 0.01 * (EloB - EloA)
  1/(1 + exp(scaled_diff))
}
# Same sequential Elo update as Elo_pA, but the win probability uses Elo
# differences scaled by 0.01 (see pAfun_001factor). Inputs and outputs
# match Elo_pA: EloStart_logk = starting scores plus log(k) (scores are
# centred); X has one row per interaction with winner 1 / loser -1.
Elo_pA_001factor <- function(EloStart_logk, X, show_k = FALSE){
  n_obs <- nrow(X)
  n_ids <- length(EloStart_logk) - 1L
  scores <- EloStart_logk[seq_len(n_ids)]
  scores <- scores - mean(scores)
  k <- exp(EloStart_logk[n_ids + 1L])
  if (show_k) {
    cat(paste0(round(k, 3), paste(rep(" ", 20), collapse = " ")))
    cat("\r")
  }
  Elo <- matrix(0, nrow = n_obs, ncol = n_ids)
  colnames(Elo) <- colnames(X)
  pA <- rep(0, n_obs)
  for (row in 1:n_obs) {
    winner <- which(X[row, ] == 1)
    loser <- which(X[row, ] == -1)
    # Scaled logistic win probability (inlined pAfun_001factor).
    pA[row] <- 1/(1 + exp(0.01*(scores[loser] - scores[winner])))
    delta <- (1 - pA[row]) * k
    scores[winner] <- scores[winner] + delta
    scores[loser] <- scores[loser] - delta
    Elo[row, ] <- scores
  }
  list(pA = pA, Elo = Elo)
}
# Negative log-likelihood under the 0.01-scaled Elo model.
logLik_001factor <- function(EloStart_logk, X, show_k = FALSE){
  fit <- Elo_pA_001factor(EloStart_logk = EloStart_logk, X = X, show_k = show_k)
  -sum(log(fit$pA))
}
# plot4.R
# assumes you have run cleanData.R first
# Draws a 2x2 panel (filled column by column) of household power
# measurements and saves it as plot4.png.
# NOTE(review): hard-coded absolute paths plus setwd() tie this script to
# one machine; consider parameterising the project root.
home.path <- "~/Dropbox/Coursera/ExploratoryDataAnalysis/assignment1/ExData_Plotting1"
setwd(home.path)
tidydata.path <- "~/Dropbox/Coursera/ExploratoryDataAnalysis/assignment1/ExData_Plotting1/tidydata"
# Tidy dataset produced by cleanData.R; the code below reads the columns
# datetime, global_active_power, sub_metering_1..3, voltage and
# global_reactive_power.
dataset1 <- readRDS(file.path(tidydata.path, "tidydata_assign1.rds"))
png(file = "plot4.png")
# set general plotting parameters
par(mfcol=(c(2, 2)))
#plot 1: global active power; draw an empty frame first, add the line,
# then a custom x axis with abbreviated-weekday labels at daily ticks.
with(dataset1, plot(datetime, global_active_power, ylab="Global Active Power (kilowatts)", xlab="", main="", type="n", xaxt="n"))
with(dataset1, lines(datetime, global_active_power))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
#plot2: the three energy sub-metering series with a legend
with(dataset1, plot(datetime, sub_metering_1, ylab="Energy sub metering", xlab="", main="", type="n", xaxt="n"))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
with(dataset1, lines(datetime, sub_metering_1, col="black", lwd=1))
with(dataset1, lines(datetime, sub_metering_2, col="red", lwd=1))
with(dataset1, lines(datetime, sub_metering_3, col="blue", lwd=1))
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c("solid", "solid", "solid"), lwd=c(2, 2, 2), col=c("black", "red", "blue"), xjust=0, yjust=0, cex=0.8)
#plot3: voltage over time
with(dataset1, plot(datetime, voltage, ylab="Voltage", xlab="datetime", main="", type="n", xaxt="n"))
with(dataset1, lines(datetime, voltage))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
#plot4: global reactive power over time
with(dataset1, plot(datetime, global_reactive_power, ylab="Global_reactive_power", xlab="datetime", main="", type="n", xaxt="n"))
with(dataset1, lines(datetime, global_reactive_power))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
# Close the PNG device, flushing the file to disk.
dev.off()
# assumes you have run cleanData.R first
# Draws a 2x2 panel (filled column by column) of household power
# measurements and saves it as plot4.png.
# NOTE(review): hard-coded absolute paths plus setwd() tie this script to
# one machine; consider parameterising the project root.
home.path <- "~/Dropbox/Coursera/ExploratoryDataAnalysis/assignment1/ExData_Plotting1"
setwd(home.path)
tidydata.path <- "~/Dropbox/Coursera/ExploratoryDataAnalysis/assignment1/ExData_Plotting1/tidydata"
# Tidy dataset produced by cleanData.R; the code below reads the columns
# datetime, global_active_power, sub_metering_1..3, voltage and
# global_reactive_power.
dataset1 <- readRDS(file.path(tidydata.path, "tidydata_assign1.rds"))
png(file = "plot4.png")
# set general plotting parameters
par(mfcol=(c(2, 2)))
#plot 1: global active power; draw an empty frame first, add the line,
# then a custom x axis with abbreviated-weekday labels at daily ticks.
with(dataset1, plot(datetime, global_active_power, ylab="Global Active Power (kilowatts)", xlab="", main="", type="n", xaxt="n"))
with(dataset1, lines(datetime, global_active_power))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
#plot2: the three energy sub-metering series with a legend
with(dataset1, plot(datetime, sub_metering_1, ylab="Energy sub metering", xlab="", main="", type="n", xaxt="n"))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
with(dataset1, lines(datetime, sub_metering_1, col="black", lwd=1))
with(dataset1, lines(datetime, sub_metering_2, col="red", lwd=1))
with(dataset1, lines(datetime, sub_metering_3, col="blue", lwd=1))
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c("solid", "solid", "solid"), lwd=c(2, 2, 2), col=c("black", "red", "blue"), xjust=0, yjust=0, cex=0.8)
#plot3: voltage over time
with(dataset1, plot(datetime, voltage, ylab="Voltage", xlab="datetime", main="", type="n", xaxt="n"))
with(dataset1, lines(datetime, voltage))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
#plot4: global reactive power over time
with(dataset1, plot(datetime, global_reactive_power, ylab="Global_reactive_power", xlab="datetime", main="", type="n", xaxt="n"))
with(dataset1, lines(datetime, global_reactive_power))
r <- as.POSIXct(c("2007-02-01 00:00:00", "2007-02-03 00:00:00"), format="%Y-%m-%d %H:%M:%S", tz="")
axis.POSIXct(1, at = seq(r[1], r[2], by = "day"), format = "%a")
# Close the PNG device, flushing the file to disk.
dev.off()
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
##
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse `inv`:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()        -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # `<<-` assigns in the enclosing environment so the stored matrix and
    # cache survive across calls to the accessor functions.
    x <<- y
    inv <<- NULL  # a new matrix invalidates any previously cached inverse
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Write a short comment describing this function
## Compute (and cache) the inverse of the special "matrix" object returned
## by makeCacheMatrix.
##
## x   -- a cache object exposing get()/getinv()/setinv(); the underlying
##        matrix is assumed to be invertible
## ... -- additional arguments forwarded to solve()
##
## Returns the inverse, from the cache when available, otherwise freshly
## computed and stored for subsequent calls.
cacheSolve <- function(x, ...) {
  inv <- x$getinv()
  if (!is.null(inv)) {
    # Cache hit: skip the (potentially expensive) solve() call.
    return(inv)
  }
  mat.data <- x$get()
  inv <- solve(mat.data, ...)
  x$setinv(inv)
  ## Return a matrix that is the inverse of 'x'
  inv
}
| /cachematrix.R | no_license | drbeleza/ProgrammingAssignment2 | R | false | false | 695 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Build a caching wrapper around a matrix: the returned list exposes
## getters/setters for the matrix itself and for a cached inverse, which is
## cleared whenever a new matrix is stored.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## Write a short comment describing this function
## Return the inverse of the cache object `x` (built by makeCacheMatrix),
## computing it via solve() only on the first call and reusing the stored
## result afterwards. Extra arguments are passed through to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: invert the underlying matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  }
  cached
}
|
# One-time setup: install the rmarkdown and tinytex packages plus the
# TinyTeX LaTeX distribution needed to render R Markdown documents to PDF.
# Checking against rownames(installed.packages()) compares package NAMES
# only -- `%in% installed.packages()` scanned every cell of the matrix
# (paths, versions, ...), which can false-positive.
if (!("rmarkdown" %in% rownames(installed.packages()))) install.packages("rmarkdown")
library(rmarkdown)

# install the tinytex R package if missing, then load it
if (!("tinytex" %in% rownames(installed.packages()))) install.packages("tinytex")
library(tinytex)

# Install the TinyTeX LaTeX distribution itself, once. (The original script
# called install_tinytex() a second time with defaults first, downloading
# the distribution twice.)
install_tinytex(force = FALSE,
                dir = "auto",
                repository = "ctan",
                extra_packages = NULL,
                add_path = TRUE)

# Confirm where the distribution landed, then point tinytex at it.
tinytex_root()
# [1] "C:\\projetos\\project-rmarkdown\\auto"
use_tinytex(from = "C:\\projetos\\project-rmarkdown\\auto")
#Use of uninitialized value in bitwise or (|) at C:\projetos\project-rmarkdown\auto\texmf-dist\scripts\texlive\tlmgr.pl line 1482.
#Restart R and your editor and check if tinytex::tinytex_root() points to C:\projetos\project-rmarkdown\auto
| /example/Pesquisa Reprodutível/code/instalando.R | no_license | Uemerson/data-science-final-work | R | false | false | 835 | r | # install rmarkdown
# One-time setup: install rmarkdown + tinytex and the TinyTeX LaTeX
# distribution used to render R Markdown documents to PDF.
# NOTE(review): `%in% installed.packages()` matches against every cell of
# the installed-packages matrix, not just package names; comparing against
# rownames(installed.packages()) would be stricter.
if(!("rmarkdown") %in% installed.packages()) install.packages("rmarkdown")
# loading library rmarkdown
library(rmarkdown)
# install the tinytex R package if missing, then load it
if(!('tinytex') %in% installed.packages()) install.packages('tinytex')
library(tinytex)
# download and install the TinyTeX distribution with default settings
tinytex::install_tinytex()
# install tinytex
# NOTE(review): this second call re-installs the distribution with explicit
# arguments, so the bare install_tinytex() call above looks redundant.
install_tinytex(force = FALSE,
                dir = "auto",
                repository = "ctan",
                extra_packages = NULL,
                add_path = TRUE)
# report the root directory of the installed distribution
tinytex_root()
# [1] "C:\\projetos\\project-rmarkdown\\auto"
# point tinytex at the distribution installed under the project directory
use_tinytex(from = "C:\\projetos\\project-rmarkdown\\auto")
#Use of uninitialized value in bitwise or (|) at C:\projetos\project-rmarkdown\auto\texmf-dist\scripts\texlive\tlmgr.pl line 1482.
#Restart R and your editor and check if tinytex::tinytex_root() points to C:\projetos\project-rmarkdown\auto
# Query the RepRisk single-case list endpoint and capture the JSON response.
# aServer/aPort must be defined before this runs (consumed by authenticate()).
source("HTTPRequests/authenticate.R")
token <- authenticate("root", "x", aServer, aPort)
urlEndpoint <- "http://10.12.51.90:3000/reprisk_db_single_case/list.json"
# Request body: first 100 cases with case_code "TST", selected attributes only.
requestJSON <-
'
{
"pageSize": 100,
"attributes": [
"id",
"classification.policy_name",
"entry_date"
],
"filters": [
{
"is": {
"case_code": "TST"
}
}
],
"attributeNames": true
}
'
# Accumulate the response body in memory via RCurl's text-gatherer callback.
h <- basicTextGatherer()
HTTPState <- curlPerform(
  url = urlEndpoint,
  httpheader = c(paste("x-authentication: ", token, sep=""), "Content-Type: application/json;charset=UTF-8"),
  # BUG FIX: original read `postfields=requestJSON ,sep="")` -- a stray
  # `, sep="")` (leftover from a removed paste() call) that made the file
  # unparseable.
  postfields = requestJSON,
  verbose = TRUE,
  writefunction = h$update)
JSONResponse <- h$value()
h$reset() | /trunk/R/trunk/testRequest.R | no_license | kupec-martin/reprisk | R | false | false | 712 | r | source("HTTPRequests/authenticate.R")
# Authenticate against the RepRisk server and POST a JSON list query,
# collecting the response text. (authenticate() is sourced on the line
# above; aServer/aPort come from the calling context.)
token <- authenticate("root", "x", aServer, aPort)
urlEndpoint <- "http://10.12.51.90:3000/reprisk_db_single_case/list.json"
# Request body: first 100 cases with case_code "TST", selected attributes only.
requestJSON <-
'
{
"pageSize": 100,
"attributes": [
"id",
"classification.policy_name",
"entry_date"
],
"filters": [
{
"is": {
"case_code": "TST"
}
}
],
"attributeNames": true
}
'
# Accumulate the response body in memory via RCurl's text-gatherer callback.
h <- basicTextGatherer()
HTTPState <- curlPerform(
  url = urlEndpoint,
  httpheader = c(paste("x-authentication: ", token, sep=""), "Content-Type: application/json;charset=UTF-8"),
  # BUG FIX: removed the stray `, sep="")` after requestJSON that broke
  # parsing of the original file.
  postfields = requestJSON,
  verbose = TRUE,
  writefunction = h$update)
JSONResponse <- h$value()
h$reset()  # clear the gatherer so it can be reused for another request
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtex.R
\name{getDrugNames}
\alias{getDrugNames}
\title{\code{getDrugNames} Identifies drug names for a vector of drug ids}
\usage{
getDrugNames(drug_ids)
}
\value{
}
\description{
\code{getDrugNames} Identifies drugs in a
}
| /man/getDrugNames.Rd | permissive | Sage-Bionetworks/dten | R | false | true | 278 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtex.R
\name{getDrugNames}
\alias{getDrugNames}
\title{\code{getDrugNames} Identifies drugs in a}
\usage{
getDrugNames(drug_ids)
}
\value{
}
\description{
\code{getDrugNames} Identifies drugs in a
}
|
# Fit an elastic-net model (alpha = 0.25) to the large_intestine training
# set with 10-fold cross-validation and append the fitted path to a log file.
library(glmnet)

# `header = TRUE` spelled out: the original `head=T` relied on partial
# argument matching and the reassignable T alias.
mydata <- read.table("../../../../TrainingSet/FullSet/Lasso/large_intestine.csv", header = TRUE, sep = ",")

# Predictors start at column 4; column 1 holds the response.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

set.seed(123)  # reproducible CV fold assignment
# Named `fit` rather than `glm`, which would shadow stats::glm.
fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.25,
                 family = "gaussian", standardize = FALSE)

sink('./large_intestine_038.txt', append = TRUE)
print(fit$glmnet.fit)
sink()  # restore normal console output
| /Model/EN/Lasso/large_intestine/large_intestine_038.R | no_license | esbgkannan/QSMART | R | false | false | 363 | r | library(glmnet)
# Read the large_intestine training set (CSV with a header row;
# `head=T` partially matches the `header` argument).
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/large_intestine.csv",head=T,sep=",")
# Predictor matrix: columns 4..end; response: column 1.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
# 10-fold cross-validated elastic net (alpha = 0.25), MSE loss, no standardization.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.25,family="gaussian",standardize=FALSE)
# Append the fitted glmnet path summary to the results log, then restore output.
sink('./large_intestine_038.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## Count how often each feature index is used in the projection matrices
## (matA) across all trees of an unsupervised random forest object.
##
## urerf$forest[[i]]$matA[[j]] is a flat vector of (featureIndex, weight)
## pairs, so feature indices sit at the odd positions 1, 3, 5, ...
##
## Prints "Feature Use" and the counts vector; the counts are also
## (invisibly) returned by the final print().
feature.use <- function(urerf){
  fUse <- integer()
  # Iterate the structures directly instead of `1:length(...)`, which
  # mis-iterates (1, 0) when a list is empty.
  for (tree in urerf$forest) {
    for (mat in tree$matA) {
      if (!is.null(mat)) {
        for (feature_id in mat[seq(1, length(mat), by = 2)]) {
          if (is.na(fUse[feature_id])) {
            fUse[feature_id] <- 0  # first sighting of this feature index
          }
          fUse[feature_id] <- fUse[feature_id] + 1
        }
      }
    }
  }
  print("Feature Use")
  print(fUse)
}
| /hw/feature_use.R | no_license | Percimorphism/URerF_experiment | R | false | false | 613 | r | feature.use <- function(urerf){
# Body of feature.use(urerf) -- the function header sits on the previous
# (metadata-fused) line. Counts how often each feature index appears across
# the matA projection entries of all trees in the forest.
fUse <- integer()
for(i in 1:length(urerf$forest)){
for(j in 1:length(urerf$forest[[i]]$matA)){
if(!is.null(urerf$forest[[i]]$matA[[j]])){
# matA holds flattened (featureIndex, weight) pairs, so half the length
# is the number of features referenced by this entry.
numFeatures <- length(urerf$forest[[i]]$matA[[j]])/2
for(feature in 0:(numFeatures-1)){
# feature indices occupy the odd positions 1, 3, 5, ... of the pair vector
if(is.na(fUse[urerf$forest[[i]]$matA[[j]][feature*2+1]])){
fUse[urerf$forest[[i]]$matA[[j]][feature*2+1]] <- 0
}
fUse[urerf$forest[[i]]$matA[[j]][feature*2+1]] <- fUse[urerf$forest[[i]]$matA[[j]][feature*2+1]] +1
}
}
}
}
# report the per-feature usage counts (also the function's return value)
print("Feature Use")
print (fUse)
}
|
# install.packages("rchess")
# Or if you want to be risky you can install the latest development version from github with:
# devtools::install_github("jbkunst/rchess")
library(rchess)
library(dplyr)
# FIDE World Cups data
data(chesswc)
count(chesswc, event) # counts number of events in each World Cup
# Basic Usage
chss <- Chess$new()
# Check the legal next moves:
chss$moves()
# A tibble: 20 x 6
# (moves available in this position with description of pieces and squares)
chss$moves(verbose = TRUE)
# Make a move:
chss$move("a3")
# We can concate some moves (and a captures)
chss$move("e5")$move("f4")$move("Qe7")$move("fxe5")
plot(chss)
# Or a ggplot2 version (I know, I need to change the chess pieces symbols in unicode; maybe use a chess typeface)
# plot(chss, type = "ggplot")
# There are function to get information of actual position:
chss$turn() # whose turn is it now
chss$square_color("h1")
chss$get("e5") # get color of square and piece which stands there
chss$history(verbose = TRUE) # A tibble: 5 x 8
chss$history()
chss$undo() # back to 1 turn before
chss$history()
chss$fen() # Forsyth–Edwards Notation (FEN) is a standard notation for
# describing a particular board position of a chess game.
# The purpose of FEN is to provide all the necessary information to
# restart a game from a particular position.
# You can edit the header
chss$header("White", "You")
chss$header("WhiteElo", 1800)
chss$header("Black", "Me")
chss$header("Date", Sys.Date())
chss$header("Site", "This R session")
# Get the header
chss$get_header()
# And get the pgn
cat(chss$pgn())
# Or plot the board in ascii format:
chss$ascii()
# Load PGN and FEN
# FEN
chssfen <- Chess$new()
fen <- "rnbqkbnr/pp1ppppp/8/2p5/4P3/8/PPPP1PPP/RNBQKBNR w KQkq c6 0 2"
chssfen$load(fen)
plot(chssfen)
# PGN
pgn <- system.file("extdata/pgn/kasparov_vs_topalov.pgn", package = "rchess")
pgn <- readLines(pgn, warn = FALSE)
pgn <- paste(pgn, collapse = "\n")
cat(pgn)
chsspgn <- Chess$new()
chsspgn$load_pgn(pgn)
chsspgn$history()
chsspgn$history(verbose = TRUE) # A tibble: 87 x 8
plot(chsspgn)
# Validation Functions
# State validation
chss2 <- Chess$new("rnb1kbnr/pppp1ppp/8/4p3/5PPq/8/PPPPP2P/RNBQKBNR w KQkq - 1 3")
plot(chss2)
chss2$in_check()
chss2$in_checkmate()
# Slatemate validation
chss3 <- Chess$new("4k3/4P3/4K3/8/8/8/8/8 b - - 0 78")
plot(chss3)
chss3$in_stalemate()
# Three fold repetition
chss4 <- Chess$new("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
chss4$in_threefold_repetition() # FALSE
chss4$move('Nf3')$move('Nf6')$move('Ng1')$move('Ng8')
chss4$in_threefold_repetition() # FALSE
chss4$move('Nf3')$move('Nf6')$move('Ng1')$move('Ng8')
chss4$in_threefold_repetition() # TRUE
chss4$history()
# Insufficient material
chss5 <- Chess$new("k7/8/n7/8/8/8/8/7K b - - 0 1")
plot(chss5)
chss5$insufficient_material()
# Helpers Functions
# There some helper function to get more information
# History Detail
# This functions is a detailed version from the history(verbose = TRUE).
chsspgn$history_detail()
# Plot a boad via ggplot
# You can plot a specific fen vía ggplot:
# ggchessboard(chsspgn$fen())
# Auxiliar Data
# There function to retrieve some data which is easier to plot:
# Pieces
rchess:::.chesspiecedata()
# Board
rchess:::.chessboarddata()
# Data
# The package have two data sets:
# FIDE World cups
data("chesswc")
str(chesswc)
head(chesswc)
# More Details
# Under the hood
# This package is mainly a wrapper of chessjs by jhlywa.
#
# The main parts in this package are:
#
# V8 package and chessjs javascript library.
# R6 package for the OO system.
# htmlwidget package and chessboardjs javascript library.
#
# Thanks to the creators and maintainers of these packages and libraries.
#
# Session Info
print(sessionInfo())
| /SGH/Big data/Project/chess/chess.R | no_license | Valkoiset/myrepo | R | false | false | 3,865 | r |
# install.packages("rchess")
# Or if you want to be risky you can install the latest development version from github with:
# devtools::install_github("jbkunst/rchess")
library(rchess)
library(dplyr)
# FIDE World Cups data
data(chesswc)
count(chesswc, event) # counts number of events in each World Cup
# Basic Usage
chss <- Chess$new()
# Check the legal next moves:
chss$moves()
# A tibble: 20 x 6
# (moves available in this position with description of pieces and squares)
chss$moves(verbose = TRUE)
# Make a move:
chss$move("a3")
# We can concate some moves (and a captures)
chss$move("e5")$move("f4")$move("Qe7")$move("fxe5")
plot(chss)
# Or a ggplot2 version (I know, I need to change the chess pieces symbols in unicode; maybe use a chess typeface)
# plot(chss, type = "ggplot")
# There are function to get information of actual position:
chss$turn() # whose turn is it now
chss$square_color("h1")
chss$get("e5") # get color of square and piece which stands there
chss$history(verbose = TRUE) # A tibble: 5 x 8
chss$history()
chss$undo() # back to 1 turn before
chss$history()
chss$fen() # Forsyth–Edwards Notation (FEN) is a standard notation for
# describing a particular board position of a chess game.
# The purpose of FEN is to provide all the necessary information to
# restart a game from a particular position.
# You can edit the header
chss$header("White", "You")
chss$header("WhiteElo", 1800)
chss$header("Black", "Me")
chss$header("Date", Sys.Date())
chss$header("Site", "This R session")
# Get the header
chss$get_header()
# And get the pgn
cat(chss$pgn())
# Or plot the board in ascii format:
chss$ascii()
# Load PGN and FEN
# FEN
chssfen <- Chess$new()
fen <- "rnbqkbnr/pp1ppppp/8/2p5/4P3/8/PPPP1PPP/RNBQKBNR w KQkq c6 0 2"
chssfen$load(fen)
plot(chssfen)
# PGN
pgn <- system.file("extdata/pgn/kasparov_vs_topalov.pgn", package = "rchess")
pgn <- readLines(pgn, warn = FALSE)
pgn <- paste(pgn, collapse = "\n")
cat(pgn)
chsspgn <- Chess$new()
chsspgn$load_pgn(pgn)
chsspgn$history()
chsspgn$history(verbose = TRUE) # A tibble: 87 x 8
plot(chsspgn)
# Validation Functions
# State validation
chss2 <- Chess$new("rnb1kbnr/pppp1ppp/8/4p3/5PPq/8/PPPPP2P/RNBQKBNR w KQkq - 1 3")
plot(chss2)
chss2$in_check()
chss2$in_checkmate()
# Slatemate validation
chss3 <- Chess$new("4k3/4P3/4K3/8/8/8/8/8 b - - 0 78")
plot(chss3)
chss3$in_stalemate()
# Three fold repetition
chss4 <- Chess$new("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
chss4$in_threefold_repetition() # FALSE
chss4$move('Nf3')$move('Nf6')$move('Ng1')$move('Ng8')
chss4$in_threefold_repetition() # FALSE
chss4$move('Nf3')$move('Nf6')$move('Ng1')$move('Ng8')
chss4$in_threefold_repetition() # TRUE
chss4$history()
# Insufficient material
chss5 <- Chess$new("k7/8/n7/8/8/8/8/7K b - - 0 1")
plot(chss5)
chss5$insufficient_material()
# Helpers Functions
# There some helper function to get more information
# History Detail
# This functions is a detailed version from the history(verbose = TRUE).
chsspgn$history_detail()
# Plot a boad via ggplot
# You can plot a specific fen vía ggplot:
# ggchessboard(chsspgn$fen())
# Auxiliar Data
# There function to retrieve some data which is easier to plot:
# Pieces
rchess:::.chesspiecedata()
# Board
rchess:::.chessboarddata()
# Data
# The package have two data sets:
# FIDE World cups
data("chesswc")
str(chesswc)
head(chesswc)
# More Details
# Under the hood
# This package is mainly a wrapper of chessjs by jhlywa.
#
# The main parts in this package are:
#
# V8 package and chessjs javascript library.
# R6 package for the OO system.
# htmlwidget package and chessboardjs javascript library.
#
# Thanks to the creators and maintainers of these packages and libraries.
#
# Session Info
print(sessionInfo())
|
# Some previous experimentation
# ------------------------------------------------------------
# Normalisierung
normalize <- function(x){
(x - min(x)) / (max(x) - min(x))
}
df <- iris[1:4]
apply(df, 2, normalize)
# Hiearchical Clustering
# Hierarchisches Cluster erzeugen
cluster <- hclust(dist(scale(mtcars)),
method = "complete")
# Plotten
plot(cluster)
# Gruppen zeichnen
rect.hclust(cluster, 5)
#ClusterNr zum Dateframe hinzufügen
data.frame(mtcars, cutree(cluster, 5))
# k-means Clustering
set.seed(1234)
df <- iris[3:4]
# Normalisieren
df_norm <- scale(df)
# Cluster bilden (Anzahl Clusters!)
clusters <- kmeans(df_norm, 3)
# Plot
plot(df, col=clusters$cluster + 1, pch=20)
# Clustering in Action
# -------------------------------------------------------------------------
data(nutrient, package="flexclust")
head(nutrient)
# Distanzen bestimmen
dists <- dist(nutrient)
as.matrix(dists)[1:3, 1:3]
row.names(nutrient) <- tolower(row.names(nutrient))
nutrient_scaled <- scale(nutrient)
nutrient_dist <- dist(nutrient_scaled)
# Hiearchical cluster analysis
# --------------------------------------------
nutrient_average <- hclust(nutrient_dist, method = "average")
plot(nutrient_average, hang=-1, cex=.8)
# Determine best numbers of clusters
library(NbClust)
nc <- NbClust(nutrient_scaled, distance="euclidean",
min.nc=2, max.nc=15, method="average")
barplot(table(nc$Best.nc[1,]),
xlab="Number of clusters", ylab="Number of criteria",
main="Number of Clusters chosen by 26 Criteria")
# Clustering with recomended number of clusters
clusters <- cutree(nutrient_average, k=5)
# look at the clusters with the means
aggregate(nutrient, by=list(cluster=clusters), median)
# plot cluster in tree
plot(nutrient_average, hang=-1, cex=.8)
rect.hclust(nutrient_average, k=5)
# Partitioning cluster analysis
# --------------------------------------------
# k-means clustering
## Elbow plot for choosing k in k-means: plots the total within-groups sum
## of squares (WSS) against the number of clusters 1..nc.
##
## data -- numeric matrix / data frame (observations x variables)
## nc   -- maximum number of clusters to evaluate (must be >= 2)
## seed -- RNG seed, reset before every kmeans() call so each k is
##         clustered from a reproducible random start
wssplot <- function(data, nc=15, seed=1234){
  stopifnot(nc >= 2)  # `2:nc` would run backwards for nc < 2
  wss <- numeric(nc)  # preallocate instead of growing in the loop
  # For k = 1 the WSS is the total variance around the column means.
  wss[1] <- (nrow(data)-1) * sum(apply(data,2,var))
  for (i in 2:nc){
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers=i)$withinss)
  }
  plot(1:nc, wss, type="b", xlab="Number of Clusters",
       ylab = "Within groups sum of squares")
}
data(wine, package = "rattle")
head(wine)
# Standardize the data
df <- scale(wine[-1])
# Determine number of clusters
wssplot(df)
library(NbClust)
set.seed(1234)
devAskNewPage(ask=T)
nc <- NbClust(df, min.nc = 2, max.nc = 15, method="kmeans")
# * According to the majority rule, the best number of clusters is 3
table(nc$Best.n[1,])
dev.off()
barplot(table(nc$Best.n[1,]),
xlab="Number of Clusters", ylab="Number of Criteria",
main="Number of Clusters Chosen by 26 Criteria")
# Perform k-means analysis
set.seed(1234)
fit_km <- kmeans(df, 3, nstart = 25)
fit_km$size
# Show centers
fit_km$centers
aggregate(wine[-1], by=list(cluster=fit_km$cluster), mean)
# We verify our solution to the given type
ct_km <- table(wine$Type, fit_km$cluster)
ct_km
# -> looks pretty good
# Verify with adjusted Rand index
library(flexclust)
randIndex(ct_km)
# ARI
# 0.897495
# -> Not bad :)
# Clustering with medoids
# ----------------------------------------------
library(cluster)
set.seed(1234)
fit_pam <- pam(wine[-1], k=3, stand=T)
fit_pam$medoids
clusplot(fit_pam, main="Bivariat Clustering Plot")
# Clustering might be too good
library(fMultivar)
set.seed(1234)
df <- rnorm2d(500, rho=.5)
df <- as.data.frame(df)
plot(df, main="Bivariate Normal Distribution with rho=.5")
# Try to find some clusters where none exists
wssplot(df)
library(NbClust)
nc <- NbClust(df, min.nc = 2, max.nc = 15, method="kmeans")
dev.new()
barplot(table(nc$Best.n[1,]),
xlab="Number of Clusters", ylab="Number of Criteria",
main="Number of Clusters Chosen by 26 Criteria")
library(ggplot2)
library(cluster)
fit <- pam(df, k=2)
df$clustering <- factor(fit$clustering)
ggplot(data=df, aes(x=V1, y=V2, color=clustering, shape=clustering)) +
geom_point() +
ggtitle("Clustering of Bivariate Normal Data")
| /CAS Datenanalyse/Data Mining/clustering.R | no_license | huli/mas-data-science | R | false | false | 4,245 | r |
# Some previous experimentation
# ------------------------------------------------------------
# Normalisierung
normalize <- function(x){
(x - min(x)) / (max(x) - min(x))
}
df <- iris[1:4]
apply(df, 2, normalize)
# Hiearchical Clustering
# Hierarchisches Cluster erzeugen
cluster <- hclust(dist(scale(mtcars)),
method = "complete")
# Plotten
plot(cluster)
# Gruppen zeichnen
rect.hclust(cluster, 5)
#ClusterNr zum Dateframe hinzufügen
data.frame(mtcars, cutree(cluster, 5))
# k-means Clustering
set.seed(1234)
df <- iris[3:4]
# Normalisieren
df_norm <- scale(df)
# Cluster bilden (Anzahl Clusters!)
clusters <- kmeans(df_norm, 3)
# Plot
plot(df, col=clusters$cluster + 1, pch=20)
# Clustering in Action
# -------------------------------------------------------------------------
data(nutrient, package="flexclust")
head(nutrient)
# Distanzen bestimmen
dists <- dist(nutrient)
as.matrix(dists)[1:3, 1:3]
row.names(nutrient) <- tolower(row.names(nutrient))
nutrient_scaled <- scale(nutrient)
nutrient_dist <- dist(nutrient_scaled)
# Hiearchical cluster analysis
# --------------------------------------------
nutrient_average <- hclust(nutrient_dist, method = "average")
plot(nutrient_average, hang=-1, cex=.8)
# Determine best numbers of clusters
library(NbClust)
nc <- NbClust(nutrient_scaled, distance="euclidean",
min.nc=2, max.nc=15, method="average")
barplot(table(nc$Best.nc[1,]),
xlab="Number of clusters", ylab="Number of criteria",
main="Number of Clusters chosen by 26 Criteria")
# Clustering with recomended number of clusters
clusters <- cutree(nutrient_average, k=5)
# look at the clusters with the means
aggregate(nutrient, by=list(cluster=clusters), median)
# plot cluster in tree
plot(nutrient_average, hang=-1, cex=.8)
rect.hclust(nutrient_average, k=5)
# Partitioning cluster analysis
# --------------------------------------------
# k-means clustering
## Draw an "elbow" curve -- within-groups sum of squares for 1..nc
## clusters -- used to eyeball a good k for k-means. The seed is re-set
## before each kmeans() call so every k starts from the same RNG state.
wssplot <- function(data, nc=15, seed=1234){
  n_obs <- nrow(data)
  # k = 1 baseline: total variance around the column means
  totals <- (n_obs - 1) * sum(apply(data, 2, var))
  for (k in 2:nc) {
    set.seed(seed)
    totals[k] <- sum(kmeans(data, centers = k)$withinss)
  }
  plot(1:nc, totals,
       type = "b",
       xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}
data(wine, package = "rattle")
head(wine)
# Standardize the data
df <- scale(wine[-1])
# Determine number of clusters
wssplot(df)
library(NbClust)
set.seed(1234)
devAskNewPage(ask=T)
nc <- NbClust(df, min.nc = 2, max.nc = 15, method="kmeans")
# * According to the majority rule, the best number of clusters is 3
table(nc$Best.n[1,])
dev.off()
barplot(table(nc$Best.n[1,]),
xlab="Number of Clusters", ylab="Number of Criteria",
main="Number of Clusters Chosen by 26 Criteria")
# Perform k-means analysis
set.seed(1234)
fit_km <- kmeans(df, 3, nstart = 25)
fit_km$size
# Show centers
fit_km$centers
aggregate(wine[-1], by=list(cluster=fit_km$cluster), mean)
# We verify our solution to the given type
ct_km <- table(wine$Type, fit_km$cluster)
ct_km
# -> looks pretty good
# Verify with adjusted Rand index
library(flexclust)
randIndex(ct_km)
# ARI
# 0.897495
# -> Not bad :)
# Clustering with medoids
# ----------------------------------------------
library(cluster)
set.seed(1234)
fit_pam <- pam(wine[-1], k=3, stand=T)
fit_pam$medoids
clusplot(fit_pam, main="Bivariat Clustering Plot")
# Clustering might be too good
library(fMultivar)
set.seed(1234)
df <- rnorm2d(500, rho=.5)
df <- as.data.frame(df)
plot(df, main="Bivariate Normal Distribution with rho=.5")
# Try to find some clusters where none exists
wssplot(df)
library(NbClust)
nc <- NbClust(df, min.nc = 2, max.nc = 15, method="kmeans")
dev.new()
barplot(table(nc$Best.n[1,]),
xlab="Number of Clusters", ylab="Number of Criteria",
main="Number of Clusters Chosen by 26 Criteria")
library(ggplot2)
library(cluster)
fit <- pam(df, k=2)
df$clustering <- factor(fit$clustering)
ggplot(data=df, aes(x=V1, y=V2, color=clustering, shape=clustering)) +
geom_point() +
ggtitle("Clustering of Bivariate Normal Data")
|
#calculate rolling 3 month CC values from period values
library(data.table)
#set path (network share holding the input/output CSVs)
data_dir <- "O:/CoOp/CoOp194_PROReportng&OM/Julie"
#load data; order chronologically within daypart so row lags align with
#prior fiscal periods
ce <- fread(paste0(data_dir,"/DC_CC_daypartcheck_st101.csv"))
ce <- setorder(ce,FSCL_YR_NUM,FSCL_PER_IN_YR_NUM,DAY_PART)
# keep FY2017-FY2018 plus FY2016 periods 9+ (seed data for the first lags)
ce <- ce[FSCL_YR_NUM==2018|FSCL_YR_NUM==2017|(FSCL_YR_NUM==2016&FSCL_PER_IN_YR_NUM>=9)]
#lag twice to get rolling 3
# c(NA, x[-.N]) is a hand-rolled one-row lag within each daypart group
ce[, lag_TB :=lapply(.SD, function(x) c(NA, x[-.N])), by="DAY_PART", .SDcols="TOTAL_TB"]
ce[, lag_RSPNS :=lapply(.SD, function(x) c(NA, x[-.N])), by="DAY_PART", .SDcols="TOTAL_RSPNS"]
# lagging the lag gives the two-period-ago value
ce[, lag2_TB :=lapply(.SD, function(x) c(NA, x[-.N])), by="DAY_PART", .SDcols="lag_TB"]
ce[, lag2_RSPNS :=lapply(.SD, function(x) c(NA, x[-.N])), by="DAY_PART", .SDcols="lag_RSPNS"]
#sum together
# na.rm=TRUE so the first two periods of each daypart sum what is available
ce[, R3MTB := rowSums(.SD, na.rm = TRUE), .SDcols=c("TOTAL_TB","lag_TB","lag2_TB")]
ce[, R3MRSPNS := rowSums(.SD, na.rm = TRUE), .SDcols=c("TOTAL_RSPNS","lag_RSPNS","lag2_RSPNS")]
#calculate TB
# rolling 3-period top-box proportion per daypart
ce[, R3M_CC := round(R3MTB/R3MRSPNS,3)]
ce2 <- ce[, .(DAY_PART,FSCL_YR_NUM,FSCL_PER_IN_YR_NUM,R3MTB,R3MRSPNS,R3M_CC)]
write.csv(ce2,paste0(data_dir,"/DC_CC_daypartcheck_st101_results.csv")) | /Rolling3month_cc_code.R | no_license | jumorris2017/R-code-flatfiles | R | false | false | 1,152 | r | #calculate rolling 3 month CC values from period values
# Calculate rolling 3-month (current + two prior fiscal periods) Customer
# Connection top-box scores per daypart from period-level values.
library(data.table)

# Network share holding the input/output CSVs
data_dir <- "O:/CoOp/CoOp194_PROReportng&OM/Julie"

# Load period-level counts and order chronologically within each daypart so
# that lagging by row gives the prior fiscal period.
ce <- fread(paste0(data_dir,"/DC_CC_daypartcheck_st101.csv"))
ce <- setorder(ce,FSCL_YR_NUM,FSCL_PER_IN_YR_NUM,DAY_PART)
# Keep FY2017-FY2018 plus FY2016 periods 9+ (seed data for the first lags).
ce <- ce[FSCL_YR_NUM==2018|FSCL_YR_NUM==2017|(FSCL_YR_NUM==2016&FSCL_PER_IN_YR_NUM>=9)]

# One- and two-period lags within each daypart. data.table::shift()
# replaces the original hand-rolled c(NA, x[-.N]) lag and, like it, fills
# the leading rows of each group with NA.
ce[, c("lag_TB", "lag2_TB") := shift(TOTAL_TB, 1:2), by = "DAY_PART"]
ce[, c("lag_RSPNS", "lag2_RSPNS") := shift(TOTAL_RSPNS, 1:2), by = "DAY_PART"]

# Rolling 3-period totals; na.rm=TRUE so the first two periods of each
# daypart sum whatever history exists.
ce[, R3MTB := rowSums(.SD, na.rm = TRUE), .SDcols=c("TOTAL_TB","lag_TB","lag2_TB")]
ce[, R3MRSPNS := rowSums(.SD, na.rm = TRUE), .SDcols=c("TOTAL_RSPNS","lag_RSPNS","lag2_RSPNS")]

# Rolling top-box proportion
ce[, R3M_CC := round(R3MTB/R3MRSPNS,3)]

ce2 <- ce[, .(DAY_PART,FSCL_YR_NUM,FSCL_PER_IN_YR_NUM,R3MTB,R3MRSPNS,R3M_CC)]
write.csv(ce2,paste0(data_dir,"/DC_CC_daypartcheck_st101_results.csv"))
###################################################
### code chunk number 12: Cs203_state-filtering
###################################################
## Auto-extracted Sweave/knitr chunk from the MARSS user guide (KFAS chapter).
## Runs MARSSkfss (the classic Kalman filter/smoother implementation) on the
## fitted model object `fit_marss`, which is created in an earlier chunk.
kf_marss <- MARSSkfss(fit_marss)
| /inst/userguide/figures/KFAS--Cs203_state-filtering.R | permissive | nwfsc-timeseries/MARSS | R | false | false | 187 | r | ###################################################
### code chunk number 12: Cs203_state-filtering
###################################################
kf_marss <- MARSSkfss(fit_marss)
|
\name{SampleSize.Poisson}
\alias{SampleSize.Poisson}
\title{Sample size calculation for continuous sequential analysis with Poisson data.}
\description{The function \code{SampleSize.Poisson} obtains the required sample size (length of surveillance) needed to guarantee a desired statistical
power for a pre-specified relative risk, when doing continuous sequential analysis for Poisson data with a Wald type upper boundary, which is flat with
respect to the log-likelihood ratio. It can also be used to approximate the sample size needed when doing group sequential analysis for Poisson data.
}
\usage{
SampleSize.Poisson(alpha=0.05,power=0.9,M=1,D=0,RR=2,
precision=0.000001,Tailed="upper")
}
\arguments{
\item{alpha}{The significance level. The default value is alpha=0.05. Must be in the range (0,0.5].}
\item{RR}{The target vector of relative risks to be detected with the requested statistical vector of powers. The default value is RR=2.}
\item{power}{The target vector of overall statistical powers to detect an increased relative risk (RR). The default value is power=0.90.}
\item{M}{The minimum number of events needed before the null hypothesis can be rejected. It must be a positive integer.
A good rule of thumb is to set M=4 (Kulldorff and Silva, 2015). The default value is M=1, which means that even a single event
can reject the null hypothesis if it occurs sufficiently early.}
\item{D}{The expected number of events under the null hypothesis at the first look at the data.
This is used when there is an initial large chunk of data arriving, followed by continuous sequential analysis. The default value is D=0, which is
also the best choice. This means that there is no delay in the start of the sequential analyses. If D is very large, the maximum
sample size will be set equal to D if a non-sequential analysis provides the desired power.}
\item{precision}{The tolerance for the difference between the requested and actual statistical power. Should be very small. The default value is precision=0.000001.}
\item{Tailed}{Tailed="upper" (default) for H0:RR<=1, and Tailed="lower" for H0:RR>=1 or Tailed="two" for H0:RR=1.}
}
\details{
When using the MaxSPRT and the \code{CV.Poisson} function to conduct continuous sequential analysis for Poisson data, the null
hypothesis is rejected when the log likelihood ratio exceeds the pre-determined critical value calculated by \code{CV.Poisson}.
The sequential analysis ends without rejecting the null hypothesis when a predetermined upper limit on the sample size is
reached, expressed in terms of the expected number of events under the null hypothesis. For example, the sequential analysis
may end as soon as the sample size is such that there are 50 expected events under the null.
The function \code{SampleSize.Poisson} calculates what the upper limit on the sample size (length of surveillance) that is required
for the continuous Poisson based MaxSPRT to achieve the desired statistical power for a pre-specified relative risk RR.
The solution is exact using iterative numerical calculations (Kulldorff et al., 2011).
While designed for continuous sequential analysis, the \code{SampleSize.Poisson} function can also be used to approximate the
required upper limit on the sample size that is needed when doing group sequential analysis for Poisson data, using the \code{CV.G.Poisson} function.
}
\value{
\item{SampleSize_by_RR_Power}{A table containing the main performance measures associated to the required samples sizes, expressed in terms of the expected number of events under the null hypothesis, for each combination of RR and power.}
}
\author{ Ivair Ramos Silva, Martin Kulldorff.}
\references{
Kulldorff M, Davis RL, Kolczak M, Lewis E, Lieu T, Platt R. (2011). A Maximized Sequential Probability Ratio Test for Drug and Safety Surveillance. Sequential Analysis, 30: 58--78.
Kulldorff M, Silva IR. (2015). Continuous Post-market Sequential Safety Surveillance with Minimum Events to Signal. REVSTAT Statistical Journal, 15(3): 373--394.
}
\section{Acknowledgements}{
Development of the \code{SampleSize.Poisson} function was funded by:\cr
- National Council of Scientific and Technological Development (CNPq), Brazil (v1.0).\cr
- Bank for Development of the Minas Gerais State (BDMG), Brazil (v1.0).\cr
- National Institute of General Medical Sciences, NIH, USA, through grant number R01GM108999 (v2.0.1,2.0.2).
}
\keyword{Continuous MaxSPRT analysis}
\section{See also}{
\code{\link[Sequential]{CV.Poisson}}: for calculating critical values for continuous sequential analysis with Poisson data.\cr
\code{\link[Sequential]{Performance.Poisson}}: for calculating the statistical power, expected time to signal and expected sample size for continuous sequential analysis with Poisson data\cr
\code{\link[Sequential]{SampleSize.Binomial}}: for calculating the minimum sample size given a target power in continuous sequential analysis with binomial data.
}
\examples{
### Example 1:
## Sample size required to obtain a power of 80%, for a relati-
## ve risk of 3, no delay for starting the surveillance (D=0),
## and when the null hypothesis can be rejected with one event
## (M=1) under an alpha level of 5%.
# result1<- SampleSize.Poisson(alpha=0.05,power=0.8,M=1,D=0,RR=3)
# result1
## Example 2:
## Sample size required to obtain a power of 90%, for a relati-
## ve risk of 2, no delay for starting the surveillance (D=0),
## and when the null hypothesis can be rejected only after 2
## events (M=2) under an alpha level of 10%.
##
# result2<- SampleSize.Poisson(alpha=0.1,power=0.9,M=2,D=0,RR=2)
# result2
}
| /man/SampleSize.Poisson.Rd | no_license | tslumley/sequential2 | R | false | false | 5,720 | rd | \name{SampleSize.Poisson}
\alias{SampleSize.Poisson}
\title{Sample size calculation for continuous sequential analysis with Poisson data.}
\description{The function \code{SampleSize.Poisson} obtains the required sample size (length of surveillance) needed to guarantee a desired statistical
power for a pre-specified relative risk, when doing continuous sequential analysis for Poisson data with a Wald type upper boundary, which is flat with
respect to the log-likelihood ratio. It can also be used to approximate the sample size needed when doing group sequential analysis for Poisson data.
}
\usage{
SampleSize.Poisson(alpha=0.05,power=0.9,M=1,D=0,RR=2,
precision=0.000001,Tailed="upper")
}
\arguments{
\item{alpha}{The significance level. The default value is alpha=0.05. Must be in the range (0,0.5].}
\item{RR}{The target vector of relative risks to be detected with the requested statistical vector of powers. The default value is RR=2.}
\item{power}{The target vector of overall statistical powers to detect an increased relative risk (RR). The default value is power=0.90.}
\item{M}{The minimum number of events needed before the null hypothesis can be rejected. It must be a positive integer.
A good rule of thumb is to set M=4 (Kulldorff and Silva, 2015). The default value is M=1, which means that even a single event
can reject the null hypothesis if it occurs sufficiently early.}
\item{D}{The expected number of events under the null hypothesis at the first look at the data.
This is used when there is an initial large chunk of data arriving, followed by continuous sequential analysis. The default value is D=0, which is
also the best choice. This means that there is no delay in the start of the sequential analyses. If D is very large, the maximum
sample size will be set equal to D if a non-sequential analysis provides the desired power.}
\item{precision}{The tolerance for the difference between the requested and actual statistical power. Should be very small. The default value is precision=0.000001.}
\item{Tailed}{Tailed="upper" (default) for H0:RR<=1, and Tailed="lower" for H0:RR>=1 or Tailed="two" for H0:RR=1.}
}
\details{
When using the MaxSPRT and the \code{CV.Poisson} function to conduct continuous sequential analysis for Poisson data, the null
hypothesis is rejected when the log likelihood ratio exceeds the pre-determined critical value calculated by \code{CV.Poisson}.
The sequential analysis ends without rejecting the null hypothesis when a predetermined upper limit on the sample size is
reached, expressed in terms of the expected number of events under the null hypothesis. For example, the sequential analysis
may end as soon as the sample size is such that there are 50 expected events under the null.
The function \code{SampleSize.Poisson} calculates what the upper limit on the sample size (length of surveillance) that is required
for the continuous Poisson based MaxSPRT to achieve the desired statistical power for a pre-specified relative risk RR.
The solution is exact using iterative numerical calculations (Kulldorff et al., 2011).
While designed for continuous sequential analysis, the \code{SampleSize.Poisson} function can also be used to approximate the
required upper limit on the sample size that is needed when doing group sequential analysis for Poisson data, using the \code{CV.G.Poisson} function.
}
\value{
\item{SampleSize_by_RR_Power}{A table containing the main performance measures associated to the required samples sizes, expressed in terms of the expected number of events under the null hypothesis, for each combination of RR and power.}
}
\author{ Ivair Ramos Silva, Martin Kulldorff.}
\references{
Kulldorff M, Davis RL, Kolczak M, Lewis E, Lieu T, Platt R. (2011). A Maximized Sequential Probability Ratio Test for Drug and Safety Surveillance. Sequential Analysis, 30: 58--78.
Kulldorff M, Silva IR. (2015). Continuous Post-market Sequential Safety Surveillance with Minimum Events to Signal. REVSTAT Statistical Journal, 15(3): 373--394.
}
\section{Acknowledgements}{
Development of the \code{SampleSize.Poisson} function was funded by:\cr
- National Council of Scientific and Technological Development (CNPq), Brazil (v1.0).\cr
- Bank for Development of the Minas Gerais State (BDMG), Brazil (v1.0).\cr
- National Institute of General Medical Sciences, NIH, USA, through grant number R01GM108999 (v2.0.1,2.0.2).
}
\keyword{Continuous MaxSPRT analysis}
\section{See also}{
\code{\link[Sequential]{CV.Poisson}}: for calculating critical values for continuous sequential analysis with Poisson data.\cr
\code{\link[Sequential]{Performance.Poisson}}: for calculating the statistical power, expected time to signal and expected sample size for continuous sequential analysis with Poisson data\cr
\code{\link[Sequential]{SampleSize.Binomial}}: for calculating the minimum sample size given a target power in continuous sequential analysis with binomial data.
}
\examples{
### Example 1:
## Sample size required to obtain a power of 80%, for a relati-
## ve risk of 3, no delay for starting the surveillance (D=0),
## and when the null hypothesis can be rejected with one event
## (M=1) under an alpha level of 5%.
# result1<- SampleSize.Poisson(alpha=0.05,power=0.8,M=1,D=0,RR=3)
# result1
## Example 2:
## Sample size required to obtain a power of 90%, for a relati-
## ve risk of 2, no delay for starting the surveillance (D=0),
## and when the null hypothesis can be rejected only after 2
## events (M=2) under an alpha level of 10%.
##
# result2<- SampleSize.Poisson(alpha=0.1,power=0.9,M=2,D=0,RR=2)
# result2
}
|
# Build the spinal-cord HINT footprint database tables and package the
# per-chromosome TFBS output as a gzipped tarball in the bdbag staging area.
#
# Users can either step through this file, or call this file with
#   r -f example.R
# THIS ASSUMES THAT THE TESTHINT DATABASE EXISTS. The recipe for building that
# database is in ../dbInitialization/createHintTest.sql
# THIS EXAMPLE USES THE BRAIN HINT OUTPUT MADE BY RUNNING make hint at /scratch/data/footprints
print(date())
#-------------------------------------------------------------------------------
# Paths: HINT output tree, TFBS output directory, and bdbag staging directory.
#-------------------------------------------------------------------------------
data.path <- "/scratch/shared/footprints/spinal_cord_hint_16"
# file.path() builds the same string as the old paste0() call, which also
# passed a bogus sep="" argument that paste0 silently absorbed into `...`.
output_path <- file.path(data.path, "TFBS_OUTPUT")
dir.create(output_path, showWarnings = FALSE)
bdbag.path <- "/scratch/shared/footprints/spinal_cord_16"
dir.create(bdbag.path, showWarnings = FALSE)
#-------------------------------------------------------------------------------
# Database connection names; values pre-defined by the caller are respected.
#-------------------------------------------------------------------------------
if(!exists("db.wellington"))
  db.wellington <- "spinal_cord_hint_16"
if(!exists("db.fimo"))
  db.fimo <- "fimo"
#-------------------------------------------------------------------------------
# Source the project helper libraries (environment-specific absolute paths).
#-------------------------------------------------------------------------------
source("/scratch/galaxy/test/generate_db/src/dependencies.R")
source("/scratch/galaxy/test/generate_db/src/dbFunctions.R")
source("/scratch/galaxy/test/generate_db/src/tableParsing.R")
source("/scratch/galaxy/test/generate_db/src/main_Bioc.R")
if(!interactive()){
  # All 25 chromosome names: 1-22, X, Y, MT.
  chromosomes <- paste0("chr", c(1:22, "X", "Y", "MT"))
  # One multicore worker per concurrent chromosome job.
  library(BiocParallel)
  register(MulticoreParam(workers = 25, stop.on.error = FALSE, log = TRUE), default = TRUE)
  # bptry() collects per-chromosome errors instead of aborting the whole run.
  result <- bptry(bplapply(chromosomes, fillAllSamplesByChromosome,
                           dbConnection = db.wellington,
                           fimo = db.fimo,
                           minid = "spinal_cord_hint_16.minid",
                           dbUser = "trena",
                           dbTable = "spinal_cord_hint_16",
                           sourcePath = data.path,
                           isTest = FALSE,
                           method = "HINT",
                           Fill_DB_Enable = FALSE))
}
# Archive the TFBS output into the bdbag area, then remove the archived tree.
cmd <- paste0("tar -zcvf ", bdbag.path, "/", db.wellington, ".tar.gz ", output_path)
system(cmd, intern = TRUE)
unlink(output_path, recursive = TRUE)
#print(bpok(result))
#print("Database fill complete")
#print(date())
| /generate_db/master/spinal_cord_16/hint.R | no_license | xtmgah/genomics-footprint | R | false | false | 2,186 | r | # users can either step through this file, or call this file with
# r -f example.R
# THIS ASSUMES THAT THE TESTHINT DATABASE EXISTS. The recipe for building that
# database is in ../dbInitialization/createHintTest.sql
# THIS EXAMPLE USES THE BRAIN HINT OUTPUT MADE BY RUNNING make hint at /scratch/data/footprints
print(date())
#-------------------------------------------------------------------------------
# set path to hint output
data.path <- "/scratch/shared/footprints/spinal_cord_hint_16"
output_path=paste(data.path,"/TFBS_OUTPUT",sep="")
dir.create(output_path, showWarnings = FALSE)
bdbag.path<-"/scratch/shared/footprints/spinal_cord_16"
dir.create(bdbag.path, showWarnings = FALSE)
#-------------------------------------------------------------------------------
# establish database connections:
if(!exists("db.wellington"))
db.wellington <- "spinal_cord_hint_16"
if(!exists("db.fimo"))
db.fimo <- "fimo"
#-------------------------------------------------------------------------------
# Source the libraries
source("/scratch/galaxy/test/generate_db/src/dependencies.R")
source("/scratch/galaxy/test/generate_db/src/dbFunctions.R")
source("/scratch/galaxy/test/generate_db/src/tableParsing.R")
source("/scratch/galaxy/test/generate_db/src/main_Bioc.R")
if(!interactive()){
chromosomes <- paste0("chr",c(1:22,"X","Y","MT"))
# Create parallel structure here
library(BiocParallel)
register(MulticoreParam(workers = 25, stop.on.error = FALSE, log = TRUE), default = TRUE)
# Run on all 24 possible chromosomes at once
result <- bptry(bplapply(chromosomes,fillAllSamplesByChromosome,
dbConnection = db.wellington,
fimo = db.fimo,
minid = "spinal_cord_hint_16.minid",
dbUser = "trena",
dbTable = "spinal_cord_hint_16",
sourcePath = data.path,
isTest = FALSE,
method = "HINT",
Fill_DB_Enable=FALSE))
}
cmd=paste("tar -zcvf ", bdbag.path, "/", db.wellington,".tar.gz ", output_path, sep="")
system(cmd, intern = TRUE)
unlink(output_path,recursive=TRUE)
#print(bpok(result))
#print("Database fill complete")
#print(date())
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/openmi.om.linkableComponent.R
\name{openmi.om.linkableComponent}
\alias{openmi.om.linkableComponent}
\title{The base class for linkable meta-model components.}
\value{
R6 class of type openmi.om.linkableComponent
}
\description{
The base class for linkable meta-model components.
The base class for linkable meta-model components.
}
\examples{
NA
}
\seealso{
}
\section{Super class}{
\code{\link[openmi.om:openmi.om.base]{openmi.om::openmi.om.base}} -> \code{openmi.om.linkableComponent}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-clone}{\code{openmi.om.linkableComponent$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="addComponent">}\href{../../openmi.om/html/openmi.om.base.html#method-addComponent}{\code{openmi.om::openmi.om.base$addComponent()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="addInput">}\href{../../openmi.om/html/openmi.om.base.html#method-addInput}{\code{openmi.om::openmi.om.base$addInput()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="add_component">}\href{../../openmi.om/html/openmi.om.base.html#method-add_component}{\code{openmi.om::openmi.om.base$add_component()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="asJSON">}\href{../../openmi.om/html/openmi.om.base.html#method-asJSON}{\code{openmi.om::openmi.om.base$asJSON()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="finish">}\href{../../openmi.om/html/openmi.om.base.html#method-finish}{\code{openmi.om::openmi.om.base$finish()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="getInputs">}\href{../../openmi.om/html/openmi.om.base.html#method-getInputs}{\code{openmi.om::openmi.om.base$getInputs()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="getValue">}\href{../../openmi.om/html/openmi.om.base.html#method-getValue}{\code{openmi.om::openmi.om.base$getValue()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="get_component_id">}\href{../../openmi.om/html/openmi.om.base.html#method-get_component_id}{\code{openmi.om::openmi.om.base$get_component_id()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="init">}\href{../../openmi.om/html/openmi.om.base.html#method-init}{\code{openmi.om::openmi.om.base$init()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="initialize">}\href{../../openmi.om/html/openmi.om.base.html#method-initialize}{\code{openmi.om::openmi.om.base$initialize()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="logState">}\href{../../openmi.om/html/openmi.om.base.html#method-logState}{\code{openmi.om::openmi.om.base$logState()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="log_debug">}\href{../../openmi.om/html/openmi.om.base.html#method-log_debug}{\code{openmi.om::openmi.om.base$log_debug()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="orderOperations">}\href{../../openmi.om/html/openmi.om.base.html#method-orderOperations}{\code{openmi.om::openmi.om.base$orderOperations()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="parse_class_specific">}\href{../../openmi.om/html/openmi.om.base.html#method-parse_class_specific}{\code{openmi.om::openmi.om.base$parse_class_specific()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="parse_openmi">}\href{../../openmi.om/html/openmi.om.base.html#method-parse_openmi}{\code{openmi.om::openmi.om.base$parse_openmi()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="prepare">}\href{../../openmi.om/html/openmi.om.base.html#method-prepare}{\code{openmi.om::openmi.om.base$prepare()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="set_prop">}\href{../../openmi.om/html/openmi.om.base.html#method-set_prop}{\code{openmi.om::openmi.om.base$set_prop()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="set_sub_prop">}\href{../../openmi.om/html/openmi.om.base.html#method-set_sub_prop}{\code{openmi.om::openmi.om.base$set_sub_prop()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="set_vars">}\href{../../openmi.om/html/openmi.om.base.html#method-set_vars}{\code{openmi.om::openmi.om.base$set_vars()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="settable">}\href{../../openmi.om/html/openmi.om.base.html#method-settable}{\code{openmi.om::openmi.om.base$settable()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="step">}\href{../../openmi.om/html/openmi.om.base.html#method-step}{\code{openmi.om::openmi.om.base$step()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="stepChildren">}\href{../../openmi.om/html/openmi.om.base.html#method-stepChildren}{\code{openmi.om::openmi.om.base$stepChildren()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="update">}\href{../../openmi.om/html/openmi.om.base.html#method-update}{\code{openmi.om::openmi.om.base$update()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="validate">}\href{../../openmi.om/html/openmi.om.base.html#method-validate}{\code{openmi.om::openmi.om.base$validate()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{openmi.om.linkableComponent$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/openmi.om.linkableComponent.Rd | no_license | HARPgroup/openmi-om | R | false | true | 6,814 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/openmi.om.linkableComponent.R
\name{openmi.om.linkableComponent}
\alias{openmi.om.linkableComponent}
\title{The base class for linkable meta-model components.}
\value{
R6 class of type openmi.om.linkableComponent
}
\description{
The base class for linkable meta-model components.
The base class for linkable meta-model components.
}
\examples{
NA
}
\seealso{
}
\section{Super class}{
\code{\link[openmi.om:openmi.om.base]{openmi.om::openmi.om.base}} -> \code{openmi.om.linkableComponent}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-clone}{\code{openmi.om.linkableComponent$clone()}}
}
}
\if{html}{
\out{<details ><summary>Inherited methods</summary>}
\itemize{
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="addComponent">}\href{../../openmi.om/html/openmi.om.base.html#method-addComponent}{\code{openmi.om::openmi.om.base$addComponent()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="addInput">}\href{../../openmi.om/html/openmi.om.base.html#method-addInput}{\code{openmi.om::openmi.om.base$addInput()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="add_component">}\href{../../openmi.om/html/openmi.om.base.html#method-add_component}{\code{openmi.om::openmi.om.base$add_component()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="asJSON">}\href{../../openmi.om/html/openmi.om.base.html#method-asJSON}{\code{openmi.om::openmi.om.base$asJSON()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="finish">}\href{../../openmi.om/html/openmi.om.base.html#method-finish}{\code{openmi.om::openmi.om.base$finish()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="getInputs">}\href{../../openmi.om/html/openmi.om.base.html#method-getInputs}{\code{openmi.om::openmi.om.base$getInputs()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="getValue">}\href{../../openmi.om/html/openmi.om.base.html#method-getValue}{\code{openmi.om::openmi.om.base$getValue()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="get_component_id">}\href{../../openmi.om/html/openmi.om.base.html#method-get_component_id}{\code{openmi.om::openmi.om.base$get_component_id()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="init">}\href{../../openmi.om/html/openmi.om.base.html#method-init}{\code{openmi.om::openmi.om.base$init()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="initialize">}\href{../../openmi.om/html/openmi.om.base.html#method-initialize}{\code{openmi.om::openmi.om.base$initialize()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="logState">}\href{../../openmi.om/html/openmi.om.base.html#method-logState}{\code{openmi.om::openmi.om.base$logState()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="log_debug">}\href{../../openmi.om/html/openmi.om.base.html#method-log_debug}{\code{openmi.om::openmi.om.base$log_debug()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="orderOperations">}\href{../../openmi.om/html/openmi.om.base.html#method-orderOperations}{\code{openmi.om::openmi.om.base$orderOperations()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="parse_class_specific">}\href{../../openmi.om/html/openmi.om.base.html#method-parse_class_specific}{\code{openmi.om::openmi.om.base$parse_class_specific()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="parse_openmi">}\href{../../openmi.om/html/openmi.om.base.html#method-parse_openmi}{\code{openmi.om::openmi.om.base$parse_openmi()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="prepare">}\href{../../openmi.om/html/openmi.om.base.html#method-prepare}{\code{openmi.om::openmi.om.base$prepare()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="set_prop">}\href{../../openmi.om/html/openmi.om.base.html#method-set_prop}{\code{openmi.om::openmi.om.base$set_prop()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="set_sub_prop">}\href{../../openmi.om/html/openmi.om.base.html#method-set_sub_prop}{\code{openmi.om::openmi.om.base$set_sub_prop()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="set_vars">}\href{../../openmi.om/html/openmi.om.base.html#method-set_vars}{\code{openmi.om::openmi.om.base$set_vars()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="settable">}\href{../../openmi.om/html/openmi.om.base.html#method-settable}{\code{openmi.om::openmi.om.base$settable()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="step">}\href{../../openmi.om/html/openmi.om.base.html#method-step}{\code{openmi.om::openmi.om.base$step()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="stepChildren">}\href{../../openmi.om/html/openmi.om.base.html#method-stepChildren}{\code{openmi.om::openmi.om.base$stepChildren()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="update">}\href{../../openmi.om/html/openmi.om.base.html#method-update}{\code{openmi.om::openmi.om.base$update()}}\out{</span>}
\item \out{<span class="pkg-link" data-pkg="openmi.om" data-topic="openmi.om.base" data-id="validate">}\href{../../openmi.om/html/openmi.om.base.html#method-validate}{\code{openmi.om::openmi.om.base$validate()}}\out{</span>}
}
\out{</details>}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{openmi.om.linkableComponent$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
## Exploratory setup for the hospital-rankings assignment: load the outcome
## data and inspect its shape before using rankall() below.
## NOTE(review): hard-coded setwd() and rm(list = ls()) make this script
## non-portable and destructive to the caller's workspace -- tolerable in a
## personal scratch file, but they should not survive into shared code.
setwd("~/repositoriosGit/coursera/data_sicence_johns_hopkins/02_r_programming/programming_assignment")
rm(list=ls())
## Read everything as character; column 11 is the heart-attack mortality
## measure (it is the column rankall() selects for outcome "heart attack").
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
head(outcome)
ncol(outcome)
names(outcome)
## Coerce the mortality column to numeric ("Not Available" becomes NA) ...
outcome[, 11] <- as.numeric(outcome[, 11])
## ... and look at its distribution.
hist(outcome[, 11])
## Rank hospitals within every state for a given outcome's 30-day mortality.
##
## Args:
##   outcome: one of "heart attack", "heart failure", "pneumonia".
##   num:     per-state rank to report -- "best", "worst", or a positive
##            integer.  States with fewer than `num` hospitals yield NA.
##
## Returns:
##   A data.frame with columns `hospital` and `state`, one row per state
##   present in the data, ordered alphabetically by state abbreviation.
##
## Reads "outcome-of-care-measures.csv" from the working directory.
rankall <- function(outcome, num = "best") {
  ## Columns kept: Hospital.Name (2), State (7), and the three 30-day
  ## mortality measures (11, 17, 23).
  data <- read.csv("outcome-of-care-measures.csv",
                   colClasses = "character")[, c(2, 7, 11, 17, 23)]
  ## Validate arguments.  The lookup maps each outcome to its column index
  ## in `data`, replacing the original if/else chain.
  outcome_col <- c("heart attack" = 3, "heart failure" = 4, "pneumonia" = 5)
  if (!(outcome %in% names(outcome_col))) {
    stop("invalid outcome")
  }
  if (is.character(num) && !(num %in% c("best", "worst"))) {
    stop("invalid number")
  }
  ## Keep Hospital.Name, State, and the requested mortality column.
  data <- data[, c(1, 2, outcome_col[[outcome]])]
  names(data)[3] <- "Deaths"
  ## "Not Available" entries become NA; the coercion warning is expected.
  data[, 3] <- suppressWarnings(as.numeric(data[, 3]))
  data <- data[!is.na(data$Deaths), ]
  ## Pick the hospital at the requested rank within one state's rows.
  ## Ties in mortality are broken alphabetically by hospital name.
  pick_rank <- function(x) {
    x <- x[order(x$Deaths, x$Hospital.Name), ]
    if (is.character(num)) {
      if (num == "best") {
        x$Hospital.Name[1]
      } else {
        x$Hospital.Name[nrow(x)]
      }
    } else {
      x$Hospital.Name[num]  # NA when num exceeds this state's hospital count
    }
  }
  ans <- lapply(split(data, data$State), pick_rank)
  data.frame(hospital = unlist(ans), state = names(ans))
}
| /data_sicence_johns_hopkins/02_r_programming/programming_assignment/rankall.R | permissive | abrantesasf/coursera | R | false | false | 2,207 | r | setwd("~/repositoriosGit/coursera/data_sicence_johns_hopkins/02_r_programming/programming_assignment")
## Exploratory preamble (duplicate copy of the assignment script).
## NOTE(review): rm(list=ls()) wipes the session; discouraged in scripts.
rm(list=ls())
## Read everything as character; the mortality column is coerced below.
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
head(outcome)    # peek at the first rows
ncol(outcome)    # column count
names(outcome)   # column names
## Column 11 = 30-day death rate from heart attack (see rankall() below).
outcome[, 11] <- as.numeric(outcome[, 11])
hist(outcome[, 11])  # quick look at the mortality distribution
## Return, for every state, the hospital holding rank `num` (or "best" /
## "worst") on 30-day mortality for the requested `outcome`.
## Result: data frame with columns `hospital` and `state`.
rankall <- function(outcome, num = "best") {
  ## Load hospital name (2), state (7) and the three 30-day mortality
  ## columns: heart attack (11), heart failure (17), pneumonia (23).
  dat <- read.csv("outcome-of-care-measures.csv", colClasses = "character")[, c(2, 7, 11, 17, 23)]
  ## Argument validation: outcome must be one of the three conditions.
  if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop("invalid outcome")
  }
  ## A character `num` may only be "best" or "worst".
  if (class(num) == "character" && !num %in% c("best", "worst")) {
    stop("invalid number")
  }
  ## Keep only the mortality column matching the requested outcome.
  keep <- switch(outcome,
                 "heart attack" = c(1, 2, 3),
                 "heart failure" = c(1, 2, 4),
                 "pneumonia" = c(1, 2, 5))
  dat <- dat[, keep]
  names(dat)[3] <- "Deaths"
  dat[, 3] <- suppressWarnings(as.numeric(dat[, 3]))  # "Not Available" -> NA
  dat <- dat[!is.na(dat$Deaths), ]
  ## Order one state's hospitals by mortality (name breaks ties) and pull
  ## out the requested rank.
  pick_rank <- function(state_df, rank) {
    ordered <- state_df[order(state_df$Deaths, state_df$Hospital.Name), ]
    if (class(rank) == "character") {
      if (rank == "best") {
        ordered$Hospital.Name[1]
      } else if (rank == "worst") {
        ordered$Hospital.Name[nrow(ordered)]
      }
    } else {
      ordered$Hospital.Name[rank]  # NA when rank exceeds hospital count
    }
  }
  per_state <- lapply(split(dat, dat$State), pick_rank, num)
  ## Assemble one row per state, in state order.
  data.frame(hospital = unlist(per_state), state = names(per_state))
}
|
# Running isoDetector for each pure reference sample
# Changes from original isoDetector_blue.R:
# Changed file split between pure and mixture files
# Mixture file creation: all pure cell type files must come from same person
# Will use the combined fragment length distribution file instead of individual
# (possibly randomly selected) fragment length distribution files
library(stringr)
library(isoform)
# SLURM array index (expected 1-30) selects cell type and sample within it.
array_val <- as.numeric(Sys.getenv("SLURM_ARRAY_TASK_ID"))
batch <- array_val
header_nas <- "/nas/longleaf/home/hheiling/Blueprint/"
header_pine <- "/pine/scr/h/h/hheiling/Blueprint/"
# Materials prefix
prefix_mat <- str_c(header_pine, "Blueprint_Materials")
# Pure cell type files prefix (to be used for algorithm fit)
prefix_pure <- str_c(header_pine, "Fit_Samples2")
# Datasets for cell types; ordering corresponds to cell types CT1, CT2, CT3.
cellTypes <- c("EGAD00001002671", "EGAD00001002674", "EGAD00001002675")
# Find pure reference files to use in algorithm fit
# (T/F literals replaced by TRUE/FALSE -- T and F are reassignable in R).
CT1_files <- list.files(path = str_c(prefix_pure, cellTypes[1], sep = "/"), full.names = TRUE)
CT2_files <- list.files(path = str_c(prefix_pure, cellTypes[2], sep = "/"), full.names = TRUE)
CT3_files <- list.files(path = str_c(prefix_pure, cellTypes[3], sep = "/"), full.names = TRUE)
set.seed(8500)
CT_ref_files <- c(sample(CT1_files, size = 20, replace = FALSE),
                  sample(CT2_files, size = 20, replace = FALSE),
                  sample(CT3_files, size = 20, replace = FALSE))
# Pure reference samples used for finding clusters (disjoint from the fit set).
set.seed(8766)
seeds <- sample(1000:9999, 3, replace = FALSE)
# Map batch 1-10 -> cell type 1, 11-20 -> 2, 21-30 -> 3. This index
# arithmetic replaces the original triplicated if/else branches; the per-type
# set.seed()/sample() call order is unchanged, so results are identical.
ct_idx <- ceiling(batch / 10)
CT <- cellTypes[ct_idx]
j <- batch - 10 * (ct_idx - 1)  # want j from 1 to 10 within the cell type
# Simulated output prefix
prefix_out <- str_c(header_nas, "isoDetector_out2/", CT)
if (!dir.exists(prefix_out)) dir.create(prefix_out, recursive = TRUE)
ct_files <- list(CT1_files, CT2_files, CT3_files)[[ct_idx]]
ref_range <- ((ct_idx - 1) * 20 + 1):(ct_idx * 20)
set.seed(seeds[ct_idx])
# Logical negation replaces -which(): x[-which(cond)] silently keeps nothing
# when there are no matches, while x[!cond] keeps everything in that case.
# Results are identical whenever matches exist (as they do here).
pure_files <- sample(ct_files[!(ct_files %in% CT_ref_files[ref_range])],
                     size = 10, replace = FALSE)
print(basename(pure_files))
pure_select <- pure_files[j]  # Select specific file to run isoDetector on
print(basename(pure_select))
samp_name <- unlist(str_split(basename(pure_select), "_"))[1]
print(samp_name)
# Use fragment length distribution file that is composed of a merging of 50 fragment length distribution files
fragSizeFile <- str_c(prefix_mat, "Combined_FragDist.txt", sep = "/")
# Find BED and knownIsoforms objects
bedFile <- sprintf("%s/gencode.v15.nonoverlap.exon.bed", prefix_mat)
knownIsoforms <- sprintf("%s/gencode.v15.nonoverlap.exon.knownIsoforms.RData", prefix_mat)
# Specify name of output file
output_file <- str_c(samp_name, "_geneModel_knownIsoforms.RData")
print(output_file)
isoDetector(pure_select, bedFile, fragSizeFile, readLen = 100, # readLen from step1_get_counts.Rout
            sprintf("%s/%s", prefix_out, output_file),
            knownIsoforms = knownIsoforms)
| /03 Blueprint in silico analyses/02 IsoDeconvMM code/isoDetector_blue2.R | no_license | hheiling/IsoDeconvMM_Supplement | R | false | false | 3,677 | r | # Running isoDetector for each pure reference sample
# Changes from original isoDetector_blue.R:
# Changed file split between pure and mixture files
# Mixture file creation: all pure cell type files must come from same person
# Will use the combined fragment length distribution file instead of individual
# (possibly randomly selected) fragment length distribution files
library(stringr)
library(isoform)
# Arrays 1-30: SLURM array index selects cell type and sample within it.
array_val <- as.numeric(Sys.getenv("SLURM_ARRAY_TASK_ID"))
batch <- array_val
header_nas = "/nas/longleaf/home/hheiling/Blueprint/"
header_pine = "/pine/scr/h/h/hheiling/Blueprint/"
# Materials prefix
prefix_mat = str_c(header_pine,"Blueprint_Materials")
# Pure cell type files prefix (to be used for algorithm fit)
prefix_pure = str_c(header_pine,"Fit_Samples2")
# Datasets for cell types
cellTypes = c("EGAD00001002671","EGAD00001002674","EGAD00001002675")
## Assume ordering of datasets in CT correspond to cell types CT1, CT2, and CT3
# Find pure reference files to use in algorithm fit
# NOTE(review): prefer TRUE/FALSE over T/F -- T and F are reassignable.
CT1_files = list.files(path = str_c(prefix_pure, cellTypes[1], sep = "/"), full.names = T)
CT2_files = list.files(path = str_c(prefix_pure, cellTypes[2], sep = "/"), full.names = T)
CT3_files = list.files(path = str_c(prefix_pure, cellTypes[3], sep = "/"), full.names = T)
set.seed(8500)
CT_ref_files = c(sample(CT1_files, size = 20, replace = F),
                 sample(CT2_files, size = 20, replace = F),
                 sample(CT3_files, size = 20, replace = F))
# Find pure reference samples to use for finding clusters (separate from samples
# to use in algorithm fit)
# Chose cell type (dataset)
set.seed(8766)
seeds = sample(1000:9999, 3, replace = F)
# NOTE(review): the three branches below differ only in indices; they could be
# collapsed via ct_idx <- ceiling(batch / 10). Also, x[-which(x %in% y)]
# drops EVERYTHING if there are no matches; safe here because the reference
# files were sampled from these same vectors, so matches always exist.
if(batch <= 10){
  CT = "EGAD00001002671"
  # Simulated output prefix
  prefix_out = str_c(header_nas,"isoDetector_out2/",CT)
  if(!dir.exists(prefix_out)){dir.create(prefix_out, recursive = T)}
  j = batch
  set.seed(seeds[1])
  pure_files = sample(CT1_files[-which(CT1_files %in% CT_ref_files[1:20])], size = 10, replace = F)
}else if(batch <= 20){ # batch 11 to 20
  CT = "EGAD00001002674"
  # Simulated output prefix
  prefix_out = str_c(header_nas,"isoDetector_out2/",CT)
  if(!dir.exists(prefix_out)){dir.create(prefix_out, recursive = T)}
  j = batch - 10 # want j from 1 to 10
  set.seed(seeds[2])
  pure_files = sample(CT2_files[-which(CT2_files %in% CT_ref_files[21:40])], size = 10, replace = F)
}else if(batch >= 21){ # batch 21 to 30
  CT = "EGAD00001002675"
  # Simulated output prefix
  prefix_out = str_c(header_nas,"isoDetector_out2/",CT)
  if(!dir.exists(prefix_out)){dir.create(prefix_out, recursive = T)}
  j = batch - 20 # want j from 1 to 10
  set.seed(seeds[3])
  pure_files = sample(CT3_files[-which(CT3_files %in% CT_ref_files[41:60])], size = 10, replace = F)
}
print(basename(pure_files))
pure_select = pure_files[j] # Select specific file to run isoDetector on
print(basename(pure_select))
samp_name = unlist(str_split(basename(pure_select), "_"))[1]
print(samp_name)
# Use fragment length distribution file that is composed of a merging of 50 fragment length distribution files
fragSizeFile = str_c(prefix_mat, "Combined_FragDist.txt", sep="/")
# Find BED and knownIsoforms objects
bedFile = sprintf("%s/gencode.v15.nonoverlap.exon.bed", prefix_mat)
knownIsoforms = sprintf("%s/gencode.v15.nonoverlap.exon.knownIsoforms.RData", prefix_mat)
# Specify name of output file
output_file = str_c(samp_name, "_geneModel_knownIsoforms.RData")
print(output_file)
isoDetector(pure_select, bedFile, fragSizeFile, readLen = 100, # readLen from step1_get_counts.Rout
            sprintf("%s/%s", prefix_out, output_file),
            knownIsoforms=knownIsoforms)
|
# read fcs files
t0p1<-f_read(t0p1_file,pattern_read)
t1p1<-f_read(t1p1_file,pattern_read)
t2p1<-f_read(t2p1_file,pattern_read)
t3p1<-f_read(t3p1_file,pattern_read)
t4p1<-f_read(t4p1_file,pattern_read)
t5p1<-f_read(t5p1_file,pattern_read)
t6p1<-f_read(t6p1_file,pattern_read)
t7p1<-f_read(t7p1_file,pattern_read)
t8p1<-f_read(t8p1_file,pattern_read)
t9p1<-f_read(t9p1_file,pattern_read)
#----------------------------------------------------------------------------------
# create the list of dataframes that contain all the data
df_list<-c(f_df_list(t0p1,starting_well,wells_per_sample,ypethanol_doses,columns_to_include),
f_df_list(t3p1,starting_well,2,c(0,400),columns_to_include),
f_df_list(t1p1,starting_well,wells_per_sample,glycerol_doses,columns_to_include),
f_df_list(t4p1,starting_well,2,c(0,200),columns_to_include),
f_df_list(t2p1,starting_well,24,proline_doses,columns_to_include),
f_df_list(t5p1,starting_well,2,c(0,200),columns_to_include),
f_df_list(t6p1,starting_well,24,sd_doses,columns_to_include),
f_df_list(t7p1,starting_well,2,c(0,400),columns_to_include),
f_df_list(t8p1,starting_well,24,ypd_doses,columns_to_include),
f_df_list(t9p1,starting_well,2,c(0,600),columns_to_include)
)
#----------------------------------------------------------------------------------
##create names for the dataframe list
df_list_names<-vector()
for (i in time_points){
name<-lapply(strain_names,f_names_df_list,i)
df_list_names<-c(df_list_names, name)
}
names(df_list)<-df_list_names
#----------------------------------------------------------------------------------
# size subsetting, if wanted
if(size_subset==TRUE){
size_subset_df<-lapply(df_list,f_size_subset,columns=columns,
col_fscw=col_fscw,col_sscw=col_sscw,limit_fscw=limit_fscw,
limit_sscw=limit_sscw, lower_limit_fscw=lower_limit_fscw,
lower_limit_sscw=lower_limit_sscw)
df_list<-size_subset_df
}
#----------------------------------------------------------------------------------
# descriptive stats
descriptives<-c(mapply(f_descriptives,df_list,experiment_doses_desc,
MoreArgs = list(column=3),SIMPLIFY = F))
#----------------------------------------------------------------------------------
# getting a value for size
df_with_size<-lapply(df_list,f_size)
mapply("write.csv", descriptives, paste(label_list, ".csv", sep = ""), SIMPLIFY = F)
| /FACS data/paper figures/figure1/facs_script.R | no_license | aasli/promoter | R | false | false | 2,593 | r |
# read fcs files
t0p1<-f_read(t0p1_file,pattern_read)
t1p1<-f_read(t1p1_file,pattern_read)
t2p1<-f_read(t2p1_file,pattern_read)
t3p1<-f_read(t3p1_file,pattern_read)
t4p1<-f_read(t4p1_file,pattern_read)
t5p1<-f_read(t5p1_file,pattern_read)
t6p1<-f_read(t6p1_file,pattern_read)
t7p1<-f_read(t7p1_file,pattern_read)
t8p1<-f_read(t8p1_file,pattern_read)
t9p1<-f_read(t9p1_file,pattern_read)
#----------------------------------------------------------------------------------
# create the list of dataframes that contain all the data
df_list<-c(f_df_list(t0p1,starting_well,wells_per_sample,ypethanol_doses,columns_to_include),
f_df_list(t3p1,starting_well,2,c(0,400),columns_to_include),
f_df_list(t1p1,starting_well,wells_per_sample,glycerol_doses,columns_to_include),
f_df_list(t4p1,starting_well,2,c(0,200),columns_to_include),
f_df_list(t2p1,starting_well,24,proline_doses,columns_to_include),
f_df_list(t5p1,starting_well,2,c(0,200),columns_to_include),
f_df_list(t6p1,starting_well,24,sd_doses,columns_to_include),
f_df_list(t7p1,starting_well,2,c(0,400),columns_to_include),
f_df_list(t8p1,starting_well,24,ypd_doses,columns_to_include),
f_df_list(t9p1,starting_well,2,c(0,600),columns_to_include)
)
#----------------------------------------------------------------------------------
##create names for the dataframe list
df_list_names<-vector()
for (i in time_points){
name<-lapply(strain_names,f_names_df_list,i)
df_list_names<-c(df_list_names, name)
}
names(df_list)<-df_list_names
#----------------------------------------------------------------------------------
# size subsetting, if wanted
if(size_subset==TRUE){
size_subset_df<-lapply(df_list,f_size_subset,columns=columns,
col_fscw=col_fscw,col_sscw=col_sscw,limit_fscw=limit_fscw,
limit_sscw=limit_sscw, lower_limit_fscw=lower_limit_fscw,
lower_limit_sscw=lower_limit_sscw)
df_list<-size_subset_df
}
#----------------------------------------------------------------------------------
# descriptive stats
descriptives<-c(mapply(f_descriptives,df_list,experiment_doses_desc,
MoreArgs = list(column=3),SIMPLIFY = F))
#----------------------------------------------------------------------------------
# getting a value for size
df_with_size<-lapply(df_list,f_size)
mapply("write.csv", descriptives, paste(label_list, ".csv", sep = ""), SIMPLIFY = F)
|
## This file creates a dataset that contains counts of live births between
## 2003 and 2016 by race/ethnicity using the CDC NCHS Natality Data.
##
## Step 1 downloads all the data. They are approximately 200 MB each.
## Step 2 will then unzip each file (one by one). Unzipped, the files are
## about 5 GB. It will then import only the columns we want, collapse
## the data, and save just the aggregated data.
## Step 3 will then combine all aggregated datasets, import, and then delete
## the source files and downloaded zip files.
## Imports
library(tidyverse)
library(narcan)
## Helpers
## R can't unzip files that result in something larger than 4GB so we use
## the `system2` command to invoke the local unzip utility.
## Unzip `file` inside `directory` using the system unzip utility (R's
## unzip() cannot handle archives expanding past 4 GB).
## From: https://stackoverflow.com/questions/42740206/
##
## Args:
##   directory:   directory containing the zip; decompression runs there.
##   file:        name of the zip file within `directory`.
##   .file_cache: if TRUE, skip decompression entirely (cache hit).
## Side effects: extracts files; prints unzip output when a warning-looking
## line is detected.
decompress_file <- function(directory, file, .file_cache = FALSE) {
    if (isTRUE(.file_cache)) {
        print("decompression skipped")
    } else {
        ## Run decompression from inside `directory` (simplifies unzip's
        ## output-location behavior). on.exit() restores the working
        ## directory even if system2() errors -- the original only restored
        ## it on the success path.
        wd <- getwd()
        setwd(directory)
        on.exit(setwd(wd), add = TRUE)
        decompression <-
            system2("unzip",
                    args = c("-o",  # -o: overwrite existing files
                             file),
                    stdout = TRUE)
        ## Success test is implementation-specific; adjust the search string
        ## for your platform's unzip.
        if (grepl("Warning message", tail(decompression, 1))) {
            print(decompression)
        }
    }
}
## First, download all the zips (skipping years already present).
years <- 2003:2016
for (year in years) {
    ## Bug fix: the original read length(list.files(...) > 0) -- the
    ## comparison was INSIDE length(). It only worked by accident, because
    ## length(x > 0) == length(x); the parenthesis now sits where intended.
    if (length(list.files("./raw_data", pattern = as.character(year))) > 0) {
        next
    }
    current_file <- narcan:::download_natality_ascii(year, return_path = TRUE)
    print(current_file)
    Sys.sleep(runif(1, 0, 30))  # random pause -- be polite to the server
}
## Unzip them
for (year in years) {
    ## Find the current file in the raw folder
    current_file <- list.files("./raw_data", as.character(year),
                               full.names = FALSE)
    decompress_file("./raw_data", current_file)
    print(current_file)
}
## Now for each year: read the fixed-width natality file, aggregate live
## births by race/ethnicity and mother's age group, and save the aggregate
## to ./raw_data/agg_df_<year>.RDS.
decompressed_files <- list.files("./raw_data", "dat|txt", full.names = TRUE)
for (year in years) {
    ## Find the current file in the raw folder
    current_file <- decompressed_files[grep(paste0("Nat", year),
                                            decompressed_files)]
    print(current_file)
    ## Get the widths and columns from the NBER dictionary; keep only the
    ## variables used below (age group, residency, race, hispanic origin --
    ## names vary by vintage, hence both mbrace/mracerec and mhisp_r/umhisp).
    nber_dict <- narcan:::.dct_to_fwf_df(year, natality = TRUE) %>%
        filter(start > lag(end),
               name %in% c("mager9", "restatus",
                           "mbrace", "mracerec",
                           "mhisp_r", "umhisp"))
    ## Import the data
    temp_df <-
        readr::read_fwf(current_file,
                        col_positions = fwf_positions(start = nber_dict$start,
                                                      end = nber_dict$end,
                                                      col_names = nber_dict$name),
                        col_types = paste(nber_dict$type, collapse = ""))
    ## Deal with variable name changes across file vintages:
    ## umhisp -> mhisp_r, mracerec -> mbrace.
    if (tibble::has_name(temp_df, "umhisp")) {
        temp_df <- temp_df %>%
            rename(mhisp_r = umhisp)
    }
    if (tibble::has_name(temp_df, "mracerec")) {
        temp_df <- temp_df %>%
            mutate(mbrace = mracerec) %>%
            select(-mracerec)
    }
    ## Fix some of the columns: restrict to residents, map the mager9
    ## category to the lower bound of a 5-year age group starting at 10,
    ## and label race / hispanic-origin codes.
    temp_df <- temp_df %>%
        narcan::subset_residents(.) %>%
        mutate(mother_age = 10 + (mager9 - 1)*5,
               bridge_race =
                   case_when(mbrace == 1 ~ "white",
                             mbrace == 2 ~ "black",
                             mbrace == 3 ~ "aia",
                             mbrace == 4 ~ "api",
                             TRUE ~ "unknown"),
               hispanic =
                   case_when(mhisp_r == 0 ~ "non_hisp",
                             mhisp_r == 9 ~ "unknown",
                             TRUE ~ "hisp")) %>%
        select(bridge_race, hispanic, mother_age)
    ## Aggregate birth counts by mother_age for nine race/ethnicity slices:
    ## total; non-hispanic white/black/other; all white/black/other
    ## (hispanic included -- NOTE(review): despite the "all_hisp_*" labels);
    ## non-hispanic total; hispanic total.
    agg_df <- bind_rows(
        temp_df %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "total"),
        temp_df %>%
            filter(hispanic == "non_hisp",
                   bridge_race == "white") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_white"),
        temp_df %>%
            filter(hispanic == "non_hisp",
                   bridge_race == "black") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_black"),
        temp_df %>%
            filter(hispanic == "non_hisp",
                   bridge_race %in% c("aia", "api")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_other"),
        temp_df %>%
            filter(bridge_race %in% c("white")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "all_hisp_white"),
        temp_df %>%
            filter(bridge_race %in% c("black")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "all_hisp_black"),
        temp_df %>%
            filter(bridge_race %in% c("aia", "api")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "all_hisp_other"),
        temp_df %>%
            filter(hispanic == "non_hisp") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_total"),
        temp_df %>%
            filter(hispanic == "hisp") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "hisp_total")
    ) %>% mutate(year = year)
    saveRDS(agg_df, sprintf("./raw_data/agg_df_%s.RDS", year))
}
## Combine all per-year aggregate files into one data frame.
## bind_rows() replaces reduce(rbind): reduce(rbind) re-copies the
## accumulated frame on every step (quadratic in total rows); for these
## homogeneous per-year tibbles the result is the same.
live_births <- list.files("./raw_data", "\\.RDS", full.names = TRUE) %>%
    map(readRDS) %>%
    bind_rows()
## NOTE(review): devtools::use_data() is deprecated in current devtools;
## usethis::use_data() is the maintained equivalent.
devtools::use_data(live_births, overwrite = TRUE)
| /data-raw/making_live_births_data.R | permissive | mkiang/narcan | R | false | false | 6,653 | r | ## This file creates a dataset that contains counts of live births between
## 2003 and 2016 by race/ethnicity using the CDC NCHS Natality Data.
##
## Step 1 downloads all the data. They are approximately 200 MB each.
## Step 2 will then unzip each file (one by one). Unzipped, the files are
## about 5 GB. It will then import only the columns we want, collapse
## the data, and save just the aggregated data.
## Step 3 will then combine all aggregated datasets, import, and then delete
## the source files and downloaded zip files.
## Imports
library(tidyverse)
library(narcan)
## Helpers
## R can't unzip files that result in something larger than 4GB so we use
## the `system2` command to invoke the local unzip utility.
## Decompress `file` inside `directory` via the system unzip utility
## (R's unzip() cannot produce output larger than 4 GB).
## From: https://stackoverflow.com/questions/42740206/
## `.file_cache = TRUE` skips the work entirely (cache hit).
decompress_file <- function(directory, file, .file_cache = FALSE) {
  if (.file_cache == TRUE) {
    print("decompression skipped")
  } else {
    ## Run unzip from inside `directory` -- simplifies where output lands --
    ## then restore the previous working directory.
    old_wd <- getwd()
    setwd(directory)
    unzip_out <- system2("unzip",
                         args = c("-o",  # -o: overwrite existing files
                                  file),
                         stdout = TRUE)
    setwd(old_wd); rm(old_wd)
    ## Success detection is implementation-specific; tune the search string
    ## to your platform's unzip output.
    if (grepl("Warning message", tail(unzip_out, 1))) {
      print(unzip_out)
    }
  }
}
## First, download all the zips (skipping years already present).
years <- 2003:2016
for (year in years) {
    ## NOTE(review): misplaced parenthesis -- this is length(files > 0),
    ## not length(files) > 0. It only works because length(x > 0) equals
    ## length(x), so both are truthy exactly when a file matches.
    if (length(list.files("./raw_data", pattern = as.character(year)) > 0)) {
        next
    }
    current_file <- narcan:::download_natality_ascii(year, return_path = TRUE)
    print(current_file)
    Sys.sleep(runif(1, 0, 30))  # random pause -- be polite to the server
}
## Unzip them
for (year in years) {
    ## Find the current file in the raw folder
    current_file <- list.files("./raw_data", as.character(year),
                               full.names = FALSE)
    decompress_file("./raw_data", current_file)
    print(current_file)
}
## Now for each year: read the fixed-width natality file, aggregate live
## births by race/ethnicity and mother's age group, and save the aggregate
## to ./raw_data/agg_df_<year>.RDS.
decompressed_files <- list.files("./raw_data", "dat|txt", full.names = TRUE)
for (year in years) {
    ## Find the current file in the raw folder
    current_file <- decompressed_files[grep(paste0("Nat", year),
                                            decompressed_files)]
    print(current_file)
    ## Get the widths and columns from the NBER dictionary; keep only the
    ## variables used below (age group, residency, race, hispanic origin --
    ## names vary by vintage, hence both mbrace/mracerec and mhisp_r/umhisp).
    nber_dict <- narcan:::.dct_to_fwf_df(year, natality = TRUE) %>%
        filter(start > lag(end),
               name %in% c("mager9", "restatus",
                           "mbrace", "mracerec",
                           "mhisp_r", "umhisp"))
    ## Import the data
    temp_df <-
        readr::read_fwf(current_file,
                        col_positions = fwf_positions(start = nber_dict$start,
                                                      end = nber_dict$end,
                                                      col_names = nber_dict$name),
                        col_types = paste(nber_dict$type, collapse = ""))
    ## Deal with variable name changes across file vintages:
    ## umhisp -> mhisp_r, mracerec -> mbrace.
    if (tibble::has_name(temp_df, "umhisp")) {
        temp_df <- temp_df %>%
            rename(mhisp_r = umhisp)
    }
    if (tibble::has_name(temp_df, "mracerec")) {
        temp_df <- temp_df %>%
            mutate(mbrace = mracerec) %>%
            select(-mracerec)
    }
    ## Fix some of the columns: restrict to residents, map the mager9
    ## category to the lower bound of a 5-year age group starting at 10,
    ## and label race / hispanic-origin codes.
    temp_df <- temp_df %>%
        narcan::subset_residents(.) %>%
        mutate(mother_age = 10 + (mager9 - 1)*5,
               bridge_race =
                   case_when(mbrace == 1 ~ "white",
                             mbrace == 2 ~ "black",
                             mbrace == 3 ~ "aia",
                             mbrace == 4 ~ "api",
                             TRUE ~ "unknown"),
               hispanic =
                   case_when(mhisp_r == 0 ~ "non_hisp",
                             mhisp_r == 9 ~ "unknown",
                             TRUE ~ "hisp")) %>%
        select(bridge_race, hispanic, mother_age)
    ## Aggregate birth counts by mother_age for nine race/ethnicity slices:
    ## total; non-hispanic white/black/other; all white/black/other
    ## (hispanic included -- NOTE(review): despite the "all_hisp_*" labels);
    ## non-hispanic total; hispanic total.
    agg_df <- bind_rows(
        temp_df %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "total"),
        temp_df %>%
            filter(hispanic == "non_hisp",
                   bridge_race == "white") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_white"),
        temp_df %>%
            filter(hispanic == "non_hisp",
                   bridge_race == "black") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_black"),
        temp_df %>%
            filter(hispanic == "non_hisp",
                   bridge_race %in% c("aia", "api")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_other"),
        temp_df %>%
            filter(bridge_race %in% c("white")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "all_hisp_white"),
        temp_df %>%
            filter(bridge_race %in% c("black")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "all_hisp_black"),
        temp_df %>%
            filter(bridge_race %in% c("aia", "api")) %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "all_hisp_other"),
        temp_df %>%
            filter(hispanic == "non_hisp") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "non_hisp_total"),
        temp_df %>%
            filter(hispanic == "hisp") %>%
            group_by(mother_age) %>%
            summarize(births = n()) %>%
            mutate(m_race_eth = "hisp_total")
    ) %>% mutate(year = year)
    saveRDS(agg_df, sprintf("./raw_data/agg_df_%s.RDS", year))
}
## Combine all per-year aggregate files into one data frame.
## NOTE(review): reduce(rbind) re-copies the accumulated frame on every step
## (quadratic in total rows); dplyr::bind_rows() is the linear-time
## equivalent for these homogeneous per-year tibbles.
live_births <- list.files("./raw_data", "\\.RDS", full.names = TRUE) %>%
    map(readRDS) %>%
    reduce(rbind)
## NOTE(review): devtools::use_data() is deprecated in current devtools;
## usethis::use_data() is the maintained equivalent.
devtools::use_data(live_births, overwrite = TRUE)
|
# R code of Steegen, Tuerlinckx, Gelman & Vanpaemel (2016). Increasing Transparency through a Multiverse Analysis. Perspectives on Psychological Science, 11, 702-712
# This file contains code to perform a multiverse analysis on two datasets from Durante, K.M., Rae A., & Griskevicius, V. (2013). The fluctuating female vote: Politics, religion and the ovulatory cycle. Psychological Science, 24, 1007-1016.
# As described in Steegen et al (2016), six different multiverse analyses are performed; one on the first dataset (Study 1), and five on the second dataset (Study 2).
# In each multiverse analysis, a data multiverse is created, and all associated p-values are shown in an histogram.
# For four analyses, the p-values are shown in a grid, allowing a closer inspection.
# The raw data are stored in durante_etal_2013_study1.txt and durante_etal_2013_study2.txt.
# These two files are based on, but not identical to, two excel files Kristina Durante kindly shared with us.
# The changes as compared to the original files include:
# We changed the WorkerID, which consisted of a complicated string of letters and numbers in the original data file, to numbers in ascending order, for simplicity
# We changed the variable name Cycle Length from the original file to Reported Cycle Length, for clarity
# We made minor changes to some variable names, for clarity
# We fixed some coding errors (see supplemental material)
# We converted data that were due to coding errors we could not fix to NA (see supplemental material)
# The data files do not contain the raw variables that were included in the survey by Durante et al. (2013), but were not used in the analyses reported by Durante et al. (2013) or by Steegen et al. (2016) (see supplemental material)
# The data files do not contain the processed data from the Durante et al. (2013) data files that we received
# Kristina Durante has given us permission to publicly share these data files.
# leuven, sept 30, 2016
## Analysis setup.
## NOTE(review): setwd() with an absolute path and rm(list = ls()) make this
## script machine-specific and session-destructive; kept to preserve behavior.
setwd("~/Desktop/R Github/Multiverse") #change to correct working directory
#install.packages("ggplot2")
#install.packages("Rmisc")
library(ggplot2)
library(Rmisc)
rm(list = ls())
## Analysis ids 1-6 select the dataset (study 1 vs study 2) and the
## dependent variable (paired positionally with deplist below).
annlist <- c(1, 2, 3, 4, 5, 6) # which analyses? 1 = religiosity (study 1), 2 = religiosity (study 2), 3 = fiscal political attitudes,
# 4 = social political attitudes, 5 = voting preferences, 6 = donation preferences
deplist <- c("RelComp","RelComp","FiscConsComp","SocConsComp","Vote","Donate") # list of dependent variables
all.data.multiverses <- list() # all data multiverses for all the analyses specified in annlist
all.p <- list() # all p-values for all the analyses specified in annlist
##############################################################################################
######################## compute multiverse of statistical results ###########################
##############################################################################################
## For each analysis in 'annlist': read the raw data, construct the full
## multiverse of reasonably processed data sets (Table 1, p. 705), fit the
## fertility x relationship-status model on every data set, and store the
## data multiverse and the p-value multiverse in the global result lists.
for (iii in 1:length(annlist)) { # for each analysis
## NOTE(review): clearing the workspace inside the loop keeps only the result
## containers across iterations; discouraged in reusable code, but deliberate
## in this reproduction script.
rm(list = setdiff(ls(), c("annlist", "deplist", "all.p", "all.data.multiverses",
"iii")))
ann <- annlist[iii] #create analysis identifier
###### read in raw data
if (ann == 1) { #if ann == 1 >>> religiosity
mydata.raw <- read.csv2("durante_etal_2013_study1.txt", sep = "") #read in raw data from Study 1
}
if (ann > 1) { #if ann > 1 >>> religiosity, fiscal political attitude, etc.
mydata.raw <- read.csv2("durante_etal_2013_study2.txt", sep = "", na.strings = "NA") #read in raw data from Study 2
mydata.raw$StartDateNext <- as.Date(mydata.raw$StartDateNext, format = "%m/%d/%y")
}
# convert the date columns shared by both studies from m/d/y strings to Date
mydata.raw$DateTesting <- as.Date(mydata.raw$DateTesting, format = "%m/%d/%y")
mydata.raw$StartDateofLastPeriod <- as.Date(mydata.raw$StartDateofLastPeriod, format = "%m/%d/%y")
mydata.raw$StartDateofPeriodBeforeLast <- as.Date(mydata.raw$StartDateofPeriodBeforeLast,
format = "%m/%d/%y")
###### process raw data to create multiverse of data sets
no.nmo <- ifelse(ann == 1, 2, 3) # number of next menstrual onset assessment (nmo) processing choices
no.f <- 5 # number of fertility (f) assessment processing choices
no.r <- 3 # number of relationship status(r) assessment processing choices
no.ecl <- 3 # number of exclusion based on cycle length (ecl) processing choices
no.ec <- 2 # number of exclusion based on certainty ratings (ec) processing choices
data.multiverse <- array(list(), dim = c(no.nmo, no.f, no.r, no.ecl, no.ec)) # multiverse of data sets
p.multiverse <- array(0, dim = c(no.nmo, no.f, no.r, no.ecl, no.ec)) # multiverse of p values
# data processing ("proc") with a single option
mydata.proc <- mydata.raw
# create religiosity score
mydata.proc$RelComp <- round(rowMeans(cbind(mydata.proc$Rel1, mydata.proc$Rel2,
mydata.proc$Rel3), na.rm = TRUE), digits = 2)
#or mydata.proc %>% select(Rel1, Rel2, Rel3) %>% cbind() # tidyverse
# create fiscal and social political attitudes score
if (ann > 1) { #reverse coding?
mydata.proc$Abortion <- abs(7 - mydata.proc$Abortion) + 1 #social
mydata.proc$StemCell <- abs(7 - mydata.proc$StemCell) + 1 #social
mydata.proc$Marijuana <- abs(7 - mydata.proc$Marijuana) + 1 #social
mydata.proc$RichTax <- abs(7 - mydata.proc$RichTax) + 1 #fiscal
mydata.proc$StLiving <- abs(7 - mydata.proc$StLiving) + 1 #fiscal
mydata.proc$Profit <- abs(7 - mydata.proc$Profit) + 1 #fiscal
mydata.proc$FiscConsComp <- round(rowMeans(cbind(mydata.proc$FreeMarket,
mydata.proc$PrivSocialSec, mydata.proc$RichTax, mydata.proc$StLiving,
mydata.proc$Profit), na.rm = TRUE), digits = 2) # create fiscal political attitudes score
mydata.proc$SocConsComp <- round(rowMeans(cbind(mydata.proc$Marriage,
mydata.proc$RestrictAbortion, mydata.proc$Abortion, mydata.proc$StemCell,
mydata.proc$Marijuana), na.rm = TRUE), digits = 2) # create social political attitudes score
}
mydata.proc.init <- mydata.proc
# data processing with multiple options (see Table 1, p. 705)
for (i in 1:no.nmo){ # for each nmo option
for (j in 1:no.f){ # for each f option
for (k in 1:no.r){ # for each r option
for (l in 1:no.ecl){ # for each ecl option
for (m in 1:no.ec){ # for each ec option
mydata.proc <- mydata.proc.init #initialize processed data with variables with a single option only
# next menstrual onset (nmo) assessment
mydata.proc$ComputedCycleLength <- mydata.proc$StartDateofLastPeriod - mydata.proc$StartDateofPeriodBeforeLast # compute cycle length
if (i == 1) {
mydata.proc$NextMenstrualOnset <- mydata.proc$StartDateofLastPeriod + mydata.proc$ComputedCycleLength # first nmo option: based on computed cycle length
} else if (i == 2) {
mydata.proc$NextMenstrualOnset <- mydata.proc$StartDateofLastPeriod + mydata.proc$ReportedCycleLength # second nmo option: based on reported cycle length
} else if (i == 3) {
mydata.proc$NextMenstrualOnset <- mydata.proc$StartDateNext # third nmo option: based on reported estimate of next menstrual onset
}
mydata.proc$DaysBeforeNextOnset <- mydata.proc$NextMenstrualOnset - mydata.proc$DateTesting # compute days before next menstrual onset
mydata.proc$CycleDay <- 28 - mydata.proc$DaysBeforeNextOnset # compute cycle day
# clamp cycle day into the 1..28 range
mydata.proc$CycleDay <- ifelse(mydata.proc$CycleDay <1, 1, mydata.proc$CycleDay)
mydata.proc$CycleDay <- ifelse(mydata.proc$CycleDay > 28, 28, mydata.proc$CycleDay)
# as described in the Supplemental Material, for two participants, we did not manage to recover the value of Cycle Day. When Cycle Day is determined based on nmo1, we
# adopt the Cycle Day value from the original data file to ensure that the results of our single data set analysis are identical
# to the single data set analysis in Durante et al. (2013)
if (ann > 1 & i == 1) {
mydata.proc$CycleDay[mydata.proc$WorkerID == 15] <- 11
mydata.proc$CycleDay[mydata.proc$WorkerID == 16] <- 18
} # fixing 2 problematic cases
# fertility assessment
high.lower <- c(7, 6, 9, 8, 9) # lower boundaries of different options for 'high fertility' cycle days
high.upper <- c(14, 14, 17, 14, 17) # upper boundaries of different options for 'high fertility' cycle days
low1.lower <- c(17, 17, 18, 1, 1) # lower boundaries of different options for 'low fertility' cycle days (first range)
low1.upper <- c(25, 27, 25, 7, 8) # upper boundaries of different options for 'low fertility' cycle days (first range)
low2.lower <- c(17, 17, 18, 15, 18) # lower boundaries of different options for 'low fertility' cycle days (second range)
low2.upper <- c(25, 27, 25, 28, 28) # upper boundaries of different options for 'low fertility' cycle days (second range)
# (a) F1: high = cycle days 7–14; low = cycle days 17–25 >>> high.lower = 7, high.upper = 14
# (b) F2: high = cycle days 6–14; low = cycle days 17–27
# (c) F3: high = cycle days 9–17; low = cycle days 18–25
# (d) F4: high = cycle days 8–14; low = cycle days 1–7 and
# 15–28
# (e) F5: high = cycle days 9–17; low = cycle days 1–8 and
# 18–28
mydata.proc$Fertility <- rep(NA, dim(mydata.proc)[1]) # create fertility variable
mydata.proc$Fertility[mydata.proc$CycleDay >= high.lower[j] & mydata.proc$CycleDay <=
high.upper[j]] <- "High" # assign 'High' to fertility if cycle day is within the high fertility range
mydata.proc$Fertility[mydata.proc$CycleDay >= low1.lower[j] & mydata.proc$CycleDay <=
low1.upper[j]] <- "Low" # assign 'Low' to fertility if cycle day is within the first low fertility range
mydata.proc$Fertility[mydata.proc$CycleDay >= low2.lower[j] & mydata.proc$CycleDay <=
low2.upper[j]] <- "Low" # assign 'Low' to fertility if cycle day is within the second low fertility range
# relationship status assessment
if (k == 1) {
mydata.proc$RelationshipStatus <- ifelse(mydata.proc$Relationship <=
2, "Single", "Relationship") # first r option: single = response options 1 and 2; relationship = response options 3 and 4
} else if (k == 2) {
mydata.proc$RelationshipStatus <- ifelse(mydata.proc$Relationship ==
1, "Single", "Relationship") # second r option: single = response option 1, relationship = response options 2, 3 and 4
} else if (k == 3) {
mydata.proc$RelationshipStatus[mydata.proc$Relationship == 1] <- "Single"
mydata.proc$RelationshipStatus[mydata.proc$Relationship > 2 & mydata.proc$Relationship <
5] <- "Relationship" # third r option: single = response option 1, relationship = response options 3 and 4
}
# exclusion based on cycle length
if (l == 1) {
mydata.proc <- mydata.proc # first ecl option: no exclusion based on cycle length
} else if (l == 2) {
mydata.proc <- mydata.proc[!(mydata.proc$ComputedCycleLength <
25 | mydata.proc$ComputedCycleLength > 35), ] # second ecl option: exclusion based on computed cycle length
} else if (l == 3) {
mydata.proc <- mydata.proc[!(mydata.proc$ReportedCycleLength <
25 | mydata.proc$ReportedCycleLength > 35), ] # third ecl option: exclusion based on reported cycle length
}
# exclusion based on certainty ratings
if (m == 1) {
mydata.proc <- mydata.proc # first ec option: no exclusion based on certainty ratings
} else if (m == 2) {
mydata.proc <- mydata.proc[!(mydata.proc$Sure1 < 6 | mydata.proc$Sure2 <
6), ] # second ec option: exclusion based on variables Sure1 and Sure2
}
data.multiverse[[i, j, k, l, m]] = mydata.proc # store processed data set in the data multiverse
}
}
}
}
}
###### analyze multiverse of data sets to create multiverse of statistical results #######################################################
for (i in 1:no.nmo){ # for each nmo option
for (j in 1:no.f){ # for each f option
for (k in 1:no.r){ # for each r option
for (l in 1:no.ecl){ # for each ecl option
for (m in 1:no.ec){ # for each ec option
# NOTE(review): these two factor() calls modify the leftover 'mydata.proc'
# from the processing loops above, not data.multiverse[[i, j, k, l, m]],
# which is what is actually modelled below; lm()/glm() coerce character
# predictors to factors themselves, so the fits appear unaffected — confirm
# this is intended before relying on explicit factor level order.
mydata.proc$Fertility <- factor(mydata.proc$Fertility)
mydata.proc$RelationshipStatus <- factor(mydata.proc$RelationshipStatus)
if (ann <= 4) {
an = lm(paste(deplist[ann], "~Fertility*RelationshipStatus"), data.multiverse[[i, j, k, l, m]]) # for analyses 1 to 4, perform an ANOVA on the processed data set
}
if (ann >= 5) {
an = glm(paste(deplist[ann], "~Fertility*RelationshipStatus"), family = binomial(link = "logit"),
data.multiverse[[i, j, k, l, m]]) # for analyses 5 and 6, perform a logistic regression on the processed data set
}
summar <- summary(an)
# assumes the interaction term is the 4th coefficient row (intercept,
# fertility, relationship status, interaction) — holds for two 2-level predictors
p.multiverse[i, j, k, l, m] <- summar$coefficients[4, 4] # store the p-value of the fertility x relationship interaction
}
}
}
}
}
p.multiverse[1, , , 3, ] <- NA # when participants are excluded based on reported cycle length, we do not consider cycle day assessment based on computed cycle length
p.multiverse[2, , , 2, ] <- NA # when participants are excluded based on computed cycle length, we do not consider cycle day assessment based on reported cycle length
all.data.multiverses[[iii]] <- data.multiverse
all.p[[iii]] <- p.multiverse
}
# As a check, extract the first element of each p-value array, i.e. the p value
# for the first specification (nmo1/f1/r1/ecl1/ec1), which should reproduce the
# single-data-set analyses of Durante et al. (2013)
sapply(all.p, "[[", 1)
# Proportion of data sets with a NONsignificant interaction effect (p > .05;
# cf. p. 707) — note the comparison below is '> .05', so this counts
# nonsignificant, not significant, results
# Proportion of non-missing multiverse results with a NONsignificant
# fertility x relationship interaction (p > .05) for analysis 'ann'.
# Reads the global 'all.p' list filled by the multiverse loop above.
# Equivalent to the original length(which(x > .05)) / length(which(!is.na(x)))
# form: which() ignores NA comparisons, so both count only non-missing values.
f <- function(ann) {
  mean(all.p[[ann]] > .05, na.rm = TRUE)
}
# Report the nonsignificant proportion for each analysis (cf. p. 707).
# vapply() instead of sapply() guarantees a numeric vector result regardless
# of input length.
vapply(annlist, f, numeric(1))
# Graphs ---------------------------------------------------------------------
# Panel titles, one per analysis in 'annlist'.
graphnames <- c(
  "Religiosity (Study 1)", "Religiosity (Study 2)",
  "Fiscal political attitudes", "Social political attitudes",
  "Voting preferences", "Donation preferences"
)
# Containers for the p-value histograms and the plotted p values.
hists <- list()
pv <- list()
# Axis labels per panel: y label only in the left column, x label only in the
# bottom row of the 3 x 2 figure layout.
ylabs <- c("Frequency", "", "Frequency", "", "Frequency", "")
xlabs <- c("", "", "", "", "p", "p")
## Draw a histogram of the multiverse p values for each analysis. Each
## iteration runs inside local() so that loop temporaries (ann, p, df, ...)
## do not leak into the global workspace; finished plots are written to the
## global 'hists' list via '<<-'.
for (iii in 1:length(annlist)) local({
ann <- annlist[iii]
p <- all.p[[ann]]
# Study 1 has 2 nmo options, leaving 120 (= 15 x 8) non-NA specifications;
# Study 2 has 3 nmo options, leaving 210 (= 15 x 14) after the NA masking above
if (ann == 1) {
cat1 <- rep(c(1:15), 8)
cat2 <- rep(1:8, each = 15)
} else {
cat1 <- rep(c(1:15), 14)
cat2 <- rep(1:14, each = 15)
}
df <- data.frame(category1 = cat1, category2 = cat2, value = (as.vector(p[!is.na(p)])))
df[["sign"]] = ifelse(df[["value"]] <= 0.05, "significant", "nonsignificant")
# NOTE(review): this subassignment modifies a local copy of 'pv' only; the
# global 'pv' created above remains empty after the loop. The plot still finds
# pv[[ann]] later because qplot() captures this local() environment — confirm
# that the global 'pv' is not needed elsewhere.
pv[[ann]]=df$value
hists[[ann]] <<- qplot(pv[[ann]], geom = "histogram", binwidth = 0.01) + xlim(0,1) + geom_histogram(colour = "black", fill = "white", binwidth = 0.01) +
xlab(xlabs[[ann]]) + ylab(ylabs[[ann]]) + geom_vline(xintercept = 0.05, colour = "red",
linetype = "longdash") + ggtitle(graphnames[ann]) + theme(plot.title = element_text(lineheight = 0.8,
face = "bold")) + theme(axis.text = element_text(size = 12), axis.title = element_text(size = 16))
#windows(8, 5)
print(hists[[ann]])
rm(p)
rm(df)
})
# grids of p-values
grids <- list()
## Specification-grid plots of all multiverse p values. In the paper, grids
## are shown for analyses 2, 4, 5 and 6 only (all based on Study 2 data).
## NOTE(review): no.f, no.r, no.ec, no.ecl and no.nmo are leftovers from the
## last iteration of the multiverse loop (ann = 6, Study 2, so no.nmo = 3);
## this only works because every analysis plotted here uses the Study 2 layout.
for (iii in c(2,4,5,6)){ #in the paper, we only show the grids for analyses 2,4,5, and 6
ann <- annlist[iii]
p <- all.p[[ann]]
p.grid <- array(0,dim=c(no.f, no.r, no.ec, no.ecl, no.nmo)) # change the dimensions of the p multiverse for visualization purposes
for (jj in 1:3){
for (jjj in 1:3){
p.grid[, , , jj, jjj] <- p[jjj, , , jj, ]
}
}
cat1 <- rep(c(1:15), 14)
cat2 <- rep(1:14, each = 15)
# drop the NA cells (excluded nmo x ecl combinations), leaving 15 x 14 values
df <- data.frame(category1 = cat1, category2 = cat2, value = as.vector(p.grid[!is.na(p.grid)]))
df[["sign"]] = ifelse(df[["value"]] <= 0.05, "significant", "nonsignificant")
grids[[ann]] <- ggplot(df, aes(x = category1, y = category2, fill = sign)) +
geom_tile(colour = "black") +
geom_text(label = round((df$value), 2), size = 3, colour = "black") +
# draw relationship branches vertical
geom_segment(aes(x = 3, y = -1.7, xend = 3, yend = -0.3)) +
geom_segment(aes(x = 8, y = -1.7, xend = 8, yend = -0.3)) +
geom_segment(aes(x = 13, y = -1.7, xend = 13, yend = -0.3)) +
# draw relationship branches horizontal
geom_segment(aes(x = 3, y = -1.7, xend = 13, yend = -1.7)) +
# draw fertility branches vertical
geom_segment(aes(x = 1, y = -0.3, xend = 1, yend = 0.5)) +
geom_segment(aes(x = 2, y = -0.3, xend = 2, yend = 0.5)) +
geom_segment(aes(x = 3, y = -0.3, xend = 3, yend = 0.5)) +
geom_segment(aes(x = 4, y = -0.3, xend = 4, yend = 0.5)) +
geom_segment(aes(x = 5, y = -0.3, xend = 5, yend = 0.5)) +
geom_segment(aes(x = 6, y = -0.3, xend = 6, yend = 0.5)) +
geom_segment(aes(x = 7, y = -0.3, xend = 7, yend = 0.5)) +
geom_segment(aes(x = 8, y = -0.3, xend = 8, yend = 0.5)) +
geom_segment(aes(x = 9, y = -0.3, xend = 9, yend = 0.5)) +
geom_segment(aes(x = 10, y = -0.3, xend = 10, yend = 0.5)) +
geom_segment(aes(x = 11, y = -0.3, xend = 11, yend = 0.5)) +
geom_segment(aes(x = 12, y = -0.3, xend = 12, yend = 0.5)) +
geom_segment(aes(x = 13, y = -0.3, xend = 13, yend = 0.5)) +
geom_segment(aes(x = 14, y = -0.3, xend = 14, yend = 0.5)) +
geom_segment(aes(x = 15, y = -0.3, xend = 15, yend = 0.5)) +
# draw fertility branches horizontal
geom_segment(aes(x = 1, y = -0.3, xend = 5, yend = -0.3)) +
geom_segment(aes(x = 6, y = -0.3, xend = 10, yend = -0.3)) +
geom_segment(aes(x = 11, y = -0.3, xend = 15, yend = -0.3)) +
# draw menstrual onset branches horizontal
geom_segment(aes(x = 18.5, y = 2.5, xend = 20.5, yend = 2.5)) +
geom_segment(aes(x = 18.5, y = 6.5, xend = 20.5, yend = 6.5)) +
geom_segment(aes(x = 18.5, y = 11.5, xend = 20.5, yend = 11.5)) +
# draw menstrual onset branches vertical
geom_segment(aes(x = 20.5, y = 2.5, xend = 20.5, yend = 11.5)) +
# draw exclusion cycle length branches horizontal
geom_segment(aes(x = 16.5, y = 1.5, xend = 18.5, yend = 1.5)) +
geom_segment(aes(x = 16.5, y = 3.5, xend = 18.5, yend = 3.5)) +
geom_segment(aes(x = 16.5, y = 5.5, xend = 18.5, yend = 5.5)) +
geom_segment(aes(x = 16.5, y = 7.5, xend = 18.5, yend = 7.5)) +
geom_segment(aes(x = 16.5, y = 9.5, xend = 18.5, yend = 9.5)) +
geom_segment(aes(x = 16.5, y = 11.5, xend = 18.5, yend = 11.5)) +
geom_segment(aes(x = 16.5, y = 13.5, xend = 18.5, yend = 13.5)) +
# draw exclusion cycle length branches vertical
geom_segment(aes(x = 18.5, y = 1.5, xend = 18.5, yend = 3.5)) +
geom_segment(aes(x = 18.5, y = 5.5, xend = 18.5, yend = 7.5)) +
geom_segment(aes(x = 18.5, y = 9.5, xend = 18.5, yend = 13.5)) +
# draw exclusion sure branches horizontal
geom_segment(aes(x = 15.5, y = 1, xend = 16.5, yend = 1)) +
geom_segment(aes(x = 15.5, y = 2, xend = 16.5, yend = 2)) +
geom_segment(aes(x = 15.5, y = 3, xend = 16.5, yend = 3)) +
geom_segment(aes(x = 15.5, y = 4, xend = 16.5, yend = 4)) +
geom_segment(aes(x = 15.5, y = 5, xend = 16.5, yend = 5)) +
geom_segment(aes(x = 15.5, y = 6, xend = 16.5, yend = 6)) +
geom_segment(aes(x = 15.5, y = 7, xend = 16.5, yend = 7)) +
geom_segment(aes(x = 15.5, y = 8, xend = 16.5, yend = 8)) +
geom_segment(aes(x = 15.5, y = 9, xend = 16.5, yend = 9)) +
geom_segment(aes(x = 15.5, y = 10, xend = 16.5, yend = 10)) +
geom_segment(aes(x = 15.5, y = 11, xend = 16.5, yend = 11)) +
geom_segment(aes(x = 15.5, y = 12, xend = 16.5, yend = 12)) +
geom_segment(aes(x = 15.5, y = 13, xend = 16.5, yend = 13)) +
geom_segment(aes(x = 15.5, y = 14, xend = 16.5, yend = 14)) +
# draw exclusion sure branches vertical
geom_segment(aes(x = 16.5, y = 1, xend = 16.5, yend = 2)) +
geom_segment(aes(x = 16.5, y = 3, xend = 16.5, yend = 4)) +
geom_segment(aes(x = 16.5, y = 5, xend = 16.5, yend = 6)) +
geom_segment(aes(x = 16.5, y = 7, xend = 16.5, yend = 8)) +
geom_segment(aes(x = 16.5, y = 9, xend = 16.5, yend = 10)) +
geom_segment(aes(x = 16.5, y = 11, xend = 16.5, yend = 12)) +
geom_segment(aes(x = 16.5, y = 13, xend = 16.5, yend = 14)) +
# branch labels for relationship, fertility, nmo, ecl and ec options
annotate("text", x = c(3, 8, 13), y = -2.2, label = c("R1", "R2", "R3")) +
annotate("text", x = 1:15, y = -0.8, label = rep(c("F1", "F2", "F3",
"F4", "F5"), 3)) +
annotate("text", x = 20, y = c(2, 6, 11), label = c("NMO1", "NMO2",
"NMO3")) +
annotate("text", x = 18, y = c(1, 3), label = c("ECL1", "ECL2")) +
annotate("text", x = 18, y = c(5, 7), label = c("ECL1", "ECL3")) +
annotate("text", x = 18, y = c(9, 11, 13), label = c("ECL1", "ECL2", "ECL3")) +
annotate("text", x = 16, y = c(0.7, 1.7, 2.7, 3.7, 4.7, 5.7, 6.7, 7.7, 8.7,
9.7, 10.7, 11.7, 12.7, 13.7), label = rep(c("EC1", "EC2"), 7)) +
scale_fill_manual(values = c(significant = "grey", nonsignificant = "white")) +
# NOTE(review): category1 is numeric, so scale_x_discrete() on a continuous
# aesthetic relies on lenient behaviour of older ggplot2 versions — verify
# against the installed ggplot2 version.
scale_x_discrete(expand = c(0, 0)) + scale_y_reverse() + ggtitle(graphnames[ann]) +
theme(plot.title = element_text(lineheight = 0.8, face = "bold")) +
theme(panel.grid.minor = element_blank()) + theme(panel.grid.major = element_blank()) +
theme(axis.ticks = element_blank(), axis.text.x = element_blank(),
axis.text.y = element_blank()) + theme(panel.background = element_rect(fill = "transparent")) +
theme(legend.position = "none") + theme() + xlab("") + ylab("")
# windows(30, 20)
#windows(10, 7)
print(grids[[ann]]) # '[[' extracts the plot itself; '[' would print a one-element list wrapper
rm(df)
rm(p)
rm(p.grid)
}
| /multiverse analysis steegen et al 2016_osf.R | no_license | Tanainan/Multiverse | R | false | false | 23,611 | r | # R code of Steegen, Tuerlinckx, Gelman & Vanpaemel (2016). Increasing Transparency through a Multiverse Analysis. Perspectives on Psychological Science, 11, 702-712
# This file contains code to perform a multiverse analysis on two datasets from Durante, K.M., Rae A., & Griskevicius, V. (2013). The fluctuating female vote: Politics, religion and the ovulatory cycle. Psychological Science, 24, 1007-1016.
# As described in Steegen et al (2016), six different multiverse analyses are performed; one on the first dataset (Study 1), and five on the second dataset (Study 2).
# In each multiverse analysis, a data multiverse is created, and all associated p-values are shown in an histogram.
# For four analyses, the p-values are shown in a grid, allowing a closer inspection.
# The raw data are stored in durante_etal_2013_study1.txt and durante_etal_2013_study2.txt.
# These two files are based on, but not identical to, two excel files Kristina Durante kindly shared with us.
# The changes as compared to the original files include:
# We changed the WorkerID, which consisted of a complicated string of letters and numbers in the original data file, to numbers in ascending order, for simplicity
# We changed the variable name Cycle Length from the original file to Reported Cycle Length, for clarity
# We made minor changes to some variable names, for clarity
# We fixed some coding errors (see supplemental material)
# We converted data that were due to coding errors we could not fix to NA (see supplemental material)
# The data files do not contain the raw variables that were included in the survey by Durante et al. (2013), but were not used in the analyses reported by Durante et al. (2013) or by Steegen et al. (2016) (see supplemental material)
# The data files do not contain the processed data from the Durante et al. (2013) data files that we received
# Kristina Durante has given us permission to publicly share these data files.
# leuven, sept 30, 2016
## Script setup: working directory, plotting packages, and result containers.
## NOTE(review): setwd() and rm(list = ls()) are script-reset idioms that are
## discouraged in reusable code; they are kept to reproduce the published
## analysis script as-is.
setwd("~/Desktop/R Github/Multiverse") #change to correct working directory
#install.packages("ggplot2")
#install.packages("Rmisc")
library(ggplot2)
library(Rmisc)
rm(list = ls())
annlist <- c(1, 2, 3, 4, 5, 6) # which analyses? 1 = religiosity (study 1), 2 = religiosity (study 2), 3 = fiscal political attitudes,
# 4 = social political attitudes, 5 = voting preferences, 6 = donation preferences
deplist <- c("RelComp","RelComp","FiscConsComp","SocConsComp","Vote","Donate") # list of dependent variables
all.data.multiverses <- list() # all data multiverses for all the analyses specified in annlist
all.p <- list() # all p-values for all the analyses specified in annlist
##############################################################################################
######################## compute multiverse of statistical results ###########################
##############################################################################################
## For each analysis in 'annlist': read the raw data, construct the full
## multiverse of reasonably processed data sets (Table 1, p. 705), fit the
## fertility x relationship-status model on every data set, and store the
## data multiverse and the p-value multiverse in the global result lists.
for (iii in 1:length(annlist)) { # for each analysis
## NOTE(review): clearing the workspace inside the loop keeps only the result
## containers across iterations; discouraged in reusable code, but deliberate
## in this reproduction script.
rm(list = setdiff(ls(), c("annlist", "deplist", "all.p", "all.data.multiverses",
"iii")))
ann <- annlist[iii] #create analysis identifier
###### read in raw data
if (ann == 1) { #if ann == 1 >>> religiosity
mydata.raw <- read.csv2("durante_etal_2013_study1.txt", sep = "") #read in raw data from Study 1
}
if (ann > 1) { #if ann > 1 >>> religiosity, fiscal political attitude, etc.
mydata.raw <- read.csv2("durante_etal_2013_study2.txt", sep = "", na.strings = "NA") #read in raw data from Study 2
mydata.raw$StartDateNext <- as.Date(mydata.raw$StartDateNext, format = "%m/%d/%y")
}
# convert the date columns shared by both studies from m/d/y strings to Date
mydata.raw$DateTesting <- as.Date(mydata.raw$DateTesting, format = "%m/%d/%y")
mydata.raw$StartDateofLastPeriod <- as.Date(mydata.raw$StartDateofLastPeriod, format = "%m/%d/%y")
mydata.raw$StartDateofPeriodBeforeLast <- as.Date(mydata.raw$StartDateofPeriodBeforeLast,
format = "%m/%d/%y")
###### process raw data to create multiverse of data sets
no.nmo <- ifelse(ann == 1, 2, 3) # number of next menstrual onset assessment (nmo) processing choices
no.f <- 5 # number of fertility (f) assessment processing choices
no.r <- 3 # number of relationship status(r) assessment processing choices
no.ecl <- 3 # number of exclusion based on cycle length (ecl) processing choices
no.ec <- 2 # number of exclusion based on certainty ratings (ec) processing choices
data.multiverse <- array(list(), dim = c(no.nmo, no.f, no.r, no.ecl, no.ec)) # multiverse of data sets
p.multiverse <- array(0, dim = c(no.nmo, no.f, no.r, no.ecl, no.ec)) # multiverse of p values
# data processing ("proc") with a single option
mydata.proc <- mydata.raw
# create religiosity score
mydata.proc$RelComp <- round(rowMeans(cbind(mydata.proc$Rel1, mydata.proc$Rel2,
mydata.proc$Rel3), na.rm = TRUE), digits = 2)
#or mydata.proc %>% select(Rel1, Rel2, Rel3) %>% cbind() # tidyverse
# create fiscal and social political attitudes score
if (ann > 1) { #reverse coding?
mydata.proc$Abortion <- abs(7 - mydata.proc$Abortion) + 1 #social
mydata.proc$StemCell <- abs(7 - mydata.proc$StemCell) + 1 #social
mydata.proc$Marijuana <- abs(7 - mydata.proc$Marijuana) + 1 #social
mydata.proc$RichTax <- abs(7 - mydata.proc$RichTax) + 1 #fiscal
mydata.proc$StLiving <- abs(7 - mydata.proc$StLiving) + 1 #fiscal
mydata.proc$Profit <- abs(7 - mydata.proc$Profit) + 1 #fiscal
mydata.proc$FiscConsComp <- round(rowMeans(cbind(mydata.proc$FreeMarket,
mydata.proc$PrivSocialSec, mydata.proc$RichTax, mydata.proc$StLiving,
mydata.proc$Profit), na.rm = TRUE), digits = 2) # create fiscal political attitudes score
mydata.proc$SocConsComp <- round(rowMeans(cbind(mydata.proc$Marriage,
mydata.proc$RestrictAbortion, mydata.proc$Abortion, mydata.proc$StemCell,
mydata.proc$Marijuana), na.rm = TRUE), digits = 2) # create social political attitudes score
}
mydata.proc.init <- mydata.proc
# data processing with multiple options (see Table 1, p. 705)
for (i in 1:no.nmo){ # for each nmo option
for (j in 1:no.f){ # for each f option
for (k in 1:no.r){ # for each r option
for (l in 1:no.ecl){ # for each ecl option
for (m in 1:no.ec){ # for each ec option
mydata.proc <- mydata.proc.init #initialize processed data with variables with a single option only
# next menstrual onset (nmo) assessment
mydata.proc$ComputedCycleLength <- mydata.proc$StartDateofLastPeriod - mydata.proc$StartDateofPeriodBeforeLast # compute cycle length
if (i == 1) {
mydata.proc$NextMenstrualOnset <- mydata.proc$StartDateofLastPeriod + mydata.proc$ComputedCycleLength # first nmo option: based on computed cycle length
} else if (i == 2) {
mydata.proc$NextMenstrualOnset <- mydata.proc$StartDateofLastPeriod + mydata.proc$ReportedCycleLength # second nmo option: based on reported cycle length
} else if (i == 3) {
mydata.proc$NextMenstrualOnset <- mydata.proc$StartDateNext # third nmo option: based on reported estimate of next menstrual onset
}
mydata.proc$DaysBeforeNextOnset <- mydata.proc$NextMenstrualOnset - mydata.proc$DateTesting # compute days before next menstrual onset
mydata.proc$CycleDay <- 28 - mydata.proc$DaysBeforeNextOnset # compute cycle day
# clamp cycle day into the 1..28 range
mydata.proc$CycleDay <- ifelse(mydata.proc$CycleDay <1, 1, mydata.proc$CycleDay)
mydata.proc$CycleDay <- ifelse(mydata.proc$CycleDay > 28, 28, mydata.proc$CycleDay)
# as described in the Supplemental Material, for two participants, we did not manage to recover the value of Cycle Day. When Cycle Day is determined based on nmo1, we
# adopt the Cycle Day value from the original data file to ensure that the results of our single data set analysis are identical
# to the single data set analysis in Durante et al. (2013)
if (ann > 1 & i == 1) {
mydata.proc$CycleDay[mydata.proc$WorkerID == 15] <- 11
mydata.proc$CycleDay[mydata.proc$WorkerID == 16] <- 18
} # fixing 2 problematic cases
# fertility assessment
high.lower <- c(7, 6, 9, 8, 9) # lower boundaries of different options for 'high fertility' cycle days
high.upper <- c(14, 14, 17, 14, 17) # upper boundaries of different options for 'high fertility' cycle days
low1.lower <- c(17, 17, 18, 1, 1) # lower boundaries of different options for 'low fertility' cycle days (first range)
low1.upper <- c(25, 27, 25, 7, 8) # upper boundaries of different options for 'low fertility' cycle days (first range)
low2.lower <- c(17, 17, 18, 15, 18) # lower boundaries of different options for 'low fertility' cycle days (second range)
low2.upper <- c(25, 27, 25, 28, 28) # upper boundaries of different options for 'low fertility' cycle days (second range)
# (a) F1: high = cycle days 7–14; low = cycle days 17–25 >>> high.lower = 7, high.upper = 14
# (b) F2: high = cycle days 6–14; low = cycle days 17–27
# (c) F3: high = cycle days 9–17; low = cycle days 18–25
# (d) F4: high = cycle days 8–14; low = cycle days 1–7 and
# 15–28
# (e) F5: high = cycle days 9–17; low = cycle days 1–8 and
# 18–28
mydata.proc$Fertility <- rep(NA, dim(mydata.proc)[1]) # create fertility variable
mydata.proc$Fertility[mydata.proc$CycleDay >= high.lower[j] & mydata.proc$CycleDay <=
high.upper[j]] <- "High" # assign 'High' to fertility if cycle day is within the high fertility range
mydata.proc$Fertility[mydata.proc$CycleDay >= low1.lower[j] & mydata.proc$CycleDay <=
low1.upper[j]] <- "Low" # assign 'Low' to fertility if cycle day is within the first low fertility range
mydata.proc$Fertility[mydata.proc$CycleDay >= low2.lower[j] & mydata.proc$CycleDay <=
low2.upper[j]] <- "Low" # assign 'Low' to fertility if cycle day is within the second low fertility range
# relationship status assessment
if (k == 1) {
mydata.proc$RelationshipStatus <- ifelse(mydata.proc$Relationship <=
2, "Single", "Relationship") # first r option: single = response options 1 and 2; relationship = response options 3 and 4
} else if (k == 2) {
mydata.proc$RelationshipStatus <- ifelse(mydata.proc$Relationship ==
1, "Single", "Relationship") # second r option: single = response option 1, relationship = response options 2, 3 and 4
} else if (k == 3) {
mydata.proc$RelationshipStatus[mydata.proc$Relationship == 1] <- "Single"
mydata.proc$RelationshipStatus[mydata.proc$Relationship > 2 & mydata.proc$Relationship <
5] <- "Relationship" # third r option: single = response option 1, relationship = response options 3 and 4
}
# exclusion based on cycle length
if (l == 1) {
mydata.proc <- mydata.proc # first ecl option: no exclusion based on cycle length
} else if (l == 2) {
mydata.proc <- mydata.proc[!(mydata.proc$ComputedCycleLength <
25 | mydata.proc$ComputedCycleLength > 35), ] # second ecl option: exclusion based on computed cycle length
} else if (l == 3) {
mydata.proc <- mydata.proc[!(mydata.proc$ReportedCycleLength <
25 | mydata.proc$ReportedCycleLength > 35), ] # third ecl option: exclusion based on reported cycle length
}
# exclusion based on certainty ratings
if (m == 1) {
mydata.proc <- mydata.proc # first ec option: no exclusion based on certainty ratings
} else if (m == 2) {
mydata.proc <- mydata.proc[!(mydata.proc$Sure1 < 6 | mydata.proc$Sure2 <
6), ] # second ec option: exclusion based on variables Sure1 and Sure2
}
data.multiverse[[i, j, k, l, m]] = mydata.proc # store processed data set in the data multiverse
}
}
}
}
}
###### analyze multiverse of data sets to create multiverse of statistical results #######################################################
for (i in 1:no.nmo){ # for each nmo option
for (j in 1:no.f){ # for each f option
for (k in 1:no.r){ # for each r option
for (l in 1:no.ecl){ # for each ecl option
for (m in 1:no.ec){ # for each ec option
# NOTE(review): these two factor() calls modify the leftover 'mydata.proc'
# from the processing loops above, not data.multiverse[[i, j, k, l, m]],
# which is what is actually modelled below; lm()/glm() coerce character
# predictors to factors themselves, so the fits appear unaffected — confirm
# this is intended before relying on explicit factor level order.
mydata.proc$Fertility <- factor(mydata.proc$Fertility)
mydata.proc$RelationshipStatus <- factor(mydata.proc$RelationshipStatus)
if (ann <= 4) {
an = lm(paste(deplist[ann], "~Fertility*RelationshipStatus"), data.multiverse[[i, j, k, l, m]]) # for analyses 1 to 4, perform an ANOVA on the processed data set
}
if (ann >= 5) {
an = glm(paste(deplist[ann], "~Fertility*RelationshipStatus"), family = binomial(link = "logit"),
data.multiverse[[i, j, k, l, m]]) # for analyses 5 and 6, perform a logistic regression on the processed data set
}
summar <- summary(an)
# assumes the interaction term is the 4th coefficient row (intercept,
# fertility, relationship status, interaction) — holds for two 2-level predictors
p.multiverse[i, j, k, l, m] <- summar$coefficients[4, 4] # store the p-value of the fertility x relationship interaction
}
}
}
}
}
p.multiverse[1, , , 3, ] <- NA # when participants are excluded based on reported cycle length, we do not consider cycle day assessment based on computed cycle length
p.multiverse[2, , , 2, ] <- NA # when participants are excluded based on computed cycle length, we do not consider cycle day assessment based on reported cycle length
all.data.multiverses[[iii]] <- data.multiverse
all.p[[iii]] <- p.multiverse
}
# As a check, extract the first element of each p-value array, i.e. the p value
# for the first specification (nmo1/f1/r1/ecl1/ec1), which should reproduce the
# single-data-set analyses of Durante et al. (2013)
sapply(all.p, "[[", 1)
# Proportion of data sets with a NONsignificant interaction effect (p > .05;
# cf. p. 707) — note the comparison below is '> .05', so this counts
# nonsignificant, not significant, results
# Proportion of non-missing multiverse results with a NONsignificant
# fertility x relationship interaction (p > .05) for analysis 'ann'.
# Reads the global 'all.p' list filled by the multiverse loop above.
# Equivalent to the original length(which(x > .05)) / length(which(!is.na(x)))
# form: which() ignores NA comparisons, so both count only non-missing values.
f <- function(ann) {
  mean(all.p[[ann]] > .05, na.rm = TRUE)
}
# Report the nonsignificant proportion for each analysis (cf. p. 707).
# vapply() instead of sapply() guarantees a numeric vector result regardless
# of input length.
vapply(annlist, f, numeric(1))
# Graphs ---------------------------------------------------------------------
# Panel titles, one per analysis in 'annlist'.
graphnames <- c(
  "Religiosity (Study 1)", "Religiosity (Study 2)",
  "Fiscal political attitudes", "Social political attitudes",
  "Voting preferences", "Donation preferences"
)
# Containers for the p-value histograms and the plotted p values.
hists <- list()
pv <- list()
# Axis labels per panel: y label only in the left column, x label only in the
# bottom row of the 3 x 2 figure layout.
ylabs <- c("Frequency", "", "Frequency", "", "Frequency", "")
xlabs <- c("", "", "", "", "p", "p")
## Draw a histogram of the multiverse p values for each analysis. Each
## iteration runs inside local() so that loop temporaries (ann, p, df, ...)
## do not leak into the global workspace; finished plots are written to the
## global 'hists' list via '<<-'.
for (iii in 1:length(annlist)) local({
ann <- annlist[iii]
p <- all.p[[ann]]
# Study 1 has 2 nmo options, leaving 120 (= 15 x 8) non-NA specifications;
# Study 2 has 3 nmo options, leaving 210 (= 15 x 14) after the NA masking above
if (ann == 1) {
cat1 <- rep(c(1:15), 8)
cat2 <- rep(1:8, each = 15)
} else {
cat1 <- rep(c(1:15), 14)
cat2 <- rep(1:14, each = 15)
}
df <- data.frame(category1 = cat1, category2 = cat2, value = (as.vector(p[!is.na(p)])))
df[["sign"]] = ifelse(df[["value"]] <= 0.05, "significant", "nonsignificant")
# NOTE(review): this subassignment modifies a local copy of 'pv' only; the
# global 'pv' created above remains empty after the loop. The plot still finds
# pv[[ann]] later because qplot() captures this local() environment — confirm
# that the global 'pv' is not needed elsewhere.
pv[[ann]]=df$value
hists[[ann]] <<- qplot(pv[[ann]], geom = "histogram", binwidth = 0.01) + xlim(0,1) + geom_histogram(colour = "black", fill = "white", binwidth = 0.01) +
xlab(xlabs[[ann]]) + ylab(ylabs[[ann]]) + geom_vline(xintercept = 0.05, colour = "red",
linetype = "longdash") + ggtitle(graphnames[ann]) + theme(plot.title = element_text(lineheight = 0.8,
face = "bold")) + theme(axis.text = element_text(size = 12), axis.title = element_text(size = 16))
#windows(8, 5)
print(hists[[ann]])
rm(p)
rm(df)
})
# grids of p-values
grids <- list()
for (iii in c(2,4,5,6)){ #in the paper, we only show the grids for analyses 2,4,5, and 6
ann <- annlist[iii]
p <- all.p[[ann]]
p.grid <- array(0,dim=c(no.f, no.r, no.ec, no.ecl, no.nmo)) # change the dimensions of the p multiverse for visualization purposes
for (jj in 1:3){
for (jjj in 1:3){
p.grid[, , , jj, jjj] <- p[jjj, , , jj, ]
}
}
cat1 <- rep(c(1:15), 14)
cat2 <- rep(1:14, each = 15)
df <- data.frame(category1 = cat1, category2 = cat2, value = (as.vector
(p.grid[!is.na(p.grid)])))
df[["sign"]] = ifelse(df[["value"]] <= 0.05, "significant", "nonsignificant")
grids[[ann]] <- ggplot(df, aes(x = category1, y = category2, fill = sign)) +
geom_tile(colour = "black") +
geom_text(label = round((df$value), 2), size = 3, colour = "black") +
# draw relationship branches vertical
geom_segment(aes(x = 3, y = -1.7, xend = 3, yend = -0.3)) +
geom_segment(aes(x = 8, y = -1.7, xend = 8, yend = -0.3)) +
geom_segment(aes(x = 13, y = -1.7, xend = 13, yend = -0.3)) +
# draw relationship branches horizontal
geom_segment(aes(x = 3, y = -1.7, xend = 13, yend = -1.7)) +
# draw fertility branches vertical
geom_segment(aes(x = 1, y = -0.3, xend = 1, yend = 0.5)) +
geom_segment(aes(x = 2, y = -0.3, xend = 2, yend = 0.5)) +
geom_segment(aes(x = 3, y = -0.3, xend = 3, yend = 0.5)) +
geom_segment(aes(x = 4, y = -0.3, xend = 4, yend = 0.5)) +
geom_segment(aes(x = 5, y = -0.3, xend = 5, yend = 0.5)) +
geom_segment(aes(x = 6, y = -0.3, xend = 6, yend = 0.5)) +
geom_segment(aes(x = 7, y = -0.3, xend = 7, yend = 0.5)) +
geom_segment(aes(x = 8, y = -0.3, xend = 8, yend = 0.5)) +
geom_segment(aes(x = 9, y = -0.3, xend = 9, yend = 0.5)) +
geom_segment(aes(x = 10, y = -0.3, xend = 10, yend = 0.5)) +
geom_segment(aes(x = 11, y = -0.3, xend = 11, yend = 0.5)) +
geom_segment(aes(x = 12, y = -0.3, xend = 12, yend = 0.5)) +
geom_segment(aes(x = 13, y = -0.3, xend = 13, yend = 0.5)) +
geom_segment(aes(x = 14, y = -0.3, xend = 14, yend = 0.5)) +
geom_segment(aes(x = 15, y = -0.3, xend = 15, yend = 0.5)) +
# draw fertility branches horizontal
geom_segment(aes(x = 1, y = -0.3, xend = 5, yend = -0.3)) +
geom_segment(aes(x = 6, y = -0.3, xend = 10, yend = -0.3)) +
geom_segment(aes(x = 11, y = -0.3, xend = 15, yend = -0.3)) +
# draw menstrual onset branches horizontal
geom_segment(aes(x = 18.5, y = 2.5, xend = 20.5, yend = 2.5)) +
geom_segment(aes(x = 18.5, y = 6.5, xend = 20.5, yend = 6.5)) +
geom_segment(aes(x = 18.5, y = 11.5, xend = 20.5, yend = 11.5)) +
# draw menstrual onset branches vertical
geom_segment(aes(x = 20.5, y = 2.5, xend = 20.5, yend = 11.5)) +
# draw exclusion cycle length branches horizontal
geom_segment(aes(x = 16.5, y = 1.5, xend = 18.5, yend = 1.5)) +
geom_segment(aes(x = 16.5, y = 3.5, xend = 18.5, yend = 3.5)) +
geom_segment(aes(x = 16.5, y = 5.5, xend = 18.5, yend = 5.5)) +
geom_segment(aes(x = 16.5, y = 7.5, xend = 18.5, yend = 7.5)) +
geom_segment(aes(x = 16.5, y = 9.5, xend = 18.5, yend = 9.5)) +
geom_segment(aes(x = 16.5, y = 11.5, xend = 18.5, yend = 11.5)) +
geom_segment(aes(x = 16.5, y = 13.5, xend = 18.5, yend = 13.5)) +
# draw exclusion cycle length branches vertical
geom_segment(aes(x = 18.5, y = 1.5, xend = 18.5, yend = 3.5)) +
geom_segment(aes(x = 18.5, y = 5.5, xend = 18.5, yend = 7.5)) +
geom_segment(aes(x = 18.5, y = 9.5, xend = 18.5, yend = 13.5)) +
# draw exclusion sure branches horizontal
geom_segment(aes(x = 15.5, y = 1, xend = 16.5, yend = 1)) +
geom_segment(aes(x = 15.5, y = 2, xend = 16.5, yend = 2)) +
geom_segment(aes(x = 15.5, y = 3, xend = 16.5, yend = 3)) +
geom_segment(aes(x = 15.5, y = 4, xend = 16.5, yend = 4)) +
geom_segment(aes(x = 15.5, y = 5, xend = 16.5, yend = 5)) +
geom_segment(aes(x = 15.5, y = 6, xend = 16.5, yend = 6)) +
geom_segment(aes(x = 15.5, y = 7, xend = 16.5, yend = 7)) +
geom_segment(aes(x = 15.5, y = 8, xend = 16.5, yend = 8)) +
geom_segment(aes(x = 15.5, y = 9, xend = 16.5, yend = 9)) +
geom_segment(aes(x = 15.5, y = 10, xend = 16.5, yend = 10)) +
geom_segment(aes(x = 15.5, y = 11, xend = 16.5, yend = 11)) +
geom_segment(aes(x = 15.5, y = 12, xend = 16.5, yend = 12)) +
geom_segment(aes(x = 15.5, y = 13, xend = 16.5, yend = 13)) +
geom_segment(aes(x = 15.5, y = 14, xend = 16.5, yend = 14)) +
# draw exlusion sure branches vertical
geom_segment(aes(x = 16.5, y = 1, xend = 16.5, yend = 2)) +
geom_segment(aes(x = 16.5, y = 3, xend = 16.5, yend = 4)) +
geom_segment(aes(x = 16.5, y = 5, xend = 16.5, yend = 6)) +
geom_segment(aes(x = 16.5, y = 7, xend = 16.5, yend = 8)) +
geom_segment(aes(x = 16.5, y = 9, xend = 16.5, yend = 10)) +
geom_segment(aes(x = 16.5, y = 11, xend = 16.5, yend = 12)) +
geom_segment(aes(x = 16.5, y = 13, xend = 16.5, yend = 14)) +
annotate("text", x = c(3, 8, 13), y = -2.2, label = c("R1", "R2", "R3")) +
annotate("text", x = 1:15, y = -0.8, label = rep(c("F1", "F2", "F3",
"F4", "F5"), 3)) +
annotate("text", x = 20, y = c(2, 6, 11), label = c("NMO1", "NMO2",
"NMO3")) +
annotate("text", x = 18, y = c(1, 3), label = c("ECL1", "ECL2")) +
annotate("text", x = 18, y = c(5, 7), label = c("ECL1", "ECL3")) +
annotate("text", x = 18, y = c(9, 11, 13), label = c("ECL1", "ECL2", "ECL3")) +
annotate("text", x = 16, y = c(0.7, 1.7, 2.7, 3.7, 4.7, 5.7, 6.7, 7.7, 8.7,
9.7, 10.7, 11.7, 12.7, 13.7), label = rep(c("EC1", "EC2"), 7)) +
scale_fill_manual(values = c(significant = "grey", nonsignificant = "white")) +
scale_x_discrete(expand = c(0, 0)) + scale_y_reverse() + ggtitle(graphnames[ann]) +
theme(plot.title = element_text(lineheight = 0.8, face = "bold")) +
theme(panel.grid.minor = element_blank()) + theme(panel.grid.major = element_blank()) +
theme(axis.ticks = element_blank(), axis.text.x = element_blank(),
axis.text.y = element_blank()) + theme(panel.background = element_rect(fill = "transparent")) +
theme(legend.position = "none") + theme() + xlab("") + ylab("")
# windows(30, 20)
#windows(10, 7)
print(grids[ann])
rm(df)
rm(p)
rm(p.grid)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RatePhylo.allCI.R
\name{RatePhylo.allCI}
\alias{RatePhylo.allCI}
\title{Confidence intervals for rate parameters}
\usage{
RatePhylo.allCI(
rateData,
MLrate = NULL,
fixed = NULL,
rateMIN = 0.001,
rateMAX = 50,
common.mean = FALSE,
lambda.est = TRUE
)
}
\arguments{
\item{rateData}{an object of class \code{rateData}}
\item{MLrate}{a vector of relative rate parameters. The length of the vector is equal to the number of rates being estimated. If \code{rate=NULL} then rates are equal. Normally these will be the maximum likelihood rate estimates.}
\item{fixed}{A vector stating whether each parameter should be allowed to vary (either \code{FALSE} which results in a start value of 1, or a numeric start value) or should be fixed (\code{TRUE}).}
\item{rateMIN}{Minimum value for the rate parameters}
\item{rateMAX}{Maximum value for the rate parameters}
\item{common.mean}{a logical specififying whether each rate category should have its own mean (\code{common.mean=FALSE}) or all categories should have the same mean (\code{common.mean=FALSE}). See Thomas et al. (2009) for a discussion on the impact of assumptions about mean on rate estimates.}
\item{lambda.est}{Logical. Estimate Pagel's lambda.}
}
\value{
rateLci Lower confidence interval for rate estimate
rateUci Upper confidence interval for rate estimate
}
\description{
Calculates approximate confidence intervals for all rate parameters. CIs are esimated for one rate parameters while fixing others at a given value (usually the maximum likelihood estimate).
These are reliable (given the asympotic assumptions of the chi-square distribution) if only two groups are being compared but should be regarded only as a rough approximation for =>3 different rate categories. If the rates are correlated the CIs may be underestimated.
}
\examples{
data(anolis.data)
data(anolis.tree)
## Convert data to class rateData with a rateMatrix object as input
anolis.rateMatrix <- as.rateMatrix(phy=anolis.tree, x="geo_ecomorph", data=anolis.data)
anolis.rateData <- as.rateData(y="Female_SVL", x="geo_ecomorph",
rateMatrix = anolis.rateMatrix, phy=NULL, data=anolis.data, log.y=TRUE)
# A model with a different rate in each of the four groups. The 'fixed' command is used to determine
# whether a particular rate is to be constrained or not. Use '1' to fix a group and 'FALSE' to show
# that the parameter is not fixed and should be estimated. The values should be entered in the same
# order as the ranking of the groups. That is, group 0 (small islands) takes position one in the
# fixed vector, group 1 (large island trunk crown and trunk ground) takes position 2 and so on. The
# default is to allow each group to take a different mean.
anole.ML <- optim.likRatePhylo(rateData=anolis.rateData, rate=NULL,
fixed=c(FALSE,FALSE,FALSE, FALSE),
common.mean=FALSE, lambda.est=FALSE)
# Confidence intervals for the first two parameters
RatePhylo.allCI(rateData=anolis.rateData, MLrate = anole.ML$MLRate,
fixed=c(FALSE, TRUE, TRUE, TRUE), rateMIN = 0.001, rateMAX = 50,
common.mean = FALSE)
}
\references{
Thomas GH, Freckleton RP, & Szekely T. 2006. Comparative analyses of the influence of developmental mode on phenotypic diversification rates in shorebirds. Proceedings of the Royal Society B 273, 1619-1624.
Thomas GH, Meiri S, & Phillimore AB. 2009. Body size diversification in Anolis: novel environments and island effects. Evolution 63, 2017-2030.
}
\author{
Gavin Thomas, Rob Freckleton
}
| /fuzzedpackages/motmot/man/RatePhylo.allCI.Rd | no_license | akhikolla/testpackages | R | false | true | 3,556 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RatePhylo.allCI.R
\name{RatePhylo.allCI}
\alias{RatePhylo.allCI}
\title{Confidence intervals for rate parameters}
\usage{
RatePhylo.allCI(
rateData,
MLrate = NULL,
fixed = NULL,
rateMIN = 0.001,
rateMAX = 50,
common.mean = FALSE,
lambda.est = TRUE
)
}
\arguments{
\item{rateData}{an object of class \code{rateData}}
\item{MLrate}{a vector of relative rate parameters. The length of the vector is equal to the number of rates being estimated. If \code{rate=NULL} then rates are equal. Normally these will be the maximum likelihood rate estimates.}
\item{fixed}{A vector stating whether each parameter should be allowed to vary (either \code{FALSE} which results in a start value of 1, or a numeric start value) or should be fixed (\code{TRUE}).}
\item{rateMIN}{Minimum value for the rate parameters}
\item{rateMAX}{Maximum value for the rate parameters}
\item{common.mean}{a logical specififying whether each rate category should have its own mean (\code{common.mean=FALSE}) or all categories should have the same mean (\code{common.mean=FALSE}). See Thomas et al. (2009) for a discussion on the impact of assumptions about mean on rate estimates.}
\item{lambda.est}{Logical. Estimate Pagel's lambda.}
}
\value{
rateLci Lower confidence interval for rate estimate
rateUci Upper confidence interval for rate estimate
}
\description{
Calculates approximate confidence intervals for all rate parameters. CIs are esimated for one rate parameters while fixing others at a given value (usually the maximum likelihood estimate).
These are reliable (given the asympotic assumptions of the chi-square distribution) if only two groups are being compared but should be regarded only as a rough approximation for =>3 different rate categories. If the rates are correlated the CIs may be underestimated.
}
\examples{
data(anolis.data)
data(anolis.tree)
## Convert data to class rateData with a rateMatrix object as input
anolis.rateMatrix <- as.rateMatrix(phy=anolis.tree, x="geo_ecomorph", data=anolis.data)
anolis.rateData <- as.rateData(y="Female_SVL", x="geo_ecomorph",
rateMatrix = anolis.rateMatrix, phy=NULL, data=anolis.data, log.y=TRUE)
# A model with a different rate in each of the four groups. The 'fixed' command is used to determine
# whether a particular rate is to be constrained or not. Use '1' to fix a group and 'FALSE' to show
# that the parameter is not fixed and should be estimated. The values should be entered in the same
# order as the ranking of the groups. That is, group 0 (small islands) takes position one in the
# fixed vector, group 1 (large island trunk crown and trunk ground) takes position 2 and so on. The
# default is to allow each group to take a different mean.
anole.ML <- optim.likRatePhylo(rateData=anolis.rateData, rate=NULL,
fixed=c(FALSE,FALSE,FALSE, FALSE),
common.mean=FALSE, lambda.est=FALSE)
# Confidence intervals for the first two parameters
RatePhylo.allCI(rateData=anolis.rateData, MLrate = anole.ML$MLRate,
fixed=c(FALSE, TRUE, TRUE, TRUE), rateMIN = 0.001, rateMAX = 50,
common.mean = FALSE)
}
\references{
Thomas GH, Freckleton RP, & Szekely T. 2006. Comparative analyses of the influence of developmental mode on phenotypic diversification rates in shorebirds. Proceedings of the Royal Society B 273, 1619-1624.
Thomas GH, Meiri S, & Phillimore AB. 2009. Body size diversification in Anolis: novel environments and island effects. Evolution 63, 2017-2030.
}
\author{
Gavin Thomas, Rob Freckleton
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{df_swets08}
\alias{df_swets08}
\title{Data from a self-paced reading experiment that records reading times in milliseconds at the post-critical region. \insertCite{swets2008underspecification;textual}{bcogsci}}
\format{
A data frame with 5,184 rows and 6 variables:
\describe{
\item{subj}{The subject id.}
\item{item}{The item id.}
\item{resp.RT}{Response times to questions.}
\item{qtype}{The three levels of the between-subjects factor, question type.}
\item{attachment}{The three levels of the within-subjects factor, attachment type.}
\item{RT}{Reading times at the post-critical region.}
}
}
\usage{
df_swets08
}
\description{
The dataset is from a self-paced reading experiment by \insertCite{swets2008underspecification;textual}{bcogsci}, and contains reading times from a 3x3 design.
}
\references{
\insertAllCited{}
}
\keyword{datasets}
| /man/df_swets08.Rd | permissive | saidejp/bcogsci | R | false | true | 952 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{df_swets08}
\alias{df_swets08}
\title{Data from a self-paced reading experiment that records reading times in milliseconds at the post-critical region. \insertCite{swets2008underspecification;textual}{bcogsci}}
\format{
A data frame with 5,184 rows and 6 variables:
\describe{
\item{subj}{The subject id.}
\item{item}{The item id.}
\item{resp.RT}{Response times to questions.}
\item{qtype}{The three levels of the between-subjects factor, question type.}
\item{attachment}{The three levels of the within-subjects factor, attachment type.}
\item{RT}{Reading times at the post-critical region.}
}
}
\usage{
df_swets08
}
\description{
The dataset is from a self-paced reading experiment by \insertCite{swets2008underspecification;textual}{bcogsci}, and contains reading times from a 3x3 design.
}
\references{
\insertAllCited{}
}
\keyword{datasets}
|
testlist <- list(x = c(NaN, 4.88349512044349e-33), y = c(4.85787505972498e-33, 2.49576593945277e-29, 1.13662662593938e-305, 6.80504789180293e+38, 2.18465582271745e-305, NaN, 9.9425881215977e-297, -2.62841742635227e-159, -1.34746874498879e+28, -6.94237805298391e+306, -5.82852024984172e+303, 8.85449539944218e-159, 2.14327978499502e-312, 1.04857559801461e-255, 3.13705387384993e-115, -4.48546878382412e+24, 2.2815699287538e-310, 1.26707338373663e-279, 7.29111855643999e-304, -1.30736177482179e+28, -1.05168902913543e-176, -3.74893795732314e-253, 7.09901525832349e-304, 3.13178067539484e-115, 3.04553020513337e-115, -5.15273908894498e-36, 9.9268544386519e+247, 5.9083020048414e-315, 0, 0, 0, 0, 0, 0, 0, 2.46890981934066e-112, -6.67115915932496e+306, -2.15534811653075e+294, 3.06571532920587e-115, 4.32859124382498e-310, 3.50786608547285e-322, 0, -5.4941969960306e+303, 9.48312416197914e-307, -7.98824203857711e-280, -3.31318983418522e+304, 2.41737079499033e+35, 0, 2.41737032386985e+35, 2.95913009018901e-308, 0, 0, 0))
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) | /blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609955884-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,094 | r | testlist <- list(x = c(NaN, 4.88349512044349e-33), y = c(4.85787505972498e-33, 2.49576593945277e-29, 1.13662662593938e-305, 6.80504789180293e+38, 2.18465582271745e-305, NaN, 9.9425881215977e-297, -2.62841742635227e-159, -1.34746874498879e+28, -6.94237805298391e+306, -5.82852024984172e+303, 8.85449539944218e-159, 2.14327978499502e-312, 1.04857559801461e-255, 3.13705387384993e-115, -4.48546878382412e+24, 2.2815699287538e-310, 1.26707338373663e-279, 7.29111855643999e-304, -1.30736177482179e+28, -1.05168902913543e-176, -3.74893795732314e-253, 7.09901525832349e-304, 3.13178067539484e-115, 3.04553020513337e-115, -5.15273908894498e-36, 9.9268544386519e+247, 5.9083020048414e-315, 0, 0, 0, 0, 0, 0, 0, 2.46890981934066e-112, -6.67115915932496e+306, -2.15534811653075e+294, 3.06571532920587e-115, 4.32859124382498e-310, 3.50786608547285e-322, 0, -5.4941969960306e+303, 9.48312416197914e-307, -7.98824203857711e-280, -3.31318983418522e+304, 2.41737079499033e+35, 0, 2.41737032386985e+35, 2.95913009018901e-308, 0, 0, 0))
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) |
\name{math}
\alias{sort.rtv}
\alias{sample.rtv}
\alias{length.drtv}
\alias{length.crtv}
\alias{order.drtv}
\alias{order.crtv}
\alias{c.drtv}
\alias{c.crtv}
\alias{rep.drtv}
\alias{rep.crtv}
\alias{[.drtv}
\alias{[.crtv}
\alias{mean.crtv}
\alias{min.crtv}
\alias{max.crtv}
\alias{range.crtv}
\alias{round.crtv}
\alias{floor.crtv}
\alias{ceiling.crtv}
\alias{+.crtv}
\alias{-.crtv}
\alias{sample}
\alias{sample.default}
\title{standard math operations}
\description{todo}
\usage{
\method{sort}{rtv}(x, \dots)
\method{sample}{rtv}(x, \dots)
\method{length}{drtv}(x, \dots)
\method{length}{crtv}(x, \dots)
\method{order}{drtv}(x, by.unit = getOption("rtv.unit"),
by.cp = FALSE, \dots)
\method{order}{crtv}(x, by.unit = x$unit, by.cp = FALSE, \dots)
\method{c}{drtv}(x, \dots)
\method{c}{crtv}(x, \dots)
\method{rep}{drtv}(x, times, \dots)
\method{rep}{crtv}(x, times, \dots)
\method{[}{drtv}(x, i)
\method{[}{crtv}(x, i)
\method{mean}{crtv}(x, \dots)
\method{min}{crtv}(x, \dots)
\method{max}{crtv}(x, \dots)
\method{round}{crtv}(x, \dots)
\method{floor}{crtv}(x, \dots)
\method{ceiling}{crtv}(x, \dots)
\method{range}{crtv}(x, \dots)
\method{+}{crtv}(a, b)
\method{-}{crtv}(a, b)
sample (\dots)
\method{sample}{default}(x, \dots)
}
\arguments{
\item{x}{.}
\item{i}{.}
\item{by.unit}{.}
\item{by.cp}{.}
\item{times}{.}
\item{a}{.}
\item{b}{.}
\item{\dots}{.}
}
| /man/math.Rd | no_license | cran/rtv | R | false | false | 1,359 | rd | \name{math}
\alias{sort.rtv}
\alias{sample.rtv}
\alias{length.drtv}
\alias{length.crtv}
\alias{order.drtv}
\alias{order.crtv}
\alias{c.drtv}
\alias{c.crtv}
\alias{rep.drtv}
\alias{rep.crtv}
\alias{[.drtv}
\alias{[.crtv}
\alias{mean.crtv}
\alias{min.crtv}
\alias{max.crtv}
\alias{range.crtv}
\alias{round.crtv}
\alias{floor.crtv}
\alias{ceiling.crtv}
\alias{+.crtv}
\alias{-.crtv}
\alias{sample}
\alias{sample.default}
\title{standard math operations}
\description{todo}
\usage{
\method{sort}{rtv}(x, \dots)
\method{sample}{rtv}(x, \dots)
\method{length}{drtv}(x, \dots)
\method{length}{crtv}(x, \dots)
\method{order}{drtv}(x, by.unit = getOption("rtv.unit"),
by.cp = FALSE, \dots)
\method{order}{crtv}(x, by.unit = x$unit, by.cp = FALSE, \dots)
\method{c}{drtv}(x, \dots)
\method{c}{crtv}(x, \dots)
\method{rep}{drtv}(x, times, \dots)
\method{rep}{crtv}(x, times, \dots)
\method{[}{drtv}(x, i)
\method{[}{crtv}(x, i)
\method{mean}{crtv}(x, \dots)
\method{min}{crtv}(x, \dots)
\method{max}{crtv}(x, \dots)
\method{round}{crtv}(x, \dots)
\method{floor}{crtv}(x, \dots)
\method{ceiling}{crtv}(x, \dots)
\method{range}{crtv}(x, \dots)
\method{+}{crtv}(a, b)
\method{-}{crtv}(a, b)
sample (\dots)
\method{sample}{default}(x, \dots)
}
\arguments{
\item{x}{.}
\item{i}{.}
\item{by.unit}{.}
\item{by.cp}{.}
\item{times}{.}
\item{a}{.}
\item{b}{.}
\item{\dots}{.}
}
|
\name{getParameters-methods}
\alias{getParameters-methods}
\alias{getParameters}
\title{The \code{getParameters} method}
\description{Returns the vector of parameters and converts it to a named list.}
\usage{getParameters(this)}
\value{a named list in which each element corresponds to a parameter. The vector of parameters is converted into a named list such as
(name of parameter 1 = value of parameter 1, name of parameter 2 = value of parameter 2, ...).}
\arguments{
\item{this}{the underlying object of class \code{\linkS4class{mtkProcess}} or its sub-classes.}
}
\author{Juhui WANG, MIA-Jouy, Inra, Juhui.Wang@jouy.inra.fr}
\references{J. Wang, H. Richard, R. Faivre, H. Monod (2013). Le package \code{mtk}, une bibliothèque R pour l'exploration numérique des modèles.
\emph{In:} Analyse de sensibilité et exploration de modèles : Application aux sciences de la nature et de l'environnement
(R. Faivre, B. Iooss, S. Mahévas, D. Makowski, H. Monod, Eds). Editions Quae, Versailles.}
\examples{
# Create a native designer avec the method "Morris"
# implemented in the package "mtk"
designer <- mtkNativeDesigner(design="Morris", information=list(size=20))
# Return the parameters as named list
getParameters(designer)
}
| /man/getParameters-methods.Rd | no_license | santoshpanda15/mtk | R | false | false | 1,242 | rd | \name{getParameters-methods}
\alias{getParameters-methods}
\alias{getParameters}
\title{The \code{getParameters} method}
\description{Returns the vector of parameters and converts it to a named list.}
\usage{getParameters(this)}
\value{a named list in which each element corresponds to a parameter. The vector of parameters is converted into a named list such as
(name of parameter 1 = value of parameter 1, name of parameter 2 = value of parameter 2, ...).}
\arguments{
\item{this}{the underlying object of class \code{\linkS4class{mtkProcess}} or its sub-classes.}
}
\author{Juhui WANG, MIA-Jouy, Inra, Juhui.Wang@jouy.inra.fr}
\references{J. Wang, H. Richard, R. Faivre, H. Monod (2013). Le package \code{mtk}, une bibliothèque R pour l'exploration numérique des modèles.
\emph{In:} Analyse de sensibilité et exploration de modèles : Application aux sciences de la nature et de l'environnement
(R. Faivre, B. Iooss, S. Mahévas, D. Makowski, H. Monod, Eds). Editions Quae, Versailles.}
\examples{
# Create a native designer avec the method "Morris"
# implemented in the package "mtk"
designer <- mtkNativeDesigner(design="Morris", information=list(size=20))
# Return the parameters as named list
getParameters(designer)
}
|
subroutine f10(ic)
#
# sort into increasing wavelengths
#
implicit integer*4(i-n)
include "../common/blank"
include "../common/lbl4"
include "../common/label1"
include "../common/lbl3"
include "../common/lbl6"
include "../common/lbl7"
include "../common/lblg"
include "../common/lundefs"
include "../common/alphabet"
include "../common/dscrch"
integer*4 isort(4864)
equivalence (isort,datsc3)
character*8 inm
logical errors, madeit, nochng
#RED
integer*4 iwidok # function
# data ihplus/'+'/
ihplus = ihchar('+')
if (ictrl == -1) {
ic = ihx
return
}
do i=1,maxchn
data(i)=dataa(i)
#------------------------------------------------
# print info
#--------------------------------------------------
nochng = .false.
repeat {
call hreset(1)
call whedr2
write(ttyout,10)
write(ttyout,20) idv1,ifl1,ititl
if (ictrl == ihe) {
errors = .true.
write(ttyout,30)
} else {
errors = .false.
write(ttyout,35)
}
#-----------------------------------------------
# new wavelength file necessary ?
#-----------------------------------------------
madeit = .false.
repeat {
write(ttyout,37)
call crtin
i = 1
call wjfren(i,x,il)
if (iwidok(il) == 1) {
iwtmpf = il
call wjfren(i,x,il)
if (i<80 && il==0) {
iwrec=x
call wavlng (iwtmpf,iwrec,ier)
if (ier==0) madeit=.true.
}
} else if (il==ihx || il==ihe) {
ic = il
return
} else if (x == 0. & il == 0) {
madeit = .true.
nochng = .true.
}
} until (madeit)
} until (nochng)
#----------------------------------------------------
# here data array has data and dataa has wavelengths
#
# now do the sorting of data (not wavelengths)
#----------------------------------------------------
call bubble(dataa, isort, nchans)
do i = 1, nchans
datac(i) = data(isort(i))
#------------------------------------------------------
# do errors if required
#------------------------------------------------------
if (errors) {
idad = 2
itmp = ifl1
call devlun(4,idv1,lun)
if (lun == 0) {
write(ttyout,40)
call crtin
ic = ihx
return
}
call finfil(itmp,lun,2,ier) # position before errors
itmp = itmp +1 # compute error record number
ifl1e = itmp
call finfil(itmp,lun,2,ier) # get errors
if (ier != 0) {
write(ttyout,45) ier,ifl1e
call crtin
ic = ihx
return
}
do i = 1, nchans
error(i) = datab(isort(i))
}
#---------------------------------------------------
# dataa has unsorted wavelengths
# datac has data (sorted)
# errors has sorted errors
#------------------------------------------------------
#----------------------------------------------------
# re-read data into dataa
#----------------------------------------------------
call devlun(4,idv1,lun)
itmp = ifl1
call finfil(itmp,lun,1,ier)
if (ier != 0) {
write(ttyout,45) ier,ifl1
call crtin
ic = ihx
return
}
ic = 0
#------------------------------------------------------
# update the history
#------------------------------------------------------
call namdev(idv1,inm)
write(ihist,60) inm,ifl1
mhist(1:274) = ' '
return
#-----------------------------------------------------
10 format('Function f10: Sort data into Increasing Wavelength Order'//)
20 format(5x,'This function sorts data into increasing wavelength',/,5x,
' order with the errors if included.',//,5x,
'Operating on:',a,i5,':',a,//)
30 format(' Sorting Errors Also'/)
35 format(' Errors Not Included'/)
37 format(' Type e to EXIT from f10'/,
' Type x for HARD EXIT (no more processing)'/,
' Type record id (V, W, D, U, or Y and record number to',
' CHANGE Wavelength Set'/,
' Type return to continue'/)
40 format(' LUN ERROR! Press return to EXIT'/)
45 format(' I/O ERROR ',i5,' in file ',i4/)
60 format(' f10: ',a8,' record: ',i5,' sorted to incr. wavelengths')
#--------------------------------------------------------------------
end
| /src-local/specpr/src.specpr/fcn09-11/f10.r | no_license | ns-bak/tetracorder-tutorial | R | false | false | 4,015 | r | subroutine f10(ic)
#
# sort into increasing wavelengths
#
implicit integer*4(i-n)
include "../common/blank"
include "../common/lbl4"
include "../common/label1"
include "../common/lbl3"
include "../common/lbl6"
include "../common/lbl7"
include "../common/lblg"
include "../common/lundefs"
include "../common/alphabet"
include "../common/dscrch"
integer*4 isort(4864)
equivalence (isort,datsc3)
character*8 inm
logical errors, madeit, nochng
#RED
integer*4 iwidok # function
# data ihplus/'+'/
ihplus = ihchar('+')
if (ictrl == -1) {
ic = ihx
return
}
do i=1,maxchn
data(i)=dataa(i)
#------------------------------------------------
# print info
#--------------------------------------------------
nochng = .false.
repeat {
call hreset(1)
call whedr2
write(ttyout,10)
write(ttyout,20) idv1,ifl1,ititl
if (ictrl == ihe) {
errors = .true.
write(ttyout,30)
} else {
errors = .false.
write(ttyout,35)
}
#-----------------------------------------------
# new wavelength file necessary ?
#-----------------------------------------------
madeit = .false.
repeat {
write(ttyout,37)
call crtin
i = 1
call wjfren(i,x,il)
if (iwidok(il) == 1) {
iwtmpf = il
call wjfren(i,x,il)
if (i<80 && il==0) {
iwrec=x
call wavlng (iwtmpf,iwrec,ier)
if (ier==0) madeit=.true.
}
} else if (il==ihx || il==ihe) {
ic = il
return
} else if (x == 0. & il == 0) {
madeit = .true.
nochng = .true.
}
} until (madeit)
} until (nochng)
#----------------------------------------------------
# here data array has data and dataa has wavelengths
#
# now do the sorting of data (not wavelengths)
#----------------------------------------------------
call bubble(dataa, isort, nchans)
do i = 1, nchans
datac(i) = data(isort(i))
#------------------------------------------------------
# do errors if required
#------------------------------------------------------
if (errors) {
idad = 2
itmp = ifl1
call devlun(4,idv1,lun)
if (lun == 0) {
write(ttyout,40)
call crtin
ic = ihx
return
}
call finfil(itmp,lun,2,ier) # position before errors
itmp = itmp +1 # compute error record number
ifl1e = itmp
call finfil(itmp,lun,2,ier) # get errors
if (ier != 0) {
write(ttyout,45) ier,ifl1e
call crtin
ic = ihx
return
}
do i = 1, nchans
error(i) = datab(isort(i))
}
#---------------------------------------------------
# dataa has unsorted wavelengths
# datac has data (sorted)
# errors has sorted errors
#------------------------------------------------------
#----------------------------------------------------
# re-read data into dataa
#----------------------------------------------------
call devlun(4,idv1,lun)
itmp = ifl1
call finfil(itmp,lun,1,ier)
if (ier != 0) {
write(ttyout,45) ier,ifl1
call crtin
ic = ihx
return
}
ic = 0
#------------------------------------------------------
# update the history
#------------------------------------------------------
call namdev(idv1,inm)
write(ihist,60) inm,ifl1
mhist(1:274) = ' '
return
#-----------------------------------------------------
10 format('Function f10: Sort data into Increasing Wavelength Order'//)
20 format(5x,'This function sorts data into increasing wavelength',/,5x,
' order with the errors if included.',//,5x,
'Operating on:',a,i5,':',a,//)
30 format(' Sorting Errors Also'/)
35 format(' Errors Not Included'/)
37 format(' Type e to EXIT from f10'/,
' Type x for HARD EXIT (no more processing)'/,
' Type record id (V, W, D, U, or Y and record number to',
' CHANGE Wavelength Set'/,
' Type return to continue'/)
40 format(' LUN ERROR! Press return to EXIT'/)
45 format(' I/O ERROR ',i5,' in file ',i4/)
60 format(' f10: ',a8,' record: ',i5,' sorted to incr. wavelengths')
#--------------------------------------------------------------------
end
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.95,family="gaussian",standardize=FALSE)
sink('./breast_096.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/breast/breast_096.R | no_license | esbgkannan/QSMART | R | false | false | 351 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.95,family="gaussian",standardize=FALSE)
sink('./breast_096.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
## Session 1: practical part
# `#` starts a comment - it is not interpreted by the
# `R` console
## CTRL-Enter takes commands from the script editor and
## runs them in the `R` console
## Today we get to know vectors
## - creating numeric vectors
## - storing vectors in variables
## - basic mathematical / statistical functions
## - logical queries on properties of vectors
## `R` supports basic arithmetic
1 + 3
3 - 17
3 * 2
3^2
3^2 + 4^2
10 / 5
## Mind the parentheses:
(3 + 5) / 2
3 + 5 / 2
## Numbers (and other objects) can be stored in variables
## so we can access them again later:
pi <- 3.1415
radius <- 5
kreisflaeche <- pi * radius^2
## Print the value of a variable with CTRL-Enter on
## the console
kreisflaeche
## Delete a variable
rm(pi)
pi # why is pi still there?! (and has a different value)
## `R` is very convenient for statistical computations, since
## it stores data as "vectors", i.e. normally we can store
## several data points directly in *one* object
## With the function `c()` we combine several numbers into
## *one* vector
meinVektor <- c(1, 2, 6, 2, 9)
## On multi-element vectors we can already perform simple
## statistical computations, using the functions that `R`
## provides out of the box
mean(meinVektor)
sum(meinVektor)
sd(meinVektor)
var(meinVektor)
min(meinVektor)
max(meinVektor)
## Find out how many elements a vector has:
length(meinVektor)
## Many functions act directly on all elements
## of a vector:
sqrt(meinVektor)
## Note: here the output was not a one-element vector as
## above; instead the square root was returned for every
## value in `meinVektor`
## Arithmetic with multi-element vectors works just
## the same way:
meinVektor * 2
meinVektor / 2
## Functions can be "nested":
sqrt(var(meinVektor)) # what is this?
## Get "help" on a function with `?`
?var # in my opinion unsuitable for beginners
## Other data types: `R` knows not only numbers,
## but also text
meinText <- "bla bla"
## Text vectors can also have several elements:
mehrText <- c("bla", 'bla') # something different from above
length(meinText)
length(mehrText)
## - Logical queries -
## We can check properties of vectors, for
## instance: how many elements match a certain
## value:
meinVektor > 3
## This reveals yet another data type:
## "logical" -> encodes truth. Knows only the elements
## TRUE/FALSE. Here it encodes: is an element of `meinVektor`
## greater than 3?
meinVektor == 2
## TRUE is interpreted as 1 and FALSE as 0:
sum(meinVektor == 2) # how many elements are equal to 2
sum(meinVektor == 999)
### That was quite a lot already:
### let's practice all of it!
###################################
####### Exercises session 1 #######
###################################
## Use the functions introduced above to work through
## the following exercises.
## German and American sports federations have submitted
## data about the performances of 6 of their best
## high jumpers each:
us_springer <- c(6.89, 7.05, 6.23,
7.32, 7.55, 6.53)
d_springer <- c(1.85, 1.89, 2.02,
2.31, 1.99, 1.79)
## Unfortunately the American federation used the
## imperial measurement system, i.e. it submitted the
## values in "foot", while the German federation
## used meters
## Exercise 1:
## Create a vector that contains the values of the US jumpers
## in meters. 1 foot equals 0.3048 m. Note that in `R` the
## decimal separator is a dot (`.`), not
## a comma.
## Exercise 2:
## Which sports team achieved the higher jumps
## on average?
## Exercise 3: (renumbered; the original labeled this "Aufgabe 2" twice)
## How do the values spread around the group means?
## Which group is more homogeneous?
## Exercise 4:
## How high was the highest jump overall?
## How high the lowest overall?
## In which group did the highest / lowest jump
## occur, respectively?
## Exercise 5:
## How many jumpers cleared the 2 m mark?
## Exercise 6:
## Is there an American jumper who jumped lower than
## 1.85 m?
## Formulate *one* query that returns either
## `TRUE` or `FALSE`:
################## If there is time left
## Selecting elements of vectors
## With the [ ] notation we can select elements of
## vectors by their position
meinVektor[3] # selects the third element
## A "negative" selection is also possible:
meinVektor[-3] # leaves out the third element
## Often desired: select elements
## by their properties:
meinVektor[meinVektor > 2] # what is `meinVektor > 2`?
meinVektor[meinVektor <= 2]
## [ ] access can also be used to modify data:
meinVektor[3] <- 0
meinVektor[meinVektor > 8] <- 9999
## Exercise 7:
## Select the jump heights of all German athletes
## who jumped lower than 2 m
## Exercise 8:
## We receive an update of the data from the German sports
## federation: apparently some of the athletes were
## disqualified during the competition. We store this
## information in a TRUE/FALSE vector:
## NOTE(review): the TRUE/FALSE vector itself appears to be
## missing here - confirm against the original hand-out.
## Set the jump heights of all disqualified Germans
## to 0.
############### If there is still time:
# Extra info: simple computation of a t-test:
us_springer <- c(6.89, 7.05, 6.23,
7.32, 7.55, 6.53)
us_springer_m <- us_springer * 0.3048
d_springer <- c(1.85, 1.89, 2.02,
2.31, 1.99, 1.79)
t.test(us_springer_m, d_springer)
| /Uebungen/2018/01_Uebung.R | no_license | m-Py/Testtheorie-R | R | false | false | 5,701 | r | ## Termin 1 praktischer Teil
# `#` starts a comment - it is not interpreted by the
# `R` console
## CTRL-Enter takes commands from the script editor and
## runs them in the `R` console
## Today we get to know vectors
## - creating numeric vectors
## - storing vectors in variables
## - basic mathematical / statistical functions
## - logical queries on properties of vectors
## `R` supports basic arithmetic
1 + 3
3 - 17
3 * 2
3^2
3^2 + 4^2
10 / 5
## Mind the parentheses:
(3 + 5) / 2
3 + 5 / 2
## Numbers (and other objects) can be stored in variables
## so we can access them again later:
pi <- 3.1415
radius <- 5
kreisflaeche <- pi * radius^2
## Print the value of a variable with CTRL-Enter on
## the console
kreisflaeche
## Delete a variable
rm(pi)
pi # why is pi still there?! (and has a different value)
## `R` is very convenient for statistical computations, since
## it stores data as "vectors", i.e. normally we can store
## several data points directly in *one* object
## With the function `c()` we combine several numbers into
## *one* vector
meinVektor <- c(1, 2, 6, 2, 9)
## On multi-element vectors we can already perform simple
## statistical computations, using the functions that `R`
## provides out of the box
mean(meinVektor)
sum(meinVektor)
sd(meinVektor)
var(meinVektor)
min(meinVektor)
max(meinVektor)
## Find out how many elements a vector has:
length(meinVektor)
## Many functions act directly on all elements
## of a vector:
sqrt(meinVektor)
## Note: here the output was not a one-element vector as
## above; instead the square root was returned for every
## value in `meinVektor`
## Arithmetic with multi-element vectors works just
## the same way:
meinVektor * 2
meinVektor / 2
## Functions can be "nested":
sqrt(var(meinVektor)) # what is this?
## Get "help" on a function with `?`
?var # in my opinion unsuitable for beginners
## Other data types: `R` knows not only numbers,
## but also text
meinText <- "bla bla"
## Text vectors can also have several elements:
mehrText <- c("bla", 'bla') # something different from above
length(meinText)
length(mehrText)
## - Logical queries -
## We can check properties of vectors, for
## instance: how many elements match a certain
## value:
meinVektor > 3
## This reveals yet another data type:
## "logical" -> encodes truth. Knows only the elements
## TRUE/FALSE. Here it encodes: is an element of `meinVektor`
## greater than 3?
meinVektor == 2
## TRUE is interpreted as 1 and FALSE as 0:
sum(meinVektor == 2) # how many elements are equal to 2
sum(meinVektor == 999)
### That was quite a lot already:
### let's practice all of it!
###################################
####### Exercises session 1 #######
###################################
## Use the functions introduced above to work through
## the following exercises.
## German and American sports federations have submitted
## data about the performances of 6 of their best
## high jumpers each:
us_springer <- c(6.89, 7.05, 6.23,
7.32, 7.55, 6.53)
d_springer <- c(1.85, 1.89, 2.02,
2.31, 1.99, 1.79)
## Unfortunately the American federation used the
## imperial measurement system, i.e. it submitted the
## values in "foot", while the German federation
## used meters
## Exercise 1:
## Create a vector that contains the values of the US jumpers
## in meters. 1 foot equals 0.3048 m. Note that in `R` the
## decimal separator is a dot (`.`), not
## a comma.
## Exercise 2:
## Which sports team achieved the higher jumps
## on average?
## Exercise 3: (renumbered; the original labeled this "Aufgabe 2" twice)
## How do the values spread around the group means?
## Which group is more homogeneous?
## Exercise 4:
## How high was the highest jump overall?
## How high the lowest overall?
## In which group did the highest / lowest jump
## occur, respectively?
## Exercise 5:
## How many jumpers cleared the 2 m mark?
## Exercise 6:
## Is there an American jumper who jumped lower than
## 1.85 m?
## Formulate *one* query that returns either
## `TRUE` or `FALSE`:
################## If there is time left
## Selecting elements of vectors
## With the [ ] notation we can select elements of
## vectors by their position
meinVektor[3] # selects the third element
## A "negative" selection is also possible:
meinVektor[-3] # leaves out the third element
## Often desired: select elements
## by their properties:
meinVektor[meinVektor > 2] # what is `meinVektor > 2`?
meinVektor[meinVektor <= 2]
## [ ] access can also be used to modify data:
meinVektor[3] <- 0
meinVektor[meinVektor > 8] <- 9999
## Exercise 7:
## Select the jump heights of all German athletes
## who jumped lower than 2 m
## Exercise 8:
## We receive an update of the data from the German sports
## federation: apparently some of the athletes were
## disqualified during the competition. We store this
## information in a TRUE/FALSE vector:
## NOTE(review): the TRUE/FALSE vector itself appears to be
## missing here - confirm against the original hand-out.
## Set the jump heights of all disqualified Germans
## to 0.
############### If there is still time:
# Extra info: simple computation of a t-test:
us_springer <- c(6.89, 7.05, 6.23,
7.32, 7.55, 6.53)
us_springer_m <- us_springer * 0.3048
d_springer <- c(1.85, 1.89, 2.02,
2.31, 1.99, 1.79)
t.test(us_springer_m, d_springer)
|
resError <- function(ccc){
  ## Reduce the raw calibration data to one averaged row per target
  ## pressure and write the formatted result tables back into
  ## ccc$Calibration$Result$Table.
  ##
  ## ccc: calibration document (nested list); returned with the six
  ##      result tables and a processing-log message attached.
  ##
  ## Fixes vs. previous version:
  ##  * the log message now reports the points that were actually
  ##    skipped (previously it indexed i.take AFTER the outliers had
  ##    been removed, so the wrong point numbers were logged)
  ##  * duplicate assignments of k and UT$Value removed
  msg <- "calculated by resError"

  a        <- abbrevList(ccc)
  p.target <- as.numeric(a$cpt$Values$Pressure$Value)
  maxdev   <- as.numeric(a$cpt$MaxDev)
  k        <- 2 # coverage (expansion) factor

  ##-------------##
  ## pcal
  ##-------------##
  PCAL          <- getSubList(a$cav$Pressure, "cal")
  PCAL$HeadCell <- "{\\(p_{cal}\\)}"
  PCAL$UnitCell <- PCAL$Unit
  pcal          <- getConstVal(NA, NA, PCAL)

  ##-------------##
  ## uncert_total
  ##-------------##
  UT          <- getSubList(a$cav$Uncertainty, "uncertTotal_rel")
  UT$HeadCell <- paste("{\\(U(k=", k, ")\\)}", sep = "")
  if(UT$Unit == "1"){
    UT$UnitCell <- ""
  }
  ut <- getConstVal(NA, NA, UT) * k

  ## indicated pressure
  PIND          <- getSubList(a$cav$Pressure, "ind")
  PIND$HeadCell <- "{\\(p_{ind}\\)}"
  PIND$UnitCell <- PIND$Unit
  pind          <- getConstVal(NA, NA, PIND)

  ## indication offset
  PINDoffs          <- getSubList(a$cav$Pressure, "ind_offset")
  PINDoffs$HeadCell <- "{\\(p_r\\)}"
  PINDoffs$UnitCell <- PINDoffs$Unit
  pindoffs          <- getConstVal(NA, NA, PINDoffs)

  ## offset-corrected indication
  PINDcorr          <- getSubList(a$cav$Pressure, "ind_corr")
  PINDcorr$HeadCell <- "{\\(p_{ind} - p_r\\)}"
  PINDcorr$UnitCell <- PINDcorr$Unit
  pindcorr          <- getConstVal(NA, NA, PINDcorr)

  ## relative error
  ## (note: clears Unit itself, unlike UT above which clears UnitCell;
  ## UnitCell is copied from Unit afterwards, so both end up empty)
  RES <- getSubList(a$cav$Error, "relative")
  if(RES$Unit == "1"){
    RES$Unit <- ""
  }
  RES$HeadCell <- "{\\(e\\)}"
  RES$UnitCell <- RES$Unit
  result       <- getConstVal(NA, NA, RES)

  ## Reference value for outlier rejection.
  ## NOTE(review): effect of revV so far only tested for srg_error.
  revV  <- median(result)
  noOfP <- length(p.target)

  ## Result vectors have the same length as the target-pressure vector.
  td.pcal     <- rep(NA, noOfP)
  td.pind     <- rep(NA, noOfP)
  td.pindoffs <- rep(NA, noOfP)
  td.pindcorr <- rep(NA, noOfP)
  td.ut       <- rep(NA, noOfP)
  td.result   <- rep(NA, noOfP)

  ## Process the target pressures one by one.
  for(i in seq_len(noOfP)){
    ## All measured points within +/- maxdev around the target pressure.
    i.take <- which(pcal > (p.target[i] * (1 - maxdev)) &
                    pcal < (p.target[i] * (1 + maxdev)))
    msg <- paste(msg, "; For target pressure:", p.target[i],
                 "I take the points:", toString(i.take))
    if(length(i.take) > 1){
      ## ut already carries k = 2; keep every point whose deviation
      ## e.delta from the reference value revV is within 3 sigma.
      e.delta <- abs(result[i.take] - revV)
      i.out   <- which(e.delta > mean(ut[i.take])/k*3)
      if(length(i.out) > 0){
        skipped <- i.take[i.out] # record BEFORE dropping them (bug fix)
        i.take  <- i.take[-i.out]
        msg <- paste(msg, "from these I skip the points: ",
                     toString(skipped))
      }
    }
    ## unlist() kept defensively in case getConstVal returns list columns.
    td.pcal[i]     <- unlist(mean(pcal[i.take]))
    td.ut[i]       <- unlist(mean(ut[i.take]))
    td.result[i]   <- unlist(mean(result[i.take]))
    td.pind[i]     <- unlist(mean(pind[i.take]))
    td.pindoffs[i] <- unlist(mean(pindoffs[i.take]))
    td.pindcorr[i] <- unlist(mean(pindcorr[i.take]))
  }

  ## Format the averaged values for presentation.
  PCAL$Value     <- formatC(td.pcal, digits = 3, format = "E")
  UT$Value       <- formatC(td.ut, digits = 1, format = "E")
  PIND$Value     <- formatC(td.pind, digits = 2, format = "E")
  PINDoffs$Value <- formatC(td.pindoffs, digits = 2, format = "E")
  PINDcorr$Value <- formatC(td.pindcorr, digits = 2, format = "E")
  RES$Value      <- formatC(td.result, digits = 1, width = 2, format = "E")

  ccc$Calibration$Result$Table[[1]] <- PCAL
  ccc$Calibration$Result$Table[[2]] <- PIND
  ccc$Calibration$Result$Table[[3]] <- PINDoffs
  ccc$Calibration$Result$Table[[4]] <- PINDcorr
  ccc$Calibration$Result$Table[[5]] <- RES
  ccc$Calibration$Result$Table[[6]] <- UT
  ccc$Calibration$Result$Table[[5]]$Comment <- msg
  return(ccc)
}
| /utils/resError.R | no_license | wactbprot/r4vl | R | false | false | 4,046 | r | resError <- function(ccc){
msg <- "calculated by resError"
## - data reduction as given by ToDo
## - round
a <- abbrevList(ccc)
p.target <- as.numeric(a$cpt$Values$Pressure$Value)
maxdev <- as.numeric(a$cpt$MaxDev)
k <- 2 # Erweiterungsfaktor
##-------------##
## pcal
##-------------##
PCAL <- getSubList(a$cav$Pressure, "cal")
PCAL$HeadCell <- "{\\(p_{cal}\\)}"
PCAL$UnitCell <- PCAL$Unit
pcal <- getConstVal(NA, NA, PCAL)
##-------------##
## uncert_total
##-------------##
k <- 2
UT <- getSubList(a$cav$Uncertainty, "uncertTotal_rel")
UT$HeadCell <- paste("{\\(U(k=",k,")\\)}", sep="")
if(UT$Unit == "1"){
UT$UnitCell <- ""
}
ut <- getConstVal(NA, NA, UT) * k
## ind
PIND <- getSubList(a$cav$Pressure, "ind")
PIND$HeadCell <- "{\\(p_{ind}\\)}"
PIND$UnitCell <- PIND$Unit
pind <- getConstVal(NA, NA, PIND)
## ind_offset
PINDoffs <- getSubList(a$cav$Pressure, "ind_offset")
PINDoffs$HeadCell <- "{\\(p_r\\)}"
PINDoffs$UnitCell <- PINDoffs$Unit
pindoffs <- getConstVal(NA, NA, PINDoffs)
## ind_corr
PINDcorr <- getSubList(a$cav$Pressure, "ind_corr")
PINDcorr$HeadCell <- "{\\(p_{ind} - p_r\\)}"
PINDcorr$UnitCell <- PINDcorr$Unit
pindcorr <- getConstVal(NA, NA, PINDcorr)
## rel
RES <- getSubList(a$cav$Error, "relative")
if(RES$Unit == "1"){
RES$Unit <- ""
}
RES$HeadCell <- "{\\(e\\)}"
RES$UnitCell <- RES$Unit
result <- getConstVal(NA, NA, RES)
## Auswirkung des revV
## bisher nur auf srg_error
## getested
revV <- median(result)
noOfP <- length(p.target)
## Ergebnisstabelle soll gleiche Länge wie
## target vekcor haben
td.pcal <- rep(NA, noOfP)
td.pind <- rep(NA, noOfP)
td.pindoffs <- rep(NA, noOfP)
td.pindcorr <- rep(NA, noOfP)
td.ut <- rep(NA, noOfP)
td.result <- rep(NA, noOfP)
## Zieldrücke einzeln durchgehen
for(i in 1:noOfP){
i.out <- NULL
i.take <- which(pcal > (p.target[i] *(1- maxdev)) &
pcal < (p.target[i] *(1+ maxdev)))
msg <- paste(msg,"; For target pressure:",p.target[i],
"I take the points:", toString(i.take))
if(length(i.take) > 1){
## ut ist schon k=2, eswerden alle Punkte genommen,
## bei dem e.delta kleiner als 3-sigma ist
## wobei e.delte die Abweichung vom Reverenzwert ist
e.delta <- abs(result[i.take] - revV)
i.out <- which(e.delta > mean(ut[i.take])/k*3)
if(length(i.out) > 0){
i.take <- i.take[-i.out]
msg <- paste(msg, "from these I skip the points: ",
toString(i.take[i.out]))
}
}
td.pcal[i] <- unlist(mean(pcal[i.take]))
td.ut[i] <- unlist(mean(ut[i.take]))
td.result[i] <- unlist(mean(result[i.take]))
td.pind[i] <- unlist(mean(pind[i.take]))
td.pindoffs[i] <- unlist(mean(pindoffs[i.take]))
td.pindcorr[i] <- unlist(mean(pindcorr[i.take]))
} #for
PCAL$Value <- formatC(td.pcal, digits=3, format="E")
UT$Value <- formatC(td.ut, digits=1, format="E")
PIND$Value <- formatC(td.pind, digits=2, format="E")
PINDoffs$Value <- formatC(td.pindoffs, digits=2, format="E")
PINDcorr$Value <- formatC(td.pindcorr, digits=2, format="E")
UT$Value <- formatC(td.ut, digits=1, format="E")
RES$Value <- formatC(td.result, digits=1, width=2, format="E")
ccc$Calibration$Result$Table[[1]] <- PCAL
ccc$Calibration$Result$Table[[2]] <- PIND
ccc$Calibration$Result$Table[[3]] <- PINDoffs
ccc$Calibration$Result$Table[[4]] <- PINDcorr
ccc$Calibration$Result$Table[[5]] <- RES
ccc$Calibration$Result$Table[[6]] <- UT
ccc$Calibration$Result$Table[[5]]$Comment <- msg
return(ccc)
}
|
## Input-validation tests for analytic_locFDR_BF_cor(): SpikeVar and
## SlabVar must each be a positive numeric scalar, and invalid values
## should trigger a warning (not an error).
context("Input SlabVar and SpikeVar of locFDR BF theoretic function cor")
## Each invalid SpikeVar value is passed both by name and positionally
## (as the 4th argument) to cover both call styles.
test_that("Throws warning if SpikeVar parameter is not a numeric scalar", {
skip_on_cran()
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = NA), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, NA), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = "A"), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, "A"), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 1:2), "SpikeVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, 1:2), "SpikeVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = -1), "SpikeVar is not positive*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, 0), "SpikeVar is not positive*")
})
## Input validation for SlabVar: must be a positive numeric scalar.
## Fix: the test description previously said "SpikeVar" -- a copy-paste
## leftover from the test above -- although the assertions exercise SlabVar.
test_that("Throws warning if SlabVar parameter is not a numeric scalar", {
skip_on_cran()
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = NA), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, NA), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = "A"), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, "A"), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = 1:2), "SlabVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, 1:2), "SlabVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = -1), "SlabVar is not positive*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, 0), "SlabVar is not positive*")
})
| /tests/testthat/test-InputSpikeSlabVarLocFDR_BF_theoretic_Cor.R | no_license | ArunabhaCodes/CPBayes | R | false | false | 2,106 | r | context("Input SlabVar and SpikeVar of locFDR BF theoretic function cor")
## SpikeVar must be a positive numeric scalar; invalid values should only
## warn (not error), both when passed by name and positionally.
test_that("Throws warning if SpikeVar parameter is not a numeric scalar", {
skip_on_cran()
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = NA), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, NA), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = "A"), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, "A"), "SpikeVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 1:2), "SpikeVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, 1:2), "SpikeVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = -1), "SpikeVar is not positive*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, 0), "SpikeVar is not positive*")
})
## Input validation for SlabVar: must be a positive numeric scalar.
## Fix: the test description previously said "SpikeVar" -- a copy-paste
## leftover from the test above -- although the assertions exercise SlabVar.
test_that("Throws warning if SlabVar parameter is not a numeric scalar", {
skip_on_cran()
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = NA), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, NA), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = "A"), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, "A"), "SlabVar is not numeric*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = 1:2), "SlabVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, 1:2), "SlabVar is not a scalar*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SlabVar = -1), "SlabVar is not positive*")
expect_warning(analytic_locFDR_BF_cor(1:10, 1:10, ExampleDataCor$cor, SpikeVar = 0.01, 0), "SlabVar is not positive*")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{blbcoef}
\alias{blbcoef}
\title{Obtain model's coefficient}
\usage{
blbcoef(fit)
}
\arguments{
\item{fit}{fitted model}
}
\description{
Compute the coefficients from a fitted model object.
}
| /man/blbcoef.Rd | permissive | JZNeilZ/blblm | R | false | true | 264 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{blbcoef}
\alias{blbcoef}
\title{Obtain model's coefficient}
\usage{
blbcoef(fit)
}
\arguments{
\item{fit}{fitted model}
}
\description{
Compute the coefficients from a fitted model object.
}
|
## Parameter-coverage tests for the catboost classification learner.
library(mlr3extralearners)
## catboost is not on CRAN: install a pinned version on demand if absent.
if (!requireNamespace("catboost", quietly = TRUE)) {
install_catboost("0.26.1")
}
## Every parameter of catboost::catboost.train should be exposed by the
## mlr3 learner, except those that mlr3 manages itself.
## (Assignments switched from `=` to the idiomatic `<-`.)
test_that("classif.catboost_catboost.train", {
  learner <- lrn("classif.catboost")
  fun <- catboost::catboost.train
  exclude <- c(
    "learn_pool", # handled via mlr3
    "test_pool", # don't use internal validation
    "params" # each parameter supplied separately
  )
  ParamTest <- run_paramtest(learner, fun, exclude)
  ## On failure, list the parameters missing from the learner.
  expect_true(ParamTest, info = paste0(
    "\nMissing parameters:\n",
    paste0("- '", ParamTest$missing, "'", collapse = "\n")))
})
## Every parameter of catboost::catboost.predict should be exposed by the
## mlr3 learner's predict step, except those that mlr3 manages itself.
## (Assignments switched from `=` to the idiomatic `<-`.)
test_that("classif.catboost_catboost.predict", {
  learner <- lrn("classif.catboost")
  fun <- catboost::catboost.predict
  exclude <- c(
    "model", # handled via mlr3
    "pool", # handled via mlr3
    "prediction_type" # handled via mlr3
  )
  ParamTest <- run_paramtest(learner, fun, exclude)
  ## On failure, list the parameters missing from the learner.
  expect_true(ParamTest, info = paste0(
    "\nMissing parameters:\n",
    paste0("- '", ParamTest$missing, "'", collapse = "\n")))
})
| /inst/paramtest/test_paramtest_catboost_classif_catboost.R | no_license | A-Pai/mlr3extralearners | R | false | false | 1,001 | r | library(mlr3extralearners)
## catboost is not on CRAN: install a pinned version on demand if absent.
if (!requireNamespace("catboost", quietly = TRUE)) {
install_catboost("0.26.1")
}
## Every parameter of catboost::catboost.train should be exposed by the
## mlr3 learner, except those that mlr3 manages itself.
## (Assignments switched from `=` to the idiomatic `<-`.)
test_that("classif.catboost_catboost.train", {
  learner <- lrn("classif.catboost")
  fun <- catboost::catboost.train
  exclude <- c(
    "learn_pool", # handled via mlr3
    "test_pool", # don't use internal validation
    "params" # each parameter supplied separately
  )
  ParamTest <- run_paramtest(learner, fun, exclude)
  ## On failure, list the parameters missing from the learner.
  expect_true(ParamTest, info = paste0(
    "\nMissing parameters:\n",
    paste0("- '", ParamTest$missing, "'", collapse = "\n")))
})
## Every parameter of catboost::catboost.predict should be exposed by the
## mlr3 learner's predict step, except those that mlr3 manages itself.
## (Assignments switched from `=` to the idiomatic `<-`.)
test_that("classif.catboost_catboost.predict", {
  learner <- lrn("classif.catboost")
  fun <- catboost::catboost.predict
  exclude <- c(
    "model", # handled via mlr3
    "pool", # handled via mlr3
    "prediction_type" # handled via mlr3
  )
  ParamTest <- run_paramtest(learner, fun, exclude)
  ## On failure, list the parameters missing from the learner.
  expect_true(ParamTest, info = paste0(
    "\nMissing parameters:\n",
    paste0("- '", ParamTest$missing, "'", collapse = "\n")))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaults.R
\name{default_shape_var}
\alias{default_shape_var}
\title{default shape_var}
\usage{
default_shape_var(object)
}
\arguments{
\item{object}{SummarizedExperiment}
}
\value{
default value of shape_var
}
\description{
Determine the default value of the shape variable (shape_var) for a SummarizedExperiment object.
}
\examples{
if (require(autonomics.data)){
require(magrittr)
# STEM CELL COMPARISON
autonomics.data::stemcomp.proteinratios \%>\% default_shape_var()
# GLUTAMINASE
autonomics.data::glutaminase \%>\% default_shape_var()
}
}
| /autonomics.plot/man/default_shape_var.Rd | no_license | bhagwataditya/autonomics0 | R | false | true | 563 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defaults.R
\name{default_shape_var}
\alias{default_shape_var}
\title{default shape_var}
\usage{
default_shape_var(object)
}
\arguments{
\item{object}{SummarizedExperiment}
}
\value{
default value of shape_var
}
\description{
Determine the default value of the shape variable (shape_var) for a SummarizedExperiment object.
}
\examples{
if (require(autonomics.data)){
require(magrittr)
# STEM CELL COMPARISON
autonomics.data::stemcomp.proteinratios \%>\% default_shape_var()
# GLUTAMINASE
autonomics.data::glutaminase \%>\% default_shape_var()
}
}
|
## Data Carpentry workshop: setup and first vector exercises.
## Configure the global git identity used for commits from this machine.
system('git config --global user.email "qiu-yue.zhang@students.mq.edu.au"')
system('git config --global user.name "HA82"')
## Download the SAFI example data sets into ./data/
## (assumes the directory already exists -- download.file() will not
## create it; TODO confirm).
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_clean.csv", destfile = "./data/SAFI_clean.csv")
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_dates.xlsx", destfile = "./data/SAFI_dates.xlsx", mode= "wb")
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_openrefine.csv", destfile = "./data/SAFI_openrefine.csv", mode= "wb")
## Install and load the tidyverse meta-package.
install.packages("tidyverse")
library(tidyverse)
respondent_wall_type <- c("muddaub", "burntbricks", "sunbricks")
## Subsetting vectors
respondent_wall_type[2] # second element
respondent_wall_type[c(3,2)] # third and second element, in that order
more_respondent_wall_type <- respondent_wall_type[c(1,2,3,2,1,3)] # repeats allowed
no_membrs <- c(3, 7, 10, 6)
no_membrs[c(TRUE, FALSE, TRUE, TRUE)] # logical subsetting keeps TRUE positions
no_membrs[no_membrs>5] # the condition yields a logical mask
no_membrs[no_membrs<3|no_membrs>5] # `|` is element-wise OR
possessions <- c("bicycle", "radio", "television")
possessions[possessions == "car" | possessions == "bicycle"]
possessions %in% c("car", "bicycle") # membership test, one TRUE/FALSE per element
## Missing values: NA propagates through aggregate functions ...
rooms <- c(2,1,1,NA,4)
mean(rooms) # NA
max(rooms) # NA
mean(rooms, na.rm=TRUE) # ... unless removed with na.rm = TRUE
is.na(rooms)
!is.na(rooms)
rooms[!is.na(rooms)] # drop the missing values
| /scripts/scripts.R | no_license | HA82/data-carpentry-r | R | false | false | 1,238 | r | system('git config --global user.email "qiu-yue.zhang@students.mq.edu.au"')
## Data Carpentry workshop: setup and first vector exercises (continued).
## Configure the global git identity used for commits from this machine.
system('git config --global user.name "HA82"')
## Download the SAFI example data sets into ./data/
## (assumes the directory already exists -- download.file() will not
## create it; TODO confirm).
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_clean.csv", destfile = "./data/SAFI_clean.csv")
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_dates.xlsx", destfile = "./data/SAFI_dates.xlsx", mode= "wb")
download.file(url = "https://mq-software-carpentry.github.io/R-git-for-research/data/SAFI_openrefine.csv", destfile = "./data/SAFI_openrefine.csv", mode= "wb")
## Install and load the tidyverse meta-package.
install.packages("tidyverse")
library(tidyverse)
respondent_wall_type <- c("muddaub", "burntbricks", "sunbricks")
## Subsetting vectors
respondent_wall_type[2] # second element
respondent_wall_type[c(3,2)] # third and second element, in that order
more_respondent_wall_type <- respondent_wall_type[c(1,2,3,2,1,3)] # repeats allowed
no_membrs <- c(3, 7, 10, 6)
no_membrs[c(TRUE, FALSE, TRUE, TRUE)] # logical subsetting keeps TRUE positions
no_membrs[no_membrs>5] # the condition yields a logical mask
no_membrs[no_membrs<3|no_membrs>5] # `|` is element-wise OR
possessions <- c("bicycle", "radio", "television")
possessions[possessions == "car" | possessions == "bicycle"]
possessions %in% c("car", "bicycle") # membership test, one TRUE/FALSE per element
## Missing values: NA propagates through aggregate functions ...
rooms <- c(2,1,1,NA,4)
mean(rooms) # NA
max(rooms) # NA
mean(rooms, na.rm=TRUE) # ... unless removed with na.rm = TRUE
is.na(rooms)
!is.na(rooms)
rooms[!is.na(rooms)] # drop the missing values
|
# JHU Data Science - Course 03 - Getting and Cleaning Data - Week 4
## Importing data to play around with
## Editing text variables
# Bare names below just print the functions for reference; they are not calls.
tolower
strsplit
# Combine strsplit with sapply to remove .s
sub("_","",names(data)) # replaces first underscore in each name
gsub("_","",names(data)) # replaces all underscores in each name
# For everything below, `vector` stands for the character vector being searched:
grep("search string", vector) # This acts like which("search string" in vector)
grep("search string", vector, value=TRUE) # This acts like vector(which("search string" in vector))
grepl("search string", vector) # This acts like ("search string" in vector)
# Length with grep is a good way to check that the string does not appear
str_trim #in stringr | /03_Cleaning_data/Week4/Course_notes.R | no_license | kartik-avula/Data_Science | R | false | false | 711 | r | # JHU Data Science - Course 03 - Getting and Cleaning Data - Week 4
## Importing data to play around with
## Editing text variables
tolower
strsplit
# Combine strsplit with sapply to remove .s
sub("_","",names(data)) # replaces first underscore in each name
gsub("_","",names(data)) # replaces all underscores in each name
# For everything below i
grep("search string", vector) # This acts like which("search string" in vector)
grep("search string", vector, value=TRUE) # This acts like vector(which("search string" in vector))
grepl("search string", vector) # This acts like ("search string" in vector)
# Length with grep is a good way to check that the string does not appearn
str_trim #in stringr |
# Methylation-array preprocessing pipeline (Illumina EPIC 850k) using minfi:
# read raw IDATs, normalize, drop SNP-overlapping and cross-reactive probes,
# run QC plots/reports, and export per-sample beta values plus probe
# annotation columns as tab-separated files for downstream bedtools work.
library(minfi)
# BiocManager::install("minfiData")
library(minfiData)
library(IlluminaHumanMethylationEPICmanifest)#,
library(IlluminaHumanMethylationEPICanno.ilm10b4.hg19)
# Raw red/green channel set from the IDAT directory.
RGSet<-minfi::read.metharray.exp('data/MotifPipeline/ENCODE/methyl_array/850k/')
# phenoData <- pData(RGSet)
manifest <- getManifest(RGSet)
head(getProbeInfo(manifest))
# Raw methylated/unmethylated signals, then beta/M ratios (keeping copy number).
MSet <- preprocessRaw(RGSet)
MSet
RSet <- ratioConvert(MSet, what = "both", keepCN = TRUE)
RSet
beta <- getBeta(RSet)
GRset <- mapToGenome(RSet)
GRset
##QC
# Quantile normalization with outlier fixing and bad-sample removal.
# NOTE(review): this rebinds RGSet to a GenomicRatioSet, so the raw channel
# set is lost; the later controlStripPlot()/qcReport() calls expect an
# RGChannelSet -- verify they still run against this object.
RGSet <- preprocessQuantile(RGSet, fixOutliers = TRUE,removeBadSamples = TRUE, badSampleCutoff = 10.5,quantileNormalize = TRUE, stratified = TRUE, mergeManifest = FALSE, sex = NULL)
# MSet.swan <- preprocessSWAN(RGSet)
# RSet2 <- ratioConvert(MSet.swan)
snps <- getSnpInfo(RGSet)
head(snps,10)
RGSet <- addSnpInfo(GRset) #GRset
##filter out snps
RGSet <- dropLociWithSnps(RGSet, snps=c("SBE","CpG"), maf=0)
###filter out cross-reactive probes
# NOTE(review): installing from GitHub mid-script is a side effect; consider
# moving the install out of the pipeline.
devtools::install_github("markgene/maxprobes")
RGSet<-maxprobes::dropXreactiveLoci(RGSet)
##else comment above out
# Post-filter matrices and metadata.
beta <- getBeta(RGSet)
M <- getM(GRset)
CN <- getCN(GRset)
sampleNames <- sampleNames(GRset)
probeNames <- featureNames(GRset)
pheno <- pData(GRset)
gr <- granges(GRset)
annotation <- getAnnotation(RGSet)
qc <- getQC(MSet)
head(qc)
plotQC(qc)
# BUG FIX: the original passed `legend = None` -- `None` is Python, undefined
# in R, and would abort the script here.  minfi::densityPlot's argument is
# logical; FALSE suppresses the legend as intended.
densityPlot(beta, sampGroups = GRset@colData@rownames,main = 'preQC_beta',legend = FALSE)
densityBeanPlot(beta, sampGroups = GRset@colData@rownames,main = 'preQC_beta')
controlStripPlot(RGSet, controls="BISULFITE CONVERSION II")
qcReport(RGSet, pdf= "qcReport.pdf",sampNames = MSet@colData@rownames,sampGroups = MSet@colData@rownames)
# Export per-sample beta columns (assumes fixed sample order 1..6).
write.table(beta[,1],'data/MotifPipeline/ENCODE/methyl_array/A-549b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,2],'data/MotifPipeline/ENCODE/methyl_array/GM12878b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,3],'data/MotifPipeline/ENCODE/methyl_array/HeLa-S3b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,4],'data/MotifPipeline/ENCODE/methyl_array/Hep-G2b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,5],'data/MotifPipeline/ENCODE/methyl_array/K562b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,6],'data/MotifPipeline/ENCODE/methyl_array/SKNSHb.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
# Probe coordinates and annotation columns, one file each (no row/col names so
# they can be pasted side-by-side into BED-like files later).
write.table(RGSet@rowRanges@ranges@start,'data/MotifPipeline/ENCODE/methyl_array/startb.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$chr,'data/MotifPipeline/ENCODE/methyl_array/chrb.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation$Relation_to_Island,'data/MotifPipeline/ENCODE/methyl_array/R2island.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$UCSC_RefGene_Group,'data/MotifPipeline/ENCODE/methyl_array/gene_bodyA.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$GencodeBasicV12_Group,'data/MotifPipeline/ENCODE/methyl_array/gene_bodyB.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$Name,'data/MotifPipeline/ENCODE/methyl_array/cpg.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
##bedtools intersect to add gene annotation to cg location info
write.table(annotation@listData$Gene,'data/MotifPipeline/ENCODE/methyl_array/gene.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
##python convert empty rows to 0s
# ##bash to format as bed file cd data/MotifPipeline/ENCODE/methyl_array/
# paste chrb.txt startb.txt A-549b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> A-549_MeArrayc.txt
# paste chrb.txt startb.txt GM12878b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> GM12878_MeArrayc.txt
# paste chrb.txt startb.txt HeLa-S3b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> HeLa-S3_MeArrayc.txt
# paste chrb.txt startb.txt Hep-G2b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> Hep-G2_MeArrayc.txt
# paste chrb.txt startb.txt K562b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> K-562_MeArrayc.txt
# paste chrb.txt startb.txt SKNSHb.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> SK-N-SH_MeArrayc.txt
# #
# #
# # ##map from 19 to 38 cd to data
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/A-549_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/A549_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/HeLa-S3_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/HeLa_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/Hep-G2_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/HepG2_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/K-562_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/K562_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/SK-N-SH_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/SKNSH_MeArrayHG38d.txt unmapped
# #
#
#
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/A549_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/A549_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/HeLa_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/HeLa_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/HepG2_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/HepG2_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/K562_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/K562_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/SKNSH_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/SKNSH_MeArrayHG38e.txt
# ##build shuffled versions
# cut -f4 A-549_MeArrayHG38c.txt|cut -f4 |shuf> shuf_A549.txt
# paste A-549_MeArrayHG38c.txt shuf_A549.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > A-549_shufMeArrayHG38c.txt
# cut -f4 GM12878_MeArrayHG38c.txt|cut -f4 |shuf> shuf_GM12878.txt
# paste GM12878_MeArrayHG38c.txt shuf_GM12878.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > GM12878_shufMeArrayHG38c.txt
# cut -f4 HeLa-S3_MeArrayHG38c.txt|cut -f4 |shuf> shuf_HeLa-S3.txt
# paste HeLa-S3_MeArrayHG38c.txt shuf_HeLa-S3.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > HeLa-S3_shufMeArrayHG38c.txt
# cut -f4 Hep-G2_MeArrayHG38c.txt|cut -f4 |shuf> shuf_Hep-G2.txt
# paste Hep-G2_MeArrayHG38c.txt shuf_Hep-G2.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > Hep-G2_shufMeArrayHG38c.txt
# cut -f4 K-562_MeArrayHG38c.txt|cut -f4 |shuf> shuf_K-562.txt
# paste K-562_MeArrayHG38c.txt shuf_K-562.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > K-562_shufMeArrayHG38c.txt
# cut -f4 SK-N-SH_MeArrayHG38c.txt|cut -f4 |shuf> shuf_SK-N-SH.txt
# paste SK-N-SH_MeArrayHG38c.txt shuf_SK-N-SH.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > SK-N-SH_shufMeArrayHG38c.txt
#
#
### create milipeed motifs
# paste cpg.txt A-549b.txt | awk '{print $1,$2}' OFS='\t'> A-549_Metif.txt
# paste cpg.txt GM12878b.txt | awk '{print $1,$2}' OFS='\t'> GM12878_Metif.txt
# paste cpg.txt HeLa-S3b.txt | awk '{print $1,$2}' OFS='\t'> HeLa_Metif.txt
# paste cpg.txt Hep-G2b.txt | awk '{print $1,$2}' OFS='\t'> HepG2_Metif.txt
# paste cpg.txt K562b.txt | awk '{print $1,$2}' OFS='\t'> K562_Metif.txt
# paste cpg.txt SKNSHb.txt | awk '{print $1,$2}' OFS='\t'> SKNSH_Metif.txt
### create gold standard chip motif for comparison
# cd data/MotifPipeline/ENCODE/methyl_array/
# paste chrb.txt startb.txt A-549b.txt | awk '{print $1,$2,$2+1,$3,$4}' OFS='\t'>> A-549_Metifb.txt
# eval "~/../rekrg/Tools/bedtools2/bin/bedtools slop -i data/MotifPipeline/remap/A549_spRE2020.txt -g ~/../rekrg/Tools/bedtools2/genomes/human.hg38.genome -r 1000 -l 1000" > data/MotifPipeline/remap/slopA549.txt
# eval "~/../rekrg/Tools/bedtools2/bin/bedtools intersect -wa -wb -a data/MotifPipeline/remap/slopA549.txt -b data/MotifPipeline/ENCODE/methyl_array/A-549_Metifb.txt " > data/MotifPipeline/A549GOLD_1000kb.txt
# eval "~/../rekrg/Tools/bedtools2/bin/bedtools intersect -wa -wb -a data/MotifPipeline/remap/A549_spRE2020.txt -b data/MotifPipeline/ENCODE/methyl_array/A-549_Metifb.txt " > data/MotifPipeline/A549GOLD.txt
# | /src/R/process_encode_MeArray.R | no_license | dcolinmorgan/mili_benchmark | R | false | false | 9,088 | r | library(minfi)
# BiocManager::install("minfiData")
library(minfiData)
library(IlluminaHumanMethylationEPICmanifest)#,
library(IlluminaHumanMethylationEPICanno.ilm10b4.hg19)
RGSet<-minfi::read.metharray.exp('data/MotifPipeline/ENCODE/methyl_array/850k/')
# phenoData <- pData(RGSet)
manifest <- getManifest(RGSet)
head(getProbeInfo(manifest))
MSet <- preprocessRaw(RGSet)
MSet
RSet <- ratioConvert(MSet, what = "both", keepCN = TRUE)
RSet
beta <- getBeta(RSet)
GRset <- mapToGenome(RSet)
GRset
##QC
RGSet <- preprocessQuantile(RGSet, fixOutliers = TRUE,removeBadSamples = TRUE, badSampleCutoff = 10.5,quantileNormalize = TRUE, stratified = TRUE, mergeManifest = FALSE, sex = NULL)
# MSet.swan <- preprocessSWAN(RGSet)
# RSet2 <- ratioConvert(MSet.swan)
snps <- getSnpInfo(RGSet)
head(snps,10)
RGSet <- addSnpInfo(GRset) #GRset
##filter out snps
RGSet <- dropLociWithSnps(RGSet, snps=c("SBE","CpG"), maf=0)
###filter out cross-reactive probes
devtools::install_github("markgene/maxprobes")
RGSet<-maxprobes::dropXreactiveLoci(RGSet)
##else comment above out
beta <- getBeta(RGSet)
M <- getM(GRset)
CN <- getCN(GRset)
sampleNames <- sampleNames(GRset)
probeNames <- featureNames(GRset)
pheno <- pData(GRset)
gr <- granges(GRset)
annotation <- getAnnotation(RGSet)
qc <- getQC(MSet)
head(qc)
plotQC(qc)
densityPlot(beta, sampGroups = GRset@colData@rownames,main = 'preQC_beta',legend = None)
densityBeanPlot(beta, sampGroups = GRset@colData@rownames,main = 'preQC_beta')
controlStripPlot(RGSet, controls="BISULFITE CONVERSION II")
qcReport(RGSet, pdf= "qcReport.pdf",sampNames = MSet@colData@rownames,sampGroups = MSet@colData@rownames)
write.table(beta[,1],'data/MotifPipeline/ENCODE/methyl_array/A-549b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,2],'data/MotifPipeline/ENCODE/methyl_array/GM12878b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,3],'data/MotifPipeline/ENCODE/methyl_array/HeLa-S3b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,4],'data/MotifPipeline/ENCODE/methyl_array/Hep-G2b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,5],'data/MotifPipeline/ENCODE/methyl_array/K562b.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(beta[,6],'data/MotifPipeline/ENCODE/methyl_array/SKNSHb.txt',sep ='\t',quote = FALSE,dec = '.',col.names = FALSE)
write.table(RGSet@rowRanges@ranges@start,'data/MotifPipeline/ENCODE/methyl_array/startb.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$chr,'data/MotifPipeline/ENCODE/methyl_array/chrb.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation$Relation_to_Island,'data/MotifPipeline/ENCODE/methyl_array/R2island.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$UCSC_RefGene_Group,'data/MotifPipeline/ENCODE/methyl_array/gene_bodyA.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$GencodeBasicV12_Group,'data/MotifPipeline/ENCODE/methyl_array/gene_bodyB.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
write.table(annotation@listData$Name,'data/MotifPipeline/ENCODE/methyl_array/cpg.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
##bedtools intersect to add gene annotation to cg location info
write.table(annotation@listData$Gene,'data/MotifPipeline/ENCODE/methyl_array/gene.txt',sep ='\t',quote = FALSE,dec = '.',row.names=FALSE,col.names = FALSE)
##python convert empty rows to 0s
# ##bash to format as bed file cd data/MotifPipeline/ENCODE/methyl_array/
# paste chrb.txt startb.txt A-549b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> A-549_MeArrayc.txt
# paste chrb.txt startb.txt GM12878b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> GM12878_MeArrayc.txt
# paste chrb.txt startb.txt HeLa-S3b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> HeLa-S3_MeArrayc.txt
# paste chrb.txt startb.txt Hep-G2b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> Hep-G2_MeArrayc.txt
# paste chrb.txt startb.txt K562b.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> K-562_MeArrayc.txt
# paste chrb.txt startb.txt SKNSHb.txt R2island.txt gene_bodyE.txt gene_bodyF.txt| awk '{print $1,$2,$2+1,$4,$5,$6,$7}' OFS='\t'> SK-N-SH_MeArrayc.txt
# #
# #
# # ##map from 19 to 38 cd to data
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/A-549_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/A549_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/HeLa-S3_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/HeLa_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/Hep-G2_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/HepG2_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/K-562_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/K562_MeArrayHG38d.txt unmapped
# ./data/liftOver data/MotifPipeline/ENCODE/methyl_array/SK-N-SH_MeArrayc.txt data/hg19ToHg38.over.chain data/MotifPipeline/ENCODE/methyl_array/SKNSH_MeArrayHG38d.txt unmapped
# #
#
#
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/A549_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/A549_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/GM12878_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/HeLa_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/HeLa_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/HepG2_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/HepG2_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/K562_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/K562_MeArrayHG38e.txt
# cut -f1,2,3,4,5,7 data/MotifPipeline/ENCODE/methyl_array/SKNSH_MeArrayHG38d.txt > data/MotifPipeline/ENCODE/methyl_array/SKNSH_MeArrayHG38e.txt
# ##build shuffled versions
# cut -f4 A-549_MeArrayHG38c.txt|cut -f4 |shuf> shuf_A549.txt
# paste A-549_MeArrayHG38c.txt shuf_A549.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > A-549_shufMeArrayHG38c.txt
# cut -f4 GM12878_MeArrayHG38c.txt|cut -f4 |shuf> shuf_GM12878.txt
# paste GM12878_MeArrayHG38c.txt shuf_GM12878.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > GM12878_shufMeArrayHG38c.txt
# cut -f4 HeLa-S3_MeArrayHG38c.txt|cut -f4 |shuf> shuf_HeLa-S3.txt
# paste HeLa-S3_MeArrayHG38c.txt shuf_HeLa-S3.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > HeLa-S3_shufMeArrayHG38c.txt
# cut -f4 Hep-G2_MeArrayHG38c.txt|cut -f4 |shuf> shuf_Hep-G2.txt
# paste Hep-G2_MeArrayHG38c.txt shuf_Hep-G2.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > Hep-G2_shufMeArrayHG38c.txt
# cut -f4 K-562_MeArrayHG38c.txt|cut -f4 |shuf> shuf_K-562.txt
# paste K-562_MeArrayHG38c.txt shuf_K-562.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > K-562_shufMeArrayHG38c.txt
# cut -f4 SK-N-SH_MeArrayHG38c.txt|cut -f4 |shuf> shuf_SK-N-SH.txt
# paste SK-N-SH_MeArrayHG38c.txt shuf_SK-N-SH.txt | awk '{print $1,$2,$3,$4,$5,$6,$7}' OFS='\t' > SK-N-SH_shufMeArrayHG38c.txt
#
#
### create milipeed motifs
# paste cpg.txt A-549b.txt | awk '{print $1,$2}' OFS='\t'> A-549_Metif.txt
# paste cpg.txt GM12878b.txt | awk '{print $1,$2}' OFS='\t'> GM12878_Metif.txt
# paste cpg.txt HeLa-S3b.txt | awk '{print $1,$2}' OFS='\t'> HeLa_Metif.txt
# paste cpg.txt Hep-G2b.txt | awk '{print $1,$2}' OFS='\t'> HepG2_Metif.txt
# paste cpg.txt K562b.txt | awk '{print $1,$2}' OFS='\t'> K562_Metif.txt
# paste cpg.txt SKNSHb.txt | awk '{print $1,$2}' OFS='\t'> SKNSH_Metif.txt
### create gold standard chip motif for comparison
# cd data/MotifPipeline/ENCODE/methyl_array/
# paste chrb.txt startb.txt A-549b.txt | awk '{print $1,$2,$2+1,$3,$4}' OFS='\t'>> A-549_Metifb.txt
# eval "~/../rekrg/Tools/bedtools2/bin/bedtools slop -i data/MotifPipeline/remap/A549_spRE2020.txt -g ~/../rekrg/Tools/bedtools2/genomes/human.hg38.genome -r 1000 -l 1000" > data/MotifPipeline/remap/slopA549.txt
# eval "~/../rekrg/Tools/bedtools2/bin/bedtools intersect -wa -wb -a data/MotifPipeline/remap/slopA549.txt -b data/MotifPipeline/ENCODE/methyl_array/A-549_Metifb.txt " > data/MotifPipeline/A549GOLD_1000kb.txt
# eval "~/../rekrg/Tools/bedtools2/bin/bedtools intersect -wa -wb -a data/MotifPipeline/remap/A549_spRE2020.txt -b data/MotifPipeline/ENCODE/methyl_array/A-549_Metifb.txt " > data/MotifPipeline/A549GOLD.txt
# |
# Read the simulation output and draw the standard plots.
sim <- read_output(output_file_name)
plot_all(sim)

# Island names; used to assemble the per-island column names below.
islands <- c("NewHaven", "Exotica", "Lampedusa")

# Summarise one iteration of the simulation: initial vs. final (outflowTotal)
# population per island for rabbits and foxes, plus cross-island sums and the
# net change for each species.  `df` holds the rows of a single iteration.
#
# Fixes vs. original: uses `<-` instead of `=` for assignment, and renames the
# intermediate data frames -- the original bound one to `F`, masking the
# built-in FALSE shorthand for the rest of the session.
compare <- function(df) {
  rabbit_initial <- df[1, paste0(islands, ".rabbit.rabbit.initial")]
  rabbit_total <- tail(df[paste0(islands, ".rabbit.rabbit.outflowTotal")], 1)
  rabbits <- data.frame(rabbit_initial, rabbit_total,
                        sum(rabbit_initial), sum(rabbit_total))
  colnames(rabbits) <- c(paste0("rabbit_initial", 0:2),
                         paste0("rabbit_outflowTotal", 0:2),
                         "rabbit_initial_sum", "rabbit_outflowTotal_sum")

  fox_initial <- df[1, paste0(islands, ".fox.fox.initial")]
  fox_total <- tail(df[paste0(islands, ".fox.fox.outflowTotal")], 1)
  foxes <- data.frame(fox_initial, fox_total,
                      sum(fox_initial), sum(fox_total))
  colnames(foxes) <- c(paste0("fox_initial", 0:2),
                       paste0("fox_outflowTotal", 0:2),
                       "fox_initial_sum", "fox_outflowTotal_sum")

  diffs <- data.frame(sum(rabbit_total) - sum(rabbit_initial),
                      sum(fox_total) - sum(fox_initial))
  colnames(diffs) <- c("rabbit_diff", "fox_diff")

  cbind(rabbits, foxes, diffs)
}

# One summary row per iteration.
A <- ddply(sim, .(iteration), compare)
print(A)
| /input/book/demo/migration/islands-4-end.R | no_license | amantaya/UniSim2 | R | false | false | 1,004 | r | sim = read_output(output_file_name)
plot_all(sim)
islands = c("NewHaven", "Exotica", "Lampedusa")
compare = function(df) {
rabbit_initial = df[1, paste0(islands,".rabbit.rabbit.initial")]
rabbit_total = tail(df[paste0(islands,".rabbit.rabbit.outflowTotal")], 1)
R = data.frame(rabbit_initial, rabbit_total, sum(rabbit_initial), sum(rabbit_total))
colnames(R) = c(paste0("rabbit_initial",0:2), paste0("rabbit_outflowTotal",0:2), "rabbit_initial_sum", "rabbit_outflowTotal_sum")
fox_initial = df[1, paste0(islands, ".fox.fox.initial")]
fox_total = tail(df[paste0(islands,".fox.fox.outflowTotal")], 1)
F = data.frame(fox_initial, fox_total, sum(fox_initial), sum(fox_total))
colnames(F) = c(paste0("fox_initial",0:2), paste0("fox_outflowTotal",0:2), "fox_initial_sum", "fox_outflowTotal_sum")
D = data.frame(sum(rabbit_total)-sum(rabbit_initial), sum(fox_total)-sum(fox_initial))
colnames(D) = c("rabbit_diff", "fox_diff")
cbind(R,F,D)
}
A = ddply(sim, .(iteration), compare)
print(A)
|
# Extracted example for rv::rvconst (random vector with a point-mass
# distribution), as generated from the package's Rd documentation.
library(rv)
### Name: rvconst
### Title: Random Vector with a Point-Mass Distribution
### Aliases: rvconst
### Keywords: classes

### ** Examples

# A constant (point-mass) random vector holding the values 1, 2, 3.
x <- rvconst(x=1:3)
# Concatenating a plain numeric appends it as another point-mass component.
c(x, 4)
| /data/genthat_extracted_code/rv/examples/rvconst.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 186 | r | library(rv)
### Name: rvconst
### Title: Random Vector with a Point-Mass Distribution
### Aliases: rvconst
### Keywords: classes
### ** Examples
x <- rvconst(x=1:3)
c(x, 4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/microbiota2.R
\docType{data}
\name{LayerTaxonomy}
\alias{LayerTaxonomy}
\title{LayerTaxonomy}
\format{
An object of class \code{LayerTaxonomy} (inherits from \code{Layer}, \code{ggproto}, \code{gg}) of length 2.
}
\usage{
LayerTaxonomy
}
\description{
The Layer object for \code{\link[=geom_taxonomy]{geom_taxonomy()}}.
}
\details{
Three things are done here:
\enumerate{
\item If \code{scale_fill} is not specified, the \code{\link[=scale_fill_taxonomy]{scale_fill_taxonomy()}} scale is applied.
\item The fill aesthetic is converted to a factor, ordered by \code{tax.palette} colors,
to allow for stacking in the correct order.
\item If \code{show.ribbon=TRUE}, it ensures \code{group} aesthetic does not vary by \code{x}, and makes sure there
is exactly one row for every sample (\code{x}/\code{PANEL}) and \code{group}.
}
}
\keyword{datasets}
| /man/LayerTaxonomy.Rd | no_license | ying14/yingtools2 | R | false | true | 925 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/microbiota2.R
\docType{data}
\name{LayerTaxonomy}
\alias{LayerTaxonomy}
\title{LayerTaxonomy}
\format{
An object of class \code{LayerTaxonomy} (inherits from \code{Layer}, \code{ggproto}, \code{gg}) of length 2.
}
\usage{
LayerTaxonomy
}
\description{
The Layer object for \code{\link[=geom_taxonomy]{geom_taxonomy()}}.
}
\details{
Three things are done here:
\enumerate{
\item If \code{scale_fill} is not specified, the \code{\link[=scale_fill_taxonomy]{scale_fill_taxonomy()}} scale is applied.
\item The fill aesthetic is converted to a factor, ordered by \code{tax.palette} colors,
to allow for stacking in the correct order.
\item If \code{show.ribbon=TRUE}, it ensures \code{group} aesthetic does not vary by \code{x}, and makes sure there
is exactly one row for every sample (\code{x}/\code{PANEL}) and \code{group}.
}
}
\keyword{datasets}
|
library(shiny)
library(ggplot2)
library(phyloseq)
library(data.table)
source("../common/mbiome/mbiome-reader.R")
source("../common/ggplot_ext/eupath_default.R")
source("../common/tooltip/tooltip.R")
source("../common/config.R")
shinyServer(function(input, output, session) {
  # Per-session server state.  These are assigned later with `<<-` from inside
  # the reactive expressions, so they act as caches shared across renderers
  # within one Shiny session.
  mstudy_obj <- NULL
  # this should be a static variable for all R sessions
  NO_METADATA_SELECTED <- "Choose the sample details"
  WIDTH <- global_width
  # Declaring some global variables
  # df_abundance, df_sample and df_sample.formatted are declared global to avoid
  # multiple file reading in the reactive section
  df_abundance <- NULL
  df_sample <- NULL
  df_sample.formatted <- NULL
  # Cached alpha-diversity table (filled by load_microbiome_data).
  richness_object <- NULL
  # Alpha-diversity measures offered in the UI; also used to validate input$measure.
  all_measures <- c("Chao1", "ACE", "Shannon", "Simpson", "Fisher")
  phyloseq_obj <- NULL
  # global objects to read in more than one function
  columns <- NULL
  hash_sample_names<- NULL
  hash_count_samples <- NULL
  # Last-built ggplot objects, kept for hover/tooltip lookups.
  ggplot_object<-NULL
  ggplot_data <- NULL
  ggplot_build_object <- NULL
  ggplot_object_mt<-NULL
  ggplot_data_mt <- NULL
  ggplot_build_object_mt <- NULL
  abundance_otu <- NULL
  abundance_taxa <- NULL
  # Above this many samples the plot height grows per-sample (see renderUI).
  MAX_SAMPLES_NO_RESIZE <- 40
  MIN_HEIGHT_AFTER_RESIZE <- 9.5
  # NOTE(review): the two pairs of constants below duplicate each other with
  # slightly different values (40/40 and 9.5/9) -- confirm which pair is live
  # and remove the other.
  maximum_samples_without_resizing <- 40
  minimum_height_after_resizing <- 9
load_microbiome_data <- reactive({
if(is.null(mstudy_obj)){
# abundance_file <- "MicrobiomeSampleByMetadata_TaxaRelativeAbundance.txt"
# sample_file <- "MicrobiomeSampleByMetadata_Characteristics.txt"
#
# mstudy_obj <<- import.eupath(
# taxa_abundance_path = abundance_file,
# sample_path = sample_file,
# aggregate_by = "Species",
# use_relative_abundance = F
# )
mstudy_obj <<- import.biom(biom_file, metadata_details, use_relative_abundance=F)
updateSelectizeInput(session, "category",
choices = c(
mstudy_obj$get_filtered_categories()),
options = list(placeholder = NO_METADATA_SELECTED),
server = TRUE)
updateSelectizeInput(session, "categoryFacet1",
choices = c(
mstudy_obj$get_filtered_categories()),
options = list(placeholder = "First choose the x-axis"),
server = TRUE)
updateSelectizeInput(session, "categoryFacet2",
choices = c(
mstudy_obj$get_filtered_categories()),
options = list(placeholder = "First choose the x-axis"),
server = TRUE)
phyloseq_obj <- mbiome2phyloseq(mstudy_obj, "Species")
richness_object <<- estimate_richness(phyloseq_obj, measures = all_measures)
richness_object$SampleName <<- gsub("\\.", "\\-", rownames(richness_object))
richness_object$SampleName <<- gsub("^X", "", rownames(richness_object))
}
mstudy_obj
})
  # Section marker for code navigation only; never called.
  allSamples <- function(){}
  # Renders the "all samples" alpha-diversity view: either a per-sample dot
  # plot (with SE bars for Chao1/ACE) or a single boxplot, plus the matching
  # data table.  Returns the plotOutput UI element, sized to the sample count.
  output$allSamplesChart <- renderUI({
    shinyjs::hide("allSamplesArea")
    shinyjs::show("chartLoading")
    mstudy <- load_microbiome_data()
    measure<-input$measure
    plotRadio <- input$plotTypeRadio
    quantity_samples <- mstudy$get_sample_count()
    result_to_show<-NULL
    # NOTE(review): scalar `if` with elementwise `|`; if measure is NULL,
    # is.na(NULL) is logical(0) and this condition errors.  Prefer
    # length(measure) == 0 || ... with short-circuit `||` -- confirm what
    # input$measure yields before anything is selected.
    if(identical(measure,"") | is.na(measure) | !(measure %in% all_measures) ){
      output$allSamplesDt <- renderDataTable(NULL)
      result_to_show<-h5(class="alert alert-danger", "Please choose at least one alpha diversity measure.")
    }else{
      # Chao1 and ACE come with standard-error columns; the other measures
      # have no SE, signalled downstream by se == NULL.
      if(identical(measure, "Chao1")){
        se <- "se.chao1"
        rich <- richness_object[,c("SampleName", measure,se)]
      }else if(identical(measure, "ACE")){
        se <- "se.ACE"
        rich <- richness_object[,c("SampleName", measure,se)]
      }else{
        rich <- richness_object[,c("SampleName", measure)]
        se = NULL
      }
      # Freeze the current row order so ggplot keeps samples in table order.
      rich$SampleName<-factor(rich$SampleName, levels=rich$SampleName)
      data_melted<-melt(rich, id.vars = c("SampleName"), measure.vars=measure)
      if(!is.null(se)){
        # Melt the SE column separately and join it on, relabelled so it
        # merges against the measure rows.
        # NOTE(review): all.x=T uses the reassignable `T` shorthand; prefer TRUE.
        se_melted <-melt(rich, id.vars = c("SampleName"), measure.vars=se)
        se_melted[,"variable"]<-measure
        colnames(se_melted)<-c("SampleName", "variable", "se")
        data_melted<-merge(data_melted,se_melted,by=c("SampleName", "variable"), all.x=T)
      }else{
        data_melted$se<-0 # see if this is necessary
      }
      if(identical(plotRadio, "dotplot")){
        # One point per sample; horizontal error bars only when SE exists.
        chart <- ggplot(data_melted, aes_string(x="value", y="SampleName"))+
          geom_point(shape = 21, alpha=1, colour = "grey", fill = "black", size = 3, stroke = 1.5)+
          theme_eupath_default(
            panel.border = element_rect(colour="black", size=1, fill=NA),
            axis.text.y = element_text(size=rel(0.9))
          )+
          labs(x="Alpha Diversity", y="Samples")
        if(!is.null(se)){
          chart<-chart+geom_errorbarh(aes(xmax=value + se, xmin=value - se), height = .1)
        }
      } # end if is dotplot
      else{
        # Boxplot across all samples for the chosen measure.
        chart<-ggplot(data_melted, aes(variable, value))+geom_boxplot()+
          theme_eupath_default(
            panel.border = element_rect(colour="black", size=1, fill=NA),
            axis.text.x = element_blank(),
            axis.ticks.x = element_blank()
          )+
          labs(x="All Samples", y="Alpha Diversity")
      }
      # Cache the plot session-wide for the hover/tooltip handlers.
      ggplot_object <<- chart
      ggplot_build_object <<- ggplot_build(chart)
      output$allSamplesWrapper<-renderPlot({
        ggplot_object
      })
      # Human-readable headers for the data table.
      if(is.null(se)){
        colnames(rich)<-c("Sample Name", measure)
      }else{
        colnames(rich)<-c("Sample Name", measure, "Std. Error")
      }
      output$allSamplesDt = renderDataTable(
        rich,
        options = list(
          order = list(list(0, 'asc'))
        )
      )
      # Fixed 500px height for small studies or boxplots; otherwise grow the
      # dot plot per sample, capped at 2500px.
      if(quantity_samples <= MAX_SAMPLES_NO_RESIZE | identical(plotRadio, "boxplot")){
        result_to_show<-plotOutput("allSamplesWrapper",
           hover = hoverOpts("plot_hover", delay = 60, delayType = "throttle"),
           width = paste0(WIDTH,"px"),
           height = "500px"
        )
      }else{
        h <- quantity_samples*MIN_HEIGHT_AFTER_RESIZE
        if(h>2500){
          h<-2500
        }
        result_to_show<-plotOutput("allSamplesWrapper",
          hover = hoverOpts("plot_hover", delay = 60, delayType = "throttle"),
          width = paste0(WIDTH,"px"),
          # width = "100%",
          height = h
        )
      }
    }
    shinyjs::hide("chartLoading", anim = TRUE, animType = "slide")
    shinyjs::show("allSamplesArea")
    return(result_to_show)
  })
byMetadata <- function(){}
  # Builds the "by metadata" alpha-diversity panel: validates the user's
  # selections, draws a dot/box plot of the chosen measure against one
  # metadata category (optionally faceted by up to two more), wires up the
  # companion data table and statistical-test output, and returns a
  # plotOutput whose height scales with the number of samples.
  output$byMetadataChart <- renderUI({
    mstudy <- load_microbiome_data()
    result_to_show<-NULL
    # reactive values
    measure<-input$measure
    plotRadio <- input$plotTypeRadio
    # category <- category_button()
    category <- input$category
    verticalCategory <- input$categoryFacet1
    horizontalCategory <- input$categoryFacet2
    # Input validation: each failure clears the table/tests and shows an alert
    if(identical(measure,"") | is.na(measure) | !(measure %in% all_measures) ){
      output$byMetadataDt <- renderDataTable(NULL)
      output$result_tests <- renderUI(NULL)
      result_to_show<-h5(class="alert alert-warning", "Please choose at least one alpha diversity measure.")
    }else if(is.null(category) | identical(category, "")){
      output$byMetadataDt <- renderDataTable(NULL)
      output$result_tests <- renderUI(NULL)
      result_to_show<-h5(class="alert alert-warning", "Please choose the sample detail for the X-Axis.")
    }else if(identical(category, verticalCategory) | identical(category, horizontalCategory)){
      output$byMetadataDt <- renderDataTable(NULL)
      output$result_tests <- renderUI(NULL)
      result_to_show<-h5(class="alert alert-warning", "Please choose different sample details.")
    }
    else{
      shinyjs::hide("metadataContent")
      shinyjs::show("metadataLoading")
      quantity_samples <- mstudy$get_sample_count()
      output$byMetadataDt <- renderDataTable(NULL)
      # A facet category is "in use" when its input is non-empty
      condVertical <- identical(verticalCategory, "")
      condHorizontal <- identical(horizontalCategory, "")
      if(!condVertical & !condHorizontal){
        all_columns<-c(category, verticalCategory, horizontalCategory)
      }else if(!condVertical & condHorizontal){
        all_columns<-c(category, verticalCategory)
      }else if(condVertical & !condHorizontal){
        all_columns<-c(category, horizontalCategory)
      }else{
        all_columns<-c(category)
      }
      # print(all_columns)
      # print(mstudy$get_metadata_as_column("host diet"))
      dt_metadata<-mstudy$get_metadata_as_column(all_columns)
      # Chao1 and ACE carry a standard-error column alongside the estimate
      if(identical(measure,"Chao1")){
        rich <- richness_object[,c("SampleName", measure,"se.chao1")]
      }else if(identical(measure,"ACE")){
        rich <- richness_object[,c("SampleName", measure,"se.ACE")]
      }else{
        rich <- richness_object[,c("SampleName", measure)]
      }
      richness_merged <- merge(dt_metadata, rich, by = "SampleName")
      # richness_merged<-na.omit(richness_merged)
      # data_melted<-melt(richness_merged, id.vars = c("SampleName", category), measure.vars=measure)
      # print("richness_merged")
      # Numeric x-axis branch: continuous metadata; the vertical facet
      # category is mapped to colour instead of a facet row.
      if(identical(class(richness_merged[[category]]),"numeric")){
        if(!condVertical){
          chart<-ggplot(richness_merged,
            aes_string(x=sprintf("`%s`", category), y=measure, color=sprintf("`%s`", verticalCategory)))+
            theme_eupath_default(
              panel.border = element_rect(colour="black", size=1, fill=NA),
              axis.text.x = element_text(size=rel(0.9),face="bold"),
              axis.text.y = element_text(size=rel(0.8),face="bold")
            )+
            labs(x=paste(category), y="Alpha Diversity")
        }else{
          chart<-ggplot(richness_merged,
            aes_string(x=sprintf("`%s`", category), y=measure))+
            theme_eupath_default(
              panel.border = element_rect(colour="black", size=1, fill=NA),
              axis.text.x = element_text(size=rel(0.9),face="bold"),
              axis.text.y = element_text(size=rel(0.8),face="bold")
            )+
            labs(x=paste(category), y="Alpha Diversity")
        }
        # scale_x_discrete(labels = function(x) lapply(strwrap(x, width = 10, simplify = FALSE), paste, collapse="\n"))+
        if(identical(plotRadio, "dotplot")){
          chart<-chart+
            geom_point(shape = 20, alpha=0.7, size = 2)+
            geom_smooth(method = "loess", span = 0.7)
        } # end if is dotplot
        else{
          chart<-chart+
            geom_boxplot()
        }
        if(!condHorizontal){
          joined_categories <- sprintf(" `%s` ~ .", horizontalCategory)
          chart <- chart+facet_grid(as.formula(joined_categories))
          # output$result_tests <- renderUI(NULL)
        }
        # Statistical tests are only shown when no facet category is in use
        if(!condHorizontal | !condVertical){
          output$result_tests <- renderUI(NULL)
        }else{
          output$result_tests <- renderUI(runStatisticalTests(category, measure, chart$data))
        }
        output$byMetadataChartWrapper<-renderPlot({
          chart
        })
      }else{
        # Categorical x-axis branch
        chart<-ggplot(richness_merged, aes_string(sprintf("`%s`",category), measure))+
          theme_eupath_default(
            panel.border = element_rect(colour="black", size=1, fill=NA),
            axis.text.x = element_text(size=rel(0.9),face="bold"),
            axis.text.y = element_text(size=rel(0.8),face="bold")
          )+
          labs(x=paste(category), y="Alpha Diversity")
        # scale_x_discrete(labels = function(x) lapply(strwrap(x, width = 10, simplify = FALSE), paste, collapse="\n"))+
        if(identical(plotRadio, "dotplot")){
          chart<-chart+
            geom_point(shape = 21, alpha=1, colour = "grey", fill = "black", size = 3, stroke = 1.5)
          # NOTE(review): this condition is always TRUE inside this branch
          # (we only get here when the category is NOT numeric), and a loess
          # smoother over a discrete x-axis has no visible effect -- confirm
          # whether the smoother is still wanted here.
          if(!identical(class(richness_merged[[category]]),"numeric")){
            chart<-chart+
              geom_smooth(method = "loess", span = 0.7)
          }
          # geom_errorbar(aes(ymax=value + se, ymin=value - se), height = .1) # error
        } # end if is dotplot
        else{
          chart<-chart+
            geom_boxplot()
        }
        # Build the facet formula from whichever facet categories are in use
        if(!condHorizontal & condVertical){
          joined_categories <- sprintf(" `%s` ~ .", horizontalCategory)
        }else if(!condHorizontal & !condVertical){
          joined_categories <- sprintf("`%s` ~`%s`", horizontalCategory, verticalCategory)
        }else if(condHorizontal & !condVertical){
          joined_categories <- sprintf("~ `%s`", verticalCategory)
        }
        if(!condHorizontal | !condVertical){
          chart <- chart+facet_grid(as.formula(joined_categories))
          output$result_tests <- renderUI(NULL)
        }else{
          output$result_tests <- renderUI(runStatisticalTests(category, measure, chart$data))
        }
        output$byMetadataChartWrapper<-renderPlot({
          chart
        })
      }
      formatTable(richness_merged, measure, category, verticalCategory, horizontalCategory)
      # Cache the plot and its build object for hover tooltips and downloads
      ggplot_object_mt<<-chart
      ggplot_build_object_mt<<-ggplot_build(chart)
      # Fixed 500px height for small studies, boxplots or numeric axes;
      # otherwise the plot grows with the sample count, capped at 2500px.
      if(quantity_samples <= maximum_samples_without_resizing | identical(plotRadio, "boxplot") |
          identical("numeric",class(richness_merged[[category]]))){
        result_to_show<-plotOutput("byMetadataChartWrapper",
          hover = hoverOpts("hoverByMetadata", delay = 60, delayType = "throttle"),
          # width = "100%", height = "500px"
          width = paste0(WIDTH,"px"), height = "500px"
        )
      }else{
        h <- quantity_samples*MIN_HEIGHT_AFTER_RESIZE
        if(h>2500){
          h<-2500
        }
        result_to_show<-plotOutput("byMetadataChartWrapper",
          hover = hoverOpts("hoverByMetadata", delay = 60, delayType = "throttle"),
          width = paste0(WIDTH,"px"),
          height = h
        )
      }
      shinyjs::hide("metadataLoading", anim = TRUE, animType = "fade")
      shinyjs::show("metadataContent")
    }
    result_to_show
  })
formatTable <- function(richness_merged, measure, category, verticalCategory, horizontalCategory){
condVertical <- identical(verticalCategory, "")
condHorizontal <- identical(horizontalCategory, "")
if(!condVertical & !condHorizontal){
colnames(richness_merged)<-c("Sample Name", category, verticalCategory, horizontalCategory, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}else if(!condVertical & condHorizontal){
colnames(richness_merged)<-c("Sample Name", category, verticalCategory, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}else if(condVertical & !condHorizontal){
colnames(richness_merged)<-c("Sample Name", category, horizontalCategory, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}else{
colnames(richness_merged)<-c("Sample Name", category, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}
}
# category_button <- eventReactive(input$doneButton, {
# input$category
# })
runStatisticalTests <- function(category, measures, gg_data){
html_formatted<-"<ul class=\"shell-body\"> %s</ul>"
if(length(category)==1){
levels_df <- levels(factor(gg_data[[category]]))
if(length(levels_df)==2){
html_formatted<-sprintf(html_formatted, "<li>Wilcoxon rank sum test:%s</li>")
}else{
html_formatted<-sprintf(html_formatted, "<li>Kruskal-Wallis rank sum test:%s</li>")
}
text <- ""
# for(i in 1:length(measures)){
# df<-subset(gg_data, variable==measures[i])
df_to_run <- gg_data[,c(category,measures),with=F]
if(length(levels_df)==2){
suppressWarnings(
result<-wilcox.test(df_to_run[[2]] ~ df_to_run[[1]])
)
text<-paste0(text, sprintf("<br>[%s]: W = %f, p-value = %.8f", measures, result$statistic, result$p.value))
}else{
suppressWarnings(
result<-kruskal.test(df_to_run[[1]] ~ df_to_run[[2]])
)
text<-paste0(text, sprintf("<br>[%s]: chi-squared = %f, df = %f, p-value = %.8f", measures, result$statistic, result$parameter, result$p.value))
}
# }
html_formatted<-HTML(sprintf(html_formatted, text))
}else{
html_formatted <- NULL
}
html_formatted
}
  # No-op placeholder, used only as a named bookmark marking the start of the
  # hover-tooltip handlers below.
  hovers <- function(){}
  # Tooltip shown while hovering the all-samples chart. Dispatches to a point
  # tooltip (dotplot) or a boxplot tooltip; the measures that carry a
  # standard error (Chao1, ACE) get an extra "Std. Err." column.
  output$uiHoverAllSamples <- renderUI({
    hover <- input$plot_hover
    # isolate(): changing the radio/measure alone must not re-render tooltips
    isolate(typeRadio<-input$plotTypeRadio)
    isolate(measure<-input$measure)
    # No tooltip when the cursor is outside the plotting region
    if (is.null(hover$x) || round(hover$x) <0 || round(hover$y)<0 || is.null(hover$y))
      return(NULL)
    tooltip<-NULL
    if(identical(typeRadio, "dotplot")){
      measures_with_se <- c("Chao1","ACE")
      if(measure %in% measures_with_se){
        columns_to_show<-c("SampleName", "variable", "value", "se")
        renamed_columns <- c("Sample", "Measure", "Alpha Div.", "Std. Err.")
        # The numeric arguments are pixel offsets for tooltip placement
        tooltip<-generic_point(hover, ggplot_build_object, ggplot_object$data, WIDTH,
          -68, 28, 28, columns_to_show, renamed_columns)
      }else{
        columns_to_show<-c("SampleName", "variable", "value")
        renamed_columns <- c("Sample", "Measure", "Alpha Div.")
        tooltip<-generic_point(hover, ggplot_build_object, ggplot_object$data, WIDTH,
          -55, 28, 28, columns_to_show, renamed_columns)
      }
    }else{
      tooltip<-generic_boxplot(hover, ggplot_build_object, WIDTH, 0, 20,20)
    }
    tooltip
  })
  # Tooltip for the by-metadata chart.
  # NOTE(review): the unconditional return(NULL) just below disables this
  # handler entirely -- everything after it is dead code. It reads like a
  # deliberate temporary switch-off; confirm intent before removing the
  # early return or deleting the remainder.
  output$uiHoverByMetadata <- renderUI({
    hover <- input$hoverByMetadata
    return(NULL)
    isolate(typeRadio<-input$plotTypeRadio)
    isolate(measure<-input$measure)
    # No tooltip when the cursor is outside the plotting region
    if (is.null(hover$x) || round(hover$x) <0 || round(hover$y)<0 || is.null(hover$y))
      return(NULL)
    # hover$panelvar1<-ifelse(is.null(hover$panelvar1), "NA", hover$panelvar1)
    isolate(category<-input$category)
    if(identical(typeRadio, "dotplot")){
      all_columns<-colnames(ggplot_object_mt$data)
      measures_with_se <- c("Chao1","ACE")
      names(measures_with_se)<-c("se.chao1","se.ACE")
      # Extra "Std. Err." column for the measures that carry one; the pixel
      # offset grows with the number of categories shown.
      if(hover$panelvar1 %in% measures_with_se){
        have_measures_se<-measures_with_se %in% measure
        columns_to_show<-all_columns
        renamed_columns <- c("Sample", "Measure", category, "Alpha Div.", "Std. Err.")
        if(length(category)==1){
          tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
            -80, 28, 28, columns_to_show, renamed_columns)
        }else if(length(category)==2){
          tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
            -94, 28, 28, columns_to_show, renamed_columns)
        }
        else{
          tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
            -108, 28, 28, columns_to_show, renamed_columns)
        }
      }else{
        columns_to_show<-all_columns[1:(length(all_columns)-1)]
        renamed_columns <- c("Sample", category, "Measure", "Alpha Div.")
        if(length(category)==1){
          tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
            -66, 28, 28, columns_to_show, renamed_columns)
        }else if(length(category)==2){
          tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
            -80, 28, 28, columns_to_show, renamed_columns)
        }else{
          tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
            -94, 28, 28, columns_to_show, renamed_columns)
        }
      }
    }else{
      # tooltip<-generic_boxplot(hover, ggplot_build_object_mt, WIDTH, 0, 20, 20)
      tooltip<-NULL
      # return(tooltip)
    }
    return(tooltip)
  })
# downloads
  # No-op placeholder, used only as a named bookmark marking the start of the
  # download handlers below.
  downloadButtons <- function(){}
output$btnDownloadPNG <- downloadHandler(
filename = "plot.png",
content = function(file) {
isolate(selected_tab<-input$tabs)
png(file, width=1200,height=800,units="px")
if(identical(selected_tab, "firstTab")){
print(ggplot_object)
}else{
print(ggplot_object_mt)
}
dev.off()
}
)
output$btnDownloadEPS <- downloadHandler(
filename = "plot.eps",
content = function(file) {
isolate(selected_tab<-input$tabs)
setEPS()
postscript(file, width=16,height=10.67, family = "Helvetica")
if(identical(selected_tab, "firstTab")){
print(ggplot_object)
}else{
print(ggplot_object_mt)
}
dev.off()
}
)
output$btnDownloadSVG <- downloadHandler(
filename = "plot.svg",
content = function(file) {
isolate(selected_tab<-input$tabs)
svg(file, width=16,height=10.67)
if(identical(selected_tab, "firstTab")){
print(ggplot_object)
}else{
print(ggplot_object_mt)
}
dev.off()
}
)
output$btnDownloadCSV <- downloadHandler(
filename = "data.csv",
content = function(file) {
isolate(selected_tab<-input$tabs)
if(identical(selected_tab, "firstTab")){
write.csv(ggplot_object$data, file)
}else{
write.csv(ggplot_object_mt$data, file)
}
}
)
shinyjs::hide(id = "loading-content", anim = TRUE, animType = "fade")
shinyjs::show("app-content")
}) # end shinyServer | /alpha_diversity/server.R | no_license | francislon/microbiomedb | R | false | false | 22,709 | r | library(shiny)
library(ggplot2)
library(phyloseq)
library(data.table)
source("../common/mbiome/mbiome-reader.R")
source("../common/ggplot_ext/eupath_default.R")
source("../common/tooltip/tooltip.R")
source("../common/config.R")
shinyServer(function(input, output, session) {
  # --- Module-level state shared by the reactives below ---
  # Cached study object, filled in lazily by load_microbiome_data()
  mstudy_obj <- NULL
  # this should be a static variable for all R sessions
  NO_METADATA_SELECTED <- "Choose the sample details"
  # Fixed pixel width used for every plotOutput (from ../common/config.R)
  WIDTH <- global_width
  # Declaring some global variables
  # df_abundance, df_sample and df_sample.formatted are declared global to avoid
  # multiple file reading in the reactive section
  df_abundance <- NULL
  df_sample <- NULL
  df_sample.formatted <- NULL
  # Per-sample alpha-diversity table computed by load_microbiome_data()
  richness_object <- NULL
  all_measures <- c("Chao1", "ACE", "Shannon", "Simpson", "Fisher")
  phyloseq_obj <- NULL
  # global objects to read in more than one function
  columns <- NULL
  hash_sample_names<- NULL
  hash_count_samples <- NULL
  # Last rendered plots and their ggplot_build() results: plain names for the
  # "all samples" tab, *_mt for the "by metadata" tab; read by the hover
  # tooltip handlers and the download handlers.
  ggplot_object<-NULL
  ggplot_data <- NULL
  ggplot_build_object <- NULL
  ggplot_object_mt<-NULL
  ggplot_data_mt <- NULL
  ggplot_build_object_mt <- NULL
  abundance_otu <- NULL
  abundance_taxa <- NULL
  # NOTE(review): the two constant pairs below duplicate each other and even
  # disagree (9.5 vs 9). In this file only MAX_SAMPLES_NO_RESIZE,
  # MIN_HEIGHT_AFTER_RESIZE and maximum_samples_without_resizing are actually
  # referenced -- consider consolidating.
  MAX_SAMPLES_NO_RESIZE <- 40
  MIN_HEIGHT_AFTER_RESIZE <- 9.5
  maximum_samples_without_resizing <- 40
  minimum_height_after_resizing <- 9
load_microbiome_data <- reactive({
if(is.null(mstudy_obj)){
# abundance_file <- "MicrobiomeSampleByMetadata_TaxaRelativeAbundance.txt"
# sample_file <- "MicrobiomeSampleByMetadata_Characteristics.txt"
#
# mstudy_obj <<- import.eupath(
# taxa_abundance_path = abundance_file,
# sample_path = sample_file,
# aggregate_by = "Species",
# use_relative_abundance = F
# )
mstudy_obj <<- import.biom(biom_file, metadata_details, use_relative_abundance=F)
updateSelectizeInput(session, "category",
choices = c(
mstudy_obj$get_filtered_categories()),
options = list(placeholder = NO_METADATA_SELECTED),
server = TRUE)
updateSelectizeInput(session, "categoryFacet1",
choices = c(
mstudy_obj$get_filtered_categories()),
options = list(placeholder = "First choose the x-axis"),
server = TRUE)
updateSelectizeInput(session, "categoryFacet2",
choices = c(
mstudy_obj$get_filtered_categories()),
options = list(placeholder = "First choose the x-axis"),
server = TRUE)
phyloseq_obj <- mbiome2phyloseq(mstudy_obj, "Species")
richness_object <<- estimate_richness(phyloseq_obj, measures = all_measures)
richness_object$SampleName <<- gsub("\\.", "\\-", rownames(richness_object))
richness_object$SampleName <<- gsub("^X", "", rownames(richness_object))
}
mstudy_obj
})
  # No-op placeholder, used only as a named bookmark marking the start of the
  # "all samples" tab handlers.
  allSamples <- function(){}
  # Builds the "all samples" alpha-diversity panel: one point per sample
  # (dotplot, with error bars for Chao1/ACE) or a single boxplot, plus the
  # companion data table. Returns a plotOutput whose height scales with the
  # number of samples.
  output$allSamplesChart <- renderUI({
    shinyjs::hide("allSamplesArea")
    shinyjs::show("chartLoading")
    mstudy <- load_microbiome_data()
    measure<-input$measure
    plotRadio <- input$plotTypeRadio
    quantity_samples <- mstudy$get_sample_count()
    result_to_show<-NULL
    # A valid single measure must be selected before anything is drawn
    if(identical(measure,"") | is.na(measure) | !(measure %in% all_measures) ){
      output$allSamplesDt <- renderDataTable(NULL)
      result_to_show<-h5(class="alert alert-danger", "Please choose at least one alpha diversity measure.")
    }else{
      # Chao1 and ACE carry a standard-error column alongside the estimate
      if(identical(measure, "Chao1")){
        se <- "se.chao1"
        rich <- richness_object[,c("SampleName", measure,se)]
      }else if(identical(measure, "ACE")){
        se <- "se.ACE"
        rich <- richness_object[,c("SampleName", measure,se)]
      }else{
        rich <- richness_object[,c("SampleName", measure)]
        se = NULL
      }
      # Freeze the current sample order on the y-axis
      rich$SampleName<-factor(rich$SampleName, levels=rich$SampleName)
      data_melted<-melt(rich, id.vars = c("SampleName"), measure.vars=measure)
      if(!is.null(se)){
        se_melted <-melt(rich, id.vars = c("SampleName"), measure.vars=se)
        se_melted[,"variable"]<-measure
        colnames(se_melted)<-c("SampleName", "variable", "se")
        data_melted<-merge(data_melted,se_melted,by=c("SampleName", "variable"), all.x=T)
      }else{
        # Dummy SE column so the layout matches the SE case downstream
        data_melted$se<-0 # see if this is necessary
      }
      if(identical(plotRadio, "dotplot")){
        chart <- ggplot(data_melted, aes_string(x="value", y="SampleName"))+
          geom_point(shape = 21, alpha=1, colour = "grey", fill = "black", size = 3, stroke = 1.5)+
          theme_eupath_default(
            panel.border = element_rect(colour="black", size=1, fill=NA),
            axis.text.y = element_text(size=rel(0.9))
          )+
          labs(x="Alpha Diversity", y="Samples")
        if(!is.null(se)){
          chart<-chart+geom_errorbarh(aes(xmax=value + se, xmin=value - se), height = .1)
        }
      } # end if is dotplot
      else{
        chart<-ggplot(data_melted, aes(variable, value))+geom_boxplot()+
          theme_eupath_default(
            panel.border = element_rect(colour="black", size=1, fill=NA),
            axis.text.x = element_blank(),
            axis.ticks.x = element_blank()
          )+
          labs(x="All Samples", y="Alpha Diversity")
      }
      # Cache the plot and its build object for hover tooltips and downloads
      ggplot_object <<- chart
      ggplot_build_object <<- ggplot_build(chart)
      output$allSamplesWrapper<-renderPlot({
        ggplot_object
      })
      if(is.null(se)){
        colnames(rich)<-c("Sample Name", measure)
      }else{
        colnames(rich)<-c("Sample Name", measure, "Std. Error")
      }
      output$allSamplesDt = renderDataTable(
        rich,
        options = list(
          order = list(list(0, 'asc'))
        )
      )
      # Fixed 500px height for small studies or boxplots; otherwise the plot
      # grows with the sample count, capped at 2500px.
      if(quantity_samples <= MAX_SAMPLES_NO_RESIZE | identical(plotRadio, "boxplot")){
        result_to_show<-plotOutput("allSamplesWrapper",
          hover = hoverOpts("plot_hover", delay = 60, delayType = "throttle"),
          width = paste0(WIDTH,"px"),
          height = "500px"
        )
      }else{
        h <- quantity_samples*MIN_HEIGHT_AFTER_RESIZE
        if(h>2500){
          h<-2500
        }
        result_to_show<-plotOutput("allSamplesWrapper",
          hover = hoverOpts("plot_hover", delay = 60, delayType = "throttle"),
          width = paste0(WIDTH,"px"),
          # width = "100%",
          height = h
        )
      }
    }
    shinyjs::hide("chartLoading", anim = TRUE, animType = "slide")
    shinyjs::show("allSamplesArea")
    return(result_to_show)
  })
byMetadata <- function(){}
output$byMetadataChart <- renderUI({
mstudy <- load_microbiome_data()
result_to_show<-NULL
# reactive values
measure<-input$measure
plotRadio <- input$plotTypeRadio
# category <- category_button()
category <- input$category
verticalCategory <- input$categoryFacet1
horizontalCategory <- input$categoryFacet2
if(identical(measure,"") | is.na(measure) | !(measure %in% all_measures) ){
output$byMetadataDt <- renderDataTable(NULL)
output$result_tests <- renderUI(NULL)
result_to_show<-h5(class="alert alert-warning", "Please choose at least one alpha diversity measure.")
}else if(is.null(category) | identical(category, "")){
output$byMetadataDt <- renderDataTable(NULL)
output$result_tests <- renderUI(NULL)
result_to_show<-h5(class="alert alert-warning", "Please choose the sample detail for the X-Axis.")
}else if(identical(category, verticalCategory) | identical(category, horizontalCategory)){
output$byMetadataDt <- renderDataTable(NULL)
output$result_tests <- renderUI(NULL)
result_to_show<-h5(class="alert alert-warning", "Please choose different sample details.")
}
else{
shinyjs::hide("metadataContent")
shinyjs::show("metadataLoading")
quantity_samples <- mstudy$get_sample_count()
output$byMetadataDt <- renderDataTable(NULL)
condVertical <- identical(verticalCategory, "")
condHorizontal <- identical(horizontalCategory, "")
if(!condVertical & !condHorizontal){
all_columns<-c(category, verticalCategory, horizontalCategory)
}else if(!condVertical & condHorizontal){
all_columns<-c(category, verticalCategory)
}else if(condVertical & !condHorizontal){
all_columns<-c(category, horizontalCategory)
}else{
all_columns<-c(category)
}
# print(all_columns)
# print(mstudy$get_metadata_as_column("host diet"))
dt_metadata<-mstudy$get_metadata_as_column(all_columns)
if(identical(measure,"Chao1")){
rich <- richness_object[,c("SampleName", measure,"se.chao1")]
}else if(identical(measure,"ACE")){
rich <- richness_object[,c("SampleName", measure,"se.ACE")]
}else{
rich <- richness_object[,c("SampleName", measure)]
}
richness_merged <- merge(dt_metadata, rich, by = "SampleName")
# richness_merged<-na.omit(richness_merged)
# data_melted<-melt(richness_merged, id.vars = c("SampleName", category), measure.vars=measure)
# print("richness_merged")
if(identical(class(richness_merged[[category]]),"numeric")){
if(!condVertical){
chart<-ggplot(richness_merged,
aes_string(x=sprintf("`%s`", category), y=measure, color=sprintf("`%s`", verticalCategory)))+
theme_eupath_default(
panel.border = element_rect(colour="black", size=1, fill=NA),
axis.text.x = element_text(size=rel(0.9),face="bold"),
axis.text.y = element_text(size=rel(0.8),face="bold")
)+
labs(x=paste(category), y="Alpha Diversity")
}else{
chart<-ggplot(richness_merged,
aes_string(x=sprintf("`%s`", category), y=measure))+
theme_eupath_default(
panel.border = element_rect(colour="black", size=1, fill=NA),
axis.text.x = element_text(size=rel(0.9),face="bold"),
axis.text.y = element_text(size=rel(0.8),face="bold")
)+
labs(x=paste(category), y="Alpha Diversity")
}
# scale_x_discrete(labels = function(x) lapply(strwrap(x, width = 10, simplify = FALSE), paste, collapse="\n"))+
if(identical(plotRadio, "dotplot")){
chart<-chart+
geom_point(shape = 20, alpha=0.7, size = 2)+
geom_smooth(method = "loess", span = 0.7)
} # end if is dotplot
else{
chart<-chart+
geom_boxplot()
}
if(!condHorizontal){
joined_categories <- sprintf(" `%s` ~ .", horizontalCategory)
chart <- chart+facet_grid(as.formula(joined_categories))
# output$result_tests <- renderUI(NULL)
}
if(!condHorizontal | !condVertical){
output$result_tests <- renderUI(NULL)
}else{
output$result_tests <- renderUI(runStatisticalTests(category, measure, chart$data))
}
output$byMetadataChartWrapper<-renderPlot({
chart
})
}else{
chart<-ggplot(richness_merged, aes_string(sprintf("`%s`",category), measure))+
theme_eupath_default(
panel.border = element_rect(colour="black", size=1, fill=NA),
axis.text.x = element_text(size=rel(0.9),face="bold"),
axis.text.y = element_text(size=rel(0.8),face="bold")
)+
labs(x=paste(category), y="Alpha Diversity")
# scale_x_discrete(labels = function(x) lapply(strwrap(x, width = 10, simplify = FALSE), paste, collapse="\n"))+
if(identical(plotRadio, "dotplot")){
chart<-chart+
geom_point(shape = 21, alpha=1, colour = "grey", fill = "black", size = 3, stroke = 1.5)
if(!identical(class(richness_merged[[category]]),"numeric")){
chart<-chart+
geom_smooth(method = "loess", span = 0.7)
}
# geom_errorbar(aes(ymax=value + se, ymin=value - se), height = .1) # error
} # end if is dotplot
else{
chart<-chart+
geom_boxplot()
}
if(!condHorizontal & condVertical){
joined_categories <- sprintf(" `%s` ~ .", horizontalCategory)
}else if(!condHorizontal & !condVertical){
joined_categories <- sprintf("`%s` ~`%s`", horizontalCategory, verticalCategory)
}else if(condHorizontal & !condVertical){
joined_categories <- sprintf("~ `%s`", verticalCategory)
}
if(!condHorizontal | !condVertical){
chart <- chart+facet_grid(as.formula(joined_categories))
output$result_tests <- renderUI(NULL)
}else{
output$result_tests <- renderUI(runStatisticalTests(category, measure, chart$data))
}
output$byMetadataChartWrapper<-renderPlot({
chart
})
}
formatTable(richness_merged, measure, category, verticalCategory, horizontalCategory)
ggplot_object_mt<<-chart
ggplot_build_object_mt<<-ggplot_build(chart)
if(quantity_samples <= maximum_samples_without_resizing | identical(plotRadio, "boxplot") |
identical("numeric",class(richness_merged[[category]]))){
result_to_show<-plotOutput("byMetadataChartWrapper",
hover = hoverOpts("hoverByMetadata", delay = 60, delayType = "throttle"),
# width = "100%", height = "500px"
width = paste0(WIDTH,"px"), height = "500px"
)
}else{
h <- quantity_samples*MIN_HEIGHT_AFTER_RESIZE
if(h>2500){
h<-2500
}
result_to_show<-plotOutput("byMetadataChartWrapper",
hover = hoverOpts("hoverByMetadata", delay = 60, delayType = "throttle"),
width = paste0(WIDTH,"px"),
height = h
)
}
shinyjs::hide("metadataLoading", anim = TRUE, animType = "fade")
shinyjs::show("metadataContent")
}
result_to_show
})
formatTable <- function(richness_merged, measure, category, verticalCategory, horizontalCategory){
condVertical <- identical(verticalCategory, "")
condHorizontal <- identical(horizontalCategory, "")
if(!condVertical & !condHorizontal){
colnames(richness_merged)<-c("Sample Name", category, verticalCategory, horizontalCategory, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}else if(!condVertical & condHorizontal){
colnames(richness_merged)<-c("Sample Name", category, verticalCategory, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}else if(condVertical & !condHorizontal){
colnames(richness_merged)<-c("Sample Name", category, horizontalCategory, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}else{
colnames(richness_merged)<-c("Sample Name", category, measure)
output$byMetadataDt = renderDataTable(
richness_merged,
options = list(
order = list(list(0, 'desc'))
)
)
}
}
# category_button <- eventReactive(input$doneButton, {
# input$category
# })
runStatisticalTests <- function(category, measures, gg_data){
html_formatted<-"<ul class=\"shell-body\"> %s</ul>"
if(length(category)==1){
levels_df <- levels(factor(gg_data[[category]]))
if(length(levels_df)==2){
html_formatted<-sprintf(html_formatted, "<li>Wilcoxon rank sum test:%s</li>")
}else{
html_formatted<-sprintf(html_formatted, "<li>Kruskal-Wallis rank sum test:%s</li>")
}
text <- ""
# for(i in 1:length(measures)){
# df<-subset(gg_data, variable==measures[i])
df_to_run <- gg_data[,c(category,measures),with=F]
if(length(levels_df)==2){
suppressWarnings(
result<-wilcox.test(df_to_run[[2]] ~ df_to_run[[1]])
)
text<-paste0(text, sprintf("<br>[%s]: W = %f, p-value = %.8f", measures, result$statistic, result$p.value))
}else{
suppressWarnings(
result<-kruskal.test(df_to_run[[1]] ~ df_to_run[[2]])
)
text<-paste0(text, sprintf("<br>[%s]: chi-squared = %f, df = %f, p-value = %.8f", measures, result$statistic, result$parameter, result$p.value))
}
# }
html_formatted<-HTML(sprintf(html_formatted, text))
}else{
html_formatted <- NULL
}
html_formatted
}
hovers <- function(){}
output$uiHoverAllSamples <- renderUI({
hover <- input$plot_hover
isolate(typeRadio<-input$plotTypeRadio)
isolate(measure<-input$measure)
if (is.null(hover$x) || round(hover$x) <0 || round(hover$y)<0 || is.null(hover$y))
return(NULL)
tooltip<-NULL
if(identical(typeRadio, "dotplot")){
measures_with_se <- c("Chao1","ACE")
if(measure %in% measures_with_se){
columns_to_show<-c("SampleName", "variable", "value", "se")
renamed_columns <- c("Sample", "Measure", "Alpha Div.", "Std. Err.")
tooltip<-generic_point(hover, ggplot_build_object, ggplot_object$data, WIDTH,
-68, 28, 28, columns_to_show, renamed_columns)
}else{
columns_to_show<-c("SampleName", "variable", "value")
renamed_columns <- c("Sample", "Measure", "Alpha Div.")
tooltip<-generic_point(hover, ggplot_build_object, ggplot_object$data, WIDTH,
-55, 28, 28, columns_to_show, renamed_columns)
}
}else{
tooltip<-generic_boxplot(hover, ggplot_build_object, WIDTH, 0, 20,20)
}
tooltip
})
output$uiHoverByMetadata <- renderUI({
hover <- input$hoverByMetadata
return(NULL)
isolate(typeRadio<-input$plotTypeRadio)
isolate(measure<-input$measure)
if (is.null(hover$x) || round(hover$x) <0 || round(hover$y)<0 || is.null(hover$y))
return(NULL)
# hover$panelvar1<-ifelse(is.null(hover$panelvar1), "NA", hover$panelvar1)
isolate(category<-input$category)
if(identical(typeRadio, "dotplot")){
all_columns<-colnames(ggplot_object_mt$data)
measures_with_se <- c("Chao1","ACE")
names(measures_with_se)<-c("se.chao1","se.ACE")
if(hover$panelvar1 %in% measures_with_se){
have_measures_se<-measures_with_se %in% measure
columns_to_show<-all_columns
renamed_columns <- c("Sample", "Measure", category, "Alpha Div.", "Std. Err.")
if(length(category)==1){
tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
-80, 28, 28, columns_to_show, renamed_columns)
}else if(length(category)==2){
tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
-94, 28, 28, columns_to_show, renamed_columns)
}
else{
tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
-108, 28, 28, columns_to_show, renamed_columns)
}
}else{
columns_to_show<-all_columns[1:(length(all_columns)-1)]
renamed_columns <- c("Sample", category, "Measure", "Alpha Div.")
if(length(category)==1){
tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
-66, 28, 28, columns_to_show, renamed_columns)
}else if(length(category)==2){
tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
-80, 28, 28, columns_to_show, renamed_columns)
}else{
tooltip<-generic_point(hover, ggplot_build_object_mt, ggplot_object_mt$data, WIDTH,
-94, 28, 28, columns_to_show, renamed_columns)
}
}
}else{
# tooltip<-generic_boxplot(hover, ggplot_build_object_mt, WIDTH, 0, 20, 20)
tooltip<-NULL
# return(tooltip)
}
return(tooltip)
})
# downloads
downloadButtons <- function(){}
output$btnDownloadPNG <- downloadHandler(
filename = "plot.png",
content = function(file) {
isolate(selected_tab<-input$tabs)
png(file, width=1200,height=800,units="px")
if(identical(selected_tab, "firstTab")){
print(ggplot_object)
}else{
print(ggplot_object_mt)
}
dev.off()
}
)
output$btnDownloadEPS <- downloadHandler(
filename = "plot.eps",
content = function(file) {
isolate(selected_tab<-input$tabs)
setEPS()
postscript(file, width=16,height=10.67, family = "Helvetica")
if(identical(selected_tab, "firstTab")){
print(ggplot_object)
}else{
print(ggplot_object_mt)
}
dev.off()
}
)
output$btnDownloadSVG <- downloadHandler(
filename = "plot.svg",
content = function(file) {
isolate(selected_tab<-input$tabs)
svg(file, width=16,height=10.67)
if(identical(selected_tab, "firstTab")){
print(ggplot_object)
}else{
print(ggplot_object_mt)
}
dev.off()
}
)
output$btnDownloadCSV <- downloadHandler(
filename = "data.csv",
content = function(file) {
isolate(selected_tab<-input$tabs)
if(identical(selected_tab, "firstTab")){
write.csv(ggplot_object$data, file)
}else{
write.csv(ggplot_object_mt$data, file)
}
}
)
shinyjs::hide(id = "loading-content", anim = TRUE, animType = "fade")
shinyjs::show("app-content")
}) # end shinyServer |
## Read data. Previously, we must set the working directory where the file
## is stored. na.strings = "?" turns the file's "?" placeholders into NA.
datos <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
str(datos)
## Convert the Date column from factor to Date
datos[, 1] <- as.Date(datos[, 1], format = "%d/%m/%Y")
str(datos)
## Subset to the two days of interest (2007-02-01 and 2007-02-02)
datos2 <- datos[datos$Date == "2007-2-1" | datos$Date == "2007-2-2", ]
## Drop the full data set; only the two-day subset is needed from here on
rm(datos)
library(datasets)
## Build a datetime vector by joining the Date and Time columns
concatenar <- paste(datos2$Date, datos2$Time, sep = " ")
## BUG FIX: this previously read strptime(concat, ...), but no object named
## 'concat' exists -- the pasted vector above is 'concatenar'.
totime <- strptime(concatenar, "%Y-%m-%d %H:%M:%S")
## We don't use dev.copy because the legend wasn't exported correctly
png(filename = "plot4.png")
## 2x2 grid: one panel per sub-plot
par(mfrow = c(2, 2))
## plotting 1: global active power over time
plot(totime, datos2$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
## plotting 2: voltage over time
plot(totime, datos2$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## plotting 3: the three energy sub-metering series on one panel
plot(totime, datos2$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
lines(totime, datos2$Sub_metering_2, col = "red")
lines(totime, datos2$Sub_metering_3, col = "blue")
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## plotting 4: global reactive power over time
plot(totime, datos2$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off() | /myRFiles/plot4.R | no_license | kasares/ExData_Plotting1 | R | false | false | 1,390 | r | ## Read data. Previously, we must set the working directory where the file is stored.
datos <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
str(datos)
## Convert de Date column from factor to date
datos[, 1] <- as.Date(datos[, 1], format = "%d/%m/%Y")
str(datos)
## Subsetting data
datos2 <- datos[datos$Date == "2007-2-1" | datos$Date == "2007-2-2", ]
rm(datos)
library(datasets)
## Build a datetime vector by joining the Date and Time columns
concatenar <- paste(datos2$Date, datos2$Time, sep = " ")
## BUG FIX: this previously read strptime(concat, ...), but no object named
## 'concat' exists -- the pasted vector above is 'concatenar'.
totime <- strptime(concatenar, "%Y-%m-%d %H:%M:%S")
## We don't use dev.copy because the legend wasn't exported correctly
png(filename = "plot4.png")
par(mfrow = c(2, 2))
## plotting 1
plot(totime, datos2$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
## plotting 2
plot(totime, datos2$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## plotting 3
plot(totime, datos2$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black")
lines(totime, datos2$Sub_metering_2, col = "red")
lines(totime, datos2$Sub_metering_3, col = "blue")
legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## plotting 4
plot(totime, datos2$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off() |
library("knitr")
library("rgl")
#knit("terbumeton.Rmd")
#markdownToHTML('terbumeton.md', 'terbumeton.html', options=c("use_xhml"))
#system("pandoc -s terbumeton.html -o terbumeton.pdf")
knit2html('terbumeton.Rmd')
| /FDA_Pesticide_Glossary/terbumeton.R | permissive | andrewdefries/andrewdefries.github.io | R | false | false | 216 | r | library("knitr")
library("rgl")
#knit("terbumeton.Rmd")
#markdownToHTML('terbumeton.md', 'terbumeton.html', options=c("use_xhml"))
#system("pandoc -s terbumeton.html -o terbumeton.pdf")
knit2html('terbumeton.Rmd')
|
\name{Familydata}
\alias{Familydata}
\docType{data}
\title{ Dataset of a hypothetical family}
\description{ Anthropometric and financial data of a hypothetical family}
\usage{data(Familydata)}
\format{
A data frame with 11 observations on the following 6 variables.
\describe{
\item{\code{code}}{a character vector}
\item{\code{age}}{a numeric vector}
\item{\code{ht}}{a numeric vector}
\item{\code{wt}}{a numeric vector}
\item{\code{money}}{a numeric vector}
\item{\code{sex}}{a factor with levels \code{F} \code{M}}
}
}
\examples{
data(Familydata)
.data <- Familydata
des(.data)
summ(.data)
age2 <- with(.data, age)^2
with(.data, plot(age, money, log="y"))
dots.of.age <- seq(0,80,0.01)
new.data.frame <- data.frame(age=dots.of.age, age2=dots.of.age^2)
lm1 <- lm(log(money) ~ age + age2, data=.data)
summary(lm1)$coefficients
dots.of.money <- predict.lm(lm1, new.data.frame)
lines(dots.of.age, exp(dots.of.money), col="blue")
}
\keyword{datasets}
| /man/Familydata.Rd | no_license | cran/epiDisplay | R | false | false | 1,011 | rd | \name{Familydata}
\alias{Familydata}
\docType{data}
\title{ Dataset of a hypothetical family}
\description{ Anthropometric and financial data of a hypothetical family}
\usage{data(Familydata)}
\format{
A data frame with 11 observations on the following 6 variables.
\describe{
\item{\code{code}}{a character vector}
\item{\code{age}}{a numeric vector}
\item{\code{ht}}{a numeric vector}
\item{\code{wt}}{a numeric vector}
\item{\code{money}}{a numeric vector}
\item{\code{sex}}{a factor with levels \code{F} \code{M}}
}
}
\examples{
data(Familydata)
.data <- Familydata
des(.data)
summ(.data)
age2 <- with(.data, age)^2
with(.data, plot(age, money, log="y"))
dots.of.age <- seq(0,80,0.01)
new.data.frame <- data.frame(age=dots.of.age, age2=dots.of.age^2)
lm1 <- lm(log(money) ~ age + age2, data=.data)
summary(lm1)$coefficients
dots.of.money <- predict.lm(lm1, new.data.frame)
lines(dots.of.age, exp(dots.of.money), col="blue")
}
\keyword{datasets}
|
context("Group sizes")

# Shared fixture: x has 3 levels of 10 rows each, y has 6 levels of 5 rows each.
df <- data.frame(x = rep(1:3, each = 10), y = rep(1:6, each = 5))
tbls <- test_load(df)

test_that("ungrouped data has 1 group, with group size = nrow()", {
  for (tbl in tbls) {
    expect_equal(n_groups(tbl), 1L)
    expect_equal(group_size(tbl), 30)
  }
})

test_that("rowwise data has one group for each group", {
  rw <- rowwise(df)
  expect_equal(n_groups(rw), 30)
  expect_equal(group_size(rw), rep(1, 30))
})

test_that("group_size correct for grouped data", {
  for (tbl in tbls) {
    grp <- group_by(tbl, x)
    expect_equal(n_groups(grp), 3L)
    expect_equal(group_size(grp), rep(10, 3))
  }
})

# For the following tests, add a factor level (4) that is absent from the data.
df$x <- factor(df$x, levels = 1:4)  # was `=`; use `<-` for assignment
tbls <- test_load(df)

test_that("n_groups drops zero-length groups", {
  for (tbl in tbls) {
    grp <- group_by(tbl, x)
    expect_equal(n_groups(grp), 3, info = class(tbl)[1])
  }
})

test_that("summarise drops zero-length groups", {
  for (tbl in tbls) {
    res <- tbl %>%
      group_by(x) %>%
      summarise(n = n(), mn = mean(y)) %>%
      collect
    # Only the 3 non-empty groups survive; the last one (x == 3) has 10 rows
    # and a well-defined mean (would be NaN if the empty level were kept).
    expect_equal(nrow(res), 3, info = class(tbl)[1])
    expect_equal(tail(res$n, n = 1), 10, info = class(tbl)[1])
    expect_false(is.nan(tail(res$mn, n = 1)), info = class(tbl)[1])
  }
})
| /tests/testthat/test-group-size.R | no_license | ravinpoudel/dplyr | R | false | false | 1,331 | r | context("Group sizes")
# Data for the first three test_that groups below
df <- data.frame(x = rep(1:3, each = 10), y = rep(1:6, each = 5))
tbls <- test_load(df)
test_that("ungrouped data has 1 group, with group size = nrow()", {
for (tbl in tbls) {
expect_equal(n_groups(tbl), 1L)
expect_equal(group_size(tbl), 30)
}
})
test_that("rowwise data has one group for each group", {
rw <- rowwise(df)
expect_equal(n_groups(rw), 30)
expect_equal(group_size(rw), rep(1, 30))
})
test_that("group_size correct for grouped data", {
for (tbl in tbls) {
grp <- group_by(tbl, x)
expect_equal(n_groups(grp), 3L)
expect_equal(group_size(grp), rep(10, 3))
}
})
# For following tests, add an extra level that's not present in data
df$x = factor(df$x, levels=1:4)
tbls <- test_load(df)
test_that("n_groups drops zero-length groups", {
for (tbl in tbls) {
grp <- group_by(tbl, x)
expect_equal(n_groups(grp), 3, info=class(tbl)[1])
}
})
test_that("summarise drops zero-length groups", {
for (tbl in tbls) {
res <- tbl %>%
group_by(x) %>%
summarise(n = n(), mn = mean(y)) %>%
collect
expect_equal(nrow(res), 3, info = class(tbl)[1])
expect_equal(tail(res$n, n = 1), 10, info = class(tbl)[1])
expect_false(is.nan(tail(res$mn, n = 1)), info = class(tbl)[1])
}
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DoLandSCENT.R
\name{DoLandSCENT}
\alias{DoLandSCENT}
\title{Runs the LandSCENT algorithm}
\usage{
DoLandSCENT(
exp.m,
ppiA.m,
log_trans = FALSE,
mc.cores = 1,
pheno.v = NULL,
coordinates = NULL,
PLOT = TRUE,
PDF = TRUE
)
}
\arguments{
\item{exp.m}{Can be three major kinds of input:
One is a scRNA-Seq data matrix with rows labeling genes and columns
labeling single cells. And it can be either a log-transformed data
matrix with minimal value around 0.1 (recommended), or an
nonlog-transformed data matrix with minimal value 0.
The other two kinds of input can be either a "SingleCellExperiment"
class object or a "CellDataSet" class object}
\item{ppiA.m}{The adjacency matrix of a user-given PPI network with rownames and
colnames labeling genes (same gene identifier as in \code{exp.m})}
\item{log_trans}{A logical. Whether to do log-transformation on the input data
matrix or not. Default is FALSE}
\item{mc.cores}{The number of cores to use, i.e. at most how many child processes will
be run simultaneously. The option is initialized from environment variable
MC_CORES if set. Must be at least one (default), and parallelization
requires at least two cores.}
\item{pheno.v}{A phenotype vector for the single cells, of same length and order as the
columns of \code{exp.m}.
Function can also automatically extract phenotype information
from your original sce/cds data, please store the phenotype information
as name of \code{phenoInfo}.}
\item{coordinates}{Optional. The previous reduced dimension coordinates, with rows labeling cells
and two columns labeling reduced dimensions}
\item{PLOT}{A logical. Decides whether to generate (default) the landSR figure
or not.}
\item{PDF}{A logical. Output figure via pdf file or not, default is TRUE}
}
\value{
Integration.l
A list contains input information and SR values, potency states and more
other results.
A PDF file
If PDF is TRUE(default), then it will automatically generate a pdf file
    plotting cell density against potency states.
}
\description{
Main user function implement LandSCENT. This function is the typical
workflow of the whole package for you to easily use.
}
\examples{
\dontrun{
### define a small network
ppiA.m <- matrix(0,nrow=10,ncol=10)
ppiA.m[1,] <- c(0,1,1,1,1)
for(r in 2:nrow(ppiA.m)){
ppiA.m[r,1] <- 1
}
rownames(ppiA.m) <- paste("G",1:10,sep="")
colnames(ppiA.m) <- paste("G",1:10,sep="")
### define a positively valued expression matrix (20 genes x 10 samples)
exp.m <- matrix(rpois(20*10,8),nrow=20,ncol=10)
colnames(exp.m) <- paste("S",1:10,sep="")
rownames(exp.m) <- paste("G",1:20,sep="")
DoLandSCENT.o <- DoLandSCENT(exp.m, ppiA.m, PLOT = FALSE, PDF = FALSE)
}
}
| /man/DoLandSCENT.Rd | no_license | ChenWeiyan/LandSCENT | R | false | true | 2,766 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DoLandSCENT.R
\name{DoLandSCENT}
\alias{DoLandSCENT}
\title{Runs the LandSCENT algorithm}
\usage{
DoLandSCENT(
exp.m,
ppiA.m,
log_trans = FALSE,
mc.cores = 1,
pheno.v = NULL,
coordinates = NULL,
PLOT = TRUE,
PDF = TRUE
)
}
\arguments{
\item{exp.m}{Can be three major kinds of input:
One is a scRNA-Seq data matrix with rows labeling genes and columns
labeling single cells. And it can be either a log-transformed data
matrix with minimal value around 0.1 (recommended), or an
nonlog-transformed data matrix with minimal value 0.
The other two kinds of input can be either a "SingleCellExperiment"
class object or a "CellDataSet" class object}
\item{ppiA.m}{The adjacency matrix of a user-given PPI network with rownames and
colnames labeling genes (same gene identifier as in \code{exp.m})}
\item{log_trans}{A logical. Whether to do log-transformation on the input data
matrix or not. Default is FALSE}
\item{mc.cores}{The number of cores to use, i.e. at most how many child processes will
be run simultaneously. The option is initialized from environment variable
MC_CORES if set. Must be at least one (default), and parallelization
requires at least two cores.}
\item{pheno.v}{A phenotype vector for the single cells, of same length and order as the
columns of \code{exp.m}.
Function can also automatically extract phenotype information
from your original sce/cds data, please store the phenotype information
as name of \code{phenoInfo}.}
\item{coordinates}{Optional. The previous reduced dimension coordinates, with rows labeling cells
and two columns labeling reduced dimensions}
\item{PLOT}{A logical. Decides whether to generate (default) the landSR figure
or not.}
\item{PDF}{A logical. Output figure via pdf file or not, default is TRUE}
}
\value{
Integration.l
A list contains input information and SR values, potency states and more
other results.
A PDF file
If PDF is TRUE(default), then it will automatically generate a pdf file
ploting cell density against potency states.
}
\description{
Main user function implement LandSCENT. This function is the typical
workflow of the whole package for you to easily use.
}
\examples{
\dontrun{
### define a small network
ppiA.m <- matrix(0,nrow=10,ncol=10)
ppiA.m[1,] <- c(0,1,1,1,1)
for(r in 2:nrow(ppiA.m)){
ppiA.m[r,1] <- 1
}
rownames(ppiA.m) <- paste("G",1:10,sep="")
colnames(ppiA.m) <- paste("G",1:10,sep="")
### define a positively valued expression matrix (20 genes x 10 samples)
exp.m <- matrix(rpois(20*10,8),nrow=20,ncol=10)
colnames(exp.m) <- paste("S",1:10,sep="")
rownames(exp.m) <- paste("G",1:20,sep="")
DoLandSCENT.o <- DoLandSCENT(exp.m, ppiA.m, PLOT = FALSE, PDF = FALSE)
}
}
|
# Exploratory statistical tests on the tidy movie dataset.
# NOTE(review): the input path is machine-specific; consider a relative path.
library(dplyr)
mov<-read.csv("C:/Users/Simran/Desktop/movie_analysis/TidyMovie.csv")
summary(mov)
# Frequency tables of single variables and cross-tabulations.
table(mov$IMDb)
table(mov$Netflix)
table(mov$IMDb, mov$Netflix)
# summary() of a table runs a chi-squared test of independence.
summary(table(mov$Rotten.Tomatoes, mov$Prime.Video))
summary(mov)
table(mov$IMDb)
table(mov$Year)
table(mov$IMDb,mov$Year)
summary(table(mov$IMDb,mov$Year))
# NOTE(review): for a chi-squared independence test, p < 0.05 would indicate
# evidence of association; the original note had the direction backwards.
# 5th percentile of the IMDb ratings.
quantile(mov$IMDb,0.05)
# Quartiles of the IMDb ratings (default 25% probability steps).
quantile(mov$IMDb)
# One-sample t-test of H0: true mean IMDb rating is 7.
t.test(mov$IMDb, mu=7.0)
# If p < 0.05, the data are inconsistent with a true mean of 7
# and the null hypothesis is rejected.
# Same test at a 99% confidence level.
t.test(mov$IMDb,conf.level = 0.99, mu=7.0)
# A very small p-value again argues against a true mean of 7,
# so the null hypothesis is rejected.
# Wilcoxon signed-rank test with a confidence interval for the (pseudo)median.
wilcox.test(mov$IMDb,conf.int = TRUE)
# Test whether the correlation between IMDb rating and runtime is significant.
cor.test(mov$IMDb,mov$Runtime)
# p < 0.05 would indicate a statistically significant correlation.
| /StatisticalTests.R | no_license | Simran5830/movie_analysis | R | false | false | 1,224 | r | library(dplyr)
mov<-read.csv("C:/Users/Simran/Desktop/movie_analysis/TidyMovie.csv")
summary(mov)
table(mov$IMDb)
table(mov$Netflix)
table(mov$IMDb, mov$Netflix)
summary(table(mov$Rotten.Tomatoes, mov$Prime.Video))
summary(mov)
table(mov$IMDb)
table(mov$Year)
table(mov$IMDb,mov$Year)
summary(table(mov$IMDb,mov$Year))
#p-value<0.05 : fails to provide any evidence
# Quantile for number of movies with 5% probability
quantile(mov$IMDb,0.05)
# Quantile for number of movies with an interval of 25% in the probabilities
quantile(mov$IMDb)
# Using t.test for checking if mean can be 7
t.test(mov$IMDb, mu=7.0)
#As p-value<0.05, it's unlikely that true mean is 7
#Null hypothesis is rejected
# Using t.test checking if mean is 7 with a confidence level of 99%
t.test(mov$IMDb,conf.level = 0.99, mu=7.0)
#p-value is very small so it is very unlikely that mean is 7
#Null hypothesis is rejected**
#Calculating confidence interval for median of IMDb using Wilcox test
wilcox.test(mov$IMDb,conf.int = TRUE)
# Checking if correlation between IMDb and Runtime is significant.
cor.test(mov$IMDb,mov$Runtime)
#p-value<0.05 so a significant correlation exist
|
# English word (with trailing space) for a units digit 1-9.
# Any other value of `a` yields NULL, per switch()'s numeric-index semantics.
ones <- function(a) {
  switch(a,
         "one ", "two ", "three ", "four ", "five ",
         "six ", "seven ", "eight ", "nine ")
}
# English word for 11-19, indexed by the units digit `a`
# (1 -> "eleven ", ..., 9 -> "nineteen "); other values yield NULL.
teens <- function(a) {
  switch(a,
         "eleven ", "twelve ", "thirteen ", "fourteen ", "fifteen ",
         "sixteen ", "seventeen ", "eighteen ", "nineteen ")
}
# English word for the tens place: 1 -> "ten ", 2 -> "twenty ", ...,
# 9 -> "ninety "; other values yield NULL.
tens <- function(a) {
  switch(a,
         "ten ", "twenty ", "thirty ", "forty ", "fifty ",
         "sixty ", "seventy ", "eighty ", "ninety ")
}
# Read a number 1-9999 and print its English words piece by piece.
# (The closing brace of the final else block lives on the next line of the file.)
a <- as.numeric(readline("Insert number 1-9999: "))
if (a >= 1000) {
  # switch() truncates a/1000 to its integer part, so this picks the thousands digit
  print(sprintf("%s thousand ", ones(a / 1000)))
  a <- a %% 1000
}
if (a >= 100) {
  print(sprintf("%s hundred and ", ones(a / 100)))
  a <- a %% 100
}
if (a > 10 && a < 20) {
  print(teens(a %% 10))
} else {
  x <- trunc(a / 10)
  # BUG FIX: guard both prints -- tens(0)/ones(0) return NULL, so e.g. 30
  # used to print "thirty " followed by NULL, and 5 used to print NULL first.
  if (x > 0) print(tens(x))
  a <- a %% 10
  if (a > 0) print(ones(a))
} | /1-9999.R | no_license | ElinorThorne/QA_Work | R | false | false | 962 | r | ones <- function(a){
return(switch(a,"one ","two ","three ","four ","five ","six ","seven ","eight ","nine "))
}
teens <- function(a){
return(switch(a,"eleven ","twelve ","thirteen ","fourteen ","fifteen ", "sixteen ","seventeen ","eighteen ","nineteen "))
}
tens <- function(a){
return(switch(a,"ten ", "twenty ", "thirty ", "forty ", "fifty ", "sixty ", "seventy ","eighty ","ninety "))
}
a <-as.numeric(readline("Insert number 1-9999: "))
if(a>=1000){
word = sprintf("%s thousand ", ones(a/1000))
print(word)
a = a%%1000;
#print(sprintf("The result is now: %d", a))
}
if(a>=100){
word = sprintf("%s hundred and ", ones(a/100))
print(word)
a = a%%100;
#print(sprintf("The result is now: %d", a))
}
if(a>10 && a<20){
word = (teens(a%%10))
print(word)
#a = a%%10;
#print(sprintf("The result is now: %d", a))
} else{
x = trunc(a/10)
print(tens(x))
a = a%%10;
print((ones(a)))
} |
#' One-row tibble summary of a Sheets API `SheetProperties` object.
#'
#' The glean_* helpers (defined elsewhere in the package) appear to extract a
#' typed scalar from `x` by key path -- confirm their exact semantics there.
#' @export
as_tibble.googlesheets4_schema_SheetProperties <- function(x, ...) {
  tibble::tibble(
    # TODO: open question whether I should explicitly unescape title here
    name = glean_chr(x, "title"),
    index = glean_int(x, "index"),
    id = glean_int(x, "sheetId"),
    type = glean_chr(x, "sheetType"),
    # API field is "hidden"; expose the positive sense, defaulting to visible
    visible = !glean_lgl(x, "hidden", .default = FALSE),
    grid_rows = glean_int(x, c("gridProperties", "rowCount")),
    grid_columns = glean_int(x, c("gridProperties", "columnCount"))
  )
}
| /R/schema_SheetProperties.R | permissive | MarkEdmondson1234/googlesheets4 | R | false | false | 554 | r | #' @export
as_tibble.googlesheets4_schema_SheetProperties <- function(x, ...) {
tibble::tibble(
# TODO: open question whether I should explicitly unescape title here
name = glean_chr(x, "title"),
index = glean_int(x, "index"),
id = glean_int(x, "sheetId"),
type = glean_chr(x, "sheetType"),
visible = !glean_lgl(x, "hidden", .default = FALSE),
grid_rows = glean_int(x, c("gridProperties", "rowCount")),
grid_columns = glean_int(x, c("gridProperties", "columnCount"))
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rhymer.R
\name{get_content}
\alias{get_content}
\title{Extract content from Datamuse API call.}
\usage{
get_content(full_path, return_type = "df", limit = 10)
}
\arguments{
\item{full_path}{API path to append to Datamuse API endpoint.}
\item{return_type}{type to return. Options are:
\itemize{
\item 'df' for data.frame.
\item 'word' for a single word.
\item 'random_word' or 'random word' or 'rand' for a random word.
\item 'vector' for a vector of words.
}}
\item{limit}{max number of rows to return from the content dataframe.}
}
\value{
content returned from API call.
}
\description{
Extract content from Datamuse API call.
}
\examples{
get_content("/words?rel_rhy=test", limit = 5)
get_content("/words?ml=test", limit = 20)
}
| /man/get_content.Rd | no_license | landesbergn/rhymer | R | false | true | 813 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rhymer.R
\name{get_content}
\alias{get_content}
\title{Extract content from Datamuse API call.}
\usage{
get_content(full_path, return_type = "df", limit = 10)
}
\arguments{
\item{full_path}{API path to append to Datamuse API endpoint.}
\item{return_type}{type to return. Options are:
\itemize{
\item 'df' for data.frame.
\item 'word' for a single word.
\item 'random_word' or 'random word' or 'rand' for a random word.
\item 'vector' for a vector of words.
}}
\item{limit}{max number of rows to return from the content dataframe.}
}
\value{
content returned from API call.
}
\description{
Extract content from Datamuse API call.
}
\examples{
get_content("/words?rel_rhy=test", limit = 5)
get_content("/words?ml=test", limit = 20)
}
|
#1b)
# i) Simulate one block lifetime: the maximum of two independent
#    Exp(rate = 1/10) draws.
Xa <- rexp(n = 1, rate = 1 / 10)
Xb <- rexp(n = 1, rate = 1 / 10)
x <- max(Xa, Xb)
# ii) Repeat step i) 10,000 times with replicate().
size10k <- replicate(10000, max(rexp(n = 1, rate = 1 / 10), rexp(n = 1, rate = 1 / 10)))
# iii) Histogram of the simulated lifetimes on the density scale; the exact
#      density is superimposed afterwards via curve().
hist(size10k, prob = TRUE)
# Exact density of the maximum of two independent Exp(rate = 1/10) lifetimes:
# f(x) = 2 * (1/10) * exp(-x/10) * (1 - exp(-x/10)).
pdf <- function(x) {
  0.2 * exp(-0.1 * x) - 0.2 * exp(-0.2 * x)
}
curve(pdf(x), add = TRUE)
# iv) Average lifetime over the 10,000 simulated blocks.
mean(size10k)
# P(lifetime > 15) using an exponential fitted to the simulated mean.
# NOTE(review): the max of two exponentials is not itself exponential, so this
# is an approximation -- confirm it matches the assignment's intent.
1 - pexp(15, rate = 1 / mean(size10k))
# 1c) Repeat the estimate with sample sizes of 1,000 and 100,000.
size1k <- replicate(1000, max(rexp(n = 1, rate = 1 / 10), rexp(n = 1, rate = 1 / 10)))
mean(size1k)
1 - pexp(15, rate = 1 / mean(size1k))
size100k <- replicate(100000, max(rexp(n = 1, rate = 1 / 10), rexp(n = 1, rate = 1 / 10)))
mean(size100k)
1-pexp(15, rate = 1/(mean(size100k))) | /Project 1/Question 1.R | no_license | vishnubapana/Data-Science | R | false | false | 1,145 | r | #1b)
# i) Simulating 1 block lifetime of Xa and Xb.
Xa = rexp(n=1, rate=1/10) # Simulating 1 draw of Xa.
Xb = rexp(n=1, rate=1/10) # Simulation 1 draw of Xb.
x = max(Xa, Xb) # Taking the maximum of Xa and Xb.
# ii) Repeating step i) 10,000 times using the replicate function.
size10k = replicate(10000, max(rexp(n=1, rate=1/10), rexp(n=1, rate=1/10)))
# iii) Using Hist to generate a histogram and then superimposing
# the density function on the histogram using curve.
hist(size10k, prob = TRUE)
pdf = function(x){
return (0.2*exp(-0.1*x)-0.2*exp(-0.2*x))
}
curve(pdf(x), add = TRUE)
# iv) Used mean to calculate the average of the 10,000 repetitions.
mean(size10k)
1-pexp(15, rate = 1/ mean(size10k)) # Using 1 - the distribution function to
# calculate the probability.
# 1c) Performed the step vi) using sample size of 1,000, and 100,000.
size1k = replicate(1000, max(rexp(n=1, rate=1/10), rexp(n=1, rate=1/10)))
mean(size1k)
1-pexp(15, rate = 1/(mean(size1k)))
size100k = replicate(100000, max(rexp(n=1, rate=1/10), rexp(n=1, rate=1/10)))
mean(size100k)
1-pexp(15, rate = 1/(mean(size100k))) |
############# 27-OCT-2014
############# It is just a trial version of codes.
############# samiri2@unl.edu
###################################
################################# Ensemble
# Ensemble ("stable") clustering of the rows of `x` into kn0 clusters.
# Generates B randomized base clusterings in parallel (Hub2MQ with a random
# target cluster count in [2, min(25, n/5 - 2)]), builds a co-association
# distance between observations (number of base clusterings that disagree),
# cuts a single-linkage tree on it, then merges minority/extra clusters until
# exactly kn0 clusters remain. Returns the vector of cluster labels.
SHC <- function(x, kn0, B = 200) {
  x <- cbind(x, rep(0, nrow(x)))  # extra column to carry cluster labels
  Len <- dim(x)
  clusterO <- Len[2]
  knmin <- 2
  knmax <- min(25, floor(dim(x)[1] / 5) - 2)
  kn <- sample(c(knmin, knmax), 1)
  # Local copies of the helpers so foreach() can .export them to the workers.
  RHub2MQ <- function(x, kn, knmin, knmax) {
    kn <- sample(c(knmin:knmax), 1)
    dd <- Hub2MQ(x, kn)
    Len <- dim(x)
    return(dd[, Len[2]])
  }
  distancematrix0 <- function(data) {
    data <- reorderf(data)
    Len <- dim(data)
    nk <- as.integer(names(table(data[, Len[2]])))
    ss <- length(unique(data[, Len[2]]))
    dismat <- matrix(NA, ncol = ss, ss)
    for (i in 1:ss) {
      if (i == ss) break
      for (j in ((i + 1):ss)) {
        ij0 <- data[, Len[2]] == i
        ij1 <- data[, Len[2]] == j
        dismat[i, j] <- (distwo(data[ij0, -Len[2]], data[ij1, -Len[2]]))
      }
    }
    t(dismat)
  }
  reorderf <- function(data) {
    Len <- dim(data)
    nk <- as.integer(names(table(data[, Len[2]])))
    h <- 0
    data0 <- cbind(data, NA)
    for (i in nk) {
      ij <- data[, Len[2]] == i
      h <- h + 1
      data0[ij, Len[2] + 1] <- h
    }
    return(data0[, -Len[2]])
  }
  distwo <- function(data1, data2) {
    d1 <- dim(data1)
    if (is.null(d1)) { data1 <- t(as.matrix(data1)); d1 <- dim(data1) }
    d2 <- dim(data2)
    if (is.null(d2)) { data2 <- t(as.matrix(data2)); d2 <- dim(data2) }
    di0 <- NULL
    ff <- 1
    for (i in 1:d1[1]) {
      for (j in 1:d2[1]) {
        di0[ff] <- mean((data1[i, ] - data2[j, ])^2)
        ff <- ff + 1
      }
    }
    quantile(di0, probs = .2)
  }
  cl <- makeCluster(detectCores() - 1)  # use all but one available core
  registerDoParallel(cl)
  # BUG FIX: the ensemble size was hard-coded as 1:200; honour the B argument
  # (the companion function EK already uses 1:B).
  ens <- foreach(i = 1:B,
                 .combine = "rbind",
                 .export = c("Hub2MQ", "distancematrix0", "reorderf", "distwo")) %dopar% {
    fit1 <- RHub2MQ(x, kn, knmin, knmax)
    fit1
  }
  stopCluster(cl)
  # Co-association distance between observations across the ensemble.
  REDIST <- as.dist(distancematrixH(ens))
  REDISTT <- as.matrix(REDIST)
  hc <- hclust(REDIST, method = "single")
  # Initial cut: average of the merge heights that survive around kn0 clusters.
  zhh <- mean(Xsub(hc, kn0), na.rm = T)
  kstar <- length(unique(cutree(hc, h = zhh)))
  cc <- cutree(hc, kstar)
  kn <- kn0
  xl2 <- x
  ni <- Len[1]
  for (i in 1:ni) {
    xl2[i, clusterO] <- cc[i]
  }
  # Split clusters into "main" and "minority" (relative size < alpha0),
  # halving alpha0 until more than kn main clusters remain.
  alpha0 <- .05
  while (alpha0 > 0) {
    xcc <- NULL
    for (j in unique(cc)) xcc[j] <- length(xl2[xl2[, clusterO] == j, clusterO])
    mino0 <- which(xcc / dim(x)[1] < alpha0)
    main0 <- setdiff(cc, mino0)
    if (length(main0) > kn) break
    alpha0 <- alpha0 / 2
  }
  # Renumber so main clusters come first, minority clusters after them.
  i <- 1
  cc0 <- NULL
  for (j in main0) {
    cc0[cc == j] <- i
    i <- i + 1
  }
  for (j in mino0) {
    cc0[cc == j] <- i
    i <- i + 1
  }
  kmi <- length(mino0)
  kma2 <- kma <- length(main0)
  cc1 <- cc0
  # Merge the two closest main clusters until only kn remain.
  while (kma2 > kn) {
    xz <- list(NULL)
    for (i in unique(cc1)) {
      xz[[i]] <- which(cc1 == i)
    }
    XXXX <- distancematrix0SE3(REDISTT, c(1:kma2), xz)
    cc1[cc1 == XXXX[2]] <- XXXX[1]
    cc1 <- reorderfSE(cc1)
    kma2 <- kma2 - 1
  }
  main02 <- 1:kma2
  mino02 <- (kma2 + 1):(kma2 + kmi)
  xz <- list(NULL)
  for (i in unique(cc1)) {
    xz[[i]] <- which(cc1 == i)
  }
  xl2 <- x
  ni <- Len[1]
  for (i in 1:ni) {
    xl2[i, clusterO] <- cc1[i]
  }
  # Absorb each minority cluster into its nearest main cluster.
  if (!length(mino0) == 0) {
    while (!length(mino02) == 0) {
      ind <- md <- NULL
      i0 <- 1
      for (i1 in mino02) {
        d1 <- NULL
        for (i2 in main02) {
          d1 <- c(d1, distwoA(REDISTT, xz[[i2]], xz[[i1]]))
        }
        ind[i0] <- which.min(d1)
        md[i0] <- min(d1, na.rm = T)
        i0 <- i0 + 1
      }
      xl2[xl2[, clusterO] == mino02[which.min(md)], clusterO] <- main02[ind[which.min(md)]]
      cc1[cc1 == mino02[which.min(md)]] <- main02[ind[which.min(md)]]
      xz <- list(NULL)
      for (i in unique(cc1)) {
        xz[[i]] <- which(cc1 == i)
      }
      mino02 <- setdiff(mino02, mino02[which.min(md)])
    }
  }
  xl2[, clusterO]
}
############################### SIZE of cluster
# Estimate the number of clusters in `x` from an ensemble of B randomized base
# clusterings: build the co-association distance, cut the single-linkage tree
# at the largest jump in merge heights, and -- if that cut produces tiny
# clusters (< 0.9% of observations) -- average with the cut at the
# second-largest jump. Returns the (possibly fractional) estimate.
EK <- function(x, B = 200) {
  x <- cbind(x, rep(0, nrow(x)))  # extra column to carry cluster labels
  Len <- dim(x)
  clusterO <- Len[2]
  knmin <- 2
  knmax <- min(25, floor(dim(x)[1] / 5) - 2)
  kn <- sample(c(knmin, knmax), 1)
  # NOTE: a dead initial call dd <- Hub2MQ(x, kn) was removed here; its result
  # (RE) was never used and wasted a full base-clustering run.
  # Local copies of the helpers so foreach() can .export them to the workers.
  RHub2MQ <- function(x, kn, knmin, knmax) {
    kn <- sample(c(knmin:knmax), 1)
    dd <- Hub2MQ(x, kn)
    Len <- dim(x)
    return(dd[, Len[2]])
  }
  distancematrix0 <- function(data) {
    data <- reorderf(data)
    Len <- dim(data)
    nk <- as.integer(names(table(data[, Len[2]])))
    ss <- length(unique(data[, Len[2]]))
    dismat <- matrix(NA, ncol = ss, ss)
    for (i in 1:ss) {
      if (i == ss) break
      for (j in ((i + 1):ss)) {
        ij0 <- data[, Len[2]] == i
        ij1 <- data[, Len[2]] == j
        dismat[i, j] <- (distwo(data[ij0, -Len[2]], data[ij1, -Len[2]]))
      }
    }
    t(dismat)
  }
  reorderf <- function(data) {
    Len <- dim(data)
    nk <- as.integer(names(table(data[, Len[2]])))
    h <- 0
    data0 <- cbind(data, NA)
    for (i in nk) {
      ij <- data[, Len[2]] == i
      h <- h + 1
      data0[ij, Len[2] + 1] <- h
    }
    return(data0[, -Len[2]])
  }
  distwo <- function(data1, data2) {
    d1 <- dim(data1)
    if (is.null(d1)) { data1 <- t(as.matrix(data1)); d1 <- dim(data1) }
    d2 <- dim(data2)
    if (is.null(d2)) { data2 <- t(as.matrix(data2)); d2 <- dim(data2) }
    di0 <- NULL
    ff <- 1
    for (i in 1:d1[1]) {
      for (j in 1:d2[1]) {
        di0[ff] <- mean((data1[i, ] - data2[j, ])^2)
        ff <- ff + 1
      }
    }
    quantile(di0, probs = .2)
  }
  cl <- makeCluster(detectCores() - 1)  # use all but one available core
  registerDoParallel(cl)
  ens <- foreach(i = 1:B,
                 .combine = "rbind",
                 .export = c("Hub2MQ", "distancematrix0", "reorderf", "distwo")) %dopar% {
    fit1 <- RHub2MQ(x, kn, knmin, knmax)
    fit1
  }
  stopCluster(cl)
  # Co-association distance between observations across the ensemble.
  REDIST <- as.dist(distancematrixH(ens))
  hclustM <- hclust(REDIST, method = "single")
  # Cut at the largest jump in merge heights.
  cutValue <- hclustM$height[which.max(diff(hclustM$height))]
  ee <- cutree(hclustM, h = cutValue)
  ee0 <- length(unique(ee))
  # Clusters holding < 0.9% of observations are treated as spurious.
  idn <- as.numeric(names(table(ee)))[table(ee) / length(ee) < .009]
  eeNA <- NULL
  for (i in idn) {
    eeNA <- c(eeNA, which(ee == i))
  }
  if (length(eeNA) != 0) {
    # Re-cut at the second-largest height jump and average the two estimates
    # (counting only non-spurious clusters in the second cut).
    SEMAX <- sort(which(diff(hclustM$height) == sort(diff(hclustM$height), decreasing = TRUE)[2]), decreasing = TRUE)[1]
    cutValue <- hclustM$height[SEMAX]
    ee2 <- cutree(hclustM, h = cutValue)
    ee0 <- mean(c(sum(table(ee2) / length(ee2) > .009), length(unique(ee))))
  }
  return(ee0)
}
#######################
######################
######################
# One randomized "base" clustering: over-cluster the data with MacQueen
# k-means (about n/4 .. n/6 centres, divisor chosen at random), then merge the
# resulting groups down to `kn` clusters by single-linkage on the distwo()
# inter-group distance. Returns `x` with its last column replaced by the
# final cluster label of each row.
Hub2MQ <- function(x, kn) {
  clusterO <- dim(x)[2]  # last column of x carries the cluster label
  zz <- sample(c(4:6), 1)  # random granularity divisor for the k-means step
  # BUG FIX: the kmeans call was wrapped in parentheses, auto-printing the
  # whole kmeans object to the console on every ensemble iteration.
  cl <- kmeans(x[, -clusterO], floor(dim(x)[1] / zz), algorithm = "MacQueen",
               iter.max = 50, nstart = 1)
  xl <- cbind(x[, -clusterO], cl$cluster)
  xlc <- as.dist(distancematrix0(xl))
  hc <- hclust(xlc, method = "single")
  # Cannot cut deeper than the number of k-means groups actually produced.
  if (kn > length(cl$size)) kn <- length(cl$size) - 1
  cc <- cutree(hc, kn)
  xl2 <- cbind(x, NA)
  # Map each k-means group id to its merged cluster label.
  for (i in sort(unique(xl[, clusterO]))) {
    xl2[xl[, clusterO] == i, clusterO + 1] <- cc[i]
  }
  return(xl2[, -clusterO])
}
###distancematrixHT<-function(data){
### Len<-dim(data)
### ss<-Len[2]
### dismat<- matrix(NA,ncol=ss,ss)
### for(i in 1:ss){
### for(j in 1:ss){
### dismat[i,j]<-(distancem(data[,i],data[,j]))
### }
### }
### t(dismat)
###}
# Pairwise disagreement distance between the COLUMNS of `data` (each column is
# one observation's labels across the ensemble), returned with the distances
# in the lower triangle after the transpose; everything else stays NA.
distancematrixH <- function(data) {
  n_obs <- dim(data)[2]
  dist_mat <- matrix(NA, nrow = n_obs, ncol = n_obs)
  for (left in 1:n_obs) {
    if (left == n_obs) break
    for (right in (left + 1):n_obs) {
      dist_mat[left, right] <- distancem(data[, left], data[, right])
    }
  }
  t(dist_mat)
}
# Hamming-style distance: the number of positions where `a` and `b` disagree.
distancem <- function(a, b) {
  sum(a != b)
}
# Relabel the cluster ids stored in the LAST column of `data` so they become
# consecutive values 1..k (assigned in increasing order of the original ids);
# all other columns pass through unchanged.
reorderf <- function(data) {
  label_col <- dim(data)[2]
  old_ids <- as.integer(names(table(data[, label_col])))
  out <- cbind(data, NA)
  new_id <- 0
  for (old in old_ids) {
    new_id <- new_id + 1
    out[data[, label_col] == old, label_col + 1] <- new_id
  }
  out[, -label_col]
}
# Between-cluster distance matrix. Rows of `data` are observations; the last
# column holds cluster ids, which are first normalised to 1..k via reorderf().
# The distwo() distance between clusters i and j ends up in the lower triangle
# (entry [j, i]) after the final transpose; the rest stays NA.
distancematrix0 <- function(data) {
  data <- reorderf(data)
  label_col <- dim(data)[2]
  k <- length(unique(data[, label_col]))
  dist_mat <- matrix(NA, nrow = k, ncol = k)
  for (i in 1:k) {
    if (i == k) break
    rows_i <- data[, label_col] == i
    for (j in (i + 1):k) {
      rows_j <- data[, label_col] == j
      dist_mat[i, j] <- distwo(data[rows_i, -label_col], data[rows_j, -label_col])
    }
  }
  t(dist_mat)
}
# 20th percentile of the mean-squared differences between every row of
# `data1` and every row of `data2`; bare vectors are treated as single rows.
distwo <- function(data1, data2) {
  if (is.null(dim(data1))) data1 <- t(as.matrix(data1))
  if (is.null(dim(data2))) data2 <- t(as.matrix(data2))
  n1 <- nrow(data1)
  n2 <- nrow(data2)
  pair_dist <- numeric(n1 * n2)
  k <- 0
  for (r1 in seq_len(n1)) {
    for (r2 in seq_len(n2)) {
      k <- k + 1
      pair_dist[k] <- mean((data1[r1, ] - data2[r2, ])^2)
    }
  }
  quantile(pair_dist, probs = 0.2)
}
# Single-linkage distance between two groups: the smallest entry of the
# distance matrix `xlcT0` over all pairs (i, j) with i drawn from the index
# set `dat1` and j from `dat2`.
distwoA2 <- function(xlcT0, dat1, dat2) {
  row_ids <- unlist(dat1)
  col_ids <- unlist(dat2)
  vals <- NULL
  for (r in row_ids) {
    vals <- c(vals, xlcT0[r, col_ids])
  }
  min(vals)
}
# Single-linkage distance between two groups of observations: the minimum
# entry of the precomputed distance matrix `xlcT0` over all pairs (i, j)
# with i from the index set `dat1` and j from `dat2`.
# NOTE: functionally identical to distwoA2(); kept for backward compatibility.
# (Dead commented-out code from an earlier revision was removed, and the
# element-by-element accumulation loop replaced by a single submatrix min.)
distwoA <- function(xlcT0, dat1, dat2) {
  row_ids <- unlist(dat1)
  col_ids <- unlist(dat2)
  min(xlcT0[row_ids, col_ids])
}
# Find the pair of clusters (among the ids in `W1`) with the smallest
# single-linkage distance. `XLL` is the full point-point distance matrix and
# `XZZ[[i]]` holds the row indices belonging to cluster i. Returns the sorted
# pair of cluster ids; ties are broken at random via sample().
distancematrix0SE3<-function(XLL,W1,XZZ){
ss<-length(W1)
dismat<- matrix(NA,ncol=ss,ss)#array(NA, dim=c(1,Len[2]-1,ss,ss))
# Fill the upper triangle with pairwise cluster distances.
for(i in W1){
if (i==W1[ss]) break
for(j in (W1[(which(W1==i)+1):ss])){
dismat[i,j]<-distwoA2(XLL,XZZ[[i]],XZZ[[j]])
}
}
t(dismat)
# NOTE(review): the bare t(dismat) above is dead code -- its value is discarded.
eee<-which(t(dismat)==min(t(dismat),na.rm=T), arr.ind = TRUE)
sort(eee[sample(dim(eee)[1],1),])
}
# Relabel a vector of cluster ids to consecutive values 1..k, assigning new
# ids in increasing order of the original ids.
reorderfSE <- function(data) {
  old_ids <- as.integer(names(table(data)))
  relabeled <- NULL
  new_id <- 0
  for (old in old_ids) {
    new_id <- new_id + 1
    relabeled[data == old] <- new_id
  }
  relabeled
}
# Build, for every row of an hclust-style merge matrix `xx`, the set of merge
# rows "contained" in that merge: the row itself plus, recursively, the rows
# of any previously formed clusters it combines (negative entries denote
# singleton observations and contribute nothing).
CreatXCC<-function(xx){
# xxc[[l1]] collects the merge-row indices reachable from row l1
xxc<-list()
len<-dim(xx)
xxc[[1]]<-1
for(l1 in 2:(len[1])){
if(xx[l1,1]<0&xx[l1,2]<0) xxc[[l1]]<-l1
if(xx[l1,1]<0&xx[l1,2]>0) xxc[[l1]]<-c(l1,unlist(xxc[[xx[l1,2]]]))
if(xx[l1,1]>0&xx[l1,2]>0) xxc[[l1]]<-c(l1,unlist(xxc[[xx[l1,1]]]),unlist(xxc[[xx[l1,2]]]))
}
# NOTE(review): the case xx[l1,1]>0 & xx[l1,2]<0 is never handled; this
# assumes merge rows always list a singleton (negative) entry first -- confirm
# against the hclust() merge convention.
xxc
}
# For an hclust tree `hclust0`, return the merge heights that "survive" when
# the tree is cut to roughly K0 clusters: starting just below the merge row
# closest to that cut, walk the rows top-down, record each row's height, and
# skip rows already absorbed into a recorded cluster (membership sets come
# from CreatXCC()). Unvisited positions of the result stay NA.
Xsub <- function(hclust0, K0) {
  xx <- hclust0$merge
  xcc <- CreatXCC(xx)
  xc <- NULL  # merge rows already covered by a recorded cluster
  zh <- NULL  # recorded heights, indexed by merge row
  heigh1 <- hclust0$height  # BUG FIX: was hclust0$heigh (relied on partial matching)
  # Numeric inverse of f on [lower, upper]: returns a function of y solving
  # f(x) = y via uniroot (only the first component, $root, is kept).
  inverse <- function(f, lower = -100, upper = 100) {
    function(y) uniroot((function(x) f(x) - y), lower = lower, upper = upper)[1]
  }
  # Inverse of: cut height -> number of clusters obtained at that height.
  square_inverse <- inverse(function(x) length(unique((cutree(hclust0, h = x)))),
                            min(heigh1), max(heigh1))
  # Merge row whose height is closest to the cut height yielding K0 clusters.
  Km <- which.min((heigh1 - square_inverse(K0)$root)^2)
  for (l1 in (Km - 1):1) {
    if (l1 %in% xc) next
    if (xx[l1, 1] < 0 & xx[l1, 2] < 0) {
      zh[l1] <- heigh1[l1]
    }
    if (xx[l1, 1] < 0 & xx[l1, 2] > 0) {
      zh[l1] <- heigh1[l1]
      xc <- c(xc, xcc[[xx[l1, 2]]])
    }
    if (xx[l1, 1] > 0 & xx[l1, 2] > 0) {
      zh[l1] <- heigh1[l1]
      xc <- c(xc, xcc[[xx[l1, 2]]], xcc[[xx[l1, 1]]])
    }
  }
  return(zh)
}
| /GHCsource/codes/archive/SHC.R | no_license | saeidamiri1/GHC | R | false | false | 11,266 | r | ############# 27-OCT-2014
############# It is just a trial version of codes.
############# samiri2@unl.edu
###################################
################################# Ensemble
SHC<-function (x,kn0,B=200){
x<-cbind(x,rep(0,nrow(x)))
Len<-dim(x)
clusterO<-Len[2]
b<-1
# it is 4
knmin<-2;knmax<-min(25,floor(dim(x)[1]/5)-2)
kn<-sample(c(knmin,knmax),1)
#dd<-Hub2MQ(x,kn)
#RE<-dd[,Len[2]]
RHub2MQ<-function(x,kn,knmin,knmax){
kn<-sample(c(knmin:knmax),1)
dd<-Hub2MQ(x,kn)
Len<-dim(x)
return(dd[,Len[2]])
}
distancematrix0<-function(data){
data<-reorderf(data)
Len<-dim(data)
nk<-as.integer(names(table(data[,Len[2]])))
ss<-length(unique(data[,Len[2]]))
dismat<- matrix(NA,ncol=ss,ss)#array(NA, dim=c(1,Len[2]-1,ss,ss))
for(i in 1:ss){
if (i==ss) break
for(j in ((i+1):ss)){
ij0<-data[,Len[2]]==i
ij1<-data[,Len[2]]==j
dismat[i,j]<-(distwo(data[ij0,-Len[2]],data[ij1,-Len[2]]))
}
}
t(dismat)
}
reorderf<-function(data){
Len<-dim(data)
nk<-as.integer(names(table(data[,Len[2]])))
h<-0
data0<-cbind(data,NA)
for(i in nk){
ij<-data[,Len[2]]==i
h<-h+1
data0[ij,Len[2]+1]<-h
}
return(data0[,-Len[2]])
}
distwo<-function(data1,data2){
d1<-dim(data1)
if(is.null(d1)) {data1<-t(as.matrix(data1));d1<-dim(data1)}
d2<-dim(data2)
if(is.null(d2)) {data2<-t(as.matrix(data2));d2<-dim(data2)}
di0<-NULL
ff<-1
for(i in 1:d1[1]){
for(j in 1:d2[1]){
di0[ff]<-mean((data1[i,]-data2[j,])^2)
ff<-ff+1
}
}
quantile(di0,probs=.2)
}
cl <- makeCluster(detectCores()-1) # create a cluster with 2 cores
registerDoParallel(cl) # register the cluster
ens = foreach(i = 1:200,
.combine = "rbind", .export=c("Hub2MQ","distancematrix0","reorderf","distwo")) %dopar% {
fit1 <- RHub2MQ(x,kn,knmin,knmax)
fit1
}
stopCluster(cl)
#while(b<B){
# kn<-sample(c(knmin:knmax),1)
# dd<-Hub2MQ(x,kn)
# RE<-rbind(RE,dd[,Len[2]])
# b<-b+1
#}
REDIST<-as.dist(distancematrixH(ens))
REDISTT<-as.matrix(REDIST)
hc <- hclust(REDIST,method = "single")
zhh<-mean(Xsub(hc,kn0),na.rm=T)
kstar<-length(unique(cutree(hc,h=zhh)))
cc<-cutree(hc,kstar)
kn<-kn0
xl2<-x
ni<-Len[1]
for(i in 1:ni){
xl2[i,clusterO]<-cc[i]
}
alpha0<-.05
while(alpha0>0){
xcc<-NULL
for(j in unique(cc)) xcc[j]<-length(xl2[xl2[,clusterO]==j,clusterO])
mino0<- which(xcc/dim(x)[1]<alpha0)
main0<-setdiff((cc),mino0)
if(length(main0)>(kn)) break
alpha0<-alpha0/2
}
i<-1
cc0<-NULL
for(j in main0){
cc0[cc==j]<-i
i<-i+1
}
for(j in mino0){
cc0[cc==j]<-i
i<-i+1
}
#cc02<-cc[1:length(main0)]
kmi<-length(mino0)
kma2<-kma<-length(main0)
cc1<-cc0
#cc2<-setdiff((cc),main0)
while(kma2>kn){
#xl<-xl[,-clusterO]
xz<-list(NULL)
for(i in unique(cc1)){
xz[[i]]<-which(cc1==i)
}
kcc<-unique(cc1)
XXXX<-distancematrix0SE3(REDISTT,c(1:kma2),xz)
cc1[cc1==XXXX[2]]<-XXXX[1]
cc1<-reorderfSE(cc1)
kma2<-kma2-1
}
main02<-1:kma2
mino02<-(kma2+1):(kma2+kmi)
xz<-list(NULL)
for(i in unique(cc1)){
xz[[i]]<-which(cc1==i)
}
xl2<-x
ni<-Len[1]
for(i in 1:ni){
xl2[i,clusterO]<-cc1[i]
}
if(!length(mino0)==0){
while( !length(mino02)==0){
ind<-md<-NULL
i0<-1
for(i1 in mino02 ){
d1<-NULL
for(i2 in main02){
d1<-c(d1,distwoA(REDISTT,xz[[i2]],xz[[i1]]))
}
ind[i0]<-which.min(d1)
md[i0]<-min(d1,na.rm=T)
i0<-i0+1
}
xl2[xl2[,clusterO]==mino02[which.min(md)],clusterO]=main02[ind[which.min(md)]]
cc1[cc1==mino02[which.min(md)]]<-main02[ind[which.min(md)]]
xz<-list(NULL)
for(i in unique(cc1)){
xz[[i]]<-which(cc1==i)
}
mino02<-setdiff(mino02,mino02[which.min(md)])
#if(length(mino1)==0) break
}}
xl2[,clusterO]
}
############################### SIZE of cluster
# Estimate the number of clusters in `x` via an ensemble: B randomized
# Hub2MQ clusterings are run in parallel, a disagreement distance between
# data points (number of runs assigning them different labels) is built,
# and a single-linkage tree over it is cut at the largest jump in merge
# heights.  Returns the estimated cluster count `ee0` (possibly averaged
# with a second cut if spurious tiny clusters appear).
# Requires the parallel/doParallel/foreach packages and the top-level
# Hub2MQ() and distancematrixH() functions.
EK<-function (x,B=200){
# Append a dummy label column so x matches the layout Hub2MQ expects.
x<-cbind(x,rep(0,nrow(x)))
Len<-dim(x)
clusterO<-Len[2]
b<-1
# Candidate range for the per-run cluster count.
knmin<-2;knmax<-min(25,floor(dim(x)[1]/5)-2)
# NOTE(review): this samples only the two ENDPOINTS {knmin, knmax};
# RHub2MQ below samples the full range knmin:knmax -- possibly a missing
# ':'.  The value is overwritten inside RHub2MQ anyway.
kn<-sample(c(knmin,knmax),1)
dd<-Hub2MQ(x,kn)
# NOTE(review): b, dd and RE are never referenced again below.
RE<-dd[,Len[2]]
#####
####
# One randomized run: draw kn uniformly from knmin:knmax and return the
# Hub2MQ label vector (last column of its result).
RHub2MQ<-function(x,kn,knmin,knmax){
kn<-sample(c(knmin:knmax),1)
dd<-Hub2MQ(x,kn)
Len<-dim(x)
return(dd[,Len[2]])
}
# Local copies of the distance helpers so they can be shipped to the
# parallel workers through foreach(.export=...).
# Lower-triangular between-cluster distance matrix (labels in last col).
distancematrix0<-function(data){
data<-reorderf(data)
Len<-dim(data)
nk<-as.integer(names(table(data[,Len[2]])))
ss<-length(unique(data[,Len[2]]))
dismat<- matrix(NA,ncol=ss,ss)#array(NA, dim=c(1,Len[2]-1,ss,ss))
for(i in 1:ss){
if (i==ss) break
for(j in ((i+1):ss)){
ij0<-data[,Len[2]]==i
ij1<-data[,Len[2]]==j
dismat[i,j]<-(distwo(data[ij0,-Len[2]],data[ij1,-Len[2]]))
}
}
t(dismat)
}
# Renumber the labels in the last column to consecutive integers 1..k.
reorderf<-function(data){
Len<-dim(data)
nk<-as.integer(names(table(data[,Len[2]])))
h<-0
data0<-cbind(data,NA)
for(i in nk){
ij<-data[,Len[2]]==i
h<-h+1
data0[ij,Len[2]+1]<-h
}
return(data0[,-Len[2]])
}
# 20th percentile of all pairwise mean-squared row differences.
distwo<-function(data1,data2){
d1<-dim(data1)
if(is.null(d1)) {data1<-t(as.matrix(data1));d1<-dim(data1)}
d2<-dim(data2)
if(is.null(d2)) {data2<-t(as.matrix(data2));d2<-dim(data2)}
di0<-NULL
ff<-1
for(i in 1:d1[1]){
for(j in 1:d2[1]){
di0[ff]<-mean((data1[i,]-data2[j,])^2)
ff<-ff+1
}
}
quantile(di0,probs=.2)
}
cl <- makeCluster(detectCores()-1) # one worker per core, minus one
registerDoParallel(cl) # register the cluster with foreach
# B randomized clusterings; each row of `ens` is one label vector.
ens = foreach(i = 1:B,
.combine = "rbind", .export=c("Hub2MQ","distancematrix0","reorderf","distwo")) %dopar% {
fit1 <- RHub2MQ(x,kn,knmin,knmax)
fit1
}
stopCluster(cl)
# Point-pair distance = number of runs labeling the two points differently
# (distancematrixH compares the COLUMNS of ens).
REDIST<-as.dist(distancematrixH(ens))
# Single-linkage tree over the points; cut at the largest gap between
# successive merge heights.
hclustM <- hclust(REDIST, method = "single")
cutValue <- hclustM$height[which.max(diff(hclustM$height))]
ee<-(cutree(hclustM, h = cutValue))
ee0<-length(unique(ee))
#ee<-(cutree(hclustM, h = cutValue))
# Clusters holding < 0.9% of the points are treated as spurious.
idn<-as.numeric(names(table(ee)))[table(ee)/length(ee)<.009]
eeNA<-NULL
for(i in idn){
eeNA<-c(eeNA,which(ee==i))
}
# If spurious clusters exist, re-cut at the SECOND-largest height gap and
# average the non-spurious count of that cut with the first cut's count.
if(length(eeNA)!=0){
SEMAX<-sort(which(diff(hclustM$height)==sort(diff(hclustM$height), decreasing = TRUE)[2]),decreasing = TRUE)[1]
cutValue <- hclustM$height[SEMAX]
ee2<-(cutree(hclustM, h = cutValue))
ee0<-mean(c(sum(table(ee2)/length(ee2)>.009),length(unique(ee))))
}
return(ee0)
}
#######################
######################
######################
# One randomized coarse-to-fine clustering pass:
#   1. over-segment the feature columns (all but the last, which holds
#      labels) with MacQueen k-means into ~ n/zz centers, zz drawn from 4:6;
#   2. compute between-segment distances with distancematrix0();
#   3. merge the segments into kn clusters by single-linkage hclust;
#   4. relabel every observation with its merged cluster id.
# Returns `x` with the original label column replaced by the new labels.
Hub2MQ<-function(x,kn){
clusterO<-dim(x)[2]
# Random granularity of the initial over-segmentation.
zz<-sample(c(4:6),1)
# The wrapping parentheses would auto-print at top level; inside a
# function they have no visible effect.
(cl <- kmeans(x[,-clusterO], floor(dim(x)[1]/zz), algorithm = "MacQueen",iter.max = 50, nstart = 1))
xl<-cbind(x[,-clusterO],cl$cluster)
xlc<-distancematrix0(xl)
# xlcT<-distancematrix0T(xl)
xlc<-as.dist(xlc)
hc <- hclust(xlc,method = "single")
xk<-NULL
# Cannot cut into more clusters than there are k-means segments.
if( kn>length(cl$size)) kn<-length(cl$size)-1
cc<-cutree(hc,kn)
xl2<-cbind(x,NA)
ni2<-sort(unique(xl[,clusterO]))
# Map each observation's k-means segment id to its merged cluster id.
for(i in ni2){
xl2[xl[,clusterO]==i,clusterO+1]<-cc[i]
}
return(xl2[,-clusterO])
}
###distancematrixHT<-function(data){
### Len<-dim(data)
### ss<-Len[2]
### dismat<- matrix(NA,ncol=ss,ss)
### for(i in 1:ss){
### for(j in 1:ss){
### dismat[i,j]<-(distancem(data[,i],data[,j]))
### }
### }
### t(dismat)
###}
distancematrixH<-function(data){
  # Lower-triangular matrix of pairwise disagreement counts between the
  # COLUMNS of `data`, using distancem() on every unordered column pair.
  n.col <- ncol(data)
  dmat <- matrix(NA, nrow = n.col, ncol = n.col)
  if (n.col > 1) {
    for (a in seq_len(n.col - 1)) {
      for (b in (a + 1):n.col) {
        dmat[a, b] <- distancem(data[, a], data[, b])
      }
    }
  }
  # Transpose so values sit below the diagonal, matching distancematrix0.
  t(dmat)
}
distancem<-function(a,b){
  # Number of positions at which the two label vectors disagree.
  # The original assigned 0 to a temporary and immediately overwrote it;
  # that dead store is removed.
  # NOTE(review): any NA in `a` or `b` makes the result NA (NA comparisons
  # propagate through sum()); callers appear to supply complete vectors.
  sum(a != b)
}
reorderf<-function(data){
  # Renumber the cluster labels stored in the last column of `data` to
  # the consecutive integers 1..k, assigned in ascending order of the
  # original label values.  Returns `data` with the old label column
  # replaced by the renumbered one.
  lab.col <- dim(data)[2]
  old.labels <- as.integer(names(table(data[, lab.col])))
  out <- cbind(data, NA)
  next.id <- 0
  for (lab in old.labels) {
    next.id <- next.id + 1
    out[data[, lab.col] == lab, lab.col + 1] <- next.id
  }
  return(out[, -lab.col])
}
distancematrix0<-function(data){
  # Lower-triangular matrix of pairwise between-cluster distances.
  # The last column of `data` holds cluster labels; labels are first
  # renumbered to the consecutive integers 1..k via reorderf(), then
  # distwo() is evaluated for every unordered cluster pair.
  relabeled <- reorderf(data)
  lab.col <- dim(relabeled)[2]
  labels <- relabeled[, lab.col]
  k <- length(unique(labels))
  dmat <- matrix(NA, nrow = k, ncol = k)
  if (k > 1) {
    for (a in seq_len(k - 1)) {
      for (b in (a + 1):k) {
        dmat[a, b] <- distwo(relabeled[labels == a, -lab.col],
                             relabeled[labels == b, -lab.col])
      }
    }
  }
  # Transpose so distances sit below the diagonal, as callers expect.
  t(dmat)
}
distwo<-function(data1,data2){
  # Distance between two groups of observations: the 20th percentile of
  # all pairwise mean-squared differences between a row of `data1` and a
  # row of `data2`.  Bare vectors are promoted to one-row matrices.
  if (is.null(dim(data1))) data1 <- t(as.matrix(data1))
  if (is.null(dim(data2))) data2 <- t(as.matrix(data2))
  n1 <- nrow(data1)
  n2 <- nrow(data2)
  pair.dist <- numeric(n1 * n2)
  idx <- 1
  for (r1 in seq_len(n1)) {
    for (r2 in seq_len(n2)) {
      pair.dist[idx] <- mean((data1[r1, ] - data2[r2, ])^2)
      idx <- idx + 1
    }
  }
  quantile(pair.dist, probs = .2)
}
distwoA2<-function(xlcT0,dat1,dat2){
  # Single-linkage distance between two groups: the smallest entry of
  # the precomputed distance matrix `xlcT0` over all index pairs (i, j)
  # with i from `dat1` and j from `dat2`.  Index arguments may be plain
  # vectors or lists of vectors; they are flattened first.
  idx1 <- unlist(dat1)
  idx2 <- unlist(dat2)
  min(xlcT0[idx1, idx2, drop = FALSE])
}
distwoA<-function(xlcT0,dat1,dat2){
  # Single-linkage group distance: the minimum of the distance-matrix
  # entries xlcT0[i, j] taken over every i in `dat1` and j in `dat2`
  # (indices may be vectors or lists of vectors).
  # NOTE: duplicate of distwoA2, retained for existing callers.
  rows <- unlist(dat1)
  cols <- unlist(dat2)
  min(xlcT0[rows, cols, drop = FALSE])
}
distancematrix0SE3<-function(XLL,W1,XZZ){
  # Find the pair of clusters (among the ids listed in W1) with the
  # smallest between-cluster distance, using the precomputed point
  # distance matrix XLL and the per-cluster row-index sets XZZ.
  # Returns the two cluster ids sorted ascending; ties among minimal
  # pairs are broken by sampling one at random.
  # Changes vs. original: removed a dead standalone `t(dismat)`
  # expression whose value was discarded, and spelled out na.rm = TRUE.
  ss<-length(W1)
  dismat<- matrix(NA,ncol=ss,ss)
  for(i in W1){
    if (i==W1[ss]) break
    for(j in (W1[(which(W1==i)+1):ss])){
      dismat[i,j]<-distwoA2(XLL,XZZ[[i]],XZZ[[j]])
    }
  }
  # Work on the transposed (lower-triangular) matrix, as elsewhere in
  # this file, and locate every entry attaining the minimum distance.
  lower <- t(dismat)
  eee<-which(lower==min(lower,na.rm=TRUE), arr.ind = TRUE)
  sort(eee[sample(dim(eee)[1],1),])
}
reorderfSE<-function(data){
  # Renumber a bare vector of cluster labels to the consecutive integers
  # 1..k, assigned in ascending order of the original label values.
  sorted.labels <- as.integer(names(table(data)))
  relabeled <- NULL
  next.id <- 0
  for (lab in sorted.labels) {
    next.id <- next.id + 1
    relabeled[data == lab] <- next.id
  }
  return(relabeled)
}
# For each row l1 of an hclust `merge` matrix `xx`, collect the vector of
# merge-row indices that make up the cluster formed at step l1: the step
# itself plus, recursively, the steps of any previously formed
# sub-clusters.  In hclust's convention, negative entries are singleton
# observations and positive entries reference earlier merge rows.
CreatXCC<-function(xx){
xxc<-list()
len<-dim(xx)
xxc[[1]]<-1
for(l1 in 2:(len[1])){
# two singletons: the step stands alone
if(xx[l1,1]<0&xx[l1,2]<0) xxc[[l1]]<-l1
# singleton + earlier cluster: inherit that cluster's steps
if(xx[l1,1]<0&xx[l1,2]>0) xxc[[l1]]<-c(l1,unlist(xxc[[xx[l1,2]]]))
# two earlier clusters: inherit both
if(xx[l1,1]>0&xx[l1,2]>0) xxc[[l1]]<-c(l1,unlist(xxc[[xx[l1,1]]]),unlist(xxc[[xx[l1,2]]]))
# NOTE(review): the (positive, negative) ordering is not handled and
# would leave xxc[[l1]] NULL -- confirm hclust always lists the
# singleton first in such merge rows.
}
xxc
}
# Collect the merge heights of the "top-level" merges lying below the cut
# that yields roughly K0 clusters: walking DOWN from the cut, each merge
# contributes its height once, and all merges already absorbed into a
# visited cluster (via CreatXCC bookkeeping) are skipped.  Returns the
# height vector `zh` (NA at skipped positions); the caller averages the
# non-NA entries to obtain a cut threshold.
Xsub<-function(hclust0,K0){
xcc<-list()
xx<-hclust0$merge
len<-dim(xx)
zh<-NULL
xcc<-CreatXCC(xx)
xc<-NULL
# NOTE(review): `$heigh` reaches the `height` component only through
# R's partial matching on `$` -- works, but fragile; should be $height.
heigh1<-hclust0$heigh
# Numeric inversion of a monotone function via uniroot.
inverse = function (f, lower = -100, upper = 100) {
function (y) uniroot((function (x) f(x) - y), lower = lower, upper = upper)[1]
}
# Invert h -> number-of-clusters to find the height giving ~K0 clusters.
# NOTE(review): cutree's cluster count is a step function of h, so
# uniroot's sign-change requirement may fail for some trees -- confirm.
square_inverse = inverse(function (x) length(unique((cutree(hclust0,h=x)))), min(heigh1), max(heigh1))
# Index of the merge whose height is closest to that cut height.
Km<-which.min((heigh1-square_inverse(K0)$root)^2)
for(l1 in (Km-1):(1)){
# Skip merges already absorbed into a previously visited cluster.
if(l1 %in% xc) next
if(xx[l1,1]<0&xx[l1,2]<0) {zh[l1]<-heigh1[l1]}
if(xx[l1,1]<0&xx[l1,2]>0) {zh[l1]<-heigh1[l1];xc<-c(xc,xcc[[xx[l1,2]]]) }
if(xx[l1,1]>0&xx[l1,2]>0) {zh[l1]<-heigh1[l1];xc<-c(xc,xcc[[xx[l1,2]]],xcc[[xx[l1,1]]]) }
}
return(zh)
}
|
############## PLOTTING FUNCTIONS FOR betaMPT
#' Plot Parameter Estimates
#'
#' Plot parameter estimates for hierarchical MPT models.
#'
#' @param x a fitted Beta or latent-trait MPT model
#' @param includeIndividual whether to plot individual estimates
#' @param addLines whether to connect individual parameter estimates by lines
#' @param estimate type of point estimates for group-level and individual parameters
#' (either \code{"mean"} or \code{"median"})
#' @param select character vector of parameters to be plotted (e.g., \code{select = c("d", "g")}). Can be used to plot subsets of parameters and change the order of parameters.
#' @param ... further arguments passed to the standard \code{\link{plot}} function
#'
#' @author Daniel Heck
#' @seealso \code{\link{betaMPT}}, \code{\link{traitMPT}}, \code{\link{plotDistribution}}
#' @examples
#' \dontrun{
#' plotParam(fit,
#' addLines = TRUE,
#' estimate = "median",
#' select = c("d1", "d2")
#' )
#' }
#' @export
plotParam <- function(x, includeIndividual = TRUE, addLines = FALSE,
estimate = "mean", select = "all", ...) {
# Column name of the requested point estimate in the summary tables.
stat <- ifelse(estimate == "median", "50%", "Mean")
par.group <- x$summary$groupParameters$mean
par.ind <- x$summary$individParameters
# Row names look like "mean_<parameter>"; strip the "mean_" prefix.
parnames <- substr(rownames(par.group), 6, 100)
if (select[1] == "all") {
select <- parnames
} else {
if (!all(select %in% parnames)) {
stop(
"Check arguments: Not all parameters in 'select' are included in the MPT model!\n",
"Parameters are: ", paste(parnames, collapse = ", ")
)
}
# Keep only the requested parameters, in the requested order.
par.group <- par.group[paste0("mean_", select), , drop = FALSE]
}
dims <- dim(par.ind)
S <- nrow(par.group) # parameters
N <- dims[2] # persons
means <- par.group[, stat]
# Group-level estimates in red; probability scale fixed to [0, 1].
plot(1:S, means,
ylim = 0:1, xlim = c(.5, S + .5), pch = 19, xaxt = "n", # size=3,
xlab = "MPT Parameters",
ylab = paste0("Estimate (", estimate, "s)"), col = 2,
main = paste0(
"Group-level ", estimate, "s + 95% CI (red)",
ifelse(includeIndividual,
paste0(" and individual ", estimate, "s (gray)"),
""
)
), ...
)
axis(side = 1, at = 1:S, labels = select)
if (includeIndividual) {
for (i in 1:N) {
if (addLines) {
# Connected per-person profiles, slightly offset to the right.
lines(1:S + .05, par.ind[select, i, stat],
col = adjustcolor(col = "black", alpha.f = .5)
)
points(1:S + .05, par.ind[select, i, stat],
cex = .9, pch = 16,
col = adjustcolor(col = "black", alpha.f = .5)
)
} else {
# Unconnected points, horizontally jittered per person.
points(1:S + seq(-.2, .2, length.out = N)[i],
col = adjustcolor(col = "black", alpha.f = .5), # col=rainbow(N, alpha=.4)[i],
pch = 16,
par.ind[select, i, stat], cex = .9
)
}
}
# Redraw group means on top of the individual points.
points(1:S, means, cex = 1.3, col = 2, pch = 19)
}
# 95% CI whiskers from summary columns 3 and 5 (presumably the 2.5% and
# 97.5% quantiles -- TODO confirm against the summary table layout).
segments(
x0 = 1:S, y0 = par.group[, 3],
y1 = par.group[, 5], lwd = 2, col = 2
)
}
| /R/plotParameters.R | no_license | cran/TreeBUGS | R | false | false | 3,010 | r |
############## PLOTTING FUNCTIONS FOR betaMPT
#' Plot Parameter Estimates
#'
#' Plot parameter estimates for hierarchical MPT models.
#'
#' @param x a fitted Beta or latent-trait MPT model
#' @param includeIndividual whether to plot individual estimates
#' @param addLines whether to connect individual parameter estimates by lines
#' @param estimate type of point estimates for group-level and individual parameters
#' (either \code{"mean"} or \code{"median"})
#' @param select character vector of parameters to be plotted (e.g., \code{select = c("d", "g")}). Can be used to plot subsets of parameters and change the order of parameters.
#' @param ... further arguments passed to the standard \code{\link{plot}} function
#'
#' @author Daniel Heck
#' @seealso \code{\link{betaMPT}}, \code{\link{traitMPT}}, \code{\link{plotDistribution}}
#' @examples
#' \dontrun{
#' plotParam(fit,
#' addLines = TRUE,
#' estimate = "median",
#' select = c("d1", "d2")
#' )
#' }
#' @export
plotParam <- function(x, includeIndividual = TRUE, addLines = FALSE,
estimate = "mean", select = "all", ...) {
# Column name of the requested point estimate in the summary tables.
stat <- ifelse(estimate == "median", "50%", "Mean")
par.group <- x$summary$groupParameters$mean
par.ind <- x$summary$individParameters
# Row names look like "mean_<parameter>"; strip the "mean_" prefix.
parnames <- substr(rownames(par.group), 6, 100)
if (select[1] == "all") {
select <- parnames
} else {
if (!all(select %in% parnames)) {
stop(
"Check arguments: Not all parameters in 'select' are included in the MPT model!\n",
"Parameters are: ", paste(parnames, collapse = ", ")
)
}
# Keep only the requested parameters, in the requested order.
par.group <- par.group[paste0("mean_", select), , drop = FALSE]
}
dims <- dim(par.ind)
S <- nrow(par.group) # parameters
N <- dims[2] # persons
means <- par.group[, stat]
# Group-level estimates in red; probability scale fixed to [0, 1].
plot(1:S, means,
ylim = 0:1, xlim = c(.5, S + .5), pch = 19, xaxt = "n", # size=3,
xlab = "MPT Parameters",
ylab = paste0("Estimate (", estimate, "s)"), col = 2,
main = paste0(
"Group-level ", estimate, "s + 95% CI (red)",
ifelse(includeIndividual,
paste0(" and individual ", estimate, "s (gray)"),
""
)
), ...
)
axis(side = 1, at = 1:S, labels = select)
if (includeIndividual) {
for (i in 1:N) {
if (addLines) {
# Connected per-person profiles, slightly offset to the right.
lines(1:S + .05, par.ind[select, i, stat],
col = adjustcolor(col = "black", alpha.f = .5)
)
points(1:S + .05, par.ind[select, i, stat],
cex = .9, pch = 16,
col = adjustcolor(col = "black", alpha.f = .5)
)
} else {
# Unconnected points, horizontally jittered per person.
points(1:S + seq(-.2, .2, length.out = N)[i],
col = adjustcolor(col = "black", alpha.f = .5), # col=rainbow(N, alpha=.4)[i],
pch = 16,
par.ind[select, i, stat], cex = .9
)
}
}
# Redraw group means on top of the individual points.
points(1:S, means, cex = 1.3, col = 2, pch = 19)
}
# 95% CI whiskers from summary columns 3 and 5 (presumably the 2.5% and
# 97.5% quantiles -- TODO confirm against the summary table layout).
segments(
x0 = 1:S, y0 = par.group[, 3],
y1 = par.group[, 5], lwd = 2, col = 2
)
}
|
##jmd
##4.17.12
##get_gene_supp_mat.r
# Strip the literal compartment tag '[CCO-EXTRACELLULAR]' from `x`, then
# split the result on the literal separator ".5." and return the pieces
# as a character vector.
split.names <- function(x) {
  cleaned <- gsub('[CCO-EXTRACELLULAR]', '', x, fixed = TRUE)
  unlist(strsplit(cleaned, split = '\\.5\\.'))
}
# Reconcile support codes for multi-metabolite rows (rownames containing
# ", ") of a metabolite x gene support matrix `z0` (transposed of the
# gene x metabolite layout used elsewhere).  Input cell codes:
#   0=neither, 1=observed only, 2=predicted only, 3=predicted & observed.
# Each multi-metabolite cell is upgraded from its constituent
# single-metabolite rows: 4 if some constituent is observed (1), 5 if
# some is predicted (2), 6 if constituents cover both observed and
# predicted, or any constituent is 3.  Assignment ORDER matters: 4, then
# 5, then 6 overwrite one another; cells already coded 3 are saved first
# and restored at the end of each pass.  Returns a matrix shaped like z0.
multi.met.gene.supp <- function(z0){
z <- t(z0)
# Multi-metabolite rows are identified by ", " in the rowname.
multi.met <- grep(', ', rownames(z), value=TRUE, fixed=TRUE)
for (mm in multi.met){
mets <- unlist(strsplit(x=mm, split=', ', fixed=TRUE))
mets <- intersect(mets, rownames(z))
# Remember cells already marked predicted-and-observed (code 3).
purple.ind <- which(z[mm,]==3)
if (length(mets)>1){
z[mm, colSums(z[mets,]==1)>0] <- 4
z[mm, colSums(z[mets,]==2)>0] <- 5
#if one met predicted and other verified, then combo is predicted & verified
z[mm, colSums(z[mets,]==1)>0 & colSums(z[mets,]==2)>0] <- 6
z[mm, colSums(z[mets,]==3)>0] <- 6
} else {
# Single constituent present: map its codes 1/2/3 to 4/5/6 directly.
z[mm, which(z[mets,]==1)] <- 4
z[mm, which(z[mets,]==2)] <- 5
z[mm, which(z[mets,]==3)] <- 6
}#end else
# Restore the saved code-3 cells.
z[mm, purple.ind] <- 3
}#end for
return(t(z))
}
#3=pred&obs;2=pred only;1=obs only;0=neither
# Build a gene x supplement support matrix comparing model PREDICTIONS
# against OBSERVATIONS.  Base cell codes: 3=pred&obs, 2=pred only,
# 1=obs only, 0=neither (multi-metabolite rows are then reconciled by
# multi.met.gene.supp, which introduces codes 4-6).
# NOTE(review): relies on externally defined test.supp(), named.vec()
# (used in the `ub` default) and split.names().
get.gene.supp.mat <- function(test.supp.df, sp, rxns.lst, supp.col='SUPPLEMENTS', obs.supp.names, rm.cpds='CPD2T-61', ub=named.vec(10**3, names=colnames(sp)), supp.ub=10, eps=10**-6){
##get pred
# Universe of candidate supplements: split the " or " alternatives,
# drop parentheses, and remove explicitly excluded compounds.
u.supps <- gsub('\\(|\\)', '', unique(unlist(strsplit(split=' or ', x=test.supp.df$SUPP))))
u.supps <- setdiff(u.supps, rm.cpds)
test.supp.df2 <- test.supp.df
#loop thru genes & make SUPPLEMENTS = setdiff(u.supps, vector of old supps split by ' or '), all pasted together by ' or '
test.supp.df2[,supp.col] <- apply(test.supp.df, 1, FUN=function(x){ paste(u.supps, collapse=' or ') })
# Run the supplementation test for every gene knockout.
tes.nonsupp <- test.supp(sp=sp, ko.lst=rxns.lst[rownames(test.supp.df2)], annot=test.supp.df2, sense='E', ub=ub, supp.ub=supp.ub)
# Predicted supplements: entries with flux above the tolerance eps.
pred.supps <- unlist(tes.nonsupp[[2]])[unlist(tes.nonsupp[[2]])>eps]
##get matrix of obs & prediction pairs
# Each column is a (gene, supplement) pair; re-append the ".5" that
# split.names() consumed from the gene identifier.
pred.supp.mat <- apply(as.matrix(names(pred.supps)), 1, FUN=function(x) split.names(x))
pred.supp.mat[1,] <- paste(pred.supp.mat[1,], '.5', sep='')
obs.supp.mat <- apply(as.matrix(obs.supp.names), 1, FUN=function(x) split.names(x))
obs.supp.mat[1,] <- paste(obs.supp.mat[1,], '.5', sep='')
obs.supp.mat <- obs.supp.mat[,!(obs.supp.mat[2,] %in% rm.cpds)]
##make mat
#3=pred&obs;2=pred only;1=obs only;0=neither
gene.supp.mat <- matrix(0, nrow=nrow(test.supp.df), ncol=length(u.supps), dimnames=list(rownames(test.supp.df), gsub(' and ', ', ', u.supps)))
#pred: predictions contribute 2 per (gene, supplement) hit
for (i in 1:ncol(pred.supp.mat)){
gene.supp.mat[pred.supp.mat[1,i], pred.supp.mat[2,i]] <- gene.supp.mat[pred.supp.mat[1,i], pred.supp.mat[2,i]]+2
}
#obs: observations contribute 1, so pred+obs cells sum to 3
for (i in 1:ncol(obs.supp.mat)){
gene.supp.mat[obs.supp.mat[1,i], obs.supp.mat[2,i]] <- gene.supp.mat[obs.supp.mat[1,i], obs.supp.mat[2,i]]+1
}
# Reconcile multi-metabolite supplement columns (codes 4-6).
gsm <- multi.met.gene.supp(gene.supp.mat)
return(gsm)
}
| /get_gene_supp_mat.r | no_license | djinnome/crop | R | false | false | 2,931 | r | ##jmd
##4.17.12
##get_gene_supp_mat.r
# Strip the literal compartment tag '[CCO-EXTRACELLULAR]' from `x`, then
# split the result on the literal separator ".5." and return the pieces
# as a character vector.
split.names <- function(x) {
  cleaned <- gsub('[CCO-EXTRACELLULAR]', '', x, fixed = TRUE)
  unlist(strsplit(cleaned, split = '\\.5\\.'))
}
# Reconcile support codes for multi-metabolite rows (rownames containing
# ", ") of a metabolite x gene support matrix `z0` (transposed of the
# gene x metabolite layout used elsewhere).  Input cell codes:
#   0=neither, 1=observed only, 2=predicted only, 3=predicted & observed.
# Each multi-metabolite cell is upgraded from its constituent
# single-metabolite rows: 4 if some constituent is observed (1), 5 if
# some is predicted (2), 6 if constituents cover both observed and
# predicted, or any constituent is 3.  Assignment ORDER matters: 4, then
# 5, then 6 overwrite one another; cells already coded 3 are saved first
# and restored at the end of each pass.  Returns a matrix shaped like z0.
multi.met.gene.supp <- function(z0){
z <- t(z0)
# Multi-metabolite rows are identified by ", " in the rowname.
multi.met <- grep(', ', rownames(z), value=TRUE, fixed=TRUE)
for (mm in multi.met){
mets <- unlist(strsplit(x=mm, split=', ', fixed=TRUE))
mets <- intersect(mets, rownames(z))
# Remember cells already marked predicted-and-observed (code 3).
purple.ind <- which(z[mm,]==3)
if (length(mets)>1){
z[mm, colSums(z[mets,]==1)>0] <- 4
z[mm, colSums(z[mets,]==2)>0] <- 5
#if one met predicted and other verified, then combo is predicted & verified
z[mm, colSums(z[mets,]==1)>0 & colSums(z[mets,]==2)>0] <- 6
z[mm, colSums(z[mets,]==3)>0] <- 6
} else {
# Single constituent present: map its codes 1/2/3 to 4/5/6 directly.
z[mm, which(z[mets,]==1)] <- 4
z[mm, which(z[mets,]==2)] <- 5
z[mm, which(z[mets,]==3)] <- 6
}#end else
# Restore the saved code-3 cells.
z[mm, purple.ind] <- 3
}#end for
return(t(z))
}
#3=pred&obs;2=pred only;1=obs only;0=neither
# Build a gene x supplement support matrix comparing model PREDICTIONS
# against OBSERVATIONS.  Base cell codes: 3=pred&obs, 2=pred only,
# 1=obs only, 0=neither (multi-metabolite rows are then reconciled by
# multi.met.gene.supp, which introduces codes 4-6).
# NOTE(review): relies on externally defined test.supp(), named.vec()
# (used in the `ub` default) and split.names().
get.gene.supp.mat <- function(test.supp.df, sp, rxns.lst, supp.col='SUPPLEMENTS', obs.supp.names, rm.cpds='CPD2T-61', ub=named.vec(10**3, names=colnames(sp)), supp.ub=10, eps=10**-6){
##get pred
# Universe of candidate supplements: split the " or " alternatives,
# drop parentheses, and remove explicitly excluded compounds.
u.supps <- gsub('\\(|\\)', '', unique(unlist(strsplit(split=' or ', x=test.supp.df$SUPP))))
u.supps <- setdiff(u.supps, rm.cpds)
test.supp.df2 <- test.supp.df
#loop thru genes & make SUPPLEMENTS = setdiff(u.supps, vector of old supps split by ' or '), all pasted together by ' or '
test.supp.df2[,supp.col] <- apply(test.supp.df, 1, FUN=function(x){ paste(u.supps, collapse=' or ') })
# Run the supplementation test for every gene knockout.
tes.nonsupp <- test.supp(sp=sp, ko.lst=rxns.lst[rownames(test.supp.df2)], annot=test.supp.df2, sense='E', ub=ub, supp.ub=supp.ub)
# Predicted supplements: entries with flux above the tolerance eps.
pred.supps <- unlist(tes.nonsupp[[2]])[unlist(tes.nonsupp[[2]])>eps]
##get matrix of obs & prediction pairs
# Each column is a (gene, supplement) pair; re-append the ".5" that
# split.names() consumed from the gene identifier.
pred.supp.mat <- apply(as.matrix(names(pred.supps)), 1, FUN=function(x) split.names(x))
pred.supp.mat[1,] <- paste(pred.supp.mat[1,], '.5', sep='')
obs.supp.mat <- apply(as.matrix(obs.supp.names), 1, FUN=function(x) split.names(x))
obs.supp.mat[1,] <- paste(obs.supp.mat[1,], '.5', sep='')
obs.supp.mat <- obs.supp.mat[,!(obs.supp.mat[2,] %in% rm.cpds)]
##make mat
#3=pred&obs;2=pred only;1=obs only;0=neither
gene.supp.mat <- matrix(0, nrow=nrow(test.supp.df), ncol=length(u.supps), dimnames=list(rownames(test.supp.df), gsub(' and ', ', ', u.supps)))
#pred: predictions contribute 2 per (gene, supplement) hit
for (i in 1:ncol(pred.supp.mat)){
gene.supp.mat[pred.supp.mat[1,i], pred.supp.mat[2,i]] <- gene.supp.mat[pred.supp.mat[1,i], pred.supp.mat[2,i]]+2
}
#obs: observations contribute 1, so pred+obs cells sum to 3
for (i in 1:ncol(obs.supp.mat)){
gene.supp.mat[obs.supp.mat[1,i], obs.supp.mat[2,i]] <- gene.supp.mat[obs.supp.mat[1,i], obs.supp.mat[2,i]]+1
}
# Reconcile multi-metabolite supplement columns (codes 4-6).
gsm <- multi.met.gene.supp(gene.supp.mat)
return(gsm)
}
|
# Load the HIV cascade CD4-at-initiation counts and aggregate them into
# a 3-d array `data.dist` indexed by CD4 category x age group x sex.
library(data.table)
library(dplyr)
# NOTE(review): dplyr is attached but no dplyr function is used below.
data.long = data.table(read.csv('Data/cascade_cd4init_agegr.csv', stringsAsFactors=FALSE))
data.long$sex = factor(data.long$sex, levels=c('Male', 'Female'))
# Collapse 5-year age groups into broader bands (45+ pools 45-59).
data.long$age = factor(data.long$agegr,
levels=c('15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59'),
labels=c('15-24', '15-24', '25-34', '25-34', '35-44', '35-44', '45+', '45+', '45+'))
# Order CD4 categories from healthiest to sickest.
data.long$cd4 = factor(data.long$cd4cat, levels=c('500+', '350-499', '200-349', '100-199', '50-99', '<50'))
# Sum counts within each cd4 x age x sex cell.
data.aggr = by(data.long, list(data.long$cd4, data.long$age, data.long$sex), function(obs) {sum(obs$count)})
# Convert the `by` result to a plain 3-d array, keeping dimnames.
data.dist = array(data.aggr, dim(data.aggr), dimnames(data.aggr))
rm(data.long, data.aggr)
| /prepare-data-dist.R | permissive | rlglaubius/NaturalHistorySynthesis | R | false | false | 770 | r | library(data.table)
# Aggregate the HIV cascade CD4-at-initiation counts into a 3-d array
# `data.dist` indexed by CD4 category x age group x sex.
library(dplyr)
# NOTE(review): dplyr is attached but no dplyr function is used below.
data.long = data.table(read.csv('Data/cascade_cd4init_agegr.csv', stringsAsFactors=FALSE))
data.long$sex = factor(data.long$sex, levels=c('Male', 'Female'))
# Collapse 5-year age groups into broader bands (45+ pools 45-59).
data.long$age = factor(data.long$agegr,
levels=c('15-19', '20-24', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59'),
labels=c('15-24', '15-24', '25-34', '25-34', '35-44', '35-44', '45+', '45+', '45+'))
# Order CD4 categories from healthiest to sickest.
data.long$cd4 = factor(data.long$cd4cat, levels=c('500+', '350-499', '200-349', '100-199', '50-99', '<50'))
# Sum counts within each cd4 x age x sex cell.
data.aggr = by(data.long, list(data.long$cd4, data.long$age, data.long$sex), function(obs) {sum(obs$count)})
# Convert the `by` result to a plain 3-d array, keeping dimnames.
data.dist = array(data.aggr, dim(data.aggr), dimnames(data.aggr))
rm(data.long, data.aggr)
|
# Course project script: plot Global Active Power over time for
# 1-2 Feb 2007 and save the figure as plot2.png.
setwd("V:/Data Analytics/RPractice/coursera")
# NOTE(review): `heade=T` relies on partial argument matching for
# `header` (and T for TRUE); '?' marks missing values in this data set.
a<-read.table("household_power_consumption.txt",sep=";",heade=T,na.strings = c("?"))
a[,"Date"]<-as.Date(a$Date,format="%d/%m/%Y")
# Keep only the two target days.
a1<-a[a[,"Date"]=="2007-02-01"|a[,"Date"]== "2007-02-02",]
# Combine Date and Time into a POSIXlt timestamp for the x axis.
a1$DateTime<-strptime(do.call(paste, c(a1[c("Date", "Time")], sep = " ")),format="%Y-%m-%d %H:%M:%S",tz="")
png("plot2.png",width=480,height=480)
with(a1,plot(DateTime,Global_active_power,lty="solid",type="l",xlab="",ylab="Global Active Power (kilowatts)"))
dev.off() | /plot2.R | no_license | ShankerGCEO/ExploratoryDataCourseProject1 | R | false | false | 503 | r | setwd("V:/Data Analytics/RPractice/coursera")
# Read the household power data and plot Global Active Power over time
# for 1-2 Feb 2007, saving the figure as plot2.png.
# NOTE(review): `heade=T` relies on partial argument matching for
# `header` (and T for TRUE); '?' marks missing values in this data set.
a<-read.table("household_power_consumption.txt",sep=";",heade=T,na.strings = c("?"))
a[,"Date"]<-as.Date(a$Date,format="%d/%m/%Y")
# Keep only the two target days.
a1<-a[a[,"Date"]=="2007-02-01"|a[,"Date"]== "2007-02-02",]
# Combine Date and Time into a POSIXlt timestamp for the x axis.
a1$DateTime<-strptime(do.call(paste, c(a1[c("Date", "Time")], sep = " ")),format="%Y-%m-%d %H:%M:%S",tz="")
png("plot2.png",width=480,height=480)
with(a1,plot(DateTime,Global_active_power,lty="solid",type="l",xlab="",ylab="Global Active Power (kilowatts)"))
dev.off() |
#' Retrieving a BigML Dataset
#' @export
#' @family dataset methods
#' @references \url{https://bigml.com/developers/datasets}
#' @param source_id A string giving the name of the source id.
#' @param include_overview A logical value indicating whether to provide a
#' simple data frame overview of fields.
#' @template dots
#' @template author
#' @template dataset_return
#' @template normal_methods
getDataset <-
function (source_id, include_overview = TRUE, ...)
{
message("Retrieving the dataset...")
# GET /dataset/<id> through the package's internal API wrapper.
response = .basic_api(.DATASET_URL)$get(id = source_id)
if (include_overview) {
# Flatten the per-field metadata to one data-frame row per field,
# dropping the bulky `summary` component first.
# NOTE(review): ldply comes from plyr -- assumed attached elsewhere.
tmpfields = ldply(response$fields, function(y) {
y$summary = NULL
as.data.frame(y, stringsAsFactors=FALSE)
})
response$fields_overview = tmpfields
}
return(response)
}
| /bigml/R/getDataset.R | no_license | ingted/R-Examples | R | false | false | 820 | r | #' Retrieving a BigML Dataset
#' @export
#' @family dataset methods
#' @references \url{https://bigml.com/developers/datasets}
#' @param source_id A string giving the name of the source id.
#' @param include_overview A logical value indicating whether to provide a
#' simple data frame overview of fields.
#' @template dots
#' @template author
#' @template dataset_return
#' @template normal_methods
getDataset <-
function (source_id, include_overview = TRUE, ...)
{
message("Retrieving the dataset...")
# GET /dataset/<id> through the package's internal API wrapper.
response = .basic_api(.DATASET_URL)$get(id = source_id)
if (include_overview) {
# Flatten the per-field metadata to one data-frame row per field,
# dropping the bulky `summary` component first.
# NOTE(review): ldply comes from plyr -- assumed attached elsewhere.
tmpfields = ldply(response$fields, function(y) {
y$summary = NULL
as.data.frame(y, stringsAsFactors=FALSE)
})
response$fields_overview = tmpfields
}
return(response)
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Shiny server function: renders `output$hist`, a histogram of
# `input$num` standard-normal draws, re-executed reactively whenever
# the input changes.
function(input, output) {
output$hist <- renderPlot( {
hist(rnorm(input$num))
})
}
| /mdm/server.R | no_license | mdmeschi72/shinyExperiment | R | false | false | 382 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Shiny server function: renders `output$hist`, a histogram of
# `input$num` standard-normal draws, re-executed reactively whenever
# the input changes.
function(input, output) {
output$hist <- renderPlot( {
hist(rnorm(input$num))
})
}
|
## Demo script: base-graphics examples (pie, bar, box, histogram, line,
## scatter charts) plus one ggplot2 line chart.
###pie chart example
expenditure<-c("Housing","Food","Cloths","Entertainment","Other")
amount<-c(600,300,150,100,200)
##x,y
#example -1: second positional argument of pie() is the slice labels
pie(amount,expenditure)
#example -2
pie(amount,
labels=as.character(expenditure),
main="Monthly Expenditure Breakdown",
col=c("red","orange","yellow","blue","green"),
border="black",
radius = 1.3,
clockwise=TRUE
)
## barplot
max.temp <- c(22, 27, 26, 24, 23, 26, 28)
barplot(max.temp)
# NOTE(review): xlab/ylab appear swapped -- the bars show temperature
# per day, so "Day" belongs on the x axis.
barplot(max.temp,
main = "Maximum Temperatures in a Week",
xlab = "Degree Celsius",
ylab = "Day",
names.arg = c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"),
col =c("red","orange","yellow","blue","green","red","red"),
horiz = FALSE
)
##categorize or group by data: table() tallies the frequencies
age <- c(17,18,18,17,18,19,18,16,18,18)
table(age)
barplot(table(age),
main="Age Count of 10 Students",
xlab="Age",
ylab="Count",
border="red",
col="blue"
)
##example -3
Titanic
margin.table(Titanic,1) # count according to class (dimension 1)
margin.table(Titanic,2) # count according to sex (dimension 2 is Sex)
margin.table(Titanic) # gives total count if index is not provided
barplot(margin.table(Titanic,1))
###BOX plot
airquality #dataframe
str(airquality)
boxplot(airquality$Temp)
## example -
# NOTE(review): the titles mention ozone but the data plotted is Temp.
boxplot(airquality$Temp,
main = "Mean ozone in parts per billion at Roosevelt Island",
xlab = "Parts Per Billion",
ylab = "Ozone",
col = "orange",
border = "brown",
horizontal = FALSE,
notch = TRUE
)
##EXAMPLE -2
boxplot(airquality$Ozone,airquality$Temp,airquality$Day,
main = "Multiple boxplots for comparision",
names = c("ozone", "normal", "temp"),
las = 2,
col = c("orange","red"),
border = "brown",
horizontal = FALSE,
notch = TRUE,
xlab="test"
)
#example no. 4: formula interface -- one box per ozone value
boxplot(airquality$Temp~airquality$Ozone,
data=airquality,
main="Different boxplots for each month",
xlab="Month Number",
ylab="Degree Fahrenheit",
col="orange",
border="brown"
)
#### histogram
str(airquality)
Temperature <- airquality$Temp
hist(Temperature)
##
# histogram with added parameters
hist(Temperature,
main="Maximum daily temperature at La Guardia Airport",
xlab="Temperature in degrees Fahrenheit",
xlim=c(50,100),
col="green",
freq=TRUE
)
##with text label: hist() returns midpoints/counts used to place labels
h <- hist(Temperature,ylim=c(0,40),col="darkmagenta")
text(h$mids,h$counts,labels=h$counts, adj=c(0.1, -0.1))
#non -uniform width: explicit break points give unequal bin widths
hist(Temperature,
main="Maximum daily temperature at La Guardia Airport",
xlab="Temperature in degrees Fahrenheit",
xlim=c(50,100),
col="chocolate",
border="brown",
breaks=c(55,60,70,75,80,100)
)
### line chart
# dummy data: 9 categories x 5 x-positions, reproducible via set.seed
set.seed(45)
df <- data.frame(x=rep(1:5, 9), val=sample(1:100, 45),
variable=rep(paste0("category", 1:9), each=5))
str(df)
## install.packages("ggplot2") first if it is not already installed
library(ggplot2)
# plot: one coloured line per category
ggplot(data = df, aes(x=x, y=val)) + geom_line(aes(colour=variable))
##
# Create the data for the chart.
v <- c(7,12,28,3,41)
# NOTE(review): `t` masks base::t() for the rest of the session.
t <- c(14,7,6,19,3)
# Plot the bar chart.
plot(v,type = "o",col = "red", xlab = "Month", ylab = "Rain fall",
main = "Rain fall chart")
lines(t, type = "o", col = "blue")
##rainbow: empty canvas (type="n"), then 5 random coloured lines
plot(0,0,xlim = c(-10,10),ylim = c(-10,10),type = "n")
cl <- rainbow(5)
for (i in 1:5){
lines(-10:10,runif(21,-10,10),col = cl[i],type = 'b')
}
## scatter
# Get the input values.
input <- mtcars[,c('wt','mpg')]
str(input)
# Plot the chart for cars with weight between 2.5 to 5 and mileage between 15 and 30.
# NOTE(review): `breaks` is not a plot()/par() argument here; it is
# silently forwarded to par() and only triggers a warning.
plot(x = input$wt,y = input$mpg,
xlab = "Weight",
ylab = "Milage",
xlim = c(2.5,5),
ylim = c(15,30),
main = "Weight vs Milage",
breaks=c(1.1,2.1)
)
##
# Plot the matrices between 4 variables giving 12 plots.
# One variable with 3 others and total 4 variables.
pairs(~wt+mpg+disp+cyl,data = mtcars,
main = "Scatterplot Matrix")
| /Data_Visualization_5th_Jul.r | no_license | vimleshtech/rlanguage-tutorial | R | false | false | 4,050 | r | ###pie chart example
expenditure<-c("Housing","Food","Cloths","Entertainment","Other")
amount<-c(600,300,150,100,200)
##x,y
#example -1
pie(amount,expenditure)
#example -2
pie(amount,
labels=as.character(expenditure),
main="Monthly Expenditure Breakdown",
col=c("red","orange","yellow","blue","green"),
border="black",
radius = 1.3,
clockwise=TRUE
)
## barplot
max.temp <- c(22, 27, 26, 24, 23, 26, 28)
barplot(max.temp)
barplot(max.temp,
main = "Maximum Temperatures in a Week",
xlab = "Degree Celsius",
ylab = "Day",
names.arg = c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"),
col =c("red","orange","yellow","blue","green","red","red"),
horiz = FALSE
)
##categorize or group by data
age <- c(17,18,18,17,18,19,18,16,18,18)
table(age)
barplot(table(age),
main="Age Count of 10 Students",
xlab="Age",
ylab="Count",
border="red",
col="blue"
)
##example -3
Titanic
margin.table(Titanic,1) # count according to class
margin.table(Titanic,2) # count according to survival
margin.table(Titanic) # gives total count if index is not provided
barplot(margin.table(Titanic,1))
###BOX plot
airquality #dataframe
str(airquality)
boxplot(airquality$Temp)
## example -
boxplot(airquality$Temp,
main = "Mean ozone in parts per billion at Roosevelt Island",
xlab = "Parts Per Billion",
ylab = "Ozone",
col = "orange",
border = "brown",
horizontal = FALSE,
notch = TRUE
)
##EXAMPLE -2
boxplot(airquality$Ozone,airquality$Temp,airquality$Day,
main = "Multiple boxplots for comparision",
names = c("ozone", "normal", "temp"),
las = 2,
col = c("orange","red"),
border = "brown",
horizontal = FALSE,
notch = TRUE,
xlab="test"
)
#example no. 4
boxplot(airquality$Temp~airquality$Ozone,
data=airquality,
main="Different boxplots for each month",
xlab="Month Number",
ylab="Degree Fahrenheit",
col="orange",
border="brown"
)
#### histogram
str(airquality)
Temperature <- airquality$Temp
hist(Temperature)
##
# histogram with added parameters
hist(Temperature,
main="Maximum daily temperature at La Guardia Airport",
xlab="Temperature in degrees Fahrenheit",
xlim=c(50,100),
col="green",
freq=TRUE
)
##with text label
h <- hist(Temperature,ylim=c(0,40),col="darkmagenta")
text(h$mids,h$counts,labels=h$counts, adj=c(0.1, -0.1))
## Histogram with non-uniform bin widths (explicit breaks vector).
# NOTE(review): with unequal breaks, hist() forces freq = FALSE and plots
# densities rather than counts -- the y-axis differs from the plots above.
hist(Temperature,
     main="Maximum daily temperature at La Guardia Airport",
     xlab="Temperature in degrees Fahrenheit",
     xlim=c(50,100),
     col="chocolate",
     border="brown",
     breaks=c(55,60,70,75,80,100)
)
### Line chart with ggplot2
# Dummy data: 9 categories, 5 x-positions each, random values.
# set.seed() makes the sample() draw reproducible.
set.seed(45)
df <- data.frame(x=rep(1:5, 9), val=sample(1:100, 45),
                 variable=rep(paste0("category", 1:9), each=5))
str(df)
## If needed, install first: install.packages("ggplot2")
library(ggplot2)
# One line per category, coloured by the 'variable' column.
ggplot(data = df, aes(x=x, y=val)) + geom_line(aes(colour=variable))
##
# Create the data for the line chart.
v <- c(7,12,28,3,41)
t <- c(14,7,6,19,3)
# Plot the first series as a line-with-points chart (type = "o"),
# then overlay the second series with lines() on the same axes.
plot(v,type = "o",col = "red", xlab = "Month", ylab = "Rain fall",
     main = "Rain fall chart")
lines(t, type = "o", col = "blue")
## Rainbow demo: five random jagged lines in colours from rainbow().
# type = "n" sets up an empty plotting region first.
plot(0,0,xlim = c(-10,10),ylim = c(-10,10),type = "n")
cl <- rainbow(5)
for (i in 1:5){
  lines(-10:10,runif(21,-10,10),col = cl[i],type = 'b')
}
## Scatter plot
# Get the input values: car weight and miles-per-gallon from mtcars.
input <- mtcars[,c('wt','mpg')]
str(input)
# Plot cars with weight between 2.5 and 5 and mileage between 15 and 30
# (points outside xlim/ylim are simply clipped from view).
# FIX: removed the stray breaks = c(1.1, 2.1) argument -- plot() has no
# 'breaks' parameter; it was forwarded via '...' and triggered
# '"breaks" is not a graphical parameter' warnings at runtime.
# Also corrected the "Milage" spelling in the displayed labels.
plot(x = input$wt, y = input$mpg,
     xlab = "Weight",
     ylab = "Mileage",
     xlim = c(2.5,5),
     ylim = c(15,30),
     main = "Weight vs Mileage"
)
##
# Scatterplot matrix: every pairwise scatter among the four mtcars
# variables wt, mpg, disp and cyl (12 off-diagonal panels in a 4x4 grid).
pairs(~wt+mpg+disp+cyl,data = mtcars,
      main = "Scatterplot Matrix")
|
\name{print.info.atomic}
\alias{print.info.atomic}
\title{Print a Column Analysis...}
\usage{\method{print}{info.atomic}(x, ...)}
\description{Print a Column Analysis}
\details{Correctly format and display the analysis of an individual column.}
\value{the object (invisibly)}
\arguments{\item{x}{a \code{column.analysis} object}
\item{...}{ignored parameters}}
| /man/print.info.atomic.Rd | no_license | zeligdev/ZeligDVN | R | false | false | 361 | rd | \name{print.info.atomic}
\alias{print.info.atomic}
\title{Print a Column Analysis...}
\usage{\method{print}{info.atomic}(x, ...)}
\description{Print a Column Analysis}
\details{Correctly format and display the analysis of an individual column.}
\value{the object (invisibly)}
\arguments{\item{x}{a \code{column.analysis} object}
\item{...}{ignored parameters}}
|
# Auto-generated fuzzing regression test for meteor:::ET0_Makkink
# (Makkink reference evapotranspiration). The input list deliberately
# holds degenerate values: Rs empty, atmp all zeros, relh a single
# extreme negative scalar, temp a vector of extreme doubles.
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.8936300554598e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Invoke the (non-exported) routine with the fuzzed arguments.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615845635-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 735 | r | testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.8936300554598e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
#' Make all HTTP requests return a fake response
#'
#' In this context, HTTP verb functions raise a 'message' so that test code can
#' assert that the requests are made. As in [without_internet()], the message
#' raised has a well-defined shape, made of three
#' elements, separated by space: (1) the request
#' method (e.g. "GET" or "POST"); (2) the request URL; and
#' (3) the request body, if present. The verb-expectation functions,
#' such as `expect_GET` and `expect_POST`, look for this shape.
#'
#' Unlike `without_internet`,
#' the HTTP functions do not error and halt execution, instead returning a
#' `response`-class object so that code calling the HTTP functions can
#' proceed with its response handling logic and itself be tested. The response
#' it returns echoes back most of the request itself, similar to how some
#' endpoints on \url{http://httpbin.org} do.
#'
#' @param expr Code to run inside the fake context
#' @return The result of `expr`
#' @examples
#' with_fake_http({
#' expect_GET(req1 <- httr::GET("http://example.com"), "http://example.com")
#' req1$url
#' expect_POST(
#' req2 <- httr::POST("http://example.com", body = '{"a":1}'),
#' "http://example.com"
#' )
#' httr::content(req2)
#' })
#' @export
#' @importFrom testthat expect_message
with_fake_http <- function(expr) {
  # Disable request errors so mocked verbs message() instead of stop();
  # options() returns the previous value so it can be restored on exit.
  old <- options(..httptest.request.errors = FALSE)
  # Route all httr requests through fake_request() instead of the network.
  mock_perform(fake_request)
  on.exit({
    # Restore the saved option and remove the mock, even if 'expr' errors.
    do.call(options, old)
    stop_mocking()
  })
  # Evaluate the caller's code in the calling frame so that assignments
  # made inside 'expr' land in the caller's environment.
  eval.parent(expr)
}
#' Return something that looks like a 'response'
#'
#' These functions allow mocking of HTTP requests without requiring an internet
#' connection or server to run against. Their return shape is a 'httr'
#' "response" class object that should behave like a real response generated
#' by a real request.
#'
#' @param request An 'httr' `request`-class object. A character URL is also
#' accepted, for which a fake request object will be created, using the `verb`
#' argument as well.
#' @param verb Character name for the HTTP verb, if `request` is a URL. Default
#' is "GET".
#' @param status_code Integer HTTP response status
#' @param headers Optional list of additional response headers to return
#' @param content If supplied, a JSON-serializable list that will be returned
#' as response content with Content-Type: application/json. If no `content`
#' is provided, and if the `status_code` is not 204 No Content, the
#' `url` will be set as the response content with Content-Type: text/plain.
#' @return An 'httr' response class object.
#' @export
#' @importFrom jsonlite toJSON
#' @importFrom utils modifyList
fake_response <- function(request,
                          verb = "GET",
                          status_code = 200,
                          headers = list(),
                          content = NULL) {
  # Build an httr-style "response" object with no network activity.
  # Contract is documented in the roxygen block above.
  if (is.character(request)) {
    # To-be-deprecated(?) behavior of passing in a URL. Fake a request.
    request <- structure(list(method = verb, url = request), class = "request")
  }
  # TODO: if the request says `write_disk`, should we copy the mock file to
  # that location, so that that file exists?
  base.headers <- list()
  if (status_code == 204) {
    # 204 No Content must not carry a body.
    content <- NULL
  } else if (!is.raw(content)) {
    if (!is.character(content)) {
      # Neither raw nor a string: serialize to JSON.
      content <- toJSON(content,
        auto_unbox = TRUE, null = "null", na = "null",
        force = TRUE
      )
      base.headers <- list(`Content-Type` = "application/json")
    }
    # FIX: Content-Length is defined in bytes; plain nchar() counts
    # characters and undercounts multibyte (UTF-8) content.
    base.headers[["content-length"]] <- nchar(content, type = "bytes")
    content <- charToRaw(content)
  }
  # Caller-supplied headers override the computed defaults.
  headers <- modifyList(base.headers, headers)
  structure(list(
    url = request$url,
    status_code = status_code,
    # Zeroed curl-style timing breakdown; "total" is faked from URL length.
    times = structure(c(rep(0, 5), nchar(request$url)),
      .Names = c(
        "redirect", "namelookup", "connect", "pretransfer",
        "starttransfer", "total"
      )
    ),
    request = request,
    headers = headers,
    content = content
  ), class = "response")
}
fake_request <- function(req, handle, refresh) {
  # Mocked request handler: emits "<METHOD> <URL> [<BODY>]" as a message
  # (so the expect_VERB() helpers can match it) and returns a fake
  # response that echoes the request back.
  out <- paste(req$method, req$url)
  body <- request_body(req)
  headers <- list(`Content-Type` = "application/json")
  # Bodyless non-GET requests answer 204 No Content; everything else 200.
  # FIX: plain if/else instead of ifelse() -- the condition is a scalar,
  # and ifelse() on scalars is an R anti-pattern (slower, strips attributes).
  status_code <- if (is.null(body) && req$method != "GET") 204 else 200
  if (!is.null(body)) {
    out <- paste(out, body)
  }
  message(out)
  fake_response(req,
    content = body, status_code = status_code,
    headers = headers
  )
}
| /R/fake-http.R | no_license | cran/httptest | R | false | false | 4,371 | r | #' Make all HTTP requests return a fake response
#'
#' In this context, HTTP verb functions raise a 'message' so that test code can
#' assert that the requests are made. As in [without_internet()], the message
#' raised has a well-defined shape, made of three
#' elements, separated by space: (1) the request
#' method (e.g. "GET" or "POST"); (2) the request URL; and
#' (3) the request body, if present. The verb-expectation functions,
#' such as `expect_GET` and `expect_POST`, look for this shape.
#'
#' Unlike `without_internet`,
#' the HTTP functions do not error and halt execution, instead returning a
#' `response`-class object so that code calling the HTTP functions can
#' proceed with its response handling logic and itself be tested. The response
#' it returns echoes back most of the request itself, similar to how some
#' endpoints on \url{http://httpbin.org} do.
#'
#' @param expr Code to run inside the fake context
#' @return The result of `expr`
#' @examples
#' with_fake_http({
#' expect_GET(req1 <- httr::GET("http://example.com"), "http://example.com")
#' req1$url
#' expect_POST(
#' req2 <- httr::POST("http://example.com", body = '{"a":1}'),
#' "http://example.com"
#' )
#' httr::content(req2)
#' })
#' @export
#' @importFrom testthat expect_message
with_fake_http <- function(expr) {
  # Disable request errors so mocked verbs message() rather than stop();
  # keep the previous option value for restoration.
  old <- options(..httptest.request.errors = FALSE)
  # Route all httr requests through fake_request() instead of the network.
  mock_perform(fake_request)
  on.exit({
    # Restore the saved option and remove the mock, even on error.
    do.call(options, old)
    stop_mocking()
  })
  # Evaluate the caller's code in the calling frame.
  eval.parent(expr)
}
#' Return something that looks like a 'response'
#'
#' These functions allow mocking of HTTP requests without requiring an internet
#' connection or server to run against. Their return shape is a 'httr'
#' "response" class object that should behave like a real response generated
#' by a real request.
#'
#' @param request An 'httr' `request`-class object. A character URL is also
#' accepted, for which a fake request object will be created, using the `verb`
#' argument as well.
#' @param verb Character name for the HTTP verb, if `request` is a URL. Default
#' is "GET".
#' @param status_code Integer HTTP response status
#' @param headers Optional list of additional response headers to return
#' @param content If supplied, a JSON-serializable list that will be returned
#' as response content with Content-Type: application/json. If no `content`
#' is provided, and if the `status_code` is not 204 No Content, the
#' `url` will be set as the response content with Content-Type: text/plain.
#' @return An 'httr' response class object.
#' @export
#' @importFrom jsonlite toJSON
#' @importFrom utils modifyList
fake_response <- function(request,
                          verb = "GET",
                          status_code = 200,
                          headers = list(),
                          content = NULL) {
  # Build an httr-style "response" object with no network activity.
  if (is.character(request)) {
    # To-be-deprecated(?) behavior of passing in a URL. Fake a request.
    request <- structure(list(method = verb, url = request), class = "request")
  }
  # TODO: if the request says `write_disk`, should we copy the mock file to
  # that location, so that that file exists?
  base.headers <- list()
  if (status_code == 204) {
    # 204 No Content must not carry a body.
    content <- NULL
  } else if (!is.raw(content)) {
    if (!is.character(content)) {
      # Neither raw nor a string: serialize to JSON.
      content <- toJSON(content,
        auto_unbox = TRUE, null = "null", na = "null",
        force = TRUE
      )
      base.headers <- list(`Content-Type` = "application/json")
    }
    # NOTE(review): nchar() counts characters, not bytes -- Content-Length
    # may undercount multibyte UTF-8 content; consider type = "bytes".
    base.headers[["content-length"]] <- nchar(content)
    content <- charToRaw(content)
  }
  # Caller-supplied headers override the computed defaults.
  headers <- modifyList(base.headers, headers)
  structure(list(
    url = request$url,
    status_code = status_code,
    # Zeroed curl-style timing breakdown; "total" is faked from URL length.
    times = structure(c(rep(0, 5), nchar(request$url)),
      .Names = c(
        "redirect", "namelookup", "connect", "pretransfer",
        "starttransfer", "total"
      )
    ),
    request = request,
    headers = headers,
    content = content
  ), class = "response")
}
fake_request <- function(req, handle, refresh) {
  # Mocked request handler: emits "<METHOD> <URL> [<BODY>]" as a message
  # (matched by the expect_VERB() helpers) and returns a fake response.
  out <- paste(req$method, req$url)
  body <- request_body(req)
  headers <- list(`Content-Type` = "application/json")
  # Bodyless non-GET requests answer 204 No Content; everything else 200.
  status_code <- ifelse(is.null(body) && req$method != "GET", 204, 200)
  if (!is.null(body)) {
    out <- paste(out, body)
  }
  message(out)
  return(fake_response(req,
    content = body, status_code = status_code,
    headers = headers
  ))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rankclust.R
\name{rankclust}
\alias{rankclust}
\title{Model-based clustering for multivariate partial ranking}
\usage{
rankclust(
data,
m = ncol(data),
K = 1,
criterion = "bic",
Qsem = 100,
Bsem = 20,
RjSE = m * (m - 1)/2,
RjM = m * (m - 1)/2,
Ql = 500,
Bl = 100,
maxTry = 3,
run = 1,
detail = FALSE
)
}
\arguments{
\item{data}{a matrix in which each row is a ranking (partial or not; for partial ranking,
missing elements must be 0 or NA. Ties are replaced by the lowest position they share). For multivariate rankings, the rankings of each dimension are
placed end to end in each row. The data must be in ranking notation (see Details or
\link{convertRank} functions).}
\item{m}{a vector composed of the sizes of the rankings of each dimension (default value is the number of column of the matrix data).}
\item{K}{an integer or a vector of integer with the number of clusters.}
\item{criterion}{criterion "bic" or "icl", criterion to minimize for selecting the number of clusters.}
\item{Qsem}{the total number of iterations for the SEM algorithm (default value = 100).}
\item{Bsem}{burn-in period for SEM algorithm (default value = 20).}
\item{RjSE}{a vector containing, for each dimension, the number of iterations of the Gibbs sampler
used both in the SE step for partial rankings and for the presentation orders generation (default value=mj(mj-1)/2).}
\item{RjM}{a vector containing, for each dimension, the number of iterations of the Gibbs sampler used in the M step (default value=mj(mj-1)/2)}
\item{Ql}{number of iterations of the Gibbs sampler
for estimation of log-likelihood (default value = 500).}
\item{Bl}{burn-in period for estimation of log-likelihood (default value = 100).}
\item{maxTry}{maximum number of restarts of the SEM-Gibbs algorithm in the case of non convergence (default value=3).}
\item{run}{number of runs of the algorithm for each value of K.}
\item{detail}{boolean, if TRUE, time and others informations will be print during the process (default value FALSE).}
}
\value{
An object of class rankclust (See \code{\link{Output-class}} and \code{\link{Rankclust-class}}).
If the output object is named \code{res}. You can access the result by res[number of groups]@slotName where \code{slotName} is an element of the class Output.
}
\description{
This function estimates a clustering of ranking data, potentially multivariate, partial and containing ties, based on a mixture of multivariate ISR model [2].
By specifying only one cluster, the function performs a modelling of the ranking data using the multivariate ISR model.
The estimation is performed thanks to a SEM-Gibbs algorithm in the general case.
}
\details{
The ranks have to be given to the package in the ranking notation (see \link{convertRank} function), with the following convention:
- missing positions are replaced by 0
- ties are replaced by the lowest position they share
The ranking representation r=(r_1,...,r_m) contains the
ranks assigned to the objects, and means that the ith
object is in r_ith position.
The ordering representation o=(o_1,...,o_m) means that object
o_i is in the ith position.
Let us consider the following example to illustrate both
notations: a judge, which has to rank three holidays
destinations according to its preferences, O1 =
Countryside, O2 =Mountain and O3 = Sea, ranks first Sea,
second Countryside, and last Mountain. The ordering
result of the judge is o = (3, 1, 2) whereas the ranking
result is r = (2, 3, 1).
}
\examples{
data(big4)
result <- rankclust(big4$data, K = 2, m = big4$m, Ql = 200, Bl = 100, maxTry = 2)
if(result@convergence)
{
summary(result)
partition <- result[2]@partition
tik <- result[2]@tik
}
}
\references{
[1] C.Biernacki and J.Jacques (2013), A generative model for rank data based on sorting algorithm, Computational Statistics and Data Analysis, 58, 162-176.
[2] J.Jacques and C.Biernacki (2012), Model-based clustering for multivariate partial ranking data, Inria Research Report n 8113.
}
\seealso{
See \code{\link{Output-class}} and \code{\link{Rankclust-class}} for available output.
}
\author{
Quentin Grimonprez
}
| /fuzzedpackages/Rankcluster/man/rankclust.Rd | no_license | akhikolla/testpackages | R | false | true | 4,216 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rankclust.R
\name{rankclust}
\alias{rankclust}
\title{Model-based clustering for multivariate partial ranking}
\usage{
rankclust(
data,
m = ncol(data),
K = 1,
criterion = "bic",
Qsem = 100,
Bsem = 20,
RjSE = m * (m - 1)/2,
RjM = m * (m - 1)/2,
Ql = 500,
Bl = 100,
maxTry = 3,
run = 1,
detail = FALSE
)
}
\arguments{
\item{data}{a matrix in which each row is a ranking (partial or not; for partial ranking,
missing elements must be 0 or NA. Ties are replaced by the lowest position they share). For multivariate rankings, the rankings of each dimension are
placed end to end in each row. The data must be in ranking notation (see Details or
\link{convertRank} functions).}
\item{m}{a vector composed of the sizes of the rankings of each dimension (default value is the number of column of the matrix data).}
\item{K}{an integer or a vector of integer with the number of clusters.}
\item{criterion}{criterion "bic" or "icl", criterion to minimize for selecting the number of clusters.}
\item{Qsem}{the total number of iterations for the SEM algorithm (default value = 100).}
\item{Bsem}{burn-in period for SEM algorithm (default value = 20).}
\item{RjSE}{a vector containing, for each dimension, the number of iterations of the Gibbs sampler
used both in the SE step for partial rankings and for the presentation orders generation (default value=mj(mj-1)/2).}
\item{RjM}{a vector containing, for each dimension, the number of iterations of the Gibbs sampler used in the M step (default value=mj(mj-1)/2)}
\item{Ql}{number of iterations of the Gibbs sampler
for estimation of log-likelihood (default value = 500).}
\item{Bl}{burn-in period for estimation of log-likelihood (default value = 100).}
\item{maxTry}{maximum number of restarts of the SEM-Gibbs algorithm in the case of non convergence (default value=3).}
\item{run}{number of runs of the algorithm for each value of K.}
\item{detail}{boolean, if TRUE, time and others informations will be print during the process (default value FALSE).}
}
\value{
An object of class rankclust (See \code{\link{Output-class}} and \code{\link{Rankclust-class}}).
If the output object is named \code{res}. You can access the result by res[number of groups]@slotName where \code{slotName} is an element of the class Output.
}
\description{
This function estimates a clustering of ranking data, potentially multivariate, partial and containing ties, based on a mixture of multivariate ISR model [2].
By specifying only one cluster, the function performs a modelling of the ranking data using the multivariate ISR model.
The estimation is performed thanks to a SEM-Gibbs algorithm in the general case.
}
\details{
The ranks have to be given to the package in the ranking notation (see \link{convertRank} function), with the following convention:
- missing positions are replaced by 0
- ties are replaced by the lowest position they share
The ranking representation r=(r_1,...,r_m) contains the
ranks assigned to the objects, and means that the ith
object is in r_ith position.
The ordering representation o=(o_1,...,o_m) means that object
o_i is in the ith position.
Let us consider the following example to illustrate both
notations: a judge, which has to rank three holidays
destinations according to its preferences, O1 =
Countryside, O2 =Mountain and O3 = Sea, ranks first Sea,
second Countryside, and last Mountain. The ordering
result of the judge is o = (3, 1, 2) whereas the ranking
result is r = (2, 3, 1).
}
\examples{
data(big4)
result <- rankclust(big4$data, K = 2, m = big4$m, Ql = 200, Bl = 100, maxTry = 2)
if(result@convergence)
{
summary(result)
partition <- result[2]@partition
tik <- result[2]@tik
}
}
\references{
[1] C.Biernacki and J.Jacques (2013), A generative model for rank data based on sorting algorithm, Computational Statistics and Data Analysis, 58, 162-176.
[2] J.Jacques and C.Biernacki (2012), Model-based clustering for multivariate partial ranking data, Inria Research Report n 8113.
}
\seealso{
See \code{\link{Output-class}} and \code{\link{Rankclust-class}} for available output.
}
\author{
Quentin Grimonprez
}
|
# Load the provided NEI emissions data from the local machine. The SCC
# code table is part of the assignment download; it is loaded here for
# completeness but not used by this particular plot.
NEI <- readRDS("~/Exploratory_Data_Analysis/Assignment_2/summarySCC_PM25.rds")
SCC <- readRDS("~/Exploratory_Data_Analysis/Assignment_2/Source_Classification_Code.rds")
# FIX: removed the unused NEI_sampling draw -- the sampled rows were never
# referenced, so the step only wasted time and consumed random numbers.
# Subset to Baltimore City, Maryland (fips == "24510").
MD <- subset(NEI, fips == "24510")
# Question: have total PM2.5 emissions in Baltimore City decreased from
# 1999 to 2008? Answered with a base-graphics bar plot of yearly totals,
# written as a PNG next to the input data.
png(filename = "~/Exploratory_Data_Analysis/Assignment_2/plot2.png")
barplot(tapply(X = MD$Emissions, INDEX = MD$year, FUN = sum),
        main = "Total Emission in Baltimore City, MD",
        xlab = "Year", ylab = expression("PM"[2.5]))
dev.off()
# Loading provided datasets - loading from local machine
NEI <- readRDS("~/Exploratory_Data_Analysis/Assignment_2/summarySCC_PM25.rds")
SCC <- readRDS("~/Exploratory_Data_Analysis/Assignment_2/Source_Classification_Code.rds")
# Sampling
NEI_sampling <- NEI[sample(nrow(NEI), size=5000, replace=F), ]
# Subset data and append two years in one data frame
MD <- subset(NEI, fips=='24510')
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510")
# from 1999 to 2008? Use the base plotting system to make a plot answering this question.
# Generate the graph in the same directory as the source code
png(filename='~/Exploratory_Data_Analysis/Assignment_2/plot2.png')
barplot(tapply(X=MD$Emissions, INDEX=MD$year, FUN=sum),
main='Total Emission in Baltimore City, MD',
xlab='Year', ylab=expression('PM'[2.5]))
dev.off() |
# FIX: load packages with library() rather than require(). require()
# merely returns FALSE when a package is missing, which would let the app
# keep running and crash later with a confusing error; library() fails
# fast at startup.
library(tidyverse)
library(shiny)
library(ggplot2)
library(plotly)
library(viridis)
library(hrbrthemes)
library(gtools)
library(shinythemes)
library(shinyWidgets)
library(shinymaterial)
library(DT)
# NOTE(review): "data:/scoresspread.csv" looks like a typo for
# "data/scoresspread.csv" -- confirm the path before deploying.
scores_df <- read.csv("data:/scoresspread.csv")
# --- Shared input widgets used across the tab pages ----------------------
# NOTE(review): trailing commas after 'label' in trend_input and
# opacitybp_input rely on R matching the empty argument to a defaulted
# formal; harmless here but worth removing.
# Dropdown of home teams (single choice).
team_input <- selectInput(
  inputId = "team_input",
  choices = sort(unique(scores_df$team_home)),
  label = "Select Team"
)
# Week selector; mixedsort() orders mixed alphanumeric labels naturally.
# Multiple weeks may be selected at once.
week_input <- selectInput(
  inputId = "week_input",
  choices = mixedsort(unique(scores_df$schedule_week)),
  label = "Week",
  multiple = TRUE
)
# Season selector over all seasons in the data.
year_input <- selectInput(
  inputId = "year_input",
  choices = sort(unique(scores_df$schedule_season)),
  label = "Season"
)
# Season selector restricted to seasons after 1978, newest first.
year_input2 <- selectInput(
  inputId = "year_input2",
  choices = scores_df %>% filter(schedule_season > 1978) %>% select(schedule_season) %>% unique() %>% arrange() %>% map_df(rev),
  label = "Season"
)
# Radio toggle for including playoff games (regular season by default).
playoff_input <- prettyRadioButtons(
  inputId = "playoff_input",
  choices = list("True" = TRUE, "False" = FALSE),
  selected = FALSE,
  label = "Include Playoffs",
  animation = "smooth",
  shape = "curve",
  inline = TRUE
)
# Multi-select of trend lines to overlay on the over/under plot.
trend_input <- selectInput(
  inputId = "trend_input",
  choices = c("Overall O/U Line", "Overall Total Points", "Season O/U Line", "Season Total Points"),
  multiple = TRUE,
  label = "Include Trend Line",
)
# Slider controlling the boxplot fill transparency.
opacitybp_input <- sliderInput(
  inputId = "opacitybp_input",
  min = 0,
  max = 1.0,
  value = 0.1,
  step = 0.01,
  label = "Boxplot Opacity",
)
# Introduction Page: static prose explaining the domain, key betting
# terms, and summary statistics (the textOutput() placeholders are filled
# by the server).
page_one <- tabPanel(
  "Introduction",
  style = "margin-top: -20px",
  icon = icon("i", "fa-info-circle"),
  titlePanel("Introduction"),
  wellPanel(style = "margin-top: 21px",
    # The inline-display CSS on #container lets textOutput() values flow
    # inside the surrounding paragraph text.
    fluidRow(style = "padding: 30px; margin-top: -20px; margin-bottom: -20px", tags$head(tags$style("#container * { display: inline; }")),
      div(id="container",
        h3("Domain"),
        br(),
        br(),
        p("We chose to look at sports betting in the NFL, which is the practice of placing wagers on football players and games. There is a wide range in the level of participation for sports betting: from casual fans to serious money-making fanatics. Regardless of motivations, all sports bets follow three simple steps: selection, stakes, and odds. The most popular types of sports bets are on point spreads and totals, where people often use statistics to decide on their bet and then wait for the game to be played to see the outcome. Betting is always a gamble, and sports betting involves necessary risk. Sports betting often happens online, which is where this project is focusing. We choose this domain because of a shared interest in sports, and curiosity about the world of football betting. This project will examine trends in National Football League (NFL) betting, especially how the COVID 19 pandemic has affected football betting and team play."),
        br(),
        br(),
        br(),
        img(src = 'betting_infographic.PNG', alt = "US sports betting infographic", height="60%", width="60%", style = "display: block; margin-left: auto; margin-right: auto;"),
        tags$div(style = "text-align: center; font-size: 10px; display: block;", tags$em("This infographic, ", tags$a(href="https://www.economist.com/graphic-detail/2019/12/29/as-much-as-26bn-could-be-gambled-on-american-sport-in-2020", "by the Economist"), ", shows which states have legalized sports gambling on the map and the bar chart on the side shows the rapid growth of legal sports bets in the last few years.")),
        br(),
        br(),
        h3("Key Terms"),
        br(),
        br(),
        p(strong("Spread"), "- the expected point margin by which a team will win or lose by. In other words, how much a team is favored by. A ",
          em("negative"), " spread implies the team is favored by that amount. A ",
          em("positive")," spread implies the team is the underdog by that amount." ),
        br(),
        p(strong("Cover the spread"), "- the underdog/favored team was able to win/lose by the certain threshold. An underdog team can either lose by the number of points set in the spread or less and cover the spread. In other words, the underdog is given a handicap. Moreover, the favored team ",
          em("has"), " to win by the given spread or they will not cover the spread."),
        br(),
        p(strong("Over Under"), "- the expected total number of points scored by both teams in the given game. People can either bet on the over (going to go over the expected total) or the under (going under the expected total)."),
        br(),
        br(),
        p(em("E.g."), "the super bowl between the Chiefs and the Buccaneers this year (Super Bowl 55), had a spread of Chiefs -3.5 and an over/under of 57.7. This means that the Chiefs were favored by 3.5 points, and they had to win by 3.5 points or else they wouldn't cover the spread. The Buccaneers on the other hand, could lose by less than 3.5 points or just win to cover the spread. The Buccaneers came out on top 31-9, so Chiefs didn't cover and the game went under since the total points scored was under 57.7."),
        br(),
        br(),
        h3("Summary Information"),
        br(),
        br(),
        # Each textOutput() id below must have a matching render* in the server.
        p("Our dataset includes ", textOutput("num_obs"), " observations that contain values in the favorite team and spread favorite columns. We specifically chose to keep the observations with these variables since we wanted to study the trend of the betting. This turned out to be around ",
          textOutput("num_unique_years"), "seasons worth of data with ",
          textOutput("num_different_teams"), " different teams. Thus, ignoring the rows without spread data, the smallest spread turned out to be ", textOutput("favorite_min_spread"), " , which which is also considered a 50/50 game or a tossup. Our biggest spread turned out to be ", textOutput("favorite_max_spread"), " which is considered to be a very one sided game. However,our mean turned out to be ", textOutput("favorite_spread_mean"), " which was much closer to a 50/50 game than a one sided game. We also found that the proportion of home teams that were favored was ", textOutput("prop_home_favorite"), " and of the home favorites we found the proportion to cover the spread to be ", textOutput("prop_home_favorite_cover") )
      )
    ),
  )
)
# Betting Accuracy Page: sidebar of explanatory prose plus team/season
# selectors; main panel renders two plotly charts produced by the server.
page_two <- tabPanel(
  "Betting Accuracy",
  style = "margin-top: -20px",
  icon = icon("i", "fa-bullseye"),
  titlePanel("Betting Accuracy"),
  sidebarLayout(
    sidebarPanel(style = "margin-top: 10px",
      h4("A key aspect of sports betting is making predictions on who will win the game. In this section, we examine how those predictions stacked up with actual game outcomes."),
      p("This first plot is intended to show the relationship between the average percent accuracy of projected winners actually winning over the seasons. For the first couple years of data, there are not many games recorded which results in outliers where the percent accuracy is either 100% or 0%. From the year 1980 and beyond, we can see a general trend represented with the red trend line of a percent accuracy around 60%. This means that for each season, where there are multiple games played, the average accuracy of the projected favorite team winning is about 60%. This plot is answering the question of whether the accuracy of projected favorites changed for the 2020 season due to the COVID 19 pandemic and an adjusted season. In the plot, we can see that there was no significant change in the accuracy of predicting winners, as the point for 2020 percent accuracy lies within the expected values represented by the shadow of the trend line."),
      br(),
      p("This second plot allows you to choose a team and a season year to see
how well bettors are able to predict game outcomes for home games. *Tip:
more recent seasons have more data!"),
      team_input,
      year_input
    ),
    mainPanel(
      wellPanel(style = "margin-top: 10px; padding: 6px",
        "The Average Percent Accuracy of Projected Favorites by Season",
        plotlyOutput("bets_accuracy_plot")
      ),
      wellPanel(style = "margin-top: 10px; padding: 6px",
        "A Team's Percent Accuracy in a Given Season",
        plotlyOutput("team_season_accuracy")
      )
    )
  )
)
# Over/Under Lines Page: sidebar of prose plus season/playoff/trend/
# opacity controls; main panel shows weekly O/U boxplots and a takeaway.
# FIX: corrected the misspelling "uncertainy" in the displayed heading.
page_three <- tabPanel(
  "Uncertainty in the Over/Under Lines",
  style = "margin-top: -20px",
  icon = icon("i", "fa-question"),
  titlePanel("Uncertainty in the Over/Under Lines"),
  sidebarLayout(
    sidebarPanel(style = "margin-top: 10px",
      h4("Though the spread of games can tell us the uncertainty and favorites of games, Over/Under lines can tell us more about what the odd makers are seeing."),
      p("In the unprecedented 2020 NFL season, we saw games being played with no fans to minimal team interaction before the season. This was a season never like before, and it proved to show in the Over/Under lines of games. In this plot, we are able to observe the distribution of the Over/Under lines of each week throughout the given season. In addition, we are able to compare the overall averages of Over/Under lines and total points scored, and the season averages. I included this to show how the trends might differ or be similar."),
      br(),
      # Inline-block wrappers place the season selector and the playoff
      # toggle side by side.
      div(style="display: inline-block;vertical-align:top; width: 150px;", year_input2),
      div(style="display: inline-block;vertical-align:top; width: 50px;",HTML("<br>")),
      div(style="display: inline-block;vertical-align:top; width: 150px;", playoff_input),
      trend_input, chooseSliderSkin(
        skin = "Flat",
        color = "#1C2833"
      ), opacitybp_input
    ),
    mainPanel(style = "margin-top: 10px",
      wellPanel(style = "padding: 6px",
        "Distribution of Over/Under Lines for Every Week of the Season",
        plotlyOutput("ou_boxplot")
      ),
      wellPanel("We are able to see that in many cases the set Over/Under Lines was much lower compared to the total points scored. However, it is a common trend that oddsmakers increase the line as we see total points scored increase. Especially in the 2020 season, we can see that the average total scored points appears to be higher than the Over/Under Line for the first couple of weeks. It dipped for a bit after that but took a bit of a bounce-back after week 10. Overall, we do see that points are being scored at an increasing rate, causing the Over/Under Lines to go up as well. For the past decade or so, we have seen the average total points scored deviate away from the average calculated across all seasons."
      )
    )
  )
)
# Weeks & Seasons Average Spread Page
page_four <- tabPanel(
"Weeks & Seasons Average Spread",
style = "margin-top: -20px",
icon = icon("i", "fa-arrows-alt-h"),
titlePanel("Weeks & Seasons Average Spread"),
sidebarLayout(
sidebarPanel(style = "margin-top: 10px", week_input,
p("As we mentioned on an introduction page, spread is the expected point margin by which a team will win or lose by. Point spread bets are a popular type of sports bet that you can make. Point spread bets is also mostly likely to be a big part of your winning betting strategy. Matter of fact, many successful professional sports bettors use the point spread bets stategy to make up their winning stategy. As the world was hit with COVID 19, I wanted to see if it would affect the average spread in the NFL sport betting at all. It turns out out that COVID 19 does not affect the average spread. However, You can still utilize this plot by studying the trend of the spread and once you can pin out the pattern, you'll have a higher chance of spotting out the value and pick out the winners by the amount point spread more confidently."),
),
mainPanel(style = "margin-top: 10px",
wellPanel(style = "padding: 6px",
plotlyOutput("week_spreadplot")
)
)
)
)
# Conclusion Page
page_five <- tabPanel(
"Finishing Thoughts",
style = "margin-top: -20px",
icon = icon("i", "fa-check-square"),
titlePanel("Finishing Thoughts"),
wellPanel(style = "margin-top: 21px",
h4("After completing this project, we have come up with important insights for people who are interested in betting on NFL games. Using knowledge gained from this project, potential bettors are better equipped to make smart predictions about who will win a game."),
h4(
"All in all, the pandemic resulted in uncertainty before the start of the season, but that uncertainty chipped away as the season progressed. Linemakers were able to adapt to the high scoring play-style of the teams over this past season. Now, line makers now have data from a season under a pandemic they can use to set better lines. Even for bettors, they will have the data need to make smarter and safer bets for future seasons.",
em("(Hover over each title to get each of our reflections.)"))
),
fluidRow(
column(4,
tags$div(HTML(
"<div class=flip-card>
<div class=flip-card-inner>
<div class=flip-card-front>
<h2 style=text-align:center;vertical-align:middle>Betting Accuracy</h2>
</div>
<div class=flip-card-back>
<i style=text-align:center>Maddie: I analyzed how the accuracy of projected favorites changed in the 2020 season. I explore NFL betting accuracy which has consistently been around 60% accurate for the more recent seasons that we have a complete set of data for where all season's games are reported. Looking specifically at the 2020 season, we can see that here was no significant change in the accuracy of predicting winners, as it is around 63% accurate. Knowing that the 2020 NFL season was shaken up due to the COVID 19 pandemic, it is suprising that sports bets continue to be relatively accurate.</i>
</div>
</div>
</div>"
))),
column(4,
tags$div(HTML(
"<div class=flip-card>
<div class=flip-card-inner>
<div class=flip-card-front>
<h2 style=text-align:center>Uncertainty in the Over/Under Lines</h2>
</div>
<div class=flip-card-back>
<i style=text-align:center; padding:30px>Kobe: It was interesting to see how the Over/Under lines differed amongst different seasons. We saw odds makers didn't take into account more factors to why offenses would start scoring more in the 2020 season as we saw totals going way over the line in the first few weeks. Odd makers definitely had a very tough time making lines in this unprecedented season, the first of its kind. However, I think they will have much better lines for the 2021 season as they were able to collect data for one season through a pandemic. Overall, we observed a trend of uncertainty before the start of the season, as Over/Under Lines and total scored points in the first few weeks of 2020 had wide margins compared to the other seasons.</i>
</div>
</div>
</div>"
))),
column(4,
tags$div(HTML(
"<div class=flip-card>
<div class=flip-card-inner>
<div class=flip-card-front>
<h2>Weeks & Seasons Average Spread</h2>
</div>
<div class=flip-card-back>
<i>Bryan: I analyzed how Covid 19 would affect the average spread. In interactive Visuals part 3, I explore the changes in average spread per week of each NFL seasons. From analyzing the trend of average of all the seasons before 2020 and comparing it with the 2020 NFL season, I see that there was no correlation between Covid 19 and the average spread. The average spread still moved in a normal trend. From what I see, the average spread from week 1 to week 18 from the year 2015 to 2020. The spread average never exceed or fall by 1.5 points each following year which was unique and cool to see. </i>
</div>
</div>
</div>"
)))
),
br(),
fluidRow(
column(5,
wellPanel(
dataTableOutput("aggragate_table")
)
),
column(5,
wellPanel(style = "width: 790px; height: 195px; padding: 4px",
p("Interested in placing bets? Check out this table showing each team's average score during a home game. This will be helpful to new bettors since for each NFL game the oddsmakers set a number of points in which the favorite team is favored by. By making this table it allows the bettors to see the average scores of the favorite home team and either choose for the favored team to win by more than the number of points set, or bet on the underdogs to lose by less than the number of points, it can also work the other way around as well. Therefore by figuring out the favorite team and the average score of the hometeam, it'll give us a better chance of the prediction on how much points that the bettor's team will win or lose by.",
br(),
br(),
em("Click on the image below to get a list of betting sites available.")),
),
wellPanel(style = "width: 790px; height: 320px; padding: 6px",
a(href ="https://www.oddsshark.com/nfl/sites", img(src = 'ball_money.png', alt = "Football Showered with Money", style = "width: 775px; height: 305px;"), style = "text-align: center; font-size: 10px; display: block;")
)
)
)
)
# Putting everything together
ui <- fluidPage(
navbarPage(
theme = shinytheme('flatly'),
tags$div(tags$img(src='nfllogo.png', width = 29, height = 40, style="float:left; margin-left: 5px; margin-right: 10px; margin-top: -10px"), includeCSS("styles.css")),
page_one,
page_two,
page_three,
page_four,
page_five
)
) | /shiny:/ui.R | no_license | mneils-9/ac-group3-final-project | R | false | false | 19,089 | r | require(tidyverse)
# Load the packages the UI depends on. `library()` is preferred over
# `require()` for hard dependencies: `require()` merely returns FALSE when a
# package is missing, so the failure would surface later as a confusing
# "could not find function" error instead of failing fast here.
library(shiny)
library(ggplot2)
library(plotly)
library(viridis)
library(hrbrthemes)
library(gtools)
library(shinythemes)
library(shinyWidgets)
library(shinymaterial)
library(DT)
# Game scores and betting spreads; one row per NFL game.
# NOTE(review): the original read "data:/scoresspread.csv", which is not a
# valid relative path -- it appears to be a mangled "data/scoresspread.csv".
# Confirm against the repository layout.
scores_df <- read.csv("data/scoresspread.csv")
# --- Shared input widgets, referenced from the page definitions below ---

# Home-team picker for the per-team accuracy plot.
team_input <- selectInput(
inputId = "team_input",
choices = sort(unique(scores_df$team_home)),
label = "Select Team"
)
# Week-of-season picker (multi-select); gtools::mixedsort() orders mixed
# numeric/text week labels sensibly.
week_input <- selectInput(
inputId = "week_input",
choices = mixedsort(unique(scores_df$schedule_week)),
label = "Week",
multiple = TRUE
)
# Season picker over every season in the data.
year_input <- selectInput(
inputId = "year_input",
choices = sort(unique(scores_df$schedule_season)),
label = "Season"
)
# Season picker restricted to seasons after 1978, listed newest first
# (map_df(rev) reverses the arranged column).
year_input2 <- selectInput(
inputId = "year_input2",
choices = scores_df %>% filter(schedule_season > 1978) %>% select(schedule_season) %>% unique() %>% arrange() %>% map_df(rev),
label = "Season"
)
# Toggle for including playoff games; defaults to regular season only.
playoff_input <- prettyRadioButtons(
inputId = "playoff_input",
choices = list("True" = TRUE, "False" = FALSE),
selected = FALSE,
label = "Include Playoffs",
animation = "smooth",
shape = "curve",
inline = TRUE
)
# Which trend lines to overlay on the over/under boxplot.
# NOTE(review): the trailing comma after `label = ...` leaves an empty
# argument in the call -- confirm this evaluates cleanly.
trend_input <- selectInput(
inputId = "trend_input",
choices = c("Overall O/U Line", "Overall Total Points", "Season O/U Line", "Season Total Points"),
multiple = TRUE,
label = "Include Trend Line",
)
# Opacity slider for the over/under boxplot fill.
# NOTE(review): same trailing-comma pattern as trend_input above.
opacitybp_input <- sliderInput(
inputId = "opacitybp_input",
min = 0,
max = 1.0,
value = 0.1,
step = 0.01,
label = "Boxplot Opacity",
)
# Introduction Page: static prose explaining the domain (NFL betting), key
# betting terms, and a summary paragraph whose numbers are filled in by the
# server via the textOutput() placeholders below.
page_one <- tabPanel(
"Introduction",
style = "margin-top: -20px",
icon = icon("i", "fa-info-circle"),
titlePanel("Introduction"),
wellPanel(style = "margin-top: 21px",
# The inline-display CSS makes the textOutput() spans flow inside the prose.
fluidRow(style = "padding: 30px; margin-top: -20px; margin-bottom: -20px", tags$head(tags$style("#container * { display: inline; }")),
div(id="container",
h3("Domain"),
br(),
br(),
p("We chose to look at sports betting in the NFL, which is the practice of placing wagers on football players and games. There is a wide range in the level of participation for sports betting: from casual fans to serious money-making fanatics. Regardless of motivations, all sports bets follow three simple steps: selection, stakes, and odds. The most popular types of sports bets are on point spreads and totals, where people often use statistics to decide on their bet and then wait for the game to be played to see the outcome. Betting is always a gamble, and sports betting involves necessary risk. Sports betting often happens online, which is where this project is focusing. We choose this domain because of a shared interest in sports, and curiosity about the world of football betting. This project will examine trends in National Football League (NFL) betting, especially how the COVID 19 pandemic has affected football betting and team play."),
br(),
br(),
br(),
img(src = 'betting_infographic.PNG', alt = "US sports betting infographic", height="60%", width="60%", style = "display: block; margin-left: auto; margin-right: auto;"),
tags$div(style = "text-align: center; font-size: 10px; display: block;", tags$em("This infographic, ", tags$a(href="https://www.economist.com/graphic-detail/2019/12/29/as-much-as-26bn-could-be-gambled-on-american-sport-in-2020", "by the Economist"), ", shows which states have legalized sports gambling on the map and the bar chart on the side shows the rapid growth of legal sports bets in the last few years.")),
br(),
br(),
h3("Key Terms"),
br(),
br(),
p(strong("Spread"), "- the expected point margin by which a team will win or lose by. In other words, how much a team is favored by. A ",
em("negative"), " spread implies the team is favored by that amount. A ",
em("positive")," spread implies the team is the underdog by that amount." ),
br(),
p(strong("Cover the spread"), "- the underdog/favored team was able to win/lose by the certain threshold. An underdog team can either lose by the number of points set in the spread or less and cover the spread. In other words, the underdog is given a handicap. Moreover, the favored team ",
em("has"), " to win by the given spread or they will not cover the spread."),
br(),
p(strong("Over Under"), "- the expected total number of points scored by both teams in the given game. People can either bet on the over (going to go over the expected total) or the under (going under the expected total)."),
br(),
br(),
p(em("E.g."), "the super bowl between the Chiefs and the Buccaneers this year (Super Bowl 55), had a spread of Chiefs -3.5 and an over/under of 57.7. This means that the Chiefs were favored by 3.5 points, and they had to win by 3.5 points or else they wouldn't cover the spread. The Buccaneers on the other hand, could lose by less than 3.5 points or just win to cover the spread. The Buccaneers came out on top 31-9, so Chiefs didn't cover and the game went under since the total points scored was under 57.7."),
br(),
br(),
h3("Summary Information"),
br(),
br(),
# Each textOutput() id below must match a renderText() output in server.R.
p("Our dataset includes ", textOutput("num_obs"), " observations that contain values in the favorite team and spread favorite columns. We specifically chose to keep the observations with these variables since we wanted to study the trend of the betting. This turned out to be around ",
textOutput("num_unique_years"), "seasons worth of data with ",
textOutput("num_different_teams"), " different teams. Thus, ignoring the rows without spread data, the smallest spread turned out to be ", textOutput("favorite_min_spread"), " , which which is also considered a 50/50 game or a tossup. Our biggest spread turned out to be ", textOutput("favorite_max_spread"), " which is considered to be a very one sided game. However,our mean turned out to be ", textOutput("favorite_spread_mean"), " which was much closer to a 50/50 game than a one sided game. We also found that the proportion of home teams that were favored was ", textOutput("prop_home_favorite"), " and of the home favorites we found the proportion to cover the spread to be ", textOutput("prop_home_favorite_cover") )
)
),
# NOTE(review): the trailing comma above leaves an empty argument to
# wellPanel() -- confirm this evaluates cleanly.
)
)
# Betting Accuracy Page: two plotly charts showing how often the projected
# favorite actually won -- per season overall, and per team within a chosen
# season (driven by team_input / year_input).
page_two <- tabPanel(
"Betting Accuracy",
style = "margin-top: -20px",
icon = icon("i", "fa-bullseye"),
titlePanel("Betting Accuracy"),
sidebarLayout(
sidebarPanel(style = "margin-top: 10px",
h4("A key aspect of sports betting is making predictions on who will win the game. In this section, we examine how those predictions stacked up with actual game outcomes."),
p("This first plot is intended to show the relationship between the average percent accuracy of projected winners actually winning over the seasons. For the first couple years of data, there are not many games recorded which results in outliers where the percent accuracy is either 100% or 0%. From the year 1980 and beyond, we can see a general trend represented with the red trend line of a percent accuracy around 60%. This means that for each season, where there are multiple games played, the average accuracy of the projected favorite team winning is about 60%. This plot is answering the question of whether the accuracy of projected favorites changed for the 2020 season due to the COVID 19 pandemic and an adjusted season. In the plot, we can see that there was no significant change in the accuracy of predicting winners, as the point for 2020 percent accuracy lies within the expected values represented by the shadow of the trend line."),
br(),
p("This second plot allows you to choose a team and a season year to see
how well bettors are able to predict game outcomes for home games. *Tip:
more recent seasons have more data!"),
team_input,
year_input
),
mainPanel(
# Output ids must match the plotly renderers in server.R.
wellPanel(style = "margin-top: 10px; padding: 6px",
"The Average Percent Accuracy of Projected Favorites by Season",
plotlyOutput("bets_accuracy_plot")
),
wellPanel(style = "margin-top: 10px; padding: 6px",
"A Team's Percent Accuracy in a Given Season",
plotlyOutput("team_season_accuracy")
)
)
)
)
# Over/Under Page: weekly boxplots of over/under lines for a chosen season,
# with optional trend-line overlays and an opacity slider for the boxes.
page_three <- tabPanel(
"Uncertainty in the Over/Under Lines",
style = "margin-top: -20px",
icon = icon("i", "fa-question"),
titlePanel("Uncertainty in the Over/Under Lines"),
sidebarLayout(
sidebarPanel(style = "margin-top: 10px",
h4("Though the spread of games can tell us the uncertainy and favorites of games, Over/Under lines can tell us more about what the odd makers are seeing."),
p("In the unprecedented 2020 NFL season, we saw games being played with no fans to minimal team interaction before the season. This was a season never like before, and it proved to show in the Over/Under lines of games. In this plot, we are able to observe the distribution of the Over/Under lines of each week throughout the given season. In addition, we are able to compare the overall averages of Over/Under lines and total points scored, and the season averages. I included this to show how the trends might differ or be similar."),
br(),
# The inline-block divs place the season picker and playoff toggle
# side by side within the sidebar.
div(style="display: inline-block;vertical-align:top; width: 150px;", year_input2),
div(style="display: inline-block;vertical-align:top; width: 50px;",HTML("<br>")),
div(style="display: inline-block;vertical-align:top; width: 150px;", playoff_input),
# chooseSliderSkin() (shinyWidgets) styles the opacity slider globally.
trend_input, chooseSliderSkin(
skin = "Flat",
color = "#1C2833"
), opacitybp_input
),
mainPanel(style = "margin-top: 10px",
wellPanel(style = "padding: 6px",
"Distribution of Over/Under Lines for Every Week of the Season",
plotlyOutput("ou_boxplot")
),
wellPanel("We are able to see that in many cases the set Over/Under Lines was much lower compared to the total points scored. However, it is a common trend that oddsmakers increase the line as we see total points scored increase. Especially in the 2020 season, we can see that the average total scored points appears to be higher than the Over/Under Line for the first couple of weeks. It dipped for a bit after that but took a bit of a bounce-back after week 10. Overall, we do see that points are being scored at an increasing rate, causing the Over/Under Lines to go up as well. For the past decade or so, we have seen the average total points scored deviate away from the average calculated across all seasons."
)
)
)
)
# Average Spread Page: a single plotly chart of average point spread per
# week, filtered by the multi-select week_input widget.
page_four <- tabPanel(
"Weeks & Seasons Average Spread",
style = "margin-top: -20px",
icon = icon("i", "fa-arrows-alt-h"),
titlePanel("Weeks & Seasons Average Spread"),
sidebarLayout(
sidebarPanel(style = "margin-top: 10px", week_input,
p("As we mentioned on an introduction page, spread is the expected point margin by which a team will win or lose by. Point spread bets are a popular type of sports bet that you can make. Point spread bets is also mostly likely to be a big part of your winning betting strategy. Matter of fact, many successful professional sports bettors use the point spread bets stategy to make up their winning stategy. As the world was hit with COVID 19, I wanted to see if it would affect the average spread in the NFL sport betting at all. It turns out out that COVID 19 does not affect the average spread. However, You can still utilize this plot by studying the trend of the spread and once you can pin out the pattern, you'll have a higher chance of spotting out the value and pick out the winners by the amount point spread more confidently."),
# NOTE(review): the trailing comma above leaves an empty argument to
# sidebarPanel() -- confirm this evaluates cleanly.
),
mainPanel(style = "margin-top: 10px",
wellPanel(style = "padding: 6px",
plotlyOutput("week_spreadplot")
)
)
)
)
# Conclusion Page: team reflections presented as CSS "flip cards" (the
# flip-card classes are defined in styles.css, included by the ui below),
# plus a summary data table and a link-out image.
page_five <- tabPanel(
"Finishing Thoughts",
style = "margin-top: -20px",
icon = icon("i", "fa-check-square"),
titlePanel("Finishing Thoughts"),
wellPanel(style = "margin-top: 21px",
h4("After completing this project, we have come up with important insights for people who are interested in betting on NFL games. Using knowledge gained from this project, potential bettors are better equipped to make smart predictions about who will win a game."),
h4(
"All in all, the pandemic resulted in uncertainty before the start of the season, but that uncertainty chipped away as the season progressed. Linemakers were able to adapt to the high scoring play-style of the teams over this past season. Now, line makers now have data from a season under a pandemic they can use to set better lines. Even for bettors, they will have the data need to make smarter and safer bets for future seasons.",
em("(Hover over each title to get each of our reflections.)"))
),
# One flip card per team member; the raw HTML relies on the flip-card CSS
# classes from styles.css for the hover/flip behavior.
fluidRow(
column(4,
tags$div(HTML(
"<div class=flip-card>
<div class=flip-card-inner>
<div class=flip-card-front>
<h2 style=text-align:center;vertical-align:middle>Betting Accuracy</h2>
</div>
<div class=flip-card-back>
<i style=text-align:center>Maddie: I analyzed how the accuracy of projected favorites changed in the 2020 season. I explore NFL betting accuracy which has consistently been around 60% accurate for the more recent seasons that we have a complete set of data for where all season's games are reported. Looking specifically at the 2020 season, we can see that here was no significant change in the accuracy of predicting winners, as it is around 63% accurate. Knowing that the 2020 NFL season was shaken up due to the COVID 19 pandemic, it is suprising that sports bets continue to be relatively accurate.</i>
</div>
</div>
</div>"
))),
column(4,
tags$div(HTML(
"<div class=flip-card>
<div class=flip-card-inner>
<div class=flip-card-front>
<h2 style=text-align:center>Uncertainty in the Over/Under Lines</h2>
</div>
<div class=flip-card-back>
<i style=text-align:center; padding:30px>Kobe: It was interesting to see how the Over/Under lines differed amongst different seasons. We saw odds makers didn't take into account more factors to why offenses would start scoring more in the 2020 season as we saw totals going way over the line in the first few weeks. Odd makers definitely had a very tough time making lines in this unprecedented season, the first of its kind. However, I think they will have much better lines for the 2021 season as they were able to collect data for one season through a pandemic. Overall, we observed a trend of uncertainty before the start of the season, as Over/Under Lines and total scored points in the first few weeks of 2020 had wide margins compared to the other seasons.</i>
</div>
</div>
</div>"
))),
column(4,
tags$div(HTML(
"<div class=flip-card>
<div class=flip-card-inner>
<div class=flip-card-front>
<h2>Weeks & Seasons Average Spread</h2>
</div>
<div class=flip-card-back>
<i>Bryan: I analyzed how Covid 19 would affect the average spread. In interactive Visuals part 3, I explore the changes in average spread per week of each NFL seasons. From analyzing the trend of average of all the seasons before 2020 and comparing it with the 2020 NFL season, I see that there was no correlation between Covid 19 and the average spread. The average spread still moved in a normal trend. From what I see, the average spread from week 1 to week 18 from the year 2015 to 2020. The spread average never exceed or fall by 1.5 points each following year which was unique and cool to see. </i>
</div>
</div>
</div>"
)))
),
br(),
fluidRow(
column(5,
wellPanel(
# NOTE(review): "aggragate" is a typo, but the id must match the
# server's renderDataTable() output name -- rename both together
# or not at all.
dataTableOutput("aggragate_table")
)
),
column(5,
wellPanel(style = "width: 790px; height: 195px; padding: 4px",
p("Interested in placing bets? Check out this table showing each team's average score during a home game. This will be helpful to new bettors since for each NFL game the oddsmakers set a number of points in which the favorite team is favored by. By making this table it allows the bettors to see the average scores of the favorite home team and either choose for the favored team to win by more than the number of points set, or bet on the underdogs to lose by less than the number of points, it can also work the other way around as well. Therefore by figuring out the favorite team and the average score of the hometeam, it'll give us a better chance of the prediction on how much points that the bettor's team will win or lose by.",
br(),
br(),
em("Click on the image below to get a list of betting sites available.")),
# NOTE(review): the trailing comma above leaves an empty argument to
# column() -- confirm this evaluates cleanly.
),
# Link-out image to a list of betting sites.
wellPanel(style = "width: 790px; height: 320px; padding: 6px",
a(href ="https://www.oddsshark.com/nfl/sites", img(src = 'ball_money.png', alt = "Football Showered with Money", style = "width: 775px; height: 305px;"), style = "text-align: center; font-size: 10px; display: block;")
)
)
)
)
# Putting everything together: assemble the five tabPanels into a navbar app.
# The tags$div in the title slot renders the NFL logo and pulls in the
# project's custom stylesheet (needed by the flip cards on page_five).
ui <- fluidPage(
navbarPage(
theme = shinytheme('flatly'),
tags$div(tags$img(src='nfllogo.png', width = 29, height = 40, style="float:left; margin-left: 5px; margin-right: 10px; margin-top: -10px"), includeCSS("styles.css")),
page_one,
page_two,
page_three,
page_four,
page_five
)
)
# Set up gitlab CI for the package at `path`: copies a `.gitlab-ci.yml`
# template plus a `.gitlab-ci/` directory holding the R script the CI job
# runs. Called for its side effects; returns NULL invisibly.
use_gitlab_ci <- function(path = ".",
force = is_force(),
ignore = TRUE) {
pkg <- as.package(path)
# Top-level CI configuration file.
use_template("dot_gitlab-ci.yml", ".gitlab-ci.yml", ignore = ignore,
force = force, pkg = pkg)
# NOTE(review): `ignore` is hard-coded to TRUE here while the template
# calls honor the caller's `ignore` argument -- confirm whether the
# directory is meant to always be .Rbuildignore'd or should follow `ignore`.
use_directory(".gitlab-ci", ignore = TRUE, pkg = pkg)
# R script executed by the gitlab.com CI job.
use_template(file.path("dot_gitlab-ci", "gitlab-com.R"),
file.path(".gitlab-ci", "gitlab-com.R"),
ignore = ignore, force = force, pkg = pkg)
return(invisible(NULL))
}
# Is `x` (a gitlab job record, a named list) the package-check job, i.e. the
# job named "packager" running in the "check" stage? Returns a single
# TRUE/FALSE; missing elements (getElement() -> NULL) compare as FALSE.
is_check <- function(x) {
  identical(getElement(x, "stage"), "check") &&
    identical(getElement(x, "name"), "packager")
}
# Case-insensitively match the "name" element of `x` against `name`
# (the comparison value is expected to already be lower case).
is_my_name <- function(x, name) {
  tolower(getElement(x, "name")) == name
}
#' Read a \verb{gitlab} Check Log
#'
#' For a given user's project, the last log for jobs for name and stage "check"
#' will be read. This is assumed to be the output of \command{R CMD check},
#' \code{\link[rcmdcheck:rcmdcheck]{rcmdcheck::rcmdcheck}}
#' \code{\link[devtools:check]{devtools::check}}, or the like.
#' @param user The user's name on \verb{gitlab}.
#' @param project The project's name on \verb{gitlab}.
#' @param private_token The user's private token on \verb{gitlab}. If
#' \code{NULL}, no request is made and \code{NULL} is returned.
#' @param ... Arguments passed to \code{\link[httr:GET]{httr::GET}}.
#' @return A character vector containing the lines of the \verb{gitlab} log.
#' @keywords internal
#' @export
#' @examples
#' \dontrun{
#' gitlab_token <- readLines(file.path("~", ".gitlab_private_token.txt"))
#' if (Sys.info()[["nodename"]] == "fvafrdebianCU") {
#'     j <- get_gitlab_log(user = "fvafrcu", project = "packager",
#'                         private_token = gitlab_token,
#'                         httr::use_proxy("10.127.255.17", 8080))
#' } else {
#'     j <- get_gitlab_log(user = "fvafrcu", project = "packager",
#'                         private_token = gitlab_token)
#' }
#'
#' cat(j, sep = "\n")
#' }
get_gitlab_log <- function(user, project, private_token, ...) {
    if (is.null(private_token)) {
        # Without a token we cannot query the API at all.
        job <- NULL
    } else {
        url <- paste0("https://gitlab.com/api/v4/users/", user, "/projects",
                      "?per_page=100")
        names(private_token) <- "PRIVATE-TOKEN"
        r <- httr::GET(url, httr::add_headers(.headers = private_token), ...)
        projects <- httr::content(r)
        # Locate the requested project among the user's projects. vapply()
        # (rather than sapply()) guarantees a logical vector even when the
        # API returns an empty project list, keeping any() well-defined.
        project_index <- vapply(projects, is_my_name, logical(1), project)
        if (!any(project_index)) throw(paste0("Could not find `", project,
                                              "` on ", url, "."))
        my_project <- projects[project_index][[1]]
        url <- paste("https://gitlab.com/api/v4/projects",
                     my_project[["id"]], "jobs/", sep = "/")
        r <- httr::GET(url, httr::add_headers(.headers = private_token), ...)
        jobs <- httr::content(r)
        check_jobs <- jobs[vapply(jobs, is_check, logical(1))]
        # Guard against projects that never ran a check job; the previous
        # code failed here with an uninformative subscript error.
        if (length(check_jobs) == 0)
            throw(paste0("Could not find any check job for `", project, "`."))
        # The first matching job is treated as the most recent check run.
        last_check_jobs_url <- check_jobs[[1]][["web_url"]]
        r <- httr::GET(paste(last_check_jobs_url, "raw", sep = "/"), ...)
        job <- httr::content(r)
        if (!is.null(job))
            job <- unlist(strsplit(job, split = "\n"))
    }
    return(job)
}
#' Provide a \verb{gitlab} \acronym{URL} for a Given Path
#'
#' @template package_path
#' @return a character string giving a \verb{github} \acronym{URL}.
#' @keywords internal
#' @export
#' @examples
#' path <- file.path(tempdir(), "myPackage")
#' unlink(path, recursive = TRUE)
#' usethis::create_package(path, open = FALSE)
#' try(provide_gitlab_url(path))
#' gert::git_init(path)
#' provide_gitlab_url(path)
#' invisible(desc::desc_set(Package = "bar", file = path))
#' provide_gitlab_url(path)
provide_gitlab_url <- function(path = ".") {
# Prefer the URL of an already-configured git remote, if any.
url <- get_git_url(get_remote_url(path))
if (is.null(url)) {
# No remote: construct a plausible gitlab.com URL instead.
if (!uses_git(path)) {
throw(paste(path, "is not a git repository"))
} else {
# Default repository name: the git working-tree directory.
directory <- basename(gert::git_find(path))
if (is_r_package(path)) {
# For a package, the DESCRIPTION name wins over the directory name.
package_name <- strip_off_attributes(desc::desc_get("Package",
file = path)
)
if (package_name != directory) {
warning("The package's name and root directory differ, ",
"sticking with the name as retrieved from file ",
"DESCRIPTION.")
directory <- package_name
}
}
# Use the committer name from the git signature as the gitlab user name.
git_signature <- get_git_signature(path, verbose = TRUE)
name <- getElement(gert::git_signature_parse(git_signature),
"name")
url <- paste("https://gitlab.com", name, directory, sep = "/")
}
}
return(url)
}
| /R/gitlab.R | no_license | jimsforks/packager | R | false | false | 4,853 | r | use_gitlab_ci <- function(path = ".",
force = is_force(),
ignore = TRUE) {
pkg <- as.package(path)
use_template("dot_gitlab-ci.yml", ".gitlab-ci.yml", ignore = ignore,
force = force, pkg = pkg)
use_directory(".gitlab-ci", ignore = TRUE, pkg = pkg)
use_template(file.path("dot_gitlab-ci", "gitlab-com.R"),
file.path(".gitlab-ci", "gitlab-com.R"),
ignore = ignore, force = force, pkg = pkg)
return(invisible(NULL))
}
is_check <- function(x) {
is_check_stage <- identical(getElement(x, "stage"), "check")
is_check_job <- identical(getElement(x, "name"), "packager")
is_check <- is_check_stage && is_check_job
return(is_check)
}
is_my_name <- function(x, name) return(tolower(getElement(x, "name")) == name)
#' Read a \verb{gitlab} Check Log
#'
#' For a given user's project, the last log for jobs for name and stage "check"
#' will be read. This is assumed to be the output of \command{R CMD check},
#' \code{\link[rcmdcheck:rcmdcheck]{rcmdcheck::rcmdcheck}}
#' \code{\link[devtools:check]{devtools::check}}, or the like.
#' @param user The user's name on \verb{gitlab}.
#' @param project The project's name on \verb{gitlab}.
#' @param private_token The user's private token on \verb{gitlab}.
#' @param ... Arguments passed to \code{\link[httr:GET]{httr::GET}}.
#' @return A character vector containing the lines of the \verb{gitlab} log.
#' @keywords internal
#' @export
#' @examples
#' \dontrun{
#' gitlab_token <- readLines(file.path("~", ".gitlab_private_token.txt"))
#' if (Sys.info()[["nodename"]] == "fvafrdebianCU") {
#' j <- get_gitlab_log(user = "fvafrcu", project = "packager",
#' private_token = gitlab_token,
#' httr::use_proxy("10.127.255.17", 8080))
#' } else {
#' j <- get_gitlab_log(user = "fvafrcu", project = "packager",
#' private_token = gitlab_token)
#' }
#'
#' cat(j, sep = "\n")
#' }
get_gitlab_log <- function(user, project, private_token, ...) {
if (is.null(private_token)) {
job <- NULL
} else {
url <- paste0("https://gitlab.com/api/v4/users/", user, "/projects",
"?per_page=100")
names(private_token) <- "PRIVATE-TOKEN"
r <- httr::GET(url, httr::add_headers(.headers = private_token), ...)
projects <- httr::content(r)
project_index <- sapply(projects, is_my_name, project)
if (!any(project_index)) throw(paste0("Could not find `", project,
"` on ", url, "."))
my_project <- projects[project_index][[1]]
url <- paste("https://gitlab.com/api/v4/projects",
my_project[["id"]], "jobs/", sep = "/")
r <- httr::GET(url, httr::add_headers(.headers = private_token), ...)
jobs <- httr::content(r)
check_jobs <- jobs[sapply(jobs, is_check)]
last_check_jobs_url <- check_jobs[[1]][["web_url"]]
r <- httr::GET(paste(last_check_jobs_url, "raw", sep = "/"), ...)
job <- httr::content(r)
if (!is.null(job))
job <- unlist(strsplit(job, split = "\n"))
}
return(job)
}
#' Provide a \verb{gitlab} \acronym{URL} for a Given Path
#'
#' @template package_path
#' @return a character string giving a \verb{github} \acronym{URL}.
#' @keywords internal
#' @export
#' @examples
#' path <- file.path(tempdir(), "myPackage")
#' unlink(path, recursive = TRUE)
#' usethis::create_package(path, open = FALSE)
#' try(provide_gitlab_url(path))
#' gert::git_init(path)
#' provide_gitlab_url(path)
#' invisible(desc::desc_set(Package = "bar", file = path))
#' provide_gitlab_url(path)
provide_gitlab_url <- function(path = ".") {
url <- get_git_url(get_remote_url(path))
if (is.null(url)) {
if (!uses_git(path)) {
throw(paste(path, "is not a git repository"))
} else {
directory <- basename(gert::git_find(path))
if (is_r_package(path)) {
package_name <- strip_off_attributes(desc::desc_get("Package",
file = path)
)
if (package_name != directory) {
warning("The package's name and root directory differ, ",
"sticking with the name as retrieved from file ",
"DESCRIPTION.")
directory <- package_name
}
}
git_signature <- get_git_signature(path, verbose = TRUE)
name <- getElement(gert::git_signature_parse(git_signature),
"name")
url <- paste("https://gitlab.com", name, directory, sep = "/")
}
}
return(url)
}
|
library(tidyverse)
library(phyloseq)
library(vegan)

# NOTE(review): fill in the project directory before running; `setwd('')`
# errors as written (it is a placeholder).
setwd('')

# Load the filtered phyloseq object and convert each sample's counts to
# relative abundances (proportions within the sample).
ps <- readRDS('Data/PhyloseqObject_filtered.rds')
ps <- transform_sample_counts(ps, function(otu) {otu/sum(otu)})

# Fix the RNG seed so the ordination/permutation results are reproducible.
# (Was `setseed(3)`, which is not a function; base R spells it `set.seed`.)
set.seed(3)

# Subset the data, then call.
# NOTE(review): `multiBeta()` is defined further down this script -- run the
# function definitions below before executing this call.
psn <- subset_samples(ps, Run=='2' & Gonad=='intact')
multiBeta(psn, "Run2 Intact XXF vs XYF", "vegan/Run2_Intact_XXF_XYF", "Chromosome", "Chromosome", "NMDS")
# Run every beta-diversity measure for one phyloseq object: one ordination
# plot per distance metric, then print the PERMANOVA p-values joined with
# " | " (handy for pasting into a markdown table row).
multiBeta <- function(ps, title, outfile_base, label="", variable, ordMethod){
  # distance id -> human-readable caption, in output order
  metrics <- c(bray = "Bray Curtis", jaccard = "Jaccard",
               unifrac = "Unifrac", wunifrac = "Weighted Unifrac")
  pvals <- vapply(names(metrics), function(m) {
    beta_with_plot(ps, title, paste0(outfile_base, "_", m, ".png"),
                   variable, label, ordMethod, m, metrics[[m]])
  }, numeric(1))
  print(paste(pvals, collapse = " | "))
}
# Run a PERMANOVA for `variable` on the chosen beta-diversity distance, draw
# the ordination (top 50 taxa only) with normal- and t-ellipses per group,
# save it to `outfile`, and return the PERMANOVA p-value.
beta_with_plot <- function(ps, title, outfile, label="", variable, ordMethod, distance='bray', distanceName="Bray Curtis"){
# Calculate p value
distance_matrix <- phyloseq::distance(ps, method=distance)
sample_df <- data.frame(sample_data(ps))
# Build the formula from the (string) variable name, e.g.
# "distance_matrix ~ Chromosome".
formula <- as.formula(paste0('distance_matrix ~ ', variable, collapse = ''))
# NOTE(review): vegan::adonis was deprecated and later removed in favor of
# adonis2 -- verify against the installed vegan version (the result
# structure `ad$aov.tab` differs under adonis2).
ad <- adonis(formula, data=sample_df)
pval <- ad$aov.tab$`Pr(>F)`[1]
# Only consider the 50 most numerous taxa
ps = prune_taxa(names(sort(taxa_sums(ps), TRUE)[1:50]), ps)
formula <- as.formula(paste0(' ~ ', variable, collapse = ''))
ord = ordinate(ps, formula = formula, ordMethod, distance)
ordplot <- plot_ordination(ps, ord, "samples", color=variable)
# Layer two ellipse styles: dashed normal-theory outline plus a shaded
# t-distribution polygon per group.
# NOTE(review): aes_string() is deprecated in recent ggplot2; consider
# aes(fill = .data[[variable]]) when updating.
p <- ordplot +
stat_ellipse(type = "norm", linetype = 2) +
stat_ellipse(type = "t", geom='polygon', alpha=0.1, aes_string(fill=variable), show.legend = F) +
labs(title = title, subtitle = paste('P-value:',pval), caption=distanceName, color=label) +
theme_bw()
ggsave(outfile, plot=p, device='png', width=3, height=3)
return(pval)
}
| /src/NMDS_With_Ellipse.R | no_license | NielInfante/WV_16S | R | false | false | 2,224 | r | library(tidyverse)
library(phyloseq)
library(vegan)
setwd('')
ps <- readRDS('Data/PhyloseqObject_filtered.rds')
ps <- transform_sample_counts(ps, function(otu) {otu/sum(otu)})
setseed(3)
# Subset the data, then call
psn <- subset_samples(ps, Run=='2' & Gonad=='intact')
multiBeta(psn, "Run2 Intact XXF vs XYF", "vegan/Run2_Intact_XXF_XYF", "Chromosome", "Chromosome", "NMDS")
# Call this function, it will do all the beta measures you add.
multiBeta <- function(ps, title, outfile_base, label="", variable, ordMethod){
pv_bray <- beta_with_plot(ps, title, paste0(outfile_base, '_bray.png'), variable, label, ordMethod, 'bray', "Bray Curtis")
pv_jaccard <- beta_with_plot(ps, title, paste0(outfile_base, '_jaccard.png'), variable, label, ordMethod, 'jaccard', "Jaccard")
pv_unifrac <- beta_with_plot(ps, title, paste0(outfile_base, '_unifrac.png'), variable, label, ordMethod, 'unifrac', "Unifrac")
pv_wunifrac <- beta_with_plot(ps, title, paste0(outfile_base, '_wunifrac.png'), variable, label, ordMethod, 'wunifrac', "Weighted Unifrac")
# If you want to put the results in a md table
print(paste(pv_bray, pv_jaccard, pv_unifrac, pv_wunifrac, sep=" | "))
}
beta_with_plot <- function(ps, title, outfile, label="", variable, ordMethod, distance='bray', distanceName="Bray Curtis"){
# Calculate p value
distance_matrix <- phyloseq::distance(ps, method=distance)
sample_df <- data.frame(sample_data(ps))
formula <- as.formula(paste0('distance_matrix ~ ', variable, collapse = ''))
ad <- adonis(formula, data=sample_df)
pval <- ad$aov.tab$`Pr(>F)`[1]
# Only consider the 50 most numerous taxa
ps = prune_taxa(names(sort(taxa_sums(ps), TRUE)[1:50]), ps)
formula <- as.formula(paste0(' ~ ', variable, collapse = ''))
ord = ordinate(ps, formula = formula, ordMethod, distance)
ordplot <- plot_ordination(ps, ord, "samples", color=variable)
p <- ordplot +
stat_ellipse(type = "norm", linetype = 2) +
stat_ellipse(type = "t", geom='polygon', alpha=0.1, aes_string(fill=variable), show.legend = F) +
labs(title = title, subtitle = paste('P-value:',pval), caption=distanceName, color=label) +
theme_bw()
ggsave(outfile, plot=p, device='png', width=3, height=3)
return(pval)
}
|
# Task 1: three 3x4 matrices -- 0:11 filled column-wise, a constant matrix
# of 2s, and three values sampled from 1:3 recycled across the 12 cells.
ma1 = matrix(0:11, nrow = 3, ncol = 4)
ma2 = matrix(2, nrow = 3, ncol = 4)
ma3 = matrix(sample(1:3, 3), nrow = 3, ncol = 4)
# Task 2: arithmetic on matrices of equal dimensions.
ma1 + ma2
ma1 - ma2
ma1 * ma2
ma1 / ma2
ma1 + ma3
ma1 - ma3
ma1 * ma3
ma1 / ma3
# The calculations behave much like their mathematical counterparts.
# Arithmetic operators are applied element-wise, on corresponding cells.
# Task 3: single element at row 1, column 4.
ma1[1, 4]
# Task 4: logical subsetting -- all values of ma3 greater than 2.
ma3[ma3 > 2]
# Task 5: bind the two matrices column-wise into a 3x8 matrix.
ma4 = cbind(ma1, ma3)
# Task 6: data frame of the last three dates plus a city name.
ra1 = data.frame(data = c(Sys.Date(), Sys.Date() - 1, Sys.Date() - 2),
miasto = c("Puszczykowo", "Puszczykowo", "Puszczykowo"),
stringsAsFactors = FALSE)
# Task 7: daily minimum and maximum temperatures.
ra2 = data.frame(t.min = c(5.3, 4.6, 2.9),
t.max = c(11.1, 14.6, 9),
stringsAsFactors = FALSE)
# Task 8: combine both frames and append the daily mean temperature
# (average of columns 3 and 4, i.e. t.min and t.max).
ra3 = cbind(ra1, ra2)
ra4 = data.frame (tmean = ((ra3[,3] + ra3[,4])/2),
stringsAsFactors = FALSE)
ra3 = cbind(ra3, ra4)
# Task 9: rename columns (data = date, miasto = city, tsr = mean temp).
colnames(ra3) = c("data" , "miasto" , "t.min" , "tmaks" , "tsr")
# Task 10: dates whose mean temperature exceeds 8.
subset(ra3[,c(1,5)], tsr > 8)
# Task 11: a list holding a vector, a matrix and a data frame.
li1 = list(c(10:1),
ma4,
ra3)
# Task 12: extract the first list element (the 10:1 vector).
wektor_1 = li1[[1]]
# Task 13: mean of the tsr column of the data frame stored in the list.
mean(li1[[3]][["tsr"]])
# Task 14: coerce the date/city data frame to a matrix
# (columns are coerced to a common type).
as.matrix(ra1)
| /Jedrzej-Kowal.R | no_license | Kowal-Jedrzej/zlozone-obiekty | R | false | false | 1,268 | r | #Zadanie1
ma1 = matrix(0:11, nrow = 3, ncol = 4)
ma2 = matrix(2, nrow = 3, ncol = 4)
ma3 = matrix(sample(1:3, 3), nrow = 3, ncol = 4)
#Zadanie2
ma1 + ma2
ma1 - ma2
ma1 * ma2
ma1 / ma2
ma1 + ma3
ma1 - ma3
ma1 * ma3
ma1 / ma3
#Obliczenia działają w sposób zbliczony do matematycznego.
#Działania arytmetyczne wykonują na odpowiadających sobie komórkach.
#Zadanie3
ma1[1, 4]
#Zadanie4
ma3[ma3 > 2]
#Zadanie5
ma4 = cbind(ma1, ma3)
#Zadanie6
ra1 = data.frame(data = c(Sys.Date(), Sys.Date() - 1, Sys.Date() - 2),
miasto = c("Puszczykowo", "Puszczykowo", "Puszczykowo"),
stringsAsFactors = FALSE)
#Zadanie7
ra2 = data.frame(t.min = c(5.3, 4.6, 2.9),
t.max = c(11.1, 14.6, 9),
stringsAsFactors = FALSE)
#Zadanie8
ra3 = cbind(ra1, ra2)
ra4 = data.frame (tmean = ((ra3[,3] + ra3[,4])/2),
stringsAsFactors = FALSE)
ra3 = cbind(ra3, ra4)
#Zadanie9
colnames(ra3) = c("data" , "miasto" , "t.min" , "tmaks" , "tsr")
#Zadanie10
subset(ra3[,c(1,5)], tsr > 8)
#Zadanie11
li1 = list(c(10:1),
ma4,
ra3)
#Zadanie12
wektor_1 = li1[[1]]
#Zadanie13
mean(li1[[3]][["tsr"]])
#Zadanie14
as.matrix(ra1)
|
# Apply dplyr::mutate() only to the rows of `.data` where `condition` is
# TRUE, leaving all other rows untouched.
#
# .data     a data frame
# condition an unquoted logical expression evaluated inside `.data`
# ...       mutate() expressions applied to the selected rows
# envir     enclosing environment for evaluating `condition`
#
# Fixes two defects in the original:
#  * rows where `condition` evaluated to NA used to be selected as all-NA
#    rows and written back, corrupting the frame; NA now means "skip".
#  * `na.rm = TRUE` was forwarded to mutate(), which silently created a
#    literal `na.rm` column in the result.
mutate_cond <- function(.data, condition, ..., envir = parent.frame()) {
  condition <- eval(substitute(condition), .data, envir)
  condition <- condition & !is.na(condition)  # treat NA as FALSE
  .data[condition, ] <- .data[condition, ] %>% mutate(...)
  .data
}
# Smoke-test fixtures: five groups (A-E) of ten rows each with random values.
data <- data.frame(col1 = rep(c("A", "B", "C", "D", "E"), each = 10),
col2 = base::sample(c(1,2,3), 50, replace = TRUE))
# Same layout, but col2 may contain NA -- exercises the NA-condition path.
data2 <- data.frame(col1 = rep(c("A", "B", "C", "D", "E"), each = 10),
col2 = base::sample(c(1,2,3, NA), 50, replace = TRUE))
# Double col2 only within group A; other groups must be unchanged.
test1 <- data %>%
mutate_cond(col1 == 'A', col2 = col2*2)
test2 <- data2 %>%
mutate_cond(col1 == 'A', col2 = col2*2)
| /data_cleaning_scripts/extra_code/TEST_mutate_cond.R | permissive | Canderson156/Arctic-shorebird-RSF-V3 | R | false | false | 661 | r | mutate_cond <- function(.data, condition, ..., envir = parent.frame()) {
condition <- eval(substitute(condition), .data, envir)
.data[condition , ] <- .data[condition, ] %>% mutate(..., na.rm = TRUE)
.data
}
data <- data.frame(col1 = rep(c("A", "B", "C", "D", "E"), each = 10),
col2 = base::sample(c(1,2,3), 50, replace = TRUE))
data2 <- data.frame(col1 = rep(c("A", "B", "C", "D", "E"), each = 10),
col2 = base::sample(c(1,2,3, NA), 50, replace = TRUE))
test1 <- data %>%
mutate_cond(col1 == 'A', col2 = col2*2)
test2 <- data2 %>%
mutate_cond(col1 == 'A', col2 = col2*2)
|
# AFL-fuzzer-generated regression input for IntervalSurgeon's internal C++
# routine rcpp_depth(). The argument values are intentionally degenerate
# (empty/NULL components, extreme integers); do not "clean them up" -- the
# point is that the call must not crash (checked under valgrind).
testlist <- list(pts = integer(0), ends = NULL, starts = NULL, sorted_ends = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), sorted_starts = c(0L, -1627389952L, 682962941L, 546746628L, 599542L, 1800501216L, 1649393244L, -1712958485L, -1313178345L, 1470710473L, 2108708770L, -1965616612L, -533569700L, -771330099L, 853834136L, 2030715618L, -1261966754L, -129171080L, -642498820L, 779827246L, 1878602521L))
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
# Inspect whatever came back; only structure matters here, not the values.
str(result)
result <- do.call(IntervalSurgeon:::rcpp_depth,testlist)
str(result) |
# This file is part of the standard setup for testthat.
# It is recommended that you do not modify it.
#
# Where should you do additional test configuration?
# Learn more about the roles of various files in:
# * https://r-pkgs.org/tests.html
# * https://testthat.r-lib.org/reference/test_package.html#special-files
library(testthat)
library(suddengains)
# Discover and run every test under tests/testthat/ for this package.
test_check("suddengains")
| /tests/testthat.R | permissive | milanwiedemann/suddengains | R | false | false | 382 | r | # This file is part of the standard setup for testthat.
# It is recommended that you do not modify it.
#
# Where should you do additional test configuration?
# Learn more about the roles of various files in:
# * https://r-pkgs.org/tests.html
# * https://testthat.r-lib.org/reference/test_package.html#special-files
library(testthat)
library(suddengains)
test_check("suddengains")
|
# NOTE(review): this looks like a code-generation TEMPLATE, not a runnable
# script -- tokens such as `.yy.`, `.year.` and `.FYC.` appear to be
# placeholders substituted with a concrete survey year before execution.
# Verify against the generator before editing any `.yy.`-bearing line.
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Avoid errors from strata with a single PSU in the survey design.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Early survey years use year-suffixed design/weight variable names.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Treat negative ages as missing, then take the latest non-missing age.
# (funs() is deprecated in current dplyr -- works only on older versions.)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Insurance coverage
# To compute for all insurance categories, replace 'insurance' in the 'svyby' function with 'insurance_v2X'
if(year == 1996){
FYC <- FYC %>%
mutate(MCDEV96 = MCDEVER, MCREV96 = MCREVER,
OPAEV96 = OPAEVER, OPBEV96 = OPBEVER)
}
# Before 2011 the 65+ insurance recode (INSURC) must be derived by hand
# from the Medicaid/Medicare/other-public/private indicator variables.
if(year < 2011){
FYC <- FYC %>%
mutate(
public = (MCDEV.yy.==1|OPAEV.yy.==1|OPBEV.yy.==1),
medicare = (MCREV.yy.==1),
private = (INSCOV.yy.==1),
mcr_priv = (medicare & private),
mcr_pub = (medicare & !private & public),
mcr_only = (medicare & !private & !public),
no_mcr = (!medicare),
ins_gt65 = 4*mcr_only + 5*mcr_priv + 6*mcr_pub + 7*no_mcr,
INSURC.yy. = ifelse(AGELAST < 65, INSCOV.yy., ins_gt65)
)
}
# Human-readable factor labels for both insurance classifications.
FYC <- FYC %>%
mutate(insurance = recode_factor(INSCOV.yy., .default = "Missing", .missing = "Missing",
"1" = "Any private, all ages",
"2" = "Public only, all ages",
"3" = "Uninsured, all ages")) %>%
mutate(insurance_v2X = recode_factor(INSURC.yy., .default = "Missing", .missing = "Missing",
"1" = "<65, Any private",
"2" = "<65, Public only",
"3" = "<65, Uninsured",
"4" = "65+, Medicare only",
"5" = "65+, Medicare and private",
"6" = "65+, Medicare and other public",
"7" = "65+, No medicare",
"8" = "65+, No medicare"))
# Education
# The years-of-education variable changed name/coding across survey years.
if(year <= 1998){
FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.)
}else if(year <= 2004){
FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
# 2012-2015 use the EDRECODE categorical coding; other years use EDUCYR.
if(year >= 2012 & year < 2016){
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
high_school = (EDRECODE == 13),
some_college = (EDRECODE > 13))
}else{
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
high_school = (EDUCYR == 12),
some_college = (EDUCYR > 12))
}
# Collapse the three indicators into one factor; minors are "Inapplicable".
FYC <- FYC %>% mutate(
education = 1*less_than_hs + 2*high_school + 3*some_college,
education = replace(education, AGELAST < 18, 9),
education = recode_factor(education, .default = "Missing", .missing = "Missing",
"1" = "Less than high school",
"2" = "High school",
"3" = "Some college",
"9" = "Inapplicable (age < 18)",
"0" = "Missing"))
# Complex-survey design: PSU/stratum identifiers with person-level weights.
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
# Weighted insurance distribution by education, restricted to ages 65+.
results <- svyby(~insurance_v2X, FUN = svymean, by = ~education, design = subset(FYCdsgn, AGELAST >= 65))
print(results)
| /mepstrends/hc_ins/json/code/r/pctPOP__education__ins_ge65__.r | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 3,335 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Insurance coverage
# To compute for all insurance categories, replace 'insurance' in the 'svyby' function with 'insurance_v2X'
if(year == 1996){
FYC <- FYC %>%
mutate(MCDEV96 = MCDEVER, MCREV96 = MCREVER,
OPAEV96 = OPAEVER, OPBEV96 = OPBEVER)
}
if(year < 2011){
FYC <- FYC %>%
mutate(
public = (MCDEV.yy.==1|OPAEV.yy.==1|OPBEV.yy.==1),
medicare = (MCREV.yy.==1),
private = (INSCOV.yy.==1),
mcr_priv = (medicare & private),
mcr_pub = (medicare & !private & public),
mcr_only = (medicare & !private & !public),
no_mcr = (!medicare),
ins_gt65 = 4*mcr_only + 5*mcr_priv + 6*mcr_pub + 7*no_mcr,
INSURC.yy. = ifelse(AGELAST < 65, INSCOV.yy., ins_gt65)
)
}
FYC <- FYC %>%
mutate(insurance = recode_factor(INSCOV.yy., .default = "Missing", .missing = "Missing",
"1" = "Any private, all ages",
"2" = "Public only, all ages",
"3" = "Uninsured, all ages")) %>%
mutate(insurance_v2X = recode_factor(INSURC.yy., .default = "Missing", .missing = "Missing",
"1" = "<65, Any private",
"2" = "<65, Public only",
"3" = "<65, Uninsured",
"4" = "65+, Medicare only",
"5" = "65+, Medicare and private",
"6" = "65+, Medicare and other public",
"7" = "65+, No medicare",
"8" = "65+, No medicare"))
# Education
if(year <= 1998){
FYC <- FYC %>% mutate(EDUCYR = EDUCYR.yy.)
}else if(year <= 2004){
FYC <- FYC %>% mutate(EDUCYR = EDUCYEAR)
}
if(year >= 2012 & year < 2016){
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDRECODE & EDRECODE < 13),
high_school = (EDRECODE == 13),
some_college = (EDRECODE > 13))
}else{
FYC <- FYC %>%
mutate(
less_than_hs = (0 <= EDUCYR & EDUCYR < 12),
high_school = (EDUCYR == 12),
some_college = (EDUCYR > 12))
}
FYC <- FYC %>% mutate(
education = 1*less_than_hs + 2*high_school + 3*some_college,
education = replace(education, AGELAST < 18, 9),
education = recode_factor(education, .default = "Missing", .missing = "Missing",
"1" = "Less than high school",
"2" = "High school",
"3" = "Some college",
"9" = "Inapplicable (age < 18)",
"0" = "Missing"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~insurance_v2X, FUN = svymean, by = ~education, design = subset(FYCdsgn, AGELAST >= 65))
print(results)
|
# Build a tidy data set from the UCI HAR smartphone-activity dump:
# merge train/test splits, label activities, and keep mean()/std() features.
# NOTE(review): stringr is attached but no str_* function is used below --
# possibly a leftover; confirm before removing.
library(stringr)
# Training split: feature matrix plus activity ids as the first column.
train_data <- read.table("UCI HAR Dataset//train//X_train.txt")
train_data_y <- read.table("UCI HAR Dataset//train//y_train.txt")
train_data <- cbind(train_data_y, train_data)
# Test split, assembled the same way.
test_data <- read.table("UCI HAR Dataset//test//X_test.txt")
test_data_y <- read.table("UCI HAR Dataset//test//y_test.txt")
test_data <- cbind(test_data_y, test_data)
data <- rbind(train_data, test_data)
# Replace numeric activity ids with descriptive labels.
# (Assumes activity_labels.txt rows are ordered by id -- TODO confirm.)
activity_labels <- read.table("UCI HAR Dataset//activity_labels.txt")
data[,1] <- factor(data[,1], labels = activity_labels[,2])
features <- read.table("UCI HAR Dataset//features.txt")
colnames(data) <- c("Activity", as.character(features[,2]))
# fixed = TRUE matches the literal substrings "mean()"/"std()", so derived
# columns such as meanFreq() are deliberately excluded.
means_and_stds <- grepl("mean()", features[,2], fixed = TRUE) | grepl("std()", features[,2], fixed = TRUE)
# Leading TRUE keeps the Activity column alongside the selected features.
data <- data[, c(TRUE, means_and_stds)]
write.table(data, "tidy_data.txt", row.names = FALSE)
write.table(data, "tidy_data.txt", row.names = FALSE) | /run_analysis.R | no_license | jillpoilj/getdata_project | R | false | false | 848 | r | library(stringr)
train_data <- read.table("UCI HAR Dataset//train//X_train.txt")
train_data_y <- read.table("UCI HAR Dataset//train//y_train.txt")
train_data <- cbind(train_data_y, train_data)
test_data <- read.table("UCI HAR Dataset//test//X_test.txt")
test_data_y <- read.table("UCI HAR Dataset//test//y_test.txt")
test_data <- cbind(test_data_y, test_data)
data <- rbind(train_data, test_data)
activity_labels <- read.table("UCI HAR Dataset//activity_labels.txt")
data[,1] <- factor(data[,1], labels = activity_labels[,2])
features <- read.table("UCI HAR Dataset//features.txt")
colnames(data) <- c("Activity", as.character(features[,2]))
means_and_stds <- grepl("mean()", features[,2], fixed = TRUE) | grepl("std()", features[,2], fixed = TRUE)
data <- data[, c(TRUE, means_and_stds)]
write.table(data, "tidy_data.txt", row.names = FALSE) |
#' Compare champion with challengers globally
#'
#' The function creates objects that present global model perfromance using various measures. Those date can be easily
#' ploted with \code{plot} function. It uses \code{auditor} package to create \code{\link[auditor]{model_performance}} of all passed
#' explainers. Keep in mind that type of task has to be specified.
#'
#' @param champion - explainer of champion model.
#' @param challengers - explainer of challenger model or list of explainers.
#' @param type - type of the task. Either classification or regression
#'
#' @return An object of the class overall_comparison
#'
#' It is a named list containing following fields:
#' \itemize{
#' \item \code{radar} list of \code{\link[auditor]{model_performance}} objects and other parameters that will be passed to generic \code{plot} function
#' \item \code{accordance} data.frame object of champion responses and challenger's corresponding to them. Used to plot accordance.
#' \item \code{models_info} data.frame containig inforamtion about models used in analysys.
#' }
#'
#' @rdname overall_comparison
#' @export
#'
#' @examples
#' \donttest{
#' library("DALEXtra")
#' library("mlr")
#' task <- mlr::makeRegrTask(
#' id = "R",
#' data = apartments,
#' target = "m2.price"
#' )
#' learner_lm <- mlr::makeLearner(
#' "regr.lm"
#' )
#' model_lm <- mlr::train(learner_lm, task)
#' explainer_lm <- explain_mlr(model_lm, apartmentsTest, apartmentsTest$m2.price, label = "LM")
#'
#' learner_rf <- mlr::makeLearner(
#' "regr.randomForest"
#' )
#' model_rf <- mlr::train(learner_rf, task)
#' explainer_rf <- explain_mlr(model_rf, apartmentsTest, apartmentsTest$m2.price, label = "RF")
#'
#' learner_gbm <- mlr::makeLearner(
#' "regr.gbm"
#' )
#' model_gbm <- mlr::train(learner_gbm, task)
#' explainer_gbm <- explain_mlr(model_gbm, apartmentsTest, apartmentsTest$m2.price, label = "gbm")
#'
#' data <- overall_comparison(explainer_lm, list(explainer_gbm, explainer_rf), type = "regression")
#' plot(data)
#' }
overall_comparison <- function(champion, challengers, type) {
  # Validate the task type first so a typo fails fast, before any work.
  if (!type %in% c("classification", "regression")) {
    stop("Task has to be either classification or regression")
  }
  # Accept a single explainer as well as a list of explainers.
  if (inherits(challengers, "explainer")) {
    challengers <- list(challengers)
  }
  # inherits() instead of class(x) == "explainer": class() may return a
  # vector, which makes the original scalar comparison unreliable.
  if (!inherits(champion, "explainer") ||
      !all(vapply(challengers, inherits, logical(1), what = "explainer"))) {
    stop("Champion and all of challengers has to be explainer objects")
  }
  if (is.null(champion$data)) {
    stop("Data argument has to be passed with explainer")
  }
  if (is.null(champion$y_hat)) {
    stop("Explain function has to be run with precalculate TRUE")
  }
  # Metadata about every model taking part in the comparison.
  models_info <- data.frame(label = champion$label,
                            class = class(champion$model)[1],
                            type = "Champion",
                            stringsAsFactors = FALSE)
  for (e in challengers) {
    models_info <- rbind(models_info,
                         list(label = e$label, class = class(e$model)[1], type = "Challenger"),
                         stringsAsFactors = FALSE)
  }
  # The two task types differed only in the model_performance() call, so the
  # duplicated branches are collapsed into one measure function:
  # classification swaps in the custom "1 - score" measures (new_scores),
  # regression uses auditor's defaults.
  performance <- if (type == "classification") {
    function(e) auditor::model_performance(e, score = NULL, new_score = new_scores)
  } else {
    function(e) auditor::model_performance(e)
  }
  radar_args <- lapply(challengers, performance)
  radar_args$object <- performance(champion)
  radar_args$verbose <- FALSE
  # Champion predictions paired with each challenger's; used for the
  # accordance plot.
  yhats <- NULL
  for (e in challengers) {
    yhats <- rbind(yhats,
                   data.frame("Champion" = champion$y_hat,
                              "Challenger" = e$y_hat,
                              "Label" = e$label),
                   stringsAsFactors = FALSE)
  }
  ret <- list("radar" = radar_args, "accordance" = yhats, "models_info" = models_info)
  class(ret) <- "overall_comparison"
  ret
}
#' Print overall_comparison object
#'
#' @param x an object of class \code{overall_comparison}
#' @param ... other parameters
#'
#' @export
#' @examples
#' \donttest{
#' library("DALEXtra")
#' library("mlr")
#' task <- mlr::makeRegrTask(
#' id = "R",
#' data = apartments,
#' target = "m2.price"
#' )
#' learner_lm <- mlr::makeLearner(
#' "regr.lm"
#' )
#' model_lm <- mlr::train(learner_lm, task)
#' explainer_lm <- explain_mlr(model_lm, apartmentsTest, apartmentsTest$m2.price, label = "LM")
#'
#' learner_rf <- mlr::makeLearner(
#' "regr.randomForest"
#' )
#' model_rf <- mlr::train(learner_rf, task)
#' explainer_rf <- explain_mlr(model_rf, apartmentsTest, apartmentsTest$m2.price, label = "RF")
#'
#' learner_gbm <- mlr::makeLearner(
#' "regr.gbm"
#' )
#' model_gbm <- mlr::train(learner_gbm, task)
#' explainer_gbm <- explain_mlr(model_gbm, apartmentsTest, apartmentsTest$m2.price, label = "gbm")
#'
#' data <- overall_comparison(explainer_lm, list(explainer_gbm, explainer_rf), type = "regression")
#' print(data)
#' }
# Print method for overall_comparison objects: a short textual summary of
# the radar arguments, the accordance table, and the model metadata.
print.overall_comparison <- function(x, ...) {
  # One slot of `radar` is the champion ("object"); the rest are performances.
  n_perf <- length(x$radar) - 1
  cat("Radar Args: ", n_perf, "model_performances detected\n")
  cat("Accordance table head\n")
  print(utils::head(x$accordance))
  cat("Models Info\n")
  print(utils::head(x$models_info))
}
# Confusion-matrix counts for a binary-classification explainer.
#
# explainer  list-like object carrying numeric predictions in `y_hat`
#            and 0/1 true labels in `y`.
# cutoff     probability threshold used to binarize `y_hat`; defaults to
#            0.5, preserving the previously hard-coded behaviour.
#
# Returns a named list with TP, FP, TN and FN counts.
confusionmatrix <- function(explainer, cutoff = 0.5) {
  yhat <- as.numeric(explainer$y_hat > cutoff)
  pred_pos <- yhat == 1
  TP <- sum(explainer$y[pred_pos] == 1)   # predicted positive, truly positive
  FP <- sum(pred_pos) - TP                # predicted positive, truly negative
  TN <- sum(explainer$y[!pred_pos] == 0)  # predicted negative, truly negative
  FN <- sum(!pred_pos) - TN               # predicted negative, truly positive
  list(
    "TP" = TP,
    "FP" = FP,
    "TN" = TN,
    "FN" = FN
  )
}
# Custom "loss" measures for the classification radar plot: each entry is
# 1 minus a classification score, so that for every axis smaller = better.
# Each function receives an auditor/explainer-style object (`au`) exposing
# `y_hat` and `y`, and relies on the sibling confusionmatrix() helper
# (0.5 cutoff) except for AUC, which comes from auditor::score().
new_scores <- list(
"1-auc" = function(au) {
1 - auditor::score(au, score = "auc")$score
},
"1-acc" = function(au) {
conf <- confusionmatrix(au)
# Accuracy: correct predictions over all predictions.
1 - (conf$TP + conf$TN) / (conf$TP + conf$FP + conf$TN + conf$FN)
},
"1-precission" = function(au) {
conf <- confusionmatrix(au)
# Precision: TP / predicted positives. (Name keeps the original typo --
# it is a user-visible axis label; renaming would change plots.)
1 - conf$TP / (conf$TP + conf$FP)
},
"1-recall" = function(au) {
conf <- confusionmatrix(au)
# Recall / sensitivity: TP / actual positives.
1 - conf$TP / (conf$TP + conf$FN)
},
"1-specificity" = function(au) {
conf <- confusionmatrix(au)
# Specificity: TN / actual negatives.
1 - conf$TN / (conf$TN + conf$FP)
},
"1-F1" = function(au) {
conf <- confusionmatrix(au)
# F1: harmonic mean of precision and recall.
1 - (2 * (conf$TP / (conf$TP + conf$FP)) * (conf$TP / (conf$TP + conf$FN))) /
(conf$TP / (conf$TP + conf$FN) + conf$TP / (conf$TP + conf$FP))
}
)
| /R/overall_comparison.R | no_license | pragyanaischool/DALEXtra | R | false | false | 6,612 | r | #' Compare champion with challengers globally
#'
#' The function creates objects that present global model perfromance using various measures. Those date can be easily
#' ploted with \code{plot} function. It uses \code{auditor} package to create \code{\link[auditor]{model_performance}} of all passed
#' explainers. Keep in mind that type of task has to be specified.
#'
#' @param champion - explainer of champion model.
#' @param challengers - explainer of challenger model or list of explainers.
#' @param type - type of the task. Either classification or regression
#'
#' @return An object of the class overall_comparison
#'
#' It is a named list containing following fields:
#' \itemize{
#' \item \code{radar} list of \code{\link[auditor]{model_performance}} objects and other parameters that will be passed to generic \code{plot} function
#' \item \code{accordance} data.frame object of champion responses and challenger's corresponding to them. Used to plot accordance.
#' \item \code{models_info} data.frame containig inforamtion about models used in analysys.
#' }
#'
#' @rdname overall_comparison
#' @export
#'
#' @examples
#' \donttest{
#' library("DALEXtra")
#' library("mlr")
#' task <- mlr::makeRegrTask(
#' id = "R",
#' data = apartments,
#' target = "m2.price"
#' )
#' learner_lm <- mlr::makeLearner(
#' "regr.lm"
#' )
#' model_lm <- mlr::train(learner_lm, task)
#' explainer_lm <- explain_mlr(model_lm, apartmentsTest, apartmentsTest$m2.price, label = "LM")
#'
#' learner_rf <- mlr::makeLearner(
#' "regr.randomForest"
#' )
#' model_rf <- mlr::train(learner_rf, task)
#' explainer_rf <- explain_mlr(model_rf, apartmentsTest, apartmentsTest$m2.price, label = "RF")
#'
#' learner_gbm <- mlr::makeLearner(
#' "regr.gbm"
#' )
#' model_gbm <- mlr::train(learner_gbm, task)
#' explainer_gbm <- explain_mlr(model_gbm, apartmentsTest, apartmentsTest$m2.price, label = "gbm")
#'
#' data <- overall_comparison(explainer_lm, list(explainer_gbm, explainer_rf), type = "regression")
#' plot(data)
#' }
overall_comparison <- function(champion, challengers, type) {
if (class(challengers) == "explainer") {
challengers <- list(challengers)
}
if (any(sapply(challengers, function(x) {
class(x) != "explainer"
})) | class(champion) != "explainer") {
stop("Champion and all of challengers has to be explainer objects")
}
if (is.null(champion$data)) {
stop("Data argument has to be passed with explainer")
}
if (is.null(champion$y_hat)) {
stop("Explain function has to be run with precalculate TRUE")
}
models_info <- data.frame(label = champion$label, class = class(champion$model)[1], type = "Champion", stringsAsFactors = FALSE)
for (e in challengers) {
models_info <- rbind(models_info,
list(label = e$label, class = class(e$model)[1], type = "Challenger"),
stringsAsFactors = FALSE)
}
if (type == "classification") {
radar_args <- lapply(challengers, auditor::model_performance, score = NULL, new_score = new_scores)
radar_args$object <- auditor::model_performance(champion, score = NULL, new_score = new_scores)
radar_args$verbose <- FALSE
yhats <- NULL
for (e in challengers) {
yhats <- rbind(yhats,
data.frame("Champion" = champion$y_hat,
"Challenger" = e$y_hat,
"Label" = e$label),
stringsAsFactors = FALSE)
}
ret <- list("radar" = radar_args, "accordance" = yhats, "models_info" = models_info)
} else if (type == "regression") {
radar_args <- lapply(challengers, auditor::model_performance)
radar_args$object <- auditor::model_performance(champion)
radar_args$verbose <- FALSE
yhats <- NULL
for (e in challengers) {
yhats <- rbind(yhats,
data.frame("Champion" = champion$y_hat,
"Challenger" = e$y_hat,
"Label" = e$label),
stringsAsFactors = FALSE)
}
ret <- list("radar" = radar_args, "accordance" = yhats, "models_info" = models_info)
} else {
stop("Task has to be either classification or regression")
}
class(ret) <- "overall_comparison"
ret
}
#' Print overall_comparison object
#'
#' @param x an object of class \code{overall_comparison}
#' @param ... other parameters
#'
#' @export
#' @examples
#' \donttest{
#' library("DALEXtra")
#' library("mlr")
#' task <- mlr::makeRegrTask(
#' id = "R",
#' data = apartments,
#' target = "m2.price"
#' )
#' learner_lm <- mlr::makeLearner(
#' "regr.lm"
#' )
#' model_lm <- mlr::train(learner_lm, task)
#' explainer_lm <- explain_mlr(model_lm, apartmentsTest, apartmentsTest$m2.price, label = "LM")
#'
#' learner_rf <- mlr::makeLearner(
#' "regr.randomForest"
#' )
#' model_rf <- mlr::train(learner_rf, task)
#' explainer_rf <- explain_mlr(model_rf, apartmentsTest, apartmentsTest$m2.price, label = "RF")
#'
#' learner_gbm <- mlr::makeLearner(
#' "regr.gbm"
#' )
#' model_gbm <- mlr::train(learner_gbm, task)
#' explainer_gbm <- explain_mlr(model_gbm, apartmentsTest, apartmentsTest$m2.price, label = "gbm")
#'
#' data <- overall_comparison(explainer_lm, list(explainer_gbm, explainer_rf), type = "regression")
#' print(data)
#' }
print.overall_comparison <- function(x, ...) {
cat("Radar Args: ", length(x$radar)-1, "model_performances detected\n")
cat("Accordance table head\n")
print(head(x$accordance))
cat("Models Info\n")
print(head(x$models_info))
}
confusionmatrix <- function(explainer) {
yhat <- as.numeric(explainer$y_hat > 0.5)
TP <- sum(yhat[yhat == 1] == explainer$y[yhat == 1])
FP <- length(yhat[yhat == 1]) - TP
TN <- sum(yhat[yhat == 0] == explainer$y[yhat == 0])
FN <- length(yhat[yhat == 0]) - TN
list(
"TP" = TP,
"FP" = FP,
"TN" = TN,
"FN" = FN
)
}
new_scores <- list(
"1-auc" = function(au) {
1 - auditor::score(au, score = "auc")$score
},
"1-acc" = function(au) {
conf <- confusionmatrix(au)
1 - (conf$TP + conf$TN) / (conf$TP + conf$FP + conf$TN + conf$FN)
},
"1-precission" = function(au) {
conf <- confusionmatrix(au)
1 - conf$TP / (conf$TP + conf$FP)
},
"1-recall" = function(au) {
conf <- confusionmatrix(au)
1 - conf$TP / (conf$TP + conf$FN)
},
"1-specificity" = function(au) {
conf <- confusionmatrix(au)
1 - conf$TN / (conf$TN + conf$FP)
},
"1-F1" = function(au) {
conf <- confusionmatrix(au)
1 - (2 * (conf$TP / (conf$TP + conf$FP)) * (conf$TP / (conf$TP + conf$FN))) /
(conf$TP / (conf$TP + conf$FN) + conf$TP / (conf$TP + conf$FP))
}
)
|
## ggplot2 provides both the `diamonds` data set and every plotting function
## used in this script; the original never attached it, so nothing below
## could run in a fresh R session.
library(ggplot2)
data("diamonds")
View(diamonds)
####Histogram of diamond price######
## Map the bare column name inside aes(): aes(x = diamonds$price) bypasses
## the `data` argument, which breaks faceting and layer subsetting.
ggplot(data=diamonds) + geom_histogram(binwidth=500, aes(x=price)) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$") + ylab("Frequency") + theme_classic()
## Summary statistics of diamond price.
##Mean##
mean(diamonds$price)
###Median###
median(diamonds$price)
####Number of diamonds less than a particular price###
sum(diamonds$price < 500)
sum(diamonds$price < 250)
sum(diamonds$price >= 15000)
### Diamond price distribution, binwidth = 500, zoomed to prices < 2500 ###
### (original comment said "binwidth of 1000", which did not match the code)
ggplot(data=diamonds) + geom_histogram(binwidth=500, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$ - Binwidth 500") + ylab("Frequency") + theme_minimal() + xlim(0,2500)
####Diamond Price Distribution With binwidth of 100####
ggplot(data=diamonds) + geom_histogram(binwidth=100, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$- Binwidth 100") + ylab("Frequency") + theme_minimal() + xlim(0,2500)
### Diamond Price Distribution With binwidth of 50###
ggplot(data=diamonds) + geom_histogram(binwidth=50, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$ - Binwidth 50") + ylab("Frequency") + theme_minimal() + xlim(0,2500)
####Diamond Price distribution by Cut###
## NOTE(review): aes(x = diamonds$price) is an anti-pattern throughout this
## file -- prefer aes(x = price); kept as-is here to preserve byte identity.
ggplot(data=diamonds) + geom_histogram(binwidth=100, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution by Cut") + xlab("Diamond Price U$") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut)
###Diamond cut with Maximum Price###
subset(diamonds, price == max(price))
###Diamond cut with Minimum Price###
subset(diamonds, price == min(price))
###Lowest Median Price####
## Per-cut subsets; split(diamonds, diamonds$cut) would do this in one call.
a = diamonds[which(diamonds$cut == "Fair"),]
b = diamonds[which(diamonds$cut == "Good"),]
c = diamonds[which(diamonds$cut == "Very Good"),]
d = diamonds[which(diamonds$cut == "Premium"),]
e = diamonds[which(diamonds$cut == "Ideal"),]
####Median value of different cuts###
median(a$price)
median(b$price)
median(c$price)
median(d$price)
median(e$price)
####Get different frequency scales (the y axis) to accomodate for specific patterns######
ggplot(data=diamonds) + geom_histogram(binwidth=100, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution by Cut") + xlab("Diamond Price U$") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut, scales="free_y")
####Plotting price per carat of different cuts####
ggplot(data=diamonds) + geom_histogram(binwidth=50, aes(x=diamonds$price/diamonds$carat)) + ggtitle("Diamond Price per Carat Distribution by Cut") + xlab("Diamond Price per Carat U$") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut)
####Plotting price per carat of different cuts and using Log10#####
ggplot(data=diamonds) + geom_histogram(binwidth=0.01, aes(x=diamonds$price/diamonds$carat)) + ggtitle("Diamond Price per Carat Distribution by Cut") + xlab("Diamond Price per Carat U$ - LOG 10 Scale") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut) + scale_x_log10()
#######Price of diamonds using box plots######
## coord_cartesian zooms without dropping data from the statistics.
ggplot(diamonds, aes(factor(cut), price, fill=cut)) + geom_boxplot() + ggtitle("Diamond Price according Cut") + xlab("Type of Cut") + ylab("Diamond Price U$") + coord_cartesian(ylim=c(0,7500))
#####Diamond Chart according Clarity####
ggplot(diamonds, aes(factor(clarity), price, fill=clarity)) + geom_boxplot() + ggtitle("Diamond Price according Clarity") + xlab("Clarity") + ylab("Diamond Price U$") + coord_cartesian(ylim=c(0,7500))
###Creating subsets####
## Best (D) and worst (J) colour grades, compared via summary and IQR.
d = subset(diamonds, diamonds$color == 'D')
j = subset(diamonds, diamonds$color == 'J')
summary(d)
IQR(d$price)
summary(j)
IQR(j$price)
####Diamond Price per Carat according Color###
ggplot(diamonds, aes(factor(color), (price/carat), fill=color)) + geom_boxplot() + ggtitle("Diamond Price per Carat according Color") + xlab("Color") + ylab("Diamond Price per Carat U$")
####Limit the price range to under and see a smaller picture###
ggplot(diamonds, aes(factor(color), (price/carat), fill=color)) + geom_boxplot() + ggtitle("Diamond Price per Carat according Color") + xlab("Color") + ylab("Diamond Price per Carat U$") + coord_cartesian(ylim=c(0,7500))
####Diamond Frequency by Chart####
ggplot(data=diamonds, aes(x=carat)) + geom_freqpoly() + ggtitle("Diamond Frequency by Carat") + xlab("Carat Size") + ylab("Count")
## Finer binwidth plus minor grid lines every 0.1 carat.
ggplot(data=diamonds, aes(x=carat)) + geom_freqpoly(binwidth = 0.025) + ggtitle("Diamond Frequency by Carat") + xlab("Carat Size") + ylab("Count") + scale_x_continuous(minor_breaks = seq(0, 5.5, 0.1))
| /Diamond.R | no_license | Surbhi273/Exploratory-Analysis-on-Diamond-Dataset | R | false | false | 4,625 | r | data("diamonds")
View(diamonds)
####Histogram of diamond price######
ggplot(data=diamonds) + geom_histogram(binwidth=500, aes(x=diamonds$price) ) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$") + ylab("Frequency") + theme_classic()
##Mean##
mean(diamonds$price)
###Median###
median(diamonds$price)
####Number of diamonds less than a particular price###
sum(diamonds$price < 500)
sum(diamonds$price < 250)
sum(diamonds$price >= 15000)
###Diamond Price Distribution With binwidth of 1000###
ggplot(data=diamonds) + geom_histogram(binwidth=500, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$ - Binwidth 500") + ylab("Frequency") + theme_minimal() + xlim(0,2500)
####Diamond Price Distribution With binwidth of 100####
ggplot(data=diamonds) + geom_histogram(binwidth=100, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$- Binwidth 100") + ylab("Frequency") + theme_minimal() + xlim(0,2500)
### Diamond Price Distribution With binwidth of 50###
ggplot(data=diamonds) + geom_histogram(binwidth=50, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution") + xlab("Diamond Price U$ - Binwidth 50") + ylab("Frequency") + theme_minimal() + xlim(0,2500)
####Diamond Price distribution by Cut###
ggplot(data=diamonds) + geom_histogram(binwidth=100, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution by Cut") + xlab("Diamond Price U$") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut)
###Diamond cut with Maximum Price###
subset(diamonds, price == max(price))
###Diamond cut with Minimum Price###
subset(diamonds, price == min(price))
###Lowest Median Price####
a = diamonds[which(diamonds$cut == "Fair"),]
b = diamonds[which(diamonds$cut == "Good"),]
c = diamonds[which(diamonds$cut == "Very Good"),]
d = diamonds[which(diamonds$cut == "Premium"),]
e = diamonds[which(diamonds$cut == "Ideal"),]
####Median value of different cuts###
median(a$price)
median(b$price)
median(c$price)
median(d$price)
median(e$price)
####Get different frequency scales (the y axis) to accomodate for specific patterns######
ggplot(data=diamonds) + geom_histogram(binwidth=100, aes(x=diamonds$price)) + ggtitle("Diamond Price Distribution by Cut") + xlab("Diamond Price U$") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut, scales="free_y")
####Plotting price per carat of different cuts####
ggplot(data=diamonds) + geom_histogram(binwidth=50, aes(x=diamonds$price/diamonds$carat)) + ggtitle("Diamond Price per Carat Distribution by Cut") + xlab("Diamond Price per Carat U$") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut)
####Plotting price per carat of different cuts and using Log10#####
ggplot(data=diamonds) + geom_histogram(binwidth=0.01, aes(x=diamonds$price/diamonds$carat)) + ggtitle("Diamond Price per Carat Distribution by Cut") + xlab("Diamond Price per Carat U$ - LOG 10 Scale") + ylab("Frequency") + theme_minimal() + facet_wrap(~cut) + scale_x_log10()
#######Price of diamonds using box plots######
ggplot(diamonds, aes(factor(cut), price, fill=cut)) + geom_boxplot() + ggtitle("Diamond Price according Cut") + xlab("Type of Cut") + ylab("Diamond Price U$") + coord_cartesian(ylim=c(0,7500))
#####Diamond Chart according Clarity####
ggplot(diamonds, aes(factor(clarity), price, fill=clarity)) + geom_boxplot() + ggtitle("Diamond Price according Clarity") + xlab("Clarity") + ylab("Diamond Price U$") + coord_cartesian(ylim=c(0,7500))
###Creating subsets####
d = subset(diamonds, diamonds$color == 'D')
j = subset(diamonds, diamonds$color == 'J')
summary(d)
IQR(d$price)
summary(j)
IQR(j$price)
####Diamond Price per Carat according Color###
ggplot(diamonds, aes(factor(color), (price/carat), fill=color)) + geom_boxplot() + ggtitle("Diamond Price per Carat according Color") + xlab("Color") + ylab("Diamond Price per Carat U$")
####Limit the price range to under and see a smaller picture###
ggplot(diamonds, aes(factor(color), (price/carat), fill=color)) + geom_boxplot() + ggtitle("Diamond Price per Carat according Color") + xlab("Color") + ylab("Diamond Price per Carat U$") + coord_cartesian(ylim=c(0,7500))
####Diamond Frequency by Chart####
ggplot(data=diamonds, aes(x=carat)) + geom_freqpoly() + ggtitle("Diamond Frequency by Carat") + xlab("Carat Size") + ylab("Count")
ggplot(data=diamonds, aes(x=carat)) + geom_freqpoly(binwidth = 0.025) + ggtitle("Diamond Frequency by Carat") + xlab("Carat Size") + ylab("Count") + scale_x_continuous(minor_breaks = seq(0, 5.5, 0.1))
|
# page no. 275, problem 18
# A regular hexagon decomposes into 6 congruent triangles, so
# area(hexagon) = 6 * area(triangle) = 6 * ((1/2) * base * height).

# Area of a triangle given its base b and height h.
area_triangle <- function(b, h) {
  (b * h) / 2
}

# given:
base <- 8                    # hexagon side = triangle base
half <- base / 2             # half the base: leg of the right triangle
hypo <- base                 # full triangle side acts as the hypotenuse
h <- sqrt(hypo^2 - half^2)   # triangle height by Pythagoras
area_hexagon <- 6 * area_triangle(base, h)
print(area_hexagon)
| /Basic_Engineering_Mathematics_by_John_Bird/CH26/EX26.18/Ex26_18.R | permissive | prashantsinalkar/R_TBC_Uploads | R | false | false | 414 | r | #page no. 275
#problem 18
# formula used : area of regular hexagon =6*((1/2)*base*height)
# 6 times area of triangle
# Area of a triangle with base b and height h.
area_triangle = function(b,h)
{
return((b*h)/2)
}
#given:
base = 8
b = base/2 #base of right angle triangle
hypo = base #hypotenuse of right angle triangle
# Height of each component triangle via Pythagoras: sqrt(8^2 - 4^2) = sqrt(48).
h = sqrt(hypo^2 - b^2)
# Hexagon = 6 congruent triangles of base 8 and height h (result: 96*sqrt(3)).
area_hexagon=6*area_triangle(base,h)
print(area_hexagon)
|
#-------------------------------------------------------------------------------
# Environment Set-up
#-------------------------------------------------------------------------------
# Script purpose: forward ("recursive") feature selection for the Homesite
# quote-conversion model. For each remaining candidate feature, an xgboost
# model is cross-validated on the already-selected variables plus that one
# feature, and CV/hold-out AUCs are recorded and written to a CSV.
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace; acceptable
# for a stand-alone batch script, but never source() this from a live session.
rm(list=ls(all=TRUE))
gc()
procStartTime <- Sys.time()
# Version suffixes the output CSV file name.
Version <- "6"
# Machine-specific package library path.
.libPaths("E:/R-Packages")
# Link Dropbox
# library(RStudioAMI)
# linkDropbox()
pkgs <- c("caret", "data.table", "xgboost", "caTools", "doSNOW", "tcltk")
# install.packages(pkgs)
# Load required libraries
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing, so a failed load here is silent -- inspect the printed logical vector.
sapply(pkgs, require, character.only=TRUE)
# Define the path for base directory and set it as a working directory
# basePath <- "/home/rstudio/Dropbox/Public/Homesite/"
basePath <- "D:/Vikas/Homesite/"
setwd(basePath)
# Source New Script of xgb.cv
# xgb.cv1() is a project-local variant of xgboost::xgb.cv; its exact return
# structure (used as $dt below) is defined in model/xgboost_cv/xgb.cv1.R.
source("model/xgboost_cv/xgb.cv1.R")
#-------------------------------------------------------------------------------
# Load data
#-------------------------------------------------------------------------------
inputData <- fread("input/train.csv")
# Expand the quote date into numeric year/month/day features, then drop the raw
# identifier and date columns.
inputData[, Original_Quote_Date:=as.Date(Original_Quote_Date, "%Y-%m-%d")]
inputData[, year:=as.numeric(format(Original_Quote_Date, "%y"))]
inputData[, month:=as.numeric(format(Original_Quote_Date, "%m"))]
inputData[, day:=as.numeric(format(Original_Quote_Date, "%d"))]
inputData[, c("QuoteNumber", "Original_Quote_Date"):=NULL]
# Impute every missing value with 0.
inputData[is.na(inputData)] <- 0
# Field10 contains comma thousands separators; strip them and convert to numeric.
inputData[, Field10:=as.numeric(gsub(",", "", Field10))]
# Label-encode all remaining character columns as integer factor codes.
for(f in names(inputData)) {
if(class(inputData[[f]]) == "character") {
inputData[[f]] <- as.numeric(as.factor(inputData[[f]]))
}
}
#-------------------------------------------------------------------------------
# Split data into train and test
#-------------------------------------------------------------------------------
outcome_name <- "QuoteConversion_Flag"
feature_names <- setdiff(names(inputData), outcome_name)
# Reproducible ~50/50 random split into training and validation sets.
set.seed(1234)
random_splits <- runif(nrow(inputData))
train_df <- inputData[random_splits < .5, ]
train_df <- data.frame(train_df)
validate_df <- data.frame(inputData[random_splits >=.5, ])
# Remove constant variables
feature_names <- setdiff(feature_names, names(which(apply(train_df[, feature_names], 2, sd)==0)))
# Sanity check: outcome class balance should be similar across the splits.
prop.table(table(inputData[, outcome_name, with=FALSE]))
prop.table(table(train_df[, outcome_name]))
prop.table(table(validate_df[, outcome_name]))
#-------------------------------------------------------------------------------
# Create index for cross validation
#-------------------------------------------------------------------------------
# One fixed 3-fold partition reused for every candidate feature, so CV AUCs are
# directly comparable across iterations.
set.seed(102)
index <- createFolds(train_df[, outcome_name], k = 3)
# Define list of parameters
param0 <- list(booster = "gbtree"
, silent = 0
, eta = 0.2
, gamma = 0
, max_depth = 5
, min_child_weight = 10
, subsample = 1
, colsample_bytree = 0.6
, objective = "binary:logistic"
, eval_metric = "auc"
)
#-------------------------------------------------------------------------------
# Recursive Feature Selection
#-------------------------------------------------------------------------------
# Selected Vars
# Features already accepted in earlier selection rounds; each remaining
# candidate is evaluated on top of this base set.
selVars <- c("PropertyField37", "PersonalField10A", "SalesField5", "Field7", "PersonalField1", "SalesField1B")
feature_names <- setdiff(feature_names, selVars)
length(feature_names)
rfs_outputs <- data.table(SelectedVars=feature_names)
rfs_outputs[, N:=1:.N]
# Evaluate candidates in parallel on 3 SOCK worker processes.
cl <- makeCluster(3, type="SOCK")
registerDoSNOW(cl)
final_outputs <- foreach(i=1:nrow(rfs_outputs), .inorder=FALSE, .packages=c("xgboost", "data.table", "caTools", "tcltk")) %dopar% {
# Per-worker Tk progress bar (one window per worker process).
if(!exists("pb")) pb <- tkProgressBar("Variables Completed", min=1, max=nrow(rfs_outputs))
setTkProgressBar(pb, i)
startTime <- Sys.time()
# Candidate feature set: accepted variables + this iteration's candidate.
new_features <- c(selVars, rfs_outputs[i, SelectedVars])
dtrain <- xgb.DMatrix(data=data.matrix(train_df[, new_features]), label=train_df[, outcome_name])
dval <- xgb.DMatrix(data=data.matrix(validate_df[, new_features]))
#-------------------------------------------------------------------------------
# Fit cross validation model using new_script
#-------------------------------------------------------------------------------
set.seed(1234)
system.time(xgb_cv_new <- xgb.cv1(params=param0, data=dtrain, nrounds=150,
metrics=list("auc"), folds=index, verbose=FALSE, prediction=TRUE))
xgb_cv_perf <- xgb_cv_new$dt
setnames(xgb_cv_perf, gsub("[.]", "_", names(xgb_cv_perf)))
# Pessimistic score: mean CV AUC minus one standard deviation.
xgb_cv_perf[, test_auc_mean_sd:=test_auc_mean-test_auc_std]
xgb_cv_perf[, tree:=1:.N]
# Best boosting round by mean AUC ...
setorder(xgb_cv_perf, -test_auc_mean)
tree_auc_mean <- xgb_cv_perf[1, tree]
# ... and by the pessimistic mean-minus-sd score.
setorder(xgb_cv_perf, -test_auc_mean_sd)
tree_auc_mean_sd <- xgb_cv_perf[1, tree]
setorder(xgb_cv_perf, tree)
# Use the later of the two optima as the final number of rounds.
new_nrounds <- max(tree_auc_mean_sd, tree_auc_mean)
rfs_outputs[i, test_auc_mean:=xgb_cv_perf[tree==new_nrounds, test_auc_mean]]
rfs_outputs[i, test_auc_std:=xgb_cv_perf[tree==new_nrounds, test_auc_std]]
#-------------------------------------------------------------------------------
# Fit final models
#-------------------------------------------------------------------------------
# Refit on the full training split and score the hold-out validation set.
set.seed(1234)
system.time(xgb_fit0 <- xgb.train(params=param0, data=dtrain, nrounds=new_nrounds)) # 14 Seconds
tmpPred <- predict(object=xgb_fit0, newdata=dval, ntreelimit=new_nrounds)
auc <- colAUC(X=tmpPred, y=validate_df[, outcome_name])
endTime <- Sys.time()
timeTaken <- round(as.numeric(difftime(endTime, startTime, units="secs")), 0)
# Each worker holds its own copy of rfs_outputs, so only the single updated row
# is returned; the master re-assembles them with rbindlist() after the loop.
rfs_outputs[i, best_nrounds:=new_nrounds]
rfs_outputs[i, val_auc:=as.numeric(auc)]
rfs_outputs[i, time_secs:=timeTaken]
out <- rfs_outputs[i, ]
out
}
stopCluster(cl)
procEndTime <- Sys.time()
procTimeTaken <- difftime(procEndTime, procStartTime, units="secs")
procTimeTaken
# Combine per-candidate rows (returned out of order) and restore input order.
rfs_outputs <- rbindlist(final_outputs)
setorder(rfs_outputs, N)
summary(rfs_outputs)
# Rank candidates by the average of CV AUC and hold-out AUC (Max_Diff = gap to
# the best candidate; smaller is better).
rfs_outputs[, Avg_AUC:=(test_auc_mean+val_auc)/2]
rfs_outputs[, Max_Diff:=max(Avg_AUC)-Avg_AUC]
# Attach missing-value counts from a prior analysis for reference.
missInfo <- fread("model/xgboost_cv/Missing_Value_Analysis.csv")
rfs_outputs <- merge(rfs_outputs, missInfo, by.x="SelectedVars", by.y="Var")
setnames(rfs_outputs, "NegativeCount", "MissingValues")
setorder(rfs_outputs, Max_Diff)
rfs_outputs
write.csv(rfs_outputs, paste0("model/xgboost_cv/rfs/xgboost_cv_rfs_", Version, ".csv"), row.names=FALSE)
| /model/xgboost_cv/rfs/xgboost_cv_rfs_6.R | no_license | vikasnitk85/Homesite | R | false | false | 6,296 | r | #-------------------------------------------------------------------------------
# Environment Set-up
#-------------------------------------------------------------------------------
rm(list=ls(all=TRUE))
gc()
procStartTime <- Sys.time()
Version <- "6"
.libPaths("E:/R-Packages")
# Link Dropbox
# library(RStudioAMI)
# linkDropbox()
pkgs <- c("caret", "data.table", "xgboost", "caTools", "doSNOW", "tcltk")
# install.packages(pkgs)
# Load required libraries
sapply(pkgs, require, character.only=TRUE)
# Define the path for base directory and set it as a working directory
# basePath <- "/home/rstudio/Dropbox/Public/Homesite/"
basePath <- "D:/Vikas/Homesite/"
setwd(basePath)
# Source New Script of xgb.cv
source("model/xgboost_cv/xgb.cv1.R")
#-------------------------------------------------------------------------------
# Load data
#-------------------------------------------------------------------------------
inputData <- fread("input/train.csv")
inputData[, Original_Quote_Date:=as.Date(Original_Quote_Date, "%Y-%m-%d")]
inputData[, year:=as.numeric(format(Original_Quote_Date, "%y"))]
inputData[, month:=as.numeric(format(Original_Quote_Date, "%m"))]
inputData[, day:=as.numeric(format(Original_Quote_Date, "%d"))]
inputData[, c("QuoteNumber", "Original_Quote_Date"):=NULL]
inputData[is.na(inputData)] <- 0
inputData[, Field10:=as.numeric(gsub(",", "", Field10))]
for(f in names(inputData)) {
if(class(inputData[[f]]) == "character") {
inputData[[f]] <- as.numeric(as.factor(inputData[[f]]))
}
}
#-------------------------------------------------------------------------------
# Split data into train and test
#-------------------------------------------------------------------------------
outcome_name <- "QuoteConversion_Flag"
feature_names <- setdiff(names(inputData), outcome_name)
set.seed(1234)
random_splits <- runif(nrow(inputData))
train_df <- inputData[random_splits < .5, ]
train_df <- data.frame(train_df)
validate_df <- data.frame(inputData[random_splits >=.5, ])
# Remove constant variables
feature_names <- setdiff(feature_names, names(which(apply(train_df[, feature_names], 2, sd)==0)))
prop.table(table(inputData[, outcome_name, with=FALSE]))
prop.table(table(train_df[, outcome_name]))
prop.table(table(validate_df[, outcome_name]))
#-------------------------------------------------------------------------------
# Create index for cross validation
#-------------------------------------------------------------------------------
set.seed(102)
index <- createFolds(train_df[, outcome_name], k = 3)
# Define list of parameters
param0 <- list(booster = "gbtree"
, silent = 0
, eta = 0.2
, gamma = 0
, max_depth = 5
, min_child_weight = 10
, subsample = 1
, colsample_bytree = 0.6
, objective = "binary:logistic"
, eval_metric = "auc"
)
#-------------------------------------------------------------------------------
# Recursive Feature Selection
#-------------------------------------------------------------------------------
# Selected Vars
selVars <- c("PropertyField37", "PersonalField10A", "SalesField5", "Field7", "PersonalField1", "SalesField1B")
feature_names <- setdiff(feature_names, selVars)
length(feature_names)
rfs_outputs <- data.table(SelectedVars=feature_names)
rfs_outputs[, N:=1:.N]
cl <- makeCluster(3, type="SOCK")
registerDoSNOW(cl)
final_outputs <- foreach(i=1:nrow(rfs_outputs), .inorder=FALSE, .packages=c("xgboost", "data.table", "caTools", "tcltk")) %dopar% {
if(!exists("pb")) pb <- tkProgressBar("Variables Completed", min=1, max=nrow(rfs_outputs))
setTkProgressBar(pb, i)
startTime <- Sys.time()
new_features <- c(selVars, rfs_outputs[i, SelectedVars])
dtrain <- xgb.DMatrix(data=data.matrix(train_df[, new_features]), label=train_df[, outcome_name])
dval <- xgb.DMatrix(data=data.matrix(validate_df[, new_features]))
#-------------------------------------------------------------------------------
# Fit cross validation model using new_script
#-------------------------------------------------------------------------------
set.seed(1234)
system.time(xgb_cv_new <- xgb.cv1(params=param0, data=dtrain, nrounds=150,
metrics=list("auc"), folds=index, verbose=FALSE, prediction=TRUE))
xgb_cv_perf <- xgb_cv_new$dt
setnames(xgb_cv_perf, gsub("[.]", "_", names(xgb_cv_perf)))
xgb_cv_perf[, test_auc_mean_sd:=test_auc_mean-test_auc_std]
xgb_cv_perf[, tree:=1:.N]
setorder(xgb_cv_perf, -test_auc_mean)
tree_auc_mean <- xgb_cv_perf[1, tree]
setorder(xgb_cv_perf, -test_auc_mean_sd)
tree_auc_mean_sd <- xgb_cv_perf[1, tree]
setorder(xgb_cv_perf, tree)
new_nrounds <- max(tree_auc_mean_sd, tree_auc_mean)
rfs_outputs[i, test_auc_mean:=xgb_cv_perf[tree==new_nrounds, test_auc_mean]]
rfs_outputs[i, test_auc_std:=xgb_cv_perf[tree==new_nrounds, test_auc_std]]
#-------------------------------------------------------------------------------
# Fit final models
#-------------------------------------------------------------------------------
set.seed(1234)
system.time(xgb_fit0 <- xgb.train(params=param0, data=dtrain, nrounds=new_nrounds)) # 14 Seconds
tmpPred <- predict(object=xgb_fit0, newdata=dval, ntreelimit=new_nrounds)
auc <- colAUC(X=tmpPred, y=validate_df[, outcome_name])
endTime <- Sys.time()
timeTaken <- round(as.numeric(difftime(endTime, startTime, units="secs")), 0)
rfs_outputs[i, best_nrounds:=new_nrounds]
rfs_outputs[i, val_auc:=as.numeric(auc)]
rfs_outputs[i, time_secs:=timeTaken]
out <- rfs_outputs[i, ]
out
}
stopCluster(cl)
procEndTime <- Sys.time()
procTimeTaken <- difftime(procEndTime, procStartTime, units="secs")
procTimeTaken
rfs_outputs <- rbindlist(final_outputs)
setorder(rfs_outputs, N)
summary(rfs_outputs)
rfs_outputs[, Avg_AUC:=(test_auc_mean+val_auc)/2]
rfs_outputs[, Max_Diff:=max(Avg_AUC)-Avg_AUC]
missInfo <- fread("model/xgboost_cv/Missing_Value_Analysis.csv")
rfs_outputs <- merge(rfs_outputs, missInfo, by.x="SelectedVars", by.y="Var")
setnames(rfs_outputs, "NegativeCount", "MissingValues")
setorder(rfs_outputs, Max_Diff)
rfs_outputs
write.csv(rfs_outputs, paste0("model/xgboost_cv/rfs/xgboost_cv_rfs_", Version, ".csv"), row.names=FALSE)
|
# ---------------------------------------------------------------
# INPUT DATA ----------------------------------------------------
# ---------------------------------------------------------------
# User-editable configuration: where the EIA workbook lives (relative to this
# script), where figures are written, and a fixed color per energy source so
# every figure in this script uses a consistent palette.
data.loc = '/data_2017-2018' # location of data file(s)
data.file = 'Table_1.3_Primary_Energy_Consumption_by_Source.xlsx' # data file to be used
out.loc = '/figures' # location of where to save figures
# Hex colors keyed by the display label used for each MSN series below.
source.cols = c("Geothermal" = "#6a3d9a",
                "Solar" = "#ff7f00",
                "Waste" = "#858585",
                "Biomass" = "#8c613c",
                "Wind" = "#1f78b4",
                "Hydroelectric" = "#a6cee3",
                "Nuclear" = "#55a868",
                "Petroleum" = "#fdbf6f",
                "Coal" = "#12253d",
                "Natural Gas" = "#c44e52",
                "Total Primary Energy" = "#fc8d62",
                "Total Fossil Fuels" = "#383e56",
                "Total Renewable Energy" = "#66c2a5")
# ---------------------------------------------------------------
# MAIN SCRIPT ---------------------------------------------------
# ---------------------------------------------------------------
# load libraries -------
library(data.table)
library(openxlsx)
library(ggplot2)
library(hrbrthemes)
library(stringr)
library(plyr)
library(directlabels)
library(grid)
library(rstudioapi)
# get file location as working directory -----
# NOTE(review): rstudioapi::getActiveDocumentContext() only works when the
# script is open in RStudio; running via Rscript leaves current.fil empty.
current.fil = getActiveDocumentContext()$path
current.loc = dirname(current.fil)
setwd(dirname(current.fil))
# set working directory as data file location ------
setwd(paste0(current.loc, data.loc))
# load data ------
# Read the monthly and annual sheets; drop the first data row of each sheet.
# NOTE(review): assumes row 2 of each sheet is a non-data row -- confirm
# against the EIA workbook layout.
dt_month = as.data.table(read.xlsx(data.file, sheet = "Monthly Data", startRow = 11, detectDates = T))
dt_month = dt_month[2:nrow(dt_month)]
dt_annual = as.data.table(read.xlsx(data.file, sheet = "Annual Data", startRow = 11, detectDates = T))
dt_annual = dt_annual[2:nrow(dt_annual)]
# rename columns ----
colnames(dt_annual)[1] = c("Year")
# NOTE(review): the first column of dt_month is assumed to already be named
# "Month" in the workbook (it is referenced as such below) -- verify.
# melt data table from wide to long format -----
dt_month = melt(dt_month, measure.vars = colnames(dt_month)[2:13],
                variable.name = "MSN", value.name = "Value")
dt_annual = melt(dt_annual, measure.vars = colnames(dt_annual)[2:13],
                 variable.name = "MSN", value.name = "Value")
# rename MSN factor levels -------
# Single mapping from workbook column names to display labels, applied to both
# tables (previously this 12-entry vector was duplicated verbatim for the
# monthly and annual calls, inviting drift).
msn_labels = c('Biomass.Energy.Consumption' = "Biomass",
               'Coal.Consumption' = "Coal",
               'Total.Fossil.Fuels.Consumption' = "Total Fossil Fuels",
               'Geothermal.Energy.Consumption' = "Geothermal",
               'Hydroelectric.Power.Consumption' = "Hydroelectric",
               'Natural.Gas.Consumption.(Excluding.Supplemental.Gaseous.Fuels)' = "Natural Gas",
               'Nuclear.Electric.Power.Consumption' = "Nuclear",
               'Petroleum.Consumption.(Excluding.Biofuels)' = "Petroleum",
               'Solar.Energy.Consumption' = "Solar",
               'Wind.Energy.Consumption' = "Wind",
               'Total.Renewable.Energy.Consumption' = "Total Renewable Energy",
               'Total.Primary.Energy.Consumption' = "Total Primary Energy")
dt_month[, MSN := revalue(MSN, msn_labels)]
dt_annual[, MSN := revalue(MSN, msn_labels)]
# convert value column to numeric ----
dt_month[, Value := as.numeric(Value)]
dt_annual[, Value := as.numeric(Value)]
# remove NA month or year entries ----
dt_month = dt_month[!is.na(Month)]
dt_annual = dt_annual[!is.na(Year)]
# ---------------------------------------------------------------
# FIGURES -------------------------------------------------------
# ---------------------------------------------------------------
setwd(paste0(current.loc, out.loc))
# ANNUAL AREA PLOT AND LINE PLOT -------------
# Keep only the individual sources (drop the "Total ..." aggregates) and set an
# explicit factor order so the stacked areas layer renewables on top of fossils.
dt = dt_annual[! MSN %in% c("Total Fossil Fuels",
                            "Total Renewable Energy",
                            "Total Primary Energy")][, MSN := factor(MSN, levels = c("Wind",
                                                                                     "Solar",
                                                                                     "Geothermal",
                                                                                     "Biomass",
                                                                                     "Hydroelectric",
                                                                                     "Nuclear",
                                                                                     "Natural Gas",
                                                                                     "Petroleum",
                                                                                     "Coal"))]
# Shared annotations for both the area and line versions of this figure.
tlab = "Annual U.S. Primary Energy Consumption by Source (1949 - 2017)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(dt$MSN)
plot.cols = source.cols
# Stacked area chart. Columns are mapped directly inside aes() (Year, Value,
# MSN) instead of through external xval/yval/fillval vectors -- data-masked
# mappings are the idiomatic ggplot2 form and produce identical output here.
area_annual = ggplot(dt, aes(x = Year, y = Value, fill = MSN)) +
  geom_area(stat = "identity") +
  labs(title = tlab,
       subtitle = sublab,
       x = xlab,
       y = ylab,
       fill = leglab) +
  theme_ipsum_rc(grid = gval) +
  scale_fill_manual(breaks = leg.ord, values = plot.cols) +
  scale_x_continuous(breaks = seq(1950, 2017, 5), expand = c(0, 0)) +
  scale_y_continuous(expand = c(0.01, 0)) +
  theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
        plot.subtitle = element_text(size = 15, hjust = 0.5),
        axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
        axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
        axis.text.x = element_text(size = 18, face = "bold"),
        axis.text.y = element_text(size = 18, face = "bold"),
        legend.text = element_text(size = 14, face = "bold")) +
  theme(plot.margin = unit(c(1, 1, 1, 1), "lines"))
ggsave(area_annual,
       filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_ATS.png",
       width = 11.75,
       height = 6.25,
       dpi = 400)
# leg.ord = levels(with(dt[Month == max(dt[,Month])], reorder(MSN, -Value)))
# Line chart of the same data; series are labeled at the right edge with
# directlabels, so the legend is suppressed.
# FIX: guides(color = FALSE) is deprecated (ggplot2 >= 3.3.4); "none" is the
# supported spelling and has long been accepted.
line_annual = ggplot(dt, aes(x = Year, y = Value, color = MSN)) +
  geom_line(stat = "identity", size = 0.7) +
  labs(title = tlab,
       subtitle = sublab,
       x = xlab,
       y = ylab,
       color = leglab) +
  theme_ipsum_rc(grid = gval) +
  scale_color_manual(breaks = leg.ord, values = plot.cols) +
  scale_x_continuous(breaks = seq(1950, 2017, 5), expand = c(0, 0)) +
  scale_y_continuous(expand = c(0.01, 0)) +
  geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
  guides(color = "none") +
  theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
        plot.subtitle = element_text(size = 15, hjust = 0.5),
        axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
        axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
        axis.text.x = element_text(size = 18, face = "bold"),
        axis.text.y = element_text(size = 18, face = "bold"),
        legend.text = element_text(size = 14, face = "bold")) +
  theme(plot.margin = unit(c(1, 8, 1, 1), "lines"))
# this is to add labels of fuel type on the outside of the plot:
# disable panel clipping so the direct labels can spill into the right margin.
line_annual_2 <- ggplotGrob(line_annual)
line_annual_2$layout$clip[line_annual_2$layout$name == "panel"] <- "off"
ggsave(line_annual_2,
       filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_LTS.png",
       width = 11.75,
       height = 6.25,
       dpi = 400)
# MONTHLY AREA PLOT AND LINE PLOT -------------
dt = dt_month[! MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")][, MSN := factor(MSN, levels = c("Wind",
"Solar",
"Geothermal",
"Biomass",
"Hydroelectric",
"Nuclear",
"Natural Gas",
"Petroleum",
"Coal"))]
xval = dt[, Month]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Monthly U.S. Primary Energy Consumption By Source (January 1973 - April 2018)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
area_month = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_area(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(area_month,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_ATS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# leg.ord = levels(with(dt[Month == max(dt[,Month])], reorder(MSN, -Value)))
line_month = ggplot(dt, aes(x = xval, y = yval, color = fillval)) +
geom_line(stat = "identity", size = 0.7) +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
color = leglab) +
theme_ipsum_rc(grid = gval) +
scale_color_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
guides(color = FALSE) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,8,1,1), "lines"))
line_month_2 <- ggplotGrob(line_month)
line_month_2$layout$clip[line_month_2$layout$name == "panel"] <- "off"
ggsave(line_month_2,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_LTS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# LATEST YEAR BAR PLOT -------------
dt = dt_annual[ Year == max(Year) & ! MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")]
xval = dt[, reorder(MSN, Value)]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "2017 U.S. Primary Energy Consumption by Source"
sublab = "Data: U.S. Energy Information Administration"
gval = "X"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
bar_annual = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_bar(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
coord_flip() +
scale_y_comma(expand = c(0,0)) +
guides(fill = FALSE) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(bar_annual,
filename = "Energy_Primary Energy Consumption by Source_Annual_2017_BP.png",
width = 11.75,
height = 6.25,
dpi = 400)
# (TOTALS ONLY) ANNUAL AREA PLOT AND LINE PLOT -------------
dt = dt_annual[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels"))]
xval = dt[, Year]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Annual U.S. Primary Energy Consumption by Source (1949 - 2017)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
area_annual = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_area(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
scale_x_continuous(breaks = seq(1950,2017,5), expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(area_annual,
filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_Totals_ATS.png",
width = 11.75,
height = 6.25,
dpi = 400)
dt = dt_annual[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels",
"Total Primary Energy"))]
xval = dt[, Year]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "1949 - 2017 Annual U.S. Primary Energy Consumption by Source"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
line_annual = ggplot(dt, aes(x = xval, y = yval, color = fillval)) +
geom_line(stat = "identity", size = 0.7) +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
color = leglab) +
theme_ipsum_rc(grid = gval) +
scale_color_manual(breaks = leg.ord, values = plot.cols) +
scale_x_continuous(breaks = seq(1950,2017,5), expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
guides(color = FALSE) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,9,1,1), "lines"))
line_annual_2 <- ggplotGrob(line_annual)
line_annual_2$layout$clip[line_annual_2$layout$name == "panel"] <- "off"
# grid.draw(gt1)
ggsave(line_annual_2,
filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_Totals_LTS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# (TOTALS ONLY) MONTHLY AREA PLOT AND LINE PLOT -------------
dt = dt_month[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels"))]
xval = dt[, Month]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Monthly U.S. Primary Energy Consumption By Source (January 1973 - April 2018)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
area_month = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_area(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(area_month,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_Totals_ATS.png",
width = 11.75,
height = 6.25,
dpi = 400)
dt = dt_month[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels",
"Total Primary Energy"))]
xval = dt[, Month]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Monthly U.S. Primary Energy Consumption By Source (January 1973 - April 2018)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
line_month = ggplot(dt, aes(x = xval, y = yval, color = fillval)) +
geom_line(stat = "identity", size = 0.7) +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
color = leglab) +
theme_ipsum_rc(grid = gval) +
scale_color_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
guides(color = FALSE) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,10,1,1), "lines"))
line_month_2 <- ggplotGrob(line_month)
line_month_2$layout$clip[line_month_2$layout$name == "panel"] <- "off"
# grid.draw(gt1)
ggsave(line_month_2,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_Totals_LTS.png",
width = 11.75,
height = 6.25,
dpi = 400)
| /scripts/archive/Energy_Primary Energy Consumption by Source.R | no_license | S3researchUSC/ene505-figures | R | false | false | 23,214 | r | # ---------------------------------------------------------------
# INPUT DATA ----------------------------------------------------
# ---------------------------------------------------------------
data.loc = '/data_2017-2018' # location of data file(s)
data.file = 'Table_1.3_Primary_Energy_Consumption_by_Source.xlsx' # data file to be used
out.loc = '/figures' # location of where to save figures
source.cols = c("Geothermal" = "#6a3d9a",
"Solar" = "#ff7f00",
"Waste" = "#858585",
"Biomass" = "#8c613c",
"Wind" = "#1f78b4",
"Hydroelectric" = "#a6cee3",
"Nuclear" = "#55a868",
"Petroleum" = "#fdbf6f",
"Coal" = "#12253d",
"Natural Gas" = "#c44e52",
"Total Primary Energy" = "#fc8d62",
"Total Fossil Fuels" = "#383e56",
"Total Renewable Energy" = "#66c2a5")
# ---------------------------------------------------------------
# MAIN SCRIPT ---------------------------------------------------
# ---------------------------------------------------------------
# load libraries -------
library(data.table)
library(openxlsx)
library(ggplot2)
library(hrbrthemes)
library(stringr)
library(plyr)
library(directlabels)
library(grid)
library(rstudioapi)
# get file location as working directory -----
# NOTE(review): rstudioapi only works when this script is open in RStudio;
# sourcing from Rscript will fail here.
current.fil = getActiveDocumentContext()$path
current.loc = dirname(current.fil)
# this setwd() is immediately overridden by the one below and is redundant
setwd(dirname(current.fil))
# set working directory as data file location ------
setwd(paste0(current.loc, data.loc))
# load data ------
# startRow = 11 skips the EIA sheet preamble; the first remaining row is a
# units row, dropped by the [2:nrow(...)] subsets below.
dt_month = as.data.table(read.xlsx(data.file, sheet = "Monthly Data", startRow = 11, detectDates = T))
dt_month = dt_month[2:nrow(dt_month)]
dt_annual = as.data.table(read.xlsx(data.file, sheet = "Annual Data", startRow = 11, detectDates = T))
dt_annual = dt_annual[2:nrow(dt_annual)]
# rename columns ----
# only the annual table's first column is renamed; the monthly table is
# assumed to already have a date column named "Month" (used below) -- TODO confirm
colnames(dt_annual)[1] = c("Year")
# melt data table from wide to long format -----
# columns 2:13 are the 12 per-source consumption columns; the result has
# one row per (date, source) with the source name in MSN
dt_month = melt(dt_month, measure.vars = colnames(dt_month)[2:13],
variable.name = "MSN", value.name = "Value")
dt_annual = melt(dt_annual, measure.vars = colnames(dt_annual)[2:13],
variable.name = "MSN", value.name = "Value")
# rename MSN factor levels -------
# map the raw EIA spreadsheet column names to short display labels that
# match the names in source.cols
dt_month[, MSN := revalue(MSN, c('Biomass.Energy.Consumption' = "Biomass",
'Coal.Consumption' = "Coal",
'Total.Fossil.Fuels.Consumption' = "Total Fossil Fuels",
'Geothermal.Energy.Consumption' = "Geothermal",
'Hydroelectric.Power.Consumption' = "Hydroelectric",
'Natural.Gas.Consumption.(Excluding.Supplemental.Gaseous.Fuels)' = "Natural Gas",
'Nuclear.Electric.Power.Consumption' = "Nuclear",
'Petroleum.Consumption.(Excluding.Biofuels)' = "Petroleum",
'Solar.Energy.Consumption' = "Solar",
'Wind.Energy.Consumption' = "Wind",
'Total.Renewable.Energy.Consumption' = "Total Renewable Energy",
'Total.Primary.Energy.Consumption' = "Total Primary Energy" ))]
dt_annual[, MSN := revalue(MSN, c('Biomass.Energy.Consumption' = "Biomass",
'Coal.Consumption' = "Coal",
'Total.Fossil.Fuels.Consumption' = "Total Fossil Fuels",
'Geothermal.Energy.Consumption' = "Geothermal",
'Hydroelectric.Power.Consumption' = "Hydroelectric",
'Natural.Gas.Consumption.(Excluding.Supplemental.Gaseous.Fuels)' = "Natural Gas",
'Nuclear.Electric.Power.Consumption' = "Nuclear",
'Petroleum.Consumption.(Excluding.Biofuels)' = "Petroleum",
'Solar.Energy.Consumption' = "Solar",
'Wind.Energy.Consumption' = "Wind",
'Total.Renewable.Energy.Consumption' = "Total Renewable Energy",
'Total.Primary.Energy.Consumption' = "Total Primary Energy" ))]
# convert value column to numeric ----
# non-numeric placeholder cells coerce to NA (with a warning); presumably
# "Not Available" markers in the source sheet -- verify against the data
dt_month[, Value := as.numeric(Value)]
dt_annual[, Value := as.numeric(Value)]
# remove NA month or year entries ----
dt_month = dt_month[!is.na(Month)]
dt_annual = dt_annual[!is.na(Year)]
# ---------------------------------------------------------------
# FIGURES -------------------------------------------------------
# ---------------------------------------------------------------
setwd(paste0(current.loc, out.loc))
# ANNUAL AREA PLOT AND LINE PLOT -------------
# keep only the individual sources (drop the three aggregate series) and
# impose an explicit bottom-to-top stacking order for the area fill
dt = dt_annual[! MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")][, MSN := factor(MSN, levels = c("Wind",
"Solar",
"Geothermal",
"Biomass",
"Hydroelectric",
"Nuclear",
"Natural Gas",
"Petroleum",
"Coal"))]
# pull plot inputs into globals; every figure section overwrites these,
# so they must be regenerated before each ggplot call that uses them
xval = dt[, Year]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Annual U.S. Primary Energy Consumption by Source (1949 - 2017)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
# stacked area chart of absolute consumption per source
area_annual = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_area(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
scale_x_continuous(breaks = seq(1950,2017,5), expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(area_annual,
filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_ATS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# leg.ord = levels(with(dt[Month == max(dt[,Month])], reorder(MSN, -Value)))
# line version of the same data, labelled directly at the line ends
# (directlabels) instead of with a legend
line_annual = ggplot(dt, aes(x = xval, y = yval, color = fillval)) +
geom_line(stat = "identity", size = 0.7) +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
color = leglab) +
theme_ipsum_rc(grid = gval) +
scale_color_manual(breaks = leg.ord, values = plot.cols) +
scale_x_continuous(breaks = seq(1950,2017,5), expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
# dl.trans nudges labels right of the last data point; "last.bumpup"
# resolves vertical overlaps
geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
# NOTE(review): guides(color = FALSE) is deprecated in ggplot2 >= 3.3.4
# in favour of guides(color = "none")
guides(color = FALSE) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
# wide right margin leaves room for the direct labels
theme(plot.margin = unit(c(1,8,1,1), "lines"))
# this is to add labels of fuel type on the outside of the plot
# (turn off panel clipping so geom_dl labels can render in the margin)
line_annual_2 <- ggplotGrob(line_annual)
line_annual_2$layout$clip[line_annual_2$layout$name == "panel"] <- "off"
ggsave(line_annual_2,
filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_LTS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# MONTHLY AREA PLOT AND LINE PLOT -------------
# same pattern as the annual section, but on the monthly table with a
# date x-axis instead of an integer year axis
dt = dt_month[! MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")][, MSN := factor(MSN, levels = c("Wind",
"Solar",
"Geothermal",
"Biomass",
"Hydroelectric",
"Nuclear",
"Natural Gas",
"Petroleum",
"Coal"))]
xval = dt[, Month]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Monthly U.S. Primary Energy Consumption By Source (January 1973 - April 2018)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
area_month = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_area(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(area_month,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_ATS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# leg.ord = levels(with(dt[Month == max(dt[,Month])], reorder(MSN, -Value)))
# direct-labelled line version; see annual section for the clipping trick
line_month = ggplot(dt, aes(x = xval, y = yval, color = fillval)) +
geom_line(stat = "identity", size = 0.7) +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
color = leglab) +
theme_ipsum_rc(grid = gval) +
scale_color_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
guides(color = FALSE) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,8,1,1), "lines"))
line_month_2 <- ggplotGrob(line_month)
line_month_2$layout$clip[line_month_2$layout$name == "panel"] <- "off"
ggsave(line_month_2,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_LTS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# LATEST YEAR BAR PLOT -------------
# horizontal bar chart of the most recent year's consumption, individual
# sources only, sorted by value via reorder()
dt = dt_annual[ Year == max(Year) & ! MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")]
xval = dt[, reorder(MSN, Value)]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "2017 U.S. Primary Energy Consumption by Source"
sublab = "Data: U.S. Energy Information Administration"
gval = "X"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
bar_annual = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_bar(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
# coord_flip turns the sorted x factor into horizontal bars
coord_flip() +
scale_y_comma(expand = c(0,0)) +
guides(fill = FALSE) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(bar_annual,
filename = "Energy_Primary Energy Consumption by Source_Annual_2017_BP.png",
width = 11.75,
height = 6.25,
dpi = 400)
# (TOTALS ONLY) ANNUAL AREA PLOT AND LINE PLOT -------------
# area plot uses only the two sub-totals (stacking the grand total as
# well would double-count); the line plot below adds the grand total
dt = dt_annual[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels"))]
xval = dt[, Year]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Annual U.S. Primary Energy Consumption by Source (1949 - 2017)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
# NOTE: reuses the name area_annual, overwriting the per-source version
area_annual = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_area(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
scale_x_continuous(breaks = seq(1950,2017,5), expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(area_annual,
filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_Totals_ATS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# line plot: all three aggregate series, direct-labelled
dt = dt_annual[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels",
"Total Primary Energy"))]
xval = dt[, Year]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "1949 - 2017 Annual U.S. Primary Energy Consumption by Source"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
line_annual = ggplot(dt, aes(x = xval, y = yval, color = fillval)) +
geom_line(stat = "identity", size = 0.7) +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
color = leglab) +
theme_ipsum_rc(grid = gval) +
scale_color_manual(breaks = leg.ord, values = plot.cols) +
scale_x_continuous(breaks = seq(1950,2017,5), expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
guides(color = FALSE) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
# wider right margin: the "Total Renewable Energy" label is long
theme(plot.margin = unit(c(1,9,1,1), "lines"))
line_annual_2 <- ggplotGrob(line_annual)
line_annual_2$layout$clip[line_annual_2$layout$name == "panel"] <- "off"
# grid.draw(gt1)
ggsave(line_annual_2,
filename = "Energy_Primary Energy Consumption by Source_Annual_1949-2017_Totals_LTS.png",
width = 11.75,
height = 6.25,
dpi = 400)
# (TOTALS ONLY) MONTHLY AREA PLOT AND LINE PLOT -------------
# monthly counterpart of the totals section above: area plot stacks the
# two sub-totals, line plot adds the grand total
dt = dt_month[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels"))]
xval = dt[, Month]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Monthly U.S. Primary Energy Consumption By Source (January 1973 - April 2018)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
area_month = ggplot(dt, aes(x = xval, y = yval, fill = fillval)) +
geom_area(stat = "identity") +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
fill = leglab) +
theme_ipsum_rc(grid = gval) +
scale_fill_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,1,1,1), "lines"))
ggsave(area_month,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_Totals_ATS.png",
width = 11.75,
height = 6.25,
dpi = 400)
dt = dt_month[ MSN %in% c("Total Fossil Fuels",
"Total Renewable Energy",
"Total Primary Energy")][, MSN := factor(MSN, levels = c("Total Renewable Energy",
"Total Fossil Fuels",
"Total Primary Energy"))]
xval = dt[, Month]
yval = dt[, Value]
fillval = dt[, MSN]
tlab = "Monthly U.S. Primary Energy Consumption By Source (January 1973 - April 2018)"
sublab = "Data: U.S. Energy Information Administration"
gval = "Y"
xlab = NULL
ylab = "Quadrillion BTU"
leglab = ""
leg.ord = levels(fillval)
plot.cols = source.cols
line_month = ggplot(dt, aes(x = xval, y = yval, color = fillval)) +
geom_line(stat = "identity", size = 0.7) +
labs(title = tlab,
subtitle = sublab,
x = xlab,
y = ylab,
color = leglab) +
theme_ipsum_rc(grid = gval) +
scale_color_manual(breaks = leg.ord, values = plot.cols) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y", expand = c(0,0)) +
scale_y_continuous(expand = c(0.01,0)) +
geom_dl(aes(label = MSN), method = list(dl.trans(x = x + .3), "last.bumpup", cex = 1.1, fontfamily = "Roboto Condensed")) +
guides(color = FALSE) +
theme(plot.title = element_text(size = 21, hjust = 0.5, face = "bold"),
plot.subtitle = element_text(size = 15, hjust = 0.5),
axis.title.x = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.title.y = element_text(size = 24, hjust = 0.5, face = "bold"),
axis.text.x = element_text(size = 18, face="bold"),
axis.text.y = element_text(size = 18, face="bold"),
legend.text = element_text(size = 14, face = "bold")) +
theme(plot.margin = unit(c(1,10,1,1), "lines"))
line_month_2 <- ggplotGrob(line_month)
line_month_2$layout$clip[line_month_2$layout$name == "panel"] <- "off"
# grid.draw(gt1)
ggsave(line_month_2,
filename = "Energy_Primary Energy Consumption by Source_Monthly_Jan1973-Apr2018_Totals_LTS.png",
width = 11.75,
height = 6.25,
dpi = 400)
|
# season1_co-usage_heatmap.r
# calculate character co-usage rates for each character based on anther's ladder wii u season 1 data
# co-usage rate is defined as [...]
library(tidyverse)
library(png)
library(grid)
library(gridExtra)
library(RMySQL)
# Pull per-player, per-character usage stats for season 1 (season_id = 7)
# from the local MySQL dump; columns: player_id, name, times_used, wins.
cnx <- dbConnect(MySQL(), dbname='anthers_02_02_2016')
df <- dbGetQuery(cnx, 'select player_ladder_stat_characters.player_id, characters.name, player_ladder_stat_characters.times_used, player_ladder_stat_characters.wins from player_ladder_stat_characters join characters on player_ladder_stat_characters.character_id = characters.id where season_id = 7;')
dbDisconnect(cnx)
# make character name field a factor for easy indexing
df_backup<-df # original copy in case of needed reversion to untouched query
df$name<-as.factor(df$name)
# create matrix showing cumulative co-usage shares
# rows = mains, cols = characters used alongside that main; dimnames are
# the factor levels, so factor values can index the matrix directly
nchars<-nlevels(df$name)
use_mat<-matrix(0, nrow=nchars, ncol=nchars)
dimnames(use_mat)<-list(levels(df$name), levels(df$name))
players<-unique(df$player_id) # or distinct(df, player_id)
# define a character being used as a main if it was used in the majority of a given user's matches
main_count<-rep(0, length.out=nchars)
# One pass per player: find their most-played character (their "main") and
# credit that main's row of use_mat with the player's fractional usage of
# every character they played.
for (player in players) {
# get individual player data
sub_df<-subset(df, df$player_id==player) # or filter(df, player_id==player)
# get fractional usage rates per character
# column 3 is times_used (see the select order in the query above)
use_prc<-sub_df[,3]/sum(sub_df[,3])
# get most used character and increment global mains vector
# which.max breaks ties by taking the first match
main_ind<-which.max(use_prc)
main_char<-sub_df$name[main_ind]
# factor subscripts index by integer level code, which lines up with the
# level-ordered positions of main_count and the dimnames of use_mat
main_count[main_char]<-main_count[main_char]+1
# for each character used, add usage rate to main character's entry
for (i in 1:nrow(sub_df)) {
use_char<-sub_df$name[i]
use_mat[main_char, use_char]<-use_mat[main_char, use_char] + use_prc[i]
}
}
use_mat_backup<-use_mat # copy in case of reversion
# fraction of each character's mains who also play that character heavily
# (diagonal holds cumulative self-use; main_count normalizes per main)
self_use_rate<-diag(use_mat)/main_count
diag(use_mat)<-0 # remove dominating self-use values on diagonal
diag(use_mat)<-apply(use_mat, 1, median) # reset self-use as baseline median for secondaries to better illustrate +/- relative to other secondaries
# scale operates on columns, so double transpose
use_mat<-t(scale(t(use_mat), center=diag(use_mat), scale=rowSums(use_mat))) # center on per-character medians, scale by overall secondary usage per character to normalize
# rows with zero secondary usage divide 0/0 above; map NaN back to 0
use_mat[is.nan(use_mat)]<-0
# tidy data for plotting
# long format: one row per (main, secondary) pair with its usage rate
plot_df<-as.data.frame(use_mat) %>% mutate(Mains=rownames(use_mat)) %>%
gather(key=Secondaries, value=Usage_Rate, -Mains)
# alternate version for matrices with var names in dimnames: reshape2:::melt.matrix(use_mat)
######
# Plotting calls
######
# plot1: basic plot
# simple white-to-black heatmap with text axis labels
p1 <- ggplot(plot_df, aes(x=Secondaries, y=Mains)) +
geom_tile(aes(fill=Usage_Rate), colour='grey50') +
scale_fill_gradient2(low='white', high='black') +
theme(legend.position='none') +
# theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.5)) # perpendicular
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1)) # slanted
ggsave(file='anther_s1_cusage_hmap01.png', width=12, height=8, dpi=100, plot=p1)
# plot 2: icon version
# note for plot margin: default is 6 all around. order is clockwise from top
# prefix/suffix for stock icon files, e.g. ../StockIcons/stock_90_mario_01.png
imgpre<-'../StockIcons/stock_90_'
# NOTE(review): imgpost appears unused -- make_rasters hard-codes its suffixes
imgpost<-'_01.png'
# Build a named list of stock-icon rasterGrobs for the given characters.
#
# Args:
#   char_vec: character vector of in-game display names (e.g. rownames of
#     the usage matrix). Names absent from the lookup table are simply
#     lowercased to form the icon file stem (e.g. 'Mario' -> 'mario').
#   alpha: icon opacity in [0, 1] (0 = transparent, 1 = opaque; default 1).
# Returns:
#   A named list of grid::rasterGrob objects, keyed by the (remapped)
#   file stem -- same keys as the original if/else chain produced.
# Reads icon PNGs from the global path prefix imgpre.
make_rasters<-function(char_vec, alpha=1) {
    # display name -> icon file stem, for names that don't just lowercase
    # (data-driven replacement for the former 29-branch if chain)
    stem_map<-c(
        'Captain Falcon'='captain',
        'King Dedede'='dedede',
        'Diddy Kong'='diddy',
        'Donkey Kong'='donkey',
        'Dr. Mario'='drmario',
        'Duck Hunt Duo'='duckhunt',
        'Mr. Game And Watch'='gamewatch',
        'Ganondorf'='ganon',
        'Greninja'='gekkouga',
        'Corrin'='kamui',
        'Bowser'='koopa',
        'Bowser Jr.'='koopajr',
        'Little Mac'='littlemac',
        'Charizard'='lizardon',
        'Mii Brawler'='miienemyf',
        'Mii Gunner'='miigunner',
        'Mii Swordsman'='miiswordsman',
        'Villager'='murabito',
        'Olimar'='pikmin',
        'Dark Pit'='pitb',
        'Jigglypuff'='purin',
        'Robin'='reflet',
        'R.O.B.'='robot',
        'Megaman'='rockman',
        'Rosalina And Luma'='rosetta',
        'Zero Suit Samus'='szerosuit',
        'Toon Link'='toonlink',
        'Wii Fit Trainer'='wiifit',
        'Random'='omakase'
    )
    raster_list<-list()
    for (char in char_vec) {
        # remap display name to file stem where needed; the list key is the
        # remapped stem, matching the original behaviour
        key<-if (char %in% names(stem_map)) stem_map[[char]] else char
        # costume-suffix exceptions: Corrin/Robin icons use '_02',
        # Wario's uses '_05', everything else '_01'
        if (key=='kamui' | key=='reflet') {
            img<-readPNG(paste(imgpre, key, '_02.png', sep=''))
        } else if (key=='Wario') {
            img<-readPNG(paste(imgpre, tolower(key), '_05.png', sep=''))
        } else {
            img<-readPNG(paste(imgpre, tolower(key), '_01.png', sep=''))
        }
        # fold alpha into the RGBA channels of the raster
        pre_raster<-matrix(rgb(img[,,1], img[,,2], img[,,3], img[,,4]*alpha), nrow=dim(img)[1])
        # turn into raster object
        raster_list[[key]]<-rasterGrob(pre_raster, interpolate=TRUE)
    }
    return(raster_list)
}
# Place `raster` as a custom annotation in a square of half-width
# `img_offset` centred at data coordinates (x, y).
make_annotation<-function(raster, x, y, img_offset) {
    x_range<-c(x-img_offset, x+img_offset)
    y_range<-c(y-img_offset, y+img_offset)
    annotation_custom(raster, xmin=x_range[1], xmax=x_range[2], ymin=y_range[1], ymax=y_range[2])
}
rasters<-make_rasters(rownames(use_mat))
# make vertial strip for 'mains' axis
# icons are laid out at integer positions; each annotation is a square of
# half-width y_offset around its position
y_offset<-1
yplot<-seq(from=y_offset, to=nchars*y_offset, by=y_offset)
y_strip <- ggplot(data=as.data.frame(yplot), aes(y=yplot)) +
mapply(make_annotation, rasters, 0, yplot, y_offset) +
theme_classic() +
theme(axis.ticks=element_blank()) +
scale_x_continuous(limits=c(-y_offset, y_offset), expand=c(0, 0)) +
scale_y_discrete(expand=c(0.01,0)) +
theme(axis.text.x=element_blank(), axis.text.y=element_blank()) +
theme(axis.title.x=element_blank()) + ylab('Mains') +
theme(plot.margin=unit(c(6, 0, 0, 6), 'pt'))
# make horizontal strip for 'secondaries' axis
x_offset<-1
xplot<-seq(from=x_offset, to=nchars*x_offset, by=x_offset)
x_strip<-ggplot(data=as.data.frame(xplot), aes(x=xplot)) +
mapply(make_annotation, rasters, xplot, 0, x_offset) +
theme_classic() +
theme(axis.ticks=element_blank()) +
scale_y_continuous(limits=c(-x_offset, x_offset), expand=c(0,0)) +
scale_x_discrete(expand=c(0.01, 0)) +
theme(axis.text.x=element_blank(), axis.text.y=element_blank()) +
theme(axis.title.y=element_blank()) + xlab('Secondaries') +
theme(plot.margin=unit(c(0,6,6,0), 'pt'))
# make totally blank plot to take up lower left corner space
blank_plot<-ggplot(data=as.data.frame(0)) + geom_blank() + theme_classic() + labs(x='', y='') + theme(plot.margin=unit(c(0,0,6,6), 'pt'))
# suppress axes from previous plot
# (heatmap itself drops all axis text; the icon strips stand in for labels)
p2_hmap <- ggplot(plot_df, aes(x=Secondaries, y=Mains)) +
geom_tile(aes(fill=Usage_Rate), colour='grey50') +
scale_fill_gradient2(low='white', high='black') +
theme(legend.position='none') +
theme(axis.text.x=element_blank(), axis.text.y=element_blank()) +
theme(axis.title.x=element_blank(), axis.title.y=element_blank()) +
theme(plot.margin=unit(c(6,6,0,0), 'pt'))
# combine for final plot
# 2x2 grid: [y_strip | heatmap] over [blank | x_strip], sized so the
# strips stay thin relative to the heatmap panel
p2 <- arrangeGrob(y_strip, p2_hmap, blank_plot, x_strip, nrow=2, ncol=2, widths=c(1, 25), heights=c(13, 1))
# grid.draw(p2)
ggsave(file='anther_s1_cusage_hmap02.png', width=12, height=8, dpi=100, plot=p2)
# plot 3: emphasize small differences. illustrate rare usage too
# all values are between -1 and 1, with the vast majority concentrated very close to 0, esp for negative values.
# to bring out differences, we can take a fractional root to separate out values near 0
exp_pow<-3
# signed fractional root: sign(x) * |x|^(1/exp_pow) spreads values near 0
# while preserving sign (replaces the former temp-column + ifelse two-step
# with the standard idiom; result is identical, incl. sign(0)*0 == 0)
plot_df <- plot_df %>%
    mutate(PowUsage = sign(Usage_Rate) * abs(Usage_Rate)^(1/exp_pow))
# diverging palette: blue = below-baseline usage, red = above-baseline
p3 <- ggplot(plot_df, aes(x=Secondaries, y=Mains)) +
    geom_tile(aes(fill=PowUsage), colour='grey50') +
    scale_fill_gradient2(low='blue', high='red') +
    theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1)) # slanted
ggsave(file='anther_s1_cusage_hmap03.png', width=12, height=8, dpi=100, plot=p3)
#### testing area
# plot showing similarity between mains
# hierarchical clustering of mains by manhattan distance between their
# secondary-usage profiles (rows of use_mat)
tim<-dist(use_mat, method='manhattan')
hc<-hclust(tim)
plot(hc)
# similarity between secondaries
# this doesn't make a ton of sense since these don't necessarily add to a similar value - general high use can cluster with general high use even if proportions are off
tom<-dist(t(use_mat), method='manhattan')
h2<-hclust(tom)
plot(h2)
# factor_key preserves column order for Secondaries
# must manually add factor levels (in order) for mains
# heatmap with rows/cols reordered by the cluster dendrograms above
test_df<-as.data.frame(use_mat[hc$order, h2$order]) %>% mutate(Mains=rownames(use_mat)[hc$order]) %>% gather(key=Secondaries, value=Usage_Rate, -Mains, factor_key=TRUE) %>% mutate(Mains=factor(Mains, levels=rownames(use_mat)[hc$order]))
tplot<- ggplot(test_df, aes(x=Secondaries, y=Mains)) +
geom_tile(aes(fill=Usage_Rate), colour='grey50') +
scale_fill_gradient2(low='white', high='black') +
theme(legend.position='none') +
# theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.5)) # perpendicular
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1)) # slanted
# try differences based on exp pow
# same clustering idea on the power-transformed values
pow_mat <- plot_df %>% select(-Usage_Rate) %>% spread(key=Secondaries, value=PowUsage) %>% select(-Mains)
# NOTE(review): spread() returns a tibble, which does not support rownames;
# this assignment may warn / be dropped -- pow_mat rows are only reordered
# positionally below, so the code still works, but verify.
rownames(pow_mat)<-rownames(use_mat)
pow_dist_main<-dist(pow_mat, method='manhattan')
pow_dist_secd<-dist(t(pow_mat), method='manhattan')
pow_ord_main<-hclust(pow_dist_main)$order
pow_ord_secd<-hclust(pow_dist_secd)$order
pow_df<-as.data.frame(pow_mat[pow_ord_main, pow_ord_secd]) %>%
mutate(Mains=rownames(use_mat)[pow_ord_main]) %>%
gather(key=Secondaries, value=PowUsage, -Mains, factor_key=TRUE) %>%
mutate(Mains=factor(Mains, levels=rownames(use_mat)[pow_ord_main]))
pow_plot<-ggplot(pow_df, aes(x=Secondaries, y=Mains)) +
geom_tile(aes(fill=PowUsage), colour='grey50') +
scale_fill_gradient2(low='blue', high='red') +
theme(legend.position='none') +
# theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.5)) # perpendicular
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1)) # slanted
| /anther_season1_co-usage/season1_co-usage_heatmap.r | no_license | rmuraglia/SmashViz | R | false | false | 10,446 | r | # season1_co-usage_heatmap.r
# calculate character co-usage rates for each character based on anther's ladder wii u season 1 data
# co-usage rate is defined as [...]
# NOTE(review): this region is a verbatim duplicate of
# season1_co-usage_heatmap.r earlier in this file (dataset text column).
library(tidyverse)
library(png)
library(grid)
library(gridExtra)
library(RMySQL)
cnx <- dbConnect(MySQL(), dbname='anthers_02_02_2016')
df <- dbGetQuery(cnx, 'select player_ladder_stat_characters.player_id, characters.name, player_ladder_stat_characters.times_used, player_ladder_stat_characters.wins from player_ladder_stat_characters join characters on player_ladder_stat_characters.character_id = characters.id where season_id = 7;')
dbDisconnect(cnx)
# make character name field a factor for easy indexing
df_backup<-df # original copy in case of needed reversion to untouched query
df$name<-as.factor(df$name)
# create matrix showing cumulative co-usage shares
nchars<-nlevels(df$name)
use_mat<-matrix(0, nrow=nchars, ncol=nchars)
dimnames(use_mat)<-list(levels(df$name), levels(df$name))
players<-unique(df$player_id) # or distinct(df, player_id)
# define a character being used as a main if it was used in the majority of a given user's matches
main_count<-rep(0, length.out=nchars)
# per player: find their main, then credit the main's row of use_mat with
# fractional usage of every character the player used
for (player in players) {
# get individual player data
sub_df<-subset(df, df$player_id==player) # or filter(df, player_id==player)
# get fractional usage rates per character
use_prc<-sub_df[,3]/sum(sub_df[,3])
# get most used character and increment global mains vector
main_ind<-which.max(use_prc)
main_char<-sub_df$name[main_ind]
main_count[main_char]<-main_count[main_char]+1
# for each character used, add usage rate to main character's entry
for (i in 1:nrow(sub_df)) {
use_char<-sub_df$name[i]
use_mat[main_char, use_char]<-use_mat[main_char, use_char] + use_prc[i]
}
}
use_mat_backup<-use_mat # copy in case of reversion
self_use_rate<-diag(use_mat)/main_count
diag(use_mat)<-0 # remove dominating self-use values on diagonal
diag(use_mat)<-apply(use_mat, 1, median) # reset self-use as baseline median for secondaries to better illustrate +/- relative to other secondaries
# scale operates on columns, so double transpose
use_mat<-t(scale(t(use_mat), center=diag(use_mat), scale=rowSums(use_mat))) # center on per-character medians, scale by overall secondary usage per character to normalize
use_mat[is.nan(use_mat)]<-0
# tidy data for plotting
plot_df<-as.data.frame(use_mat) %>% mutate(Mains=rownames(use_mat)) %>%
gather(key=Secondaries, value=Usage_Rate, -Mains)
# alternate version for matrices with var names in dimnames: reshape2:::melt.matrix(use_mat)
######
# Plotting calls
######
# plot1: basic plot
p1 <- ggplot(plot_df, aes(x=Secondaries, y=Mains)) +
geom_tile(aes(fill=Usage_Rate), colour='grey50') +
scale_fill_gradient2(low='white', high='black') +
theme(legend.position='none') +
# theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.5)) # perpendicular
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1)) # slanted
ggsave(file='anther_s1_cusage_hmap01.png', width=12, height=8, dpi=100, plot=p1)
# plot 2: icon version
# note for plot margin: default is 6 all around. order is clockwise from top
# NOTE(review): duplicate copy of the icon helpers defined earlier in this file.
imgpre<-'../StockIcons/stock_90_'
imgpost<-'_01.png'
make_rasters<-function(char_vec, alpha=1) {
raster_list<-list()
for (i in char_vec) {
# go through exceptions for character naming
if (i=='Captain Falcon') { i<-'captain' }
if (i=='King Dedede') { i<-'dedede' }
if (i=='Diddy Kong') { i<-'diddy' }
if (i=='Donkey Kong') { i<-'donkey' }
if (i=='Dr. Mario') { i<-'drmario' }
if (i=='Duck Hunt Duo') { i<-'duckhunt' }
if (i=='Mr. Game And Watch') { i<-'gamewatch' }
if (i=='Ganondorf') { i<-'ganon' }
if (i=='Greninja') { i<-'gekkouga' }
if (i=='Corrin') { i<-'kamui' }
if (i=='Bowser') { i<-'koopa' }
if (i=='Bowser Jr.') { i<-'koopajr' }
if (i=='Little Mac') { i<-'littlemac' }
if (i=='Charizard') { i<-'lizardon' }
if (i=='Mii Brawler') { i<-'miienemyf' }
if (i=='Mii Gunner') { i<-'miigunner' }
if (i=='Mii Swordsman') { i<-'miiswordsman' }
if (i=='Villager') { i<-'murabito' }
if (i=='Olimar') { i<-'pikmin' }
if (i=='Dark Pit') { i<-'pitb' }
if (i=='Jigglypuff') { i<-'purin' }
if (i=='Robin') { i<-'reflet' }
if (i=='R.O.B.') { i<-'robot' }
if (i=='Megaman') { i<-'rockman' }
if (i=='Rosalina And Luma') { i<-'rosetta' }
if (i=='Zero Suit Samus') { i<-'szerosuit' }
if (i=='Toon Link') { i<-'toonlink' }
if (i=='Wii Fit Trainer') { i<-'wiifit' }
if (i=='Random') { i<-'omakase' }
# go through exceptions for character icons
if (i=='kamui' | i=='reflet') { img<-readPNG(paste(imgpre, i, '_02.png', sep='')) }
else if (i=='Wario') { img<-readPNG(paste(imgpre, tolower(i), '_05.png', sep='')) }
else { img<-readPNG(paste(imgpre, tolower(i), '_01.png', sep=''))}
# alpha controls transparency (0=transparent, 1=opaque). default value is 1
pre_raster<-matrix(rgb(img[,,1], img[,,2], img[,,3], img[,,4]*alpha), nrow=dim(img)[1])
# turn into raster object
raster_list[[i]]<-rasterGrob(pre_raster, interpolate=TRUE)
}
return(raster_list)
}
# place `raster` in a square of half-width img_offset centred at (x, y)
make_annotation<-function(raster, x, y, img_offset) {
annotation_custom(raster, xmin=x-img_offset, xmax=x+img_offset, ymin=y-img_offset, ymax=y+img_offset)
}
# Build one icon raster per character (rows of the usage matrix)
rasters<-make_rasters(rownames(use_mat))
# make vertical strip of character icons to label the 'mains' (y) axis;
# y_offset doubles as the spacing between icons and each icon's half-size
y_offset<-1
yplot<-seq(from=y_offset, to=nchars*y_offset, by=y_offset)
y_strip <- ggplot(data=as.data.frame(yplot), aes(y=yplot)) +
mapply(make_annotation, rasters, 0, yplot, y_offset) +   # one icon per character, stacked at x=0
theme_classic() +
theme(axis.ticks=element_blank()) +
scale_x_continuous(limits=c(-y_offset, y_offset), expand=c(0, 0)) +
scale_y_discrete(expand=c(0.01,0)) +   # NOTE(review): yplot is numeric; confirm a discrete y scale is intended here
theme(axis.text.x=element_blank(), axis.text.y=element_blank()) +   # icons replace the axis text
theme(axis.title.x=element_blank()) + ylab('Mains') +
theme(plot.margin=unit(c(6, 0, 0, 6), 'pt'))
# make horizontal strip of character icons to label the 'secondaries' (x) axis;
# mirrors the y_strip construction above with x and y swapped
x_offset<-1
xplot<-seq(from=x_offset, to=nchars*x_offset, by=x_offset)
x_strip<-ggplot(data=as.data.frame(xplot), aes(x=xplot)) +
mapply(make_annotation, rasters, xplot, 0, x_offset) +   # one icon per character, laid out at y=0
theme_classic() +
theme(axis.ticks=element_blank()) +
scale_y_continuous(limits=c(-x_offset, x_offset), expand=c(0,0)) +
scale_x_discrete(expand=c(0.01, 0)) +   # NOTE(review): xplot is numeric; confirm a discrete x scale is intended here
theme(axis.text.x=element_blank(), axis.text.y=element_blank()) +   # icons replace the axis text
theme(axis.title.y=element_blank()) + xlab('Secondaries') +
theme(plot.margin=unit(c(0,6,6,0), 'pt'))
# make totally blank plot to take up the lower-left corner space of the
# 2x2 grid assembled below
blank_plot<-ggplot(data=as.data.frame(0)) + geom_blank() + theme_classic() + labs(x='', y='') + theme(plot.margin=unit(c(0,0,6,6), 'pt'))
# main heatmap panel: all axis text and titles are suppressed because the
# icon strips built above serve as the axis labels
p2_hmap <- ggplot(plot_df, aes(x=Secondaries, y=Mains)) +
geom_tile(aes(fill=Usage_Rate), colour='grey50') +
scale_fill_gradient2(low='white', high='black') +
theme(legend.position='none') +
theme(axis.text.x=element_blank(), axis.text.y=element_blank()) +
theme(axis.title.x=element_blank(), axis.title.y=element_blank()) +
theme(plot.margin=unit(c(6,6,0,0), 'pt'))
# combine for final plot: top row = y icon strip | heatmap,
# bottom row = blank corner | x icon strip
p2 <- arrangeGrob(y_strip, p2_hmap, blank_plot, x_strip, nrow=2, ncol=2, widths=c(1, 25), heights=c(13, 1))
# grid.draw(p2)  # uncomment to preview interactively
ggsave(file='anther_s1_cusage_hmap02.png', width=12, height=8, dpi=100, plot=p2)
# plot 3: emphasize small differences. illustrate rare usage too.
# Usage rates lie in [-1, 1] and cluster tightly around 0 (especially the
# negative values), so a fractional power (cube root here) spreads out the
# near-zero values while keeping the sign of each rate.
exp_pow <- 3
plot_df <- plot_df %>%
  mutate(PowUsage = ifelse(Usage_Rate < 0,
                           -(abs(Usage_Rate)^(1 / exp_pow)),
                           abs(Usage_Rate)^(1 / exp_pow)))
p3 <- ggplot(plot_df, aes(x = Secondaries, y = Mains)) +
  geom_tile(aes(fill = PowUsage), colour = 'grey50') +
  scale_fill_gradient2(low = 'blue', high = 'red') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))  # slanted labels
ggsave(file = 'anther_s1_cusage_hmap03.png', width = 12, height = 8, dpi = 100, plot = p3)
#### testing area
# Dendrogram of similarity between mains (rows of the usage matrix)
main_dist <- dist(use_mat, method = 'manhattan')
main_clust <- hclust(main_dist)
plot(main_clust)
# Similarity between secondaries (columns). Interpret with care: the columns
# are not normalised, so generally-popular secondaries can cluster together
# even when their usage proportions differ.
secd_dist <- dist(t(use_mat), method = 'manhattan')
secd_clust <- hclust(secd_dist)
plot(secd_clust)
# Reorder both axes by their cluster order before melting to long format.
# factor_key preserves the column order for Secondaries; Mains needs its
# factor levels set explicitly (in cluster order) to keep the row order.
test_df <- as.data.frame(use_mat[main_clust$order, secd_clust$order]) %>%
  mutate(Mains = rownames(use_mat)[main_clust$order]) %>%
  gather(key = Secondaries, value = Usage_Rate, -Mains, factor_key = TRUE) %>%
  mutate(Mains = factor(Mains, levels = rownames(use_mat)[main_clust$order]))
tplot <- ggplot(test_df, aes(x = Secondaries, y = Mains)) +
  geom_tile(aes(fill = Usage_Rate), colour = 'grey50') +
  scale_fill_gradient2(low = 'white', high = 'black') +
  theme(legend.position = 'none') +
  theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1))  # slanted labels
# try differences based on exp pow: repeat the clustered heatmap using the
# power-transformed usage values (PowUsage) instead of the raw rates
pow_mat <- plot_df %>% select(-Usage_Rate) %>% spread(key=Secondaries, value=PowUsage) %>% select(-Mains)
# NOTE(review): spread() orders rows by the Mains key; this assignment assumes
# that order matches rownames(use_mat) -- confirm, otherwise rows are mislabelled
rownames(pow_mat)<-rownames(use_mat)
# Manhattan distances between rows (mains) and columns (secondaries),
# then hierarchical clustering to get a display order for each axis
pow_dist_main<-dist(pow_mat, method='manhattan')
pow_dist_secd<-dist(t(pow_mat), method='manhattan')
pow_ord_main<-hclust(pow_dist_main)$order
pow_ord_secd<-hclust(pow_dist_secd)$order
# Long format with both axes in cluster order (factor_key keeps the
# Secondaries column order; Mains levels are set explicitly)
pow_df<-as.data.frame(pow_mat[pow_ord_main, pow_ord_secd]) %>%
mutate(Mains=rownames(use_mat)[pow_ord_main]) %>%
gather(key=Secondaries, value=PowUsage, -Mains, factor_key=TRUE) %>%
mutate(Mains=factor(Mains, levels=rownames(use_mat)[pow_ord_main]))
pow_plot<-ggplot(pow_df, aes(x=Secondaries, y=Mains)) +
geom_tile(aes(fill=PowUsage), colour='grey50') +
scale_fill_gradient2(low='blue', high='red') +
theme(legend.position='none') +
# theme(axis.text.x=element_text(angle=90, hjust=1, vjust=0.5)) # perpendicular
theme(axis.text.x=element_text(angle=45, hjust=1, vjust=1)) # slanted
|
## Load batter and position data/definitions from sibling scripts in the
## working directory, then write the batter scores out as a CSV file.
source('./batters.r')
source('./positions.r')
## 'batters' is presumably created by batters.r -- TODO confirm against that file
write.csv(batters, file="./2018-score-batters.csv")
| /score.r | no_license | MarkBorcherding/baseballs | R | false | false | 103 | r | source('./batters.r')
source('./positions.r')
write.csv(batters, file="./2018-score-batters.csv")
|
## plot2: Global Active Power vs. time for 2007-02-01 and 2007-02-02,
## saved as plot2.png (480x480).
# NOTE(review): hard-coded absolute working directory; portable scripts should avoid setwd()
setwd("C:/Users/Tong/Desktop/Coursera/data")
# Read rows starting after the line matching "31/1/2007" (the file is read a
# second time by readLines() just to locate that offset). Column names are
# supplied manually; '?' marks missing values in this data set.
household_data <- read.table("household_power_consumption.txt",sep = ";",header = FALSE,col.names = c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),na.strings ="?",skip = grep("31/1/2007",readLines("household_power_consumption.txt")),nrows = 4319)
# Keep only the two target dates (dates are in d/m/Y format in the raw file)
household_data <- subset(household_data,Date %in% c("1/2/2007","2/2/2007"))
# Combine Date and Time into a single POSIXct timestamp for the x axis
household_data$DateTime <- as.POSIXct(strptime(paste(household_data$Date,household_data$Time,sep = " "),format = "%e/%m/%Y %H:%M:%S"))
# Use an English locale so any date labels render in English on any system
Sys.setlocale("LC_ALL","English")
# Open the PNG device, draw an empty frame (type = "n"), then add the line
png("plot2.png",width = 480, height = 480)
with(household_data,plot(DateTime,Global_active_power,ylab = "Global Active Power (kilowatts)",xlab = "",type = "n"))
with(household_data,lines(DateTime,Global_active_power))
dev.off()
household_data <- read.table("household_power_consumption.txt",sep = ";",header = FALSE,col.names = c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3"),na.strings ="?",skip = grep("31/1/2007",readLines("household_power_consumption.txt")),nrows = 4319)
household_data <- subset(household_data,Date %in% c("1/2/2007","2/2/2007"))
household_data$DateTime <- as.POSIXct(strptime(paste(household_data$Date,household_data$Time,sep = " "),format = "%e/%m/%Y %H:%M:%S"))
Sys.setlocale("LC_ALL","English")
png("plot2.png",width = 480, height = 480)
with(household_data,plot(DateTime,Global_active_power,ylab = "Global Active Power (kilowatts)",xlab = "",type = "n"))
with(household_data,lines(DateTime,Global_active_power))
dev.off() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.