content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Register a scroll-triggered "bounceInRight" animation for every animated
# element: the 24 "hc*" cards and the 3 "lf*" elements. lapply gives each
# observer its own function environment, so every callback captures its own
# element id (a plain for-loop would lazily share the loop variable).
invisible(lapply(
  c(paste0("hc", 1:24), paste0("lf", 1:3)),
  function(id) observe(addScrollAnim(session, id, "bounceInRight"))
))
| /server/observer.R | no_license | nicoFhahn/covid_shiny | R | false | false | 1,527 | r | observe(addScrollAnim(session, "hc1", "bounceInRight"))
# Register a scroll-triggered "bounceInRight" animation for the remaining
# animated elements ("hc1" is registered on the preceding line): cards
# "hc2".."hc24" and elements "lf1".."lf3". lapply gives each observer its
# own function environment, so every callback captures its own element id.
invisible(lapply(
  c(paste0("hc", 2:24), paste0("lf", 1:3)),
  function(id) observe(addScrollAnim(session, id, "bounceInRight"))
))
|
load_df$Charge <- ifelse (load_df$load == -0.5, "Sans", "Avec")
# negative plot -----------------------------------------------------------
load_neg <- subset(load_df, usvalence==-0.5)
data_plot_neg = load_neg %>%
ggplot(aes(x = as.character(response), y = RWAscore,
fill = Charge, color = Charge)) +
#geom_boxplot(position = position_dodge(width = -1.4), alpha = 1, width = 0.1, outlier.shape = NA) +
geom_point(position = position_jitterdodge(jitter.width = 0.2, jitter.height = 0.05,
dodge.width = -0.75), alpha = 0.4, size = 1,
shape = 19, inherit.aes = TRUE) +
#stat_summary(fun.y = "median", geom = "point", size = 3, color="#ff738a",
#shape = "25"|", position = position_dodge(width = -1.4), alpha = 1) +
#scale_discrete_manual(aesthetics = "point_shape", values = c(-0.5, 0.5)) +
labs(x = 'Évaluations', y = 'RWA', fill="Charge cognitive", color="Charge cognitive") +
scale_fill_manual(values=c("#73a5ff", "#50ce76")) +
scale_color_manual(values= c("#73a5ff", "#50ce76"), guide = "none") +
coord_cartesian(ylim=c(1,9)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10), expand=c(0.01,0)) +
labs(subtitle="Contre-conditionnement positif") +
theme_ipsum_rc(base_size = 13,
subtitle_size = 20,
axis_title_size = 15) +
guides(fill = guide_legend(override.aes = list(linetype = 0)),
color = guide_legend(override.aes = list(linetype = 0))) +
coord_flip()
data_plot_neg <- ggMarginal(data_plot_neg, margins = "x", alpha = 0.6,
type = "histogram", size = 4, fill = "gray", colour = "lightgray")
# positive plot -----------------------------------------------------------
load_pos <- subset(load_df, usvalence==0.5)
data_plot_pos = load_pos %>%
ggplot(aes(x = as.character(response), y = RWAscore,
fill = Charge, color = Charge)) +
#geom_boxplot(position = position_dodge(width = -1.4), alpha = 1, width = 0.1, outlier.shape = NA) +
geom_point(position = position_jitterdodge(jitter.width = 0.2, jitter.height = 0.05,
dodge.width = -0.75), alpha = 0.4, size = 1,
shape = 19, inherit.aes = TRUE) +
#stat_summary(fun.y = "median", geom = "point", size = 3, color="#ff738a",
#shape = "25"|", position = position_dodge(width = -1.4), alpha = 1) +
#scale_discrete_manual(aesthetics = "point_shape", values = c(-0.5, 0.5)) +
labs(x = 'Évaluations', y = 'RWA', fill="Charge cognitive", color="Charge cognitive") +
scale_fill_manual(values=c("#73a5ff", "#50ce76")) +
scale_color_manual(values= c("#73a5ff", "#50ce76"), guide = "none") +
coord_cartesian(ylim=c(1,9)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10), expand=c(0.01,0)) +
labs(subtitle="Contre-conditionnement négatif") +
theme_ipsum_rc(base_size = 13,
subtitle_size = 20,
axis_title_size = 15) +
guides(fill = guide_legend(override.aes = list(linetype = 0)),
color = guide_legend(override.aes = list(linetype = 0))) +
coord_flip()
data_plot_pos <- ggMarginal(data_plot_pos, margins = "x", alpha = 0.6,
type = "histogram", size = 4, fill = "gray", colour = "lightgray")
# Combine the negative and positive panels side by side
data_plot_all <- ggarrange(data_plot_neg,
                           data_plot_pos,
                           ncol = 2, nrow = 1)
# uncomment to display plot
# data_plot_all
# Save the combined figure. `plot =` is passed explicitly: without it
# ggsave() writes the *last displayed* plot, and `data_plot_all` is never
# printed above, so the wrong figure would be saved to disk.
ggsave("plots/data_plot_all_xp09_french.pdf", plot = data_plot_all,
       width = 50, height = 15, units = "cm")
# Combine the spaghetti/marginal panels with the point plots (2 x 2 grid)
data_spag_all <- ggarrange(marg_plot_n,
                           marg_plot_p,
                           data_plot_neg,
                           data_plot_pos,
                           ncol = 2, nrow = 2)
# uncomment to display plot
# data_spag_all
# Save the combined figure. `plot =` is passed explicitly: without it
# ggsave() writes the *last displayed* plot, and `data_spag_all` is never
# printed above, so the wrong figure would be saved to disk.
ggsave("plots/data_spag_xp09_french.pdf", plot = data_spag_all,
       width = 50, height = 30, units = "cm")
| /drafts/xp09/04c_plot_point_xp09.R | no_license | bricebeffara/rwa_attitude_change | R | false | false | 3,974 | r |
load_df$Charge <- ifelse (load_df$load == -0.5, "Sans", "Avec")
# negative plot -----------------------------------------------------------
load_neg <- subset(load_df, usvalence==-0.5)
data_plot_neg = load_neg %>%
ggplot(aes(x = as.character(response), y = RWAscore,
fill = Charge, color = Charge)) +
#geom_boxplot(position = position_dodge(width = -1.4), alpha = 1, width = 0.1, outlier.shape = NA) +
geom_point(position = position_jitterdodge(jitter.width = 0.2, jitter.height = 0.05,
dodge.width = -0.75), alpha = 0.4, size = 1,
shape = 19, inherit.aes = TRUE) +
#stat_summary(fun.y = "median", geom = "point", size = 3, color="#ff738a",
#shape = "25"|", position = position_dodge(width = -1.4), alpha = 1) +
#scale_discrete_manual(aesthetics = "point_shape", values = c(-0.5, 0.5)) +
labs(x = 'Évaluations', y = 'RWA', fill="Charge cognitive", color="Charge cognitive") +
scale_fill_manual(values=c("#73a5ff", "#50ce76")) +
scale_color_manual(values= c("#73a5ff", "#50ce76"), guide = "none") +
coord_cartesian(ylim=c(1,9)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10), expand=c(0.01,0)) +
labs(subtitle="Contre-conditionnement positif") +
theme_ipsum_rc(base_size = 13,
subtitle_size = 20,
axis_title_size = 15) +
guides(fill = guide_legend(override.aes = list(linetype = 0)),
color = guide_legend(override.aes = list(linetype = 0))) +
coord_flip()
data_plot_neg <- ggMarginal(data_plot_neg, margins = "x", alpha = 0.6,
type = "histogram", size = 4, fill = "gray", colour = "lightgray")
# positive plot -----------------------------------------------------------
load_pos <- subset(load_df, usvalence==0.5)
data_plot_pos = load_pos %>%
ggplot(aes(x = as.character(response), y = RWAscore,
fill = Charge, color = Charge)) +
#geom_boxplot(position = position_dodge(width = -1.4), alpha = 1, width = 0.1, outlier.shape = NA) +
geom_point(position = position_jitterdodge(jitter.width = 0.2, jitter.height = 0.05,
dodge.width = -0.75), alpha = 0.4, size = 1,
shape = 19, inherit.aes = TRUE) +
#stat_summary(fun.y = "median", geom = "point", size = 3, color="#ff738a",
#shape = "25"|", position = position_dodge(width = -1.4), alpha = 1) +
#scale_discrete_manual(aesthetics = "point_shape", values = c(-0.5, 0.5)) +
labs(x = 'Évaluations', y = 'RWA', fill="Charge cognitive", color="Charge cognitive") +
scale_fill_manual(values=c("#73a5ff", "#50ce76")) +
scale_color_manual(values= c("#73a5ff", "#50ce76"), guide = "none") +
coord_cartesian(ylim=c(1,9)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10), expand=c(0.01,0)) +
labs(subtitle="Contre-conditionnement négatif") +
theme_ipsum_rc(base_size = 13,
subtitle_size = 20,
axis_title_size = 15) +
guides(fill = guide_legend(override.aes = list(linetype = 0)),
color = guide_legend(override.aes = list(linetype = 0))) +
coord_flip()
data_plot_pos <- ggMarginal(data_plot_pos, margins = "x", alpha = 0.6,
type = "histogram", size = 4, fill = "gray", colour = "lightgray")
# Combine the negative and positive panels side by side
data_plot_all <- ggarrange(data_plot_neg,
                           data_plot_pos,
                           ncol = 2, nrow = 1)
# uncomment to display plot
# data_plot_all
# Save the combined figure. `plot =` is passed explicitly: without it
# ggsave() writes the *last displayed* plot, and `data_plot_all` is never
# printed above, so the wrong figure would be saved to disk.
ggsave("plots/data_plot_all_xp09_french.pdf", plot = data_plot_all,
       width = 50, height = 15, units = "cm")
# Combine the spaghetti/marginal panels with the point plots (2 x 2 grid)
data_spag_all <- ggarrange(marg_plot_n,
                           marg_plot_p,
                           data_plot_neg,
                           data_plot_pos,
                           ncol = 2, nrow = 2)
# uncomment to display plot
# data_spag_all
# Save the combined figure. `plot =` is passed explicitly: without it
# ggsave() writes the *last displayed* plot, and `data_spag_all` is never
# printed above, so the wrong figure would be saved to disk.
ggsave("plots/data_spag_xp09_french.pdf", plot = data_spag_all,
       width = 50, height = 30, units = "cm")
|
rankhospital <- function(st, outcome, num = "best") {
  ## Read the outcome data, validate the state and outcome, and return the
  ## hospital name in that state holding the given rank for 30-day
  ## mortality on the requested outcome.
  ##
  ## Args:
  ##   st:      two-letter state abbreviation (e.g. "TX")
  ##   outcome: one of "heart attack", "heart failure", "pneumonia"
  ##   num:     rank to return - an integer, "best" or "worst"
  ##
  ## Returns: hospital name (character), or NA when `num` exceeds the
  ## number of ranked hospitals.
  rawdata <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  if (!(st %in% rawdata$State)) {
    stop("invalid state")
  }
  ## Column holding the 30-day death rate for the requested outcome;
  ## switch() falls through to stop() for an unknown outcome.
  columnNumber <- switch(outcome,
    "heart attack"  = 11,
    "heart failure" = 17,
    "pneumonia"     = 23,
    stop("invalid outcome")
  )
  data <- subset(rawdata, rawdata$State == st)
  ## Rates are stored as text ("Not Available" for missing values); the
  ## coercion warnings for those entries are expected and suppressed.
  ## Filtering on is.na() (instead of matching the character column against
  ## re-formatted numbers, as before) avoids dropping hospitals whose rate
  ## text does not round-trip through as.numeric/as.character (e.g. "10.0").
  rates <- suppressWarnings(as.numeric(data[, columnNumber]))
  keep <- !is.na(rates)
  ## Hospital name is column 2; order by rate, ties broken alphabetically.
  ranked <- data[keep, 2][order(rates[keep], data[keep, 2])]
  if (num == "best") {
    num <- 1
  } else if (num == "worst") {
    num <- length(ranked)
  }
  ranked[num]
}
| /prog3/rankhospital (tatanka-mob's conflicted copy 2013-10-15).R | no_license | aifa/R | R | false | false | 1,138 | r | rankhospital <- function(st, outcome, num = "best") {
## Read the outcome data, validate the state and outcome, and return the
## hospital name in that state holding the given rank for 30-day mortality.
## `num` is an integer rank, "best" or "worst"; NA is returned when the
## rank exceeds the number of ranked hospitals.
rawdata <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
if (!(st %in% rawdata$State)) {
  stop("invalid state")
}
## Column holding the 30-day death rate for the requested outcome;
## switch() falls through to stop() for an unknown outcome.
columnNumber <- switch(outcome,
  "heart attack"  = 11,
  "heart failure" = 17,
  "pneumonia"     = 23,
  stop("invalid outcome")
)
data <- subset(rawdata, rawdata$State == st)
## Rates are stored as text ("Not Available" for missing values); the
## coercion warnings for those entries are expected and suppressed.
## Filtering on is.na() (instead of matching the character column against
## re-formatted numbers, as before) avoids dropping hospitals whose rate
## text does not round-trip through as.numeric/as.character (e.g. "10.0").
rates <- suppressWarnings(as.numeric(data[, columnNumber]))
keep <- !is.na(rates)
## Hospital name is column 2; order by rate, ties broken alphabetically.
ranked <- data[keep, 2][order(rates[keep], data[keep, 2])]
if (num == "best") {
  num <- 1
} else if (num == "worst") {
  num <- length(ranked)
}
ranked[num]
}
|
#' Predict at hold-out locations from a fitted spatial model
#'
#' @param object Fitted model: a list carrying `beta` (regression
#'   coefficients, may be NULL), `theta` (covariance parameters) and `z`
#'   (spatial weights).
#' @param coords Observed coordinates; used to build `dist.mat` when it is
#'   not supplied.
#' @param coords.ho Hold-out coordinates.
#' @param X.ho Design matrix at the hold-out locations (used when
#'   `object$beta` is non-NULL).
#' @param method "krylov" or "spam" use a tapered sparse covariance; any
#'   other value uses a dense covariance via rdist().
#' @param cov.model Covariance family: "exponential", "matern" or
#'   "spherical".
#' @param cov.taper Taper function, "wend1" or "wend2" (tapered methods
#'   only).
#' @param delta Range passed to nearest.dist() and to the taper function.
#' @param dist.mat Optional precomputed cross-distance matrix; built from
#'   `coords.ho`/`coords` when NULL.
#' @param nu Matern smoothness; required when `cov.model = "matern"`.
#' @return Predictions at the hold-out locations.
#' @export
krylov.predict <- function(object,
                           coords = NULL,
                           coords.ho = NULL,
                           X.ho = NULL,
                           method = "krylov",
                           cov.model = "exponential",
                           cov.taper = "wend1",
                           delta = 2,
                           dist.mat = NULL,
                           nu = NULL) {
  beta <- object$beta
  theta <- object$theta
  z <- object$z
  cov.fun <- switch(cov.model,
    exponential = cov.exp,
    matern = cov.mat,
    spherical = cov.sph
  )
  # Scalar conditions use short-circuit &&/|| (not elementwise &).
  # Requiring *both* coordinate sets also catches the case where only one
  # was supplied, which previously fell through and failed later with an
  # obscure error from the covariance call.
  if (is.null(dist.mat) && (is.null(coords.ho) || is.null(coords))) {
    stop("error: either dist.mat or coords.ho and coords must be specified")
  }
  if (cov.model == "matern" && is.null(nu)) {
    # Guard: c(..., NULL, ...) would silently drop the smoothness term
    # and hand a wrong-length parameter vector to the covariance function.
    stop("nu must be specified when cov.model = 'matern'")
  }
  tapered <- method %in% c("krylov", "spam")
  if (is.null(dist.mat)) {
    dist.mat <- if (tapered) {
      # Sparse cross-distances: only pairs within range `delta`.
      nearest.dist(coords.ho, coords, miles = FALSE, delta = delta)
    } else {
      rdist(coords.ho, coords)
    }
  }
  # Parameter vector for the covariance functions; computed once here
  # (previously duplicated verbatim in both method branches).
  model.theta <- switch(cov.model,
    "exponential" = c(theta[2], 1 - theta[3], theta[3]),
    "matern"      = c(theta[2], 1 - theta[3], nu, theta[3]),
    "spherical"   = c(theta[2], 1 - theta[3], theta[3])
  )
  if (tapered) {
    taper.fun <- switch(cov.taper,
      wend1 = cov.wend1,
      wend2 = cov.wend2
    )
    psi0 <- cov.fun(dist.mat, theta = model.theta) *
      taper.fun(dist.mat, theta = delta)
  } else {
    psi0 <- cov.fun(dist.mat, theta = model.theta)
  }
  if (!is.null(beta)) {
    pred <- X.ho %*% beta + psi0 %*% z
  } else {
    pred <- psi0 %*% z
  }
  return(pred)
}
| /R/prediction.R | no_license | TedChu/spKrylov | R | false | false | 1,890 | r | #' @export
krylov.predict <- function(object,
                           coords = NULL,
                           coords.ho = NULL,
                           X.ho = NULL,
                           method = "krylov",
                           cov.model = "exponential",
                           cov.taper = "wend1",
                           delta = 2,
                           dist.mat = NULL,
                           nu = NULL) {
  # Predict at hold-out locations from a fitted spatial model.
  # `object` carries `beta` (regression coefficients, may be NULL),
  # `theta` (covariance parameters) and `z` (spatial weights).
  # method "krylov"/"spam" -> tapered sparse covariance; otherwise dense.
  beta <- object$beta
  theta <- object$theta
  z <- object$z
  cov.fun <- switch(cov.model,
    exponential = cov.exp,
    matern = cov.mat,
    spherical = cov.sph
  )
  # Scalar conditions use short-circuit &&/|| (not elementwise &).
  # Requiring *both* coordinate sets also catches the case where only one
  # was supplied, which previously fell through and failed later with an
  # obscure error from the covariance call.
  if (is.null(dist.mat) && (is.null(coords.ho) || is.null(coords))) {
    stop("error: either dist.mat or coords.ho and coords must be specified")
  }
  if (cov.model == "matern" && is.null(nu)) {
    # Guard: c(..., NULL, ...) would silently drop the smoothness term
    # and hand a wrong-length parameter vector to the covariance function.
    stop("nu must be specified when cov.model = 'matern'")
  }
  tapered <- method %in% c("krylov", "spam")
  if (is.null(dist.mat)) {
    dist.mat <- if (tapered) {
      # Sparse cross-distances: only pairs within range `delta`.
      nearest.dist(coords.ho, coords, miles = FALSE, delta = delta)
    } else {
      rdist(coords.ho, coords)
    }
  }
  # Parameter vector for the covariance functions; computed once here
  # (previously duplicated verbatim in both method branches).
  model.theta <- switch(cov.model,
    "exponential" = c(theta[2], 1 - theta[3], theta[3]),
    "matern"      = c(theta[2], 1 - theta[3], nu, theta[3]),
    "spherical"   = c(theta[2], 1 - theta[3], theta[3])
  )
  if (tapered) {
    taper.fun <- switch(cov.taper,
      wend1 = cov.wend1,
      wend2 = cov.wend2
    )
    psi0 <- cov.fun(dist.mat, theta = model.theta) *
      taper.fun(dist.mat, theta = delta)
  } else {
    psi0 <- cov.fun(dist.mat, theta = model.theta)
  }
  if (!is.null(beta)) {
    pred <- X.ho %*% beta + psi0 %*% z
  } else {
    pred <- psi0 %*% z
  }
  return(pred)
}
|
## Read the household power consumption file. The file uses "?" for
## missing values; declaring na.strings here makes the as.numeric()
## conversions below yield real NAs. stringsAsFactors = FALSE guards
## against factor columns (pre-R-4.0 default), where as.numeric() would
## silently return factor level codes instead of the measured values.
powerConsData <- read.csv2("work_ex/exporartory_data_analysis/week1/household_power_consumption.txt",
                           header = TRUE, sep = ";", na.strings = "?",
                           stringsAsFactors = FALSE)
## Convert columns to the correct class
powerConsData$Date <- as.Date(powerConsData$Date, format="%d/%m/%Y")
powerConsData$Time <- format(powerConsData$Time, format="%H:%M:%S")
powerConsData$Global_active_power <- as.numeric(powerConsData$Global_active_power)
powerConsData$Global_reactive_power <- as.numeric(powerConsData$Global_reactive_power)
powerConsData$Voltage <- as.numeric(powerConsData$Voltage)
powerConsData$Global_intensity <- as.numeric(powerConsData$Global_intensity)
powerConsData$Sub_metering_1 <- as.numeric(powerConsData$Sub_metering_1)
powerConsData$Sub_metering_2 <- as.numeric(powerConsData$Sub_metering_2)
powerConsData$Sub_metering_3 <- as.numeric(powerConsData$Sub_metering_3)
## Full timestamp (Date prints as %Y-%m-%d, matching the strptime format)
DateTime1 <- strptime(paste(powerConsData$Date, powerConsData$Time, sep=" "), "%Y-%m-%d %H:%M:%S")
powerConsData <- cbind(powerConsData, DateTime1)
## Subset data from 2007-02-01 and 2007-02-02
subsetdata <- subset(powerConsData, Date == "2007-02-01" | Date == "2007-02-02")
## Plot global active power vs date-time
png("plot2.png", width=480, height=480)
with(subsetdata, plot(DateTime1, Global_active_power, type="l", xlab="Day", ylab="Global Active Power (kilowatts)"))
dev.off()
| /plot2.r | no_license | amiha1/ExData_Plotting1 | R | false | false | 1,360 | r |
## Read the household power consumption file. The file uses "?" for
## missing values; declaring na.strings here makes the as.numeric()
## conversions below yield real NAs. stringsAsFactors = FALSE guards
## against factor columns (pre-R-4.0 default), where as.numeric() would
## silently return factor level codes instead of the measured values.
powerConsData <- read.csv2("work_ex/exporartory_data_analysis/week1/household_power_consumption.txt",
                           header = TRUE, sep = ";", na.strings = "?",
                           stringsAsFactors = FALSE)
## Convert columns to the correct class
powerConsData$Date <- as.Date(powerConsData$Date, format="%d/%m/%Y")
powerConsData$Time <- format(powerConsData$Time, format="%H:%M:%S")
powerConsData$Global_active_power <- as.numeric(powerConsData$Global_active_power)
powerConsData$Global_reactive_power <- as.numeric(powerConsData$Global_reactive_power)
powerConsData$Voltage <- as.numeric(powerConsData$Voltage)
powerConsData$Global_intensity <- as.numeric(powerConsData$Global_intensity)
powerConsData$Sub_metering_1 <- as.numeric(powerConsData$Sub_metering_1)
powerConsData$Sub_metering_2 <- as.numeric(powerConsData$Sub_metering_2)
powerConsData$Sub_metering_3 <- as.numeric(powerConsData$Sub_metering_3)
## Full timestamp (Date prints as %Y-%m-%d, matching the strptime format)
DateTime1 <- strptime(paste(powerConsData$Date, powerConsData$Time, sep=" "), "%Y-%m-%d %H:%M:%S")
powerConsData <- cbind(powerConsData, DateTime1)
## Subset data from 2007-02-01 and 2007-02-02
subsetdata <- subset(powerConsData, Date == "2007-02-01" | Date == "2007-02-02")
## Plot global active power vs date-time
png("plot2.png", width=480, height=480)
with(subsetdata, plot(DateTime1, Global_active_power, type="l", xlab="Day", ylab="Global Active Power (kilowatts)"))
dev.off()
|
##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 25 Mar 2016
# Function: batsmanMovingAverage
# This function computes and plots the moving average the batsman
#
###########################################################################################
#' @title
#' Moving average of batsman
#'
#' @description
#' This function plots the runs scored by the batsman over the career as a time
#' series. A loess regression line is plotted on the moving average of the batsman
#' the batsman
#'
#' @usage
#' batsmanMovingAverage_ly(df, name = "A Leg Glance")
#'
#' @param df
#' Data frame
#'
#' @param name
#' Name of batsman
#'
#' @return None
#' @references
#' \url{http://cricsheet.org/}\cr
#' \url{https://gigadom.wordpress.com/}\cr
#' \url{https://github.com/tvganesh/yorkrData}
#'
#' @author
#' Tinniam V Ganesh
#' @note
#' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
#'
#' @examples
#' \dontrun{
#' #Get the data frame for Kohli
#' kohli <- getBatsmanDetails(team="India",name="Kohli",dir=pathToFile)
#' batsmanMovingAverage_ly(kohli,"Kohli")
#' }
#' @seealso
#' \code{\link{batsmanDismissals}}\cr
#' \code{\link{batsmanRunsVsDeliveries}}\cr
#' \code{\link{batsmanRunsVsStrikeRate}}\cr
#' \code{\link{batsmanRunsPredict}}\cr
#' \code{\link{teamBatsmenPartnershipAllOppnAllMatches}}\cr
#'
#' @export
#'
batsmanMovingAverage_ly <- function(df,name = "A Leg Glance"){
  # Interactive (plotly) moving-average chart of a batsman's career runs:
  # raw innings runs, the loess trend line, and its ~95% confidence ribbon.
  batsman = runs = NULL  # silence R CMD check NOTEs for NSE column names
  b <- select(df, batsman, runs, date)
  b <- unique(b)
  b$ID <- seq.int(nrow(b))  # innings number, used as the x axis
  # Fit the loess smoother once and reuse it for both the trend line and
  # the ribbon (previously the model was refitted inside add_lines(),
  # duplicating work and risking divergence from the ribbon's model).
  m <- loess(runs ~ ID, data = b)
  plot_ly(b, x=~ID) %>%
    add_lines(y=~runs, line=list(color="grey"), opacity=.3, name="Runs") %>%
    add_lines(y=~fitted(m),
              line=list(color="black", opacity=1),
              name="Average") %>%
    add_ribbons(data = augment(m),
                ymin = ~.fitted - 1.96 * .se.fit,
                ymax = ~.fitted + 1.96 * .se.fit,
                line = list(color = 'rgba(7, 164, 181, 0.05)'),
                opacity=1,
                fillcolor = 'rgba(7, 164, 181, 0.2)',
                name = "Standard Error") %>%
    layout(title=paste(name, "- Moving Average of Career Runs"),
           xaxis=list(title="Innings #"),
           yaxis=list(title="Innings Runs"))
}
| /R/batsmanMovingAverage_ly.R | no_license | bcdunbar/yorkr | R | false | false | 2,805 | r | ##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 25 Mar 2016
# Function: batsmanMovingAverage
# This function computes and plots the moving average the batsman
#
###########################################################################################
#' @title
#' Moving average of batsman
#'
#' @description
#' This function plots the runs scored by the batsman over the career as a time
#' series. A loess regression line is plotted on the moving average of the batsman
#' the batsman
#'
#' @usage
#' batsmanMovingAverage_ly(df, name = "A Leg Glance")
#'
#' @param df
#' Data frame
#'
#' @param name
#' Name of batsman
#'
#' @return None
#' @references
#' \url{http://cricsheet.org/}\cr
#' \url{https://gigadom.wordpress.com/}\cr
#' \url{https://github.com/tvganesh/yorkrData}
#'
#' @author
#' Tinniam V Ganesh
#' @note
#' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
#'
#' @examples
#' \dontrun{
#' #Get the data frame for Kohli
#' kohli <- getBatsmanDetails(team="India",name="Kohli",dir=pathToFile)
#' batsmanMovingAverage_ly(kohli,"Kohli")
#' }
#' @seealso
#' \code{\link{batsmanDismissals}}\cr
#' \code{\link{batsmanRunsVsDeliveries}}\cr
#' \code{\link{batsmanRunsVsStrikeRate}}\cr
#' \code{\link{batsmanRunsPredict}}\cr
#' \code{\link{teamBatsmenPartnershipAllOppnAllMatches}}\cr
#'
#' @export
#'
batsmanMovingAverage_ly <- function(df,name = "A Leg Glance"){
  # Interactive (plotly) moving-average chart of a batsman's career runs:
  # raw innings runs, the loess trend line, and its ~95% confidence ribbon.
  batsman = runs = NULL  # silence R CMD check NOTEs for NSE column names
  b <- select(df, batsman, runs, date)
  b <- unique(b)
  b$ID <- seq.int(nrow(b))  # innings number, used as the x axis
  # Fit the loess smoother once and reuse it for both the trend line and
  # the ribbon (previously the model was refitted inside add_lines(),
  # duplicating work and risking divergence from the ribbon's model).
  m <- loess(runs ~ ID, data = b)
  plot_ly(b, x=~ID) %>%
    add_lines(y=~runs, line=list(color="grey"), opacity=.3, name="Runs") %>%
    add_lines(y=~fitted(m),
              line=list(color="black", opacity=1),
              name="Average") %>%
    add_ribbons(data = augment(m),
                ymin = ~.fitted - 1.96 * .se.fit,
                ymax = ~.fitted + 1.96 * .se.fit,
                line = list(color = 'rgba(7, 164, 181, 0.05)'),
                opacity=1,
                fillcolor = 'rgba(7, 164, 181, 0.2)',
                name = "Standard Error") %>%
    layout(title=paste(name, "- Moving Average of Career Runs"),
           xaxis=list(title="Innings #"),
           yaxis=list(title="Innings Runs"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r_utils.R
\name{theme_shom_pretty}
\alias{theme_shom_pretty}
\title{Shom's Custom ggplot2 themes}
\usage{
theme_shom_pretty(base_size = 11, waffle = FALSE)
}
\arguments{
\item{base_size}{base size font}
\item{waffle}{logical for waffle plot}
}
\value{
None
}
\description{
Custom ggplot theme for pretty plots
}
| /man/theme_shom_pretty.Rd | no_license | shommazumder/shomR | R | false | true | 391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/r_utils.R
\name{theme_shom_pretty}
\alias{theme_shom_pretty}
\title{Shom's Custom ggplot2 themes}
\usage{
theme_shom_pretty(base_size = 11, waffle = FALSE)
}
\arguments{
\item{base_size}{base size font}
\item{waffle}{logical for waffle plot}
}
\value{
None
}
\description{
Custom ggplot theme for pretty plots
}
|
library(gof)
### Name: gof-package
### Title: Model-diagnostics based on cumulative residuals
### Aliases: gof gof-package
### Keywords: package
### ** Examples
example(cumres)
| /data/genthat_extracted_code/gof/examples/gof-package.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 184 | r | library(gof)
### Name: gof-package
### Title: Model-diagnostics based on cumulative residuals
### Aliases: gof gof-package
### Keywords: package
### ** Examples
example(cumres)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PILAF.R
\name{PILAF}
\alias{PILAF}
\title{Constructor to PILAF object}
\usage{
PILAF(time, week, coal, samp, ILI, coal.E, samp.E, ILI.E, iter)
}
\arguments{
\item{time}{A numeric vector of time.}
\item{week}{A numeric vector of week.}
\item{coal}{A numeric vector of coalescent event counts.}
\item{samp}{A numeric vector of sampling event counts.}
\item{ILI}{A numeric vector ILI counts.}
\item{coal.E}{A numeric vector of offset to coal.}
\item{samp.E}{A numeric vector of offset to samp.}
\item{ILI.E}{A numeric vector of offset to ILI.}
\item{iter}{A numeric vector to indicate group of trajectories.}
}
\value{
A PILAF object.
}
\description{
Constructor method to create an object from PILAF class
}
| /man/PILAF.Rd | permissive | Mamie/PILAF | R | false | true | 792 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PILAF.R
\name{PILAF}
\alias{PILAF}
\title{Constructor to PILAF object}
\usage{
PILAF(time, week, coal, samp, ILI, coal.E, samp.E, ILI.E, iter)
}
\arguments{
\item{time}{A numeric vector of time.}
\item{week}{A numeric vector of week.}
\item{coal}{A numeric vector of coalescent event counts.}
\item{samp}{A numeric vector of sampling event counts.}
\item{ILI}{A numeric vector ILI counts.}
\item{coal.E}{A numeric vector of offset to coal.}
\item{samp.E}{A numeric vector of offset to samp.}
\item{ILI.E}{A numeric vector of offset to ILI.}
\item{iter}{A numeric vector to indicate group of trajectories.}
}
\value{
A PILAF object.
}
\description{
Constructor method to create an object from PILAF class
}
|
library("testthat")
# Smoke test for amalgamate_deps_gen(): runs it on stored fixtures and
# checks the structure of the returned object.
test_that(
  "Testing amalgamate_deps_gen()",
  {
    # Fixtures saved under tests/testdata (paths relative to this test dir).
    td <- readRDS("../testdata/td1.rds")
    dep.mat <- readRDS("../testdata/depmat1.rds")
    state.data <- readRDS("../testdata/statedata.rds")
    amal.deps <- amalgamate_deps_gen(td, dep.mat, mode = "check", state.data = state.data)
    # Every name in M must correspond to a known trait.
    expect_true(all(names(amal.deps$M) %in% amal.deps$traits))
    # The result must carry exactly these components, in this order.
    expect_true(all(attributes(amal.deps)$names == c("traits", "drop", "groups", "M", "states", "state.data")))
  }
)
| /tests/testthat/test_amalgamate_deps_gen.R | permissive | uyedaj/rphenoscate | R | false | false | 496 | r | library("testthat")
test_that(
"Testing amalgamate_deps_gen()",
{
td <- readRDS("../testdata/td1.rds")
dep.mat <- readRDS("../testdata/depmat1.rds")
state.data <- readRDS("../testdata/statedata.rds")
amal.deps <- amalgamate_deps_gen(td, dep.mat, mode = "check", state.data = state.data)
expect_true(all(names(amal.deps$M) %in% amal.deps$traits))
expect_true(all(attributes(amal.deps)$names == c("traits", "drop", "groups", "M", "states", "state.data")))
}
)
|
# Read data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Compute emissions for coal-combustion related sources
# NOTE(review): the match is case-sensitive and fixed-string, so source
# names containing "Coal" (capitalised) are NOT picked up -- confirm this
# selection is intended before relying on the yearly totals.
coal_SCC <- subset(SCC, grepl("coal", Short.Name, fixed=TRUE))
coal <- subset(NEI, SCC %in% coal_SCC$SCC)
# Total emissions per year across the selected sources
year_emissions <- aggregate(coal$Emissions, by=list(year=coal$year), FUN=sum)
# Plot emissions over time (custom x axis shows only the survey years)
png("plot4.png")
plot(year_emissions$year, year_emissions$x, pch=19, xlab="Year", ylab="Emissions", xaxt="n")
lines(year_emissions$year, year_emissions$x)
axis(1, at=c(year_emissions$year))
title(main="Coal-combustion related")
dev.off() | /ExploratoryDataAnalysis/CourseProject2/plot4.R | no_license | CarlFredriksson/datasciencecoursera | R | false | false | 585 | r | # Read data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Compute emissions for coal-combustion related sources
coal_SCC <- subset(SCC, grepl("coal", Short.Name, fixed=TRUE))
coal <- subset(NEI, SCC %in% coal_SCC$SCC)
year_emissions <- aggregate(coal$Emissions, by=list(year=coal$year), FUN=sum)
# Plot
png("plot4.png")
plot(year_emissions$year, year_emissions$x, pch=19, xlab="Year", ylab="Emissions", xaxt="n")
lines(year_emissions$year, year_emissions$x)
axis(1, at=c(year_emissions$year))
title(main="Coal-combustion related")
dev.off() |
# Exploratory look at the online-shoppers-intention data set.
# One-time setup (do not run on every execution):
# install.packages("tidyverse")
library(tidyverse)  # loads ggplot2 etc.; must be attached *before* use
projectdata <- read.csv("online_shoppers_intention.csv", header = TRUE, sep = ",")
View(projectdata)
# Missing-value and structure checks
nrow(is.na(projectdata))
which(is.na(projectdata))
summary(projectdata)
dim(projectdata)
colSums(is.na(projectdata))
# ProductRelated columns
projectdata$ProductRelated
typeof(projectdata$ProductRelated)
typeof(projectdata$ProductRelated_Duration)
boxplot(projectdata$ProductRelated_Duration)$out
hist(projectdata$ProductRelated, xlim = c(0, 200))
hist(projectdata$ProductRelated_Duration, xlim = c(0, 20000))
boxplot(projectdata$ProductRelated_Duration)
summary(projectdata$ProductRelated)
# Distribution of ProductRelated split by purchase outcome.
# Fixed: aes() cannot be piped onto a data frame (`df %>% aes(...)` is an
# error); the plot must be built with ggplot(data, aes(...)).
ggplot(projectdata, aes(x = ProductRelated)) +
  geom_bar() +
  facet_grid(Revenue ~ .,
             scales = "free_y")
# Revenue is logical; hist() requires numerics, so convert first
# (hist(projectdata$Revenue) errors with "'x' must be numeric").
hist(as.numeric(projectdata$Revenue))
| /Data Preprocessing in R/PFB project work.R | no_license | MadhuriNYC/Visualizations_Projects_TableauFiles | R | false | false | 864 | r | projectdata <- read.csv("online_shoppers_intention.csv", header = TRUE, sep = ",")
View(projectdata)
nrow(is.na(projectdata))
which(is.na(projectdata))
summary(projectdata)
dim(projectdata)
colSums(is.na(projectdata))
projectdata$ProductRelated
typeof(projectdata$ProductRelated)
typeof(projectdata$ProductRelated_Duration)
boxplot(projectdata$ProductRelated_Duration)$out
hist(projectdata$ProductRelated, xlim = c(0,200))
hist(projectdata$ProductRelated_Duration, xlim = c(0,20000))
boxplot(projectdata$ProductRelated_Duration)
installed.packages()
summary(projectdata$ProductRelated)
projectdata %>%
ggplot2::aes(x=ProductRelated) +
geom_bar() +
facet_grid(Revenue ~ .,
scales = "free_y")
hist(projectdata$Revenue)
install.packages(ggplot)
install.packages("tidyverse")
library(tidyverse)
library(ggplot2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IndexBamFile.R
\name{IndexBamFile}
\alias{IndexBamFile}
\title{IndexBamFile}
\usage{
IndexBamFile(input.file.dir, input.file.pattern, index.file, output.file.dir,
genome)
}
\arguments{
\item{input.file.dir}{}
\item{input.file.pattern}{}
\item{index.file}{}
\item{output.file.dir}{}
\item{genome}{}
}
\description{
IndexBamFile
}
| /man/IndexBamFile.Rd | no_license | bioinformatics-gao/ChipSeq | R | false | true | 414 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IndexBamFile.R
\name{IndexBamFile}
\alias{IndexBamFile}
\title{IndexBamFile}
\usage{
IndexBamFile(input.file.dir, input.file.pattern, index.file, output.file.dir,
genome)
}
\arguments{
\item{input.file.dir}{}
\item{input.file.pattern}{}
\item{index.file}{}
\item{output.file.dir}{}
\item{genome}{}
}
\description{
IndexBamFile
}
|
# MIT License
#
# Copyright (c) 2017-2020 TileDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#' An S4 class for a TileDB domain
#'
#' @slot ptr External pointer to the underlying implementation
#' @exportClass tiledb_domain
setClass("tiledb_domain",
slots = list(ptr = "externalptr"))
tiledb_domain.from_ptr <- function(ptr) {
  # Wrap an external pointer to a native tiledb_domain in the S4 class.
  valid_ptr <- !missing(ptr) && typeof(ptr) == "externalptr" && !is.null(ptr)
  if (!valid_ptr) {
    stop("ptr argument must be a non NULL externalptr to a tiledb_domain instance")
  }
  new("tiledb_domain", ptr = ptr)
}
#' Constructs a `tiledb_domain` object
#'
#' All `tiledb_dim` must be of the same TileDB type.
#'
#' @param dims list() of tiledb_dim objects (a single bare tiledb_dim is
#'   also accepted and wrapped in a list)
#' @param ctx tiledb_ctx (optional)
#' @return tiledb_domain
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32"),
#'                               tiledb_dim("d2", c(1L, 50L), type = "INT32")))
#' @importFrom methods slot
#' @importFrom methods new
#' @export tiledb_domain
tiledb_domain <- function(dims, ctx = tiledb_get_context()) {
  if (!is(ctx, "tiledb_ctx")) {
    stop("argument ctx must be a tiledb_ctx")
  }
  is_dim <- function(obj) is(obj, "tiledb_dim")
  if (is_dim(dims)) { # a bare dim is accepted: wrap it so the check below works
    dims <- list(dims)
  }
  if (missing(dims) || length(dims) == 0 || !all(vapply(dims, is_dim, logical(1)))) {
    # fixed typo in the error message: was "tileb_dim"
    stop("argument dims must be a list of one or more tiledb_dim")
  }
  dims_ptrs <- lapply(dims, function(obj) slot(obj, "ptr"))
  ptr <- libtiledb_domain(ctx@ptr, dims_ptrs)
  new("tiledb_domain", ptr = ptr)
}
#' Prints a domain object
#'
#' @param object A `tiledb_domain` object
#' @export
setMethod("show", "tiledb_domain",
          function(object) {
            # Delegate to the native dump routine for the formatted output.
            return(libtiledb_domain_dump(object@ptr))
          })
#' Returns a list of the tiledb_domain dimension objects
#'
#' @param object tiledb_domain
#' @return a list of tiledb_dim, one per domain dimension
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32"),
#'                               tiledb_dim("d2", c(1L, 50L), type = "INT32")))
#' dimensions(dom)
#'
#' lapply(dimensions(dom), name)
#'
#' @export
setMethod("dimensions", "tiledb_domain",
          function(object) {
            # Each native dimension pointer is wrapped in a tiledb_dim object.
            dim_ptrs <- libtiledb_domain_get_dimensions(object@ptr)
            return(lapply(dim_ptrs, tiledb_dim.from_ptr))
          })
#' Returns the tiledb_domain TileDB type string
#'
#' One type string is returned per dimension, so the result has as many
#' elements as the domain has dimensions.
#'
#' @param object tiledb_domain
#' @return character vector of TileDB datatype strings, one per dimension
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32")))
#' datatype(dom)
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64")))
#' datatype(dom)
#'
#' @export
setMethod("datatype", "tiledb_domain",
          function(object) {
            # vapply instead of sapply: guarantees a character result
            # regardless of the number of dimensions.
            vapply(libtiledb_domain_get_dimensions(object@ptr),
                   libtiledb_dim_get_datatype,
                   character(1))
          })
#' Returns the number of dimensions of the `tiledb_domain`
#'
#' @param object tiledb_domain
#' @return integer number of dimensions
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64")))
#' tiledb_ndim(dom)
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64"),
#'                               tiledb_dim("d2", c(0.5, 100.0), type = "FLOAT64")))
#' tiledb_ndim(dom)
#'
#' @export
setMethod("tiledb_ndim", "tiledb_domain",
          function(object) {
            # Direct passthrough to the native ndim accessor.
            return(libtiledb_domain_get_ndim(object@ptr))
          })
# Declare the is.integral generic; the tiledb_domain method follows below.
#' @rdname generics
#' @export
setGeneric("is.integral", function(object) standardGeneric("is.integral"))
#' Returns TRUE if tiledb_domain is an integral (integer) domain
#'
#' @param object tiledb_domain
#' @return TRUE if the domain is an integral domain, else FALSE
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32")))
#' is.integral(dom)
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64")))
#' is.integral(dom)
#'
#' @export
setMethod("is.integral", "tiledb_domain",
          function(object) {
            dtype <- datatype(object)
            ## Bug fix: the original compared against c("FLOAT32", "FLOAT32"),
            ## so FLOAT64 domains were wrongly reported as integral.
            ## A domain is integral iff no dimension is floating point.
            !any(dtype %in% c("FLOAT32", "FLOAT64"))
          })
#' Retrieve the dimension (domain extent) of the domain
#'
#' Only valid for integral (integer) domains
#'
#' @param x tiledb_domain
#' @return integer vector with the extent of each dimension
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32"),
#'                               tiledb_dim("d2", c(1L, 100L), type = "INT32")))
#' dim(dom)
#'
#' @export
dim.tiledb_domain <- function(x) {
  dtype <- datatype(x)
  ## Extents are undefined for floating-point dimensions; %in% replaces
  ## the original sapply/match/isTRUE construction with the same result.
  if (any(dtype %in% c("FLOAT32", "FLOAT64"))) {
    stop("dim() is only defined for integral domains")
  }
  # One integer extent per dimension, in dimension order.
  vapply(dimensions(x), dim, integer(1L))
}
#' Returns a Dimension indicated by index for the given TileDB Domain
#'
#' @param domain TileDB Domain object
#' @param idx Integer index of the selected dimension; passed unchanged to
#'   the native accessor (whether indexing is 0- or 1-based is decided
#'   there -- TODO confirm against libtiledb_domain_get_dimension_from_index)
#' @return TileDB Dimension object
#' @export
tiledb_domain_get_dimension_from_index <- function(domain, idx) {
  # Named stopifnot() conditions: the names become the error messages.
  stopifnot(domain_argument=is(domain, "tiledb_domain"),
            idx_argument=is.numeric(idx))
  return(new("tiledb_dim", ptr=libtiledb_domain_get_dimension_from_index(domain@ptr, idx)))
}
#' Returns a Dimension indicated by name for the given TileDB Domain
#'
#' @param domain TileDB Domain object
#' @param name A character variable with a dimension name
#' @return TileDB Dimension object
#' @export
tiledb_domain_get_dimension_from_name <- function(domain, name) {
  # Named stopifnot() conditions: the names become the error messages.
  stopifnot(domain_argument=is(domain, "tiledb_domain"),
            name_argument=is.character(name))
  return(new("tiledb_dim", ptr=libtiledb_domain_get_dimension_from_name(domain@ptr, name)))
}
#' Check a domain for a given dimension name
#'
#' @param domain A domain of a TileDB Array schema
#' @param name A character variable with a dimension name
#' @return A boolean value indicating if the dimension exists in the domain
#' @export
tiledb_domain_has_dimension <- function(domain, name) {
  # Named stopifnot() conditions: the names become the error messages.
  stopifnot(domain_argument=is(domain, "tiledb_domain"),
            name_argument=is.character(name))
  # Return the native lookup result directly.
  libtiledb_domain_has_dimension(domain@ptr, name)
}
| /R/Domain.R | permissive | dcooley/TileDB-R | R | false | false | 7,849 | r | # MIT License
#
# Copyright (c) 2017-2020 TileDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#' An S4 class for a TileDB domain
#'
#' The domain holds the ordered set of dimensions defining the coordinate
#' space of a TileDB array; only the native handle is stored on the R side.
#'
#' @slot ptr External pointer to the underlying implementation
#' @exportClass tiledb_domain
setClass("tiledb_domain",
         slots = list(ptr = "externalptr"))
tiledb_domain.from_ptr <- function(ptr) {
  # Internal constructor: wrap an already-created native domain handle.
  invalid <- missing(ptr) || typeof(ptr) != "externalptr" || is.null(ptr)
  if (invalid) {
    stop("ptr argument must be a non NULL externalptr to a tiledb_domain instance")
  }
  new("tiledb_domain", ptr = ptr)
}
#' Constructs a `tiledb_domain` object
#'
#' All `tiledb_dim` must be of the same TileDB type.
#'
#' @param ctx tiledb_ctx (optional)
#' @param dims list() of tiledb_dim objects
#' @return tiledb_domain
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32"),
#' tiledb_dim("d2", c(1L, 50L), type = "INT32")))
#' @importFrom methods slot
#' @importFrom methods new
#' @export tiledb_domain
tiledb_domain <- function(dims, ctx = tiledb_get_context()) {
  if (!is(ctx, "tiledb_ctx")) {
    stop("argument ctx must be a tiledb_ctx")
  }
  ## Test for a missing 'dims' before evaluating it: the original code
  ## touched dims first, so a missing argument surfaced as R's generic
  ## "argument is missing, with no default" error instead of this message.
  if (missing(dims)) {
    stop("argument dims must be a list of one or more tiledb_dim")
  }
  is_dim <- function(obj) is(obj, "tiledb_dim")
  if (is_dim(dims)) {
    # Promote a single dimension to a one-element list so the validation
    # and pointer extraction below work uniformly.
    dims <- list(dims)
  }
  if (length(dims) == 0 || !all(vapply(dims, is_dim, logical(1)))) {
    # (typo "tileb_dim" in the original message corrected)
    stop("argument dims must be a list of one or more tiledb_dim")
  }
  # Collect the native dimension handles and build the native domain.
  dims_ptrs <- lapply(dims, function(obj) slot(obj, "ptr"))
  ptr <- libtiledb_domain(ctx@ptr, dims_ptrs)
  new("tiledb_domain", ptr = ptr)
}
#' Prints a domain object
#'
#' Writes a human-readable dump of the domain via the native library.
#'
#' @param object A tiledb_domain object
#' @export
setMethod("show", "tiledb_domain",
          function(object) {
            return(libtiledb_domain_dump(object@ptr))
          })
#' Returns a list of the tiledb_domain dimension objects
#'
#'
#' @param object tiledb_domain
#' @return a list of tiledb_dim
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32"),
#'                               tiledb_dim("d2", c(1L, 50L), type = "INT32")))
#' dimensions(dom)
#'
#' lapply(dimensions(dom), name)
#'
#' @export
setMethod("dimensions", "tiledb_domain",
          function(object) {
            # Fetch the native handles, then wrap each one as a tiledb_dim.
            dim_ptrs <- libtiledb_domain_get_dimensions(object@ptr)
            return(lapply(dim_ptrs, tiledb_dim.from_ptr))
          })
#' Returns the tiledb_domain TileDB type string
#'
#' Returns one TileDB type string per dimension of the domain (TileDB
#' allows heterogeneous dimension datatypes).
#'
#' @param object tiledb_domain
#' @return character vector of TileDB type strings, one per dimension
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32")))
#' datatype(dom)
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64")))
#' datatype(dom)
#'
#' @export
setMethod("datatype", "tiledb_domain",
          function(object) {
            ## vapply (rather than sapply) guarantees a character vector
            ## even in the degenerate zero-dimension case, where sapply
            ## would silently return an empty list instead.
            dims <- libtiledb_domain_get_dimensions(object@ptr)
            vapply(dims, libtiledb_dim_get_datatype, character(1))
          })
#' Returns the number of dimensions of the `tiledb_domain`
#'
#' @param object tiledb_domain
#' @return integer number of dimensions
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64")))
#' tiledb_ndim(dom)
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64"),
#'                               tiledb_dim("d2", c(0.5, 100.0), type = "FLOAT64")))
#' tiledb_ndim(dom)
#'
#' @export
setMethod("tiledb_ndim", "tiledb_domain",
          function(object) {
            # Direct passthrough to the native ndim accessor.
            return(libtiledb_domain_get_ndim(object@ptr))
          })
# Declare the is.integral generic; the tiledb_domain method follows below.
#' @rdname generics
#' @export
setGeneric("is.integral", function(object) standardGeneric("is.integral"))
#' Returns TRUE if tiledb_domain is an integral (integer) domain
#'
#' @param object tiledb_domain
#' @return TRUE if the domain is an integral domain, else FALSE
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32")))
#' is.integral(dom)
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(0.5, 100.0), type = "FLOAT64")))
#' is.integral(dom)
#'
#' @export
setMethod("is.integral", "tiledb_domain",
          function(object) {
            dtype <- datatype(object)
            ## Bug fix: the original compared against c("FLOAT32", "FLOAT32"),
            ## so FLOAT64 domains were wrongly reported as integral.
            ## A domain is integral iff no dimension is floating point.
            !any(dtype %in% c("FLOAT32", "FLOAT64"))
          })
#' Retrieve the dimension (domain extent) of the domain
#'
#' Only valid for integral (integer) domains
#'
#' @param x tiledb_domain
#' @return integer vector with the extent of each dimension
#' @examples
#' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
#' dom <- tiledb_domain(dims = c(tiledb_dim("d1", c(1L, 100L), type = "INT32"),
#'                               tiledb_dim("d2", c(1L, 100L), type = "INT32")))
#' dim(dom)
#'
#' @export
dim.tiledb_domain <- function(x) {
  dtype <- datatype(x)
  ## Extents are undefined for floating-point dimensions; %in% replaces
  ## the original sapply/match/isTRUE construction with the same result.
  if (any(dtype %in% c("FLOAT32", "FLOAT64"))) {
    stop("dim() is only defined for integral domains")
  }
  # One integer extent per dimension, in dimension order.
  vapply(dimensions(x), dim, integer(1L))
}
#' Returns a Dimension indicated by index for the given TileDB Domain
#'
#' @param domain TileDB Domain object
#' @param idx Integer index of the selected dimension; passed unchanged to
#'   the native accessor (whether indexing is 0- or 1-based is decided
#'   there -- TODO confirm against libtiledb_domain_get_dimension_from_index)
#' @return TileDB Dimension object
#' @export
tiledb_domain_get_dimension_from_index <- function(domain, idx) {
  # Named stopifnot() conditions: the names become the error messages.
  stopifnot(domain_argument=is(domain, "tiledb_domain"),
            idx_argument=is.numeric(idx))
  return(new("tiledb_dim", ptr=libtiledb_domain_get_dimension_from_index(domain@ptr, idx)))
}
#' Returns a Dimension indicated by name for the given TileDB Domain
#'
#' @param domain TileDB Domain object
#' @param name A character variable with a dimension name
#' @return TileDB Dimension object
#' @export
tiledb_domain_get_dimension_from_name <- function(domain, name) {
  # Named stopifnot() conditions: the names become the error messages.
  stopifnot(domain_argument=is(domain, "tiledb_domain"),
            name_argument=is.character(name))
  return(new("tiledb_dim", ptr=libtiledb_domain_get_dimension_from_name(domain@ptr, name)))
}
#' Check a domain for a given dimension name
#'
#' @param domain A domain of a TileDB Array schema
#' @param name A character variable with a dimension name
#' @return A boolean value indicating if the dimension exists in the domain
#' @export
tiledb_domain_has_dimension <- function(domain, name) {
  # Named stopifnot() conditions: the names become the error messages.
  stopifnot(domain_argument=is(domain, "tiledb_domain"),
            name_argument=is.character(name))
  # Return the native lookup result directly.
  libtiledb_domain_has_dimension(domain@ptr, name)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sccp.R
\name{getareastd}
\alias{getareastd}
\title{Get the peak information from SCCPs standards}
\usage{
getareastd(data = NULL, ismz = 323, ppm = 5, con = 2000,
rt = NULL, rts = NULL)
}
\arguments{
\item{data}{list from `xcmsRaw` function}
\item{ismz}{internal standards m/z}
\item{ppm}{resolution of mass spectrum}
\item{con}{concentration of standards}
\item{rt}{retention time range of sccps}
\item{rts}{retention time range of internal standards}
}
\value{
list with peak information
}
\description{
Get the peak information from SCCPs standards
}
\seealso{
\code{\link{getarea}},\code{\link{getsccp}}
}
| /man/getareastd.Rd | no_license | AspirinCode/enviGCMS | R | false | true | 695 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sccp.R
\name{getareastd}
\alias{getareastd}
\title{Get the peak information from SCCPs standards}
\usage{
getareastd(data = NULL, ismz = 323, ppm = 5, con = 2000,
rt = NULL, rts = NULL)
}
\arguments{
\item{data}{list from `xcmsRaw` function}
\item{ismz}{internal standards m/z}
\item{ppm}{resolution of mass spectrum}
\item{con}{concentration of standards}
\item{rt}{retention time range of sccps}
\item{rts}{retention time range of internal standards}
}
\value{
list with peak information
}
\description{
Get the peak information from SCCPs standards
}
\seealso{
\code{\link{getarea}},\code{\link{getsccp}}
}
|
# Build and return the aegis survey parameter list 'p'.  Defaults are
# only filled when absent (exists(..., p) guards), so caller-supplied
# values always win; 'project_class' selects extra settings for the
# "default", "carstm" or "stmv" workflows.
survey_parameters = function( p=NULL, project_name=NULL, project_class="default", ... ) {
  # ---------------------
  # deal with additional passed parameters: anything given via ... takes
  # priority over what is already in p
  p = parameters_control(p, list(...), control="add") # add passed args to parameter list, priority to args
  # ---------------------
  # create/update library list (CRAN packages first, then project packages)
  p$libs = c( p$libs, RLibrary ( "colorspace", "fields", "geosphere", "lubridate", "lattice",
    "maps", "mapdata", "maptools", "parallel", "rgdal", "rgeos", "sp", "splancs", "GADMTools" ) )
  p$libs = c( p$libs, project.library ( "aegis", "aegis.bathymetry", "aegis.substrate", "aegis.temperature", "aegis.survey", "aegis.mpa", "netmensuration", "bio.taxonomy" ) )
  # project identity and directory layout (directories created on demand)
  p$project_name = ifelse ( !is.null(project_name), project_name, "survey" )
  p$data_sources = c("groundfish", "snowcrab")
  if ( !exists("data_root", p) ) p$data_root = project.datadirectory( "aegis", project_name )
  if ( !exists("datadir", p) ) p$datadir = file.path( p$data_root, "data" )
  if ( !exists("modeldir", p) ) p$modeldir = file.path( p$data_root, "modelled" )
  if ( !file.exists(p$datadir) ) dir.create( p$datadir, showWarnings=F, recursive=T )
  if ( !file.exists(p$modeldir) ) dir.create( p$modeldir, showWarnings=F, recursive=T )
  # spatial domain defaults and derived spatial parameters
  if (!exists("spatial_domain", p) ) p$spatial_domain = "SSE"
  if (!exists("spatial_domain_subareas", p)) p$spatial_domain_subareas = c( "snowcrab", "SSE.mpa" )
  p = spatial_parameters( p=p) # default (= only supported resolution of 0.2 km discretization) .. do NOT change
  # net mensuration data locations and temporal coverage
  if ( !exists("scanmar.dir", p) ) p$scanmar.dir = file.path( p$datadir, "nets", "Scanmar" )
  if ( !exists("marport.dir", p) ) p$marport.dir = file.path( p$datadir, "nets", "Marport" )
  if ( !exists("yrs", p) ) p$yrs=1970:lubridate::year(lubridate::now())
  p = temporal_parameters(p=p, aegis_dimensionality="space-year")
  if ( !exists("netmensuration.years", p) ) p$netmensuration.years = c(1990:1992, 2004:lubridate::year(lubridate::now())) # 2009 is the first year with set logs from scanmar available .. if more are found, alter this date
  # taxa selection and parallel-execution defaults
  p$taxa.of.interest = aegis.survey::groundfish.variablelist("catch.summary")
  p$season = "summer"
  p$taxa = "maxresolved"
  p$clusters = rep("localhost", detectCores() )
  # project-class specific settings; each branch returns p directly.
  # NOTE(review): an unrecognised project_class falls off the end of the
  # function and yields NULL -- confirm callers expect this.
  if (project_class=="default") {
    if ( !exists("inputdata_spatial_discretization_planar_km", p)) p$inputdata_spatial_discretization_planar_km = 1 # 1 km .. requires 32 GB RAM and limit of speed -- controls resolution of data prior to modelling to reduce data set and speed up modelling
    if ( !exists("inputdata_temporal_discretization_yr", p)) p$inputdata_temporal_discretization_yr = 1/12 # ie., monthly .. controls resolution of data prior to modelling to reduce data set and speed up modelling
    return(p)
  }
  if (project_class=="carstm") {
    if ( !exists("inputdata_spatial_discretization_planar_km", p)) p$inputdata_spatial_discretization_planar_km = 1 # 1 km .. requires 32 GB RAM and limit of speed -- controls resolution of data prior to modelling to reduce data set and speed up modelling
    if ( !exists("inputdata_temporal_discretization_yr", p)) p$inputdata_temporal_discretization_yr = 1/12 # ie., monthly .. controls resolution of data prior to modelling to reduce data set and speed up modelling
    return(p)
  }
  if (project_class=="stmv") {
    p$libs = c( p$libs, project.library ( "stmv" ) )
    p$DATA = 'survey.db( p=p, DS="stmv_inputs" )'
    p$varstomodel = c()
    if (!exists("stmv_variables", p)) p$stmv_variables = list()
    if (!exists("LOCS", p$stmv_variables)) p$stmv_variables$LOCS=c("plon", "plat")
    if (!exists("TIME", p$stmv_variables)) p$stmv_variables$TIME="tiyr"
    p = aegis_parameters(p=p, DS="stmv" )
    return(p)
  }
}
| /R/survey_parameters.R | permissive | PEDsnowcrab/aegis.survey | R | false | false | 3,761 | r | survey_parameters = function( p=NULL, project_name=NULL, project_class="default", ... ) {
# ---------------------
# deal with additional passed parameters
p = parameters_control(p, list(...), control="add") # add passed args to parameter list, priority to args
# ---------------------
# create/update library list
p$libs = c( p$libs, RLibrary ( "colorspace", "fields", "geosphere", "lubridate", "lattice",
"maps", "mapdata", "maptools", "parallel", "rgdal", "rgeos", "sp", "splancs", "GADMTools" ) )
p$libs = c( p$libs, project.library ( "aegis", "aegis.bathymetry", "aegis.substrate", "aegis.temperature", "aegis.survey", "aegis.mpa", "netmensuration", "bio.taxonomy" ) )
p$project_name = ifelse ( !is.null(project_name), project_name, "survey" )
p$data_sources = c("groundfish", "snowcrab")
if ( !exists("data_root", p) ) p$data_root = project.datadirectory( "aegis", project_name )
if ( !exists("datadir", p) ) p$datadir = file.path( p$data_root, "data" )
if ( !exists("modeldir", p) ) p$modeldir = file.path( p$data_root, "modelled" )
if ( !file.exists(p$datadir) ) dir.create( p$datadir, showWarnings=F, recursive=T )
if ( !file.exists(p$modeldir) ) dir.create( p$modeldir, showWarnings=F, recursive=T )
if (!exists("spatial_domain", p) ) p$spatial_domain = "SSE"
if (!exists("spatial_domain_subareas", p)) p$spatial_domain_subareas = c( "snowcrab", "SSE.mpa" )
p = spatial_parameters( p=p) # default (= only supported resolution of 0.2 km discretization) .. do NOT change
if ( !exists("scanmar.dir", p) ) p$scanmar.dir = file.path( p$datadir, "nets", "Scanmar" )
if ( !exists("marport.dir", p) ) p$marport.dir = file.path( p$datadir, "nets", "Marport" )
if ( !exists("yrs", p) ) p$yrs=1970:lubridate::year(lubridate::now())
p = temporal_parameters(p=p, aegis_dimensionality="space-year")
if ( !exists("netmensuration.years", p) ) p$netmensuration.years = c(1990:1992, 2004:lubridate::year(lubridate::now())) # 2009 is the first year with set logs from scanmar available .. if more are found, alter this date
p$taxa.of.interest = aegis.survey::groundfish.variablelist("catch.summary")
p$season = "summer"
p$taxa = "maxresolved"
p$clusters = rep("localhost", detectCores() )
if (project_class=="default") {
if ( !exists("inputdata_spatial_discretization_planar_km", p)) p$inputdata_spatial_discretization_planar_km = 1 # 1 km .. requires 32 GB RAM and limit of speed -- controls resolution of data prior to modelling to reduce data set and speed up modelling
if ( !exists("inputdata_temporal_discretization_yr", p)) p$inputdata_temporal_discretization_yr = 1/12 # ie., monthly .. controls resolution of data prior to modelling to reduce data set and speed up modelling }
return(p)
}
if (project_class=="carstm") {
if ( !exists("inputdata_spatial_discretization_planar_km", p)) p$inputdata_spatial_discretization_planar_km = 1 # 1 km .. requires 32 GB RAM and limit of speed -- controls resolution of data prior to modelling to reduce data set and speed up modelling
if ( !exists("inputdata_temporal_discretization_yr", p)) p$inputdata_temporal_discretization_yr = 1/12 # ie., monthly .. controls resolution of data prior to modelling to reduce data set and speed up modelling }
return(p)
}
if (project_class=="stmv") {
p$libs = c( p$libs, project.library ( "stmv" ) )
p$DATA = 'survey.db( p=p, DS="stmv_inputs" )'
p$varstomodel = c()
if (!exists("stmv_variables", p)) p$stmv_variables = list()
if (!exists("LOCS", p$stmv_variables)) p$stmv_variables$LOCS=c("plon", "plat")
if (!exists("TIME", p$stmv_variables)) p$stmv_variables$TIME="tiyr"
p = aegis_parameters(p=p, DS="stmv" )
return(p)
}
}
|
# Launch the ShinyApp (Do not remove this comment)
# To deploy, run: rsconnect::deployApp()
# Or use the blue button on top of this file
# Load the package code without exporting internals or attaching testthat helpers.
pkgload::load_all(export_all = FALSE,helpers = FALSE,attach_testthat = FALSE)
# Signal production mode to golem before starting the app.
options( "golem.app.prod" = TRUE)
smoother::run_app() # add parameters here (if any)
| /app.R | permissive | astrzalka/smoother | R | false | false | 302 | r |
# Launch the ShinyApp (Do not remove this comment)
# To deploy, run: rsconnect::deployApp()
# Or use the blue button on top of this file
pkgload::load_all(export_all = FALSE,helpers = FALSE,attach_testthat = FALSE)
options( "golem.app.prod" = TRUE)
smoother::run_app() # add parameters here (if any)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/references_read.R
\name{references_read}
\alias{references_read}
\title{Reads Thomson Reuters Web of Knowledge/Science and ISI reference export files}
\usage{
references_read(data = ".", dir = TRUE, filename_root = "",
include_all = FALSE)
}
\arguments{
\item{data}{either a directory, used in conjuction with dir=TRUE, or a file
name to load}
\item{dir}{if TRUE then data is assumed to be a directory name from which all
files will be read, but if FALSE then data is assumed to be a single file
to read}
\item{filename_root}{the filename root, can include relative or absolute
path, to which "_references.csv" will be appended and the output from the
function will be saved}
\item{include_all}{should all columns be included, or just the most commonly recorded. default=FALSE}
}
\description{
\code{references_read} This function reads Thomson Reuters Web of Knowledge
and ISI format reference data files into an R friendly data format and can
optionally write the converted data to a friendly CSV format.
}
| /man/references_read.Rd | no_license | tilltnet/refnet | R | false | true | 1,092 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/references_read.R
\name{references_read}
\alias{references_read}
\title{Reads Thomson Reuters Web of Knowledge/Science and ISI reference export files}
\usage{
references_read(data = ".", dir = TRUE, filename_root = "",
include_all = FALSE)
}
\arguments{
\item{data}{either a directory, used in conjuction with dir=TRUE, or a file
name to load}
\item{dir}{if TRUE then data is assumed to be a directory name from which all
files will be read, but if FALSE then data is assumed to be a single file
to read}
\item{filename_root}{the filename root, can include relative or absolute
path, to which "_references.csv" will be appended and the output from the
function will be saved}
\item{include_all}{should all columns be included, or just the most commonly recorded. default=FALSE}
}
\description{
\code{references_read} This function reads Thomson Reuters Web of Knowledge
and ISI format reference data files into an R friendly data format and can
optionally write the converted data to a friendly CSV format.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-survey.R
\name{expand_survey_clusters}
\alias{expand_survey_clusters}
\title{Expand list of clusters at each area level}
\usage{
expand_survey_clusters(survey_clusters, survey_regions, areas,
top_level = min(areas$area_level),
bottom_level = max(areas$area_level))
}
\description{
This function recursively expands the list of clusters to produce a list
of survey clusters within areas at each level.
}
\details{
TODO: These should be examples - where is areas_long.rds now?
areas_long <- readRDS(here::here("data/areas/areas_long.rds"))
survey_clusters <- readRDS(here::here("data/survey/survey_clusters.rds"))
survey_regions <- readRDS(here::here("data/survey/survey_regions.rds"))
expand_survey_clusters(survey_clusters, areas_long)
Get clusters at level 1 areas only
expand_survey_clusters(survey_clusters, areas_long, top_level = 1, bottom_level = 1)
}
\keyword{Recursion}
\keyword{internal}
| /man/expand_survey_clusters.Rd | permissive | meganodris/naomi | R | false | true | 986 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-survey.R
\name{expand_survey_clusters}
\alias{expand_survey_clusters}
\title{Expand list of clusters at each area level}
\usage{
expand_survey_clusters(survey_clusters, survey_regions, areas,
top_level = min(areas$area_level),
bottom_level = max(areas$area_level))
}
\description{
This function recursively expands the list of clusters to produce a list
of survey clusters within areas at each level.
}
\details{
TODO: These should be examples - where is areas_long.rds now?
areas_long <- readRDS(here::here("data/areas/areas_long.rds"))
survey_clusters <- readRDS(here::here("data/survey/survey_clusters.rds"))
survey_regions <- readRDS(here::here("data/survey/survey_regions.rds"))
expand_survey_clusters(survey_clusters, areas_long)
Get clusters at level 1 areas only
expand_survey_clusters(survey_clusters, areas_long, top_level = 1, bottom_level = 1)
}
\keyword{Recursion}
\keyword{internal}
|
# 'ardl' is not on CRAN: install devtools first, then fetch the package
# from fcbarbi's GitHub repository and attach it.
install.packages("devtools")
devtools::install_github("fcbarbi/ardl")
library("ardl")
| /downloading_ardl.R | no_license | CTWebb19/Forecasting-Florida-Employment | R | false | false | 89 | r | install.packages("devtools")
devtools::install_github("fcbarbi/ardl")
library("ardl")
|
library(dplyr)
library(ggplot2)
library(reshape2)
# LOAD AND PREPROCESS DATA
# Read one reporting year per file; the CSVs must sit in the working directory.
df_2017 = read.csv('UK Gender Pay Gap Data - 2017 to 2018.csv')
df_2018 = read.csv('UK Gender Pay Gap Data - 2018 to 2019.csv')
df_2019 = read.csv('UK Gender Pay Gap Data - 2019 to 2020.csv')
# Quick interactive sanity checks (printed, not stored).
head(df_2017)
names(df_2017)
# Start by creating a unique id, which is the company number (if present), else the company name
df_2017$unique_id = ifelse(df_2017$CompanyNumber=='', df_2017$EmployerName, df_2017$CompanyNumber)
df_2018$unique_id = ifelse(df_2018$CompanyNumber=='', df_2018$EmployerName, df_2018$CompanyNumber)
df_2019$unique_id = ifelse(df_2019$CompanyNumber=='', df_2019$EmployerName, df_2019$CompanyNumber)
# Tag every row with its reporting year.
df_2017$year = 2017
df_2018$year = 2018
df_2019$year = 2019
# Drop items before merging - Address, CompanyNumber, SicCodes, CompanyLinkToGPGInfo, ResponsiblePerson, CurrentName, DueDate, DateSubmitted
# NOTE(review): EmployerName is dropped from 2017/2018 but kept for 2019 --
# presumably so the merged table retains one name column; confirm intended.
df_2017 = df_2017[, !(names(df_2017) %in% c('EmployerName', 'Address', 'CompanyNumber', 'SicCodes', 'CompanyLinkToGPGInfo', 'ResponsiblePerson', 'CurrentName', 'DueDate', 'DateSubmitted'))]
df_2018 = df_2018[, !(names(df_2018) %in% c('EmployerName', 'Address', 'CompanyNumber', 'SicCodes', 'CompanyLinkToGPGInfo', 'ResponsiblePerson', 'CurrentName', 'DueDate', 'DateSubmitted'))]
df_2019 = df_2019[, !(names(df_2019) %in% c('Address', 'CompanyNumber', 'SicCodes', 'CompanyLinkToGPGInfo', 'ResponsiblePerson', 'CurrentName', 'DueDate', 'DateSubmitted'))]
# Row/column counts after the column drops (printed, not stored).
dim(df_2018)
dim(df_2019)
dim(df_2017)
# Convert employer size and submitted after deadline to factors
df_2017["SubmittedAfterTheDeadline"] = lapply(df_2017["SubmittedAfterTheDeadline"], as.factor)
df_2017["EmployerSize"] = lapply(df_2017["EmployerSize"], as.factor)
df_2018["SubmittedAfterTheDeadline"] = lapply(df_2018["SubmittedAfterTheDeadline"], as.factor)
df_2018["EmployerSize"] = lapply(df_2018["EmployerSize"], as.factor)
df_2019["SubmittedAfterTheDeadline"] = lapply(df_2019["SubmittedAfterTheDeadline"], as.factor)
df_2019["EmployerSize"] = lapply(df_2019["EmployerSize"], as.factor)
# Replace NA values with 0.
# NOTE(review): replace() is applied to the whole data frame, so every NA
# in every column becomes 0 -- not only the bonus-difference columns the
# original comment claimed.  Confirm this blanket replacement is intended.
df_2017 = replace(df_2017, is.na(df_2017), 0)
df_2018 = replace(df_2018, is.na(df_2018), 0)
df_2019 = replace(df_2019, is.na(df_2019), 0)
# Combine the three years on the shared unique_id.  merge() performs an
# inner join here, so only employers present in all three years are kept.
df_combined = merge(merge(df_2017, df_2018, by='unique_id', suffixes = c('_2017', '_2018')),
                    df_2019, by='unique_id', suffixes=c('_2018', '_2019'))
# Add the _2019 suffix to the remaining (2019) columns.
# Bug fix: the original used gsub('(\\w*)', '\\1_2019', x); because the
# pattern also matches the empty string at the end of each name, gsub
# replaced twice and produced names like "FemaleBonusPercent_2019_2019".
# paste0 appends the suffix exactly once.
idx_2019 = 34:length(names(df_combined))  # assumes columns 1-33 come from 2017/2018 -- TODO confirm
names(df_combined)[idx_2019] = paste0(names(df_combined)[idx_2019], "_2019")
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# EDA
# Bonus - Males vs Females per company
# Percentage of women who received bonuses over the past 3 years,
# summarised per employer-size band (printed, not stored).
df_combined %>% group_by(EmployerSize_2019) %>%
  summarise('Mean Bonus Percent (2017)' = mean(FemaleBonusPercent_2017) , 'Mean Bonus Percent (2018)' = mean(FemaleBonusPercent_2018),
            'Mean Bonus Percent (2019)' = mean(FemaleBonusPercent_2019)) %>%
  filter(EmployerSize_2019 != 'Not Provided') %>%
  rename('Employer Size'=EmployerSize_2019)
# Need to compare it with men who receive bonuses (2019 data only).
a = df_2019 %>% group_by('Employer Size' = EmployerSize) %>%
  filter (EmployerSize!='Not Provided') %>%
  summarise('Female' = mean(FemaleBonusPercent), 'Male' = mean(MaleBonusPercent))
# Long format: one row per (size band, sex) pair for grouped bars.
a_melt = melt(a,id.vars='Employer Size', variable.name = 'sex', value.name='Bonus Payout Percentage')
# NOTE(review): theme_classic() is added after theme(), so the text-size
# and axis-rotation tweaks are overridden -- reorder them if they matter.
a_melt %>% ggplot(aes(x=`Employer Size`, y=`Bonus Payout Percentage`, fill=sex)) + geom_bar(stat='identity', position='dodge')+
  theme(text=element_text(size=12), axis.text.x = element_text(angle=45, vjust = .7, hjust=.7)) +
  labs(title='Bonus Payout Percentage - Males vs Females')+
  theme_classic() # + ggsave('bonus_payout.jpg', dpi=1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# A
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Bonus Frequency Polygons
# Frequency polygon of females in each quartile in 2019
# Lower Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleLowerQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Lower Quartile')+
theme_classic() #+ ggsave('lower_quart_dens.jpg', dpi=1000)
# Lower Middle Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleLowerMiddleQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Lower-Middle Quartile')+
theme_classic()# + ggsave('lower_mid_quart_dens.jpg', dpi=1000)
# Upper Middle Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleUpperMiddleQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Upper-Middle Quartile')+
theme_classic() # + ggsave('upper_mid_quart_dens.jpg', dpi=1000)
# Top Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleTopQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Top Quartile')+
theme_classic()# + ggsave('top_quart_dens.jpg', dpi=1000)
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Organization Structure
# Percentage of women in the top quartile
df_combined %>% group_by('Employer Size' = EmployerSize_2019) %>%
summarise('Females in Top Quartile (2017)' = mean(FemaleTopQuartile_2017),
'Females in Top Quartile (2018)' = mean(FemaleTopQuartile_2018),
'Females in Top Quartile (2019)' = mean(FemaleTopQuartile_2019)) %>%
filter(`Employer Size` != 'Not Provided')
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
x = df_2019%>% select('Lower Quartile' = FemaleLowerQuartile, 'Lower Middle Quartile' = FemaleLowerMiddleQuartile,
'Upper Middle Quartile' = FemaleUpperMiddleQuartile, 'Top Quartile' = FemaleTopQuartile)
melt(x, variable.name='Quartile') %>% ggplot(aes(x=Quartile, y=value, fill=Quartile)) + geom_boxplot()+
labs(x='Quartile', y='Percentage', title='Females per Quartile')+
theme_classic()+
theme(axis.text.x = element_text(angle = 30, vjust=.7, hjust=0.7), text=element_text(size=12))# +ggsave('box_plot_female_quartile.jpg', dpi=1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# One Way Anova - difference in mean pay over the years
mean_pay = df_combined %>% select('2017'=DiffMeanHourlyPercent_2017, '2018'= DiffMeanHourlyPercent_2018,
'2019' = DiffMeanHourlyPercent_2019)
mean_pay = melt(mean_pay, variable.name = 'Year')
fm_1 = aov(value~Year, data=mean_pay)
anova(fm_1)
mean_pay %>% ggplot(aes(x=Year, y=value, fill=Year))+geom_boxplot()+
labs(y='Percentage', title='Percentage Difference in Mean Hourly Pay')+
theme_classic() # + ggsave('pay_boxplot.jpg', dpi=1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# One Way Anova - difference in bonus pay over the years
bonus_pay = df_combined %>% select(DiffMeanBonusPercent_2017, DiffMeanBonusPercent_2018, DiffMeanBonusPercent_2019)
bonus_pay = melt(bonus_pay)
fm_2 = aov(value~variable, data=bonus_pay)
anova(fm_2)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Difference in bonus pay
df_combined %>% group_by('Employer Size' = EmployerSize_2019) %>%
summarise('Mean Difference (2017)' = mean(DiffMeanBonusPercent_2017),
'Mean Difference (2018)' = mean(DiffMeanBonusPercent_2018),
'Mean Difference (2019)' = mean(DiffMeanBonusPercent_2019)) %>%
filter(`Employer Size` != 'Not Provided')
x = df_combined %>% summarise('2017'=mean(DiffMeanHourlyPercent_2017),
'2018'=mean(DiffMeanHourlyPercent_2018),
'2019' = mean(DiffMeanHourlyPercent_2019))
melt(x) %>% ggplot(aes(x=variable, y=value, group=1)) + geom_path() + geom_point(size=2)+
labs(x='Year', y='Percentage Difference', title='Percentage Difference in Mean Hourly Rates') +
theme_classic() # + ggsave('hourly_rates_over_year.jpg', dpi = 1000, )
y = df_combined %>% summarise('2017'=mean(DiffMeanBonusPercent_2017),
'2018'=mean(DiffMeanBonusPercent_2018),
'2019' = mean(DiffMeanBonusPercent_2019))
melt(y) %>% ggplot(aes(x=variable, y=value, group=1)) + geom_line() + geom_point() +
labs(x='Year', y='Percentage Difference', title='Percentage Difference in Mean Bonus Rates') +
theme_classic() # + ggsave('bonus_diff_over_year.jpg', dpi = 1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Two sample t test, to see if there is a difference between the bonus payout rates
t.test(df_2019$MaleBonusPercent, df_2019$FemaleBonusPercent, alternative='greater')
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Boxplot of difference in pay, by employer size. Remove the one outlier
df_2019 %>% filter(EmployerSize!='Not Provided', DiffMeanHourlyPercent>-200) %>%
select('Employer Size' = EmployerSize, 'Percentage Difference' = DiffMeanHourlyPercent) %>%
ggplot(aes(x=`Employer Size`, y=`Percentage Difference`, fill=`Employer Size`)) + geom_boxplot()+
labs(title='Average Difference in Mean Hourly Rates') +
theme_classic()# + ggsave('hourly_pay_per_employer.jpg', dpi=1000)
df_2019 %>% filter(EmployerSize!='Not Provided') %>% group_by(EmployerSize) %>%
summarise(mean(DiffMeanHourlyPercent))
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
library(regclass)
library(lmtest)
# Create a new data frame for linear regression. Easy to work with
df_for_regression = df_2019[, names(df_2019) %in% c('DiffMeanHourlyPercent', 'FemaleLowerQuartile', 'FemaleLowerMiddleQuartile',
'FemaleUpperMiddleQuartile', 'FemaleTopQuartile', 'EmployerSize',
'MaleBonusPercent', 'FemaleBonusPercent')]
# Drop the companies where employer size is not provided
df_for_regression <- df_for_regression %>% filter(EmployerSize != 'Not Provided')
str(df_for_regression)
lin_mod = lm(DiffMeanHourlyPercent~FemaleLowerQuartile+FemaleLowerMiddleQuartile+FemaleUpperMiddleQuartile
+FemaleTopQuartile+EmployerSize + FemaleBonusPercent + MaleBonusPercent, data = df_for_regression)
summary(lin_mod)
# Test for Multicollinearity
# Use GVIF. VIFs are fine, all under 5
VIF(lin_mod)
dwtest(lin_mod) # Almost 2. So not going to change anything
lin_mod = lm(DiffMeanHourlyPercent~FemaleLowerQuartile+FemaleTopQuartile+EmployerSize + FemaleBonusPercent, data = df_for_regression)
VIF(lin_mod)
summary(lin_mod)
# Test for heteroscedasticity
lmtest::bptest(lin_mod)
lm_df = fortify(lin_mod)
lm_df %>% ggplot(aes(x=.fitted, y=.resid))+geom_point()+
geom_smooth(aes(x=lm_df$.fitted, y=lm_df$.resid)) +
geom_hline(yintercept =mean(lm_df$.resid)) +
labs(x='Fitted Values', y='Residuals', title='Residuals vs Fitted Values') +
theme_classic() #+ ggsave('errors_vs_fitted.jpg', dpi = 1000)
lm_df %>% ggplot(aes(x=.resid)) + geom_freqpoly(binwidth=1)+
labs(x='Residuals', y='Count', title='Frequency Polygon of Residuals')+
theme_classic() # + ggsave('freq_poly_errors.jpg', dpi=1000)
summary(lin_mod)
# drop employer size
lin_mod_2 = lm(DiffMeanHourlyPercent~FemaleLowerQuartile+FemaleLowerMiddleQuartile+FemaleUpperMiddleQuartile
+FemaleTopQuartile, data = df_for_regression)
summary(lin_mod_2)
VIF(lin_mod_2)
dwtest(lin_mod_2)
bptest(lin_mod_2)
lm_df_2 = fortify(lin_mod_2)
lm_df_2 %>% ggplot(aes(x=.fitted, y=.resid))+geom_point() + geom_smooth() +
geom_hline(yintercept =mean(lm_df$.resid))+
theme_classic()
# Test autocorrelation of errors
summary(lin_mod_2)
summary(lin_mod)
| /main.R | no_license | agastya1995/Gender-Pay-Gap-in-UK | R | false | false | 14,029 | r | library(dplyr)
library(ggplot2)
library(reshape2)
# LOAD AND PREPROCESS DATA
df_2017 = read.csv('UK Gender Pay Gap Data - 2017 to 2018.csv')
df_2018 = read.csv('UK Gender Pay Gap Data - 2018 to 2019.csv')
df_2019 = read.csv('UK Gender Pay Gap Data - 2019 to 2020.csv')
head(df_2017)
names(df_2017)
# Start by creating a unique id, which is the company number (if present), else the company name
df_2017$unique_id = ifelse(df_2017$CompanyNumber=='', df_2017$EmployerName, df_2017$CompanyNumber)
df_2018$unique_id = ifelse(df_2018$CompanyNumber=='', df_2018$EmployerName, df_2018$CompanyNumber)
df_2019$unique_id = ifelse(df_2019$CompanyNumber=='', df_2019$EmployerName, df_2019$CompanyNumber)
df_2017$year = 2017
df_2018$year = 2018
df_2019$year = 2019
# Drop items before merging - Address, CompanyNumber, SicCodes, CompanyLinkToGPGInfo, ResponsiblePerson, CurrentName, DueDate, DateSubmitted
df_2017 = df_2017[, !(names(df_2017) %in% c('EmployerName', 'Address', 'CompanyNumber', 'SicCodes', 'CompanyLinkToGPGInfo', 'ResponsiblePerson', 'CurrentName', 'DueDate', 'DateSubmitted'))]
df_2018 = df_2018[, !(names(df_2018) %in% c('EmployerName', 'Address', 'CompanyNumber', 'SicCodes', 'CompanyLinkToGPGInfo', 'ResponsiblePerson', 'CurrentName', 'DueDate', 'DateSubmitted'))]
df_2019 = df_2019[, !(names(df_2019) %in% c('Address', 'CompanyNumber', 'SicCodes', 'CompanyLinkToGPGInfo', 'ResponsiblePerson', 'CurrentName', 'DueDate', 'DateSubmitted'))]
dim(df_2018)
dim(df_2019)
dim(df_2017)
# Convert employer size and submitted after deadline to factors
df_2017["SubmittedAfterTheDeadline"] = lapply(df_2017["SubmittedAfterTheDeadline"], as.factor)
df_2017["EmployerSize"] = lapply(df_2017["EmployerSize"], as.factor)
df_2018["SubmittedAfterTheDeadline"] = lapply(df_2018["SubmittedAfterTheDeadline"], as.factor)
df_2018["EmployerSize"] = lapply(df_2018["EmployerSize"], as.factor)
df_2019["SubmittedAfterTheDeadline"] = lapply(df_2019["SubmittedAfterTheDeadline"], as.factor)
df_2019["EmployerSize"] = lapply(df_2019["EmployerSize"], as.factor)
# Replace na values with 0 (only in the bonus mean difference rows)
df_2017 = replace(df_2017, is.na(df_2017), 0)
df_2018 = replace(df_2018, is.na(df_2018), 0)
df_2019 = replace(df_2019, is.na(df_2019), 0)
# Combine the 3 dataframes on either company name or company number, get rid of the rest
df_combined = merge(merge(df_2017, df_2018, by='unique_id', suffixes = c('_2017', '_2018')), df_2019, by='unique_id', suffixes=c('_2018', '_2019'))
# Add 2019 suffix to 2019 columns
names(df_combined)[34:length(names(df_combined))] = gsub('(\\w*)', '\\1_2019', names(df_combined)[34:length(names(df_combined))])
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
#EDA
# Bonus - Males vs Females per company
# Percentage of women who received bonuses over the past 3 years
df_combined %>% group_by(EmployerSize_2019) %>%
summarise('Mean Bonus Percent (2017)' = mean(FemaleBonusPercent_2017) , 'Mean Bonus Percent (2018)' = mean(FemaleBonusPercent_2018),
'Mean Bonus Percent (2019)' = mean(FemaleBonusPercent_2019)) %>%
filter(EmployerSize_2019 != 'Not Provided') %>%
rename('Employer Size'=EmployerSize_2019)
# Need to compare it with men who receive bonuses
a = df_2019 %>% group_by('Employer Size' = EmployerSize) %>%
filter (EmployerSize!='Not Provided') %>%
summarise('Female' = mean(FemaleBonusPercent), 'Male' = mean(MaleBonusPercent))
a_melt = melt(a,id.vars='Employer Size', variable.name = 'sex', value.name='Bonus Payout Percentage')
a_melt %>% ggplot(aes(x=`Employer Size`, y=`Bonus Payout Percentage`, fill=sex)) + geom_bar(stat='identity', position='dodge')+
theme(text=element_text(size=12), axis.text.x = element_text(angle=45, vjust = .7, hjust=.7)) +
labs(title='Bonus Payout Percentage - Males vs Females')+
theme_classic() # + ggsave('bonus_payout.jpg', dpi=1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# A
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Bonus Frequency Polygons
# Frequency polygon of females in each quartile in 2019
# Lower Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleLowerQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Lower Quartile')+
theme_classic() #+ ggsave('lower_quart_dens.jpg', dpi=1000)
# Lower Middle Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleLowerMiddleQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Lower-Middle Quartile')+
theme_classic()# + ggsave('lower_mid_quart_dens.jpg', dpi=1000)
# Upper Middle Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleUpperMiddleQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Upper-Middle Quartile')+
theme_classic() # + ggsave('upper_mid_quart_dens.jpg', dpi=1000)
# Top Quartile
df_2019 %>% filter (EmployerSize!='Not Provided') %>%
ggplot(aes(x=FemaleTopQuartile, color=EmployerSize)) + geom_density(size=1) +
labs(x='Percentage', y='Density', color='Employer Size', title='Percentage of Females in Top Quartile')+
theme_classic()# + ggsave('top_quart_dens.jpg', dpi=1000)
# -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- #
# Organization Structure
# Percentage of women in the top quartile
df_combined %>% group_by('Employer Size' = EmployerSize_2019) %>%
summarise('Females in Top Quartile (2017)' = mean(FemaleTopQuartile_2017),
'Females in Top Quartile (2018)' = mean(FemaleTopQuartile_2018),
'Females in Top Quartile (2019)' = mean(FemaleTopQuartile_2019)) %>%
filter(`Employer Size` != 'Not Provided')
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
x = df_2019%>% select('Lower Quartile' = FemaleLowerQuartile, 'Lower Middle Quartile' = FemaleLowerMiddleQuartile,
'Upper Middle Quartile' = FemaleUpperMiddleQuartile, 'Top Quartile' = FemaleTopQuartile)
melt(x, variable.name='Quartile') %>% ggplot(aes(x=Quartile, y=value, fill=Quartile)) + geom_boxplot()+
labs(x='Quartile', y='Percentage', title='Females per Quartile')+
theme_classic()+
theme(axis.text.x = element_text(angle = 30, vjust=.7, hjust=0.7), text=element_text(size=12))# +ggsave('box_plot_female_quartile.jpg', dpi=1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# One Way Anova - difference in mean pay over the years
mean_pay = df_combined %>% select('2017'=DiffMeanHourlyPercent_2017, '2018'= DiffMeanHourlyPercent_2018,
'2019' = DiffMeanHourlyPercent_2019)
mean_pay = melt(mean_pay, variable.name = 'Year')
fm_1 = aov(value~Year, data=mean_pay)
anova(fm_1)
mean_pay %>% ggplot(aes(x=Year, y=value, fill=Year))+geom_boxplot()+
labs(y='Percentage', title='Percentage Difference in Mean Hourly Pay')+
theme_classic() # + ggsave('pay_boxplot.jpg', dpi=1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# One Way Anova - difference in bonus pay over the years
bonus_pay = df_combined %>% select(DiffMeanBonusPercent_2017, DiffMeanBonusPercent_2018, DiffMeanBonusPercent_2019)
bonus_pay = melt(bonus_pay)
fm_2 = aov(value~variable, data=bonus_pay)
anova(fm_2)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Difference in bonus pay
df_combined %>% group_by('Employer Size' = EmployerSize_2019) %>%
summarise('Mean Difference (2017)' = mean(DiffMeanBonusPercent_2017),
'Mean Difference (2018)' = mean(DiffMeanBonusPercent_2018),
'Mean Difference (2019)' = mean(DiffMeanBonusPercent_2019)) %>%
filter(`Employer Size` != 'Not Provided')
x = df_combined %>% summarise('2017'=mean(DiffMeanHourlyPercent_2017),
'2018'=mean(DiffMeanHourlyPercent_2018),
'2019' = mean(DiffMeanHourlyPercent_2019))
melt(x) %>% ggplot(aes(x=variable, y=value, group=1)) + geom_path() + geom_point(size=2)+
labs(x='Year', y='Percentage Difference', title='Percentage Difference in Mean Hourly Rates') +
theme_classic() # + ggsave('hourly_rates_over_year.jpg', dpi = 1000, )
y = df_combined %>% summarise('2017'=mean(DiffMeanBonusPercent_2017),
'2018'=mean(DiffMeanBonusPercent_2018),
'2019' = mean(DiffMeanBonusPercent_2019))
melt(y) %>% ggplot(aes(x=variable, y=value, group=1)) + geom_line() + geom_point() +
labs(x='Year', y='Percentage Difference', title='Percentage Difference in Mean Bonus Rates') +
theme_classic() # + ggsave('bonus_diff_over_year.jpg', dpi = 1000)
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Two sample t test, to see if there is a difference between the bonus payout rates
t.test(df_2019$MaleBonusPercent, df_2019$FemaleBonusPercent, alternative='greater')
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Boxplot of difference in pay, by employer size. Remove the one outlier
df_2019 %>% filter(EmployerSize!='Not Provided', DiffMeanHourlyPercent>-200) %>%
select('Employer Size' = EmployerSize, 'Percentage Difference' = DiffMeanHourlyPercent) %>%
ggplot(aes(x=`Employer Size`, y=`Percentage Difference`, fill=`Employer Size`)) + geom_boxplot()+
labs(title='Average Difference in Mean Hourly Rates') +
theme_classic()# + ggsave('hourly_pay_per_employer.jpg', dpi=1000)
df_2019 %>% filter(EmployerSize!='Not Provided') %>% group_by(EmployerSize) %>%
summarise(mean(DiffMeanHourlyPercent))
# --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
library(regclass)
library(lmtest)
# Create a new data frame for linear regression. Easy to work with
df_for_regression = df_2019[, names(df_2019) %in% c('DiffMeanHourlyPercent', 'FemaleLowerQuartile', 'FemaleLowerMiddleQuartile',
'FemaleUpperMiddleQuartile', 'FemaleTopQuartile', 'EmployerSize',
'MaleBonusPercent', 'FemaleBonusPercent')]
# Drop the companies where employer size is not provided
df_for_regression <- df_for_regression %>% filter(EmployerSize != 'Not Provided')
str(df_for_regression)
lin_mod = lm(DiffMeanHourlyPercent~FemaleLowerQuartile+FemaleLowerMiddleQuartile+FemaleUpperMiddleQuartile
+FemaleTopQuartile+EmployerSize + FemaleBonusPercent + MaleBonusPercent, data = df_for_regression)
summary(lin_mod)
# Test for Multicollinearity
# Use GVIF. VIFs are fine, all under 5
VIF(lin_mod)
dwtest(lin_mod) # Almost 2. So not going to change anything
lin_mod = lm(DiffMeanHourlyPercent~FemaleLowerQuartile+FemaleTopQuartile+EmployerSize + FemaleBonusPercent, data = df_for_regression)
VIF(lin_mod)
summary(lin_mod)
# Test for heteroscedasticity
lmtest::bptest(lin_mod)
lm_df = fortify(lin_mod)
lm_df %>% ggplot(aes(x=.fitted, y=.resid))+geom_point()+
geom_smooth(aes(x=lm_df$.fitted, y=lm_df$.resid)) +
geom_hline(yintercept =mean(lm_df$.resid)) +
labs(x='Fitted Values', y='Residuals', title='Residuals vs Fitted Values') +
theme_classic() #+ ggsave('errors_vs_fitted.jpg', dpi = 1000)
lm_df %>% ggplot(aes(x=.resid)) + geom_freqpoly(binwidth=1)+
labs(x='Residuals', y='Count', title='Frequency Polygon of Residuals')+
theme_classic() # + ggsave('freq_poly_errors.jpg', dpi=1000)
summary(lin_mod)
# drop employer size
lin_mod_2 = lm(DiffMeanHourlyPercent~FemaleLowerQuartile+FemaleLowerMiddleQuartile+FemaleUpperMiddleQuartile
+FemaleTopQuartile, data = df_for_regression)
summary(lin_mod_2)
VIF(lin_mod_2)
dwtest(lin_mod_2)
bptest(lin_mod_2)
lm_df_2 = fortify(lin_mod_2)
lm_df_2 %>% ggplot(aes(x=.fitted, y=.resid))+geom_point() + geom_smooth() +
geom_hline(yintercept =mean(lm_df$.resid))+
theme_classic()
# Test autocorrelation of errors
summary(lin_mod_2)
summary(lin_mod)
|
rm(list = ls(all = TRUE))
source("model2.R")
data(iris)
x = as.matrix(iris[, 1:4])
fit = gaussian_mixture_model(x = x, K = 2)
z_true = iris[, 5]
table(fit$z_map, z_true)
| /code/model2/sample2.R | no_license | ryoga27/gaussian_mixture_model | R | false | false | 170 | r | rm(list = ls(all = TRUE))
source("model2.R")
data(iris)
x = as.matrix(iris[, 1:4])
fit = gaussian_mixture_model(x = x, K = 2)
z_true = iris[, 5]
table(fit$z_map, z_true)
|
#-----------------Alignment--------------------------------------------#
usearch <- "usearch8.0.1403_i86osx32"
if (Sys.info()['sysname'] == "Darwin") { # OS-X
usearch <- "bin/./usearch8.0.1403_i86osx32"
}
if (Sys.info()['sysname'] == "Linux") { # Linux
usearch <- "bin/./usearch8.0.1403_i86linux32"
}
usearch7 <- "bin/./usearch7.0.1090_i86osx32"
if (Sys.info()['sysname'] == "Darwin") { # OS-X
usearch7 <- "bin/./usearch7.0.1090_i86osx32"
}
if (Sys.info()['sysname'] == "Linux") { # Linux
usearch7 <- "bin/./usearch7.0.1090_i86linux32"
}
#--------------Miscellaneous------------------------------------------#
# Path to installed BLASTParser library:
R_LIBS <- "R_Lib"
# Output file with final clusters:
clust_filename <- "clusters.clstr"
# Output OTU table:
otu_table_filename <- "otu_table.txt"
# Output file with coordinates:
coord_filename <- "coordinates.crd"
# Chimeric reference database:
chime_ref <- "data/gold/gold.fa"
# A directory that contains temporary files:
tmp_dir <- paste(analysis_dir, "/tmp", sep='')
# Keep or not temporary files:
keep_tmp_files <- T | /src/config.R | no_license | izhbannikov/MetAmp | R | false | false | 1,079 | r | #-----------------Alignment--------------------------------------------#
usearch <- "usearch8.0.1403_i86osx32"
if (Sys.info()['sysname'] == "Darwin") { # OS-X
usearch <- "bin/./usearch8.0.1403_i86osx32"
}
if (Sys.info()['sysname'] == "Linux") { # Linux
usearch <- "bin/./usearch8.0.1403_i86linux32"
}
usearch7 <- "bin/./usearch7.0.1090_i86osx32"
if (Sys.info()['sysname'] == "Darwin") { # OS-X
usearch7 <- "bin/./usearch7.0.1090_i86osx32"
}
if (Sys.info()['sysname'] == "Linux") { # Linux
usearch7 <- "bin/./usearch7.0.1090_i86linux32"
}
#--------------Miscellaneous------------------------------------------#
# Path to installed BLASTParser library:
R_LIBS <- "R_Lib"
# Output file with final clusters:
clust_filename <- "clusters.clstr"
# Output OTU table:
otu_table_filename <- "otu_table.txt"
# Output file with coordinates:
coord_filename <- "coordinates.crd"
# Chimeric reference database:
chime_ref <- "data/gold/gold.fa"
# A directory that contains temporary files:
tmp_dir <- paste(analysis_dir, "/tmp", sep='')
# Keep or not temporary files:
keep_tmp_files <- T |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusterTab.R
\name{clusterTab}
\alias{clusterTab}
\title{UI elements for cluster tab}
\usage{
clusterTab()
}
\description{
Future home for cluster analysis, hierarchical and divisive/kmeans/kmedoids etc.
}
| /man/clusterTab.Rd | no_license | mpeeples2008/NAA_analytical_dashboard | R | false | true | 284 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusterTab.R
\name{clusterTab}
\alias{clusterTab}
\title{UI elements for cluster tab}
\usage{
clusterTab()
}
\description{
Future home for cluster analysis, hierarchical and divisive/kmeans/kmedoids etc.
}
|
library(samplingVarEst)
### Name: VE.Jk.Tukey.Corr.NHT
### Title: The Tukey (1958) jackknife variance estimator for the estimator
### of a correlation coefficient using the Narain-Horvitz-Thompson point
### estimator
### Aliases: VE.Jk.Tukey.Corr.NHT
### Keywords: variance estimation correlation coefficient
### ** Examples
data(oaxaca) #Loads the Oaxaca municipalities dataset
pik.U <- Pk.PropNorm.U(373, oaxaca$HOMES00) #Reconstructs the 1st order incl. probs.
s <- oaxaca$sHOMES00 #Defines the sample to be used
N <- dim(oaxaca)[1] #Defines the population size
y1 <- oaxaca$POP10 #Defines the variable of interest y1
y2 <- oaxaca$POPMAL10 #Defines the variable of interest y2
x <- oaxaca$HOMES10 #Defines the variable of interest x
#Computes the var. est. of the corr. coeff. point estimator using y1
VE.Jk.Tukey.Corr.NHT(y1[s==1], x[s==1], pik.U[s==1], N)
#Computes the var. est. of the corr. coeff. point estimator using y2
VE.Jk.Tukey.Corr.NHT(y2[s==1], x[s==1], pik.U[s==1], N, FPC= FALSE)
| /data/genthat_extracted_code/samplingVarEst/examples/VE_Jk_Tukey_Corr_NHT.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,158 | r | library(samplingVarEst)
### Name: VE.Jk.Tukey.Corr.NHT
### Title: The Tukey (1958) jackknife variance estimator for the estimator
### of a correlation coefficient using the Narain-Horvitz-Thompson point
### estimator
### Aliases: VE.Jk.Tukey.Corr.NHT
### Keywords: variance estimation correlation coefficient
### ** Examples
data(oaxaca) #Loads the Oaxaca municipalities dataset
pik.U <- Pk.PropNorm.U(373, oaxaca$HOMES00) #Reconstructs the 1st order incl. probs.
s <- oaxaca$sHOMES00 #Defines the sample to be used
N <- dim(oaxaca)[1] #Defines the population size
y1 <- oaxaca$POP10 #Defines the variable of interest y1
y2 <- oaxaca$POPMAL10 #Defines the variable of interest y2
x <- oaxaca$HOMES10 #Defines the variable of interest x
#Computes the var. est. of the corr. coeff. point estimator using y1
VE.Jk.Tukey.Corr.NHT(y1[s==1], x[s==1], pik.U[s==1], N)
#Computes the var. est. of the corr. coeff. point estimator using y2
VE.Jk.Tukey.Corr.NHT(y2[s==1], x[s==1], pik.U[s==1], N, FPC= FALSE)
|
rm(list=ls())
a = function(n)
{
p = (2*n)-2
success = 0
while (success==0)
{
p=p+1
success = 1
m = rep(0,p)
m[1] = 1
m[p] = 1
while(sum(m)<(n))
{
filled = which(m==1)
myDist=NULL
j = 0
for(unfilled in which(m==0))
{
j=j+1
myDist[j] = min(abs(unfilled-filled))
}
m[which(m==0)[which(myDist==max(myDist))[1]]]=1
if (is.element(1,max(myDist))) {success=0}
print(m)
}
print(paste( "p is", p, "success is", success))
}
return(p)
}
b = matrix(0,100,2)
for (c in 3:102)
{
b[c-2,1] = c
b[c-2,2] = a(c)
}
b
| /nm.R | no_license | thedatacafe/riddler | R | false | false | 636 | r | rm(list=ls())
a = function(n)
{
p = (2*n)-2
success = 0
while (success==0)
{
p=p+1
success = 1
m = rep(0,p)
m[1] = 1
m[p] = 1
while(sum(m)<(n))
{
filled = which(m==1)
myDist=NULL
j = 0
for(unfilled in which(m==0))
{
j=j+1
myDist[j] = min(abs(unfilled-filled))
}
m[which(m==0)[which(myDist==max(myDist))[1]]]=1
if (is.element(1,max(myDist))) {success=0}
print(m)
}
print(paste( "p is", p, "success is", success))
}
return(p)
}
b = matrix(0,100,2)
for (c in 3:102)
{
b[c-2,1] = c
b[c-2,2] = a(c)
}
b
|
# Get swirl library
install.packages("swirl")
library(swirl)
# Exercise 1: R Version
print(version)
# Exercise 2: Numeric vector average:
numbers <- c(2.23, 3.45, 1.87, 2.11, 7.33, 18.34, 19.23)
avg <- mean(numbers)
print(avg)
# Exercise 3: Sum
sum = 0
for (i in 1:25) {
sum = sum + i^2
}
print(sum)
# Exercise 4: Class of cars
clz <- class(cars)
print(clz)
# Exercise 5: How many rows does the cars oject have?
rows <- nrow(cars)
print(rows)
# Exercise 6: Name of second column of cars
# dist
# Exercise 7: Average distance in cars
avg_dist <- mean(cars[,2])
print(avg_dist)
# Exercise 8: Which row if cars has distance of 85?
dist_85 <- which(cars$dist == 85)
print(dist_85) | /week_1/first_assessment_exercises.R | no_license | ArianGohari/harvardx_ph525.1x | R | false | false | 686 | r | # Get swirl library
install.packages("swirl")
library(swirl)
# Exercise 1: R Version
print(version)
# Exercise 2: Numeric vector average:
numbers <- c(2.23, 3.45, 1.87, 2.11, 7.33, 18.34, 19.23)
avg <- mean(numbers)
print(avg)
# Exercise 3: Sum
sum = 0
for (i in 1:25) {
sum = sum + i^2
}
print(sum)
# Exercise 4: Class of cars
clz <- class(cars)
print(clz)
# Exercise 5: How many rows does the cars oject have?
rows <- nrow(cars)
print(rows)
# Exercise 6: Name of second column of cars
# dist
# Exercise 7: Average distance in cars
avg_dist <- mean(cars[,2])
print(avg_dist)
# Exercise 8: Which row if cars has distance of 85?
dist_85 <- which(cars$dist == 85)
print(dist_85) |
#' wMSE
#'
#' Function for weighting MSE, nMSE, or aMSE by the specified variable for weights.
#'
#'
#' @param observed observed growth values (e.g. height or weight)
#' @param predicted predicted values from models fitted to observed data
#' @param id.var variable that identifies individual subjects
#' @param weight.var Variable used to weight MSE or nMSE. This should be a vector of values that will be used to divide subject-specific MSE estimates by. An example could be using subject-specific growth trajectories (i.e. weighting individuals with slowest growth).
#' @param type Type of MSE estimate used as denominator. Default is nMSE but can be se to standard MSE.
#'
#' @return data.frame with id and subject-specific weighted MSE or nMSE estimates
#'
#' @references Grigsby MR, Di J, Leroux A, Zipunnikov V, Xiao L, Crainiceanu C, Checkley W. Novel metrics for growth model selection. Emerging themes in epidemiology. 2018 Feb;15(1):4.
#'
#' @export
wmse = function (observed="observed", predicted="pred", id.var="id", weight.var="weights", type="nmse", data){
count=0
wmse.list<-NULL
for (k in unique(data[[id.var]])){
count=count+1
current.mat=subset(data,id==k)
if (type=="nmse"){
a = ((current.mat[[observed]] - current.mat[[predicted]])^2 /(current.mat[[observed]])^2)/current.mat[[weight.var]]
}
if (type=="mse"){
a = ((current.mat[[observed]]-current.mat[[predicted]])^2) /current.mat[[weight.var]]
}
wmse = mean(a)
wmse.list[count]=wmse
}
wmse.result<-data.frame(id=id, wmse=wmse.list)
return(wmse.result)
}
| /R/wmse.R | no_license | MatthewGrigsby/statmedtools | R | false | false | 1,591 | r | #' wMSE
#'
#' Function for weighting MSE, nMSE, or aMSE by the specified variable for weights.
#'
#'
#' @param observed observed growth values (e.g. height or weight)
#' @param predicted predicted values from models fitted to observed data
#' @param id.var variable that identifies individual subjects
#' @param weight.var Variable used to weight MSE or nMSE. This should be a vector of values that will be used to divide subject-specific MSE estimates by. An example could be using subject-specific growth trajectories (i.e. weighting individuals with slowest growth).
#' @param type Type of MSE estimate used as denominator. Default is nMSE but can be se to standard MSE.
#'
#' @return data.frame with id and subject-specific weighted MSE or nMSE estimates
#'
#' @references Grigsby MR, Di J, Leroux A, Zipunnikov V, Xiao L, Crainiceanu C, Checkley W. Novel metrics for growth model selection. Emerging themes in epidemiology. 2018 Feb;15(1):4.
#'
#' @export
wmse = function (observed="observed", predicted="pred", id.var="id", weight.var="weights", type="nmse", data){
count=0
wmse.list<-NULL
for (k in unique(data[[id.var]])){
count=count+1
current.mat=subset(data,id==k)
if (type=="nmse"){
a = ((current.mat[[observed]] - current.mat[[predicted]])^2 /(current.mat[[observed]])^2)/current.mat[[weight.var]]
}
if (type=="mse"){
a = ((current.mat[[observed]]-current.mat[[predicted]])^2) /current.mat[[weight.var]]
}
wmse = mean(a)
wmse.list[count]=wmse
}
wmse.result<-data.frame(id=id, wmse=wmse.list)
return(wmse.result)
}
|
# Read the full household power consumption data set; "?" marks missing
# values in the raw file
power <- read.table("household_power_consumption.txt", sep=";",
                    header=TRUE, na.strings=c("?"), stringsAsFactors=FALSE )
# Keep only the two days of interest (dates are in d/m/Y form)
power <- subset(power, subset=(Date=="1/2/2007" | Date=="2/2/2007"))
# Combine the separate Date and Time columns into a single POSIXct variable
library(lubridate)
power$DateTime <- dmy_hms(paste(power$Date, power$Time))
# Plot 3: draw an empty frame, then add one line per sub-metering series
plot(power$DateTime, power$Sub_metering_1, type="n",
     ylab="Energy sub metering", xlab="")
lines(power$DateTime, power$Sub_metering_1, col="black")
lines(power$DateTime, power$Sub_metering_2, col="red")
lines(power$DateTime, power$Sub_metering_3, col="blue")
legend("topright", lty=1, col=c("black","red","blue"),
       legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# Copy the on-screen plot to a PNG file and close the PNG device
dev.copy(png,file="plot3.png")
dev.off()
| /plot3.R | no_license | jshife/ExData_Plotting1 | R | false | false | 784 | r |
#read in data
data <- read.table("household_power_consumption.txt", sep=";",
header=TRUE, na.strings=c("?"), stringsAsFactors=FALSE )
#subset data
data <- subset(data, subset=(Date=="1/2/2007" | Date=="2/2/2007"))
#create date time variable
library(lubridate)
data$DateTime <- dmy_hms(paste(data$Date, data$Time))
#create plot 3
with(data,plot(DateTime, Sub_metering_1,type="n",
ylab="Energy sub metering",xlab=""))
with(data,lines(DateTime, Sub_metering_1,col="black"))
with(data,lines(DateTime, Sub_metering_2,col="red"))
with(data,lines(DateTime, Sub_metering_3,col="blue"))
legend("topright", lty=1, col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.copy(png,file="plot3.png")
dev.off()
|
#' Checking assay method for any class
#'
#' The \code{hasAssay} function is intended for developers who would like to
#' include new classes into a \code{MultiAssayExperiment} instance.
#' It checks the methods tables of the \code{assay} function for the
#' specified class of the argument.
#'
#' @param object A \code{MultiAssayExperiment} or \code{list} object instance
#' @return A \code{logical} value indicating method availability
#'
#' @examples
#' char <- character()
#' hasAssay(char)
#'
#' @export hasAssay
hasAssay <- function(object) {
# Take the first class named in each registered `assay` method signature;
# these are the classes for which an `assay` method is available
validClasses <- vapply(findMethods("assay")@signatures,
FUN = "[",
FUN.VALUE = character(1), ... = 1L)
validClasses <- unique(validClasses)
# TRUE only if every element of the experiment list inherits from at
# least one class with a registered `assay` method (vacuously TRUE for
# an empty list)
all(vapply(Elist(object), FUN = function(element) {
any(vapply(validClasses, FUN = function(cl) {
inherits(element, cl)
}, FUN.VALUE = logical(1)))
}, FUN.VALUE = logical(1)))
} | /R/hasAssay.R | no_license | patrick-edu/MultiAssayExperiment | R | false | false | 944 | r | #' Checking assay method for any class
#' Checking assay method for any class
#'
#' The \code{hasAssay} function is intended for developers who would like to
#' include new classes into a \code{MultiAssayExperiment} instance.
#' It inspects the methods table of the \code{assay} generic and reports
#' whether every element of the supplied object belongs to a class with a
#' registered \code{assay} method.
#'
#' @param object A \code{MultiAssayExperiment} or \code{list} object instance
#' @return A \code{logical} value indicating method availability
#'
#' @examples
#' char <- character()
#' hasAssay(char)
#'
#' @export hasAssay
hasAssay <- function(object) {
  # Classes appearing in the first slot of each registered `assay`
  # method signature
  assay_classes <- unique(vapply(
    findMethods("assay")@signatures,
    function(sig) sig[[1L]],
    character(1)
  ))
  # `inherits()` with a vector `what` returns TRUE when the element
  # inherits from any one of the listed classes
  element_ok <- vapply(
    Elist(object),
    function(element) inherits(element, assay_classes),
    logical(1)
  )
  all(element_ok)
}
# Build an undirected artist-similarity graph from track similarity data.
library(igraph)
library(tidyverse)
# Track-id lookup: compact integer id (`new_id`) for every track UUID
trackMap <- read_tsv("../../data/trackMap.tsv",
col_names = c("new_id", "UUID"))
# Unique artists keyed by UUID; attach the integer track id, then drop
# rows without a mapping and any repeated ids
artistas_unicos <- read_delim("../../data/artistas_unicos_bash.tsv",
col_names = c("UUID", "Artist"),
quote = "",
delim = "\t") %>%
left_join(trackMap) %>%
select(new_id, Artist) %>%
filter(!is.na(new_id)) %>%
filter(!duplicated(new_id))
# Pairwise similarities between tracks (X1, X2 = track ids, X3 = score)
similitudes <- read_tsv("../../data/filtered-similarities-50.tsv",
col_names = F)
# Label both edge endpoints as "<id>_<artist>" and keep the score
similitudes_2 <- similitudes %>%
left_join(artistas_unicos, by = c("X1" = "new_id")) %>%
left_join(artistas_unicos, by = c("X2" = "new_id")) %>%
mutate(Artist.x = paste0(X1, "_", Artist.x),
Artist.y = paste0(X2, "_", Artist.y)) %>%
select(Artist.x, Artist.y, sim = X3)
saveRDS(similitudes_2, "../../out/similitudes.rds")
# Build the undirected graph from the first two (endpoint) columns
grafo_sims <- graph_from_edgelist(as.matrix(similitudes_2[,1:2]),
directed = F)
#grafo_sims <- graph.data.frame(similitudes_2, directed = F)
saveRDS(grafo_sims, "../../out/grafo_sims.rds") | /code/R/crear_grafo.R | no_license | mariobecerra/Analisis_Algoritmos_Proyecto | R | false | false | 1,155 | r | library(igraph)
# Build an undirected artist-similarity graph from track similarity data.
library(igraph)
library(tidyverse)

# Track-id lookup: compact integer id (`new_id`) for every track UUID
trackMap <- read_tsv("../../data/trackMap.tsv",
                     col_names = c("new_id", "UUID"))

# Unique artists keyed by UUID; attach the integer track id, then drop
# rows without a mapping and any repeated ids
artistas_unicos <-
  read_delim("../../data/artistas_unicos_bash.tsv",
             col_names = c("UUID", "Artist"),
             quote = "",
             delim = "\t")
artistas_unicos <- left_join(artistas_unicos, trackMap)
artistas_unicos <- select(artistas_unicos, new_id, Artist)
artistas_unicos <- filter(artistas_unicos, !is.na(new_id))
artistas_unicos <- filter(artistas_unicos, !duplicated(new_id))

# Pairwise similarities between tracks (X1, X2 = track ids, X3 = score)
similitudes <- read_tsv("../../data/filtered-similarities-50.tsv",
                        col_names = FALSE)

# Label both edge endpoints as "<id>_<artist>" and keep the score
similitudes_2 <- left_join(similitudes, artistas_unicos,
                           by = c("X1" = "new_id"))
similitudes_2 <- left_join(similitudes_2, artistas_unicos,
                           by = c("X2" = "new_id"))
similitudes_2 <- mutate(similitudes_2,
                        Artist.x = paste0(X1, "_", Artist.x),
                        Artist.y = paste0(X2, "_", Artist.y))
similitudes_2 <- select(similitudes_2, Artist.x, Artist.y, sim = X3)
saveRDS(similitudes_2, "../../out/similitudes.rds")

# Build the undirected graph from the first two (endpoint) columns
grafo_sims <- graph_from_edgelist(as.matrix(similitudes_2[, 1:2]),
                                  directed = FALSE)
saveRDS(grafo_sims, "../../out/grafo_sims.rds")
context("Ensuring that the common utility functions work as expected")

test_that("the `date_formats()` function works correctly", {
  fmt_tbl <- date_formats()
  # The information table is a tibble
  expect_is(fmt_tbl, c("tbl_df", "tbl", "data.frame"))
  # It has the expected dimensions
  expect_equal(dim(fmt_tbl), c(14, 3))
  # It has the expected column names
  expect_equal(
    colnames(fmt_tbl),
    c("format_number", "format_name", "format_code"))
})
test_that("the `time_formats()` util fcn works as expected", {
  fmt_tbl <- time_formats()
  # The information table is a tibble
  expect_is(fmt_tbl, c("tbl_df", "tbl", "data.frame"))
  # It has the expected dimensions
  expect_equal(dim(fmt_tbl), c(5, 3))
  # It has the expected column names
  expect_equal(
    colnames(fmt_tbl),
    c("format_number", "format_name", "format_code"))
})
test_that("the `get_date_format()` function works correctly", {
  expected_codes <-
    c("%F", "%A, %B %d, %Y", "%a, %b %d, %Y", "%A %d %B %Y",
      "%B %d, %Y", "%b %d, %Y", "%d %b %Y", "%d %B %Y", "%d %B",
      "%Y", "%B", "%d", "%Y/%m/%d", "%y/%m/%d")
  # Numeric `date_style` values resolve to the expected format codes
  expect_equal(unlist(lapply(1:14, get_date_format)), expected_codes)
  # Text-based `date_style` values resolve to the same codes
  expect_equal(
    unlist(lapply(date_formats()$format_name, get_date_format)),
    expected_codes)
})
test_that("the `get_time_format()` function works correctly", {
  expected_codes <-
    c("%H:%M:%S", "%H:%M", "%I:%M:%S %P", "%I:%M %P", "%I %P")
  # Numeric `date_style` values resolve to the expected format codes
  expect_equal(unlist(lapply(1:5, get_time_format)), expected_codes)
  # Text-based `date_style` values resolve to the same codes
  expect_equal(
    unlist(lapply(time_formats()$format_name, get_time_format)),
    expected_codes)
})
test_that("the `validate_currency()` function works correctly", {
  # Valid currency names validate silently (NULL return)
  expect_null(
    unlist(lapply(currency_symbols$curr_symbol, validate_currency)))
  # Invalid currency names result in an error
  expect_error(lapply(c("thaler", "tetarteron"), validate_currency))
  # Valid 3-letter currency codes validate silently
  expect_null(
    unlist(lapply(currencies$curr_code, validate_currency)))
  # Invalid 3-letter currency codes result in an error
  expect_error(lapply(c("AAA", "ZZZ"), validate_currency))
  # Valid 3-number currency codes validate silently, whether given
  # as strings or as numbers
  expect_null(
    unlist(lapply(currencies$curr_number, validate_currency)))
  expect_null(
    unlist(lapply(as.numeric(currencies$curr_number), validate_currency)))
  # Invalid 3-number currency codes result in an error
  expect_error(lapply(c(999, 998), validate_currency))
})
test_that("the `get_currency_str()` function works correctly", {
  # 3-letter currency codes return a currency symbol
  expect_equal(get_currency_str(currency = "CAD"), "$")
  expect_equal(get_currency_str(currency = "DKK"), "kr.")
  expect_equal(get_currency_str(currency = "JPY"), "¥")
  expect_equal(get_currency_str(currency = "RUB"), "₽")
  # 3-number currency codes return a currency symbol
  expect_equal(get_currency_str(currency = "230"), "Br")
  expect_equal(get_currency_str(currency = "946"), "RON")
  expect_equal(get_currency_str(currency = "682"), "SR")
  expect_equal(get_currency_str(currency = "90"), "SI$")
  # Common currency names return a currency symbol
  expect_equal(get_currency_str(currency = "pound"), "£")
  expect_equal(get_currency_str(currency = "franc"), "₣")
  expect_equal(get_currency_str(currency = "guarani"), "₲")
  expect_equal(get_currency_str(currency = "hryvnia"), "₴")
  # With `fallback_to_code`, 3-letter codes are substituted where an
  # HTML entity would otherwise be provided
  expect_equal(get_currency_str(currency = "CAD", fallback_to_code = TRUE), "$")
  expect_equal(get_currency_str(currency = "DKK", fallback_to_code = TRUE), "kr.")
  expect_equal(get_currency_str(currency = "JPY", fallback_to_code = TRUE), "JPY")
  expect_equal(get_currency_str(currency = "RUB", fallback_to_code = TRUE), "RUB")
  # The same applies to 3-number codes
  expect_equal(get_currency_str(currency = "532", fallback_to_code = TRUE), "ANG")
  expect_equal(get_currency_str(currency = 533, fallback_to_code = TRUE), "AWG")
  # Common currency names never fall back to a code
  expect_equal(get_currency_str(currency = "pound", fallback_to_code = TRUE), "£")
  expect_equal(get_currency_str(currency = "franc", fallback_to_code = TRUE), "₣")
  expect_equal(get_currency_str(currency = "guarani", fallback_to_code = TRUE), "₲")
  expect_equal(get_currency_str(currency = "hryvnia", fallback_to_code = TRUE), "₴")
  # Uninterpretable input is returned unchanged
  expect_equal(get_currency_str(currency = "thaler"), "thaler")
})
test_that("the `get_currency_exponent()` function works correctly", {
  # 3-letter currency codes return the expected exponent
  expect_equal(get_currency_exponent(currency = "BIF"), 0)
  expect_equal(get_currency_exponent(currency = "AED"), 2)
  expect_equal(get_currency_exponent(currency = "TND"), 3)
  expect_equal(get_currency_exponent(currency = "CLF"), 4)
  # 3-number currency codes return the expected exponent
  expect_equal(get_currency_exponent(currency = "533"), 2)
  expect_equal(get_currency_exponent(currency = "152"), 0)
  expect_equal(get_currency_exponent(currency = 990), 4)
  expect_equal(get_currency_exponent(currency = 886), 2)
  # Currencies whose exponent field is NA yield an exponent of 0
  na_exponent_codes <- currencies$curr_code[is.na(currencies$exponent)]
  expect_equal(
    unlist(lapply(na_exponent_codes, get_currency_exponent)),
    rep(0, 7))
})
test_that("the `process_text()` function works correctly", {
  # Plain text with the `character` class
  simple_text <- "this is simple text"
  # Markdown text with the `from_markdown` class (via the `md()` helper)
  md_text <- md("this is *text* interpreted as **markdown**")
  # HTML text with the `html`/`character` classes (via the `html()` helper)
  html_text <- html("this is <em>text</em> that's <strong>HTML</strong>")
  # `character` input passes through `process_text()` unchanged
  expect_equal(process_text(text = simple_text), simple_text)
  expect_is(simple_text, "character")
  # `from_markdown` input is transformed to character-based HTML
  expect_equal(
    process_text(text = md_text),
    "this is <em>text</em> interpreted as <strong>markdown</strong>")
  expect_is(md_text, "from_markdown")
  expect_is(process_text(text = md_text), "character")
  # `html` input passes through as character-based HTML
  expect_equal(process_text(text = html_text), as.character(html_text))
  expect_is(html_text, c("html", "character"))
  expect_is(process_text(text = html_text), c("html", "character"))
})
test_that("the `apply_pattern_fmt_x()` function works correctly", {
  # Pre-formatted values to apply patterns to
  x <- c("23.4%", "32.6%", "9.15%")
  # The default `{x}` pattern leaves values unchanged
  expect_equal(apply_pattern_fmt_x(pattern = "{x}", values = x), x)
  # Literal text can be appended
  expect_equal(
    apply_pattern_fmt_x(pattern = "{x}:n", values = x),
    paste0(x, ":n"))
  # Literal text can be appended and prepended at once
  expect_equal(
    apply_pattern_fmt_x(pattern = "+{x}:n", values = x),
    paste0("+", x, ":n"))
  # Multiple `{x}` placeholders produce multiple copies of each value
  expect_equal(
    apply_pattern_fmt_x(pattern = "{x}, ({x})", values = x),
    paste0(x, ", (", x, ")"))
})
test_that("the `remove_html()` function works correctly", {
  # HTML text carrying only the `character` class
  html_text_1 <- "<p>this is <em>text</em> that's <strong>HTML</strong></p>"
  # HTML text carrying the `html` and `character` classes (via `html()`)
  html_text_2 <- html("this is <em>text</em> that's <strong>HTML</strong>")
  # Tags are stripped from `character` input and the class is preserved
  expect_equal(remove_html(html_text_1), "this is text that's HTML")
  expect_is(remove_html(html_text_1), "character")
  # Tags are stripped from `html` input and both classes are preserved
  html_text_2_removed <- remove_html(html_text_2)
  expect_is(html_text_2_removed, c("html", "character"))
  expect_equal(
    as.character(html_text_2_removed),
    remove_html(html_text_1))
})
test_that("the `get_css_tbl()` function works correctly", {
  # Derive a CSS table from a gt table based on the `mtcars` dataset
  css_tbl <- get_css_tbl(gt(mtcars, rownames_to_stub = TRUE))
  expect_is(css_tbl, c("tbl_df", "tbl", "data.frame"))
  expect_equal(dim(css_tbl), c(131, 4))
  expect_equal(
    colnames(css_tbl),
    c("selector", "type", "property", "value"))
})
test_that("the `inline_html_styles()` function works correctly", {
# Create a simple gt table from `mtcars`
data <- gt(mtcars)
# Get the CSS tibble and the raw HTML
css_tbl <- data %>% get_css_tbl()
html <- data %>% as_raw_html(inline_css = FALSE)
# Get the inlined HTML using `inline_html_styles()`
inlined_html <- inline_html_styles(html = html, css_tbl = css_tbl)
# Expect that certain portions of `inlined_html` have
# inlined CSS rules (these literal strings mirror gt's default table
# theme, so a theme change will intentionally break these expectations)
expect_true(
grepl(
paste0(
"style=\"font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', ",
"Roboto, Oxygen, Ubuntu, Cantarell, 'Helvetica Neue', 'Fira Sans', ",
"'Droid Sans', Arial, sans-serif; display: table; border-collapse: ",
"collapse; margin-left: auto; margin-right: auto; color: #333333; ",
"font-size: 16px; background-color: #FFFFFF; width: auto; ",
"border-top-style: solid; border-top-width: 2px; border-top-color: ",
"#A8A8A8; border-bottom-style: solid; border-bottom-width: 2px; ",
"border-bottom-color: #A8A8A8;\""
),
inlined_html
)
)
expect_true(
grepl(
paste0(
"style=\"color: #333333; background-color: #FFFFFF; font-size: ",
"16px; font-weight: initial; vertical-align: middle; padding: ",
"5px; margin: 10px; overflow-x: hidden; border-top-style: solid; ",
"border-top-width: 2px; border-top-color: #D3D3D3; ",
"border-bottom-style: solid; border-bottom-width: 2px; ",
"border-bottom-color: #D3D3D3; text-align: center;\""
),
inlined_html
)
)
# Augment the gt table with custom styles
data <-
data %>%
tab_style(
style = cell_text(size = px(10)),
locations = cells_data(columns = TRUE)
)
# Get the CSS tibble and the raw HTML
css_tbl <- data %>% get_css_tbl()
html <- data %>% as_raw_html(inline_css = FALSE)
# Get the inlined HTML using `inline_html_styles()`
inlined_html <- inline_html_styles(html = html, css_tbl = css_tbl)
# Expect that the style rule from `tab_style` is a listed value along with
# the inlined rules derived from the CSS classes
# (the custom `font-size: 10px` appears last in the inlined rule list)
expect_true(
grepl(
paste0(
"style=\"padding: 8px; margin: 10px; border-bottom-style: solid; ",
"border-bottom-width: thin; border-bottom-color: #D3D3D3; ",
"vertical-align: middle; overflow-x: hidden; text-align: right; ",
"font-variant-numeric: tabular-nums; font-size: 10px;\""
),
inlined_html
)
)
# Create a gt table with a custom style in the title and subtitle
# (left alignment of text)
data <-
gt(mtcars) %>%
tab_header(
title = "The title",
subtitle = "The subtitle"
) %>%
tab_style(
style = cell_text(align = "left"),
locations = list(
cells_title(groups = "title"),
cells_title(groups = "subtitle")
)
)
# Get the CSS tibble and the raw HTML
css_tbl <- data %>% get_css_tbl()
html <- data %>% as_raw_html(inline_css = FALSE)
# Get the inlined HTML using `inline_html_styles()`
inlined_html <- inline_html_styles(html = html, css_tbl = css_tbl)
# Expect that the `colspan` attr is preserved in both <th> elements
# and that the `text-align:left` rule is present
expect_true(
grepl("th colspan=\"11\" style=.*?text-align: left;", inlined_html)
)
})
test_that("the `as_locations()` function works correctly", {
  # Define `locations` as a `cells_data` object
  locations <-
    cells_data(
      columns = vars(hp),
      rows = c("Datsun 710", "Valiant"))
  # It is a list of two quosures
  expect_equal(length(locations), 2)
  expect_equal(length(locations[[1]]), 2)
  expect_is(locations[[1]], c("quosure", "formula"))
  expect_is(locations[[2]], c("quosure", "formula"))
  # `as_locations()` upgrades it to a one-element list of locations
  locations_list <- as_locations(locations)
  expect_equal(length(locations_list), 1)
  expect_equal(length(locations_list[[1]]), 2)
  expect_is(locations_list[[1]], c("cells_data", "location_cells"))
  # A named vector is not a valid location specification
  locations <-
    c(
      columns = "hp",
      rows = c("Datsun 710", "Valiant"))
  expect_error(
    as_locations(locations))
})
test_that("the `process_footnote_marks()` function works correctly", {
  # The "numbers" keyword maps indices to their string form, regardless
  # of whether `x` is integer or character
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "numbers"),
    as.character(1:10))
  expect_equal(
    process_footnote_marks(x = as.character(1:10), marks = "numbers"),
    as.character(1:10))
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "numbers"),
    as.character(1:10))
  # A custom mark set doubles its symbols once exhausted
  expect_equal(
    process_footnote_marks(x = 1:10, marks = as.character(1:5)),
    c("1", "2", "3", "4", "5", "11", "22", "33", "44", "55"))
  # The "letters"/"LETTERS" keywords and the equivalent explicit
  # vectors produce identical results
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "letters"),
    letters[1:10])
  expect_equal(
    process_footnote_marks(x = 1:10, marks = letters),
    letters[1:10])
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "LETTERS"),
    LETTERS[1:10])
  expect_equal(
    process_footnote_marks(x = 1:10, marks = LETTERS),
    LETTERS[1:10])
  # Symbol marks also double on the second cycle
  expect_equal(
    process_footnote_marks(x = 1:10, marks = c("⁕", "‖", "†", "§", "¶")),
    c("\u2055", "‖", "†", "§", "¶",
      "\u2055\u2055", "‖‖", "††", "§§", "¶¶"))
})
| /tests/testthat/test-util_functions.R | permissive | Glewando/gt | R | false | false | 16,855 | r | context("Ensuring that the common utility functions work as expected")
test_that("the `date_formats()` function works correctly", {
  fmt_tbl <- date_formats()
  # The information table is a tibble
  expect_is(fmt_tbl, c("tbl_df", "tbl", "data.frame"))
  # It has the expected dimensions
  expect_equal(dim(fmt_tbl), c(14, 3))
  # It has the expected column names
  expect_equal(
    colnames(fmt_tbl),
    c("format_number", "format_name", "format_code"))
})
test_that("the `time_formats()` util fcn works as expected", {
  fmt_tbl <- time_formats()
  # The information table is a tibble
  expect_is(fmt_tbl, c("tbl_df", "tbl", "data.frame"))
  # It has the expected dimensions
  expect_equal(dim(fmt_tbl), c(5, 3))
  # It has the expected column names
  expect_equal(
    colnames(fmt_tbl),
    c("format_number", "format_name", "format_code"))
})
test_that("the `get_date_format()` function works correctly", {
  expected_codes <-
    c("%F", "%A, %B %d, %Y", "%a, %b %d, %Y", "%A %d %B %Y",
      "%B %d, %Y", "%b %d, %Y", "%d %b %Y", "%d %B %Y", "%d %B",
      "%Y", "%B", "%d", "%Y/%m/%d", "%y/%m/%d")
  # Numeric `date_style` values resolve to the expected format codes
  expect_equal(unlist(lapply(1:14, get_date_format)), expected_codes)
  # Text-based `date_style` values resolve to the same codes
  expect_equal(
    unlist(lapply(date_formats()$format_name, get_date_format)),
    expected_codes)
})
test_that("the `get_time_format()` function works correctly", {
  expected_codes <-
    c("%H:%M:%S", "%H:%M", "%I:%M:%S %P", "%I:%M %P", "%I %P")
  # Numeric `date_style` values resolve to the expected format codes
  expect_equal(unlist(lapply(1:5, get_time_format)), expected_codes)
  # Text-based `date_style` values resolve to the same codes
  expect_equal(
    unlist(lapply(time_formats()$format_name, get_time_format)),
    expected_codes)
})
test_that("the `validate_currency()` function works correctly", {
  # Valid currency names validate silently (NULL return)
  expect_null(
    unlist(lapply(currency_symbols$curr_symbol, validate_currency)))
  # Invalid currency names result in an error
  expect_error(lapply(c("thaler", "tetarteron"), validate_currency))
  # Valid 3-letter currency codes validate silently
  expect_null(
    unlist(lapply(currencies$curr_code, validate_currency)))
  # Invalid 3-letter currency codes result in an error
  expect_error(lapply(c("AAA", "ZZZ"), validate_currency))
  # Valid 3-number currency codes validate silently, whether given
  # as strings or as numbers
  expect_null(
    unlist(lapply(currencies$curr_number, validate_currency)))
  expect_null(
    unlist(lapply(as.numeric(currencies$curr_number), validate_currency)))
  # Invalid 3-number currency codes result in an error
  expect_error(lapply(c(999, 998), validate_currency))
})
test_that("the `get_currency_str()` function works correctly", {
  # 3-letter currency codes return a currency symbol
  expect_equal(get_currency_str(currency = "CAD"), "$")
  expect_equal(get_currency_str(currency = "DKK"), "kr.")
  expect_equal(get_currency_str(currency = "JPY"), "¥")
  expect_equal(get_currency_str(currency = "RUB"), "₽")
  # 3-number currency codes return a currency symbol
  expect_equal(get_currency_str(currency = "230"), "Br")
  expect_equal(get_currency_str(currency = "946"), "RON")
  expect_equal(get_currency_str(currency = "682"), "SR")
  expect_equal(get_currency_str(currency = "90"), "SI$")
  # Common currency names return a currency symbol
  expect_equal(get_currency_str(currency = "pound"), "£")
  expect_equal(get_currency_str(currency = "franc"), "₣")
  expect_equal(get_currency_str(currency = "guarani"), "₲")
  expect_equal(get_currency_str(currency = "hryvnia"), "₴")
  # With `fallback_to_code`, 3-letter codes are substituted where an
  # HTML entity would otherwise be provided
  expect_equal(get_currency_str(currency = "CAD", fallback_to_code = TRUE), "$")
  expect_equal(get_currency_str(currency = "DKK", fallback_to_code = TRUE), "kr.")
  expect_equal(get_currency_str(currency = "JPY", fallback_to_code = TRUE), "JPY")
  expect_equal(get_currency_str(currency = "RUB", fallback_to_code = TRUE), "RUB")
  # The same applies to 3-number codes
  expect_equal(get_currency_str(currency = "532", fallback_to_code = TRUE), "ANG")
  expect_equal(get_currency_str(currency = 533, fallback_to_code = TRUE), "AWG")
  # Common currency names never fall back to a code
  expect_equal(get_currency_str(currency = "pound", fallback_to_code = TRUE), "£")
  expect_equal(get_currency_str(currency = "franc", fallback_to_code = TRUE), "₣")
  expect_equal(get_currency_str(currency = "guarani", fallback_to_code = TRUE), "₲")
  expect_equal(get_currency_str(currency = "hryvnia", fallback_to_code = TRUE), "₴")
  # Uninterpretable input is returned unchanged
  expect_equal(get_currency_str(currency = "thaler"), "thaler")
})
test_that("the `get_currency_exponent()` function works correctly", {
  # 3-letter currency codes return the expected exponent
  expect_equal(get_currency_exponent(currency = "BIF"), 0)
  expect_equal(get_currency_exponent(currency = "AED"), 2)
  expect_equal(get_currency_exponent(currency = "TND"), 3)
  expect_equal(get_currency_exponent(currency = "CLF"), 4)
  # 3-number currency codes return the expected exponent
  expect_equal(get_currency_exponent(currency = "533"), 2)
  expect_equal(get_currency_exponent(currency = "152"), 0)
  expect_equal(get_currency_exponent(currency = 990), 4)
  expect_equal(get_currency_exponent(currency = 886), 2)
  # Currencies whose exponent field is NA yield an exponent of 0
  na_exponent_codes <- currencies$curr_code[is.na(currencies$exponent)]
  expect_equal(
    unlist(lapply(na_exponent_codes, get_currency_exponent)),
    rep(0, 7))
})
test_that("the `process_text()` function works correctly", {
  # Plain text with the `character` class
  simple_text <- "this is simple text"
  # Markdown text with the `from_markdown` class (via the `md()` helper)
  md_text <- md("this is *text* interpreted as **markdown**")
  # HTML text with the `html`/`character` classes (via the `html()` helper)
  html_text <- html("this is <em>text</em> that's <strong>HTML</strong>")
  # `character` input passes through `process_text()` unchanged
  expect_equal(process_text(text = simple_text), simple_text)
  expect_is(simple_text, "character")
  # `from_markdown` input is transformed to character-based HTML
  expect_equal(
    process_text(text = md_text),
    "this is <em>text</em> interpreted as <strong>markdown</strong>")
  expect_is(md_text, "from_markdown")
  expect_is(process_text(text = md_text), "character")
  # `html` input passes through as character-based HTML
  expect_equal(process_text(text = html_text), as.character(html_text))
  expect_is(html_text, c("html", "character"))
  expect_is(process_text(text = html_text), c("html", "character"))
})
test_that("the `apply_pattern_fmt_x()` function works correctly", {
  # Pre-formatted values to apply patterns to
  x <- c("23.4%", "32.6%", "9.15%")
  # The default `{x}` pattern leaves values unchanged
  expect_equal(apply_pattern_fmt_x(pattern = "{x}", values = x), x)
  # Literal text can be appended
  expect_equal(
    apply_pattern_fmt_x(pattern = "{x}:n", values = x),
    paste0(x, ":n"))
  # Literal text can be appended and prepended at once
  expect_equal(
    apply_pattern_fmt_x(pattern = "+{x}:n", values = x),
    paste0("+", x, ":n"))
  # Multiple `{x}` placeholders produce multiple copies of each value
  expect_equal(
    apply_pattern_fmt_x(pattern = "{x}, ({x})", values = x),
    paste0(x, ", (", x, ")"))
})
test_that("the `remove_html()` function works correctly", {
  # HTML text carrying only the `character` class
  html_text_1 <- "<p>this is <em>text</em> that's <strong>HTML</strong></p>"
  # HTML text carrying the `html` and `character` classes (via `html()`)
  html_text_2 <- html("this is <em>text</em> that's <strong>HTML</strong>")
  # Tags are stripped from `character` input and the class is preserved
  expect_equal(remove_html(html_text_1), "this is text that's HTML")
  expect_is(remove_html(html_text_1), "character")
  # Tags are stripped from `html` input and both classes are preserved
  html_text_2_removed <- remove_html(html_text_2)
  expect_is(html_text_2_removed, c("html", "character"))
  expect_equal(
    as.character(html_text_2_removed),
    remove_html(html_text_1))
})
test_that("the `get_css_tbl()` function works correctly", {
  # Derive a CSS table from a gt table based on the `mtcars` dataset
  css_tbl <- get_css_tbl(gt(mtcars, rownames_to_stub = TRUE))
  expect_is(css_tbl, c("tbl_df", "tbl", "data.frame"))
  expect_equal(dim(css_tbl), c(131, 4))
  expect_equal(
    colnames(css_tbl),
    c("selector", "type", "property", "value"))
})
# NOTE(review): the expected style strings below are tightly coupled to
# gt's default theme; a change in default CSS will break these literals.
test_that("the `inline_html_styles()` function works correctly", {
  # Create a simple gt table from `mtcars`
  data <- gt(mtcars)
  # Get the CSS tibble and the raw HTML; `inline_css = FALSE` keeps the
  # styles in classes so that `inline_html_styles()` has work to do
  css_tbl <- data %>% get_css_tbl()
  html <- data %>% as_raw_html(inline_css = FALSE)
  # Get the inlined HTML using `inline_html_styles()`
  inlined_html <- inline_html_styles(html = html, css_tbl = css_tbl)
  # Expect that certain portions of `inlined_html` have
  # inlined CSS rules (table-level style attribute)
  expect_true(
    grepl(
      paste0(
        "style=\"font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', ",
        "Roboto, Oxygen, Ubuntu, Cantarell, 'Helvetica Neue', 'Fira Sans', ",
        "'Droid Sans', Arial, sans-serif; display: table; border-collapse: ",
        "collapse; margin-left: auto; margin-right: auto; color: #333333; ",
        "font-size: 16px; background-color: #FFFFFF; width: auto; ",
        "border-top-style: solid; border-top-width: 2px; border-top-color: ",
        "#A8A8A8; border-bottom-style: solid; border-bottom-width: 2px; ",
        "border-bottom-color: #A8A8A8;\""
      ),
      inlined_html
    )
  )
  # Cell-level style attribute should also be inlined
  expect_true(
    grepl(
      paste0(
        "style=\"color: #333333; background-color: #FFFFFF; font-size: ",
        "16px; font-weight: initial; vertical-align: middle; padding: ",
        "5px; margin: 10px; overflow-x: hidden; border-top-style: solid; ",
        "border-top-width: 2px; border-top-color: #D3D3D3; ",
        "border-bottom-style: solid; border-bottom-width: 2px; ",
        "border-bottom-color: #D3D3D3; text-align: center;\""
      ),
      inlined_html
    )
  )
  # Augment the gt table with custom styles (10px text in all body cells)
  data <-
    data %>%
    tab_style(
      style = cell_text(size = px(10)),
      locations = cells_data(columns = TRUE)
    )
  # Get the CSS tibble and the raw HTML
  css_tbl <- data %>% get_css_tbl()
  html <- data %>% as_raw_html(inline_css = FALSE)
  # Get the inlined HTML using `inline_html_styles()`
  inlined_html <- inline_html_styles(html = html, css_tbl = css_tbl)
  # Expect that the style rule from `tab_style` is a listed value along with
  # the inlined rules derived from the CSS classes
  expect_true(
    grepl(
      paste0(
        "style=\"padding: 8px; margin: 10px; border-bottom-style: solid; ",
        "border-bottom-width: thin; border-bottom-color: #D3D3D3; ",
        "vertical-align: middle; overflow-x: hidden; text-align: right; ",
        "font-variant-numeric: tabular-nums; font-size: 10px;\""
      ),
      inlined_html
    )
  )
  # Create a gt table with a custom style in the title and subtitle
  # (left alignment of text)
  data <-
    gt(mtcars) %>%
    tab_header(
      title = "The title",
      subtitle = "The subtitle"
    ) %>%
    tab_style(
      style = cell_text(align = "left"),
      locations = list(
        cells_title(groups = "title"),
        cells_title(groups = "subtitle")
      )
    )
  # Get the CSS tibble and the raw HTML
  css_tbl <- data %>% get_css_tbl()
  html <- data %>% as_raw_html(inline_css = FALSE)
  # Get the inlined HTML using `inline_html_styles()`
  inlined_html <- inline_html_styles(html = html, css_tbl = css_tbl)
  # Expect that the `colspan` attr is preserved in both <th> elements
  # and that the `text-align:left` rule is present
  expect_true(
    grepl("th colspan=\"11\" style=.*?text-align: left;", inlined_html)
  )
})
# Verify that `as_locations()` upgrades a single location helper object
# to a list of locations, and rejects plain vectors
test_that("the `as_locations()` function works correctly", {
  # Define `locations` as a `cells_data` object
  locations <-
    cells_data(
      columns = vars(hp),
      rows = c("Datsun 710", "Valiant"))
  # A `cells_data` object is a list of two quosures (columns, rows)
  expect_equal(length(locations), 2)
  expect_equal(length(locations[[1]]), 2)
  expect_is(locations[[1]], c("quosure", "formula"))
  expect_is(locations[[2]], c("quosure", "formula"))
  # Upgrade `locations` to a list of locations
  locations_list <- as_locations(locations)
  # The list wraps the original object as its single element
  expect_equal(length(locations_list), 1)
  expect_equal(length(locations_list[[1]]), 2)
  expect_is(locations_list[[1]], c("cells_data", "location_cells"))
  # A plain named vector is not a valid location specification
  locations <-
    c(
      columns = "hp",
      rows = c("Datsun 710", "Valiant"))
  expect_error(
    as_locations(locations))
})
# Verify that `process_footnote_marks()` renders footnote indices for
# each supported mark specification, doubling marks on the second cycle
test_that("the `process_footnote_marks()` function works correctly", {
  # "numbers" passes indices through as their string form,
  # whether the input is integer or character
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "numbers"),
    as.character(1:10))
  expect_equal(
    process_footnote_marks(x = as.character(1:10), marks = "numbers"),
    as.character(1:10))
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "numbers"),
    as.character(1:10))
  # A custom 5-element mark vector recycles by doubling each mark
  expect_equal(
    process_footnote_marks(x = 1:10, marks = as.character(1:5)),
    c("1", "2", "3", "4", "5", "11", "22", "33", "44", "55"))
  # Lowercase letters, by keyword or by supplying the vector directly
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "letters"),
    letters[1:10])
  expect_equal(
    process_footnote_marks(x = 1:10, marks = letters),
    letters[1:10])
  # Uppercase letters, by keyword or by supplying the vector directly
  expect_equal(
    process_footnote_marks(x = 1:10, marks = "LETTERS"),
    LETTERS[1:10])
  expect_equal(
    process_footnote_marks(x = 1:10, marks = LETTERS),
    LETTERS[1:10])
  # A custom set of symbol marks, doubled on the second cycle
  expect_equal(
    process_footnote_marks(x = 1:10, marks = c("⁕", "‖", "†", "§", "¶")),
    c("\u2055", "‖", "†", "§", "¶",
      "\u2055\u2055", "‖‖", "††", "§§", "¶¶"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caretList.R
\name{predict.caretList}
\alias{predict.caretList}
\title{Create a matrix of predictions for each of the models in a caretList}
\usage{
\method{predict}{caretList}(object, newdata = NULL, ..., verbose = FALSE)
}
\arguments{
\item{object}{an object of class caretList}
\item{newdata}{New data for predictions. It can be NULL, but this is ill-advised.}
\item{...}{additional arguments to pass to predict.train. Pass the \code{newdata}
argument here, DO NOT PASS the "type" argument. Classification models will
return probabilities if possible, and regression models will return "raw".}
\item{verbose}{Logical. If \code{FALSE}, no progress bar is printed; if \code{TRUE},
a progress bar is shown. Default \code{FALSE}.}
}
\description{
Make a matrix of predictions from a list of caret models
}
| /man/predict.caretList.Rd | permissive | Malhadas/caretEnsemble | R | false | true | 864 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caretList.R
\name{predict.caretList}
\alias{predict.caretList}
\title{Create a matrix of predictions for each of the models in a caretList}
\usage{
\method{predict}{caretList}(object, newdata = NULL, ..., verbose = FALSE)
}
\arguments{
\item{object}{an object of class caretList}
\item{newdata}{New data for predictions. It can be NULL, but this is ill-advised.}
\item{...}{additional arguments to pass to predict.train. Pass the \code{newdata}
argument here, DO NOT PASS the "type" argument. Classification models will
return probabilities if possible, and regression models will return "raw".}
\item{verbose}{Logical. If \code{FALSE}, no progress bar is printed; if \code{TRUE},
a progress bar is shown. Default \code{FALSE}.}
}
\description{
Make a matrix of predictions from a list of caret models
}
|
#--------------------加载包--------------------
rm(list = ls())
library(RCurl)
library(stringr)
library(httr)
library(jsonlite)
library(dplyr)
library(readxl)
library(rvest)
library(downloader)
#--------------------公式区--------------------
#' Convert a JSON string to CSV via the json-csv.com web API
#'
#' @param json A JSON string to be converted.
#' @param email Email address registered with the json-csv.com API.
#'   Defaults to the previously hard-coded address so existing calls
#'   keep working; callers may now supply their own.
#' @return The parsed response body from the API (the CSV content).
json_to_csv <- function(json, email = "495165378@qq.com") {
  url <- "https://json-csv.com/api/getcsv"
  params <- list(
    'email' = email,
    'json' = json
  )
  # POST the form-encoded payload and return the parsed response body
  response <- POST(url, body = params, encode = "form")
  content(response)
}
#--------------------下载区 (download section)--------------------
# Scrape the MOT highway statistics index page and download the monthly
# passenger ("公路旅客运输量") and freight ("公路货物运输量") transport
# volume PDFs into separate folders.
url <- "http://www.mot.gov.cn/tongjishuju/gonglu/index.html"
html <- getURL(url, encoding = 'utf-8')
web <- read_html(html)
id <- web %>% html_nodes('a.list-group-item') %>% html_attr('title')
url_detail <- web %>% html_nodes('a.list-group-item') %>% html_attr('href')
# BUG FIX: create the destination folders BEFORE any download is
# attempted (originally they were created after the loop, so every
# download into them would fail on a fresh machine).
dir.create("C:/Users/richard.jin/Desktop/Case")
dir.create("C:/Users/richard.jin/Desktop/Case/客运")
dir.create("C:/Users/richard.jin/Desktop/Case/货运")
# Helper: fetch the attachment PDF linked from a detail page and save it.
fetch_pdf <- function(detail_url, dest_path) {
  detail_html <- getURL(detail_url, encoding = 'utf-8')
  detail_web <- read_html(detail_html)
  url_download <- detail_web %>%
    html_nodes('div.fl.w100.gksqxz_fj ol li a') %>%
    html_attr('href')
  # The attachment href is relative; rebuild an absolute URL from the
  # first 46 characters of the detail-page URL (site-specific layout)
  a1 <- str_sub(url_download[1], start = 2)
  a2 <- str_sub(detail_url, start = 1, end = 46)
  download(paste(a2, a1, sep = ""), dest_path, mode = "wb")
}
# BUG FIX: the loop originally iterated over the undefined name `ID`;
# it must iterate over `id` (the scraped link titles). `str_detect()`
# replaces the convoluted is.na/str_locate/ifelse presence test.
for (i in seq_along(id)) {
  if (str_detect(id[i], "公路旅客运输量")) {
    fetch_pdf(url_detail[i],
              paste("C:/Users/richard.jin/Desktop/Case/客运/", id[i], ".pdf", sep = ""))
  } else if (str_detect(id[i], "公路货物运输量")) {
    fetch_pdf(url_detail[i],
              paste("C:/Users/richard.jin/Desktop/Case/货运/", id[i], ".pdf", sep = ""))
  }
}
# One-off download of the planning-department PDF (uses the final loop
# index `i` in the file name, as in the original)
download("http://xxgk.mot.gov.cn/2020/jigou/zhghs/202102/P020210226552808417837.pdf",
         paste("C:/Users/richard.jin/Desktop/Case/picture", i, ".pdf", sep = ""),
         mode = "wb")
| /交通指数v2.R | no_license | jfontestad/pachong_R | R | false | false | 2,298 | r | #--------------------加载包--------------------
rm(list = ls())
library(RCurl)
library(stringr)
library(httr)
library(jsonlite)
library(dplyr)
library(readxl)
library(rvest)
library(downloader)
#--------------------公式区--------------------
#' Convert a JSON string to CSV via the json-csv.com web API
#'
#' @param json A JSON string to be converted.
#' @param email Email address registered with the json-csv.com API.
#'   Defaults to the previously hard-coded address so existing calls
#'   keep working; callers may now supply their own.
#' @return The parsed response body from the API (the CSV content).
json_to_csv <- function(json, email = "495165378@qq.com") {
  url <- "https://json-csv.com/api/getcsv"
  params <- list(
    'email' = email,
    'json' = json
  )
  # POST the form-encoded payload and return the parsed response body
  response <- POST(url, body = params, encode = "form")
  content(response)
}
#--------------------下载区 (download section)--------------------
# Scrape the MOT highway statistics index page and download the monthly
# passenger ("公路旅客运输量") and freight ("公路货物运输量") transport
# volume PDFs into separate folders.
url <- "http://www.mot.gov.cn/tongjishuju/gonglu/index.html"
html <- getURL(url, encoding = 'utf-8')
web <- read_html(html)
id <- web %>% html_nodes('a.list-group-item') %>% html_attr('title')
url_detail <- web %>% html_nodes('a.list-group-item') %>% html_attr('href')
# BUG FIX: create the destination folders BEFORE any download is
# attempted (originally they were created after the loop, so every
# download into them would fail on a fresh machine).
dir.create("C:/Users/richard.jin/Desktop/Case")
dir.create("C:/Users/richard.jin/Desktop/Case/客运")
dir.create("C:/Users/richard.jin/Desktop/Case/货运")
# Helper: fetch the attachment PDF linked from a detail page and save it.
fetch_pdf <- function(detail_url, dest_path) {
  detail_html <- getURL(detail_url, encoding = 'utf-8')
  detail_web <- read_html(detail_html)
  url_download <- detail_web %>%
    html_nodes('div.fl.w100.gksqxz_fj ol li a') %>%
    html_attr('href')
  # The attachment href is relative; rebuild an absolute URL from the
  # first 46 characters of the detail-page URL (site-specific layout)
  a1 <- str_sub(url_download[1], start = 2)
  a2 <- str_sub(detail_url, start = 1, end = 46)
  download(paste(a2, a1, sep = ""), dest_path, mode = "wb")
}
# BUG FIX: the loop originally iterated over the undefined name `ID`;
# it must iterate over `id` (the scraped link titles). `str_detect()`
# replaces the convoluted is.na/str_locate/ifelse presence test.
for (i in seq_along(id)) {
  if (str_detect(id[i], "公路旅客运输量")) {
    fetch_pdf(url_detail[i],
              paste("C:/Users/richard.jin/Desktop/Case/客运/", id[i], ".pdf", sep = ""))
  } else if (str_detect(id[i], "公路货物运输量")) {
    fetch_pdf(url_detail[i],
              paste("C:/Users/richard.jin/Desktop/Case/货运/", id[i], ".pdf", sep = ""))
  }
}
# One-off download of the planning-department PDF (uses the final loop
# index `i` in the file name, as in the original)
download("http://xxgk.mot.gov.cn/2020/jigou/zhghs/202102/P020210226552808417837.pdf",
         paste("C:/Users/richard.jin/Desktop/Case/picture", i, ".pdf", sep = ""),
         mode = "wb")
|
install.packages("caret", dependencies = c("Depends", "Suggests"))
summary(CompleteResponses)
#check for missing values, although I know this is the complete dataset
any(is.na(CompleteResponses))
#Check names of attributes
names(CompleteResponses)
#Boxplots for eacht attribute, and see if there are outliers:
boxplot(CompleteResponses$salary)
boxplot(CompleteResponses$age)
hist(CompleteResponses$age)
hist(CompleteResponses$elevel) #error, x must be numeric
hist(CompleteResponses$car) #error, x must be numeric
hist(CompleteResponses$zipcode) #error, x must be numeric
boxplot(CompleteResponses$credit)
hist(CompleteResponses$brand) #error, x must be numeric
#checking the data types
str(CompleteResponses)
#Convert data type elevel: int to ordinal
CompleteResponses$elevel <- as.ordered(CompleteResponses$elevel)
#Convert data type car: int to factor
CompleteResponses$car <- as.factor(CompleteResponses$car)
#Convert data type zipcode: int to factor
CompleteResponses$zipcode <- as.factor(CompleteResponses$zipcode)
#Convert data type brand: int to factor, 0 (acer) = false and 1 (Sony) = true
CompleteResponses$brand <- as.factor(CompleteResponses$brand)
#changing false and true to acer and sony
levels(CompleteResponses$brand) <-c('Acer','Sony')
#checking the data types again after converstion
str(CompleteResponses)
#Making boxplots again for the converted attributes:
plot(CompleteResponses$elevel) #instead of hist which is only for numeric values
plot(CompleteResponses$car) #instead of hist which is only for numeric values
plot(CompleteResponses$zipcode) #instead of hist which is only for numeric values
#how to plot brand
plot(CompleteResponses$brand)
#pie chart
mytable <- table(CompleteResponses$brand)
pie(mytable, main="Pie Chart of Brands")
library(ggplot2)
#changing numeric value to categorical (discretization): salary, age and credit
Catsalary <- cut(CompleteResponses$salary, breaks=c(0,30000,60000,90000,120000,150000), labels = c("Salary 0-30000", "Salary 30000-60000","Salary 60000-90000","Salary 90000-120000","Salary 120000-150000")) #5 bins
Catage <- cut(CompleteResponses$age, breaks=c(20,40,60,81), labels = c("Age 20-40","Age 40-60","Age 60-80"), right=FALSE) #3 bins
#add extra column
CompleteResponses["Catage"] <- Catage
CompleteResponses["Catsalary"] <- Catsalary
plot(Catage)
plot(Catsalary)
Catage[1:10]
#make plot: difference in salary between brand preference acer vs. sony
ggplot(data = CompleteResponses) +
geom_boxplot(aes(x = brand, y = salary)) #people buying sony have higher salary
ggplot(data = CompleteResponses, aes(x = salary)) +
geom_histogram(aes(fill=brand), bins = 6) +
facet_wrap(~zipcode)
#make plot: difference in age between brand preference acer vs. sony
boxplot(CompleteResponses$age ~ CompleteResponses$brand)
ggplot(data = CompleteResponses) +
geom_boxplot(aes(x = brand, y = age)) #no difference in age between acer vs. sony
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = age))
ggplot(data = CompleteResponses, aes(x = age)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~zipcode) #different zipcodes
#relationship between age vs. salary for acer and sony
#geom_point
ggplot(data = CompleteResponses) +
geom_point(aes(x = age,y = salary, col=brand)) +
facet_wrap(~brand)
#geom_jitter
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = age, y = salary, col=brand)) +
facet_wrap(~brand)
#categorized
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = Catage, y = Catsalary, col=brand)) +
facet_wrap(~brand) +
theme_bw()
#relationship between salary (5 categories) vs. brand, for age (3 categories)
#geom_jitter
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = Catsalary, col=Catage))
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = Catsalary, col=brand)) +
facet_wrap(~Catage) +
theme_bw()
#relationship between age (3 categories) vs. brand, for salary (5 categories)
#geom_jitter
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = Catage, col=brand)) +
facet_wrap(~Catsalary) +
theme_bw()
#make plot: difference in elevel between acer vs. sony
ggplot(data = CompleteResponses, aes(x = elevel)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~brand) #no difference in elevel between acer vs. sony
#make plot: difference in primary car between acer vs. sony
ggplot(data = CompleteResponses, aes(x = car)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~brand) #no difference in primary car between acer vs. sony
#make plot: difference in zipcode between acer vs. sony
ggplot(data = CompleteResponses, aes(x = zipcode)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~brand) #no difference in zipcode between acer vs. sony
#make plot: difference in credit between brand preference acer vs. sony
ggplot(data = CompleteResponses) +
geom_boxplot(aes(x = brand, y = credit))
ggplot(data = CompleteResponses, aes(x = credit)) +
geom_histogram(aes(fill=brand), bins = 10) +
facet_wrap(~zipcode)
# Delete the temporary discretized columns Catage and Catsalary.
# Dropping by name is safer than the original positional `[,-8]`
# applied twice, which silently depends on column order.
CompleteResponses$Catage <- NULL
CompleteResponses$Catsalary <- NULL
library(caret)
library(lattice)
set.seed(688)
# Hold out 25% of the complete responses as a test set
inTraining <- createDataPartition(CompleteResponses$brand, p = .75, list = FALSE)
trainSet <- CompleteResponses[inTraining, ]
testSet <- CompleteResponses[-inTraining, ]
# decision tree C5.0
# 10-fold cross validation, repeated once
fitControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 1)
# Train a C5.0 model with all predictors.
# BUG FIX: caret's argument is `tuneLength` — the original lowercase
# `tunelength` was swallowed by `...` and silently ignored. Also,
# `trControl = fitControl` must be passed explicitly, otherwise the
# 10-fold CV defined above is never used (caret defaults to bootstrap).
dt_model_all <- train(brand ~ .,
                      data = trainSet,
                      method = "C5.0",
                      trControl = fitControl,
                      tuneLength = 2)
# performance of the model
dt_model_all
# Train a C5.0 model with only age and salary as predictors
dt_model_2 <- train(brand ~ age + salary,
                    data = trainSet,
                    method = "C5.0",
                    trControl = fitControl,
                    tuneLength = 2)
# performance of the model
dt_model_2
# how the model prioritized each feature in the training
plot(varImp(dt_model_all))
#10 fold cross validation, repeat = 1
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 1)
#train Random Forest Regression model with age and salary as predictors for brand
#with a tuneLenght = 1 (trains with 1 mtry value for RandomForest)
rf_model_2 <- train(brand~age+salary,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 1)
#results
rf_model_2
rf_model_2_1 <- rf_model_2
#train Random Forest Regression model with age and salary as predictors for brand
#with a tuneLenght = 2 (trains with 2 mtry value for RandomForest)
rf_model_2_2 <- train(brand~age+salary,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 2)
#results
rf_model_2_2
#train Random Forest Regression model with age and salary as predictors for brand
#with a tuneLenght = 3 (trains with 2 mtry value for RandomForest)
rf_model_2_3 <- train(brand~age+salary,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 3)
#results
rf_model_2_3
#train Random Forest Regression model with age, salary and credit as predictors for brand
#with a tuneLenght = 1 (trains with 1 mtry value for RandomForest)
rf_model_3_1 <- train(brand~age+salary+credit,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 1)
#results
rf_model_3_1
#train Random Forest Regression model with age, salary and credit as predictors for brand
#with a tuneLenght = 2 (trains with 2 mtry value for RandomForest)
rf_model_3_2 <- train(brand~age+salary+credit,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 2)
#results
rf_model_3_2
#train Random Forest Regression model with all variables as predictors for brand
#with a tuneLenght = 1 (trains with 1 mtry value for RandomForest)
rf_model_all_1 <- train(brand~.,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 1)
#results
rf_model_all_1
#train Random Forest Regression model with all variables as predictors for brand
#with a tuneLenght = 2 (trains with 2 mtry value for RandomForest)
rf_model_all_2 <- train(brand~.,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 2)
#results
rf_model_all_2
#train Random Forest Regression model with all variables as predictors for brand
#with a tuneLenght = 3 (trains with 3 mtry value for RandomForest)
rf_model_all_3 <- train(brand~.,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 3)
#results
rf_model_all_3
plot(rf_model_all_3)
#train Random Forest Regression model with all variables as predictors for brand
#with a tuneLenght = 4 (trains with 4 mtry value for RandomForest)
rf_model_all_4 <- train(brand~.,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 4)
#results
rf_model_all_4
#train Random Forest Regression model with all variables as predictors for brand
#with a tuneLenght = 5 (trains with 5 mtry value for RandomForest)
rf_model_all_5 <- train(brand~.,
data = trainSet,
method = "rf",
trControl=fitControl,
tunelength = 5)
#results
rf_model_all_5
#manual Grid
#10 fold cross validation, repeat = 1
fitControl <- trainControl(method = "repeatedcv",
number = 10,
repeats = 1)
#dataframe for manual tuning of mtry
rfGrid <- expand.grid(mtry=c(1,2,3))
#train Random Forest Regression model with age and salary as predictors for brand
#with a tuneLenght = 1 (trains with 1 mtry value for RandomForest)
rf_modelmanual_1 <- train(brand~.,
data = trainSet,
method = "rf",
trControl=fitControl,
tuneGrid=rfGrid)
#results
rf_modelmanual_1
# Evaluate the candidate models on held-out data.
# BUG FIX: the original code predicted on `SurveyIncomplete` but then
# compared those predictions against `testSet$brand` with
# `postResample()`. Labels and predictions came from different data
# sets, so the reported "overfitting" (near-chance accuracy) was an
# artifact of the mismatch. Generalization must be measured with
# predictions on the same held-out test set the labels come from.
# C5.0 model (predictors: age and salary) evaluated on the test set
pred_brand_dt <- predict(dt_model_2, newdata = testSet)
postResample(pred_brand_dt, testSet$brand)
# random forest model (predictors: age and salary) on the test set
pred_brand_rf <- predict(rf_model_2_1, newdata = testSet)
postResample(pred_brand_rf, testSet$brand)
###########################################
# going back to the random forest model
# variable importance of the all-predictor rf model
varImp(rf_model_all_1)
# 10-fold cross validation, repeated once
fitControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 1)
# random forest using only `salary` as a predictor
# (BUG FIX: `tuneLength`, not `tunelength`, as elsewhere)
rf_model_1_1 <- train(brand ~ salary,
                      data = trainSet,
                      method = "rf",
                      trControl = fitControl,
                      tuneLength = 1)
# results of model
rf_model_1_1
pred_brand_rf1 <- predict(rf_model_1_1, newdata = testSet)
postResample(pred_brand_rf1, testSet$brand)
# model with three predictors (age, salary, credit) on the test set
pred_brand_rf3 <- predict(rf_model_3_1, newdata = testSet)
postResample(pred_brand_rf3, testSet$brand)
# try fewer folds: 5-fold cross validation, repeated once
fitControl <- trainControl(method = "repeatedcv",
                           number = 5,
                           repeats = 1)
dt_model_2 <- train(brand ~ age + salary,
                    data = trainSet,
                    method = "C5.0",
                    trControl = fitControl,
                    tuneLength = 2)
# results
dt_model_2
pred_brand_dt <- predict(dt_model_2, newdata = testSet)
postResample(pred_brand_dt, testSet$brand)
summary(CompleteResponses)
#check for missing values, although I know this is the complete dataset
any(is.na(CompleteResponses))
#Check names of attributes
names(CompleteResponses)
#Boxplots for eacht attribute, and see if there are outliers:
boxplot(CompleteResponses$salary)
boxplot(CompleteResponses$age)
hist(CompleteResponses$age)
hist(CompleteResponses$elevel) #error, x must be numeric
hist(CompleteResponses$car) #error, x must be numeric
hist(CompleteResponses$zipcode) #error, x must be numeric
boxplot(CompleteResponses$credit)
hist(CompleteResponses$brand) #error, x must be numeric
#checking the data types
str(CompleteResponses)
#Convert data type elevel: int to ordinal
CompleteResponses$elevel <- as.ordered(CompleteResponses$elevel)
#Convert data type car: int to factor
CompleteResponses$car <- as.factor(CompleteResponses$car)
#Convert data type zipcode: int to factor
CompleteResponses$zipcode <- as.factor(CompleteResponses$zipcode)
#Convert data type brand: int to factor, 0 (acer) = false and 1 (Sony) = true
CompleteResponses$brand <- as.factor(CompleteResponses$brand)
#changing false and true to acer and sony
levels(CompleteResponses$brand) <-c('Acer','Sony')
#checking the data types again after converstion
str(CompleteResponses)
#Making boxplots again for the converted attributes:
plot(CompleteResponses$elevel) #instead of hist which is only for numeric values
plot(CompleteResponses$car) #instead of hist which is only for numeric values
plot(CompleteResponses$zipcode) #instead of hist which is only for numeric values
#how to plot brand
plot(CompleteResponses$brand)
#pie chart
mytable <- table(CompleteResponses$brand)
pie(mytable, main="Pie Chart of Brands")
library(ggplot2)
#changing numeric value to categorical (discretization): salary, age and credit
Catsalary <- cut(CompleteResponses$salary, breaks=c(0,30000,60000,90000,120000,150000), labels = c("Salary 0-30000", "Salary 30000-60000","Salary 60000-90000","Salary 90000-120000","Salary 120000-150000")) #5 bins
Catage <- cut(CompleteResponses$age, breaks=c(20,40,60,81), labels = c("Age 20-40","Age 40-60","Age 60-80"), right=FALSE) #3 bins
#add extra column
CompleteResponses["Catage"] <- Catage
CompleteResponses["Catsalary"] <- Catsalary
plot(Catage)
plot(Catsalary)
Catage[1:10]
#make plot: difference in salary between brand preference acer vs. sony
ggplot(data = CompleteResponses) +
geom_boxplot(aes(x = brand, y = salary)) #people buying sony have higher salary
ggplot(data = CompleteResponses, aes(x = salary)) +
geom_histogram(aes(fill=brand), bins = 6) +
facet_wrap(~zipcode)
#make plot: difference in age between brand preference acer vs. sony
boxplot(CompleteResponses$age ~ CompleteResponses$brand)
ggplot(data = CompleteResponses) +
geom_boxplot(aes(x = brand, y = age)) #no difference in age between acer vs. sony
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = age))
ggplot(data = CompleteResponses, aes(x = age)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~zipcode) #different zipcodes
#relationship between age vs. salary for acer and sony
#geom_point
ggplot(data = CompleteResponses) +
geom_point(aes(x = age,y = salary, col=brand)) +
facet_wrap(~brand)
#geom_jitter
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = age, y = salary, col=brand)) +
facet_wrap(~brand)
#categorized
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = Catage, y = Catsalary, col=brand)) +
facet_wrap(~brand) +
theme_bw()
#relationship between salary (5 categories) vs. brand, for age (3 categories)
#geom_jitter
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = Catsalary, col=Catage))
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = Catsalary, col=brand)) +
facet_wrap(~Catage) +
theme_bw()
#relationship between age (3 categories) vs. brand, for salary (5 categories)
#geom_jitter
ggplot(data = CompleteResponses) +
geom_jitter(aes(x = brand, y = Catage, col=brand)) +
facet_wrap(~Catsalary) +
theme_bw()
#make plot: difference in elevel between acer vs. sony
ggplot(data = CompleteResponses, aes(x = elevel)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~brand) #no difference in elevel between acer vs. sony
#make plot: difference in primary car between acer vs. sony
ggplot(data = CompleteResponses, aes(x = car)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~brand) #no difference in primary car between acer vs. sony
#make plot: difference in zipcode between acer vs. sony
ggplot(data = CompleteResponses, aes(x = zipcode)) +
geom_bar(stat = "count", aes(fill=brand)) +
facet_wrap(~brand) #no difference in zipcode between acer vs. sony
#make plot: difference in credit between brand preference acer vs. sony
ggplot(data = CompleteResponses) +
geom_boxplot(aes(x = brand, y = credit))
ggplot(data = CompleteResponses, aes(x = credit)) +
geom_histogram(aes(fill=brand), bins = 10) +
facet_wrap(~zipcode)
# Delete the temporary discretized columns Catage and Catsalary.
# Dropping by name is safer than the original positional `[,-8]`
# applied twice, which silently depends on column order.
CompleteResponses$Catage <- NULL
CompleteResponses$Catsalary <- NULL
library(caret)
library(lattice)
set.seed(688)
# Hold out 25% of the complete responses as a test set
inTraining <- createDataPartition(CompleteResponses$brand, p = .75, list = FALSE)
trainSet <- CompleteResponses[inTraining, ]
testSet <- CompleteResponses[-inTraining, ]
# decision tree C5.0
# 10-fold cross validation, repeated once
fitControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 1)
# Train a C5.0 model with all predictors.
# BUG FIX: caret's argument is `tuneLength` — the original lowercase
# `tunelength` was swallowed by `...` and silently ignored. Also,
# `trControl = fitControl` must be passed explicitly, otherwise the
# 10-fold CV defined above is never used (caret defaults to bootstrap).
dt_model_all <- train(brand ~ .,
                      data = trainSet,
                      method = "C5.0",
                      trControl = fitControl,
                      tuneLength = 2)
# performance of the model
dt_model_all
# Train a C5.0 model with only age and salary as predictors
dt_model_2 <- train(brand ~ age + salary,
                    data = trainSet,
                    method = "C5.0",
                    trControl = fitControl,
                    tuneLength = 2)
# performance of the model
dt_model_2
# how the model prioritized each feature in the training
plot(varImp(dt_model_all))
# random forest
# 10-fold cross validation, repeated once
fitControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 1)
# Helper: fit a random forest with the shared resampling scheme.
# BUG FIX: caret's tuning argument is `tuneLength`; the original
# lowercase `tunelength` was swallowed by `...` and silently ignored,
# so every model actually trained with caret's default tune length.
fit_rf <- function(formula, tune_length) {
  train(formula,
        data = trainSet,
        method = "rf",
        trControl = fitControl,
        tuneLength = tune_length)
}
# age + salary as predictors, tuneLength 1 to 3
rf_model_2 <- fit_rf(brand ~ age + salary, 1)
rf_model_2
rf_model_2_1 <- rf_model_2
rf_model_2_2 <- fit_rf(brand ~ age + salary, 2)
rf_model_2_2
rf_model_2_3 <- fit_rf(brand ~ age + salary, 3)
rf_model_2_3
# age + salary + credit as predictors, tuneLength 1 and 2
rf_model_3_1 <- fit_rf(brand ~ age + salary + credit, 1)
rf_model_3_1
rf_model_3_2 <- fit_rf(brand ~ age + salary + credit, 2)
rf_model_3_2
# all variables as predictors, tuneLength 1 to 5
rf_model_all_1 <- fit_rf(brand ~ ., 1)
rf_model_all_1
rf_model_all_2 <- fit_rf(brand ~ ., 2)
rf_model_all_2
rf_model_all_3 <- fit_rf(brand ~ ., 3)
rf_model_all_3
plot(rf_model_all_3)
rf_model_all_4 <- fit_rf(brand ~ ., 4)
rf_model_all_4
rf_model_all_5 <- fit_rf(brand ~ ., 5)
rf_model_all_5
# manual grid: tune `mtry` over explicit candidate values
# (all predictors; `tuneGrid` replaces automatic tune-length search)
fitControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 1)
rfGrid <- expand.grid(mtry = c(1, 2, 3))
rf_modelmanual_1 <- train(brand ~ .,
                          data = trainSet,
                          method = "rf",
                          trControl = fitControl,
                          tuneGrid = rfGrid)
# results
rf_modelmanual_1
#predict on new data, model: dt C5.0, predictors: age and salary, accuracy 0.913, kappa 0.815
pred_brand_dt <- predict(dt_model_2, newdata = SurveyIncomplete)
#postresample, comparing accuracy testSet
postResample(pred_brand_dt,testSet$brand)
# accuracy 0.527, kappa -0.0017, so C5.0 model is overfitting
#predict on new data, model: rf, predictors: age and salary, accuracy 0.913, kappa 0.815
pred_brand_rf <- predict(rf_model_2_1, newdata = SurveyIncomplete)
#postresample, comparing accuracy testSet
postResample(pred_brand_rf,testSet$brand) # accuracy 0.624, kappa 0, so rf model is overfitting
###########################################
#going back to model random forest.
#varImp of rf model:
varImp(rf_model_all_1) # salary 100.00, age 64
#10 fold cross validation, repeat = 1
fitControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 1)
# Train a Random Forest using only the strongest predictor (salary) for brand.
# BUGFIX: `tunelength` -> `tuneLength` (the lowercase spelling is silently
# ignored by caret::train(), which only recognises `tuneLength`).
rf_model_1_1 <- train(brand ~ salary,
                      data = trainSet,
                      method = "rf",
                      trControl = fitControl,
                      tuneLength = 1)
#results of model
rf_model_1_1 #accuracy 0.644, kappa 0.24
#predict on new data, model: rf, predictors: salary, accuracy 0.644, kappa 0.24
pred_brand_rf1 <- predict(rf_model_1_1, newdata = SurveyIncomplete)
#postresample, comparing accuracy testSet
postResample(pred_brand_rf1, testSet$brand) # accuracy 0.521, kappa -0.01889, low accuracy for both training and testSet.
#######now selecting model with 3 predictors (age, salary, credit) and see what the accuracy is in testSet.
#predict on new data, model: rf, predictors: age, salary, credit
pred_brand_rf3 <- predict(rf_model_3_1, newdata = SurveyIncomplete)
#postresample, comparing accuracy testSet
postResample(pred_brand_rf3, testSet$brand) #accuracy 0.62, kappa 0, indicating overfitting of training dataset.
####### how to handle overfitting, going back to model, try less folds
#5 fold cross validation, repeat = 1
fitControl <- trainControl(method = "repeatedcv",
                           number = 5,
                           repeats = 1)
# Train a C5.0 tree on age + salary to predict brand.
# BUGFIX 1: the original never passed `trControl = fitControl`, so the 5-fold
# CV control defined just above was silently unused and caret fell back to its
# bootstrap default - contradicting the stated intent of "try less folds".
# BUGFIX 2: `tunelength` -> `tuneLength` (the misspelling is ignored by train()).
dt_model_2 <- train(brand ~ age + salary,
                    data = trainSet,
                    method = "C5.0",
                    trControl = fitControl,
                    tuneLength = 2)
#results
dt_model_2 #accuracy 0.91, kappa 0.81
#predict on new data, model: dt C5.0, predictors: age and salary, accuracy 0.91, kappa 0.81
pred_brand_dt <- predict(dt_model_2, newdata = SurveyIncomplete)
#postresample, comparing accuracy testSet
postResample(pred_brand_dt, testSet$brand)
# accuracy 0.521, kappa -0.00834, so model with less folds is still overfitting
#' 3rd script
#' summary:
#' 01: Download Drug Perturbed Gens Expression Profiles, LINCS L1000 dataset
#' 02: Map from LINCS IDs to Chembl IDs using to PubChem IDs as intermediate
#' unichem RESTful API was last accessed on 11 March, 2019.
suppressWarnings(suppressMessages(library(data.table)))
suppressWarnings(suppressMessages(library(httr)))
suppressWarnings(suppressMessages(library(jsonlite)))
#####################################################################
#TODO: Change to the directory where you cloned this repository
#~~~~~~~Using relative path~~~~~~~#
# Create `folder` if it does not already exist (no-op when it does).
# Uses dir.exists() rather than file.exists() so a regular file with the same
# name no longer masks a missing directory, and creates missing parent
# directories via recursive = TRUE.
ensureFolder = function(folder) {
  if (!dir.exists(folder)) {
    dir.create(folder, recursive = TRUE)
  }
}
# Script setup: the first positional CLI argument is the results folder.
args = commandArgs(trailingOnly = TRUE)
# NOTE(review): normalizePath() is called before the folder is created below,
# so it may warn (and return the path unchanged) when the folder does not
# exist yet - confirm intended ordering.
resultsFolder = normalizePath(args[1])
ensureFolder(resultsFolder)
# Auto-printed when the script is run via Rscript.
sprintf("Using results folder at %s", resultsFolder)
dataFolder = file.path(resultsFolder)
#####################################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~get LINCS L1000 data from Harmonizome~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Download the L1000 chemical-perturbation gene-attribute edges unless a
# cached RData file already exists in the results folder. Columns 1, 3, 4, 7
# are kept (per the names used downstream: gene symbol, GeneID, the long
# perturbation descriptor, and the edge weight - confirm against the file).
if(!file.exists(file.path(dataFolder, "L1000_raw.RData"))){
url ="http://amp.pharm.mssm.edu/static/hdfs/harmonizome/data/lincscmapchemical/gene_attribute_edges.txt.gz"
# http_error(url) is TRUE when the request fails; tryCatch additionally
# swallows connection errors (handler returns 1). NOTE(review): if the
# download is skipped, the code below still fails on the missing L1000_raw.
tryCatch(if(!http_error(url) == TRUE){
tmp = tempfile()
download.file(url,tmp)
L1000_raw = read.csv(gzfile(tmp),header = T, skip = 1, sep = "\t")[,c(1,3,4,7)]
rm(tmp,url)
} else {
print("The url is outdated, please update!")
},
error=function(e) 1)
L1000_raw = data.table(unique(L1000_raw))
# The first 13 characters of the long perturbation column are the BROAD/LINCS id.
L1000_raw[, lincs_id := substr(`Perturbation.ID_Perturbagen_Cell.Line_Time_Time.Unit_Dose_Dose.Unit`, 1, 13)]
L1000_raw$Perturbation.ID_Perturbagen_Cell.Line_Time_Time.Unit_Dose_Dose.Unit = NULL
L1000_raw = data.table(unique(L1000_raw))
save(L1000_raw,file = file.path(dataFolder, "L1000_raw.RData"))
} else {cat(sprintf("~~ L1000_raw file already exists, not downloading again. ~~\n"))
load(file.path(dataFolder, "L1000_raw.RData"))}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~get LINCS to PubChem mappings~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Download the LINCS pert_id -> PubChem CID mapping (columns 1 and 3 of
# meta_SMILES.csv) unless a cached copy already exists on disk.
if(!file.exists(file.path(dataFolder, "lincs_pubchem.RData"))){
  url = "http://maayanlab.net/SEP-L1000/downloads/meta_SMILES.csv"
  # http_error() returns TRUE when the request fails; tryCatch additionally
  # guards against connection errors thrown before a status is available.
  tryCatch(if (!http_error(url)) {
    cat("~~Downloading lincs_pubchem mapping file.~~")
    lincs_pubchem = read.csv(url)[, c(1, 3)]
    rm(url)
  } else {
    # BUGFIX: typo "updatde" -> "update" in the user-facing message.
    cat("The url is outdated, please update!")
  },
  error = function(e) 1)
  # Drop compounds without a PubChem CID and normalise both keys to character.
  lincs_pubchem = lincs_pubchem[which(!is.na(lincs_pubchem$pubchem_cid)), ]
  lincs_pubchem = data.table(unique(lincs_pubchem))
  lincs_pubchem$pert_id = as.character(lincs_pubchem$pert_id)
  lincs_pubchem$pubchem_cid = as.character(lincs_pubchem$pubchem_cid)
  save(lincs_pubchem, file = file.path(dataFolder, "lincs_pubchem.RData"))
} else {
  cat(sprintf("~~ lincs_pubchem file already exists, not downloading again. ~~\n"))
  load(file.path(dataFolder, "lincs_pubchem.RData"))
}
# Restrict the mapping to perturbagens present in L1000_raw and keep the
# unique (lincs_id, pubchem_cid) pairs.
lincs_pubchem = merge(L1000_raw, lincs_pubchem, by.x = "lincs_id", by.y = "pert_id")
lincs_pubchem = unique(lincs_pubchem[, .(lincs_id, pubchem_cid)])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~map Entrez IDs to Ensembl~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Map Entrez gene IDs to Ensembl using the `gene_id` table loaded from
# geneID_v97.RData (v97 presumably refers to Ensembl release 97 - confirm).
load(file.path(dataFolder, "geneID_v97.RData"))
# NOTE(review): L1000_genes is computed here but not used in this script;
# presumably kept for interactive inspection - confirm before removing.
L1000_genes = L1000_raw[, as.character(unique(GeneID))]
L1000 = merge(L1000_raw, gene_id, by.x = "GeneID", by.y = "ENTREZ")
# Reorder to ENTREZID, GENEID (Ensembl), GeneSym, weight, lincs_id.
L1000 = L1000[,c(1,5,2,3,4)]
names(L1000) = c("ENTREZID","GENEID","GeneSym","weight","lincs_id")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~map LINCS IDs to ChEMBL IDs~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Map LINCS BROAD IDs to ChEMBL IDs by querying the UniChem REST API
# (src 22 = PubChem CID, src 1 = ChEMBL) once per compound, unless a cached
# L1000.RData already exists.
if(!file.exists(file.path(dataFolder, "L1000.RData"))){
  cat(sprintf("~~ Mapping BROAD IDs to ChEMBL via PubChem IDs. ~~\n"))
  pb <- txtProgressBar(min = 0, max = length(lincs_pubchem[, pubchem_cid]), style = 3)
  unichem_url = "https://www.ebi.ac.uk/unichem/rest/src_compound_id/"
  unichem_map = data.table()
  tryCatch(for (i in seq_len(nrow(lincs_pubchem))) {
    Sys.sleep(0.1)  # throttle requests to be polite to the UniChem API
    lincs_id = lincs_pubchem[i, lincs_id]
    pubchem_id = lincs_pubchem[i, pubchem_cid]
    chembl_id = as.character(fromJSON(content(GET(paste0(unichem_url, pubchem_id, "/22/1")), as = "text", encoding = "UTF-8")))
    # BUGFIX: the original tested `length(chembl_id > 0)` (the length of a
    # logical vector) instead of `length(chembl_id) > 0`; it also applied
    # startsWith() to a possibly multi-element vector, which errors under
    # `&&` on R >= 4.3. Keep only well-formed ChEMBL hits.
    if (length(chembl_id) > 0 && startsWith(chembl_id[1], "CHEMBL")) {
      tmp = data.table(lincs_id, pubchem_id, chembl_id)
      unichem_map = rbind(unichem_map, tmp)
    }
    setTxtProgressBar(pb, i)
  },
  error = function(e) 1)
  close(pb)
  L1000 = merge(L1000, unichem_map, by = "lincs_id")
  L1000 = L1000[, .(ensembl.id = GENEID, gene.symbol = GeneSym, lincs.id = lincs_id, pubchem.id = pubchem_id, chembl.id = chembl_id, direction = weight)]
  save(L1000, file = file.path(dataFolder, "L1000.RData"))
} else {
  cat(sprintf("~~ L1000 file already exists, not mapping again. ~~\n"))
  load(file.path(dataFolder, "L1000.RData"))
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~L1000 Drugs~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#' Write all L1000 drugs into a CSV file, used to fetch their ChEMBL names
#' and clinical trial information from ChEMBL via its API
#' (see the chemblid2name.ipynb script).
# Column 5 of L1000 is chembl.id (per the column selection performed above).
L1000Drugs = unique(L1000[, 5])
fwrite(L1000Drugs, file=file.path(dataFolder,"L1000Drugs.csv"), col.names = F)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
| /R/preprocessing/RetrieveDrugResponseData.R | permissive | sailfish009/ps4dr | R | false | false | 5,535 | r | #' 3rd script
#' summary:
#' 01: Download Drug Perturbed Gens Expression Profiles, LINCS L1000 dataset
#' 02: Map from LINCS IDs to Chembl IDs using to PubChem IDs as intermediate
#' unichem RESTful API was last accessed on 11 March, 2019.
suppressWarnings(suppressMessages(library(data.table)))
suppressWarnings(suppressMessages(library(httr)))
suppressWarnings(suppressMessages(library(jsonlite)))
#####################################################################
#TODO: Change to the directory where you cloned this repository
#~~~~~~~Using relative path~~~~~~~#
# Create `folder` if it does not already exist (no-op when it does).
# Uses dir.exists() rather than file.exists() so a regular file with the same
# name no longer masks a missing directory, and creates missing parent
# directories via recursive = TRUE.
ensureFolder = function(folder) {
  if (!dir.exists(folder)) {
    dir.create(folder, recursive = TRUE)
  }
}
# Script setup: the first positional CLI argument is the results folder.
args = commandArgs(trailingOnly = TRUE)
# NOTE(review): normalizePath() is called before the folder is created below,
# so it may warn (and return the path unchanged) when the folder does not
# exist yet - confirm intended ordering.
resultsFolder = normalizePath(args[1])
ensureFolder(resultsFolder)
# Auto-printed when the script is run via Rscript.
sprintf("Using results folder at %s", resultsFolder)
dataFolder = file.path(resultsFolder)
#####################################################################
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~get LINCS L1000 data from Harmonizome~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Download the L1000 chemical-perturbation gene-attribute edges unless a
# cached RData file already exists in the results folder. Columns 1, 3, 4, 7
# are kept (per the names used downstream: gene symbol, GeneID, the long
# perturbation descriptor, and the edge weight - confirm against the file).
if(!file.exists(file.path(dataFolder, "L1000_raw.RData"))){
url ="http://amp.pharm.mssm.edu/static/hdfs/harmonizome/data/lincscmapchemical/gene_attribute_edges.txt.gz"
# http_error(url) is TRUE when the request fails; tryCatch additionally
# swallows connection errors (handler returns 1). NOTE(review): if the
# download is skipped, the code below still fails on the missing L1000_raw.
tryCatch(if(!http_error(url) == TRUE){
tmp = tempfile()
download.file(url,tmp)
L1000_raw = read.csv(gzfile(tmp),header = T, skip = 1, sep = "\t")[,c(1,3,4,7)]
rm(tmp,url)
} else {
print("The url is outdated, please update!")
},
error=function(e) 1)
L1000_raw = data.table(unique(L1000_raw))
# The first 13 characters of the long perturbation column are the BROAD/LINCS id.
L1000_raw[, lincs_id := substr(`Perturbation.ID_Perturbagen_Cell.Line_Time_Time.Unit_Dose_Dose.Unit`, 1, 13)]
L1000_raw$Perturbation.ID_Perturbagen_Cell.Line_Time_Time.Unit_Dose_Dose.Unit = NULL
L1000_raw = data.table(unique(L1000_raw))
save(L1000_raw,file = file.path(dataFolder, "L1000_raw.RData"))
} else {cat(sprintf("~~ L1000_raw file already exists, not downloading again. ~~\n"))
load(file.path(dataFolder, "L1000_raw.RData"))}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~get LINCS to PubChem mappings~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Download the LINCS pert_id -> PubChem CID mapping (columns 1 and 3 of
# meta_SMILES.csv) unless a cached copy already exists on disk.
if(!file.exists(file.path(dataFolder, "lincs_pubchem.RData"))){
  url = "http://maayanlab.net/SEP-L1000/downloads/meta_SMILES.csv"
  # http_error() returns TRUE when the request fails; tryCatch additionally
  # guards against connection errors thrown before a status is available.
  tryCatch(if (!http_error(url)) {
    cat("~~Downloading lincs_pubchem mapping file.~~")
    lincs_pubchem = read.csv(url)[, c(1, 3)]
    rm(url)
  } else {
    # BUGFIX: typo "updatde" -> "update" in the user-facing message.
    cat("The url is outdated, please update!")
  },
  error = function(e) 1)
  # Drop compounds without a PubChem CID and normalise both keys to character.
  lincs_pubchem = lincs_pubchem[which(!is.na(lincs_pubchem$pubchem_cid)), ]
  lincs_pubchem = data.table(unique(lincs_pubchem))
  lincs_pubchem$pert_id = as.character(lincs_pubchem$pert_id)
  lincs_pubchem$pubchem_cid = as.character(lincs_pubchem$pubchem_cid)
  save(lincs_pubchem, file = file.path(dataFolder, "lincs_pubchem.RData"))
} else {
  cat(sprintf("~~ lincs_pubchem file already exists, not downloading again. ~~\n"))
  load(file.path(dataFolder, "lincs_pubchem.RData"))
}
# Restrict the mapping to perturbagens present in L1000_raw and keep the
# unique (lincs_id, pubchem_cid) pairs.
lincs_pubchem = merge(L1000_raw, lincs_pubchem, by.x = "lincs_id", by.y = "pert_id")
lincs_pubchem = unique(lincs_pubchem[, .(lincs_id, pubchem_cid)])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~map Entrez IDs to Ensembl~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Map Entrez gene IDs to Ensembl using the `gene_id` table loaded from
# geneID_v97.RData (v97 presumably refers to Ensembl release 97 - confirm).
load(file.path(dataFolder, "geneID_v97.RData"))
# NOTE(review): L1000_genes is computed here but not used in this script;
# presumably kept for interactive inspection - confirm before removing.
L1000_genes = L1000_raw[, as.character(unique(GeneID))]
L1000 = merge(L1000_raw, gene_id, by.x = "GeneID", by.y = "ENTREZ")
# Reorder to ENTREZID, GENEID (Ensembl), GeneSym, weight, lincs_id.
L1000 = L1000[,c(1,5,2,3,4)]
names(L1000) = c("ENTREZID","GENEID","GeneSym","weight","lincs_id")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~map LINCS IDs to ChEMBL IDs~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Map LINCS BROAD IDs to ChEMBL IDs by querying the UniChem REST API
# (src 22 = PubChem CID, src 1 = ChEMBL) once per compound, unless a cached
# L1000.RData already exists.
if(!file.exists(file.path(dataFolder, "L1000.RData"))){
  cat(sprintf("~~ Mapping BROAD IDs to ChEMBL via PubChem IDs. ~~\n"))
  pb <- txtProgressBar(min = 0, max = length(lincs_pubchem[, pubchem_cid]), style = 3)
  unichem_url = "https://www.ebi.ac.uk/unichem/rest/src_compound_id/"
  unichem_map = data.table()
  tryCatch(for (i in seq_len(nrow(lincs_pubchem))) {
    Sys.sleep(0.1)  # throttle requests to be polite to the UniChem API
    lincs_id = lincs_pubchem[i, lincs_id]
    pubchem_id = lincs_pubchem[i, pubchem_cid]
    chembl_id = as.character(fromJSON(content(GET(paste0(unichem_url, pubchem_id, "/22/1")), as = "text", encoding = "UTF-8")))
    # BUGFIX: the original tested `length(chembl_id > 0)` (the length of a
    # logical vector) instead of `length(chembl_id) > 0`; it also applied
    # startsWith() to a possibly multi-element vector, which errors under
    # `&&` on R >= 4.3. Keep only well-formed ChEMBL hits.
    if (length(chembl_id) > 0 && startsWith(chembl_id[1], "CHEMBL")) {
      tmp = data.table(lincs_id, pubchem_id, chembl_id)
      unichem_map = rbind(unichem_map, tmp)
    }
    setTxtProgressBar(pb, i)
  },
  error = function(e) 1)
  close(pb)
  L1000 = merge(L1000, unichem_map, by = "lincs_id")
  L1000 = L1000[, .(ensembl.id = GENEID, gene.symbol = GeneSym, lincs.id = lincs_id, pubchem.id = pubchem_id, chembl.id = chembl_id, direction = weight)]
  save(L1000, file = file.path(dataFolder, "L1000.RData"))
} else {
  cat(sprintf("~~ L1000 file already exists, not mapping again. ~~\n"))
  load(file.path(dataFolder, "L1000.RData"))
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~L1000 Drugs~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#' Write all L1000 drugs into a CSV file, used to fetch their ChEMBL names
#' and clinical trial information from ChEMBL via its API
#' (see the chemblid2name.ipynb script).
# Column 5 of L1000 is chembl.id (per the column selection performed above).
L1000Drugs = unique(L1000[, 5])
fwrite(L1000Drugs, file=file.path(dataFolder,"L1000Drugs.csv"), col.names = F)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
|
## A script that performs PCA on a normalized count matrix.
# CLI: the first (and only) positional argument is the path to the JSON
# configuration file for this pipeline step.
args = base::commandArgs(trailingOnly = TRUE)
# Echo the arguments so they appear in the run log.
print(args)
path2_json_file = args[1]
# **********************************************************************
## Load in the necessary libraries:
print("*** Loading libraries ***")
options(stringsAsFactors = FALSE)
options(bitmapType='quartz')
library(jsonlite)
library(ggplot2)
library(dplyr)
library(factoextra)
library(readr)
library(stringr)
#### Read in input files ###
# JSON input file with SD and AVG thresholds
print("*** Reading the input files ***")
json = read_json(path2_json_file)
# Output locations and experiment name come from the JSON config.
parent_folder = json$folders$output_folder
experiment = json$experiment_name
# Derived input paths: the design matrix and the z-scored count matrix
# produced by earlier pipeline steps.
path2_design = file.path(parent_folder, "results", paste0(experiment, "_design.txt"))
path2_count = file.path(parent_folder, "results", paste0(experiment , "_Z_threshold.txt"))
### Read in the filtered count matrix and the design file ###
# check.names=FALSE keeps sample names exactly as written in the header.
filt_count = as.matrix(read.table(path2_count, sep = "\t", header = TRUE, row.names = 1, check.names=FALSE))
design = read.table(path2_design, sep = "\t", header = TRUE, row.names = 1)
# **************** Start of the program **********************
print("*** Start of the program ***")
### Sanity-check that the count matrix is row-normalized (z-scored): ###
### per-row means ~ 0 and per-row standard deviations ~ 1.           ###
mn = apply(filt_count, 1, mean)
stdev = apply(filt_count, 1, sd)
# Equivalent two-sided bound expressed with abs(); the original used the
# vectorized `|` where a scalar condition is required.
if (abs(mean(mn)) > 0.0001){
  print("The count matrix is not normalized. Mean of means != 0")
  stop()
}
# BUGFIX: `mean(stdev) != 1` was an exact floating-point comparison that is
# almost always TRUE; compare with a numeric tolerance instead.
if (!isTRUE(all.equal(mean(stdev), 1))){
  print("Not all standard deviations of the normalized matrix == 1")
}
### Perform PCA on the samples ###
print("***Performing PCA. This can take a while.***")
# NOTE(review): `cols` is assigned but never used in this script.
cols = ncol(filt_count)
# Samples become rows via t(); scale = TRUE re-scales each gene column.
pca = prcomp(t(filt_count), scale = TRUE)
# for scree plot generation: percent variance explained per component
pcavar <- pca$sdev^2
per.pcavar = round(pcavar/sum(pcavar)*100,1)
### Generate a loading scores table ##
loadings = pca$rotation
### Save the loadings for each PC into a file ###
output_loadings = file.path(parent_folder, "results", paste0(experiment, "_pca_loading_scores.txt"))
write.table(loadings, file = output_loadings, sep = '\t',col.names=NA,row.names=TRUE,quote=FALSE)
# Save the eigenvalues
pca_eigenvalue=factoextra::get_eig(pca)
output_eigenvalues = file.path(parent_folder, "results", paste0(experiment, "_pca_eigenvalues.txt"))
write.table(pca_eigenvalue, file = output_eigenvalues, sep = '\t',col.names=NA,row.names=TRUE,quote=FALSE)
# Save the pca object for reuse by later steps.
output_pca = file.path(parent_folder, "results", paste0(experiment, "_pca_object.rds"))
write_rds(pca, output_pca)
# Extract the design variables from the JSON config: every non-empty entry
# design_variables[[i]] is bound to a global named formula<i> (e.g. formula1).
# `last_number` records the index of the last non-empty variable.
for (i in seq_along(json$design_variables)){  # seq_along is safe for empty lists (1:length is not)
  if (str_length(json$design_variables[[i]]) > 0){
    nam <- paste0("formula", i)
    assign(nam, json$design_variables[[i]])
    last_number = i
  } else {
    # Empty slot: keep the original behaviour of printing a blank line.
    print(" ")
  }
}
# Figures for the report
# Scree plot of the variance explained by each principal component.
figure6 = file.path(parent_folder, "figures", paste0(experiment, "_scree_plot.png"))
png(figure6)
factoextra::fviz_eig(pca) # another way to visualize percentage contribution (auto-printed under Rscript)
dev.off()
# figure of PC1 vs PC2
# Format the data the way ggplot2 likes it: one row per sample, the sample
# name in column 1 followed by its coordinates on every principal component.
# (Going through a character matrix mirrors the original behaviour; the PC
# columns are converted back to numeric below.)
pca_data <- matrix(ncol = ncol(pca$x) + 1, nrow = nrow(pca$x))
pca_data[, 1] = rownames(pca$x)
pca_data[, -1] = pca$x  # fill columns 2..k+1 in one vectorized step
### Save all PC to a file (not just significant/meaningful)
output_full_pca = file.path(parent_folder, "results", paste0(experiment, "_pca_scores.txt"))
write.table(pca$x, file = output_full_pca, sep = '\t', col.names = NA, row.names = TRUE, quote = FALSE)
pca_data = as.data.frame(pca_data)
# Name the columns Sample, PC1, PC2, ... in one vectorized assignment
# (replaces the 2:ncol() loop, which misbehaves when ncol == 1).
names(pca_data) = c("Sample", paste0("PC", seq_len(ncol(pca_data) - 1)))
# Attach the design metadata by sample name.
design$Sample = row.names(design)
pca_data = dplyr::left_join(pca_data, design, by = "Sample")
# Convert the PC columns (character after the matrix round-trip) to numeric.
# NOTE(review): grepl("PC", ...) also matches any design column whose name
# contains "PC" - matching behaviour kept identical to sum(grep(...)) > 0.
for (column in seq_len(ncol(pca_data))){
  colname = colnames(pca_data)[column]
  if (grepl("PC", colname)){
    pca_data[, column] = as.numeric(pca_data[, column])
  }
}
# Scatter plot of PC1 vs PC2: samples labeled by formula1 and colored by
# formula2 when a second design variable exists, otherwise by formula1.
# BUGFIX: in the original if/else chain the branch testing
# `exists("formula2") & !exists("formula1")` and the final `else` were
# unreachable dead code, because `if (exists("formula2"))` already captured
# every case where formula2 exists; with formula1 missing that first branch
# then crashed inside ggplot(). Restructured so missing design variables are
# reported with the intended messages instead.
if (!exists("formula1")) {
  if (exists("formula2")) {
    print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
  } else {
    print("--- Error: Missing design variable. Please check the JSON input file ***")
  }
} else {
  figure7 = file.path(parent_folder, "figures", paste0(experiment, "PC1_PC2.png"))
  # Color by the second design variable when present, else by the first.
  color_var = if (exists("formula2")) formula2 else formula1
  png(figure7)
  print(ggplot(data = pca_data, aes_string(x = "PC1", y = "PC2",
                                           label = formula1,
                                           color = color_var)) +
          geom_text() +
          xlab(paste("PC1: ", per.pcavar[1], "%", sep = ""))+
          ylab(paste("PC2: ", per.pcavar[2], "%", sep = ""))+
          theme_bw() +
          ggtitle(paste("PC1 vs PC2", "| Experiment: ", experiment))+
          theme(axis.text.x=element_blank(),
                axis.text.y=element_blank()))
  dev.off()
}
# Same plot, but for PC2 vs PC3.
# BUGFIX: as with the PC1-vs-PC2 plot, the original chain's
# `exists("formula2") & !exists("formula1")` branch and final `else` were
# unreachable; restructured so a missing formula1 is reported instead of
# crashing inside ggplot().
if (!exists("formula1")) {
  if (exists("formula2")) {
    print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
  } else {
    print("--- Error: Missing design variable. Please check the JSON input file ***")
  }
} else {
  figure8 = file.path(parent_folder, "figures", paste0(experiment, "PC2_PC3.png"))
  # Color by the second design variable when present, else by the first.
  color_var = if (exists("formula2")) formula2 else formula1
  png(figure8)
  print(ggplot(data = pca_data, aes_string(x = "PC2", y = "PC3", label = formula1,
                                           color = color_var)) +
          geom_text() +
          xlab(paste("PC2: ", per.pcavar[2], "%", sep = ""))+
          ylab(paste("PC3: ", per.pcavar[3], "%", sep = ""))+
          theme_bw() +
          ggtitle(paste("PC2 vs PC3", "| Experiment: ", experiment))+
          theme(axis.text.x=element_blank(),
                axis.text.y=element_blank()))
  dev.off()
}
#Same loop, but for facet plot of PC1 (added 7/18/21)
# Bar plot of per-sample PC1 loadings. BUGFIX: in the original if/else chain
# the branch testing `exists("formula2") & !exists("formula1")` and the final
# `else` were unreachable (the first `if (exists("formula2"))` already caught
# those cases and then crashed inside ggplot() when formula1 was missing).
# Restructured so missing design variables are reported instead.
if (!exists("formula1")) {
  if (exists("formula2")) {
    print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
  } else {
    print("--- Error: Missing design variable. Please check the JSON input file ***")
  }
} else {
  PC1_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC1_facetplot.png"))
  png(PC1_facetplot)
  if (exists("formula2")) {
    # Two design variables: fill by formula2, facet by formula1.
    print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC1", fill = formula2)) +
            geom_bar(stat = "identity", position = position_dodge()) +
            facet_grid(. ~ pca_data[, formula1], scales = 'free') +
            labs(y = "Sample Loading") +
            theme_bw() +
            ggtitle(paste("PC1: ", per.pcavar[1], "% | Experiment: ", experiment, sep = "")) +
            theme(axis.title.x = element_blank(),
                  axis.text.x = element_blank(),
                  axis.ticks.x = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank()))
  } else {
    # One design variable: fill by formula1, no faceting.
    print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC1", fill = formula1)) +
            geom_bar(stat = "identity", position = position_dodge()) +
            theme_bw() +
            theme(axis.title.x = element_blank(),
                  axis.text.x = element_blank(),
                  axis.ticks.x = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank()) +
            ggtitle(paste("PC1: ", per.pcavar[1], "% | Experiment: ", experiment, sep = "")))
  }
  dev.off()
}
#Same loop, but for facet plot of PC2 (added 7/18/21)
# Bar plot of per-sample PC2 loadings. BUGFIX: the original chain's
# `exists("formula2") & !exists("formula1")` branch and final `else` were
# unreachable dead code, and a missing formula1 crashed inside ggplot();
# restructured so the error messages are actually reachable.
if (!exists("formula1")) {
  if (exists("formula2")) {
    print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
  } else {
    print("--- Error: Missing design variable. Please check the JSON input file ***")
  }
} else {
  PC2_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC2_facetplot.png"))
  png(PC2_facetplot)
  if (exists("formula2")) {
    # Two design variables: fill by formula2, facet by formula1.
    print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC2", fill = formula2)) +
            geom_bar(stat = "identity", position = position_dodge()) +
            facet_grid(. ~ pca_data[, formula1], scales = 'free') +
            labs(y = "Sample Loading") +
            theme_bw() +
            ggtitle(paste("PC2: ", per.pcavar[2], "% | Experiment: ", experiment, sep = "")) +
            theme(axis.title.x = element_blank(),
                  axis.text.x = element_blank(),
                  axis.ticks.x = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank()))
  } else {
    # One design variable: fill by formula1, no faceting.
    print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC2", fill = formula1)) +
            geom_bar(stat = "identity", position = position_dodge()) +
            theme_bw() +
            theme(axis.title.x = element_blank(),
                  axis.text.x = element_blank(),
                  axis.ticks.x = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank()) +
            ggtitle(paste("PC2: ", per.pcavar[2], "% | Experiment: ", experiment, sep = "")))
  }
  dev.off()
}
#Same loop, but for facet plot of PC3 (added 7/18/21)
# Bar plot of per-sample PC3 loadings. BUGFIX: the original chain's
# `exists("formula2") & !exists("formula1")` branch and final `else` were
# unreachable dead code, and a missing formula1 crashed inside ggplot();
# restructured so the error messages are actually reachable.
if (!exists("formula1")) {
  if (exists("formula2")) {
    print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
  } else {
    print("--- Error: Missing design variable. Please check the JSON input file ***")
  }
} else {
  PC3_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC3_facetplot.png"))
  png(PC3_facetplot)
  if (exists("formula2")) {
    # Two design variables: fill by formula2, facet by formula1.
    print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC3", fill = formula2)) +
            geom_bar(stat = "identity", position = position_dodge()) +
            facet_grid(. ~ pca_data[, formula1], scales = 'free') +
            labs(y = "Sample Loading") +
            theme_bw() +
            ggtitle(paste("PC3: ", per.pcavar[3], "% | Experiment: ", experiment, sep = "")) +
            theme(axis.title.x = element_blank(),
                  axis.text.x = element_blank(),
                  axis.ticks.x = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank()))
  } else {
    # One design variable: fill by formula1, no faceting.
    print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC3", fill = formula1)) +
            geom_bar(stat = "identity", position = position_dodge()) +
            theme_bw() +
            theme(axis.title.x = element_blank(),
                  axis.text.x = element_blank(),
                  axis.ticks.x = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                  panel.grid.minor = element_blank()) +
            ggtitle(paste("PC3: ", per.pcavar[3], "% | Experiment: ", experiment, sep = "")))
  }
  dev.off()
}
# Updating the json copy: record every output path produced by this step so
# downstream steps (and the report) can find them.
path_2_json_copy = file.path(parent_folder, "results", paste0(experiment, "_json_copy.json"))
json_copy <- read_json(path_2_json_copy)
json_copy$path_2_results$all_loading_scores = as.character(output_loadings)
json_copy$path_2_results$eigenvalues = as.character(output_eigenvalues)
json_copy$path_2_results$pca_object = as.character(output_pca)
json_copy$figures$scree_plot = as.character(figure6)
# NOTE(review): figure7/figure8 and the facetplot variables are only assigned
# when their plot sections actually ran; if a plot section hit an error branch
# these lookups fail with "object not found" - confirm intended.
json_copy$figures$PC1_PC2 = as.character(figure7)
json_copy$figures$PC2_PC3 = as.character(figure8)
#lines below added 7/18/21
json_copy$figures$PC1_facetplot = as.character(PC1_facetplot)
json_copy$figures$PC2_facetplot = as.character(PC2_facetplot)
json_copy$figures$PC3_facetplot = as.character(PC3_facetplot)
write_json(json_copy, path_2_json_copy, auto_unbox = TRUE)
| /step_05.R | no_license | mdibl/biocore_automated-pca | R | false | false | 13,727 | r | ## A script that performs PCA on a normalized count matrix.
# CLI: the first (and only) positional argument is the path to the JSON
# configuration file for this pipeline step.
args = base::commandArgs(trailingOnly = TRUE)
# Echo the arguments so they appear in the run log.
print(args)
path2_json_file = args[1]
# **********************************************************************
## Load in the necessary libraries:
print("*** Loading libraries ***")
options(stringsAsFactors = FALSE)
options(bitmapType='quartz')
library(jsonlite)
library(ggplot2)
library(dplyr)
library(factoextra)
library(readr)
library(stringr)
#### Read in input files ###
# JSON input file with SD and AVG thresholds
print("*** Reading the input files ***")
json = read_json(path2_json_file)
# Output locations and experiment name come from the JSON config.
parent_folder = json$folders$output_folder
experiment = json$experiment_name
# Derived input paths: the design matrix and the z-scored count matrix
# produced by earlier pipeline steps.
path2_design = file.path(parent_folder, "results", paste0(experiment, "_design.txt"))
path2_count = file.path(parent_folder, "results", paste0(experiment , "_Z_threshold.txt"))
### Read in the filtered count matrix and the design file ###
# check.names=FALSE keeps sample names exactly as written in the header.
filt_count = as.matrix(read.table(path2_count, sep = "\t", header = TRUE, row.names = 1, check.names=FALSE))
design = read.table(path2_design, sep = "\t", header = TRUE, row.names = 1)
# **************** Start of the program **********************
print("*** Start of the program ***")
### Sanity-check that the count matrix is row-normalized (z-scored): ###
### per-row means ~ 0 and per-row standard deviations ~ 1.           ###
mn = apply(filt_count, 1, mean)
stdev = apply(filt_count, 1, sd)
# Equivalent two-sided bound expressed with abs(); the original used the
# vectorized `|` where a scalar condition is required.
if (abs(mean(mn)) > 0.0001){
  print("The count matrix is not normalized. Mean of means != 0")
  stop()
}
# BUGFIX: `mean(stdev) != 1` was an exact floating-point comparison that is
# almost always TRUE; compare with a numeric tolerance instead.
if (!isTRUE(all.equal(mean(stdev), 1))){
  print("Not all standard deviations of the normalized matrix == 1")
}
### Perform PCA on the samples ###
print("***Performing PCA. This can take a while.***")
# NOTE(review): `cols` is assigned but never used in this script.
cols = ncol(filt_count)
# Samples become rows via t(); scale = TRUE re-scales each gene column.
pca = prcomp(t(filt_count), scale = TRUE)
# for scree plot generation: percent variance explained per component
pcavar <- pca$sdev^2
per.pcavar = round(pcavar/sum(pcavar)*100,1)
### Generate a loading scores table ##
loadings = pca$rotation
### Save the loadings for each PC into a file ###
output_loadings = file.path(parent_folder, "results", paste0(experiment, "_pca_loading_scores.txt"))
write.table(loadings, file = output_loadings, sep = '\t',col.names=NA,row.names=TRUE,quote=FALSE)
# Save the eigenvalues
pca_eigenvalue=factoextra::get_eig(pca)
output_eigenvalues = file.path(parent_folder, "results", paste0(experiment, "_pca_eigenvalues.txt"))
write.table(pca_eigenvalue, file = output_eigenvalues, sep = '\t',col.names=NA,row.names=TRUE,quote=FALSE)
# Save the pca object for reuse by later steps.
output_pca = file.path(parent_folder, "results", paste0(experiment, "_pca_object.rds"))
write_rds(pca, output_pca)
# Extract the design variables from the JSON config: every non-empty entry
# design_variables[[i]] is bound to a global named formula<i> (e.g. formula1).
# `last_number` records the index of the last non-empty variable.
for (i in seq_along(json$design_variables)){  # seq_along is safe for empty lists (1:length is not)
  if (str_length(json$design_variables[[i]]) > 0){
    nam <- paste0("formula", i)
    assign(nam, json$design_variables[[i]])
    last_number = i
  } else {
    # Empty slot: keep the original behaviour of printing a blank line.
    print(" ")
  }
}
# Figures for the report
# Scree plot of the variance explained by each principal component.
figure6 = file.path(parent_folder, "figures", paste0(experiment, "_scree_plot.png"))
png(figure6)
factoextra::fviz_eig(pca) # another way to visualize percentage contribution (auto-printed under Rscript)
dev.off()
# figure of PC1 vs PC2
# Format the data the way ggplot2 likes it: one row per sample, the sample
# name in column 1 followed by its coordinates on every principal component.
# (Going through a character matrix mirrors the original behaviour; the PC
# columns are converted back to numeric below.)
pca_data <- matrix(ncol = ncol(pca$x) + 1, nrow = nrow(pca$x))
pca_data[, 1] = rownames(pca$x)
pca_data[, -1] = pca$x  # fill columns 2..k+1 in one vectorized step
### Save all PC to a file (not just significant/meaningful)
output_full_pca = file.path(parent_folder, "results", paste0(experiment, "_pca_scores.txt"))
write.table(pca$x, file = output_full_pca, sep = '\t', col.names = NA, row.names = TRUE, quote = FALSE)
pca_data = as.data.frame(pca_data)
# Name the columns Sample, PC1, PC2, ... in one vectorized assignment
# (replaces the 2:ncol() loop, which misbehaves when ncol == 1).
names(pca_data) = c("Sample", paste0("PC", seq_len(ncol(pca_data) - 1)))
# Attach the design metadata by sample name.
design$Sample = row.names(design)
pca_data = dplyr::left_join(pca_data, design, by = "Sample")
# Convert the PC columns (character after the matrix round-trip) to numeric.
# NOTE(review): grepl("PC", ...) also matches any design column whose name
# contains "PC" - matching behaviour kept identical to sum(grep(...)) > 0.
for (column in seq_len(ncol(pca_data))){
  colname = colnames(pca_data)[column]
  if (grepl("PC", colname)){
    pca_data[, column] = as.numeric(pca_data[, column])
  }
}
# Scatter plot of PC1 vs PC2: samples labeled by formula1 and colored by
# formula2 when a second design variable exists, otherwise by formula1.
# BUGFIX: in the original if/else chain the branch testing
# `exists("formula2") & !exists("formula1")` and the final `else` were
# unreachable dead code, because `if (exists("formula2"))` already captured
# every case where formula2 exists; with formula1 missing that first branch
# then crashed inside ggplot(). Restructured so missing design variables are
# reported with the intended messages instead.
if (!exists("formula1")) {
  if (exists("formula2")) {
    print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
  } else {
    print("--- Error: Missing design variable. Please check the JSON input file ***")
  }
} else {
  figure7 = file.path(parent_folder, "figures", paste0(experiment, "PC1_PC2.png"))
  # Color by the second design variable when present, else by the first.
  color_var = if (exists("formula2")) formula2 else formula1
  png(figure7)
  print(ggplot(data = pca_data, aes_string(x = "PC1", y = "PC2",
                                           label = formula1,
                                           color = color_var)) +
          geom_text() +
          xlab(paste("PC1: ", per.pcavar[1], "%", sep = ""))+
          ylab(paste("PC2: ", per.pcavar[2], "%", sep = ""))+
          theme_bw() +
          ggtitle(paste("PC1 vs PC2", "| Experiment: ", experiment))+
          theme(axis.text.x=element_blank(),
                axis.text.y=element_blank()))
  dev.off()
}
# Same plot, but for PC2 vs PC3.
# BUGFIX: as with the PC1-vs-PC2 plot, the original chain's
# `exists("formula2") & !exists("formula1")` branch and final `else` were
# unreachable; restructured so a missing formula1 is reported instead of
# crashing inside ggplot().
if (!exists("formula1")) {
  if (exists("formula2")) {
    print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
  } else {
    print("--- Error: Missing design variable. Please check the JSON input file ***")
  }
} else {
  figure8 = file.path(parent_folder, "figures", paste0(experiment, "PC2_PC3.png"))
  # Color by the second design variable when present, else by the first.
  color_var = if (exists("formula2")) formula2 else formula1
  png(figure8)
  print(ggplot(data = pca_data, aes_string(x = "PC2", y = "PC3", label = formula1,
                                           color = color_var)) +
          geom_text() +
          xlab(paste("PC2: ", per.pcavar[2], "%", sep = ""))+
          ylab(paste("PC3: ", per.pcavar[3], "%", sep = ""))+
          theme_bw() +
          ggtitle(paste("PC2 vs PC3", "| Experiment: ", experiment))+
          theme(axis.text.x=element_blank(),
                axis.text.y=element_blank()))
  dev.off()
}
#Same loop, but for facet plot of PC1 (added 7/18/21)
# NOTE: conditions reordered (and & -> &&) so the "save it under design1"
# branch is reachable; previously exists("formula2") alone was tested first
# and the plot crashed when formula1 was missing.
if (exists("formula2") && exists("formula1")){
  # Bars coloured by formula2, panels split by formula1.
  PC1_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC1_facetplot.png"))
  png(PC1_facetplot)
  print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC1", fill=formula2)) +
          geom_bar(stat="identity", position=position_dodge())+
          facet_grid(. ~ pca_data[,formula1], scales='free')+
          labs(y="Sample Loading")+
          theme_bw() +
          ggtitle(paste("PC1: ", per.pcavar[1], "% | Experiment: ", experiment,sep = ""))+
          theme(axis.title.x=element_blank(),
                axis.text.x=element_blank(),
                axis.ticks.x=element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                panel.grid.minor = element_blank()))
  dev.off()
} else if (!exists("formula2") && exists("formula1")){
  # Single design variable: no facetting, bars coloured by formula1.
  PC1_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC1_facetplot.png"))
  png(PC1_facetplot)
  print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC1", fill=formula1)) +
          geom_bar(stat="identity", position=position_dodge())+
          theme_bw() +
          theme(axis.title.x=element_blank(),
                axis.text.x=element_blank(),
                axis.ticks.x=element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                panel.grid.minor = element_blank())+
          ggtitle(paste("PC1: ", per.pcavar[1], "% | Experiment: ", experiment,sep = "")))
  dev.off()
} else if (!exists("formula2") && !exists("formula1")){
  print("--- Error: Missing design variable. Please check the JSON input file ***")
} else if (exists("formula2") && !exists("formula1")){
  print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
} else {
  print("--- Error: there is a problem with the design formula. Please check the JSON input file ***")
}
#Same loop, but for facet plot of PC2 (added 7/18/21)
# NOTE: conditions reordered (and & -> &&) so the "save it under design1"
# branch is reachable; see the PC1 facet plot chain for the rationale.
if (exists("formula2") && exists("formula1")){
  # Bars coloured by formula2, panels split by formula1.
  PC2_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC2_facetplot.png"))
  png(PC2_facetplot)
  print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC2", fill=formula2)) +
          geom_bar(stat="identity", position=position_dodge())+
          facet_grid(. ~ pca_data[,formula1], scales='free')+
          labs(y="Sample Loading")+
          theme_bw() +
          ggtitle(paste("PC2: ", per.pcavar[2], "% | Experiment: ", experiment,sep = ""))+
          theme(axis.title.x=element_blank(),
                axis.text.x=element_blank(),
                axis.ticks.x=element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                panel.grid.minor = element_blank()))
  dev.off()
} else if (!exists("formula2") && exists("formula1")){
  # Single design variable: no facetting, bars coloured by formula1.
  PC2_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC2_facetplot.png"))
  png(PC2_facetplot)
  print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC2", fill=formula1)) +
          geom_bar(stat="identity", position=position_dodge())+
          theme_bw() +
          theme(axis.title.x=element_blank(),
                axis.text.x=element_blank(),
                axis.ticks.x=element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                panel.grid.minor = element_blank())+
          ggtitle(paste("PC2: ", per.pcavar[2], "% | Experiment: ", experiment,sep = "")))
  dev.off()
} else if (!exists("formula2") && !exists("formula1")){
  print("--- Error: Missing design variable. Please check the JSON input file ***")
} else if (exists("formula2") && !exists("formula1")){
  print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
} else {
  print("--- Error: there is a problem with the design formula. Please check the JSON input file ***")
}
#Same loop, but for facet plot of PC3 (added 7/18/21)
# NOTE: conditions reordered (and & -> &&) so the "save it under design1"
# branch is reachable; see the PC1 facet plot chain for the rationale.
if (exists("formula2") && exists("formula1")){
  # Bars coloured by formula2, panels split by formula1.
  PC3_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC3_facetplot.png"))
  png(PC3_facetplot)
  print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC3", fill=formula2)) +
          geom_bar(stat="identity", position=position_dodge())+
          facet_grid(. ~ pca_data[,formula1], scales='free')+
          labs(y="Sample Loading")+
          theme_bw() +
          ggtitle(paste("PC3: ", per.pcavar[3], "% | Experiment: ", experiment,sep = ""))+
          theme(axis.title.x=element_blank(),
                axis.text.x=element_blank(),
                axis.ticks.x=element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                panel.grid.minor = element_blank()))
  dev.off()
} else if (!exists("formula2") && exists("formula1")){
  # Single design variable: no facetting, bars coloured by formula1.
  PC3_facetplot = file.path(parent_folder, "figures", paste0(experiment, "PC3_facetplot.png"))
  png(PC3_facetplot)
  print(ggplot(data = pca_data, aes_string(x = "Sample", y = "PC3", fill=formula1)) +
          geom_bar(stat="identity", position=position_dodge())+
          theme_bw() +
          theme(axis.title.x=element_blank(),
                axis.text.x=element_blank(),
                axis.ticks.x=element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(),
                panel.grid.minor = element_blank())+
          ggtitle(paste("PC3: ", per.pcavar[3], "% | Experiment: ", experiment,sep = "")))
  dev.off()
} else if (!exists("formula2") && !exists("formula1")){
  print("--- Error: Missing design variable. Please check the JSON input file ***")
} else if (exists("formula2") && !exists("formula1")){
  print("--- Error: please change the JSON file. If there is only one design variable, save it under design1 ***")
} else {
  print("--- Error: there is a problem with the design formula. Please check the JSON input file ***")
}
# Record the paths of every PCA output file and figure in the JSON copy.
path_2_json_copy <- file.path(parent_folder, "results", paste0(experiment, "_json_copy.json"))
json_copy <- read_json(path_2_json_copy)
# Result files produced by the PCA step.
result_outputs <- list(all_loading_scores = output_loadings,
                       eigenvalues = output_eigenvalues,
                       pca_object = output_pca)
for (entry in names(result_outputs)) {
  json_copy$path_2_results[[entry]] <- as.character(result_outputs[[entry]])
}
# Figure files (facet plots added 7/18/21).
figure_outputs <- list(scree_plot = figure6,
                       PC1_PC2 = figure7,
                       PC2_PC3 = figure8,
                       PC1_facetplot = PC1_facetplot,
                       PC2_facetplot = PC2_facetplot,
                       PC3_facetplot = PC3_facetplot)
for (entry in names(figure_outputs)) {
  json_copy$figures[[entry]] <- as.character(figure_outputs[[entry]])
}
write_json(json_copy, path_2_json_copy, auto_unbox = TRUE)
|
\name{CDF.Pval.HA}
\alias{CDF.Pval.HA}
\title{
CDF of p-values for test statistics distributed under HA.
}
\description{
Computes the CDF of p-values for test statistics distributed under HA.
}
\usage{
CDF.Pval.HA(u, effect.size, n.sample, r.1, groups = 2, type="balanced",
grpj.per.grp1=1, control)
}
\arguments{
\item{u}{
Argument of the CDF. Result will be Pr( P_i <= u )
}
\item{effect.size}{
The effect size (mean over standard deviation) for test statistics
having non-zero means. Assumed to be a constant (in magnitude) over
non-zero mean test statistics.
}
\item{n.sample}{
The number of experimental replicates.
}
\item{r.1}{
The proportion of all test statistics that are distributed under HA.
}
\item{groups}{
The number of experimental groups to compare. Default value is 2.
}
\item{type}{
A character string specifying, in the groups=2 case, whether the
test is 'paired', 'balanced', or 'unbalanced' and in the case when
groups >=3, whether the test is 'balanced' or 'unbalanced'. The
default in all cases is 'balanced'. Left unspecified in the one
sample (groups=1) case.
}
\item{grpj.per.grp1}{
Required when \code{type}="unbalanced", specifies the group 0 to
group 1 ratio in the two group case, and in the case of 3 or more
groups, the group j to group 1 ratio, where group 1 is the group
with the largest effect under the alternative hypothesis.
}
\item{control}{
Optionally, a list with components with the following components:
'groups', used when distop=3 (F-dist), specifying number of groups.
'tol' is a convergence criterion used in iterative methods
which is set to 1e-8 by default
'max.iter' is an iteration limit, set to 20 for function iteration
and 1000 for all others by default
'distop', specifying the distribution family of the central and
non-centrally located sub-populations. =1 gives normal (2 groups)
=2 gives t- (2 groups) and =3 gives F- (2+ groups)
}
}
\details{
  Computes the CDF of p-values for test statistics distributed under HA.
If Fc_0 is the cCDF of a test statistic under H0 and Fc_A is the cCDF
of a test statistic under HA then the CDF of a P-value for a test
statistic distributed under HA is
G_A(u) = Fc_A(Fc_0^{-1}(u))
The limiting true positive fraction is the infinite simultaneous tests
average power,
lim_m T_m/M_m = average.power (a.s.),
which is used to approximate the average power for finite 'm', is
G_1 at gamma alpha:
  G_1( gamma alpha) = average.power
where alpha is the nominal FDR and gamma = lim_m R_m/m (a.s.) is the limiting
positive call fraction.
}
\value{
A list with components
\item{call}{The call which produced the result}
\item{u}{The argument that was passed to the function}
\item{CDF.Pval.HA}{The value of the CDF}
}
\references{
Izmirlian G. (2020) Strong consistency and asymptotic normality for
quantities related to the Benjamini-Hochberg false discovery rate
procedure. Statistics and Probability Letters; 108713,
<doi:10.1016/j.spl.2020.108713>.
Izmirlian G. (2017) Average Power and \eqn{\lambda}-power in
Multiple Testing Scenarios when the Benjamini-Hochberg False
Discovery Rate Procedure is Used. <arXiv:1801.03989>
Genovese, C. and L. Wasserman. (2004) A stochastic process approach to
false discovery control. Annals of Statistics. 32 (3), 1035-1061.
}
\author{
Grant Izmirlian <izmirlian at nih dot gov>
}
\seealso{
\code{\link{CDF.Pval}}
}
\examples{
## First calculate an average power for a given set of parameters
rslt.avgp <- pwrFDR(effect.size=0.79, n.sample=42, r.1=0.05, alpha=0.15)
## Now verify that G_A( gamma f ) = average.power
gma <- rslt.avgp$gamma
alpha <- rslt.avgp$call$alpha
GA.gma.alpha <- CDF.Pval.HA(u=gma*alpha, r.1=0.05, effect.size=0.79, n.sample=42)
c(G.gm.alpha=GA.gma.alpha$CDF.Pval.HA$CDF.Pval.HA, average.power=rslt.avgp$average.power)
}
\keyword{FDR}
\keyword{Benjamini}
\keyword{Hochberg}
\keyword{microarrays}
\keyword{Multiple.Testing}
\keyword{average.power}
\keyword{k.power}
\keyword{lambda.power}
| /man/Ch10-CDF-Pval-HA.Rd | no_license | cran/pwrFDR | R | false | false | 4,190 | rd | \name{CDF.Pval.HA}
\alias{CDF.Pval.HA}
\title{
CDF of p-values for test statistics distributed under HA.
}
\description{
Computes the CDF of p-values for test statistics distributed under HA.
}
\usage{
CDF.Pval.HA(u, effect.size, n.sample, r.1, groups = 2, type="balanced",
grpj.per.grp1=1, control)
}
\arguments{
\item{u}{
Argument of the CDF. Result will be Pr( P_i <= u )
}
\item{effect.size}{
The effect size (mean over standard deviation) for test statistics
having non-zero means. Assumed to be a constant (in magnitude) over
non-zero mean test statistics.
}
\item{n.sample}{
The number of experimental replicates.
}
\item{r.1}{
The proportion of all test statistics that are distributed under HA.
}
\item{groups}{
The number of experimental groups to compare. Default value is 2.
}
\item{type}{
A character string specifying, in the groups=2 case, whether the
test is 'paired', 'balanced', or 'unbalanced' and in the case when
groups >=3, whether the test is 'balanced' or 'unbalanced'. The
default in all cases is 'balanced'. Left unspecified in the one
sample (groups=1) case.
}
\item{grpj.per.grp1}{
Required when \code{type}="unbalanced", specifies the group 0 to
group 1 ratio in the two group case, and in the case of 3 or more
groups, the group j to group 1 ratio, where group 1 is the group
with the largest effect under the alternative hypothesis.
}
\item{control}{
Optionally, a list with components with the following components:
'groups', used when distop=3 (F-dist), specifying number of groups.
'tol' is a convergence criterion used in iterative methods
which is set to 1e-8 by default
'max.iter' is an iteration limit, set to 20 for function iteration
and 1000 for all others by default
'distop', specifying the distribution family of the central and
non-centrally located sub-populations. =1 gives normal (2 groups)
=2 gives t- (2 groups) and =3 gives F- (2+ groups)
}
}
\details{
  Computes the CDF of p-values for test statistics distributed under HA.
If Fc_0 is the cCDF of a test statistic under H0 and Fc_A is the cCDF
of a test statistic under HA then the CDF of a P-value for a test
statistic distributed under HA is
G_A(u) = Fc_A(Fc_0^{-1}(u))
The limiting true positive fraction is the infinite simultaneous tests
average power,
lim_m T_m/M_m = average.power (a.s.),
which is used to approximate the average power for finite 'm', is
G_1 at gamma alpha:
  G_1( gamma alpha) = average.power
where alpha is the nominal FDR and gamma = lim_m R_m/m (a.s.) is the limiting
positive call fraction.
}
\value{
A list with components
\item{call}{The call which produced the result}
\item{u}{The argument that was passed to the function}
\item{CDF.Pval.HA}{The value of the CDF}
}
\references{
Izmirlian G. (2020) Strong consistency and asymptotic normality for
quantities related to the Benjamini-Hochberg false discovery rate
procedure. Statistics and Probability Letters; 108713,
<doi:10.1016/j.spl.2020.108713>.
Izmirlian G. (2017) Average Power and \eqn{\lambda}-power in
Multiple Testing Scenarios when the Benjamini-Hochberg False
Discovery Rate Procedure is Used. <arXiv:1801.03989>
Genovese, C. and L. Wasserman. (2004) A stochastic process approach to
false discovery control. Annals of Statistics. 32 (3), 1035-1061.
}
\author{
Grant Izmirlian <izmirlian at nih dot gov>
}
\seealso{
\code{\link{CDF.Pval}}
}
\examples{
## First calculate an average power for a given set of parameters
rslt.avgp <- pwrFDR(effect.size=0.79, n.sample=42, r.1=0.05, alpha=0.15)
## Now verify that G_A( gamma f ) = average.power
gma <- rslt.avgp$gamma
alpha <- rslt.avgp$call$alpha
GA.gma.alpha <- CDF.Pval.HA(u=gma*alpha, r.1=0.05, effect.size=0.79, n.sample=42)
c(G.gm.alpha=GA.gma.alpha$CDF.Pval.HA$CDF.Pval.HA, average.power=rslt.avgp$average.power)
}
\keyword{FDR}
\keyword{Benjamini}
\keyword{Hochberg}
\keyword{microarrays}
\keyword{Multiple.Testing}
\keyword{average.power}
\keyword{k.power}
\keyword{lambda.power}
|
# Compute a pairwise (scaled squared-Euclidean) distance matrix over the UCR
# 'synthetic control' training set and evaluate the criterion implemented in
# the sourced eval_dist_matrix.R script.
source('ucr_ts.R')
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific.
setwd("~/prog/alexeyche-junk/cns/cpp/r_package/r_scripts")
source('../../scripts/eval_dist_matrix.R')
ts_dir = '~/prog/sim/ts'
sample_size = 60
data = synth # synthetic control dataset id (defined in ucr_ts.R -- TODO confirm)
# `:=` is a multi-assignment helper from the sourced scripts: it unpacks the
# train/test pair returned by read_ts_file -- TODO confirm against ucr_ts.R.
c(train_dataset, test_dataset) := read_ts_file(data, sample_size, ts_dir)
# `data` is reused from here on as the result container passed to
# calculate_criterion (it previously held the dataset id).
data = list()
data$distance_matrix = vector("list", length(train_dataset))
# Row ti of the distance matrix is grown one entry at a time; entry (ti, tj)
# is the sum of squared differences between the two series, divided by 1000.
for(ti in 1:length(train_dataset)) {
  for(tj in 1:length(train_dataset)) {
    data$distance_matrix[[ti]] = cbind(data$distance_matrix[[ti]], sum((train_dataset[[ti]]$data - train_dataset[[tj]]$data)^2)/1000)
  }
}
# target_rate is expected to be defined by one of the sourced scripts -- TODO confirm.
data$rates = rep(target_rate, 100)
data$labels = sapply(train_dataset, function(x) x$label)
calculate_criterion(data) | /cns/cpp/r_package/another_r_scripts/test_ucr.R | no_license | alexeyche/alexeyche-junk | R | false | false | 692 | r |
# Compute a pairwise (scaled squared-Euclidean) distance matrix over the UCR
# 'synthetic control' training set and evaluate the criterion implemented in
# the sourced eval_dist_matrix.R script.
source('ucr_ts.R')
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific.
setwd("~/prog/alexeyche-junk/cns/cpp/r_package/r_scripts")
source('../../scripts/eval_dist_matrix.R')
ts_dir = '~/prog/sim/ts'
sample_size = 60
data = synth # synthetic control dataset id (defined in ucr_ts.R -- TODO confirm)
# `:=` is a multi-assignment helper from the sourced scripts: it unpacks the
# train/test pair returned by read_ts_file -- TODO confirm against ucr_ts.R.
c(train_dataset, test_dataset) := read_ts_file(data, sample_size, ts_dir)
# `data` is reused from here on as the result container passed to
# calculate_criterion (it previously held the dataset id).
data = list()
data$distance_matrix = vector("list", length(train_dataset))
# Row ti of the distance matrix is grown one entry at a time; entry (ti, tj)
# is the sum of squared differences between the two series, divided by 1000.
for(ti in 1:length(train_dataset)) {
  for(tj in 1:length(train_dataset)) {
    data$distance_matrix[[ti]] = cbind(data$distance_matrix[[ti]], sum((train_dataset[[ti]]$data - train_dataset[[tj]]$data)^2)/1000)
  }
}
# target_rate is expected to be defined by one of the sourced scripts -- TODO confirm.
data$rates = rep(target_rate, 100)
data$labels = sapply(train_dataset, function(x) x$label)
calculate_criterion(data) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{xyloplot}
\alias{xyloplot}
\alias{xyloplot.list}
\alias{xyloplot.factor}
\alias{xyloplot.logical}
\alias{xyloplot.character}
\alias{xyloplot.numeric}
\title{Create a xyloplot}
\usage{
xyloplot(x, ...)
\method{xyloplot}{list}(x, breaks = NULL, space = 0.1, pivot = if
(!is.null(names(x))) factor(names(x), levels = names(x)) else
seq_along(x), pivot_labels = if (is.factor(pivot)) levels(pivot) else
NULL, just = 0.5, freq = FALSE, ...)
\method{xyloplot}{factor}(x, ...)
\method{xyloplot}{logical}(x, ...)
\method{xyloplot}{character}(x, ...)
\method{xyloplot}{numeric}(x, ...)
}
\arguments{
\item{x}{Vector or list of vectors to use for creating xyloplots.}
\item{...}{Additional arguments passed to \code{\link{xyloplot.list}}, or other graphical parameters (e.g. \code{"col"}, \code{"lwd"}, ..., etc.) for \code{xyloplot.list} which are recycled along the xylophones and then used by functions for rendering the individual rectangles (e.g. \code{rect}).}
\item{breaks}{A single positive integer value giving the number of breakpoints to use for an evenly spaced partition of the values in \code{x}, a numeric vector explicitly giving the the breakpoints, or \code{NULL} to use the default partition.}
\item{space}{The proportion of the total distance on the pivots axis allocated to each 'xylophone' which should be empty or \code{NULL}, in which case the pivot axis coordinates for the xyloplot rectangles for each pivot are transformed to [0, 1].}
\item{pivot}{Vector the same length as \code{x} used to determine which pivot to place the xylophone representing corresponding distributions of \code{x} onto (duplicated values go on the same pivots).}
\item{pivot_labels}{Character vector giving names for each pivot or \code{NULL}.}
\item{just}{Vector whose elements should take values in \code{0, 0.5, 1} which determines whether to centre-align the xylophones (\code{0.5}, default), left align them (\code{0}) or right align them (\code{1}).}
\item{freq}{Logical value. If \code{TRUE}, the frequencies/counts of data points falling in each interval are represented. If \code{FALSE} (default), the frequency density of data points in each interval are represented.}
}
\value{
Returns an object of class \code{"xyloplot"} containing the specification of graphical elements required to create a corresponding plot, including the coordinates of the corners of rectangles (in terms of the location on the value axis and the pivot axis across which the xyloplots are spread) and the positions of the breakpoints used to partition the range of values.
}
\description{
Plots xylophones (centre-aligned histograms) for the input vector(s), provided either as a single vector or list of vectors. Numeric vectors and factors are admissible (character vectors are transformed to factors). If numeric vectors are provided, \code{cut} will be used to aggregate values, whereas if character vectors or factors are provided, each 'level' will have its own `key' on the `xylophone'. Note that if factors are used, all factors in `x` must have identical levels.
}
\seealso{
\code{\link{plot.xyloplot}}
}
| /man/xyloplot.Rd | no_license | cran/xyloplot | R | false | true | 3,265 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{xyloplot}
\alias{xyloplot}
\alias{xyloplot.list}
\alias{xyloplot.factor}
\alias{xyloplot.logical}
\alias{xyloplot.character}
\alias{xyloplot.numeric}
\title{Create a xyloplot}
\usage{
xyloplot(x, ...)
\method{xyloplot}{list}(x, breaks = NULL, space = 0.1, pivot = if
(!is.null(names(x))) factor(names(x), levels = names(x)) else
seq_along(x), pivot_labels = if (is.factor(pivot)) levels(pivot) else
NULL, just = 0.5, freq = FALSE, ...)
\method{xyloplot}{factor}(x, ...)
\method{xyloplot}{logical}(x, ...)
\method{xyloplot}{character}(x, ...)
\method{xyloplot}{numeric}(x, ...)
}
\arguments{
\item{x}{Vector or list of vectors to use for creating xyloplots.}
\item{...}{Additional arguments passed to \code{\link{xyloplot.list}}, or other graphical parameters (e.g. \code{"col"}, \code{"lwd"}, ..., etc.) for \code{xyloplot.list} which are recycled along the xylophones and then used by functions for rendering the individual rectangles (e.g. \code{rect}).}
\item{breaks}{A single positive integer value giving the number of breakpoints to use for an evenly spaced partition of the values in \code{x}, a numeric vector explicitly giving the the breakpoints, or \code{NULL} to use the default partition.}
\item{space}{The proportion of the total distance on the pivots axis allocated to each 'xylophone' which should be empty or \code{NULL}, in which case the pivot axis coordinates for the xyloplot rectangles for each pivot are transformed to [0, 1].}
\item{pivot}{Vector the same length as \code{x} used to determine which pivot to place the xylophone representing corresponding distributions of \code{x} onto (duplicated values go on the same pivots).}
\item{pivot_labels}{Character vector giving names for each pivot or \code{NULL}.}
\item{just}{Vector whose elements should take values in \code{0, 0.5, 1} which determines whether to centre-align the xylophones (\code{0.5}, default), left align them (\code{0}) or right align them (\code{1}).}
\item{freq}{Logical value. If \code{TRUE}, the frequencies/counts of data points falling in each interval are represented. If \code{FALSE} (default), the frequency density of data points in each interval are represented.}
}
\value{
Returns an object of class \code{"xyloplot"} containing the specification of graphical elements required to create a corresponding plot, including the coordinates of the corners of rectangles (in terms of the location on the value axis and the pivot axis across which the xyloplots are spread) and the positions of the breakpoints used to partition the range of values.
}
\description{
Plots xylophones (centre-aligned histograms) for the input vector(s), provided either as a single vector or list of vectors. Numeric vectors and factors are admissible (character vectors are transformed to factors). If numeric vectors are provided, \code{cut} will be used to aggregate values, whereas if character vectors or factors are provided, each 'level' will have its own `key' on the `xylophone'. Note that if factors are used, all factors in `x` must have identical levels.
}
\seealso{
\code{\link{plot.xyloplot}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Eyeglasses.R
\docType{data}
\name{Eyeglasses}
\alias{Eyeglasses}
\title{Processes used in eyeglass manufacturing}
\format{
A data.frame with one variable. Each row is
one pair of eyeglasses manufactured by Eyeglass-omatic
\itemize{
\item \code{activity} the operation performed on the eyeglasses. For each
pair of glasses, the company performed a single operation:
\code{Assemble}, \code{Grind}, put the \code{frames} together. \code{received} means
the eyeglasses were already totally finished. Guess what \code{Unknown} means!
}
}
\source{
Personal communication from John Matic,
a consultant to Eyeglass-omatic in Australia. (The company’s name is fictitious, but the data is from an actual company.)
}
\usage{
data(Eyeglasses)
}
\description{
Processes used in eyeglass manufacturing
}
\keyword{datasets}
| /man/Eyeglasses.Rd | no_license | dtkaplan/StatsUsingTechnologyData | R | false | true | 890 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Eyeglasses.R
\docType{data}
\name{Eyeglasses}
\alias{Eyeglasses}
\title{Processes used in eyeglass manufacturing}
\format{
A data.frame with one variable. Each row is
one pair of eyeglasses manufactured by Eyeglass-omatic
\itemize{
\item \code{activity} the operation performed on the eyeglasses. For each
pair of glasses, the company performed a single operation:
\code{Assemble}, \code{Grind}, put the \code{frames} together. \code{received} means
the eyeglasses were already totally finished. Guess what \code{Unknown} means!
}
}
\source{
Personal communication from John Matic,
a consultant to Eyeglass-omatic in Australia. (The company’s name is fictitious, but the data is from an actual company.)
}
\usage{
data(Eyeglasses)
}
\description{
Processes used in eyeglass manufacturing
}
\keyword{datasets}
|
# This script measures and records the module descriptors of a set of networks
# Load required libraries
require(vegan)
require(dplyr)
require(tidyr)
measure_module_connectance = function(connectance_og,replica){
  # This function computes the connectance of the modules in the network.
  # It requires the following parameters:
  # - connectance_og: target connectance value of the network
  # - replica: number of replica of the network
  # Returns a data.frame with one row per module: network name, module id,
  # module size, module connectance and the total number of modules.
  # Load required libraries
  require(vegan)
  require(dplyr)
  require(igraph)
  # Read Membership data from MODULAR software output
  # It will currently read data from networks of 60 species with a 30-30 resource-consumer ratio. This is specified in the directory path (for_modular_40_60).
  # If you want to work with networks of different size, change the numbers (for_modular_X_X)
  membership = read.table(paste('Data/for_modular_40_60/resultsSA/MEMBERS_C_',connectance_og,'_R_',replica,'.txt',sep =''))
  # Drop the header row, then sort species by the numeric part of their label
  # (e.g. "R12" -> 12) so rows align with the network's row/column order.
  membership = membership[-1,]
  membership$index = as.numeric(substring(membership$V1,2))
  membership = membership[order(membership$index),]
  membership_index = as.numeric(substring(membership$V1,2))
  # 'R'-prefixed labels are resources (network rows), 'C'-prefixed are consumers (columns).
  membership_rows = subset(membership, substring(membership$V1,1,1) == 'R')
  membership_cols = subset(membership, substring(membership$V1,1,1) == 'C')
  # Load network file
  # It will currently read data from networks of 60 species with a 30-30 resource-consumer ratio. This is specified in the directory path (networks_30_30).
  # If you want to work with networks of different size, change the numbers (networks_X_X)
  network = read.csv(paste('Data/networks_30_30/C_',connectance_og,'_R_',replica,'.csv',sep = ''),header=FALSE)
  n_col = ncol(network)
  n_row = nrow(network)
  # Name rows and columns of dataframe
  colnames(network) = c(paste('C',1:n_col,sep=''))
  rownames(network) = c(paste('R',1:n_row,sep=''))
  # Initialize empty vectors for filling module descriptors
  module_vector = NULL
  module_size = NULL
  module_connectance = NULL
  # Cycle through modules
  for (module in unique(membership$V2)) {
    # Subset network by modules
    index_row = membership_rows %>% filter(V2 == module) %>% .$V1
    index_col = membership_cols %>% filter(V2 == module) %>% .$V1
    # NOTE(review): data.frame subsetting drops dimensions when a single row
    # (or a single cell) is selected, so dim(sub_net) can be NULL; the
    # m == 0 branch below relies on this.
    sub_net = network[as.character(index_row),as.character(index_col)]
    module_vector = c(module_vector,module)
    m = sum(dim(sub_net))
    # Compute connectance
    # NOTE(review): when sum(dim(sub_net)) == 2 (e.g. an empty 2x0 or 0x2
    # subset) none of the branches below runs, leaving module_size and
    # module_connectance shorter than module_vector and breaking the
    # data.frame() call at the end -- TODO confirm such modules cannot occur.
    if (m == 0) {
      # sub_net was dropped to a plain vector/scalar.
      m = length(sub_net)
      module_size = c(module_size,m)
      connectance = sum(sub_net)/length(sub_net)
      module_connectance = c(module_connectance,connectance)
    }
    else if (m == 1) {
      # Degenerate 1x0 / 0x1 subset: treated as a size-2 fully connected module.
      m = 2
      module_size = c(module_size,m)
      connectance = 1
      module_connectance = c(module_connectance,connectance)
    }
    else if(m > 2){
      # Regular two-dimensional subset: links realised over possible links.
      module_size = c(module_size,m)
      connectance = sum(sub_net)/(nrow(sub_net)*ncol(sub_net))
      module_connectance = c(module_connectance,connectance)
    }
  }
  # Extract number of modules
  # NOTE(review): assumes module ids are consecutive integers starting at 0 -- TODO confirm.
  num_modules = max(as.numeric(unique(membership$V2))) + 1
  network_name = paste('C_',connectance_og,'_R_',replica, sep = '')
  # Save results
  df = data.frame(network_name = network_name, module = as.numeric(module_vector), module_size = module_size,
                  module_connectance = module_connectance, number_of_modules = num_modules,
                  stringsAsFactors = FALSE)
  return(df)
}
measure_module_metrics = function(connectance_from,connectance_to,connectance_by,replica_from,replica_to,replica_by){
  # This function computes the module metrics for a specified set of networks
  # and writes them to a single csv file.
  # It requires the following parameters:
  # - connectance_from: float, lowest connectance value
  # - connectance_to: float, highest connectance value
  # - connectance_by: float, increment in connectance
  # - replica_from: int, lowest replica value
  # - replica_to: int, highest replica value
  # - replica_by: int, increment in replica
  connectance_values = seq(connectance_from, connectance_to, connectance_by)
  replica_values = seq(replica_from, replica_to, replica_by)
  # Collect per-network results in a pre-allocated list and bind once at the
  # end, instead of growing a data frame with rbind() inside the loop
  # (which copies the accumulated frame on every iteration).
  results = vector("list", length(connectance_values) * length(replica_values))
  k = 0
  # loop through connectance values
  for (connectance in connectance_values) {
    # loop through replicas
    for (replica in replica_values) {
      # Measure module properties for this network
      k = k + 1
      results[[k]] = measure_module_connectance(connectance, replica)
    }
  }
  data_frame_analysis = do.call(rbind, results)
  # Write data to csv
  # It will currently save data for networks of 60 species with a 30-30 resource-consumer ratio. This is specified in the directory path (processed_output_30_30).
  # If you want to work with networks of different size, change the numbers (processed_output_X_X)
  write.csv(data_frame_analysis, "Results/processed_output_30_30/module_metrics/module_metrics.csv")
}
###################################################################################################
# Entry point.
# Run functions to measure module metrics for networks ranging in connectance from
# 0.05 to 0.49 with increments of 0.02. Measure properties for all 20 replicas per network.
# Output is written to Results/processed_output_30_30/module_metrics/module_metrics.csv.
measure_module_metrics(0.05,0.49,0.02,1,20,1)
| /Code/measure_network_properties/module_metrics.R | no_license | fp3draza/coevo_in_networks | R | false | false | 5,184 | r | # This script measures and records the module descriptors of a set of networks
# Load required libraries
require(vegan)
require(dplyr)
require(tidyr)
measure_module_connectance = function(connectance_og,replica){
  # This function computes the connectance of the modules in the network.
  # It requires the following parameters:
  # - connectance_og: target connectance value of the network
  # - replica: number of replica of the network
  # Returns a data.frame with one row per module: network name, module id,
  # module size, module connectance and the total number of modules.
  # Load required libraries
  require(vegan)
  require(dplyr)
  require(igraph)
  # Read Membership data from MODULAR software output
  # It will currently read data from networks of 60 species with a 30-30 resource-consumer ratio. This is specified in the directory path (for_modular_40_60).
  # If you want to work with networks of different size, change the numbers (for_modular_X_X)
  membership = read.table(paste('Data/for_modular_40_60/resultsSA/MEMBERS_C_',connectance_og,'_R_',replica,'.txt',sep =''))
  # Drop the header row, then sort species by the numeric part of their label
  # (e.g. "R12" -> 12) so rows align with the network's row/column order.
  membership = membership[-1,]
  membership$index = as.numeric(substring(membership$V1,2))
  membership = membership[order(membership$index),]
  membership_index = as.numeric(substring(membership$V1,2))
  # 'R'-prefixed labels are resources (network rows), 'C'-prefixed are consumers (columns).
  membership_rows = subset(membership, substring(membership$V1,1,1) == 'R')
  membership_cols = subset(membership, substring(membership$V1,1,1) == 'C')
  # Load network file
  # It will currently read data from networks of 60 species with a 30-30 resource-consumer ratio. This is specified in the directory path (networks_30_30).
  # If you want to work with networks of different size, change the numbers (networks_X_X)
  network = read.csv(paste('Data/networks_30_30/C_',connectance_og,'_R_',replica,'.csv',sep = ''),header=FALSE)
  n_col = ncol(network)
  n_row = nrow(network)
  # Name rows and columns of dataframe
  colnames(network) = c(paste('C',1:n_col,sep=''))
  rownames(network) = c(paste('R',1:n_row,sep=''))
  # Initialize empty vectors for filling module descriptors
  module_vector = NULL
  module_size = NULL
  module_connectance = NULL
  # Cycle through modules
  for (module in unique(membership$V2)) {
    # Subset network by modules
    index_row = membership_rows %>% filter(V2 == module) %>% .$V1
    index_col = membership_cols %>% filter(V2 == module) %>% .$V1
    # NOTE(review): data.frame subsetting drops dimensions when a single row
    # (or a single cell) is selected, so dim(sub_net) can be NULL; the
    # m == 0 branch below relies on this.
    sub_net = network[as.character(index_row),as.character(index_col)]
    module_vector = c(module_vector,module)
    m = sum(dim(sub_net))
    # Compute connectance
    # NOTE(review): when sum(dim(sub_net)) == 2 (e.g. an empty 2x0 or 0x2
    # subset) none of the branches below runs, leaving module_size and
    # module_connectance shorter than module_vector and breaking the
    # data.frame() call at the end -- TODO confirm such modules cannot occur.
    if (m == 0) {
      # sub_net was dropped to a plain vector/scalar.
      m = length(sub_net)
      module_size = c(module_size,m)
      connectance = sum(sub_net)/length(sub_net)
      module_connectance = c(module_connectance,connectance)
    }
    else if (m == 1) {
      # Degenerate 1x0 / 0x1 subset: treated as a size-2 fully connected module.
      m = 2
      module_size = c(module_size,m)
      connectance = 1
      module_connectance = c(module_connectance,connectance)
    }
    else if(m > 2){
      # Regular two-dimensional subset: links realised over possible links.
      module_size = c(module_size,m)
      connectance = sum(sub_net)/(nrow(sub_net)*ncol(sub_net))
      module_connectance = c(module_connectance,connectance)
    }
  }
  # Extract number of modules
  # NOTE(review): assumes module ids are consecutive integers starting at 0 -- TODO confirm.
  num_modules = max(as.numeric(unique(membership$V2))) + 1
  network_name = paste('C_',connectance_og,'_R_',replica, sep = '')
  # Save results
  df = data.frame(network_name = network_name, module = as.numeric(module_vector), module_size = module_size,
                  module_connectance = module_connectance, number_of_modules = num_modules,
                  stringsAsFactors = FALSE)
  return(df)
}
measure_module_metrics = function(connectance_from,connectance_to,connectance_by,replica_from,replica_to,replica_by){
  # This function computes the module metrics for a specified set of networks
  # and writes them to a single csv file.
  # It requires the following parameters:
  # - connectance_from: float, lowest connectance value
  # - connectance_to: float, highest connectance value
  # - connectance_by: float, increment in connectance
  # - replica_from: int, lowest replica value
  # - replica_to: int, highest replica value
  # - replica_by: int, increment in replica
  connectance_values = seq(connectance_from, connectance_to, connectance_by)
  replica_values = seq(replica_from, replica_to, replica_by)
  # Collect per-network results in a pre-allocated list and bind once at the
  # end, instead of growing a data frame with rbind() inside the loop
  # (which copies the accumulated frame on every iteration).
  results = vector("list", length(connectance_values) * length(replica_values))
  k = 0
  # loop through connectance values
  for (connectance in connectance_values) {
    # loop through replicas
    for (replica in replica_values) {
      # Measure module properties for this network
      k = k + 1
      results[[k]] = measure_module_connectance(connectance, replica)
    }
  }
  data_frame_analysis = do.call(rbind, results)
  # Write data to csv
  # It will currently save data for networks of 60 species with a 30-30 resource-consumer ratio. This is specified in the directory path (processed_output_30_30).
  # If you want to work with networks of different size, change the numbers (processed_output_X_X)
  write.csv(data_frame_analysis, "Results/processed_output_30_30/module_metrics/module_metrics.csv")
}
###################################################################################################
# Entry point.
# Run functions to measure module metrics for networks ranging in connectance from
# 0.05 to 0.49 with increments of 0.02. Measure properties for all 20 replicas per network.
# Output is written to Results/processed_output_30_30/module_metrics/module_metrics.csv.
measure_module_metrics(0.05,0.49,0.02,1,20,1)
|
###################################################
# Unzip every Sentinel-2 .zip archive (each under 4 GB) found in the data
# directory into the current working directory, deleting each archive
# after extraction.
###################################################
# Set working directory: also the extraction target, because unzip() uses exdir = "."
setwd("C:\\Users\\sanaz\\Documents\\MB12-project\\CREODIAS_part\\data_from_CREODIAS\\L2A_2017")
# Locate the Sentinel-2 zip tiles
S2_names_zip <- "C:\\Users\\sanaz\\Documents\\MB12-project\\CREODIAS_part\\data_from_CREODIAS\\L2A_2017"
# "\\.zip$" anchors on a literal ".zip" suffix; the original ".zip$" treated
# the dot as a regex wildcard and could match names such as "foo_zip".
S2_names_list <- list.files(S2_names_zip, recursive = FALSE,
                            full.names = TRUE, pattern = "\\.zip$")
# Unzip each archive, then remove it. seq_along() replaces 1:length(x),
# which would wrongly iterate over c(1, 0) when no archive is found.
S2_names <- lapply(seq_along(S2_names_list),
                   function(x) {
                     unzip(S2_names_list[x], exdir = ".")
                     unlink(S2_names_list[x], recursive = TRUE, force = TRUE)
                   })
| /1_Unzip.R | no_license | narges-mohammadi/MB12_project | R | false | false | 960 | r | ###################################################
# The following script is meant for zip folders under 4G of size, to unzip them,
# and remove the zip folders afterwards
###################################################
#set working directory
setwd("C:\\Users\\sanaz\\Documents\\MB12-project\\CREODIAS_part\\data_from_CREODIAS\\L2A_2017")# #"C:/Users/sanaz/Desktop/Playground_dir"
# Load Sentinel2 zip tiles
S2_names_zip <- "C:\\Users\\sanaz\\Documents\\MB12-project\\CREODIAS_part\\data_from_CREODIAS\\L2A_2017"#C:/Users/sanaz/Documents/MB12-project/CREODIAS_part/data_from_CREODIAS"#\\L2A_2017
S2_names_list <- list.files(S2_names_zip,recursive = FALSE,
full.names = TRUE, pattern=".zip$")
# Unzip and remove the zip folder
S2_names <- lapply(1:length(S2_names_list),
function(x){unzip(S2_names_list[x], exdir=".");
unlink(S2_names_list[x], recursive=TRUE, force = TRUE)})
|
#' Open a session to a server running RStudio Package Manager
#'
#' Extends the `RStudio` connection class with the file locations and
#' service name (`rstudio-pm`) used by RStudio Package Manager.
#'
#' @inheritParams ssh::ssh_connect
#' @param verbose Print progress information while connecting?
#' @param service Init system managing the service on the remote host.
#'
#' @return A Connection Object
#' @export
RSPM <- R6Class(
  "RSPM",
  inherit = RStudio,
  public = list(
    initialize = function(host,
                          keyfile = NULL, verbose = FALSE,
                          service = c("systemctl", "upstart")){
      # NOTE(review): this points at the RStudio *Connect* config file even
      # though both log paths below belong to rstudio-pm -- looks like a
      # copy-paste leftover; confirm whether it should be
      # /etc/rstudio-pm/rstudio-pm.gcfg.
      self$config_file <- "/etc/rstudio-connect/rstudio-connect.gcfg"
      self$server_log <- "/var/log/rstudio-pm.log"
      self$access_log <- "/var/log/rstudio-pm.access.log"
      # Delegate connection setup to the shared RStudio initializer.
      super$initialize(
        host = host, keyfile = keyfile,verbose = verbose,
        service = service, product = "rstudio-pm"
      )
    }
  )
)
| /R/rspm.R | permissive | ColinFay/majordome | R | false | false | 700 | r | #' Open a session to a server running RStudio Package Manager
#'
#' @inheritParams ssh::ssh_connect
#'
#' @return A Connection Object
#' @export
RSPM <- R6Class(
"RSPM",
inherit = RStudio,
public = list(
initialize = function(host,
keyfile = NULL, verbose = FALSE,
service = c("systemctl", "upstart")){
self$config_file <- "/etc/rstudio-connect/rstudio-connect.gcfg"
self$server_log <- "/var/log/rstudio-pm.log"
self$access_log <- "/var/log/rstudio-pm.access.log"
super$initialize(
host = host, keyfile = keyfile,verbose = verbose,
service = service, product = "rstudio-pm"
)
}
)
)
|
###########################################################
### Load packages
###########################################################
library(stringr)
library(purrr)
library(plyr)
library(ggplot2)
library(lubridate)
library(xts)
library(forecast)
###########################################################
### Exploratory time-series analysis of daily password-reset counts.
### Reads the cleansed data frame produced by DataLoad.R; produces plots
### only (nothing is saved), so it is intended for interactive use.
###########################################################
## get the dataframe loaded and cleansed in DataLoad.R
pass_reset_df <- readRDS("data/pass_reset_df.RDS")
# Need to create a time-series for each variable of concern. Order them by a posix date.
self_change_xts <- xts(pass_reset_df$SELF_PASSWORD_CHANGE, order.by = pass_reset_df$DAY)
plot(pass_reset_df$SELF_PASSWORD_CHANGE, main="Self-Password Changes Over Time", pch=1, col="blue")
plot(self_change_xts, main="Self-Password Changes Over Time", pch=1, col="green")
# Plot time-series as a curve, for all time.
plot.ts(self_change_xts, main="Self Password Changes per day\nFrom 2012-07",
        xlab="Day", ylab="Count", col="blue")
# Plot time-series as a curve, starting from 07/2012 onwards (xts date-range subsetting).
plot.ts(self_change_xts['2012-07/'], main="Self Password Changes per day\nFrom 2012-07",
        xlab="Day", ylab="Count", col="green")
# Plot time-series curve for the month of August, 2014.
plot.ts(self_change_xts['2014-08'], main="Self Password Changes per day\nin Aug 2014",
        xlab="Day", ylab="Count", col="red")
##########################################################
# Time series need to understand the frequency of the TS (365 = daily data,
# yearly seasonality). Here we are using another TS library that is building
# on the XTS df already built.
##########################################################
self_change_ts <- ts(self_change_xts, frequency=365)
# now we decompose it into its various parts (trend / seasonal / remainder)
# from which we can analyze and plot
self_change_components <- decompose(self_change_ts)
plot(self_change_components, col="blue")
# another way to do the decompositions is with stl
# Seasonal decomposition
fit <- stl(ts(as.numeric(self_change_xts), frequency=365), s.window="periodic", robust=TRUE)
plot(fit)
# These appear to average everything into a single year (probably from the frequency?)
# and show by days in the year.
monthplot(ts(as.numeric(self_change_xts), frequency=365))
monthplot(fit, choice="seasonal")
monthplot(fit, choice="trend")
# this is using a different kind of model fitting (exponential smoothing).
# presumably it allows for multiplicative as well as additive components;
# the spikey nature of the data might be damaging the model fit
fit2 <- ets(ts(as.numeric(self_change_xts), frequency=365))
plot(fit2)
# method to estimate statistical significance of seasonal component
# http://robjhyndman.com/hyndsight/detecting-seasonality/
# fit1 <- ets(sales_month)
# fit2 <- ets(sales_month, model='ANN')
# deviance <- 2*c(logLik(fit1) - logLik(fit2))
# df <- attributes(logLik(fit1))$df - attributes(logLik(fit2))$df
# 1-pchisq(deviance,df)
# this is less cooked, and looking at logs of the data as well as
# doing differentials, i.e., from day to day.
# http://www.statmethods.net/advstats/timeseries.html
self_change_log <- log(ts(as.numeric(self_change_xts), frequency=365))
plot(self_change_log)
plot(diff(self_change_log))
plot(diff(self_change_xts))
# let's look at a histogram of the counts, and make the bins a bit smaller.
hist(self_change_ts, breaks = 100)
# definitely pareto (power-law) distribution, so let's confirm with a qqplot
qqnorm(diff(self_change_ts))
# http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/zeitreihen/sommer03/ts_r_intro.pdf
| /Time_Series.R | no_license | smehan/sd-passwd-reset | R | false | false | 3,564 | r | ###########################################################
### Load packages
###########################################################
library(stringr)
library(purrr)
library(plyr)
library(ggplot2)
library(lubridate)
library(xts)
library(forecast)
###########################################################
### Class to model time series analysis
###########################################################
## get the dataframe loaded and cleansed in DataLoad.R
pass_reset_df <- readRDS("data/pass_reset_df.RDS")
# Need to create a time-series for each variable of concern. Order them by a posix date.
self_change_xts <- xts(pass_reset_df$SELF_PASSWORD_CHANGE, order.by = pass_reset_df$DAY)
plot(pass_reset_df$SELF_PASSWORD_CHANGE, main="Self-Password Changes Over Time", pch=1, col="blue")
plot(self_change_xts, main="Self-Password Changes Over Time", pch=1, col="green")
# Plot time-series as a curve, for all time.
plot.ts(self_change_xts, main="Self Password Changes per day\nFrom 2012-07",
xlab="Day", ylab="Count", col="blue")
# Plot time-series as a curve, starting from 07/2012 onwards.
plot.ts(self_change_xts['2012-07/'], main="Self Password Changes per day\nFrom 2012-07",
xlab="Day", ylab="Count", col="green")
# Plot time-series curve for the month of August, 2014.
plot.ts(self_change_xts['2014-08'], main="Self Password Changes per day\nin Aug 2014",
xlab="Day", ylab="Count", col="red")
##########################################################
# Time series need to understand the frequency of the TS.
# Here we are using another TS library that is building on the XTS df already built.
##########################################################
self_change_ts <- ts(self_change_xts, frequency=365)
# now we decompose it into its various parts from which we can analyze and plot
self_change_components <- decompose(self_change_ts)
plot(self_change_components, col="blue")
# another way to do the decompositions is with stl
# Seasonal decomposition
fit <- stl(ts(as.numeric(self_change_xts), frequency=365), s.window="periodic", robust=TRUE)
plot(fit)
# These appear to average everything into a single year (probably from the frequency?)
# and show by days in the year.
monthplot(ts(as.numeric(self_change_xts), frequency=365))
monthplot(fit, choice="seasonal")
monthplot(fit, choice="trend")
# this is using a different kind of model fitting.
# presumably it allows for multiplicative as well as additive
# the spikey nature of the data might be damaging the model fit
fit2 <- ets(ts(as.numeric(self_change_xts), frequency=365))
plot(fit2)
# method to estimate statiscal significance of seasonal component
# http://robjhyndman.com/hyndsight/detecting-seasonality/
# fit1 <- ets(sales_month)
# fit2 <- ets(sales_month, model='ANN')
# deviance <- 2*c(logLik(fit1) - logLik(fit2))
# df <- attributes(logLik(fit1))$df - attributes(logLik(fit2))$df
# 1-pchisq(deviance,df)
# this is less cooked, and looking at logs of the data as well as
# doing differentials, i.e., from day to day.
# http://www.statmethods.net/advstats/timeseries.html
self_change_log <- log(ts(as.numeric(self_change_xts), frequency=365))
plot(self_change_log)
plot(diff(self_change_log))
plot(diff(self_change_xts))
# let's look at a histogram of the counts, and make the bins a bit smaller.
hist(self_change_ts, breaks = 100)
# definitely pareto (power-law) distribution, so let's confirm with a qqplot
qqnorm(diff(self_change_ts))
# http://www.statoek.wiso.uni-goettingen.de/veranstaltungen/zeitreihen/sommer03/ts_r_intro.pdf
|
# Test runner for the shackettMisc package: sourced by R CMD check /
# devtools::test() to execute the package's testthat suites.
library(testthat)
library(shackettMisc)
test_check("shackettMisc")
| /tests/testthat.R | no_license | shackett/shackettMisc | R | false | false | 68 | r | library(testthat)
library(shackettMisc)
test_check("shackettMisc")
|
# Hay 3 caballos. Se pide determinar la probabilidad de cada uno de ganar. Los casos totales
# seran la suma de todas las probabilidades
# a = b/2; c = 2b; b = 2a;
# casos totales: b/2 + 2b + b = 7b/2
# son eventos mutuamente excluyentes
# casos favorable / casos totales
#P(a) = (b/2) / (7b/2)
a = 1/7
#P(b) = (b) / (7b/2)
b = 2/7
#P(c) = (2b) / (7b/2)
c = 4/7
S = data.frame(a, b, c)
S
| /Hoja de ejercicios /Hoja de ejercicios probabilidades 3/hoja3.1.R | no_license | DiegoSalas27/Artificial-intelligence | R | false | false | 391 | r | # Hay 3 caballos. Se pide determinar la probabilidad de cada uno de ganar. Los casos totales
# seran la suma de todas las probabilidades
# a = b/2; c = 2b; b = 2a;
# casos totales: b/2 + 2b + b = 7b/2
# son eventos mutuamente excluyentes
# casos favorable / casos totales
#P(a) = (b/2) / (7b/2)
a = 1/7
#P(b) = (b) / (7b/2)
b = 2/7
#P(c) = (2b) / (7b/2)
c = 4/7
S = data.frame(a, b, c)
S
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307483667e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615774870-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307483667e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
library(testthat)
library(rnaseqtools)
test_check("rnaseqtools")
| /tests/testthat.R | no_license | richysix/rnaseqtools | R | false | false | 66 | r | library(testthat)
library(rnaseqtools)
test_check("rnaseqtools")
|
## Programming Assignment 2 - R Programming
##
## Two functions that compute the inverse of a matrix and cache the result so that, if the calculation is
## attempted again over the same variable, R first checks if the result has been cached. If that's the case, a message
## is returned together with the inverse of the matrix.
##
## makeCacheMatrix defines a list of functions to get and the set the values so that cacheSolve can verify
## if a previous result has been cached. A list of funcions get, set, setinv and getinv is the stored output
## of the function.
##
## N.B. in order to test the code it is necessary to:
## a) create a (square) matrix [like in mat <- matrix(1:25, ncol = 5, nrow = 5)]
## b) use (e.g.): mylist <- makeCacheMatrix(mat) to create the list of functions
## c) and then: cacheSolve(mylist) to get the inverse of the matrix +
## c1) again cacheSolve(mylist) to check that the message "getting cached data" is returned.
makeCacheMatrix <- function(x = matrix()) {
  # Wraps a matrix together with a cached copy of its inverse.
  # Returns a list of accessor closures; the cache lives in this
  # function's environment and is shared by all four closures.
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and drop the now-stale cached inverse.
    set = function(new_matrix) {
      x <<- new_matrix
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Record a computed inverse so later calls can reuse it.
    setInverse = function(inverse) cached_inverse <<- inverse,
    # Return the cached inverse, or NULL if none has been stored yet.
    getInverse = function() cached_inverse
  )
}
## cacheSolve returns the inverse of the matrix held by a makeCacheMatrix
## object: it reuses the cached inverse when one exists (announcing the
## cache hit with a message), otherwise it computes, caches and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get())
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | l0rNz/ProgrammingAssignment2 | R | false | false | 1,694 | r | ## Programming Assignment 2 - R Programming
##
## Two functions that compute the inverse of a matrix and cache the result so that, if the calculation is
## attempted again over the same variable, R first checks if the result has been cached. If that's the case, a message
## is returned together with the inverse of the matrix.
##
## makeCacheMatrix defines a list of functions to get and the set the values so that cacheSolve can verify
## if a previous result has been cached. A list of funcions get, set, setinv and getinv is the stored output
## of the function.
##
## N.B. in order to test the code it is necessary to:
## a) create a (square) matrix [like in mat <- matrix(1:25, ncol = 5, nrow = 5)]
## b) use (e.g.): mylist <- makeCacheMatrix(mat) to create the list of functions
## c) and then: cacheSolve(mylist) to get the inverse of the matrix +
## c1) again cacheSolve(mylist) to check that the message "getting cached data" is returned.
makeCacheMatrix <- function(x = matrix()) {
mtrx <- NULL
set <- function(y) {
x <<- y
mtrx <<- NULL
}
get <- function() x
setInverse <- function(solve) mtrx <<- solve
getInverse <- function() mtrx
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## cacheSolve then checks if there is a value stored and retrieves it, otherwise it computes the inverse
cacheSolve <- function(x, ...) {
mtrx <- x$getInverse()
if(!is.null(mtrx)) {
message("getting cached data")
return(mtrx)
}
data <- x$get()
mtrx <- solve(data)
x$setInverse(mtrx)
mtrx
}
|
library(LiblineaR)
### Name: heuristicC
### Title: Fast Heuristics For The Estimation Of the C Constant Of A
### Support Vector Machine.
### Aliases: heuristicC
### Keywords: classif
### ** Examples
data(iris)
x=iris[,1:4]
y=factor(iris[,5])
train=sample(1:dim(iris)[1],100)
xTrain=x[train,]
xTest=x[-train,]
yTrain=y[train]
yTest=y[-train]
# Center and scale data
s=scale(xTrain,center=TRUE,scale=TRUE)
# Sparse Logistic Regression
t=6
co=heuristicC(s)
m=LiblineaR(data=s,labels=yTrain,type=t,cost=co,bias=TRUE,verbose=FALSE)
# Scale the test data
s2=scale(xTest,attr(s,"scaled:center"),attr(s,"scaled:scale"))
# Make prediction
p=predict(m,s2)
# Display confusion matrix
res=table(p$predictions,yTest)
print(res)
# Compute Balanced Classification Rate
BCR=mean(c(res[1,1]/sum(res[,1]),res[2,2]/sum(res[,2]),res[3,3]/sum(res[,3])))
print(BCR)
| /data/genthat_extracted_code/LiblineaR/examples/heuristicC.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 863 | r | library(LiblineaR)
### Name: heuristicC
### Title: Fast Heuristics For The Estimation Of the C Constant Of A
### Support Vector Machine.
### Aliases: heuristicC
### Keywords: classif
### ** Examples
data(iris)
x=iris[,1:4]
y=factor(iris[,5])
train=sample(1:dim(iris)[1],100)
xTrain=x[train,]
xTest=x[-train,]
yTrain=y[train]
yTest=y[-train]
# Center and scale data
s=scale(xTrain,center=TRUE,scale=TRUE)
# Sparse Logistic Regression
t=6
co=heuristicC(s)
m=LiblineaR(data=s,labels=yTrain,type=t,cost=co,bias=TRUE,verbose=FALSE)
# Scale the test data
s2=scale(xTest,attr(s,"scaled:center"),attr(s,"scaled:scale"))
# Make prediction
p=predict(m,s2)
# Display confusion matrix
res=table(p$predictions,yTest)
print(res)
# Compute Balanced Classification Rate
BCR=mean(c(res[1,1]/sum(res[,1]),res[2,2]/sum(res[,2]),res[3,3]/sum(res[,3])))
print(BCR)
|
\alias{gtkTextMarkGetDeleted}
\name{gtkTextMarkGetDeleted}
\title{gtkTextMarkGetDeleted}
\description{Returns \code{TRUE} if the mark has been removed from its buffer
with \code{\link{gtkTextBufferDeleteMark}}. Marks can't be used
once deleted.}
\usage{gtkTextMarkGetDeleted(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkTextMark}}] a \code{\link{GtkTextMark}}}}
\value{[logical] whether the mark is deleted}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkTextMarkGetDeleted.Rd | no_license | cran/RGtk2.10 | R | false | false | 493 | rd | \alias{gtkTextMarkGetDeleted}
\name{gtkTextMarkGetDeleted}
\title{gtkTextMarkGetDeleted}
\description{Returns \code{TRUE} if the mark has been removed from its buffer
with \code{\link{gtkTextBufferDeleteMark}}. Marks can't be used
once deleted.}
\usage{gtkTextMarkGetDeleted(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkTextMark}}] a \code{\link{GtkTextMark}}}}
\value{[logical] whether the mark is deleted}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
library(rgdal)
library(raster)
library(data.table)

# Extract daily GCM values at each weather-station location from netcdf
# bricks (one brick per variable/gcm/file) and write the combined long
# table to csv.
iDir <- "D:/jymutua/ls-heat-stress-mapping - EA"
sites <- read.csv(paste0(iDir, "/data/historical/", "weather_stations.csv"), header=TRUE)
varLS <- c("HURS", "TASMAX")
varStats <- lapply(X = varLS, FUN = function(var){
  # list gcms available for this climate variable
  gcmLS <- list.dirs(paste0(iDir, "/data/historical/_netcdf/", var, "/"), recursive = FALSE, full.names = FALSE)
  gcmStats <- lapply(X=gcmLS, FUN = function(gcm){
    ncLS <- list.files(paste0(iDir, "/data/historical/_netcdf/", var, "/", gcm, "/"), pattern = ".nc$", full.names = TRUE)
    ncStats <- lapply(X = ncLS, FUN = function(nc){
      nc <- brick(nc)
      nPt <- nrow(sites)
      # one data frame of daily values per station (seq_len is empty-safe,
      # unlike 1:nPt which would be c(1, 0) for an empty station file)
      shpStats <- lapply(seq_len(nPt), FUN = function(p){
        site_row <- sites[p,]
        xy <- site_row[,c(3,2)]
        spdf <- SpatialPointsDataFrame(coords = xy, data = site_row, proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
        daily_ag <- as.data.frame(t(as.data.frame(extract(nc, spdf))))
        nc_names <- as.data.frame(names(nc))
        # combine nc_names and daily_ag
        daily_ag <- cbind(nc_names, daily_ag)
        # rename the columns
        colnames(daily_ag) <- c("LAYER_NAME", "VAR")
        row.names(daily_ag) <- NULL
        site_id <- site_row$ID; site_long <- site_row$Long; site_lat <- site_row$Lat
        d <- cbind(SITE_ID=rep(site_id,times=nrow(daily_ag)), GCM=rep(gcm, times=nrow(daily_ag)), LONG=rep(site_long,times=nrow(daily_ag)), LAT=rep(site_lat, times=nrow(daily_ag)), CL_VARIABLE=rep(var,times=nrow(daily_ag)), daily_ag)
        return(d)
      })
      do.call("rbind", shpStats)
    })
    do.call(rbind, ncStats)
  })
  # BUG FIX: the combined table used to be assigned to a misspelled variable
  # ('gcmtats') while the *uncombined list* was returned, so the final rbind
  # over variables operated on lists instead of data frames.
  do.call(rbind, gcmStats)
})
varStats.c <- do.call(rbind, varStats)
# Split LAYER_NAME into year / month / day; the substr offsets assume layer
# names of the form X<yyyy>.<mm>.<dd> -- TODO confirm against the netcdf files.
yr <- substr(varStats.c$LAYER_NAME, 2, 5); mth <- substr(varStats.c$LAYER_NAME, 7, 8); dy <- substr(varStats.c$LAYER_NAME, 10, 11)
varStats.c$YEAR <- yr; varStats.c$MONTH <- mth; varStats.c$DAY <- dy
write.csv(varStats.c, file = paste0(iDir, "/data/historical/", "weather_data_1981_2010.csv"), row.names = TRUE)
| /_data_prep/00_extractGCMS.R | no_license | CIAT/ls_heat_stress_mapping-EA | R | false | false | 2,361 | r | library(rgdal)
library(raster)
library(data.table)
iDir <- "D:/jymutua/ls-heat-stress-mapping - EA"
sites <- read.csv(paste0(iDir, "/data/historical/", "weather_stations.csv"), header=TRUE)
varLS <- c("HURS", "TASMAX")
varStats <- lapply(X = varLS, FUN = function(var){
# list gcms
gcmLS <- list.dirs(paste0(iDir, "/data/historical/_netcdf/", var, "/"), recursive = FALSE, full.names = FALSE)
gcmStats <- lapply(X=gcmLS, FUN = function(gcm){
ncLS <- list.files(paste0(iDir, "/data/historical/_netcdf/", var, "/", gcm, "/"), pattern = ".nc$", full.names = TRUE)
ncStats <- lapply(X = ncLS, FUN = function(nc){
nc <- brick(nc)
nPt <- nrow(sites)
shpStats <- lapply(1:nPt, FUN = function(p){
shpStats <- list()
site_row<- sites[p,]
xy <- site_row[,c(3,2)]
spdf <- SpatialPointsDataFrame(coords = xy, data = site_row, proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
daily_ag <- as.data.frame(t(as.data.frame(extract(nc, spdf))))
nc_names <- as.data.frame(names(nc))
# combine nc_names and daily_ag
daily_ag <- cbind(nc_names, daily_ag)
#rename the columns
colnames(daily_ag) <- c("LAYER_NAME", "VAR")
row.names(daily_ag) <- NULL
site_id <- site_row$ID; site_long <- site_row$Long; site_lat <- site_row$Lat
d <- cbind(SITE_ID=rep(site_id,times=nrow(daily_ag)), GCM=rep(gcm, times=nrow(daily_ag)), LONG=rep(site_long,times=nrow(daily_ag)), LAT=rep(site_lat, times=nrow(daily_ag)), CL_VARIABLE=rep(var,times=nrow(daily_ag)), daily_ag)
return(d)
})
shpStats <- do.call("rbind", shpStats)
return(shpStats)
})
ncStats <- do.call(rbind, ncStats)
return(ncStats)
})
gcmtats <- do.call(rbind, gcmStats)
return(gcmStats)
})
varStats.c <- do.call(rbind, varStats)
yr <- substr(varStats.c$LAYER_NAME, 2, 5); mth <- substr(varStats.c$LAYER_NAME, 7, 8); dy <- substr(varStats.c$LAYER_NAME, 10, 11)
varStats.c$YEAR <- yr; varStats.c$MONTH <- mth; varStats.c$DAY <- dy
write.csv(varStats.c, file = paste0(iDir, "/data/historical/", "weather_data_1981_2010.csv"), row.names = TRUE)
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
PieceExpIntensity1 <- function(Y, Rates, B, Poi) {
.Call('PieceExpIntensity_PieceExpIntensity1', PACKAGE = 'PieceExpIntensity', Y, Rates, B, Poi)
}
| /R/RcppExports.R | no_license | AndrewGChapple/PieceExpIntensity1 | R | false | false | 280 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
PieceExpIntensity1 <- function(Y, Rates, B, Poi) {
.Call('PieceExpIntensity_PieceExpIntensity1', PACKAGE = 'PieceExpIntensity', Y, Rates, B, Poi)
}
|
testIndices <- c(1:10)
X<-scale(usair[-testIndices,2:length(colnames(usair))])
form <- paste0("y","~",paste0(colnames(usair)[2:length(colnames(usair))], collapse = "+"))
form2 <- paste0("y","~",paste0(paste0("pb(",colnames(usair)[2:length(colnames(usair))],")"), collapse = "+"))
form3 <- paste0("y","~",paste0(paste0("ridge(",colnames(usair)[2:length(colnames(usair))],")"), collapse = "+"))
fitcontrol <- gamlss.control(c.crit = 0.02, n.cyc = 200, mu.step = 1, sigma.step = 1, nu.step = 1, tau.step = 1)
fitcontrol2 <- glim.control(cc = 0.02, cyc = 200, glm.trace = F, bf.cyc = 200, bf.tol = 0.02, bf.trace = F)
m00 <- gamlss(as.formula(form), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"))
m0 <- gamlss(y~X, data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"))
m0 <- gamlss(as.formula(form2), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"))
xupdate <- ri(x1, Lp = 1, method = "GAIC", start = 0.05, Lp = 1, kappa = 1e-05, iter = 10000, c.crit = 1e-03, k = 2)
m0 <- gamlss(as.formula(form3), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"), control = fitcontrol, i.control = fitcontrol2, trace = F)
m0 <- gamlss(y~ri(X, lambda = 1, Lp = 1), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"), control = fitcontrol, i.control = fitcontrol2, trace = F)
m0$mu.coefficients <- c(m0$mu.coefficients[1], m0$mu.coefSmo[[1]]$coef)
#,m0$mu.coefSmo[[2]]$coef,m0$mu.coefSmo[[3]]$coef,m0$mu.coefSmo[[4]]$coef,m0$mu.coefSmo[[5]]$coef,m0$mu.coefSmo[[6]]$coef)
names(m0$mu.coefficients) <- c("(Intercept)", "x1","x2","x3","x4","x5","x6")
predictions2 <- rep(m0$mu.coefficients[1], length(testIndices)) + as.matrix(usair[testIndices,-1])%*%m0$mu.coefficients[-1]
print(RMSE(usair[testIndices,1], predictions2))
predictions1 <- rep(m00$mu.coefficients[1], length(testIndices)) + as.matrix(usair[testIndices,-1])%*%m00$mu.coefficients[-1]
predict(m0, newdata = usair[testIndices,2:length(colnames(usair))], what = c("mu"))
plot(getSmo(m0))
| /temp1.R | no_license | kilianbakker/KNMI-internship | R | false | false | 2,149 | r | testIndices <- c(1:10)
X<-scale(usair[-testIndices,2:length(colnames(usair))])
form <- paste0("y","~",paste0(colnames(usair)[2:length(colnames(usair))], collapse = "+"))
form2 <- paste0("y","~",paste0(paste0("pb(",colnames(usair)[2:length(colnames(usair))],")"), collapse = "+"))
form3 <- paste0("y","~",paste0(paste0("ridge(",colnames(usair)[2:length(colnames(usair))],")"), collapse = "+"))
fitcontrol <- gamlss.control(c.crit = 0.02, n.cyc = 200, mu.step = 1, sigma.step = 1, nu.step = 1, tau.step = 1)
fitcontrol2 <- glim.control(cc = 0.02, cyc = 200, glm.trace = F, bf.cyc = 200, bf.tol = 0.02, bf.trace = F)
m00 <- gamlss(as.formula(form), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"))
m0 <- gamlss(y~X, data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"))
m0 <- gamlss(as.formula(form2), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"))
xupdate <- ri(x1, Lp = 1, method = "GAIC", start = 0.05, Lp = 1, kappa = 1e-05, iter = 10000, c.crit = 1e-03, k = 2)
m0 <- gamlss(as.formula(form3), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"), control = fitcontrol, i.control = fitcontrol2, trace = F)
m0 <- gamlss(y~ri(X, lambda = 1, Lp = 1), data = usair[-testIndices,], family = NO(mu.link = "identity", sigma.link = "identity"), control = fitcontrol, i.control = fitcontrol2, trace = F)
m0$mu.coefficients <- c(m0$mu.coefficients[1], m0$mu.coefSmo[[1]]$coef)
#,m0$mu.coefSmo[[2]]$coef,m0$mu.coefSmo[[3]]$coef,m0$mu.coefSmo[[4]]$coef,m0$mu.coefSmo[[5]]$coef,m0$mu.coefSmo[[6]]$coef)
names(m0$mu.coefficients) <- c("(Intercept)", "x1","x2","x3","x4","x5","x6")
predictions2 <- rep(m0$mu.coefficients[1], length(testIndices)) + as.matrix(usair[testIndices,-1])%*%m0$mu.coefficients[-1]
print(RMSE(usair[testIndices,1], predictions2))
predictions1 <- rep(m00$mu.coefficients[1], length(testIndices)) + as.matrix(usair[testIndices,-1])%*%m00$mu.coefficients[-1]
predict(m0, newdata = usair[testIndices,2:length(colnames(usair))], what = c("mu"))
plot(getSmo(m0))
|
library(ggplot2)
library(Rsamtools)
library(svMisc)
library(seqinr)
library(reshape2)
library(cowplot)
setwd('/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch')
load(file = '/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch/figure_4A.rdata')
load(file='/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch/figure_4B.rdata')
load(file='/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch/figure_4C.rdata')
plot_grid(plot,cmv_plot_4b, cumulative_freq_with_human ,labels = c('A','B','C'), ncol = 3, align = 'hv')
ggsave(plot = last_plot(), height = 3, width = 8, filename = 'figure_4.pdf')
| /reproducibility/CMV/SOT/current_version/figure_4.R | no_license | vpeddu/CMV-NIPT | R | false | false | 741 | r | library(ggplot2)
library(Rsamtools)
library(svMisc)
library(seqinr)
library(reshape2)
library(cowplot)
setwd('/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch')
load(file = '/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch/figure_4A.rdata')
load(file='/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch/figure_4B.rdata')
load(file='/Users/gerbix/Documents/vikas/NIPT/nipt_git_repo/reproducibility/CMV/SOT/fragment_patch/figure_4C.rdata')
plot_grid(plot,cmv_plot_4b, cumulative_freq_with_human ,labels = c('A','B','C'), ncol = 3, align = 'hv')
ggsave(plot = last_plot(), height = 3, width = 8, filename = 'figure_4.pdf')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_graph_weighted.R
\name{is_graph_weighted}
\alias{is_graph_weighted}
\title{Is the graph a weighted graph?}
\usage{
is_graph_weighted(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a logical value.
}
\description{
Provides a logical value on whether
the graph is weighted. A graph is considered to
be weighted when it contains edges that all
have a edge \code{weight} attribute with
numerical values assigned for all edges.
}
\examples{
# Create a graph where the edges have
# a `weight` attribute
graph <-
create_graph() \%>\%
add_cycle(n = 5) \%>\%
select_edges() \%>\%
set_edge_attrs_ws(
edge_attr = weight,
value = c(3, 5, 2, 9, 6)) \%>\%
clear_selection()
# Determine whether the graph
# is a weighted graph
is_graph_weighted(graph)
# Create graph where the edges do
# not have a `weight` attribute
graph <-
create_graph() \%>\%
add_cycle(n = 5)
# Determine whether this graph
# is weighted
graph \%>\%
is_graph_weighted()
}
| /man/is_graph_weighted.Rd | permissive | OleksiyAnokhin/DiagrammeR | R | false | true | 1,077 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_graph_weighted.R
\name{is_graph_weighted}
\alias{is_graph_weighted}
\title{Is the graph a weighted graph?}
\usage{
is_graph_weighted(graph)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
}
\value{
a logical value.
}
\description{
Provides a logical value on whether
the graph is weighted. A graph is considered to
be weighted when it contains edges that all
have a edge \code{weight} attribute with
numerical values assigned for all edges.
}
\examples{
# Create a graph where the edges have
# a `weight` attribute
graph <-
create_graph() \%>\%
add_cycle(n = 5) \%>\%
select_edges() \%>\%
set_edge_attrs_ws(
edge_attr = weight,
value = c(3, 5, 2, 9, 6)) \%>\%
clear_selection()
# Determine whether the graph
# is a weighted graph
is_graph_weighted(graph)
# Create graph where the edges do
# not have a `weight` attribute
graph <-
create_graph() \%>\%
add_cycle(n = 5)
# Determine whether this graph
# is weighted
graph \%>\%
is_graph_weighted()
}
|
# Plot an ROC curve (one line per value of the 'curve' column) from a
# tab-separated file with columns fpr, tpr and curve.
# Usage: Rscript plot_clinvar_roc.R <dat_file> <out_file>
library(ggplot2)  # library() errors loudly if missing; require() only returns FALSE
args = commandArgs(trailingOnly=TRUE)
# Fail fast with a usage message instead of a cryptic NA-subscript error.
if (length(args) < 2) {
  stop("usage: plot_clinvar_roc.R <dat_file> <out_file>", call. = FALSE)
}
dat_file = args[1]
out = args[2]
d = read.delim(dat_file, sep='\t', header=TRUE)
ggplot(data=d) + geom_line(aes(x=fpr,y=tpr, colour=curve), size=2) + theme_bw(base_size=24) +
    theme(legend.position=c(0.7, 0.2), legend.title = element_blank()) +
    xlab('False Positive Rate') + ylab('True Positive Rate') +
    ggtitle('ClinVar Missense Variants')
ggsave(out)
| /src/scripts/plot_clinvar_roc.R | permissive | samesense/pathopredictor | R | false | false | 410 | r | require(ggplot2)
args = commandArgs(trailingOnly=TRUE)
dat_file = args[1]
out = args[2]
d = read.delim(dat_file, sep='\t', header=TRUE)
ggplot(data=d) + geom_line(aes(x=fpr,y=tpr, colour=curve), size=2) + theme_bw(base_size=24) +
theme(legend.position=c(0.7, 0.2), legend.title = element_blank()) +
xlab('False Positive Rate') + ylab('True Positive Rate') +
ggtitle('ClinVar Missense Variants')
ggsave(out)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{getCatalogs}
\alias{getCatalogs}
\title{Get Catalogs}
\usage{
getCatalogs(server, apiKey = NULL)
}
\arguments{
\item{server}{The server to query for the catalogs.}
\item{apiKey}{The user's apiKey to access the API, if the API is not secured this can be NULL.}
}
\description{
The getCatalogs(server) method can be used to get all the catalogs from the provided rds.server.
}
| /man/getCatalogs.Rd | permissive | mtna/rds-r | R | false | true | 472 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{getCatalogs}
\alias{getCatalogs}
\title{Get Catalogs}
\usage{
getCatalogs(server, apiKey = NULL)
}
\arguments{
\item{server}{The server to query for the catalogs.}
\item{apiKey}{The user's apiKey to access the API, if the API is not secured this can be NULL.}
}
\description{
The getCatalogs(server) method can be used to get all the catalogs from the provided rds.server.
}
|
\name{FR}
\alias{varFR}
\alias{esFR}
\title{Freimer distribution}
\description{Computes the pdf, cdf, value at risk and expected shortfall for the Freimer distribution due to Freimer et al. (1988) given by
\deqn{\begin{array}{ll}
&\displaystyle
{\rm VaR}_p (X) = \frac {1}{a} \left[ \frac {p^b - 1}{b} -
\frac {(1 - p)^c - 1}{c} \right],
\\
&\displaystyle
{\rm ES}_p (X) = \frac {1}{a} \left( \frac {1}{c} - \frac {1}{b} \right) +
\frac {p^b}{a b (b + 1)} + \frac {(1 - p)^{c + 1} - 1}{p a c (c + 1)}
\end{array}}
for \eqn{0 < p < 1}, \eqn{a > 0}, the scale parameter,
\eqn{b > 0}, the first shape parameter, and \eqn{c > 0}, the second shape parameter.}
\usage{
varFR(p, a=1, b=1, c=1, log.p=FALSE, lower.tail=TRUE)
esFR(p, a=1, b=1, c=1)
}
\arguments{
\item{p}{scalar or vector of values at which the value at risk or expected shortfall needs to be computed}
\item{a}{the value of the scale parameter, must be positive, the default is 1}
\item{b}{the value of the first shape parameter, must be positive, the default is 1}
\item{c}{the value of the second shape parameter, must be positive, the default is 1}
\item{log}{if TRUE then log(pdf) are returned}
\item{log.p}{if TRUE then log(cdf) are returned and quantiles are computed for exp(p)}
\item{lower.tail}{if FALSE then 1-cdf are returned and quantiles are computed for 1-p}
}
\value{An object of the same length as \code{x}, giving the pdf or cdf values computed at \code{x} or an object of the same length as \code{p}, giving the values at risk or expected shortfall computed at \code{p}.}
\references{Stephen Chan, Saralees Nadarajah & Emmanuel Afuecheta (2016). An R Package for Value at Risk and Expected Shortfall, Communications in Statistics - Simulation and Computation, 45:9, 3416-3434, \doi{10.1080/03610918.2014.944658}}
\author{Saralees Nadarajah}
\examples{x=runif(10,min=0,max=1)
varFR(x)
esFR(x)}
| /man/FR.Rd | no_license | cran/VaRES | R | false | false | 1,922 | rd | \name{FR}
\alias{varFR}
\alias{esFR}
\title{Freimer distribution}
\description{Computes the pdf, cdf, value at risk and expected shortfall for the Freimer distribution due to Freimer et al. (1988) given by
\deqn{\begin{array}{ll}
&\displaystyle
{\rm VaR}_p (X) = \frac {1}{a} \left[ \frac {p^b - 1}{b} -
\frac {(1 - p)^c - 1}{c} \right],
\\
&\displaystyle
{\rm ES}_p (X) = \frac {1}{a} \left( \frac {1}{c} - \frac {1}{b} \right) +
\frac {p^b}{a b (b + 1)} + \frac {(1 - p)^{c + 1} - 1}{p a c (c + 1)}
\end{array}}
for \eqn{0 < p < 1}, \eqn{a > 0}, the scale parameter,
\eqn{b > 0}, the first shape parameter, and \eqn{c > 0}, the second shape parameter.}
\usage{
varFR(p, a=1, b=1, c=1, log.p=FALSE, lower.tail=TRUE)
esFR(p, a=1, b=1, c=1)
}
\arguments{
\item{p}{scalar or vector of values at which the value at risk or expected shortfall needs to be computed}
\item{a}{the value of the scale parameter, must be positive, the default is 1}
\item{b}{the value of the first shape parameter, must be positive, the default is 1}
\item{c}{the value of the second shape parameter, must be positive, the default is 1}
\item{log}{if TRUE then log(pdf) are returned}
\item{log.p}{if TRUE then log(cdf) are returned and quantiles are computed for exp(p)}
\item{lower.tail}{if FALSE then 1-cdf are returned and quantiles are computed for 1-p}
}
\value{An object of the same length as \code{x}, giving the pdf or cdf values computed at \code{x} or an object of the same length as \code{p}, giving the values at risk or expected shortfall computed at \code{p}.}
\references{Stephen Chan, Saralees Nadarajah & Emmanuel Afuecheta (2016). An R Package for Value at Risk and Expected Shortfall, Communications in Statistics - Simulation and Computation, 45:9, 3416-3434, \doi{10.1080/03610918.2014.944658}}
\author{Saralees Nadarajah}
\examples{x=runif(10,min=0,max=1)
varFR(x)
esFR(x)}
|
\name{limma.one.sided}
\alias{limma.one.sided}
\title{Internal algorithm: Make limma test one-sided...}
\usage{limma.one.sided(fit, lower=FALSE)}
\description{Internal algorithm: Make limma test one-sided}
\arguments{\item{fit}{Result of "lmFit" and "eBayes" functions in "limma" package.}
\item{lower}{Should the one-sided p-value indicate down-regulation?}}
| /man/limma.one.sided.Rd | no_license | cran/miRtest | R | false | false | 357 | rd | \name{limma.one.sided}
\alias{limma.one.sided}
\title{Internal algorithm: Make limma test one-sided...}
\usage{limma.one.sided(fit, lower=FALSE)}
\description{Internal algorithm: Make limma test one-sided}
\arguments{\item{fit}{Result of "lmFit" and "eBayes" functions in "limma" package.}
\item{lower}{Shall one-sided p-value indicated down-regultation?}}
|
#' @title subtract
#' @description Compute the difference of two numbers.
#'
#' @param x A real number (the minuend)
#' @param y A real number (the subtrahend)
#'
#' @return the subtraction of \code{x} and \code{y}
#' @examples
#' subtract(1,1)
#' subtract(10,2)
subtract <- function(x, y) {
  difference <- x - y
  difference
}
| /ltevrn/R/subtract.R | no_license | lauratomkinsku/tomkins_dcei | R | false | false | 253 | r | #' @title subtract
#' @description Compute the difference of two numbers.
#'
#' @param x A real number (the minuend)
#' @param y A real number (the subtrahend)
#'
#' @return the subtraction of \code{x} and \code{y}
#' @examples
#' subtract(1,1)
#' subtract(10,2)
subtract <- function(x, y) {
  difference <- x - y
  difference
}
|
# Exploratory Data Analysis project, plot 2: bar plot of total PM2.5
# emissions in Baltimore City (fips == "24510") per measurement year,
# written to plot2.png.
source("downloadArchive.R")  # presumably fetches/unpacks the .rds inputs -- see that script
# Load the NEI & SCC data frames.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")  # NOTE(review): SCC is loaded but not used in this plot
# Subset NEI data by Baltimore's fip.
baltimoreNEI <- NEI[NEI$fips=="24510",]
# Aggregate using sum the Baltimore emissions data by year
aggTotalsBaltimore <- aggregate(Emissions ~ year, baltimoreNEI,sum)
# Render to a 480x480 PNG with a transparent background.
png("plot2.png",width=480,height=480,units="px",bg="transparent")
barplot(
aggTotalsBaltimore$Emissions,
names.arg=aggTotalsBaltimore$year,
xlab="Year",
ylab="PM2.5 Emissions (Tons)",
main="Total PM2.5 Emissions From all Baltimore City Sources"
)
dev.off()  # close the PNG device so the file is flushed to disk
| /plot2.R | no_license | cguduru/Exploratory-Data-Analysis-Projects- | R | false | false | 628 | r | source("downloadArchive.R")
# Load the NEI & SCC data frames.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset NEI data by Baltimore's fip.
baltimoreNEI <- NEI[NEI$fips=="24510",]
# Aggregate using sum the Baltimore emissions data by year
aggTotalsBaltimore <- aggregate(Emissions ~ year, baltimoreNEI,sum)
png("plot2.png",width=480,height=480,units="px",bg="transparent")
barplot(
aggTotalsBaltimore$Emissions,
names.arg=aggTotalsBaltimore$year,
xlab="Year",
ylab="PM2.5 Emissions (Tons)",
main="Total PM2.5 Emissions From all Baltimore City Sources"
)
dev.off()
|
# Repeated leave-one-out cross-validation (LOOCV) of an rpart decision tree
# on the Wisconsin breast cancer data.
#
# For each of `numTrials` trials a random 90% subsample is drawn; within that
# subsample every record is held out once, a tree is fit on the remainder,
# and the held-out record is classified.  The per-trial accuracies are
# summarised by their mean and standard deviation.
library(rpart)

input_data <- read.csv("C:/Users/sam/Desktop/DM Coursework Data/breast-cancer-wisconsin.data",
                       header = FALSE)
# Drop the sample-id column; it carries no predictive information.
input_data <- input_data[-1]
# BUG FIX: the original called na.omit() without keeping the result, so rows
# containing NA were never actually removed.
input_data <- na.omit(input_data)
colnames(input_data) <- c("Clump Thickness", "Uniformity of Cell Size",
                          "Uniformity of Cell Shape", "Marginal Adhesion",
                          "Single Epithelial Cell Size", "Bare Nuclei",
                          "Bland Chromatin", "Normal Nucleoli", "Mitoses",
                          "class")

numRecords <- nrow(input_data)
numTrials <- 20
# 90% subsample; floor() guards against passing a fractional size to
# sample.int() when numRecords is not a multiple of 10.
sample_size <- floor(numRecords * 0.9) + 1

accuracies <- numeric(numTrials)  # preallocate instead of growing with append()
for (trial in seq_len(numTrials)) {
  subsample <- sample.int(n = nrow(input_data), size = sample_size, replace = FALSE)
  main_training_set <- input_data[subsample, ]
  correct_predictions <- 0
  for (i in seq_len(sample_size)) {
    row <- main_training_set[i, ]           # held-out record
    training_set <- main_training_set[-i, ] # everything else
    decision_tree <- rpart(class ~ ., data = training_set, method = "class")
    prediction <- predict(decision_tree, newdata = row[-10], type = "class")
    # (Counter incremented directly; the original's `predict <- 0` shadowed
    # the stats::predict() generic.)
    if (row$class == prediction) {
      correct_predictions <- correct_predictions + 1
    }
  }
  accuracies[trial] <- correct_predictions / sample_size
}

mean_accuracy <- mean(accuracies)
accuracies
std_deviation <- sd(accuracies)
| /R Code/Classification Model/Breast Cancer Dataset/LOOCV.R | no_license | Sam-Malpass/Data-Analytics-and-Mining | R | false | false | 1,189 | r | # LOOCV
# Repeated leave-one-out cross-validation (LOOCV) of an rpart decision tree
# on the Wisconsin breast cancer data: for each trial a random 90% subsample
# is drawn, every record in it is held out once, and the per-trial accuracy
# is recorded.
library(rpart)

input_data <- read.csv("C:/Users/sam/Desktop/DM Coursework Data/breast-cancer-wisconsin.data",
                       header = FALSE)
# Drop the sample-id column; it carries no predictive information.
input_data <- input_data[-1]
# BUG FIX: the original called na.omit() without keeping the result, so rows
# containing NA were never actually removed.
input_data <- na.omit(input_data)
colnames(input_data) <- c("Clump Thickness", "Uniformity of Cell Size",
                          "Uniformity of Cell Shape", "Marginal Adhesion",
                          "Single Epithelial Cell Size", "Bare Nuclei",
                          "Bland Chromatin", "Normal Nucleoli", "Mitoses",
                          "class")

numRecords <- nrow(input_data)
numTrials <- 20
# 90% subsample; floor() guards against passing a fractional size to
# sample.int() when numRecords is not a multiple of 10.
sample_size <- floor(numRecords * 0.9) + 1

accuracies <- numeric(numTrials)  # preallocate instead of growing with append()
for (trial in seq_len(numTrials)) {
  subsample <- sample.int(n = nrow(input_data), size = sample_size, replace = FALSE)
  main_training_set <- input_data[subsample, ]
  correct_predictions <- 0
  for (i in seq_len(sample_size)) {
    row <- main_training_set[i, ]           # held-out record
    training_set <- main_training_set[-i, ] # everything else
    decision_tree <- rpart(class ~ ., data = training_set, method = "class")
    prediction <- predict(decision_tree, newdata = row[-10], type = "class")
    # (Counter incremented directly; the original's `predict <- 0` shadowed
    # the stats::predict() generic.)
    if (row$class == prediction) {
      correct_predictions <- correct_predictions + 1
    }
  }
  accuracies[trial] <- correct_predictions / sample_size
}

mean_accuracy <- mean(accuracies)
accuracies
std_deviation <- sd(accuracies)
|
# Unroot a phylogenetic tree (requires the `ape` package).  The output path
# suggests this prepares input for codeml -- TODO confirm against the pipeline.
library(ape)
# Read the Newick tree for locus 8806_3.
testtree <- read.tree("8806_3.txt")
# Remove the root node.
unrooted_tr <- unroot(testtree)
# Write the unrooted tree back out in Newick format.
write.tree(unrooted_tr, file="8806_3_unrooted.txt")
testtree <- read.tree("8806_3.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8806_3_unrooted.txt") |
#### Detecting Local Mins & Maxs ####
setwd("~/Documents/Medical_Imaging")
#Example with galaxies...
#Generate some synthetic data
layout(t(1:2))
set.seed(4)
points <- rbinom(100*100,1,.001) %>% as.cimg(x=100,y=100)
blobs <- isoblur(points,5)
plot(points,main="Random points")
plot(blobs,main="Blobs")
#Look at Hessian
imhessian(blobs)
#Derivatives
Hdet <- with(imhessian(blobs),(xx*yy - xy^2))
plot(Hdet,main="Determinant of Hessian")
#Get only pixels with highest values
threshold(Hdet,"99%") %>% plot(main="Determinant: 1% highest values")
#Label said regions
lab <- threshold(Hdet,"99%") %>% label
plot(lab,main="Labelled regions")
#Extract the labels
df <- as.data.frame(lab) %>% subset(value>0)
head(df,3)
#See how many local max's
unique(df$value)
#Split the data.frame into regions, and compute the mean coordinate values in each
centers <- dplyr::group_by(df,value) %>% dplyr::summarise(mx=mean(x),my=mean(y))
#Overlay results
plot(blobs)
with(centers,points(mx,my,col="red"))
#Now add noise to the synthetic data
nblobs <- blobs+.001*imnoise(dim=dim(blobs))
plot(nblobs,main="Noisy blobs")
#Summarized commands:
# Locate blob centres in an image `im`: threshold the Hessian-determinant
# response at quantile `thr` (default: top 1% of pixels), label the surviving
# connected regions, and return one row per region with its centre of mass
# (mx, my).
# NOTE(review): the determinant here is computed as -xx*yy + xy^2, i.e. with
# the opposite sign to the earlier `Hdet <- xx*yy - xy^2` computation --
# confirm the sign flip is intentional.
get.centers <- function(im,thr="99%")
{
dt <- imhessian(im) %$% { -xx*yy + xy^2 } %>% threshold(thr) %>% label
as.data.frame(dt) %>% subset(value>0) %>% dplyr::group_by(value) %>% dplyr::summarise(mx=mean(x),my=mean(y))
}
plot(nblobs)
get.centers(nblobs,"99%") %$% points(mx,my,col="red")
#Extra de-noising step:
nblobs.denoised <- isoblur(nblobs,2)
plot(nblobs.denoised)
get.centers(nblobs.denoised,"99%") %$% points(mx,my,col="red")
#### Moving onto Hubble ####
#Load Example Data
hub <- load.example("hubble") %>% grayscale
plot(hub,main="Hubble Deep Field")
#First attempt (using the function defined above):
plot(hub)
get.centers(hub,"99.8%") %$% points(mx,my,col="red")
#Add blur results:
plot(hub)
isoblur(hub,5) %>% get.centers("99.8%") %$% points(mx,my,col="red")
#Multi-scale approach:
#Compute determinant at scale "scale".
# Blob-detector response: blur `im` at the given scale, then return the
# scale-normalised determinant of the Hessian (the scale^2 factor makes
# responses comparable across scales).
hessdet <- function(im,scale=1) isoblur(im,scale) %>% imhessian %$% { scale^2*(xx*yy - xy^2) }
#Note the scaling (scale^2) factor in the determinant
plot(hessdet(hub,1),main="Determinant of the Hessian at scale 1")
#Get a data.frame with results at scale 2, 3 and 4
dat <- ldply(c(2,3,4),function(scale) hessdet(hub,scale) %>% as.data.frame %>% mutate(scale=scale))
p <- ggplot(dat,aes(x,y))+geom_raster(aes(fill=value))+facet_wrap(~ scale)
p+scale_x_continuous(expand=c(0,0))+scale_y_continuous(expand=c(0,0),trans=scales::reverse_trans())
#Data across scales
scales <- seq(2,20,l=10)
d.max <- llply(scales,function(scale) hessdet(hub,scale)) %>% parmax
plot(d.max,main="Point-wise maximum across scales")
#Something I don't quite understand:
i.max <- llply(scales,function(scale) hessdet(hub,scale)) %>% which.parmax
plot(i.max,main="Index of the point-wise maximum \n across scales")
#Label and plot the regions:
#Get a data.frame of labelled regions
labs <- d.max %>% threshold("96%") %>% label %>% as.data.frame
#Add scale indices
labs <- mutate(labs,index=as.data.frame(i.max)$value)
regs <- dplyr::group_by(labs,value) %>% dplyr::summarise(mx=mean(x),my=mean(y),scale.index=mean(index))
p <- ggplot(as.data.frame(hub),aes(x,y))+geom_raster(aes(fill=value))+geom_point(data=regs,aes(mx,my,size=scale.index),pch=2,col="red")
p+scale_fill_gradient(low="black",high="white")+scale_x_continuous(expand=c(0,0))+scale_y_continuous(expand=c(0,0),trans=scales::reverse_trans())
#Running Example with thermo images
#Example w/ Pixsets
im <- load.image("0101_baseline_anterior.jpg") %>% grayscale
im2 <-load.image("0101_baseline_anterior2.jpg") %>% grayscale
#Select pixels with high luminance
plot(px)
sum(px)
mean(px)
plot(im)
#Convert to image
as.cimg(px)
plot(as.cimg(px))
#Highlight pixset on image:
plot(im)
px <- im > .3 & (Xc(img) %inr% c(26,615)) & (Yc(img) %inr% c(41,448))
highlight(px)
plot(im2)
px2 <- im2 > .3 & (Xc(im2) %inr% c(26,615)) & (Yc(im2) %inr% c(41,448))
highlight(px2)
View(im)
View(px)
plot(px)
px
img
plot(im)
plot(split_connected(px))
plot(px)
#Boundary
boundary(px) %>% plot
plot(im)
boundary(px) %>% where %$% { points(x,y,cex=.1,col="red") }
im <- im & px
plot(im)
plot(px)
dfpx<-as.data.frame(px)
View(dfpx)
View(im)
##The actual thing...
img <- load.image("0101_baseline_anterior.jpg") %>% grayscale
plot(im3)
imsub(img,x %inr% c(26,615),y %inr% c(41,440)) %>% plot
highlight(px)
get.centers(im3,"99%") %$% points(mx,my,col="red")
msk <- px.flood(parrots,100,100,sigma=.28) %>% as.cimg
plot(parrots*msk)
get.locations(im, im > .3)
View(im)
View(dfim)
View(dfpx)
dfim$x
dfim<-as.data.frame(im)
dfpx<-as.data.frame(px)
intersect<-paste0(dfim$x,dfim$y) %in% paste0(dfpx$x,dfpx$y)
intersect
bwint<-as.integer(intersect)
bwint
bwint<-as.integer(px[,,1,1])
dfim2<-dfim
dfim2$value<-dfim2$value*bwint
View(dfim2)
im3<-as.cimg(dfim2)
plot(im3)
bwint<-as.integer(px[,,1,1])
bwint
length(im[,,1,1])
#Coin Example
im <- load.example("coins")
plot(im)
#Thresholding
threshold(im) %>% plot
#Correct with linear model
d <- as.data.frame(im)
##Subsamble, fit a linear model
m <- sample_n(d,1e4) %>% lm(value ~ x*y,data=.)
##Correct by removing the trend
im.c <- im-predict(m,d)
out <- threshold(im.c)
plot(out)
#Correct more
out <- clean(out,3) %>% imager::fill(7)
plot(im)
highlight(out)
#Watershed approach
d <- as.data.frame(im)
m <- sample_n(d,1e4) %>% lm(value ~ x*y,data=.)
im.c <- im-predict(m,d)
bg <- (!threshold(im.c,"25%"))
fg <- (threshold(im.c,"75%"))
imlist(fg,bg) %>% plot(layout="row")
seed <- bg+2*fg
plot(seed)
edges <- imgradient(im,"xy") %>% enorm
p <- 1/(1+edges)
plot(p)
ws <- (watershed(seed,p)==1)
plot(ws)
ws <- bucketfill(ws,1,1,color=2) %>% {!( . == 2) }
plot(ws)
clean(ws,5) %>% plot
split_connected(ws) %>% purrr::discard(~ sum(.) < 100) %>%
parany %>% plot
| /mins_and_maxs.R | no_license | jmostovoy/Medical_Imaging | R | false | false | 5,839 | r | #### Detecting Local Mins & Maxs ####
setwd("~/Documents/Medical_Imaging")
#Example with galaxies...
#Generate some synthetic data
layout(t(1:2))
set.seed(4)
points <- rbinom(100*100,1,.001) %>% as.cimg(x=100,y=100)
blobs <- isoblur(points,5)
plot(points,main="Random points")
plot(blobs,main="Blobs")
#Look at Hessian
imhessian(blobs)
#Derivatives
Hdet <- with(imhessian(blobs),(xx*yy - xy^2))
plot(Hdet,main="Determinant of Hessian")
#Get only pixels with highest values
threshold(Hdet,"99%") %>% plot(main="Determinant: 1% highest values")
#Label said regions
lab <- threshold(Hdet,"99%") %>% label
plot(lab,main="Labelled regions")
#Extract the labels
df <- as.data.frame(lab) %>% subset(value>0)
head(df,3)
#See how many local max's
unique(df$value)
#Split the data.frame into regions, and compute the mean coordinate values in each
centers <- dplyr::group_by(df,value) %>% dplyr::summarise(mx=mean(x),my=mean(y))
#Overlay results
plot(blobs)
with(centers,points(mx,my,col="red"))
#Now add noise to the synthetic data
nblobs <- blobs+.001*imnoise(dim=dim(blobs))
plot(nblobs,main="Noisy blobs")
#Summarized commands:
get.centers <- function(im,thr="99%")
{
dt <- imhessian(im) %$% { -xx*yy + xy^2 } %>% threshold(thr) %>% label
as.data.frame(dt) %>% subset(value>0) %>% dplyr::group_by(value) %>% dplyr::summarise(mx=mean(x),my=mean(y))
}
plot(nblobs)
get.centers(nblobs,"99%") %$% points(mx,my,col="red")
#Extra de-noising step:
nblobs.denoised <- isoblur(nblobs,2)
plot(nblobs.denoised)
get.centers(nblobs.denoised,"99%") %$% points(mx,my,col="red")
#### Moving onto Hubble ####
#Load Example Data
hub <- load.example("hubble") %>% grayscale
plot(hub,main="Hubble Deep Field")
#First attempt (using the function defined above):
plot(hub)
get.centers(hub,"99.8%") %$% points(mx,my,col="red")
#Add blur results:
plot(hub)
isoblur(hub,5) %>% get.centers("99.8%") %$% points(mx,my,col="red")
#Multi-scale approach:
#Compute determinant at scale "scale".
hessdet <- function(im,scale=1) isoblur(im,scale) %>% imhessian %$% { scale^2*(xx*yy - xy^2) }
#Note the scaling (scale^2) factor in the determinant
plot(hessdet(hub,1),main="Determinant of the Hessian at scale 1")
#Get a data.frame with results at scale 2, 3 and 4
dat <- ldply(c(2,3,4),function(scale) hessdet(hub,scale) %>% as.data.frame %>% mutate(scale=scale))
p <- ggplot(dat,aes(x,y))+geom_raster(aes(fill=value))+facet_wrap(~ scale)
p+scale_x_continuous(expand=c(0,0))+scale_y_continuous(expand=c(0,0),trans=scales::reverse_trans())
#Data across scales
scales <- seq(2,20,l=10)
d.max <- llply(scales,function(scale) hessdet(hub,scale)) %>% parmax
plot(d.max,main="Point-wise maximum across scales")
#Something I don't quite understand:
i.max <- llply(scales,function(scale) hessdet(hub,scale)) %>% which.parmax
plot(i.max,main="Index of the point-wise maximum \n across scales")
#Label and plot the regions:
#Get a data.frame of labelled regions
labs <- d.max %>% threshold("96%") %>% label %>% as.data.frame
#Add scale indices
labs <- mutate(labs,index=as.data.frame(i.max)$value)
regs <- dplyr::group_by(labs,value) %>% dplyr::summarise(mx=mean(x),my=mean(y),scale.index=mean(index))
p <- ggplot(as.data.frame(hub),aes(x,y))+geom_raster(aes(fill=value))+geom_point(data=regs,aes(mx,my,size=scale.index),pch=2,col="red")
p+scale_fill_gradient(low="black",high="white")+scale_x_continuous(expand=c(0,0))+scale_y_continuous(expand=c(0,0),trans=scales::reverse_trans())
#Running Example with thermo images
#Example w/ Pixsets
im <- load.image("0101_baseline_anterior.jpg") %>% grayscale
im2 <-load.image("0101_baseline_anterior2.jpg") %>% grayscale
#Select pixels with high luminance
plot(px)
sum(px)
mean(px)
plot(im)
#Convert to image
as.cimg(px)
plot(as.cimg(px))
#Highlight pixset on image:
plot(im)
px <- im > .3 & (Xc(img) %inr% c(26,615)) & (Yc(img) %inr% c(41,448))
highlight(px)
plot(im2)
px2 <- im2 > .3 & (Xc(im2) %inr% c(26,615)) & (Yc(im2) %inr% c(41,448))
highlight(px2)
View(im)
View(px)
plot(px)
px
img
plot(im)
plot(split_connected(px))
plot(px)
#Boundary
boundary(px) %>% plot
plot(im)
boundary(px) %>% where %$% { points(x,y,cex=.1,col="red") }
im <- im & px
plot(im)
plot(px)
dfpx<-as.data.frame(px)
View(dfpx)
View(im)
##The actual thing...
img <- load.image("0101_baseline_anterior.jpg") %>% grayscale
plot(im3)
imsub(img,x %inr% c(26,615),y %inr% c(41,440)) %>% plot
highlight(px)
get.centers(im3,"99%") %$% points(mx,my,col="red")
msk <- px.flood(parrots,100,100,sigma=.28) %>% as.cimg
plot(parrots*msk)
get.locations(im, im > .3)
View(im)
View(dfim)
View(dfpx)
dfim$x
dfim<-as.data.frame(im)
dfpx<-as.data.frame(px)
intersect<-paste0(dfim$x,dfim$y) %in% paste0(dfpx$x,dfpx$y)
intersect
bwint<-as.integer(intersect)
bwint
bwint<-as.integer(px[,,1,1])
dfim2<-dfim
dfim2$value<-dfim2$value*bwint
View(dfim2)
im3<-as.cimg(dfim2)
plot(im3)
bwint<-as.integer(px[,,1,1])
bwint
length(im[,,1,1])
#Coin Example
im <- load.example("coins")
plot(im)
#Thresholding
threshold(im) %>% plot
#Correct with linear model
d <- as.data.frame(im)
##Subsamble, fit a linear model
m <- sample_n(d,1e4) %>% lm(value ~ x*y,data=.)
##Correct by removing the trend
im.c <- im-predict(m,d)
out <- threshold(im.c)
plot(out)
#Correct more
out <- clean(out,3) %>% imager::fill(7)
plot(im)
highlight(out)
#Watershed approach
d <- as.data.frame(im)
m <- sample_n(d,1e4) %>% lm(value ~ x*y,data=.)
im.c <- im-predict(m,d)
bg <- (!threshold(im.c,"25%"))
fg <- (threshold(im.c,"75%"))
imlist(fg,bg) %>% plot(layout="row")
seed <- bg+2*fg
plot(seed)
edges <- imgradient(im,"xy") %>% enorm
p <- 1/(1+edges)
plot(p)
ws <- (watershed(seed,p)==1)
plot(ws)
ws <- bucketfill(ws,1,1,color=2) %>% {!( . == 2) }
plot(ws)
clean(ws,5) %>% plot
split_connected(ws) %>% purrr::discard(~ sum(.) < 100) %>%
parany %>% plot
|
\alias{gtkTimeoutAdd}
\name{gtkTimeoutAdd}
\title{gtkTimeoutAdd}
\description{
Registers a function to be called periodically. The function will be called
repeatedly after \code{interval} milliseconds until it returns \code{FALSE} at which
point the timeout is destroyed and will not be called again.
\strong{WARNING: \code{gtk_timeout_add} is deprecated and should not be used in newly-written code. Use \code{\link{gTimeoutAdd}} instead.}
}
\usage{gtkTimeoutAdd(interval, fun, data = NULL)}
\arguments{
\item{\code{interval}}{[numeric] The time between calls to the function, in milliseconds
(1/1000ths of a second.)}
\item{\code{fun}}{[function] The function to call repeatedly; it is passed \code{data}, and returning \code{FALSE} destroys the timeout.}
\item{\code{data}}{[R object] The data to pass to the function.}
}
\value{[numeric] A unique id for the event source.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkTimeoutAdd.Rd | no_license | cran/RGtk2.10 | R | false | false | 814 | rd | \alias{gtkTimeoutAdd}
\name{gtkTimeoutAdd}
\title{gtkTimeoutAdd}
\description{
Registers a function to be called periodically. The function will be called
repeatedly after \code{interval} milliseconds until it returns \code{FALSE} at which
point the timeout is destroyed and will not be called again.
\strong{WARNING: \code{gtk_timeout_add} is deprecated and should not be used in newly-written code. Use \code{\link{gTimeoutAdd}} instead.}
}
\usage{gtkTimeoutAdd(interval, fun, data = NULL)}
\arguments{
\item{\code{interval}}{[numeric] The time between calls to the function, in milliseconds
(1/1000ths of a second.)}
\item{\code{fun}}{[function] The function to call repeatedly; it is passed \code{data}, and returning \code{FALSE} destroys the timeout.}
\item{\code{data}}{[R object] The data to pass to the function.}
}
\value{[numeric] A unique id for the event source.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
#________________________________________________________________________
#
# THESIS: Penalized Discriminant Analysis (PDA)
#
# Author: Katie Roberts
# Last edited: 12/6/2016
#
#
# Goals:
# 1. Update functions from the 'mda' package to accomodate
# a hierarchically structured data set
# i) Update the gen.ridge function -> gen.ridge.pda
# ii) Update the contr.fda function -> contr.pda
# iii) Update the fda function -> pda
# 2. Test functions ignoring hierarchy
# i) Test original IRIS data fda vs. pda with no hierarchy
# ii) Test thyroid data fda vs. pda with no hierarchy
# 3. Create IRIS data
# i) Manipulate the Iris data set to contain a psuedo
# hierarchically structured data set
# 4. Test functions assuming hierarchy
# i) Test manip. IRIS data with pda using hierarchy
# ii) Test thyroid data with pda using hierarchy
# 5. Try Thyroid hierarchy data with these models:
# i) Univariate
# ii) Final GLMM model used
# final GLMM model: cancer~ logvol + echoTextureNonHomo + microcalcYes + (1|patient_num)
#
#
# Packages used:
# "mda"
#
# Citation of "mda" package used and modified here:
# S original by Trevor Hastie & Robert Tibshirani. Original R port by Friedrich
# Leisch, Kurt Hornik and Brian D. Ripley. (2016). mda: Mixture and Flexible
# Discriminant Analysis. R package version 0.4-9.
# https://CRAN.R-project.org/package=mda
#________________________________________________________________________
#=================================================================================
#
# PDA EDITED FUNCTIONS FOR HIERARCHICAL DATA
#
#=================================================================================
#install.packages("mda")
library(mda)
#-----------------------------------------------------------------
# GEN RIDGE FUNCTION
#-----------------------------------------------------------------
#gen.ridge
#Perform a penalized regression, as used in penalized discriminant analysis.
# gen.ridge.pda: penalized (ridge) regression fit, adapted from mda::gen.ridge
# to support hierarchically structured data via the extra arguments `h` and
# `means`.
#  - h = FALSE reproduces the original gen.ridge behaviour: centre x,
#    optionally rotate into the eigenbasis of the penalty `omega`, then solve
#    the ridge problem through the SVD of x.
#  - h = TRUE assumes the caller has ALREADY centred (and scaled) x, as done
#    per subject in pda(); `means` supplies the column means reported as
#    xmeans in the result.
# Returns a "gen.ridge" object: fitted.values, coefficients, df, lambda,
# xmeans.
# NOTE(review): in the h = TRUE branch `mm` is only assigned when `means` is
# supplied; if `means` is missing the final structure() call would fail on an
# undefined `mm` -- confirm callers always pass `means` when h = TRUE.
gen.ridge.pda <- function (x, y, weights, lambda = 1, h=FALSE,means, omega, df, ...)
{
if (h==FALSE){
if (missing(df) && lambda <= .Machine$double.eps) #.Machine$double.eps is the smallest positive floating-point number on the machine running R.
return(polyreg(x, y)) #do a simple polynomial regression on x and y if missing df and lambda is essentially zero.
d <- dim(x) #dim of the predictor matrix
mm <- apply(x, 2, mean) #means by column of the pred matrix
x <- scale(x, mm, FALSE) #center x about the means but don't scale
simple <- if (missing(omega))
TRUE
else FALSE
if (!simple) {
# omega must be supplied already eigen-decomposed (list with $values/$vectors)
if (!all(match(c("values", "vectors"), names(omega),
FALSE)))
stop("You must supply an eigen-decomposed version of omega")
vals <- pmax(sqrt(.Machine$double.eps), sqrt(omega$values))
basis <- scale(omega$vectors, FALSE, vals)
x <- x %*% basis  # rotate predictors into the penalty eigenbasis
}
svd.x <- svd(x)
dd <- svd.x$d  # singular values of (centred, possibly rotated) x
if (!missing(df))
lambda = df.gold(dd, df)  # back out lambda from the requested degrees of freedom
df = sum(dd^2/(dd^2 + lambda))  # effective degrees of freedom of the ridge fit
y <- (t(t(y) %*% svd.x$u) * dd)/(dd^2 + lambda)  # ridge shrinkage in the SVD basis
coef <- svd.x$v %*% y
fitted <- x %*% coef
if (!simple)
coef <- basis %*% coef  # map coefficients back to the original predictor basis
structure(list(fitted.values = fitted, coefficients = coef,
df = df, lambda = lambda, xmeans = mm), class = "gen.ridge")
}else {
# hierarchical variant: x is assumed pre-centred/scaled by the caller
if(!missing(means)){
mm <- means  # column means computed by the caller, reported as xmeans
}
if (missing(df) && lambda <= .Machine$double.eps)
return(polyreg(x, y))
simple <- if (missing(omega))
TRUE
else FALSE
if (!simple) {
if (!all(match(c("values", "vectors"), names(omega),
FALSE)))
stop("You must supply an eigen-decomposed version of omega")
vals <- pmax(sqrt(.Machine$double.eps), sqrt(omega$values))
basis <- scale(omega$vectors, FALSE, vals)
x <- x %*% basis
}
svd.x <- svd(x)
dd <- svd.x$d
if (!missing(df))
lambda = df.gold(dd, df)
df = sum(dd^2/(dd^2 + lambda))
y <- (t(t(y) %*% svd.x$u) * dd)/(dd^2 + lambda)
coef <- svd.x$v %*% y
fitted <- x %*% coef
if (!simple)
coef <- basis %*% coef
structure(list(fitted.values = fitted, coefficients = coef,
df = df, lambda = lambda, xmeans = mm), class = "gen.ridge")
}
}
#gen.ridge(x, y, weights, lambda=1, omega, df, ...)
#gen.ridge.pda()
#-----------------------------------------------------------------
# CONTRAST FUNCTION
#-----------------------------------------------------------------
#contr.pda (adapted from mda::contr.fda)
# Build a weighted orthogonal contrast basis: the columns of
# `contrast.default` are orthonormalised (via QR) with respect to the class
# proportions `p`, and the constant column is dropped.
# NOTE(review): the default `p = rep(1, d[1])` references `d`, which only
# exists inside the body, and `contrast.default`'s default depends on `p` --
# calling contr.pda() with no arguments would fail.  In this file it is
# always called as contr.pda(dp, theta).
contr.pda <- function (p = rep(1, d[1]), contrast.default = contr.helmert(length(p)))
{
d <- dim(contrast.default)  # rows = number of classes, cols = contrasts
sqp <- sqrt(p/sum(p))       # square roots of the normalised class weights
x <- cbind(1, contrast.default) * outer(sqp, rep(1, d[2] + 1))  # weighted [1 | contrasts]
qx <- qr(x)
J <- qx$rank
# Columns 2..J of Q, rescaled back by the weights (constant column dropped).
qr.qy(qx, diag(d[1])[, seq(2, J)])/outer(sqp, rep(1, J - 1)) #computes QR decomp. matrix
}
#-----------------------------------------------------------------
# PDA FUNCTION
#-----------------------------------------------------------------
#PDA
#hier = level 1 hierarchy in data if applicable. For the thyroid data this would be subject
pda <- function (formula = formula(data), data = sys.frame(sys.parent()), hier,
weights, theta, dimension = J - 1, eps = .Machine$double.eps,
method = gen.ridge.pda, keep.fitted = (n * dimension < 5000), ...)
{
this.call <- match.call() #will add argument names if they weren't explicit
m <- match.call(expand.dots = FALSE) #don't expand the ... arguments
m[[1]] <- as.name("model.frame") #identify/label the model.frame that's read in
m <- m[match(names(m), c("", "formula", "data", "hier", "weights"), 0)] #identify/label parts of the function that are read in
m <- eval(m, parent.frame()) #evaluate m at the parent.frame environment (default)
Terms <- attr(m, "terms") #get term attributes of m
g <- model.extract(m, "response") #returns the response component of the model frame m
x <- model.matrix(Terms, m) #creates a design (model) matrix
if (attr(Terms, "intercept"))
x = x[, -1, drop = FALSE] #if there's an intercept, drop it from x
dd <- dim(x)
n <- dd[1] #number of records
weights <- model.extract(m, weights)
if (!length(weights))
weights <- rep(1, n) #if no weights, then create numeric list of 1's of length n
else if (any(weights < 0))
stop("negative weights not allowed")
if (length(g) != n)
stop("g should have length nrow(x)")
fg <- factor(g)
prior <- table(fg) #table of factored response variable
prior <- prior/sum(prior) #converted to percentage (fraction)
cnames <- levels(fg) #response variable names
g <- as.numeric(fg) #converts factored levels to numbers
J <- length(cnames) #number of levels for response variable
iswt <- FALSE
if (missing(weights))
dp <- table(g)/n
else {
weights <- (n * weights)/sum(weights)
dp <- tapply(weights, g, sum)/n
iswt <- TRUE
}
if (missing(theta))
theta <- contr.helmert(J) #runs matrix of contrasts
theta <- contr.pda(dp, theta) #function that creates contrasts to compute QR decomp. matrix
if (missing(hier)) {
#continue with original function...
Theta <- theta[g, , drop = FALSE] #applies the theta matrix contrasts to full n matrix
fit <- method(x, Theta, weights, ...) # polyreg fit method with x=predictor matrix and Theta=response matrix
if (iswt)
Theta <- Theta * weights
ssm <- t(Theta) %*% fitted(fit)/n #transpose Theta multiplied by the fitted values of fit/n
} else {
#utilize the hierarchy if applicable
hier <- model.extract(m, hier)
N <- length(unique(hier)) #number of unique level 1 hierarchical records
#Theta placeholder
dimT <- dim(theta) #collect dimensions of theta
Tcol <- dimT[2] #collect number of columns from theta
Theta = matrix(ncol=Tcol) #create a placeholder matrix for Theta
#my.gen.x placeholder
# my.gen.x = data.matrix(x[0]) #create empty data matrix to fill
my.gen.x <- matrix(ncol = ncol(x))
colnames(my.gen.x) <- colnames(x)
#my.gen.mm placeholder to collect xmeans
my.gen.mm <- matrix(ncol = ncol(x))
colnames(my.gen.mm) <- colnames(x)
#create if statement to separate those that only have one factor level vs. those with two or more.
subj <- data.frame(m$`(hier)`)
names(subj) <- "subj"
x2 <- cbind(x, subj)
# unique(x2$subj)
# BEGIN THE LEVEL 1 FOR-LOOP
for (i in unique(m$`(hier)`)){ #for each level 1 hierarchy (subject...)
#execute the pda function for nodes within each subject
hier.data <- m[m$`(hier)`==i,]
#
xi.var <- x2[x2$subj==i,]
xi <- xi.var[,-which(names(xi.var) == "subj")]
ddi <- dim(xi)
ni <- ddi[1]
gi <- model.extract(hier.data, "response")
gi <- as.numeric(gi)
#
Thetai <- theta[gi, , drop=FALSE]
Thetai <- Thetai/ni
#
my.gen.d <- dim(xi)
my.gen.mmi <- apply(xi, 2, mean)
my.gen.xi <- scale(xi, my.gen.mmi, FALSE)
my.gen.xi <- my.gen.xi/ni
my.gen.mm = rbind(my.gen.mm,my.gen.mmi) #stack xmeans
Theta = rbind(Theta, Thetai) #stack Thetai's
my.gen.x = rbind(my.gen.x, my.gen.xi) #stack the my.gen.x's
} #end level 1 for-loop
#
#remove first row in Thetai's and my.gen.x
# Theta <- Theta[-1,]
# my.gen.x <- my.gen.x[-1,]
# my.gen.mm <- my.gen.mm[-1,]
Theta <- t(t(Theta[-1,]))
my.gen.x <- t(t(my.gen.x[-1,]))
my.gen.mm <- t(t(my.gen.mm[-1,]))
mm <- apply(my.gen.mm, 2, mean) #means by column of the pred matrix
#now use the method=gen.ridge for hierarchy (h!=1)
fit <- method(my.gen.x, Theta, h=TRUE,means=mm, weights, ...) # polyreg fit method with my.gen.x=predictor matrix, Theta=response matrix, h=2 for hierarchy
#structure(list(fitted.values = fitted, coefficients = coef,
# df = df, lambda = lambda), class = "gen.ridge")
if (iswt)
Theta <- Theta * weights
ssm <- t(Theta) %*% fitted(fit)/N #transpose Theta multiplied by the fitted values of fit/n
}
#get out: Theta, fit, ssm
ed <- svd(ssm, nu = 0) #singular value decomposition of matrix ssm. nu= number of left singular vectors to be computed
thetan <- ed$v #matrix whose columns contain the right singular vectors of ssm
lambda <- ed$d #vector containing the singular values of ssm
# eps = .Machine$double.eps means the smallest positive floating-point number x such that 1+x != 1. Normally 2.220446e-16
#dimension = J - 1 number of response factors minus 1
lambda[lambda > 1 - eps] <- 1 - eps #convert value of lambda that are essentially greater than 1 to 1 minus essentially zero =~1
discr.eigen <- lambda/(1 - lambda)
pe <- (100 * cumsum(discr.eigen))/sum(discr.eigen)
dimension <- min(dimension, sum(lambda > eps))
if (dimension == 0) {
warning("degenerate problem; no discrimination")
return(structure(list(dimension = 0, fit = fit, call = this.call),
class = "fda"))
}
thetan <- thetan[, seq(dimension), drop = FALSE]
pe <- pe[seq(dimension)]
alpha <- sqrt(lambda[seq(dimension)])
sqima <- sqrt(1 - lambda[seq(dimension)])
vnames <- paste("v", seq(dimension), sep = "")
means <- scale(theta %*% thetan, FALSE, sqima/alpha) #scale theta%*%thetan by sqima/alpha
dimnames(means) <- list(cnames, vnames)
names(lambda) <- c(vnames, rep("", length(lambda) - dimension))
names(pe) <- vnames
obj <- structure(list(percent.explained = pe, values = lambda,
means = means, theta.mod = thetan, dimension = dimension,
prior = prior, fit = fit, call = this.call, terms = Terms),
class = "fda")
obj$confusion <- confusion(predict(obj), fg)
if (!keep.fitted)
obj$fit$fitted.values <- NULL
obj
}
#=================================================================================
#
# TEST THE FUNCTIONS WITH DATA BELOW
#
#=================================================================================
#-----------------------------------------------------------------
# TEST THE FUNCTION WITH NAIVE APPROACHES COMPARED TO KNOWN MDA
#-----------------------------------------------------------------
# IRIS - they match
data(iris)
head(iris)
#known function with iris data
set.seed(2345)
irisfit <- fda(Species ~ ., data = iris, method=gen.ridge)
irisfit
confusion(irisfit, iris)
irisfit$confusion
plot(irisfit,coords=c(1,2))
coef(irisfit)
# str(irisfit)
#pda function with iris data
set.seed(2345)
irisfit1 <- pda(Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width, data = iris, method=gen.ridge)
irisfit1
confusion(irisfit1, iris)
irisfit1$confusion
plot(irisfit1)
coef(irisfit1)
# str(irisfit1)
#-----------------------------------------------------------------
# CREATE TESTING IRIS DATA TO USE WITH HIERARCHY
#-----------------------------------------------------------------
#create iris data with added hierarchy
# NOTE: this hierarchy is generated to test the functionality of the code ONLY. There is no correlation
# here so the results should be less than interesting
data(iris)
my.iris <- iris
table(my.iris$Species)
my.iris$subject <- c(rep(1:50,each=3))
my.iris <- my.iris[with(my.iris, order(subject)), ]
#my.iris$node <- ave(my.iris$subject, my.iris$subject, FUN = seq_along)
#my.iris$indic <- sample(0:1, 150, replace=T) # random indicator variable
my.iris
set.seed(165)
my.iris <- my.iris[sample(nrow(my.iris), 100), ]
#sort data by subject and node
my.iris <- my.iris[order(my.iris$subject),]
my.iris$node <- ave(my.iris$subject, my.iris$subject, FUN = seq_along)
my.iris
#need to randomize the order of subject and node so that the outcome (species) will be spread over subjects
subnode <- my.iris[6:7]
subnode
names(subnode) <- c("sub1","node1")
set.seed(165)
ran <- subnode[sample(nrow(subnode)),]
my.iris <- cbind(my.iris, ran)
my.iris <- my.iris[,-c(6:7)]
names(my.iris)[names(my.iris)=="sub1"] <- "subject"
names(my.iris)[names(my.iris)=="node1"] <- "node"
my.iris <- my.iris[with(my.iris, order(subject)), ]
my.iris
#-----------------------------------------------------------------
# TEST THE FUNCTION WITH HIERARCHICALLY STRUCTURED DATA
#-----------------------------------------------------------------
# IRIS
head(my.iris)
#my function with hierarchical my.iris data
set.seed(2345)
irisfit2 <- pda(Species ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width, hier= subject,data = my.iris, method=gen.ridge)
irisfit2
#confusion(irisfit2, my.iris)
irisfit2$confusion
plot(irisfit2)
coef(irisfit2)
# Compare non-hier to hier IRIS data
head(my.iris)
#my function with iris data
set.seed(2345)
irisfit.NOhier <- pda(Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width, data = my.iris, method=gen.ridge)
irisfit.NOhier
confusion(irisfit.NOhier, my.iris)
irisfit.NOhier$confusion
plot(irisfit.NOhier)
coef(irisfit.NOhier)
# str(irisfit.NOhier)
#my function with hierarchical my.iris data
set.seed(2345)
irisfit.hier <- pda(Species ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width, hier= subject,data = my.iris, method=gen.ridge)
irisfit.hier
#confusion(irisfit.hier, my.iris)
irisfit.hier$confusion
plot(irisfit.hier)
coef(irisfit.hier)
| /source/PDA_hier_function.R | permissive | robekath/MS_MLHier_thesis | R | false | false | 15,533 | r | #________________________________________________________________________
#
# THESIS: Penalized Discriminant Analysis (PDA)
#
# Author: Katie Roberts
# Last edited: 12/6/2016
#
#
# Goals:
# 1. Update functions from the 'mda' package to accomodate
# a hierarchically structured data set
# i) Update the gen.ridge function -> gen.ridge.pda
# ii) Update the contr.fda function -> contr.pda
# iii) Update the fda function -> pda
# 2. Test functions ignoring hierarchy
# i) Test original IRIS data fda vs. pda with no hierarchy
# ii) Test thyroid data fda vs. pda with no hierarchy
# 3. Create IRIS data
# i) Manipulate the Iris data set to contain a psuedo
# hierarchically structured data set
# 4. Test functions assuming hierarchy
# i) Test manip. IRIS data with pda using hierarchy
# ii) Test thyroid data with pda using hierarchy
# 5. Try Thyroid hierarchy data with these models:
# i) Univariate
# ii) Final GLMM model used
# final GLMM model: cancer~ logvol + echoTextureNonHomo + microcalcYes + (1|patient_num)
#
#
# Packages used:
# "mda"
#
# Citation of "mda" package used and modified here:
# S original by Trevor Hastie & Robert Tibshirani. Original R port by Friedrich
# Leisch, Kurt Hornik and Brian D. Ripley. (2016). mda: Mixture and Flexible
# Discriminant Analysis. R package version 0.4-9.
# https://CRAN.R-project.org/package=mda
#________________________________________________________________________
#=================================================================================
#
# PDA EDITED FUNCTIONS FOR HIERARCHICAL DATA
#
#=================================================================================
#install.packages("mda")
library(mda)
#-----------------------------------------------------------------
# GEN RIDGE FUNCTION
#-----------------------------------------------------------------
#gen.ridge
#Perform a penalized regression, as used in penalized discriminant analysis.
# Penalized (generalized ridge) regression used as the fit method inside pda().
# Extends mda::gen.ridge with an `h` flag: when h = TRUE the predictors are
# assumed to be pre-centred/scaled by the caller (the hierarchical path in
# pda()), and the column means are supplied via `means` instead of being
# computed here.
#
# Arguments:
#   x       predictor matrix
#   y       response (score) matrix
#   weights observation weights (not used in the computation here; kept for
#           call compatibility with the other fit methods -- TODO confirm)
#   lambda  ridge penalty
#   h       FALSE = original gen.ridge behaviour; TRUE = hierarchical variant
#   means   column means of x (only consulted when h = TRUE)
#   omega   optional penalty matrix, supplied eigen-decomposed (values/vectors)
#   df      target effective degrees of freedom; if given, lambda is solved for
# Returns a "gen.ridge" object: fitted.values, coefficients, df, lambda, xmeans.
gen.ridge.pda <- function (x, y, weights, lambda = 1, h=FALSE,means, omega, df, ...)
{
  if (h==FALSE){
    # .Machine$double.eps is the smallest positive double such that 1 + eps != 1.
    # A lambda at or below it means "no penalty", so fall back to an
    # unpenalized polynomial regression.
    if (missing(df) && lambda <= .Machine$double.eps)
      return(polyreg(x, y))
    d <- dim(x) # dimensions of the predictor matrix
    mm <- apply(x, 2, mean) # column means of the predictor matrix
    x <- scale(x, mm, FALSE) # center x about the means but don't scale
    simple <- if (missing(omega))
      TRUE
    else FALSE
    if (!simple) {
      # A full penalty matrix omega must arrive already eigen-decomposed;
      # rotate x into the eigenbasis scaled by the (floored) root eigenvalues.
      if (!all(match(c("values", "vectors"), names(omega),
                     FALSE)))
        stop("You must supply an eigen-decomposed version of omega")
      vals <- pmax(sqrt(.Machine$double.eps), sqrt(omega$values))
      basis <- scale(omega$vectors, FALSE, vals)
      x <- x %*% basis
    }
    # Ridge solution via the SVD of (centred) x: each singular direction is
    # shrunk by the factor dd^2 / (dd^2 + lambda).
    svd.x <- svd(x)
    dd <- svd.x$d
    if (!missing(df))
      lambda = df.gold(dd, df) # solve for the lambda that yields the requested df
    df = sum(dd^2/(dd^2 + lambda)) # effective degrees of freedom actually used
    y <- (t(t(y) %*% svd.x$u) * dd)/(dd^2 + lambda)
    coef <- svd.x$v %*% y
    fitted <- x %*% coef
    if (!simple)
      coef <- basis %*% coef # map coefficients back to the original basis
    structure(list(fitted.values = fitted, coefficients = coef,
                   df = df, lambda = lambda, xmeans = mm), class = "gen.ridge")
  }else {
    # Hierarchical variant: x is NOT centred here (pda() already centred and
    # scaled each per-subject block); the caller supplies the column means.
    # NOTE(review): if `means` is missing in this branch, `mm` is never
    # assigned and building the return object fails with "object 'mm' not
    # found" -- confirm that callers always pass `means` when h = TRUE.
    if(!missing(means)){
      mm <- means
    }
    if (missing(df) && lambda <= .Machine$double.eps)
      return(polyreg(x, y))
    simple <- if (missing(omega))
      TRUE
    else FALSE
    if (!simple) {
      if (!all(match(c("values", "vectors"), names(omega),
                     FALSE)))
        stop("You must supply an eigen-decomposed version of omega")
      vals <- pmax(sqrt(.Machine$double.eps), sqrt(omega$values))
      basis <- scale(omega$vectors, FALSE, vals)
      x <- x %*% basis
    }
    # Same SVD ridge solution as the h = FALSE branch, on the uncentred x.
    svd.x <- svd(x)
    dd <- svd.x$d
    if (!missing(df))
      lambda = df.gold(dd, df)
    df = sum(dd^2/(dd^2 + lambda))
    y <- (t(t(y) %*% svd.x$u) * dd)/(dd^2 + lambda)
    coef <- svd.x$v %*% y
    fitted <- x %*% coef
    if (!simple)
      coef <- basis %*% coef
    structure(list(fitted.values = fitted, coefficients = coef,
                   df = df, lambda = lambda, xmeans = mm), class = "gen.ridge")
  }
}
#gen.ridge(x, y, weights, lambda=1, omega, df, ...)
#gen.ridge.pda()
#-----------------------------------------------------------------
# CONTRAST FUNCTION
#-----------------------------------------------------------------
#contr.fda function
#runs matrix of contrasts to compute QR decomp. matrix
# Build the class-contrast matrix used by pda(): the supplied contrasts
# (Helmert by default) are made orthonormal with respect to the class
# proportions `p` via a QR decomposition, then mapped back to the original
# row metric.  Mirrors mda::contr.fda.
contr.pda <- function (p = rep(1, d[1]), contrast.default = contr.helmert(length(p)))
{
  d <- dim(contrast.default)           # `d` must exist before p's default is forced
  root.prop <- sqrt(p / sum(p))        # square roots of the class proportions
  # Weight each row of [1 | contrasts] by its root proportion
  weighted <- sweep(cbind(1, contrast.default), 1, root.prop, "*")
  decomp <- qr(weighted)
  rnk <- decomp$rank
  # Columns 2..rank of Q give an orthonormal basis excluding the constant term
  ortho <- qr.qy(decomp, diag(d[1])[, seq(2, rnk)])
  sweep(ortho, 1, root.prop, "/")      # undo the row weighting
}
#-----------------------------------------------------------------
# PDA FUNCTION
#-----------------------------------------------------------------
#PDA
#hier = level 1 hierarchy in data if applicable. For the thyroid data this would be subject
# Penalized discriminant analysis, extended from mda::fda to support two-level
# hierarchically structured data via the optional `hier` argument (e.g. nodes
# nested within subjects).  When `hier` is supplied, the score matrix and the
# per-unit-centred predictors are divided by each unit's size before fitting,
# so every level-1 unit contributes equally regardless of how many rows it has.
#
# Arguments:
#   formula     model formula (response ~ predictors)
#   data        data frame in which to evaluate the formula
#   hier        optional level-1 grouping variable (e.g. subject id)
#   weights     optional non-negative observation weights
#   theta       optional class scoring matrix (defaults to Helmert contrasts)
#   dimension   number of discriminant dimensions to keep; the default J - 1
#               is evaluated lazily, after J is computed in the body
#   eps         numerical tolerance
#   method      fitting function following the gen.ridge.pda interface
#   keep.fitted whether to retain fitted values on the returned object
#   ...         passed on to `method`
# Returns an object of class "fda" (see mda) with a confusion matrix attached.
pda <- function (formula = formula(data), data = sys.frame(sys.parent()), hier,
                 weights, theta, dimension = J - 1, eps = .Machine$double.eps,
                 method = gen.ridge.pda, keep.fitted = (n * dimension < 5000), ...)
{
  this.call <- match.call() # will add argument names if they weren't explicit
  m <- match.call(expand.dots = FALSE) # don't expand the ... arguments
  m[[1]] <- as.name("model.frame") # rewrite the call so it builds a model frame
  m <- m[match(names(m), c("", "formula", "data", "hier", "weights"), 0)] # keep only model-frame arguments
  m <- eval(m, parent.frame()) # evaluate the model frame in the caller's environment
  Terms <- attr(m, "terms") # terms attribute of the model frame
  g <- model.extract(m, "response") # response component of the model frame
  x <- model.matrix(Terms, m) # design (model) matrix
  if (attr(Terms, "intercept"))
    x = x[, -1, drop = FALSE] # if there's an intercept column, drop it from x
  dd <- dim(x)
  n <- dd[1] # number of records
  weights <- model.extract(m, weights)
  if (!length(weights))
    weights <- rep(1, n) # no weights supplied: use unit weights
  else if (any(weights < 0))
    stop("negative weights not allowed")
  if (length(g) != n)
    stop("g should have length nrow(x)")
  fg <- factor(g)
  prior <- table(fg) # table of the factored response variable
  prior <- prior/sum(prior) # converted to proportions
  cnames <- levels(fg) # response class names
  g <- as.numeric(fg) # convert factor levels to integer codes
  J <- length(cnames) # number of response classes
  iswt <- FALSE
  if (missing(weights))
    dp <- table(g)/n # unweighted class proportions
  else {
    weights <- (n * weights)/sum(weights) # normalize weights to sum to n
    dp <- tapply(weights, g, sum)/n # weighted class proportions
    iswt <- TRUE
  }
  if (missing(theta))
    theta <- contr.helmert(J) # default scoring: Helmert contrasts
  theta <- contr.pda(dp, theta) # orthonormalize the contrasts w.r.t. class proportions
  if (missing(hier)) {
    # Non-hierarchical path: identical to mda::fda
    Theta <- theta[g, , drop = FALSE] # expand the score matrix to one row per observation
    fit <- method(x, Theta, weights, ...) # penalized fit: x = predictors, Theta = scores
    if (iswt)
      Theta <- Theta * weights
    ssm <- t(Theta) %*% fitted(fit)/n # between-class cross-product matrix
  } else {
    # Hierarchical path: normalize each level-1 unit's contribution by its size
    hier <- model.extract(m, hier)
    N <- length(unique(hier)) # number of unique level-1 units
    # Theta placeholder (first row is dropped after the loop)
    dimT <- dim(theta) # dimensions of theta
    Tcol <- dimT[2] # number of score columns
    Theta = matrix(ncol=Tcol) # placeholder matrix for the stacked scores
    # my.gen.x placeholder for the stacked, per-unit-centred predictors
    # my.gen.x = data.matrix(x[0]) #create empty data matrix to fill
    my.gen.x <- matrix(ncol = ncol(x))
    colnames(my.gen.x) <- colnames(x)
    # my.gen.mm placeholder to collect the per-unit column means
    my.gen.mm <- matrix(ncol = ncol(x))
    colnames(my.gen.mm) <- colnames(x)
    # Attach the grouping variable to x so each unit's rows can be selected
    subj <- data.frame(m$`(hier)`)
    names(subj) <- "subj"
    x2 <- cbind(x, subj)
    # unique(x2$subj)
    # BEGIN THE LEVEL 1 FOR-LOOP
    for (i in unique(m$`(hier)`)){ # for each level-1 unit (subject...)
      # select this unit's rows from the model frame and predictor matrix
      hier.data <- m[m$`(hier)`==i,]
      #
      xi.var <- x2[x2$subj==i,]
      xi <- xi.var[,-which(names(xi.var) == "subj")]
      ddi <- dim(xi)
      ni <- ddi[1] # rows (nodes) in this unit
      gi <- model.extract(hier.data, "response")
      gi <- as.numeric(gi)
      # Unit's score rows, divided by the unit size so units weigh equally
      Thetai <- theta[gi, , drop=FALSE]
      Thetai <- Thetai/ni
      # Centre the unit's predictors about its own means, then divide by size
      my.gen.d <- dim(xi)
      my.gen.mmi <- apply(xi, 2, mean)
      my.gen.xi <- scale(xi, my.gen.mmi, FALSE)
      my.gen.xi <- my.gen.xi/ni
      my.gen.mm = rbind(my.gen.mm,my.gen.mmi) # stack the per-unit means
      Theta = rbind(Theta, Thetai) # stack the Thetai's
      my.gen.x = rbind(my.gen.x, my.gen.xi) # stack the centred predictor blocks
    } # end level-1 for-loop
    #
    # Drop the all-NA placeholder first row from each stacked matrix; the
    # double transpose coerces the result back to a matrix even when only one
    # row/column remains.
    # Theta <- Theta[-1,]
    # my.gen.x <- my.gen.x[-1,]
    # my.gen.mm <- my.gen.mm[-1,]
    Theta <- t(t(Theta[-1,]))
    my.gen.x <- t(t(my.gen.x[-1,]))
    my.gen.mm <- t(t(my.gen.mm[-1,]))
    mm <- apply(my.gen.mm, 2, mean) # average of the per-unit column means
    # Fit with the hierarchical variant of the method (h = TRUE skips
    # re-centring and records `mm` as the xmeans).
    fit <- method(my.gen.x, Theta, h=TRUE,means=mm, weights, ...) # my.gen.x = predictors, Theta = scores
    # the fit is a "gen.ridge" object: fitted.values, coefficients, df, lambda
    if (iswt)
      Theta <- Theta * weights
    ssm <- t(Theta) %*% fitted(fit)/N # between-class cross-product, scaled by unit count
  }
  # From here on: eigen-analysis of ssm, shared by both paths
  ed <- svd(ssm, nu = 0) # SVD of ssm; nu = 0 means no left singular vectors
  thetan <- ed$v # right singular vectors of ssm
  lambda <- ed$d # singular values of ssm
  # eps = .Machine$double.eps is the smallest positive double with 1 + eps != 1
  # dimension defaults to J - 1 (number of classes minus one)
  lambda[lambda > 1 - eps] <- 1 - eps # clamp near-1 values to avoid division blow-up below
  discr.eigen <- lambda/(1 - lambda)
  pe <- (100 * cumsum(discr.eigen))/sum(discr.eigen) # cumulative percent explained
  dimension <- min(dimension, sum(lambda > eps))
  if (dimension == 0) {
    warning("degenerate problem; no discrimination")
    return(structure(list(dimension = 0, fit = fit, call = this.call),
                     class = "fda"))
  }
  thetan <- thetan[, seq(dimension), drop = FALSE]
  pe <- pe[seq(dimension)]
  alpha <- sqrt(lambda[seq(dimension)])
  sqima <- sqrt(1 - lambda[seq(dimension)])
  vnames <- paste("v", seq(dimension), sep = "")
  means <- scale(theta %*% thetan, FALSE, sqima/alpha) # class means in discriminant space
  dimnames(means) <- list(cnames, vnames)
  names(lambda) <- c(vnames, rep("", length(lambda) - dimension))
  names(pe) <- vnames
  obj <- structure(list(percent.explained = pe, values = lambda,
                        means = means, theta.mod = thetan, dimension = dimension,
                        prior = prior, fit = fit, call = this.call, terms = Terms),
                   class = "fda")
  obj$confusion <- confusion(predict(obj), fg) # training confusion matrix
  if (!keep.fitted)
    obj$fit$fitted.values <- NULL # drop fitted values to keep the object small
  obj
}
#=================================================================================
#
# TEST THE FUNCTIONS WITH DATA BELOW
#
#=================================================================================
#-----------------------------------------------------------------
# TEST THE FUNCTION WITH NAIVE APPROACHES COMPARED TO KNOWN MDA
#-----------------------------------------------------------------
# IRIS - fda() and pda() without a hierarchy should match
data(iris)
head(iris)
# known mda::fda function with iris data
set.seed(2345)
irisfit <- fda(Species ~ ., data = iris, method=gen.ridge)
irisfit
confusion(irisfit, iris)
irisfit$confusion
plot(irisfit,coords=c(1,2))
coef(irisfit)
# str(irisfit)
# pda function with iris data (no hierarchy -> should reproduce fda)
set.seed(2345)
irisfit1 <- pda(Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width, data = iris, method=gen.ridge)
irisfit1
confusion(irisfit1, iris)
irisfit1$confusion
plot(irisfit1)
coef(irisfit1)
# str(irisfit1)
#-----------------------------------------------------------------
# CREATE TESTING IRIS DATA TO USE WITH HIERARCHY
#-----------------------------------------------------------------
# create iris data with an added (artificial) hierarchy
# NOTE: this hierarchy is generated to test the functionality of the code ONLY. There is no correlation
# here so the results should be less than interesting
data(iris)
my.iris <- iris
table(my.iris$Species)
my.iris$subject <- c(rep(1:50,each=3)) # 50 "subjects" with 3 rows (nodes) each
my.iris <- my.iris[with(my.iris, order(subject)), ]
#my.iris$node <- ave(my.iris$subject, my.iris$subject, FUN = seq_along)
#my.iris$indic <- sample(0:1, 150, replace=T) # random indicator variable
my.iris
set.seed(165)
my.iris <- my.iris[sample(nrow(my.iris), 100), ] # keep a random subset so unit sizes vary
# sort data by subject and add a within-subject node index
my.iris <- my.iris[order(my.iris$subject),]
my.iris$node <- ave(my.iris$subject, my.iris$subject, FUN = seq_along)
my.iris
# need to randomize the order of subject and node so that the outcome (species) will be spread over subjects
subnode <- my.iris[6:7]
subnode
names(subnode) <- c("sub1","node1")
set.seed(165)
ran <- subnode[sample(nrow(subnode)),]
my.iris <- cbind(my.iris, ran)
my.iris <- my.iris[,-c(6:7)] # drop the original (unshuffled) subject/node columns
names(my.iris)[names(my.iris)=="sub1"] <- "subject"
names(my.iris)[names(my.iris)=="node1"] <- "node"
my.iris <- my.iris[with(my.iris, order(subject)), ]
my.iris
#-----------------------------------------------------------------
# TEST THE FUNCTION WITH HIERARCHICALLY STRUCTURED DATA
#-----------------------------------------------------------------
# IRIS
head(my.iris)
# pda with hierarchical my.iris data (subject as the level-1 unit)
set.seed(2345)
irisfit2 <- pda(Species ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width, hier= subject,data = my.iris, method=gen.ridge)
irisfit2
#confusion(irisfit2, my.iris)
irisfit2$confusion
plot(irisfit2)
coef(irisfit2)
# Compare non-hierarchical to hierarchical fits on the same data
head(my.iris)
# pda ignoring the hierarchy
set.seed(2345)
irisfit.NOhier <- pda(Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width, data = my.iris, method=gen.ridge)
irisfit.NOhier
confusion(irisfit.NOhier, my.iris)
irisfit.NOhier$confusion
plot(irisfit.NOhier)
coef(irisfit.NOhier)
# str(irisfit.NOhier)
# pda using the hierarchy
set.seed(2345)
irisfit.hier <- pda(Species ~ Sepal.Length+Sepal.Width+Petal.Length+Petal.Width, hier= subject,data = my.iris, method=gen.ridge)
irisfit.hier
#confusion(irisfit.hier, my.iris)
irisfit.hier$confusion
plot(irisfit.hier)
coef(irisfit.hier)
|
#' Function to get database name.
#'
#' @author Stuart K. Grange
#'
#' @param con Database connection.
#'
#' @param extension For SQLite databases, should the database name include the
#' file name extension?
#'
#' @return Character vector of length one containing the database name.
#'
#' @export
db_name <- function(con, extension = TRUE) {
  
  if (db.class(con) == "postgres") {
    x <- db_get(con, "SELECT CURRENT_DATABASE()")[, 1]
  } else if (db.class(con) == "mysql") {
    x <- db_get(con, "SELECT DATABASE()")[, 1]
  } else if (db.class(con) == "sqlite") {
    # SQLite databases are files, so use the file name
    x <- basename(con@dbname)
    # Drop file extension; splitting on the first dot could be unreliable for
    # file names containing dots
    if (!extension) x <- stringr::str_split_fixed(x, "\\.", 2)[, 1]
  } else {
    # Previously an unsupported driver fell through and failed with a cryptic
    # "object 'x' not found"; fail with a clear message instead
    stop("Database connection type is not supported.", call. = FALSE)
  }
  
  # Return
  x
  
}
| /R/db_name.R | no_license | MohoWu/databaser | R | false | false | 693 | r | #' Function to get database name.
#'
#' @author Stuart K. Grange
#'
#' @param con Database connection.
#'
#' @param extension For SQLite databases, should the database name include the
#' file name extension?
#'
#' @export
db_name <- function(con, extension = TRUE) {
if (db.class(con) == "postgres")
x <- db_get(con, "SELECT CURRENT_DATABASE()")[, 1]
if (db.class(con) == "mysql")
x <- db_get(con, "SELECT DATABASE()")[, 1]
if (db.class(con) == "sqlite") {
# Get file name
x <- basename(con@dbname)
# Drop file extension, could be unreliable
if (!extension) x <- stringr::str_split_fixed(x, "\\.", 2)[, 1]
}
# Return
x
}
|
# Exploratory Data Analysis -- Plot 3: energy sub-metering for 2007-02-01/02.
# Reads the raw household power file and writes plot3.png.
# read dataset; "?" marks missing values in the raw file
dataset <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
# add a date_time column newDate (built from the raw strings BEFORE the Date
# column is converted), then transform all dates and times
cols <- c("Date", "Time")
dataset$newDate <- do.call(paste, c(dataset[cols], sep=" "))
dataset$Date <- as.Date(dataset$Date, "%d/%m/%Y")
dataset$Time <- strptime(dataset$Time, "%H:%M:%S")
dataset$newDate <- strptime(dataset$newDate, "%d/%m/%Y %H:%M:%S")
# subset data to the two days given in the assignment
dataset_subset <- subset(dataset, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02") )
# open png device, reset mfrow parameter, draw the three sub-metering series
png('plot3.png')
par(mfrow = c(1,1))
plot(dataset_subset$newDate, dataset_subset$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(dataset_subset$newDate, dataset_subset$Sub_metering_2, col = "red")
lines(dataset_subset$newDate, dataset_subset$Sub_metering_3, col = "blue")
legend("topright", legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))
dev.off()
| /plot3.R | no_license | iswiss/ExData_Plotting1 | R | false | false | 1,057 | r | # read dateset
dataset <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
# add a date_time column newDate and transform all dates and times
cols <- c("Date", "Time")
dataset$newDate <- do.call(paste, c(dataset[cols], sep=" "))
dataset$Date <- as.Date(dataset$Date, "%d/%m/%Y")
dataset$Time <- strptime(dataset$Time, "%H:%M:%S")
dataset$newDate <- strptime(dataset$newDate, "%d/%m/%Y %H:%M:%S")
# subset data to dates as given in assignment
dataset_subset <- subset(dataset, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02") )
# open png file, reset mfrow paramter and draw plot
png('plot3.png')
par(mfrow = c(1,1))
plot(dataset_subset$newDate, dataset_subset$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "")
lines(dataset_subset$newDate, dataset_subset$Sub_metering_2, col = "red")
lines(dataset_subset$newDate, dataset_subset$Sub_metering_3, col = "blue")
legend("topright", legend= c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QUANTILE.R
\name{QUANTILE}
\alias{QUANTILE}
\title{Quantile function}
\usage{
QUANTILE(family, p, param, size = 0)
}
\arguments{
\item{family}{distribution name; run the function distributions() for help}
\item{p}{values at which the quantile needs to be computed; between 0 and 1; (e.g 0.01, 0.05)}
\item{param}{parameters of the distribution; (1 x p)}
\item{size}{additional parameter for some discrete distributions; run the command distributions() for help}
}
\value{
\item{q}{quantile/VAR}
}
\description{
This function computes the quantile function of a univariate distribution
}
\examples{
family = "gaussian"
Q = 1 ; theta = matrix(c(-1.5, 1.7),1,2) ;
quantile = QUANTILE(family, (0.01), theta)
print('Quantile : ')
print(quantile)
}
| /man/QUANTILE.Rd | no_license | cran/GenHMM1d | R | false | true | 826 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QUANTILE.R
\name{QUANTILE}
\alias{QUANTILE}
\title{Quantile function}
\usage{
QUANTILE(family, p, param, size = 0)
}
\arguments{
\item{family}{distribution name; run the function distributions() for help}
\item{p}{values at which the quantile needs to be computed; between 0 and 1; (e.g 0.01, 0.05)}
\item{param}{parameters of the distribution; (1 x p)}
\item{size}{additional parameter for some discrete distributions; run the command distributions() for help}
}
\value{
\item{q}{quantile (Value-at-Risk, VaR)}
}
\description{
This function computes the quantile function of a univariate distribution
}
\examples{
family = "gaussian"
Q = 1 ; theta = matrix(c(-1.5, 1.7),1,2) ;
quantile = QUANTILE(family, (0.01), theta)
print('Quantile : ')
print(quantile)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/surv_box_plot.R
\name{exp_boxplot}
\alias{exp_boxplot}
\title{exp_boxplot}
\usage{
exp_boxplot(exp_hub)
}
\arguments{
\item{exp_hub}{an expression matrix for hubgenes}
}
\value{
A list of box plots, one for each gene in the matrix.
}
\description{
Draws a box plot for each gene in a hub-gene expression matrix.
}
\examples{
k = exp_boxplot(log2(exp_hub1+1));k[[1]]
}
\seealso{
\code{\link{geo_download}};\code{\link{draw_volcano}};\code{\link{draw_venn}}
}
\author{
Xiaojie Sun
}
| /man/exp_boxplot.Rd | no_license | nyj123/tinyarray | R | false | true | 531 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/surv_box_plot.R
\name{exp_boxplot}
\alias{exp_boxplot}
\title{exp_boxplot}
\usage{
exp_boxplot(exp_hub)
}
\arguments{
\item{exp_hub}{an expression matrix for hubgenes}
}
\value{
A list of box plots, one for each gene in the matrix.
}
\description{
Draws a box plot for each gene in a hub-gene expression matrix.
}
\examples{
k = exp_boxplot(log2(exp_hub1+1));k[[1]]
}
\seealso{
\code{\link{geo_download}};\code{\link{draw_volcano}};\code{\link{draw_venn}}
}
\author{
Xiaojie Sun
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{contaminationSim}
\alias{contaminationSim}
\title{contaminationSim}
\format{A list}
\usage{
contaminationSim
}
\description{
Generated by simulateContaminatedMatrix
}
\details{
A toy contamination dataset generated by simulateContaminatedMatrix.
}
\keyword{datasets}
| /man/contaminationSim.Rd | permissive | Irisapo/celda | R | false | true | 369 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{contaminationSim}
\alias{contaminationSim}
\title{contaminationSim}
\format{A list}
\usage{
contaminationSim
}
\description{
Generated by simulateContaminatedMatrix
}
\details{
A toy contamination dataset generated by simulateContaminatedMatrix.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gghmd.R
\name{gghmd}
\alias{gghmd}
\title{gghmd function}
\usage{
gghmd(my.df, loc = "USA")
}
\arguments{
\item{my.df}{An hmd_pop data frame used as input.}
\item{loc}{Country code (e.g. "USA").}
}
\value{
ggplot2 graph
}
\description{
This function takes an hmd_pop data frame as input and uses ggplot2
to produce a simple population plot for the selected country over the available time frame.
}
\examples{
gghmd(hmd_pop)
}
% NOTE: the roxygen @keywords entry was garbled; intended note: loc -- country code (e.g. "USA")
\keyword{loc}
| /man/gghmd.Rd | permissive | ramamet/hmdR | R | false | true | 520 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gghmd.R
\name{gghmd}
\alias{gghmd}
\title{gghmd function}
\usage{
gghmd(my.df, loc = "USA")
}
\arguments{
\item{my.df}{An hmd_pop data frame used as input.}
\item{loc}{Country code (e.g. "USA").}
}
\value{
ggplot2 graph
}
\description{
This function takes an hmd_pop data frame as input and uses ggplot2
to produce a simple population plot for the selected country over the available time frame.
}
\examples{
gghmd(hmd_pop)
}
% NOTE: the roxygen @keywords entry was garbled; intended note: loc -- country code (e.g. "USA")
\keyword{loc}
|
#
# This is a template Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Author: Owen Bezick
#
# Source Libraries
# Libraries
# Shiny
library(shinydashboard)
library(shinyWidgets)
library(quanteda)
library(leaflet)
library(plotly)
library(htmltools)
# Data
library(dplyr)
library(lubridate)
library(tidyverse)
# UI ----
# UI definition: a shinydashboard layout with three tabs (Welcome, Data
# Exploration, Report).  Output ids referenced here ("aboutText",
# "modernPiracy", "regionfilter", "timeFilter", "map", "time_plotly",
# "island") are rendered in the server function below.
ui <- dashboardPage(
  dashboardHeader(title = "Pirate Attacks"
  )
  # Sidebar ----
  , dashboardSidebar(
    sidebarMenu(
      menuItem(tabName = "welcome", text = "Welcome", icon = icon("info"))
      , menuItem(tabName = "dataExploration", text = "Data Exploration", icon = icon("data"))
      , menuItem(tabName = "report", text = "Report", icon = icon("data"))
    )
  )
  # Body ----
  , dashboardBody(
    tabItems(
      # Welcome: project description plus the filterable attack map ----
      tabItem(
        tabName = "welcome"
        , fluidRow(
          box(width = 12, title = "About our Project", status = "primary"
              , column(width = 6
                       , HTML("<b> About </b>")
                       , uiOutput("aboutText")
              )
              , column(width = 6
                       , HTML("<b> Modern Piracy </b>")
                       , uiOutput("modernPiracy")
              )
          )
        )
        , fluidRow(
          box(width = 12, title = "Attack Narration Map: Explore the Data!", status = "primary"
              , fluidRow(
                column(width = 6
                       , HTML("<b> Filter By: </b>")
                       , uiOutput("regionfilter")
                )
                , column(width = 6
                         , uiOutput("timeFilter")
                )
              )
              , box(width = 12
                    , leafletOutput("map", height = "750")
              )
          )
        )
      )
      # Data Viz: plotly charts for time-of-day and island-nation questions ----
      , tabItem(
        tabName = "dataExploration"
        , fluidRow(
          column(width = 6
                 , plotlyOutput("time_plotly")
          )
          , column(width = 6
                   , plotlyOutput("island")
          )
        )
      )
      # Report: written findings ----
      , tabItem(
        tabName = "report"
        , fluidRow(
          box(width = 12, title = "Our Findings", status = "primary"
              , column(width = 6
                       , HTML("<b> Time of Day Results </b>")
              )
              , column(width = 6
                       , HTML("<b> Island Nation Results </b>")
              )
          )
        )
      )
    )
  )
)
# Define server logic
# Server logic ----
# Loads the pirate-attack data set once, then serves the welcome-page text,
# a leaflet map of the (region/time) filtered attacks, and two exploratory
# plotly charts.
server <- function(input, output) {
  # Data Import ----
  pirate <- read_rds("df_pirate.RDS")
  # Total number of recorded attacks; used to express per-flag counts as a
  # share of all attacks (previously hard-coded as 163).
  n_attacks <- nrow(pirate)
  # Welcome ----
  output$aboutText <- renderText("For the Pirate Attack Project, we chose to look at the International Maritime Bureau’s
data on piracy world from 2015-2019, focusing on 2019. Misconceptions about modern piracy
flood our imaginations with pictures of eye patches, skull & crossbones, and scruffy men
yelling “arrrrgh”. This is not reality, however. The Pirate Attack Project seeks to dispel
these misconceptions and shed light on the trends and issues surrounding theft on the high
seas in 2020. Through interactive maps, charts, and authentic attack narrations, we explore
questions like “Are ships from island nations more likely to experience attacks?” or “What
time of day should crews be most on their guard against intruders?”. We are intrigued as to
how the Pirate Attack Project will change our (and hopefully your) thinking about piracy.")
  output$modernPiracy <- renderText("A partial definition according to the International Maritime Bureau says “piracy” is
“any illegal acts of violence or detention, or any act of depredation, committed for private ends by the crew or the
passengers of a private ship or a private aircraft, and directed on the high seas, against another ship or aircraft,
or against persons or property on board such ship or aircraft.”Modern pirates are not usually carefree adventurers looking
for some treasure and a good time. Often, pirates are poor men using rafts, old boats, and a variety of simple weapons to
carry out amateur attacks. For example, when international fishing vessels began encroaching on Somali waters, depleting
fish stocks and forcing fishermen out of business, Somali pirate groups began to form. In the Gulf of Aden, Somali pirates
soon became a high-profile issue. Next, did you know that the “gold” for modern pirates is not a heavy yellow metal? Ransoms
paid to recover hostage sailors are the true modern “treasures” in the world of piracy. Sailors face this continual threat in
areas like the Gulf of Guinea, the Strait of Malacca, the Indian Ocean, and the Singapore Straits. Have you ever thought of
insurance costs involved with a 21st century pirate attack? Many ships refrain from reporting incidents to avoid higher insurance
costs. Several other factors influence the likelihood of piracy today, such as the flag your ship flies, the time of day, or the
city where your ship is berthed.")
  # MAP ----
  # Distinct regions drive the choices of the region filter.
  ls_region <- unique(pirate$region)
  # Region filter (multi-select, everything selected by default)
  output$regionfilter <- renderUI({
    pickerInput(inputId = "region", label = "Region", choices = ls_region,
                multiple = TRUE, selected = ls_region,
                options = list(`actions-box` = TRUE))
  })
  # Hour-of-day filter; times are stored as 24-hour integers (e.g. 1330)
  output$timeFilter <- renderUI({
    sliderInput("time", "Hour of Day:", min = 0, max = 2400, value = c(0, 2400), step = 100)
  })
  # Reactive data frame of attacks in the selected regions and time window.
  # FIX: bounds are now inclusive, so attacks at exactly the slider
  # endpoints (e.g. time == 0 with the default range) are no longer dropped.
  pirate_R <- reactive({
    req(input$region, input$time)
    pirate %>%
      filter(region %in% input$region) %>%
      filter(time >= input$time[1] & time <= input$time[2])
  })
  # Viz ----
  # Marker icon for attack locations
  shipIcon <- makeIcon(
    iconUrl = "historic_ship.png",
    iconWidth = 30, iconHeight = 30,
    iconAnchorX = 0, iconAnchorY = 0
  )
  # Leaflet map of the filtered attacks.
  # FIX: markers, popups and labels are now built from the filtered reactive
  # data `df`; the original read the unfiltered `pirate` data frame, so the
  # region/time filters never changed what the map displayed.
  output$map <- renderLeaflet({
    df <- pirate_R()
    df %>%
      leaflet() %>%
      addProviderTiles(providers$Esri.WorldImagery, group = "World Imagery (default)") %>%
      addProviderTiles(providers$Stamen.TonerLite, group = "Toner Lite") %>%
      addMarkers(df$longitude, df$latitude,
                 clusterOptions = markerClusterOptions(removeOutsideVisibleBounds = FALSE),
                 popup = paste0("Ship Name: ", df$ship_name,
                                "<br>",
                                "Flag: ", df$flag,
                                "<br>",
                                df$narration,
                                "<br>"),
                 label = ~htmlEscape(ship_name),
                 icon = shipIcon) %>%
      addLayersControl(baseGroups = c( "World Imagery (default)", "Toner Lite"),
                       options = layersControlOptions(collapsed = FALSE))
  })
  # Data Exploration ----
  # Density of attack times, one curve per region.
  # FIX: dropped `binwidth`/`boundary`, which are histogram parameters that
  # geom_density() does not accept (ggplot2 only warned and ignored them).
  time_plot <- pirate %>%
    ggplot(aes(x = time)) +
    geom_density(aes(color = region)) +
    scale_x_continuous(breaks = seq(0, 2359, by = 200)) +
    labs(title = "Attacks Per Hour",
         subtitle = "What time of day was a ship more likely to be attacked?",
         caption = "Source: International Maritime Bureau",
         x = "Hour",
         y = "Attacks") +
    theme(axis.text.x = element_text(angle = 45))
  # Plotly version with an HTML title/subtitle.
  # FIX: removed `tooltip = "text"` -- this plot defines no `text`
  # aesthetic, so that setting left the hover tooltips empty.
  output$time_plotly <- renderPlotly({
    ggplotly(time_plot) %>%
      layout(title = list(text = paste0('Attacks Per Hour',
                                        '<br>',
                                        '<sup>',
                                        'What time of day was a ship more likely to be attacked?',
                                        '</sup>')))
  })
  # Island Graph ----
  # Share of attacks per ship flag (top 10), island vs mainland nations.
  # NOTE(review): `islands` (the vector of island-nation flags) is not
  # defined in this file -- it must be supplied elsewhere; confirm.
  island_plot <- pirate %>%
    group_by(flag) %>%
    count(sort = TRUE) %>%
    mutate(frequency = (n / n_attacks)   # share of all attacks (was hard-coded 163)
           , typeC = case_when(
             flag %in% islands ~ "Island Nation", TRUE ~ "Mainland Nation")
           , percentage = frequency * 100) %>%
    head(10) %>%
    ggplot() +
    geom_point(aes(x = reorder(flag, desc(frequency)), y = frequency, color = typeC, text = sprintf("Frequency: %.2f%% <br>Number of Ships Attacked: %.0f<br> ", percentage, n)
    )
    ) +
    scale_y_continuous(labels = scales::percent) +
    labs(title = "Frequency of Pirate Attacks For Island Nations Versus Mainland Nations", subtitle = "Are island nations’ ships more likely to experience attacks?", caption = "Source: International Maritime Bureau", x = "Origin Country of Ship", y = "Frequency") +
    theme(legend.title = element_blank()) +
    theme(axis.text.x = element_text(angle = 45))
  output$island <- renderPlotly({
    ggplotly(island_plot, tooltip = "text") %>%
      layout(title = list(text = paste0('Frequency of Pirate Attacks For Island Nations Versus Mainland Nations',
                                       '<br>',
                                       '<sup>',
                                       'Are island nations’ ships more likely to experience attacks?',
                                       '</sup>')))
  })
  # Report Server ----
}
# Run the application
shinyApp(ui = ui, server = server) | /app.R | no_license | owbezick/Pirate-Attacks | R | false | false | 10,888 | r | #
# This is a template Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Author: Owen Bezick
#
# Source Libraries
# Libraries
# Shiny
library(shinydashboard)
library(shinyWidgets)
library(quanteda)
library(leaflet)
library(plotly)
library(htmltools)
# Data
library(dplyr)
library(lubridate)
library(tidyverse)
# UI ----
# UI ----
# shinydashboard layout: a header, a three-item sidebar, and a body with one
# tab per sidebar item (welcome / data exploration / report).  Output IDs
# referenced here (aboutText, modernPiracy, regionfilter, timeFilter, map,
# time_plotly, island) are rendered by the server function below.
ui <- dashboardPage(
dashboardHeader(title = "Pirate Attacks"
)
# Sidebar ----
, dashboardSidebar(
sidebarMenu(
# NOTE(review): icon("data") is presumably not a valid Font Awesome icon
# name -- confirm; shiny may warn and show no icon for these two items.
menuItem(tabName = "welcome", text = "Welcome", icon = icon("info"))
, menuItem(tabName = "dataExploration", text = "Data Exploration", icon = icon("data"))
, menuItem(tabName = "report", text = "Report", icon = icon("data"))
)
)
# Body ----
, dashboardBody(
tabItems(
# Welcome ----
# Two text boxes (about / modern piracy) above the filterable attack map.
tabItem(
tabName = "welcome"
, fluidRow(
box(width = 12, title = "About our Project", status = "primary"
, column(width = 6
, HTML("<b> About </b>")
, uiOutput("aboutText")
)
, column(width = 6
, HTML("<b> Modern Piracy </b>")
, uiOutput("modernPiracy")
)
)
)
, fluidRow(
box(width = 12, title = "Attack Narration Map: Explore the Data!", status = "primary"
, fluidRow(
column(width = 6
, HTML("<b> Filter By: </b>")
, uiOutput("regionfilter")
)
, column(width = 6
, uiOutput("timeFilter")
)
)
, box(width = 12
, leafletOutput("map", height = "750")
)
)
)
)
# Data Viz ----
# Two side-by-side plotly charts: attacks by hour and attacks by flag.
, tabItem(
tabName = "dataExploration"
, fluidRow(
column(width = 6
, plotlyOutput("time_plotly")
)
, column(width = 6
, plotlyOutput("island")
)
)
)
# Report ----
# Static findings placeholders (content not yet filled in).
, tabItem(
tabName = "report"
, fluidRow(
box(width = 12, title = "Our Findings", status = "primary"
, column(width = 6
, HTML("<b> Time of Day Results </b>")
)
, column(width = 6
, HTML("<b> Island Nation Results </b>")
)
)
)
)
)
)
)
# Define server logic
# Server logic ----
# Loads the pirate-attack data set once, then serves the welcome-page text,
# a leaflet map of the (region/time) filtered attacks, and two exploratory
# plotly charts.
server <- function(input, output) {
  # Data Import ----
  pirate <- read_rds("df_pirate.RDS")
  # Total number of recorded attacks; used to express per-flag counts as a
  # share of all attacks (previously hard-coded as 163).
  n_attacks <- nrow(pirate)
  # Welcome ----
  output$aboutText <- renderText("For the Pirate Attack Project, we chose to look at the International Maritime Bureau’s
data on piracy world from 2015-2019, focusing on 2019. Misconceptions about modern piracy
flood our imaginations with pictures of eye patches, skull & crossbones, and scruffy men
yelling “arrrrgh”. This is not reality, however. The Pirate Attack Project seeks to dispel
these misconceptions and shed light on the trends and issues surrounding theft on the high
seas in 2020. Through interactive maps, charts, and authentic attack narrations, we explore
questions like “Are ships from island nations more likely to experience attacks?” or “What
time of day should crews be most on their guard against intruders?”. We are intrigued as to
how the Pirate Attack Project will change our (and hopefully your) thinking about piracy.")
  output$modernPiracy <- renderText("A partial definition according to the International Maritime Bureau says “piracy” is
“any illegal acts of violence or detention, or any act of depredation, committed for private ends by the crew or the
passengers of a private ship or a private aircraft, and directed on the high seas, against another ship or aircraft,
or against persons or property on board such ship or aircraft.”Modern pirates are not usually carefree adventurers looking
for some treasure and a good time. Often, pirates are poor men using rafts, old boats, and a variety of simple weapons to
carry out amateur attacks. For example, when international fishing vessels began encroaching on Somali waters, depleting
fish stocks and forcing fishermen out of business, Somali pirate groups began to form. In the Gulf of Aden, Somali pirates
soon became a high-profile issue. Next, did you know that the “gold” for modern pirates is not a heavy yellow metal? Ransoms
paid to recover hostage sailors are the true modern “treasures” in the world of piracy. Sailors face this continual threat in
areas like the Gulf of Guinea, the Strait of Malacca, the Indian Ocean, and the Singapore Straits. Have you ever thought of
insurance costs involved with a 21st century pirate attack? Many ships refrain from reporting incidents to avoid higher insurance
costs. Several other factors influence the likelihood of piracy today, such as the flag your ship flies, the time of day, or the
city where your ship is berthed.")
  # MAP ----
  # Distinct regions drive the choices of the region filter.
  ls_region <- unique(pirate$region)
  # Region filter (multi-select, everything selected by default)
  output$regionfilter <- renderUI({
    pickerInput(inputId = "region", label = "Region", choices = ls_region,
                multiple = TRUE, selected = ls_region,
                options = list(`actions-box` = TRUE))
  })
  # Hour-of-day filter; times are stored as 24-hour integers (e.g. 1330)
  output$timeFilter <- renderUI({
    sliderInput("time", "Hour of Day:", min = 0, max = 2400, value = c(0, 2400), step = 100)
  })
  # Reactive data frame of attacks in the selected regions and time window.
  # FIX: bounds are now inclusive, so attacks at exactly the slider
  # endpoints (e.g. time == 0 with the default range) are no longer dropped.
  pirate_R <- reactive({
    req(input$region, input$time)
    pirate %>%
      filter(region %in% input$region) %>%
      filter(time >= input$time[1] & time <= input$time[2])
  })
  # Viz ----
  # Marker icon for attack locations
  shipIcon <- makeIcon(
    iconUrl = "historic_ship.png",
    iconWidth = 30, iconHeight = 30,
    iconAnchorX = 0, iconAnchorY = 0
  )
  # Leaflet map of the filtered attacks.
  # FIX: markers, popups and labels are now built from the filtered reactive
  # data `df`; the original read the unfiltered `pirate` data frame, so the
  # region/time filters never changed what the map displayed.
  output$map <- renderLeaflet({
    df <- pirate_R()
    df %>%
      leaflet() %>%
      addProviderTiles(providers$Esri.WorldImagery, group = "World Imagery (default)") %>%
      addProviderTiles(providers$Stamen.TonerLite, group = "Toner Lite") %>%
      addMarkers(df$longitude, df$latitude,
                 clusterOptions = markerClusterOptions(removeOutsideVisibleBounds = FALSE),
                 popup = paste0("Ship Name: ", df$ship_name,
                                "<br>",
                                "Flag: ", df$flag,
                                "<br>",
                                df$narration,
                                "<br>"),
                 label = ~htmlEscape(ship_name),
                 icon = shipIcon) %>%
      addLayersControl(baseGroups = c( "World Imagery (default)", "Toner Lite"),
                       options = layersControlOptions(collapsed = FALSE))
  })
  # Data Exploration ----
  # Density of attack times, one curve per region.
  # FIX: dropped `binwidth`/`boundary`, which are histogram parameters that
  # geom_density() does not accept (ggplot2 only warned and ignored them).
  time_plot <- pirate %>%
    ggplot(aes(x = time)) +
    geom_density(aes(color = region)) +
    scale_x_continuous(breaks = seq(0, 2359, by = 200)) +
    labs(title = "Attacks Per Hour",
         subtitle = "What time of day was a ship more likely to be attacked?",
         caption = "Source: International Maritime Bureau",
         x = "Hour",
         y = "Attacks") +
    theme(axis.text.x = element_text(angle = 45))
  # Plotly version with an HTML title/subtitle.
  # FIX: removed `tooltip = "text"` -- this plot defines no `text`
  # aesthetic, so that setting left the hover tooltips empty.
  output$time_plotly <- renderPlotly({
    ggplotly(time_plot) %>%
      layout(title = list(text = paste0('Attacks Per Hour',
                                        '<br>',
                                        '<sup>',
                                        'What time of day was a ship more likely to be attacked?',
                                        '</sup>')))
  })
  # Island Graph ----
  # Share of attacks per ship flag (top 10), island vs mainland nations.
  # NOTE(review): `islands` (the vector of island-nation flags) is not
  # defined in this file -- it must be supplied elsewhere; confirm.
  island_plot <- pirate %>%
    group_by(flag) %>%
    count(sort = TRUE) %>%
    mutate(frequency = (n / n_attacks)   # share of all attacks (was hard-coded 163)
           , typeC = case_when(
             flag %in% islands ~ "Island Nation", TRUE ~ "Mainland Nation")
           , percentage = frequency * 100) %>%
    head(10) %>%
    ggplot() +
    geom_point(aes(x = reorder(flag, desc(frequency)), y = frequency, color = typeC, text = sprintf("Frequency: %.2f%% <br>Number of Ships Attacked: %.0f<br> ", percentage, n)
    )
    ) +
    scale_y_continuous(labels = scales::percent) +
    labs(title = "Frequency of Pirate Attacks For Island Nations Versus Mainland Nations", subtitle = "Are island nations’ ships more likely to experience attacks?", caption = "Source: International Maritime Bureau", x = "Origin Country of Ship", y = "Frequency") +
    theme(legend.title = element_blank()) +
    theme(axis.text.x = element_text(angle = 45))
  output$island <- renderPlotly({
    ggplotly(island_plot, tooltip = "text") %>%
      layout(title = list(text = paste0('Frequency of Pirate Attacks For Island Nations Versus Mainland Nations',
                                       '<br>',
                                       '<sup>',
                                       'Are island nations’ ships more likely to experience attacks?',
                                       '</sup>')))
  })
  # Report Server ----
}
# Run the application
shinyApp(ui = ui, server = server) |
rm(list=ls(all=TRUE))
pkgName <- "ternaryplot"
# Locate the package sources on this machine.
# FIX: the Linux branch used to call setwd() only and never defined `pkgDir`,
# which file.path( pkgDir, pkgName, "R" ) requires further down; it is now
# set on both platforms (the working-directory change is kept).
if( Sys.info()[["sysname"]] == "Linux" ){
    pkgDir <- sprintf(
        "/home/%s/Dropbox/_WORK/_PROJECTS/r_packages/%s/pkg",
        Sys.info()[[ "user" ]], pkgName )
    setwd( file.path( pkgDir, pkgName ) )
}else{
    pkgDir <- sprintf(
        "%s/_WORK/_PROJECTS/r_packages/%s/pkg",
        Sys.getenv("dropboxPath"), pkgName )
}
# Files to be sourced first (order matters)
sourceFiles <- c(
"aa00-ternaryplot-package.R",
"aa01-ternaryplot-options.R",
"aa02-ternaryplot-classes.R",
"aa03-ternaryplot-classes-utility.R",
"aa04-ternarysystems.R",
"aa05-ternarydata.R",
"aa06-ternary2xy.R",
"aa07-plotUtilities.R"
)
# Find all the R files
allRFiles <- list.files(
path = file.path( pkgDir, pkgName, "R" ),
pattern = ".R",
ignore.case = TRUE,
full.names = FALSE
)
allRFiles <- allRFiles[ !grepl( x = allRFiles, pattern = "R~",
fixed = TRUE ) ]
allRFiles <- allRFiles[ !(allRFiles %in% sourceFiles) ]
# Find the dependencies in the description
desc <- utils::packageDescription(
pkg = pkgName,
lib.loc = pkgDir )
# Collect the package names listed in the given DESCRIPTION fields,
# dropping the R version requirement itself (e.g. "R (>= 3.0.0)").
#
# d    : result of utils::packageDescription() (list-like; fields by name)
# what : DESCRIPTION fields to scan
#
# FIX: an absent field (NULL) used to make `strsplit(...)[[ 1L ]]` fail
# with "subscript out of bounds"; absent fields now contribute nothing.
findDeps <- function( d, what = c( "Depends", "Suggests", "Imports" ) ){
    return( unique( unlist( lapply( X = what, FUN = function(w){
        out <- d[[ w ]]
        if( is.null( out ) ){ return( character(0) ) }
        # strip newlines and spaces, then split the comma-separated list
        out <- gsub( x = out, pattern = "\n", replacement = "" )
        out <- gsub( x = out, pattern = " ", replacement = "" )
        out <- unlist( strsplit( x = out, split = "," ) )
        return( out[ !grepl( x = out, pattern = "R(>=", fixed = TRUE ) ] )
    } ) ) ) )
}
(deps <- findDeps( d = desc ))
for( p in deps ){
library( package = p, character.only = TRUE )
}
for( f in sourceFiles ){
source( file = file.path( pkgDir, pkgName, "R", f ) )
}
for( f in allRFiles ){
source( file = file.path( pkgDir, pkgName, "R", f ) )
}
.setPackageArguments( pkgname = "ternaryplot" )
# Otherwise set by .onAttach()
| /prepare/ternaryplot_source.R | no_license | julienmoeys/ternaryplot | R | false | false | 2,042 | r |
rm(list=ls(all=TRUE))
pkgName <- "ternaryplot"
# Locate the package sources on this machine.
# FIX: the Linux branch used to call setwd() only and never defined `pkgDir`,
# which file.path( pkgDir, pkgName, "R" ) requires further down; it is now
# set on both platforms (the working-directory change is kept).
if( Sys.info()[["sysname"]] == "Linux" ){
    pkgDir <- sprintf(
        "/home/%s/Dropbox/_WORK/_PROJECTS/r_packages/%s/pkg",
        Sys.info()[[ "user" ]], pkgName )
    setwd( file.path( pkgDir, pkgName ) )
}else{
    pkgDir <- sprintf(
        "%s/_WORK/_PROJECTS/r_packages/%s/pkg",
        Sys.getenv("dropboxPath"), pkgName )
}
# Files to be sourced first (order matters)
sourceFiles <- c(
"aa00-ternaryplot-package.R",
"aa01-ternaryplot-options.R",
"aa02-ternaryplot-classes.R",
"aa03-ternaryplot-classes-utility.R",
"aa04-ternarysystems.R",
"aa05-ternarydata.R",
"aa06-ternary2xy.R",
"aa07-plotUtilities.R"
)
# Find all the R files
allRFiles <- list.files(
path = file.path( pkgDir, pkgName, "R" ),
pattern = ".R",
ignore.case = TRUE,
full.names = FALSE
)
allRFiles <- allRFiles[ !grepl( x = allRFiles, pattern = "R~",
fixed = TRUE ) ]
allRFiles <- allRFiles[ !(allRFiles %in% sourceFiles) ]
# Find the dependencies in the description
desc <- utils::packageDescription(
pkg = pkgName,
lib.loc = pkgDir )
# Collect the package names listed in the given DESCRIPTION fields,
# dropping the R version requirement itself (e.g. "R (>= 3.0.0)").
#
# d    : result of utils::packageDescription() (list-like; fields by name)
# what : DESCRIPTION fields to scan
#
# FIX: an absent field (NULL) used to make `strsplit(...)[[ 1L ]]` fail
# with "subscript out of bounds"; absent fields now contribute nothing.
findDeps <- function( d, what = c( "Depends", "Suggests", "Imports" ) ){
    return( unique( unlist( lapply( X = what, FUN = function(w){
        out <- d[[ w ]]
        if( is.null( out ) ){ return( character(0) ) }
        # strip newlines and spaces, then split the comma-separated list
        out <- gsub( x = out, pattern = "\n", replacement = "" )
        out <- gsub( x = out, pattern = " ", replacement = "" )
        out <- unlist( strsplit( x = out, split = "," ) )
        return( out[ !grepl( x = out, pattern = "R(>=", fixed = TRUE ) ] )
    } ) ) ) )
}
(deps <- findDeps( d = desc ))
for( p in deps ){
library( package = p, character.only = TRUE )
}
for( f in sourceFiles ){
source( file = file.path( pkgDir, pkgName, "R", f ) )
}
for( f in allRFiles ){
source( file = file.path( pkgDir, pkgName, "R", f ) )
}
.setPackageArguments( pkgname = "ternaryplot" )
# Otherwise set by .onAttach()
|
testlist <- list(data = structure(c(NaN, NaN, -Inf, 4.94065645841247e-324 ), .Dim = c(2L, 2L)), q = 5.44244545691763e-109)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) | /biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556480-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 192 | r | testlist <- list(data = structure(c(NaN, NaN, -Inf, 4.94065645841247e-324 ), .Dim = c(2L, 2L)), q = 5.44244545691763e-109)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result) |
test.bayes<-function(vars, C, test_data) {
# Bayesian conditional-independence test (a hack around the 'deal' package),
# implementing the test from Margaritis 2009.
# vars - the pair of variables whose independence is tested
# C - conditioning set as a list of variable indexes
# test_data - data for the test:
# test_data$N sample size
# test_data$p_threshold prior probability of independence
# - deprecated: test_data$X the samples used, now use test_data$df rather
# test_data$df: dataframe of test_data$X
# test_data$alpha: imaginary sample size passed to the joint prior
# Returns a list: vars, C, independent (logical), w (log-Bayes-factor-style
# weight in favour of the winning model), p (posterior probability of
# independence), prob_dep (posterior probability of dependence).
# This function uses the 'deal' package code for calculating the local score.
# The function is currently not particularly fast but it is not the bottleneck.
# It would be relatively easy to reuse some of the calculated local scores again.
# For faster operation it is just easier to use the BIC approximation in test.BIC
# which often gives as good of a performance.
# Sorry a rather ugly hack of the deal code. No easy-to-use score for linear
# gaussian existed for R at the moment of writing.
# deal needs a data frame; only take the two variables and the conditioning set
if (is.null(test_data$df)){
df <- data.frame(test_data$X[,c(vars,C)])
} else {
df <- test_data$df[,c(vars,C)]
}
# here using the package deal for the test
nw<-network(df)
#ALL TEST IN THE PAPER WERE RUN WITH BELOW
#OPTIMAL PRIOR alpha 1.5 and p_threshold 0.1
#prior<-jointprior.mod(nw,1.5,phiprior="bottcher")
prior<-jointprior.mod(nw,test_data$alpha,phiprior="bottcher")
# Independent model: node 1's parents are only the conditioning set
# (columns 3..ncol(df) of `df`; column 2 -- the other tested variable --
# is excluded).
if (length(C) == 0 ) {
nw$nodes[[1]]$parents<-c()
} else {
nw$nodes[[1]]$parents<-index(3,ncol(df))
}
#these are just some spells from deal code
node <- nw$nodes[[1]]
node <- cond.node( node, nw, prior )
node$condposterior <- node$condprior
node$loglik <- 0
node <- learnnode( node, nw, df, timetrace = FALSE )
#finally the logp of the independent model
logpind<-node$loglik
# Dependent model: additionally add the second tested variable (column 2)
# to the parents of node 1.
if (length(C) == 0 ) {
nw$nodes[[1]]$parents<-2
} else {
nw$nodes[[1]]$parents<-index(2,ncol(df))
}
node <- nw$nodes[[1]]
node <- cond.node( node, nw, prior )
node$condposterior <- node$condprior
node$loglik <- 0
node <- learnnode( node, nw, df, timetrace = FALSE )
#and get the logp of the dependent model
logpdep<-node$loglik
#then add the priors in the log space
#p_threshold is the prior prob of independence
priors<-c(1-test_data$p_threshold,test_data$p_threshold)
logp<-c(logpdep,logpind)+log(priors)
# posterior probability vector (normalized in a numerically stable way)
p <- exp(logp - max(logp))
p <- p/sum(p)
test_result<-list()
test_result$vars<-vars
test_result$C<-C
test_result$independent <- ( logp[2] > logp[1] )
# w: log-posterior margin of the winning hypothesis over the losing one
if ( test_result$independent) {
#independence
test_result$w<-logp[2]-logp[1]
} else {
#dependence
test_result$w<-logp[1]-logp[2]
}
test_result$p<-p[2]; #putting in the probability of independence
test_result$prob_dep<-p[1];
test_result
} | /R/tests/test.bayes.R | no_license | caus-am/dom_adapt | R | false | false | 3,115 | r | test.bayes<-function(vars, C, test_data) {
# Function implementing (with a hack) the Bayesian test from Margaritis2009.
# vars - variables used to test the independence
# C - conditining set as a list of variable indexes
# test_data - data for the test:
# test_data$N sample size
# test_data$p_threshold prior probability of independence
# - deprecated: test_data$X the samples used, now use test_data$df rather
# test_data$df: dataframe of test_data$X
# test_data$alpha
# This function uses the 'deal' package code for calculating the local score.
# The function is currently not particularly fast but it is not the bottleneck.
# It would be relatively easy to reuse some of the calculated local scores again.
# For faster operation it is just easier to use the BIC approximation in test.BIC
# which often gives as good of a performance.
# Sorry a rather ugly hack of the deal code. No easy to use score for linear gaussian
# existed for R at the moment of writing.
# deal needs a data frame, only take the variables and the conditioning set
if (is.null(test_data$df)){
df <- data.frame(test_data$X[,c(vars,C)])
} else {
df <- test_data$df[,c(vars,C)]
}
# here using the package deal to for the test
nw<-network(df)
#ALL TEST IN THE PAPER WERE RUN WITH BELOW
#OPTIMAL PRIOR alpha 1.5 and p_threshold 0.1
#prior<-jointprior.mod(nw,1.5,phiprior="bottcher")
prior<-jointprior.mod(nw,test_data$alpha,phiprior="bottcher")
#first put in as parents only the conditioning set
if (length(C) == 0 ) {
nw$nodes[[1]]$parents<-c()
} else {
nw$nodes[[1]]$parents<-index(3,ncol(df))
}
#thise are just some spells from deal code
node <- nw$nodes[[1]]
node <- cond.node( node, nw, prior )
node$condposterior <- node$condprior
node$loglik <- 0
node <- learnnode( node, nw, df, timetrace = FALSE )
#finally the logp of the independent model
logpind<-node$loglik
#then essentially add the second variable to the parents
if (length(C) == 0 ) {
nw$nodes[[1]]$parents<-2
} else {
nw$nodes[[1]]$parents<-index(2,ncol(df))
}
node <- nw$nodes[[1]]
node <- cond.node( node, nw, prior )
node$condposterior <- node$condprior
node$loglik <- 0
node <- learnnode( node, nw, df, timetrace = FALSE )
#and get the logp of the depedent model
logpdep<-node$loglik
#then add the priors in the log space
#p_threshold is the prior prob of indep.
priors<-c(1-test_data$p_threshold,test_data$p_threshold)
logp<-c(logpdep,logpind)+log(priors)
#probability vector
p <- exp(logp - max(logp))
p <- p/sum(p)
test_result<-list()
test_result$vars<-vars
test_result$C<-C
test_result$independent <- ( logp[2] > logp[1] )
if ( test_result$independent) {
#independence
test_result$w<-logp[2]-logp[1]
} else {
#dependence
test_result$w<-logp[1]-logp[2]
}
test_result$p<-p[2]; #putting in the probability of independence
test_result$prob_dep<-p[1];
test_result
} |
# Recall metric for Keras: TP / (TP + FN), computed on backend tensors.
# Predictions are clipped to [0, 1] and rounded so they become hard 0/1
# labels; k_epsilon() guards against division by zero when there are no
# positive ground-truth labels.  (Idiom: `<-` assignment, implicit return.)
recall_m <- function(y_true, y_pred) {
  true_positives <- k_sum(k_round(k_clip(y_true * y_pred, 0, 1)))
  possible_positives <- k_sum(k_round(k_clip(y_true, 0, 1)))
  true_positives / (possible_positives + k_epsilon())
}
# Precision metric for Keras: TP / (TP + FP), computed on backend tensors.
# Predictions are clipped to [0, 1] and rounded into hard 0/1 labels;
# k_epsilon() guards against division by zero when nothing is predicted
# positive.  (Idiom: `<-` assignment, implicit return.)
precision_m <- function(y_true, y_pred) {
  true_positives <- k_sum(k_round(k_clip(y_true * y_pred, 0, 1)))
  predicted_positives <- k_sum(k_round(k_clip(y_pred, 0, 1)))
  true_positives / (predicted_positives + k_epsilon())
}
# F1 metric for Keras: harmonic mean of precision and recall.
# Relies on the sibling precision_m()/recall_m() definitions in this file;
# k_epsilon() avoids 0/0 when both are zero.
f1_m <- function(y_true, y_pred) {
  precision <- precision_m(y_true, y_pred)
  recall <- recall_m(y_true, y_pred)
  2 * ((precision * recall) / (precision + recall + k_epsilon()))
}
# recall_m = function(y_true, y_pred) {
# true_positives = k_sum(k_round(k_clip(k_dot(y_true, y_pred), 0, 1)))
# possible_positives = k_sum(k_round(k_clip(y_true, 0, 1)))
# recall = true_positives / (possible_positives + k_epsilon())
# return(recall)
# }
#
# precision_m = function(y_true, y_pred) {
# true_positives = k_sum(k_round(k_clip(k_dot(y_true, y_pred), 0, 1)))
# predicted_positives = k_sum(k_round(k_clip(y_pred, 0, 1)))
# precision = true_positives / (predicted_positives + k_epsilon())
# return(precision)
# }
#
#
#
# f1_metric <- custom_metric("f1", f1_m)
#
# f1_m = function(y_true, y_pred) {
# y_true = k_eval(y_true)
# print(y_true)
# y_pred = k_eval(y_pred)
# print(y_pred)
# f1_score = MLmetrics::F1_Score(y_true=y_true, y_pred=y_pred, positive="1")
# print(f1_score)
# return(k_constant(f1_score))
# }
#
| /metrics.R | permissive | lazariv/sun-flare | R | false | false | 1,562 | r |
# Recall metric for Keras: TP / (TP + FN), computed on backend tensors.
# Predictions are clipped to [0, 1] and rounded so they become hard 0/1
# labels; k_epsilon() guards against division by zero when there are no
# positive ground-truth labels.  (Idiom: `<-` assignment, implicit return.)
recall_m <- function(y_true, y_pred) {
  true_positives <- k_sum(k_round(k_clip(y_true * y_pred, 0, 1)))
  possible_positives <- k_sum(k_round(k_clip(y_true, 0, 1)))
  true_positives / (possible_positives + k_epsilon())
}
# Precision metric for Keras: TP / (TP + FP), computed on backend tensors.
# Predictions are clipped to [0, 1] and rounded into hard 0/1 labels;
# k_epsilon() guards against division by zero when nothing is predicted
# positive.  (Idiom: `<-` assignment, implicit return.)
precision_m <- function(y_true, y_pred) {
  true_positives <- k_sum(k_round(k_clip(y_true * y_pred, 0, 1)))
  predicted_positives <- k_sum(k_round(k_clip(y_pred, 0, 1)))
  true_positives / (predicted_positives + k_epsilon())
}
# F1 metric for Keras: harmonic mean of precision and recall.
# Relies on the sibling precision_m()/recall_m() definitions in this file;
# k_epsilon() avoids 0/0 when both are zero.
f1_m <- function(y_true, y_pred) {
  precision <- precision_m(y_true, y_pred)
  recall <- recall_m(y_true, y_pred)
  2 * ((precision * recall) / (precision + recall + k_epsilon()))
}
# recall_m = function(y_true, y_pred) {
# true_positives = k_sum(k_round(k_clip(k_dot(y_true, y_pred), 0, 1)))
# possible_positives = k_sum(k_round(k_clip(y_true, 0, 1)))
# recall = true_positives / (possible_positives + k_epsilon())
# return(recall)
# }
#
# precision_m = function(y_true, y_pred) {
# true_positives = k_sum(k_round(k_clip(k_dot(y_true, y_pred), 0, 1)))
# predicted_positives = k_sum(k_round(k_clip(y_pred, 0, 1)))
# precision = true_positives / (predicted_positives + k_epsilon())
# return(precision)
# }
#
#
#
# f1_metric <- custom_metric("f1", f1_m)
#
# f1_m = function(y_true, y_pred) {
# y_true = k_eval(y_true)
# print(y_true)
# y_pred = k_eval(y_pred)
# print(y_pred)
# f1_score = MLmetrics::F1_Score(y_true=y_true, y_pred=y_pred, positive="1")
# print(f1_score)
# return(k_constant(f1_score))
# }
#
|
"make.param" <-
function(VAR,df,sd.init,pi.init,nmixt)
{
st.em<-proc.time()
niter<-niter.max
N<-length(VAR)
vec<-VAR*df
c<-df/2
var.init<-sd.init^2
if(nmixt==1)
{
ppost<-0
deno<-0
b.init<-2*var.init
loglike<-sum(log(dgamma(vec,scale=b.init,shape=c)))
log.lik.cur<-loglike
AIC<- -2*loglike+2*(2-1)
BIC<- -2*loglike+(2-1)*log(N)
niter.f<-1
b<-b.init
vars<-b/2
pi<-1
}
else
{
ppost<-matrix(ncol=nmixt,nrow=N)
gamma.dist<-matrix(ncol=nmixt,nrow=N)
deno<-rep(0,length(vec))
b.init<-2*var.init
pi<-pi.init
b<-b.init
for(j in 1:nmixt)
{
gamma.dist[,j]<-dgamma(vec,scale=b[j],shape=c)
}
deno<-as.vector(gamma.dist%*%pi)
deno[deno==0]<-min(deno[deno>0])
log.lik.cur<-sum(log(deno))
if(is.na(log.lik.cur))
{
BIC<-1.e9
stop("Cannot fit the variance model. There might be missing values")
}
ppost<-(gamma.dist/deno)%*%diag(pi)
}
param.data<-list(pi=pi,b=b,ppost=ppost,c=c,deno=deno)
}
| /R/make.param.R | no_license | cran/varmixt | R | false | false | 1,118 | r | "make.param" <-
function(VAR,df,sd.init,pi.init,nmixt)
{
st.em<-proc.time()
niter<-niter.max
N<-length(VAR)
vec<-VAR*df
c<-df/2
var.init<-sd.init^2
if(nmixt==1)
{
ppost<-0
deno<-0
b.init<-2*var.init
loglike<-sum(log(dgamma(vec,scale=b.init,shape=c)))
log.lik.cur<-loglike
AIC<- -2*loglike+2*(2-1)
BIC<- -2*loglike+(2-1)*log(N)
niter.f<-1
b<-b.init
vars<-b/2
pi<-1
}
else
{
ppost<-matrix(ncol=nmixt,nrow=N)
gamma.dist<-matrix(ncol=nmixt,nrow=N)
deno<-rep(0,length(vec))
b.init<-2*var.init
pi<-pi.init
b<-b.init
for(j in 1:nmixt)
{
gamma.dist[,j]<-dgamma(vec,scale=b[j],shape=c)
}
deno<-as.vector(gamma.dist%*%pi)
deno[deno==0]<-min(deno[deno>0])
log.lik.cur<-sum(log(deno))
if(is.na(log.lik.cur))
{
BIC<-1.e9
stop("Cannot fit the variance model. There might be missing values")
}
ppost<-(gamma.dist/deno)%*%diag(pi)
}
param.data<-list(pi=pi,b=b,ppost=ppost,c=c,deno=deno)
}
|
# Exercise 2: working with data frames
# Create a vector of 100 employees ("Employee 1", "Employee 2", ... "Employee 100")
# Hint: use the `paste()` function and vector recycling to add a number to the word
# "Employee"
employees <- paste("Employee", 1:100)
# Create a vector of 100 random salaries for the year 2017
# Use the `runif()` function to pick random numbers between 40000 and 50000
salaries_2017 <- runif(100, 40000, 50000)
# Create a vector of 100 annual salary adjustments between -5000 and 10000.
# (A negative number represents a salary decrease due to corporate greed)
# Again use the `runif()` function to pick 100 random numbers in that range.
salary_adjustments <- runif(100, -5000, 10000)
# Create a data frame `salaries` by combining the 3 vectors you just made
# Remember to set `stringsAsFactors=FALSE`!
salaries <- data.frame(employees, salaries_2017, salary_adjustments, stringsAsFactors = FALSE)
# Add a column to the `salaries` data frame that represents each person's
# salary in 2018 (e.g., with the salary adjustment added in).
salaries$salaries_2018 <- salaries$salaries_2017 + salaries$salary_adjustments
# Add a column to the `salaries` data frame that has a value of `TRUE` if the
# person got a raise (their salary went up)
salaries$got_raise <- salaries$salaries_2018 > salaries$salaries_2017
### Retrieve values from your data frame to answer the following questions
### Note that you should get the value as specific as possible (e.g., a single
### cell rather than the whole row!)
# What was the 2018 salary of Employee 57
salary_57 <- salaries[salaries$employees == "Employee 57", "salaries_2018"]
# How many employees got a raise?
# FIX: the original computed this with `nrow(salaries[salaries$got_raise == TRUE, ])`
# but never stored the answer; `sum()` of the logical column is the
# idiomatic count of TRUE values.
num_raises <- sum(salaries$got_raise)
# What was the dollar value of the highest raise?
highest_raise <- max(salaries$salary_adjustments)
# What was the "name" of the employee who received the highest raise?
got_biggest_raise <- salaries[salaries$salary_adjustments == highest_raise, "employees"]
# What was the largest decrease in salaries between the two years?
biggest_paycut <- min(salaries$salary_adjustments)
# What was the name of the employee who received the largest decrease in salary?
got_biggest_paycut <- salaries[salaries$salary_adjustments == biggest_paycut, "employees"]
# What was the average salary change?
avg_increase <- mean(salaries$salary_adjustments)
# For people who did not get a raise, how much money did they lose on average?
avg_loss <- mean(salaries$salary_adjustments[!salaries$got_raise])
## Consider: do the above averages match what you expected them to be based on
## how you generated the salaries?
# Write a .csv file of your salary data to your working directory
write.csv(salaries, "salaries.csv")
| /chapter-10-exercises/exercise-2/exercise.R | permissive | ITCuw/RLessons-Solutions | R | false | false | 2,738 | r | # Exercise 2: working with data frames
# Create a vector of 100 employees ("Employee 1", "Employee 2", ... "Employee 100")
# Hint: use the `paste()` function and vector recycling to add a number to the word
# "Employee"
employees <- paste("Employee", 1:100)
# Create a vector of 100 random salaries for the year 2017
# Use the `runif()` function to pick random numbers between 40000 and 50000
salaries_2017 <- runif(100, 40000, 50000)
# Create a vector of 100 annual salary adjustments between -5000 and 10000.
# (A negative number represents a salary decrease due to corporate greed)
# Again use the `runif()` function to pick 100 random numbers in that range.
salary_adjustments <- runif(100, -5000, 10000)
# Create a data frame `salaries` by combining the 3 vectors you just made
# Remember to set `stringsAsFactors=FALSE`!
salaries <- data.frame(employees, salaries_2017, salary_adjustments, stringsAsFactors = FALSE)
# Add a column to the `salaries` data frame that represents each person's
# salary in 2018 (e.g., with the salary adjustment added in).
salaries$salaries_2018 <- salaries$salaries_2017 + salaries$salary_adjustments
# Add a column to the `salaries` data frame that has a value of `TRUE` if the
# person got a raise (their salary went up)
salaries$got_raise <- salaries$salaries_2018 > salaries$salaries_2017
### Retrieve values from your data frame to answer the following questions
### Note that you should get the value as specific as possible (e.g., a single
### cell rather than the whole row!)
# What was the 2018 salary of Employee 57
salary_57 <- salaries[salaries$employees == "Employee 57", "salaries_2018"]
# How many employees got a raise?
nrow(salaries[salaries$got_raise == TRUE, ])
# What was the dollar value of the highest raise?
highest_raise <- max(salaries$salary_adjustments)
# What was the "name" of the employee who received the highest raise?
got_biggest_raise <- salaries[salaries$salary_adjustments == highest_raise, "employees"]
# What was the largest decrease in salaries between the two years?
biggest_paycut <- min(salaries$salary_adjustments)
# What was the name of the employee who recieved largest decrease in salary?
got_biggest_paycut <- salaries[salaries$salary_adjustments == biggest_paycut, "employees"]
# What was the average salary change?
avg_increase <- mean(salaries$salary_adjustments)
# For people who did not get a raise, how much money did they lose on average?
avg_loss <- mean(salaries$salary_adjustments[salaries$got_raise == FALSE])
## Consider: do the above averages match what you expected them to be based on
## how you generated the salaries?
# Write a .csv file of your salary data to your working directory
write.csv(salaries, "salaries.csv")
|
#############################################################
### Construct features and responses for training images###
#############################################################
### Authors: Chengliang Tang/Tian Zheng
### Project 3
#########Move all the functions out of the loop
###get a single pixel value
get_val=function(img,a,b,d){
  # Return the intensity of pixel (a, b) in channel d of the 3-d image
  # array `img`, or 0 when (a, b) lies outside the image (zero padding
  # of boundary neighbours).
  #
  # FIX: the original used ifelse() with assignments inside its branches,
  # which is an anti-pattern for a scalar condition; a plain if/else is
  # clearer and equivalent for the scalar (a, b, d) this is called with.
  if (a > 0 && a <= nrow(img) && b > 0 && b <= ncol(img)) {
    img[a, b, d]
  } else {
    0
  }
}
###get neighbors of a selected pixel, then substract central
###simply apply this function to the loop
getnbL=function(index, d, imgLR, imgHR){
  # For the LR pixel at the given linear (column-major) index and channel d,
  # return:
  #   $neighbor8 - the 8 surrounding LR pixel values minus the central value
  #   $neighbor4 - the 4 corresponding HR sub-pixel values minus the central value
  # Out-of-bounds neighbours are zero-padded by get_val().
  n_row <- nrow(imgLR)
  col <- (index - 1) %/% n_row + 1
  row <- index - (col - 1) * n_row
  center <- get_val(imgLR, row, col, d)
  # Offsets of the 8 surrounding LR pixels, enumerated column by column
  # (same order as the original explicit listing).
  off_r <- c(-1, 0, 1, -1, 1, -1, 0, 1)
  off_c <- c(-1, -1, -1, 0, 0, 1, 1, 1)
  neighbor8 <- mapply(function(dr, dc) get_val(imgLR, row + dr, col + dc, d),
                      off_r, off_c) - center
  # The 2x2 block of HR sub-pixels that corresponds to this LR pixel.
  neighbor4 <- c(get_val(imgHR, 2 * row - 1, 2 * col - 1, d),
                 get_val(imgHR, 2 * row,     2 * col - 1, d),
                 get_val(imgHR, 2 * row - 1, 2 * col,     d),
                 get_val(imgHR, 2 * row,     2 * col,     d)) - center
  list(neighbor8 = neighbor8, neighbor4 = neighbor4)
}
###get neighbors of one image
getallnb=function(LR_points_total,imgLR, imgHR, n_points=1000){
  # Sample `n_points` pixel positions from the LR image and build, per
  # colour channel, the 8-neighbour feature array and the 4-sub-pixel
  # label array (see getnbL()).
  #
  # BUG FIX: the original computed every channel with d = 1, so channels
  # 2 and 3 of both `feat` and `lab` were copies of channel 1.  The
  # commented-out loop it carried shows the intended d = 1, 2, 3.
  feat = array(NA, c(n_points, 8, 3))
  lab  = array(NA, c(n_points, 4, 3))
  # Draw the pixel sample once so features and labels line up.
  sample_points <- sample(LR_points_total, n_points, replace = FALSE)
  for (d in 1:3) {
    # One getnbL() call per sampled point per channel; reuse the result
    # for both the feature and the label row (halves the original work).
    nb <- lapply(sample_points, getnbL, d = d, imgLR = imgLR, imgHR = imgHR)
    feat[, , d] <- do.call(rbind, lapply(nb, `[[`, "neighbor8"))
    lab[, , d]  <- do.call(rbind, lapply(nb, `[[`, "neighbor4"))
  }
  return(list(feat = feat, lab = lab))
}
# getallnb8=function(sample_points){
# array1 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 1)$neighbor8), along = 0)
# array2 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 2)$neighbor8), along = 0)
# array3 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 3)$neighbor8), along = 0)
# return(dim(abind(array1,array2,array3, along=3)))
# }
#
# getallnb4=function(sample_points){
# array1 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 1)$neighbor4), along = 0)
# array2 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 2)$neighbor4), along = 0)
# array3 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 3)$neighbor4), along = 0)
# return(dim(abind(array1,array2,array3, along=3)))
# }
###########
feature <- function(LR_dir, HR_dir, n_points=1000){
  ### Construct process features for training images (LR/HR pairs)
  ### Input: a path for low-resolution images + a path for high-resolution images
  ###        + number of points sampled from each LR image
  ### Output: a list with processed features and responses for the images
  ### load libraries
  library("EBImage")
  n_files <- length(list.files(LR_dir))
  ### store feature and responses
  featMat <- array(NA, c(n_files * n_points, 8, 3))
  labMat  <- array(NA, c(n_files * n_points, 4, 3))
  ### read LR/HR image pairs
  for (i in 1:n_files){
    imgLR <- readImage(paste0(LR_dir, "img_", sprintf("%04d", i), ".jpg"))@.Data
    imgHR <- readImage(paste0(HR_dir, "img_", sprintf("%04d", i), ".jpg"))@.Data
    ### step 1. number of candidate pixels in imgLR
    LR_points_total <- nrow(imgLR) * ncol(imgLR)
    ### step 2-3. sample pixels and extract neighbour features/labels for
    ### all three channels.
    ## BUG FIX: the original called getallnb() twice, once for $feat and
    ## once for $lab.  Each call draws its own random pixel sample, so
    ## features and labels were computed from DIFFERENT pixels and did
    ## not correspond.  Call it once and reuse the result.  n_points is
    ## now also forwarded, so values other than the default 1000 work.
    nb <- getallnb(LR_points_total, imgLR = imgLR, imgHR = imgHR, n_points = n_points)
    rows <- ((i - 1) * n_points + 1):(i * n_points)
    featMat[rows, , ] <- nb$feat
    labMat[rows, , ]  <- nb$lab
    cat("file", i, "\n")
  }
  return(list(feature = featMat, label = labMat))
}
| /lib/feature.R | no_license | TZstatsADS/Spring2019-Proj3-grp3 | R | false | false | 6,033 | r | #############################################################
### Construct features and responses for training images###
#############################################################
### Authors: Chengliang Tang/Tian Zheng
### Project 3
#########Move all the functions out of the loop
###get a single pixel value
get_val=function(img,a,b,d){
  # Return the intensity of pixel (a, b) in channel d of the 3-d image
  # array `img`, or 0 when (a, b) lies outside the image (zero padding
  # of boundary neighbours).
  #
  # FIX: the original used ifelse() with assignments inside its branches,
  # which is an anti-pattern for a scalar condition; a plain if/else is
  # clearer and equivalent for the scalar (a, b, d) this is called with.
  if (a > 0 && a <= nrow(img) && b > 0 && b <= ncol(img)) {
    img[a, b, d]
  } else {
    0
  }
}
###get neighbors of a selected pixel, then substract central
###simply apply this function to the loop
getnbL=function(index, d, imgLR, imgHR){
# Given the linear (column-major) index of a pixel in the low-resolution
# image, return for channel `d`:
#   $neighbor8 - the 8 surrounding LR pixel values minus the central value
#   $neighbor4 - the 4 corresponding HR sub-pixel values minus the central value
# Out-of-bounds neighbours are zero-padded by get_val().
c<-(index-1) %/% nrow(imgLR)+1
r<- index - (c-1)*nrow(imgLR)
# slow method
# r <- arrayInd(index, dim(imgLR[,,1]))[1]
# c <- arrayInd(index, dim(imgLR[,,1]))[2]
center8 <- get_val(imgLR, r,c,d)
# 8 LR neighbours, enumerated column by column, centred on the pixel value.
neighbor8 <- c(get_val(imgLR,r-1,c-1,d), get_val(imgLR,r,c-1,d), get_val(imgLR,r+1,c-1,d),
get_val(imgLR,r-1,c,d), get_val(imgLR,r+1,c,d),
get_val(imgLR,r-1,c+1,d), get_val(imgLR,r,c+1,d), get_val(imgLR,r+1,c+1,d)) -center8
# The 2x2 block of HR sub-pixels corresponding to this LR pixel, also centred.
neighbor4 <- c(get_val(imgHR,2*r-1,2*c-1,d), get_val(imgHR,2*r,2*c-1,d),
get_val(imgHR,2*r-1,2*c,d), get_val(imgHR,2*r,2*c,d)) - center8
return(list(neighbor8=neighbor8, neighbor4=neighbor4))
}
###get neighbors of one image
getallnb=function(LR_points_total,imgLR, imgHR, n_points=1000){
  # Sample `n_points` pixel positions from the LR image and build, per
  # colour channel, the 8-neighbour feature array and the 4-sub-pixel
  # label array (see getnbL()).
  #
  # BUG FIX: the original computed every channel with d = 1, so channels
  # 2 and 3 of both `feat` and `lab` were copies of channel 1.  The
  # commented-out loop it carried shows the intended d = 1, 2, 3.
  feat = array(NA, c(n_points, 8, 3))
  lab  = array(NA, c(n_points, 4, 3))
  # Draw the pixel sample once so features and labels line up.
  sample_points <- sample(LR_points_total, n_points, replace = FALSE)
  for (d in 1:3) {
    # One getnbL() call per sampled point per channel; reuse the result
    # for both the feature and the label row (halves the original work).
    nb <- lapply(sample_points, getnbL, d = d, imgLR = imgLR, imgHR = imgHR)
    feat[, , d] <- do.call(rbind, lapply(nb, `[[`, "neighbor8"))
    lab[, , d]  <- do.call(rbind, lapply(nb, `[[`, "neighbor4"))
  }
  return(list(feat = feat, lab = lab))
}
# getallnb8=function(sample_points){
# array1 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 1)$neighbor8), along = 0)
# array2 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 2)$neighbor8), along = 0)
# array3 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 3)$neighbor8), along = 0)
# return(dim(abind(array1,array2,array3, along=3)))
# }
#
# getallnb4=function(sample_points){
# array1 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 1)$neighbor4), along = 0)
# array2 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 2)$neighbor4), along = 0)
# array3 <- abind(lapply(sample_points[1:1000],function(x) getnbL(x, 3)$neighbor4), along = 0)
# return(dim(abind(array1,array2,array3, along=3)))
# }
###########
feature <- function(LR_dir, HR_dir, n_points=1000){
  ### Construct process features for training images (LR/HR pairs)
  ### Input: a path for low-resolution images + a path for high-resolution images
  ###        + number of points sampled from each LR image
  ### Output: a list with processed features and responses for the images
  ### load libraries
  library("EBImage")
  n_files <- length(list.files(LR_dir))
  ### store feature and responses
  featMat <- array(NA, c(n_files * n_points, 8, 3))
  labMat  <- array(NA, c(n_files * n_points, 4, 3))
  ### read LR/HR image pairs
  for (i in 1:n_files){
    imgLR <- readImage(paste0(LR_dir, "img_", sprintf("%04d", i), ".jpg"))@.Data
    imgHR <- readImage(paste0(HR_dir, "img_", sprintf("%04d", i), ".jpg"))@.Data
    ### step 1. number of candidate pixels in imgLR
    LR_points_total <- nrow(imgLR) * ncol(imgLR)
    ### step 2-3. sample pixels and extract neighbour features/labels for
    ### all three channels.
    ## BUG FIX: the original called getallnb() twice, once for $feat and
    ## once for $lab.  Each call draws its own random pixel sample, so
    ## features and labels were computed from DIFFERENT pixels and did
    ## not correspond.  Call it once and reuse the result.  n_points is
    ## now also forwarded, so values other than the default 1000 work.
    nb <- getallnb(LR_points_total, imgLR = imgLR, imgHR = imgHR, n_points = n_points)
    rows <- ((i - 1) * n_points + 1):(i * n_points)
    featMat[rows, , ] <- nb$feat
    labMat[rows, , ]  <- nb$lab
    cat("file", i, "\n")
  }
  return(list(feature = featMat, label = labMat))
}
|
#!/usr/bin/env Rscript
# ================================================================================
#
# Coursera - Exploratory Data Analysis - Course Project 1
#
# Generate plot3.png - a graph of Energy sub metering
## downloadAndUnpackData()
#
# Download and unpack the source data.
#
# Note: will not update/overwrite existing copies of the data. Warnings are
# reported if the source data file and/or the unpacked data directory already
# exist.
#
# Usage:
# downloadAndUnpackData()
#
downloadAndUnpackData <- function() {
  # Fetch the household power consumption archive and extract it into the
  # working directory.  Existing local copies are never overwritten; a
  # warning is issued instead, so the function is safe to call repeatedly.
  file_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
  file_name <- 'household_power_consumption.zip'
  data_file_name <- 'household_power_consumption.txt'
  # Step 1: download the archive unless it is already present.
  if (file.exists(file_name)) {
    warning('Local copy of data archive found, not downloading')
  } else {
    message('Downloading data from Internet')
    if (require(downloader, quietly=TRUE)) {
      # The 'downloader' package handles HTTPS sources portably.
      download(file_url, destfile=file_name)
    } else {
      # Fall back to base R (HTTPS may fail on some non-Windows platforms).
      download.file(file_url, file_name, mode='wb', method='auto')
    }
  }
  # Step 2: unpack the archive unless the data file already exists.
  if (file.exists(data_file_name)) {
    warning('Existing data file found, not unpacking data archive.')
  } else {
    message('Unpacking downloaded data archive.')
    unzip(file_name)
  }
}
## loadSourceData()
#
# Load the source data into a single data.frame adding a 'datetime' column
# based on the Date and Time columns in the source data.
#
# Usage:
# srcData <- loadSourceData()
#
loadSourceData <- function() {
  # Load the observations for 2007-02-01 and 2007-02-02 from the raw data
  # file, adding a 'datetime' column built from the Date and Time columns.
  # The processed subset is cached as an RDS file so later calls are fast.
  cacheDataFile <- 'household_power_consumption.rds'
  if (!file.exists(cacheDataFile)) {
    # First run: parse the full text file ('?' marks missing values).
    raw <- read.csv('household_power_consumption.txt', header=TRUE, sep=';',
                    na.strings='?', stringsAsFactors=FALSE)
    data <- subset(raw, Date == "1/2/2007" | Date == "2/2/2007")
    data$datetime <- strptime(sprintf('%s %s', data$Date, data$Time),
                              format='%d/%m/%Y %T')
    # Cache the processed subset for faster loading next time.
    saveRDS(data, cacheDataFile)
  } else {
    data <- readRDS(cacheDataFile)
  }
  data
}
## create_plot3()
#
# Generate a graph for Energy sub metering.
#
# Usage:
# create_plot3()
#
create_plot3 <- function() {
  # Generate plot3.png: energy sub metering of the three sub-meters over
  # the two-day window, with a legend, written as a 480x480 PNG.
  # Download and unpack the source data if required.
  downloadAndUnpackData()
  # Load the data.
  data <- loadSourceData()
  # Set plotting output to PNG.
  png(filename='plot3.png', width=480, height=480)
  # FIX: close the device even if plotting fails; the original called
  # dev.off() at the end only, leaving the PNG device open on error.
  on.exit(dev.off(), add = TRUE)
  # Plot a graph for Energy sub metering.
  plot(data$datetime, data$Sub_metering_1, type='l', xlab='',
       ylab='Energy sub metering')
  lines(data$datetime, data$Sub_metering_2, col='red')
  lines(data$datetime, data$Sub_metering_3, col='blue')
  legend('topright', col=c('black', 'red', 'blue'), lty=par('lty'),
         legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
}
# Run plot3 generation.
create_plot3()
| /plot3.R | no_license | hpmcwill/ExData_Plotting1 | R | false | false | 3,282 | r | #!/usr/bin/env Rscript
# ================================================================================
#
# Coursera - Exploratory Data Analysis - Course Project 1
#
# Generate plot3.png - a graph of Energy sub metering
## downloadAndUnpackData()
#
# Download and unpack the source data.
#
# Note: will not update/overwrite existing copies of the data. Warnings are
# reported if the source data file and/or the unpacked data directory already
# exist.
#
# Usage:
# downloadAndUnpackData()
#
downloadAndUnpackData <- function() {
# Fetch the household power consumption archive and extract it into the
# working directory.  Existing local copies are never overwritten; a
# warning is issued instead, so the function is safe to call repeatedly.
file_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
file_name <- 'household_power_consumption.zip'
data_file_name <- 'household_power_consumption.txt'
# Download data archive.
if(!file.exists(file_name)) {
message('Downloading data from Internet')
# If available use the 'downloader' package to deal with HTTPS sources.
if(require(downloader, quietly=TRUE)) {
download(file_url,destfile=file_name)
}
# Otherwise use the built-in (has problems with HTTPS on non-Windows platforms)
else {
download.file(file_url, file_name, mode='wb', method='auto')
}
}
else {
warning('Local copy of data archive found, not downloading')
}
# Unpack data archive if data not already present.
if(!file.exists(data_file_name)) {
message('Unpacking downloaded data archive.')
unzip(file_name)
}
else {
warning('Existing data file found, not unpacking data archive.')
}
}
## loadSourceData()
#
# Load the source data into a single data.frame adding a 'datetime' column
# based on the Date and Time columns in the source data.
#
# Usage:
# srcData <- loadSourceData()
#
loadSourceData <- function() {
# Load the observations for 2007-02-01 and 2007-02-02 from the raw data
# file, adding a 'datetime' column built from the Date and Time columns.
# The processed subset is cached as an RDS file so later calls are fast.
# Read cached data if available.
cacheDataFile <- 'household_power_consumption.rds'
if(file.exists(cacheDataFile)) {
data <- readRDS(cacheDataFile)
}
else {
# Read data for 2007-02-01 and 2007-02-02 ('?' marks missing values).
srcData <- read.csv('household_power_consumption.txt', header=TRUE, sep=';',
na.strings='?', stringsAsFactors=FALSE)
data <- subset(srcData, Date == "1/2/2007" | Date == "2/2/2007")
data$datetime <- strptime(sprintf('%s %s', data$Date, data$Time), format='%d/%m/%Y %T')
# Save processed data to a cache file for faster loading.
saveRDS(data, cacheDataFile)
}
data
}
## create_plot3()
#
# Generate a graph for Energy sub metering.
#
# Usage:
# create_plot3()
#
create_plot3 <- function() {
# Generate plot3.png: energy sub metering of the three sub-meters over
# the two-day window, with a legend, written as a 480x480 PNG.
# NOTE(review): if any plotting call errors, dev.off() is never reached
# and the PNG device is left open; consider on.exit(dev.off()).
# Download and unpack the source data if required.
downloadAndUnpackData()
# Load the data.
data <- loadSourceData()
# Set plotting output to PNG.
png(filename='plot3.png', width=480, height=480)
# Plot a graph for Energy sub metering.
plot(data$datetime, data$Sub_metering_1, type='l', xlab='',
ylab='Energy sub metering')
lines(data$datetime, data$Sub_metering_2, col='red')
lines(data$datetime, data$Sub_metering_3, col='blue')
legend('topright', col=c('black', 'red', 'blue'), lty=par('lty'),
legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
# Close the PNG device.
dev.off()
}
# Run plot3 generation.
create_plot3()
|
# ATAC-seq QC: shift read alignments of one sample (I4) for downstream
# analysis with ATACseqQC, restricted to one D. discoideum chromosome.
library(BiocManager)
library(ATACseqQC)
library(ChIPpeakAnno)
library(MotifDb)
library(GenomicAlignments)
library(Rsamtools)
library(BSgenome.Ddiscoideum.ensembl.27)
# Restrict processing to a single chromosome of the D. discoideum genome.
seqlev <- "DDB0232428"
Ddiscoideum
# GRanges covering the whole selected chromosome.
which <- as(seqinfo(Ddiscoideum)[seqlev], "GRanges")
which
# NOTE(review): `tags` and `outPath` are not defined anywhere in this
# file; they must come from an earlier interactive session or another
# script -- confirm before running non-interactively.
gal <- readBamFile(bamFile = "ATAC_bam_files_2nd/I4.trim.sort.bam", tag=tags, which=which, asMates=TRUE, bigFile=TRUE)
objs <- shiftGAlignmentsList(gal)
shiftedBamfile <- file.path(outPath, "shifted.bam")
shiftedBamfile
# Shift again, this time writing the shifted alignments to disk.
gal1 <- shiftGAlignmentsList(gal, outbam=shiftedBamfile)
#Produce fragment length and read density files
#
# The original repeated the same three statements for each of the 14 BAM
# files; loop over the sample list instead.  Also pass fixed = TRUE to
# gsub() so the "." in ".bam" is treated literally, not as a regex
# wildcard.
atac_bams <- file.path(
  "ATAC_bam_files_2nd",
  paste0(c("G1", "G2", "G3",         # vegetative replicates
           "H1", "H2", "H3",         # streaming replicates
           "I1", "I2", "I3", "I4",   # mound replicates
           "J1", "J2", "J3", "J4"),  # fruiting-body replicates
         ".bam"))
for (bam in atac_bams) {
  # Label each sample by its file name without the .bam extension.
  bamfile.labels <- gsub(".bam", "", basename(bam), fixed = TRUE)
  fragSize <- fragSizeDist(bam, bamfile.labels)
}
| /ATACseq/ATACQC.R | no_license | BCHGreerlab/Dictyostelium | R | false | false | 2,740 | r | library(BiocManager)
# ATAC-seq QC: shift read alignments of one sample (I4) for downstream
# analysis with ATACseqQC, restricted to one D. discoideum chromosome.
library(ATACseqQC)
library(ChIPpeakAnno)
library(MotifDb)
library(GenomicAlignments)
library(Rsamtools)
library(BSgenome.Ddiscoideum.ensembl.27)
# Restrict processing to a single chromosome of the D. discoideum genome.
seqlev <- "DDB0232428"
Ddiscoideum
# GRanges covering the whole selected chromosome.
which <- as(seqinfo(Ddiscoideum)[seqlev], "GRanges")
which
# NOTE(review): `tags` and `outPath` are not defined anywhere in this
# file; they must come from an earlier interactive session or another
# script -- confirm before running non-interactively.
gal <- readBamFile(bamFile = "ATAC_bam_files_2nd/I4.trim.sort.bam", tag=tags, which=which, asMates=TRUE, bigFile=TRUE)
objs <- shiftGAlignmentsList(gal)
shiftedBamfile <- file.path(outPath, "shifted.bam")
shiftedBamfile
# Shift again, this time writing the shifted alignments to disk.
gal1 <- shiftGAlignmentsList(gal, outbam=shiftedBamfile)
#Produce fragment length and read density files
#
# The original repeated the same three statements for each of the 14 BAM
# files; loop over the sample list instead.  Also pass fixed = TRUE to
# gsub() so the "." in ".bam" is treated literally, not as a regex
# wildcard.
atac_bams <- file.path(
  "ATAC_bam_files_2nd",
  paste0(c("G1", "G2", "G3",         # vegetative replicates
           "H1", "H2", "H3",         # streaming replicates
           "I1", "I2", "I3", "I4",   # mound replicates
           "J1", "J2", "J3", "J4"),  # fruiting-body replicates
         ".bam"))
for (bam in atac_bams) {
  # Label each sample by its file name without the .bam extension.
  bamfile.labels <- gsub(".bam", "", basename(bam), fixed = TRUE)
  fragSize <- fragSizeDist(bam, bamfile.labels)
}
|
# Box plots of electoral disproportionality, comparing legislatures with
# and without recorded violence.
# NOTE(review): require() is used for loading; library() would fail fast
# if a package is missing instead of returning FALSE.
require(foreign)
require(ggplot2)
leg <- read.dta("/Users/christophergandrud/Dropbox/Leg_Violence/leg_violence_main.dta")
leg$violence <- factor(leg$violence, label = c("No Violence", "Violence"))
# NOTE(review): this compares against the string "NA"; true missing values
# are also dropped here only because NA comparisons are excluded by subset().
leg <- subset(leg, violence != "NA")
# Keep only countries with an elected legislature.
leg <- subset(leg, elect_legislature == 1)
require(gridExtra)
## Label violence variable and remove if violence is missing
# NOTE(review): `dem` is not defined in this file -- it must be loaded by
# an earlier script; confirm before running standalone.
dem.p <- dem
dem.p$violence <- factor(dem.p$violence, label = c("No Violence", "Violence"))
dem.p <- subset(dem.p, violence != "NA")
## Box plot colours
box.col <- c("Violence" = "#ED6700", "No Violence" = "grey80")
## Disproportionality Box Plot (jittered points under the boxes; log y axis)
# NOTE(review): show_guide is deprecated in newer ggplot2 (use show.legend).
disp.boxp <- ggplot(leg, aes(violence, disproportionality)) +
geom_jitter(aes(color = violence), alpha = 0.5, show_guide = FALSE) +
geom_boxplot(aes(fill = violence), show_guide = FALSE) +
scale_y_log10(breaks = c(1, 2.5, 5, 10, 20, 30), labels = c(1, 2.5, 5, 10, 20, 30)) +
xlab("") + ylab("Disproportionality (Log Scale)\n") +
scale_fill_manual(values = box.col, guide = "none") +
scale_colour_manual(values = box.col, guide = "none") +
theme_bw()
print(disp.boxp)
## Trust Box Plot (kept commented out in the original)
#trust.boxp <- ggplot(dem.p, aes(violence, CWtrust)) +
#  geom_jitter(aes(color = violence), alpha = 0.5, show_guide = FALSE) +
#  geom_boxplot(aes(fill = violence), show_guide = FALSE) +
#  xlab("") + ylab("Trust") +
#  scale_fill_manual(values = box.col, guide = "none") +
#  scale_colour_manual(values = box.col, guide = "none") +
#  scale_y_reverse(breaks = c(1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9), labels = c("High", "1.4", "1.5", "1.6", "1.7", "1.8", "Low")) +
#  theme_bw()
## Combine plots
# boxPlot.combine <- grid.arrange(disp.boxp, trust.boxp, ncol = 2) | /Analysis/Old/boxPlots.R | no_license | mujahedulislam/leg_violence_paper1 | R | false | false | 1,702 | r | require(foreign)
# Box plots of electoral disproportionality, comparing legislatures with
# and without recorded violence.
require(ggplot2)
leg <- read.dta("/Users/christophergandrud/Dropbox/Leg_Violence/leg_violence_main.dta")
leg$violence <- factor(leg$violence, label = c("No Violence", "Violence"))
# NOTE(review): this compares against the string "NA"; true missing values
# are also dropped here only because NA comparisons are excluded by subset().
leg <- subset(leg, violence != "NA")
# Keep only countries with an elected legislature.
leg <- subset(leg, elect_legislature == 1)
require(gridExtra)
## Label violence variable and remove if violence is missing
# NOTE(review): `dem` is not defined in this file -- it must be loaded by
# an earlier script; confirm before running standalone.
dem.p <- dem
dem.p$violence <- factor(dem.p$violence, label = c("No Violence", "Violence"))
dem.p <- subset(dem.p, violence != "NA")
## Box plot colours
box.col <- c("Violence" = "#ED6700", "No Violence" = "grey80")
## Disproportionality Box Plot (jittered points under the boxes; log y axis)
# NOTE(review): show_guide is deprecated in newer ggplot2 (use show.legend).
disp.boxp <- ggplot(leg, aes(violence, disproportionality)) +
geom_jitter(aes(color = violence), alpha = 0.5, show_guide = FALSE) +
geom_boxplot(aes(fill = violence), show_guide = FALSE) +
scale_y_log10(breaks = c(1, 2.5, 5, 10, 20, 30), labels = c(1, 2.5, 5, 10, 20, 30)) +
xlab("") + ylab("Disproportionality (Log Scale)\n") +
scale_fill_manual(values = box.col, guide = "none") +
scale_colour_manual(values = box.col, guide = "none") +
theme_bw()
print(disp.boxp)
## Trust Box Plot (kept commented out in the original)
#trust.boxp <- ggplot(dem.p, aes(violence, CWtrust)) +
#  geom_jitter(aes(color = violence), alpha = 0.5, show_guide = FALSE) +
#  geom_boxplot(aes(fill = violence), show_guide = FALSE) +
#  xlab("") + ylab("Trust") +
#  scale_fill_manual(values = box.col, guide = "none") +
#  scale_colour_manual(values = box.col, guide = "none") +
#  scale_y_reverse(breaks = c(1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9), labels = c("High", "1.4", "1.5", "1.6", "1.7", "1.8", "Low")) +
#  theme_bw()
## Combine plots
# boxPlot.combine <- grid.arrange(disp.boxp, trust.boxp, ncol = 2) |
\name{edu_inc}
\alias{edu_inc}
\docType{data}
\title{
edu_inc Data
}
\description{
obs: 428 subsample of Mroz 1975 data including families with working wives
}
\usage{data("edu_inc")}
\format{
A data frame with 428 observations on the following 6 variables.
\describe{
\item{\code{faminc}}{Family income in 2006 dollars
= [husband's hours worked in 1975 * husbands hourly wage
+ wife's hours worked in 1975 * wife's hourly wage]*3.78
(The multiplier 3.78 is used to convert 1975 dollars to 2006 dollars.)}
\item{\code{he}}{Husband's educational attainment, in years}
\item{\code{we}}{Wife's educational attainment, in years}
\item{\code{kl6}}{Number of children less than 6 years old in household}
\item{\code{xtra_x5}}{an artificially generated variable used to illustrate the effect of
irrelevant variables.}
\item{\code{xtra_x6}}{a second artificially generated variable used to illustrate the effect of
irrelevant variables.}
}
}
\details{
THE MROZ DATA FILE IS TAKEN FROM THE 1976 PANEL STUDY OF INCOME
DYNAMICS, AND IS BASED ON DATA FOR THE PREVIOUS YEAR, 1975. OF THE 753
OBSERVATIONS, THE FIRST 428 ARE FOR WOMEN WITH POSITIVE HOURS
WORKED IN 1975, WHILE THE REMAINING 325 OBSERVATIONS ARE FOR WOMEN
WHO DID NOT WORK FOR PAY IN 1975. A MORE COMPLETE DISCUSSION OF THE
DATA IS FOUND IN MROZ [1987], APPENDIX 1.
}
\source{
http://principlesofeconometrics.com/poe4/poe4.htm
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(edu_inc)
## maybe str(edu_inc) ; plot(edu_inc) ...
}
\keyword{datasets}
| /man/edu_inc.Rd | no_license | Worathan/PoEdata | R | false | false | 1,643 | rd | \name{edu_inc}
\alias{edu_inc}
\docType{data}
\title{
edu_inc Data
}
\description{
obs: 428 subsample of Mroz 1975 data including families with working wives
}
\usage{data("edu_inc")}
\format{
A data frame with 428 observations on the following 6 variables.
\describe{
\item{\code{faminc}}{Family income in 2006 dollars
= [husband's hours worked in 1975 * husbands hourly wage
+ wife's hours worked in 1975 * wife's hourly wage]*3.78
(The multiplier 3.78 is used to convert 1975 dollars to 2006 dollars.)}
\item{\code{he}}{Husband's educational attainment, in years}
\item{\code{we}}{Wife's educational attainment, in years}
\item{\code{kl6}}{Number of children less than 6 years old in household}
\item{\code{xtra_x5}}{an artificially generated variable used to illustrate the effect of
irrelevant variables.}
\item{\code{xtra_x6}}{a second artificially generated variable used to illustrate the effect of
irrelevant variables.}
}
}
\details{
THE MROZ DATA FILE IS TAKEN FROM THE 1976 PANEL STUDY OF INCOME
DYNAMICS, AND IS BASED ON DATA FOR THE PREVIOUS YEAR, 1975. OF THE 753
OBSERVATIONS, THE FIRST 428 ARE FOR WOMEN WITH POSITIVE HOURS
WORKED IN 1975, WHILE THE REMAINING 325 OBSERVATIONS ARE FOR WOMEN
WHO DID NOT WORK FOR PAY IN 1975. A MORE COMPLETE DISCUSSION OF THE
DATA IS FOUND IN MROZ [1987], APPENDIX 1.
}
\source{
http://principlesofeconometrics.com/poe4/poe4.htm
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
data(edu_inc)
## maybe str(edu_inc) ; plot(edu_inc) ...
}
\keyword{datasets}
|
# Serve a PNG line plot of 100 standard-normal draws as an HTTP response.
# NOTE(review): setContentType() and sendBin() are provided by the
# rapache/Rook web-serving environment, not base R; this script is meant
# to run inside such a handler.
setContentType ("image/png")
# Render the plot into a temporary file, then stream its bytes back.
temp <- tempfile ()
y = rnorm (100)
png (temp, type="cairo")
plot (1:100, y, t='l')
dev.off ()
# Read the finished PNG as raw bytes and send it as the response body.
sendBin (readBin (temp, 'raw', n=file.info(temp)$size))
unlink (temp)
| /R/web-image.R | no_license | iDanielLaw/stuff | R | false | false | 194 | r | setContentType ("image/png")
# Render a line plot of 100 standard-normal draws into a temporary PNG,
# then stream its bytes back as the HTTP response body.
# NOTE(review): sendBin() is provided by the rapache/Rook web-serving
# environment, not base R.
temp <- tempfile ()
y = rnorm (100)
png (temp, type="cairo")
plot (1:100, y, t='l')
dev.off ()
# Read the finished PNG as raw bytes and send it to the client.
sendBin (readBin (temp, 'raw', n=file.info(temp)$size))
unlink (temp)
|
#############################################
# This code is subject to the license as stated in DESCRIPTION.
# Using this software implies acceptance of the license terms:
# - GPL 2
#
# (C) by F. Hoffgaard, P. Weil, and K. Hamacher in 2009.
#
# keul(AT)bio.tu-darmstadt.de
#
#
# http://www.kay-hamacher.de
#############################################
extractPDB<-function(file.name, verbose=TRUE){
# ### Begin of the original bio3d functions "atom2xyz", "atom.select" and "read.pdb" as provided in bio3d 1.0-6 under GPL version2 by Grant, Rodrigues, ElSawy, McCammon, Caves, (2006) {Bioinformatics} 22, 2695--2696.
atom2xyz<-function(num) {
num3 <- num*3
c(t(matrix(c(((num3) - 2),
((num3) - 1),
(num3)), ncol=3)))
}
atom.select<-function(pdb, string=NULL, verbose=TRUE, rm.insert=FALSE) {
if (missing(pdb)) {
stop("atom.select: must supply 'pdb' object, e.g. from 'read.pdb'")
}
pdb.bounds <- function(nums) {
# find the 'bounds' (i.e. the
# start, end and length) of each
# consecutive range of residue
# or atom numbers, returned as a matrix with columns start/end/length,
# plus the indices of the first atom of each new residue (r.ind).
# NOTE(review): the loop starts at 2, so a length-1 input would make
# 2:length(nums) count backwards -- confirm callers never pass one number.
nums <- as.numeric(nums)
bounds <- nums[1]
diff.i <- 1; j <- 1
nums.start <- nums[1]
store.inds <- NULL
# also store ind of 1st atom of new res
for (i in 2:length(nums)) {
if (nums[i] != nums[j]) { # for resno
# A number that does not continue the current run closes the previous
# range (record its end and the new range's start).
if ((nums[i] - diff.i)!= nums.start) {
bounds <- c(bounds,nums[i-1],nums[i])
nums.start <- nums[i]
diff.i <- 1
} else { diff.i <- diff.i + 1 }
store.inds <- c(store.inds,i)
}
j<-j+1
}
# Close the final open range with the last number.
bounds<-c(bounds, nums[length(nums)])
bounds<-matrix( bounds, ncol=2, byrow=TRUE,
dimnames=list( NULL, #c(1:(length(bounds)/2),
c("start","end")) )
bounds<-cbind(bounds,length=(bounds[,2]-bounds[,1])+1)
return(list(bounds=bounds,r.ind=store.inds))
}
sel.txt2nums <- function (num.sel.txt) {
  # Parse a numeric selection string such as "1,3,10:100" into a sorted
  # vector of unique numbers.  Comma-separated tokens are either plain
  # numbers or "a:b" ranges.
  tokens <- unlist(strsplit(num.sel.txt, split = ","))
  plain <- suppressWarnings(as.numeric(tokens))   # NA where the token is a range
  range.tokens <- tokens[is.na(plain)]
  ends <- unlist(strsplit(range.tokens, split = ":"))
  # Pair up the range endpoints and expand each a:b pair.
  expanded <- NULL
  k <- 1
  while (k < length(ends)) {
    expanded <- c(expanded, as.numeric(ends[k]):as.numeric(ends[k + 1]))
    k <- k + 2
  }
  # Merge plain numbers and expanded ranges, deduplicated and sorted.
  sort(unique(c(na.omit(plain), expanded)))
}
sel.txt2type <- function (type.sel.txt) {
  # Split a comma-separated type selection (e.g. "N, CA, C") into a
  # character vector with all space characters removed.
  parts <- unlist(strsplit(type.sel.txt, split = ","))
  gsub(" ", "", parts)
}
if (is.null(string)) {
## Summary if called without a selection string
sum.segid <- unique(pdb$atom[,"segid"])
sum.chain <- unique(pdb$atom[,"chain"])
sum.rnum <- pdb.bounds(pdb$atom[,"resno"])
sum.resno <- sum.rnum$bounds
sum.resid <- table(pdb$atom[sum.rnum$r.ind,"resid"])
sum.eleno <- pdb.bounds(pdb$atom[,"eleno"])$bounds
sum.elety <- table(pdb$atom[,"elety"])
cat(" * Structure Summary *",sep="\n")
cat("---- segid ----",sep="\n");
print(sum.segid)
cat("---- chain ----",sep="\n");
print(sum.chain)
cat("---- resno ----",sep="\n")
print(sum.resno)
cat("---- resid ----",sep="\n");
print(sum.resid)
cat("---- eleno ----",sep="\n");
print(sum.eleno)
cat("---- elety ----",sep="\n");
print(sum.elety)
} else {
# string shortcuts
if (string=="calpha" || string=="CA") {
string= "//////CA/"
}
if (string=="cbeta" || string=="CB") {
string= "//////N,CA,C,O,CB/"
}
if (string=="backbone" || string=="back") {
string= "//////N,CA,C,O/"
}
if (string=="all") {
string= "///////"
}
## - Addation Jan 17 2008
if (string=="h") {
h.atom <- which( substr(pdb$atom[,"elety"], 1, 1) %in% "H" )
match <- list(atom=h.atom, xyz=atom2xyz(h.atom))
class(match) <- "select"
if(verbose)
cat(paste(" * Selected", length(h.atom), "hydrogen atoms *\n"))
return(match)
}
if (string=="noh") {
noh.atom <- which( !substr(pdb$atom[,"elety"], 1, 1) %in% "H" )
match <- list(atom=noh.atom, xyz=atom2xyz(noh.atom))
class(match) <- "select"
if(verbose)
cat(paste(" * Selected", length(noh.atom), "non-hydrogen atoms *\n"))
return(match)
}
## - end addation
# main function
sel <- unlist(strsplit(string, split = "/"))
if (sel[1] == "") { # full selection string (starts with "/")
sel <- sel[-1]
if(length(sel) != 6) {
print("missing elements, should be:")
print("/segid/chain/resno/resid/eleno/elety/")
}
names(sel) <- c("segid","chain", "resno","resid","eleno","elety")
#print(sel)
blank <- rep(TRUE, nrow(pdb$atom) )
sel.inds <- NULL
# SEGID
if(sel["segid"] != "") {
sel.inds <- cbind(sel.inds,
segid=is.element( pdb$atom[,"segid"],
sel.txt2type( sel["segid"] )) )
} else { sel.inds <- cbind(sel.inds, segid=blank) }
# CHAIN
if(sel["chain"] != "") {
sel.inds <- cbind(sel.inds,
chain=is.element( pdb$atom[,"chain"],
sel.txt2type( sel["chain"] )) )
} else { sel.inds <- cbind(sel.inds, chain=blank) }
# RESNO
if(sel["resno"] != "") {
rn <- sel.txt2nums( sel["resno"] )
if(is.numeric(rn) & length(rn)==0) {
# check for R object
rn <- get(gsub(" ","",sel["resno"]))
}
sel.inds <- cbind(sel.inds,
resno=is.element( as.numeric(pdb$atom[,"resno"]),
rn))
#sel.txt2nums( sel["resno"] )) )
} else { sel.inds <- cbind(sel.inds, resno=blank) }
# RESID
if(sel["resid"] != "") {
sel.inds <- cbind(sel.inds,
resid=is.element(pdb$atom[,"resid"],
sel.txt2type( sel["resid"] )) )
} else { sel.inds <- cbind(sel.inds, resid=blank) }
# ELENO
if(sel["eleno"] != "") {
sel.inds <- cbind(sel.inds,
eleno=is.element(as.numeric(pdb$atom[,"eleno"]),
sel.txt2nums( sel["eleno"] )) )
} else { sel.inds <- cbind(sel.inds, eleno=blank) }
# ELETY
if(sel["elety"] != "") {
## cat( sel["elety"] ,"\n" ) ### glob2rx
#if(any(i <- grep("*", sel["elety"]))) {
# print("WARN: no wild card '*' matching, yet")
#}
sel.inds <- cbind(sel.inds,
elety=is.element(pdb$atom[,"elety"],
sel.txt2type( sel["elety"] )) )
} else { sel.inds <- cbind(sel.inds, elety=blank) }
match.inds <- ( (apply(sel.inds, 1, sum, na.rm=TRUE)==6) )
if (rm.insert) { # ignore INSERT records
insert <- which(!is.na(pdb$atom[,"insert"]))
match.inds[insert] <- FALSE
}
# return XYZ indices
xyz.inds <- matrix(1:length( pdb$atom[,c("x","y","z")] ),nrow=3,byrow=FALSE)
xyz.inds <- as.vector(xyz.inds[,match.inds])
if (verbose) {
sel <- rbind( sel, apply(sel.inds, 2, sum, na.rm=TRUE) )
rownames(sel)=c("Stest","Natom"); print(sel)
cat(paste(" * Selected a total of:",sum(match.inds),
"intersecting atoms *"),sep="\n")
}
match <- list(atom=which(match.inds), xyz=xyz.inds)
class(match) <- "select"
return(match)
}
}
}
  # Minimal PDB-format reader (vendored from bio3d 1.0-6).
  #
  # Reads up to 'maxlines' lines from 'file' and parses the fixed-width
  # PDB v2.0 columns into a list of class "pdb" with components:
  #   $atom, $het   - character matrices of ATOM / HETATM fields
  #   $helix, $sheet - secondary-structure start/end/chain info
  #   $seqres       - residue names from SEQRES records
  #   $xyz          - flat numeric vector x1,y1,z1,x2,y2,z2,...
  #   $calpha       - logical, TRUE for C-alpha rows of $atom
  # het2atom=TRUE folds HETATM records into $atom; rm.alt keeps only the
  # first alternate-location indicator; rm.insert drops insertion codes.
  read.pdb<-function (file, maxlines=50000, multi=FALSE,rm.insert=FALSE, rm.alt=TRUE, het2atom=FALSE, verbose=TRUE) {
    if(missing(file)) {
      stop("read.pdb: please specify a PDB 'file' for reading")
    }
    if(!is.numeric(maxlines)) {
      stop("read.pdb: 'maxlines' must be numeric")
    }
    if(!is.logical(multi)) {
      stop("read.pdb: 'multi' must be logical TRUE/FALSE")
    }
    # PDB FORMAT v2.0: colpos, datatype, name, description
    # Negative widths mark columns that are read over but discarded.
    atom.format <- matrix(c(-6, NA, NA, # (ATOM)
                             5, 'numeric', "eleno", # atom_no
                            -1, NA, NA, # (blank)
                             4, 'character', "elety", # atom_ty
                             1, 'character', "alt", # alt_loc
                             4, 'character', "resid", # res_na
                             1, 'character', "chain", # chain_id
                             4, 'numeric', "resno", # res_no
                             1, 'character', "insert", # ins_code
                            -3, NA, NA, # (blank)
                             8, 'numeric', "x", # x
                             8, 'numeric', "y", # y
                             8, 'numeric', "z", # z
                             6, 'numeric', "o", # o
                             6, 'numeric', "b", # b
                            -6, NA, NA, # (blank)
                             4, 'character', "segid" # seg_id
                            ), ncol=3, byrow=TRUE,
                          dimnames = list(c(1:17), c("widths","what","name")) )
    # Cut one record line into its fixed-width fields.  NOTE: relies on
    # the 'first'/'last' vectors computed below (lexical scoping).
    split.string <- function(x) {
      # split a string 'x'
      x <- substring(x, first, last)
      x[nchar(x) == 0] <- as.character(NA)
      x
    }
    is.character0 <- function(x){length(x)==0 & is.character(x)}
    trim <- function (s) {
      # Remove leading and trailing
      #  spaces from character strings;
      #  empty strings become NA
      s <- sub("^ +", "", s)
      s <- sub(" +$", "", s)
      s[(s=="")]<-NA
      s
    }
    # finds first and last (substr positions) for each retained field
    widths <- as.numeric(atom.format[,"widths"]) # fixed-width spec
    drop.ind <- (widths < 0) # cols to ignore (i.e. -ve)
    widths <- abs(widths) # absolute values for later
    st <- c(1, 1 + cumsum( widths ))
    first <- st[-length(st)][!drop.ind] # substr start
    last <- cumsum( widths )[!drop.ind] # substr end
    # read n lines of PDB file
    raw.lines <- readLines(file, n = maxlines)
    type <- substring(raw.lines,1,6) # 6-char record name column
    # check number of END/ENDMDL records (multi-model files)
    raw.end <- sort(c(which(type == "END"),
                      which(type == "ENDMDL")))
    if (length(raw.end) > 1) {
      print("PDB has multiple END/ENDMDL records")
      if (!multi) {
        print("multi=FALSE: taking first record only")
        raw.lines <- raw.lines[ (1:raw.end[1]) ]
        type <- type[ (1:raw.end[1]) ]
      } else {
        print("multi=TRUE: 'read.dcd' will be quicker!")
      }
    }
    if ( length(raw.end) !=1 ) {
      if (length(raw.lines) == maxlines) {
        # have not yet read all the file
        print("You may need to increase 'maxlines'")
        print("check you have all data in $atom")
      }
    }
    # split by record type (record names are space-padded to 6 chars)
    raw.header <- raw.lines[type == "HEADER"]
    raw.seqres <- raw.lines[type == "SEQRES"]
    raw.helix <- raw.lines[type == "HELIX "]
    raw.sheet <- raw.lines[type == "SHEET "]
    raw.atom <- raw.lines[type == "ATOM  "]
    het.atom <- raw.lines[type == "HETATM"]
    all.atom <- raw.lines[type %in% c("ATOM  ","HETATM")]
    # also look for "TER" records
    rm(raw.lines)
    if (verbose) {
      if (!is.character0(raw.header)) { cat("  ", raw.header, "\n") }
    }
    seqres <- unlist(strsplit( trim(substring(raw.seqres,19,80))," "))
    helix <- list(start = as.numeric(substring(raw.helix,22,25)),
                  end = as.numeric(substring(raw.helix,34,37)),
                  chain = trim(substring(raw.helix,20,20)),
                  type = trim(substring(raw.helix,39,40)))
    sheet <- list(start = as.numeric(substring(raw.sheet,23,26)),
                  end = as.numeric(substring(raw.sheet,34,37)),
                  chain = trim(substring(raw.sheet,22,22)),
                  sense = trim(substring(raw.sheet,39,40)))
    # format ATOM records as a character matrix (one row per atom)
    if (het2atom) {
      atom <- matrix(trim(sapply(all.atom, split.string)), byrow=TRUE,
                     ncol=nrow(atom.format[ !drop.ind,]),
                     dimnames = list(NULL, atom.format[ !drop.ind,"name"]) )
    } else {
      atom <- matrix(trim(sapply(raw.atom, split.string)), byrow=TRUE,
                     ncol=nrow(atom.format[ !drop.ind,]),
                     dimnames = list(NULL, atom.format[ !drop.ind,"name"]) )
    }
    # Alt records with m[,"alt"] != NA: keep only the first (sorted)
    # alternate-location indicator
    if (rm.alt) {
      if ( sum( !is.na(atom[,"alt"]) ) > 0 ) {
        ## Edited: Mon May  4 17:41:11 PDT 2009 to cope with
        ## both numeric and character ALT records
        first.alt <- sort( unique(na.omit(atom[,"alt"])) )[1]
        cat(paste(" PDB has ALT records, taking",first.alt,"only, rm.alt=TRUE\n"))
        alt.inds <- which( (atom[,"alt"] != first.alt) ) # take first alt only
        if(length(alt.inds)>0)
          atom <- atom[-alt.inds,]
      }
    }
    # Insert records with m[,"insert"] != NA are removed entirely
    if (rm.insert) {
      if ( sum( !is.na(atom[,"insert"]) ) > 0 ) {
        cat(" PDB has INSERT records, removing, rm.insert=TRUE\n")
        insert.inds <- which(!is.na(atom[,"insert"])) # rm insert positions
        atom <- atom[-insert.inds,]
      }
    }
    het <- matrix(trim(sapply(het.atom, split.string)), byrow=TRUE,
                  ncol=nrow(atom.format[ !drop.ind,]),
                  dimnames = list(NULL, atom.format[ !drop.ind,"name"]) )
    output<-list(atom=atom,
                 het=het,
                 helix=helix,
                 sheet=sheet,
                 seqres=seqres,
                 # t() interleaves coordinates atom-by-atom: x1,y1,z1,x2,...
                 xyz=as.numeric(t(atom[,c("x","y","z")])),
                 calpha = as.logical(atom[,"elety"]=="CA"))
    class(output) <- "pdb"
    return(output)
  }
  # ### End of bio3d functions
  # Parse the PDB file and derive C-alpha-level summaries for the caller.
  p<-read.pdb(file.name,maxlines=5000000,verbose=verbose);
  seq<-p$seqres;   # residue sequence taken from the SEQRES records
  n1<-length(seq); # sequence length according to SEQRES
  # Select the C-alpha atoms from the coordinate (ATOM) section.
  pu<-atom.select(p,string="//////CA/",verbose=verbose);
  n<-length(pu$atom); # number of CA atoms actually present
  if(n!=n1)print("Length of the sequence extracted from SEQRES and the number of CA atoms in ATOM differ.");
  coords<-matrix(data=p$xyz[pu$xyz],ncol=3,byrow=TRUE); # n x 3 CA coordinates
  b<-as.numeric(p$atom[pu$atom,"b"]); # B-factors of the CA atoms
  seq2<-p$atom[pu$atom,"resid"]       # residue name per CA atom
  # Column 5 of the atom matrix is "chain"; tabulate CA counts per chain.
  chains<-summary(as.factor(p$atom[pu$atom,5]));
  ret<-list(pdb=p,seq=seq,lseq=n1,lca=n,caseq=seq2,coords=coords,b=b,chains=chains)
  return(ret)
}
| /BioPhysConnectoR/R/extractPDB.r | no_license | ingted/R-Examples | R | false | false | 12,708 | r | #############################################
# This code is subject to the license as stated in DESCRIPTION.
# Using this software implies acceptance of the license terms:
# - GPL 2
#
# (C) by F. Hoffgaard, P. Weil, and K. Hamacher in 2009.
#
# keul(AT)bio.tu-darmstadt.de
#
#
# http://www.kay-hamacher.de
#############################################
extractPDB<-function(file.name, verbose=TRUE){
# ### Begin of the original bio3d functions "atom2xyz", "atom.select" and "read.pdb" as provided in bio3d 1.0-6 under GPL version2 by Grant, Rodrigues, ElSawy, McCammon, Caves, (2006) {Bioinformatics} 22, 2695--2696.
atom2xyz<-function(num) {
num3 <- num*3
c(t(matrix(c(((num3) - 2),
((num3) - 1),
(num3)), ncol=3)))
}
  # Select atoms from a 'pdb' object (vendored from bio3d 1.0-6).
  #
  # 'string' is either a shortcut ("calpha"/"CA", "cbeta"/"CB",
  # "backbone"/"back", "all", "h", "noh") or a full selection string
  # "/segid/chain/resno/resid/eleno/elety/" where blank fields match
  # everything.  Returns a list of class "select" with $atom (row indices
  # into pdb$atom) and $xyz (indices into the flat coordinate vector).
  # Called with string=NULL it only prints a structure summary.
  atom.select<-function(pdb, string=NULL, verbose=TRUE, rm.insert=FALSE) {
    if (missing(pdb)) {
      stop("atom.select: must supply 'pdb' object, e.g. from 'read.pdb'")
    }
    pdb.bounds <- function(nums) {
      # find the 'bounds' (i.e. the
      #  start, end and length) of a
      #  consecutive range of residue
      #  or atom numbers.
      nums <- as.numeric(nums)
      bounds <- nums[1]
      diff.i <- 1; j <- 1
      nums.start <- nums[1]
      store.inds <- NULL
      # also store ind of 1st atom of new res
      for (i in 2:length(nums)) {
        if (nums[i] != nums[j]) { # for resno
          if ((nums[i] - diff.i)!= nums.start) {
            # gap detected: close previous run, start a new one
            bounds <- c(bounds,nums[i-1],nums[i])
            nums.start <- nums[i]
            diff.i <- 1
          } else { diff.i <- diff.i + 1 }
          store.inds <- c(store.inds,i)
        }
        j<-j+1
      }
      bounds<-c(bounds, nums[length(nums)])
      bounds<-matrix( bounds, ncol=2, byrow=TRUE,
                      dimnames=list( NULL, #c(1:(length(bounds)/2),
                        c("start","end")) )
      bounds<-cbind(bounds,length=(bounds[,2]-bounds[,1])+1)
      return(list(bounds=bounds,r.ind=store.inds))
    }
    # Parse a numeric field such as "1:10,15" into a sorted number vector.
    sel.txt2nums <- function (num.sel.txt) {
      # Splitting functions for numbers
      num1<-unlist(strsplit(num.sel.txt, split=","))
      num2<-suppressWarnings( as.numeric(num1) )
      # comma split still may have "10:100" = NA in num2
      tosplit <- num1[ which(is.na(num2)) ]
      num3 <- unlist(strsplit(tosplit, split=":"))
      # pair-up num3 to make num4 (range endpoints expanded to sequences)
      num4<-NULL; i<-1
      while (i < length(num3) ) {
        num4 <- c(num4, as.numeric(num3[i]):
                  as.numeric(num3[i+1]) )
        i<-i+2
      }
      # join and order num2 with num4
      return( sort(unique(c(na.omit(num2),num4))) )
    }
    sel.txt2type <- function (type.sel.txt) {
      # Splitting functions for characters
      type1 <- unlist(strsplit(type.sel.txt, split=","))
      # split on comma and remove white space
      return( gsub(" ","",type1) )
    }
    if (is.null(string)) {
      ## Summary if called without a selection string
      sum.segid <- unique(pdb$atom[,"segid"])
      sum.chain <- unique(pdb$atom[,"chain"])
      sum.rnum <- pdb.bounds(pdb$atom[,"resno"])
      sum.resno <- sum.rnum$bounds
      sum.resid <- table(pdb$atom[sum.rnum$r.ind,"resid"])
      sum.eleno <- pdb.bounds(pdb$atom[,"eleno"])$bounds
      sum.elety <- table(pdb$atom[,"elety"])
      cat(" * Structure Summary *",sep="\n")
      cat("---- segid ----",sep="\n");
      print(sum.segid)
      cat("---- chain ----",sep="\n");
      print(sum.chain)
      cat("---- resno ----",sep="\n")
      print(sum.resno)
      cat("---- resid ----",sep="\n");
      print(sum.resid)
      cat("---- eleno ----",sep="\n");
      print(sum.eleno)
      cat("---- elety ----",sep="\n");
      print(sum.elety)
    } else {
      # string shortcuts: rewrite common names to full selection strings
      if (string=="calpha" || string=="CA") {
        string= "//////CA/"
      }
      if (string=="cbeta" || string=="CB") {
        string= "//////N,CA,C,O,CB/"
      }
      if (string=="backbone" || string=="back") {
        string= "//////N,CA,C,O/"
      }
      if (string=="all") {
        string= "///////"
      }
      ## - Addation Jan 17 2008
      # "h"/"noh": hydrogen selection by first character of atom name
      if (string=="h") {
        h.atom <- which( substr(pdb$atom[,"elety"], 1, 1) %in% "H" )
        match <- list(atom=h.atom, xyz=atom2xyz(h.atom))
        class(match) <- "select"
        if(verbose)
          cat(paste(" * Selected", length(h.atom), "hydrogen atoms *\n"))
        return(match)
      }
      if (string=="noh") {
        noh.atom <- which( !substr(pdb$atom[,"elety"], 1, 1) %in% "H" )
        match <- list(atom=noh.atom, xyz=atom2xyz(noh.atom))
        class(match) <- "select"
        if(verbose)
          cat(paste(" * Selected", length(noh.atom), "non-hydrogen atoms *\n"))
        return(match)
      }
      ## - end addation
      # main function
      sel <- unlist(strsplit(string, split = "/"))
      if (sel[1] == "") { # full selection string (starts with "/")
        sel <- sel[-1]
        if(length(sel) != 6) {
          print("missing elements, should be:")
          print("/segid/chain/resno/resid/eleno/elety/")
        }
        names(sel) <- c("segid","chain", "resno","resid","eleno","elety")
        #print(sel)
        # One logical column per field; blank fields match every atom.
        blank <- rep(TRUE, nrow(pdb$atom) )
        sel.inds <- NULL
        # SEGID
        if(sel["segid"] != "") {
          sel.inds <- cbind(sel.inds,
                            segid=is.element( pdb$atom[,"segid"],
                              sel.txt2type( sel["segid"] )) )
        } else { sel.inds <- cbind(sel.inds, segid=blank) }
        # CHAIN
        if(sel["chain"] != "") {
          sel.inds <- cbind(sel.inds,
                            chain=is.element( pdb$atom[,"chain"],
                              sel.txt2type( sel["chain"] )) )
        } else { sel.inds <- cbind(sel.inds, chain=blank) }
        # RESNO
        if(sel["resno"] != "") {
          rn <- sel.txt2nums( sel["resno"] )
          # NOTE(review): scalar test written with '&'; both operands
          # are length-1 here so it behaves like '&&'.
          if(is.numeric(rn) & length(rn)==0) {
            # check for R object holding the residue numbers
            rn <- get(gsub(" ","",sel["resno"]))
          }
          sel.inds <- cbind(sel.inds,
                            resno=is.element( as.numeric(pdb$atom[,"resno"]),
                              rn))
          #sel.txt2nums( sel["resno"] )) )
        } else { sel.inds <- cbind(sel.inds, resno=blank) }
        # RESID
        if(sel["resid"] != "") {
          sel.inds <- cbind(sel.inds,
                            resid=is.element(pdb$atom[,"resid"],
                              sel.txt2type( sel["resid"] )) )
        } else { sel.inds <- cbind(sel.inds, resid=blank) }
        # ELENO
        if(sel["eleno"] != "") {
          sel.inds <- cbind(sel.inds,
                            eleno=is.element(as.numeric(pdb$atom[,"eleno"]),
                              sel.txt2nums( sel["eleno"] )) )
        } else { sel.inds <- cbind(sel.inds, eleno=blank) }
        # ELETY
        if(sel["elety"] != "") {
          ## cat( sel["elety"] ,"\n" ) ### glob2rx
          #if(any(i <- grep("*", sel["elety"]))) {
          #  print("WARN: no wild card '*' matching, yet")
          #}
          sel.inds <- cbind(sel.inds,
                            elety=is.element(pdb$atom[,"elety"],
                              sel.txt2type( sel["elety"] )) )
        } else { sel.inds <- cbind(sel.inds, elety=blank) }
        # An atom matches when all 6 per-field tests are TRUE (row sum == 6).
        match.inds <- ( (apply(sel.inds, 1, sum, na.rm=TRUE)==6) )
        if (rm.insert) { # ignore INSERT records
          insert <- which(!is.na(pdb$atom[,"insert"]))
          match.inds[insert] <- FALSE
        }
        # return XYZ indices: column k of this 3-row matrix holds the
        # x/y/z positions of atom k in the flat coordinate vector
        xyz.inds <- matrix(1:length( pdb$atom[,c("x","y","z")] ),nrow=3,byrow=FALSE)
        xyz.inds <- as.vector(xyz.inds[,match.inds])
        if (verbose) {
          sel <- rbind( sel, apply(sel.inds, 2, sum, na.rm=TRUE) )
          rownames(sel)=c("Stest","Natom"); print(sel)
          cat(paste(" * Selected a total of:",sum(match.inds),
                    "intersecting atoms *"),sep="\n")
        }
        match <- list(atom=which(match.inds), xyz=xyz.inds)
        class(match) <- "select"
        return(match)
      }
      # NOTE(review): a non-shortcut string that does not start with "/"
      # falls through here and the function returns NULL invisibly.
    }
  }
  # Minimal PDB-format reader (vendored from bio3d 1.0-6).
  #
  # Reads up to 'maxlines' lines from 'file' and parses the fixed-width
  # PDB v2.0 columns into a list of class "pdb" with components:
  #   $atom, $het   - character matrices of ATOM / HETATM fields
  #   $helix, $sheet - secondary-structure start/end/chain info
  #   $seqres       - residue names from SEQRES records
  #   $xyz          - flat numeric vector x1,y1,z1,x2,y2,z2,...
  #   $calpha       - logical, TRUE for C-alpha rows of $atom
  # het2atom=TRUE folds HETATM records into $atom; rm.alt keeps only the
  # first alternate-location indicator; rm.insert drops insertion codes.
  read.pdb<-function (file, maxlines=50000, multi=FALSE,rm.insert=FALSE, rm.alt=TRUE, het2atom=FALSE, verbose=TRUE) {
    if(missing(file)) {
      stop("read.pdb: please specify a PDB 'file' for reading")
    }
    if(!is.numeric(maxlines)) {
      stop("read.pdb: 'maxlines' must be numeric")
    }
    if(!is.logical(multi)) {
      stop("read.pdb: 'multi' must be logical TRUE/FALSE")
    }
    # PDB FORMAT v2.0: colpos, datatype, name, description
    # Negative widths mark columns that are read over but discarded.
    atom.format <- matrix(c(-6, NA, NA, # (ATOM)
                             5, 'numeric', "eleno", # atom_no
                            -1, NA, NA, # (blank)
                             4, 'character', "elety", # atom_ty
                             1, 'character', "alt", # alt_loc
                             4, 'character', "resid", # res_na
                             1, 'character', "chain", # chain_id
                             4, 'numeric', "resno", # res_no
                             1, 'character', "insert", # ins_code
                            -3, NA, NA, # (blank)
                             8, 'numeric', "x", # x
                             8, 'numeric', "y", # y
                             8, 'numeric', "z", # z
                             6, 'numeric', "o", # o
                             6, 'numeric', "b", # b
                            -6, NA, NA, # (blank)
                             4, 'character', "segid" # seg_id
                            ), ncol=3, byrow=TRUE,
                          dimnames = list(c(1:17), c("widths","what","name")) )
    # Cut one record line into its fixed-width fields.  NOTE: relies on
    # the 'first'/'last' vectors computed below (lexical scoping).
    split.string <- function(x) {
      # split a string 'x'
      x <- substring(x, first, last)
      x[nchar(x) == 0] <- as.character(NA)
      x
    }
    is.character0 <- function(x){length(x)==0 & is.character(x)}
    trim <- function (s) {
      # Remove leading and trailing
      #  spaces from character strings;
      #  empty strings become NA
      s <- sub("^ +", "", s)
      s <- sub(" +$", "", s)
      s[(s=="")]<-NA
      s
    }
    # finds first and last (substr positions) for each retained field
    widths <- as.numeric(atom.format[,"widths"]) # fixed-width spec
    drop.ind <- (widths < 0) # cols to ignore (i.e. -ve)
    widths <- abs(widths) # absolute values for later
    st <- c(1, 1 + cumsum( widths ))
    first <- st[-length(st)][!drop.ind] # substr start
    last <- cumsum( widths )[!drop.ind] # substr end
    # read n lines of PDB file
    raw.lines <- readLines(file, n = maxlines)
    type <- substring(raw.lines,1,6) # 6-char record name column
    # check number of END/ENDMDL records (multi-model files)
    raw.end <- sort(c(which(type == "END"),
                      which(type == "ENDMDL")))
    if (length(raw.end) > 1) {
      print("PDB has multiple END/ENDMDL records")
      if (!multi) {
        print("multi=FALSE: taking first record only")
        raw.lines <- raw.lines[ (1:raw.end[1]) ]
        type <- type[ (1:raw.end[1]) ]
      } else {
        print("multi=TRUE: 'read.dcd' will be quicker!")
      }
    }
    if ( length(raw.end) !=1 ) {
      if (length(raw.lines) == maxlines) {
        # have not yet read all the file
        print("You may need to increase 'maxlines'")
        print("check you have all data in $atom")
      }
    }
    # split by record type (record names are space-padded to 6 chars)
    raw.header <- raw.lines[type == "HEADER"]
    raw.seqres <- raw.lines[type == "SEQRES"]
    raw.helix <- raw.lines[type == "HELIX "]
    raw.sheet <- raw.lines[type == "SHEET "]
    raw.atom <- raw.lines[type == "ATOM  "]
    het.atom <- raw.lines[type == "HETATM"]
    all.atom <- raw.lines[type %in% c("ATOM  ","HETATM")]
    # also look for "TER" records
    rm(raw.lines)
    if (verbose) {
      if (!is.character0(raw.header)) { cat("  ", raw.header, "\n") }
    }
    seqres <- unlist(strsplit( trim(substring(raw.seqres,19,80))," "))
    helix <- list(start = as.numeric(substring(raw.helix,22,25)),
                  end = as.numeric(substring(raw.helix,34,37)),
                  chain = trim(substring(raw.helix,20,20)),
                  type = trim(substring(raw.helix,39,40)))
    sheet <- list(start = as.numeric(substring(raw.sheet,23,26)),
                  end = as.numeric(substring(raw.sheet,34,37)),
                  chain = trim(substring(raw.sheet,22,22)),
                  sense = trim(substring(raw.sheet,39,40)))
    # format ATOM records as a character matrix (one row per atom)
    if (het2atom) {
      atom <- matrix(trim(sapply(all.atom, split.string)), byrow=TRUE,
                     ncol=nrow(atom.format[ !drop.ind,]),
                     dimnames = list(NULL, atom.format[ !drop.ind,"name"]) )
    } else {
      atom <- matrix(trim(sapply(raw.atom, split.string)), byrow=TRUE,
                     ncol=nrow(atom.format[ !drop.ind,]),
                     dimnames = list(NULL, atom.format[ !drop.ind,"name"]) )
    }
    # Alt records with m[,"alt"] != NA: keep only the first (sorted)
    # alternate-location indicator
    if (rm.alt) {
      if ( sum( !is.na(atom[,"alt"]) ) > 0 ) {
        ## Edited: Mon May  4 17:41:11 PDT 2009 to cope with
        ## both numeric and character ALT records
        first.alt <- sort( unique(na.omit(atom[,"alt"])) )[1]
        cat(paste(" PDB has ALT records, taking",first.alt,"only, rm.alt=TRUE\n"))
        alt.inds <- which( (atom[,"alt"] != first.alt) ) # take first alt only
        if(length(alt.inds)>0)
          atom <- atom[-alt.inds,]
      }
    }
    # Insert records with m[,"insert"] != NA are removed entirely
    if (rm.insert) {
      if ( sum( !is.na(atom[,"insert"]) ) > 0 ) {
        cat(" PDB has INSERT records, removing, rm.insert=TRUE\n")
        insert.inds <- which(!is.na(atom[,"insert"])) # rm insert positions
        atom <- atom[-insert.inds,]
      }
    }
    het <- matrix(trim(sapply(het.atom, split.string)), byrow=TRUE,
                  ncol=nrow(atom.format[ !drop.ind,]),
                  dimnames = list(NULL, atom.format[ !drop.ind,"name"]) )
    output<-list(atom=atom,
                 het=het,
                 helix=helix,
                 sheet=sheet,
                 seqres=seqres,
                 # t() interleaves coordinates atom-by-atom: x1,y1,z1,x2,...
                 xyz=as.numeric(t(atom[,c("x","y","z")])),
                 calpha = as.logical(atom[,"elety"]=="CA"))
    class(output) <- "pdb"
    return(output)
  }
  # ### End of bio3d functions
  # Parse the PDB file and derive C-alpha-level summaries for the caller.
  p<-read.pdb(file.name,maxlines=5000000,verbose=verbose);
  seq<-p$seqres;   # residue sequence taken from the SEQRES records
  n1<-length(seq); # sequence length according to SEQRES
  # Select the C-alpha atoms from the coordinate (ATOM) section.
  pu<-atom.select(p,string="//////CA/",verbose=verbose);
  n<-length(pu$atom); # number of CA atoms actually present
  if(n!=n1)print("Length of the sequence extracted from SEQRES and the number of CA atoms in ATOM differ.");
  coords<-matrix(data=p$xyz[pu$xyz],ncol=3,byrow=TRUE); # n x 3 CA coordinates
  b<-as.numeric(p$atom[pu$atom,"b"]); # B-factors of the CA atoms
  seq2<-p$atom[pu$atom,"resid"]       # residue name per CA atom
  # Column 5 of the atom matrix is "chain"; tabulate CA counts per chain.
  chains<-summary(as.factor(p$atom[pu$atom,5]));
  ret<-list(pdb=p,seq=seq,lseq=n1,lca=n,caseq=seq2,coords=coords,b=b,chains=chains)
  return(ret)
}
|
# ---------------------------------------------------------------------
# SMS spam/ham classification with Naive Bayes (packages: tm, e1071).
# This section: load the raw messages, clean the text corpus, build a
# document-term matrix and split everything into train/test sets.
# ---------------------------------------------------------------------
#Import the dataset
library(readr)
# NOTE(review): hard-coded absolute Windows path -- runs only on the
# original author's machine; a relative path would be more portable.
sms_raw <- read_csv("C:\\Users\\jeeva\\Downloads\\R assignment\\Naive Bayes\\sms_raw_NB.csv")
sms_raw$type <- factor(sms_raw$type)#factorize the ham and spam
# build a corpus using the text mining (tm) package
# NOTE(review): install.packages() inside an analysis script re-installs
# the package on every run; normally done once, interactively.
install.packages("tm") #install tm package
library(tm) #import tm package
sms_corpus <- Corpus(VectorSource(sms_raw$text))
# Re-encode every document as UTF-8, substituting invalid bytes, so the
# tm_map() transformations below do not fail on malformed characters.
sms_corpus <- tm_map(sms_corpus, function(x) iconv(enc2utf8(x), sub='byte'))
# clean up the corpus using tm_map()
corpus_clean <- tm_map(sms_corpus, tolower) #change to lower
corpus_clean <- tm_map(corpus_clean, removeNumbers) #remove numbers
corpus_clean <- tm_map(corpus_clean, removeWords, stopwords()) #remove stopwords
corpus_clean <- tm_map(corpus_clean, removePunctuation) #remove punctuation
corpus_clean <- tm_map(corpus_clean, stripWhitespace) #remove space
# create a document-term sparse matrix (rows = messages, cols = words)
sms_dtm <- DocumentTermMatrix(corpus_clean)
sms_dtm
# creating training and test datasets
# NOTE(review): the 4169/1390 split assumes exactly 5559 rows in their
# original order -- confirm against the input file.
sms_raw_train <- sms_raw[1:4169, ]
sms_raw_test <- sms_raw[4170:5559, ]
sms_dtm_train <- sms_dtm[1:4169, ]
sms_dtm_test <- sms_dtm[4170:5559, ]
sms_corpus_train <- corpus_clean[1:4169]
sms_corpus_test <- corpus_clean[4170:5559]
# check that the proportion of spam is similar
prop.table(table(sms_raw_train$type))
prop.table(table(sms_raw_test$type))
# indicator features for frequent words
# dictionary of words which are used at least 5 times in the training DTM
sms_dict <- findFreqTerms(sms_dtm_train, 5)
sms_train <- DocumentTermMatrix(sms_corpus_train, list(dictionary = sms_dict))
sms_test <- DocumentTermMatrix(sms_corpus_test, list(dictionary = sms_dict))
# convert counts to a factor
# custom function: if a word is used more than 0 times then mention 1 else mention 0
# Convert word counts to a "No"/"Yes" indicator factor.
#
# Any positive count maps to "Yes", zero maps to "No"; both levels are
# always present so every converted column has a consistent factor
# structure even when one class is absent.  The original version built
# an intermediate 0/1 vector with ifelse() and ended on an assignment
# (an invisible return); this returns the factor directly and visibly.
#
# @param x numeric vector of word counts (a DTM column under apply())
# @return factor of length(x) with levels "No", "Yes"
convert_counts <- function(x) {
  factor(x > 0, levels = c(FALSE, TRUE), labels = c("No", "Yes"))
}
# apply() convert_counts() to columns of train/test data
# Margin = 2 is for columns
# Margin = 1 is for rows
# Result: matrices of "Yes"/"No" word-presence indicators.
sms_train <- apply(sms_train, MARGIN = 2, convert_counts)
sms_test <- apply(sms_test, MARGIN = 2, convert_counts)
## Training a model on the data ----
# NOTE(review): install.packages() inside an analysis script re-installs
# the package on every run; normally done once, interactively.
install.packages("e1071")
library(e1071)
# Fit Naive Bayes: features = word indicators, labels = ham/spam type.
sms_classifier <- naiveBayes(sms_train, sms_raw_train$type)
sms_classifier
## Evaluating model performance
sms_test_pred <- predict(sms_classifier, sms_test)
table(sms_test_pred)
prop.table(table(sms_test_pred))
library(gmodels)
# Confusion matrix of predicted vs actual labels on the hold-out set.
CrossTable(sms_test_pred, sms_raw_test$type,
           prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
           dnn = c('predicted', 'actual'))
# Recorded output of prop.table(table(sms_test_pred)):
#     ham     spam
#0.8884892 0.1115108
############## | /Naive Bayes/sms.R | no_license | Vivek-DataScientist/assignments | R | false | false | 2,611 | r | #Import the dataset
# ---------------------------------------------------------------------
# SMS spam/ham classification with Naive Bayes (packages: tm, e1071).
# This section: load the raw messages, clean the text corpus, build a
# document-term matrix and split everything into train/test sets.
# ---------------------------------------------------------------------
library(readr)
# NOTE(review): hard-coded absolute Windows path -- runs only on the
# original author's machine; a relative path would be more portable.
sms_raw <- read_csv("C:\\Users\\jeeva\\Downloads\\R assignment\\Naive Bayes\\sms_raw_NB.csv")
sms_raw$type <- factor(sms_raw$type)#factorize the ham and spam
# build a corpus using the text mining (tm) package
# NOTE(review): install.packages() inside an analysis script re-installs
# the package on every run; normally done once, interactively.
install.packages("tm") #install tm package
library(tm) #import tm package
sms_corpus <- Corpus(VectorSource(sms_raw$text))
# Re-encode every document as UTF-8, substituting invalid bytes, so the
# tm_map() transformations below do not fail on malformed characters.
sms_corpus <- tm_map(sms_corpus, function(x) iconv(enc2utf8(x), sub='byte'))
# clean up the corpus using tm_map()
corpus_clean <- tm_map(sms_corpus, tolower) #change to lower
corpus_clean <- tm_map(corpus_clean, removeNumbers) #remove numbers
corpus_clean <- tm_map(corpus_clean, removeWords, stopwords()) #remove stopwords
corpus_clean <- tm_map(corpus_clean, removePunctuation) #remove punctuation
corpus_clean <- tm_map(corpus_clean, stripWhitespace) #remove space
# create a document-term sparse matrix (rows = messages, cols = words)
sms_dtm <- DocumentTermMatrix(corpus_clean)
sms_dtm
# creating training and test datasets
# NOTE(review): the 4169/1390 split assumes exactly 5559 rows in their
# original order -- confirm against the input file.
sms_raw_train <- sms_raw[1:4169, ]
sms_raw_test <- sms_raw[4170:5559, ]
sms_dtm_train <- sms_dtm[1:4169, ]
sms_dtm_test <- sms_dtm[4170:5559, ]
sms_corpus_train <- corpus_clean[1:4169]
sms_corpus_test <- corpus_clean[4170:5559]
# check that the proportion of spam is similar
prop.table(table(sms_raw_train$type))
prop.table(table(sms_raw_test$type))
# indicator features for frequent words
# dictionary of words which are used at least 5 times in the training DTM
sms_dict <- findFreqTerms(sms_dtm_train, 5)
sms_train <- DocumentTermMatrix(sms_corpus_train, list(dictionary = sms_dict))
sms_test <- DocumentTermMatrix(sms_corpus_test, list(dictionary = sms_dict))
# convert counts to a factor
# custom function: if a word is used more than 0 times then mention 1 else mention 0
# Convert word counts to a "No"/"Yes" indicator factor.
#
# Any positive count maps to "Yes", zero maps to "No"; both levels are
# always present so every converted column has a consistent factor
# structure even when one class is absent.  The original version built
# an intermediate 0/1 vector with ifelse() and ended on an assignment
# (an invisible return); this returns the factor directly and visibly.
#
# @param x numeric vector of word counts (a DTM column under apply())
# @return factor of length(x) with levels "No", "Yes"
convert_counts <- function(x) {
  factor(x > 0, levels = c(FALSE, TRUE), labels = c("No", "Yes"))
}
# apply() convert_counts() to columns of train/test data
# Margin = 2 is for columns
# Margin = 1 is for rows
# Result: matrices of "Yes"/"No" word-presence indicators.
sms_train <- apply(sms_train, MARGIN = 2, convert_counts)
sms_test <- apply(sms_test, MARGIN = 2, convert_counts)
## Training a model on the data ----
# NOTE(review): install.packages() inside an analysis script re-installs
# the package on every run; normally done once, interactively.
install.packages("e1071")
library(e1071)
# Fit Naive Bayes: features = word indicators, labels = ham/spam type.
sms_classifier <- naiveBayes(sms_train, sms_raw_train$type)
sms_classifier
## Evaluating model performance
sms_test_pred <- predict(sms_classifier, sms_test)
table(sms_test_pred)
prop.table(table(sms_test_pred))
library(gmodels)
# Confusion matrix of predicted vs actual labels on the hold-out set.
CrossTable(sms_test_pred, sms_raw_test$type,
           prop.chisq = FALSE, prop.t = FALSE, prop.r = FALSE,
           dnn = c('predicted', 'actual'))
# Recorded output of prop.table(table(sms_test_pred)):
#     ham     spam
#0.8884892 0.1115108
############## |
shinyUI(navbarPage("Cluster Experiment",
tabPanel("Welcome Page",
startPageMessage("startMessage", "")),
tabPanel("Getting Started",
tabsetPanel(
tabPanel("Setup",
h4("Set working directory"),
p("Enter a working directory for this Cluster Experiment session and click on 'Choose Working Directory' to set it"),
fluidRow(
column(6,
textInput("workingDirectory", label = "eg: 'homeDirectory/subdirectory/filename.r",
value = path.expand("~"), width = '100%')
)
),
actionButton("createWD", "Choose Working Directory"),
tags$hr(),
checkboxInput("makeScript", label = "Would you like to create a reproducible R script from this work session?", value = FALSE),
conditionalPanel(condition = "input.makeScript",
fluidRow(
column(6,
p("Enter file path and name of file to store script"),
uiOutput("createScriptInputs")
),
column(6,
p("Enter any descriptive comments for the beginning of the R file:"),
textInput("fileComments", label = "eg: Name, date, experiment", value = "")
)
),
p("Click below on 'Create File' to create the R script. If file already exists, any code will be appended to the end of existing file"),
actionButton("createReproducibleFile", label = "Create File")
),
tags$hr(),
checkboxInput("autoCreateObject", label = "Would you like to automatically save the internal cluster experiment object every time it is updated?", value = FALSE),
conditionalPanel(condition = "input.autoCreateObject",
p("Enter file path and name (with extension .rds, see 'saveRDS') in order to create a continuously updated R object:"),
uiOutput("createObjectInputs")
)
),
tabPanel("Upload Data",
fluidRow(
column(12,p("The following choices regarding transformation of the data (will take effect only when run clusterMany/RSEC)"))
),
fluidRow(
column(12,countInfo("trans"))
),
tabsetPanel(
tabPanel("RDS file input",
rdaFileInput("fileInput", "User rds file"),
h4("Summary of object uploaded:"),
uiOutput("isRda")),
tabPanel("CSV format input",
fluidRow(
column(8, csvAssay("fileInput", "")),
column(4,
h5(" First 4 rows and columns of uploaded table:"),
tableOutput("csvAssayContents")
)
),
fluidRow(
column(8, csvColData("fileInput", "")),
column(4,
h5(" First 4 rows and columns of uploaded table:"),
tableOutput("csvColContents")
)
),
fluidRow(
column(8, csvRowData("fileInput", "")),
column(4,
h5(" First 4 rows and columns of uploaded table:"),
tableOutput("csvRowContents")
)
),
actionButton("makeObject",
"Create Summarized Experiment object from selected data"),
h5("Summary of summarized experiment created from uploaded data:"),
#h3(paste(capture.output(show(sE)),collapse="\n")),
uiOutput("isAssay")
)
)
)
)
),
tabPanel("RSEC",
# h3("Core imputs for RSEC")
# ),
fluidRow(
column(6,
#Displays basic help text for Shiny App and clusterMany
RSECHelpText()
),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("RSECCode"),
#Action button that allows one to run above code
actionButton("runRSEC", "Run This Code"),
textOutput("numRSECIterations")
)
),
navlistPanel(
tabPanel("Main Options",
RSECInputs("rsec")
),
tabPanel("Dimensionality Reduction",
#Allows user to enter all inputs
h3("Choose Dimensionality Reduction Options"),
dimReduceInput("rsec", "dim inputs",isRSEC=TRUE,sidelabel="Set dimensionality reduction for clustering?"),
dimReduceInput("rsec", isRSEC=TRUE,singleChoice=TRUE,sidelabel="Set dimensionality reduction for making dendrogram?",dimVal="dendroReduce",ndimVal="dendroNDims")
),
tabPanel("Specialized control",
specializedInputs("rsec", "specialized inputs",isRSEC=TRUE)
),
tabPanel("Plot Clusters",
tabsetPanel(
tabPanel("Default Plot",
downloadButton("downloadDefaultPlotPCRSEC", label = "DownLoad this Plot"),
plotOutput("imgRSEC")
)
)
)
)
),
tabPanel("Cluster Many",
conditionalPanel(condition = paste0("!input['showCMDir']"),
column(12,clusterManyHelpText())
),
fluidRow(
column(6, checkboxInput("showCMDir", value = FALSE, label = "Hide Directions?")),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("clusterManyCode"),
#Action button that allows one to run above code
actionButton("runCM", "Run This Code"),
textOutput("numClusterIterations")
)
),
navlistPanel(
tabPanel("Main Options",
h3("Core imputs for clusterMany"),
sSBInputs("parameters", "SSB inputs")
),
tabPanel("Dimensionality Reduction",
#Allows user to enter all inputs
h3("Choose Dimensionality Reduction Options"),
dimReduceInput("parameters", "dim inputs")
),
tabPanel("Further clustering options",
h3("Warning!"),
h4("If you change options on the 'Main Options' tab, you should return to this tab to see what options have changed. It is best to complete the 'Main Options' before starting this page"),
clusterFunctionInputs("parameters", "cluster function inputs")
),
tabPanel("Specialized control",
specializedInputs("parameters", "specialized inputs")
),
tabPanel("Plot Clusters",
tabsetPanel(
tabPanel("Default Plot",
downloadButton("downloadDefaultPlotPCCM", label = "DownLoad this Plot"),
plotOutput("imgCE")
)
)
)
)
),
tabPanel("Combine Many",
# conditionalPanel(condition = paste0("!input['showCombManyDir']"),
# column(12,combineManyHelpText())
# ),
fluidRow(
column(6, combineManyHelpText()),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("combineManyCode"),
#Action button that allows one to run above code
actionButton("runCombineMany", "Run This Code")
)
),
navlistPanel(
tabPanel("Combine Many Inputs",
h2("Inputs for Combine Many"),
#uiOutput("combineManyWhichClusters"),
combineManyInput("cMInputs", "")
),
tabPanel("Plot Clusters",
downloadButton("downloadDefaultPlotPCCombineMany", label = "DownLoad this Plot"),
plotOutput("imgCombineManyPC")
),
tabPanel("Plot CoClusters",
downloadButton("downloadDefaultPlotCoClustersCombineMany", label = "DownLoad this Plot"),
plotOutput("imgCombineManyPCC")
)
)
),
tabPanel("Make Dendrogram",
fluidRow(
column(6,
#Displays basic help text for Shiny App and clusterMany
makeDendrogramHelpText()
),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("makeDendrogramCode"),
#Action button that allows one to run above code
actionButton("runMakeDendrogram", "Run This Code")
)
),
navlistPanel(
tabPanel("Make Dendrogram",
h2("Inputs for Make Dendrogram"),
makeDendrogramInput("mDInputs", "")#,
#uiOutput("makeDendrogramWhichClusters")
),
tabPanel("Plot Dendrogram",
downloadButton("downloadDefaultPlotPDMD", label = "DownLoad this Plot"),
plotOutput("imgPlotDendrogram")
),
tabPanel("Plot HeatMap",
downloadButton("downloadDefaultPlotPHMD", label = "DownLoad this Plot"),
plotOutput("imgPlotHeatmapMD")
)
)
),
tabPanel("Merge Clusters",
fluidRow(
column(6,
mergeClustersHelpText()
),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
#Action button that allows one to run above code
textOutput("mergeClustersCode")
),
fluidRow(
column(3,actionButton("runMergeClusters", "Run This Code")),
column(3,actionButton("updateDendrogram", "Update dendrogram"))
)
),
navlistPanel(
tabPanel("Dendrogram used for merging",
p("Informative Dendrogram for choosing how to merge cluster inputs:"),
downloadButton("downloadPlotPDMC", label = "DownLoad this Plot"),
plotOutput("imgInitalMergeClusters")
),
tabPanel("Set Parameters",
mergeClustersInput("mergeCInputs", "")
),
tabPanel("Plot Clusters",
downloadButton("downloadDefaultPlotClustersMergeClusters", label = "DownLoad this Plot"),
plotOutput("imgPlotClustersMergeClusters")
),
tabPanel("Plot Heatmap",
downloadButton("downloadDefaultPlotHeatmapMergeClusters", label = "DownLoad this Plot"),
plotOutput("imgPlotHeatmapMergeClusters")
),
tabPanel("PCA Plot",
h3("PCA plot feature in development")
)
)
),
navbarMenu("Personalized Plots",
tabPanel("plotClusters",
fluidRow(
column(6,
plotClustersHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotClustersCode"),
actionButton("runPCCM", "Run Plot Cluster Code")
)
),
navlistPanel("Plot Clusters",
tabPanel("Specialized Inputs",
h3("Specialized Plot Cluster Inputs"),
uiOutput("plotClustersWhichClusters"),
plotClustersInput("pCInputs",
"inputs for plot Clusters, cM")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotPCCM", label = "DownLoad this Plot"),
plotOutput("imgPC")
)
)
),
tabPanel("plotCoClustering",
fluidRow(
column(6,
plotCoClusteringHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotCoClusteringCode"),
actionButton("runPlotCoClustering", "Run Plot CoClustering Code")
)
),
navlistPanel(tabPanel("Specialized Inputs",
plotCoClusteringInput("plotCoClustering",
"inputs for plotCoClustering")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotCoClustering",
label = "DownLoad this Plot"),
plotOutput("imgSpecializedPlotCoClustering")
)
)
),
tabPanel("Plot Dendrogram",
fluidRow(
column(6,
plotDendrogramHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotDendrogramCode"),
actionButton("runPlotDendrogram", "Run Plot Dendrogram Code")
)
),
navlistPanel("Plot Dendrogram",
tabPanel("Specialized Inputs",
plotDendrogramInput("plotDendrogram",
"inputs for plotDendrogram")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotDendrogram",
label = "DownLoad this Plot"),
plotOutput("imgSpecializedPlotDendrogram")
)
)
),
tabPanel("Plot Heatmap",
fluidRow(
column(6,
plotHeatmapHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotHeatmapCode"),
actionButton("runPlotHeatmap", "Run Plot Heatmap Code")
)
),
navlistPanel(
tabPanel("Specialized Inputs",
plotHeatmapInput("plotHeatmap",
"inputs for plotHeatmap")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotHeatmap",
label = "DownLoad this Plot"),
plotOutput("imgSpecializedPlotHeatmap")
)
)
),
tabPanel("PCA Plot",
navlistPanel(
tabPanel("Specialized Inputs"),
tabPanel("Output Plot")
)
)
),
# "Save Object" tab: helper UI (saveObjectMessage, defined elsewhere) plus a
# text output that reports the result of the save action.
tabPanel("Save Object",
  saveObjectMessage("saveObject", ""),
  textOutput("saveObjectMessage")
),
# "What clusters" tab: whatClusters helper UI, a button that triggers a
# summary, and the table output ("cESummary") that displays it.
tabPanel("What clusters",
  whatClusters("whatClusters", ""),
  # NOTE(review): input ID "showSummmary" contains a triple 'm'; it must match
  # the server-side observer, so do not rename it on this side alone.
  actionButton("showSummmary", "Show Summary"),
  tableOutput("cESummary")
)
)
)
| /inst/shinyApp/ui.r | no_license | epurdom/clusterExperimentShiny | R | false | false | 25,784 | r | shinyUI(navbarPage("Cluster Experiment",
tabPanel("Welcome Page",
startPageMessage("startMessage", "")),
tabPanel("Getting Started",
tabsetPanel(
tabPanel("Setup",
h4("Set working directory"),
p("Enter a working directory for this Cluster Experiment session and click on 'Choose Working Directory' to set it"),
fluidRow(
column(6,
textInput("workingDirectory", label = "eg: 'homeDirectory/subdirectory/filename.r",
value = path.expand("~"), width = '100%')
)
),
actionButton("createWD", "Choose Working Directory"),
tags$hr(),
checkboxInput("makeScript", label = "Would you like to create a reproducible R script from this work session?", value = FALSE),
conditionalPanel(condition = "input.makeScript",
fluidRow(
column(6,
p("Enter file path and name of file to store script"),
uiOutput("createScriptInputs")
),
column(6,
p("Enter any descriptive comments for the beginning of the R file:"),
textInput("fileComments", label = "eg: Name, date, experiment", value = "")
)
),
p("Click below on 'Create File' to create the R script. If file already exists, any code will be appended to the end of existing file"),
actionButton("createReproducibleFile", label = "Create File")
),
tags$hr(),
checkboxInput("autoCreateObject", label = "Would you like to automatically save the internal cluster experiment object every time it is updated?", value = FALSE),
conditionalPanel(condition = "input.autoCreateObject",
p("Enter file path and name (with extension .rds, see 'saveRDS') in order to create a continuously updated R object:"),
uiOutput("createObjectInputs")
)
),
tabPanel("Upload Data",
fluidRow(
column(12,p("The following choices regarding transformation of the data (will take effect only when run clusterMany/RSEC)"))
),
fluidRow(
column(12,countInfo("trans"))
),
tabsetPanel(
tabPanel("RDS file input",
rdaFileInput("fileInput", "User rds file"),
h4("Summary of object uploaded:"),
uiOutput("isRda")),
tabPanel("CSV format input",
fluidRow(
column(8, csvAssay("fileInput", "")),
column(4,
h5(" First 4 rows and columns of uploaded table:"),
tableOutput("csvAssayContents")
)
),
fluidRow(
column(8, csvColData("fileInput", "")),
column(4,
h5(" First 4 rows and columns of uploaded table:"),
tableOutput("csvColContents")
)
),
fluidRow(
column(8, csvRowData("fileInput", "")),
column(4,
h5(" First 4 rows and columns of uploaded table:"),
tableOutput("csvRowContents")
)
),
actionButton("makeObject",
"Create Summarized Experiment object from selected data"),
h5("Summary of summarized experiment created from uploaded data:"),
#h3(paste(capture.output(show(sE)),collapse="\n")),
uiOutput("isAssay")
)
)
)
)
),
tabPanel("RSEC",
# h3("Core imputs for RSEC")
# ),
fluidRow(
column(6,
#Displays basic help text for Shiny App and clusterMany
RSECHelpText()
),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("RSECCode"),
#Action button that allows one to run above code
actionButton("runRSEC", "Run This Code"),
textOutput("numRSECIterations")
)
),
navlistPanel(
tabPanel("Main Options",
RSECInputs("rsec")
),
tabPanel("Dimensionality Reduction",
#Allows user to enter all inputs
h3("Choose Dimensionality Reduction Options"),
dimReduceInput("rsec", "dim inputs",isRSEC=TRUE,sidelabel="Set dimensionality reduction for clustering?"),
dimReduceInput("rsec", isRSEC=TRUE,singleChoice=TRUE,sidelabel="Set dimensionality reduction for making dendrogram?",dimVal="dendroReduce",ndimVal="dendroNDims")
),
tabPanel("Specialized control",
specializedInputs("rsec", "specialized inputs",isRSEC=TRUE)
),
tabPanel("Plot Clusters",
tabsetPanel(
tabPanel("Default Plot",
downloadButton("downloadDefaultPlotPCRSEC", label = "DownLoad this Plot"),
plotOutput("imgRSEC")
)
)
)
)
),
tabPanel("Cluster Many",
conditionalPanel(condition = paste0("!input['showCMDir']"),
column(12,clusterManyHelpText())
),
fluidRow(
column(6, checkboxInput("showCMDir", value = FALSE, label = "Hide Directions?")),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("clusterManyCode"),
#Action button that allows one to run above code
actionButton("runCM", "Run This Code"),
textOutput("numClusterIterations")
)
),
navlistPanel(
tabPanel("Main Options",
h3("Core imputs for clusterMany"),
sSBInputs("parameters", "SSB inputs")
),
tabPanel("Dimensionality Reduction",
#Allows user to enter all inputs
h3("Choose Dimensionality Reduction Options"),
dimReduceInput("parameters", "dim inputs")
),
tabPanel("Further clustering options",
h3("Warning!"),
h4("If you change options on the 'Main Options' tab, you should return to this tab to see what options have changed. It is best to complete the 'Main Options' before starting this page"),
clusterFunctionInputs("parameters", "cluster function inputs")
),
tabPanel("Specialized control",
specializedInputs("parameters", "specialized inputs")
),
tabPanel("Plot Clusters",
tabsetPanel(
tabPanel("Default Plot",
downloadButton("downloadDefaultPlotPCCM", label = "DownLoad this Plot"),
plotOutput("imgCE")
)
)
)
)
),
tabPanel("Combine Many",
# conditionalPanel(condition = paste0("!input['showCombManyDir']"),
# column(12,combineManyHelpText())
# ),
fluidRow(
column(6, combineManyHelpText()),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("combineManyCode"),
#Action button that allows one to run above code
actionButton("runCombineMany", "Run This Code")
)
),
navlistPanel(
tabPanel("Combine Many Inputs",
h2("Inputs for Combine Many"),
#uiOutput("combineManyWhichClusters"),
combineManyInput("cMInputs", "")
),
tabPanel("Plot Clusters",
downloadButton("downloadDefaultPlotPCCombineMany", label = "DownLoad this Plot"),
plotOutput("imgCombineManyPC")
),
tabPanel("Plot CoClusters",
downloadButton("downloadDefaultPlotCoClustersCombineMany", label = "DownLoad this Plot"),
plotOutput("imgCombineManyPCC")
)
)
),
tabPanel("Make Dendrogram",
fluidRow(
column(6,
#Displays basic help text for Shiny App and clusterMany
makeDendrogramHelpText()
),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
textOutput("makeDendrogramCode"),
#Action button that allows one to run above code
actionButton("runMakeDendrogram", "Run This Code")
)
),
navlistPanel(
tabPanel("Make Dendrogram",
h2("Inputs for Make Dendrogram"),
makeDendrogramInput("mDInputs", "")#,
#uiOutput("makeDendrogramWhichClusters")
),
tabPanel("Plot Dendrogram",
downloadButton("downloadDefaultPlotPDMD", label = "DownLoad this Plot"),
plotOutput("imgPlotDendrogram")
),
tabPanel("Plot HeatMap",
downloadButton("downloadDefaultPlotPHMD", label = "DownLoad this Plot"),
plotOutput("imgPlotHeatmapMD")
)
)
),
tabPanel("Merge Clusters",
fluidRow(
column(6,
mergeClustersHelpText()
),
column(6,
#textual output of code that is to be run
h3("Code to be run internally:"),
#Action button that allows one to run above code
textOutput("mergeClustersCode")
),
fluidRow(
column(3,actionButton("runMergeClusters", "Run This Code")),
column(3,actionButton("updateDendrogram", "Update dendrogram"))
)
),
navlistPanel(
tabPanel("Dendrogram used for merging",
p("Informative Dendrogram for choosing how to merge cluster inputs:"),
downloadButton("downloadPlotPDMC", label = "DownLoad this Plot"),
plotOutput("imgInitalMergeClusters")
),
tabPanel("Set Parameters",
mergeClustersInput("mergeCInputs", "")
),
tabPanel("Plot Clusters",
downloadButton("downloadDefaultPlotClustersMergeClusters", label = "DownLoad this Plot"),
plotOutput("imgPlotClustersMergeClusters")
),
tabPanel("Plot Heatmap",
downloadButton("downloadDefaultPlotHeatmapMergeClusters", label = "DownLoad this Plot"),
plotOutput("imgPlotHeatmapMergeClusters")
),
tabPanel("PCA Plot",
h3("PCA plot feature in development")
)
)
),
navbarMenu("Personalized Plots",
tabPanel("plotClusters",
fluidRow(
column(6,
plotClustersHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotClustersCode"),
actionButton("runPCCM", "Run Plot Cluster Code")
)
),
navlistPanel("Plot Clusters",
tabPanel("Specialized Inputs",
h3("Specialized Plot Cluster Inputs"),
uiOutput("plotClustersWhichClusters"),
plotClustersInput("pCInputs",
"inputs for plot Clusters, cM")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotPCCM", label = "DownLoad this Plot"),
plotOutput("imgPC")
)
)
),
tabPanel("plotCoClustering",
fluidRow(
column(6,
plotCoClusteringHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotCoClusteringCode"),
actionButton("runPlotCoClustering", "Run Plot CoClustering Code")
)
),
navlistPanel(tabPanel("Specialized Inputs",
plotCoClusteringInput("plotCoClustering",
"inputs for plotCoClustering")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotCoClustering",
label = "DownLoad this Plot"),
plotOutput("imgSpecializedPlotCoClustering")
)
)
),
tabPanel("Plot Dendrogram",
fluidRow(
column(6,
plotDendrogramHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotDendrogramCode"),
actionButton("runPlotDendrogram", "Run Plot Dendrogram Code")
)
),
navlistPanel("Plot Dendrogram",
tabPanel("Specialized Inputs",
plotDendrogramInput("plotDendrogram",
"inputs for plotDendrogram")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotDendrogram",
label = "DownLoad this Plot"),
plotOutput("imgSpecializedPlotDendrogram")
)
)
),
tabPanel("Plot Heatmap",
fluidRow(
column(6,
plotHeatmapHelpText()
),
column(6,
h3("Code to be Run:"),
textOutput("plotHeatmapCode"),
actionButton("runPlotHeatmap", "Run Plot Heatmap Code")
)
),
navlistPanel(
tabPanel("Specialized Inputs",
plotHeatmapInput("plotHeatmap",
"inputs for plotHeatmap")
),
tabPanel("Output Plot",
downloadButton("downloadSpecializedPlotHeatmap",
label = "DownLoad this Plot"),
plotOutput("imgSpecializedPlotHeatmap")
)
)
),
tabPanel("PCA Plot",
navlistPanel(
tabPanel("Specialized Inputs"),
tabPanel("Output Plot")
)
)
),
# "Save Object" tab: helper UI (saveObjectMessage, defined elsewhere) plus a
# text output that reports the result of the save action.
tabPanel("Save Object",
  saveObjectMessage("saveObject", ""),
  textOutput("saveObjectMessage")
),
# "What clusters" tab: whatClusters helper UI, a button that triggers a
# summary, and the table output ("cESummary") that displays it.
tabPanel("What clusters",
  whatClusters("whatClusters", ""),
  # NOTE(review): input ID "showSummmary" contains a triple 'm'; it must match
  # the server-side observer, so do not rename it on this side alone.
  actionButton("showSummmary", "Show Summary"),
  tableOutput("cESummary")
)
)
)
|
#' @title Company Search
#'
#' @description This function gives a list of companies, their company numbers
#' and other information that match the company search term. It queries the
#' Companies House search API and pages through every page of results.
#' @param company Company name search term
#' @param mkey Authorisation key
#' @export
#' @return Dataframe listing company name, company number, postcode of all
#' companies matching the search term; a zero-column data frame if there are
#' no matches.
CompanySearch <- function(company,mkey) {
  ## URL-encode the search term for the query string: space -> '+', '&' -> '%26'
  firmNAME<-gsub(" ", "+",company)
  firmNAME<-gsub("&","%26",firmNAME)
  FIRMurl<-paste0("https://api.company-information.service.gov.uk/search/companies?q=",firmNAME)
  ## First request is only used to learn total_results / items_per_page.
  ## The API key is sent as the basic-auth username with an empty password.
  firmTEST<-httr::GET(FIRMurl, httr::authenticate(mkey, ""))
  firmTEXT<-httr::content(firmTEST, as="text")
  JLfirm<-jsonlite::fromJSON(firmTEXT, flatten=TRUE)
  MM<-JLfirm$total_results
  ## Guard: no matches (or a response without total_results) would otherwise
  ## make the paging loop below run with j = 1, 0 and fail downstream.
  if (is.null(MM) || MM == 0) {
    return(data.frame())
  }
  ## Number of result pages to fetch
  MM2b<-ceiling(MM/JLfirm$items_per_page)
  ## Preallocate the page list; seq_len() (not 1:MM2b) so that a zero page
  ## count would give zero iterations instead of the 1:0 = c(1, 0) trap.
  DFfirmL<-vector("list", MM2b)
  for (j in seq_len(MM2b)){
    FIRMurl2<-paste0("https://api.company-information.service.gov.uk/search/companies?q=",firmNAME,"&page_number=",j)
    firmTEST2<-httr::GET(FIRMurl2, httr::authenticate(mkey, ""))
    firmTEXT2<-httr::content(firmTEST2, as="text")
    JLfirm2<-jsonlite::fromJSON(firmTEXT2, flatten=TRUE)
    DFfirmL[[j]]<-JLfirm2
  }
  ## Stack all result pages into a single data frame
  DFfirm<-plyr::ldply(DFfirmL,data.frame)
  ## Pull out the columns of interest from the flattened JSON
  DFfirmNAMES<-DFfirm$items.title
  DFfirmNUMBER<-as.character(DFfirm$items.company_number)
  DFfirmDateofCreation<-DFfirm$items.date_of_creation
  DFfirmTYPE<-DFfirm$items.company_type
  DFfirmSTATUS<-DFfirm$items.company_status
  DFfirmADDRESS<-DFfirm$items.address_snippet
  DFfirmLOCAL<-DFfirm$items.address.locality
  DFfirmPOSTCODE<-DFfirm$items.address.postal_code
  ## Assemble the tidy output: one row per matching company
  myDf <- data.frame(
    id.search.term = company,
    company.name=DFfirmNAMES,
    company.number = DFfirmNUMBER,
    Date.of.Creation = DFfirmDateofCreation,
    company.type = DFfirmTYPE,
    company.status = DFfirmSTATUS,
    address = DFfirmADDRESS,
    Locality = DFfirmLOCAL,
    postcode = DFfirmPOSTCODE)
  return(myDf)
}
| /R/CompanySearch_function.R | no_license | MatthewSmith430/CompaniesHouse | R | false | false | 2,283 | r | #' @title Company Search
#'
#' @description This function gives a list of companies, their company numbers and other information that match the company search term
#' @param company Company name search term
#' @param mkey Authorisation key
#' @export
#' @return Dataframe listing company name, company number, postcode of all companies matching the search term
CompanySearch <- function(company,mkey) {
  # URL-encode the search term for the query string (space -> '+', '&' -> '%26')
  firmNAME<-gsub(" ", "+",company)
  firmNAME<-gsub("&","%26",firmNAME)
  #FIRMurl<-paste0("https://api.companieshouse.gov.uk/search/companies?q=",firmNAME)
  FIRMurl<-paste0("https://api.company-information.service.gov.uk/search/companies?q=",firmNAME)
  # First request: only used to read total_results / items_per_page.
  # The API key is sent as the basic-auth username with an empty password.
  firmTEST<-httr::GET(FIRMurl, httr::authenticate(mkey, ""))
  firmTEXT<-httr::content(firmTEST, as="text")
  JLfirm<-jsonlite::fromJSON(firmTEXT, flatten=TRUE)
  MM<-JLfirm$total_results
  # Number of result pages = ceiling(total results / page size)
  MM2<-MM/JLfirm$items_per_page
  MM2b<-ceiling(MM2)
  DFfirmL<-list()
  # NOTE(review): if the search yields 0 results, MM2b is 0 and 1:MM2b
  # iterates over c(1, 0); seq_len(MM2b) would be safer here.
  for (j in 1:MM2b){
  # Fetch each page of results and keep the parsed JSON
  #FIRMurl2<-paste0("https://api.companieshouse.gov.uk/search/companies?q=",firmNAME,"&page_number=",j)
  FIRMurl2<-paste0("https://api.company-information.service.gov.uk/search/companies?q=",firmNAME,"&page_number=",j)
  firmTEST2<-httr::GET(FIRMurl2, httr::authenticate(mkey, ""))
  firmTEXT2<-httr::content(firmTEST2, as="text")
  JLfirm2<-jsonlite::fromJSON(firmTEXT2, flatten=TRUE)
  DFfirmL[[j]]<-JLfirm2
  }
  # Stack all result pages into a single data frame
  DFfirm<-plyr::ldply(DFfirmL,data.frame)
  #suppressWarnings(purrr::map_df(DFfirmL,data.frame))
  # Pull out the columns of interest from the flattened JSON
  DFfirmNAMES<-DFfirm$items.title
  DFfirmNUMBER<-as.character(DFfirm$items.company_number)
  DFfirmDateofCreation<-DFfirm$items.date_of_creation
  DFfirmTYPE<-DFfirm$items.company_type
  DFfirmSTATUS<-DFfirm$items.company_status
  DFfirmADDRESS<-DFfirm$items.address_snippet
  #DFfirmCOUNTRY<-DFfirm$items.address.country
  DFfirmLOCAL<-DFfirm$items.address.locality
  DFfirmPOSTCODE<-DFfirm$items.address.postal_code
  # Assemble the tidy output: one row per matching company
  myDf <- data.frame(
    id.search.term = company,
    company.name=DFfirmNAMES,
    company.number = DFfirmNUMBER,
    Date.of.Creation = DFfirmDateofCreation,
    company.type = DFfirmTYPE,
    company.status = DFfirmSTATUS,
    address = DFfirmADDRESS,
    Locality = DFfirmLOCAL,
    postcode = DFfirmPOSTCODE)
  return(myDf)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StatCompR.R
\name{DSelector}
\alias{DSelector}
\title{Dantzig Selector}
\usage{
DSelector(X, y, sigma, lambda = 3.5)
}
\arguments{
\item{X}{n * p predictor matrix}
\item{y}{n * 1 vector of observations}
\item{sigma}{sd of noise}
\item{lambda}{regularizing parameter}
}
\value{
\code{beta}, the estimated coefficient vector.
}
\description{
Dantzig selector for sparse estimation
}
\examples{
\dontrun{
a=GSD(n=72,p=256,spa=8)
X=a$X
Y=a$Y
theta=sqrt(8/72)/3
DSelector(X,Y,theta)
}
}
| /man/DSelector.Rd | no_license | oniontimes/StatComp20049 | R | false | true | 533 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/StatCompR.R
\name{DSelector}
\alias{DSelector}
\title{Dantzig Selector}
\usage{
DSelector(X, y, sigma, lambda = 3.5)
}
\arguments{
\item{X}{n * p predictor matrix}
\item{y}{n * 1 vector of observations}
\item{sigma}{sd of noise}
\item{lambda}{regularizing parameter}
}
\value{
\code{beta}, the estimated coefficient vector.
}
\description{
Dantzig selector for sparse estimation
}
\examples{
\dontrun{
a=GSD(n=72,p=256,spa=8)
X=a$X
Y=a$Y
theta=sqrt(8/72)/3
DSelector(X,Y,theta)
}
}
|
## This script creates the plot #3 and saves it in a PNG file called "plot3.png"
## (the three Sub_metering series plotted against date-time, with a legend).
## It first loads the package "data.table" containing the very fast fread() function.
library(data.table)
## Then with fread() it reads the content of the input file present in the working directory
## fread() allows to use as input a shell command that preprocesses the file (see ?fread), so
## only the lines that match the "^[12]\\/2\\/2007" regex are loaded into the data.frame
## NOTE(review): the grep pre-filter assumes a Unix-like shell is available —
## confirm before running on Windows.
dataFile<-"household_power_consumption.txt"
consumptions <- fread(paste("grep ^[12]/2/2007", dataFile), na.strings = c("?", ""))
## it reads the names from the first line of the file and associates them to the data.table columns
## (the grep output itself has no header row)
setnames(consumptions, colnames(fread(dataFile, nrows=0)))
## opens the graphics device of png type
png(filename = "plot3.png",width = 480, height = 480, units = "px", bg = "transparent", pointsize=12,
    type = "cairo-png")
## sets the language to have week-days names in English
Sys.setlocale("LC_TIME", "en_US.UTF-8")
## prepares the plotting area
par(pin=c(4.8,4.8)) ## sets the plotting area to a square of 4.8"x4.8"
par(ps=12) ## sets the font size
par(mar=c(5,4,4,2)) ## margins in lines: bottom, left, top, right
par(mgp=c(3,1,0)) ## sets the default margins line for the axis title, axis labels and lines
## produces the objects of class POSIXct associated to Date and Time, to use as x-coordinate
x_coord<-as.POSIXct(strptime(paste(consumptions$Date, consumptions$Time),"%d/%m/%Y %H:%M:%S"))
## prints the graph on the active device: Sub_metering_1 as the base line plot,
## Sub_metering_2 and _3 layered on top, then the colour legend
plot(x_coord, consumptions$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", col="black")
lines(x_coord, consumptions$Sub_metering_2, col="red")
lines(x_coord, consumptions$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), pch="_" ,lwd=3, col=c("black", "red", "blue"))
## closes the graphics device png
dev.off() | /plot3.R | no_license | maxmax65/ExData_Plotting1 | R | false | false | 1,884 | r | ## This script creates the plot #3 and saves it in a PNG file called "plot3.png"
## It first loads the package "data.table" containing the very fast fread() function.
library(data.table)
## Then with fread() it reads the content of the input file present in the working directory
## fread() allows to use as input a shell command that preprocesses the file (see ?fread), so
## only the lines that match the "^[12]\\/2\\/2007" regex are loaded into the data.frame
dataFile<-"household_power_consumption.txt"
consumptions <- fread(paste("grep ^[12]/2/2007", dataFile), na.strings = c("?", ""))
## it reads the names from the first line of the file and associates them to the data.table columns
setnames(consumptions, colnames(fread(dataFile, nrows=0)))
## opens the graphics device of png type
png(filename = "plot3.png",width = 480, height = 480, units = "px", bg = "transparent", pointsize=12,
type = "cairo-png")
## sets the language to have week-days names in English
Sys.setlocale("LC_TIME", "en_US.UTF-8")
## prepares the plotting area
par(pin=c(4.8,4.8)) ## sets the plotting area to a square of 4.8"x4.8"
par(ps=12) ## sets the font size
par(mar=c(5,4,4,2))
par(mgp=c(3,1,0)) ## sets the default margins line for the axis title, axis labels and lines
## produces the objects of class POSIXct associated to Date and Time, to use as x-coordinate
x_coord<-as.POSIXct(strptime(paste(consumptions$Date, consumptions$Time),"%d/%m/%Y %H:%M:%S"))
## prints the graph on the active device
plot(x_coord, consumptions$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", col="black")
lines(x_coord, consumptions$Sub_metering_2, col="red")
lines(x_coord, consumptions$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), pch="_" ,lwd=3, col=c("black", "red", "blue"))
## closes the graphics device png
dev.off() |
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
theme = "bootstrap.css",
# Application title
titlePanel("Deputados Nordestinos Investigados na Operação Lava-Jato"),
h4("Uma análise sobre os gastos dos seis deputados nordestinos investigados", align = "center"),
p(""),
p(""),
h5("Por", tags$a(href= "https://www.linkedin.com/in/arthursampaiopcorreia?", "Arthur Sampaio"), align = "right"),
h2("A Operação Lava-Jato"),
p("Nas mídias muito se fala da Operação Lava-Jato, a maior investigação sobre corrupção conduzida até hoje em solo Brasileiro.
Ela começou investigando uma rede de doleiros que atuavam em vários setores e Estados e descobriu um vasto esquema de corrupção
na maior estatal do país - A Petrobrás, envolvendo desde políticos às maiores empreiteras do Brasil. Para enteder mais sobre
a Operação Lava Jato o ", tags$a(href = "http://lavajato.mpf.mp.br/entenda-o-caso", "Ministério Público Federal"), "criou um portal que explica sucintamente
todo os processos da operação."),
p("Cerca de 22 Deputados Federais, eleitos para representarem o povo, são acusados de pertecerem ao maior esquema de corrupção
brasileira que custou diretamente aos cofres públicos mais de R$ 6 bilhões que poderiam ser gastos por nós, povo. Seis desses vinte e dois deputados acusados são
nordestinos o que me deixa com um senso de dever mais agunçado para saber como estes seis gastam os nossos recursos, que são destinados à CEAP - Cota para Exercício da Atividade Parlamentar.\n\n\n"),
h3("Os dados"),
p("Os dados disponíveis no site da Transparência da Câmara Federal são em formato XML. A conversão para .csv (comma-separated value)
foi feita pelo professor Nazareno e disponibilizado no seu",tags$a(href = "https://github.com/nazareno/ciencia-de-dados-1/blob/master/dados/ano-atual.csv.tgz","GitHub"), "pessoal.
O banco de dados conta com as descrições dos dados parlamentares distribuídos em vinte e nove (29) variáveis, incluindo quando e onde ocorreu os gastos, o
valor do documento e nome do deputado, entre outras informações importantes para a análise."),
h3(tags$strong("Antes de mais nada: como é o comportamento desses gastos?")),
p("Os valores estão muito concentrados a esquerda do gráfico, assimétricos , além disto os valores
crescem exponencialmente. Para facilitar a visualização é plotada em um gráfico monolog."),
sidebarLayout(
sidebarPanel(
sliderInput("precision",
"Precisão da visualização",
min = 1, max = 250, value = 50)
),
mainPanel(
plotOutput(outputId = "behavoirData",hover = "hover"),
verbatimTextOutput("pHover")
)
),
p("Os valores estão concentrados entre R$ 50 e R$ 1000, como mostra o gráfico abaixo. Contudo, a maior
concetração de valores é entorno da mediana (R$ 556,20). Além disto, 75% dos gastos são inferiores a
R$ 565,90. Os valores variam de R$ -1901 referente compensação de bilhete aéreo e o maior valor gasto
é de R$ 39,6 mil do", tags$em("Deputado Roberto Britto"),"referente a divulgação com atividade parlamentar. "),
h3(tags$strong("Vamos verificar como cada deputado gasta sua Cota Parlamentar mensalmente?")),
p("Abaixo está os gastos mensais dos Senhores Deputados referentes a sua cota Parlamentar. É perciptível que
alguns deputados como os senhores",tags$strong("Aníbal Gomes e Waldír Maranhão"), "ainda não prestaram contas
dos seus gastos referentes aos meses de Maio e junho. Qual o motivo dessa não prestação de contas?"),
p("Ao pesquisar em páginas pessoais dos deputados não encontrei nenhuma informação sobre este motivo, em seguida fui pesquisar o que a legislação diz nesses casos."),
sidebarLayout(
sidebarPanel(
selectInput("deputiesName",
"Escolha o deputado investigado: ",
c("ANÍBAL GOMES", "AGUINALDO RIBEIRO", "ARTHUR LIRA", "EDUARDO DA FONTE", "WALDIR MARANHÃO", "ROBERTO BRITTO"))
),
mainPanel(
plotOutput(outputId = "deputieMonth", hover = "plot_hover"),
verbatimTextOutput("info")
)
),
p("Após me debruçar nas páginas da Câmara Federal encontrei o",tags$a(href = "http://www2.camara.leg.br/a-camara/estruturaadm/deapa/portal-da-posse/ato-da-mesa-43-ceap", "Ato de Mesa de número 43"),
", que no seu artigo 4 tem o seguinte insiso: ", align = "justify"),
p(tags$em("§ 12. A apresentação da documentação comprobatória do gasto disciplinado pela Cota de que trata este
Ato dar-se-á no prazo máximo de noventa dias após o fornecimento do produto ou serviço.")),
p("Assim, os deputados acima mencionados estão judicialmente amparados e tem ainda 60 dias, no mínimo, para prestar
conta dos seus gastos. Por esse motivo e com o intuito de aumentar a veracidade das informações aqui levantadas,
caro leito, irei analisar apenas os gastos referentes aos meses de Janeiro à Abril. Vamos começar esta investigação
com os gastos referentes à cada tipo de despesa."),
h6("¹Os valores negativos são referentes a compensação de passagens aéreas, que é quando o deputado utiliza do seu próprio dinheiro para
realizar a viagem e o CEAP reembolsa o mesmo.", align = "right"),
p("Além disto, o deputado baiano Roberto Britto no mês de Abril gastou mais de R$ 60 mil reais,", tags$a(href = 'http://www2.camara.leg.br/a-camara/estruturaadm/deapa/portal-da-posse/ceap-1', "R$ 25 mil"),
" a mais do que sua cota mensal. Já que cada deputado só pode gastar mensalmente um valor determinado pela legislação,
há algum anteparo legal que permite que o deputado em questão gaste 170% da sua cota sem nenhuma fiscalização?"),
p("Para responder mais uma questão foi recorrer aos Atos de Mesas da Câmara e encontrei o", tags$a( href = 'http://www2.camara.leg.br/legin/int/atomes/2009/atodamesa-43-21-maio-2009-588364-publicacaooriginal-112820-cd-mesa.html'," Ato de Mesa de número 23"),
", especificamente no Artigo 13, que diz o seguinte: "),
p(tags$em("Art. 13. O saldo da Cota não utilizado acumula-se ao longo do exercício financeiro, vedada a acumulação de saldo de
um exercício para o seguinte.")),
p(tags$em("Parágrafo 1º - A Cota somente poderá ser utilizada para despesas de competência do respectivo exercício financeiro.")),
p(tags$em("Parágrafo 2º - A importância que exceder, no exercício financeiro, o saldo de Cota disponível será deduzida automática e
integralmente da remuneração do parlamentar ou do saldo de acerto de contas de que ele seja credor, revertendo-se à conta
orçamentária própria da Câmara dos Deputados. ")),
p("Diante do descrito pela legislação é notório a facilidade em que os deputados têm para exceder suas cotas. Ainda é possível concluir que
o valor mensal da CEAP nem sempre é respeitado pelos Deputados, uma vez que o exercício financeiro é referente ao período de um ano."),
h3(tags$strong("Gastos por despesa dos deputados")),
p("A seguir é possível ver quanto cada deputado gastou por despesa durante os meses de Janeiro à Abril. Para ter
detalhes do valor basta colocar o curso ao fim da barra para ser calculado o valor gasto naquela despesa."),
sidebarLayout(
sidebarPanel(
selectInput("deputados",
"Escolha o deputado investigado: ",
c("ANÍBAL GOMES", "AGUINALDO RIBEIRO", "ARTHUR LIRA", "EDUARDO DA FONTE", "WALDIR MARANHÃO", "ROBERTO BRITTO"))
),
mainPanel(
plotOutput(outputId = "deputieExpense", hover = "hover_plot"),
verbatimTextOutput("hoverExpense")
)
),
p("O atual Presidente da República Michel Temer nos últimos meses lançou uma série de medidas para enxugar o gasto
público. Os cortes foram sobretudo na áreas de ", tags$a(href = "http://exame.abril.com.br/economia/noticias/grupo-de-temer-avalia-desvincular-beneficios-do-minimo","Saúde e Educação"),
", basta pesquisar um pouco na internet para ver mais cortes nessas duas áreas tão importantes para a qualidade de vida dos Brasileiros. "),
mainPanel(
plotOutput(outputId = "allExpenses", hover = "Hover"),
verbatimTextOutput("expenseHover"), width = 12
),
p("Acima é possível ver o montante gasto dos seis deputados por cada despesa. Será que o
governo está realmente encurtando os gastos?"),
h3("Para encerrar, o que poderia ser feito com os gastos destes deputados no Nordeste?"),
p("Em 2016, o Nordeste brasileiro passa por uma das maiores secas da história. Grandes reservatórios estaduais estão no
seu volume morto - com alto teor de substâncias nocivas ao ser humano - e poucas coisas estão sendo feitas para melhor a
qualidade de vida dos cidadãos dessas localidades. Diante dos gastos de milhares de reais por conta da CEAP, o que poderia ser
feito com esse recurso?"),
h4("1. Construção de novas trinta (30) cisternas!"),
p("Segundo o", tags$a(href = "http://g1.globo.com/economia/agronegocios/noticia/2012/03/governo-troca-cisternas-de-cimento-por-reservatorios-de-plastico.html", "G1"),
"cada cisterna de 16 mil litros de água doada pelo governo custa R$ 5 mil aos cofres publicos; . O valor gasto até o mês de Abril com as despesas de Locação de Veículos e Combustíveis somam mais de R$ 152 mil,
o suficiente para construir trinta (30) cisternas de águas para comunidades isoladas do Nordeste."),
img(src = "cisternas.jpg", height = 300, width = 300),
h4("2. 438 caminhões pipas abastecidos com 15 mil litros de água potável"),
p(" "),
p("O valor gasto com Passagens Aéreas dos seis deputados investigados durante o período de Janeiro à Abril é da ordem de R$ 175 mil reais,
o suficiente para pagar mais 430 caminhões-pipa para abastecer as comunidades que sofrem com a falta d'água."),
img(src = "caminhao_pipa.jpg", height = 300, width = 300),
h4("3. Trinta e seis (36) novos alunos no Ensino Médio"),
p("Segundo a portaria Interministerial de Número 6 do",
tags$a(href = "https://www.fnde.gov.br/fndelegis/action/UrlPublicasAction.php?acao=abrirAtoPublico&sgl_tipo=PIM&num_ato=00000006&seq_ato=000&vlr_ano=2016&sgl_orgao=MF/MEC", "FNDE"),
"o custo médio anual de um aluno do Ensino Médio no nordeste custa cerca de R$ 3600. A despesa referente ao gasto com Divulgação Parlamentar dos deputados acima
tem um valor de mais de R$ 131 mil, o suficiente para matricular trinta e seis alunos no ensino médio profissionalizante durante um ano."),
h3("Chegamos ao fim..."),
p("Nossa análise chegou ao fim, vimos que os mecanismos legais para controlar os gastos dos deputados na realidade são defasadas e possuem furos, como mostrei acima.
Dificil pedir isto, mas não fique triste! Juntos investigamos o comportamento dos gastos dos seis deputados investigado e exercemos o nosso direito e dever de cidadãos.
Novas análises irão ocorrer e vocês ficaram a par de tudo!"),
h5("Campina Grande - 07 de Agosto de 2016", align = "center")
))
| /Visualization/interactiveVisualization/deputiesExpense/ui.R | no_license | ArthurSampaio/DataAnalysis | R | false | false | 11,510 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# UI for a narrative dashboard (text in Portuguese) about the CEAP
# (parliamentary quota) spending of six Northeastern-Brazilian federal
# deputies investigated in Operation Lava-Jato. The page alternates
# explanatory prose with interactive plots; every input/output id used
# below (sliderInput, selectInput, plotOutput, verbatimTextOutput) must
# match the corresponding id handled in server.R.
shinyUI(fluidPage(
theme = "bootstrap.css",
# Application title
titlePanel("Deputados Nordestinos Investigados na Operação Lava-Jato"),
h4("Uma análise sobre os gastos dos seis deputados nordestinos investigados", align = "center"),
p(""),
p(""),
h5("Por", tags$a(href= "https://www.linkedin.com/in/arthursampaiopcorreia?", "Arthur Sampaio"), align = "right"),
h2("A Operação Lava-Jato"),
p("Nas mídias muito se fala da Operação Lava-Jato, a maior investigação sobre corrupção conduzida até hoje em solo Brasileiro.
Ela começou investigando uma rede de doleiros que atuavam em vários setores e Estados e descobriu um vasto esquema de corrupção
na maior estatal do país - A Petrobrás, envolvendo desde políticos às maiores empreiteras do Brasil. Para enteder mais sobre
a Operação Lava Jato o ", tags$a(href = "http://lavajato.mpf.mp.br/entenda-o-caso", "Ministério Público Federal"), "criou um portal que explica sucintamente
todo os processos da operação."),
p("Cerca de 22 Deputados Federais, eleitos para representarem o povo, são acusados de pertecerem ao maior esquema de corrupção
brasileira que custou diretamente aos cofres públicos mais de R$ 6 bilhões que poderiam ser gastos por nós, povo. Seis desses vinte e dois deputados acusados são
nordestinos o que me deixa com um senso de dever mais agunçado para saber como estes seis gastam os nossos recursos, que são destinados à CEAP - Cota para Exercício da Atividade Parlamentar.\n\n\n"),
h3("Os dados"),
p("Os dados disponíveis no site da Transparência da Câmara Federal são em formato XML. A conversão para .csv (comma-separated value)
foi feita pelo professor Nazareno e disponibilizado no seu",tags$a(href = "https://github.com/nazareno/ciencia-de-dados-1/blob/master/dados/ano-atual.csv.tgz","GitHub"), "pessoal.
O banco de dados conta com as descrições dos dados parlamentares distribuídos em vinte e nove (29) variáveis, incluindo quando e onde ocorreu os gastos, o
valor do documento e nome do deputado, entre outras informações importantes para a análise."),
h3(tags$strong("Antes de mais nada: como é o comportamento desses gastos?")),
p("Os valores estão muito concentrados a esquerda do gráfico, assimétricos , além disto os valores
crescem exponencialmente. Para facilitar a visualização é plotada em um gráfico monolog."),
# Distribution of all expenses; the "precision" slider is read by the
# server when rendering plot "behavoirData".
# NOTE(review): the output id "behavoirData" is misspelled ("behavior"),
# but it must NOT be fixed here alone -- it has to stay in sync with the
# matching render call in server.R.
sidebarLayout(
sidebarPanel(
sliderInput("precision",
"Precisão da visualização",
min = 1, max = 250, value = 50)
),
mainPanel(
plotOutput(outputId = "behavoirData",hover = "hover"),
verbatimTextOutput("pHover")
)
),
p("Os valores estão concentrados entre R$ 50 e R$ 1000, como mostra o gráfico abaixo. Contudo, a maior
concetração de valores é entorno da mediana (R$ 556,20). Além disto, 75% dos gastos são inferiores a
R$ 565,90. Os valores variam de R$ -1901 referente compensação de bilhete aéreo e o maior valor gasto
é de R$ 39,6 mil do", tags$em("Deputado Roberto Britto"),"referente a divulgação com atividade parlamentar. "),
h3(tags$strong("Vamos verificar como cada deputado gasta sua Cota Parlamentar mensalmente?")),
p("Abaixo está os gastos mensais dos Senhores Deputados referentes a sua cota Parlamentar. É perciptível que
alguns deputados como os senhores",tags$strong("Aníbal Gomes e Waldír Maranhão"), "ainda não prestaram contas
dos seus gastos referentes aos meses de Maio e junho. Qual o motivo dessa não prestação de contas?"),
p("Ao pesquisar em páginas pessoais dos deputados não encontrei nenhuma informação sobre este motivo, em seguida fui pesquisar o que a legislação diz nesses casos."),
# Monthly spending per deputy; "deputiesName" selects the deputy shown in
# plot "deputieMonth".
sidebarLayout(
sidebarPanel(
selectInput("deputiesName",
"Escolha o deputado investigado: ",
c("ANÍBAL GOMES", "AGUINALDO RIBEIRO", "ARTHUR LIRA", "EDUARDO DA FONTE", "WALDIR MARANHÃO", "ROBERTO BRITTO"))
),
mainPanel(
plotOutput(outputId = "deputieMonth", hover = "plot_hover"),
verbatimTextOutput("info")
)
),
p("Após me debruçar nas páginas da Câmara Federal encontrei o",tags$a(href = "http://www2.camara.leg.br/a-camara/estruturaadm/deapa/portal-da-posse/ato-da-mesa-43-ceap", "Ato de Mesa de número 43"),
", que no seu artigo 4 tem o seguinte insiso: ", align = "justify"),
p(tags$em("§ 12. A apresentação da documentação comprobatória do gasto disciplinado pela Cota de que trata este
Ato dar-se-á no prazo máximo de noventa dias após o fornecimento do produto ou serviço.")),
p("Assim, os deputados acima mencionados estão judicialmente amparados e tem ainda 60 dias, no mínimo, para prestar
conta dos seus gastos. Por esse motivo e com o intuito de aumentar a veracidade das informações aqui levantadas,
caro leito, irei analisar apenas os gastos referentes aos meses de Janeiro à Abril. Vamos começar esta investigação
com os gastos referentes à cada tipo de despesa."),
h6("¹Os valores negativos são referentes a compensação de passagens aéreas, que é quando o deputado utiliza do seu próprio dinheiro para
realizar a viagem e o CEAP reembolsa o mesmo.", align = "right"),
p("Além disto, o deputado baiano Roberto Britto no mês de Abril gastou mais de R$ 60 mil reais,", tags$a(href = 'http://www2.camara.leg.br/a-camara/estruturaadm/deapa/portal-da-posse/ceap-1', "R$ 25 mil"),
" a mais do que sua cota mensal. Já que cada deputado só pode gastar mensalmente um valor determinado pela legislação,
há algum anteparo legal que permite que o deputado em questão gaste 170% da sua cota sem nenhuma fiscalização?"),
p("Para responder mais uma questão foi recorrer aos Atos de Mesas da Câmara e encontrei o", tags$a( href = 'http://www2.camara.leg.br/legin/int/atomes/2009/atodamesa-43-21-maio-2009-588364-publicacaooriginal-112820-cd-mesa.html'," Ato de Mesa de número 23"),
", especificamente no Artigo 13, que diz o seguinte: "),
p(tags$em("Art. 13. O saldo da Cota não utilizado acumula-se ao longo do exercício financeiro, vedada a acumulação de saldo de
um exercício para o seguinte.")),
p(tags$em("Parágrafo 1º - A Cota somente poderá ser utilizada para despesas de competência do respectivo exercício financeiro.")),
p(tags$em("Parágrafo 2º - A importância que exceder, no exercício financeiro, o saldo de Cota disponível será deduzida automática e
integralmente da remuneração do parlamentar ou do saldo de acerto de contas de que ele seja credor, revertendo-se à conta
orçamentária própria da Câmara dos Deputados. ")),
p("Diante do descrito pela legislação é notório a facilidade em que os deputados têm para exceder suas cotas. Ainda é possível concluir que
o valor mensal da CEAP nem sempre é respeitado pelos Deputados, uma vez que o exercício financeiro é referente ao período de um ano."),
h3(tags$strong("Gastos por despesa dos deputados")),
p("A seguir é possível ver quanto cada deputado gastou por despesa durante os meses de Janeiro à Abril. Para ter
detalhes do valor basta colocar o curso ao fim da barra para ser calculado o valor gasto naquela despesa."),
# Spending by expense category for the deputy chosen in "deputados";
# rendered as plot "deputieExpense".
sidebarLayout(
sidebarPanel(
selectInput("deputados",
"Escolha o deputado investigado: ",
c("ANÍBAL GOMES", "AGUINALDO RIBEIRO", "ARTHUR LIRA", "EDUARDO DA FONTE", "WALDIR MARANHÃO", "ROBERTO BRITTO"))
),
mainPanel(
plotOutput(outputId = "deputieExpense", hover = "hover_plot"),
verbatimTextOutput("hoverExpense")
)
),
p("O atual Presidente da República Michel Temer nos últimos meses lançou uma série de medidas para enxugar o gasto
público. Os cortes foram sobretudo na áreas de ", tags$a(href = "http://exame.abril.com.br/economia/noticias/grupo-de-temer-avalia-desvincular-beneficios-do-minimo","Saúde e Educação"),
", basta pesquisar um pouco na internet para ver mais cortes nessas duas áreas tão importantes para a qualidade de vida dos Brasileiros. "),
# Aggregated totals across all six deputies (full-width panel).
mainPanel(
plotOutput(outputId = "allExpenses", hover = "Hover"),
verbatimTextOutput("expenseHover"), width = 12
),
p("Acima é possível ver o montante gasto dos seis deputados por cada despesa. Será que o
governo está realmente encurtando os gastos?"),
h3("Para encerrar, o que poderia ser feito com os gastos destes deputados no Nordeste?"),
p("Em 2016, o Nordeste brasileiro passa por uma das maiores secas da história. Grandes reservatórios estaduais estão no
seu volume morto - com alto teor de substâncias nocivas ao ser humano - e poucas coisas estão sendo feitas para melhor a
qualidade de vida dos cidadãos dessas localidades. Diante dos gastos de milhares de reais por conta da CEAP, o que poderia ser
feito com esse recurso?"),
h4("1. Construção de novas trinta (30) cisternas!"),
p("Segundo o", tags$a(href = "http://g1.globo.com/economia/agronegocios/noticia/2012/03/governo-troca-cisternas-de-cimento-por-reservatorios-de-plastico.html", "G1"),
"cada cisterna de 16 mil litros de água doada pelo governo custa R$ 5 mil aos cofres publicos; . O valor gasto até o mês de Abril com as despesas de Locação de Veículos e Combustíveis somam mais de R$ 152 mil,
o suficiente para construir trinta (30) cisternas de águas para comunidades isoladas do Nordeste."),
# Static images served from the app's www/ directory.
img(src = "cisternas.jpg", height = 300, width = 300),
h4("2. 438 caminhões pipas abastecidos com 15 mil litros de água potável"),
p(" "),
p("O valor gasto com Passagens Aéreas dos seis deputados investigados durante o período de Janeiro à Abril é da ordem de R$ 175 mil reais,
o suficiente para pagar mais 430 caminhões-pipa para abastecer as comunidades que sofrem com a falta d'água."),
img(src = "caminhao_pipa.jpg", height = 300, width = 300),
h4("3. Trinta e seis (36) novos alunos no Ensino Médio"),
p("Segundo a portaria Interministerial de Número 6 do",
tags$a(href = "https://www.fnde.gov.br/fndelegis/action/UrlPublicasAction.php?acao=abrirAtoPublico&sgl_tipo=PIM&num_ato=00000006&seq_ato=000&vlr_ano=2016&sgl_orgao=MF/MEC", "FNDE"),
"o custo médio anual de um aluno do Ensino Médio no nordeste custa cerca de R$ 3600. A despesa referente ao gasto com Divulgação Parlamentar dos deputados acima
tem um valor de mais de R$ 131 mil, o suficiente para matricular trinta e seis alunos no ensino médio profissionalizante durante um ano."),
h3("Chegamos ao fim..."),
p("Nossa análise chegou ao fim, vimos que os mecanismos legais para controlar os gastos dos deputados na realidade são defasadas e possuem furos, como mostrei acima.
Dificil pedir isto, mas não fique triste! Juntos investigamos o comportamento dos gastos dos seis deputados investigado e exercemos o nosso direito e dever de cidadãos.
Novas análises irão ocorrer e vocês ficaram a par de tudo!"),
h5("Campina Grande - 07 de Agosto de 2016", align = "center")
))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapping.R
\name{print.mapping}
\alias{print.mapping}
\title{Print a mapping}
\usage{
\method{print}{mapping}(x, ...)
}
\arguments{
\item{x}{\code{\link{mapping}}.}
\item{...}{Ignored.}
}
\value{
Returns \code{x} invisibly.
}
\description{
Print a mapping
}
| /man/print.mapping.Rd | no_license | cran/mappings | R | false | true | 336 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapping.R
\name{print.mapping}
\alias{print.mapping}
\title{Print a mapping}
\usage{
\method{print}{mapping}(x, ...)
}
\arguments{
\item{x}{\code{\link{mapping}}.}
\item{...}{Ignored.}
}
\value{
Returns \code{x} invisibly.
}
\description{
Print a mapping
}
|
\name{isAutoSave}
\alias{isAutoSave}
\title{Returns whether the PROJECT.xml file is automatically saved.}
\usage{
isAutoSave()
}
\description{
Returns whether the PROJECT.xml file is automatically
saved.
}
\seealso{
\code{\link{setAutoSave}}
}
| /man/isAutoSave.Rd | no_license | jbryer/makeR | R | false | false | 253 | rd | \name{isAutoSave}
\alias{isAutoSave}
\title{Returns whether the PROJECT.xml file is automatically saved.}
\usage{
isAutoSave()
}
\description{
Returns whether the PROJECT.xml file is automatically
saved.
}
\seealso{
\code{\link{setAutoSave}}
}
|
/tcl/mac/tclMacResource.r | permissive | Schiiiiins/lcu1 | R | false | false | 2,825 | r | ||
rm(list=objects())
library("tidyverse")
library("xml2")
library("rvest")
read_delim("ris.csv",delim=";",col_names = TRUE,col_types = cols(Elevation=col_integer(),Longitude=col_double(),Latitude=col_double()))->ana
purrr::map_dfr(1:nrow(ana),.f=function(riga){
if(is.na(ana[riga,]$SiteCode)) return(ana[riga,])
ana[riga,]$SiteCode->CODICE
xml2::read_html(glue::glue("http://93.57.89.4:8081/temporeale/stazioni/{CODICE}/anagrafica"))->myhtml
myhtml %>%
rvest::html_node(xpath = "/html/body/div/div[1]/section/div/div/div/div[2]/div/div") %>%
html_nodes(xpath="h5")->ris
unlist(str_split(str_trim(str_remove(html_text(ris[[3]]),"[:alpha:]+:"),side="both"),","))->coordinate
coordinate[1]->lat
coordinate[2]->lon
str_extract(html_text(ris[[4]]),"[0-9]+")->quota
ana[riga,]$Elevation<-as.integer(quota)
ana[riga,]$Longitude<-as.double(lon)
ana[riga,]$Latitude<-as.double(lat)
Sys.sleep(5)
ana[riga,]
})->finale
| /sito_web/leggiAnagraficaPuglia.R | no_license | valori-climatologici-1991-2020/Puglia | R | false | false | 974 | r | rm(list=objects())
# Enrich the station registry ("anagrafica") read from ris.csv with the
# elevation, longitude and latitude scraped from each station's page on the
# Puglia real-time monitoring service, accumulating the updated rows into
# `finale`.
#
# Side effects: one HTTP request per station plus a 5-second pause, so the
# run time grows linearly with the number of stations.
library("tidyverse")
library("xml2")
library("rvest")
read_delim("ris.csv",delim=";",col_names = TRUE,col_types = cols(Elevation=col_integer(),Longitude=col_double(),Latitude=col_double()))->ana
# seq_len(nrow(ana)) instead of 1:nrow(ana): on an empty registry 1:0 would
# yield the bogus sequence c(1, 0) and index out of bounds, while seq_len(0)
# correctly produces zero iterations.
purrr::map_dfr(seq_len(nrow(ana)),.f=function(riga){
  # Rows without a site code cannot be looked up online: keep them unchanged.
  if(is.na(ana[riga,]$SiteCode)) return(ana[riga,])
  ana[riga,]$SiteCode->CODICE
  xml2::read_html(glue::glue("http://93.57.89.4:8081/temporeale/stazioni/{CODICE}/anagrafica"))->myhtml
  # Grab the <h5> elements of the registry panel. NOTE(review): the XPath is
  # tied to the current page layout and will silently break if it changes;
  # the code below assumes the 3rd h5 holds "label: lat, lon" and the 4th
  # holds the elevation -- confirm against a live page before relying on it.
  myhtml %>%
    rvest::html_node(xpath = "/html/body/div/div[1]/section/div/div/div/div[2]/div/div") %>%
    html_nodes(xpath="h5")->ris
  # Strip the leading "Label:" prefix, then split "lat, lon" on the comma.
  unlist(str_split(str_trim(str_remove(html_text(ris[[3]]),"[:alpha:]+:"),side="both"),","))->coordinate
  coordinate[1]->lat
  coordinate[2]->lon
  # Elevation is the first run of digits in the 4th h5.
  str_extract(html_text(ris[[4]]),"[0-9]+")->quota
  ana[riga,]$Elevation<-as.integer(quota)
  ana[riga,]$Longitude<-as.double(lon)
  ana[riga,]$Latitude<-as.double(lat)
  # Be polite to the server between requests.
  Sys.sleep(5)
  ana[riga,]
})->finale
|
# Profit analysis for the ORA product: builds weekly revenue/cost columns on
# the predicted-demand table, plots year-1 vs steady-state profit per region,
# and writes the profit-maximising price rows to profit_csvs/.
# Data layout assumption used throughout: 301 candidate prices per region,
# regions ordered NE, MA, SE, MW, DS, NW, SW (7 x 301 = 2107 rows per
# product) -- every hard-coded rep() count below depends on this ordering.
library(ggplot2)
library(dplyr)
all.predicted.demands <- read.csv('all_predicted_demands.csv',
                                  stringsAsFactors=FALSE)
ora.df <- all.predicted.demands[all.predicted.demands$product == 'ORA',]
# ORA
# Revenue: price is per unit, demand is in thousands of 2000-unit lots
# -- NOTE(review): the exact meaning of the 2000 multiplier is not stated
# anywhere in this file; confirm against the demand model.
ora.df <- ora.df %>%
    mutate(revenue=2000 * price * predicted_demand,
           weekly_demand = predicted_demand)
# Grove-to-storage distances (miles) for the four storage sites.
s35.grove.dist <- 266
s51.grove.dist <- 967
s59.grove.dist <- 176
s73.grove.dist <- 1470
# Assign each region block its serving storage site's grove distance:
# S51 serves the first 3 regions (903 = 3 x 301 rows), S73 and S35 one
# region each, S59 the last two (602 = 2 x 301 rows).
ora.df$grove_dist <- c(rep(s51.grove.dist, 903),
                       rep(s73.grove.dist, 301),
                       rep(s35.grove.dist, 301),
                       rep(s59.grove.dist, 602))
# We have 301 rows per region (in the order NE, MA, SE, MW, DS, NW, SW)
# NOTE(review): region.storage is loaded but never used below -- the
# storage-to-market distances are hard-coded instead; keep them in sync
# with region_storage_dists_opt.csv or read them from it.
region.storage <- read.csv('region_storage_dists_opt.csv')
ora.df$storage_dist <- c(rep(479.1429, 301),
                         rep(286.7647, 301),
                         rep(712.1667, 301),
                         rep(368.5909, 301),
                         rep(413.3750, 301),
                         rep(659.1250, 301),
                         rep(659, 301))
# Transport: $0.22/unit-mile grove->storage, $1.20/unit-mile storage->market.
ora.df$weekly_transp_cost <- ora.df$weekly_demand *
    (0.22 * ora.df$grove_dist + 1.2 * ora.df$storage_dist)
# Also include cost to buy capacity and maintain necessary
# storage. There's a one-time upgrade cost and an every-year
# maintenance. Divide the one-time cost by 48 to "week-ize" it
# (48 = selling weeks per year used throughout this script).
ora.df$weekly_storage_build <- 6000 * ora.df$weekly_demand / 48
ora.df$weekly_storage_maint <- (650 * ora.df$weekly_demand) / 48
# ^ Note that we do not account for the 7.5m * 4 because it will
# be present at every price (add in at end).
# Finally, include the raw material cost (spot purchase of ORA).
# We use our mean belief for each grove's spot purchase price.
# (FLA x 36, FLA x 12, TEX x 12, CAL x 24) is the vectoring.
# Average over months for now, disregarding seasonality -- also,
# no need to factor in exchange rates for now, assume we buy from
# FLA, not FLA / BRA / SPA.
# The belief file lives one directory up; restore the working directory
# right after reading it.
cwd <- getwd()
setwd('..') # move up one directory
mean.raw.price.beliefs <- read.csv(
    'grove_beliefs/raw_price_beliefs_mean.csv')
setwd(cwd)
mean.over.months <- mean.raw.price.beliefs %>%
    group_by(grove) %>%
    summarise(mean_month=mean(price))
# FLA supplies the first 4 regions (4 x 301 rows), TEX one, CAL two --
# mirroring the grove_dist assignment above.
ora.df$raw_material_cost <- ora.df$weekly_demand * 2000 * c(
    rep(mean.over.months[mean.over.months$grove == 'FLA', ]$mean_month,
        4 * 301),
    rep(mean.over.months[mean.over.months$grove == 'TEX', ]$mean_month,
        301),
    rep(mean.over.months[mean.over.months$grove == 'CAL', ]$mean_month,
        2 * 301))
# Note: this "profit" is for the first year, actual profit
# should be even higher in later years when we don't have the
# capacity cost.
ora.df$year1_profit <- ora.df$revenue - (ora.df$weekly_transp_cost +
    ora.df$weekly_storage_build + ora.df$weekly_storage_maint +
    ora.df$raw_material_cost)
# Steady-state profit: same as year 1 but without the one-time build cost.
ora.df$profit <- ora.df$revenue - (ora.df$weekly_transp_cost +
    ora.df$weekly_storage_maint +
    ora.df$raw_material_cost)
# Dotted line = year-1 profit, solid = later years; one colour per region.
ggplot(ora.df, aes(x=price, colour=region)) +
    geom_line(aes(y=year1_profit), linetype='dotted') +
    geom_line(aes(y=profit)) +
    ggtitle('ORA Profit (Year 1 and After)')
# ggsave() with no plot argument saves the most recently displayed plot.
ggsave('profit_curves/ora_profit.png', width=10, height=6)
# Best price per region = the row with the maximum steady-state profit.
# NOTE(review): the result stays grouped by region (no ungroup()); harmless
# for write.csv but keep in mind if it is reused downstream.
ora.profit.max <- ora.df %>% group_by(region) %>%
    filter(profit == max(profit))
write.csv(ora.profit.max, file='profit_csvs/ora_max_profit.csv',
          quote=FALSE, row.names=FALSE)
# POJ
# Profit analysis for the POJ product. Same 7-regions x 301-prices layout as
# the ORA section above, but the cost model adds a processing step
# (grove -> plant -> storage -> market) with tanker-car transport between
# plant and storage.
poj.df <- all.predicted.demands[all.predicted.demands$product == 'POJ',]
poj.df <- poj.df %>%
    mutate(revenue=2000 * price * predicted_demand,
           weekly_demand = predicted_demand)
# Add storage to market distances
# (same per-region values as the ORA section -- keep the two in sync).
poj.df$storage_dist <- c(rep(479.1429, 301),
                         rep(286.7647, 301),
                         rep(712.1667, 301),
                         rep(368.5909, 301),
                         rep(413.3750, 301),
                         rep(659.1250, 301),
                         rep(659, 301))
# Instead of grove to storage, now we have grove to plant and
# plant to storage distances. We can make similar "efficiency"
# assumptions, where P2->S35, P3->S51, P5->S59, P9->S73.
# We'll ship raw ORA from TEX->P2, CAL->P5, FLA->P3, FLA->P9.
# TEX->P2 = 381
# CAL->P5 = 351
# FLA->P3 = 773
# FLA->P9 = 1528
poj.df$g_p_dist <- c(rep(773, 903),
                     rep(1528, 301),
                     rep(381, 301),
                     rep(351, 602))
# P2 -> S35 = 140
# P3 -> S51 = 317
# P5 -> S59 = 393
# P9 -> S73 = 98
poj.df$p_s_dist <- c(rep(317, 903),
                     rep(98, 301),
                     rep(140, 301),
                     rep(393, 602))
# For tanker car cost, we need to calculate how many tanker
# cars the given demand would require, multiply by its purchase
# cost, and then add the weekly traveling cost. We'll spread the
# one time purchase cost over weeks by dividing it by 48.
# Each car carries 30 units; the factor of 2 and the 0.5 below presumably
# model the empty return leg -- TODO confirm against the case material.
poj.df$num_tanker_cars_needed <- 2 * poj.df$weekly_demand / 30
poj.df$tanker_car_weekly_purchase_cost <-
    poj.df$num_tanker_cars_needed * 100000 / 48
poj.df$tanker_car_weekly_travel_cost <- 36 *
    0.5 * poj.df$num_tanker_cars_needed * poj.df$p_s_dist
poj.df$tanker_car_weekly_hold_cost <- 10 *
    0.5 * poj.df$num_tanker_cars_needed
# Truck transport grove->plant and storage->market (same unit rates as ORA).
poj.df$g_p_weekly_cost <- 0.22 * poj.df$weekly_demand * poj.df$g_p_dist
poj.df$storage_market_weekly_cost <- 1.2 * poj.df$weekly_demand *
    poj.df$storage_dist
# Also include cost to buy capacity and maintain necessary
# processing. There's a one-time upgrade cost and an every-year
# maintenance. Divide the one-time cost by 48 to "week-ize" it.
poj.df$weekly_proc_build <- 8000 * poj.df$weekly_demand / 48
poj.df$weekly_proc_maint <- (2500 * poj.df$weekly_demand) / 48
# Note that we do not add in the $8m * 4 processing maintenance,
# because it will be there for all prices (and we're 2x-counting it
# for the other products)
poj.df$weekly_storage_build <- 6000 * poj.df$weekly_demand / 48
poj.df$weekly_storage_maint <- (650 * poj.df$weekly_demand) / 48
poj.df$manufacturing_cost <- 2000 * poj.df$weekly_demand
# Add in raw material cost
# (same FLA x 4 / TEX x 1 / CAL x 2 region split as the ORA section).
poj.df$raw_material_cost <- poj.df$weekly_demand * 2000 * c(
    rep(mean.over.months[mean.over.months$grove == 'FLA', ]$mean_month,
        4 * 301),
    rep(mean.over.months[mean.over.months$grove == 'TEX', ]$mean_month,
        301),
    rep(mean.over.months[mean.over.months$grove == 'CAL', ]$mean_month,
        2 * 301))
# Year-1 profit subtracts every cost, including the week-ized one-time
# build/purchase costs.
poj.df$year1_profit <- poj.df$revenue - (
    poj.df$tanker_car_weekly_purchase_cost +
    poj.df$tanker_car_weekly_travel_cost +
    poj.df$tanker_car_weekly_hold_cost +
    poj.df$g_p_weekly_cost +
    poj.df$storage_market_weekly_cost +
    poj.df$manufacturing_cost +
    poj.df$weekly_proc_build +
    poj.df$weekly_proc_maint +
    poj.df$raw_material_cost +
    poj.df$weekly_storage_build +
    poj.df$weekly_storage_maint)
# Steady-state profit adds the one-time costs back (unlike the ORA section,
# which recomputes it from revenue -- the result is equivalent).
poj.df$profit <- poj.df$year1_profit + (
    poj.df$tanker_car_weekly_purchase_cost +
    poj.df$weekly_proc_build +
    poj.df$weekly_storage_build
)
# Dotted = year-1, solid = later years; ggsave() picks up the last plot.
ggplot(poj.df, aes(x=price, colour=region)) +
    geom_line(aes(y=year1_profit), linetype='dotted') +
    geom_line(aes(y=profit)) +
    ggtitle('POJ Profit (Year 1 and After)')
ggsave('profit_curves/poj_profit.png', width=10, height=6)
# Profit-maximising price row per region (result stays grouped; see the
# matching note in the ORA section).
poj.profit.max <- poj.df %>% group_by(region) %>%
    filter(profit == max(profit))
write.csv(poj.profit.max, file='profit_csvs/poj_max_profit.csv',
          quote=FALSE, row.names=FALSE)
#### The other two products are price-optimized using futures
# ROJ
# ---- ROJ: same cost model as POJ, plus a reconstitution step ----
roj.df <- all.predicted.demands[all.predicted.demands$product == 'ROJ',]
roj.df <- roj.df %>%
mutate(revenue=2000 * price * predicted_demand,
weekly_demand = predicted_demand)
# Add storage to market distances
# One 301-row block per region, in the order NE, MA, SE, MW, DS, NW, SW.
roj.df$storage_dist <- c(rep(479.1429, 301),
rep(286.7647, 301),
rep(712.1667, 301),
rep(368.5909, 301),
rep(413.3750, 301),
rep(659.1250, 301),
rep(659, 301))
# Instead of grove to storage, now we have grove to plant and
# plant to storage distances. We can make similar "efficiency"
# assumptions, where P2->S35, P3->S51, P5->S59, P9->S73.
# We'll ship raw ORA from TEX->P2, CAL->P5, FLA->P3, FLA->P9.
# TEX->P2 = 381
# CAL->P5 = 351
# FLA->P3 = 773
# FLA->P9 = 1528
roj.df$g_p_dist <- c(rep(773, 903),
rep(1528, 301),
rep(381, 301),
rep(351, 602))
# P2 -> S35 = 140
# P3 -> S51 = 317
# P5 -> S59 = 393
# P9 -> S73 = 98
roj.df$p_s_dist <- c(rep(317, 903),
rep(98, 301),
rep(140, 301),
rep(393, 602))
# For tanker car cost, we need to calculate how many tanker
# cars the given demand would require, multiply by its purchase
# cost, and then add the weekly traveling cost. We'll spread the
# one time purchase cost over weeks by dividing it by 48.
roj.df$num_tanker_cars_needed <- 2 * roj.df$weekly_demand / 30
roj.df$tanker_car_weekly_purchase_cost <-
roj.df$num_tanker_cars_needed * 100000 / 48
roj.df$tanker_car_weekly_travel_cost <- 36 *
0.5 * roj.df$num_tanker_cars_needed * roj.df$p_s_dist
roj.df$tanker_car_weekly_hold_cost <- 10 *
0.5 * roj.df$num_tanker_cars_needed
roj.df$g_p_weekly_cost <- 0.22 * roj.df$weekly_demand * roj.df$g_p_dist
roj.df$storage_market_weekly_cost <- 1.2 * roj.df$weekly_demand *
roj.df$storage_dist
# One-time build costs amortized over 48 weeks; maintenance recurs yearly.
roj.df$weekly_storage_build <- 6000 * roj.df$weekly_demand / 48
roj.df$weekly_storage_maint <- (650 * roj.df$weekly_demand) / 48
roj.df$weekly_proc_build <- 8000 * roj.df$weekly_demand / 48
roj.df$weekly_proc_maint <- (2500 * roj.df$weekly_demand) / 48
# Reconstitution cost
roj.df$reconstitution_cost <- 650 * roj.df$weekly_demand
# Add in raw material cost
# Same grove layout as POJ: FLA x 4 regions, TEX x 1, CAL x 2.
roj.df$raw_material_cost <- roj.df$weekly_demand * 2000 * c(
rep(mean.over.months[mean.over.months$grove == 'FLA', ]$mean_month,
4 * 301),
rep(mean.over.months[mean.over.months$grove == 'TEX', ]$mean_month,
301),
rep(mean.over.months[mean.over.months$grove == 'CAL', ]$mean_month,
2 * 301))
# Also, add in manufacturing cost of FCOJ because we need to make
# FCOJ to get ROJ (assume no futures).
roj.df$manufacturing_cost <- 2000 * roj.df$weekly_demand
# Year-1 profit bears all costs; steady-state profit adds the amortized
# one-time costs (car purchase, proc/storage build) back in.
roj.df$year1_profit <- roj.df$revenue - (
roj.df$tanker_car_weekly_purchase_cost +
roj.df$tanker_car_weekly_travel_cost +
roj.df$tanker_car_weekly_hold_cost +
roj.df$g_p_weekly_cost +
roj.df$storage_market_weekly_cost +
roj.df$manufacturing_cost +
roj.df$reconstitution_cost +
roj.df$weekly_proc_build +
roj.df$weekly_proc_maint +
roj.df$raw_material_cost +
roj.df$weekly_storage_build +
roj.df$weekly_storage_maint)
roj.df$profit <- roj.df$year1_profit + (
roj.df$tanker_car_weekly_purchase_cost +
roj.df$weekly_proc_build +
roj.df$weekly_storage_build
)
# Dotted = year-1, solid = steady-state. The y=profit aesthetic here is
# overridden by the per-layer aes() calls below.
ggplot(roj.df, aes(x=price, y=profit, colour=region)) +
geom_line(aes(y=year1_profit), linetype='dotted') +
geom_line(aes(y=profit)) +
ggtitle('ROJ Profit (Year 1 and After)')
ggsave('profit_curves/roj_profit.png', width=10, height=6)
# Profit-maximizing price per region (ties kept).
roj.profit.max <- roj.df %>% group_by(region) %>%
filter(profit == max(profit))
write.csv(roj.profit.max, file='profit_csvs/roj_max_profit.csv',
quote=FALSE, row.names=FALSE)
# # FCOJ
# ####
# # Note this assumes we manufacture the FCOJ
# ####
# fcoj.df <- all.predicted.demands[all.predicted.demands$product == 'FCOJ',]
# fcoj.df <- fcoj.df %>%
# mutate(revenue=2000 * price * predicted_demand,
# weekly_demand = predicted_demand)
# # Add storage to market distances
# fcoj.df$storage_dist <- c(rep(479.1429, 301),
# rep(286.7647, 301),
# rep(712.1667, 301),
# rep(368.5909, 301),
# rep(413.3750, 301),
# rep(659.1250, 301),
# rep(659, 301))
# # Instead of grove to storage, now we have grove to plant and
# # plant to storage distances. We can make similar "efficiency"
# # assumptions, where P2->S35, P3->S51, P5->S59, P9->S73.
# # We'll ship raw ORA from TEX->P2, CAL->P5, FLA->P3, FLA->P9.
# # TEX->P2 = 381
# # CAL->P5 = 351
# # FLA->P3 = 773
# # FLA->P9 = 1528
# fcoj.df$g_p_dist <- c(rep(773, 903),
# rep(1528, 301),
# rep(381, 301),
# rep(351, 602))
# # P2 -> S35 = 140
# # P3 -> S51 = 317
# # P5 -> S59 = 393
# # P9 -> S73 = 98
# fcoj.df$p_s_dist <- c(rep(317, 903),
# rep(98, 301),
# rep(140, 301),
# rep(393, 602))
# # For tanker car cost, we need to calculate how many tanker
# # cars the given demand would require, multiply by its purchase
# # cost, and then add the weekly traveling cost. We'll spread the
# # one time purchase cost over weeks by dividing it by 48.
# fcoj.df$num_tanker_cars_needed <- fcoj.df$weekly_demand / 30
# fcoj.df$tanker_car_weekly_purchase_cost <-
# fcoj.df$num_tanker_cars_needed * 100000 / 48
# fcoj.df$tanker_car_weekly_travel_cost <- 36 *
# fcoj.df$num_tanker_cars_needed * fcoj.df$p_s_dist
# fcoj.df$g_p_weekly_cost <- 0.22 * fcoj.df$weekly_demand * fcoj.df$g_p_dist
# fcoj.df$storage_market_weekly_cost <- 1.2 * fcoj.df$weekly_demand *
# fcoj.df$storage_dist
# fcoj.df$weekly_proc_build <- 8000 * fcoj.df$weekly_demand / 48
# fcoj.df$weekly_proc_maint <- (2500 * fcoj.df$weekly_demand) / 48
# fcoj.df$weekly_storage_build <- 6000 * fcoj.df$weekly_demand / 48
# fcoj.df$weekly_storage_maint <- (650 * fcoj.df$weekly_demand) / 48
# fcoj.df$manufacturing_cost <- 2000 * fcoj.df$weekly_demand
# fcoj.df$raw_material_cost <- fcoj.df$weekly_demand * 2000 * c(
# rep(mean.over.months[mean.over.months$grove == 'FLA', ]$mean_month,
# 4 * 301),
# rep(mean.over.months[mean.over.months$grove == 'TEX', ]$mean_month,
# 301),
# rep(mean.over.months[mean.over.months$grove == 'CAL', ]$mean_month,
# 2 * 301))
# fcoj.df$year1_profit <- fcoj.df$revenue - (fcoj.df$tanker_car_weekly_purchase_cost +
# fcoj.df$tanker_car_weekly_travel_cost +
# fcoj.df$g_p_weekly_cost + fcoj.df$storage_market_weekly_cost +
# fcoj.df$manufacturing_cost +
# fcoj.df$weekly_proc_build +
# fcoj.df$weekly_proc_maint +
# fcoj.df$raw_material_cost +
# fcoj.df$weekly_storage_build +
# fcoj.df$weekly_storage_maint)
# fcoj.df$profit <- fcoj.df$year1_profit + fcoj.df$weekly_proc_build +
# fcoj.df$weekly_storage_build
# ggplot(fcoj.df, aes(x=price, y=profit, colour=region)) +
# geom_line(aes(y=year1_profit), linetype='dotted') +
# geom_line(aes(y=profit)) +
# ggtitle('FCOJ Profit (Year 1 and After)')
# ggsave('profit_curves/fcoj_profit.png', width=10, height=6)
# fcoj.profit.max <- fcoj.df %>% group_by(region) %>%
# filter(profit == max(profit))
# write.csv(fcoj.profit.max, file='profit_csvs/fcoj_max_profit.csv',
# quote=FALSE, row.names=FALSE)
# # Total profit, using FCOJ futures
# for (fcoj_future_price in seq(0.6, 1.1, 0.1)) {
# profit <- 48 * (sum(ora.profit.max$profit) + sum(poj.profit.max$profit) +
# sum(roj.profit.max$profit)) +
# (6112246 * 48 - fcoj_future_price * 136000 * 2000) -
# (4 * 7500000 + 4 * 8000000)
# print(profit)
# }
# sum(ora.profit.max$weekly_demand) + sum(poj.profit.max$weekly_demand) +
# sum(roj.profit.max$weekly_demand) | /old_code/consider_fifth_storage/4_storage/find_optimal_prices_ORA_POJ_ROJ.R | no_license | edz504/not_grapefruit | R | false | false | 15,471 | r | library(ggplot2)
library(dplyr)
# ---- ORA: load demand predictions and build per-row distances ----
# One row per (region, price point); revenue assumes 2000 units per
# demand unit -- NOTE(review): confirm the 2000 conversion factor.
all.predicted.demands <- read.csv('all_predicted_demands.csv',
stringsAsFactors=FALSE)
ora.df <- all.predicted.demands[all.predicted.demands$product == 'ORA',]
# ORA
ora.df <- ora.df %>%
mutate(revenue=2000 * price * predicted_demand,
weekly_demand = predicted_demand)
# Grove-to-storage distances for the four storage sites in use.
s35.grove.dist <- 266
s51.grove.dist <- 967
s59.grove.dist <- 176
s73.grove.dist <- 1470
# Row blocks are 301 price points per region; 903 = 3 regions via S51,
# then S73 x 1, S35 x 1, S59 x 2.
ora.df$grove_dist <- c(rep(s51.grove.dist, 903),
rep(s73.grove.dist, 301),
rep(s35.grove.dist, 301),
rep(s59.grove.dist, 602))
# We have 301 rows per region (in the order NE, MA, SE, MW, DS, NW, SW)
# NOTE(review): region.storage is read but never referenced below; the
# storage_dist values appear hard-coded from it -- confirm they match.
region.storage <- read.csv('region_storage_dists_opt.csv')
ora.df$storage_dist <- c(rep(479.1429, 301),
rep(286.7647, 301),
rep(712.1667, 301),
rep(368.5909, 301),
rep(413.3750, 301),
rep(659.1250, 301),
rep(659, 301))
# $0.22 per unit-mile grove->storage plus $1.20 per unit-mile
# storage->market.
ora.df$weekly_transp_cost <- ora.df$weekly_demand *
(0.22 * ora.df$grove_dist + 1.2 * ora.df$storage_dist)
# Also include cost to buy capacity and maintain necessary
# storage. There's a one-time upgrade cost and an every-year
# maintenance. Divide the one-time cost by 48 to "week-ize" it.
ora.df$weekly_storage_build <- 6000 * ora.df$weekly_demand / 48
ora.df$weekly_storage_maint <- (650 * ora.df$weekly_demand) / 48
# ^ Note that we do not account for the 7.5m * 4 because it will
# be present at every price (add in at end).
# Finally, include the raw material cost (spot purchase of ORA).
# We use our mean belief for each grove's spot purchase price.
# (FLA x 36, FLA x 12, TEX x 12, CAL x 24) is the vectoring.
# Average over months for now, disregarding seasonality -- also,
# no need to factor in exchange rates for now, assume we buy from
# FLA, not FLA / BRA / SPA.
# Read the belief file from the parent directory directly rather than
# mutating the working directory: the old setwd('..')/setwd(cwd) dance
# left the session in the wrong directory if read.csv() errored midway.
mean.raw.price.beliefs <- read.csv(
  file.path('..', 'grove_beliefs', 'raw_price_beliefs_mean.csv'))
# Mean spot-price belief per grove, averaged across months.
mean.over.months <- mean.raw.price.beliefs %>%
  group_by(grove) %>%
  summarise(mean_month=mean(price))
# Raw material (spot ORA) cost per row. Regions come in 301-price-point
# blocks mapped to groves as FLA x 4, TEX x 1, CAL x 2; rep(..., times=)
# lays the per-grove belief prices out in exactly that order.
ora.spot.price <- mean.over.months$mean_month[
  match(c('FLA', 'TEX', 'CAL'), mean.over.months$grove)]
ora.df$raw_material_cost <- ora.df$weekly_demand * 2000 *
  rep(ora.spot.price, times = c(4, 1, 2) * 301)
# Year-1 profit carries the one-time storage build cost; the steady-state
# profit below drops it, since the build is paid once while maintenance,
# transport and raw material recur every year.
ora.df$year1_profit <- ora.df$revenue - (ora.df$weekly_transp_cost +
  ora.df$weekly_storage_build + ora.df$weekly_storage_maint +
  ora.df$raw_material_cost)
ora.df$profit <- ora.df$revenue - (ora.df$weekly_transp_cost +
  ora.df$weekly_storage_maint +
  ora.df$raw_material_cost)
# Dotted line = year-1 profit, solid = steady-state, one colour per region.
ggplot(ora.df, aes(x=price, colour=region)) +
  geom_line(aes(y=year1_profit), linetype='dotted') +
  geom_line(aes(y=profit)) +
  ggtitle('ORA Profit (Year 1 and After)')
ggsave('profit_curves/ora_profit.png', width=10, height=6)
# Keep the profit-maximizing row(s) within each region and persist them.
ora.profit.max <- ora.df %>%
  group_by(region) %>%
  filter(profit == max(profit))
write.csv(ora.profit.max, file='profit_csvs/ora_max_profit.csv',
          quote=FALSE, row.names=FALSE)
# POJ
# ---- POJ: demand subset, routing distances, costs, and profit ----
poj.df <- all.predicted.demands[all.predicted.demands$product == 'POJ',]
poj.df <- poj.df %>%
mutate(revenue=2000 * price * predicted_demand,
weekly_demand = predicted_demand)
# Add storage to market distances
# One 301-row block per region, in the order NE, MA, SE, MW, DS, NW, SW.
poj.df$storage_dist <- c(rep(479.1429, 301),
rep(286.7647, 301),
rep(712.1667, 301),
rep(368.5909, 301),
rep(413.3750, 301),
rep(659.1250, 301),
rep(659, 301))
# Instead of grove to storage, now we have grove to plant and
# plant to storage distances. We can make similar "efficiency"
# assumptions, where P2->S35, P3->S51, P5->S59, P9->S73.
# We'll ship raw ORA from TEX->P2, CAL->P5, FLA->P3, FLA->P9.
# TEX->P2 = 381
# CAL->P5 = 351
# FLA->P3 = 773
# FLA->P9 = 1528
poj.df$g_p_dist <- c(rep(773, 903),
rep(1528, 301),
rep(381, 301),
rep(351, 602))
# P2 -> S35 = 140
# P3 -> S51 = 317
# P5 -> S59 = 393
# P9 -> S73 = 98
poj.df$p_s_dist <- c(rep(317, 903),
rep(98, 301),
rep(140, 301),
rep(393, 602))
# For tanker car cost, we need to calculate how many tanker
# cars the given demand would require, multiply by its purchase
# cost, and then add the weekly traveling cost. We'll spread the
# one time purchase cost over weeks by dividing it by 48.
# NOTE(review): the 2x factor vs. 30-unit car capacity and the 0.5
# factors below presumably reflect round trips / half the fleet in
# motion -- confirm against the case data.
poj.df$num_tanker_cars_needed <- 2 * poj.df$weekly_demand / 30
poj.df$tanker_car_weekly_purchase_cost <-
poj.df$num_tanker_cars_needed * 100000 / 48
poj.df$tanker_car_weekly_travel_cost <- 36 *
0.5 * poj.df$num_tanker_cars_needed * poj.df$p_s_dist
poj.df$tanker_car_weekly_hold_cost <- 10 *
0.5 * poj.df$num_tanker_cars_needed
poj.df$g_p_weekly_cost <- 0.22 * poj.df$weekly_demand * poj.df$g_p_dist
poj.df$storage_market_weekly_cost <- 1.2 * poj.df$weekly_demand *
poj.df$storage_dist
# Also include cost to buy capacity and maintain necessary
# processing. There's a one-time upgrade cost and an every-year
# maintenance. Divide the one-time cost by 48 to "week-ize" it.
poj.df$weekly_proc_build <- 8000 * poj.df$weekly_demand / 48
poj.df$weekly_proc_maint <- (2500 * poj.df$weekly_demand) / 48
# Note that we do not add in the $8m * 4 processing maintenance,
# because it will be there for all prices (and we're 2x-counting it
# for the other products)
poj.df$weekly_storage_build <- 6000 * poj.df$weekly_demand / 48
poj.df$weekly_storage_maint <- (650 * poj.df$weekly_demand) / 48
poj.df$manufacturing_cost <- 2000 * poj.df$weekly_demand
# Add in raw material cost
# Grove layout per region block: FLA x 4, TEX x 1, CAL x 2.
poj.df$raw_material_cost <- poj.df$weekly_demand * 2000 * c(
rep(mean.over.months[mean.over.months$grove == 'FLA', ]$mean_month,
4 * 301),
rep(mean.over.months[mean.over.months$grove == 'TEX', ]$mean_month,
301),
rep(mean.over.months[mean.over.months$grove == 'CAL', ]$mean_month,
2 * 301))
# Year-1 profit bears every cost; steady-state profit adds the amortized
# one-time costs (car purchase, proc/storage build) back in.
poj.df$year1_profit <- poj.df$revenue - (
poj.df$tanker_car_weekly_purchase_cost +
poj.df$tanker_car_weekly_travel_cost +
poj.df$tanker_car_weekly_hold_cost +
poj.df$g_p_weekly_cost +
poj.df$storage_market_weekly_cost +
poj.df$manufacturing_cost +
poj.df$weekly_proc_build +
poj.df$weekly_proc_maint +
poj.df$raw_material_cost +
poj.df$weekly_storage_build +
poj.df$weekly_storage_maint)
poj.df$profit <- poj.df$year1_profit + (
poj.df$tanker_car_weekly_purchase_cost +
poj.df$weekly_proc_build +
poj.df$weekly_storage_build
)
# Dotted = year-1 profit, solid = steady-state.
ggplot(poj.df, aes(x=price, colour=region)) +
geom_line(aes(y=year1_profit), linetype='dotted') +
geom_line(aes(y=profit)) +
ggtitle('POJ Profit (Year 1 and After)')
ggsave('profit_curves/poj_profit.png', width=10, height=6)
# Profit-maximizing price per region (ties kept).
poj.profit.max <- poj.df %>% group_by(region) %>%
filter(profit == max(profit))
write.csv(poj.profit.max, file='profit_csvs/poj_max_profit.csv',
quote=FALSE, row.names=FALSE)
#### The other two products are price-optimized using futures
# ROJ
# ---- ROJ: same cost model as POJ, plus a reconstitution step ----
roj.df <- all.predicted.demands[all.predicted.demands$product == 'ROJ',]
roj.df <- roj.df %>%
mutate(revenue=2000 * price * predicted_demand,
weekly_demand = predicted_demand)
# Add storage to market distances
# One 301-row block per region, in the order NE, MA, SE, MW, DS, NW, SW.
roj.df$storage_dist <- c(rep(479.1429, 301),
rep(286.7647, 301),
rep(712.1667, 301),
rep(368.5909, 301),
rep(413.3750, 301),
rep(659.1250, 301),
rep(659, 301))
# Instead of grove to storage, now we have grove to plant and
# plant to storage distances. We can make similar "efficiency"
# assumptions, where P2->S35, P3->S51, P5->S59, P9->S73.
# We'll ship raw ORA from TEX->P2, CAL->P5, FLA->P3, FLA->P9.
# TEX->P2 = 381
# CAL->P5 = 351
# FLA->P3 = 773
# FLA->P9 = 1528
roj.df$g_p_dist <- c(rep(773, 903),
rep(1528, 301),
rep(381, 301),
rep(351, 602))
# P2 -> S35 = 140
# P3 -> S51 = 317
# P5 -> S59 = 393
# P9 -> S73 = 98
roj.df$p_s_dist <- c(rep(317, 903),
rep(98, 301),
rep(140, 301),
rep(393, 602))
# For tanker car cost, we need to calculate how many tanker
# cars the given demand would require, multiply by its purchase
# cost, and then add the weekly traveling cost. We'll spread the
# one time purchase cost over weeks by dividing it by 48.
roj.df$num_tanker_cars_needed <- 2 * roj.df$weekly_demand / 30
roj.df$tanker_car_weekly_purchase_cost <-
roj.df$num_tanker_cars_needed * 100000 / 48
roj.df$tanker_car_weekly_travel_cost <- 36 *
0.5 * roj.df$num_tanker_cars_needed * roj.df$p_s_dist
roj.df$tanker_car_weekly_hold_cost <- 10 *
0.5 * roj.df$num_tanker_cars_needed
roj.df$g_p_weekly_cost <- 0.22 * roj.df$weekly_demand * roj.df$g_p_dist
roj.df$storage_market_weekly_cost <- 1.2 * roj.df$weekly_demand *
roj.df$storage_dist
# One-time build costs amortized over 48 weeks; maintenance recurs yearly.
roj.df$weekly_storage_build <- 6000 * roj.df$weekly_demand / 48
roj.df$weekly_storage_maint <- (650 * roj.df$weekly_demand) / 48
roj.df$weekly_proc_build <- 8000 * roj.df$weekly_demand / 48
roj.df$weekly_proc_maint <- (2500 * roj.df$weekly_demand) / 48
# Reconstitution cost
roj.df$reconstitution_cost <- 650 * roj.df$weekly_demand
# Add in raw material cost
# Grove layout per region block: FLA x 4, TEX x 1, CAL x 2.
roj.df$raw_material_cost <- roj.df$weekly_demand * 2000 * c(
rep(mean.over.months[mean.over.months$grove == 'FLA', ]$mean_month,
4 * 301),
rep(mean.over.months[mean.over.months$grove == 'TEX', ]$mean_month,
301),
rep(mean.over.months[mean.over.months$grove == 'CAL', ]$mean_month,
2 * 301))
# Also, add in manufacturing cost of FCOJ because we need to make
# FCOJ to get ROJ (assume no futures).
roj.df$manufacturing_cost <- 2000 * roj.df$weekly_demand
# Year-1 profit bears all costs; steady-state profit adds the amortized
# one-time costs (car purchase, proc/storage build) back in.
roj.df$year1_profit <- roj.df$revenue - (
roj.df$tanker_car_weekly_purchase_cost +
roj.df$tanker_car_weekly_travel_cost +
roj.df$tanker_car_weekly_hold_cost +
roj.df$g_p_weekly_cost +
roj.df$storage_market_weekly_cost +
roj.df$manufacturing_cost +
roj.df$reconstitution_cost +
roj.df$weekly_proc_build +
roj.df$weekly_proc_maint +
roj.df$raw_material_cost +
roj.df$weekly_storage_build +
roj.df$weekly_storage_maint)
roj.df$profit <- roj.df$year1_profit + (
roj.df$tanker_car_weekly_purchase_cost +
roj.df$weekly_proc_build +
roj.df$weekly_storage_build
)
# Dotted = year-1, solid = steady-state; the y=profit aesthetic here is
# overridden by both per-layer aes() calls below.
ggplot(roj.df, aes(x=price, y=profit, colour=region)) +
geom_line(aes(y=year1_profit), linetype='dotted') +
geom_line(aes(y=profit)) +
ggtitle('ROJ Profit (Year 1 and After)')
ggsave('profit_curves/roj_profit.png', width=10, height=6)
# Profit-maximizing price per region (ties kept).
roj.profit.max <- roj.df %>% group_by(region) %>%
filter(profit == max(profit))
write.csv(roj.profit.max, file='profit_csvs/roj_max_profit.csv',
quote=FALSE, row.names=FALSE)
# # FCOJ
# ####
# # Note this assumes we manufacture the FCOJ
# ####
# fcoj.df <- all.predicted.demands[all.predicted.demands$product == 'FCOJ',]
# fcoj.df <- fcoj.df %>%
# mutate(revenue=2000 * price * predicted_demand,
# weekly_demand = predicted_demand)
# # Add storage to market distances
# fcoj.df$storage_dist <- c(rep(479.1429, 301),
# rep(286.7647, 301),
# rep(712.1667, 301),
# rep(368.5909, 301),
# rep(413.3750, 301),
# rep(659.1250, 301),
# rep(659, 301))
# # Instead of grove to storage, now we have grove to plant and
# # plant to storage distances. We can make similar "efficiency"
# # assumptions, where P2->S35, P3->S51, P5->S59, P9->S73.
# # We'll ship raw ORA from TEX->P2, CAL->P5, FLA->P3, FLA->P9.
# # TEX->P2 = 381
# # CAL->P5 = 351
# # FLA->P3 = 773
# # FLA->P9 = 1528
# fcoj.df$g_p_dist <- c(rep(773, 903),
# rep(1528, 301),
# rep(381, 301),
# rep(351, 602))
# # P2 -> S35 = 140
# # P3 -> S51 = 317
# # P5 -> S59 = 393
# # P9 -> S73 = 98
# fcoj.df$p_s_dist <- c(rep(317, 903),
# rep(98, 301),
# rep(140, 301),
# rep(393, 602))
# # For tanker car cost, we need to calculate how many tanker
# # cars the given demand would require, multiply by its purchase
# # cost, and then add the weekly traveling cost. We'll spread the
# # one time purchase cost over weeks by dividing it by 48.
# fcoj.df$num_tanker_cars_needed <- fcoj.df$weekly_demand / 30
# fcoj.df$tanker_car_weekly_purchase_cost <-
# fcoj.df$num_tanker_cars_needed * 100000 / 48
# fcoj.df$tanker_car_weekly_travel_cost <- 36 *
# fcoj.df$num_tanker_cars_needed * fcoj.df$p_s_dist
# fcoj.df$g_p_weekly_cost <- 0.22 * fcoj.df$weekly_demand * fcoj.df$g_p_dist
# fcoj.df$storage_market_weekly_cost <- 1.2 * fcoj.df$weekly_demand *
# fcoj.df$storage_dist
# fcoj.df$weekly_proc_build <- 8000 * fcoj.df$weekly_demand / 48
# fcoj.df$weekly_proc_maint <- (2500 * fcoj.df$weekly_demand) / 48
# fcoj.df$weekly_storage_build <- 6000 * fcoj.df$weekly_demand / 48
# fcoj.df$weekly_storage_maint <- (650 * fcoj.df$weekly_demand) / 48
# fcoj.df$manufacturing_cost <- 2000 * fcoj.df$weekly_demand
# fcoj.df$raw_material_cost <- fcoj.df$weekly_demand * 2000 * c(
# rep(mean.over.months[mean.over.months$grove == 'FLA', ]$mean_month,
# 4 * 301),
# rep(mean.over.months[mean.over.months$grove == 'TEX', ]$mean_month,
# 301),
# rep(mean.over.months[mean.over.months$grove == 'CAL', ]$mean_month,
# 2 * 301))
# fcoj.df$year1_profit <- fcoj.df$revenue - (fcoj.df$tanker_car_weekly_purchase_cost +
# fcoj.df$tanker_car_weekly_travel_cost +
# fcoj.df$g_p_weekly_cost + fcoj.df$storage_market_weekly_cost +
# fcoj.df$manufacturing_cost +
# fcoj.df$weekly_proc_build +
# fcoj.df$weekly_proc_maint +
# fcoj.df$raw_material_cost +
# fcoj.df$weekly_storage_build +
# fcoj.df$weekly_storage_maint)
# fcoj.df$profit <- fcoj.df$year1_profit + fcoj.df$weekly_proc_build +
# fcoj.df$weekly_storage_build
# ggplot(fcoj.df, aes(x=price, y=profit, colour=region)) +
# geom_line(aes(y=year1_profit), linetype='dotted') +
# geom_line(aes(y=profit)) +
# ggtitle('FCOJ Profit (Year 1 and After)')
# ggsave('profit_curves/fcoj_profit.png', width=10, height=6)
# fcoj.profit.max <- fcoj.df %>% group_by(region) %>%
# filter(profit == max(profit))
# write.csv(fcoj.profit.max, file='profit_csvs/fcoj_max_profit.csv',
# quote=FALSE, row.names=FALSE)
# # Total profit, using FCOJ futures
# for (fcoj_future_price in seq(0.6, 1.1, 0.1)) {
# profit <- 48 * (sum(ora.profit.max$profit) + sum(poj.profit.max$profit) +
# sum(roj.profit.max$profit)) +
# (6112246 * 48 - fcoj_future_price * 136000 * 2000) -
# (4 * 7500000 + 4 * 8000000)
# print(profit)
# }
# sum(ora.profit.max$weekly_demand) + sum(poj.profit.max$weekly_demand) +
# sum(roj.profit.max$weekly_demand) |
plot_dependency_graph <- function() {
  # Build and display a labelled dependency graph from ";"-separated
  # edge-list files (head;tail;relation), one line per dependency.
  library(igraph)
  library(readtext)
  # read files
  directory <- "C:\\workspace\\org.servicifi.gelato.dependency\\myData\\test-project"
  files <- c("test.ee.txt", "train.ee.txt", "valid.ee.txt")
  docs <- lapply(files, function(f) readtext(paste(directory, f, sep="\\")))
  z <- unlist(lapply(docs, function(x) strsplit(x$text, '\n')[[1]]))
  # read.table treats '#' as a comment character by default, so rewrite
  # it to '$' before parsing (identifiers may contain '#').
  z <- gsub("#", "$", z)
  all <- read.table(text = z, sep=";",
                    col.names=c("head", "tail", "relation"))
  all <- as.matrix(all)
  print(all)
  identifiers <- unique(c(all[,1], all[,2]))
  g <- make_empty_graph() %>%
    add_vertices(length(identifiers)) %>%
    set_vertex_attr("label", value = identifiers)
  # Vectorized edge construction instead of the old per-row loop, which
  # copied the whole graph on every add_edges() call (O(n^2)) and used
  # 1:dim(all)[1] (yields c(1, 0) and misbehaves on zero rows).
  # match() maps endpoint names to vertex indices; rbind/as.vector
  # interleaves them as the (from1, to1, from2, to2, ...) add_edges expects.
  head.idx <- match(all[,1], identifiers)
  tail.idx <- match(all[,2], identifiers)
  g <- add_edges(g, as.vector(rbind(head.idx, tail.idx)), label = all[,3])
  tkplot(g, vertex.size=10, vertex.color="green")
} | /R/visualization/dependency_graph.R | no_license | amirms/GeLaToLab | R | false | false | 1,041 | r | plot_dependency_graph <- function() {
library(igraph)
library(readtext)
# NOTE(review): this empty graph is overwritten below -- dead assignment.
g <- igraph::make_empty_graph();
#read files
directory = "C:\\workspace\\org.servicifi.gelato.dependency\\myData\\test-project"
files = c("test.ee.txt", "train.ee.txt", "valid.ee.txt");
# Read each ";"-separated edge-list file and split into lines.
d = lapply(files, function(f) readtext(paste(directory, f, sep="\\")))
z <- unlist(lapply(d, function(x) strsplit(x$text, '\n') [[1]]))
# read.table treats '#' as a comment character by default, so rewrite it.
z <- gsub("#", "$", z)
all <- read.table(text = z, sep=";",
col.names=c("head", "tail", "relation"))
# all <- as.matrix(rbind(d[[1]], d[[2]], d[[3]]))
all <- as.matrix(all)
print(all)
# One vertex per distinct identifier, labelled with its name.
identifiers <- unique(c(all[,1], all[,2]))
g <- make_empty_graph() %>%
add_vertices(length(identifiers)) %>%
set_vertex_attr("label", value = identifiers)
# NOTE(review): 1:dim(all)[1] yields c(1, 0) when there are no rows, and
# adding edges one at a time copies the whole graph per iteration.
for (i in 1:dim(all)[1]){
headIndex = which(identifiers == all[i,1])
tailIndex = which(identifiers == all[i,2])
g <- add_edges(g, c(headIndex, tailIndex), label=all[i,3])
}
tkplot(g, vertex.size=10, vertex.color="green")
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.