content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
setwd("~/ExData_Plotting1")
## Import Data
household_power_consumption <- read.csv("~/ExData_Plotting1/household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
## Create datetime
household_power_consumption$DateTime <- as.POSIXct(paste(household_power_consumption$Date, household_power_consumption$Time), format="%d/%m/%Y %H:%M:%S")
## Convert date
household_power_consumption$Date <- as.Date(household_power_consumption$Date, "%d/%m/%Y")
## Create working subset of the Original Data
workset <- subset(household_power_consumption, Date >= "2007-02-01" & Date <= "2007-02-02")
## Plot 2 - GLobal Active Power by Day
png(file = "Plot2.png")
plot(workset$DateTime, as.numeric(workset$Global_active_power), type = "n", ylab = "Global Active Power (kilowatts)", xlab = "", cex.axis = 0.8, cex.lab = .7)
lines(workset$DateTime, as.numeric(workset$Global_active_power))
dev.off()
| /Plot2.R | no_license | Ando3121/ExData_Plotting1 | R | false | false | 895 | r | setwd("~/ExData_Plotting1")
## Import Data
## NOTE(review): assumes the raw semicolon-separated file already exists
## under ~/ExData_Plotting1 (setwd() was called just before this block).
household_power_consumption <- read.csv("~/ExData_Plotting1/household_power_consumption.txt", sep=";", stringsAsFactors=FALSE)
## Create datetime: paste the Date and Time text columns into one POSIXct stamp
household_power_consumption$DateTime <- as.POSIXct(paste(household_power_consumption$Date, household_power_consumption$Time), format="%d/%m/%Y %H:%M:%S")
## Convert date from dd/mm/yyyy text into Date objects
household_power_consumption$Date <- as.Date(household_power_consumption$Date, "%d/%m/%Y")
## Create working subset of the Original Data (first two days of Feb 2007)
workset <- subset(household_power_consumption, Date >= "2007-02-01" & Date <= "2007-02-02")
## Plot 2 - Global Active Power by Day, written to Plot2.png:
## empty frame first (type = "n"), then the line trace on top
png(file = "Plot2.png")
plot(workset$DateTime, as.numeric(workset$Global_active_power), type = "n", ylab = "Global Active Power (kilowatts)", xlab = "", cex.axis = 0.8, cex.lab = .7)
lines(workset$DateTime, as.numeric(workset$Global_active_power))
dev.off()
|
# #############################################################################
# quantilebias_functions.R
#
# Author: Enrico Arnone (ISAC-CNR, Italy)
#
# #############################################################################
# Description
# Originally developed as functions to be used in HyInt routines
#
# Modification history
# 20170901-A_arno_en: adapted to HyInt and extended
# 20170522-A_davi_pa: Creation for MiLES
# #############################################################################
# basis functions
##########################################################
#------------------------Packages------------------------#
##########################################################
# loading packages
library("maps")
library("ncdf4")
library("PCICt")
##########################################################
#--------------Time Based functions----------------------#
##########################################################
# check number of days for each month
number_days_month <- function(datas) {
  # Return the number of days in the month containing `datas`.
  #
  # Args:
  #   datas: a single value coercible by as.Date() (e.g. "2000-02-15").
  # Returns:
  #   integer number of days in that calendar month (leap years handled
  #   by the Date arithmetic itself).
  datas <- as.Date(datas)
  # Direct computation instead of the original day-by-day while loop:
  # take the first day of the month, step one calendar month forward,
  # and the day before that is the month's last day.
  first_of_month <- as.Date(format(datas, "%Y-%m-01"))
  first_of_next <- seq(first_of_month, by = "1 month", length.out = 2)[2]
  return(as.integer(format(first_of_next - 1, format = "%d")))
}
##########################################################
#--------------NetCDF loading function-------------------#
##########################################################
# universal function to open a single var 3D (x,y,time) ncdf files: it includes
# rotation, y-axis flipping, time selection and CDO-based interpolation
# to replace both ncdf.opener.time and ncdf.opener (deprecated and removed)
# automatically rotate matrix to place greenwich at the center (flag "rotate")
# and flip the latitudes in order to have increasing
# if required (flag "interp2grid") additional interpolation with CDO is used.
# "grid" can be used to specify the target grid name
# time selection based on package PCICt must be specified with both "tmonths"
# and "tyears" flags. It returns a list including its own dimensions
# Universal NetCDF opener for a single 3D (x, y, time) variable.
#
# Depending on the flags it can:
#  * remap the file onto a target grid with CDO (interp2grid / grid /
#    remap_method),
#  * rotate the longitude axis (rotate: "full" = 180 deg so Greenwich is
#    centred, "half" = 90 deg, "no" = leave untouched),
#  * flip a decreasing latitude axis so latitudes end up increasing,
#  * subset time by years (tyears) and months (tmonths), using PCICt so
#    that non-standard calendars are handled.
#
# Returns a named list holding the data array ("field") plus one element
# per dimension of the variable.  Side effect: if exportlonlat is TRUE the
# longitude/latitude vectors are also assigned into the global environment
# as "ics" and "ipsilon" (legacy behaviour relied on by calling scripts).
ncdf_opener_universal <- # nolint
  function(namefile,
           namevar = NULL,
           namelon = NULL,
           namelat = NULL,
           tmonths = NULL,
           tyears = NULL,
           rotate = "full",
           interp2grid = F,
           grid = "r144x73",
           remap_method = "remapcon2",
           exportlonlat = TRUE,
           verbose = F) {
    # load package
    require(ncdf4)
    # verbose-only printing function
    printv <- function(value) {
      if (verbose) {
        print(value)
      }
    }
    # check if timeflag is activated or full file must be loaded
    # (both tyears AND tmonths must be supplied to enable time selection)
    if (is.null(tyears) | is.null(tmonths)) {
      timeflag <- FALSE
      printv("No time and months specified, loading all the data")
    } else {
      timeflag <- TRUE
      printv("tyears and tmonths are set!")
      require(PCICt)
    }
    # translate the rotation keyword into a logical flag plus two offsets
    # (fractions of the longitude axis length) used by rotation() below
    if (rotate == "full") {
      rot <- T
      move1 <- move2 <- 1 / 2
    } # 180 degrees rotation of longitude
    if (rotate == "half") {
      rot <- T
      move1 <- 1 / 4
      move2 <- 3 / 4
    } # 90 degree rotation (useful for TM90)
    if (rotate == "no") {
      rot <- F
    } # keep as it is
    # interpolation made with CDO: second order conservative remapping
    # NOTE(review): cdo() is not defined in this file; it is assumed to be
    # provided by the calling environment -- confirm before reuse.
    if (interp2grid) {
      print(paste("Remapping with CDO on", grid, "grid"))
      if (is.null(namevar)) {
        namefile <- cdo(remap_method,
          args = paste0("'", grid, "'"),
          input = namefile
        )
      } else {
        # extract the requested variable first, then remap only that one
        selectf <- cdo("selvar", args = namevar, input = namefile)
        gridf <- tempfile()
        cdo("griddes", input = grid, stdout = gridf)
        namefile <- cdo(remap_method, args = gridf, input = selectf)
        unlink(c(selectf, gridf))
      }
    }
    # define rotate function (faster than with apply): shifts data along
    # the first (longitude) dimension using the move1/move2 offsets;
    # handles 1D (lon), 2D (x,y) and 3D (x,y,t) inputs
    rotation <- function(line) {
      vettore <- line
      dims <- length(dim(vettore))
      # for longitudes (wrapped values are shifted by -360 degrees)
      if (dims == 1) {
        ll <- length(line)
        line[(ll * move1):ll] <- vettore[1:(ll * move2 + 1)]
        line[1:(ll * move1 - 1)] <- vettore[(ll * move2 + 2):ll] - 360
      }
      # for x,y data
      if (dims == 2) {
        ll <- length(line[, 1])
        line[(ll * move1):ll, ] <- vettore[1:(ll * move2 + 1), ]
        line[1:(ll * move1 - 1), ] <- vettore[(ll * move2 + 2):ll, ]
      }
      # for x,y,t data
      if (dims == 3) {
        ll <- length(line[, 1, 1])
        line[(ll * move1):ll, , ] <- vettore[1:(ll * move2 + 1), , ]
        line[1:(ll * move1 - 1), , ] <-
          vettore[(ll * move2 + 2):ll, , ]
      }
      return(line)
    }
    # define flip function (because rev/apply does not work here):
    # reverses the second (latitude) dimension of 2D or 3D arrays
    flipper <- function(field) {
      dims <- length(dim(field))
      if (dims == 2) {
        ll <- length(field[1, ])
        field <- field[, ll:1]
      } # for x,y data
      if (dims == 3) {
        ll <- length(field[1, , 1])
        field <- field[, ll:1, ]
      } # for x,y,t data
      return(field)
    }
    # opening file: getting variable (if namevar is given, that variable
    # is extracted)
    printv(paste("opening file:", namefile))
    a <- nc_open(namefile)
    # if no name provided load the only variable available
    if (is.null(namevar)) {
      namevar <- names(a$var)
      if (length(namevar) > 1) {
        print(namevar)
        stop("More than one var in the files, please select it
         with namevar=yourvar")
      }
    }
    # load axis: updated version, looking for dimension directly stored
    # inside the variable
    naxis <-
      unlist(lapply(a$var[[namevar]]$dim, function(x) {
        x["name"]
      }))
    # pull every dimension variable into the local environment, bound to
    # the dimension's own name (e.g. "lon", "lat", "time")
    for (axis in naxis) {
      assign(axis, ncvar_get(a, axis))
      printv(paste(axis, ":", length(get(axis)), "records"))
    }
    if (timeflag) {
      printv("selecting years and months")
      # based on preprocessing of CDO time format: get calendar type and
      # use PCICt package for irregular data
      # NOTE(review): `time` below is the vector created by the assign()
      # loop above, so this implicitly requires the time dimension to be
      # literally named "time"
      caldata <- ncatt_get(a, "time", "calendar")$value
      timeline <-
        as.PCICt(as.character(time), format = "%Y%m%d", cal = caldata)
      # break if the calendar has not been recognized
      if (any(is.na(timeline))) {
        stop("Calendar from NetCDF is unsupported or not present. Stopping!!!")
      }
      # break if the data requested is not there:
      # build the last requested day (month length via number_days_month)
      lastday_base <- paste0(max(tyears), "-", max(tmonths), "-28")
      maxdays <- number_days_month(lastday_base)
      if (caldata == "360_day") {
        maxdays <- 30
      }
      # uses number_days_month, which loops to get the month change
      lastday <- as.PCICt(paste0(
        max(tyears), "-", max(tmonths), "-",
        maxdays
      ),
      cal = caldata,
      format = "%Y-%m-%d"
      )
      firstday <-
        as.PCICt(paste0(min(tyears), "-", min(tmonths), "-01"),
          cal = caldata,
          format = "%Y-%m-%d"
        )
      if (max(timeline) < lastday | min(timeline) > firstday) {
        stop("You requested a time interval that is not present in the NetCDF")
      }
    }
    # time selection and variable loading
    printv("loading full field...")
    field <- ncvar_get(a, namevar)
    if (timeflag) {
      # select data we need: keep only timesteps matching both the
      # requested years and the requested months
      select <- which(as.numeric(format(timeline, "%Y")) %in% tyears &
        as.numeric(format(timeline, "%m")) %in% tmonths)
      field <- field[, , select]
      time <- timeline[select]
      printv(paste("This is a", caldata, "calendar"))
      printv(paste(
        length(time), "days selected from", time[1],
        "to", time[length(time)]
      ))
      printv(paste("Months that have been loaded are.. "))
      printv(unique(format(time, "%Y-%m")))
    }
    # check for dimensions (presence or not of time dimension)
    dimensions <- length(dim(field))
    # if dimensions are multiple, get longitude, latitude
    # if needed, rotate and flip the array
    # (recognised axis names when namelon/namelat are not given)
    xlist <- c("lon", "Lon", "longitude", "Longitude")
    ylist <- c("lat", "Lat", "latitude", "Latitude")
    if (dimensions > 1) {
      # assign ics (longitude) and ipsilon (latitude)
      if (is.null(namelon)) {
        if (any(xlist %in% naxis)) {
          ics <- get(naxis[naxis %in% xlist], a$dim)$vals
        } else {
          print("WARNING: No lon found")
          ics <- NA
        }
      } else {
        # explicit longitude variable name supplied by the caller
        ics <- ncvar_get(a, namelon)
      }
      if (is.null(namelat)) {
        if (any(ylist %in% naxis)) {
          ipsilon <- get(naxis[naxis %in% ylist], a$dim)$vals
        } else {
          print("WARNING: No lat found")
          ipsilon <- NA
        }
      } else {
        ipsilon <- ncvar_get(a, namelat)
      }
      # longitude rotation around Greenwich
      if (rot) {
        printv("rotating...")
        ics <- rotation(ics)
        field <- rotation(field)
      }
      # flip a decreasing latitude axis (and the field with it)
      if (ipsilon[2] < ipsilon[1] & length(ipsilon) > 1) {
        if (length(ics) > 1) {
          print("flipping...")
          ipsilon <- sort(ipsilon)
          field <- flipper(field)
        }
      }
      # exporting variables to the main program (global environment)
      if (exportlonlat) {
        assign("ics", ics, envir = .GlobalEnv)
        assign("ipsilon", ipsilon, envir = .GlobalEnv)
      }
      # refresh the local axis bindings so the returned list carries the
      # rotated/flipped coordinates instead of the raw ones
      assign(naxis[naxis %in% c(xlist, namelon)], ics)
      assign(naxis[naxis %in% c(ylist, namelat)], ipsilon)
    }
    if (dimensions > 3) {
      stop("This file is more than 3D file")
    }
    # close connection
    nc_close(a)
    # remove interpolated file (it was a temporary produced by CDO)
    if (interp2grid) {
      unlink(namefile)
    }
    # showing array properties
    printv(paste(dim(field)))
    if (timeflag) {
      printv(paste("From", time[1], "to", time[length(time)]))
    }
    # returning file list: the field plus every axis, by name
    return(mget(c("field", naxis)))
  }
# ncdf.opener is a simplified wrapper for ncdf.opener.universal which returns
# only the field, ignoring the list
# Thin convenience front-end for ncdf_opener_universal(): opens the file
# with identical options but discards the axis list and returns only the
# data array ("field" element).
ncdf_opener <- function(namefile,
                        namevar = NULL,
                        namelon = NULL,
                        namelat = NULL,
                        tmonths = NULL,
                        tyears = NULL,
                        rotate = "full",
                        interp2grid = F,
                        grid = "r144x73",
                        remap_method = "remapcon2",
                        exportlonlat = T) {
  # Forward every option explicitly by name, then unwrap the result.
  opened <- ncdf_opener_universal(
    namefile = namefile,
    namevar = namevar,
    namelon = namelon,
    namelat = namelat,
    tmonths = tmonths,
    tyears = tyears,
    rotate = rotate,
    interp2grid = interp2grid,
    grid = grid,
    remap_method = remap_method,
    exportlonlat = exportlonlat
  )
  opened$field
}
##########################################################
#--------------Plotting functions------------------------#
##########################################################
graphics_startup <- function(figname, output_file_type, plot_size) {
  # Open a graphics device for the figure `figname`.
  #
  # Args:
  #   figname: path of the output figure (ignored for x11).
  #   output_file_type: device type, case-insensitive; one of
  #     "png", "pdf", "eps"/"epsi"/"ps", "x11".
  #   plot_size: c(width, height); units follow the device convention
  #     (pixels for png, inches for pdf/postscript/x11).
  #
  # An unrecognised type opens no device (same as the original code).
  # choose output format for figure - by JvH
  # hoist the repeated tolower() call and use %in% instead of chaining
  # scalar comparisons with the vectorised `|` operator
  type <- tolower(output_file_type)
  if (type == "png") {
    png(
      filename = figname,
      width = plot_size[1],
      height = plot_size[2]
    )
  } else if (type == "pdf") {
    pdf(
      file = figname,
      width = plot_size[1],
      height = plot_size[2],
      onefile = TRUE
    )
  } else if (type %in% c("eps", "epsi", "ps")) {
    setEPS(
      width = plot_size[1],
      height = plot_size[2],
      onefile = TRUE,
      paper = "special"
    )
    postscript(figname)
  } else if (type == "x11") {
    x11(width = plot_size[1], height = plot_size[2])
  }
  # return NULL invisibly: the function is called for its side effect
  invisible(NULL)
}
# Echo the figure name (so the run log records what was written) and
# close the currently active graphics device.  Returns NULL.
graphics_close <- function(figname) {
  print(figname)
  dev.off()
  return()
}
# extensive filled.contour function
# Stripped-down variant of graphics::filled.contour: draws only the filled
# contour panel (no colour key) so it can be combined with contour()
# overlays and multi-panel layouts; the companion image_scale3() draws the
# legend separately.
# Arguments mirror filled.contour(); key.title, key.axes and mar are
# accepted for call compatibility but are not referenced in this body.
# With extend = TRUE, z values outside range(levels) are clamped to the
# extreme levels so out-of-range data still gets coloured.
filled_contour3 <- # nolint
  function(x = seq(0, 1, length.out = nrow(z)),
           y = seq(0, 1, length.out = ncol(z)),
           z,
           xlim = range(x, finite = TRUE),
           ylim = range(y, finite = TRUE),
           zlim = range(z, finite = TRUE),
           levels = pretty(zlim, nlevels),
           nlevels = 20,
           color.palette = cm.colors,
           col = color.palette(length(levels) - 1),
           extend = TRUE,
           plot.title,
           plot.axes,
           key.title,
           key.axes,
           asp = NA,
           xaxs = "i",
           yaxs = "i",
           las = 1,
           axes = TRUE,
           frame.plot = axes,
           mar,
           ...) {
    # modification by Ian Taylor of the filled.contour function
    # to remove the key and facilitate overplotting with contour()
    # further modified by Carey McGilliard and Bridget Ferris
    # to allow multiple plots on one page
    # modification to allow plot outside boundaries
    # reconstruct x/y/z when a single list argument was passed,
    # mimicking filled.contour()'s argument juggling
    if (missing(z)) {
      if (!missing(x)) {
        if (is.list(x)) {
          z <- x$z
          y <- x$y
          x <- x$x
        }
        else {
          z <- x
          x <- seq.int(0, 1, length.out = nrow(z))
        }
      }
      else {
        stop("no 'z' matrix specified")
      }
    }
    else if (is.list(x)) {
      y <- x$y
      x <- x$x
    }
    if (any(diff(x) <= 0) || any(diff(y) <= 0)) {
      stop("increasing 'x' and 'y' values expected")
    }
    # trim extremes for nicer plots (clamp z into the level range)
    if (extend) {
      z[z < min(levels)] <- min(levels)
      z[z > max(levels)] <- max(levels)
    }
    plot.new()
    plot.window(xlim,
      ylim,
      "",
      xaxs = xaxs,
      yaxs = yaxs,
      asp = asp
    )
    if (!is.matrix(z) || nrow(z) <= 1 || ncol(z) <= 1) {
      stop("no proper 'z' matrix specified")
    }
    if (!is.double(z)) {
      storage.mode(z) <- "double"
    }
    # actual filled rendering via the internal engine of filled.contour
    .filled.contour(as.double(x), as.double(y), z, as.double(levels),
      col = col
    )
    # axes: default Axis() calls, or the caller-supplied plot.axes
    # expression (evaluated here thanks to lazy evaluation)
    if (missing(plot.axes)) {
      if (axes) {
        title(
          main = "",
          xlab = "",
          ylab = ""
        )
        Axis(x, side = 1, ...)
        Axis(y, side = 2, ...)
      }
    }
    else {
      plot.axes
    }
    if (frame.plot) {
      box()
    }
    # title: default title(...) or the caller-supplied plot.title expression
    if (missing(plot.title)) {
      title(...)
    } else {
      plot.title
    }
    invisible()
  }
# Draw a colour-bar legend beside the current plot, matching the levels
# and palette used by filled_contour3().
# The bar is drawn in a new figure region computed from the current one
# (new_fig_scale gives the offsets as fractions of the figure size).
# With extend = TRUE, triangular end caps are added beyond the level
# range to represent clamped out-of-range values.
# Restores the graphics state on exit so a subsequent plot continues the
# mfrow-style layout.
image_scale3 <- function(z,
                         levels,
                         color.palette = heat.colors,
                         col = col,
                         colorbar.label = "image.scale",
                         extend = T,
                         line.label = 2,
                         line.colorbar = 0,
                         cex.label = 1,
                         cex.colorbar = 1,
                         colorbar.width = 1,
                         new_fig_scale = c(-0.07, -0.03, 0.1, -0.1),
                         ...) {
  # save properties from main plotting region (restored at the end)
  old.par <- par(no.readonly = TRUE)
  mfg.save <- par()$mfg
  old.fig <- par()$fig
  # defining plotting region with proper scaling
  xscal <- (old.fig[2] - old.fig[1])
  yscal <- (old.fig[4] - old.fig[3])
  lw <- colorbar.width
  lp <- line.colorbar / 100
  new.fig <- c(
    old.fig[2] + new_fig_scale[1] * xscal * lw - lp,
    old.fig[2] + new_fig_scale[2] * xscal - lp,
    old.fig[3] + new_fig_scale[3] * yscal,
    old.fig[4] + new_fig_scale[4] * yscal
  )
  # default levels: 12 equally spaced values spanning the data
  if (missing(levels)) {
    levels <- seq(min(z), max(z), , 12)
  }
  # fixing color palette (one colour per level interval)
  if (missing(col)) {
    col <- color.palette(length(levels) - 1)
  }
  # starting plot in the freshly computed figure region
  par(
    mar = c(1, 1, 1, 1),
    fig = new.fig,
    new = TRUE
  )
  # creating polygons for legend: one rectangle per level interval
  poly <- vector(mode = "list", length(col))
  for (i in seq(poly)) {
    poly[[i]] <- c(levels[i], levels[i + 1], levels[i + 1], levels[i])
  }
  xlim <- c(0, 1)
  if (extend) {
    # leave vertical room for the triangular end caps
    longer <- 1.5
    dl <- diff(levels)[1] * longer
    ylim <- c(min(levels) - dl, max(levels) + dl)
  } else {
    ylim <- range(levels)
  }
  plot(
    1,
    1,
    t = "n",
    ylim = ylim,
    xlim = xlim,
    axes = FALSE,
    xlab = "",
    ylab = "",
    xaxs = "i",
    yaxs = "i",
    ...
  )
  for (i in seq(poly)) {
    polygon(c(0, 0, 1, 1), poly[[i]], col = col[i], border = NA)
  }
  if (extend) {
    # bottom triangular cap (values below the lowest level)
    polygon(c(0, 1, 1 / 2),
      c(levels[1], levels[1], levels[1] - dl),
      col = col[1],
      border = NA
    )
    # top triangular cap (values above the highest level)
    polygon(c(0, 1, 1 / 2),
      c(
        levels[length(levels)], levels[length(levels)],
        levels[length(levels)] + dl
      ),
      col = col[length(col)],
      border = NA
    )
    # outline around the full bar including both caps
    polygon(
      c(0, 0, 1 / 2, 1, 1, 1 / 2),
      c(
        levels[1], levels[length(levels)], levels[length(levels)] + dl,
        levels[length(levels)], levels[1], levels[1] - dl
      ),
      border = "black",
      lwd = 2
    )
    # tick marks restricted to the true (un-extended) level range
    ylim0 <- range(levels)
    prettyspecial <- pretty(ylim0)
    prettyspecial <- prettyspecial[prettyspecial <= max(ylim0) &
      prettyspecial >= min(ylim0)]
    axis(
      4,
      las = 1,
      cex.axis = cex.colorbar,
      at = prettyspecial,
      labels = prettyspecial,
      ...
    )
  } else {
    box()
    axis(4, las = 1, cex.axis = cex.colorbar, ...)
  }
  # box, axis and legend label
  mtext(colorbar.label,
    line = line.label,
    side = 4,
    cex = cex.label,
    ...
  )
  # resetting properties for starting a new plot (mfrow style)
  par(old.par)
  par(mfg = mfg.save, new = FALSE)
  invisible()
}
| /esmvaltool/diag_scripts/quantilebias/quantilebias_functions.R | permissive | ESMValGroup/ESMValTool | R | false | false | 17,239 | r | # #############################################################################
# quantilebias_functions.R
#
# Author: Enrico Arnone (ISAC-CNR, Italy)
#
# #############################################################################
# Description
# Originally developed as functions to be used in HyInt routines
#
# Modification history
# 20170901-A_arno_en: adapted to HyInt and extended
# 20170522-A_davi_pa: Creation for MiLES
# #############################################################################
# basis functions
##########################################################
#------------------------Packages------------------------#
##########################################################
# loading packages
library("maps")
library("ncdf4")
library("PCICt")
##########################################################
#--------------Time Based functions----------------------#
##########################################################
# check number of days for each month
number_days_month <- function(datas) {
  # Count the days in the month containing `datas` (anything as.Date()
  # can parse) by stepping forward one day at a time until the month
  # changes; the day before the change is the month's last day.
  current <- as.Date(datas)
  month_id <- format(current, format = "%m")
  repeat {
    current <- current + 1
    if (format(current, format = "%m") != month_id) {
      break
    }
  }
  as.integer(format(current - 1, format = "%d"))
}
##########################################################
#--------------NetCDF loading function-------------------#
##########################################################
# universal function to open a single var 3D (x,y,time) ncdf files: it includes
# rotation, y-axis flipping, time selection and CDO-based interpolation
# to replace both ncdf.opener.time and ncdf.opener (deprecated and removed)
# automatically rotate matrix to place greenwich at the center (flag "rotate")
# and flip the latitudes in order to have increasing
# if required (flag "interp2grid") additional interpolation with CDO is used.
# "grid" can be used to specify the target grid name
# time selection based on package PCICt must be specified with both "tmonths"
# and "tyears" flags. It returns a list including its own dimensions
# Universal NetCDF opener for a single 3D (x, y, time) variable.
#
# Depending on the flags it can:
#  * remap the file onto a target grid with CDO (interp2grid / grid /
#    remap_method),
#  * rotate the longitude axis (rotate: "full" = 180 deg so Greenwich is
#    centred, "half" = 90 deg, "no" = leave untouched),
#  * flip a decreasing latitude axis so latitudes end up increasing,
#  * subset time by years (tyears) and months (tmonths), using PCICt so
#    that non-standard calendars are handled.
#
# Returns a named list holding the data array ("field") plus one element
# per dimension of the variable.  Side effect: if exportlonlat is TRUE the
# longitude/latitude vectors are also assigned into the global environment
# as "ics" and "ipsilon" (legacy behaviour relied on by calling scripts).
ncdf_opener_universal <- # nolint
  function(namefile,
           namevar = NULL,
           namelon = NULL,
           namelat = NULL,
           tmonths = NULL,
           tyears = NULL,
           rotate = "full",
           interp2grid = F,
           grid = "r144x73",
           remap_method = "remapcon2",
           exportlonlat = TRUE,
           verbose = F) {
    # load package
    require(ncdf4)
    # verbose-only printing function
    printv <- function(value) {
      if (verbose) {
        print(value)
      }
    }
    # check if timeflag is activated or full file must be loaded
    # (both tyears AND tmonths must be supplied to enable time selection)
    if (is.null(tyears) | is.null(tmonths)) {
      timeflag <- FALSE
      printv("No time and months specified, loading all the data")
    } else {
      timeflag <- TRUE
      printv("tyears and tmonths are set!")
      require(PCICt)
    }
    # translate the rotation keyword into a logical flag plus two offsets
    # (fractions of the longitude axis length) used by rotation() below
    if (rotate == "full") {
      rot <- T
      move1 <- move2 <- 1 / 2
    } # 180 degrees rotation of longitude
    if (rotate == "half") {
      rot <- T
      move1 <- 1 / 4
      move2 <- 3 / 4
    } # 90 degree rotation (useful for TM90)
    if (rotate == "no") {
      rot <- F
    } # keep as it is
    # interpolation made with CDO: second order conservative remapping
    # NOTE(review): cdo() is not defined in this file; it is assumed to be
    # provided by the calling environment -- confirm before reuse.
    if (interp2grid) {
      print(paste("Remapping with CDO on", grid, "grid"))
      if (is.null(namevar)) {
        namefile <- cdo(remap_method,
          args = paste0("'", grid, "'"),
          input = namefile
        )
      } else {
        # extract the requested variable first, then remap only that one
        selectf <- cdo("selvar", args = namevar, input = namefile)
        gridf <- tempfile()
        cdo("griddes", input = grid, stdout = gridf)
        namefile <- cdo(remap_method, args = gridf, input = selectf)
        unlink(c(selectf, gridf))
      }
    }
    # define rotate function (faster than with apply): shifts data along
    # the first (longitude) dimension using the move1/move2 offsets;
    # handles 1D (lon), 2D (x,y) and 3D (x,y,t) inputs
    rotation <- function(line) {
      vettore <- line
      dims <- length(dim(vettore))
      # for longitudes (wrapped values are shifted by -360 degrees)
      if (dims == 1) {
        ll <- length(line)
        line[(ll * move1):ll] <- vettore[1:(ll * move2 + 1)]
        line[1:(ll * move1 - 1)] <- vettore[(ll * move2 + 2):ll] - 360
      }
      # for x,y data
      if (dims == 2) {
        ll <- length(line[, 1])
        line[(ll * move1):ll, ] <- vettore[1:(ll * move2 + 1), ]
        line[1:(ll * move1 - 1), ] <- vettore[(ll * move2 + 2):ll, ]
      }
      # for x,y,t data
      if (dims == 3) {
        ll <- length(line[, 1, 1])
        line[(ll * move1):ll, , ] <- vettore[1:(ll * move2 + 1), , ]
        line[1:(ll * move1 - 1), , ] <-
          vettore[(ll * move2 + 2):ll, , ]
      }
      return(line)
    }
    # define flip function (because rev/apply does not work here):
    # reverses the second (latitude) dimension of 2D or 3D arrays
    flipper <- function(field) {
      dims <- length(dim(field))
      if (dims == 2) {
        ll <- length(field[1, ])
        field <- field[, ll:1]
      } # for x,y data
      if (dims == 3) {
        ll <- length(field[1, , 1])
        field <- field[, ll:1, ]
      } # for x,y,t data
      return(field)
    }
    # opening file: getting variable (if namevar is given, that variable
    # is extracted)
    printv(paste("opening file:", namefile))
    a <- nc_open(namefile)
    # if no name provided load the only variable available
    if (is.null(namevar)) {
      namevar <- names(a$var)
      if (length(namevar) > 1) {
        print(namevar)
        stop("More than one var in the files, please select it
         with namevar=yourvar")
      }
    }
    # load axis: updated version, looking for dimension directly stored
    # inside the variable
    naxis <-
      unlist(lapply(a$var[[namevar]]$dim, function(x) {
        x["name"]
      }))
    # pull every dimension variable into the local environment, bound to
    # the dimension's own name (e.g. "lon", "lat", "time")
    for (axis in naxis) {
      assign(axis, ncvar_get(a, axis))
      printv(paste(axis, ":", length(get(axis)), "records"))
    }
    if (timeflag) {
      printv("selecting years and months")
      # based on preprocessing of CDO time format: get calendar type and
      # use PCICt package for irregular data
      # NOTE(review): `time` below is the vector created by the assign()
      # loop above, so this implicitly requires the time dimension to be
      # literally named "time"
      caldata <- ncatt_get(a, "time", "calendar")$value
      timeline <-
        as.PCICt(as.character(time), format = "%Y%m%d", cal = caldata)
      # break if the calendar has not been recognized
      if (any(is.na(timeline))) {
        stop("Calendar from NetCDF is unsupported or not present. Stopping!!!")
      }
      # break if the data requested is not there:
      # build the last requested day (month length via number_days_month)
      lastday_base <- paste0(max(tyears), "-", max(tmonths), "-28")
      maxdays <- number_days_month(lastday_base)
      if (caldata == "360_day") {
        maxdays <- 30
      }
      # uses number_days_month, which loops to get the month change
      lastday <- as.PCICt(paste0(
        max(tyears), "-", max(tmonths), "-",
        maxdays
      ),
      cal = caldata,
      format = "%Y-%m-%d"
      )
      firstday <-
        as.PCICt(paste0(min(tyears), "-", min(tmonths), "-01"),
          cal = caldata,
          format = "%Y-%m-%d"
        )
      if (max(timeline) < lastday | min(timeline) > firstday) {
        stop("You requested a time interval that is not present in the NetCDF")
      }
    }
    # time selection and variable loading
    printv("loading full field...")
    field <- ncvar_get(a, namevar)
    if (timeflag) {
      # select data we need: keep only timesteps matching both the
      # requested years and the requested months
      select <- which(as.numeric(format(timeline, "%Y")) %in% tyears &
        as.numeric(format(timeline, "%m")) %in% tmonths)
      field <- field[, , select]
      time <- timeline[select]
      printv(paste("This is a", caldata, "calendar"))
      printv(paste(
        length(time), "days selected from", time[1],
        "to", time[length(time)]
      ))
      printv(paste("Months that have been loaded are.. "))
      printv(unique(format(time, "%Y-%m")))
    }
    # check for dimensions (presence or not of time dimension)
    dimensions <- length(dim(field))
    # if dimensions are multiple, get longitude, latitude
    # if needed, rotate and flip the array
    # (recognised axis names when namelon/namelat are not given)
    xlist <- c("lon", "Lon", "longitude", "Longitude")
    ylist <- c("lat", "Lat", "latitude", "Latitude")
    if (dimensions > 1) {
      # assign ics (longitude) and ipsilon (latitude)
      if (is.null(namelon)) {
        if (any(xlist %in% naxis)) {
          ics <- get(naxis[naxis %in% xlist], a$dim)$vals
        } else {
          print("WARNING: No lon found")
          ics <- NA
        }
      } else {
        # explicit longitude variable name supplied by the caller
        ics <- ncvar_get(a, namelon)
      }
      if (is.null(namelat)) {
        if (any(ylist %in% naxis)) {
          ipsilon <- get(naxis[naxis %in% ylist], a$dim)$vals
        } else {
          print("WARNING: No lat found")
          ipsilon <- NA
        }
      } else {
        ipsilon <- ncvar_get(a, namelat)
      }
      # longitude rotation around Greenwich
      if (rot) {
        printv("rotating...")
        ics <- rotation(ics)
        field <- rotation(field)
      }
      # flip a decreasing latitude axis (and the field with it)
      if (ipsilon[2] < ipsilon[1] & length(ipsilon) > 1) {
        if (length(ics) > 1) {
          print("flipping...")
          ipsilon <- sort(ipsilon)
          field <- flipper(field)
        }
      }
      # exporting variables to the main program (global environment)
      if (exportlonlat) {
        assign("ics", ics, envir = .GlobalEnv)
        assign("ipsilon", ipsilon, envir = .GlobalEnv)
      }
      # refresh the local axis bindings so the returned list carries the
      # rotated/flipped coordinates instead of the raw ones
      assign(naxis[naxis %in% c(xlist, namelon)], ics)
      assign(naxis[naxis %in% c(ylist, namelat)], ipsilon)
    }
    if (dimensions > 3) {
      stop("This file is more than 3D file")
    }
    # close connection
    nc_close(a)
    # remove interpolated file (it was a temporary produced by CDO)
    if (interp2grid) {
      unlink(namefile)
    }
    # showing array properties
    printv(paste(dim(field)))
    if (timeflag) {
      printv(paste("From", time[1], "to", time[length(time)]))
    }
    # returning file list: the field plus every axis, by name
    return(mget(c("field", naxis)))
  }
# ncdf.opener is a simplified wrapper for ncdf.opener.universal which returns
# only the field, ignoring the list
# Thin wrapper around ncdf_opener_universal(): forwards all arguments
# (positionally, except exportlonlat) and returns only the data array
# ("field" element), discarding the axis list.
ncdf_opener <- function(namefile,
                        namevar = NULL,
                        namelon = NULL,
                        namelat = NULL,
                        tmonths = NULL,
                        tyears = NULL,
                        rotate = "full",
                        interp2grid = F,
                        grid = "r144x73",
                        remap_method = "remapcon2",
                        exportlonlat = T) {
  field <-
    ncdf_opener_universal(
      namefile,
      namevar,
      namelon,
      namelat,
      tmonths,
      tyears,
      rotate,
      interp2grid,
      grid,
      remap_method,
      exportlonlat = exportlonlat
    )
  return(field$field)
}
##########################################################
#--------------Plotting functions------------------------#
##########################################################
# Open a graphics device for the figure `figname`.
# output_file_type is case-insensitive: "png", "pdf", "eps"/"epsi"/"ps"
# or "x11"; plot_size is c(width, height) in the device's native units
# (pixels for png, inches for pdf/postscript/x11).
# An unrecognised type silently opens no device.  Returns NULL.
graphics_startup <- function(figname, output_file_type, plot_size) {
  # choose output format for figure - by JvH
  if (tolower(output_file_type) == "png") {
    png(
      filename = figname,
      width = plot_size[1],
      height = plot_size[2]
    )
  } else if (tolower(output_file_type) == "pdf") {
    pdf(
      file = figname,
      width = plot_size[1],
      height = plot_size[2],
      onefile = T
    )
  } else if ((tolower(output_file_type) == "eps") |
    (tolower(output_file_type) == "epsi") |
    (tolower(output_file_type) == "ps")) {
    # set the EPS defaults first, then open the postscript device
    setEPS(
      width = plot_size[1],
      height = plot_size[2],
      onefile = T,
      paper = "special"
    )
    postscript(figname)
  } else if (tolower(output_file_type) == "x11") {
    # on-screen device: figname is not used here
    x11(width = plot_size[1], height = plot_size[2])
  }
  return()
}
# Echo the figure name (so the run log records what was written) and
# close the currently active graphics device.  Returns NULL.
graphics_close <- function(figname) {
  print(figname)
  dev.off()
  return()
}
# extensive filled.contour function
# Stripped-down variant of graphics::filled.contour: draws only the filled
# contour panel (no colour key) so it can be combined with contour()
# overlays and multi-panel layouts; the companion image_scale3() draws the
# legend separately.
# Arguments mirror filled.contour(); key.title, key.axes and mar are
# accepted for call compatibility but are not referenced in this body.
# With extend = TRUE, z values outside range(levels) are clamped to the
# extreme levels so out-of-range data still gets coloured.
filled_contour3 <- # nolint
  function(x = seq(0, 1, length.out = nrow(z)),
           y = seq(0, 1, length.out = ncol(z)),
           z,
           xlim = range(x, finite = TRUE),
           ylim = range(y, finite = TRUE),
           zlim = range(z, finite = TRUE),
           levels = pretty(zlim, nlevels),
           nlevels = 20,
           color.palette = cm.colors,
           col = color.palette(length(levels) - 1),
           extend = TRUE,
           plot.title,
           plot.axes,
           key.title,
           key.axes,
           asp = NA,
           xaxs = "i",
           yaxs = "i",
           las = 1,
           axes = TRUE,
           frame.plot = axes,
           mar,
           ...) {
    # modification by Ian Taylor of the filled.contour function
    # to remove the key and facilitate overplotting with contour()
    # further modified by Carey McGilliard and Bridget Ferris
    # to allow multiple plots on one page
    # modification to allow plot outside boundaries
    # reconstruct x/y/z when a single list argument was passed,
    # mimicking filled.contour()'s argument juggling
    if (missing(z)) {
      if (!missing(x)) {
        if (is.list(x)) {
          z <- x$z
          y <- x$y
          x <- x$x
        }
        else {
          z <- x
          x <- seq.int(0, 1, length.out = nrow(z))
        }
      }
      else {
        stop("no 'z' matrix specified")
      }
    }
    else if (is.list(x)) {
      y <- x$y
      x <- x$x
    }
    if (any(diff(x) <= 0) || any(diff(y) <= 0)) {
      stop("increasing 'x' and 'y' values expected")
    }
    # trim extremes for nicer plots (clamp z into the level range)
    if (extend) {
      z[z < min(levels)] <- min(levels)
      z[z > max(levels)] <- max(levels)
    }
    plot.new()
    plot.window(xlim,
      ylim,
      "",
      xaxs = xaxs,
      yaxs = yaxs,
      asp = asp
    )
    if (!is.matrix(z) || nrow(z) <= 1 || ncol(z) <= 1) {
      stop("no proper 'z' matrix specified")
    }
    if (!is.double(z)) {
      storage.mode(z) <- "double"
    }
    # actual filled rendering via the internal engine of filled.contour
    .filled.contour(as.double(x), as.double(y), z, as.double(levels),
      col = col
    )
    # axes: default Axis() calls, or the caller-supplied plot.axes
    # expression (evaluated here thanks to lazy evaluation)
    if (missing(plot.axes)) {
      if (axes) {
        title(
          main = "",
          xlab = "",
          ylab = ""
        )
        Axis(x, side = 1, ...)
        Axis(y, side = 2, ...)
      }
    }
    else {
      plot.axes
    }
    if (frame.plot) {
      box()
    }
    # title: default title(...) or the caller-supplied plot.title expression
    if (missing(plot.title)) {
      title(...)
    } else {
      plot.title
    }
    invisible()
  }
# Draw a colour-bar legend beside the current plot, matching the levels
# and palette used by filled_contour3().
# The bar is drawn in a new figure region computed from the current one
# (new_fig_scale gives the offsets as fractions of the figure size).
# With extend = TRUE, triangular end caps are added beyond the level
# range to represent clamped out-of-range values.
# Restores the graphics state on exit so a subsequent plot continues the
# mfrow-style layout.
image_scale3 <- function(z,
                         levels,
                         color.palette = heat.colors,
                         col = col,
                         colorbar.label = "image.scale",
                         extend = T,
                         line.label = 2,
                         line.colorbar = 0,
                         cex.label = 1,
                         cex.colorbar = 1,
                         colorbar.width = 1,
                         new_fig_scale = c(-0.07, -0.03, 0.1, -0.1),
                         ...) {
  # save properties from main plotting region (restored at the end)
  old.par <- par(no.readonly = TRUE)
  mfg.save <- par()$mfg
  old.fig <- par()$fig
  # defining plotting region with proper scaling
  xscal <- (old.fig[2] - old.fig[1])
  yscal <- (old.fig[4] - old.fig[3])
  lw <- colorbar.width
  lp <- line.colorbar / 100
  new.fig <- c(
    old.fig[2] + new_fig_scale[1] * xscal * lw - lp,
    old.fig[2] + new_fig_scale[2] * xscal - lp,
    old.fig[3] + new_fig_scale[3] * yscal,
    old.fig[4] + new_fig_scale[4] * yscal
  )
  # default levels: 12 equally spaced values spanning the data
  if (missing(levels)) {
    levels <- seq(min(z), max(z), , 12)
  }
  # fixing color palette (one colour per level interval)
  if (missing(col)) {
    col <- color.palette(length(levels) - 1)
  }
  # starting plot in the freshly computed figure region
  par(
    mar = c(1, 1, 1, 1),
    fig = new.fig,
    new = TRUE
  )
  # creating polygons for legend: one rectangle per level interval
  poly <- vector(mode = "list", length(col))
  for (i in seq(poly)) {
    poly[[i]] <- c(levels[i], levels[i + 1], levels[i + 1], levels[i])
  }
  xlim <- c(0, 1)
  if (extend) {
    # leave vertical room for the triangular end caps
    longer <- 1.5
    dl <- diff(levels)[1] * longer
    ylim <- c(min(levels) - dl, max(levels) + dl)
  } else {
    ylim <- range(levels)
  }
  plot(
    1,
    1,
    t = "n",
    ylim = ylim,
    xlim = xlim,
    axes = FALSE,
    xlab = "",
    ylab = "",
    xaxs = "i",
    yaxs = "i",
    ...
  )
  for (i in seq(poly)) {
    polygon(c(0, 0, 1, 1), poly[[i]], col = col[i], border = NA)
  }
  if (extend) {
    # bottom triangular cap (values below the lowest level)
    polygon(c(0, 1, 1 / 2),
      c(levels[1], levels[1], levels[1] - dl),
      col = col[1],
      border = NA
    )
    # top triangular cap (values above the highest level)
    polygon(c(0, 1, 1 / 2),
      c(
        levels[length(levels)], levels[length(levels)],
        levels[length(levels)] + dl
      ),
      col = col[length(col)],
      border = NA
    )
    # outline around the full bar including both caps
    polygon(
      c(0, 0, 1 / 2, 1, 1, 1 / 2),
      c(
        levels[1], levels[length(levels)], levels[length(levels)] + dl,
        levels[length(levels)], levels[1], levels[1] - dl
      ),
      border = "black",
      lwd = 2
    )
    # tick marks restricted to the true (un-extended) level range
    ylim0 <- range(levels)
    prettyspecial <- pretty(ylim0)
    prettyspecial <- prettyspecial[prettyspecial <= max(ylim0) &
      prettyspecial >= min(ylim0)]
    axis(
      4,
      las = 1,
      cex.axis = cex.colorbar,
      at = prettyspecial,
      labels = prettyspecial,
      ...
    )
  } else {
    box()
    axis(4, las = 1, cex.axis = cex.colorbar, ...)
  }
  # box, axis and legend label
  mtext(colorbar.label,
    line = line.label,
    side = 4,
    cex = cex.label,
    ...
  )
  # resetting properties for starting a new plot (mfrow style)
  par(old.par)
  par(mfg = mfg.save, new = FALSE)
  invisible()
}
|
\name{chen}
\alias{dchen}
\alias{pchen}
\alias{varchen}
\alias{eschen}
\title{Chen distribution}
\description{Computes the pdf, cdf, value at risk and expected shortfall for the Chen distribution due to Chen (2000) given by
\deqn{\begin{array}{ll}
&\displaystyle
f(x) = \lambda b x^{b - 1} \exp \left( x^b \right) \exp \left[ \lambda - \lambda \exp \left( x^b \right) \right],
\\
&\displaystyle
F (x) = 1 - \exp \left[ \lambda - \lambda \exp \left( x^b \right) \right],
\\
&\displaystyle
{\rm VaR}_p (X) = \left\{ \log \left[ 1 - \frac {\log (1 - p)}{\lambda} \right] \right\}^{1 / b},
\\
&\displaystyle
{\rm ES}_p (X) = \frac {1}{p} \int_0^p \left\{ \log \left[ 1 - \frac {\log (1 - v)}{\lambda} \right] \right\}^{1 / b} dv
\end{array}}
for \eqn{x > 0}, \eqn{0 < p < 1}, \eqn{b > 0}, the shape parameter, and \eqn{\lambda > 0}, the scale parameter.}
\usage{
dchen(x, b=1, lambda=1, log=FALSE)
pchen(x, b=1, lambda=1, log.p=FALSE, lower.tail=TRUE)
varchen(p, b=1, lambda=1, log.p=FALSE, lower.tail=TRUE)
eschen(p, b=1, lambda=1)
}
\arguments{
\item{x}{scalar or vector of values at which the pdf or cdf needs to be computed}
\item{p}{scalar or vector of values at which the value at risk or expected shortfall needs to be computed}
\item{lambda}{the value of the scale parameter, must be positive, the default is 1}
\item{b}{the value of the shape parameter, must be positive, the default is 1}
\item{log}{if TRUE then log(pdf) are returned}
\item{log.p}{if TRUE then log(cdf) are returned and quantiles are computed for exp(p)}
\item{lower.tail}{if FALSE then 1-cdf are returned and quantiles are computed for 1-p}
}
\value{An object of the same length as \code{x}, giving the pdf or cdf values computed at \code{x} or an object of the same length as \code{p}, giving the values at risk or expected shortfall computed at \code{p}.}
\references{Stephen Chan, Saralees Nadarajah & Emmanuel Afuecheta (2016). An R Package for Value at Risk and Expected Shortfall, Communications in Statistics - Simulation and Computation, 45:9, 3416-3434, \doi{10.1080/03610918.2014.944658}}
\author{Saralees Nadarajah}
\examples{x=runif(10,min=0,max=1)
dchen(x)
pchen(x)
varchen(x)
eschen(x)}
| /man/chen.Rd | no_license | cran/VaRES | R | false | false | 2,237 | rd | \name{chen}
\alias{dchen}
\alias{pchen}
\alias{varchen}
\alias{eschen}
\title{Chen distribution}
\description{Computes the pdf, cdf, value at risk and expected shortfall for the Chen distribution due to Chen (2000) given by
\deqn{\begin{array}{ll}
&\displaystyle
f(x) = \lambda b x^{b - 1} \exp \left( x^b \right) \exp \left[ \lambda - \lambda \exp \left( x^b \right) \right],
\\
&\displaystyle
F (x) = 1 - \exp \left[ \lambda - \lambda \exp \left( x^b \right) \right],
\\
&\displaystyle
{\rm VaR}_p (X) = \left\{ \log \left[ 1 - \frac {\log (1 - p)}{\lambda} \right] \right\}^{1 / b},
\\
&\displaystyle
{\rm ES}_p (X) = \frac {1}{p} \int_0^p \left\{ \log \left[ 1 - \frac {\log (1 - v)}{\lambda} \right] \right\}^{1 / b} dv
\end{array}}
for \eqn{x > 0}, \eqn{0 < p < 1}, \eqn{b > 0}, the shape parameter, and \eqn{\lambda > 0}, the scale parameter.}
\usage{
dchen(x, b=1, lambda=1, log=FALSE)
pchen(x, b=1, lambda=1, log.p=FALSE, lower.tail=TRUE)
varchen(p, b=1, lambda=1, log.p=FALSE, lower.tail=TRUE)
eschen(p, b=1, lambda=1)
}
\arguments{
\item{x}{scalar or vector of values at which the pdf or cdf needs to be computed}
\item{p}{scalar or vector of values at which the value at risk or expected shortfall needs to be computed}
\item{lambda}{the value of the scale parameter, must be positive, the default is 1}
\item{b}{the value of the shape parameter, must be positive, the default is 1}
\item{log}{if TRUE then log(pdf) are returned}
\item{log.p}{if TRUE then log(cdf) are returned and quantiles are computed for exp(p)}
\item{lower.tail}{if FALSE then 1-cdf are returned and quantiles are computed for 1-p}
}
\value{An object of the same length as \code{x}, giving the pdf or cdf values computed at \code{x} or an object of the same length as \code{p}, giving the values at risk or expected shortfall computed at \code{p}.}
\references{Stephen Chan, Saralees Nadarajah & Emmanuel Afuecheta (2016). An R Package for Value at Risk and Expected Shortfall, Communications in Statistics - Simulation and Computation, 45:9, 3416-3434, \doi{10.1080/03610918.2014.944658}}
\author{Saralees Nadarajah}
\examples{x=runif(10,min=0,max=1)
dchen(x)
pchen(x)
varchen(x)
eschen(x)}
|
library(smfsb)
### Name: simSample
### Title: Simulate many realisations of a model at a given fixed time in
###   the future given an initial time and state, using a function
###   (closure) for advancing the state of the model
### Aliases: simSample
### Keywords: smfsb
### ** Examples
## 100 forward simulations from state c(x1 = 50, x2 = 100) at time 0 up
## to time 20, advanced with the stepLVc transition kernel from smfsb
out3 = simSample(100,c(x1=50,x2=100),0,20,stepLVc)
## histogram of the x2 component across the 100 realisations
hist(out3[,"x2"])
| /data/genthat_extracted_code/smfsb/examples/simSample.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 366 | r | library(smfsb)
### Name: simSample
### Title: Simulate many realisations of a model at a given fixed time in
###   the future given an initial time and state, using a function
###   (closure) for advancing the state of the model
### Aliases: simSample
### Keywords: smfsb
### ** Examples
## 100 forward simulations from state c(x1 = 50, x2 = 100) at time 0 up
## to time 20, advanced with the stepLVc transition kernel from smfsb
out3 = simSample(100,c(x1=50,x2=100),0,20,stepLVc)
## histogram of the x2 component across the 100 realisations
hist(out3[,"x2"])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Create_Diag_Scaling_Mat_Sparse.R
\name{adj_to_probTrans}
\alias{adj_to_probTrans}
\title{Adjacency to Probability Transition Matrix}
\usage{
adj_to_probTrans(mat)
}
\arguments{
\item{mat}{A matrix like object (either a matrix, sparse matrix or dataframe)}
}
\value{
the function returns a matrix of the form dgCMatrix from
from the Matrix package, wrap in as.matrix() if necessary
}
\description{
Takes an Adjacency matrix and scales each column to 1 or 0.
}
\details{
The returned matrix will be such that each entry A\link{i,j} describes the
probability of travelling from vertex j to vertex i during a random
walk. (Note that column -> row is the transpose of what igraph returns)
which is row to column)
}
\examples{
adj_to_probTrans(matrix(1:3, 3))
}
| /man/adj_to_probTrans.Rd | no_license | RyanGreenup/PageRank | R | false | true | 836 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Create_Diag_Scaling_Mat_Sparse.R
\name{adj_to_probTrans}
\alias{adj_to_probTrans}
\title{Adjacency to Probability Transition Matrix}
\usage{
adj_to_probTrans(mat)
}
\arguments{
\item{mat}{A matrix like object (either a matrix, sparse matrix or dataframe)}
}
\value{
the function returns a matrix of the form dgCMatrix from
from the Matrix package, wrap in as.matrix() if necessary
}
\description{
Takes an Adjacency matrix and scales each column to 1 or 0.
}
\details{
The returned matrix will be such that each entry A\link{i,j} describes the
probability of travelling from vertex j to vertex i during a random
walk. (Note that column -> row is the transpose of what igraph returns)
which is row to column)
}
\examples{
adj_to_probTrans(matrix(1:3, 3))
}
|
## This code is intended to calculate the convex hull of the defensive team at ball snap.
## The convex hull is the outermost polygon connecting their positions.
## The area of the convex hull is basically a summary of how spread out the defense is,
## which could be an interesting feature to look at in relation to different coverages.
library(tidyverse)
library(janitor)
library(arrow)
source("scripts/gg_field.R")
## load plays, games, and tracking data
games <-
read_csv("data/games.csv") %>%
clean_names() %>%
mutate(game_date = lubridate::mdy(game_date))
plays <-
read_csv("data/plays.csv") %>%
clean_names() %>%
# There are 2 of these. Not sure what to do with them... drop them.
filter(!is.na(pass_result))
plays <- plays %>%
left_join(games, by = "game_id")
all_weeks <-
read_parquet("data/all_weeks.parquet") %>%
clean_names()
# Standardizing tracking data so its always in direction of offense vs raw on-field coordinates:
all_weeks <- all_weeks %>%
mutate(x = ifelse(play_direction == "left", 120-x, x),
y = ifelse(play_direction == "left", 160/3 - y, y))
## read in coverage data for week 1
coverage <- read_csv("data/coverages_week1.csv") %>%
clean_names()
## subset tracking data to week 1
week1 <- all_weeks %>% filter(week=="week1")
week1 <- week1 %>% inner_join(plays, by=c("game_id","play_id"))
week1 <- week1 %>% inner_join(coverage, by=c("game_id","play_id"))
## create variable to check if player is on offense or defense
week1 <- week1 %>%
mutate(team_abbrev = case_when(
team == "home" ~ home_team_abbr,
team == "away" ~ visitor_team_abbr
),
side_of_ball = case_when(
team_abbrev == possession_team ~ "offense",
team_abbrev != possession_team ~ "defense",
TRUE ~ "football"
)
)
## subset to single play
ex_game_id <- "2018090600"
ex_play_id <- 75
play1 <- week1 %>% filter(game_id == ex_game_id, play_id == ex_play_id)
## only use frame at time of snap
play1_snap <- play1 %>% filter(event == "ball_snap")
## order of defensive players needed to make polygon
def_chull_order <- play1_snap %>%
filter(side_of_ball == "defense") %>%
select(x, y) %>%
chull
def_chull_order <- c(def_chull_order, def_chull_order[1])
def_chull_coords <- play1_snap %>% filter(side_of_ball == "defense") %>%
select(x,y) %>% slice(def_chull_order)
## polygon object to get area of chull
def_chull_poly <- sp::Polygon(def_chull_coords, hole=F)
def_chull_area <- def_chull_poly@area
## area of polygon spanned by defense
print(def_chull_area)
## plot player positions with defensive convex hull
gg_field() +
geom_point(data=play1_snap, aes(x=x, y=y, col=factor(side_of_ball)), cex=3) +
scale_color_manual(values=c('offense'='blue','defense'='red','football'='brown')) +
geom_polygon(data=def_chull_coords, aes(x=x,y=y), fill='red',alpha=0.2) +
labs(color='') +
ggtitle(paste0('GameID=', ex_game_id,', PlayID=',ex_play_id))
## function to compute area of convex hull of defensive setup
calc_chull_area <- function(playdf, gameid, playid){
## pull out locations of defenders at time of ball snap
player_positions <- playdf %>%
filter(game_id == gameid, play_id == playid,
event == "ball_snap", side_of_ball == "defense") %>%
select(x, y)
## get connection order of players
chull_order <- chull(player_positions)
## add last point to connect polygon
chull_order <- c(chull_order, chull_order[1])
## order positions according to polygon
chull_coords <- player_positions %>% slice(chull_order)
## define polygon and calculate area
chull_poly <- sp::Polygon(chull_coords, hole=F)
chull_area <- chull_poly@area
return(chull_area)
}
## example of function for single play
calc_chull_area(playdf=week1, gameid = "2018090600", playid = 75)
## number of unique plays
nplays <- week1 %>% distinct(game_id, play_id) %>% nrow
distinct_plays <- week1 %>% distinct(game_id, play_id)
## calculate for all week 1 plays - would love to know a tidier way to do this!!
ch_area_vec <- rep(NA, nplays)
for(p in 1:nplays){
ch_area_vec[p] <- calc_chull_area(week1, distinct_plays$game_id[p],
distinct_plays$play_id[p])
print(p)
}
distinct_plays$chull_area <- ch_area_vec
## add coverage info
distinct_plays <- distinct_plays %>% inner_join(coverage)
## plot histogram of areas by coverage type
distinct_plays %>%
ggplot() +
geom_histogram(aes(x=chull_area)) +
facet_wrap(~coverage) +
labs(x='Area of Convex Hull of Defenders ') +
ggtitle("Defensive Convex Hull Area, by Coverage Type")
## compare all densities on same plot
distinct_plays %>%
ggplot() +
geom_density(aes(x=chull_area, col=factor(coverage))) +
labs(x='Area of Convex Hull of Defenders ') +
ggtitle("Defensive Convex Hull Area, by Coverage Type")
| /scripts/convex_hull_defense.R | no_license | SUNNY11286/oh_snap | R | false | false | 4,983 | r |
## This code is intended to calculate the convex hull of the defensive team at ball snap.
## The convex hull is the outermost polygon connecting their positions.
## The area of the convex hull is basically a summary of how spread out the defense is,
## which could be an interesting feature to look at in relation to different coverages.
library(tidyverse)
library(janitor)
library(arrow)
source("scripts/gg_field.R")
## load plays, games, and tracking data
games <-
read_csv("data/games.csv") %>%
clean_names() %>%
mutate(game_date = lubridate::mdy(game_date))
plays <-
read_csv("data/plays.csv") %>%
clean_names() %>%
# There are 2 of these. Not sure what to do with them... drop them.
filter(!is.na(pass_result))
plays <- plays %>%
left_join(games, by = "game_id")
all_weeks <-
read_parquet("data/all_weeks.parquet") %>%
clean_names()
# Standardizing tracking data so its always in direction of offense vs raw on-field coordinates:
all_weeks <- all_weeks %>%
mutate(x = ifelse(play_direction == "left", 120-x, x),
y = ifelse(play_direction == "left", 160/3 - y, y))
## read in coverage data for week 1
coverage <- read_csv("data/coverages_week1.csv") %>%
clean_names()
## subset tracking data to week 1
week1 <- all_weeks %>% filter(week=="week1")
week1 <- week1 %>% inner_join(plays, by=c("game_id","play_id"))
week1 <- week1 %>% inner_join(coverage, by=c("game_id","play_id"))
## create variable to check if player is on offense or defense
week1 <- week1 %>%
mutate(team_abbrev = case_when(
team == "home" ~ home_team_abbr,
team == "away" ~ visitor_team_abbr
),
side_of_ball = case_when(
team_abbrev == possession_team ~ "offense",
team_abbrev != possession_team ~ "defense",
TRUE ~ "football"
)
)
## subset to single play
ex_game_id <- "2018090600"
ex_play_id <- 75
play1 <- week1 %>% filter(game_id == ex_game_id, play_id == ex_play_id)
## only use frame at time of snap
play1_snap <- play1 %>% filter(event == "ball_snap")
## order of defensive players needed to make polygon
def_chull_order <- play1_snap %>%
filter(side_of_ball == "defense") %>%
select(x, y) %>%
chull
def_chull_order <- c(def_chull_order, def_chull_order[1])
def_chull_coords <- play1_snap %>% filter(side_of_ball == "defense") %>%
select(x,y) %>% slice(def_chull_order)
## polygon object to get area of chull
def_chull_poly <- sp::Polygon(def_chull_coords, hole=F)
def_chull_area <- def_chull_poly@area
## area of polygon spanned by defense
print(def_chull_area)
## plot player positions with defensive convex hull
gg_field() +
geom_point(data=play1_snap, aes(x=x, y=y, col=factor(side_of_ball)), cex=3) +
scale_color_manual(values=c('offense'='blue','defense'='red','football'='brown')) +
geom_polygon(data=def_chull_coords, aes(x=x,y=y), fill='red',alpha=0.2) +
labs(color='') +
ggtitle(paste0('GameID=', ex_game_id,', PlayID=',ex_play_id))
## function to compute area of convex hull of defensive setup
calc_chull_area <- function(playdf, gameid, playid){
## pull out locations of defenders at time of ball snap
player_positions <- playdf %>%
filter(game_id == gameid, play_id == playid,
event == "ball_snap", side_of_ball == "defense") %>%
select(x, y)
## get connection order of players
chull_order <- chull(player_positions)
## add last point to connect polygon
chull_order <- c(chull_order, chull_order[1])
## order positions according to polygon
chull_coords <- player_positions %>% slice(chull_order)
## define polygon and calculate area
chull_poly <- sp::Polygon(chull_coords, hole=F)
chull_area <- chull_poly@area
return(chull_area)
}
## example of function for single play
calc_chull_area(playdf=week1, gameid = "2018090600", playid = 75)
## number of unique plays
nplays <- week1 %>% distinct(game_id, play_id) %>% nrow
distinct_plays <- week1 %>% distinct(game_id, play_id)
## calculate for all week 1 plays - would love to know a tidier way to do this!!
ch_area_vec <- rep(NA, nplays)
for(p in 1:nplays){
ch_area_vec[p] <- calc_chull_area(week1, distinct_plays$game_id[p],
distinct_plays$play_id[p])
print(p)
}
distinct_plays$chull_area <- ch_area_vec
## add coverage info
distinct_plays <- distinct_plays %>% inner_join(coverage)
## plot histogram of areas by coverage type
distinct_plays %>%
ggplot() +
geom_histogram(aes(x=chull_area)) +
facet_wrap(~coverage) +
labs(x='Area of Convex Hull of Defenders ') +
ggtitle("Defensive Convex Hull Area, by Coverage Type")
## compare all densities on same plot
distinct_plays %>%
ggplot() +
geom_density(aes(x=chull_area, col=factor(coverage))) +
labs(x='Area of Convex Hull of Defenders ') +
ggtitle("Defensive Convex Hull Area, by Coverage Type")
|
# # Example preprocessing script.
source("./lib/funcs.R")
indicators <- raw.data$log %>% dplyr::filter(source == "WB")
filename <- "./data/sfr model data/gdp per capita.xls"
x <- read_excel(filename, "Data")
pos <- min(grep("Country Name", unlist(x[, 1])))
names(x) <- tolower(x[pos, ])
x <- x[-c(1:pos), ]
x <- x %>% dplyr::rename(iso3c = `country name`, variablename = `indicator name`)
x <- x[, -c(2, 4)]
x <- x %>% dplyr::filter(!is.na(`2014`))
x <- x %>% gather(year, value, -c(iso3c, variablename))
x$value <- as.numeric(x$value)
x <- x %>% dplyr::filter(year %in% c(2010, 2014))
x <- x %>% spread(year, value)
x$value <- (x$`2014`/x$`2010`)^(1/5) - 1
x$year <- 2014
x <- x %>% dplyr::filter(!grepl("Sub-Saharan Africa", iso3c))
x$country <- x$iso3c
x <- most.recent(x)
x$iso3c <- country.code.name(x$iso3c)
x$value <- as.numeric(x$value)
x$iso3c <- country.code.name(x$iso3c)
x$variablename <- paste(x$variablename, "CAGR 2010-2014")
x <- x[, c("iso3c", "variablename", "year", "value")]
x <- x %>% dplyr::filter(complete.cases(.))
raw.data$gdpgrowth <- x
rmExcept("raw.data")
| /munge/23-gdp growth rate.R | no_license | githubIEP/oecd-sfr-2016 | R | false | false | 1,085 | r | # # Example preprocessing script.
source("./lib/funcs.R")
indicators <- raw.data$log %>% dplyr::filter(source == "WB")
filename <- "./data/sfr model data/gdp per capita.xls"
x <- read_excel(filename, "Data")
pos <- min(grep("Country Name", unlist(x[, 1])))
names(x) <- tolower(x[pos, ])
x <- x[-c(1:pos), ]
x <- x %>% dplyr::rename(iso3c = `country name`, variablename = `indicator name`)
x <- x[, -c(2, 4)]
x <- x %>% dplyr::filter(!is.na(`2014`))
x <- x %>% gather(year, value, -c(iso3c, variablename))
x$value <- as.numeric(x$value)
x <- x %>% dplyr::filter(year %in% c(2010, 2014))
x <- x %>% spread(year, value)
x$value <- (x$`2014`/x$`2010`)^(1/5) - 1
x$year <- 2014
x <- x %>% dplyr::filter(!grepl("Sub-Saharan Africa", iso3c))
x$country <- x$iso3c
x <- most.recent(x)
x$iso3c <- country.code.name(x$iso3c)
x$value <- as.numeric(x$value)
x$iso3c <- country.code.name(x$iso3c)
x$variablename <- paste(x$variablename, "CAGR 2010-2014")
x <- x[, c("iso3c", "variablename", "year", "value")]
x <- x %>% dplyr::filter(complete.cases(.))
raw.data$gdpgrowth <- x
rmExcept("raw.data")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Plot.R
\name{plotMyData}
\alias{plotMyData}
\title{Wrapper function for ggplot2 for data d
Computes the mean, variance and sd of a vector}
\usage{
plotMyData(x)
}
\arguments{
\item{x}{data.frame}
}
\value{
ggplot2
}
\description{
Wrapper function for ggplot2 for data d
Computes the mean, variance and sd of a vector
}
\examples{
d<-c(2,1,3,4,6,7,78,8,8,8,87,77,6,434,6,5)
data(d)
plotMyData(d)
}
| /RomanSimonTools/man/plotMyData.Rd | no_license | romanEsimon/Stats-3701 | R | false | true | 499 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Plot.R
\name{plotMyData}
\alias{plotMyData}
\title{Wrapper function for ggplot2 for data d
Computes the mean, variance and sd of a vector}
\usage{
plotMyData(x)
}
\arguments{
\item{x}{data.frame}
}
\value{
ggplot2
}
\description{
Wrapper function for ggplot2 for data d
Computes the mean, variance and sd of a vector
}
\examples{
d<-c(2,1,3,4,6,7,78,8,8,8,87,77,6,434,6,5)
data(d)
plotMyData(d)
}
|
# Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
# Code > Insert Roxygen skeleton
hello <- function() {
print("Hello, world!")
}
| /R/hello.R | no_license | ecophilina/TestPackage | R | false | false | 481 | r | # Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
# Code > Insert Roxygen skeleton
hello <- function() {
print("Hello, world!")
}
|
# Function for providing KL divergence as goodness of fit measure
if (!'pacman' %in% installed.packages()[,'Package']) install.packages('pacman', repos='http://cran.r-project.org')
pacman::p_load(boot,dplyr,StableEstim)
# Kullback-Leibler divergence
kl_fun <- function(p, q) {
# Arguments:
# p: numeric, predicted values
# q: numeric, sample values
# Returns:
# kl divergences
kl_values <- ifelse(p == 0, 0, p * log(p / q))
return(kl_values)
}
| /fittinglevy/R/goodness_score_kullback_leibler.R | no_license | Orbis-Amadeus-Oxford/Amadeus-Datawork | R | false | false | 472 | r | # Function for providing KL divergence as goodness of fit measure
if (!'pacman' %in% installed.packages()[,'Package']) install.packages('pacman', repos='http://cran.r-project.org')
pacman::p_load(boot,dplyr,StableEstim)
# Kullback-Leibler divergence
kl_fun <- function(p, q) {
# Arguments:
# p: numeric, predicted values
# q: numeric, sample values
# Returns:
# kl divergences
kl_values <- ifelse(p == 0, 0, p * log(p / q))
return(kl_values)
}
|
# Get the Data
# Read in with tidytuesdayR package
# Install from CRAN via: install.packages("tidytuesdayR")
# This loads the readme and all the datasets for the week of interest
# Either ISO-8601 date or year/week works!
tuesdata <- tidytuesdayR::tt_load('2021-02-09')
tuesdata <- tidytuesdayR::tt_load(2021, week = 7)
lifetime_earn <- tuesdata$lifetime_earn
# Or read in the data manually
lifetime_earn <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/lifetime_earn.csv')
student_debt <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/student_debt.csv')
retirement <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/retirement.csv')
home_owner <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/home_owner.csv')
race_wealth <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/race_wealth.csv')
income_time <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_time.csv')
income_limits <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_limits.csv')
income_aggregate <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_aggregate.csv')
income_distribution <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_distribution.csv')
income_mean <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_mean.csv')
# Plots race/home ownership -----------------------------------------------
library(tidyverse)
p = ggplot(data = home_owner,aes(x= year,y=home_owner_pct*100,color = race))
p + geom_line()
| /Code_XZ/2021/2021-2-9.R | permissive | xiaosongz/tidytuesday | R | false | false | 2,019 | r | # Get the Data
# Read in with tidytuesdayR package
# Install from CRAN via: install.packages("tidytuesdayR")
# This loads the readme and all the datasets for the week of interest
# Either ISO-8601 date or year/week works!
tuesdata <- tidytuesdayR::tt_load('2021-02-09')
tuesdata <- tidytuesdayR::tt_load(2021, week = 7)
lifetime_earn <- tuesdata$lifetime_earn
# Or read in the data manually
lifetime_earn <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/lifetime_earn.csv')
student_debt <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/student_debt.csv')
retirement <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/retirement.csv')
home_owner <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/home_owner.csv')
race_wealth <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/race_wealth.csv')
income_time <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_time.csv')
income_limits <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_limits.csv')
income_aggregate <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_aggregate.csv')
income_distribution <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_distribution.csv')
income_mean <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-09/income_mean.csv')
# Plots race/home ownership -----------------------------------------------
library(tidyverse)
p = ggplot(data = home_owner,aes(x= year,y=home_owner_pct*100,color = race))
p + geom_line()
|
require(deSolve)
require(scales)
require(ggplot2)
require(dplyr)
require(RColorBrewer)
require(viridis)
require(rcartocolor)
require(gridExtra)
# Environmentally-transmitted systems #
##### Describe a curve for individual-level parasitism (i.e., average parasite load)
temp <- seq(0,45,0.1) #temperature range
ind.matrix <- matrix(nrow=1000,ncol=length(temp))
set.seed(1222)
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
ind.c <- runif(dim(ind.matrix)[1], min=0.5,max=1.3)
ind.tmin <- runif(dim(ind.matrix)[1], min=0,max=10)
ind.tmax <- ind.tmin + runif(dim(ind.matrix)[1], min=15, max=35)
for(j in 1:dim(ind.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<ind.tmin[j] || temp[i]>ind.tmax[j],
ind.matrix[j,i]<-0,
ind.matrix[j,i]<- (-ind.c[j]*(temp[i]-ind.tmin[j])*(temp[i]-ind.tmax[j]) ) / 2 )
}
}
#### POPULATION-LEVEL MODEL ####
# Contact rate: chi #
chi.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
chi.c <- runif(dim(chi.matrix)[1], min=0.5,max=1.3)
chi.tmin <- runif(dim(chi.matrix)[1], min=0,max=10)
chi.tmax <- chi.tmin + runif(dim(chi.matrix)[1], min=15, max=35)
# Divide chi by 1000 to get approximate scale for contact rate #
for(j in 1:dim(chi.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<chi.tmin[j] || temp[i]>chi.tmax[j],
chi.matrix[j,i]<-0,
chi.matrix[j,i]<- (chi.c[j]*temp[i]*(temp[i]-chi.tmin[j])*((chi.tmax[j]-temp[i])^(1/2)) / 2000) ) # Divide by 1000 to get proper scale for contact rate
}
}
# Probability of infection: sigma #
sigma.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
sigma.c <- runif(dim(sigma.matrix)[1], min=0.5,max=1.3)
sigma.tmin <- runif(dim(sigma.matrix)[1], min=0,max=10)
sigma.tmax <- sigma.tmin + runif(dim(sigma.matrix)[1], min=15, max=35)
# Divide sigma by 1,000,000 to get approximate scale for probability of infection #
for(j in 1:dim(sigma.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<sigma.tmin[j] || temp[i]>sigma.tmax[j],
sigma.matrix[j,i]<-0,
sigma.matrix[j,i]<- -sigma.c[j]*(temp[i]-sigma.tmin[j])*(temp[i]-sigma.tmax[j]) / 10000000)
}
}
# Parasite-induced mortality: alpha #
# Make this proportional to load
alpha.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
alpha.matrix[,] <- ind.matrix[,]/1000
# Parasites released after host death
# Make this proportional to load
omega.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
omega.matrix[,] <- ind.matrix[,]
# Parasites released over time
lambda.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
lambda.c <- runif(dim(lambda.matrix)[1], min=0.5,max=1.3)
lambda.tmin <- runif(dim(lambda.matrix)[1], min=0,max=10)
lambda.tmax <- lambda.tmin + runif(dim(lambda.matrix)[1], min=15, max=35)
for(j in 1:dim(lambda.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<lambda.tmin[j] || temp[i]>lambda.tmax[j],
lambda.matrix[j,i]<-0,
lambda.matrix[j,i]<- (-lambda.c[j]*(temp[i]-lambda.tmin[j])*(temp[i]-lambda.tmax[j]) ) / 40 )
}
}
# Host birth.rate: birth.rate #
birth.rate.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
birth.rate.c <- runif(dim(birth.rate.matrix)[1], min=0.5,max=1.3)
birth.rate.tmin <- runif(dim(birth.rate.matrix)[1], min=0,max=10)
birth.rate.tmax <- birth.rate.tmin + runif(dim(birth.rate.matrix)[1], min=15, max=35)
for(j in 1:dim(birth.rate.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<birth.rate.tmin[j] || temp[i]>birth.rate.tmax[j],
birth.rate.matrix[j,i]<-0,
birth.rate.matrix[j,i]<- (birth.rate.c[j]*temp[i]*(temp[i]-birth.rate.tmin[j])*((birth.rate.tmax[j]-temp[i])^(1/2)) ) / 100 )
}
}
# For host background mortality rate, assuming mortality follows an inverted quadratic
mu.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
mu.inter <- runif(dim(mu.matrix)[1], min=.1,max=1) # this parameter also known as c
mu.n.slope <- runif(dim(mu.matrix)[1], min=.02,max=0.03) # this parameter also known as b
mu.qd <- runif(dim(mu.matrix)[1], min=0.0008, max=0.0009) #this parameter also known as a
for(j in 1:dim(mu.matrix)[1]){
for(i in 1:length(temp)){
temp.value <- c()
temp.value[i] <- (mu.qd[j]*(temp[i])^2)-(mu.n.slope[j]*temp[i])+mu.inter[j]
ifelse(temp.value/5<0.00667, #if 1/5 value is less than 0.00667 (corresponding to a lifespan of 150 days), set to 0.00667, else set to 1/5 the value
mu.matrix[j,i]<-0.00667,
mu.matrix[j,i]<-temp.value[i]/5)
}
}
# For parasite background mortality rate, assuming mortality follows an inverted quadratic
theta.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
theta.inter <- runif(dim(theta.matrix)[1], min=.1,max=1) # this parameter also known as c
theta.n.slope <- runif(dim(theta.matrix)[1], min=.02,max=0.03) # this parameter also known as b
theta.qd <- runif(dim(theta.matrix)[1], min=0.0008, max=0.0009) #this parameter also known as a
for(j in 1:dim(theta.matrix)[1]){
for(i in 1:length(temp)){
temp.value <- c()
temp.value[i] <- (theta.qd[j]*(temp[i])^2)-(theta.n.slope[j]*temp[i])+theta.inter[j]
ifelse(temp.value/5<0.0667, #if 1/5 value is less than 0.005 (corresponding to a lifespan of 200 days), set to 0.005, else set to 1/5 the value
theta.matrix[j,i]<-0.0667,
theta.matrix[j,i]<-temp.value[i]/5)
}
}
# approximating density as birth rate / death rate
density.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
density.matrix[,] <- birth.rate.matrix[,]/(mu.matrix[,])
# R0 calculations for different scenarios #
R0.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
for(j in 1:dim(R0.matrix)[1]){
for(i in 1:length(temp)){
R0.matrix[j,i] = (chi.matrix[j,i]*sigma.matrix[j,i]*density.matrix[j,i] / theta.matrix[j,i]) *
((lambda.matrix[j,i] / mu.matrix[j,i] + alpha.matrix[j,i]) + omega.matrix[j,i])
}
}
R0.topt <- c()
ind.topt <- c()
vec <- c() # 0 and 1s for if the epidemic spreads (If R0 = 0 or not)
for(j in 1:dim(R0.matrix)[1]){
R0.topt[j] <- temp[which.max((R0.matrix[j,]))]
ind.topt[j] <- temp[which.max((ind.matrix[j,]))]
vec[j] <- isTRUE(R0.topt[j]>0)
}
data.mat <- data.frame(ind.topt,R0.topt,vec)
# filter out ones where R0 was 0
data.mat <- data.mat %>%
filter(vec==TRUE)
# Save the thermal optima data in a CSV to plot after #
write.csv(data.mat, file="~/enviro_simulations_only_omega_prop.csv")
| /thermal scaling modeling - JAE - enviro model - one relationship.R | no_license | devingkirk/thermal_scaling | R | false | false | 6,995 | r | require(deSolve)
require(scales)
require(ggplot2)
require(dplyr)
require(RColorBrewer)
require(viridis)
require(rcartocolor)
require(gridExtra)
# Environmentally-transmitted systems #
##### Describe a curve for individual-level parasitism (i.e., average parasite load)
temp <- seq(0,45,0.1) #temperature range
ind.matrix <- matrix(nrow=1000,ncol=length(temp))
set.seed(1222)
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
ind.c <- runif(dim(ind.matrix)[1], min=0.5,max=1.3)
ind.tmin <- runif(dim(ind.matrix)[1], min=0,max=10)
ind.tmax <- ind.tmin + runif(dim(ind.matrix)[1], min=15, max=35)
for(j in 1:dim(ind.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<ind.tmin[j] || temp[i]>ind.tmax[j],
ind.matrix[j,i]<-0,
ind.matrix[j,i]<- (-ind.c[j]*(temp[i]-ind.tmin[j])*(temp[i]-ind.tmax[j]) ) / 2 )
}
}
#### POPULATION-LEVEL MODEL ####
# Contact rate: chi #
chi.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
chi.c <- runif(dim(chi.matrix)[1], min=0.5,max=1.3)
chi.tmin <- runif(dim(chi.matrix)[1], min=0,max=10)
chi.tmax <- chi.tmin + runif(dim(chi.matrix)[1], min=15, max=35)
# Divide chi by 1000 to get approximate scale for contact rate #
for(j in 1:dim(chi.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<chi.tmin[j] || temp[i]>chi.tmax[j],
chi.matrix[j,i]<-0,
chi.matrix[j,i]<- (chi.c[j]*temp[i]*(temp[i]-chi.tmin[j])*((chi.tmax[j]-temp[i])^(1/2)) / 2000) ) # Divide by 1000 to get proper scale for contact rate
}
}
# Probability of infection: sigma #
sigma.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
sigma.c <- runif(dim(sigma.matrix)[1], min=0.5,max=1.3)
sigma.tmin <- runif(dim(sigma.matrix)[1], min=0,max=10)
sigma.tmax <- sigma.tmin + runif(dim(sigma.matrix)[1], min=15, max=35)
# Divide sigma by 1,000,000 to get approximate scale for probability of infection #
for(j in 1:dim(sigma.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<sigma.tmin[j] || temp[i]>sigma.tmax[j],
sigma.matrix[j,i]<-0,
sigma.matrix[j,i]<- -sigma.c[j]*(temp[i]-sigma.tmin[j])*(temp[i]-sigma.tmax[j]) / 10000000)
}
}
# Parasite-induced mortality: alpha #
# Make this proportional to load
alpha.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
alpha.matrix[,] <- ind.matrix[,]/1000
# Parasites released after host death
# Make this proportional to load
omega.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
omega.matrix[,] <- ind.matrix[,]
# Parasites released over time
lambda.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
lambda.c <- runif(dim(lambda.matrix)[1], min=0.5,max=1.3)
lambda.tmin <- runif(dim(lambda.matrix)[1], min=0,max=10)
lambda.tmax <- lambda.tmin + runif(dim(lambda.matrix)[1], min=15, max=35)
for(j in 1:dim(lambda.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<lambda.tmin[j] || temp[i]>lambda.tmax[j],
lambda.matrix[j,i]<-0,
lambda.matrix[j,i]<- (-lambda.c[j]*(temp[i]-lambda.tmin[j])*(temp[i]-lambda.tmax[j]) ) / 40 )
}
}
# Host birth.rate: birth.rate #
birth.rate.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
birth.rate.c <- runif(dim(birth.rate.matrix)[1], min=0.5,max=1.3)
birth.rate.tmin <- runif(dim(birth.rate.matrix)[1], min=0,max=10)
birth.rate.tmax <- birth.rate.tmin + runif(dim(birth.rate.matrix)[1], min=15, max=35)
for(j in 1:dim(birth.rate.matrix)[1]){
for(i in 1:length(temp)){
ifelse(temp[i]<birth.rate.tmin[j] || temp[i]>birth.rate.tmax[j],
birth.rate.matrix[j,i]<-0,
birth.rate.matrix[j,i]<- (birth.rate.c[j]*temp[i]*(temp[i]-birth.rate.tmin[j])*((birth.rate.tmax[j]-temp[i])^(1/2)) ) / 100 )
}
}
# For host background mortality rate, assuming mortality follows an inverted quadratic
mu.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
mu.inter <- runif(dim(mu.matrix)[1], min=.1,max=1) # this parameter also known as c
mu.n.slope <- runif(dim(mu.matrix)[1], min=.02,max=0.03) # this parameter also known as b
mu.qd <- runif(dim(mu.matrix)[1], min=0.0008, max=0.0009) #this parameter also known as a
for(j in 1:dim(mu.matrix)[1]){
for(i in 1:length(temp)){
temp.value <- c()
temp.value[i] <- (mu.qd[j]*(temp[i])^2)-(mu.n.slope[j]*temp[i])+mu.inter[j]
ifelse(temp.value/5<0.00667, #if 1/5 value is less than 0.00667 (corresponding to a lifespan of 150 days), set to 0.00667, else set to 1/5 the value
mu.matrix[j,i]<-0.00667,
mu.matrix[j,i]<-temp.value[i]/5)
}
}
# For parasite background mortality rate, assuming mortality follows an inverted quadratic
theta.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
# pull 1000 different tmin, c, and add somewhere between 10 and 25 to get tmax
theta.inter <- runif(dim(theta.matrix)[1], min=.1,max=1) # this parameter also known as c
theta.n.slope <- runif(dim(theta.matrix)[1], min=.02,max=0.03) # this parameter also known as b
theta.qd <- runif(dim(theta.matrix)[1], min=0.0008, max=0.0009) #this parameter also known as a
for(j in 1:dim(theta.matrix)[1]){
for(i in 1:length(temp)){
temp.value <- c()
temp.value[i] <- (theta.qd[j]*(temp[i])^2)-(theta.n.slope[j]*temp[i])+theta.inter[j]
ifelse(temp.value/5<0.0667, #if 1/5 value is less than 0.005 (corresponding to a lifespan of 200 days), set to 0.005, else set to 1/5 the value
theta.matrix[j,i]<-0.0667,
theta.matrix[j,i]<-temp.value[i]/5)
}
}
# approximating density as birth rate / death rate
density.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
density.matrix[,] <- birth.rate.matrix[,]/(mu.matrix[,])
# R0 calculations for different scenarios #
R0.matrix <- matrix(nrow=dim(ind.matrix)[1],ncol=length(temp))
for(j in 1:dim(R0.matrix)[1]){
for(i in 1:length(temp)){
R0.matrix[j,i] = (chi.matrix[j,i]*sigma.matrix[j,i]*density.matrix[j,i] / theta.matrix[j,i]) *
((lambda.matrix[j,i] / mu.matrix[j,i] + alpha.matrix[j,i]) + omega.matrix[j,i])
}
}
R0.topt <- c()
ind.topt <- c()
vec <- c() # 0 and 1s for if the epidemic spreads (If R0 = 0 or not)
for(j in 1:dim(R0.matrix)[1]){
R0.topt[j] <- temp[which.max((R0.matrix[j,]))]
ind.topt[j] <- temp[which.max((ind.matrix[j,]))]
vec[j] <- isTRUE(R0.topt[j]>0)
}
data.mat <- data.frame(ind.topt,R0.topt,vec)
# filter out ones where R0 was 0
data.mat <- data.mat %>%
filter(vec==TRUE)
# Save the thermal optima data in a CSV to plot after #
write.csv(data.mat, file="~/enviro_simulations_only_omega_prop.csv")
|
library("mmstat4")
# run example program
run("stat/sum.R")
# list of all example programs
prg()
prg(pattern=".py") # show only python programs
# file name of example program
prg("stat/sum.R")
# editexample program
file.edit(prg("stat/sum.R")) # RStudio editor
| /inst/examples/stat/use_mmstat4.R | no_license | Kale14/mmstat4 | R | false | false | 262 | r | library("mmstat4")
# run example program
run("stat/sum.R")
# list of all example programs
prg()
prg(pattern=".py") # show only python programs
# file name of example program
prg("stat/sum.R")
# editexample program
file.edit(prg("stat/sum.R")) # RStudio editor
|
library(mrds)
library(testthat)
context("Single Observer Analyses")
# Integration test: runs execute.multi.analysis() end-to-end with a fixed
# seed, then replays its internal pipeline step by step (with the same seed)
# and asserts that each intermediate structure -- and the final result --
# matches. Order of statements is significant: the RNG state must evolve
# identically in both runs.
test_that("Test Analyses", {
#datasetup
# Load the four pre-baked fixtures shipped with the mads package:
# ddf.dat, obs.table, region.table, sample.table.
ex.filename<-system.file("testData/input_checks/ddf_dat.robj", package="mads")
load(ex.filename)
ex.filename<-system.file("testData/input_checks/obs_table.robj", package="mads")
load(ex.filename)
ex.filename<-system.file("testData/input_checks/region_table.robj", package="mads")
load(ex.filename)
ex.filename<-system.file("testData/input_checks/sample_table.robj", package="mads")
load(ex.filename)
#run ddf analyses
# Fit three candidate detection functions: half-normal and hazard-rate with
# a size covariate, and a monotone half-normal with a cosine adjustment.
ddf.1 <- ddf(dsmodel = ~mcds(key = "hn", formula = ~ size), method='ds', data=ddf.dat,meta.data=list(width=4))
ddf.2 <- ddf(dsmodel = ~mcds(key = "hr", formula = ~ size), method='ds', data=ddf.dat,meta.data=list(width=4))
ddf.3 <- ddf(dsmodel = ~mcds(key = "hn", formula = ~ 1, adj.series = "cos", adj.order = c(2)), method='ds', data=ddf.dat,meta.data=list(width=4, mono=TRUE))
#think this should have been fixed in mrds
# Single-observer data: mark every record as detected.
ddf.1$data$detected <- rep(1, nrow(ddf.1$data))
ddf.2$data$detected <- rep(1, nrow(ddf.2$data))
ddf.3$data$detected <- rep(1, nrow(ddf.3$data))
#Multi-analysis options
# "UnidDol" is an unidentified-dolphin code to be prorated between CD and WD.
model.names <- list("CD"=c("ddf.1","ddf.2","ddf.3"), "WD"=c("ddf.1","ddf.2","ddf.3"), "UnidDol"=c("ddf.1","ddf.2","ddf.3"))
ddf.models <- list("ddf.1" = ddf.1, "ddf.2" = ddf.2, "ddf.3" = ddf.3)
species.code.definitions <- list("UnidDol" = c("CD","WD"))
species.presence <- list("A" = c("CD","WD"))
covariate.uncertainty <- NULL
ddf.model.options <- list(criterion="AIC")
ddf.model.options$distance.naming.conv <- TRUE
bootstrap <- TRUE
bootstrap.options <- list(resample="samples", n=2, quantile.type = 7)
dht.options <- list(convert.units = 1)
# Reference run: the full public entry point, with a fixed seed.
set.seed(747)
results.to.compare <- execute.multi.analysis(
species.code = names(model.names),
unidentified.sightings = species.code.definitions,
species.presence = species.presence,
covariate.uncertainty = covariate.uncertainty,
models.by.species.code = model.names,
ddf.model.objects = ddf.models,
ddf.model.options = ddf.model.options,
region.table = region.table,
sample.table = sample.table,
obs.table = obs.table,
bootstrap = bootstrap,
bootstrap.option = bootstrap.options,
silent = FALSE)
# Manual replay: reset to the same seed and walk the internals by hand.
set.seed(747)
MAE.warnings <- NULL
species.code <- names(model.names)
ddf.model.info <- check.ddf.models(model.names, ddf.models)
clusters <- ddf.model.info$clusters
double.observer <- ddf.model.info$double.observer
# If the user has not specified the criteria set it
if(is.null(ddf.model.options$criterion)){
ddf.model.options$criterion <- "AIC"
}
# If the user has not specified the species field name set it
if(is.null(ddf.model.options$species.field.name)){
ddf.model.options$species.field.name <- "species"
}
##################################
expect_true(clusters)
expect_false(double.observer)
##################################
species.code.definitions <- check.species.code.definitions(species.code.definitions, species.code)
unidentified.species <- species.code.definitions$unidentified
species.code.definitions <- species.code.definitions$species.code.definitions
##################################
expect_true(unidentified.species)
##################################
species.presence <- check.species.presence(species.presence, species.code, strata.name = as.character(region.table$Region.Label))
##################################
expect_identical(names(species.presence), "A")
expect_identical(species.presence[[1]], c("CD","WD"))
##################################
# A NULL species.presence should be reconstructed to the same value.
species.presence.compare <- species.presence
species.presence <- NULL
species.presence <- check.species.presence(species.presence, species.code, strata.name = as.character(region.table$Region.Label))
##################################
#expect_that(species.presence, is_identical_to(species.presence.compare))
rm(species.presence.compare)
##################################
covariate.uncertainty <- check.covar.uncertainty(covariate.uncertainty)
check.bootstrap.options(bootstrap, bootstrap.options$resample, bootstrap.options$n, sample.table)
bootstrap.options$n <- ifelse(bootstrap, bootstrap.options$n, 1)
#Make master copies of all the datasets
ddf.dat.master <- get.datasets(model.names, ddf.models)
unique.model.names <- ddf.dat.master$unique.model.names
model.index <- ddf.dat.master$model.index
ddf.dat.master <- ddf.dat.master$ddf.dat.master
##################################
expect_identical(unique.model.names, list("CD" = c("ddf.1", "ddf.2", "ddf.3")))
# NOTE(review): expect_that()/equals()/is_identical_to() are deprecated in
# testthat 3e; the direct expect_equal()/expect_identical() forms used
# elsewhere in this test are preferred. Left as-is.
test <- c("CD","CD","CD")
names(test) <- c("CD","WD","UnidDol")
expect_that(model.index, is_identical_to(test))
rm(test)
expect_equal(length(ddf.dat.master), 1)
expect_equal(nrow(ddf.dat.master[[1]]), nrow(ddf.1$data))
##################################
obs.table.master <- obs.table
sample.table.master <- sample.table
#Create storage for results (only for the species codes not the unidentified codes)
bootstrap.results <- create.result.arrays(species.code, species.code.definitions, region.table, clusters, bootstrap.options$n)
bootstrap.ddf.statistics <- create.param.arrays(unique.model.names, ddf.models, bootstrap.options$n, ddf.model.options$criterion)
##################################
expect_match(names(bootstrap.ddf.statistics), "CD")
expect_identical(dimnames(bootstrap.results$individual.summary)[[4]], c("CD","WD"))
##################################
# ---- First bootstrap iteration (replays iteration n = 1 of the loop) ----
n=1
#Resample Data
bootstrap = TRUE
if(bootstrap){
ddf.dat.working <- resample.data(resample=bootstrap.options$resample, obs.table.master, sample.table.master, ddf.dat.master, double.observer)
obs.table <- ddf.dat.working$obs.table
sample.table <- ddf.dat.working$sample.table
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}else{
ddf.dat.working <- ddf.dat.master
}
##################################
# Resampling preserves sample counts per stratum and keeps the observation
# and detection tables in sync.
expect_equal(length(unique(sample.table$Sample.Label)), length(unique(sample.table.master$Sample.Label)))
expect_identical(table(sample.table$Region), table(sample.table.master$Region))
expect_equal(nrow(ddf.dat.working[[1]]), nrow(obs.table))
expect_equal(length(which(ddf.dat.working[[1]]$object%in%obs.table$object)), nrow(obs.table))
expect_equal(ddf.dat.working[["CD"]]$distance[ddf.dat.working[["CD"]]$object == 16], ddf.dat.master[["CD"]]$distance[ddf.dat.master[["CD"]]$object == 16])
##################################
#ddf.dat.working.check <- ddf.dat.working
# No covariate uncertainty in this test, so this branch is skipped.
if(!is.null(covariate.uncertainty)){
ddf.dat.working <- resample.covariates(ddf.dat.working, covariate.uncertainty, MAE.warnings)
MAE.warnings <- ddf.dat.working$MAE.warnings
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}
##################################
#expect_that(ddf.dat.working[["10"]]$object, is_identical_to(ddf.dat.working.check[["10"]]$object))
#expect_that(ddf.dat.working[["10"]]$scaledtotsize[1] == ddf.dat.working.check[["10"]]$scaledtotsize[1], is_false())
#expect_that(ddf.dat.working[["10"]]$distance[ddf.dat.working[["10"]]$object == 106], equals(ddf.dat.master[["10"]]$distance[ddf.dat.master[["10"]]$object == 106]))
##################################
#Fit ddf models to all species codes
ddf.results <- fit.ddf.models(ddf.dat.working, unique.model.names, ddf.models, ddf.model.options$criterion, bootstrap.ddf.statistics, n, MAE.warnings)
# NOTE(review): class(x) == "list" is fragile (class() can be length > 1);
# is.list()/inherits() would be safer. Left as-is to preserve behavior.
if(class(ddf.results) == "list"){
bootstrap.ddf.statistics <- ddf.results$bootstrap.ddf.statistics
ddf.results <- ddf.results$ddf.results
}else{
#If the ddf results are not valid for all species move to next bootstrap iteration
# NOTE(review): this `next` was copied from inside the bootstrap loop in
# the package source; here it is outside any loop, so if this branch is
# ever reached it will error ("no loop for break/next") -- TODO confirm.
MAE.warnings <- ddf.results
next
}
##################################
# On this resample, the hazard-rate model (ddf.2) should win on AIC.
expect_equal(as.numeric(bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$ds.param[n,1:2]), as.numeric(ddf.results[[1]]$ds$aux$ddfobj$scale$parameters))
expect_true(bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$AIC[n] < bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$AIC[n])
expect_equal(ddf.results[[1]]$criterion, bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$AIC[n])
##################################
dht.results <- calculate.dht(species.code, ddf.model.options$species.field.name, model.index, ddf.results, region.table, sample.table, obs.table, dht.options)
##################################
expect_identical(names(dht.results), c("CD","WD","UnidDol"))
expect_equal(dht.results[[1]]$clusters$summary$n[1]+dht.results[[2]]$clusters$summary$n[1]+dht.results[[3]]$clusters$summary$n[1], nrow(obs.table))
##################################
# Prorate the unidentified-dolphin abundance between CD and WD.
if(unidentified.species){
formatted.dht.results <- prorate.unidentified(dht.results, species.code.definitions, species.presence, clusters)
}else{
formatted.dht.results <- format.dht.results(dht.results, species.code, clusters)
}
##################################
# Prorating conserves total abundance and records the unidentified share.
expect_equal(length(formatted.dht.results), 2)
expect_identical(names(formatted.dht.results), c("CD","WD"))
expect_equal(dht.results[[1]]$clusters$N$Estimate[1]+dht.results[[2]]$clusters$N$Estimate[1]+dht.results[[3]]$clusters$N$Estimate[1], formatted.dht.results[[1]]$clusters$N$Estimate[1]+formatted.dht.results[[2]]$clusters$N$Estimate[1])
expect_that(as.numeric(((formatted.dht.results[["CD"]]$clusters$N$Estimate[1]-dht.results[["CD"]]$clusters$N$Estimate[1])/formatted.dht.results[["CD"]]$clusters$N$Estimate[1])*100), equals(formatted.dht.results[["CD"]]$clusters$N$PercentUnidentified[1], tolerance = 0.0001))
##################################
bootstrap.results <- accumulate.results(n, bootstrap.results, formatted.dht.results, clusters)
##################################
expect_that(bootstrap.results$clusters.N["Total","PercentUnidentified",1,"CD"], equals(bootstrap.results$clusters.N["Total","PercentUnidentified",1,"WD"]))
expect_that(bootstrap.results$clusters.N["Total","Estimate",1,"WD"], equals(as.numeric(formatted.dht.results[["WD"]]$clusters$N$Estimate[1])))
expect_that(bootstrap.results$individual.N["Total","PercentUnidentified",1,"CD"], equals(as.numeric(((bootstrap.results$individual.N["Total","Estimate",1,"CD"]- dht.results[["CD"]]$individual$N$Estimate[1])/bootstrap.results$individual.N["Total","Estimate",1,"CD"])*100), tolerance = 0.001))
##################################
# ---- Second bootstrap iteration (replays iteration n = 2) ----
n=2
#Resample Data
bootstrap = TRUE
if(bootstrap){
ddf.dat.working <- resample.data(resample=bootstrap.options$resample, obs.table.master, sample.table.master, ddf.dat.master, double.observer)
obs.table <- ddf.dat.working$obs.table
sample.table <- ddf.dat.working$sample.table
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}else{
ddf.dat.working <- ddf.dat.master
}
##################################
expect_that(length(unique(sample.table$Sample.Label)), equals(length(unique(sample.table.master$Sample.Label))))
expect_that(table(sample.table$Region), is_identical_to(table(sample.table.master$Region)))
expect_that(nrow(ddf.dat.working[[1]]), equals(nrow(obs.table)))
expect_that(length(which(ddf.dat.working[[1]]$object%in%obs.table$object)), equals(nrow(obs.table)))
expect_that(ddf.dat.working[["CD"]]$distance[ddf.dat.working[["CD"]]$object == 11], equals(ddf.dat.master[["CD"]]$distance[ddf.dat.master[["CD"]]$object == 11]))
##################################
# With covariate.uncertainty NULL, the working data must pass through unchanged.
ddf.dat.working.check <- ddf.dat.working
if(!is.null(covariate.uncertainty)){
ddf.dat.working <- resample.covariates(ddf.dat.working, covariate.uncertainty, MAE.warnings)
MAE.warnings <- ddf.dat.working$MAE.warnings
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}
##################################
expect_that(ddf.dat.working[["CD"]], is_identical_to(ddf.dat.working.check[["CD"]]))
rm(ddf.dat.working.check)
##################################
#Fit ddf models to all species codes
ddf.results <- fit.ddf.models(ddf.dat.working, unique.model.names, ddf.models, ddf.model.options$criterion, bootstrap.ddf.statistics, n, MAE.warnings)
# NOTE(review): same class()=="list" comparison and out-of-loop `next` as in
# the first iteration above -- see those notes; left as-is.
if(class(ddf.results) == "list"){
bootstrap.ddf.statistics <- ddf.results$bootstrap.ddf.statistics
ddf.results <- ddf.results$ddf.results
}else{
#If the ddf results are not valid for all species move to next bootstrap iteration
MAE.warnings <- ddf.results
next
}
##################################
# On this resample, the half-normal model (ddf.1) should win on AIC.
expect_that(as.numeric(bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$ds.param[n,1:2]), equals(as.numeric(ddf.results[[1]]$ds$aux$ddfobj$scale$parameters)))
expect_true(bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$AIC[n] > bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$AIC[n])
expect_that(ddf.results[[1]]$criterion, equals(bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$AIC[n]))
##################################
dht.results <- calculate.dht(species.code, ddf.model.options$species.field.name, model.index, ddf.results, region.table, sample.table, obs.table, dht.options)
if(unidentified.species){
formatted.dht.results <- prorate.unidentified(dht.results, species.code.definitions, species.presence, clusters)
}else{
formatted.dht.results <- format.dht.results(dht.results, species.code, clusters)
}
# NOTE(review): this if/else repeats the block directly above verbatim,
# recomputing the same result -- looks like an accidental duplicate; left
# as-is to preserve the replay exactly.
if(unidentified.species){
formatted.dht.results <- prorate.unidentified(dht.results, species.code.definitions, species.presence, clusters)
}else{
formatted.dht.results <- format.dht.results(dht.results, species.code, clusters)
}
##################################
expect_that(length(formatted.dht.results), equals(2))
expect_that(names(formatted.dht.results), is_identical_to(c("CD","WD")))
expect_that(dht.results[[1]]$clusters$N$Estimate[1]+dht.results[[2]]$clusters$N$Estimate[1]+dht.results[[3]]$clusters$N$Estimate[1], equals(formatted.dht.results[[1]]$clusters$N$Estimate[1]+formatted.dht.results[[2]]$clusters$N$Estimate[1]))
expect_that(as.numeric(((formatted.dht.results[["CD"]]$clusters$N$Estimate[1]-dht.results[["CD"]]$clusters$N$Estimate[1])/formatted.dht.results[["CD"]]$clusters$N$Estimate[1])*100), equals(formatted.dht.results[["CD"]]$clusters$N$PercentUnidentified[1], tolerance = 0.0001))
##################################
bootstrap.results <- accumulate.results(n, bootstrap.results, formatted.dht.results, clusters)
##################################
expect_that(bootstrap.results$clusters.N["Total","PercentUnidentified",2,"CD"], equals(bootstrap.results$clusters.N["Total","PercentUnidentified",2,"WD"]))
expect_that(bootstrap.results$individual.N["Total","PercentUnidentified",2,"WD"], equals(as.numeric(((bootstrap.results$individual.N["Total","Estimate",2,"WD"]- dht.results[["WD"]]$individual$N$Estimate[1])/bootstrap.results$individual.N["Total","Estimate",2,"WD"])*100), tolerance = 0.001))
expect_that(bootstrap.results$Expected.S["Total","new.Expected.S",2,"CD"], equals(as.numeric(formatted.dht.results[["CD"]]$individual$N$Estimate[1]/formatted.dht.results[["CD"]]$clusters$N$Estimate[1])))
##################################
#process results
# Summarise the accumulated iterations and tag the result with the same S3
# classes execute.multi.analysis() assigns, so the final comparison is exact.
results <- process.bootstrap.results(bootstrap.results, model.index, clusters, bootstrap.ddf.statistics, bootstrap.options$quantile.type, analysis.options = list(bootstrap = bootstrap, n = bootstrap.options$n, covariate.uncertainty = covariate.uncertainty, clusters = clusters, double.observer = double.observer, unidentified.species = unidentified.species, species.code.definitions = species.code.definitions, model.names = model.names))
class(results) <- "ma"
class(results$analysis.options) <- "ma.analysis"
class(results$species) <- "ma.allspecies"
for(sp in seq(along = results$species)){
class(results$species[[sp]]) <- "ma.species"
}
if(!is.null(results$unidentified)){
class(results$unidentified) <- "ma.allunid"
for(sp in seq(along = results$unidentified)){
class(results$unidentified[[sp]]) <- "ma.unid"
}
}
##################################
# The manual replay must reproduce the public entry point exactly.
expect_that(results, is_identical_to(results.to.compare))
##################################
#rm(.Random.seed)
})
| /tests/testthat/test_singleObserver.R | no_license | cran/mads | R | false | false | 16,537 | r | library(mrds)
library(testthat)
context("Single Observer Analyses")
test_that("Test Analyses", {
#datasetup
ex.filename<-system.file("testData/input_checks/ddf_dat.robj", package="mads")
load(ex.filename)
ex.filename<-system.file("testData/input_checks/obs_table.robj", package="mads")
load(ex.filename)
ex.filename<-system.file("testData/input_checks/region_table.robj", package="mads")
load(ex.filename)
ex.filename<-system.file("testData/input_checks/sample_table.robj", package="mads")
load(ex.filename)
#run ddf analyses
ddf.1 <- ddf(dsmodel = ~mcds(key = "hn", formula = ~ size), method='ds', data=ddf.dat,meta.data=list(width=4))
ddf.2 <- ddf(dsmodel = ~mcds(key = "hr", formula = ~ size), method='ds', data=ddf.dat,meta.data=list(width=4))
ddf.3 <- ddf(dsmodel = ~mcds(key = "hn", formula = ~ 1, adj.series = "cos", adj.order = c(2)), method='ds', data=ddf.dat,meta.data=list(width=4, mono=TRUE))
#think this should have been fixed in mrds
ddf.1$data$detected <- rep(1, nrow(ddf.1$data))
ddf.2$data$detected <- rep(1, nrow(ddf.2$data))
ddf.3$data$detected <- rep(1, nrow(ddf.3$data))
#Multi-analysis options
model.names <- list("CD"=c("ddf.1","ddf.2","ddf.3"), "WD"=c("ddf.1","ddf.2","ddf.3"), "UnidDol"=c("ddf.1","ddf.2","ddf.3"))
ddf.models <- list("ddf.1" = ddf.1, "ddf.2" = ddf.2, "ddf.3" = ddf.3)
species.code.definitions <- list("UnidDol" = c("CD","WD"))
species.presence <- list("A" = c("CD","WD"))
covariate.uncertainty <- NULL
ddf.model.options <- list(criterion="AIC")
ddf.model.options$distance.naming.conv <- TRUE
bootstrap <- TRUE
bootstrap.options <- list(resample="samples", n=2, quantile.type = 7)
dht.options <- list(convert.units = 1)
set.seed(747)
results.to.compare <- execute.multi.analysis(
species.code = names(model.names),
unidentified.sightings = species.code.definitions,
species.presence = species.presence,
covariate.uncertainty = covariate.uncertainty,
models.by.species.code = model.names,
ddf.model.objects = ddf.models,
ddf.model.options = ddf.model.options,
region.table = region.table,
sample.table = sample.table,
obs.table = obs.table,
bootstrap = bootstrap,
bootstrap.option = bootstrap.options,
silent = FALSE)
set.seed(747)
MAE.warnings <- NULL
species.code <- names(model.names)
ddf.model.info <- check.ddf.models(model.names, ddf.models)
clusters <- ddf.model.info$clusters
double.observer <- ddf.model.info$double.observer
# If the user has not specified the criteria set it
if(is.null(ddf.model.options$criterion)){
ddf.model.options$criterion <- "AIC"
}
# If the user has not specified the species field name set it
if(is.null(ddf.model.options$species.field.name)){
ddf.model.options$species.field.name <- "species"
}
##################################
expect_true(clusters)
expect_false(double.observer)
##################################
species.code.definitions <- check.species.code.definitions(species.code.definitions, species.code)
unidentified.species <- species.code.definitions$unidentified
species.code.definitions <- species.code.definitions$species.code.definitions
##################################
expect_true(unidentified.species)
##################################
species.presence <- check.species.presence(species.presence, species.code, strata.name = as.character(region.table$Region.Label))
##################################
expect_identical(names(species.presence), "A")
expect_identical(species.presence[[1]], c("CD","WD"))
##################################
species.presence.compare <- species.presence
species.presence <- NULL
species.presence <- check.species.presence(species.presence, species.code, strata.name = as.character(region.table$Region.Label))
##################################
#expect_that(species.presence, is_identical_to(species.presence.compare))
rm(species.presence.compare)
##################################
covariate.uncertainty <- check.covar.uncertainty(covariate.uncertainty)
check.bootstrap.options(bootstrap, bootstrap.options$resample, bootstrap.options$n, sample.table)
bootstrap.options$n <- ifelse(bootstrap, bootstrap.options$n, 1)
#Make master copies of all the datasets
ddf.dat.master <- get.datasets(model.names, ddf.models)
unique.model.names <- ddf.dat.master$unique.model.names
model.index <- ddf.dat.master$model.index
ddf.dat.master <- ddf.dat.master$ddf.dat.master
##################################
expect_identical(unique.model.names, list("CD" = c("ddf.1", "ddf.2", "ddf.3")))
test <- c("CD","CD","CD")
names(test) <- c("CD","WD","UnidDol")
expect_that(model.index, is_identical_to(test))
rm(test)
expect_equal(length(ddf.dat.master), 1)
expect_equal(nrow(ddf.dat.master[[1]]), nrow(ddf.1$data))
##################################
obs.table.master <- obs.table
sample.table.master <- sample.table
#Create storage for results (only for the species codes not the unidentified codes)
bootstrap.results <- create.result.arrays(species.code, species.code.definitions, region.table, clusters, bootstrap.options$n)
bootstrap.ddf.statistics <- create.param.arrays(unique.model.names, ddf.models, bootstrap.options$n, ddf.model.options$criterion)
##################################
expect_match(names(bootstrap.ddf.statistics), "CD")
expect_identical(dimnames(bootstrap.results$individual.summary)[[4]], c("CD","WD"))
##################################
n=1
#Resample Data
bootstrap = TRUE
if(bootstrap){
ddf.dat.working <- resample.data(resample=bootstrap.options$resample, obs.table.master, sample.table.master, ddf.dat.master, double.observer)
obs.table <- ddf.dat.working$obs.table
sample.table <- ddf.dat.working$sample.table
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}else{
ddf.dat.working <- ddf.dat.master
}
##################################
expect_equal(length(unique(sample.table$Sample.Label)), length(unique(sample.table.master$Sample.Label)))
expect_identical(table(sample.table$Region), table(sample.table.master$Region))
expect_equal(nrow(ddf.dat.working[[1]]), nrow(obs.table))
expect_equal(length(which(ddf.dat.working[[1]]$object%in%obs.table$object)), nrow(obs.table))
expect_equal(ddf.dat.working[["CD"]]$distance[ddf.dat.working[["CD"]]$object == 16], ddf.dat.master[["CD"]]$distance[ddf.dat.master[["CD"]]$object == 16])
##################################
#ddf.dat.working.check <- ddf.dat.working
if(!is.null(covariate.uncertainty)){
ddf.dat.working <- resample.covariates(ddf.dat.working, covariate.uncertainty, MAE.warnings)
MAE.warnings <- ddf.dat.working$MAE.warnings
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}
##################################
#expect_that(ddf.dat.working[["10"]]$object, is_identical_to(ddf.dat.working.check[["10"]]$object))
#expect_that(ddf.dat.working[["10"]]$scaledtotsize[1] == ddf.dat.working.check[["10"]]$scaledtotsize[1], is_false())
#expect_that(ddf.dat.working[["10"]]$distance[ddf.dat.working[["10"]]$object == 106], equals(ddf.dat.master[["10"]]$distance[ddf.dat.master[["10"]]$object == 106]))
##################################
#Fit ddf models to all species codes
ddf.results <- fit.ddf.models(ddf.dat.working, unique.model.names, ddf.models, ddf.model.options$criterion, bootstrap.ddf.statistics, n, MAE.warnings)
if(class(ddf.results) == "list"){
bootstrap.ddf.statistics <- ddf.results$bootstrap.ddf.statistics
ddf.results <- ddf.results$ddf.results
}else{
#If the ddf results are not valid for all species move to next bootstrap iteration
MAE.warnings <- ddf.results
next
}
##################################
expect_equal(as.numeric(bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$ds.param[n,1:2]), as.numeric(ddf.results[[1]]$ds$aux$ddfobj$scale$parameters))
expect_true(bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$AIC[n] < bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$AIC[n])
expect_equal(ddf.results[[1]]$criterion, bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$AIC[n])
##################################
dht.results <- calculate.dht(species.code, ddf.model.options$species.field.name, model.index, ddf.results, region.table, sample.table, obs.table, dht.options)
##################################
expect_identical(names(dht.results), c("CD","WD","UnidDol"))
expect_equal(dht.results[[1]]$clusters$summary$n[1]+dht.results[[2]]$clusters$summary$n[1]+dht.results[[3]]$clusters$summary$n[1], nrow(obs.table))
##################################
if(unidentified.species){
formatted.dht.results <- prorate.unidentified(dht.results, species.code.definitions, species.presence, clusters)
}else{
formatted.dht.results <- format.dht.results(dht.results, species.code, clusters)
}
##################################
expect_equal(length(formatted.dht.results), 2)
expect_identical(names(formatted.dht.results), c("CD","WD"))
expect_equal(dht.results[[1]]$clusters$N$Estimate[1]+dht.results[[2]]$clusters$N$Estimate[1]+dht.results[[3]]$clusters$N$Estimate[1], formatted.dht.results[[1]]$clusters$N$Estimate[1]+formatted.dht.results[[2]]$clusters$N$Estimate[1])
expect_that(as.numeric(((formatted.dht.results[["CD"]]$clusters$N$Estimate[1]-dht.results[["CD"]]$clusters$N$Estimate[1])/formatted.dht.results[["CD"]]$clusters$N$Estimate[1])*100), equals(formatted.dht.results[["CD"]]$clusters$N$PercentUnidentified[1], tolerance = 0.0001))
##################################
bootstrap.results <- accumulate.results(n, bootstrap.results, formatted.dht.results, clusters)
##################################
expect_that(bootstrap.results$clusters.N["Total","PercentUnidentified",1,"CD"], equals(bootstrap.results$clusters.N["Total","PercentUnidentified",1,"WD"]))
expect_that(bootstrap.results$clusters.N["Total","Estimate",1,"WD"], equals(as.numeric(formatted.dht.results[["WD"]]$clusters$N$Estimate[1])))
expect_that(bootstrap.results$individual.N["Total","PercentUnidentified",1,"CD"], equals(as.numeric(((bootstrap.results$individual.N["Total","Estimate",1,"CD"]- dht.results[["CD"]]$individual$N$Estimate[1])/bootstrap.results$individual.N["Total","Estimate",1,"CD"])*100), tolerance = 0.001))
##################################
n=2
#Resample Data
bootstrap = TRUE
if(bootstrap){
ddf.dat.working <- resample.data(resample=bootstrap.options$resample, obs.table.master, sample.table.master, ddf.dat.master, double.observer)
obs.table <- ddf.dat.working$obs.table
sample.table <- ddf.dat.working$sample.table
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}else{
ddf.dat.working <- ddf.dat.master
}
##################################
expect_that(length(unique(sample.table$Sample.Label)), equals(length(unique(sample.table.master$Sample.Label))))
expect_that(table(sample.table$Region), is_identical_to(table(sample.table.master$Region)))
expect_that(nrow(ddf.dat.working[[1]]), equals(nrow(obs.table)))
expect_that(length(which(ddf.dat.working[[1]]$object%in%obs.table$object)), equals(nrow(obs.table)))
expect_that(ddf.dat.working[["CD"]]$distance[ddf.dat.working[["CD"]]$object == 11], equals(ddf.dat.master[["CD"]]$distance[ddf.dat.master[["CD"]]$object == 11]))
##################################
ddf.dat.working.check <- ddf.dat.working
if(!is.null(covariate.uncertainty)){
ddf.dat.working <- resample.covariates(ddf.dat.working, covariate.uncertainty, MAE.warnings)
MAE.warnings <- ddf.dat.working$MAE.warnings
ddf.dat.working <- ddf.dat.working$ddf.dat.working
}
##################################
expect_that(ddf.dat.working[["CD"]], is_identical_to(ddf.dat.working.check[["CD"]]))
rm(ddf.dat.working.check)
##################################
#Fit ddf models to all species codes
ddf.results <- fit.ddf.models(ddf.dat.working, unique.model.names, ddf.models, ddf.model.options$criterion, bootstrap.ddf.statistics, n, MAE.warnings)
if(class(ddf.results) == "list"){
bootstrap.ddf.statistics <- ddf.results$bootstrap.ddf.statistics
ddf.results <- ddf.results$ddf.results
}else{
#If the ddf results are not valid for all species move to next bootstrap iteration
MAE.warnings <- ddf.results
next
}
##################################
expect_that(as.numeric(bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$ds.param[n,1:2]), equals(as.numeric(ddf.results[[1]]$ds$aux$ddfobj$scale$parameters)))
expect_true(bootstrap.ddf.statistics[["CD"]][["ddf.2"]]$AIC[n] > bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$AIC[n])
expect_that(ddf.results[[1]]$criterion, equals(bootstrap.ddf.statistics[["CD"]][["ddf.1"]]$AIC[n]))
##################################
dht.results <- calculate.dht(species.code, ddf.model.options$species.field.name, model.index, ddf.results, region.table, sample.table, obs.table, dht.options)
if(unidentified.species){
formatted.dht.results <- prorate.unidentified(dht.results, species.code.definitions, species.presence, clusters)
}else{
formatted.dht.results <- format.dht.results(dht.results, species.code, clusters)
}
if(unidentified.species){
formatted.dht.results <- prorate.unidentified(dht.results, species.code.definitions, species.presence, clusters)
}else{
formatted.dht.results <- format.dht.results(dht.results, species.code, clusters)
}
##################################
expect_that(length(formatted.dht.results), equals(2))
expect_that(names(formatted.dht.results), is_identical_to(c("CD","WD")))
expect_that(dht.results[[1]]$clusters$N$Estimate[1]+dht.results[[2]]$clusters$N$Estimate[1]+dht.results[[3]]$clusters$N$Estimate[1], equals(formatted.dht.results[[1]]$clusters$N$Estimate[1]+formatted.dht.results[[2]]$clusters$N$Estimate[1]))
expect_that(as.numeric(((formatted.dht.results[["CD"]]$clusters$N$Estimate[1]-dht.results[["CD"]]$clusters$N$Estimate[1])/formatted.dht.results[["CD"]]$clusters$N$Estimate[1])*100), equals(formatted.dht.results[["CD"]]$clusters$N$PercentUnidentified[1], tolerance = 0.0001))
##################################
bootstrap.results <- accumulate.results(n, bootstrap.results, formatted.dht.results, clusters)
##################################
expect_that(bootstrap.results$clusters.N["Total","PercentUnidentified",2,"CD"], equals(bootstrap.results$clusters.N["Total","PercentUnidentified",2,"WD"]))
expect_that(bootstrap.results$individual.N["Total","PercentUnidentified",2,"WD"], equals(as.numeric(((bootstrap.results$individual.N["Total","Estimate",2,"WD"]- dht.results[["WD"]]$individual$N$Estimate[1])/bootstrap.results$individual.N["Total","Estimate",2,"WD"])*100), tolerance = 0.001))
expect_that(bootstrap.results$Expected.S["Total","new.Expected.S",2,"CD"], equals(as.numeric(formatted.dht.results[["CD"]]$individual$N$Estimate[1]/formatted.dht.results[["CD"]]$clusters$N$Estimate[1])))
##################################
#process results
results <- process.bootstrap.results(bootstrap.results, model.index, clusters, bootstrap.ddf.statistics, bootstrap.options$quantile.type, analysis.options = list(bootstrap = bootstrap, n = bootstrap.options$n, covariate.uncertainty = covariate.uncertainty, clusters = clusters, double.observer = double.observer, unidentified.species = unidentified.species, species.code.definitions = species.code.definitions, model.names = model.names))
class(results) <- "ma"
class(results$analysis.options) <- "ma.analysis"
class(results$species) <- "ma.allspecies"
for(sp in seq(along = results$species)){
class(results$species[[sp]]) <- "ma.species"
}
if(!is.null(results$unidentified)){
class(results$unidentified) <- "ma.allunid"
for(sp in seq(along = results$unidentified)){
class(results$unidentified[[sp]]) <- "ma.unid"
}
}
##################################
expect_that(results, is_identical_to(results.to.compare))
##################################
#rm(.Random.seed)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_vaa.R
\name{get_vaa}
\alias{get_vaa}
\title{NHDPlusV2 Attribute Subset}
\usage{
get_vaa(
atts = NULL,
path = get_vaa_path(),
download = TRUE,
updated_network = FALSE
)
}
\arguments{
\item{atts}{character The variable names you would like, always includes comid}
\item{path}{character path where the file should be saved. Default is a
persistent system data as retrieved by \link{nhdplusTools_data_dir}.
Also see: \link{get_vaa_path}}
\item{download}{logical if TRUE, the default, will download VAA table if not
found at path.}
\item{updated_network}{logical default FALSE. If TRUE, updated network attributes
from E2NHD and National Water Model retrieved from
\doi{10.5066/P976XCVT}.}
}
\value{
data.frame containing requested VAA data
}
\description{
Return requested NHDPlusv2 Attributes.
}
\details{
The VAA data is an aggregate table of information from the NHDPlusV2
elevslope.dbf(s), PlusFlowlineVAA.dbf(s); and NHDFlowlines. All data
originates from the EPA NHDPlus Homepage
\href{https://www.epa.gov/waterdata/get-nhdplus-national-hydrography-dataset-plus-data}{here}.
To see the location of cached data on your machine use
\code{\link{get_vaa_path}}.
To view aggregate data and documentation, see
\href{https://www.hydroshare.org/resource/6092c8a62fac45be97a09bfd0b0bf726/}{here}
}
\examples{
\dontrun{
# This will download the vaa file to the path from get_vaa_path()
get_vaa("slope")
get_vaa(c("slope", "lengthkm"))
get_vaa(updated_network = TRUE)
get_vaa("reachcode", updated_network = TRUE)
#cleanup if desired
unlink(dirname(get_vaa_path()), recursive = TRUE)
}
}
| /man/get_vaa.Rd | permissive | cran/nhdplusTools | R | false | true | 1,730 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_vaa.R
\name{get_vaa}
\alias{get_vaa}
\title{NHDPlusV2 Attribute Subset}
\usage{
get_vaa(
atts = NULL,
path = get_vaa_path(),
download = TRUE,
updated_network = FALSE
)
}
\arguments{
\item{atts}{character The variable names you would like, always includes comid}
\item{path}{character path where the file should be saved. Default is a
persistent system data as retrieved by \link{nhdplusTools_data_dir}.
Also see: \link{get_vaa_path}}
\item{download}{logical if TRUE, the default, will download VAA table if not
found at path.}
\item{updated_network}{logical default FALSE. If TRUE, updated network attributes
from E2NHD and National Water Model retrieved from
\doi{10.5066/P976XCVT}.}
}
\value{
data.frame containing requested VAA data
}
\description{
Return requested NHDPlusv2 Attributes.
}
\details{
The VAA data is an aggregate table of information from the NHDPlusV2
elevslope.dbf(s), PlusFlowlineVAA.dbf(s); and NHDFlowlines. All data
originates from the EPA NHDPlus Homepage
\href{https://www.epa.gov/waterdata/get-nhdplus-national-hydrography-dataset-plus-data}{here}.
To see the location of cached data on your machine use
\code{\link{get_vaa_path}}.
To view aggregate data and documentation, see
\href{https://www.hydroshare.org/resource/6092c8a62fac45be97a09bfd0b0bf726/}{here}
}
\examples{
\dontrun{
# This will download the vaa file to the path from get_vaa_path()
get_vaa("slope")
get_vaa(c("slope", "lengthkm"))
get_vaa(updated_network = TRUE)
get_vaa("reachcode", updated_network = TRUE)
#cleanup if desired
unlink(dirname(get_vaa_path()), recursive = TRUE)
}
}
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Reference script for the SystemDS codegen test: reads matrix X from
# <args[1]>X.mtx, evaluates two fused multi-aggregate expressions (one with
# and one without aggregation), and writes the 1x1 result to <args[2]>R.
args <- commandArgs(TRUE)
library("Matrix")
library("matrixStats")

X <- readMM(paste0(args[1], "X.mtx"))
# two fused with and without aggregation
R <- as.matrix(sum(X / 3 * X / 4 * X / 5) - sum(X * X / 2))
writeMM(as(R, "CsparseMatrix"), paste0(args[2], "R"))
| /src/test/scripts/functions/codegen/CompressedMultiAggregateMain.R | permissive | apache/systemds | R | false | false | 1,186 | r | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
library("Matrix")
library("matrixStats")
X = readMM(paste(args[1], "X.mtx", sep=""));
# two fused with and without aggregation
R = as.matrix(sum(X/3 * X/4 * X/5) - sum(X * X/2))
writeMM(as(R,"CsparseMatrix"), paste(args[2], "R", sep=""));
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plus_objects.R
\name{Comment.actor}
\alias{Comment.actor}
\title{Comment.actor Object}
\usage{
Comment.actor(Comment.actor.clientSpecificActorInfo = NULL,
Comment.actor.clientSpecificActorInfo.youtubeActorInfo = NULL,
Comment.actor.image = NULL, Comment.actor.verification = NULL,
clientSpecificActorInfo = NULL, displayName = NULL, id = NULL,
image = NULL, url = NULL, verification = NULL)
}
\arguments{
\item{Comment.actor.clientSpecificActorInfo}{The \link{Comment.actor.clientSpecificActorInfo} object or list of objects}
\item{Comment.actor.clientSpecificActorInfo.youtubeActorInfo}{The \link{Comment.actor.clientSpecificActorInfo.youtubeActorInfo} object or list of objects}
\item{Comment.actor.image}{The \link{Comment.actor.image} object or list of objects}
\item{Comment.actor.verification}{The \link{Comment.actor.verification} object or list of objects}
\item{clientSpecificActorInfo}{Actor info specific to particular clients}
\item{displayName}{The name of this actor, suitable for display}
\item{id}{The ID of the actor}
\item{image}{The image representation of this actor}
\item{url}{A link to the Person resource for this actor}
\item{verification}{Verification status of actor}
}
\value{
Comment.actor object
}
\description{
Comment.actor Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The person who posted this comment.
}
\seealso{
Other Comment functions: \code{\link{Comment.actor.clientSpecificActorInfo.youtubeActorInfo}},
\code{\link{Comment.actor.clientSpecificActorInfo}},
\code{\link{Comment.actor.image}},
\code{\link{Comment.actor.verification}},
\code{\link{Comment.inReplyTo}},
\code{\link{Comment.object}},
\code{\link{Comment.plusoners}}, \code{\link{Comment}}
}
| /googleplusv1.auto/man/Comment.actor.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 1,844 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plus_objects.R
\name{Comment.actor}
\alias{Comment.actor}
\title{Comment.actor Object}
\usage{
Comment.actor(Comment.actor.clientSpecificActorInfo = NULL,
Comment.actor.clientSpecificActorInfo.youtubeActorInfo = NULL,
Comment.actor.image = NULL, Comment.actor.verification = NULL,
clientSpecificActorInfo = NULL, displayName = NULL, id = NULL,
image = NULL, url = NULL, verification = NULL)
}
\arguments{
\item{Comment.actor.clientSpecificActorInfo}{The \link{Comment.actor.clientSpecificActorInfo} object or list of objects}
\item{Comment.actor.clientSpecificActorInfo.youtubeActorInfo}{The \link{Comment.actor.clientSpecificActorInfo.youtubeActorInfo} object or list of objects}
\item{Comment.actor.image}{The \link{Comment.actor.image} object or list of objects}
\item{Comment.actor.verification}{The \link{Comment.actor.verification} object or list of objects}
\item{clientSpecificActorInfo}{Actor info specific to particular clients}
\item{displayName}{The name of this actor, suitable for display}
\item{id}{The ID of the actor}
\item{image}{The image representation of this actor}
\item{url}{A link to the Person resource for this actor}
\item{verification}{Verification status of actor}
}
\value{
Comment.actor object
}
\description{
Comment.actor Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The person who posted this comment.
}
\seealso{
Other Comment functions: \code{\link{Comment.actor.clientSpecificActorInfo.youtubeActorInfo}},
\code{\link{Comment.actor.clientSpecificActorInfo}},
\code{\link{Comment.actor.image}},
\code{\link{Comment.actor.verification}},
\code{\link{Comment.inReplyTo}},
\code{\link{Comment.object}},
\code{\link{Comment.plusoners}}, \code{\link{Comment}}
}
|
# Simulation Study Code for:
# No Measurement Error
# 4n
# 10
# Missing Completely at Random
# GLMNET
# Last Modified: 3/7/2020

Sys.setenv(JAVA_HOME='')

library(earth)
library(randomForest)
library(DMwR)
library(caret)
library(caretEnsemble)
library(pROC)
library(glmnet)
library(plotROC)
library(tictoc)
library(mice)
library(gtools)
library(data.table)
library(readxl)
library(openxlsx)

set.seed(6)                        # Random seed used for all 500 iterations
n_iter <- 500                      # Number of simulation replications
auc_list <- numeric(n_iter)        # Preallocated vector of AUC values (one per iteration)
mod_list <- vector("list", n_iter) # Tuning parameters selected at each iteration
mymethods <- c("glmnet")           # Data mining method (caret method name)

# Name of the file that will output the AUC values. Its name consists
# of the four data mining properties and the method from the caret package
of <- "NoError_4n_10_MCAR_GLMNET.csv"

# The execution time will also be recorded
tic("timer")

# 500 iterations of this program will be run
for (i in seq_len(n_iter)) {
  n <- 2500  # Size of the training + testing corpus

  # Generate 12 predictors from a standard normal distribution with mean 0 & var 1
  x1 <- rnorm(n, mean = 0, sd = 1)
  x2 <- rnorm(n, mean = 0, sd = 1)
  x3 <- rnorm(n, mean = 0, sd = 1)
  x4 <- rnorm(n, mean = 0, sd = 1)
  x5 <- rnorm(n, mean = 0, sd = 1)
  x6 <- rnorm(n, mean = 0, sd = 1)
  x7 <- rnorm(n, mean = 0, sd = 1)
  x8 <- rnorm(n, mean = 0, sd = 1)
  x9 <- rnorm(n, mean = 0, sd = 1)
  x10 <- rnorm(n, mean = 0, sd = 1)
  x11 <- rnorm(n, mean = 0, sd = 1)
  x12 <- rnorm(n, mean = 0, sd = 1)

  # Logistic equation: linear combination of the first six predictors
  # plus a tiny random intercept perturbation
  z <- -3 + .75*x1 + .75*x2 + .75*x3 + .75*x4 + .75*x5 + .75*x6 + rnorm(1, 0, 0.0001)
  pr <- 1/(1 + exp(z))  # Success probability (majority class)
  y <- rbinom(n, 1, pr) # Bernoulli response variable

  # Data frame with the independent variables and response variable
  data_mat <- as.data.frame(cbind(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, y))

  # Class imbalance in the test set: 10% minority class and 90% majority outcome
  test_fail <- data_mat[sample(which(data_mat$y == 0), 50), ]
  test_pass <- data_mat[sample(which(data_mat$y == 1), 450), ]
  testing_data <- rbind(test_fail, test_pass)

  # Remaining rows form the training set. (The original script repeated the
  # three statements below verbatim; the duplicates had no effect and were removed.)
  training_data <- subset(data_mat, !(rownames(data_mat) %in% rownames(testing_data)))
  train_dep <- training_data$y

  # Data amputation on the predictor columns only: Missing Completely At Random.
  # seq_len(ncol - 1) selects every column except the response y.
  data_mat_final <- ampute(data = training_data[, seq_len(ncol(training_data) - 1)],
                           prop = 0.6, mech = 'MCAR')$amp

  # After applying amputation, restore the original row order
  data_mat_final$index <- as.numeric(row.names(data_mat_final))
  data_mat_final <- data_mat_final[order(data_mat_final$index), ]
  data_mat_final <- subset(data_mat_final, select = -c(index))

  # Re-attach the (complete) response variable
  training_data <- cbind(data_mat_final, train_dep)
  names(training_data)[names(training_data) == 'train_dep'] <- 'y'

  # Apply MICE (predictive mean matching) to fill in the missing entries
  mice_training <- mice(training_data, m = 1, maxit = 50, meth = 'pmm', seed = 500)
  training_data <- complete(mice_training, 1)

  # Recode the dependent variable as a two-level factor: F = fail, P = pass
  training_data$y[training_data$y == "0"] <- "F"
  training_data$y[training_data$y == "1"] <- "P"
  testing_data$y[testing_data$y == "0"] <- "F"
  testing_data$y[testing_data$y == "1"] <- "P"
  training_data$y <- factor(training_data$y)
  testing_data$y <- factor(testing_data$y)

  # Apply SMOTE to balance the training classes
  training_data <- SMOTE(y ~ ., data = training_data)

  # 10-fold cross-validation will be applied to the training data
  ctrl <- trainControl(method = "repeatedcv", repeats = 1, classProbs = TRUE,
                       savePredictions = TRUE, summaryFunction = twoClassSummary)
  out <- caretList(y ~ ., data = training_data, methodList = mymethods,
                   trControl = ctrl, tuneLength = 6)  # Train the model

  # Apply the model to the testing data and calculate the AUC on the testing corpus
  model_preds_tst <- lapply(out, predict,
                            newdata = testing_data[, seq_len(ncol(testing_data) - 1)],
                            type = "prob")
  model_preds_tst <- lapply(model_preds_tst, function(x) x[, "F"])
  # NOTE(review): [, -4] drops a 4th column when several methods are compared;
  # with a single method it leaves the one prediction column untouched.
  model_preds_tst <- as.data.frame(model_preds_tst)[, -4]
  auc_list[i] <- caTools::colAUC(model_preds_tst, testing_data$y == "F", plotROC = TRUE)

  # Store the tuning parameters for this iteration (bound into one frame below)
  mod_list[[i]] <- out$glmnet$bestTune

  print(i)
  rm(data_mat, testing_data)
}

# Collect per-iteration tuning parameters and save them to a CSV spreadsheet
mod <- do.call(rbind, mod_list)
write.csv(mod, 'NoError_4n_10_MCAR_GLMNET_OUT.csv')

print('')
toc(log = TRUE)    # Record the execution time
boxplot(auc_list)  # Generate a boxplot of the AUC values
write.csv(auc_list, file = paste('AUC', paste(mymethods, sep = "_"), of))  # AUC spreadsheet
# No Measurement Error
# 4n
# 10
# Missing Completely at Random
# GLMNET
# Last Modified: 3/7/2020
Sys.setenv(JAVA_HOME='')
library(earth)
library(randomForest)
library(DMwR)
library(caret)
library(caretEnsemble)
library(pROC)
library(glmnet)
library(plotROC)
library(tictoc)
library(mice)
library(gtools)
library(data.table)
library(readxl)
library(openxlsx)
set.seed(6) # Random seed used for all 500 iterations
auc_list <- c() # List to store the AUC values
mod <- c() # List to store the tuning parameters at each iteration for the method
# Name of the file that will output the AUC values. Its name consists
# of the four data mining properties and the method from the caret package
of="NoError_4n_10_MCAR_GLMNET.csv"
# Th execution time will also be recorded
tic("timer")
# 500 iterations of this program will be run
for (i in 1:500){
n = 2500 # Size of the training + testing corpus
# Generate 12 predictors from a standard normal distribution with mean 0 & var 1
x1 = rnorm(n,mean = 0,sd = 1)
x2 = rnorm(n,mean = 0,sd = 1)
x3 = rnorm(n,mean = 0,sd = 1)
x4 = rnorm(n,mean = 0,sd = 1)
x5 = rnorm(n,mean = 0,sd = 1)
x6 = rnorm(n,mean = 0,sd = 1)
x7 = rnorm(n,mean = 0,sd = 1)
x8 = rnorm(n,mean = 0,sd = 1)
x9 = rnorm(n,mean = 0,sd = 1)
x10 = rnorm(n,mean = 0,sd = 1)
x11 = rnorm(n,mean = 0,sd = 1)
x12 = rnorm(n,mean = 0,sd = 1)
# Logistic Equation
z = -3 + .75*x1 + .75*x2 + .75*x3 + .75*x4 + .75*x5 + .75*x6+rnorm(1,0,0.0001) # linear combination with a bias
pr = 1/(1+exp(z)) # Inverted logit function for the majority class
y = rbinom(n,1,pr) # Bernoulli response variable
# Create a dataframe with the independent variables and response variable
data_mat <- as.data.frame(cbind(x1,x2,x3,x4,x5,x6,x7,x8,x9,x10,x11,x12,y))
# Class imbalance: 10% minority class and 90% majority outcome
test_fail <- data_mat[ sample( which(data_mat$y==0), 50), ]
test_pass <- data_mat[ sample( which(data_mat$y==1), 450), ]
testing_data <- rbind(test_fail,test_pass)
# Divide the data into training and testing sets
training_data <- subset(data_mat, !(rownames(data_mat) %in% rownames(testing_data)))
train_dep <- training_data$y
testing_data <- rbind(test_fail,test_pass)
training_data <- subset(data_mat, !(rownames(data_mat) %in% rownames(testing_data)))
train_dep <- training_data$y
# Data Amputation: Missing Completely at Random
data_mat_final <- ampute(data = training_data[,1:ncol(training_data)-1], prop = 0.6, mech = 'MCAR')$amp
# After applying amputation, we reorganize the corpus
data_mat_final$index <- as.numeric(row.names(data_mat_final))
data_mat_final <- data_mat_final[order(data_mat_final$index), ]
data_mat_final <- subset(data_mat_final, select = -c(index))
data_original <- data_mat_final
eve_data <- cbind(data_original,train_dep)
names(eve_data)[names(eve_data) == 'train_dep'] <- 'y'
training_data <- eve_data
# Apply MICE to fill in the missing entries of the training data
mice_training <- mice(training_data,m=1,maxit=50,meth='pmm',seed=500)
training_data <- complete(mice_training,1)
# Convert the dependent variable to pass and fail
training_data$y[training_data$y == "0"] <- "F"
training_data$y[training_data$y == "1"] <- "P"
testing_data$y[testing_data$y == "0"] <- "F"
testing_data$y[testing_data$y == "1"] <- "P"
# Convert the dependent variable to a factor
training_data$y <- factor(training_data$y)
testing_data$y <- factor(testing_data$y)
# Apply SMOTE to the training data
training_data <- SMOTE(y ~ ., data = training_data)
# 10-fold cross-validation will be applied to the training data
ctrl = trainControl(method = "repeatedcv", repeats = 1, classProbs = T, savePredictions = T, summaryFunction = twoClassSummary)
mymethods = c("glmnet") # Data mining method
out = caretList(y~., data = training_data, methodList = mymethods, trControl = ctrl, tuneLength = 6) # Train the model
# Apply the model to the testing data and calculate the AUC on the testing corpus
model_preds_tst = lapply(out, predict, newdata = testing_data[, 1:(dim(testing_data)[2] - 1)], type = "prob")
model_preds_tst = lapply(model_preds_tst, function(x)x[,"F"])
model_preds_tst = as.data.frame(model_preds_tst)[,-4]
auc_test = caTools::colAUC(model_preds_tst, testing_data$y == "F", plotROC = T)
auc_list[i] <- auc_test
# Store the tuning parameters for each iteration in a csv spreadsheet
if (i > 1){
mod <- rbind(mod,out$glmnet$bestTune)
}else{
mod <- data.frame(out$glmnet$bestTune)
}
print(i)
rm(data_mat,testing_data)
}
write.csv(mod,'NoError_4n_10_MCAR_GLMNET_OUT.csv') # CSV file with parameters
print('')
toc(log=TRUE) # Record the execution time
boxplot(auc_list) # Generate a boxplot of the AUC values
write.csv(auc_list,file=paste('AUC',paste(mymethods,sep="_"),of)) # AUC spreadsheet |
## Constructs a special "matrix" object: a list of closures that hold a
## square invertible matrix together with a cached copy of its inverse.
## The cache lives in this constructor's environment and is shared by the
## four accessors below. Requires a square invertible matrix as input
## (limit of solve fxn).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  ## Replace the stored matrix and drop any stale cached inverse.
  ## `<<-` assigns in the enclosing (constructor) environment, so every
  ## accessor sees the update.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }

  ## Return the stored matrix.
  get <- function() {
    x
  }

  ## Store a freshly computed inverse in the cache.
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }

  ## Return the cached inverse, or NULL if it has not been computed yet.
  getinv <- function() {
    cached_inverse
  }

  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## Returns the inverse of the special "matrix" produced by makeCacheMatrix:
## the cached value when one exists, otherwise the inverse is computed with
## solve(), stored in the cache, and returned.
## Requires a square invertible matrix as input (limit of solve fxn).
cacheSolve <- function(x, ...) {
  ## input for cacheSolve is the output of makeCacheMatrix
  cached <- x$getinv()
  if (is.null(cached)) {
    ## cache miss: compute the inverse, store it, and return it
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    ## cache hit: reuse the previously computed inverse
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | stoering/ProgrammingAssignment2 | R | false | false | 1,269 | r |
## This function creates a matrix that can cache its inverse.
## Requires a square invertible matrix as input(limit of solve fxn).
makeCacheMatrix <- function(x = matrix()) {
inv_mat <- NULL
set <- function(y) {
## this sets the values of x and inv_mat in the global environment
x <<- y
inv_mat <<- NULL
}
get <- function() x
## this allows the program to get the matrix
setinv <- function(inverse) inv_mat <<- inverse
## this sets the inverse of the matrix
getinv <- function() inv_mat
## this gets the inverse of the matrix
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## This function either calculates the inverse of the matrix from
## function makeCacheMatrix or retrieves the calculated value from
## the cache.
## Requires a square invertible matrix as input (limit of solve fxn).
cacheSolve <- function(x, ...) {
##input for cacheSolve is the output of makeCacheMatrix
inv_mat <- x$getinv()
if(!is.null(inv_mat)) {
## gets cached data if it is there
message("getting cached data")
return(inv_mat)
}
## if cache is empty, calculates the inverse
mat_data <- x$get()
inv_mat <- solve(mat_data, ...)
x$setinv(inv_mat)
return(inv_mat)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ChIPseqSpikeInFree.R
\name{ReadMeta}
\alias{ReadMeta}
\title{read in sample metadata file}
\usage{
ReadMeta(metaFile = "sample_meta.txt")
}
\arguments{
\item{metaFile}{a metadata file name; the file must have three columns: ID (bam filename without full path), ANTIBODY and GROUP. the COLOR column is optional and will be used for plotting purpose.}
}
\value{
A data.frame of metaFile
}
\description{
This function allows you to load metadata into an R data.frame and return the object.
In addition, it validates the meta_info format and adds a COLOR column if it's undefined.
}
\examples{
## 1. load an example of metadata file
metaFile <- system.file("extdata", "sample_meta.txt", package = "ChIPseqSpikeInFree")
meta <- ReadMeta(metaFile)
head(meta, n = 1)
meta
# ID ANTIBODY GROUP COLOR
# H3K27me3-NSH.K27M.A.bam H3K27me3-NSH.K27M.A.bam H3K27me3 K27M green
# H3K27me3-NSH.K27M.B.bam H3K27me3-NSH.K27M.B.bam H3K27me3 K27M green
# H3K27me3-NSH.K27M.C.bam H3K27me3-NSH.K27M.C.bam H3K27me3 K27M green
# H3K27me3-NSH.WT.D.bam H3K27me3-NSH.WT.D.bam H3K27me3 WT grey
# H3K27me3-NSH.WT.E.bam H3K27me3-NSH.WT.E.bam H3K27me3 WT grey
# H3K27me3-NSH.WT.F.bam H3K27me3-NSH.WT.F.bam H3K27me3 WT grey
}
| /man/ReadMeta.Rd | permissive | stjude/ChIPseqSpikeInFree | R | false | true | 1,345 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ChIPseqSpikeInFree.R
\name{ReadMeta}
\alias{ReadMeta}
\title{read in sample metadata file}
\usage{
ReadMeta(metaFile = "sample_meta.txt")
}
\arguments{
\item{metaFile}{a metadata file name; the file must have three columns: ID (bam filename without full path), ANTIBODY and GROUP. the COLOR column is optional and will be used for plotting purpose.}
}
\value{
A data.frame of metaFile
}
\description{
This function allows you to load metadata into an R data.frame and return the object.
In addition, it validates the meta_info format and adds a COLOR column if it's undefined.
}
\examples{
## 1. load an example of metadata file
metaFile <- system.file("extdata", "sample_meta.txt", package = "ChIPseqSpikeInFree")
meta <- ReadMeta(metaFile)
head(meta, n = 1)
meta
# ID ANTIBODY GROUP COLOR
# H3K27me3-NSH.K27M.A.bam H3K27me3-NSH.K27M.A.bam H3K27me3 K27M green
# H3K27me3-NSH.K27M.B.bam H3K27me3-NSH.K27M.B.bam H3K27me3 K27M green
# H3K27me3-NSH.K27M.C.bam H3K27me3-NSH.K27M.C.bam H3K27me3 K27M green
# H3K27me3-NSH.WT.D.bam H3K27me3-NSH.WT.D.bam H3K27me3 WT grey
# H3K27me3-NSH.WT.E.bam H3K27me3-NSH.WT.E.bam H3K27me3 WT grey
# H3K27me3-NSH.WT.F.bam H3K27me3-NSH.WT.F.bam H3K27me3 WT grey
}
|
#' Obtener tasa de inflacion
#'
#' Obtiene tasa de inflación inter anual en porcentaje.
#' La inflación se define como el cambio porcentual en el INPC.
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token persona emitido por el INEGI para acceder al API de indicadores.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' Inflacion<-inflacion_general(token)
#' }
#' @export
#'
inflacion_general <- function(token) {
  # URL of the general INPC (consumer price index) series in the INEGI API
  inpc_url <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/216064/00000/es/false/xml/"
  inpc <- inegiR::serie_inegi(inpc_url, token)
  # Year-over-year percentage change of the index (12-month lapse)
  tasa <- inegiR::YoY(serie = inpc$Valores, lapso = 12, decimal = FALSE)
  cbind.data.frame(Fechas = inpc$Fechas, Valores = tasa)
}
#' Obtener tasa de inflacion de Estudiantes
#'
#' Obtiene tasa de inflación de estudiantes, inter anual en porcentaje. Es un wrapper de las funciones Serie_Inegi() y YoY().
#' La metodología del índice se puede encontrar aquí: \url{http://enelmargen.org/eem/ipe/}
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token persona emitido por el INEGI para acceder al API.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' InflacionEstudiantes<-inflacion_estudiantes(token)
#' }
#' @export
#'
inflacion_estudiantes <- function(token) {
  # Component INPC series of the student price index (IPE): INEGI indicator
  # IDs 216065..216072, with the weights documented at
  # http://enelmargen.org/eem/ipe/ (same order as the IDs).
  url_fmt <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/%d/00000/es/false/xml/"
  ids <- 216065:216072
  pesos <- c(0.331417, 0.032764, 0.077735, 0.00378,
             0.028353177, 0.199190, 0.0606992, 0.266067)

  # Download each component once and give its value column a unique name
  # (s1..s8) so the merge below keeps the components apart.
  series <- lapply(seq_along(ids), function(k) {
    s <- inegiR::serie_inegi(sprintf(url_fmt, ids[k]), token)
    names(s) <- c(paste0("s", k), "Fechas")
    s
  })

  # Merge all components on the date column.
  df <- Reduce(function(...) merge(..., all = TRUE), series)

  # Weighted aggregate index (dot product of components and weights),
  # then its year-over-year change.
  df$ipe <- drop(as.matrix(df[paste0("s", seq_along(ids))]) %*% pesos)
  st <- inegiR::YoY(serie = df$ipe, lapso = 12, decimal = FALSE)
  cbind.data.frame(Fechas = df$Fechas, Valores = st)
}
#' Obtener terminos de intercambio
#'
#' Obtiene la razón de términos de intercambio para México (ToT). Es un wrapper de las funciones serie_inegi() y YoY().
#' La razón se define como el índice de precios de exportaciones entre el índice de precios de importaciones.
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token personal emitido por el INEGI para acceder al API.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' TerminosIntercambio<-inflacion_tot(token)
#' }
#' @export
#'
inflacion_tot <- function(token) {
  # Terms of trade (ToT) = export price index / import price index
  url_exportaciones <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/37502/00000/es/false/xml/"
  url_importaciones <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/37503/00000/es/false/xml/"

  exportaciones <- inegiR::serie_inegi(url_exportaciones, token)
  names(exportaciones) <- c("x", "Fechas")
  importaciones <- inegiR::serie_inegi(url_importaciones, token)
  names(importaciones) <- c("m", "Fechas")

  # Join both series on the date column, then take the price ratio.
  df <- merge(importaciones, exportaciones, all = TRUE)
  cbind.data.frame(Fechas = df$Fechas, Valores = df$x / df$m)
}
#' Obtener inflacion por Ciudad
#'
#' Obtiene la tasa de inflación mensual por ciudad.
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token personal emitido por el INEGI para acceder al API.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' InflacionCiudades<-inflacion_ciudades(token)
#' }
#' @export
#'
inflacion_ciudades <- function(token) {
  # City names in the same order as INEGI indicator IDs 216095..216140
  ciudades <- c(
    "DF", "Merida", "Morelia", "Guadalajara", "Monterrey",
    "Mexicali", "CdJuarez", "Acapulco", "Culiacan", "Leon",
    "Puebla", "SanLuisPotosi", "Tapachula", "Toluca", "Torreon",
    "Veracruz", "Villahermosa", "Tampico", "Chihuahua", "Hermosillo", "Monclova",
    "Cordoba", "Ags", "Tijuana", "Matamoros", "Colima", "LaPaz", "Chetumal",
    "Jacona", "Fresnillo", "Iguala", "Huatabampo", "Tulancingo", "Cortazar",
    "CdJimenez", "Durango", "Tepic", "Oaxaca", "Queretaro", "Cuernavaca",
    "Tlaxcala", "SanAndres", "Campeche", "Tepatitlan", "Tehuantepec", "CdAcuna")

  # The city INPC series have consecutive indicator IDs, so the 46 URLs can
  # be generated instead of hard-coded.
  urls <- sprintf(
    "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/%d/00000/en/false/xml/",
    seq(216095, length.out = length(ciudades)))

  # Download one series per city and label its value column with the city name.
  dloads <- lapply(seq_along(urls), function(k) {
    s <- inegiR::serie_inegi(serie = urls[k], token)
    names(s) <- c(ciudades[k], "Fechas")
    s
  })

  # Merge all city series on the date column.
  df <- Reduce(function(...) merge(..., all = TRUE), dloads)

  # Year-over-year inflation for every city column.
  ts <- apply(df[, ciudades],
              2, function(x) {
                inegiR::YoY(serie = x, lapso = 12, decimal = FALSE)
              })
  ts <- as.data.frame(ts)
  ts$Fechas <- df$Fechas
  ts
}
| /inegiR/R/IndicesPrecios.R | no_license | ingted/R-Examples | R | false | false | 10,380 | r | #' Obtener tasa de inflacion
#'
#' Obtiene tasa de inflación inter anual en porcentaje.
#' La inflación se define como el cambio porcentual en el INPC.
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token persona emitido por el INEGI para acceder al API de indicadores.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' Inflacion<-inflacion_general(token)
#' }
#' @export
#'
inflacion_general <- function(token) {
  # URL of the general INPC (consumer price index) series on the INEGI API
  inpc_url <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/216064/00000/es/false/xml/"
  inpc <- inegiR::serie_inegi(inpc_url, token)
  # Headline inflation = year-over-year percent change of the index
  yoy_rate <- inegiR::YoY(serie = inpc$Valores, lapso = 12, decimal = FALSE)
  data.frame(Fechas = inpc$Fechas, Valores = yoy_rate)
}
#' Obtener tasa de inflacion de Estudiantes
#'
#' Obtiene tasa de inflación de estudiantes, inter anual en porcentaje. Es un wrapper de las funciones Serie_Inegi() y YoY().
#' La metodología del índice se puede encontrar aquí: \url{http://enelmargen.org/eem/ipe/}
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token persona emitido por el INEGI para acceder al API.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' InflacionEstudiantes<-inflacion_estudiantes(token)
#' }
#' @export
#'
inflacion_estudiantes <- function(token) {
  # INPC sub-indices (consecutive INEGI series 216065-216072) that make up
  # the student price index, with their fixed weights. Methodology:
  # http://enelmargen.org/eem/ipe/
  base_url <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/%d/00000/es/false/xml/"
  series_ids <- 216065:216072
  weights <- c(0.331417, 0.032764, 0.077735, 0.00378,
               0.028353177, 0.199190, 0.0606992, 0.266067)
  # Download each component and rename its value column to s1..s8 so the
  # merged data frame has one value column per component plus "Fechas".
  components <- lapply(seq_along(series_ids), function(k) {
    serie <- inegiR::serie_inegi(sprintf(base_url, series_ids[k]), token)
    names(serie) <- c(paste0("s", k), "Fechas")
    serie
  })
  df <- Reduce(function(...) merge(..., all = TRUE), components)
  # Weighted aggregate index; Reduce("+") keeps the original left-to-right
  # summation order of the hand-written expression.
  df$ipe <- Reduce(`+`, Map(function(nm, w) df[[nm]] * w,
                            paste0("s", seq_along(weights)), weights))
  # Student inflation = year-over-year percent change of the aggregate.
  st <- inegiR::YoY(serie = df$ipe, lapso = 12, decimal = FALSE)
  data.frame(Fechas = df$Fechas, Valores = st)
}
#' Obtener terminos de intercambio
#'
#' Obtiene la razón de términos de intercambio para México (ToT). Es un wrapper de las funciones serie_inegi() y YoY().
#' La razón se define como el índice de precios de exportaciones entre el índice de precios de importaciones.
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token personal emitido por el INEGI para acceder al API.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' TerminosIntercambio<-inflacion_tot(token)
#' }
#' @export
#'
inflacion_tot <- function(token) {
  # Terms of trade (ToT) = export price index / import price index.
  export_url <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/37502/00000/es/false/xml/"
  import_url <- "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/37503/00000/es/false/xml/"
  exports <- inegiR::serie_inegi(export_url, token)
  names(exports) <- c("x", "Fechas")
  imports <- inegiR::serie_inegi(import_url, token)
  names(imports) <- c("m", "Fechas")
  # Outer merge on the shared "Fechas" column keeps every date present in
  # either series.
  both <- merge(imports, exports, all = TRUE)
  data.frame(Fechas = both$Fechas, Valores = both$x / both$m)
}
#' Obtener inflacion por Ciudad
#'
#' Obtiene la tasa de inflación mensual por ciudad.
#' Es un wrapper de las funciones \code{serie_inegi()} y \code{YoY()}.
#'
#' @param token token personal emitido por el INEGI para acceder al API.
#' @author Eduardo Flores
#' @return Data.frame
#'
#' @examples
#' \dontrun{
#' token<-"webservice_token"
#' InflacionCiudades<-inflacion_ciudades(token)
#' }
#' @export
#'
inflacion_ciudades <- function(token) {
  # City names in the same order as their INPC series on the INEGI API.
  # The series ids are consecutive (216095..216140), one per city, so the
  # 46 URLs are generated instead of hand-typed.
  ciudades <- c(
    "DF", "Merida", "Morelia", "Guadalajara", "Monterrey",
    "Mexicali", "CdJuarez", "Acapulco", "Culiacan", "Leon",
    "Puebla", "SanLuisPotosi", "Tapachula", "Toluca", "Torreon",
    "Veracruz", "Villahermosa", "Tampico", "Chihuahua", "Hermosillo", "Monclova",
    "Cordoba", "Ags", "Tijuana", "Matamoros", "Colima", "LaPaz", "Chetumal",
    "Jacona", "Fresnillo", "Iguala", "Huatabampo", "Tulancingo", "Cortazar",
    "CdJimenez", "Durango", "Tepic", "Oaxaca", "Queretaro", "Cuernavaca",
    "Tlaxcala", "SanAndres", "Campeche", "Tepatitlan", "Tehuantepec", "CdAcuna")
  SeriesDf <- data.frame(
    Ciudad = ciudades,
    Data = sprintf(
      "http://www3.inegi.org.mx/sistemas/api/indicadores/v1//Indicador/%d/00000/en/false/xml/",
      216095:216140),
    stringsAsFactors = FALSE)
  # Download one series per city (preallocated; count taken from the table
  # rather than hardcoded).
  n <- nrow(SeriesDf)
  dloads <- vector("list", n)
  for (i in seq_len(n)) {
    dloads[[i]] <- inegiR::serie_inegi(serie = SeriesDf$Data[i], token)
  }
  # Rename each value column to the city name; the date column stays
  # "Fechas" so it is the common merge key.
  names(dloads) <- SeriesDf$Ciudad
  for (i in seq_len(n)) {
    names(dloads[[i]]) <- c(names(dloads)[i], "Fechas")
  }
  # Outer-merge all cities on "Fechas" (merge puts the key column first).
  df <- Reduce(function(...) merge(..., all = TRUE), dloads)
  # Year-over-year inflation per city column (all columns except Fechas).
  ts <- apply(df[, -1],
              2, function(x) {
                inegiR::YoY(serie = x, lapso = 12, decimal = FALSE)})
  ts <- as.data.frame(ts)
  # Re-attach the dates to the per-city inflation rates.
  ts$Fechas <- df$Fechas
  return(ts)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upload_video.R
\name{upload_video}
\alias{upload_video}
\title{Upload Video to Youtube}
\usage{
upload_video(file, snippet = NULL, status = list(privacyStatus = "public"),
query = NULL, open_url = FALSE, ...)
}
\arguments{
\item{file}{Filename of the video locally}
\item{snippet}{Additional fields for the video, including `description`
and `title`. See
\url{https://developers.google.com/youtube/v3/docs/videos#resource} for
other fields. Coerced to a JSON object}
\item{status}{Additional fields to be put into the \code{status} input.
options for `status` are `license` (which should hold:
`creativeCommon`, or `youtube`), `privacyStatus`, `publicStatsViewable`,
`publishAt`.}
\item{query}{Fields for `query` in `POST`}
\item{open_url}{Should the video be opened using \code{\link{browseURL}}}
\item{...}{Additional arguments to send to \code{\link{tuber_POST}} and
therefore \code{\link{POST}}}
}
\value{
A list of the response object from the \code{POST}, the content,
and the URL of the uploaded video
}
\description{
Upload Video to Youtube
}
\note{
The information for `status` and `snippet` are at
\url{https://developers.google.com/youtube/v3/docs/videos#resource}
but the subset of these fields to pass in are located at:
\url{https://developers.google.com/youtube/v3/docs/videos/insert}
The `part` parameter serves two purposes in this operation.
It identifies the properties that the write operation will set; this will be
automatically detected from the names of `body`.
See \url{https://developers.google.com/youtube/v3/docs/videos/insert#usage}
}
\examples{
snippet = list(
title = "Test Video",
description = "This is just a random test.",
tags = c("r language", "r programming", "data analysis")
)
status = list(privacyStatus = "private")
}
| /man/upload_video.Rd | permissive | stjordanis/tuber | R | false | true | 1,836 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upload_video.R
\name{upload_video}
\alias{upload_video}
\title{Upload Video to Youtube}
\usage{
upload_video(file, snippet = NULL, status = list(privacyStatus = "public"),
query = NULL, open_url = FALSE, ...)
}
\arguments{
\item{file}{Filename of the video locally}
\item{snippet}{Additional fields for the video, including `description`
and `title`. See
\url{https://developers.google.com/youtube/v3/docs/videos#resource} for
other fields. Coerced to a JSON object}
\item{status}{Additional fields to be put into the \code{status} input.
options for `status` are `license` (which should hold:
`creativeCommon`, or `youtube`), `privacyStatus`, `publicStatsViewable`,
`publishAt`.}
\item{query}{Fields for `query` in `POST`}
\item{open_url}{Should the video be opened using \code{\link{browseURL}}}
\item{...}{Additional arguments to send to \code{\link{tuber_POST}} and
therefore \code{\link{POST}}}
}
\value{
A list of the response object from the \code{POST}, the content,
and the URL of the uploaded video
}
\description{
Upload Video to Youtube
}
\note{
The information for `status` and `snippet` are at
\url{https://developers.google.com/youtube/v3/docs/videos#resource}
but the subset of these fields to pass in are located at:
\url{https://developers.google.com/youtube/v3/docs/videos/insert}
The `part` parameter serves two purposes in this operation.
It identifies the properties that the write operation will set; this will be
automatically detected from the names of `body`.
See \url{https://developers.google.com/youtube/v3/docs/videos/insert#usage}
}
\examples{
snippet = list(
title = "Test Video",
description = "This is just a random test.",
tags = c("r language", "r programming", "data analysis")
)
status = list(privacyStatus = "private")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/has-name.R, R/pipe.R, R/tbl_sum.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{has_name}
\alias{\%>\%}
\alias{obj_sum}
\alias{type_sum}
\alias{is_vector_s3}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{magrittr}{\code{\link[magrittr]{\%>\%}}}
\item{pillar}{\code{\link[pillar]{is_vector_s3}}, \code{\link[pillar]{obj_sum}}, \code{\link[pillar]{type_sum}}}
\item{rlang}{\code{\link[rlang]{has_name}}}
}}
| /man/reexports.Rd | permissive | datacamp/tibble | R | false | true | 654 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/has-name.R, R/pipe.R, R/tbl_sum.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{has_name}
\alias{\%>\%}
\alias{obj_sum}
\alias{type_sum}
\alias{is_vector_s3}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{magrittr}{\code{\link[magrittr]{\%>\%}}}
\item{pillar}{\code{\link[pillar]{is_vector_s3}}, \code{\link[pillar]{obj_sum}}, \code{\link[pillar]{type_sum}}}
\item{rlang}{\code{\link[rlang]{has_name}}}
}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.MsTMIP_NARR.R
\name{download.MsTMIP_NARR}
\alias{download.MsTMIP_NARR}
\title{download.MsTMIP_NARR}
\usage{
download.MsTMIP_NARR(
outfolder,
start_date,
end_date,
site_id,
lat.in,
lon.in,
overwrite = FALSE,
verbose = FALSE,
...
)
}
\arguments{
\item{start_date}{YYYY-MM-DD}
\item{end_date}{YYYY-MM-DD}
\item{lat.in}{decimal degrees [-90, 90]}
\item{lon.in}{decimal degrees [-180, 180]}
}
\description{
Download and convert to CF a NARR single grid point from the MsTMIP server using the OPeNDAP interface
}
\author{
James Simkins
}
| /modules/data.atmosphere/man/download.MsTMIP_NARR.Rd | permissive | ashiklom/pecan | R | false | true | 624 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.MsTMIP_NARR.R
\name{download.MsTMIP_NARR}
\alias{download.MsTMIP_NARR}
\title{download.MsTMIP_NARR}
\usage{
download.MsTMIP_NARR(
outfolder,
start_date,
end_date,
site_id,
lat.in,
lon.in,
overwrite = FALSE,
verbose = FALSE,
...
)
}
\arguments{
\item{start_date}{YYYY-MM-DD}
\item{end_date}{YYYY-MM-DD}
\item{lat.in}{decimal degrees [-90, 90]}
\item{lon.in}{decimal degrees [-180, 180]}
}
\description{
Download and convert to CF a NARR single grid point from the MsTMIP server using the OPeNDAP interface
}
\author{
James Simkins
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare.summStat.R
\name{pvalue_arbutus}
\alias{pvalue_arbutus}
\title{Extract p--values for test statistics}
\usage{
pvalue_arbutus(x)
}
\arguments{
\item{x}{an \code{arbutus} object from the function \code{\link{compare_pic_stat}}}
}
\value{
a named vector of two-tailed p-values
}
\description{
Utility function for extracting p-values from the output
of \code{\link{compare_pic_stat}}
}
\examples{
data(finch)
phy <- finch$phy
dat <- finch$data[,"wingL"]
unit.tree <- make_unit_tree(phy, data=dat)
## calculate default test stats on observed data
obs <- calculate_pic_stat(unit.tree, stats=NULL)
## simulate data on unit.tree
sim.dat <- simulate_char_unit(unit.tree, nsim=10)
## calculate default test stats on simulated data
sim <- calculate_pic_stat(sim.dat, stats=NULL)
## compare simulated to observed test statistics
res <- compare_pic_stat(obs, sim)
## get p-values
pvalue_arbutus(res)
## note these are returned by default with print.arbutus
res
}
\seealso{
\code{\link{compare_pic_stat}}
}
| /man/pvalue_arbutus.Rd | no_license | mwpennell/arbutus | R | false | true | 1,087 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare.summStat.R
\name{pvalue_arbutus}
\alias{pvalue_arbutus}
\title{Extract p--values for test statistics}
\usage{
pvalue_arbutus(x)
}
\arguments{
\item{x}{an \code{arbutus} object from the function \code{\link{compare_pic_stat}}}
}
\value{
a named vector of two-tailed p-values
}
\description{
Utility function for extracting p-values from the output
of \code{\link{compare_pic_stat}}
}
\examples{
data(finch)
phy <- finch$phy
dat <- finch$data[,"wingL"]
unit.tree <- make_unit_tree(phy, data=dat)
## calculate default test stats on observed data
obs <- calculate_pic_stat(unit.tree, stats=NULL)
## simulate data on unit.tree
sim.dat <- simulate_char_unit(unit.tree, nsim=10)
## calculate default test stats on simulated data
sim <- calculate_pic_stat(sim.dat, stats=NULL)
## compare simulated to observed test statistics
res <- compare_pic_stat(obs, sim)
## get p-values
pvalue_arbutus(res)
## note these are returned by default with print.arbutus
res
}
\seealso{
\code{\link{compare_pic_stat}}
}
|
# Shiny app: per-palm frugivore accumulation curves.
# Depends on the shiny and vegan packages and on the interaction dataset
# DATA/PalmCurrentDataset.csv (columns used below: PALM, FRUGIVORE,
# referenceKey, biogeographicRegion).
datos = read.csv("DATA/PalmCurrentDataset.csv")
# Define UI ----
ui <- fluidPage(
  # App title ----
  titlePanel("Accumulation Curves per Palm"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Input: dropdown of palm species (inputId "bins" is historical; it
      # selects a species, not a bin count) ----
      selectInput(inputId = "bins",
                  label = "Palm Species",
                  choices = sort(droplevels(unique(datos$PALM)))
      )
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Output: accumulation-curve plot for the selected palm ----
      plotOutput(outputId = "distPlot"),
      h5("Plot above represents the individual accumulation curves constructed by randomizing
         the number of frugivores (y axis) in function of the number of unique studies a palm species has been found
         red line shows the expected assymptote calculated with Chao1, confidence intervals are
         represented with the dashed red lines
         Sampling Completeness (i.e. SC) is calculated as the number of frugivore species observed / expected"),
      h4("Interaction data from Zona and Henderson have been omitted")
    )
  )
)
server <- function(input, output) {
  # Plot the frugivore accumulation curve for palm species x:
  # builds a frugivore-by-study incidence table, estimates the expected
  # richness with Chao (vegan::specpool), and overlays the randomized
  # accumulation curve (vegan::specaccum) with the Chao asymptote (solid
  # red) and its +/- 1 SE band (dashed red). The legend reports sampling
  # completeness = observed / Chao-expected richness.
  makeFrugPlot = function(dataset, x){
    SROm = droplevels(dataset[dataset$PALM == x,])
    SROm1 = table(SROm$FRUGIVORE,SROm$referenceKey)
    Acum = vegan::specpool(SROm1)[c("Species", "chao", "chao.se")]
    plot(vegan::specaccum(SROm1),
         xlab = "No Studies",
         ylab = "Frugivores",
         ylim = c(0, Acum$chao + Acum$chao.se + 2),
         main = paste(x,"from:", unique(SROm$biogeographicRegion)))
    abline(h=Acum$chao, col = "red")
    abline(h=c(Acum$chao-Acum$chao.se, Acum$chao+Acum$chao.se), lty = 2, col = "red")
    legend("topleft" , paste("SC = ", round(Acum$Species/Acum$chao, 3) * 100, "%"),
           bty = "n")}
  # Reactive re-read of the dataset (re-evaluated per session/invalidation).
  datos2 = reactive({read.csv("DATA/PalmCurrentDataset.csv")})
  # Redraw the curve whenever the selected species changes.
  output$distPlot <- renderPlot({
    makeFrugPlot(datos2(), input$bins)
  })
}
shinyApp(ui, server)
| /Scripts_R/shiny.R | permissive | fgabriel1891/Palm-frugivore-Interactions-Macroscales | R | false | false | 2,143 | r |
datos = read.csv("DATA/PalmCurrentDataset.csv")
# Define UI for app that draws a histogram ----
ui <- fluidPage(
# App title ----
titlePanel("Accumulation Curves per Palm"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Slider for the number of bins ----
selectInput(inputId = "bins",
label = "Palm Species",
choices = sort(droplevels(unique(datos$PALM)))
)
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Histogram ----
plotOutput(outputId = "distPlot"),
h5("Plot above represents the individual accumulation curves constructed by randomizing
the number of frugivores (y axis) in function of the number of unique studies a palm species has been found
red line shows the expected assymptote calculated with Chao1, confidence intervals are
represented with the dashed red lines
Sampling Completeness (i.e. SC) is calculated as the number of frugivore species observed / expected"),
h4("Interaction data from Zona and Henderson have been omitted")
)
)
)
server <- function(input, output) {
makeFrugPlot = function(dataset, x){
SROm = droplevels(dataset[dataset$PALM == x,])
SROm1 = table(SROm$FRUGIVORE,SROm$referenceKey)
Acum = vegan::specpool(SROm1)[c("Species", "chao", "chao.se")]
plot(vegan::specaccum(SROm1),
xlab = "No Studies",
ylab = "Frugivores",
ylim = c(0, Acum$chao + Acum$chao.se + 2),
main = paste(x,"from:", unique(SROm$biogeographicRegion)))
abline(h=Acum$chao, col = "red")
abline(h=c(Acum$chao-Acum$chao.se, Acum$chao+Acum$chao.se), lty = 2, col = "red")
legend("topleft" , paste("SC = ", round(Acum$Species/Acum$chao, 3) * 100, "%"),
bty = "n")}
datos2 = reactive({read.csv("DATA/PalmCurrentDataset.csv")})
output$distPlot <- renderPlot({
makeFrugPlot(datos2(), input$bins)
})
}
shinyApp(ui, server)
|
######## Pipeline: Illumina final report / sample map -> PLINK .fam format
# Remove all stored values from the workspace.
rm(list=ls(all=TRUE))
# Change the working directory interactively.
# NOTE(review): choose.dir() is Windows-only -- TODO confirm target platform.
setwd(choose.dir())
getwd()
# Step 2: read the Illumina sample map (first line skipped; no header row).
fam <- read.table(file="Sample_Map.txt", header=F, skip=1)
head(fam)
# Step 3: name the columns of the sample map.
colnames(fam) <- c("Index", "Name", "ID", "Gender", "Plate")
head(fam)
# Columns to emit, in output order. "Name" twice then "Plate" four times --
# presumably placeholders for the six .fam fields (FID IID PID MID Sex
# Pheno); verify against the downstream PLINK run.
col_order <- c("Name", "Name", "Plate", "Plate", "Plate", "Plate")
# Alternative column orders used when exporting .lgen files instead:
#col_order <- c("ID", "ID", "SNP.Name", "Allele1.F", "Allele2.F")
#col_order <- c("ID", "ID", "SNP.Name", "Allele1.AB", "Allele2.AB")
# Step 4: build the output data frame with the chosen columns/order.
data40x <- fam[, col_order]
head(data40x)
# Step 6: export tab-separated, without headers/quotes, as PLINK expects.
write.table(data40x, file = "data40-3.fam",sep="\t", row.names=FALSE, col.names=FALSE, quote = F)
# Step 7: alternatively, replace "- -" with "0 0" (missing-genotype code)
# in a text editor.
####### Output is ready for use with PLINK
| /fam.R | no_license | MBZandi/Convert-the-Illumina-Final-Report-to-Plink | R | false | false | 939 | r | ######## pipeLine for final report to plink format
#remove all stored values
rm(list=ls(all=TRUE))
#change working directory
setwd(choose.dir())
getwd()
#2: read the data as follow
fam <- read.table(file="Sample_Map.txt", header=F, skip=1)
head(fam)
# 3: Define the col name of new file
colnames(fam) <- c("Index", "Name", "ID", "Gender", "Plate")
head(fam)
col_order <- c("Name", "Name", "Plate", "Plate", "Plate", "Plate")
#col_order <- c("ID", "ID", "SNP.Name", "Allele1.F", "Allele2.F")
#col_order <- c("ID", "ID", "SNP.Name", "Allele1.AB", "Allele2.AB")
#4: Define the new datafile with new col order and changeing the cols
data40x <- fam[, col_order]
head(data40x)
# 6: export the data to the lgen format for plink as follow:
write.table(data40x, file = "data40-3.fam",sep="\t", row.names=FALSE, col.names=FALSE, quote = F)
# 7: Or Replace the "- -" with "0 0" in txtpad sofware
####### Lgen is ready to use
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upset.R
\name{get_labels_from_binary}
\alias{get_labels_from_binary}
\title{Get corresponding label from a binary group value}
\usage{
get_labels_from_binary(data, mask, group = "g", trans = FALSE)
}
\arguments{
\item{data}{data.frame}
\item{mask}{integer vector binary mask for individual labels}
\item{group}{name of the column in data containing binary group value}
\item{trans}{translate the name: if TRUE, use \code{\link{i18n}}; if a function, use it as the translator}
}
\description{
Get corresponding label from a binary group value
}
\seealso{
Other upset:
\code{\link{apply_binary_mask}()},
\code{\link{create_binary_groups}()},
\code{\link{create_binary_mask}()},
\code{\link{upset_plot}()}
}
\concept{upset}
| /man/get_labels_from_binary.Rd | no_license | cturbelin/ifnBase | R | false | true | 789 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upset.R
\name{get_labels_from_binary}
\alias{get_labels_from_binary}
\title{Get corresponding label from a binary group value}
\usage{
get_labels_from_binary(data, mask, group = "g", trans = FALSE)
}
\arguments{
\item{data}{data.frame}
\item{mask}{integer vector binary mask for individual labels}
\item{group}{name of the column in data containing binary group value}
\item{trans}{translate the name: if TRUE, use \code{\link{i18n}}; if a function, use it as the translator}
}
\description{
Get corresponding label from a binary group value
}
\seealso{
Other upset:
\code{\link{apply_binary_mask}()},
\code{\link{create_binary_groups}()},
\code{\link{create_binary_mask}()},
\code{\link{upset_plot}()}
}
\concept{upset}
|
library(PenCoxFrail)
### Name: pencoxfrailControl
### Title: Control Values for 'pencoxfrail' fit
### Aliases: pencoxfrailControl
### ** Examples
# Use different weighting of the two penalty parts
# and lighten the convergence criterion
pencoxfrailControl(zeta=0.3, conv.eps=1e-3)
| /data/genthat_extracted_code/PenCoxFrail/examples/pencoxfrailControl.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 289 | r | library(PenCoxFrail)
### Name: pencoxfrailControl
### Title: Control Values for 'pencoxfrail' fit
### Aliases: pencoxfrailControl
### ** Examples
# Use different weighting of the two penalty parts
# and lighten the convergence criterion
pencoxfrailControl(zeta=0.3, conv.eps=1e-3)
|
#' Ipsen Mikhailov
#'
#' Computes the Ipsen-Mikhailov distance between two graphs: the L2
#' distance between their Laplacian spectral densities, each smoothed with
#' a Lorentzian kernel of half-width \code{hwhm}.
#'
#' @param graph_1 igraph or matrix object.
#' @param graph_2 igraph or matrix object.
#' @param hwhm Numeric parameter for the lorentzian kernel.
#' @param results_list Logical indicating whether or not to return results list.
#'
#' @return A single numeric distance, or (if \code{results_list} is TRUE) a
#'   list containing the distance and the adjacency matrices.
#'
#' @export
dist_ipsen_mikhailov <- function(graph_1, graph_2, hwhm = 0.08, results_list = FALSE) UseMethod("dist_ipsen_mikhailov")
#' @export
dist_ipsen_mikhailov.igraph <- function(graph_1, graph_2, hwhm = 0.08, results_list = FALSE) {
  assertthat::assert_that(
    all(igraph::is.igraph(graph_1), igraph::is.igraph(graph_2)),
    msg = "Graphs must be igraph objects."
  )
  # Convert both graphs to dense adjacency matrices and delegate to the
  # matrix method, which does the actual spectral computation.
  adj_1 <- igraph::as_adjacency_matrix(graph_1, sparse = FALSE)
  adj_2 <- igraph::as_adjacency_matrix(graph_2, sparse = FALSE)
  dist_ipsen_mikhailov.matrix(adj_1, adj_2, hwhm, results_list)
}
#' @export
dist_ipsen_mikhailov.matrix <- function(graph_1, graph_2, hwhm = 0.08, results_list = FALSE) {
  assertthat::assert_that(
    all(is.matrix(graph_1), is.matrix(graph_2)),
    msg = "Graphs must be adjacency matrices."
  )
  # Both spectra are truncated at N - 1 modes with N taken from graph_1, so
  # the graphs must be the same size for the densities to be comparable.
  assertthat::assert_that(
    all(dim(graph_1) == dim(graph_2)),
    msg = "Adjacency matrices must have the same dimensions."
  )
  N <- dim(graph_1)[1]

  # Laplacian matrices for both graphs.
  # The only laplacian function in igraph takes graphs, not matrices; this
  # still seems easier than building D - A by hand.
  L1 <- igraph::laplacian_matrix(igraph::graph_from_adjacency_matrix(graph_1), normalized = FALSE, sparse = FALSE)
  L2 <- igraph::laplacian_matrix(igraph::graph_from_adjacency_matrix(graph_2), normalized = FALSE, sparse = FALSE)

  # Vibrational modes: square roots of the Laplacian eigenvalues, dropping
  # the first (smallest-magnitude) one; abs() guards against tiny negative
  # values from floating-point rounding.
  w1 <- sqrt(abs(eigen(L1, symmetric = TRUE, only.values = TRUE)$values[2:N]))
  w2 <- sqrt(abs(eigen(L2, symmetric = TRUE, only.values = TRUE)$values[2:N]))

  # Normalization constants so each Lorentzian spectral density integrates
  # to one over [0, Inf).
  norm1 <- (N - 1) * pi / 2 - sum(atan(-w1 / hwhm))
  norm2 <- (N - 1) * pi / 2 - sum(atan(-w2 / hwhm))

  # Spectral densities: sums of Lorentzians of half-width hwhm centered at
  # the modes.
  density1 <- function(w) {
    sum(hwhm / ((w - w1)^2 + hwhm^2)) / norm1
  }
  density2 <- function(w) {
    sum(hwhm / ((w - w2)^2 + hwhm^2)) / norm2
  }

  # Ipsen-Mikhailov distance: L2 norm of the density difference.
  func <- function(w) {
    (density1(w) - density2(w))^2
  }
  dist <- sqrt(stats::integrate(Vectorize(func), 0, Inf, subdivisions = 100)$value)

  if (results_list) {
    # Fixed: the original used c(graph_1, graph_2), which flattens both
    # matrices into a single numeric vector; return the matrices themselves.
    ret <- list(dist, list(graph_1, graph_2))
    names(ret) <- c("dist", "adjacency matrices")
    ret
  } else {
    dist
  }
}
| /R/ipsen-mikhailov.R | permissive | Fagan-Lab/disgraph | R | false | false | 2,429 | r | #' Ipsen Mikhailov
#'
#' @param graph_1 igraph or matrix object.
#' @param graph_2 igraph or matrix object.
#' @param hwhm Numeric parameter for the lorentzian kernel.
#' @param results_list Logical indicating whether or not to return results list.
#'
#' @export
dist_ipsen_mikhailov <- function(graph_1, graph_2, hwhm = 0.08, results_list = FALSE) UseMethod("dist_ipsen_mikhailov")
#' @export
dist_ipsen_mikhailov.igraph <- function(graph_1, graph_2, hwhm = 0.08, results_list = FALSE) {
assertthat::assert_that(
all(igraph::is.igraph(graph_1), igraph::is.igraph(graph_2)),
msg = "Graphs must be igraph objects."
)
dist_ipsen_mikhailov.matrix(
igraph::as_adjacency_matrix(graph_1, sparse = FALSE),
igraph::as_adjacency_matrix(graph_2, sparse = FALSE),
hwhm,
results_list
)
}
#' @export
dist_ipsen_mikhailov.matrix <- function(graph_1, graph_2, hwhm = 0.08, results_list = FALSE) {
  # Ipsen-Mikhailov spectral distance between two graphs given as adjacency
  # matrices. Both matrices are assumed to have the same dimension N x N.
  #
  # graph_1, graph_2  square adjacency matrices of equal size.
  # hwhm              half-width at half-maximum of the Lorentzian kernel.
  # results_list      if TRUE, also return the input adjacency matrices.
  #
  # Returns the distance (a scalar), or a list(dist, "adjacency matrices")
  # when results_list = TRUE.
  #
  # Same validation message as before, without the assertthat dependency.
  if (!(is.matrix(graph_1) && is.matrix(graph_2))) {
    stop("Graphs must be adjacency matrices.", call. = FALSE)
  }
  N <- dim(graph_1)[1]
  # Unnormalized graph Laplacian L = D - A computed directly from the
  # adjacency matrix (row sums give the degrees); equivalent to the previous
  # igraph::laplacian_matrix() round-trip for 0/1 adjacency matrices.
  L1 <- diag(rowSums(graph_1)) - graph_1
  L2 <- diag(rowSums(graph_2)) - graph_2
  # Vibrational modes w_i = sqrt(lambda_i). eigen() returns eigenvalues in
  # DECREASING order, so the null mode of the positive-semidefinite Laplacian
  # is the LAST one; drop it by keeping indices 1..(N-1). (The previous code
  # used values[2:N], which dropped the largest eigenvalue and kept the zero
  # mode instead.)
  w1 <- sqrt(abs(eigen(L1, symmetric = TRUE, only.values = TRUE)$values[seq_len(N - 1)]))
  w2 <- sqrt(abs(eigen(L2, symmetric = TRUE, only.values = TRUE)$values[seq_len(N - 1)]))
  # Normalization constants: integral of each sum of Lorentzians over [0, Inf).
  norm1 <- (N - 1) * pi / 2 - sum(atan(-w1 / hwhm))
  norm2 <- (N - 1) * pi / 2 - sum(atan(-w2 / hwhm))
  # Normalized spectral densities: sums of Lorentzians of half-width hwhm
  # centered at the vibrational modes.
  density1 <- function(w) sum(hwhm / ((w - w1)^2 + hwhm^2)) / norm1
  density2 <- function(w) sum(hwhm / ((w - w2)^2 + hwhm^2)) / norm2
  squared_diff <- function(w) (density1(w) - density2(w))^2
  # L2 distance between the two spectral densities.
  dist <- sqrt(stats::integrate(Vectorize(squared_diff), 0, Inf, subdivisions = 100)$value)
  if (results_list) {
    # Return the matrices as a list: c(graph_1, graph_2) would flatten both
    # matrices into a single numeric vector. This also replaces a dead
    # `results` list that was built but never returned.
    list(dist = dist, "adjacency matrices" = list(graph_1, graph_2))
  } else {
    dist
  }
}
|
VMPDSigmaLL <- function(vmName) {
  # Build a 'VMP' longitudinal-longitudinal DSigma observable and tag it
  # with the vector-meson name. structure() attaches the attribute in one
  # step, exactly as the previous attr()<- assignment did.
  structure(DSigma('VMP', 'LL'), vmName = vmName)
}
# NOTE(review): `z` and `mass` are taken from the enclosing environment, not
# passed as arguments -- presumably package-level globals; confirm they are
# initialized before this method runs. The default `Q2 = Q2` is
# self-referential, so callers must always supply Q2 explicitly.
getExternalStateFactor.VMPDSigmaLL <- function(vmpll, Q2 = Q2, alpha = 0) {
  # Non-normalizable U(1) mode profile evaluated over z for the given Q2.
  f1 <- getU1NNMode(Q2 = Q2, alpha = alpha)$dfQ(z)
  # Normalizable U(1) mode profile over the same grid.
  f3 <- getU1NormalizableMode()$dfm(z)
  # Interpolating spline of the product, rescaled by sqrt(Q2) * mass.
  splinefun(z, f1 * f3 / (sqrt(Q2) * mass))
}
# The C-factor for the LL polarization is the same as for the generic
# VMP DSigma observable.
getCfact.VMPDSigmaLL <- getCfact.VMPDSigma
| /R/VMPDSigmaLL.R | permissive | rcarcasses/HQCD-P | R | false | false | 364 | r | VMPDSigmaLL <- function(vmName) {
obs <- DSigma('VMP', 'LL')
attr(obs, 'vmName') <- vmName
obs
}
getExternalStateFactor.VMPDSigmaLL <- function(vmpll, Q2 = Q2, alpha = 0) {
f1 <- getU1NNMode(Q2 = Q2, alpha = alpha)$dfQ(z)
f3 <- getU1NormalizableMode()$dfm(z)
splinefun(z, f1 * f3 / (sqrt(Q2) * mass))
}
getCfact.VMPDSigmaLL <- getCfact.VMPDSigma
|
######################################################
#
# Iteratively Weighted Penalized Regression
#
######################################################
IterWeight <- function(y.train, X.train, y.test, X.test,
alpha = 0.2, tol = 0.001, maxCount = 20,
left.cut.train = quantile(y.train, 1/4),
right.cut.train = quantile(y.train, 3/4),
left.cut.test = quantile(y.train, 1/4),
right.cut.test = quantile(y.train, 3/4),
nfolds4lambda = 3,
tailToWeight = c("left","right", "both"),
print.out = TRUE) {
#############################################
trnsize <- length(y.train)
tstsize <- length(y.test)
#================================
# Initial Fitting
#================================
cat("*Starting initial fit.\n")
cv.linmod <- cv.glmnet(x = X.train, y= y.train, family= "gaussian",
type.measure = "mse",
alpha = alpha,
nfolds = nfolds4lambda,
intercept = TRUE,
standardize = TRUE)
lambda <- cv.linmod$lambda.min
EN.linmod <- glmnet(x = X.train, y= y.train, family= "gaussian",
lambda = lambda, alpha = alpha,
intercept = TRUE,
standardize = TRUE)
cat("*Finished initial fit.\n")
yhat.train <- predict(EN.linmod, newx = X.train)
init.tail.err <- rmse(y.train, yhat.train, direction = tailToWeight,
left.cut = left.cut.train, right.cut = right.cut.train)
tail.err <- tol+1
count <- 0
#================================
# Iteratted Weighting
#================================
cat("*Starting iterations.\n")
t <- proc.time()
while((tail.err >= tol) & (count <= maxCount)) {
wt <- Weights(y = y.train, yhat = yhat.train, tail = tailToWeight,
left.cut = left.cut.train, right.cut = right.cut.train)
cv.EN.linmod <- cv.glmnet(x = X.train, y= y.train, family= "gaussian",
weights = wt,
type.measure = "mse",
nfolds = nfolds4lambda,
intercept = TRUE,
standardize = TRUE
)
lambda <- cv.EN.linmod$lambda.min
EN.linmod <- glmnet(x = X.train, y= y.train, family= "gaussian",
lambda= lambda, alpha = alpha, weights = wt,
intercept = TRUE,
standardize = TRUE)
yhat.train <- predict(EN.linmod, newx = X.train)
tail.err <- rmse(y = y.train, yhat = yhat.train, direction = tailToWeight,
left.cut = left.cut.train, right.cut = right.cut.train)
count <- count + 1
if(print.out){
cat("count = ", count, ", Training Tail Error = ", tail.err,"\n")
}
}
timeTaken <- proc.time() - t
cat("*Finished iterations.\n")
count <- count-1
optBeta <- EN.linmod$beta
sparsity <- length(which(optBeta!=0))
#=======================================
# Predicted Values
#=======================================
yhat.test <- predict(EN.linmod, newx = X.test)
# RMSE results
rmse.all <- sqrt(sum((yhat.test-y.test)^2))/sqrt(length(y.test))
rmse.left <- rmse(y = y.test, yhat = yhat.test,
direction = "left",
left.cut = left.cut.test,
right.cut = right.cut.test)
rmse.right <- rmse(y = y.test, yhat = yhat.test,
direction = "right",
left.cut = left.cut.test,
right.cut = right.cut.test)
rmse.both <- rmse(y = y.test, yhat = yhat.test,
direction = "both",
left.cut = left.cut.test,
right.cut = right.cut.test)
if(print.out){
cat("\n====================================================\n",
" Iterated Weighting \n",
"\n----------------------------------------------------",
"\nRMSE - Total :", round(rmse.all, 4),
"\nRMSE - Left Tail :", round(rmse.left, 4),
"\nRMSE - Right Tail :", round(rmse.right, 4),
"\nRMSE - Both Tails :", round(rmse.both, 4),
"\n----------------------------------------------------",
"\nSparsity :", sparsity,
"\nalpha :", round(alpha, 4),
"\nlambda :", round(lambda, 4),
"\nNo. of Iterations :", count,
"\nTime Taken :", timeTaken["elapsed"], " seconds",
"\n====================================================\n")
}
res <- list(yhat.test = yhat.test,
finalENModel = EN.linmod,
rmse.all = rmse.all,
rmse.left = rmse.left,
rmse.right = rmse.right,
rmse.both = rmse.both,
sparsity = sparsity,
timeTaken = timeTaken["elapsed"]
)
return(res)
}
######################################################
#
# Iterated Weighting Scheme
#
######################################################
Weights <- function(y, yhat, tail = c("left", "right", "both"),
                    left.cut = quantile(y, 1/4),
                    right.cut = quantile(y, 3/4)) {
  # Observation weights for the tail-focused refit: observations OUTSIDE the
  # requested tail(s) get weight 0; observations inside keep exp(1 + |resid|),
  # renormalized so the weights sum to length(y).
  #
  # y, yhat    numeric vectors of observed and fitted values (same length).
  # tail       which tail(s) to up-weight; a single choice.
  # left.cut, right.cut  tail boundaries (defaults: quartiles of y).
  #
  # `tail` used to be compared directly against the full default vector,
  # which errors when the argument is left at its default; match.arg()
  # validates it (and selects "left" by default). The three near-identical
  # branches are also unified, and the redundant abs(abs(...)) is dropped.
  tail <- match.arg(tail)
  n <- length(y)
  w <- exp(1 + abs(y - yhat))
  # Indices outside the selected tail(s) are zeroed out.
  outside <- switch(tail,
    left  = which(y > left.cut),
    right = which(y < right.cut),
    both  = which(y > left.cut & y < right.cut)
  )
  w[outside] <- 0.0
  # Renormalize so the total weight equals the sample size.
  (w / sum(w)) * n
}
######################################################
#
# RMSE Measure
#
######################################################
rmse <- function(y, yhat, direction = c("left", "right", "both"),
                 left.cut = quantile(y, 1/4),
                 right.cut = quantile(y, 3/4)) {
  # Root-mean-squared error restricted to the observations lying in the
  # requested tail(s) of y.
  #
  # y, yhat    numeric vectors of observed and fitted values (same length).
  # direction  "left" (y <= left.cut), "right" (y >= right.cut) or "both".
  # left.cut, right.cut  tail boundaries (defaults: quartiles of y).
  #
  # `direction` used to be compared against the full default vector, which
  # errors when left at its default; match.arg() validates it instead. The
  # three duplicated branches are unified into one computation.
  direction <- match.arg(direction)
  idx <- switch(direction,
    left  = which(y <= left.cut),
    right = which(y >= right.cut),
    both  = which(y <= left.cut | y >= right.cut)
  )
  # As before, an empty tail yields NaN (0/0).
  sqrt(sum((y[idx] - yhat[idx])^2) / length(idx))
}
Validation <- function(n_fold, X_train, Y_train) {
  # Split the row indices of X_train into n_fold contiguous validation folds.
  # Returns an n_fold x 1 matrix-of-lists; element [[i, 1]] holds the row
  # indices of fold i. The first (n_fold - 1) folds have floor(n / n_fold)
  # rows each; the last fold absorbs the remainder.
  #
  # Y_train is accepted for interface compatibility but is not used (the
  # original also never used it, and computed a `list_train_fold` that was
  # never returned -- both removed here).
  n_obs <- dim(X_train)[1]
  fold_size <- n_obs %/% n_fold
  list_val_fold <- matrix(list(), nrow = n_fold, ncol = 1)
  for (i in seq_len(n_fold)) {
    start <- fold_size * (i - 1) + 1
    # The last fold runs to the final observation.
    end <- if (i == n_fold) n_obs else fold_size * i
    # seq() replaces the original `a + 1:b` expressions, which produced the
    # same indices only because `:` binds tighter than `+`.
    list_val_fold[[i, 1]] <- seq(start, end)
  }
  list_val_fold
}
| /RWEN/RWEN.R | no_license | Kimseonghun-468/GCN | R | false | false | 8,244 | r | ######################################################
#
# Iteratively Weighted Penalized Regression
#
######################################################
IterWeight <- function(y.train, X.train, y.test, X.test,
alpha = 0.2, tol = 0.001, maxCount = 20,
left.cut.train = quantile(y.train, 1/4),
right.cut.train = quantile(y.train, 3/4),
left.cut.test = quantile(y.train, 1/4),
right.cut.test = quantile(y.train, 3/4),
nfolds4lambda = 3,
tailToWeight = c("left","right", "both"),
print.out = TRUE) {
#############################################
trnsize <- length(y.train)
tstsize <- length(y.test)
#================================
# Initial Fitting
#================================
cat("*Starting initial fit.\n")
cv.linmod <- cv.glmnet(x = X.train, y= y.train, family= "gaussian",
type.measure = "mse",
alpha = alpha,
nfolds = nfolds4lambda,
intercept = TRUE,
standardize = TRUE)
lambda <- cv.linmod$lambda.min
EN.linmod <- glmnet(x = X.train, y= y.train, family= "gaussian",
lambda = lambda, alpha = alpha,
intercept = TRUE,
standardize = TRUE)
cat("*Finished initial fit.\n")
yhat.train <- predict(EN.linmod, newx = X.train)
init.tail.err <- rmse(y.train, yhat.train, direction = tailToWeight,
left.cut = left.cut.train, right.cut = right.cut.train)
tail.err <- tol+1
count <- 0
#================================
# Iteratted Weighting
#================================
cat("*Starting iterations.\n")
t <- proc.time()
while((tail.err >= tol) & (count <= maxCount)) {
wt <- Weights(y = y.train, yhat = yhat.train, tail = tailToWeight,
left.cut = left.cut.train, right.cut = right.cut.train)
cv.EN.linmod <- cv.glmnet(x = X.train, y= y.train, family= "gaussian",
weights = wt,
type.measure = "mse",
nfolds = nfolds4lambda,
intercept = TRUE,
standardize = TRUE
)
lambda <- cv.EN.linmod$lambda.min
EN.linmod <- glmnet(x = X.train, y= y.train, family= "gaussian",
lambda= lambda, alpha = alpha, weights = wt,
intercept = TRUE,
standardize = TRUE)
yhat.train <- predict(EN.linmod, newx = X.train)
tail.err <- rmse(y = y.train, yhat = yhat.train, direction = tailToWeight,
left.cut = left.cut.train, right.cut = right.cut.train)
count <- count + 1
if(print.out){
cat("count = ", count, ", Training Tail Error = ", tail.err,"\n")
}
}
timeTaken <- proc.time() - t
cat("*Finished iterations.\n")
count <- count-1
optBeta <- EN.linmod$beta
sparsity <- length(which(optBeta!=0))
#=======================================
# Predicted Values
#=======================================
yhat.test <- predict(EN.linmod, newx = X.test)
# RMSE results
rmse.all <- sqrt(sum((yhat.test-y.test)^2))/sqrt(length(y.test))
rmse.left <- rmse(y = y.test, yhat = yhat.test,
direction = "left",
left.cut = left.cut.test,
right.cut = right.cut.test)
rmse.right <- rmse(y = y.test, yhat = yhat.test,
direction = "right",
left.cut = left.cut.test,
right.cut = right.cut.test)
rmse.both <- rmse(y = y.test, yhat = yhat.test,
direction = "both",
left.cut = left.cut.test,
right.cut = right.cut.test)
if(print.out){
cat("\n====================================================\n",
" Iterated Weighting \n",
"\n----------------------------------------------------",
"\nRMSE - Total :", round(rmse.all, 4),
"\nRMSE - Left Tail :", round(rmse.left, 4),
"\nRMSE - Right Tail :", round(rmse.right, 4),
"\nRMSE - Both Tails :", round(rmse.both, 4),
"\n----------------------------------------------------",
"\nSparsity :", sparsity,
"\nalpha :", round(alpha, 4),
"\nlambda :", round(lambda, 4),
"\nNo. of Iterations :", count,
"\nTime Taken :", timeTaken["elapsed"], " seconds",
"\n====================================================\n")
}
res <- list(yhat.test = yhat.test,
finalENModel = EN.linmod,
rmse.all = rmse.all,
rmse.left = rmse.left,
rmse.right = rmse.right,
rmse.both = rmse.both,
sparsity = sparsity,
timeTaken = timeTaken["elapsed"]
)
return(res)
}
######################################################
#
# Iterated Weighting Scheme
#
######################################################
Weights <- function(y, yhat, tail = c("left", "right", "both"),
left.cut = quantile(y, 1/4),
right.cut = quantile(y, 3/4)) {
n <- length(y)
diff <- abs(y-yhat)
if(tail == "left") {
w <- exp(1 + abs(diff))
ind <- which(y > left.cut)
#cat(w[-ind],"\n")
w[ind] <- 0.0
w <- (w/sum(w))*n
}
if(tail == "right") {
w <- exp(1 + abs(diff))
ind <- which(y < right.cut)
w[ind] <- 0.0
w <- (w/sum(w))*n
}
if(tail == "both") {
w <- exp(1 + abs(diff))
ind <- which((y > left.cut) & (y < right.cut))
w[ind] <- 0.0
w <- (w/sum(w))*n
}
return(w)
}
######################################################
#
# RMSE Measure
#
######################################################
rmse <- function(y, yhat, direction = c("left", "right", "both"),
left.cut = quantile(y, 1/4),
right.cut = quantile(y, 3/4)) {
if(direction == "left") {
lefttailed.ind <- which((y <= left.cut))
lefttailed.n <- length(lefttailed.ind )
SS <- sum((y[lefttailed.ind] - yhat[lefttailed.ind])^2)
rmse <- sqrt(SS/lefttailed.n)
}
if(direction == "right") {
righttailed.ind <- which((y >= right.cut))
righttailed.n <- length(righttailed.ind )
SS <- sum((y[righttailed.ind] - yhat[righttailed.ind])^2)
rmse <- sqrt(SS/righttailed.n)
}
if(direction == "both") {
twotailed.ind <- which((y <= left.cut) | (y >= right.cut))
twotailed.n <- length(twotailed.ind )
SS <- sum((y[twotailed.ind] - yhat[twotailed.ind])^2)
rmse <- sqrt(SS/twotailed.n)
}
return(rmse)
}
Validation <- function(n_fold, X_train, Y_train){
list_train_fold = matrix(list(),nrow =n_fold, ncol = 1)
list_val_fold = matrix(list(),nrow = n_fold, ncol =1)
list_train = c()
Number = dim(X_train)[1]%/%n_fold
for (i in 1:dim(X_train)[1])
list_train <- c(list_train,i)
for (i in 1:n_fold){
list_val = c()
if (i == n_fold)
{
for (j in (Number*(i-1))+1:(dim(X_train)[1]-Number*(i-1)))
list_val <- c(list_val, j)
list_train_fold[[i,1]] = setdiff(list_train,list_val)
list_val_fold[[i,1]] = c(list_val)
}
if (i !=n_fold)
{
for (j in (Number*(i-1))+1:Number)
list_val = c(list_val,j)
list_train_fold[[i, 1]] = setdiff(list_train,list_val)
list_val_fold[[i, 1]] = list_val
}
}
return (list_val_fold)
}
|
testlist <- list(genotype = c(-737640063L, -2122219135L, -2129330220L, -16206719L, 134217728L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(detectRUNS:::genoConvertCpp,testlist)
str(result) | /detectRUNS/inst/testfiles/genoConvertCpp/libFuzzer_genoConvertCpp/genoConvertCpp_valgrind_files/1609875053-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 468 | r | testlist <- list(genotype = c(-737640063L, -2122219135L, -2129330220L, -16206719L, 134217728L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(detectRUNS:::genoConvertCpp,testlist)
str(result) |
################################################
# ncvreg works for Poisson regression
################################################
# With lambda.min=0 the unpenalized end of the path should reproduce the
# glm() maximum-likelihood fit for both SCAD and MCP penalties.
# NOTE(review): no set.seed(), so the data differ between runs; the checks
# rely on the 0.01 tolerance rather than exact reproducibility.
n <- 200
p <- 50
X <- matrix(rnorm(n*p), ncol=p)
y <- rpois(n, 1)
# Unpenalized Poisson MLE used as the reference coefficient vector.
beta <- glm(y~X, family="poisson")$coef
scad <- coef(ncvreg(X, y, lambda.min=0, family="poisson", penalty="SCAD", eps=.0001), lambda=0)
mcp <- coef(ncvreg(X, y, lambda.min=0, family="poisson", penalty="MCP", eps=.0001), lambda=0)
expect_equivalent(scad, beta,tolerance=.01)
expect_equivalent(mcp, beta,tolerance=.01)
##############################################
# ncvreg reproduces lasso: poisson
##############################################
# glmnet is a hard requirement of this check: library() stops immediately if
# the package is missing, whereas the previous require() merely returned
# FALSE and let the script fail later with a less helpful error.
library(glmnet)
# ncvreg with penalty="lasso" should match glmnet along the same lambda path.
nlasso <- coef(fit <- ncvreg(X, y, family="poisson", penalty="lasso"))
plot(fit, log=TRUE)
glasso <- as.matrix(coef(fit <- glmnet(X, y, family="poisson", lambda=fit$lambda)))
plot(fit, "lambda")
expect_equivalent(nlasso, glasso, tolerance=.01)
################################
# logLik() is correct
################################
# The unpenalized end of the path (index 100, lambda.min=0) should agree
# with the plain glm() fit in both log-likelihood and AIC.
fit.mle <- glm(y~X, family="poisson")
fit <- ncvreg(X, y, lambda.min=0, family="poisson")
expect_equivalent(logLik(fit)[100], logLik(fit.mle)[1], tol= .001)
expect_equivalent(AIC(logLik(fit))[100], AIC(fit.mle), tol= .001)
##############################################
# ncvreg dependencies work: poisson
##############################################
# Predict
# Smoke tests only: results are printed and discarded, we just check that
# every predict() type runs without error.
predict(fit, X, 'link')[1:5, 1:5]
predict(fit, X, 'response')[1:5, 1:5]
predict(fit, X, 'coef')[1:5, 1:5]
head(predict(fit, X, 'vars'))
head(predict(fit, X, 'nvars'))
#################################################
# cv.ncvreg() options work for poisson
#################################################
# Fresh data with a sparse true signal (only the first two coefficients).
X <- matrix(rnorm(n*p), ncol=p)
b <- c(-1, 1, rep(0, p-2))
y <- rpois(n, exp(X%*%b))
par(mfrow=c(2,2))
cvfit <- cv.ncvreg(X, y, family="poisson")
plot(cvfit, type="all")
summary(cvfit)
head(predict(cvfit, type="coefficients"))
predict(cvfit, type="vars")
predict(cvfit, type="nvars")
head(predict(cvfit, X=X, "link"))
head(predict(cvfit, X=X, "response"))
# Repeat with a null model (constant rate) to exercise the degenerate case.
y <- rpois(n, 1)
cvfit <- cv.ncvreg(X, y, family="poisson")
par(mfrow=c(2,2))
plot(cvfit, type="all")
| /inst/tinytest/poisson.R | no_license | pbreheny/ncvreg | R | false | false | 2,134 | r | ################################################
# ncvreg works for Poisson regression
################################################
n <- 200
p <- 50
X <- matrix(rnorm(n*p), ncol=p)
y <- rpois(n, 1)
beta <- glm(y~X, family="poisson")$coef
scad <- coef(ncvreg(X, y, lambda.min=0, family="poisson", penalty="SCAD", eps=.0001), lambda=0)
mcp <- coef(ncvreg(X, y, lambda.min=0, family="poisson", penalty="MCP", eps=.0001), lambda=0)
expect_equivalent(scad, beta,tolerance=.01)
expect_equivalent(mcp, beta,tolerance=.01)
##############################################
# ncvreg reproduces lasso: poisson
##############################################
require(glmnet)
nlasso <- coef(fit <- ncvreg(X, y, family="poisson", penalty="lasso"))
plot(fit, log=TRUE)
glasso <- as.matrix(coef(fit <- glmnet(X, y, family="poisson", lambda=fit$lambda)))
plot(fit, "lambda")
expect_equivalent(nlasso, glasso, tolerance=.01)
################################
# logLik() is correct
################################
fit.mle <- glm(y~X, family="poisson")
fit <- ncvreg(X, y, lambda.min=0, family="poisson")
expect_equivalent(logLik(fit)[100], logLik(fit.mle)[1], tol= .001)
expect_equivalent(AIC(logLik(fit))[100], AIC(fit.mle), tol= .001)
##############################################
# ncvreg dependencies work: poisson
##############################################
# Predict
predict(fit, X, 'link')[1:5, 1:5]
predict(fit, X, 'response')[1:5, 1:5]
predict(fit, X, 'coef')[1:5, 1:5]
head(predict(fit, X, 'vars'))
head(predict(fit, X, 'nvars'))
#################################################
# cv.ncvreg() options work for poisson
#################################################
X <- matrix(rnorm(n*p), ncol=p)
b <- c(-1, 1, rep(0, p-2))
y <- rpois(n, exp(X%*%b))
par(mfrow=c(2,2))
cvfit <- cv.ncvreg(X, y, family="poisson")
plot(cvfit, type="all")
summary(cvfit)
head(predict(cvfit, type="coefficients"))
predict(cvfit, type="vars")
predict(cvfit, type="nvars")
head(predict(cvfit, X=X, "link"))
head(predict(cvfit, X=X, "response"))
y <- rpois(n, 1)
cvfit <- cv.ncvreg(X, y, family="poisson")
par(mfrow=c(2,2))
plot(cvfit, type="all")
|
# Extract the enum declarations from the translation unit `tu` and generate
# the C sources (R_auto_enums.h / R_auto_enums.cc).
enums = getEnums(tu)
# Clark: The enumerated types. I'll have to stop here and inspect these.
# Keep only enums declared in poppler headers, then drop the Activation ones.
enums = enums[grep("poppler", sapply(enums, getFileName))]
enums = enums[ !grepl("Activation", names(enums)) ]
cenums = lapply(enums, makeEnumDef)
# NOTE(review): renums is computed but never written out below -- confirm
# whether the R-side enum classes are emitted elsewhere or this is dead code.
renums = lapply(enums, makeEnumClass)
# Header file: x[1:2] of each definition plus a ';' -- presumably the
# declaration part of each enum. TODO confirm the makeEnumDef() layout.
cat(sapply(cenums, function(x) paste(c(x[1:2], ";"), collapse = " ")), sep = "\n", file = "../src/R_auto_enums.h")
# Implementation file: the full definitions, preceded by the package header.
cat(c('#include "Rpoppler.h"', sapply(cenums, paste, collapse = "\n")), sep = "\n\n", file = "../src/R_auto_enums.cc")
| /TU/enums.R | permissive | clarkfitzg/Ropencv | R | false | false | 518 | r | enums = getEnums(tu)
# Clark: The enumerated types. I'll have to stop here and inspect these.
enums = enums[grep("poppler", sapply(enums, getFileName))]
enums = enums[ !grepl("Activation", names(enums)) ]
cenums = lapply(enums, makeEnumDef)
renums = lapply(enums, makeEnumClass)
cat(sapply(cenums, function(x) paste(c(x[1:2], ";"), collapse = " ")), sep = "\n", file = "../src/R_auto_enums.h")
cat(c('#include "Rpoppler.h"', sapply(cenums, paste, collapse = "\n")), sep = "\n\n", file = "../src/R_auto_enums.cc")
|
# WSdsm.R (WordSpace / Distributional Semantic Model)
#
# Script constitué par un ensemble de fonctions destinées à faciliter l'usage
# de la bibliothèque R 'wordspace' (Stefan Evert), à partir d'un corpus enregistré sous CWB.
# Un premier groupe de fonctions est destiné à créer un DSM et à calculer, à partir de ce DSM,
# les champs des lemmes choisis, avec visualisation par analyse factorielle des correspondances (AFC),
# et à extraire les mots-clés d'un ensemble à partir des valences lexicales généralisées pondérées.
# Le second groupe permet d'appliquer les mêmes procédures sur un corpus
# découpé en tranches : l'objectif est l'analyse de l'évolution d'un champ sémantique,
# la visualisation est conçue pour faire ressortir les éléments liés plus particulièrement
# à telle ou telle période. Les deux groupes doivent être employés de manière complémentaire.
# version pré-alpha 0.3 AG novembre 2015 - mars 2017. GPL3
# TODO : autres méthodes d'examen des évolutions.
#########################################################################################
# premier groupe : analyses globales > champs sémantiques
#########################################################################################
corpus2scan <- function(corp, dis=3, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA= "lemma", objetB = "lemma", D=0, F="", attr="", val="", destination , flag=TRUE ) {
  # Build a single summary file of the complete co-occurrence scan of a CWB
  # corpus (or part of one), obtained by applying 'cwb-scan-corpus' to a
  # window of the chosen width on each side of the pivot.
  #
  # corp         CWB corpus name.
  # dis          window half-width (tokens on each side of the pivot).
  # posA, posB   POS regex filters for the pivot and the co-occurrent.
  # objetA/B     positional attribute scanned (e.g. "lemma").
  # D, F         start/end corpus positions (F = "" means end of corpus).
  # attr, val    optional structural-attribute restriction.
  # destination  path of the resulting TSV on disk (mandatory).
  # flag         print elapsed time when TRUE.
  #
  # Double constraint: memory size and execution time. The program scans two
  # columns and counts all identical pairs; the columns are taken one after
  # the other to sweep the whole chosen window, saving to disk as it goes;
  # afterwards the per-column files are fetched and concatenated.
  # Progress messages during execution are very approximate -- they only
  # exist to keep the user patient!
  if (flag==TRUE){
    t1 <- Sys.time()
  }
  if (destination == "") {
    stop(" Indiquer une destination pour le scan ", call.=FALSE)
  }
  library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
  options(scipen=999) # disable scientific notation (breaks cqp arguments)
  efftt <- size(corpus(corp))
  effpart <- 0
  if (F==""){
    F <- efftt
  }
  if (D!=0 | F!="") {
    effpart <- F-D
  }
  if (attr!="") {
    def.scorp <- paste('[lemma=".*" %cd]', "::match.", attr, "=\"", val, "\"", sep="")
    CRP <- corpus(corp)
    crp <- subcorpus(CRP, def.scorp)
    effpart <- size(crp)
  }
  # Loop: one cwb-scan-corpus run per window column.
  for (i in 0:(dis*2)) {
    if (i==dis){
      next()
    }
    # Build the command-line parameters / is -b excessive??
    params <- paste("-b 200000000 -q -s ",D, sep="")
    if (F != efftt){
      params <- paste(params, " -e ",F, sep="")
    }
    # if (reg != ""){
    # params <- paste(params, " -r '",reg,"' ", sep="")
    # }
    params <- paste(params, " ",corp, " ", objetA,"+",dis," '?pos+",dis,"=/", posA, "/' pos+",dis," ",objetB,"+",i," '?pos+",i,"=/", posB, "/' pos+",i, sep="")
    if (attr != "" & val != ""){
      params <- paste(params," '?", attr, "=/", val,"/'", sep="")
    }
    sortie <- paste("/tmp/xyzxyz",i,".tsv", sep="")
    # Execute (output is redirected straight to disk).
    # NOTE(review): user-supplied values are interpolated into a command
    # line; only use with trusted corpus/attribute values.
    system2(command="cwb-scan-corpus", args=params, stdout=sortie)
    cat("scan =",i, "sur", dis*2, "\n")
    gc()
  }
  # Concatenate everything into a single file (on disk).
  commd <- paste("cat /tmp/xyzxyz* > ", destination, sep="")
  system(command=commd)
  commd2 <- paste("rm /tmp/xyzxyz*") # clean up the temporary files
  system(command=commd2)
  # Create and save a companion file with the scan parameters
  # (read back later by scan2dsm()).
  destination2 <- paste(destination, "_params", sep="")
  parametres <- c("corpus","eff.total","eff.actuel","distance","posA","posB","objetA","objetB","D","F","attr","val")
  valeurs <- c(corp,efftt,effpart,dis,posA,posB,objetA,objetB,D,F,attr,val)
  infos <- cbind(parametres,valeurs)
  write.table(infos,file=destination2, quote=FALSE,sep="\t",row.names=FALSE)
  if (flag==TRUE) {
    t2 <- Sys.time()
    td <- difftime(t1,t2)
    cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  }
}
#################################################################################
scan2dsm <- function(scan, seuil= 9, coef="simple-ll", transf="log", nproj="", flag=TRUE) {
  # Read a file produced by corpus2scan() (plus its "<scan>_params" companion)
  # back from disk, clean it up, score the co-occurrences and return a
  # ready-to-use 'wordspace' DSM object whose $globals slot documents the scan.
  #
  # scan    path of the scan file written by corpus2scan().
  # seuil   minimum number of non-zero cells required to keep a row/column.
  # coef    association score passed to dsm.score() (e.g. "simple-ll").
  # transf  score transformation (e.g. "log").
  # nproj   if non-empty, number of dimensions for the rSVD projection.
  # flag    print elapsed time when TRUE.
  library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
  # NOTE(review): warnings are disabled globally here and never restored.
  options(warn=-1)
  if (flag==TRUE) {
    t1 <- Sys.time()
  }
  gc()
  scanp <- paste(scan, "_params", sep="")
  # The params file was written WITH a header row but is read with
  # header=FALSE, so row 1 holds the column names and the data start at row 2
  # (hence [2,2] = corpus, [3,2] = eff.total, [4,2] = eff.actuel, ...).
  params.tripl <- read.table(scanp, header=FALSE, sep="\t", stringsAsFactors=FALSE, quote="", fill=TRUE)
  tripl <- read.table(scan, header=FALSE, sep="\t", stringsAsFactors=FALSE, quote="", fill=TRUE)
  tripl <- tripl[, c(2,4,1)]
  names(tripl) <- c("target", "feature", "eff") # cosmetic!
  # Build the sparse DSM from the (target, feature, frequency) triples.
  triplobj <- dsm(target=tripl$target, feature=tripl$feature, score=tripl$eff, N=as.numeric(params.tripl[4,2]), raw.freq=TRUE, sort=TRUE)
  rm(tripl) # free memory
  gc()
  # Pruning: drop rows and columns with too few non-zero cells.
  triplobj <- subset(triplobj, nnzero > seuil, nnzero > seuil, recursive=TRUE)
  # Scoring (keeps the significant co-occurrents).
  triplobjS <- dsm.score(triplobj, score= coef, transform=transf, normalize=TRUE)
  # Optional dimensionality reduction.
  if (nproj != "") {
    triplobjS <- dsm.projection(triplobjS, method="rsvd", n=nproj, oversampling=4)
  }
  # Record the scan parameters in $globals -> a self-documenting DSM!
  triplobjS$globals$corpus <- params.tripl[2,2]
  triplobjS$globals$nblignes <- length(triplobjS$rows$term)
  triplobjS$globals$nbcols <- length(triplobjS$cols$term)
  triplobjS$globals$posA <- params.tripl[6,2]
  triplobjS$globals$posB <- params.tripl[7,2]
  triplobjS$globals$objetA <- params.tripl[8,2]
  triplobjS$globals$objetB <- params.tripl[9,2]
  triplobjS$globals$dis <- params.tripl[5,2]
  triplobjS$globals$effactuel <- as.numeric(params.tripl[4,2])
  triplobjS$globals$D <- as.numeric(params.tripl[10,2])
  triplobjS$globals$F <- as.numeric(params.tripl[11,2])
  if (triplobjS$globals$F==Inf) triplobjS$globals$F <- triplobjS$globals$N-1
  triplobjS$globals$attr <- params.tripl[12,2]
  triplobjS$globals$val <- params.tripl[13,2]
  triplobjS$globals$effcorpus <- params.tripl[3,2]
  triplobjS$globals$seuil <- seuil
  triplobjS$globals$coef <- coef
  triplobjS$globals$transf <- transf
  triplobjS$globals$nproj <- nproj
  # FIX: was `flag==T`; T is a reassignable alias, TRUE is safe and matches
  # the check at the top of the function.
  if (flag==TRUE) {
    t2 <- Sys.time()
    td <- difftime(t1,t2)
    cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n\n")
  }
  cat("lignes : ", length(triplobjS$rows$term), "\n")
  cat("colonnes : ", length(triplobjS$cols$term), "\n")
  return(triplobjS)
}
###############################################################################
corpus2dsm <- function(corp, dis=5, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA= "lemma", objetB = "lemma",D=0, F="", attr="", val="", destination, seuil= 9, coef="simple-ll",transf="log", nproj=""){
  # Convenience wrapper chaining the whole pipeline: start from a corpus, a
  # window width and a choice of POS (pivot and co-occurrent); get back a
  # ready-to-use DSM object. Arguments are forwarded to corpus2scan() and
  # scan2dsm(); note the default window here is dis=5 while corpus2scan()
  # itself defaults to 3.
  t1 <- Sys.time()
  options(warn=-1)
  if (destination == "") {
    stop(" Indiquer une destination pour le scan ", call.=FALSE)
  }
  # 1. scan (written to `destination` on disk)
  corpus2scan(corp=corp, dis=dis, posA=posA, posB=posB, objetA=objetA, objetB = objetB ,D=D, F=F, attr=attr, val=val, destination=destination, flag=FALSE)
  cat("\n","Traitements...","\n")
  gc() # cleanup
  # 2. build the DSM object from the scan left on disk
  res <- scan2dsm(scan=destination, seuil=seuil, coef=coef,transf=transf, nproj=nproj, flag=FALSE)
  t2 <- Sys.time()
  td <- difftime(t1,t2)
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  res
}
################################################################################
dsm2af <- function(dsm, lm, nppv=40, cex=.9, decal=TRUE) {
  # Look up, in a given DSM, the nearest neighbours of a lemma, and display
  # the matrix of their mutual distances through a correspondence analysis
  # (CA). The plot yields something analogous to the Wortfeld in Jost
  # Trier's sense: points are laid out according to their mutual distances,
  # and the various 'clouds' correspond to the sub-sets of the field.
  # Returns an object of class "NPPV" holding all intermediate pieces.
  #
  # dsm    a wordspace DSM object.
  # lm     the pivot lemma whose semantic field is drawn.
  # nppv   number of nearest neighbours to retrieve.
  # cex    base character size for the plot labels.
  # decal  when TRUE, nudge labels apart via lisible() -- a helper defined
  #        elsewhere in this project (not visible here).
  opar <- par(mar=par("mar"))
  on.exit(par(opar))
  library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
  library(ade4, quietly=TRUE, warn.conflicts=FALSE)
  library(circular, quietly=TRUE, warn.conflicts=FALSE)
  #library(MASS, quietly=TRUE, warn.conflicts=FALSE)
  options(warn=-1)
  t1 <- Sys.time()
  # Nearest-neighbour lookup: distances first, then the full distance matrix.
  vec.ppvoisins <- nearest.neighbours(M=dsm, term=lm, n=nppv)
  ppv.names <- names(vec.ppvoisins)
  val.ppvoisins <- cbind(as.character(ppv.names), as.numeric(vec.ppvoisins))
  row.names(val.ppvoisins) <- NULL
  mat.ppvoisins <- nearest.neighbours(M=dsm, term=lm, n=nppv, skip.missing=TRUE, dist.matrix=TRUE)
  res <- list(NULL)
  res[[1]] <- mat.ppvoisins
  res[[2]] <- val.ppvoisins
  # CA on the column table (the matrix is symmetric: row names are reused).
  af.mat.ppvoisins <- dudi.coa(mat.ppvoisins, scannf=FALSE)
  af.util <- af.mat.ppvoisins$co
  # Avoid overlapping labels (delegated to the lisible() helper).
  if (decal==TRUE){
    ymax <- max(af.util[,2])
    ymin <- min(af.util[,2])
    Tbon <- lisible(af.util[,1],af.util[,2],lab=row.names(af.util),mn=ymin, mx=ymax,cex=(cex+.1))
    af.util[,1] <- Tbon[,1]
    af.util[,2] <- Tbon[,2]
  }
  res[[3]] <- af.util
  names(res) <- c("matrice_distances", "vecteur_ppvoisins", "coordonnees")
  # Draw the CA map: the pivot (first row) in red, its neighbours in blue.
  par(mar=c(0.5,0.5,1.7,0.5))
  plot(af.util, type="n", asp=1, axes=FALSE, frame.plot=TRUE)
  text(af.util[1,], labels=row.names(af.util[1,]), cex=(cex+.2), col="red", font=2)
  af.util <- af.util[-1,]
  text(af.util, labels=row.names(af.util), cex=cex, col="blue")
  # Title lines recalling the source DSM and the analysed lemma.
  nbr <- length(dsm$rows[,1])
  nbc <- length(dsm$cols[,1])
  nm.obj <- deparse(substitute(dsm))
  mn <- paste("DSM d'origine : ",nm.obj," (matrice de ", nbc , " sur ", nbr ,"). ",nppv, " éléments.", sep = "")
  title(main = mn, line=1, cex.main=.8, font.main=1, adj = 0)
  titranal <- paste("STRUCTURE GLOBALE DU CHAMP SÉMANTIQUE de *",lm,"*", sep="")
  mtext(titranal, 3, line=0,cex=.8, font=1, adj=0)
  #write.matrix(val.ppvoisins)
  # Print the neighbour list to the console.
  for (i in 1:nppv){
    cat(names(res$vecteur_ppvoisins)[i], "\n")
  }
  class(res) <- "NPPV"
  t2 <- Sys.time()
  td <- difftime(t1,t2)
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  res
}
##################################################################################
dsm2carte <- function(dsm, seuil="", mincoo=3, stopw="", nseg=50, decal=TRUE, cex=.8) {
  # Semantic map of a (sub-)corpus:
  #   1. keyword selection based on the weighted lexical valency;
  #   2. factorial display of the retained set (PCA on co-occurrence scores).
  #
  # Arguments:
  #   dsm    - documented 'dsm' object, as produced by scan2dsm()/corpus2dsm()
  #   seuil  - minimal co-occurrence count for a cell to be counted (for each
  #            row, the number of cells > seuil is computed).
  #            NOTE(review): with the default "" the test `x > seuil` is a
  #            CHARACTER comparison (the numeric cell is coerced to character);
  #            pass a numeric threshold explicitly -- confirm intended default.
  #   mincoo - minimal number of retained cells per row (rows below are dropped)
  #   stopw  - stop-word file, one word per line; "" selects a built-in Latin list
  #   nseg   - number of strongest links drawn between points
  #   decal  - spread labels apart to avoid overlaps (via lisible())
  #   cex    - label size
  # Returns an object of class "carte" (valencies, raw matrix, score matrix,
  # and -- when the map is drawn -- PCA coordinates plus the link table).
  t1 <- Sys.time()
  gc()
  library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
  library(ade4, quietly=TRUE, warn.conflicts=FALSE)
  library(circular, quietly=TRUE, warn.conflicts=FALSE)
  library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
  library(MASS, quietly=TRUE, warn.conflicts=FALSE)
  options(warn=-1)
  options(scipen=999) # disable scientific notation (breaks cqp queries)
  if (!inherits(dsm, "dsm")) stop("en entrée : un objet de classe dsm")
  corp <- dsm$globals$corpus # corpus name
  attr <- dsm$globals$attr
  val <- dsm$globals$val
  D <- dsm$globals$D
  F <- dsm$globals$F
  dsmm <- dsm$M # raw co-occurrence counts
  cat("cooc.freq.max = ", max(dsmm), "\n")
  # row filtering according to the chosen parameters
  S1 <- apply(dsmm, 1, function(x) length(x[x>seuil])) # per row: nb of cells > seuil
  cat("nb.somme.cooc > seuil = ",length(S1[S1>0]),"\n")
  S2 <- S1[S1>mincoo] # keep rows with more than mincoo qualifying cells
  rm(S1)
  gc()
  # NOTE(review): stringsAsFactors=FALSE *inside* cbind() is treated as a data
  # column, not as an option; the column indices used further down (c(1,2,4,6))
  # silently account for that extra column. Kept as is to preserve behaviour.
  S2 <- as.data.frame(cbind(names(S2),S2, stringsAsFactors=FALSE))
  names(S2) <- c("names", "valbr")
  S2$valbr <- as.numeric(as.character(S2$valbr))
  S2$names <- as.character(S2$names)
  # weight the raw values by the total frequencies, computed over the
  # relevant set (whole corpus or sub-corpus)
  CRP <- corpus(corp)
  if (dsm$globals$attr=="" && dsm$globals$N==dsm$globals$effactuel) {
    crp <- subcorpus(CRP, '[lemma=".*" & (pos="SUB"|pos="VBE"|pos="QLF")]')
  }
  else {
    def.scorp <- paste("abc:[lemma=\".*\" & (pos=\"SUB\"|pos=\"VBE\"|pos=\"QLF\") & _.", attr, "=\"", val, "\"]::abc >=",D," & abc <=",F, sep="")
    crp <- subcorpus(CRP, def.scorp)
  }
  Clist <- cqp_flist(crp, "match", "lemma")
  Clist2 <- Clist[1:length(Clist)]
  rm(Clist)
  gc()
  Clist <- as.data.frame(cbind(names(Clist2),Clist2, stringsAsFactors=FALSE))
  names(Clist) <- c("names", "freqtt")
  Clist$freqtt <- as.numeric(as.character(Clist$freqtt))
  Clist$names <- as.character(Clist$names)
  S3 <- merge(S2, Clist, by.x="names", by.y="names", all.x=TRUE, all.y=FALSE)
  rm(S2)
  gc()
  S3$valence <- S3$valbr / S3$freqtt # weighted valency
  S3 <- S3[,c(1,2,4,6)] # names, valbr, freqtt, valence (skips the spurious columns)
  S3 <- S3[rev(order(S3[,4])),] # sort by decreasing valency
  gc()
  # stop-word removal (default: uninformative Latin QLF and VBE)
  if (stopw==""){
    stopw <- c("--","ago","aio","alius","audio","debeo","dico1","dico2","facio","fio","habeo","inquio","ipse1","loquor","meus","multus","nihil","nolo","noster","nullus","omnis","pono","possum","quidam","sequor","sum","suus","talis","tantus","totus","tuus","uenio","uester","uolo2","hic2","hic1","iste","ille","diuersus","inquantus","alter","ceterus","quisque","ullus")
  }
  else {
    stopw2 <- read.csv2(stopw, header=FALSE, stringsAsFactors=FALSE, quote="", fill=TRUE)
    stopw <- stopw2[,1]
  }
  lsttri <- setdiff(S3[,1],stopw)
  S3 <- S3[(S3[,1]%in%lsttri),]
  # restrict the DSM to the retained keywords
  dsm2 <- subset(dsm, subset=(term %in% S3[,1]), select=(term %in% S3[,1]))
  dsm3 <- as.matrix(dsm2$M) # raw counts
  dsm4 <- as.matrix(dsm2$S) # association scores
  cat("nb.kw = ", ncol(dsm3), "\n\n", sep="")
  # guard: too few keywords -> return without drawing the map
  if (ncol(dsm3) < 20) {
    cat("Moins de 20 lemmes retenus : baissez les paramètres !", "\n\n", sep="")
    res <- list(S3,dsm3,dsm4)
    class(res) <- "carte"
    names(res) <- c("valences","mat.brute","mat.coeff")
    write.matrix(S3)
    t2 <- Sys.time()
    td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
    cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
    return(res)
  }
  # guard: too many keywords -> return without drawing the map
  if (ncol(dsm3) > 250) {
    cat("Plus de 250 lemmes retenus : augmentez les paramètres !", "\n\n", sep="")
    res <- list(S3,dsm3,dsm4)
    class(res) <- "carte"
    names(res) <- c("valences","mat.brute","mat.coeff")
    write.matrix(S3)
    t2 <- Sys.time()
    td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
    cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
    return(res)
  }
  # PCA on the score table (the matrix is roughly symmetric)
  af.mat.coeff <- dudi.pca(dsm4, scannf=FALSE, nf=2) # the transpose would also work
  af.util <- af.mat.coeff$co
  # avoid label overlaps (lisible())
  if (decal==TRUE){
    ymax <- max(af.util[,2])
    ymin <- min(af.util[,2])
    Tbon <- lisible(af.util[,1],af.util[,2],lab=row.names(af.util),mn=ymin, mx=ymax,cex=(cex+.1))
    af.util[,1] <- Tbon[,1]
    af.util[,2] <- Tbon[,2]
  }
  rm(Tbon)
  gc()
  # strongest pairwise associations -> link table (all pairs, then top nseg)
  distab <- data.frame(NULL)
  cpt <- 1
  nbcol <- ncol(dsm4)
  for (i in 1:(nbcol-1)){
    for (j in (i+1):nbcol){
      distab[cpt,1] <- rownames(dsm4)[i]
      distab[cpt,2] <- colnames(dsm4)[j]
      distab[cpt,3] <- dsm4[i,j]
      cpt <- cpt+1
    }
  }
  distab.tr <- distab[order(distab[,3],decreasing=TRUE),]
  distab <- distab.tr[1:nseg,]
  # display coordinates of each link's endpoints
  R1r <- match(distab[,1], rownames(af.util))
  R2r <- match(distab[,2], rownames(af.util))
  distab[,4] <- af.util[R1r, 1]
  distab[,5] <- af.util[R1r, 2]
  distab[,6] <- af.util[R2r, 1]
  distab[,7] <- af.util[R2r, 2]
  # draw the map
  par(mar=c(0.5,0.5,1.7,0.5))
  plot(af.util, type="n", asp=1, axes=FALSE, frame.plot=TRUE)
  nb.kw <- nrow(af.util)
  text(af.util, labels=row.names(af.util), cex=cex, col="#005500")
  segments(distab[,4], distab[,5], distab[,6], distab[,7], lwd=1, col="grey")
  # title lines
  nbr <- length(dsm$rows[,1])
  nbc <- length(dsm$cols[,1])
  nm.obj <- deparse(substitute(dsm))
  mn <- paste("DSM d'origine : ",nm.obj," (matrice de ", nbc , " sur ", nbr ,") effectif : ",dsm$globals$N, " tokens", sep = "")
  title(main = mn, line=1, cex.main=.8, font.main=1, adj = 0)
  if (dsm$globals$effactuel==dsm$globals$effcorpus && dsm$globals$attr=="") {
    titranal <- paste("CARTE SÉMANTIQUE DU CORPUS *",corp, "* ",nb.kw, " mots-clés", sep="")
  }
  else if (dsm$globals$attr!="") {
    titranal <- paste("CARTE DU SOUS-CORPUS *",corp,"* attribut = ",dsm$globals$attr," valeur = ",dsm$globals$val," ",nb.kw, " mots-clés", sep="")
  }
  else {
    titranal <- paste("CARTE DU SOUS-CORPUS *",corp,"* D = ",dsm$globals$D," F = ",dsm$globals$F," ",nb.kw, " mots-clés", sep="")
  }
  mtext(titranal, 3, line=0,cex=.8, font=1, adj=0)
  # assemble the output object
  res <- list(S3,dsm3,dsm4,af.util,distab)
  class(res) <- "carte"
  names(res) <- c("valences","mat.brute","mat.coeff","acp","distab")
  write.matrix(S3)
  t2 <- Sys.time()
  td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  return(res)
}
################################################################################
# Second groupe : analyses par tranches > évolutions
################################################################################
corpus2scanm <- function(corp, dis=5, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA="lemma", objetB ="lemma", attr="", val="", destination, trnch=5, flag=TRUE){
  # Run corpus2scan() over 'trnch' equal-sized, successive slices of a corpus;
  # each slice's scan is written to disk as "<destination>_<i>".
  # All window/POS parameters are forwarded unchanged to corpus2scan().
  # flag=TRUE prints the elapsed time at the end.
  if (flag==TRUE) {
    t1 <- Sys.time()
  }
  if (destination == "") {
    stop(" Indiquer une destination pour le scan ", call.=FALSE)
  }
  library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
  # slice boundaries: trnch+1 equally spaced corpus positions.
  # NOTE(review): seq() may produce fractional positions, and consecutive
  # slices share their boundary position -- confirm against cwb-scan-corpus.
  efftt <- size(corpus(corp))
  bornes <- seq(1, efftt, length.out=trnch+1)
  # one scan per slice
  for (i in seq_len(trnch)) {
    D <- bornes[i]
    F <- bornes[i+1]
    destin <- paste(destination, "_", i, sep="")
    corpus2scan(corp=corp, dis=dis, posA=posA, posB=posB, objetA=objetA, objetB=objetB, D=D, F=F, attr=attr, val=val, destination=destin, flag=FALSE)
    cat("Tranche ",i, " sur ",trnch, "terminée","\n\n")
  }
  if (flag==TRUE) {
    t2 <- Sys.time()
    td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
    cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  }
}
###############################################################################
scanm2dsmm <- function(scan, seuil= 5, coef="simple-ll", nproj="", trnch, flag=TRUE) {
  # Load the series of scans "<scan>_1" .. "<scan>_<trnch>" from disk and
  # build the matching series of DSMs, returned as a list (one dsm per slice).
  # 'flag' is accepted for interface symmetry with the sibling functions but
  # is not used here (each scan2dsm() call is silenced with flag=FALSE).
  res <- list(NULL)
  # seq_len(): safe when trnch == 0 (1:trnch would iterate over 1 and 0)
  for (i in seq_len(trnch)) {
    scanm <- paste(scan, "_", i, sep="")
    res[[i]] <- scan2dsm(scan=scanm, seuil=seuil, coef=coef, nproj=nproj, flag=FALSE)
  }
  res
}
#############################################################################
corpus2dsmm <- function(corp, dis=5, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA= "lemma", objetB = "lemma", trnch=5, attr="", val="", destination, seuil= 5, coef="simple-ll", nproj=""){
  # Convenience wrapper chaining the two slice-based steps:
  #   1. corpus2scanm() -- scan the corpus slice by slice (written to disk);
  #   2. scanm2dsmm()   -- rebuild the corresponding list of DSM objects.
  # Returns the list of DSMs (one per slice) and prints the elapsed time.
  t1 <- Sys.time()
  if (destination == "") {
    stop(" Indiquer une destination pour le scan ", call.=FALSE)
  }
  # 1. slice scans
  corpus2scanm(corp=corp, dis=dis, posA=posA, posB=posB, objetA=objetA, objetB = objetB , trnch= trnch, attr=attr, val=val, destination=destination, flag=FALSE)
  cat("\n","Traitements...","\n")
  gc() # free memory between the two heavy steps
  # 2. build the series of dsm objects
  res <- scanm2dsmm(scan=destination, seuil=seuil, coef=coef, nproj=nproj, flag=FALSE, trnch=trnch)
  t2 <- Sys.time()
  td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  res
}
#################################################################################
dsmm2af <- function(dsmm, lm, nppv, xax=1, yax=1, cex=.9, decal=TRUE, asp=1) {
  # Track the nearest neighbours of a lemma across a series of DSMs
  # (one per time slice), build the matrix of their distances to the pivot,
  # and display it through a correspondence analysis (CA).
  # Strictly complementary to dsm2af(): this view is only meant to bring out
  # EVOLUTIONS of the semantic field, not its sub-clusters.
  # Difficulty: outliers that freeze the CA must be removed.
  # TODO: display those outliers as supplementary elements.
  #
  # Arguments:
  #   dsmm     - list of dsm objects (one per slice), e.g. from corpus2dsmm()
  #   lm       - pivot lemma
  #   nppv     - number of nearest neighbours collected per slice
  #   xax, yax - +1/-1 to flip the CA axes
  #   cex      - label size
  #   decal    - spread labels apart to avoid overlaps (via lisible())
  #   asp      - 1 for an orthonormal plot, any other value for a free aspect
  #              ratio. FIX: 'asp' was used below but never declared, making
  #              the function fail with "object 'asp' not found".
  # Returns an object of class "NPPVM".
  t1 <- Sys.time()
  library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
  library(ade4, quietly=TRUE, warn.conflicts=FALSE)
  library(circular, quietly=TRUE, warn.conflicts=FALSE)
  library(MASS, quietly=TRUE, warn.conflicts=FALSE) # FIX: write.matrix() below needs MASS (was commented out)
  options(warn=-1)
  cooc.tt <- as.vector(NULL)
  trnch <- length(dsmm)
  # first pass: collect the neighbour lemmas of each slice
  for (i in seq_len(trnch)) {
    cooc.raw <- nearest.neighbours(dsmm[[i]], lm, n=nppv)
    cooc.nam <- names(cooc.raw)
    cooc.tt <- c(cooc.tt, cooc.nam)
  }
  # deduplicated lemma list (character vector)
  cooc.tt <- unique(cooc.tt)
  nb.cooc <- length(cooc.tt)
  piv <- rep(lm, times=nb.cooc)
  # distance of every lemma to the pivot, in every slice -> matrix
  distmat <- matrix(ncol = trnch, nrow = nb.cooc, 0)
  vec.ncol <- as.vector(NULL)
  for (i in seq_len(trnch)) {
    distmat[,i] <- pair.distances(piv, cooc.tt, method="cosine", dsmm[[i]], convert=FALSE)
    vec.ncol <- c(vec.ncol, paste("PER_", i, sep=""))
  }
  colnames(distmat) <- vec.ncol # column = period
  rownames(distmat) <- cooc.tt  # row = lemma
  # per-lemma variance, kept alongside the raw matrix
  diff0.var <- apply(distmat, 1, var, na.rm=TRUE)
  distmat0 <- cbind(distmat, diff0.var*100)
  res <- list(NULL) # output container
  res[[1]] <- distmat0 # raw matrix
  # drop incomplete rows: any row whose sum is not finite (Inf or NA).
  # FIX: the previous TRUE/FALSE rewriting left NA sums as NA, which produced
  # all-NA rows in the subset; is.finite() discards those rows as well.
  distmat <- distmat[is.finite(apply(distmat, 1, sum)),]
  # drop rows whose dispersion exceeds 2 standard deviations
  diff.var <- apply(distmat, 1, sd, na.rm=TRUE)
  var.mean <- mean(diff.var, na.rm=TRUE)
  var.sd <- sd(diff.var, na.rm=TRUE)
  li.rm <- (diff.var < (var.mean + (2*var.sd)) & diff.var > (var.mean - (2*var.sd)))
  li.rm[is.na(li.rm)] <- FALSE
  distmat <- distmat[li.rm,]
  # reorder rows by reciprocal averaging (period-weighted mean)
  li.m <- rep(0, nrow(distmat))
  for (j in 1:ncol(distmat)){
    for (i in 1:nrow(distmat)){
      li.m[i] <- li.m[i]+(distmat[i,j]*j)
    }
  }
  li.m <- li.m / rowSums(distmat)
  distmat <- distmat[rev(sort.list(li.m)),]
  res[[2]] <- distmat # cleaned, reordered matrix
  # per-lemma mean and sd, printed for the user
  dis.mean <- apply(distmat,1,mean)
  dis.sd <- apply(distmat,1,sd)
  dis.util <- cbind(row.names(distmat),dis.mean,dis.sd)
  colnames(dis.util) <- c("lemmes", "coeff.moy.", "sd")
  cat("\n")
  write.matrix(dis.util)
  res[[3]] <- dis.util[,1:2]
  # per-row smoothing with lowess(): essential for readability
  nb <- nrow(distmat)
  dist.colnames <- colnames(distmat)
  dist.rownames <- rownames(distmat)
  ls.coeff2 <- matrix(0, nrow=nb, ncol=trnch)
  for (i in seq_len(nb)) {
    ls.coeff2[i,] <- lowess(distmat[i,])$y
  }
  distmat <- ls.coeff2
  colnames(distmat) <- dist.colnames
  rownames(distmat) <- dist.rownames
  distmat[distmat<0] <- 0 # clamp smoothing undershoot
  # per-row and per-column variance
  diff.var <- sort(apply(distmat, 1, var, na.rm=TRUE))
  diff2.var <- apply(distmat, 2, var, na.rm=TRUE)
  res[[4]] <- diff.var
  res[[5]] <- diff2.var
  # correspondence analysis
  af.distmat <- dudi.coa(distmat, scannf=FALSE)
  af.util.co <- af.distmat$co
  colnames(af.util.co) <- c("axe1", "axe2")
  af.util.li <- af.distmat$li
  colnames(af.util.li) <- c("axe1", "axe2")
  af.util.tt <- rbind(af.util.co, af.util.li)
  res[[6]] <- af.util.tt
  names(res) <- c("matrice_brute", "matrice_nettoyee", "vecteur_ppvoisins", "variances_lignes", "variances_colonnes", "coordonnees")
  co.nm <- colnames(distmat)
  li.nm <- rownames(distmat)
  tt.nm <- c(co.nm, li.nm)
  # avoid label overlaps (lisible())
  if (decal==TRUE){
    ymax <- max(af.util.tt[,2])
    ymin <- min(af.util.tt[,2])
    Tbon <- lisible(af.util.tt[,1],af.util.tt[,2],lab=row.names(af.util.tt),mn=ymin, mx=ymax,cex=(cex+.1))
    af.util.tt[,1] <- Tbon[,1]
    af.util.tt[,2] <- Tbon[,2]
  }
  af.util.tt[,1] <- af.util.tt[,1]*xax # axis orientation control
  af.util.tt[,2] <- af.util.tt[,2]*yax
  # split back into column points (periods) and row points (lemmas)
  af.util.co <- af.util.tt[(1:trnch),]
  af.util.li <- af.util.tt[((trnch+1):(length(af.util.tt[,2]))),]
  # draw the CA
  par(mar=c(0.5,0.5,1.7,0.5))
  if (asp==1){
    plot(af.util.tt, asp=1, type="n", axes=FALSE, frame.plot=TRUE) # frame (orthonormal)
  }
  else {
    plot(af.util.tt, type="n", axes=FALSE, frame.plot=TRUE) # frame (free aspect)
  }
  #lines(af.util.co, col="grey", lwd=3) # trace
  text(af.util.co, labels=co.nm, cex=cex, col="red", font=2) # period points
  text(af.util.li, labels=li.nm, cex=cex, col="blue") # lemma points
  nm.obj <- deparse(substitute(dsmm))
  nbcoocr <- length(af.util.li[,1])
  mn <- paste("DSM multiple d'origine : ",nm.obj," (", trnch," tranches). Lemme : ", lm, ". ",nppv, " > ", nbcoocr, " éléments.", sep = "")
  title(main = mn, line=1, cex.main=.8, font.main=1, adj = 0) # title
  titranal <- "ÉVOLUTION DU CHAMP SÉMANTIQUE (sémantique distributionnelle)"
  mtext(titranal, 3, line=0,cex=.8, font=1, adj=0)
  # spllines(): helper presumably defined elsewhere in this file -- verify
  spllines(af.util.co[,1], af.util.co[,2], col="red") # trace through the periods
  class(res) <- "NPPVM"
  # elapsed time (t1 was captured but never reported; now consistent with siblings)
  t2 <- Sys.time()
  td <- difftime(t2,t1)
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  return(res)
}
##########################################################################
##########################################################################
#####################
# suppression des recouvrements
# partant du centre, on écarte les points qui provoquent recouvrement,
# toujours vers l'extérieur (selon le quadrant), alternativement horizontalement
# et verticalement, de manière à éviter la déformation du nuage,
# en pondérant l'alternance par la proximité angulaire avec l'axe 1 ou 2
# peut durer de quelques secondes à quelques minutes !!!
#####################
lisible <- function (x, y, lab, mn, mx, cex=.2){
# Label de-overlapping for factor maps.
# Starting from the centre, points whose labels would overlap are pushed
# outwards (according to their quadrant), alternately horizontally and
# vertically, so as to limit the deformation of the cloud; the alternation is
# weighted by the angular proximity to axis 1 or 2, combined with a random
# draw. Can take from a few seconds to a few minutes!
#
# Arguments:
#   x, y   - point coordinates
#   lab    - point labels (same length as x and y)
#   mn, mx - min and max of the y coordinates (used to size the label boxes)
#   cex    - label size, drives the estimated label height/width
# Returns a data frame whose first two columns are the adjusted coordinates,
# in the original row order.
#
# Build the starting table ("tabdep" = tableau de départ).
library(circular, quietly=TRUE, warn.conflicts=FALSE)
eps <- 0.0000000001
tabdep <- as.data.frame(cbind(x,y,lab))
names(tabdep) <- c("x","y","lab")
row.names(tabdep) <- seq(1,nrow(tabdep))
tabdep$x <- as.numeric(as.character(tabdep[,1]))
tabdep$y <- as.numeric(as.character(tabdep[,2]))
tabdep$lab <- as.character(tabdep$lab)
# Estimated glyph height/width in plot units, derived from cex.
htlet <- (mx-mn)/(30/cex)
lglet <- htlet*.5
H <- lglet/2
# Per-point helper columns: original index, squared distance to origin,
# right/left label edges, and angle to the x axis (deg() from 'circular').
indx <- as.numeric(row.names(tabdep))
d2 <- (tabdep$x^2)+(tabdep$y^2)
drt <- tabdep$x + (H*nchar(tabdep$lab))
gau <- tabdep$x - (H*nchar(tabdep$lab))
angl <- deg(atan(tabdep$y/tabdep$x))/.9
tabdep <- as.data.frame(cbind(tabdep,indx,d2,drt,gau,angl))
tt <- length(x)
tabfin <- tabpro <- tabdep
# Problem: points sharing the exact same coordinates would never separate;
# nudge duplicates (detected on column 5 = d2, at 8 significant digits).
tabpro <- tabpro[sort.list(tabpro$d2),]
for (i in 2:nrow(tabpro)) {
if (signif(tabpro[i,5],8) == signif(tabpro[i-1,5],8)) {
tabpro[i,1] <- tabpro[i,1] + (tabpro[i,1]/10000)
}
}
tabpro$d2 <- (tabpro$x^2)+(tabpro$y^2)
# Pool of random thresholds used to alternate horizontal/vertical pushes.
rn <- (runif(tt*100))*100
for (i in 1:tt){
# Take the point closest to the origin, move it to the final table,
# and remove it from the pool of pending points.
tabpro <- tabpro[sort.list(tabpro$d2),]
cnt <- (tabpro[1,])
tabfin[i,] <- cnt
tabpro <- tabpro[-1,]
# Everything that may overlap the active point (cnt) must be pushed away.
# Build the "ruban" (ribbon) of candidate points at overlapping height.
if (nrow(tabpro)==0) next
cnt[1] <- as.numeric(as.character(cnt[1]))-(eps*sign(as.numeric(as.character(cnt[1]))))
cnt[2] <- as.numeric(as.character(cnt[2]))-(eps*sign(as.numeric(as.character(cnt[2]))))
ruban <- tabpro[(abs(as.numeric(tabpro$y)-as.numeric(as.character(cnt[2])))< htlet),]
if (nrow(ruban) == 0) next
# Within the ribbon, keep the points whose label box actually intersects
# the active label, on the left (rubg) or on the right (rubd).
rubg <- ruban[(ruban$x < as.numeric(as.character(cnt[1])) & ruban$drt > as.numeric(as.character(cnt[7]))),]
rubd <- ruban[(ruban$x > as.numeric(as.character(cnt[1])) & ruban$gau < as.numeric(as.character(cnt[6]))),]
rub <- rbind(rubg,rubd)
rub <- unique(rub)
if (nrow(rub) == 0) next
n <- nrow(rub)
r <- 1
# Push every point of the ribbon alternately horizontally and vertically,
# outwards in its quadrant, combining the angle value (column 8) with a
# random threshold (!). Columns: 1=x, 2=y, 3=lab, 4=indx, 6=drt, 7=gau.
for (j in 1:n){
if (rub[j,1]>0 & rub[j,2]>0 & rub[j,8]<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[6]+(H*nchar(rub[j,3]))
if (rub[j,1]>0 & rub[j,2]>0 & rub[j,8]>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]+(htlet)
if (rub[j,1]>0 & rub[j,2]<0 & abs(rub[j,8])<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[6]+(H*nchar(rub[j,3]))
if (rub[j,1]>0 & rub[j,2]<0 & abs(rub[j,8])>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]-(htlet)
if (rub[j,1]<0 & rub[j,2]<0 & rub[j,8]<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[7]-(H*nchar(rub[j,3]))
if (rub[j,1]<0 & rub[j,2]<0 & rub[j,8]>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]-(htlet)
if (rub[j,1]<0 & rub[j,2]>0 & abs(rub[j,8])<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[7]-(H*nchar(rub[j,3]))
if (rub[j,1]<0 & rub[j,2]>0 & abs(rub[j,8])>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]+(htlet)
r <- r+1
}
# Recompute the relative position of all remaining points, so that the
# right point is picked at the next iteration.
tabpro$d2 <- (tabpro$x^2) + (tabpro$y^2)
tabpro$drt <- tabpro$x + (H*nchar(tabpro$lab))
tabpro$gau <- tabpro$x - (H*nchar(tabpro$lab))
}
# Put the final table back into the original row order (indx) and restore
# the original labels.
tabfin <- tabfin[sort.list(tabfin$indx),]
tabfin[,3] <- lab
return(tabfin)
}
###################################################################################
listevaleurs <- function(corp, attr) {
  # Utility: list the regions of a structural attribute in a CWB corpus,
  # with the token count of each region and the cumulated count per value.
  # Returns a list of two elements:
  #   df.val    - one row per region: id, value name, token count
  #   df.valsum - cumulated token count per distinct value
  t1 <- Sys.time()
  gc()
  library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
  options(warn=-1)
  efftt <- size(corpus(corp))
  requ <- paste(corp,".",attr, sep="")
  # ids of the attribute's regions over the whole corpus
  idsattr <- unique(cqi_cpos2struc(requ, 0:(efftt-1)))
  nb.idsattr <- length(idsattr)
  # for each id: the cpos bounds (-> size) and the value name
  df.val <- data.frame(NULL)
  for (i in seq_len(nb.idsattr)) { # seq_len(): safe if no region exists
    df.val[i,1] <- idsattr[i]
    bornes <- cqi_struc2cpos(requ, idsattr[i])
    df.val[i,2] <- cqi_struc2str(requ, idsattr[i])
    df.val[i,3] <- bornes[2]-bornes[1]+1
  }
  names(df.val) <- c("id","nom","effectif")
  # cumulated counts per value
  prov <- df.val
  prov[,2] <- as.factor(prov[,2])
  df.valsum <- tapply(prov[,3],prov[,2],sum)
  res <- list(df.val,df.valsum)
  names(res) <- c("df.val","df.valsum")
  cat("effectif total du corpus ", efftt, "\n\n")
  print(as.matrix(df.valsum))
  t2 <- Sys.time()
  td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  return(res)
}
| /Scripts/WSdsm.R | no_license | Commune-2017/Semantique | R | false | false | 31,113 | r | # WSdsm.R (WordSpace / Distributional Semantic Model)
#
# Script constitué par un ensemble de fonctions destinées à faciliter l'usage
# de la bibliothèque R 'wordspace' (Stefan Evert), à partir d'un corpus enregistré sous CWB.
# Un premier groupe de fonctions est destiné à créer un DSM et à calculer, à partir de ce DSM,
# les champs des lemmes choisis, avec visualisation par analyse factorielle des correspondances (AFC),
# et à extraire les mots-clés d'un ensemble à partir des valences lexicales généralisées pondérées.
# Le second groupe permet d'appliquer les mêmes procédures sur un corpus
# découpé en tranches : l'objectif est l'analyse de l'évolution d'un champ sémantique,
# la visualisation est conçue pour faire ressortir les éléments liés plus particulièrement
# à telle ou telle période. Les deux groupes doivent être employés de manière complémentaire.
# version pré-alpha 0.3 AG novembre 2015 - mars 2017. GPL3
# TODO : autres méthodes d'examen des évolutions.
#########################################################################################
# premier groupe : analyses globales > champs sémantiques
#########################################################################################
corpus2scan <- function(corp, dis=3, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA= "lemma", objetB = "lemma", D=0, F="", attr="", val="", destination , flag=TRUE ) {
  # Build, on disk, the concatenated output of a full co-occurrence scan of a
  # corpus (or a slice of it), obtained by running 'cwb-scan-corpus' once per
  # column offset of a window of width 'dis' on each side of the pivot.
  #
  # 'cwb-scan-corpus' counts identical pairs over 2 columns; each offset is
  # scanned separately and saved as it goes (memory/time trade-off), then the
  # partial files are concatenated into 'destination'. A companion file
  # "<destination>_params" records the scan parameters for scan2dsm().
  # Progress messages during execution are only approximate.
  if (flag==TRUE){
    t1 <- Sys.time()
  }
  if (destination == "") {
    stop(" Indiquer une destination pour le scan ", call.=FALSE)
  }
  library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
  options(scipen=999) # disable scientific notation (breaks cqp command lines)
  efftt <- size(corpus(corp))
  effpart <- 0
  if (F==""){
    F <- efftt
  }
  if (D!=0 || F!="") { # scalar condition: use ||, not |
    effpart <- F-D
  }
  if (attr!="") {
    # restrict to the regions where attr == val to get the actual size
    def.scorp <- paste('[lemma=".*" %cd]', "::match.", attr, "=\"", val, "\"", sep="")
    CRP <- corpus(corp)
    crp <- subcorpus(CRP, def.scorp)
    effpart <- size(crp)
  }
  # loop: one scan per column offset within the window
  for (i in 0:(dis*2)) {
    if (i==dis){
      next # offset 'dis' is the pivot itself
    }
    # build the command-line arguments / -b value possibly excessive ??
    params <- paste("-b 200000000 -q -s ",D, sep="")
    if (F != efftt){
      params <- paste(params, " -e ",F, sep="")
    }
    # if (reg != ""){
    # params <- paste(params, " -r '",reg,"' ", sep="")
    # }
    params <- paste(params, " ",corp, " ", objetA,"+",dis," '?pos+",dis,"=/", posA, "/' pos+",dis," ",objetB,"+",i," '?pos+",i,"=/", posB, "/' pos+",i, sep="")
    if (attr != "" & val != ""){
      params <- paste(params," '?", attr, "=/", val,"/'", sep="")
    }
    sortie <- paste("/tmp/xyzxyz",i,".tsv", sep="")
    # run the scan (output written straight to disk)
    system2(command="cwb-scan-corpus", args=params, stdout=sortie)
    cat("scan =",i, "sur", dis*2, "\n")
    gc()
  }
  # concatenate the partial files into a single one (on disk)
  commd <- paste("cat /tmp/xyzxyz* > ", destination, sep="")
  system(command=commd)
  commd2 <- paste("rm /tmp/xyzxyz*") # remove the temporary files
  system(command=commd2)
  # write the companion parameter file describing this scan
  destination2 <- paste(destination, "_params", sep="")
  parametres <- c("corpus","eff.total","eff.actuel","distance","posA","posB","objetA","objetB","D","F","attr","val")
  valeurs <- c(corp,efftt,effpart,dis,posA,posB,objetA,objetB,D,F,attr,val)
  infos <- cbind(parametres,valeurs)
  write.table(infos,file=destination2, quote=FALSE,sep="\t",row.names=FALSE)
  if (flag==TRUE) {
    t2 <- Sys.time()
    td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
    cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  }
}
#################################################################################
scan2dsm <- function(scan, seuil= 9, coef="simple-ll", transf="log", nproj="", flag=TRUE) {
  # Read back from disk a file produced by corpus2scan() (together with its
  # "<scan>_params" companion), clean it, score it, and return a documented
  # 'wordspace' dsm object ready for use.
  #
  # Arguments:
  #   scan   - path of the scan file on disk
  #   seuil  - pruning threshold on the number of non-zero cells (nnzero)
  #   coef   - association measure passed to dsm.score()
  #   transf - score transformation passed to dsm.score()
  #   nproj  - if non-empty, target dimensionality for an rsvd projection
  #   flag   - print the elapsed time
  library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
  options(warn=-1)
  if (flag==TRUE) {
    t1 <- Sys.time()
  }
  gc()
  # read the parameter file then the triples (target, feature, count)
  scanp <- paste(scan, "_params", sep="")
  params.tripl <- read.table(scanp, header=FALSE, sep="\t", stringsAsFactors=FALSE, quote="", fill=TRUE)
  tripl <- read.table(scan, header=FALSE, sep="\t", stringsAsFactors=FALSE, quote="", fill=TRUE)
  tripl <- tripl[, c(2,4,1)]
  names(tripl) <- c("target", "feature", "eff") # cosmetic!
  # build the dsm object from the raw frequency triples
  triplobj <- dsm(target=tripl$target, feature=tripl$feature, score=tripl$eff, N=as.numeric(params.tripl[4,2]), raw.freq=TRUE, sort=TRUE)
  rm(tripl) # free memory
  gc()
  # pruning
  triplobj <- subset(triplobj, nnzero > seuil, nnzero > seuil, recursive=TRUE)
  # scoring (filter the significant co-occurrents)
  triplobjS <- dsm.score(triplobj, score= coef, transform=transf, normalize=TRUE)
  # dimensionality reduction of the matrix
  if (nproj != "") {
    triplobjS <- dsm.projection(triplobjS, method="rsvd", n=nproj, oversampling=4)
  }
  # record the scan metadata in $globals > self-documented dsm!
  triplobjS$globals$corpus <- params.tripl[2,2]
  triplobjS$globals$nblignes <- length(triplobjS$rows$term)
  triplobjS$globals$nbcols <- length(triplobjS$cols$term)
  triplobjS$globals$posA <- params.tripl[6,2]
  triplobjS$globals$posB <- params.tripl[7,2]
  triplobjS$globals$objetA <- params.tripl[8,2]
  triplobjS$globals$objetB <- params.tripl[9,2]
  triplobjS$globals$dis <- params.tripl[5,2]
  triplobjS$globals$effactuel <- as.numeric(params.tripl[4,2])
  triplobjS$globals$D <- as.numeric(params.tripl[10,2])
  triplobjS$globals$F <- as.numeric(params.tripl[11,2])
  if (triplobjS$globals$F==Inf) triplobjS$globals$F <- triplobjS$globals$N-1
  triplobjS$globals$attr <- params.tripl[12,2]
  triplobjS$globals$val <- params.tripl[13,2]
  triplobjS$globals$effcorpus <- params.tripl[3,2]
  triplobjS$globals$seuil <- seuil
  triplobjS$globals$coef <- coef
  triplobjS$globals$transf <- transf
  triplobjS$globals$nproj <- nproj
  if (flag==TRUE) { # was `flag==T`: T is reassignable, TRUE is not
    t2 <- Sys.time()
    td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
    cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n\n")
  }
  cat("lignes : ", length(triplobjS$rows$term), "\n")
  cat("colonnes : ", length(triplobjS$cols$term), "\n")
  return(triplobjS)
}
###############################################################################
corpus2dsm <- function(corp, dis=5, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA= "lemma", objetB = "lemma",D=0, F="", attr="", val="", destination, seuil= 9, coef="simple-ll",transf="log", nproj=""){
  # Whole pipeline in one call: starting from a corpus, a window width and a
  # POS choice (pivot and co-occurrent), scan the corpus to disk and return a
  # ready-to-use dsm object.
  t1 <- Sys.time()
  options(warn=-1)
  if (destination == "") {
    stop(" Indiquer une destination pour le scan ", call.=FALSE)
  }
  # 1. scan (written to 'destination')
  corpus2scan(corp=corp, dis=dis, posA=posA, posB=posB, objetA=objetA, objetB = objetB ,D=D, F=F, attr=attr, val=val, destination=destination, flag=FALSE)
  cat("\n","Traitements...","\n")
  gc() # free memory before the build step
  # 2. build the dsm object from the scan
  res <- scan2dsm(scan=destination, seuil=seuil, coef=coef,transf=transf, nproj=nproj, flag=FALSE)
  t2 <- Sys.time()
  td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  res
}
################################################################################
dsm2af <- function(dsm, lm, nppv=40, cex=.9, decal=TRUE) {
  # Find the nearest neighbours of a lemma in a given DSM and display the
  # matrix of their pairwise distances through a correspondence analysis (CA).
  # The plot is akin to a 'Wortfeld' in Jost Trier's sense: points are laid
  # out by their reciprocal distances, and the clusters correspond to the
  # sub-groups of the field.
  # Returns an object of class "NPPV" holding all intermediate results.
  opar <- par(mar=par("mar"))
  on.exit(par(opar)) # restore graphics parameters on exit
  library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
  library(ade4, quietly=TRUE, warn.conflicts=FALSE)
  library(circular, quietly=TRUE, warn.conflicts=FALSE)
  #library(MASS, quietly=TRUE, warn.conflicts=FALSE)
  options(warn=-1)
  t1 <- Sys.time()
  # nearest neighbours of the pivot
  vec.ppvoisins <- nearest.neighbours(M=dsm, term=lm, n=nppv)
  ppv.names <- names(vec.ppvoisins)
  val.ppvoisins <- cbind(as.character(ppv.names), as.numeric(vec.ppvoisins))
  row.names(val.ppvoisins) <- NULL
  mat.ppvoisins <- nearest.neighbours(M=dsm, term=lm, n=nppv, skip.missing=TRUE, dist.matrix=TRUE)
  res <- list(NULL)
  res[[1]] <- mat.ppvoisins
  res[[2]] <- val.ppvoisins
  # CA on the column table (the matrix is symmetric: row names are reused)
  af.mat.ppvoisins <- dudi.coa(mat.ppvoisins, scannf=FALSE)
  af.util <- af.mat.ppvoisins$co
  # avoid label overlaps (lisible())
  if (decal==TRUE){
    ymax <- max(af.util[,2])
    ymin <- min(af.util[,2])
    Tbon <- lisible(af.util[,1],af.util[,2],lab=row.names(af.util),mn=ymin, mx=ymax,cex=(cex+.1))
    af.util[,1] <- Tbon[,1]
    af.util[,2] <- Tbon[,2]
  }
  res[[3]] <- af.util
  names(res) <- c("matrice_distances", "vecteur_ppvoisins", "coordonnees")
  # draw the CA (pivot in red, neighbours in blue)
  par(mar=c(0.5,0.5,1.7,0.5))
  plot(af.util, type="n", asp=1, axes=FALSE, frame.plot=TRUE)
  text(af.util[1,], labels=row.names(af.util[1,]), cex=(cex+.2), col="red", font=2)
  af.util <- af.util[-1,]
  text(af.util, labels=row.names(af.util), cex=cex, col="blue")
  # title lines
  nbr <- length(dsm$rows[,1])
  nbc <- length(dsm$cols[,1])
  nm.obj <- deparse(substitute(dsm))
  mn <- paste("DSM d'origine : ",nm.obj," (matrice de ", nbc , " sur ", nbr ,"). ",nppv, " éléments.", sep = "")
  title(main = mn, line=1, cex.main=.8, font.main=1, adj = 0)
  titranal <- paste("STRUCTURE GLOBALE DU CHAMP SÉMANTIQUE de *",lm,"*", sep="")
  mtext(titranal, 3, line=0,cex=.8, font=1, adj=0)
  #write.matrix(val.ppvoisins)
  # print the neighbour list to the console
  for (i in seq_len(nppv)){ # seq_len(): safe when nppv == 0
    cat(names(res$vecteur_ppvoisins)[i], "\n")
  }
  class(res) <- "NPPV"
  t2 <- Sys.time()
  td <- difftime(t2,t1) # fix: was difftime(t1,t2), printing a negative elapsed time
  cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
  res
}
##################################################################################
dsm2carte <- function(dsm, seuil= "", mincoo=3, stopw="",nseg=50, decal=TRUE, cex=.8) {
# 1. Select keywords according to their weighted lexical valence.
# 2. Factor map of the whole set (PCA on the association-coefficient matrix).
#
# The keyword list is driven by two thresholds:
#   seuil  = minimal cooccurrence count per cell (cells > seuil are counted per row)
#   mincoo = minimal number of qualifying cells per row (rows at or below are dropped)
#   stopw  = file of stop words (one per line), or "" for the built-in Latin list
#   nseg   = number of strongest pairwise links drawn on the map
#   decal  = if TRUE, displace overlapping labels via lisible()
#   cex    = label size
# Returns a list of class "carte": valences, mat.brute, mat.coeff, acp, distab
# (the last two are absent when the function exits early on keyword count).
t1 <- Sys.time()
gc()
library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
library(ade4, quietly=TRUE, warn.conflicts=FALSE)
library(circular, quietly=TRUE, warn.conflicts=FALSE)
library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
library(MASS, quietly=TRUE, warn.conflicts=FALSE)
options(warn=-1)
options(scipen=999) # disable scientific notation (it breaks CQP queries)
if (!inherits(dsm, "dsm")) stop("en entrée : un objet de classe dsm")
corp <- dsm$globals$corpus # corpus name
attr <- dsm$globals$attr
val <- dsm$globals$val
D <- dsm$globals$D
F <- dsm$globals$F
dsmm <- dsm$M # raw cooccurrence counts
cat("cooc.freq.max = ", max(dsmm), "\n")
# row filtering according to the chosen thresholds
S1 <- apply(dsmm, 1, function(x) length(x[x>seuil])) # per row: number of cells > seuil
cat("nb.somme.cooc > seuil = ",length(S1[S1>0]),"\n")
S2 <- S1[S1>mincoo] # keep rows with more than mincoo qualifying cells
rm(S1)
gc()
# NOTE(review): stringsAsFactors is NOT an argument of cbind(), so it silently
# becomes an extra, constant third column. The later selection S3[,c(1,2,4,6)]
# relies on these extra columns being present — do not "fix" this cbind alone.
S2 <- as.data.frame(cbind(names(S2),S2, stringsAsFactors=FALSE))
names(S2) <- c("names", "valbr")
S2$valbr <- as.numeric(as.character(S2$valbr))
S2$names <- as.character(S2$names)
# weight the raw values by total frequencies, computed over the relevant
# set (whole corpus or subcorpus)
CRP <- corpus(corp)
if (dsm$globals$attr=="" && dsm$globals$N==dsm$globals$effactuel) {
crp <- subcorpus(CRP, '[lemma=".*" & (pos="SUB"|pos="VBE"|pos="QLF")]')
}
else {
def.scorp <- paste("abc:[lemma=\".*\" & (pos=\"SUB\"|pos=\"VBE\"|pos=\"QLF\") & _.", attr, "=\"", val, "\"]::abc >=",D," & abc <=",F, sep="")
crp <- subcorpus(CRP, def.scorp)
}
Clist <- cqp_flist(crp, "match", "lemma")
Clist2 <- Clist[1:length(Clist)]
rm(Clist)
gc()
# same cbind() quirk as above: an extra constant column is created on purpose
Clist <- as.data.frame(cbind(names(Clist2),Clist2, stringsAsFactors=FALSE))
names(Clist) <- c("names", "freqtt")
Clist$freqtt <- as.numeric(as.character(Clist$freqtt))
Clist$names <- as.character(Clist$names)
S3 <- merge(S2, Clist, by.x="names", by.y="names", all.x=TRUE, all.y=FALSE)
rm(S2)
gc()
S3$valence <- S3$valbr / S3$freqtt
S3 <- S3[,c(1,2,4,6)] # keep names, valbr, freqtt, valence (3 and 5 are the spurious columns)
S3 <- S3[rev(order(S3[,4])),] # sort by decreasing valence
gc()
# remove stop words (default: uninformative Latin QLF/VBE and function-like lemmas)
if (stopw==""){
stopw <- c("--","ago","aio","alius","audio","debeo","dico1","dico2","facio","fio","habeo","inquio","ipse1","loquor","meus","multus","nihil","nolo","noster","nullus","omnis","pono","possum","quidam","sequor","sum","suus","talis","tantus","totus","tuus","uenio","uester","uolo2","hic2","hic1","iste","ille","diuersus","inquantus","alter","ceterus","quisque","ullus")
}
else {
stopw2 <- read.csv2(stopw, header=FALSE, stringsAsFactors=FALSE, quote="", fill=TRUE)
stopw <- stopw2[,1]
}
lsttri <- setdiff(S3[,1],stopw)
S3 <- S3[(S3[,1]%in%lsttri),]
dsm2 <- subset(dsm, subset=(term %in% S3[,1]), select=(term %in% S3[,1]))
dsm3 <- as.matrix(dsm2$M) # raw counts, keywords only
dsm4 <- as.matrix(dsm2$S) # association scores, keywords only
cat("nb.kw = ", ncol(dsm3), "\n\n", sep="")
if (ncol(dsm3) < 20) {
cat("Moins de 20 lemmes retenus : baissez les paramètres !", "\n\n", sep="")
res <- list(S3,dsm3,dsm4)
class(res) <- "carte"
names(res) <- c("valences","mat.brute","mat.coeff")
write.matrix(S3)
t2 <- Sys.time()
td <- difftime(t2,t1) # end - start: elapsed time is positive (was reversed)
cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
return(res)
}
if (ncol(dsm3) > 250) {
cat("Plus de 250 lemmes retenus : augmentez les paramètres !", "\n\n", sep="")
res <- list(S3,dsm3,dsm4)
class(res) <- "carte"
names(res) <- c("valences","mat.brute","mat.coeff")
write.matrix(S3)
t2 <- Sys.time()
td <- difftime(t2,t1) # end - start: elapsed time is positive (was reversed)
cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
return(res)
}
# PCA on the column table (the matrix is (nearly) symmetric)
af.mat.coeff <- dudi.pca(dsm4, scannf=FALSE, nf=2) # the transpose would do as well
af.util <- af.mat.coeff$co
# avoid label overlap (delegated to lisible())
if (decal==TRUE){
ymax <- max(af.util[,2])
ymin <- min(af.util[,2])
Tbon <- lisible(af.util[,1],af.util[,2],lab=row.names(af.util),mn=ymin, mx=ymax,cex=(cex+.1))
af.util[,1] <- Tbon[,1]
af.util[,2] <- Tbon[,2]
}
rm(Tbon) # harmless warning when decal=FALSE (warnings are disabled above)
gc()
# strongest pairwise associations (upper triangle of dsm4; <= 250 columns here)
distab <- data.frame(NULL)
cpt <- 1
nbcol <- ncol(dsm4)
for (i in 1:(nbcol-1)){
for (j in (i+1):nbcol){
distab[cpt,1] <- rownames(dsm4)[i]
distab[cpt,2] <- colnames(dsm4)[j]
distab[cpt,3] <- dsm4[i,j]
cpt <- cpt+1
}
}
distab.tr <- distab[order(distab[,3],decreasing=TRUE),]
distab <- distab.tr[1:nseg,] # keep the nseg strongest links
# display coordinates for both ends of each link
R1r <- match(distab[,1], rownames(af.util))
R2r <- match(distab[,2], rownames(af.util))
distab[,4] <- af.util[R1r, 1]
distab[,5] <- af.util[R1r, 2]
distab[,6] <- af.util[R2r, 1]
distab[,7] <- af.util[R2r, 2]
# draw the factor map
par(mar=c(0.5,0.5,1.7,0.5))
plot(af.util, type="n", asp=1, axes=FALSE, frame.plot=TRUE)
nb.kw <- nrow(af.util)
text(af.util, labels=row.names(af.util), cex=cex, col="#005500")
segments(distab[,4], distab[,5], distab[,6], distab[,7], lwd=1, col="grey")
# title
nbr <- length(dsm$rows[,1])
nbc <- length(dsm$cols[,1])
nm.obj <- deparse(substitute(dsm))
mn <- paste("DSM d'origine : ",nm.obj," (matrice de ", nbc , " sur ", nbr ,") effectif : ",dsm$globals$N, " tokens", sep = "")
title(main = mn, line=1, cex.main=.8, font.main=1, adj = 0)
if (dsm$globals$effactuel==dsm$globals$effcorpus && dsm$globals$attr=="") {
titranal <- paste("CARTE SÉMANTIQUE DU CORPUS *",corp, "* ",nb.kw, " mots-clés", sep="")
}
else if (dsm$globals$attr!="") {
titranal <- paste("CARTE DU SOUS-CORPUS *",corp,"* attribut = ",dsm$globals$attr," valeur = ",dsm$globals$val," ",nb.kw, " mots-clés", sep="")
}
else {
titranal <- paste("CARTE DU SOUS-CORPUS *",corp,"* D = ",dsm$globals$D," F = ",dsm$globals$F," ",nb.kw, " mots-clés", sep="")
}
mtext(titranal, 3, line=0,cex=.8, font=1, adj=0)
# build the returned list
res <- list(S3,dsm3,dsm4,af.util,distab)
class(res) <- "carte"
names(res) <- c("valences","mat.brute","mat.coeff","acp","distab")
write.matrix(S3)
t2 <- Sys.time()
td <- difftime(t2,t1) # end - start: elapsed time is positive (was reversed)
cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
return(res)
}
################################################################################
# Second groupe : analyses par tranches > évolutions
################################################################################
corpus2scanm <- function(corp, dis=5, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA="lemma", objetB ="lemma", attr="", val="", destination, trnch=5, flag=TRUE){
# Build a series of cooccurrence scans, one per successive slice ("tranche")
# of a corpus; each scan is saved to disk by corpus2scan() under the name
# <destination>_<i>.
#   corp          : CQP corpus name.
#   dis           : cooccurrence window (passed through to corpus2scan).
#   posA, posB    : POS filters for node / collocate.
#   objetA, objetB: positional attribute scanned ("lemma" by default).
#   attr, val     : optional structural-attribute restriction.
#   destination   : path prefix for the saved scans (required).
#   trnch         : number of equal-sized slices.
#   flag          : if TRUE, report elapsed time.
# Called for its side effects (files written on disk); returns nothing useful.
if (flag==TRUE) {
t1 <- Sys.time()
}
# guard both a missing argument and an explicit empty string
if (missing(destination) || destination == "") {
stop(" Indiquer une destination pour le scan ", call.=FALSE)
}
library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
# split the corpus into equal slices (token-position boundaries)
efftt <- size(corpus(corp))
bornes <- seq(1, efftt, length.out=trnch+1)
# scan each slice in turn
for (i in seq_len(trnch)) {
D <- bornes[i]
F <- bornes[i+1]
destin <- paste(destination, "_", i, sep="")
corpus2scan(corp=corp, dis=dis, posA=posA, posB=posB, objetA=objetA, objetB=objetB, D=D, F=F, attr=attr, val=val, destination=destin, flag=FALSE)
cat("Tranche ",i, " sur ",trnch, "terminée","\n\n")
}
if (flag==TRUE) {
t2 <- Sys.time()
td <- difftime(t2,t1) # end - start: elapsed time is positive (was reversed)
cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
}
}
###############################################################################
scanm2dsmm <- function(scan, seuil= 5, coef="simple-ll", nproj="", trnch, flag=TRUE) {
# Reload a series of scans saved as <scan>_1 ... <scan>_<trnch> and build the
# matching series of DSMs, returned as a plain list (one DSM per slice).
# seuil / coef / nproj are passed through to scan2dsm().
# NOTE(review): `flag` is accepted for interface consistency with the
# companion corpus2*/scan2* functions but is not used here.
# seq_len() is safe when trnch is 0, unlike 1:trnch.
lapply(seq_len(trnch), function(i) {
scanm <- paste(scan, "_", i, sep="")
scan2dsm(scan=scanm, seuil=seuil, coef=coef, nproj=nproj, flag=FALSE)
})
}
#############################################################################
corpus2dsmm <- function(corp, dis=5, posA="QLF|SUB|VBE", posB="QLF|SUB|VBE", objetA= "lemma", objetB = "lemma", trnch=5, attr="", val="", destination, seuil= 5, coef="simple-ll", nproj=""){
# Convenience wrapper chaining two steps:
#   1. corpus2scanm(): scan the corpus slice by slice (scans saved on disk);
#   2. scanm2dsmm() : rebuild the matching list of DSMs from those scans.
# All parameters are passed through; `destination` (path prefix) is required.
# Returns the list of DSMs (one per slice).
t1 <- Sys.time()
# guard both a missing argument and an explicit empty string
if (missing(destination) || destination == "") {
stop(" Indiquer une destination pour le scan ", call.=FALSE)
}
# 1. slice-by-slice scans
corpus2scanm(corp=corp, dis=dis, posA=posA, posB=posB, objetA=objetA, objetB = objetB , trnch= trnch, attr=attr, val=val, destination=destination, flag=FALSE)
cat("\n","Traitements...","\n")
gc() # cleanup
# 2. build the series of dsm objects
res <- scanm2dsmm(scan=destination, seuil=seuil, coef=coef, nproj=nproj, flag=FALSE, trnch=trnch)
t2 <- Sys.time()
td <- difftime(t2,t1) # end - start: elapsed time is positive (was reversed)
cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
res
}
#################################################################################
dsmm2af <- function(dsmm, lm, nppv, xax=1, yax=1, cex=.9, decal=TRUE, asp=1) {
# Track the nearest neighbours of a lemma across a series of DSMs (one per
# corpus slice), build a lemma x slice distance matrix, and visualise the
# evolution with a correspondence analysis (CA).
# Strictly complementary to dsm2af(): this view is only meant to highlight
# evolutions over time, not sub-groupings.
# Difficulty: outliers that break the CA must be removed first.
# TODO: display the removed outliers as supplementary elements.
#   dsmm    : list of dsm objects (one per slice), e.g. from corpus2dsmm().
#   lm      : target lemma.
#   nppv    : number of nearest neighbours collected per slice.
#   xax,yax : +1/-1 multipliers to flip the orientation of the axes.
#   cex     : label size.
#   decal   : if TRUE, displace overlapping labels via lisible().
#   asp     : if 1, plot with aspect ratio fixed to 1. (Bug fix: `asp` was
#             previously read from the global environment — it was neither a
#             parameter nor a local — making the function error unless a
#             global `asp` happened to exist.)
# Returns a list of class "NPPVM" (matrices, neighbour stats, variances,
# CA coordinates).
library(wordspace, quietly=TRUE, warn.conflicts=FALSE)
library(ade4, quietly=TRUE, warn.conflicts=FALSE)
library(circular, quietly=TRUE, warn.conflicts=FALSE)
#library(MASS, quietly=TRUE, warn.conflicts=FALSE)
options(warn=-1)
cooc.tt <- as.vector(NULL)
trnch <- length(dsmm)
# first pass: collect the lemma's neighbours in every slice
for (i in seq_len(trnch)) {
cooc.raw <- nearest.neighbours(dsmm[[i]], lm, n=nppv)
cooc.nam <- names(cooc.raw)
cooc.tt <- c(cooc.tt, cooc.nam)
}
# full, deduplicated neighbour list (character vector)
cooc.tt <- unique(cooc.tt)
nb.cooc <- length(cooc.tt)
piv <- rep(lm, times=nb.cooc)
# distance of every neighbour to lm, in every slice > matrix
distmat <- matrix(ncol = trnch, nrow = nb.cooc, 0)
vec.ncol <- as.vector(NULL)
for (i in seq_len(trnch)) {
distmat[,i] <- pair.distances(piv, cooc.tt, method="cosine", dsmm[[i]], convert=FALSE)
vec.ncol <- c(vec.ncol, paste("PER_", i, sep=""))
}
colnames(distmat) <- vec.ncol # period labels PER_1 ... PER_trnch
rownames(distmat) <- cooc.tt
# per-lemma (row) variance, appended to the raw matrix
diff0.var <- apply(distmat, 1, var, na.rm=TRUE)
distmat0 <- cbind(distmat, diff0.var*100)
res <- list(NULL) # output container
res[[1]] <- distmat0 # raw matrix
# drop incomplete rows: any row whose sum is not finite (Inf, -Inf, NA, NaN).
# (Bug fix: the previous TRUE/FALSE flagging left NaN rows as NA, and
# distmat[NA,] silently produced all-NA rows.)
li.keep <- is.finite(apply(distmat, 1, sum))
distmat <- distmat[li.keep,]
# drop rows whose dispersion exceeds 2 standard deviations (outliers)
diff.var <- apply(distmat, 1, sd, na.rm=TRUE)
var.mean <- mean(diff.var, na.rm=TRUE)
var.sd <- sd(diff.var, na.rm=TRUE)
li.rm <- (diff.var < (var.mean + (2*var.sd)) & diff.var > (var.mean - (2*var.sd)))
li.rm[is.na(li.rm)] <- FALSE
distmat <- distmat[li.rm,]
# reorder rows by reciprocal averaging (barycentre of the period indices)
li.m <- rep(0, nrow(distmat))
for (j in 1:ncol(distmat)){
for (i in 1:nrow(distmat)){
li.m[i] <- li.m[i]+(distmat[i,j]*j)
}
}
li.m <- li.m / rowSums(distmat)
distmat <- distmat[rev(sort.list(li.m)),]
res[[2]] <- distmat # cleaned, reordered matrix
dis.mean <- apply(distmat,1,mean)
dis.sd <- apply(distmat,1,sd)
dis.util <- cbind(row.names(distmat),dis.mean,dis.sd)
colnames(dis.util) <- c("lemmes", "coeff.moy.", "sd")
cat("\n")
write.matrix(dis.util)
res[[3]] <- dis.util[,1:2]
# row-wise lowess() smoothing: indispensable for a readable CA
nb <- nrow(distmat)
dist.colnames <- colnames(distmat)
dist.rownames <- rownames(distmat)
ls.coeff2 <- matrix(0, nrow=nb, ncol=trnch)
for (i in 1:nb) {
ls.coeff2[i,] <- lowess(distmat[i,])$y
}
distmat <- ls.coeff2
colnames(distmat) <- dist.colnames
rownames(distmat) <- dist.rownames
distmat[distmat<0] <- 0 # clamp smoothing undershoot to zero
# row and column variances of the smoothed matrix
diff.var <- sort(apply(distmat, 1, var, na.rm=TRUE))
diff2.var <- apply(distmat, 2, var, na.rm=TRUE)
res[[4]] <- diff.var
res[[5]] <- diff2.var
# correspondence analysis (CA)
af.distmat <- dudi.coa(distmat, scannf=FALSE)
af.util.co <- af.distmat$co
colnames(af.util.co) <- c("axe1", "axe2")
af.util.li <- af.distmat$li
colnames(af.util.li) <- c("axe1", "axe2")
af.util.tt <- rbind(af.util.co, af.util.li)
res[[6]] <- af.util.tt # NOTE: stored BEFORE label displacement and axis flips
names(res) <- c("matrice_brute", "matrice_nettoyee", "vecteur_ppvoisins", "variances_lignes", "variances_colonnes", "coordonnees")
co.nm <- colnames(distmat)
li.nm <- rownames(distmat)
tt.nm <- c(co.nm, li.nm)
# avoid label overlap (delegated to lisible())
if (decal==TRUE){
ymax <- max(af.util.tt[,2])
ymin <- min(af.util.tt[,2])
Tbon <- lisible(af.util.tt[,1],af.util.tt[,2],lab=row.names(af.util.tt),mn=ymin, mx=ymax,cex=(cex+.1))
af.util.tt[,1] <- Tbon[,1]
af.util.tt[,2] <- Tbon[,2]
}
af.util.tt[,1] <- af.util.tt[,1]*xax # axis orientation control
af.util.tt[,2] <- af.util.tt[,2]*yax
# split back into column points (periods) and row points (lemmas)
af.util.co <- af.util.tt[(1:trnch),]
af.util.li <- af.util.tt[((trnch+1):(length(af.util.tt[,2]))),]
# draw the CA map
par(mar=c(0.5,0.5,1.7,0.5))
if (asp==1){
plot(af.util.tt, asp=1, type="n", axes=FALSE, frame.plot=TRUE) # frame
}
else {
plot(af.util.tt, type="n", axes=FALSE, frame.plot=TRUE) # frame
}
#lines(af.util.co, col="grey", lwd=3) # trace
text(af.util.co, labels=co.nm, cex=cex, col="red", font=2) # column points (periods)
text(af.util.li, labels=li.nm, cex=cex, col="blue") # row points (lemmas)
nm.obj <- deparse(substitute(dsmm))
nbcoocr <- length(af.util.li[,1])
mn <- paste("DSM multiple d'origine : ",nm.obj," (", trnch," tranches). Lemme : ", lm, ". ",nppv, " > ", nbcoocr, " éléments.", sep = "")
title(main = mn, line=1, cex.main=.8, font.main=1, adj = 0) # title
titranal= "ÉVOLUTION DU CHAMP SÉMANTIQUE (sémantique distributionnelle)"
mtext(titranal, 3, line=0,cex=.8, font=1, adj=0)
# spllines(): smoothed trace through the period points — helper presumably
# defined elsewhere in this file/package (TODO confirm)
spllines(af.util.co[,1], af.util.co[,2], col="red") # trace
class(res) <- "NPPVM"
return(res)
}
##########################################################################
##########################################################################
#####################
# suppression des recouvrements
# partant du centre, on écarte les points qui provoquent recouvrement,
# toujours vers l'extérieur (selon le quadrant), alternativement horizontalement
# et verticalement, de manière à éviter la déformation du nuage,
# en pondérant l'alternance par la proximité angulaire avec l'axe 1 ou 2
# peut durer de quelques secondes à quelques minutes !!!
#####################
lisible <- function (x, y, lab, mn, mx, cex=.2){
# Nudge overlapping point labels apart so a factor map stays legible.
# Starting from the centre of the cloud, every point whose label box would
# overlap the current point is pushed outward (within its own quadrant),
# either horizontally or vertically; the choice alternates and is weighted by
# the point's angular proximity to axis 1 vs axis 2, randomised to break ties.
#   x, y   : point coordinates.
#   lab    : character labels, one per point.
#   mn, mx : min and max of the y range (scales the estimated label-box size).
#   cex    : character expansion used to estimate glyph size.
# Returns a data.frame (x, y, lab) with adjusted coordinates, in the original
# row order. Can take from a few seconds to several minutes on large clouds.
# NOTE(review): uses runif() without a fixed seed, so the displacement is not
# reproducible across calls.
# build the initial table (tabdep = "tableau de départ")
library(circular, quietly=TRUE, warn.conflicts=FALSE)
eps <- 0.0000000001
# cbind() with a character column coerces everything to character, hence the
# as.numeric(as.character(...)) conversions just below
tabdep <- as.data.frame(cbind(x,y,lab))
names(tabdep) <- c("x","y","lab")
row.names(tabdep) <- seq(1,nrow(tabdep))
tabdep$x <- as.numeric(as.character(tabdep[,1]))
tabdep$y <- as.numeric(as.character(tabdep[,2]))
tabdep$lab <- as.character(tabdep$lab)
# estimated glyph geometry, in data units
htlet <- (mx-mn)/(30/cex) # letter height
lglet <- htlet*.5 # letter width
H <- lglet/2 # half letter width
indx <- as.numeric(row.names(tabdep)) # original row order, restored at the end
d2 <- (tabdep$x^2)+(tabdep$y^2) # squared distance from the origin
drt <- tabdep$x + (H*nchar(tabdep$lab)) # right edge of the label box
gau <- tabdep$x - (H*nchar(tabdep$lab)) # left edge of the label box
angl <- deg(atan(tabdep$y/tabdep$x))/.9 # angle in degrees, rescaled
# resulting columns: 1=x 2=y 3=lab 4=indx 5=d2 6=drt 7=gau 8=angl
tabdep <- as.data.frame(cbind(tabdep,indx,d2,drt,gau,angl))
tt <- length(x)
tabfin <- tabpro <- tabdep
# special case: points sharing the exact same coordinates are jittered so the
# distance-based ordering below can separate them
tabpro <- tabpro[sort.list(tabpro$d2),]
for (i in 2:nrow(tabpro)) {
if (signif(tabpro[i,5],8) == signif(tabpro[i-1,5],8)) {
tabpro[i,1] <- tabpro[i,1] + (tabpro[i,1]/10000)
}
}
tabpro$d2 <- (tabpro$x^2)+(tabpro$y^2)
rn <- (runif(tt*100))*100 # pool of random thresholds for the H/V alternation
for (i in 1:tt){
# sort by distance, move the innermost point to the final table
tabpro <- tabpro[sort.list(tabpro$d2),]
cnt <- (tabpro[1,])
tabfin[i,] <- cnt
tabpro <- tabpro[-1,]
# everything that may overlap the active point (cnt) must be pushed away:
# build the "rub(an)" (ribbon) of all points to displace
if (nrow(tabpro)==0) next
cnt[1] <- as.numeric(as.character(cnt[1]))-(eps*sign(as.numeric(as.character(cnt[1]))))
cnt[2] <- as.numeric(as.character(cnt[2]))-(eps*sign(as.numeric(as.character(cnt[2]))))
# ribbon = points within one letter height of cnt's y
ruban <- tabpro[(abs(as.numeric(tabpro$y)-as.numeric(as.character(cnt[2])))< htlet),]
if (nrow(ruban) == 0) next
# left-side overlaps (their right edge crosses cnt's left edge, col 7 = gau)
rubg <- ruban[(ruban$x < as.numeric(as.character(cnt[1])) & ruban$drt > as.numeric(as.character(cnt[7]))),]
# right-side overlaps (their left edge crosses cnt's right edge, col 6 = drt)
rubd <- ruban[(ruban$x > as.numeric(as.character(cnt[1])) & ruban$gau < as.numeric(as.character(cnt[6]))),]
rub <- rbind(rubg,rubd)
rub <- unique(rub)
if (nrow(rub) == 0) next
n <- nrow(rub)
r <- 1
# push every ribbon point alternately horizontally or vertically, towards the
# outside of its quadrant, mixing the angle value with a random threshold (!)
for (j in 1:n){
if (rub[j,1]>0 & rub[j,2]>0 & rub[j,8]<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[6]+(H*nchar(rub[j,3]))
if (rub[j,1]>0 & rub[j,2]>0 & rub[j,8]>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]+(htlet)
if (rub[j,1]>0 & rub[j,2]<0 & abs(rub[j,8])<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[6]+(H*nchar(rub[j,3]))
if (rub[j,1]>0 & rub[j,2]<0 & abs(rub[j,8])>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]-(htlet)
if (rub[j,1]<0 & rub[j,2]<0 & rub[j,8]<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[7]-(H*nchar(rub[j,3]))
if (rub[j,1]<0 & rub[j,2]<0 & rub[j,8]>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]-(htlet)
if (rub[j,1]<0 & rub[j,2]>0 & abs(rub[j,8])<rn[r]) tabpro[(tabpro[,4]==rub[j,4]),1] <- cnt[7]-(H*nchar(rub[j,3]))
if (rub[j,1]<0 & rub[j,2]>0 & abs(rub[j,8])>=rn[r]) tabpro[(tabpro[,4]==rub[j,4]),2] <- cnt[2]+(htlet)
r <- r+1
}
# recompute the relative position of all remaining points, so the right
# point is attacked on the next iteration
tabpro$d2 <- (tabpro$x^2) + (tabpro$y^2)
tabpro$drt <- tabpro$x + (H*nchar(tabpro$lab))
tabpro$gau <- tabpro$x - (H*nchar(tabpro$lab))
}
# restore the original row order (indx) and the original labels
tabfin <- tabfin[sort.list(tabfin$indx),]
tabfin[,3] <- lab
return(tabfin)
}
###################################################################################
listevaleurs <- function(corp, attr) {
# Utility: list the values of a structural attribute in a CQP corpus, with
# the token count of each region and the cumulated count per distinct value.
#   corp : CQP corpus name.
#   attr : structural attribute name (e.g. "text_id").
# Returns a list: df.val (one row per region: id, name, size) and
# df.valsum (total token count per distinct value).
t1 <- Sys.time()
gc()
library(rcqp, quietly=TRUE, warn.conflicts=FALSE)
options(warn=-1)
efftt <- size(corpus(corp))
requ <- paste(corp,".",attr, sep="")
# structural ids covering the whole corpus
idsattr <- unique(cqi_cpos2struc(requ, 0:(efftt-1)))
nb.idsattr <- length(idsattr)
# for each id: boundary corpus positions and value name
df.val <- data.frame(NULL)
for (i in seq_len(nb.idsattr)) {
df.val[i,1] <- idsattr[i]
bornes <- cqi_struc2cpos(requ, idsattr[i])
df.val[i,2] <- cqi_struc2str(requ, idsattr[i])
df.val[i,3] <- bornes[2]-bornes[1]+1
}
names(df.val) <- c("id","nom","effectif")
# cumulated token counts per distinct value
prov <- df.val
prov[,2] <- as.factor(prov[,2])
df.valsum <- tapply(prov[,3],prov[,2],sum)
res <- list(df.val,df.valsum)
names(res) <- c("df.val","df.valsum")
cat("effectif total du corpus ", efftt, "\n\n")
print(as.matrix(df.valsum))
t2 <- Sys.time()
td <- difftime(t2,t1) # end - start: elapsed time is positive (was reversed)
cat("\n","Temps écoulé :", round(as.numeric(td),2), units(td), "\n")
return(res)
}
|
# 11.plotSignatureProps.R
################# notes #################
# 1. read in EMu results files and plot mutational signature proportions
#
# 2. create stats for trunk-branch-leaf comparisons
#
#
#
################# helper functions #################
# colours used for the four EMu signatures (A, B, C, D) in every plot
sigColours <- c("olivedrab", "salmon", "royalblue", "goldenrod")

# Render a stacked barplot of per-sample signature proportions to a PDF.
#   propTab : EMu "Assigned-Z" table; column 1 = sample labels,
#             columns 4:7 = proportions of signatures A-D.
#   pdfPath : output PDF path.
#   width   : PDF width in inches (the leaf plot needs a much wider canvas).
plotSignatureProps <- function(propTab, pdfPath, width) {
  pdf(file = pdfPath, onefile = TRUE, width = width, height = 5)
  par(mar = c(7, 5, 5, 5))
  plotMat <- t(as.matrix(propTab[4:7]))
  barplot(plotMat, col = sigColours, names.arg = propTab[[1]], las = 2)
  dev.off()
}

# Strip-chart + boxplot comparison of trunk (A-D) vs branch (Ab-Db) signature
# proportions for one tumour subgroup, annotated with Wilcoxon rank-sum
# p-values (one per signature).
#   compTab : subset of plotTab with columns A, Ab, B, Bb, C, Cb, D, Db.
#   pdfPath : output PDF path.
#   ylim    : optional y-axis limits for the stripchart (NULL = default).
# Returns the four p-values (named A-D), invisibly.
plotTrunkBranchComparison <- function(compTab, pdfPath, ylim = NULL) {
  pVals <- sapply(c("A", "B", "C", "D"), function(s) {
    wilcox.test(compTab[[s]], compTab[[paste0(s, "b")]])$p.value
  })
  pdf(file = pdfPath, onefile = TRUE, width = 5, height = 5)
  par(mar = c(7, 5, 5, 5), xpd = TRUE)
  stripArgs <- list(compTab, vertical = TRUE, las = 2, pch = 20,
                    col = rep(sigColours, each = 2))
  if (!is.null(ylim)) stripArgs$ylim <- ylim
  do.call(stripchart, stripArgs)
  boxplot(compTab, add = TRUE, xaxt = 'n', yaxt = 'n')
  # p-value labels sit above the paired columns (x = 1, 3, 5, 7)
  text(x = c(1, 3, 5, 7), y = c(1, 0.95, 0.9, 0.85), labels = pVals, cex = 0.5)
  dev.off()
  invisible(pVals)
}

################# main program #################
sampleList <- read.csv(file="~/PhD/CRCproject/masterSampleList.allSamples.filt.csv", header=FALSE, stringsAsFactors=FALSE)
sampleNames <- unique(sampleList[1])
holdingDir <- "5.mutationalSignatures/"
holdingDirVCF <- "1.platypusCalls/somaticTotal.0.01/" # unused in section 1; kept for later sections
################## 1. plot mutational signature proportions ################
# read in proportions files (trunk/root, branch and leaf assignments);
# column 6 of sampleList holds the project root directory
emuDir <- paste(sampleList[1,6], holdingDir, "162906.EMu/", sep="")
propEMuRoot <- read.table(file=paste(emuDir, "Assigned-Z.root.txt", sep=""), stringsAsFactors = FALSE, sep="\t")
propEMubranch <- read.table(file=paste(emuDir, "Assigned-Z.branches.txt", sep=""), stringsAsFactors = FALSE, sep="\t")
propEMuleaf <- read.table(file=paste(emuDir, "Assigned-Z.leaf.txt", sep=""), stringsAsFactors = FALSE, sep="\t")
# barplots of per-sample signature proportions (root, branches, leaves)
plotSignatureProps(propEMuRoot, paste(emuDir, "Assigned-plot.root.pdf", sep=""), width = 7)
plotSignatureProps(propEMubranch, paste(emuDir, "Assigned-plot.branches.pdf", sep=""), width = 7)
plotSignatureProps(propEMuleaf, paste(emuDir, "Assigned-plot.leafs.pdf", sep=""), width = 40)
# boxplots and stats of trunk-branch comparisons
rootComp <- propEMuRoot[-c(17:20, 24), c(4:7)]
names(rootComp) <- c("A", "B", "C", "D")
# NOTE(review): row names are taken from the *branch* table — presumably the
# rows align once 17:20 and 24 are dropped from the root table; confirm
row.names(rootComp) <- propEMubranch[[1]]
branchComp <- propEMubranch[, c(4:7)]
names(branchComp) <- c("Ab", "Bb", "Cb", "Db")
row.names(branchComp) <- propEMubranch[[1]]
plotTab <- cbind(branchComp, rootComp)
plotTab <- plotTab[order(names(plotTab))] # column order: A, Ab, B, Bb, C, Cb, D, Db
# stats for carcinomas (rows 1:3 and 5:11 of plotTab)
carcinomasTab <- plotTab[c(1:3, 5:11), ]
plotTrunkBranchComparison(carcinomasTab, paste(emuDir, "Assigned-boxplots.carcinomas.pdf", sep=""))
# stats for adenomas
adenomaTab <- plotTab[c(12:16), ]
plotTrunkBranchComparison(adenomaTab, paste(emuDir, "Assigned-boxplots.adenomas.pdf", sep=""), ylim = c(0, 0.8))
# stats for Lynch and MSI
LynchTab <- plotTab[c(4, 17:19), ]
plotTrunkBranchComparison(LynchTab, paste(emuDir, "Assigned-boxplots.lynch.pdf", sep=""), ylim = c(0, 0.8))
################## 2. compare raw signatures ################
| /11.plotSignatureProps.R | no_license | cyclo-hexane/analysisScripts | R | false | false | 5,538 | r | # 11.plotSignatureProps.R
################# notes #################
# 1. read in EMu results files and plot mutational signature proportions
#
# 2. create stats for trunk-branch-leaf comparisons
#
#
#
################# helper functions #################
# colours used for the four EMu signatures (A, B, C, D) in every plot
sigColours <- c("olivedrab", "salmon", "royalblue", "goldenrod")

# Render a stacked barplot of per-sample signature proportions to a PDF.
#   propTab : EMu "Assigned-Z" table; column 1 = sample labels,
#             columns 4:7 = proportions of signatures A-D.
#   pdfPath : output PDF path.
#   width   : PDF width in inches (the leaf plot needs a much wider canvas).
plotSignatureProps <- function(propTab, pdfPath, width) {
  pdf(file = pdfPath, onefile = TRUE, width = width, height = 5)
  par(mar = c(7, 5, 5, 5))
  plotMat <- t(as.matrix(propTab[4:7]))
  barplot(plotMat, col = sigColours, names.arg = propTab[[1]], las = 2)
  dev.off()
}

# Strip-chart + boxplot comparison of trunk (A-D) vs branch (Ab-Db) signature
# proportions for one tumour subgroup, annotated with Wilcoxon rank-sum
# p-values (one per signature).
#   compTab : subset of plotTab with columns A, Ab, B, Bb, C, Cb, D, Db.
#   pdfPath : output PDF path.
#   ylim    : optional y-axis limits for the stripchart (NULL = default).
# Returns the four p-values (named A-D), invisibly.
plotTrunkBranchComparison <- function(compTab, pdfPath, ylim = NULL) {
  pVals <- sapply(c("A", "B", "C", "D"), function(s) {
    wilcox.test(compTab[[s]], compTab[[paste0(s, "b")]])$p.value
  })
  pdf(file = pdfPath, onefile = TRUE, width = 5, height = 5)
  par(mar = c(7, 5, 5, 5), xpd = TRUE)
  stripArgs <- list(compTab, vertical = TRUE, las = 2, pch = 20,
                    col = rep(sigColours, each = 2))
  if (!is.null(ylim)) stripArgs$ylim <- ylim
  do.call(stripchart, stripArgs)
  boxplot(compTab, add = TRUE, xaxt = 'n', yaxt = 'n')
  # p-value labels sit above the paired columns (x = 1, 3, 5, 7)
  text(x = c(1, 3, 5, 7), y = c(1, 0.95, 0.9, 0.85), labels = pVals, cex = 0.5)
  dev.off()
  invisible(pVals)
}

################# main program #################
sampleList <- read.csv(file="~/PhD/CRCproject/masterSampleList.allSamples.filt.csv", header=FALSE, stringsAsFactors=FALSE)
sampleNames <- unique(sampleList[1])
holdingDir <- "5.mutationalSignatures/"
holdingDirVCF <- "1.platypusCalls/somaticTotal.0.01/" # unused in section 1; kept for later sections
################## 1. plot mutational signature proportions ################
# read in proportions files (trunk/root, branch and leaf assignments);
# column 6 of sampleList holds the project root directory
emuDir <- paste(sampleList[1,6], holdingDir, "162906.EMu/", sep="")
propEMuRoot <- read.table(file=paste(emuDir, "Assigned-Z.root.txt", sep=""), stringsAsFactors = FALSE, sep="\t")
propEMubranch <- read.table(file=paste(emuDir, "Assigned-Z.branches.txt", sep=""), stringsAsFactors = FALSE, sep="\t")
propEMuleaf <- read.table(file=paste(emuDir, "Assigned-Z.leaf.txt", sep=""), stringsAsFactors = FALSE, sep="\t")
# barplots of per-sample signature proportions (root, branches, leaves)
plotSignatureProps(propEMuRoot, paste(emuDir, "Assigned-plot.root.pdf", sep=""), width = 7)
plotSignatureProps(propEMubranch, paste(emuDir, "Assigned-plot.branches.pdf", sep=""), width = 7)
plotSignatureProps(propEMuleaf, paste(emuDir, "Assigned-plot.leafs.pdf", sep=""), width = 40)
# boxplots and stats of trunk-branch comparisons
rootComp <- propEMuRoot[-c(17:20, 24), c(4:7)]
names(rootComp) <- c("A", "B", "C", "D")
# NOTE(review): row names are taken from the *branch* table — presumably the
# rows align once 17:20 and 24 are dropped from the root table; confirm
row.names(rootComp) <- propEMubranch[[1]]
branchComp <- propEMubranch[, c(4:7)]
names(branchComp) <- c("Ab", "Bb", "Cb", "Db")
row.names(branchComp) <- propEMubranch[[1]]
plotTab <- cbind(branchComp, rootComp)
plotTab <- plotTab[order(names(plotTab))] # column order: A, Ab, B, Bb, C, Cb, D, Db
# stats for carcinomas (rows 1:3 and 5:11 of plotTab)
carcinomasTab <- plotTab[c(1:3, 5:11), ]
plotTrunkBranchComparison(carcinomasTab, paste(emuDir, "Assigned-boxplots.carcinomas.pdf", sep=""))
# stats for adenomas
adenomaTab <- plotTab[c(12:16), ]
plotTrunkBranchComparison(adenomaTab, paste(emuDir, "Assigned-boxplots.adenomas.pdf", sep=""), ylim = c(0, 0.8))
# stats for Lynch and MSI
LynchTab <- plotTab[c(4, 17:19), ]
plotTrunkBranchComparison(LynchTab, paste(emuDir, "Assigned-boxplots.lynch.pdf", sep=""), ylim = c(0, 0.8))
################## 2. compare raw signatures ################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OED_isothermal.R
\name{detFIM}
\alias{detFIM}
\title{Objective function for D-optimal OED}
\usage{
detFIM(x, model, pars)
}
\arguments{
\item{x}{a numeric vector of length \code{n} defining the design matrix.
The first n/2 elements are the time points and the last n/2 are the
temperatures of these points.}
\item{model}{character string defining the inactivation model to use.}
\item{pars}{list defining the model parameters according to the rules defined in the bioinactivation package.}
}
\value{
Numeric value of the objective function for criterion D, which is the determinant of the FIM.
}
\description{
Objective function for D-optimal OED
}
\examples{
pars <- list(temp_crit = 55,
n = 1.5,
k_b = 0.1)
detFIM(x = c(10,15, 20, 25), "Peleg", pars)
}
| /man/detFIM.Rd | no_license | jlpesoto/bioOED_0.2.1 | R | false | true | 850 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OED_isothermal.R
\name{detFIM}
\alias{detFIM}
\title{Objective function for D-optimal OED}
\usage{
detFIM(x, model, pars)
}
\arguments{
\item{x}{a numeric vector of length \code{n} defining the design matrix.
The first n/2 elements are the time points and the last n/2 are the
temperatures of these points.}
\item{model}{character string defining the inactivation model to use.}
\item{pars}{list defining the model parameters according to the rules defined in the bioinactivation package.}
}
\value{
Numeric value of the objective function for criterion D, which is the determinant of the FIM.
}
\description{
Objective function for D-optimal OED
}
\examples{
pars <- list(temp_crit = 55,
n = 1.5,
k_b = 0.1)
detFIM(x = c(10,15, 20, 25), "Peleg", pars)
}
|
\name{print.n.for.2p}
\alias{print.n.for.2p}
\title{Print n.for.2p results}
\description{Print results for sample size for hypothesis testing of 2 proportions}
\usage{
\method{print}{n.for.2p}(x, ...)
}
\arguments{
\item{x}{object of class 'n.for.2p'}
\item{...}{further arguments passed to or used by methods.}
}
\author{Virasakdi Chongsuvivatwong
\email{ <cvirasak@medicine.psu.ac.th>}
}
\seealso{'n.for.2p'}
\examples{
n.for.2p(p1=.1, p2=.2)
n.for.2p(p1=seq(1,9,.5)/10, p2=.5)
}
\keyword{database}
| /man/print.n.for.2p.rd | no_license | cran/epicalc | R | false | false | 620 | rd | \name{print n.for.2p}
\alias{print.n.for.2p}
\title{Print n.for.2p results}
\description{Print results for sample size for hypothesis testing of 2 proportions}
\usage{
\method{print}{n.for.2p}(x, ...)
}
\arguments{
\item{x}{object of class 'n.for.2p'}
\item{...}{further arguments passed to or used by methods.}
}
\author{Virasakdi Chongsuvivatwong
\email{ <cvirasak@medicine.psu.ac.th>}
}
\seealso{'n.for.2p'}
\examples{
n.for.2p(p1=.1, p2=.2)
n.for.2p(p1=seq(1,9,.5)/10, p2=.5)
}
\keyword{database}
|
# Association-rule mining on three datasets (books, groceries, movies)
# using the apriori algorithm from the arules package.
library(arules)
library(arulesViz)
## ---- Books: 0/1 (one-hot) data frame ----
book<-read.csv(file.choose())
View(book)
class(book)
# Convert the binary data frame to the arules "transactions" class;
# each row becomes one transaction.
book_trans<-as(as.matrix(book),"transactions")
inspect(book_trans[1:100])
# With 2000 rows we should see 2000 transactions when inspecting book_trans.
# Mine rules while varying support / confidence / minlen.
rules<-apriori(book_trans,parameter = list(support=0.002,confidence=0.7))
inspect(rules[1:5])
plot(rules)
head(quality(rules))
rules1<-apriori(book_trans,parameter = list(support=0.002,confidence=0.7, minlen = 4))
inspect(rules1[1:5])
plot(rules1)
head(quality(rules1))
rules2<-apriori(book_trans,parameter = list(support=0.006,confidence=0.7, minlen = 4))
inspect(rules2[1:5])
plot(rules2)
head(quality(rules2))
# Whenever we have binary (one-hot) data, use this approach and vary
# support, confidence and minlen to obtain different rule sets.
# When the data contain item names instead, load them with
# read.transactions(file="path", format="basket", sep=",") first.
###################################################################################
## ---- Groceries: data already in basket/transaction format ----
groceries<-read.transactions(file.choose(),format="basket")
inspect(groceries[1:10])
class(groceries)
# Mine rules with different support/confidence/minlen settings.
groceries_rules<-apriori(groceries,parameter = list(support = 0.002,confidence = 0.05,minlen=3))
inspect(groceries_rules[1:10])
plot(groceries_rules)
head(quality(groceries_rules))
groceries_rules1<-apriori(groceries,parameter = list(support = 0.002,confidence = 0.07,minlen=4))
inspect(groceries_rules1[1:10])
plot(groceries_rules1)
head(quality(groceries_rules1))
# FIX: this third rule set was assigned to `groceries_rules`, but the lines
# below inspect/plot `groceries_rules2`, which was never defined.
groceries_rules2<-apriori(groceries,parameter = list(support = 0.003,confidence = 0.06,minlen=5))
inspect(groceries_rules2[1:10])
plot(groceries_rules2)
head(quality(groceries_rules2))
#########################################################################################################
## ---- Movies: same data in both transaction and binary form ----
library(arules)
library(arulesViz)
#importing the dataset
movie <- read.csv(file.choose())
View(movie)
# Columns 1-5 hold the transaction format, columns 6-15 the binary format;
# only the binary part is used below.
movies <- movie[,6:15]
View(movies)
# Convert the binary data frame to transactions (one transaction per row).
movie_trans<-as(as.matrix(movies),"transactions")
inspect(movie_trans[1:10])
# Mine rules with different support/confidence/minlen settings.
rules<-apriori(movie_trans,parameter = list(support=0.002,confidence=0.7))
inspect(rules[1:5])
plot(rules)
head(quality(rules))
rules1<-apriori(movie_trans,parameter = list(support=0.002,confidence=0.7, minlen = 2))
inspect(rules1[1:5])
plot(rules1)
head(quality(rules1))
rules2<-apriori(movie_trans,parameter = list(support=0.006,confidence=0.7, minlen = 3))
inspect(rules2[1:5])
plot(rules2)
head(quality(rules2))
| /books_groceries_movies.R | no_license | arunailani/DATA-SCIENCE-ASSIGNMENTS | R | false | false | 3,535 | r | library(arules)
library(arulesViz)
## ---- Books: 0/1 (one-hot) data frame ----
book<-read.csv(file.choose())
View(book)
class(book)
# Convert the binary data frame to the arules "transactions" class;
# each row becomes one transaction.
book_trans<-as(as.matrix(book),"transactions")
inspect(book_trans[1:100])
# With 2000 rows we should see 2000 transactions when inspecting book_trans.
# Mine rules while varying support / confidence / minlen.
rules<-apriori(book_trans,parameter = list(support=0.002,confidence=0.7))
inspect(rules[1:5])
plot(rules)
head(quality(rules))
rules1<-apriori(book_trans,parameter = list(support=0.002,confidence=0.7, minlen = 4))
inspect(rules1[1:5])
plot(rules1)
head(quality(rules1))
rules2<-apriori(book_trans,parameter = list(support=0.006,confidence=0.7, minlen = 4))
inspect(rules2[1:5])
plot(rules2)
head(quality(rules2))
# Whenever we have binary (one-hot) data, use this approach and vary
# support, confidence and minlen to obtain different rule sets.
# When the data contain item names instead, load them with
# read.transactions(file="path", format="basket", sep=",") first.
###################################################################################
## ---- Groceries: data already in basket/transaction format ----
groceries<-read.transactions(file.choose(),format="basket")
inspect(groceries[1:10])
class(groceries)
# Mine rules with different support/confidence/minlen settings.
groceries_rules<-apriori(groceries,parameter = list(support = 0.002,confidence = 0.05,minlen=3))
inspect(groceries_rules[1:10])
plot(groceries_rules)
head(quality(groceries_rules))
groceries_rules1<-apriori(groceries,parameter = list(support = 0.002,confidence = 0.07,minlen=4))
inspect(groceries_rules1[1:10])
plot(groceries_rules1)
head(quality(groceries_rules1))
# FIX: this third rule set was assigned to `groceries_rules`, but the lines
# below inspect/plot `groceries_rules2`, which was never defined.
groceries_rules2<-apriori(groceries,parameter = list(support = 0.003,confidence = 0.06,minlen=5))
inspect(groceries_rules2[1:10])
plot(groceries_rules2)
head(quality(groceries_rules2))
#########################################################################################################
## ---- Movies: same data in both transaction and binary form ----
library(arules)
library(arulesViz)
#importing the dataset
movie <- read.csv(file.choose())
View(movie)
# Columns 1-5 hold the transaction format, columns 6-15 the binary format;
# only the binary part is used below.
movies <- movie[,6:15]
View(movies)
# Convert the binary data frame to transactions (one transaction per row).
movie_trans<-as(as.matrix(movies),"transactions")
inspect(movie_trans[1:10])
# Mine rules with different support/confidence/minlen settings.
rules<-apriori(movie_trans,parameter = list(support=0.002,confidence=0.7))
inspect(rules[1:5])
plot(rules)
head(quality(rules))
rules1<-apriori(movie_trans,parameter = list(support=0.002,confidence=0.7, minlen = 2))
inspect(rules1[1:5])
plot(rules1)
head(quality(rules1))
rules2<-apriori(movie_trans,parameter = list(support=0.006,confidence=0.7, minlen = 3))
inspect(rules2[1:5])
plot(rules2)
head(quality(rules2))
|
######Plot 4
# Reads the household power consumption data, subsets to 2007-02-01/02,
# and draws the 4-panel figure into plot4.png.
# stringsAsFactors=FALSE keeps the character columns as character.
filename <- "household_power_consumption.txt"
rawfile <- read.table(filename, header= TRUE, sep= ";", stringsAsFactors= FALSE)
# Keep only rows for the two target dates (stored as d/m/Y strings).
# FIX: the original used is.element(Date, strsplit(...)), which only worked via
# accidental coercion of the strsplit() list to character; %in% on a plain
# character vector states the intent directly and is equivalent here.
Date <- rawfile[,1]
keepRows <- Date %in% c("1/2/2007", "2/2/2007")
SubDate <- rawfile[keepRows,]
## read in date/time info in format "%d/%m/%Y %H:%M:%S"
dates <- SubDate$Date
times <- SubDate$Time
# paste() concatenates element-wise into "date time" strings (cbind would
# build a matrix instead of a character vector).
x <- paste(dates, times)
# Force English day/month names on a non-English system.
# NOTE(review): "USA" is not a portable locale string; "English" (Windows) or
# "en_US.UTF-8" (Unix) may be needed -- confirm on the target machine.
Sys.setlocale("LC_TIME", locale="USA")
# Convert the combined strings to date-times via strptime().
Date_Time <- strptime(x, format = "%d/%m/%Y %H:%M:%S")
# Draw the 2x2 panel figure into a PNG device.
png(filename = "plot4.png", width= 400, height= 400)
par(mfrow= c(2,2))
plot (Date_Time, as.numeric(SubDate$Global_active_power), type ="l", ylab = "Global Active Power (kilowatts)" , xlab="")
plot (Date_Time, as.numeric(SubDate$Voltage), type ="l", col ="black", xlab= "datetime", ylab = "Voltage" )
plot (Date_Time, as.numeric(SubDate$Sub_metering_1), type ="l", col ="black", xlab= " ", ylab = "Energy Sub metering" )
lines (Date_Time, as.numeric(SubDate$Sub_metering_2), col= "red")
lines (Date_Time, as.numeric(SubDate$Sub_metering_3), col= "blue")
legend ("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty=1, lwd=1, bty = "n", cex = 0.8)
plot (Date_Time, as.numeric(SubDate$Global_reactive_power), type ="l", col ="black", xlab= "datetime", ylab = "Global Reactive Power (kilowatts)" )
dev.off()
| /plot4.R | no_license | Payamdel/EDAera2015 | R | false | false | 2,101 | r | ######Plot 4
# Reads the household power consumption data, subsets to 2007-02-01/02,
# and draws the 4-panel figure into plot4.png.
# stringsAsFactors=FALSE keeps the character columns as character.
filename <- "household_power_consumption.txt"
rawfile <- read.table(filename, header= TRUE, sep= ";", stringsAsFactors= FALSE)
# Keep only rows for the two target dates (stored as d/m/Y strings).
# FIX: the original used is.element(Date, strsplit(...)), which only worked via
# accidental coercion of the strsplit() list to character; %in% on a plain
# character vector states the intent directly and is equivalent here.
Date <- rawfile[,1]
keepRows <- Date %in% c("1/2/2007", "2/2/2007")
SubDate <- rawfile[keepRows,]
## read in date/time info in format "%d/%m/%Y %H:%M:%S"
dates <- SubDate$Date
times <- SubDate$Time
# paste() concatenates element-wise into "date time" strings (cbind would
# build a matrix instead of a character vector).
x <- paste(dates, times)
# Force English day/month names on a non-English system.
# NOTE(review): "USA" is not a portable locale string; "English" (Windows) or
# "en_US.UTF-8" (Unix) may be needed -- confirm on the target machine.
Sys.setlocale("LC_TIME", locale="USA")
# Convert the combined strings to date-times via strptime().
Date_Time <- strptime(x, format = "%d/%m/%Y %H:%M:%S")
# Draw the 2x2 panel figure into a PNG device.
png(filename = "plot4.png", width= 400, height= 400)
par(mfrow= c(2,2))
plot (Date_Time, as.numeric(SubDate$Global_active_power), type ="l", ylab = "Global Active Power (kilowatts)" , xlab="")
plot (Date_Time, as.numeric(SubDate$Voltage), type ="l", col ="black", xlab= "datetime", ylab = "Voltage" )
plot (Date_Time, as.numeric(SubDate$Sub_metering_1), type ="l", col ="black", xlab= " ", ylab = "Energy Sub metering" )
lines (Date_Time, as.numeric(SubDate$Sub_metering_2), col= "red")
lines (Date_Time, as.numeric(SubDate$Sub_metering_3), col= "blue")
legend ("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lty=1, lwd=1, bty = "n", cex = 0.8)
plot (Date_Time, as.numeric(SubDate$Global_reactive_power), type ="l", col ="black", xlab= "datetime", ylab = "Global Reactive Power (kilowatts)" )
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.mfa}
\alias{plot.mfa}
\title{Plot Method For "mfa" Object}
\usage{
\method{plot}{mfa}(x, type, xdim = 1, ydim = 2, facetrows = 2, size = 5,
subtabs = NULL, legend = NA, mytitle = NA, label = NA,
bootstrap_size = 1000, bootstrap_comps = c(1, 2))
}
\arguments{
\item{x}{An object of class "mfa".}
\item{type}{Indicates what type of plot the user wishes to see. Must be one
of the following character strings: "compromise", "partial.factor", or
"loadings".}
\item{xdim, ydim}{The two components the user wishes to plot. Numeric values.}
\item{facetrows}{Used with "partial.factor" and "loadings" plots. Controls
how many rows to use when displaying multiple sub-plots.}
\item{size}{Controls the size of the plotted points. If plotted points
overlap, the user is encouraged to try reducing size.}
\item{subtabs}{Used with "partial.factor" and "loadings" plots. Allows the
user to choose which sub-tables she/he wants to see plots for. Default is
NULL, which will display all the subtables. If not NULL, must be a numeric
vector. each element must be between 1 and K, where K is the total number
of sub-tables in the analysis.}
\item{legend}{An optional parameter that allows the user to control legend
text. Default value is NA. If NA, legend text will be chosen
automatically, based on data row or column names, depending on the plot
type.}
\item{mytitle}{An optional parameter for the user to choose the plot title.
By default, mytitle is NA. If NA, the plot is given a title corresponding
to its type, viz "compromise", "partial.factor", or "loadings."}
\item{label}{Used with "compromise" and "compromise.partial" plots. Allows the
user to choose which values can be presented as a label on the plot. Default is
NULL, which will display no label. If not NULL, must be a vector.}
\item{bootstrap_size}{Used only with "bootstrap" plot to control the bootstrap size. Default
value is 1000.}
\item{bootstrap_comps}{Used only with "bootstrap" plot. Allows the user to choose which
components of bootstrap result she/he want to see plots for. Default is c(1,2), which will
display component 1 and 2.}
}
\value{
Displays the plot of the user's choice.
}
\description{
A plotting function that, given two components/dimensions,
displays a graphic of one of the following: \cr
\itemize{
\item Compromise/Common Factor Scores
\item Partial Factor Scores
\item Loadings
\item Eigenvalues
\item Compromise + Partial Factor Scores
\item Bootstrap ratio plots
}
}
\examples{
# Create an mfa object.
sets.num <- list(c(1:6), c(7:12), c(13:18), c(19:23), c(24:29), c(30:34),
c(35:38), c(39:44), c(45:49), c(50:53))
mfa1 <- mfa(winedata, sets.num)
# Different types of plots:
plot(mfa1, type = "compromise", legend=substr(rownames(mfa1$Fcommon),1,2),
label=substr(rownames(mfa1$Fcommon),3,3))
plot(mfa1, type = "partial.factor", subtabs = NULL, xdim = 2, ydim = 3, size = 4,
legend=substr(rownames(mfa1$Fpartial[[1]]),1,2), label=substr(rownames(mfa1$Fcommon),3,3))
plot(mfa1, type = "loadings", size = 2.5, subtabs = c(9,10),
     legend = c("cat pee", "passion fruit", "green pepper", "mineral","optional 1", "optional 2"))
plot(mfa1, type = "eigenvalues")
plot(mfa1, type = "compromise.partial", xdim = 1, ydim = 2,
     legend=substr(rownames(mfa1$Fcommon),1,2), label=substr(rownames(mfa1$Fcommon),3,3))
plot(mfa1, type= "bootstrap", bootstrap_size = 1000, bootstrap_comps=c(1,2), facetrows=2)
}
| /final_submission/package/mfaMKTLT/man/plot.mfa.Rd | no_license | mshin03/stat243 | R | false | true | 3,522 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.mfa}
\alias{plot.mfa}
\title{Plot Method For "mfa" Object}
\usage{
\method{plot}{mfa}(x, type, xdim = 1, ydim = 2, facetrows = 2, size = 5,
subtabs = NULL, legend = NA, mytitle = NA, label = NA,
bootstrap_size = 1000, bootstrap_comps = c(1, 2))
}
\arguments{
\item{x}{An object of class "mfa".}
\item{type}{Indicates what type of plot the user wishes to see. Must be one
of the following character strings: "compromise", "partial.factor", or
"loadings".}
\item{xdim, ydim}{The two components the user wishes to plot. Numeric values.}
\item{facetrows}{Used with "partial.factor" and "loadings" plots. Controls
how many rows to use when displaying multiple sub-plots.}
\item{size}{Controls the size of the plotted points. If plotted points
overlap, the user is encouraged to try reducing size.}
\item{subtabs}{Used with "partial.factor" and "loadings" plots. Allows the
user to choose which sub-tables she/he wants to see plots for. Default is
NULL, which will display all the subtables. If not NULL, must be a numeric
vector. each element must be between 1 and K, where K is the total number
of sub-tables in the analysis.}
\item{legend}{An optional parameter that allows the user to control legend
text. Default value is NA. If NA, legend text will be chosen
automatically, based on data row or column names, depending on the plot
type.}
\item{mytitle}{An optional parameter for the user to choose the plot title.
By default, mytitle is NA. If NA, the plot is given a title corresponding
to its type, viz "compromise", "partial.factor", or "loadings."}
\item{label}{Used with "compromise" and "compromise.partial" plots. Allows the
user to choose which values can be presented as a label on the plot. Default is
NULL, which will display no label. If not NULL, must be a vector.}
\item{bootstrap_size}{Used only with "bootstrap" plot to control the bootstrap size. Default
value is 1000.}
\item{bootstrap_comps}{Used only with "bootstrap" plot. Allows the user to choose which
components of bootstrap result she/he want to see plots for. Default is c(1,2), which will
display component 1 and 2.}
}
\value{
Displays the plot of the user's choice.
}
\description{
A plotting function that, given two components/dimensions,
displays a graphic of one of the following: \cr
\itemize{
\item Compromise/Common Factor Scores
\item Partial Factor Scores
\item Loadings
\item Eigenvalues
\item Compromise + Partial Factor Scores
\item Bootstrap ratio plots
}
}
\examples{
# Create an mfa object.
sets.num <- list(c(1:6), c(7:12), c(13:18), c(19:23), c(24:29), c(30:34),
c(35:38), c(39:44), c(45:49), c(50:53))
mfa1 <- mfa(winedata, sets.num)
# Different types of plots:
plot(mfa1, type = "compromise", legend=substr(rownames(mfa1$Fcommon),1,2),
label=substr(rownames(mfa1$Fcommon),3,3))
plot(mfa1, type = "partial.factor", subtabs = NULL, xdim = 2, ydim = 3, size = 4,
legend=substr(rownames(mfa1$Fpartial[[1]]),1,2), label=substr(rownames(mfa1$Fcommon),3,3))
plot(mfa1, type = "loadings", size = 2.5, subtabs = c(9,10),
     legend = c("cat pee", "passion fruit", "green pepper", "mineral","optional 1", "optional 2"))
plot(mfa1, type = "eigenvalues")
plot(mfa1, type = "compromise.partial", xdim = 1, ydim = 2,
     legend=substr(rownames(mfa1$Fcommon),1,2), label=substr(rownames(mfa1$Fcommon),3,3))
plot(mfa1, type= "bootstrap", bootstrap_size = 1000, bootstrap_comps=c(1,2), facetrows=2)
}
|
library(unmarked)
### Name: unmarkedFramePCount
### Title: Organize data for the N-mixture model fit by pcount
### Aliases: unmarkedFramePCount
### ** Examples
# Fake data: R sites visited J times each.
R <- 4 # number of sites
J <- 3 # number of visits
# Count matrix: one row per site, one column per visit.
y <- matrix(c(
1,2,0,
0,0,0,
1,1,1,
2,2,1), nrow=R, ncol=J, byrow=TRUE)
y
# Site-level covariates: one row per site.
site.covs <- data.frame(x1=1:4, x2=factor(c('A','B','A','B')))
site.covs
# Observation-level covariates: one R x J matrix per covariate.
obs.covs <- list(
x3 = matrix(c(
-1,0,1,
-2,0,0,
-3,1,0,
0,0,0), nrow=R, ncol=J, byrow=TRUE),
x4 = matrix(c(
'a','b','c',
'd','b','a',
'a','a','c',
'a','b','a'), nrow=R, ncol=J, byrow=TRUE))
obs.covs
umf <- unmarkedFramePCount(y=y, siteCovs=site.covs,
obsCovs=obs.covs) # organize data
umf # take a look at the data
summary(umf) # summarize data
fm <- pcount(~1 ~1, umf, K=10) # fit a model
| /data/genthat_extracted_code/unmarked/examples/unmarkedFramePCount.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 890 | r | library(unmarked)
### Name: unmarkedFramePCount
### Title: Organize data for the N-mixture model fit by pcount
### Aliases: unmarkedFramePCount
### ** Examples
# Fake data: R sites visited J times each.
R <- 4 # number of sites
J <- 3 # number of visits
# Count matrix: one row per site, one column per visit.
y <- matrix(c(
1,2,0,
0,0,0,
1,1,1,
2,2,1), nrow=R, ncol=J, byrow=TRUE)
y
# Site-level covariates: one row per site.
site.covs <- data.frame(x1=1:4, x2=factor(c('A','B','A','B')))
site.covs
# Observation-level covariates: one R x J matrix per covariate.
obs.covs <- list(
x3 = matrix(c(
-1,0,1,
-2,0,0,
-3,1,0,
0,0,0), nrow=R, ncol=J, byrow=TRUE),
x4 = matrix(c(
'a','b','c',
'd','b','a',
'a','a','c',
'a','b','a'), nrow=R, ncol=J, byrow=TRUE))
obs.covs
umf <- unmarkedFramePCount(y=y, siteCovs=site.covs,
obsCovs=obs.covs) # organize data
umf # take a look at the data
summary(umf) # summarize data
fm <- pcount(~1 ~1, umf, K=10) # fit a model
|
# Standard tests/testthat.R runner: load testthat and the package under test,
# then run its full test suite.
library(testthat)
library(tblHelpers)
test_check("tblHelpers")
| /tests/testthat.R | permissive | bcjaeger/tblHelpers | R | false | false | 64 | r | library(testthat)
# Load the package under test and run its testthat suite.
library(tblHelpers)
test_check("tblHelpers")
|
# import packages
library(tidyverse)
library(magrittr)
library(readxl)
library(data.table)
library(janitor)
library(readr)
library(fuzzyjoin)
library(zipcodeR)
library(stringr)
library(parallel)
library(geosphere)
library(tm)
library(ggmap)
library(numform)
library(HistogramTools)
library(plotly)
library(reticulate)
# load helper functions, then every saved object from objects/
source('core/functions.R')
for (obj in list.files('objects/')) {
load(paste0('objects/',obj))
}
# build turicreate models via reticulate (Python bridge)
turicreate <- import("turicreate")
np <- import("numpy")
pd <- import("pandas")
# FIX: removed a stray bare `p` line left over from editing; it referenced an
# undefined object and aborted the script with an error.
# Convert the first imputed data frame to a turicreate SFrame.
# NOTE(review): `imputed_df_list` is expected to come from one of the files
# loaded from objects/ above -- confirm.
turicreate$SFrame(imputed_df_list[[1]])
| /core/model.R | permissive | homebase3/nycdsa_final_project | R | false | false | 596 | r | # import packages
library(tidyverse)
library(magrittr)
library(readxl)
library(data.table)
library(janitor)
library(readr)
library(fuzzyjoin)
library(zipcodeR)
library(stringr)
library(parallel)
library(geosphere)
library(tm)
library(ggmap)
library(numform)
library(HistogramTools)
library(plotly)
library(reticulate)
# load helper functions, then every saved object from objects/
source('core/functions.R')
for (obj in list.files('objects/')) {
load(paste0('objects/',obj))
}
# build turicreate models via reticulate (Python bridge)
turicreate <- import("turicreate")
np <- import("numpy")
pd <- import("pandas")
# FIX: removed a stray bare `p` line left over from editing; it referenced an
# undefined object and aborted the script with an error.
# Convert the first imputed data frame to a turicreate SFrame.
# NOTE(review): `imputed_df_list` is expected to come from one of the files
# loaded from objects/ above -- confirm.
turicreate$SFrame(imputed_df_list[[1]])
|
library(readxl)
library(data.table)
# FIX: readxl::read_excel() has no `header` argument (that is read.table's
# API); the equivalent readxl argument is col_names. The original call failed
# with "unused argument (header = TRUE)".
CONPenh <- read_excel("aat6720_CONPPutEnhs.xlsx", sheet = 2, col_names = TRUE)
# FIX: the lines below were console output pasted into the script
# (`> head(CONPenh)` plus the printed tibble); they are not valid R and broke
# parsing, so they are kept only as comments for reference.
# > head(CONPenh)
# # A tibble: 6 x 16
#   Chrom  Start    End CONP_ID All_OP_No. iPSC_OP_No. TD0_OP_No. TD11_OP_No.
#   <chr>  <dbl>  <dbl>   <dbl>      <dbl>       <dbl>      <dbl>       <dbl>
# 1 chr1  817175 817393       2          2           0          0           0
# 2 chr1  818953 819108       3          2           0          1           1
# 3 chr1  907577 908523       8          8           0          0           3
# 4 chr1  913787 914227      11          2           0          0           0
# 5 chr1  915886 918310      12         27           0          7          10
# 6 chr1  994848 995028      47          3           0          1           0
# ... with 8 more variables: TD30_OP_No. <dbl>, CTX1_OP_No. <dbl>,
#   CTX2_OP_No. <dbl>, TD0_annotation <chr>, TD11_annotation <chr>,
#   TD30_annotation <chr>, CTX1_annotation <chr>, CTX2_annotation <chr>
write.table(CONPenh,"AmiriEtAl_PutEnh.bed",quote=FALSE, row.names=FALSE, col.names=FALSE,sep="\t") | /AmariEtAl_Analysis/EnhancerLocationExtraction.r | no_license | lengie/eRNA_Detect | R | false | false | 1,075 | r | library(readxl)
library(data.table)
# FIX: readxl::read_excel() has no `header` argument (that is read.table's
# API); the equivalent readxl argument is col_names. The original call failed
# with "unused argument (header = TRUE)".
CONPenh <- read_excel("aat6720_CONPPutEnhs.xlsx", sheet = 2, col_names = TRUE)
# FIX: the lines below were console output pasted into the script
# (`> head(CONPenh)` plus the printed tibble); they are not valid R and broke
# parsing, so they are kept only as comments for reference.
# > head(CONPenh)
# # A tibble: 6 x 16
#   Chrom  Start    End CONP_ID All_OP_No. iPSC_OP_No. TD0_OP_No. TD11_OP_No.
#   <chr>  <dbl>  <dbl>   <dbl>      <dbl>       <dbl>      <dbl>       <dbl>
# 1 chr1  817175 817393       2          2           0          0           0
# 2 chr1  818953 819108       3          2           0          1           1
# 3 chr1  907577 908523       8          8           0          0           3
# 4 chr1  913787 914227      11          2           0          0           0
# 5 chr1  915886 918310      12         27           0          7          10
# 6 chr1  994848 995028      47          3           0          1           0
# ... with 8 more variables: TD30_OP_No. <dbl>, CTX1_OP_No. <dbl>,
#   CTX2_OP_No. <dbl>, TD0_annotation <chr>, TD11_annotation <chr>,
#   TD30_annotation <chr>, CTX1_annotation <chr>, CTX2_annotation <chr>
# Write the putative-enhancer table as a headerless tab-separated (BED-style) file.
write.table(CONPenh,"AmiriEtAl_PutEnh.bed",quote=FALSE, row.names=FALSE, col.names=FALSE,sep="\t")
library(shiny)
# Shiny UI: a slider selecting the number of histogram bins (1-25, default 10)
# and a plot placeholder with output id "hist".
# NOTE(review): only the UI is defined here; a matching server must render
# output$hist elsewhere.
ui<- fluidPage(
sliderInput(inputId= "num", label = "Choose number of bins", min=1, max=25, value = 10, step = 1) ,
plotOutput(outputId = "hist")
)
ui<- fluidPage(
sliderInput(inputId= "num", label = "Choose number of bins", min=1, max=25, value = 10, step = 1) ,
plotOutput(outputId = "hist")
) |
######################################################################
###        BLUPHAT Model Development & Validation For RFS          ###
######################################################################
# NOTE(review): setwd() in a script is machine-specific; consider a project-
# relative path or here::here() -- left unchanged here.
setwd('~/bigdata/LABDATA/BLUPHAT/')
###############################################################
# BLUP_Functions.R provides kinship(), blup.hat(), blup.cv();
# Commercial_Panels.R presumably provides the google.* colours used below.
source('script/BLUP_Functions.R')
source('script/Commercial_Panels.R')
library(pROC)
library(ggplot2)
library(survival)
library(survminer)
### Phenotype: TCGA-PRAD clinical data with biochemical recurrence (BCR).
phenoData <- readRDS('data/TCGA-PRAD/Clinical_TCGA_PRAD_With_PreopPSA_and_BCR.RDS')
# Keep the raw recurrence time (NA for non-recurrent patients) in `rfs` for
# the survival analysis at the end of the script.
phenoData$rfs <- phenoData$days_to_first_biochemical_recurrence
# For censored patients, substitute the last follow-up time.
phenoData$days_to_first_biochemical_recurrence <- ifelse(phenoData$recurrence_status==1, phenoData$days_to_first_biochemical_recurrence,
                                                         phenoData$days_to_last_followup)
# Keep recurrent patients plus censored patients followed at least `yr` years.
yr <- 5
keep <- which(phenoData$recurrence_status==1 | (phenoData$recurrence_status==0 & phenoData$days_to_first_biochemical_recurrence>=yr*365))
length(keep)
phenoData <- phenoData[keep,]
# Rescale to fraction of the yr-year window, capped at 1
# (1 = recurrence-free past yr years).
phenoData$days_to_first_biochemical_recurrence<- ifelse(phenoData$days_to_first_biochemical_recurrence>=yr*365,
                                                        1, phenoData$days_to_first_biochemical_recurrence/yr/365)
phenoData$days_to_first_biochemical_recurrence
#### Genotype: omics matrices (genes/miRNAs/probes x samples).
rnaData <- readRDS('data/TCGA-PRAD/mRNA_Expression_LogCPM_Filter_Low_TCGA_PRAD.RDS')
mirData <- readRDS('data/TCGA-PRAD/miRNA_Expression_LogCPM_Filter_Low_TCGA_PRAD.RDS')
methyData <- readRDS('data/TCGA-PRAD/Methylation_Filter_NA_TCGA_PRAD.RDS')
# Samples present in the clinical table and in all three omics data sets.
samples <- Reduce(intersect, list(rownames(phenoData), colnames(rnaData), colnames(mirData), colnames(methyData)))
samples
phenoData <-phenoData[samples,]
# Transpose mRNA data to samples x genes and standardize each gene.
gene <- rnaData[,samples]
gene <- as.matrix(t(gene))
gene[1:5,1:5]
gene <- scale(gene)
### rfs5yr: rank genes by correlation with the 5-year RFS phenotype.
# Phenotype vector: fraction of the 5-year window to biochemical recurrence
# (1 = recurrence-free past 5 years), prepared above.
pheno <- as.matrix(phenoData$days_to_first_biochemical_recurrence, drop=FALSE)
y <- as.numeric(pheno)
pheno.mrna <- pheno
# Pearson correlation of every gene with the phenotype.
# FIX: the original ran the full cor.test() scan over all genes twice (once
# for the absolute value, once for the signed value); compute the signed
# correlations once and derive the absolute values from them -- identical
# results at half the cost.
corrVal <- apply(gene, 2, function(v) cor.test(v, y)$estimate)
corr <- abs(corrVal)
# Rank genes by |correlation|, strongest first.
o <- order(corr, decreasing=TRUE)
corrDa <- data.frame(corrVAL=corrVal[o],corrABS=corr[o], rank=1:length(o))
o.mrna <- o
#################################################################################
####### Stepwise Forward Selection
# Greedy forward selection over the correlation-ranked genes: a gene is kept
# only if adding it increases the predictive HAT statistic returned by
# blup.hat() (sourced from BLUP_Functions.R).
nGene <- length(o)
nGene
# Seed the model with the top-ranked gene.
selected <- o[1]
lastHAT <- 0
for (i in seq(2,nGene,1)) {
print ('====================================')
print (i)
# Tentatively add the next-ranked gene and rebuild the kinship matrix.
selected <- c(selected, o[i])
geno<-gene[,selected]
kk<-kinship(gen=geno)
# NOTE(review): kinship() appears to return a list whose first element is a
# data frame with two leading id columns -- confirm against BLUP_Functions.R.
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
# Keep the gene only if it improves predictive HAT; otherwise drop it.
if (hat > lastHAT) {
selected <- selected
lastHAT <- hat
} else {
selected <- selected[-length(selected)]
lastHAT <- lastHAT
}
print (lastHAT)
}
############ Confirmation of Selected genes
### test top n genes
#selected <- o[1:topn]
selected
# Persist the selected gene-expression matrix and recompute HAT on it.
geno.mrna <-gene[,selected]
saveRDS(geno.mrna, 'report/TCGA_mRNA_Expression_Stepwise_RFS.RDS')
kk<-kinship(gen=geno.mrna)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
res <- blup.hat(mydata=y, mykin=kk)
hat <- res$predic.HAT
hat
############## GENERAL CV PREDICTION
# Cross-validated BLUP prediction on the selected genes via blup.cv()
# (sourced from BLUP_Functions.R).
geno.mrna <- gene[,selected]
kk<-kinship(gen=geno.mrna)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
# Intercept-only fixed-effect design matrix.
x<-matrix(1,n,1)
# NOTE(review): nfold/foldid are hard-coded to 153, i.e. leave-one-out CV for
# exactly 153 samples; this breaks if the sample count changes -- consider
# nfold <- n and foldid <- 1:n.
nfold <- 153
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:153
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
# blup[[1]]: predictive R^2; blup[[2]]: per-sample predictions (id/yobs/yhat).
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
# Binary label: yobs < 1 means recurrence within the 5-year window (0),
# yobs == 1 means recurrence-free (1).
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
# pROC::ci() on (response, predictor): elements are lower CI / AUC / upper CI.
auc.ci <- ci(survLabel,pred$yhat)
auc.val <- auc.ci[2]
auc.ci[1]
auc.ci[3]
### Survival
# Observed recurrence-free survival in months; `rfs` kept the raw recurrence
# time (NA for censored patients, who get last-followup time below).
daysToDeath <- as.numeric(phenoData$rfs)/365*12
daysToDeath
nonComplt <- is.na(daysToDeath)
# Event indicator: 1 = recurrence observed, 0 = censored.
vitalStatus <- as.numeric(ifelse(nonComplt, 0, 1))
daysToDeath[nonComplt] <- as.numeric(phenoData$days_to_last_followup[nonComplt])/365*12
# Reorder CV predictions back to sample order; use predicted yhat as the score.
risk <- pred$yhat[order(pred$id)]
# Cox proportional-hazards model on the continuous prediction.
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
risk <- pred$yhat[order(pred$id)]
# Higher yhat = predicted longer recurrence-free time, so yhat below the
# median marks the high-risk group (TRUE).
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
# Log-rank test between the two risk groups, plus hazard ratio and 95% CI.
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
# Format HR/CI to 2 decimals; use scientific notation for small p-values.
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
# Legend / annotation positions for the Kaplan-Meier plot.
lgd.xpos <- 0.7
lgd.ypos = 0.42
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/2
p.ypos = 0.2
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
# NOTE(review): google.blue / google.red are expected from the sourced
# Commercial_Panels.R -- confirm.
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
############# miRNA
mir <- mirData[,samples]
mir <- as.matrix(t(mir))
mir[1:5,1:5]
dim(mir)
mir <- scale(mir)
mir[1:5,1:5]
### rfs5yr
pheno <- as.matrix(phenoData$days_to_first_biochemical_recurrence, drop=FALSE)
y <- as.numeric(pheno)
corr <- abs(apply(mir, 2, function(v) cor.test(v,y)$estimate))
corrVal <- apply(mir, 2, function(v) cor.test(v,y)$estimate)
o <- order(corr, decreasing=T)
corrDa <- data.frame(corrVAL=corrVal[o],corrABS=corr[o], rank=1:length(o))
o.mir <- o
#################################################################################
####### Stepwise Forward
nGene <- length(o)
nGene
selected <- o[1]
lastHAT <- 0
# Greedy forward selection over the correlation-ranked miRNAs: tentatively
# append the next-ranked feature, refit BLUP-HAT, and commit the addition
# only when the predictive HAT statistic improves.
for (i in seq(2,nGene,1)) {
  print('====================================')
  print(i)
  # Tentatively add the i-th ranked feature.
  selected <- c(selected, o[i])
  geno <- mir[, selected]
  kk <- kinship(gen = geno)
  kk <- kk[[1]]
  kk <- as.matrix(kk[, -c(1, 2)])
  result1 <- blup.hat(mydata = y, mykin = kk)
  hat <- result1$predic.HAT
  if (hat > lastHAT) {
    lastHAT <- hat
  } else {
    # No improvement: revert the tentative addition.
    selected <- selected[-length(selected)]
  }
  print(lastHAT)
}
############ Confirmation of Selected genes
### test top n genes
# Same confirmation / LOOCV / AUC / survival workflow as the mRNA section,
# applied to the stepwise-selected miRNA set.
#selected <- o[1:topn]
geno.mir<-mir[,selected]
#geno<-gene[te,selected]
saveRDS(geno.mir, 'report/TCGA_miRNA_Expression_Stepwise_RFS.RDS')
kk<-kinship(gen=geno.mir)
#write.csv(x=kk[[1]],file="yan\\input\\kk1.csv",row.names=FALSE)
#write.csv(x=kk[[2]],file="yan\\input\\cc1.csv",row.names=FALSE)
# Drop the two leading id columns of the kinship matrix.
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
# Leave-one-out CV (nfold == 153 folds, one sample per fold).
geno.mir<-mir[,selected]
#geno<-gene[te,selected]
kk<-kinship(gen=geno.mir)
#write.csv(x=kk[[1]],file="yan\\input\\kk1.csv",row.names=FALSE)
#write.csv(x=kk[[2]],file="yan\\input\\cc1.csv",row.names=FALSE)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
# Intercept-only design matrix.
x<-matrix(1,n,1)
nfold <- 153
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:153
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
# yobs < 1 marks samples with BCR inside the 5-year window.
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
# pROC::ci() returns c(lower95, AUC, upper95); auc() recomputes the point AUC.
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
# Days-to-months conversion with censoring at last follow-up, as above.
daysToDeath <- as.numeric(phenoData$rfs)/365*12
daysToDeath
nonComplt <- is.na(daysToDeath)
vitalStatus <- as.numeric(ifelse(nonComplt, 0, 1))
daysToDeath[nonComplt] <- as.numeric(phenoData$days_to_last_followup[nonComplt])/365*12
risk <- pred$yhat[order(pred$id)]
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
risk <- pred$yhat[order(pred$id)]
# Below-median predicted score == high-risk group.
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
lgd.xpos <- 0.7
lgd.ypos = 0.42
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/2
p.ypos = 0.2
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
#######################################################################################
############## Intergration of mRNA and miRNA
# Combine the selected mRNA and miRNA predictor matrices (sample-aligned) and
# rerun the full HAT / LOOCV / AUC / survival / KM workflow on the joint set.
# The printed logical vector is a manual check that sample rows line up.
rownames(geno.mir)==rownames(geno.mrna)
geno.comb <- cbind(geno.mrna, geno.mir)
############ Confirmation of Selected genes
kk<-kinship(gen=geno.comb)
#write.csv(x=kk[[1]],file="yan\\input\\kk1.csv",row.names=FALSE)
#write.csv(x=kk[[2]],file="yan\\input\\cc1.csv",row.names=FALSE)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
kk<-kinship(gen=geno.comb)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
nfold <- 153
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:153
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
#auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
daysToDeath <- as.numeric(phenoData$rfs)/365*12
daysToDeath
nonComplt <- is.na(daysToDeath)
vitalStatus <- as.numeric(ifelse(nonComplt, 0, 1))
daysToDeath[nonComplt] <- as.numeric(phenoData$days_to_last_followup[nonComplt])/365*12
risk <- pred$yhat[order(pred$id)]
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
risk <- pred$yhat[order(pred$id)]
# Below-median predicted score == high-risk group.
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
lgd.xpos <- 0.7
lgd.ypos = 0.42
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/2
p.ypos = 0.2
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
##################################################################################
################ Validation
# Bookkeeping matrix: one row per selected feature (160 mRNA + 65 miRNA --
# counts are hard-coded; confirm they match ncol(geno.comb)), one column per
# validation dataset; a cell is set to 1 when the feature is measurable there.
genesInValidation <- data.frame(matrix(rep(0,(160+65)*7), nrow=160+65, ncol=7), stringsAsFactors = F)
genesInValidation
rownames(genesInValidation) <- colnames(geno.comb)
colnames(genesInValidation) <- c('GSE70769','DKFZ2018','GSE116918','GSE107299','GSE54460','MSKCC2010RNA','MSKCC2010MIR')
####### GSE107299 #######
# Each dataset block below loads one cohort into exprData/phenoData; the
# shared downstream validation code is then run once per cohort.
dataset <- 'GSE107299'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
####### GSE21034 #######
#dataset <- 'GSE21034'
#eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
#exprData <- exprs(eSet)
#exprData[1:5,1:5]
#phenoData <- pData(eSet)
#View(phenoData)
#table(phenoData$sample_type)
#keep <- which(phenoData$sample_type=='Primary')
#exprData <- exprData[,keep]
#phenoData <- phenoData[keep,]
###### MSKCC2010
# MSKCC/Taylor 2010 cohort (GSE21034): clinical annotation comes from the GEO
# eSet; expression comes from the tab-delimited MSKCC matrix keyed by Entrez
# GeneID and is mapped back to the TCGA-selected Ensembl IDs.
dataset <- 'GSE21034'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
phenoData <- pData(eSet)
table(phenoData$sample_type)
# Keep primary tumors only.
keep <- which(phenoData$sample_type=='Primary')
phenoData <- phenoData[keep,]
exprData <- read.table('data/Validation/MSKCC_PCa_mRNA_data.txt', header = TRUE, sep = '\t', stringsAsFactors = FALSE)
exprData[1:5,1:5]
annoData <- readRDS('~/bigdata/PCa/data/Annotation/Homo_Sapiens_Gene_Annotation_ENSEMBL_HGNC_ENTREZ.RDS')
# Map the selected Ensembl IDs to Entrez IDs via the annotation table.
idx <- match(colnames(geno.mrna), as.character(annoData$ensembl_id))
entrez.id <- annoData[idx,]$entrez_id
# BUG FIX: the original `entrez.id[-which(is.na(entrez.id))]` returns an
# EMPTY vector whenever no NA is present, because x[-integer(0)] selects
# nothing in R. Logical indexing is correct in both cases.
entrez.id <- entrez.id[!is.na(entrez.id)]
idx <- which(exprData$GeneID %in% entrez.id)
exprData <- exprData[idx,]
# Translate retained rows back to Ensembl IDs so rownames(exprData) can be
# intersected with colnames(geno.mrna) downstream.
ensembl.id <- as.character(annoData$ensembl_id[match(exprData$GeneID, annoData$entrez_id)])
ensembl.id
rownames(exprData) <- ensembl.id
rownames(phenoData) <- phenoData$sample_id
# Restrict both tables to the shared samples, in the same order.
samples <- intersect(colnames(exprData),rownames(phenoData))
exprData <- exprData[,samples]
phenoData <- phenoData[samples,]
####### DKFZ2018 #######
# DKFZ 2018 cohort: drop repeat samples from the same patient.
dataset <- 'DKFZ2018'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
#View(phenoData)
# BUG FIX: the original `exprData[,-which(duplicated(...))]` drops EVERY
# column when there are no duplicates (x[-integer(0)] selects nothing in R).
# Logical indexing handles both cases correctly; duplicated() never yields NA.
dup <- duplicated(phenoData$patient_id)
exprData <- exprData[, !dup]
phenoData <- phenoData[!dup, ]
####### GSE54460 #######
# GSE54460 cohort: remove samples pre-flagged as duplicates in the pheno table.
dataset <- 'GSE54460'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
#View(phenoData)
filter <- which(phenoData$filter=='Duplicate')
filter
# BUG FIX: guard the empty-index case -- x[-integer(0)] selects NOTHING in R,
# so subsetting unconditionally would wipe the data when no sample is flagged.
# which() is kept (rather than a logical mask) because phenoData$filter may
# contain NA.
if (length(filter) > 0) {
exprData <- exprData[,-filter]
phenoData <- phenoData[-filter,]
}
####### GSE70769 #######
# GSE70769 cohort: primary tumors only.
dataset <- 'GSE70769'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
#View(phenoData)
keep <- which(phenoData$sample_type=='Primary')
exprData <- exprData[,keep]
phenoData <- phenoData[keep,]
####### GSE116918 BCR #######
# GSE116918 cohort: primary tumors only.
dataset <- 'GSE116918'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
dim(exprData)
#View(phenoData)
table(phenoData$sample_type)
keep <- which(phenoData$sample_type=='Primary')
exprData <- exprData[,keep]
phenoData <- phenoData[keep,]
#####################################################################################
#####################################################################################
# Shared validation workflow for whichever cohort was loaded above: define the
# 5-year RFS outcome, subset to evaluable samples, and fit BLUP-HAT.
total <- nrow(phenoData)
notNA <- sum(!is.na(phenoData$time_to_bcr))
yr <- 5
# Evaluable = had BCR, or was followed recurrence-free for >= 5 years (60 mo).
keep <- which(phenoData$bcr_status==1 | (phenoData$bcr_status==0 & phenoData$time_to_bcr>=yr*12))
rfs5yr <- length(keep)
phenoData <- phenoData[keep,]
# Outcome rescaled to [0, 1]: 1 = recurrence-free at 5 years, otherwise the
# fraction of the 5-year window survived before BCR.
phenoData$y <- ifelse(phenoData$time_to_bcr>=yr*12, 1, phenoData$time_to_bcr/yr/12)
rfs5yr1 <- sum(phenoData$y==1)
# Signature genes actually measured in this cohort.
ovlp <- intersect(colnames(geno.mrna), rownames(exprData))
ovlp
#ovlp <- sample(rownames(exprData), 150, replace = F)
#ovlp <- prolaris
#ovlp
#ovlp <- intersect(colnames(gene[,o.mrna[1:topn]]), rownames(exprData))
#ovlp
# `keep` indexes the cohort's (already primary-filtered) sample columns.
geno <- scale(t(exprData[ovlp,keep]))
dim(geno)
#geno <- scale(t(exprData[,keep]))
#dim(geno)
#genesInValidation[ovlp, 'MSKCC2010RNA'] <- 1
# Record which signature genes were available in this dataset.
genesInValidation[ovlp, dataset] <- 1
#gene.name <- as.character(annoData$gene_name[match(rownames(genesInValidation), annoData$ensembl_id)])
#gene.name
#genesInValidation$Symbol <- gene.name
#write.table(genesInValidation, file='report/GENE160_MIR65_In_Validation_Datasets.txt', sep='\t', quote=F)
pheno <- as.matrix(phenoData$y, drop=FALSE)
y <- as.numeric(pheno)
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
# Leave-one-out CV on the validation cohort (nfold == sample size).
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
x
nfold <- length(y)
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:nfold
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
# pROC::ci() returns c(lower95, AUC, upper95).
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
# time_to_bcr is already in months for these cohorts -- TODO confirm units
# against the eSet annotations.
daysToDeath <- as.numeric(phenoData$time_to_bcr)
vitalStatus <- as.numeric(phenoData$bcr_status)
pred <- cbind(pred, daysToDeath, vitalStatus)
pred
write.table(pred, file=paste0('report/Validation_', dataset, '_mRNA_Prediction.txt'), sep = '\t', quote = F, row.names = F)
dataset
write.table(pred, file=paste0('report/Validation_MSKCC2010_mRNA_Prediction.txt'), sep = '\t', quote = F, row.names = F)
dataset
risk <- pred$yhat[order(pred$id)]
risk
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
# Drop the first element so coeffs lines up with the stats row built below.
coeffs <- coeffs[-1]
#BiocManager::install("survcomp")
#library(survcomp)
# Concordance index (survcomp) on samples with known follow-up time.
# NOTE(review): `c` shadows base::c for the rest of the session.
idx <- which(!is.na(pred$daysToDeath))
c <- concordance.index(x=risk[idx],
surv.time=daysToDeath[idx],
surv.event=vitalStatus[idx],
#cl=riskGroup[idx],
method="noether")
c$c.index
### KM Plot
# Same KM workflow as the training sections; km.coeffs additionally keeps the
# unformatted HR / CI / p for the summary stats row at the end.
risk <- pred$yhat[order(pred$id)]
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
# Keep the numeric values before formatting them into display strings.
km.coeffs <- c(hr, lower95, upper95, p.val)
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
# Two alternative legend/p-value placements; the second assignment wins.
lgd.xpos <- 0.27
lgd.ypos = 0.3
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/25
p.ypos = 0.07
lgd.xpos <- 0.7
lgd.ypos = 0.85
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/25
p.ypos = 0.07
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
# One summary row per validation dataset (collected manually across runs).
stats <- as.character(c(dataset, total, notNA, rfs5yr, rfs5yr1, hat, r2, auc.val, auc.ci[1], auc.ci[3], coeffs, km.coeffs))
stats
#####################################################################################
#####################################################################################
###### Integration of mRNA and miRNA
####### GSE21034 #######
###### MSKCC2010
# MSKCC 2010 is the only validation cohort with both mRNA and miRNA profiles;
# reload its clinical table restricted to primary tumors.
dataset <- 'GSE21034'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
phenoData <- pData(eSet)
table(phenoData$sample_type)
keep <- which(phenoData$sample_type=='Primary')
phenoData <- phenoData[keep,]
# Reload the MSKCC mRNA matrix and map the TCGA-selected Ensembl IDs through
# Entrez, exactly as in the mRNA-only validation section above.
exprData <- read.table('data/Validation/MSKCC_PCa_mRNA_data.txt', header = TRUE, sep = '\t', stringsAsFactors = FALSE)
exprData[1:5,1:5]
annoData <- readRDS('~/bigdata/PCa/data/Annotation/Homo_Sapiens_Gene_Annotation_ENSEMBL_HGNC_ENTREZ.RDS')
idx <- match(colnames(geno.mrna), as.character(annoData$ensembl_id))
entrez.id <- annoData[idx,]$entrez_id
# BUG FIX: `entrez.id[-which(is.na(entrez.id))]` yields an EMPTY vector when
# no NA is present (x[-integer(0)] selects nothing in R); logical indexing is
# correct in both cases.
entrez.id <- entrez.id[!is.na(entrez.id)]
idx <- which(exprData$GeneID %in% entrez.id)
exprData <- exprData[idx,]
# Back-translate to Ensembl IDs so the row names match colnames(geno.mrna).
ensembl.id <- as.character(annoData$ensembl_id[match(exprData$GeneID, annoData$entrez_id)])
ensembl.id
rownames(exprData) <- ensembl.id
rownames(phenoData) <- phenoData$sample_id
samples <- intersect(colnames(exprData),rownames(phenoData))
exprData <- exprData[,samples]
phenoData <- phenoData[samples,]
# Load the MSKCC miRNA matrix and align mRNA, miRNA, and clinical tables to
# the common sample set before building the validation predictor matrix.
mirData <- read.delim('data/Validation/MSKCC_PCa_microRNA_data.mir21.txt', header = T, sep = '\t', stringsAsFactors = F)
mirData[1:5,1:5]
rownames(mirData) <- mirData$MicroRNA
mirData <- mirData[,-1]
ovlp <- intersect(rownames(phenoData), colnames(mirData))
ovlp
exprData <- exprData[,ovlp]
mirData <- mirData[,ovlp]
phenoData <- phenoData[ovlp,]
# 5-year RFS outcome, as in the training section: evaluable samples are those
# with BCR or >= 60 months recurrence-free follow-up.
yr <- 5
keep <- which(phenoData$bcr_status==1 | (phenoData$bcr_status==0 & phenoData$time_to_bcr>=yr*12))
phenoData <- phenoData[keep,]
phenoData$y <- ifelse(phenoData$time_to_bcr>=yr*12, 1, phenoData$time_to_bcr/yr/12)
sum(phenoData$y==1)
ovlp <- intersect(colnames(geno.mrna), rownames(exprData))
geno1 <- scale(t(exprData[ovlp,keep]))
#geno1 <- scale(t(exprData[,keep]))
ovlp <- intersect(colnames(geno.mir), rownames(mirData))
geno2 <- scale(t(mirData[ovlp,keep]))
geno <- cbind(geno1, geno2)
colnames(geno2) %in% rownames(genesInValidation)
genesInValidation[colnames(geno2),'MSKCC2010MIR'] <- 1
#geno <- geno2
#geno <- geno1
# NOTE(review): this final assignment overrides the cbind above, so the model
# below uses mRNA features only -- confirm this toggle is intentional.
geno <- geno1
pheno <- as.matrix(phenoData$y, drop=FALSE)
y <- as.numeric(pheno)
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
# Leave-one-out CV on the MSKCC integration cohort, then AUC and Cox analysis
# of the cross-validated risk scores (same workflow as the sections above).
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
nfold <- length(y)
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:nfold
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
daysToDeath <- as.numeric(phenoData$time_to_bcr)
vitalStatus <- as.numeric(phenoData$bcr_status)
pred <- cbind(pred, daysToDeath, vitalStatus)
pred
write.table(pred, file='report/Validation_MSKCC2010_mRNA_miRNA_Prediction.txt', sep = '\t', quote = F, row.names = F)
risk <- pred$yhat[order(pred$id)]
risk
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
# NOTE(review): the result of this expression is discarded; `risk` was already
# assigned above, so this line only echoes the ordered predictions.
pred$yhat[order(pred$id)]
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
lgd.xpos <- 0.27
lgd.ypos = 0.3
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/25
p.ypos = 0.07
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
###############################################################################################################################
##################### Forest plot
### TCGA
# Two alternative inputs for the forest plot; the validation block below
# overwrites the TCGA block, so only one is plotted per run.
dataForForestPlot <- read.delim('report/BLUPHAT_Training_TCGA.txt', header=T, sep='\t', stringsAsFactors = F, row.names = 1)
dataForForestPlot
dataForForestPlot$dataset <- factor(paste0('TCGA-PRAD (',rownames(dataForForestPlot),')'),
levels=rev(paste0('TCGA-PRAD (',rownames(dataForForestPlot),')')))
dataForForestPlot$p.coxph <- paste0('p = ', formatC(dataForForestPlot$p.coxph, format = "e", digits = 2))
### VALIDATION
dataForForestPlot <- read.delim('report/BLUPHAT_Validation.txt', header=T, sep='\t', stringsAsFactors = F, row.names = 1)
dataForForestPlot
dataForForestPlot <- dataForForestPlot[order(dataForForestPlot$p.coxph),]
dataForForestPlot
# Manual reordering of the 7 validation rows for display.
dataForForestPlot <- dataForForestPlot[c(1:2,4:5,7,6,3),]
dataForForestPlot$dataset <- factor(paste0(rownames(dataForForestPlot),' (N=',dataForForestPlot$rfs5yr,')'),
levels=rev(paste0(rownames(dataForForestPlot),' (N=',dataForForestPlot$rfs5yr,')')))
dataForForestPlot$p.coxph <- ifelse(dataForForestPlot$p.coxph >= 0.01, formatC(dataForForestPlot$p.coxph, digits = 2),
formatC(dataForForestPlot$p.coxph, format = "e", digits = 2))
dataForForestPlot$p.coxph <- paste0('p = ', dataForForestPlot$p.coxph)
### PLOT
# Horizontal forest plot: HR point estimate + 95% CI per dataset, with the
# p-value label positioned manually per row (the hard-coded y vector must
# match the row count/order above).
ggplot(dataForForestPlot, aes(x=dataset, y=hr.coxph)) +
#geom_segment(aes(y=dataset, x=lower95.coxph, xend=upper95.coxph, yend=dataset), color='black', size=1) +
#geom_segment(aes(y=6:1-0.1, x=lower95.coxph, xend=lower95.coxph, yend=6:!+0.1), color='black', size=1) +
geom_errorbar(aes(ymin=lower95.coxph, ymax=upper95.coxph),width=0.1, size=0.8, color='black')+
geom_point(color=google.red, size=3, shape=15) + #facet_grid(.~type) +
#geom_text(data =dataForForestPlot, aes(x=dataset, y=c(0.017,0.033,0.018), label=p.coxph, group=NULL),
#          size=4.4) +
geom_text(data =dataForForestPlot, aes(x=dataset, y=c(0.35,0.5,0.2,0.45,0.95,0.72,0.46), label=p.coxph, group=NULL),
size=4.4) +
coord_flip()+
#ylim(0,0.05) +
ylim(0,1.05) +
xlab('')+ylab('Hazard Ratio') +
#xlim(0,100) +
theme_bw()+
#theme_set(theme_minimal()) #
theme(legend.title = element_blank(),
legend.text = element_text(size=14),
legend.position = 'right') +
theme(axis.title=element_text(size=16),
axis.text = element_text(color='black', size=12),
axis.text.x = element_text(angle = 0, hjust=0.5),
strip.text = element_text(size=14)) +
theme(axis.line = element_line(colour = "black"),
axis.line.y = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
| /SFS-BLUPHAT_MultiOmics_RFS.R | no_license | rli012/BLUPHAT | R | false | false | 36,072 | r |
######################################################################
### BLUPHAT Model Development & Validation For RFS ###
######################################################################
# Working directory is machine-specific; the whole script assumes this layout.
setwd('~/bigdata/LABDATA/BLUPHAT/')
###############################################################
# Project helpers: kinship(), blup.hat(), blup.cv(), color constants, panels.
source('script/BLUP_Functions.R')
source('script/Commercial_Panels.R')
library(pROC)
library(ggplot2)
library(survival)
library(survminer)
### Phenotype
# TCGA-PRAD clinical data. `rfs` keeps the raw recurrence time; the BCR column
# is then redefined as event time for recurrences and last-follow-up otherwise.
phenoData <- readRDS('data/TCGA-PRAD/Clinical_TCGA_PRAD_With_PreopPSA_and_BCR.RDS')
phenoData$rfs <- phenoData$days_to_first_biochemical_recurrence
phenoData$days_to_first_biochemical_recurrence <- ifelse(phenoData$recurrence_status==1, phenoData$days_to_first_biochemical_recurrence,
phenoData$days_to_last_followup)
# Evaluable at 5 years: either recurred, or followed event-free >= 5 years.
yr <- 5
keep <- which(phenoData$recurrence_status==1 | (phenoData$recurrence_status==0 & phenoData$days_to_first_biochemical_recurrence>=yr*365))
length(keep)
phenoData <- phenoData[keep,]
# Rescale the outcome to [0, 1]: 1 = recurrence-free at 5 years, else the
# fraction of the 5-year window survived before BCR.
phenoData$days_to_first_biochemical_recurrence<- ifelse(phenoData$days_to_first_biochemical_recurrence>=yr*365,
1, phenoData$days_to_first_biochemical_recurrence/yr/365)
phenoData$days_to_first_biochemical_recurrence
#### Genotype
# Keep only samples profiled on all three platforms so the omics matrices and
# clinical table stay aligned throughout.
rnaData <- readRDS('data/TCGA-PRAD/mRNA_Expression_LogCPM_Filter_Low_TCGA_PRAD.RDS')
mirData <- readRDS('data/TCGA-PRAD/miRNA_Expression_LogCPM_Filter_Low_TCGA_PRAD.RDS')
methyData <- readRDS('data/TCGA-PRAD/Methylation_Filter_NA_TCGA_PRAD.RDS')
samples <- Reduce(intersect, list(rownames(phenoData), colnames(rnaData), colnames(mirData), colnames(methyData)))
samples
phenoData <-phenoData[samples,]
# Samples x genes, z-scored per gene.
gene <- rnaData[,samples]
gene <- as.matrix(t(gene))
gene[1:5,1:5]
gene <- scale(gene)
### rfs5yr
pheno <- as.matrix(phenoData$days_to_first_biochemical_recurrence, drop=FALSE)
y <- as.numeric(pheno)
pheno.mrna <- pheno
# Rank genes by absolute Pearson correlation with the RFS outcome; `o` is the
# ranked index used by the stepwise search below.
corr <- abs(apply(gene, 2, function(v) cor.test(v,y)$estimate))
corrVal <- apply(gene, 2, function(v) cor.test(v,y)$estimate)
o <- order(corr, decreasing=T)
corrDa <- data.frame(corrVAL=corrVal[o],corrABS=corr[o], rank=1:length(o))
o.mrna <- o
#################################################################################
####### Stepwise Forward Selection
# Greedy forward search over the correlation-ranked genes: seed with the
# top-ranked gene, then keep each subsequent gene only when it improves the
# BLUP-HAT predictability of 5-year RFS.
nGene <- length(o)
nGene
selected <- o[1]
lastHAT <- 0
for (i in seq(2,nGene,1)) {
  print('====================================')
  print(i)
  # Tentatively add the i-th ranked gene.
  selected <- c(selected, o[i])
  geno <- gene[, selected]
  kk <- kinship(gen = geno)
  kk <- kk[[1]]
  kk <- as.matrix(kk[, -c(1, 2)])
  result1 <- blup.hat(mydata = y, mykin = kk)
  hat <- result1$predic.HAT
  if (hat > lastHAT) {
    lastHAT <- hat
  } else {
    # No improvement: revert the tentative addition.
    selected <- selected[-length(selected)]
  }
  print(lastHAT)
}
############ Confirmation of Selected genes
### test top n genes
# Refit BLUP-HAT on the stepwise-selected mRNA set, cache the predictor
# matrix, then run LOOCV / AUC / Cox / KM on the cross-validated scores.
#selected <- o[1:topn]
selected
geno.mrna <-gene[,selected]
saveRDS(geno.mrna, 'report/TCGA_mRNA_Expression_Stepwise_RFS.RDS')
kk<-kinship(gen=geno.mrna)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
res <- blup.hat(mydata=y, mykin=kk)
hat <- res$predic.HAT
hat
############## GENERAL CV PREDICTION
# Leave-one-out CV: 153 folds, one per sample.
geno.mrna <- gene[,selected]
kk<-kinship(gen=geno.mrna)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
nfold <- 153
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:153
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
# yobs < 1 marks BCR within 5 years; pROC::ci() returns c(lower, AUC, upper).
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
auc.ci <- ci(survLabel,pred$yhat)
auc.val <- auc.ci[2]
auc.ci[1]
auc.ci[3]
### Survival
# Follow-up in months; censor at last follow-up where no recurrence date.
daysToDeath <- as.numeric(phenoData$rfs)/365*12
daysToDeath
nonComplt <- is.na(daysToDeath)
vitalStatus <- as.numeric(ifelse(nonComplt, 0, 1))
daysToDeath[nonComplt] <- as.numeric(phenoData$days_to_last_followup[nonComplt])/365*12
risk <- pred$yhat[order(pred$id)]
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
risk <- pred$yhat[order(pred$id)]
# Below-median predicted score == high-risk group.
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
lgd.xpos <- 0.7
lgd.ypos = 0.42
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/2
p.ypos = 0.2
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
######################################################################################
############# miRNA
# Subset the miRNA matrix to the analysis samples, transpose to samples x
# miRNAs, and z-score each miRNA across samples.
mir <- mirData[,samples]
mir <- as.matrix(t(mir))
mir[1:5,1:5]
dim(mir)
mir <- scale(mir)
mir[1:5,1:5]
### rfs5yr
# Phenotype: days to first biochemical recurrence (same y used for mRNA above
# presumably -- TODO confirm the two sections use the same outcome encoding).
pheno <- as.matrix(phenoData$days_to_first_biochemical_recurrence, drop=FALSE)
y <- as.numeric(pheno)
# Rank miRNAs by absolute Pearson correlation with the phenotype.
corr <- abs(apply(mir, 2, function(v) cor.test(v,y)$estimate))
corrVal <- apply(mir, 2, function(v) cor.test(v,y)$estimate)
o <- order(corr, decreasing=T)
corrDa <- data.frame(corrVAL=corrVal[o],corrABS=corr[o], rank=1:length(o))
o.mir <- o
#################################################################################
####### Stepwise Forward
# Initialize greedy forward selection with the top-ranked miRNA.
nGene <- length(o)
nGene
selected <- o[1]
lastHAT <- 0
# Greedy forward selection over the correlation-ranked miRNAs: tentatively add
# the i-th ranked miRNA, refit the BLUP HAT predictability, keep the miRNA only
# if predictability improves, otherwise revert the addition.
for (i in seq(2,nGene,1)) {
  print('====================================')
  print(i)
  selected <- c(selected, o[i])            # tentatively extend the panel
  geno <- mir[, selected]
  kk <- kinship(gen = geno)[[1]]           # first element; drop the two ID cols
  kk <- as.matrix(kk[, -c(1, 2)])
  result1 <- blup.hat(mydata = y, mykin = kk)
  hat <- result1$predic.HAT
  if (hat > lastHAT) {
    lastHAT <- hat                         # improvement: keep the miRNA
  } else {
    selected <- selected[-length(selected)]  # no gain: revert
  }
  print(lastHAT)
}
############ Confirmation of Selected genes
### test top n genes
#selected <- o[1:topn]
# Same evaluation pipeline as the mRNA section, now for the selected miRNA
# panel (`selected` indexes columns of `mir`).
geno.mir<-mir[,selected]
#geno<-gene[te,selected]
saveRDS(geno.mir, 'report/TCGA_miRNA_Expression_Stepwise_RFS.RDS')
kk<-kinship(gen=geno.mir)
#write.csv(x=kk[[1]],file="yan\\input\\kk1.csv",row.names=FALSE)
#write.csv(x=kk[[2]],file="yan\\input\\cc1.csv",row.names=FALSE)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
# Leave-one-out CV (nfold == 153 == sample size, presumably).
geno.mir<-mir[,selected]
#geno<-gene[te,selected]
kk<-kinship(gen=geno.mir)
#write.csv(x=kk[[1]],file="yan\\input\\kk1.csv",row.names=FALSE)
#write.csv(x=kk[[2]],file="yan\\input\\cc1.csv",row.names=FALSE)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
nfold <- 153
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:153
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
# ci() returns c(lower, auc, upper); auc.val is then overwritten with auc().
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
# Convert rfs (days, presumably) to months and censor missing values at last
# followup, as in the mRNA section.
daysToDeath <- as.numeric(phenoData$rfs)/365*12
daysToDeath
nonComplt <- is.na(daysToDeath)
vitalStatus <- as.numeric(ifelse(nonComplt, 0, 1))
daysToDeath[nonComplt] <- as.numeric(phenoData$days_to_last_followup[nonComplt])/365*12
risk <- pred$yhat[order(pred$id)]
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
# Median split; risk.group == TRUE is below-median prediction ("High Risk").
risk <- pred$yhat[order(pred$id)]
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
lgd.xpos <- 0.7
lgd.ypos = 0.42
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/2
p.ypos = 0.2
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
#######################################################################################
############## Intergration of mRNA and miRNA
# NOTE(review): this comparison is only printed, not enforced -- if sample
# order ever differs between the two panels the cbind below silently pairs
# mismatched samples. Consider stopifnot(all(...)).
rownames(geno.mir)==rownames(geno.mrna)
geno.comb <- cbind(geno.mrna, geno.mir)
############ Confirmation of Selected genes
# Same HAT / LOO-CV / AUC / survival pipeline as above, on the combined panel.
kk<-kinship(gen=geno.comb)
#write.csv(x=kk[[1]],file="yan\\input\\kk1.csv",row.names=FALSE)
#write.csv(x=kk[[2]],file="yan\\input\\cc1.csv",row.names=FALSE)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
kk<-kinship(gen=geno.comb)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
nfold <- 153
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:153
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
#auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
daysToDeath <- as.numeric(phenoData$rfs)/365*12
daysToDeath
nonComplt <- is.na(daysToDeath)
vitalStatus <- as.numeric(ifelse(nonComplt, 0, 1))
daysToDeath[nonComplt] <- as.numeric(phenoData$days_to_last_followup[nonComplt])/365*12
risk <- pred$yhat[order(pred$id)]
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
risk <- pred$yhat[order(pred$id)]
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
lgd.xpos <- 0.7
lgd.ypos = 0.42
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/2
p.ypos = 0.2
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
##################################################################################
################ Validation
# Tracker of which signature genes/miRNAs are present in each validation set.
# NOTE(review): 160 + 65 hard-codes the mRNA and miRNA panel sizes -- must
# match ncol(geno.comb) or the rownames assignment below fails.
genesInValidation <- data.frame(matrix(rep(0,(160+65)*7), nrow=160+65, ncol=7), stringsAsFactors = F)
genesInValidation
rownames(genesInValidation) <- colnames(geno.comb)
colnames(genesInValidation) <- c('GSE70769','DKFZ2018','GSE116918','GSE107299','GSE54460','MSKCC2010RNA','MSKCC2010MIR')
# The loader sections below all assign the same exprData/phenoData/dataset
# variables -- this script is meant to be run interactively, one dataset
# section at a time, before the shared modeling section further down.
####### GSE107299 #######
dataset <- 'GSE107299'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
####### GSE21034 #######
#dataset <- 'GSE21034'
#eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
#exprData <- exprs(eSet)
#exprData[1:5,1:5]
#phenoData <- pData(eSet)
#View(phenoData)
#table(phenoData$sample_type)
#keep <- which(phenoData$sample_type=='Primary')
#exprData <- exprData[,keep]
#phenoData <- phenoData[keep,]
###### MSKCC2010
# GSE21034 = MSKCC 2010 cohort: phenotypes from the eSet (primary tumors
# only), expression from the tab-delimited MSKCC matrix.
dataset <- 'GSE21034'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
phenoData <- pData(eSet)
table(phenoData$sample_type)
keep <- which(phenoData$sample_type=='Primary')
phenoData <- phenoData[keep,]
exprData <- read.table('data/Validation/MSKCC_PCa_mRNA_data.txt', header = T, sep = '\t', stringsAsFactors = F)
exprData[1:5,1:5]
# Map the selected TCGA Ensembl IDs to Entrez IDs so the signature can be
# matched against the MSKCC expression matrix (keyed by Entrez GeneID).
annoData <- readRDS('~/bigdata/PCa/data/Annotation/Homo_Sapiens_Gene_Annotation_ENSEMBL_HGNC_ENTREZ.RDS')
idx <- match(colnames(geno.mrna), as.character(annoData$ensembl_id))
entrez.id <- annoData[idx,]$entrez_id
# BUG FIX: `entrez.id[-which(is.na(entrez.id))]` returns an EMPTY vector when
# there are no NAs, because x[-integer(0)] selects nothing. Logical indexing
# behaves correctly in both cases.
entrez.id <- entrez.id[!is.na(entrez.id)]
idx <- which(exprData$GeneID %in% entrez.id)
exprData <- exprData[idx,]
# Translate the retained rows back to Ensembl IDs so rownames line up with
# the geno.mrna column names.
ensembl.id <- as.character(annoData$ensembl_id[match(exprData$GeneID, annoData$entrez_id)])
ensembl.id
rownames(exprData) <- ensembl.id
rownames(phenoData) <- phenoData$sample_id
# Keep only samples present in both tables, in the same order.
samples <- intersect(colnames(exprData),rownames(phenoData))
exprData <- exprData[,samples]
phenoData <- phenoData[samples,]
####### DKFZ2018 #######
# One loader per validation cohort; each overwrites exprData/phenoData/dataset
# (run interactively, one section at a time).
dataset <- 'DKFZ2018'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
#View(phenoData)
filter <- which(duplicated(phenoData$patient_id))
# BUG FIX: `x[,-filter]` with filter == integer(0) selects NOTHING (drops all
# columns) instead of keeping everything; only drop when there is something
# to drop.
if (length(filter) > 0) {
exprData <- exprData[,-filter]
phenoData <- phenoData[-filter,]
}
####### GSE54460 #######
dataset <- 'GSE54460'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
#View(phenoData)
filter <- which(phenoData$filter=='Duplicate')
filter
# Same zero-length guard as above.
if (length(filter) > 0) {
exprData <- exprData[,-filter]
phenoData <- phenoData[-filter,]
}
####### GSE70769 #######
dataset <- 'GSE70769'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
#View(phenoData)
keep <- which(phenoData$sample_type=='Primary')
exprData <- exprData[,keep]
phenoData <- phenoData[keep,]
####### GSE116918 BCR #######
dataset <- 'GSE116918'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
exprData <- exprs(eSet)
phenoData <- pData(eSet)
dim(exprData)
#View(phenoData)
table(phenoData$sample_type)
keep <- which(phenoData$sample_type=='Primary')
exprData <- exprData[,keep]
phenoData <- phenoData[keep,]
#####################################################################################
#####################################################################################
# Shared validation modeling section: run after exactly one loader above.
# Keep samples that either recurred (bcr_status == 1) or were followed at
# least yr*12 months without recurrence; y is 1 for recurrence-free at 5yr,
# otherwise time-to-recurrence scaled to (0, 1].
total <- nrow(phenoData)
notNA <- sum(!is.na(phenoData$time_to_bcr))
yr <- 5
keep <- which(phenoData$bcr_status==1 | (phenoData$bcr_status==0 & phenoData$time_to_bcr>=yr*12))
rfs5yr <- length(keep)
phenoData <- phenoData[keep,]
phenoData$y <- ifelse(phenoData$time_to_bcr>=yr*12, 1, phenoData$time_to_bcr/yr/12)
rfs5yr1 <- sum(phenoData$y==1)
# Restrict to signature genes that exist in this validation set.
ovlp <- intersect(colnames(geno.mrna), rownames(exprData))
ovlp
#ovlp <- sample(rownames(exprData), 150, replace = F)
#ovlp <- prolaris
#ovlp
#ovlp <- intersect(colnames(gene[,o.mrna[1:topn]]), rownames(exprData))
#ovlp
geno <- scale(t(exprData[ovlp,keep]))
dim(geno)
#geno <- scale(t(exprData[,keep]))
#dim(geno)
#genesInValidation[ovlp, 'MSKCC2010RNA'] <- 1
# Record which signature genes were found in this dataset.
genesInValidation[ovlp, dataset] <- 1
#gene.name <- as.character(annoData$gene_name[match(rownames(genesInValidation), annoData$ensembl_id)])
#gene.name
#genesInValidation$Symbol <- gene.name
#write.table(genesInValidation, file='report/GENE160_MIR65_In_Validation_Datasets.txt', sep='\t', quote=F)
pheno <- as.matrix(phenoData$y, drop=FALSE)
y <- as.numeric(pheno)
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
# Leave-one-out CV on the validation cohort (nfold == sample size here).
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
x
nfold <- length(y)
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:nfold
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
# time_to_bcr is already in months here (validation phenotype tables).
daysToDeath <- as.numeric(phenoData$time_to_bcr)
vitalStatus <- as.numeric(phenoData$bcr_status)
pred <- cbind(pred, daysToDeath, vitalStatus)
pred
write.table(pred, file=paste0('report/Validation_', dataset, '_mRNA_Prediction.txt'), sep = '\t', quote = F, row.names = F)
dataset
write.table(pred, file=paste0('report/Validation_MSKCC2010_mRNA_Prediction.txt'), sep = '\t', quote = F, row.names = F)
dataset
risk <- pred$yhat[order(pred$id)]
risk
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
# NOTE(review): presumably drops a redundant leading element; verify against
# the coeffs layout expected by the stats vector below.
coeffs <- coeffs[-1]
#BiocManager::install("survcomp")
#library(survcomp)
# Concordance index of the predicted score vs observed recurrence times.
# NOTE(review): `c` shadows base::c() from here on (function calls still
# resolve, but it is a smell).
idx <- which(!is.na(pred$daysToDeath))
c <- concordance.index(x=risk[idx],
surv.time=daysToDeath[idx],
surv.event=vitalStatus[idx],
#cl=riskGroup[idx],
method="noether")
c$c.index
### KM Plot
# Median split of the CV-predicted score; below-median = "High Risk".
risk <- pred$yhat[order(pred$id)]
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
# Keep the numeric values before hr/lower95/upper95 are overwritten with
# formatted strings below.
km.coeffs <- c(hr, lower95, upper95, p.val)
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
# Two alternative legend placements; the second set wins as written.
lgd.xpos <- 0.27
lgd.ypos = 0.3
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/25
p.ypos = 0.07
lgd.xpos <- 0.7
lgd.ypos = 0.85
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/25
p.ypos = 0.07
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
# One-row summary for this validation dataset (collected into the report
# tables read by the forest-plot section below).
stats <- as.character(c(dataset, total, notNA, rfs5yr, rfs5yr1, hat, r2, auc.val, auc.ci[1], auc.ci[3], coeffs, km.coeffs))
stats
#####################################################################################
#####################################################################################
###### Integration of mRNA and miRNA
####### GSE21034 #######
###### MSKCC2010
# MSKCC 2010 (GSE21034) is the only validation cohort with both mRNA and
# miRNA data; reload its phenotypes restricted to primary tumors.
dataset <- 'GSE21034'
eSet <- readRDS(paste0('data/Validation/', dataset, '_eSet.RDS'))
phenoData <- pData(eSet)
table(phenoData$sample_type)
keep <- which(phenoData$sample_type=='Primary')
phenoData <- phenoData[keep,]
# Load the MSKCC mRNA matrix and map the signature's Ensembl IDs to its
# Entrez GeneID keys (duplicate of the mapping in the single-omics section).
exprData <- read.table('data/Validation/MSKCC_PCa_mRNA_data.txt', header = T, sep = '\t', stringsAsFactors = F)
exprData[1:5,1:5]
annoData <- readRDS('~/bigdata/PCa/data/Annotation/Homo_Sapiens_Gene_Annotation_ENSEMBL_HGNC_ENTREZ.RDS')
idx <- match(colnames(geno.mrna), as.character(annoData$ensembl_id))
entrez.id <- annoData[idx,]$entrez_id
# BUG FIX: `entrez.id[-which(is.na(entrez.id))]` returns an EMPTY vector when
# there are no NAs (x[-integer(0)] selects nothing); use logical indexing.
entrez.id <- entrez.id[!is.na(entrez.id)]
idx <- which(exprData$GeneID %in% entrez.id)
exprData <- exprData[idx,]
ensembl.id <- as.character(annoData$ensembl_id[match(exprData$GeneID, annoData$entrez_id)])
ensembl.id
rownames(exprData) <- ensembl.id
rownames(phenoData) <- phenoData$sample_id
samples <- intersect(colnames(exprData),rownames(phenoData))
exprData <- exprData[,samples]
phenoData <- phenoData[samples,]
# Load the MSKCC miRNA matrix and align all three tables on shared samples.
mirData <- read.delim('data/Validation/MSKCC_PCa_microRNA_data.mir21.txt', header = T, sep = '\t', stringsAsFactors = F)
mirData[1:5,1:5]
rownames(mirData) <- mirData$MicroRNA
mirData <- mirData[,-1]
ovlp <- intersect(rownames(phenoData), colnames(mirData))
ovlp
exprData <- exprData[,ovlp]
mirData <- mirData[,ovlp]
phenoData <- phenoData[ovlp,]
# Same 5-year recurrence filter / y encoding as the validation section above.
yr <- 5
keep <- which(phenoData$bcr_status==1 | (phenoData$bcr_status==0 & phenoData$time_to_bcr>=yr*12))
phenoData <- phenoData[keep,]
phenoData$y <- ifelse(phenoData$time_to_bcr>=yr*12, 1, phenoData$time_to_bcr/yr/12)
sum(phenoData$y==1)
# geno1 = signature mRNAs, geno2 = signature miRNAs found in this cohort.
ovlp <- intersect(colnames(geno.mrna), rownames(exprData))
geno1 <- scale(t(exprData[ovlp,keep]))
#geno1 <- scale(t(exprData[,keep]))
ovlp <- intersect(colnames(geno.mir), rownames(mirData))
geno2 <- scale(t(mirData[ovlp,keep]))
geno <- cbind(geno1, geno2)
colnames(geno2) %in% rownames(genesInValidation)
genesInValidation[colnames(geno2),'MSKCC2010MIR'] <- 1
#geno <- geno2
#geno <- geno1
# NOTE(review): this final assignment overrides the combined matrix -- as
# written, the "integration" run below actually uses the mRNA panel only.
geno <- geno1
# Same HAT / LOO-CV / AUC / survival / KM pipeline as the other validation
# sections, run on the MSKCC matrix selected above.
pheno <- as.matrix(phenoData$y, drop=FALSE)
y <- as.numeric(pheno)
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
result1 <- blup.hat(mydata=y, mykin=kk)
hat <- result1$predic.HAT
hat
############## GENERAL CV PREDICTION
kk<-kinship(gen=geno)
kk <- kk[[1]]
kk<-kk[,-c(1,2)]
kk<-as.matrix(kk)
n<-length(pheno)
x<-matrix(1,n,1)
nfold <- length(y)
#foldid <- sample(1:n, n, replace = F)
#foldid
foldid <- 1:nfold
blup<-blup.cv(x=x,y=pheno,kk=kk,nfold=nfold,foldid=foldid)
r2<- as.numeric(blup[[1]])
r2
pred <- blup[[2]]
pred
########## AUC
md <- 1
survLabel <- ifelse(pred$yobs < md, 0, 1)
auc.ci <- ci(survLabel,pred$yhat)
auc.ci[1]
auc.ci[3]
auc.val <- auc.ci[2]
auc.val <- auc(survLabel,pred$yhat)
auc.val
### Survival
daysToDeath <- as.numeric(phenoData$time_to_bcr)
vitalStatus <- as.numeric(phenoData$bcr_status)
pred <- cbind(pred, daysToDeath, vitalStatus)
pred
write.table(pred, file='report/Validation_MSKCC2010_mRNA_miRNA_Prediction.txt', sep = '\t', quote = F, row.names = F)
risk <- pred$yhat[order(pred$id)]
risk
coxtest <- coxph(Surv(daysToDeath, vitalStatus) ~ risk)
summcph <- summary(coxtest)
coeffs <- c(summcph$coefficients[,1:2], summcph$conf.int[,3:4],
summcph$coefficients[,5])
coeffs
### KM Plot
pred$yhat[order(pred$id)]
risk.group <- risk < median(risk, na.rm = T)
median(risk, na.rm=T)
sort(risk)
n.high <- sum(risk.group, na.rm=T)
n.low <- sum(!risk.group, na.rm=T)
sdf <- survdiff(Surv(daysToDeath, vitalStatus) ~ risk.group)
p.val <- pchisq(sdf$chisq, length(sdf$n)-1, lower.tail = FALSE)
#p.val = 1 - pchisq(data.survdiff$chisq, length(data.survdiff$n) - 1)
hr = (sdf$obs[2]/sdf$exp[2])/(sdf$obs[1]/sdf$exp[1])
upper95 = exp(log(hr) + qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
lower95 = exp(log(hr) - qnorm(0.975)*sqrt(1/sdf$exp[2]+1/sdf$exp[1]))
hr <- format(hr, digits = 2, nsmall=2)
upper95 <- format(upper95, digits = 2, nsmall=2)
lower95 <- format(lower95, digits = 2, nsmall=2)
p.val <- ifelse(p.val >= 0.01, formatC(p.val, digits = 2),
formatC(p.val, format = "e", digits = 2))
hr
lower95
upper95
p.val
label.hr <- paste('HR = ', hr, ' (', lower95, ' - ', upper95, ')', sep='')
label.p <- paste('P Value = ', p.val, sep='')
survData <- data.frame(daysToDeath, vitalStatus, risk.group, stringsAsFactors = F)
fit <- survfit(Surv(daysToDeath, vitalStatus) ~ risk.group, data=survData)
lgd.xpos <- 0.27
lgd.ypos = 0.3
p.xpos = max(survData$daysToDeath, na.rm=TRUE)/25
p.ypos = 0.07
#title <- 'PFR10YR'
type <- 'Relapse-free Survival'
plt <- ggsurvplot(fit, data=survData, pval = paste0(label.hr, '\n', label.p), pval.coord = c(p.xpos, p.ypos),
pval.size=5.5,
font.main = c(16, 'bold', 'black'), conf.int = FALSE,
#title = title,
legend = c(lgd.xpos, lgd.ypos),
#color = c('blue', 'green'),
palette= c(google.blue, google.red),
legend.labs = c(paste('Low Risk (N=',n.low,')',sep=''),
paste('High Risk (N=',n.high,')',sep='')),
legend.title='Group',
xlab = paste(type,'(months)'), ylab = 'Survival probability',
font.x = c(20), font.y = c(20), ylim=c(0,1), #16
ggtheme = theme_bw()+ theme(axis.line = element_line(colour = "black"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
#panel.border = element_rect(colour='black'),
panel.border = element_blank(),
panel.background = element_blank(),
legend.text = element_text(size=16),#14
legend.title = element_text(size=16),
#axis.title = element_text(size=30),
axis.text = element_text(size=18, color='black')))
print (plt[[1]])
###############################################################################################################################
##################### Forest plot
### TCGA
# Two alternative inputs (TCGA training vs validation summary); as written
# the VALIDATION read below overwrites the TCGA one -- run one block only.
dataForForestPlot <- read.delim('report/BLUPHAT_Training_TCGA.txt', header=T, sep='\t', stringsAsFactors = F, row.names = 1)
dataForForestPlot
dataForForestPlot$dataset <- factor(paste0('TCGA-PRAD (',rownames(dataForForestPlot),')'),
levels=rev(paste0('TCGA-PRAD (',rownames(dataForForestPlot),')')))
dataForForestPlot$p.coxph <- paste0('p = ', formatC(dataForForestPlot$p.coxph, format = "e", digits = 2))
### VALIDATION
dataForForestPlot <- read.delim('report/BLUPHAT_Validation.txt', header=T, sep='\t', stringsAsFactors = F, row.names = 1)
dataForForestPlot
dataForForestPlot <- dataForForestPlot[order(dataForForestPlot$p.coxph),]
dataForForestPlot
# Manual display order of the 7 validation datasets.
dataForForestPlot <- dataForForestPlot[c(1:2,4:5,7,6,3),]
dataForForestPlot$dataset <- factor(paste0(rownames(dataForForestPlot),' (N=',dataForForestPlot$rfs5yr,')'),
levels=rev(paste0(rownames(dataForForestPlot),' (N=',dataForForestPlot$rfs5yr,')')))
dataForForestPlot$p.coxph <- ifelse(dataForForestPlot$p.coxph >= 0.01, formatC(dataForForestPlot$p.coxph, digits = 2),
formatC(dataForForestPlot$p.coxph, format = "e", digits = 2))
dataForForestPlot$p.coxph <- paste0('p = ', dataForForestPlot$p.coxph)
### PLOT
# Horizontal forest plot of Cox HRs with 95% CIs; the p-value label y
# positions are hard-coded per row and must be adjusted if the data change.
ggplot(dataForForestPlot, aes(x=dataset, y=hr.coxph)) +
#geom_segment(aes(y=dataset, x=lower95.coxph, xend=upper95.coxph, yend=dataset), color='black', size=1) +
#geom_segment(aes(y=6:1-0.1, x=lower95.coxph, xend=lower95.coxph, yend=6:!+0.1), color='black', size=1) +
geom_errorbar(aes(ymin=lower95.coxph, ymax=upper95.coxph),width=0.1, size=0.8, color='black')+
geom_point(color=google.red, size=3, shape=15) + #facet_grid(.~type) +
#geom_text(data =dataForForestPlot, aes(x=dataset, y=c(0.017,0.033,0.018), label=p.coxph, group=NULL),
#          size=4.4) +
geom_text(data =dataForForestPlot, aes(x=dataset, y=c(0.35,0.5,0.2,0.45,0.95,0.72,0.46), label=p.coxph, group=NULL),
size=4.4) +
coord_flip()+
#ylim(0,0.05) +
ylim(0,1.05) +
xlab('')+ylab('Hazard Ratio') +
#xlim(0,100) +
theme_bw()+
#theme_set(theme_minimal()) #
theme(legend.title = element_blank(),
legend.text = element_text(size=14),
legend.position = 'right') +
theme(axis.title=element_text(size=16),
axis.text = element_text(color='black', size=12),
axis.text.x = element_text(angle = 0, hjust=0.5),
strip.text = element_text(size=14)) +
theme(axis.line = element_line(colour = "black"),
axis.line.y = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())
|
f_hosmerlem <- function(y, yhat, g = 10){
# Hosmer-Lemeshow goodness-of-fit test for binary-outcome predictions.
# http://sas-and-r.blogspot.com/2010/09/example-87-hosmer-and-lemeshow-goodness.html
#
# y    : observed outcomes (0/1), same length as yhat
# yhat : predicted probabilities in [0, 1]
# g    : number of quantile groups; the statistic has g - 2 df
#
# Returns list(chisq = Pearson chi-square, p.value).
#===============================================
### ordered and grouped by predicted probability
### BUG FIX: the original used probs = seq(0, 1, 1/(g-1)) with an extra 0
### break, which created a degenerate first bin holding only min(yhat)
### (and NA bins when yhat == 0), so the group count no longer matched the
### g - 2 degrees of freedom. The canonical construction is g quantile bins
### with include.lowest = TRUE so the minimum is not dropped.
cutyhat <- cut(
yhat,
breaks = quantile(yhat, probs = seq(0, 1, 1 / g)),
include.lowest = TRUE
) # end cut
### observe and expect
obs = xtabs(cbind(1 - y, y) ~ cutyhat)
expect = xtabs(cbind(1 - yhat, yhat) ~ cutyhat)
### person chi square
chisq = sum((obs - expect)^2 / expect)
p = 1 - pchisq(chisq, g - 2)
return(list(chisq = chisq, p.value = p))
} # end func
# http://sas-and-r.blogspot.com/2010/09/example-87-hosmer-and-lemeshow-goodness.html
#===============================================
### ordered and grouped by predicted probability
cutyhat <- cut(
yhat,
breaks = c(
0,
quantile(
yhat,
probs = seq(0, 1, 1 / (g-1))
) # end quantile
) # end breaks
) # end cut
### observe and expect
obs = xtabs(cbind(1 - y, y) ~ cutyhat)
expect = xtabs(cbind(1 - yhat, yhat) ~ cutyhat)
### person chi square
chisq = sum((obs - expect)^2 / expect)
p = 1 - pchisq(chisq, g - 2)
return(list(chisq = chisq, p.value = p))
} # end func |
### Scratch exploration of the first 20 mark-accounting rows.
ugh <- marks[1:20, ]
dys <- unique(ugh$date)
unique(ugh$year)
# Eyeball the columns summarized below.
ugh$whole_kg
ugh$total_obs
ugh$marked
ugh$tags_from_fishery
ugh$mean_weight
ugh$WPUE
# BUG FIX: the original had a dangling `ugh$interp_mean =` on its own line,
# which R silently parsed as a chained assignment with the next statement,
# so interp_mean ended up holding WPUE/mean_weight. Both assignments are now
# explicit (same resulting values).
ugh$mean_npue <- ugh$WPUE / ugh$mean_weight
ugh$interp_mean <- ugh$mean_npue
cumsum(ugh$tags_from_fishery)
# BUG FIX: the original loop did not parse (trailing commas, bare column
# names, and `Ugh` vs `ugh` capitalization). Rewritten as a valid per-date
# aggregation matching the evident intent: sum the catch/observation/tag
# columns and average the weight and WPUE columns for each survey date.
daily_totals <- vector("list", length(dys))
for (k in seq_along(dys)) {
  dat <- ugh[ugh$date == dys[k], ]
  daily_totals[[k]] <- data.frame(
    date = dys[k],
    whole_kg = sum(dat$whole_kg),
    total_obs = sum(dat$total_obs),
    total_marked = sum(dat$marked),
    tags_from_fishery = sum(dat$tags_from_fishery),
    mean_weight = mean(dat$mean_weight),
    mean_wpue = mean(dat$WPUE))
}
daily_totals <- do.call(rbind, daily_totals)
# Per-trip mean fish weights from the fishery biological samples; YEAR is a
# global defined earlier in the script.
fshbio<-read.csv(paste0("data/fishery/fishery_bio_2000_", YEAR,".csv"))
str(fshbio)
read_csv(paste0("data/fishery/fishery_bio_2000_", YEAR,".csv"),
guess_max = 50000) %>%
filter(!is.na(weight)) %>%
mutate(date = ymd(as.Date(date, "%m/%d/%Y"))) %>%
select(date, trip_no, weight, Stat) %>%
group_by(date, trip_no) %>%
dplyr::summarize(mean_weight_bios = mean(weight)) -> fsh_bio2
view(fsh_bio2)
# Same read, but keeping all rows (no weight filter, no summarize) to carry
# the Stat area through for joining below.
read_csv(paste0("data/fishery/fishery_bio_2000_", YEAR,".csv"),
guess_max = 50000) %>%
#filter(!is.na(weight)) %>%
mutate(date = ymd(as.Date(date, "%m/%d/%Y"))) %>%
select(date, trip_no, Stat) %>%
group_by(date, trip_no) -> fsh_bio3 #%>%
# dplyr::summarize(mean_weight_bios = mean(weight))
view(fsh_bio3)
unique(fsh_bio3$Stat)
left_join(marks, fsh_bio3, by = c("date", "trip_no"))-> marks2
view(marks2)
# NOTE(review): fsh_bio3 has no mean_weight_bios column as built above (the
# summarize is commented out), so this select(-mean_weight_bios) will error.
left_join(marks, fsh_bio3, by = c("date", "trip_no")) %>%
#mutate(mean_weight = ifelse(!is.na(mean_weight_bios), mean_weight_bios, mean_weight)) %>%
select(-mean_weight_bios) -> marks2
### Build the trip-level mark accounting table (marks3) and attach fish-ticket
### info; FIRST_YEAR / NO_MARK_SRV / YEAR / fsh_tx are defined earlier.
read_csv(paste0("data/fishery/nsei_daily_tag_accounting_2004_", YEAR-1, ".csv")) -> marks3
marks3 %>%
  filter(year >= FIRST_YEAR &
           !year %in% NO_MARK_SRV) %>%
  # Flag trips where the whole catch was observed. BUG FIX: the exclusion
  # pattern was previously wrapped across two source lines *inside* the string
  # literal, embedding a newline in the regex so the "Did not observe"
  # alternative could never match a one-line comment; joined onto one line.
  mutate(all_observed = ifelse(
    !grepl("Missing|missing|Missed|missed|eastern|Eastern|not counted|Did not observe|did not observe|dressed|Dressed", comments) &
      observed_flag == "Yes", "Yes", "No"),
    # Whole-trip mean weight is defined only for fully observed trips.
    mean_weight = ifelse(all_observed == "Yes", whole_kg/total_obs, NA),
    year_trip = paste0(year, "_", trip_no)) -> marks3
#left_join(marks3, fsh_tx, by = c("date", "trip_no"))-> marks2
left_join(marks3, fsh_tx, by = c("date", "year_trip"))-> marks2
ex5<-marks3[marks3$year == 2005,]
tx5<-fsh_tx[fsh_tx$year == 2005,]
view(ex5)
unique(ex5$year_trip)
unique(tx5$year_trip)
unique(tx5$Stat)
left_join(ex5, tx5 %>%
select(year_trip, Stat),
by = c("year_trip"))-> ex5.2
ex5.2<-distinct(ex5.2)
view(ex5.2)
nrow(ex5.2)
nrow(distinct(ex5.2))
tx5$Stat[tx5$year_trip == "2005_9501"]
ex5.2[ex5.2$year_trip == "2005_9501",]
fsh_tx[fsh_tx$year_trip=="2005_9501",]
fsh_tx[fsh_tx$trip_no =="9501",]
nostat<-marks3[is.na(marks3$Stat),]
view(nostat)
with(nostat, table(year))
nrow(marks3)
fsh_tx[fsh_tx$trip_no == 9301,]
rawtx<-read.csv(paste0("data/fishery/nseiharvest_ifdb_1985_", YEAR,".csv"))
rawtx[rawtx$trip_no == 9301,]
rawtx[rawtx$date == "2020-09-19",]
unique(rawtx$trip_no[rawtx$year == 2020])
#check fishery CPUE for missing trip numbers...
str(fsh_cpue)
## Rebuild trip-level WPUE (kg sablefish per 1000 standardized hooks)
## straight from the raw logbook CPUE file, mirroring the main fishery
## CPUE pipeline, to see whether the missing trips ever had effort data.
read_csv(paste0("data/fishery/fishery_cpue_2022reboot_1997_", YEAR,".csv"),
         guess_max = 50000) %>%
  filter(Spp_cde == "710") %>%
  mutate(sable_kg_set = sable_lbs_set * 0.45359237, # conversion lb to kg
         std_hooks = 2.2 * no_hooks * (1 - exp(-0.57 * (0.0254 * hook_space))), #standardize hook spacing (Sigler & Lunsford 2001, CJFAS)
         # kg sablefish/1000 hooks, following Mueter 2007
         WPUE = sable_kg_set / (std_hooks / 1000)) %>%
  filter(!is.na(date) &
           !is.na(sable_lbs_set) &
           # omit special projects before/after fishery
           julian_day > 226 & julian_day < 322) %>%
  group_by(year, trip_no) %>%
  dplyr::summarize(WPUE = mean(WPUE)) -> fsh_cpue2
str(fsh_cpue2)
## NOTE(review): fsh_cpue2 is immediately overwritten with the UNfiltered raw
## file so specific trips can be looked up without the fishery-window filter.
fsh_cpue2<-read.csv(paste0("data/fishery/fishery_cpue_2022reboot_1997_", YEAR,".csv"))
fsh_cpue2$year_trip = paste0(fsh_cpue2$year, "_", fsh_cpue2$trip_no)
fsh_cpue2[fsh_cpue2$year_trip == "2020_9301",]
fsh_cpue2[fsh_cpue2$trip_no == 9302,]
marks3[marks3$trip_no == 9302,]
fsh_cpue2[fsh_cpue2$year_trip == "2020_101",]
rawtx[rawtx$year == 2020 & is.na(rawtx$trip_no),]
#===============================================================================
# --- marks4: re-run the tag accounting, attaching Stat from fishery CPUE ---
read_csv(paste0("data/fishery/nsei_daily_tag_accounting_2004_", YEAR-1, ".csv")) -> marks4

# Keywords in `comments` that mean the trip was NOT fully observed.
# BUG FIX: build the pattern on a single line -- the original multiline
# string literal embedded a newline plus indentation into the regex, so the
# alternatives after "not counted" could never match.
obs_keywords <- paste(
  c("Missing", "missing", "Missed", "missed", "eastern", "Eastern",
    "not counted", "Did not observe", "did not observe", "dressed", "Dressed"),
  collapse = "|")

marks4 %>%
  filter(year >= FIRST_YEAR &
           !year %in% NO_MARK_SRV) %>%
  mutate(all_observed = ifelse(
    !grepl(obs_keywords, comments) &
      observed_flag == "Yes", "Yes", "No"),
    mean_weight = ifelse(all_observed == "Yes", whole_kg/total_obs, NA),
    year_trip = paste0(year, "_", trip_no)) -> marks4
nrow(marks4)
# Stat areas come from the raw CPUE data this time (fsh_cpue2).
left_join(marks4, fsh_cpue2 %>%
            select(year_trip, Stat),
          by = c("year_trip"))-> marks4
view(marks4)
marks4<-distinct(marks4)
nostat4<-marks4[is.na(marks4$Stat),]
view(nostat4) #some missing trip numbers not present in fishery cpue or fish_tx data!!!
with(nostat4, table(year))
nrow(marks4)
3327/1423
## --- Daily roll-up prototype (mtry -> t3) and trip-58 deep dive -----------
head(mtry,20)
view(mtry[1:6,])
head(marks3)
## Inspect one trip (2005 trip 106) across the joined objects.
view(mtry[mtry$year_trip == "2005_106",])
view(fsh_tx[fsh_tx$year_trip == "2005_106",])
view(marks[marks$year_trip == "2005_106",])
1397.5+4125
marks[marks$trip_no == 106 & marks$year == 2005,]
str(fsh_tx)
view(fsh_tx[fsh_tx$year_trip == "2005_106",])
view(mtry)
## Ten 2005 rows pushed through the daily summarize/interpolate pipeline
## used for the full mark-recapture accounting.
test<-mtry[mtry$year == 2005,][1:10,]
test %>%
  # padr::pad fills in missing dates with NAs, grouping by years.
  pad(group = "year") %>%
  group_by(year, date) %>%
  dplyr::summarize(whole_kg = sum(whole_kg.y),
                   total_obs = sum(total_obs),
                   total_marked = sum(marked),
                   tags_from_fishery = sum(tags_from_fishery),
                   mean_weight = mean(mean_weight),
                   mean_wpue = mean(WPUE)) %>%
  # interpolate mean_weight column to get npue from wpue (some trips have wpue
  # data but no bio data)
  mutate(interp_mean = zoo::na.approx(mean_weight, maxgap = 20, rule = 2),
         mean_npue = mean_wpue / interp_mean) %>% #<-weight to n
  # padr::fill_ replaces NAs with 0 for specified cols
  fill_by_value(whole_kg, total_obs, total_marked, tags_from_fishery, value = 0) %>%
  group_by(year) %>%
  mutate(cum_whole_kg = cumsum(whole_kg), #cumsum makes vector
         cum_obs = cumsum(total_obs),
         cum_marks = cumsum(total_marked),
         julian_day = yday(date)) -> t3
daily_marks3[daily_marks3$year_trip == "2005_106",]
view(test)
view(t3)
## Four 2005 trips of interest from the accounting data.
mksub<-marks3[marks3$year_trip == "2005_2003" |
                marks3$year_trip == "2005_2006" |
                marks3$year_trip == "2005_58" |
                marks3$year_trip == "2005_59",]
view(mksub)
## Recompute WPUE for 2005 trip 58 by Stat area and compare it against every
## intermediate object that should carry that trip.
philcpue<-read.csv(paste0("data/fishery/fishery_cpue_2022reboot_1997_", YEAR,".csv"))
view(philcpue)
view(philcpue[philcpue$year == 2005 & philcpue$trip_no == 58,])
tcpue<-philcpue[philcpue$year == 2005 & philcpue$trip_no == 58,]
tcpue %>% filter(Spp_cde == "710") %>%
  mutate(sable_kg_set = sable_lbs_set * 0.45359237, # conversion lb to kg
         std_hooks = 2.2 * no_hooks * (1 - exp(-0.57 * (0.0254 * hook_space))), #standardize hook spacing (Sigler & Lunsford 2001, CJFAS)
         # kg sablefish/1000 hooks, following Mueter 2007
         WPUE = sable_kg_set / (std_hooks / 1000)) %>%
  filter(!is.na(date) &
           !is.na(sable_lbs_set) &
           # omit special projects before/after fishery
           julian_day > 226 & julian_day < 322) %>%
  group_by(year, trip_no, Stat) %>%
  dplyr::summarize(WPUE = mean(WPUE)) -> pcpue2
view(pcpue1)
view(pcpue2)
view(fsh_cpue_stat[fsh_cpue_stat$year == 2005 & fsh_cpue_stat$trip_no == 58,])
view(marks4[marks4$year_trip == "2005_58",])
view(marks5[marks5$year == 2005 & marks5$trip_no == 58,])
view(marks3[marks3$year_trip == "2005_58",])
view(fsh_cpue[fsh_cpue$year == 2005 & fsh_cpue$trip_no == 58,])
view(fsh_cpue_stat)
fsh_cpue[fsh_cpue$year == 2005 & fsh_cpue$trip_no == 58,]
fsh_cpue_stat[fsh_cpue_stat$year == 2005 & fsh_cpue_stat$trip_no == 58,]
view(marks)
view(marks2)
view(marks5)
| /2023/r/mr_pj_diagnostic_scrap.R | no_license | commfish/seak_sablefish | R | false | false | 8,136 | r | ugh<-marks[1:20,]
# --- Scratch: first-20-rows sample (`ugh`, built above) -------------------
dys <- unique(ugh$date)   # distinct landing dates in the sample
unique(ugh$year)
# Eyeball the columns that feed the daily roll-up.
ugh$whole_kg
ugh$total_obs
ugh$marked
ugh$tags_from_fishery
ugh$mean_weight
ugh$WPUE
# Numbers-per-unit-effort from weight-per-unit-effort via mean fish weight.
# (The original chained `ugh$interp_mean = ugh$mean_npue <- ...` assignment
# gave both columns the same value; made explicit here.)
ugh$mean_npue <- ugh$WPUE/ugh$mean_weight
ugh$interp_mean <- ugh$mean_npue
cumsum(ugh$tags_from_fishery)
# Daily roll-up, one row per date (base-R version of the
# group_by(date) %>% summarize() used elsewhere in this script).
# BUG FIX: the original loop did not parse (comma-separated statements
# inside `{}`) and referenced an undefined `Ugh`; rewritten as a valid
# per-date aggregation.
daily <- lapply(dys, function(d) {
  day <- ugh[ugh$date == d, ]
  data.frame(
    date              = d,
    whole_kg          = sum(day$whole_kg),
    total_obs         = sum(day$total_obs),
    total_marked      = sum(day$marked),
    tags_from_fishery = sum(day$tags_from_fishery),
    mean_weight       = mean(day$mean_weight),
    mean_wpue         = mean(day$WPUE)
  )
})
daily <- do.call(rbind, daily)
fshbio<-read.csv(paste0("data/fishery/fishery_bio_2000_", YEAR,".csv"))
str(fshbio)
read_csv(paste0("data/fishery/fishery_bio_2000_", YEAR,".csv"),
guess_max = 50000) %>%
filter(!is.na(weight)) %>%
mutate(date = ymd(as.Date(date, "%m/%d/%Y"))) %>%
select(date, trip_no, weight, Stat) %>%
group_by(date, trip_no) %>%
dplyr::summarize(mean_weight_bios = mean(weight)) -> fsh_bio2
view(fsh_bio2)
read_csv(paste0("data/fishery/fishery_bio_2000_", YEAR,".csv"),
guess_max = 50000) %>%
#filter(!is.na(weight)) %>%
mutate(date = ymd(as.Date(date, "%m/%d/%Y"))) %>%
select(date, trip_no, Stat) %>%
group_by(date, trip_no) -> fsh_bio3 #%>%
# dplyr::summarize(mean_weight_bios = mean(weight))
view(fsh_bio3)
unique(fsh_bio3$Stat)
left_join(marks, fsh_bio3, by = c("date", "trip_no"))-> marks2
view(marks2)
left_join(marks, fsh_bio3, by = c("date", "trip_no")) %>%
#mutate(mean_weight = ifelse(!is.na(mean_weight_bios), mean_weight_bios, mean_weight)) %>%
select(-mean_weight_bios) -> marks2
# --- Daily tag accounting -> marks3 ---------------------------------------
# Flag trips whose whole catch was observed for marks and compute a
# trip-level mean weight (whole_kg / fish observed) for those trips only.
read_csv(paste0("data/fishery/nsei_daily_tag_accounting_2004_", YEAR-1, ".csv")) -> marks3

# Keywords in `comments` that mean the trip was NOT fully observed.
# BUG FIX: the pattern must be a single-line string.  The original multiline
# string literal embedded a newline plus leading indentation into the regex,
# so the alternatives after "not counted" ("Did not observe", "did not
# observe", "dressed", "Dressed") could never match any comment.
obs_keywords <- paste(
  c("Missing", "missing", "Missed", "missed", "eastern", "Eastern",
    "not counted", "Did not observe", "did not observe", "dressed", "Dressed"),
  collapse = "|")

marks3 %>%
  filter(year >= FIRST_YEAR &
           !year %in% NO_MARK_SRV) %>%
  mutate(all_observed = ifelse(
    !grepl(obs_keywords, comments) &
      observed_flag == "Yes", "Yes", "No"),
    mean_weight = ifelse(all_observed == "Yes", whole_kg/total_obs, NA),
    year_trip = paste0(year, "_", trip_no)) -> marks3
#left_join(marks3, fsh_tx, by = c("date", "trip_no"))-> marks2
# Join fish-ticket data on landing date + year_trip key.
left_join(marks3, fsh_tx, by = c("date", "year_trip"))-> marks2
view(marks2)
ex5<-marks3[marks3$year == 2005,]
tx5<-fsh_tx[fsh_tx$year == 2005,]
view(ex5)
unique(ex5$year_trip)
unique(tx5$year_trip)
unique(tx5$Stat)
left_join(ex5, tx5 %>%
select(year_trip, Stat),
by = c("year_trip"))-> ex5.2
ex5.2<-distinct(ex5.2)
view(ex5.2)
nrow(ex5.2)
nrow(distinct(ex5.2))
tx5$Stat[tx5$year_trip == "2005_9501"]
ex5.2[ex5.2$year_trip == "2005_9501",]
fsh_tx[fsh_tx$year_trip=="2005_9501",]
fsh_tx[fsh_tx$trip_no =="9501",]
nostat<-marks3[is.na(marks3$Stat),]
view(nostat)
with(nostat, table(year))
nrow(marks3)
fsh_tx[fsh_tx$trip_no == 9301,]
rawtx<-read.csv(paste0("data/fishery/nseiharvest_ifdb_1985_", YEAR,".csv"))
rawtx[rawtx$trip_no == 9301,]
rawtx[rawtx$date == "2020-09-19",]
unique(rawtx$trip_no[rawtx$year == 2020])
#check fishery CPUE for missing trip numbers...
str(fsh_cpue)
read_csv(paste0("data/fishery/fishery_cpue_2022reboot_1997_", YEAR,".csv"),
guess_max = 50000) %>%
filter(Spp_cde == "710") %>%
mutate(sable_kg_set = sable_lbs_set * 0.45359237, # conversion lb to kg
std_hooks = 2.2 * no_hooks * (1 - exp(-0.57 * (0.0254 * hook_space))), #standardize hook spacing (Sigler & Lunsford 2001, CJFAS)
# kg sablefish/1000 hooks, following Mueter 2007
WPUE = sable_kg_set / (std_hooks / 1000)) %>%
filter(!is.na(date) &
!is.na(sable_lbs_set) &
# omit special projects before/after fishery
julian_day > 226 & julian_day < 322) %>%
group_by(year, trip_no) %>%
dplyr::summarize(WPUE = mean(WPUE)) -> fsh_cpue2
str(fsh_cpue2)
fsh_cpue2<-read.csv(paste0("data/fishery/fishery_cpue_2022reboot_1997_", YEAR,".csv"))
fsh_cpue2$year_trip = paste0(fsh_cpue2$year, "_", fsh_cpue2$trip_no)
fsh_cpue2[fsh_cpue2$year_trip == "2020_9301",]
fsh_cpue2[fsh_cpue2$trip_no == 9302,]
marks3[marks3$trip_no == 9302,]
fsh_cpue2[fsh_cpue2$year_trip == "2020_101",]
rawtx[rawtx$year == 2020 & is.na(rawtx$trip_no),]
#===============================================================================
# --- marks4: re-run the tag accounting, attaching Stat from fishery CPUE ---
read_csv(paste0("data/fishery/nsei_daily_tag_accounting_2004_", YEAR-1, ".csv")) -> marks4

# Keywords in `comments` that mean the trip was NOT fully observed.
# BUG FIX: build the pattern on a single line -- the original multiline
# string literal embedded a newline plus indentation into the regex, so the
# alternatives after "not counted" could never match.
obs_keywords <- paste(
  c("Missing", "missing", "Missed", "missed", "eastern", "Eastern",
    "not counted", "Did not observe", "did not observe", "dressed", "Dressed"),
  collapse = "|")

marks4 %>%
  filter(year >= FIRST_YEAR &
           !year %in% NO_MARK_SRV) %>%
  mutate(all_observed = ifelse(
    !grepl(obs_keywords, comments) &
      observed_flag == "Yes", "Yes", "No"),
    mean_weight = ifelse(all_observed == "Yes", whole_kg/total_obs, NA),
    year_trip = paste0(year, "_", trip_no)) -> marks4
nrow(marks4)
# Stat areas come from the raw CPUE data this time (fsh_cpue2).
left_join(marks4, fsh_cpue2 %>%
            select(year_trip, Stat),
          by = c("year_trip"))-> marks4
view(marks4)
marks4<-distinct(marks4)
nostat4<-marks4[is.na(marks4$Stat),]
view(nostat4) #some missing trip numbers not present in fishery cpue or fish_tx data!!!
with(nostat4, table(year))
nrow(marks4)
3327/1423
head(mtry,20)
view(mtry[1:6,])
head(marks3)
view(mtry[mtry$year_trip == "2005_106",])
view(fsh_tx[fsh_tx$year_trip == "2005_106",])
view(marks[marks$year_trip == "2005_106",])
1397.5+4125
marks[marks$trip_no == 106 & marks$year == 2005,]
str(fsh_tx)
view(fsh_tx[fsh_tx$year_trip == "2005_106",])
view(mtry)
test<-mtry[mtry$year == 2005,][1:10,]
test %>%
# padr::pad fills in missing dates with NAs, grouping by years.
pad(group = "year") %>%
group_by(year, date) %>%
dplyr::summarize(whole_kg = sum(whole_kg.y),
total_obs = sum(total_obs),
total_marked = sum(marked),
tags_from_fishery = sum(tags_from_fishery),
mean_weight = mean(mean_weight),
mean_wpue = mean(WPUE)) %>%
# interpolate mean_weight column to get npue from wpue (some trips have wpue
# data but no bio data)
mutate(interp_mean = zoo::na.approx(mean_weight, maxgap = 20, rule = 2),
mean_npue = mean_wpue / interp_mean) %>% #<-weight to n
# padr::fill_ replaces NAs with 0 for specified cols
fill_by_value(whole_kg, total_obs, total_marked, tags_from_fishery, value = 0) %>%
group_by(year) %>%
mutate(cum_whole_kg = cumsum(whole_kg), #cumsum makes vector
cum_obs = cumsum(total_obs),
cum_marks = cumsum(total_marked),
julian_day = yday(date)) -> t3
daily_marks3[daily_marks3$year_trip == "2005_106",]
view(test)
view(t3)
mksub<-marks3[marks3$year_trip == "2005_2003" |
marks3$year_trip == "2005_2006" |
marks3$year_trip == "2005_58" |
marks3$year_trip == "2005_59",]
view(mksub)
philcpue<-read.csv(paste0("data/fishery/fishery_cpue_2022reboot_1997_", YEAR,".csv"))
view(philcpue)
view(philcpue[philcpue$year == 2005 & philcpue$trip_no == 58,])
tcpue<-philcpue[philcpue$year == 2005 & philcpue$trip_no == 58,]
tcpue %>% filter(Spp_cde == "710") %>%
mutate(sable_kg_set = sable_lbs_set * 0.45359237, # conversion lb to kg
std_hooks = 2.2 * no_hooks * (1 - exp(-0.57 * (0.0254 * hook_space))), #standardize hook spacing (Sigler & Lunsford 2001, CJFAS)
# kg sablefish/1000 hooks, following Mueter 2007
WPUE = sable_kg_set / (std_hooks / 1000)) %>%
filter(!is.na(date) &
!is.na(sable_lbs_set) &
# omit special projects before/after fishery
julian_day > 226 & julian_day < 322) %>%
group_by(year, trip_no, Stat) %>%
dplyr::summarize(WPUE = mean(WPUE)) -> pcpue2
view(pcpue1)
view(pcpue2)
view(fsh_cpue_stat[fsh_cpue_stat$year == 2005 & fsh_cpue_stat$trip_no == 58,])
view(marks4[marks4$year_trip == "2005_58",])
view(marks5[marks5$year == 2005 & marks5$trip_no == 58,])
view(marks3[marks3$year_trip == "2005_58",])
view(fsh_cpue[fsh_cpue$year == 2005 & fsh_cpue$trip_no == 58,])
view(fsh_cpue_stat)
fsh_cpue[fsh_cpue$year == 2005 & fsh_cpue$trip_no == 58,]
fsh_cpue_stat[fsh_cpue_stat$year == 2005 & fsh_cpue_stat$trip_no == 58,]
view(marks)
view(marks2)
view(marks5)
|
# Scatterplot demo, adapted from the r-statistics.co "Top 50 ggplot2
# visualizations" master list (#Scatterplot section).
source("Demos\\theme_IMD.R")
library(ggplot2)

# County-level demographic data bundled with ggplot2.
data("midwest", package = "ggplot2")
# midwest <- read.csv("http://goo.gl/G1K41K") # bkup data source

# Total population against county area, colored by state and sized by
# population density; rendered with the shared IMD theme.
gg <- ggplot(midwest, aes(x = area, y = poptotal)) +
  geom_point(aes(col = state, size = popdensity))
gg + theme_IMD()
| /Demos/test-plot.R | no_license | KateMMiller/demo_repo | R | false | false | 413 | r | # Code shamelessly plagiarized from http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html#Scatterplot
source("Demos\\theme_IMD.R")
library(ggplot2)
data("midwest", package = "ggplot2")
# midwest <- read.csv("http://goo.gl/G1K41K") # bkup data source
# Scatterplot
gg <- ggplot(midwest, aes(x=area, y=poptotal)) +
geom_point(aes(col=state, size=popdensity))
gg+theme_IMD()
|
library(shiny)

# UI for a demo app that lets the user pick the rendered plot's height.
# The matching server is expected to render into the "plot_placeholder"
# uiOutput, sizing the plot from input$plot_height (pixels).
ui <- fluidPage(
  # Typo fix: "Decided" -> "Decide" in the user-facing title.
  titlePanel("Let Your Users Decide on the Plot Height"),
  sidebarLayout(
    sidebarPanel(
      # Variables to map to the x and y aesthetics (mtcars numeric columns).
      selectInput(
        inputId = "x",
        label = "X Variable",
        choices = c("mpg", "disp", "hp", "drat", "wt", "qsec")
      ),
      selectInput(
        inputId = "y",
        label = "Y Variable",
        choices = rev(c("mpg", "disp", "hp", "drat", "wt", "qsec"))
      ),
      # Plot height in pixels, 200-1000, default 400.
      sliderInput(
        inputId = "plot_height",
        label = "Adjust Plot Height",
        min = 200,
        max = 1000,
        value = 400,
        ticks = FALSE,
        post = "px"
      )
    ),
    mainPanel(
      uiOutput("plot_placeholder")
    )
  )
)
| /adjust-plot-height/ui.R | permissive | thomas-neitmann/shiny-demo-apps | R | false | false | 690 | r | library(shiny)
# UI for a demo app that lets the user pick the rendered plot's height.
# The matching server renders into the "plot_placeholder" uiOutput, sizing
# the plot from input$plot_height (pixels).
ui <- fluidPage(
  # Typo fix: "Decided" -> "Decide" in the user-facing title.
  titlePanel("Let Your Users Decide on the Plot Height"),
  sidebarLayout(
    sidebarPanel(
      # Variables to map to the x and y aesthetics (mtcars numeric columns).
      selectInput(
        inputId = "x",
        label = "X Variable",
        choices = c("mpg", "disp", "hp", "drat", "wt", "qsec")
      ),
      selectInput(
        inputId = "y",
        label = "Y Variable",
        choices = rev(c("mpg", "disp", "hp", "drat", "wt", "qsec"))
      ),
      # Plot height in pixels, 200-1000, default 400.
      sliderInput(
        inputId = "plot_height",
        label = "Adjust Plot Height",
        min = 200,
        max = 1000,
        value = 400,
        ticks = FALSE,
        post = "px"
      )
    ),
    mainPanel(
      uiOutput("plot_placeholder")
    )
  )
)
|
# Simulate an inhomogeneous Poisson process on [0, Tmax] by thinning:
# draw a homogeneous process at rate lambdamax, then accept each point
# with probability lambda(t)/lambdamax.
set.seed(12345)
lambda<-function(x) 100*(sin(x*pi)+1)   # intensity function, maximum 200
Tmax<-10
lambdamax<-200                          # upper bound on lambda(x)
N<-rpois(1,Tmax*lambdamax)              # point count of the dominating process
prop<-runif(N,0,Tmax)                   # proposed event times
# Accept with prob lambda(t)/lambdamax.  (Was a hard-coded 200, duplicating
# lambdamax; use the variable so the bound can be changed in one place.)
A<-runif(N)<(lambda(prop)/lambdamax)
X<-prop[A]                              # retained (thinned) event times
cat("#N\n",length(X),"\n#X\n",X,"\n",file="pp.dat")
| /tests/poisp/makedata.R | permissive | admb-project/admb | R | false | false | 231 | r | set.seed(12345)
# Thinning simulation of an inhomogeneous Poisson process on [0, Tmax]:
# homogeneous proposals at rate lambdamax, accepted with prob lambda/lambdamax.
lambda<-function(x) 100*(sin(x*pi)+1)   # intensity function, maximum 200
Tmax<-10
lambdamax<-200                          # upper bound on lambda(x)
N<-rpois(1,Tmax*lambdamax)              # point count of the dominating process
prop<-runif(N,0,Tmax)                   # proposed event times
# Accept with prob lambda(t)/lambdamax (was a hard-coded 200 == lambdamax).
A<-runif(N)<(lambda(prop)/lambdamax)
X<-prop[A]                              # retained (thinned) event times
cat("#N\n",length(X),"\n#X\n",X,"\n",file="pp.dat")
|
/w01.R | no_license | karabanb/UWr2018 | R | false | false | 8,606 | r | ||
# Prepare the tree for PAML/codeml, which requires an unrooted tree:
# read the Newick tree for alignment 5194_0 and drop its root.
library(ape)
testtree <- read.tree("5194_0.txt")   # rooted input tree (Newick format)
unrooted_tr <- unroot(testtree)       # collapse the root node
write.tree(unrooted_tr, file="5194_0_unrooted.txt") | /codeml_files/newick_trees_processed/5194_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
testtree <- read.tree("5194_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="5194_0_unrooted.txt") |
library(readr)
library(tidyverse)
library(gdata)
# Keep character columns as character on import/creation.
# Use TRUE/FALSE, never T/F: T and F are ordinary variables and can be
# reassigned, silently changing behavior.
options(stringsAsFactors = FALSE)
# Sys.setlocale(locale = "UTF-8")
# Sys.setlocale(category = "LC_ALL", locale = "cht")
# rm(list=ls())
## Attendance records (long format) and election results for NTU student
## congress members.
NTU_Attnd <- read_csv("all_long_new.csv")
NTU_Vote <- read_csv("vote_all.csv")
## Per-member attendance rate: count meetings by attendance status, then
## divide each status count by the member's total meetings.
## NOTE(review): '出席' is the "present" status value in the raw data.
NTU_Attnd_rate <- NTU_Attnd %>%
  #mutate(Attnd_condition = if_else(Attnd == '出席', Attnd, '缺席')) %>%
  filter(general == "current" | general == "current*") %>%
  group_by(degree, college, grade, dept, name, start, end, Attnd) %>%
  count() %>%
  ungroup() %>%
  group_by(degree, college, grade, dept, name, start, end) %>%
  mutate(Attnd_Rate = n/sum(n)) %>%
  ungroup()
## Join election results onto attendance by member / term / college.
df_combine <- NTU_Attnd_rate %>%
  left_join(NTU_Vote, by = c('name' = 'name', 'start' = 'start', 'end' = 'end', 'college' = 'college'))
## Members with no matching election record (elected is NA) -- sanity check.
df_combine %>%
  filter(is.na(elected)) %>%
  select(college, name, start, end) %>%
  group_by(college, name, start, end) %>%
  count()
## Modeling frame: binary degree flag ('大學部' = undergraduate -> 0, else 1),
## term label rebuilt from `start`, vote support rate, keeping only the rows
## that record presence ('出席') so Attnd_Rate is the attendance rate.
df_model <- df_combine %>%
  mutate(degree = if_else(degree == '大學部', 0, 1),
         grade = as.factor(grade),
         start = str_c(str_sub(start, 1, 3), str_sub(start, 5, 5)),
         vote_support_rate = vote_support/vote_object) %>%
  filter(Attnd == '出席') %>%
  select(-Attnd)
# View(df_model)
## One-hot encode college, grade and term via model.matrix; "- 1" removes
## the intercept so every factor level gets its own dummy column.
mm <- model.matrix( ~ college - 1, data = df_model )
colnames(mm)
df_model_college <- cbind(df_model, mm)
mm2 <- model.matrix( ~ grade - 1, data = df_model_college )
colnames(mm2)
df_model_grade <- cbind(df_model_college, mm2)
mm3 <- model.matrix( ~ start - 1, data = df_model_grade )
colnames(mm3)
df_model_final <- cbind(df_model_grade, mm3)
#View(df_model_final)
colnames(df_model_final)
## Drop identifiers and raw counts, keeping rates plus the dummy columns.
df <- df_model_final %>%
  select(-c(college, elected, grade, dept, name, start, end, n, vote_support,
            vote_object, vote_invalid, college_population,
            college_population_total, college_vote_population,
            college_support_vote, college_vote_invalid))
#View(df)
## Manual stepwise exploration: 20 nested lm() fits of attendance rate on
## vote shares, electoral competitiveness, and college/grade/term dummies,
## dropping terms between fits.  NOTE(review): selection appears to be done
## by eyeballing summary() p-values -- confirm before reuse.
df_colname <- df %>% colnames()
df_colname %>% str_c(collapse = ", ")
df_colname %>% str_c(collapse = "+ ")
## Full model with the vote_support_rate x competitive interaction.
model1 <- lm(formula= Attnd_Rate ~ degree + vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate,
             data=df)
summary(model1)
model2 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate,
             data=df)
summary(model2)
model3 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate,
             data=df)
summary(model3)
model4 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + college_support_rate,
             data=df)
summary(model4)
model5 <- lm(formula= Attnd_Rate ~ college_vote_rate + college_support_rate,
             data=df)
summary(model5)
## Models 6+ add the per-college dummy columns created by model.matrix.
model6 <- lm(formula= Attnd_Rate ~ college_vote_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院,
             data=df)
summary(model6)
model7 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院,
             data=df)
summary(model7)
model8 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院 +
               grade1+ grade2+ grade3+ grade4+ grade5,
             data=df)
summary(model8)
model9 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院 +
               grade1+ grade2+ grade3+ grade4+ grade5 + start1032+ start1041+ start1042+ start1051+ start1052+ start1061,
             data=df)
summary(model9)
model10 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college法律學院 + college社會科學院 + college管理學院 + college醫學院 +
                grade1+ grade2+ grade3+ grade4+ grade5 + start1032+ start1041+ start1042+ start1051+ start1052+ start1061,
              data=df)
summary(model10)
model11 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college法律學院 + college社會科學院 + college管理學院 + college醫學院 +
                grade1+ grade2+ grade3+ grade4+ grade5 + start1041+ start1042+ start1051+ start1061,
              data=df)
summary(model11)
model12 <- lm(formula= Attnd_Rate ~ college_vote_rate + competitive + college法律學院 + college社會科學院 + college管理學院 + college醫學院 +
                grade1+ grade2+ grade3+ grade4+ grade5 + start1051,
              data=df)
summary(model12)
model13 <- lm(formula= Attnd_Rate ~ degree + college_vote_rate + competitive + college法律學院 + college社會科學院 + college管理學院 +
                grade1+ grade2+ grade3+ grade4 + grade5 + start1051,
              data=df)
summary(model13)
model14 <- lm(formula= Attnd_Rate ~ degree + college_vote_rate + competitive + college法律學院 + college社會科學院 + college管理學院 +
                grade1+ grade2+ grade3+ grade4 + start1051,
              data=df)
summary(model14)
model15 <- lm(formula= Attnd_Rate ~ degree + college_vote_rate + competitive + college管理學院 +
                grade1+ grade2+ grade3+ grade4 + start1051,
              data=df)
summary(model15)
model16 <- lm(formula= Attnd_Rate ~ degree + competitive + college管理學院 +
                grade1+ grade2+ grade3 + start1051,
              data=df)
summary(model16)
model17 <- lm(formula= Attnd_Rate ~ degree + competitive + college管理學院 +
                start1051,
              data=df)
summary(model17)
model18 <- lm(formula= Attnd_Rate ~ degree + competitive + college管理學院,
              data=df)
summary(model18)
model19 <- lm(formula= Attnd_Rate ~ degree + competitive,
              data=df)
summary(model19)
## Final, most reduced model: competitiveness alone.
model20 <- lm(formula= Attnd_Rate ~ competitive,
              data=df)
summary(model20)
## Quick visual checks of the main predictors against attendance rate.
ggplot(df, aes(college_vote_rate, Attnd_Rate)) +
  geom_point()
ggplot(df, aes(college_support_rate, Attnd_Rate)) +
  geom_point()
ggplot(df, aes(competitive, Attnd_Rate)) +
  geom_point()
ggplot(df, aes(degree, Attnd_Rate)) +
  geom_point()
ggplot(df, aes(college_population_rate, Attnd_Rate)) +
geom_point() | /Data_modeling.R | no_license | Dennishi0925/NTUSC | R | false | false | 7,207 | r | library(readr)
library(tidyverse)
library(gdata)
options(stringsAsFactors = F)
# Sys.setlocale(locale = "UTF-8")
# Sys.setlocale(category = "LC_ALL", locale = "cht")
# rm(list=ls())
NTU_Attnd <- read_csv("all_long_new.csv")
NTU_Vote <- read_csv("vote_all.csv")
NTU_Attnd_rate <- NTU_Attnd %>%
#mutate(Attnd_condition = if_else(Attnd == '出席', Attnd, '缺席')) %>%
filter(general == "current" | general == "current*") %>%
group_by(degree, college, grade, dept, name, start, end, Attnd) %>%
count() %>%
ungroup() %>%
group_by(degree, college, grade, dept, name, start, end) %>%
mutate(Attnd_Rate = n/sum(n)) %>%
ungroup()
df_combine <- NTU_Attnd_rate %>%
left_join(NTU_Vote, by = c('name' = 'name', 'start' = 'start', 'end' = 'end', 'college' = 'college'))
df_combine %>%
filter(is.na(elected)) %>%
select(college, name, start, end) %>%
group_by(college, name, start, end) %>%
count()
df_model <- df_combine %>%
mutate(degree = if_else(degree == '大學部', 0, 1),
grade = as.factor(grade),
start = str_c(str_sub(start, 1, 3), str_sub(start, 5, 5)),
vote_support_rate = vote_support/vote_object) %>%
filter(Attnd == '出席') %>%
select(-Attnd)
# View(df_model)
mm <- model.matrix( ~ college - 1, data = df_model )
colnames(mm)
df_model_college <- cbind(df_model, mm)
mm2 <- model.matrix( ~ grade - 1, data = df_model_college )
colnames(mm2)
df_model_grade <- cbind(df_model_college, mm2)
mm3 <- model.matrix( ~ start - 1, data = df_model_grade )
colnames(mm3)
df_model_final <- cbind(df_model_grade, mm3)
#View(df_model_final)
colnames(df_model_final)
df <- df_model_final %>%
select(-c(college, elected, grade, dept, name, start, end, n, vote_support,
vote_object, vote_invalid, college_population,
college_population_total, college_vote_population,
college_support_vote, college_vote_invalid))
#View(df)
df_colname <- df %>% colnames()
df_colname %>% str_c(collapse = ", ")
df_colname %>% str_c(collapse = "+ ")
model1 <- lm(formula= Attnd_Rate ~ degree + vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate,
data=df)
summary(model1)
model2 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate,
data=df)
summary(model2)
model3 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate,
data=df)
summary(model3)
model4 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + college_support_rate,
data=df)
summary(model4)
model5 <- lm(formula= Attnd_Rate ~ college_vote_rate + college_support_rate,
data=df)
summary(model5)
model6 <- lm(formula= Attnd_Rate ~ college_vote_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院,
data=df)
summary(model6)
model7 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院,
data=df)
summary(model7)
model8 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院 +
grade1+ grade2+ grade3+ grade4+ grade5,
data=df)
summary(model8)
model9 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college工學院 + college文學院 + college生物資源暨農學院 + college法律學院 + college社會科學院 + college理學院 + college管理學院 + college醫學院 +
grade1+ grade2+ grade3+ grade4+ grade5 + start1032+ start1041+ start1042+ start1051+ start1052+ start1061,
data=df)
summary(model9)
model10 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college法律學院 + college社會科學院 + college管理學院 + college醫學院 +
grade1+ grade2+ grade3+ grade4+ grade5 + start1032+ start1041+ start1042+ start1051+ start1052+ start1061,
data=df)
summary(model10)
model11 <- lm(formula= Attnd_Rate ~ vote_support_rate*competitive + college_vote_rate + competitive + college_support_rate + college_population_rate + college法律學院 + college社會科學院 + college管理學院 + college醫學院 +
grade1+ grade2+ grade3+ grade4+ grade5 + start1041+ start1042+ start1051+ start1061,
data=df)
summary(model11)
model12 <- lm(formula= Attnd_Rate ~ college_vote_rate + competitive + college法律學院 + college社會科學院 + college管理學院 + college醫學院 +
grade1+ grade2+ grade3+ grade4+ grade5 + start1051,
data=df)
summary(model12)
model13 <- lm(formula= Attnd_Rate ~ degree + college_vote_rate + competitive + college法律學院 + college社會科學院 + college管理學院 +
grade1+ grade2+ grade3+ grade4 + grade5 + start1051,
data=df)
summary(model13)
model14 <- lm(formula= Attnd_Rate ~ degree + college_vote_rate + competitive + college法律學院 + college社會科學院 + college管理學院 +
grade1+ grade2+ grade3+ grade4 + start1051,
data=df)
summary(model14)
model15 <- lm(formula= Attnd_Rate ~ degree + college_vote_rate + competitive + college管理學院 +
grade1+ grade2+ grade3+ grade4 + start1051,
data=df)
summary(model15)
model16 <- lm(formula= Attnd_Rate ~ degree + competitive + college管理學院 +
grade1+ grade2+ grade3 + start1051,
data=df)
summary(model16)
model17 <- lm(formula= Attnd_Rate ~ degree + competitive + college管理學院 +
start1051,
data=df)
summary(model17)
model18 <- lm(formula= Attnd_Rate ~ degree + competitive + college管理學院,
data=df)
summary(model18)
model19 <- lm(formula= Attnd_Rate ~ degree + competitive,
data=df)
summary(model19)
model20 <- lm(formula= Attnd_Rate ~ competitive,
data=df)
summary(model20)
ggplot(df, aes(college_vote_rate, Attnd_Rate)) +
geom_point()
ggplot(df, aes(college_support_rate, Attnd_Rate)) +
geom_point()
ggplot(df, aes(competitive, Attnd_Rate)) +
geom_point()
ggplot(df, aes(degree, Attnd_Rate)) +
geom_point()
ggplot(df, aes(college_population_rate, Attnd_Rate)) +
geom_point() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_set_optimization_options}
\alias{umx_set_optimization_options}
\title{Set options that affect optimization in OpenMx}
\usage{
umx_set_optimization_options(
opt = c("mvnRelEps", "mvnMaxPointsA", "Parallel diagnostics"),
value = NULL,
model = NULL,
silent = FALSE
)
}
\arguments{
\item{opt}{default returns current values of the options listed. Currently
"mvnRelEps", "mvnMaxPointsA", and "Parallel diagnostics".}
\item{value}{If not NULL, the value to set the opt to (can be a list of length(opt))}
\item{model}{A model for which to set the optimizer. Default (NULL) sets the optimizer globally.}
\item{silent}{If TRUE, no message will be printed.}
}
\value{
\itemize{
\item current values if no value set.
}
}
\description{
\code{umx_set_optimization_options} provides access to get and set options affecting optimization.
}
\details{
\emph{note}: For \code{mvnRelEps}, values between .0001 to .01 are conventional. Smaller values slow optimization.
}
\examples{
# show current value for selected or all options
umx_set_optimization_options() # print the existing state(s)
umx_set_optimization_options("mvnRelEps")
\dontrun{
umx_set_optimization_options("mvnRelEps", .01) # update globally
umx_set_optimization_options("Parallel diagnostics", value = "Yes")
}
}
\references{
\itemize{
\item \url{https://tbates.github.io}, \url{https://github.com/tbates/umx}
}
}
\seealso{
Other Get and set:
\code{\link{umx_get_checkpoint}()},
\code{\link{umx_get_options}()},
\code{\link{umx_set_auto_plot}()},
\code{\link{umx_set_auto_run}()},
\code{\link{umx_set_checkpoint}()},
\code{\link{umx_set_condensed_slots}()},
\code{\link{umx_set_cores}()},
\code{\link{umx_set_data_variance_check}()},
\code{\link{umx_set_optimizer}()},
\code{\link{umx_set_plot_file_suffix}()},
\code{\link{umx_set_plot_format}()},
\code{\link{umx_set_separator}()},
\code{\link{umx_set_silent}()},
\code{\link{umx_set_table_format}()},
\code{\link{umx}}
}
\concept{Get and set}
| /man/umx_set_optimization_options.Rd | no_license | jishanling/umx | R | false | true | 2,066 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc_and_utility.R
\name{umx_set_optimization_options}
\alias{umx_set_optimization_options}
\title{Set options that affect optimization in OpenMx}
\usage{
umx_set_optimization_options(
opt = c("mvnRelEps", "mvnMaxPointsA", "Parallel diagnostics"),
value = NULL,
model = NULL,
silent = FALSE
)
}
\arguments{
\item{opt}{default returns current values of the options listed. Currently
"mvnRelEps", "mvnMaxPointsA", and "Parallel diagnostics".}
\item{value}{If not NULL, the value to set the opt to (can be a list of length(opt))}
\item{model}{A model for which to set the optimizer. Default (NULL) sets the optimizer globally.}
\item{silent}{If TRUE, no message will be printed.}
}
\value{
\itemize{
\item current values if no value set.
}
}
\description{
\code{umx_set_optimization_options} provides access to get and set options affecting optimization.
}
\details{
\emph{note}: For \code{mvnRelEps}, values between .0001 to .01 are conventional. Smaller values slow optimization.
}
\examples{
# show current value for selected or all options
umx_set_optimization_options() # print the existing state(s)
umx_set_optimization_options("mvnRelEps")
\dontrun{
umx_set_optimization_options("mvnRelEps", .01) # update globally
umx_set_optimization_options("Parallel diagnostics", value = "Yes")
}
}
\references{
\itemize{
\item \url{https://tbates.github.io}, \url{https://github.com/tbates/umx}
}
}
\seealso{
Other Get and set:
\code{\link{umx_get_checkpoint}()},
\code{\link{umx_get_options}()},
\code{\link{umx_set_auto_plot}()},
\code{\link{umx_set_auto_run}()},
\code{\link{umx_set_checkpoint}()},
\code{\link{umx_set_condensed_slots}()},
\code{\link{umx_set_cores}()},
\code{\link{umx_set_data_variance_check}()},
\code{\link{umx_set_optimizer}()},
\code{\link{umx_set_plot_file_suffix}()},
\code{\link{umx_set_plot_format}()},
\code{\link{umx_set_separator}()},
\code{\link{umx_set_silent}()},
\code{\link{umx_set_table_format}()},
\code{\link{umx}}
}
\concept{Get and set}
|
##################################
#Abinesh Senthil Kumar
#Prediction of accidents and their severity
##################################
# Section: package loading and raw-data preparation.
# Reads the full US accidents extract, restricts it to Los Angeles, collapses
# Severity (1-4) into two classes, drops unused columns, and writes the cleaned
# modeling table to final.csv so the modeling section can run independently.
library(rpart)
library(gbm)
library(ada)
library(randomForest)
library(caret)
library(car)
library(ggmap)
library(ggplot2)
#setting working directory
# NOTE(review): hard-coded, user-specific path -- prefer running from the
# project directory (or reading a path from an environment variable).
setwd('C:/Users/Flynn/Desktop/Data analytics proj')
getwd()
#reading fulldata containing initial dataset with 2.25 million records for whole United States of America
fulldata <- read.csv('accidents12.csv')
#subsetting for LosAngeles only
laonly <- subset(fulldata, fulldata$City == 'Los Angeles')
#converting severity to two levels: 1 = low (original 1-2), 2 = high (original 3-4)
laonly$Severity[laonly$Severity < 3] <- 1
laonly$Severity[laonly$Severity == 3] <- 2
laonly$Severity[laonly$Severity > 3] <- 2
laonly$Severity <- as.factor(laonly$Severity)
str(laonly)
summary(laonly)
#removing redundant variables
# NOTE(review): selection by magic position numbers silently breaks if the
# source schema changes -- selecting columns by name would be safer.
laonlyreqvar <- laonly[,c(4,7,8,15,16,17,24,25,26,27,28,30,31,32,34,37,42,44,46)]
finaldataset <- laonlyreqvar[,-c(5,6,8,12,13,15,17)]
str(finaldataset)
#lat and long used to plot in map
finaldatawithlatandlong <- finaldataset
#removing lat and long to create a modeling dataset
finalmodelingdataset <- finaldataset[, -c(2,3)]
str(finalmodelingdataset)
#checking and removing na values
sum(is.na(finalmodelingdataset))
finalmodelingdataset <- na.omit(finalmodelingdataset)
str(finalmodelingdataset)
#converting visibility to factor
finalmodelingdataset$Visibility.mi. <- as.factor(finalmodelingdataset$Visibility.mi.)
#converting weather condition to a 4 level factor (rain / cloudy / fog / everything else)
#install.packages("car")
library(car)
finalmodelingdataset$Weather_Condition <- recode(finalmodelingdataset$Weather_Condition,"c('Drizzle','Heavy Rain','Light Drizzle','Light Rain','Light Thunderstorms and Rain','Rain','Thunderstorm')='rain';c('Mostly Cloudy','Overcast','Partly Cloudy','Scattered Clouds')='cloudy';c('Smoke','Fog','Haze','Mist','Patches of Fog','Shallow Fog')='fog'")
# drop rows whose weather condition is the empty string
finalmodelingdataset <- finalmodelingdataset[!finalmodelingdataset$Weather_Condition == "",]
str(finalmodelingdataset)
#saving the final dataset to final.csv
write.csv(finalmodelingdataset, file = "final.csv")
######### Project starts from here ##########
# Reload the cleaned table; the rest of the script depends only on final.csv.
laaccident <- read.csv('final.csv')
laaccident <- laaccident[,-c(1)] #used to remove the first index column that has been created while saving the new csv file
str(laaccident)
# read.csv re-imports these columns as plain numeric/character, so re-declare factors
laaccident$Severity <- as.factor(laaccident$Severity)
laaccident$Visibility.mi. <- as.factor(laaccident$Visibility.mi.)
##########################################Exploratory Data Analysis#########################################################
# NOTE(review): dev.off() errors if no graphics device is currently open;
# harmless in an interactive session, fails in Rscript.
dev.off()
#plotting response variable
plot(laaccident$Severity, ylim = c(0, 30000), main = "Response variable", col = 'pink', names = c('low severity','high severity'))
#using ggmap to plot the datapoints on Los Angeles map
incidents <- finaldatawithlatandlong
#install.packages("ggmap")
library(ggmap)
# SECURITY NOTE(review): a live Google Maps API key is committed in source.
# This key should be revoked/rotated and loaded from an environment variable.
ggmap::register_google(key = "AIzaSyCK_MlkB3zLV8Yz-T-8yOIaNqVUNVpn_do")
#taking Los angeles map from googlemaps and plotting all datapoints in the map
p <- ggmap(get_googlemap(maptype="terrain",zoom=11,center = c(lon = -118.28904, lat = 34.078926)))
p + geom_point(aes(x =Start_Lng , y =Start_Lat ),colour = 'red', incidents, alpha=0.25, size = 0.5)
i2lsev <-subset(incidents,incidents$Severity=='1') #subsetting only low severity
i2hsev<-subset(incidents,incidents$Severity=='2') #subsetting only high severity
#distinguishing high severity as #red and low severity as #yellow
p + geom_point(aes(x =Start_Lng , y =Start_Lat ),colour = 'yellow', i2lsev, alpha=0.25, size = 0.5) +
geom_point(aes(x =Start_Lng , y =Start_Lat ),colour = 'red', i2hsev, alpha=0.25, size = 0.5)
#plotting all predictors in a 3x3 grid
par(mfrow = c(3,3))
hist(laaccident$Temperature.F., main = 'Distribution of temperature',xlab = 'Temperature', col = 'skyblue')
hist(laaccident$Humidity..., main = 'Distribution of humidity', xlab = 'Humidity', col = 'skyblue')
hist(laaccident$Pressure.in., main = 'Distribution of pressure', xlab = 'Pressure', col = 'skyblue')
plot(laaccident$Side, ylim = c(0,50000), main = 'Side', xlab = '', col = 'skyblue')
plot(laaccident$Sunrise_Sunset, main = 'Time of the day', col = 'skyblue', ylim = c(0,35000))
plot(laaccident$Visibility.mi., ylim = c(0,45000), main = 'Visibility', col = 'skyblue')
plot((laaccident$Weather_Condition), ylim = c(0,35000) ,main = 'Weather condition', col = 'skyblue')
plot(laaccident$Junction, ylim = c(0,50000), col = 'skyblue', main = 'Junction' )
plot(laaccident$Traffic_Signal, ylim = c(0,50000), col = 'skyblue', main = 'traffic signal')
#checking for outliers for the continuous variables using boxplot
par(mfrow = c(1,3))
boxplot(laaccident$Temperature.F., main = 'Boxplot of Temperature', xlab = 'Temperature')
boxplot(laaccident$Humidity..., main = 'Boxplot of Humidity', xlab = 'Humidity')
boxplot(laaccident$Pressure.in., main = 'Boxplot of Pressure', xlab = 'Pressure')
# capture (without drawing) the points each boxplot flags as outliers, then count them
Outlierspressure = data.frame(boxplot(laaccident$Pressure.in., plot=F)$out)
Outlierstemp = data.frame(boxplot(laaccident$Temperature.F., plot=F)$out)
Outliershumid = data.frame(boxplot(laaccident$Humidity..., plot=F)$out)
nrow(Outlierspressure)
nrow(Outlierstemp)
nrow(Outliershumid)
dev.off()
############################################## Fitting Models #####################################################################################
#Randomized holdout
# Repeated random-holdout evaluation: numholdout independent ~80/20 splits.
# Each model writes its holdout and training accuracy for split i into row i
# of the matrices below (one column per model, named in colnames).
set.seed(15)
numholdout = 10
percentholdout = 0.2
nmodel = 6
predictionaccuracy <- matrix(data= NA, ncol = nmodel, nrow = numholdout)
trainingaccuracy <- matrix(data= NA, ncol = nmodel, nrow = numholdout)
colnames(predictionaccuracy) <- c("Logistic regression", "Cart using rpart", "Randomforest", "Gbm boost", "Ada boost", "Null model")
colnames(trainingaccuracy) <- c("Logistic regression", "Cart using rpart", "Randomforest", "Gbm boost", "Ada boost", "Null model")
# Draw a random 0/1 indicator vector used to assign rows to the holdout set.
#
# Args:
#   percent: probability in [0, 1] that an element is 1.
#   length:  number of indicators to draw.
# Returns: a numeric vector of `length` 0/1 values (empty when length == 0).
randomstring <- function(percent, length) {
  # Vectorized draw replaces the original element-by-element loop that grew
  # `s` with assignment into a c() vector. It also fixes the 1:length footgun:
  # the old loop iterated over c(1, 0) when length == 0 and returned a
  # spurious one-element vector instead of an empty one.
  as.numeric(runif(length) <= percent)
}
####### used to get the final model to be used in for loop ########
##############################################
# One fixed 80/20 split, used only to inspect each model interactively
# (summaries, plots, confusion matrices) before the repeated-holdout loops.
trainindex <- sample(x = 1:nrow(laaccident), size = 0.8*(nrow(laaccident)))
train.data <- laaccident[trainindex,]
test.data <- laaccident[-trainindex,]
##############################################
library(caret) #for confusion matrix function
#logistic regression
logistic <- glm(Severity ~ ., data = train.data, family = binomial())
logisticpred <- predict(logistic, newdata = test.data, type = 'response' )
# threshold predicted probability at 0.5; labels "1"/"2" match the Severity levels
logisticpred <- ifelse(logisticpred > 0.5, "2","1")
summary(logistic) #selecting only significant predictors from summary(logistic)
confusionMatrix(as.factor(logisticpred) , test.data$Severity)
#logistic in for loop
# NOTE(review): attach() is an anti-pattern (masking, stale copies) and is
# never paired with detach() in this script; the loops work because they
# reference laaccident/train/holdout explicitly.
set.seed(13)
attach(laaccident)
for (i in 1:numholdout) {
# Shared split boilerplate (repeated verbatim in every model loop below):
# s marks ~percentholdout of the rows as holdout.
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
# cbind() on a factor coerces Severity to its integer codes, so
# holdout.response holds 1/2 numeric codes -- this lines up with the "1"/"2"
# prediction labels only because the factor levels are "1" and "2".
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
#final model after removing insignificant terms
lm.a <- glm(Severity ~ Side+Humidity...+Pressure.in.+Weather_Condition+Junction+Traffic_Signal+Sunrise_Sunset, data = train, family = binomial())
lm.a.pred <- predict(lm.a, newdata = holdout, type = 'response' )
lm.a.pred <- ifelse(lm.a.pred > 0.5, "2","1")
lm.train.pred <- predict(lm.a, newdata = train, type = 'response')
lm.train.pred <- ifelse(lm.train.pred > 0.5, "2","1")
# accuracy = trace of the confusion table / total count
predictionaccuracy[i,1] <- sum(diag(table(lm.a.pred, holdout.response)))/sum(table(lm.a.pred, holdout.response))
trainingaccuracy[i,1] <- sum(diag(table(lm.train.pred, train$Severity)))/sum(table(lm.train.pred, train$Severity))
}
#######################
#rpart
# CART decision tree: one interactive fit on the fixed split, then the
# repeated-holdout evaluation filling column 2 of the accuracy matrices.
library(rpart)
cart <- rpart(Severity ~ ., train.data, method = "class")
cart.predict <- predict(cart, newdata = test.data, type = 'class')
plot(cart)
text(cart)
confusionMatrix(cart.predict, test.data$Severity)
#rpart in for loop
library(rpart)
set.seed(17)
attach(laaccident)
for (i in 1:numholdout) {
# split boilerplate -- identical to the logistic-regression loop
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
cartmodel1 <- rpart(Severity ~ ., train, method = "class")
cart.predict <- predict(cartmodel1, newdata = holdout, type = 'class')
cart.train.pred <- predict(cartmodel1, newdata = train, type = 'class')
predictionaccuracy[i,2] <- sum(diag(table(cart.predict, holdout.response)))/sum(table(cart.predict, holdout.response))
trainingaccuracy[i,2] <- sum(diag(table(cart.train.pred, train$Severity)))/sum(table(cart.train.pred, train$Severity))
}
#######################
#randomforest
# Random forest: tune ntree (via the OOB error curve) and mtry (grid 1..9),
# then evaluate the tuned model in the repeated-holdout loop (column 3).
library(randomForest)
set.seed(80)
# NOTE(review): prefer TRUE over T -- T is an ordinary variable and can be reassigned.
rfmodel <- randomForest(Severity ~ ., train.data, importance = T )
plot(rfmodel)
rferrorrate <- data.frame(rfmodel$err.rate)
#finding the tree size for minimum error
mintreerf <- which.min(rferrorrate$OOB)
mintreerf #given the optimal tree size
#new rf model with optimal tree size
set.seed(5)
rfmodel1 <- randomForest(Severity ~ ., train.data, ntree = mintreerf, importance = T)
print(rfmodel1)
plot(rfmodel1)
formtry <- c()
for(i in 1:9) {
temporaryrf <- randomForest(Severity ~., train.data,importance = T, mtry = i, ntree = mintreerf )
# err.rate is a matrix; single-bracket indexing with one subscript reads it
# column-major, so this picks row mintreerf of the FIRST column (OOB) --
# works, but err.rate[mintreerf, "OOB"] would state the intent explicitly.
formtry[i] <- temporaryrf$err.rate[mintreerf]
}
formtry #from this we can see the optimal number of predictors
#we will use this optpred and mintreerf in random holdout
optimalmtry <- which.min(formtry)
optimalmtry
finalrfmodel <- randomForest(Severity ~ ., train.data, ntree = mintreerf, mtry = optimalmtry, importance = T )
plot(finalrfmodel)
rfpredicted <- predict(finalrfmodel, test.data)
confusionMatrix(rfpredicted, test.data$Severity)
#randomforest in forloop
library(randomForest)
set.seed(10)
attach(laaccident)
for (i in 1:numholdout) {
# split boilerplate -- identical to the logistic-regression loop
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
#ntree and mtry finalized after running the model individually
finalrfmodel <- randomForest(Severity ~ ., train, ntree = mintreerf, mtry = optimalmtry, importance = T )
rfpred <- predict(finalrfmodel, newdata = holdout)
rfpred.train <- predict(finalrfmodel, newdata = train)
predictionaccuracy[i,3] <- sum(diag(table(rfpred, holdout.response)))/sum(table(rfpred, holdout.response))
trainingaccuracy[i,3] <- sum(diag(table(rfpred.train, train$Severity)))/sum(table(rfpred.train, train$Severity))
}
varImpPlot(finalrfmodel) #variable importance plot of Randomforest model
####################
#gbmboosting
# Gradient boosting (gbm): interactive fit with partial-dependence plots,
# then the repeated-holdout evaluation filling column 4.
library(gbm)
# predict() with distribution = "multinomial" returns per-class probabilities
# (an n x classes x 1 array); apply/which.max converts them to class indices.
gbmboosting <- gbm(Severity ~ .,data = train.data,distribution = "multinomial", n.trees=500, interaction.depth = 4)
gbmpred <- predict(gbmboosting, newdata = test.data, n.trees = 500, type = "response")
gbmpred <- as.factor(apply(gbmpred, 1, which.max))
summary(gbmboosting)
# NOTE(review): argument order is swapped relative to the other calls in this
# script (prediction is passed second here); confusionMatrix treats the first
# argument as `data` (predictions) and the second as `reference` (truth).
confusionMatrix(test.data$Severity,gbmpred)
plot(gbmboosting, i = 'Side') #Partial dependence plot for side
plot(gbmboosting, i = 'Traffic_Signal') #Partial dependence plot for traffic signal
plot(gbmboosting, i = 'Visibility.mi.') #Partial dependence plot for visibility
plot(gbmboosting, i = 'Humidity...') #Partial dependence plot for humidity
#gradient boosting in for loop
library(gbm)
set.seed(17)
attach(laaccident)
for (i in 1:numholdout) {
# split boilerplate -- identical to the logistic-regression loop
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
boosting <- gbm(Severity ~ .,data = train,distribution = "multinomial", n.trees=500, interaction.depth = 4)
boostpred <- predict(boosting, newdata = holdout, n.trees = 500, type = "response")
boostpred <- as.factor(apply(boostpred, 1, which.max))
boostpredtrain <- predict(boosting, newdata = train, n.trees = 500, type = 'response')
boostpredtrain <- as.factor(apply(boostpredtrain, 1, which.max))
predictionaccuracy[i,4] <- sum(diag(table(boostpred, holdout.response)))/sum(table(boostpred, holdout.response))
trainingaccuracy[i,4] <- sum(diag(table(boostpredtrain, train$Severity)))/sum(table(boostpredtrain, train$Severity))
}
##################
#ada boosting
# AdaBoost (ada): interactive fit to pick the iteration count from the error
# plot, then the repeated-holdout evaluation filling column 5.
library(ada)
set.seed(15)
adaboosting <- ada(Severity ~., train.data, iter = 50)
plot(adaboosting) #taking 45 as number of iteration
adapred <- predict(adaboosting, test.data)
# NOTE(review): prediction passed second here, unlike the earlier calls --
# confusionMatrix's first argument is the predictions, second the reference.
confusionMatrix(test.data$Severity,adapred)
#ada boosting in for loop
library(ada)
set.seed(33)
attach(laaccident)
for (i in 1:numholdout) {
# split boilerplate -- identical to the logistic-regression loop
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
# iter = 45 chosen from the plot of the interactive fit above
boostingmodelada <- ada(Severity ~ ., train, iter = 45)
pred.boostada <- predict(boostingmodelada, holdout)
pred.train.boostada <- predict(boostingmodelada, train)
predictionaccuracy[i,5] <- sum(diag(table(pred.boostada, holdout.response)))/sum(table(pred.boostada, holdout.response))
trainingaccuracy[i,5] <- sum(diag(table(pred.train.boostada, train$Severity)))/sum(table(pred.train.boostada, train$Severity))
}
###########################
#null model
# Baseline: caret::nullModel always predicts the majority class; its accuracy
# (column 6) is the floor every real model must beat.
library(caret)
set.seed(97)
attach(laaccident)
for (i in 1:numholdout) {
# split boilerplate -- identical to the logistic-regression loop
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
nullmodel <- nullModel(y = train$Severity, type = 'class')
pred.nullmodel <- predict(nullmodel, holdout)
pred.train.nullmodel <- predict(nullmodel, train)
predictionaccuracy[i,6] <- sum(diag(table(pred.nullmodel, holdout.response)))/sum(table(pred.nullmodel, holdout.response))
trainingaccuracy[i,6] <- sum(diag(table(pred.train.nullmodel, train$Severity)))/sum(table(pred.train.nullmodel, train$Severity))
}
#finding the average prediction and training accuracy
# colMeans replaces the original element-by-element loops that grew the
# result vectors with c(); unname() keeps the results plain unnamed numeric
# vectors, exactly as the loops produced.
meanpredictionaccuracy <- unname(colMeans(predictionaccuracy))
meanpredictionaccuracy #gives the mean prediction accuracy of all the models
max(meanpredictionaccuracy) #gives the maximum prediction accuracy out of all models
which.max(meanpredictionaccuracy) #gives which model has the maximum prediction accuracy
#model 3 has the highest prediction accuracy (Randomforest)
meantrainingaccuracy <- unname(colMeans(trainingaccuracy))
meantrainingaccuracy #gives the mean training accuracy of all the models
max(meantrainingaccuracy) #gives the maximum training accuracy out of all models
which.max(meantrainingaccuracy) #gives which model has the maximum training accuracy
#model 3 has the highest training accuracy (Randomforest)
###################
# NOTE(review): dev.off() errors when no graphics device is open
dev.off()
| /Accident severity prediction.R | no_license | abinesh-23/Accident-severity-prediction | R | false | false | 16,108 | r | ##################################
#Abinesh Senthil Kumar
#Prediction of accidents and their severity
##################################
library(rpart)
library(gbm)
library(ada)
library(randomForest)
library(caret)
library(car)
library(ggmap)
library(ggplot2)
#setting working directory
setwd('C:/Users/Flynn/Desktop/Data analytics proj')
getwd()
#reading fulldata containing initial dataset with 2.25 million records for whole United States of America
fulldata <- read.csv('accidents12.csv')
#subsetting for LosAngeles only
laonly <- subset(fulldata, fulldata$City == 'Los Angeles')
#converting severity to two levels
laonly$Severity[laonly$Severity < 3] <- 1
laonly$Severity[laonly$Severity == 3] <- 2
laonly$Severity[laonly$Severity > 3] <- 2
laonly$Severity <- as.factor(laonly$Severity)
str(laonly)
summary(laonly)
#removing redundant variables
laonlyreqvar <- laonly[,c(4,7,8,15,16,17,24,25,26,27,28,30,31,32,34,37,42,44,46)]
finaldataset <- laonlyreqvar[,-c(5,6,8,12,13,15,17)]
str(finaldataset)
#lat and long used to plot in map
finaldatawithlatandlong <- finaldataset
#removing lat and long to create a modeling dataset
finalmodelingdataset <- finaldataset[, -c(2,3)]
str(finalmodelingdataset)
#checking and removing na values
sum(is.na(finalmodelingdataset))
finalmodelingdataset <- na.omit(finalmodelingdataset)
str(finalmodelingdataset)
#converting visibility to factor
finalmodelingdataset$Visibility.mi. <- as.factor(finalmodelingdataset$Visibility.mi.)
#converting weather conditioin to 4 level factor
#install.packages("car")
library(car)
finalmodelingdataset$Weather_Condition <- recode(finalmodelingdataset$Weather_Condition,"c('Drizzle','Heavy Rain','Light Drizzle','Light Rain','Light Thunderstorms and Rain','Rain','Thunderstorm')='rain';c('Mostly Cloudy','Overcast','Partly Cloudy','Scattered Clouds')='cloudy';c('Smoke','Fog','Haze','Mist','Patches of Fog','Shallow Fog')='fog'")
finalmodelingdataset <- finalmodelingdataset[!finalmodelingdataset$Weather_Condition == "",]
str(finalmodelingdataset)
#saving the final dataset to final.csv
write.csv(finalmodelingdataset, file = "final.csv")
######### Project starts from here ##########
laaccident <- read.csv('final.csv')
laaccident <- laaccident[,-c(1)] #used to remove the first index column that has been created while saving the new csv file
str(laaccident)
laaccident$Severity <- as.factor(laaccident$Severity)
laaccident$Visibility.mi. <- as.factor(laaccident$Visibility.mi.)
##########################################Exploratory Data Analysis#########################################################
dev.off()
#plotting response variable
plot(laaccident$Severity, ylim = c(0, 30000), main = "Response variable", col = 'pink', names = c('low severity','high severity'))
#using ggmap to plot the datapoints on Los Angeles map
incidents <- finaldatawithlatandlong
#install.packages("ggmap")
library(ggmap)
ggmap::register_google(key = "AIzaSyCK_MlkB3zLV8Yz-T-8yOIaNqVUNVpn_do")
#taking Los angeles map from googlemaps and plotting all datapoints in the map
p <- ggmap(get_googlemap(maptype="terrain",zoom=11,center = c(lon = -118.28904, lat = 34.078926)))
p + geom_point(aes(x =Start_Lng , y =Start_Lat ),colour = 'red', incidents, alpha=0.25, size = 0.5)
i2lsev <-subset(incidents,incidents$Severity=='1') #subsetting only low severity
i2hsev<-subset(incidents,incidents$Severity=='2') #subsetting only high severity
#distinguishing high severity as #red and low severity as #yellow
p + geom_point(aes(x =Start_Lng , y =Start_Lat ),colour = 'yellow', i2lsev, alpha=0.25, size = 0.5) +
geom_point(aes(x =Start_Lng , y =Start_Lat ),colour = 'red', i2hsev, alpha=0.25, size = 0.5)
#plotting all predictors
par(mfrow = c(3,3))
hist(laaccident$Temperature.F., main = 'Distribution of temperature',xlab = 'Temperature', col = 'skyblue')
hist(laaccident$Humidity..., main = 'Distribution of humidity', xlab = 'Humidity', col = 'skyblue')
hist(laaccident$Pressure.in., main = 'Distribution of pressure', xlab = 'Pressure', col = 'skyblue')
plot(laaccident$Side, ylim = c(0,50000), main = 'Side', xlab = '', col = 'skyblue')
plot(laaccident$Sunrise_Sunset, main = 'Time of the day', col = 'skyblue', ylim = c(0,35000))
plot(laaccident$Visibility.mi., ylim = c(0,45000), main = 'Visibility', col = 'skyblue')
plot((laaccident$Weather_Condition), ylim = c(0,35000) ,main = 'Weather condition', col = 'skyblue')
plot(laaccident$Junction, ylim = c(0,50000), col = 'skyblue', main = 'Junction' )
plot(laaccident$Traffic_Signal, ylim = c(0,50000), col = 'skyblue', main = 'traffic signal')
#checking for outliers for the continuous variables using boxplot
par(mfrow = c(1,3))
boxplot(laaccident$Temperature.F., main = 'Boxplot of Temperature', xlab = 'Temperature')
boxplot(laaccident$Humidity..., main = 'Boxplot of Humidity', xlab = 'Humidity')
boxplot(laaccident$Pressure.in., main = 'Boxplot of Pressure', xlab = 'Pressure')
Outlierspressure = data.frame(boxplot(laaccident$Pressure.in., plot=F)$out)
Outlierstemp = data.frame(boxplot(laaccident$Temperature.F., plot=F)$out)
Outliershumid = data.frame(boxplot(laaccident$Humidity..., plot=F)$out)
nrow(Outlierspressure)
nrow(Outlierstemp)
nrow(Outliershumid)
dev.off()
############################################## Fitting Models #####################################################################################
#Randomized holdout
set.seed(15)
numholdout = 10
percentholdout = 0.2
nmodel = 6
predictionaccuracy <- matrix(data= NA, ncol = nmodel, nrow = numholdout)
trainingaccuracy <- matrix(data= NA, ncol = nmodel, nrow = numholdout)
colnames(predictionaccuracy) <- c("Logistic regression", "Cart using rpart", "Randomforest", "Gbm boost", "Ada boost", "Null model")
colnames(trainingaccuracy) <- c("Logistic regression", "Cart using rpart", "Randomforest", "Gbm boost", "Ada boost", "Null model")
# Draw a random 0/1 indicator vector used to assign rows to the holdout set.
#
# Args:
#   percent: probability in [0, 1] that an element is 1.
#   length:  number of indicators to draw.
# Returns: a numeric vector of `length` 0/1 values (empty when length == 0).
randomstring <- function(percent, length) {
  # Vectorized draw replaces the original element-by-element loop that grew
  # `s` with assignment into a c() vector. It also fixes the 1:length footgun:
  # the old loop iterated over c(1, 0) when length == 0 and returned a
  # spurious one-element vector instead of an empty one.
  as.numeric(runif(length) <= percent)
}
####### used to get the final model to be used in for loop ########
##############################################
trainindex <- sample(x = 1:nrow(laaccident), size = 0.8*(nrow(laaccident)))
train.data <- laaccident[trainindex,]
test.data <- laaccident[-trainindex,]
##############################################
library(caret) #for confusion matrix function
#logistic regression
logistic <- glm(Severity ~ ., data = train.data, family = binomial())
logisticpred <- predict(logistic, newdata = test.data, type = 'response' )
logisticpred <- ifelse(logisticpred > 0.5, "2","1")
summary(logistic) #selecting only significant predictors from summary(logistic)
confusionMatrix(as.factor(logisticpred) , test.data$Severity)
#logistic in for loop
set.seed(13)
attach(laaccident)
for (i in 1:numholdout) {
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
#final model after removing insignificant terms
lm.a <- glm(Severity ~ Side+Humidity...+Pressure.in.+Weather_Condition+Junction+Traffic_Signal+Sunrise_Sunset, data = train, family = binomial())
lm.a.pred <- predict(lm.a, newdata = holdout, type = 'response' )
lm.a.pred <- ifelse(lm.a.pred > 0.5, "2","1")
lm.train.pred <- predict(lm.a, newdata = train, type = 'response')
lm.train.pred <- ifelse(lm.train.pred > 0.5, "2","1")
predictionaccuracy[i,1] <- sum(diag(table(lm.a.pred, holdout.response)))/sum(table(lm.a.pred, holdout.response))
trainingaccuracy[i,1] <- sum(diag(table(lm.train.pred, train$Severity)))/sum(table(lm.train.pred, train$Severity))
}
#######################
#rpart
library(rpart)
cart <- rpart(Severity ~ ., train.data, method = "class")
cart.predict <- predict(cart, newdata = test.data, type = 'class')
plot(cart)
text(cart)
confusionMatrix(cart.predict, test.data$Severity)
#rpart in for loop
library(rpart)
set.seed(17)
attach(laaccident)
for (i in 1:numholdout) {
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
cartmodel1 <- rpart(Severity ~ ., train, method = "class")
cart.predict <- predict(cartmodel1, newdata = holdout, type = 'class')
cart.train.pred <- predict(cartmodel1, newdata = train, type = 'class')
predictionaccuracy[i,2] <- sum(diag(table(cart.predict, holdout.response)))/sum(table(cart.predict, holdout.response))
trainingaccuracy[i,2] <- sum(diag(table(cart.train.pred, train$Severity)))/sum(table(cart.train.pred, train$Severity))
}
#######################
#randomforest
library(randomForest)
set.seed(80)
rfmodel <- randomForest(Severity ~ ., train.data, importance = T )
plot(rfmodel)
rferrorrate <- data.frame(rfmodel$err.rate)
#finding the tree size for minimum error
mintreerf <- which.min(rferrorrate$OOB)
mintreerf #given the optimal tree size
#new rf model with optimal tree size
set.seed(5)
rfmodel1 <- randomForest(Severity ~ ., train.data, ntree = mintreerf, importance = T)
print(rfmodel1)
plot(rfmodel1)
formtry <- c()
for(i in 1:9) {
temporaryrf <- randomForest(Severity ~., train.data,importance = T, mtry = i, ntree = mintreerf )
formtry[i] <- temporaryrf$err.rate[mintreerf]
}
formtry #from this we can see the optimal number of predictors
#we will use this optpred and mintreerf in random holdout
optimalmtry <- which.min(formtry)
optimalmtry
finalrfmodel <- randomForest(Severity ~ ., train.data, ntree = mintreerf, mtry = optimalmtry, importance = T )
plot(finalrfmodel)
rfpredicted <- predict(finalrfmodel, test.data)
confusionMatrix(rfpredicted, test.data$Severity)
#randomforest in forloop
library(randomForest)
set.seed(10)
attach(laaccident)
for (i in 1:numholdout) {
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
#ntree and mtry finalized after running the model individually
finalrfmodel <- randomForest(Severity ~ ., train, ntree = mintreerf, mtry = optimalmtry, importance = T )
rfpred <- predict(finalrfmodel, newdata = holdout)
rfpred.train <- predict(finalrfmodel, newdata = train)
predictionaccuracy[i,3] <- sum(diag(table(rfpred, holdout.response)))/sum(table(rfpred, holdout.response))
trainingaccuracy[i,3] <- sum(diag(table(rfpred.train, train$Severity)))/sum(table(rfpred.train, train$Severity))
}
varImpPlot(finalrfmodel) #variable importance plot of Randomforest model
####################
#gbmboosting
library(gbm)
gbmboosting <- gbm(Severity ~ .,data = train.data,distribution = "multinomial", n.trees=500, interaction.depth = 4)
gbmpred <- predict(gbmboosting, newdata = test.data, n.trees = 500, type = "response")
gbmpred <- as.factor(apply(gbmpred, 1, which.max))
summary(gbmboosting)
confusionMatrix(test.data$Severity,gbmpred)
plot(gbmboosting, i = 'Side') #Partial dependence plot for side
plot(gbmboosting, i = 'Traffic_Signal') #Partial dependence plot for traffic signal
plot(gbmboosting, i = 'Visibility.mi.') #Partial dependence plot for visibility
plot(gbmboosting, i = 'Humidity...') #Partial dependence plot for humidity
#gradient boosting in for loop
library(gbm)
set.seed(17)
attach(laaccident)
for (i in 1:numholdout) {
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
boosting <- gbm(Severity ~ .,data = train,distribution = "multinomial", n.trees=500, interaction.depth = 4)
boostpred <- predict(boosting, newdata = holdout, n.trees = 500, type = "response")
boostpred <- as.factor(apply(boostpred, 1, which.max))
boostpredtrain <- predict(boosting, newdata = train, n.trees = 500, type = 'response')
boostpredtrain <- as.factor(apply(boostpredtrain, 1, which.max))
predictionaccuracy[i,4] <- sum(diag(table(boostpred, holdout.response)))/sum(table(boostpred, holdout.response))
trainingaccuracy[i,4] <- sum(diag(table(boostpredtrain, train$Severity)))/sum(table(boostpredtrain, train$Severity))
}
##################
#ada boosting
library(ada)
set.seed(15)
adaboosting <- ada(Severity ~., train.data, iter = 50)
plot(adaboosting) #taking 45 as number of iteration
adapred <- predict(adaboosting, test.data)
confusionMatrix(test.data$Severity,adapred)
#ada boosting in for loop
library(ada)
set.seed(33)
attach(laaccident)
for (i in 1:numholdout) {
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
boostingmodelada <- ada(Severity ~ ., train, iter = 45)
pred.boostada <- predict(boostingmodelada, holdout)
pred.train.boostada <- predict(boostingmodelada, train)
predictionaccuracy[i,5] <- sum(diag(table(pred.boostada, holdout.response)))/sum(table(pred.boostada, holdout.response))
trainingaccuracy[i,5] <- sum(diag(table(pred.train.boostada, train$Severity)))/sum(table(pred.train.boostada, train$Severity))
}
###########################
#null model
library(caret)
set.seed(97)
attach(laaccident)
for (i in 1:numholdout) {
s <- randomstring(percentholdout, nrow(laaccident))
tmp.data <- cbind(laaccident,s)
tmp.response <- (cbind(laaccident$Severity,s))
holdout <- subset(tmp.data, s==1)[,1:length(laaccident)]
holdout.response <- subset(tmp.response, s==1)[,1]
train <- subset(tmp.data, s==0)[,1:length(laaccident)]
sizeholdout <- dim(holdout)[1]
sizetrain <- dim(train)[1]
nullmodel <- nullModel(y = train$Severity, type = 'class')
pred.nullmodel <- predict(nullmodel, holdout)
pred.train.nullmodel <- predict(nullmodel, train)
predictionaccuracy[i,6] <- sum(diag(table(pred.nullmodel, holdout.response)))/sum(table(pred.nullmodel, holdout.response))
trainingaccuracy[i,6] <- sum(diag(table(pred.train.nullmodel, train$Severity)))/sum(table(pred.train.nullmodel, train$Severity))
}
#finding the average prediction and training accuracy
# colMeans replaces the original element-by-element loops that grew the
# result vectors with c(); unname() keeps the results plain unnamed numeric
# vectors, exactly as the loops produced.
meanpredictionaccuracy <- unname(colMeans(predictionaccuracy))
meanpredictionaccuracy #gives the mean prediction accuracy of all the models
max(meanpredictionaccuracy) #gives the maximum prediction accuracy out of all models
which.max(meanpredictionaccuracy) #gives which model has the maximum prediction accuracy
#model 3 has the highest prediction accuracy (Randomforest)
meantrainingaccuracy <- unname(colMeans(trainingaccuracy))
meantrainingaccuracy #gives the mean training accuracy of all the models
max(meantrainingaccuracy) #gives the maximum training accuracy out of all models
which.max(meantrainingaccuracy) #gives which model has the maximum training accuracy
#model 3 has the highest training accuracy (Randomforest)
###################
# NOTE(review): dev.off() errors when no graphics device is open
dev.off()
|
# wps.des: id = gridded_daily, title = A generalized daily climate statistics algorithm, abstract = TBD;
# wps.in: start, string, Start Year, Start Year (ie. 1950);
# wps.in: end, string, End Year, End Year (ie. 2000);
# wps.in: bbox_in, string, BBOX, Format, comma separated min lat/lon max lat/lon;
# wps.in: days_tmax_abv_thresh, string, Days with tmax above threshold, comma separated list of thresholds in degrees C, value = "";
# wps.in: days_tmin_blw_thresh, string, Days with tmin below threshold, comma separated list of thresholds in degrees C, value = "";
# wps.in: days_prcp_abv_thresh, string, Days with prcp above threshold, comma separated list of thresholds in mm, value = "";
# wps.in: longest_run_tmax_abv_thresh, string, Longest run with tmax above threshold, comma separated list of thresholds in degrees C, value = "";
# wps.in: longest_run_prcp_blw_thresh, string, Longest run with prcp below threshold, comma separated list of thresholds in mm, value = "";
# wps.in: growing_degree_day_thresh, string, Growing degree days, comma separated list of thresholds in degrees C, value = "";
# wps.in: heating_degree_day_thresh, string, Heating degree days, comma separated list of thresholds in degrees C, value = "";
# wps.in: cooling_degree_day_thresh, string, Cooling degree days, comma separated list of thresholds in degrees C, value = "";
# wps.in: growing_season_lngth_thresh, string, Growing season length, comma separated list of thresholds in degrees C, value = "";
# wps.in: OPeNDAP_URI, string, OPeNDAP URI, An OPeNDAP (dods) url for the climate dataset of interest.;
# wps.in: tmax_var, string, Tmax Variable, The variable from the OPeNDAP dataset to use as tmax.;
# wps.in: tmin_var, string, Tmin Variable, The variable from the OPeNDAP dataset to use as tmin.;
# wps.in: tave_var, string, Tave Variable, The variable from the OPeNDAP dataset to use as tave can be "NULL".;
# wps.in: prcp_var, string, Prcp Variable, The variable from the OPeNDAP dataset to use as prcp.;
library("dapClimates")
library("climates") # climates itself depends on further packages; attaching it explicitly in addition to dapClimates makes those available.
# Names of the wps.in threshold inputs declared in the annotations above.
# Each arrives as a WPS-injected character variable holding a comma
# separated list of thresholds (or "" when the input was not supplied).
t_names<-c("days_tmax_abv_thresh",
           "days_tmin_blw_thresh",
           "days_prcp_abv_thresh",
           "longest_run_tmax_abv_thresh",
           "longest_run_prcp_blw_thresh",
           "growing_degree_day_thresh",
           "heating_degree_day_thresh",
           "cooling_degree_day_thresh",
           "growing_season_lngth_thresh")
# Parse each non-empty threshold string into a numeric vector, keyed by the
# input's name; empty inputs are simply omitted from the list.
# NOTE(review): header=F uses the reassignable shortcut F; FALSE would be safer.
thresholds<-list()
for(t_name in t_names) {
  tn<-get(t_name)
  if(tn!="") thresholds[[t_name]] <- as.double(read.csv(header=F,colClasses=c("character"),text=tn))
}
# The bounding box also arrives as a comma separated string (min lat/lon,
# max lat/lon per the wps.in annotation); convert it to a numeric vector.
bbox_in <- as.double(read.csv(header=F,colClasses=c("character"),text=bbox_in))
# tave is optional: the literal string "NULL" means "not supplied".
if(tave_var=="NULL") tave_var<-NULL
fileNames<-dap_daily_stats(start,end,bbox_in,thresholds,OPeNDAP_URI,tmax_var,tmin_var,tave_var,prcp_var)
# Bundle the resulting geotiffs into a single zip for the WPS output.
name<-'dailyInd.zip'
dailyInd_zip<-zip(name,fileNames)
# NOTE(review): the wps.out annotation below still refers to "bioclim" -- it
# looks copy-pasted from the bioclim script; confirm whether the title and
# abstract should describe the daily statistics output before changing the
# parsed annotation.
#wps.out: name, zip, bioclim_zip, A zip of the resulting bioclim geotiffs.;
| /gdp-process-wps/src/main/webapp/R/scripts/gridded_daily.R | permissive | mike-stern/geo-data-portal | R | false | false | 2,934 | r | # wps.des: id = gridded_daily, title = A generalized daily climate statistics algorithm, abstract = TBD;
# wps.in: start, string, Start Year, Start Year (ie. 1950);
# wps.in: end, string, End Year, End Year (ie. 2000);
# wps.in: bbox_in, string, BBOX, Format, comma seperated min lat/lon max lat/lon;
# wps.in: days_tmax_abv_thresh, string, Days with tmax above threshold, comma seperated list of thresholds in degrees C, value = "";
# wps.in: days_tmin_blw_thresh, string, Days with tmin below threshold, comma seperated list of thresholds in degrees C, value = "";
# wps.in: days_prcp_abv_thresh, string, Days with prcp above threshold, comma seperated list of thresholds in mm, value = "";
# wps.in: longest_run_tmax_abv_thresh, string, Longest run with tmax above threshold, comma seperated list of thresholds in degrees C, value = "";
# wps.in: longest_run_prcp_blw_thresh, string, Longest run with tmin below threshold, comma seperated list of thresholds in mm, value = "";
# wps.in: growing_degree_day_thresh, string, Growing degree days, comma seperated list of thresholds in degrees C, value = "";
# wps.in: heating_degree_day_thresh, string, Heating degree days, comma seperated list of thresholds in degrees C, value = "";
# wps.in: cooling_degree_day_thresh, string, Cooling degree days, comma seperated list of thresholds in degrees C, value = "";
# wps.in: growing_season_lngth_thresh, string, Growing season length, comma seperated list of thresholds in degrees C, value = "";
# wps.in: OPeNDAP_URI, string, OPeNDAP URI, An OPeNDAP (dods) url for the climate dataset of interest.;
# wps.in: tmax_var, string, Tmax Variable, The variable from the OPeNDAP dataset to use as tmax.;
# wps.in: tmin_var, string, Tmin Variable, The variable from the OPeNDAP dataset to use as tmin.;
# wps.in: tave_var, string, Tave Variable, The variable from the OPeNDAP dataset to use as tave can be "NULL".;
# wps.in: prcp_var, string, Prcp Variable, The variable from the OPeNDAP dataset to use as prcp.;
library("dapClimates")
library("climates") # Because climates uses depends on stuff, this is needed as well as the dapClimates load.
t_names<-c("days_tmax_abv_thresh",
"days_tmin_blw_thresh",
"days_prcp_abv_thresh",
"longest_run_tmax_abv_thresh",
"longest_run_prcp_blw_thresh",
"growing_degree_day_thresh",
"heating_degree_day_thresh",
"cooling_degree_day_thresh",
"growing_season_lngth_thresh")
thresholds<-list()
for(t_name in t_names) {
tn<-get(t_name)
if(tn!="") thresholds[[t_name]] <- as.double(read.csv(header=F,colClasses=c("character"),text=tn))
}
bbox_in <- as.double(read.csv(header=F,colClasses=c("character"),text=bbox_in))
if(tave_var=="NULL") tave_var<-NULL
fileNames<-dap_daily_stats(start,end,bbox_in,thresholds,OPeNDAP_URI,tmax_var,tmin_var,tave_var,prcp_var)
name<-'dailyInd.zip'
dailyInd_zip<-zip(name,fileNames)
#wps.out: name, zip, bioclim_zip, A zip of the resulting bioclim geotiffs.;
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarizeTaxonStates.R
\name{summarizeTaxonStates}
\alias{summarizeTaxonStates}
\title{Create a data frame of taxon states}
\usage{
summarizeTaxonStates(taxa)
}
\arguments{
\item{taxa}{a list of objects}
}
\value{
Returns a data frame of taxon values
}
\description{
This function creates a data frame of taxon states while simulating
characters with doSimulation and doSimulationsForPlotting TreEvo functions
}
\details{
Used by TreEvo doSimulation and doSimulationForPlotting functions to
summarize a list of objects into a data frame of taxon values
}
\author{
Brian O'Meara and Barb Banbury
}
\references{
O'Meara and Banbury, unpublished
}
| /man/summarizeTaxonStates.Rd | no_license | JakeJing/treevo | R | false | true | 724 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summarizeTaxonStates.R
\name{summarizeTaxonStates}
\alias{summarizeTaxonStates}
\title{Create a data frame of taxon states}
\usage{
summarizeTaxonStates(taxa)
}
\arguments{
\item{taxa}{a list of objects}
}
\value{
Returns a data frame of taxon values
}
\description{
This function creates a data frame of taxon states while simulating
characters with doSimulation and doSimulationsForPlotting TreEvo functions
}
\details{
Used by TreEvo doSimulation and doSimulationForPlotting functions to
summarize a list of objects into a data frame of taxon values
}
\author{
Brian O'Meara and Barb Banbury
}
\references{
O'Meara and Banbury, unpublished
}
|
\name{funtoonorm}
\alias{funtoonorm}
\title{
A function to normalize Illumina Infinium Human Methylation 450 BeadChip (Illumina 450K) with multiple tissues or cell types.
}
\description{
This function performs normalization of Illumina Infinium Human Methylation 450 BeadChip data using the information contained in the control probes. It implements different corrections at different quantiles, and allows for the normalization corrections to vary across tissues/cell types.
}
\usage{
funtoonorm(sigA, sigB, Annot = NULL, controlred, controlgrn,
cp.types = NULL, cell_type, ncmp = 4, save.quant = TRUE, save.loess = TRUE,
apply.loess = TRUE, validate = FALSE)
}
\arguments{
\item{sigA, sigB}{
Matrices containing the signal A and signal B results extracted from the IDAT files.
}
\item{controlred, controlgrn}{
Matrices containing control probe data.
}
\item{Annot}{
Annotation matrix. Supplied by default.
}
\item{cp.types}{
Vector of types of control probes.
}
\item{cell_type}{
Vector of cell type (or tissue type) information.
}
\item{ncmp}{
Number of partial least squares components used in the model fitting.
}
\item{save.quant}{
Logical, whether to save calculated quantiles.
}
\item{save.loess}{
Logical, whether to save calculated results of loess regression.
}
\item{apply.loess}{
Logical, whether to apply the results of the loess regression. If TRUE, two matrices of beta values are returned: one before normalization and one after normalization.
}
\item{validate}{
Either FALSE, or the maximum number of PLS components to be explored in cross-validation.
If FALSE, the normalization corrections are calculated using \verb{ncmp} partial least squares (PLS) components. if not FALSE, then a number must be supplied. This number will be the maximum number of PLS components used when exploring model fit performance across a range of \verb{ncmp} values ranging from 1 to the supplied number.
}
}
\details{
The funtooNorm function operates in one of two modes. If validate=FALSE, then the normalization corrections are calculated using the supplied value of \verb{ncmp} to fix the number of partial least squares (PLS) components. If validate is an integer, K>1, (e.g. K=5), then cross-validation is performed exploring performance across a range of values for \verb{ncmp} ranging from 1 to K.
}
\value{The values returned depend on the parameters chosen.
\itemize{
\item If validate is not FALSE (i.e. validate=K), the function creates a pdf file containing a series of plots showing residual error curves across percentiles of the signal distributions, to facilitate the choice of an appropriate value for \verb{ncmp}. No object is returned by the function.
\item If validate = FALSE, then funtoonorm has the following behaviour:
\itemize{
\item If apply.loess = FALSE the function will not return any object.
However, if save.loess=TRUE or if save.quant=TRUE then RData objects will be saved to disk for future use.
    \item If apply.loess = TRUE, then the function returns a list of 2 objects. The first, 'origBeta', is the matrix of Beta values before normalization, and the second, 'newBeta', is the Beta values after normalization.
}
}
}
\examples{
%% to normalize methylation data:
ncmp <- 4
funtoonormout <- funtoonorm(sigA=sigAsample, sigB=sigBsample, Annot=Annotsample,
controlred=matred, controlgrn=matgrn,
cp.types=cp.types, cell_type = cell_type,
ncmp=ncmp, save.quant=TRUE, save.loess=TRUE, apply.loess=TRUE,
validate=FALSE)
%%to choose the number of components:
funtoonormout <- funtoonorm(sigA=sigAsample, sigB=sigBsample,
controlred=matred, controlgrn=matgrn,
cp.types=cp.types, cell_type = cell_type,
ncmp=4, save.quant=TRUE, save.loess=TRUE,
apply.loess=FALSE, validate=5)
}
| /man/funtoonorm.Rd | no_license | stepanv1/funtooNorm | R | false | false | 3,950 | rd | \name{funtoonorm}
\alias{funtoonorm}
\title{
A function to normalize Illumina Infinium Human Methylation 450 BeadChip (Illumina 450K) with multiple tissues or cell types.
}
\description{
This function performs normalization of Illumina Infinium Human Methylation 450 BeadChip data using the information contained in the control probes. It implements different corrections at different quantiles, and allows for the normalization corrections to vary across tissues/cell types.
}
\usage{
funtoonorm(sigA, sigB, Annot = NULL, controlred, controlgrn,
cp.types = NULL, cell_type, ncmp = 4, save.quant = TRUE, save.loess = TRUE,
apply.loess = TRUE, validate = FALSE)
}
\arguments{
\item{sigA, sigB}{
Matrices containing the signal A and signal B results extracted from the IDAT files.
}
\item{controlred, controlgrn}{
Matrices containing control probe data.
}
\item{Annot}{
Annotation matrix. Supplied by default.
}
\item{cp.types}{
Vector of types of control probes.
}
\item{cell_type}{
Vector of cell type (or tissue type) information.
}
\item{ncmp}{
Number of partial least squares components used in the model fitting.
}
\item{save.quant}{
Logical, whether to save calculated quantiles.
}
\item{save.loess}{
Logical, whether to save calculated results of loess regression.
}
\item{apply.loess}{
Logical, whether to apply results of loess regression. If TRUE, two matrices are returned, one the data before normalization and one after normalization.
normalised beta values is returned.
}
\item{validate}{
Either FALSE, or the maximum number of PLS components to be explored in cross-validation.
If FALSE, the normalization corrections are calculated using \verb{ncmp} partial least squares (PLS) components. if not FALSE, then a number must be supplied. This number will be the maximum number of PLS components used when exploring model fit performance across a range of \verb{ncmp} values ranging from 1 to the supplied number.
}
}
\details{
The funtooNorm function operates in one of two modes. If validate=FALSE, then the normalization corrections are calculated using the supplied value of \verb{ncmp} to fix the number of partial least squares (PLS) components. If validate is an integer, K>1, (e.g. K=5), then cross-validation is performed exploring performance across a range of values for \verb{ncmp} ranging from 1 to K.
}
\value{The values returned depend on the parameters chosen.
\itemize{
\item If validate is not FALSE (i.e. validate=K), the function creates a pdf file containing a series of plots showing residual error curves across percentiles of the signal distributions, to facilitate the choice of an appropriate value for \verb{ncmp}. No object is returned by the function.
\item If validate = FALSE, then funtoonorm has the following behaviour:
\itemize{
\item If apply.loess = FALSE the function will not return any object.
However, if save.loess=TRUE or if save.quant=TRUE then RData objects will be saved to disk for future use.
\item If apply.less= TRUE, then the function returns a list of 2 objects. The first, 'origBeta', is the matrix of Beta avalues before normalization, and the second, 'newBeta' is the Beta values after normalization.
}
}
}
\examples{
%% to normalize methylation data:
ncmp <- 4
funtoonormout <- funtoonorm(sigA=sigAsample, sigB=sigBsample, Annot=Annotsample,
controlred=matred, controlgrn=matgrn,
cp.types=cp.types, cell_type = cell_type,
ncmp=ncmp, save.quant=TRUE, save.loess=TRUE, apply.loess=TRUE,
validate=FALSE)
%%to choose the number of components:
funtoonormout <- funtoonorm(sigA=sigAsample, sigB=sigBsample,
controlred=matred, controlgrn=matgrn,
cp.types=cp.types, cell_type = cell_type,
ncmp=4, save.quant=TRUE, save.loess=TRUE,
apply.loess=FALSE, validate=5)
}
|
# Plot 2: Global Active Power over the two target days (1-2 Feb 2007).
# Reads the full household power file, keeps the two dates of interest,
# builds a POSIXct timestamp column, then draws the line chart into a PNG.
power_data <- read.table("./household_power_consumption.txt",
                         header = TRUE, sep = ";", na.strings = "?")
# Keep the rows for 1 Feb 2007 followed by those for 2 Feb 2007
# (Date is still in its raw d/m/Y string form at this point).
feb_days <- rbind(power_data[power_data$Date == "1/2/2007", ],
                  power_data[power_data$Date == "2/2/2007", ])
feb_days$Date <- as.Date(feb_days$Date, "%d/%m/%Y")
# Combine the parsed date with the time-of-day string into one timestamp.
feb_days <- cbind(feb_days,
                  "DateTime" = as.POSIXct(paste(feb_days$Date, feb_days$Time)))
png(file = "plot2.png")
plot(feb_days$Global_active_power ~ feb_days$DateTime,
     type = "l", xlab = "",
     ylab = "Global Active power (kilowatts)")
dev.off() | /plot2.R | no_license | dvkrgrg3/ExData_Plotting1 | R | false | false | 476 | r | powerc <- read.table("./household_power_consumption.txt",header = TRUE, sep = ";", na.strings = "?")
plotdata<- rbind(powerc[powerc$Date=="1/2/2007",],powerc[powerc$Date=="2/2/2007",])
plotdata$Date <- as.Date(plotdata$Date,"%d/%m/%Y")
plotdata <- cbind(plotdata,"DateTime"= as.POSIXct(paste(plotdata$Date,plotdata$Time)))
png(file = "plot2.png")
plot(plotdata$Global_active_power ~ plotdata$DateTime, type="l", xlab= "",
ylab="Global Active power (kilowatts)")
dev.off() |
## ------------------------------------------------------------------------
# Heritability example, formula interface (mmer2): Name, Env, Name:Env and
# Block as random effects, then h2 from the variance components via pin().
library(sommer)
data(h2example)
head(h2example)
ans1 <- mmer2(y~1,
              random = ~Name + Env + Name:Env + Block,
              rcov = ~units,
              data=h2example, silent = TRUE)
suma <- summary(ans1)
n.env <- length(levels(h2example$Env))
# V1/V3/V5 refer to variance components by their position in the model
# output -- confirm the component ordering before reusing this formula.
pin(ans1, h2 ~ V1 / ( V1 + (V3/n.env) + (V5/(2*n.env)) ) )
## ------------------------------------------------------------------------
# Same model through the matrix interface (mmer): build one incidence
# matrix per random effect by hand and pass them as the Z list (ETA).
library(sommer)
data(h2example)
head(h2example)
Z1 <- model.matrix(~Name-1, h2example)
Z2 <- model.matrix(~Env-1, h2example)
Z3 <- model.matrix(~Env:Name-1, h2example)
Z4 <- model.matrix(~Block-1, h2example)
ETA <- list(name=list(Z=Z1),env=list(Z=Z2),name.env=list(Z=Z3),block=list(Z=Z4))
y <- h2example$y
ans1 <- mmer(Y=y, Z=ETA, silent = TRUE)
vc <- ans1$var.comp
## ------------------------------------------------------------------------
# Additive + dominance + epistatic (ADE) model on the CP population,
# formula interface: the id column is duplicated (idd, ide) so each
# relationship matrix (A, D, E) can be attached to its own random term.
data(CPdata)
CPpheno$idd <-CPpheno$id; CPpheno$ide <-CPpheno$id
### look at the data
head(CPpheno)
CPgeno[1:5,1:4]
## fit a model including additive and dominance effects
A <- A.mat(CPgeno) # additive relationship matrix
D <- D.mat(CPgeno) # dominance relationship matrix
E <- E.mat(CPgeno) # epistatic relationship matrix
ans.ADE <- mmer2(color~1,
                 random=~g(id) + g(idd) + g(ide),
                 rcov=~units,
                 G=list(id=A,idd=D,ide=E),
                 silent = TRUE, data=CPpheno)
suma <- summary(ans.ADE)$var.comp.table
# H2 sums the first three variance components (the A, D and E terms, in
# model-term order); h2 uses only the first (additive) component.
(H2 <- sum(suma[1:3,1])/sum(suma[,1]))
(h2 <- sum(suma[1,1])/sum(suma[,1]))
## ------------------------------------------------------------------------
# Same ADE model through the matrix interface: a single incidence matrix
# Z1 for id, reused with each of the A/D/E covariance matrices.
data(CPdata)
### look at the data
head(CPpheno)
CPgeno[1:5,1:4]
## fit a model including additive and dominance effects
Z1 <- model.matrix(~id-1, CPpheno); colnames(Z1) <- gsub("id","",colnames(Z1))
A <- A.mat(CPgeno) # additive relationship matrix
D <- D.mat(CPgeno) # dominance relationship matrix
E <- E.mat(CPgeno) # epistatic relationship matrix
y <- CPpheno$color
ETA <- list(id=list(Z=Z1,K=A),idd=list(Z=Z1,K=D),ide=list(Z=Z1,K=E))
ans.ADE <- mmer(Y=y, Z=ETA, silent = TRUE)
ans.ADE$var.comp
## ---- fig.show='hold'----------------------------------------------------
# Location-specific (at()) random GCA2 effects: the GCA2 variance is only
# estimated within locations "3" and "4", with heterogeneous residual
# variances by location.
data(cornHybrid)
hybrid2 <- cornHybrid$hybrid # extract cross data
head(hybrid2)
### fit the model
modFD <- mmer2(Yield~1,
               random=~ at(Location,c("3","4")):GCA2,
               rcov= ~ at(Location):units,
               data=hybrid2, silent = TRUE)
summary(modFD)
## ------------------------------------------------------------------------
# Same idea with a covariance structure: a main GCA2 effect plus a
# location-specific GCA2 effect, both using the K matrix via g().
data(cornHybrid)
hybrid2 <- cornHybrid$hybrid # extract cross data
## get the covariance structure for GCA2
A <- cornHybrid$K
## fit the model
modFD <- mmer2(Yield~1,
               random=~ g(GCA2) + at(Location):g(GCA2),
               rcov= ~ at(Location):units,
               data=hybrid2, G=list(GCA2=A),
               silent = TRUE, draw=FALSE)
summary(modFD)
## ------------------------------------------------------------------------
# Genomic heritability on the CP data: single-trait GBLUP with A.mat,
# then h2 from the two variance components with pin().
data(CPdata)
#### create the variance-covariance matrix
A <- A.mat(CPgeno)
#### look at the data and fit the model
head(CPpheno)
mix1 <- mmer2(color~1,
              random=~g(id),
              rcov=~units,
              G=list(id=A), data=CPpheno, silent=TRUE)
summary(mix1)
#### run the pin function
pin(mix1, h2 ~ V1 / ( V1 + V2 ) )
## ------------------------------------------------------------------------
# Full diallel: GCA for each parental pool plus SCA; gene-action variances
# are derived from the GCA/SCA components (Va = 4*Vgca, Vd = 4*Vsca).
data(cornHybrid)
hybrid2 <- cornHybrid$hybrid # extract cross data
head(hybrid2)
modFD <- mmer2(Yield~Location,
               random=~GCA1+GCA2+SCA,
               rcov=~units,
               data=hybrid2,silent = TRUE, draw=FALSE)
(suma <- summary(modFD))
# Rows of var.comp.table follow the model-term order: GCA1, GCA2, SCA, units.
Vgca <- sum(suma$var.comp.table[1:2,1])
Vsca <- suma$var.comp.table[3,1]
Ve <- suma$var.comp.table[4,1]
Va = 4*Vgca
Vd = 4*Vsca
Vg <- Va + Vd
(H2 <- Vg / (Vg + (Ve)) )
(h2 <- Va / (Vg + (Ve)) )
## ------------------------------------------------------------------------
# Half diallel, formula interface: overlay() stacks the male and female
# incidence matrices into a single GCA term; geno captures SCA.
data(HDdata)
head(HDdata)
HDdata$geno <- as.factor(HDdata$geno)
HDdata$male <- as.factor(HDdata$male)
HDdata$female <- as.factor(HDdata$female)
# Fit the model
modHD <- mmer2(sugar~1,
               random=~overlay(male,female) + geno,
               rcov=~units,
               data=HDdata, silent = TRUE)
summary(modHD)
suma <- summary(modHD)$var.comp.table
Vgca <- suma[1,1]
Vsca <- suma[2,1]
Ve <- suma[3,1]
Va = 4*Vgca
Vd = 4*Vsca
Vg <- Va + Vd
# Residual is halved because each cross has 2 technical replicates.
(H2 <- Vg / (Vg + (Ve/2)) ) # 2 technical reps
(h2 <- Va / (Vg + (Ve/2)) )
## ------------------------------------------------------------------------
# Half diallel, matrix interface: build the overlay (GCA) and geno (SCA)
# incidence matrices by hand and pass them unkeyed in the ETA list.
data(HDdata)
head(HDdata)
#### GCA matrix for half diallel using male and female columns
#### use the 'overlay' function to create the half diallel matrix
Z1 <- overlay(HDdata$female, HDdata$male)
#### Obtain the SCA matrix
Z2 <- model.matrix(~as.factor(geno)-1, data=HDdata)
#### Define the response variable and run
y <- HDdata$sugar
ETA <- list(list(Z=Z1), list(Z=Z2)) # Zu component
modHD <- mmer(Y=y, Z=ETA, draw=FALSE, silent=TRUE)
summary(modHD)
## ------------------------------------------------------------------------
# Genomic prediction on the wheat panel: mask 1/5 of the lines, fit GBLUP
# (kinship-parameterized) and rrBLUP (marker-effect-parameterized), and
# compare predictive correlations on the masked set -- the two
# parameterizations give the same correlation.
data(wheatLines);
X <- wheatLines$wheatGeno; X[1:5,1:4]; dim(X)
Y <- data.frame(wheatLines$wheatPheno); Y$id <- rownames(Y); head(Y);
rownames(X) <- rownames(Y)
# select environment 1
K <- A.mat(X) # additive relationship matrix
# GBLUP pedigree-based approach
set.seed(12345)
y.trn <- Y
# Mask ~20% of the phenotypes (trait X1) to form the validation set.
vv <- sample(rownames(Y),round(dim(Y)[1]/5))
y.trn[vv,"X1"] <- NA
## GBLUP
ans <- mmer2(X1~1,
             random=~g(id),
             rcov=~units,
             G=list(id=K),
             data=y.trn, silent = TRUE) # kinship based
cor(ans$u.hat$`g(id)`[vv,],Y[vv,"X1"])
## rrBLUP
y.trn$dummy <- paste("dummy",1:nrow(y.trn),sep="_")
ans <- mmer2(X1~1,
             random=~dummy + grp(markers),
             rcov=~units,
             grouping =list(markers=X),
             data=y.trn, silent = TRUE) # kinship based
# Individual BLUPs = marker matrix times estimated marker effects.
u <- X %*% as.matrix(ans$u.hat$markers[,1]) # BLUPs for individuals
cor(u[vv,],Y[vv,"X1"]) # same correlation
# the same can be applied in multi-response models in GBLUP or rrBLUP
## ------------------------------------------------------------------------
# Hybrid prediction (Technow data): enumerate all possible dent x flint
# hybrids, mask 100 observed ones and predict them from the parental GCAs.
data(Technow_data)
A.flint <- Technow_data$AF # Additive relationship matrix Flint
A.dent <- Technow_data$AD # Additive relationship matrix Dent
pheno <- Technow_data$pheno # phenotypes for 1254 single cross hybrids
head(pheno);dim(pheno)
# CREATE A DATA FRAME WITH ALL POSSIBLE HYBRIDS
DD <- kronecker(A.dent,A.flint,make.dimnames=TRUE)
hybs <- data.frame(sca=rownames(DD),yield=NA,matter=NA,gcad=NA, gcaf=NA)
hybs$yield[match(pheno$hy, hybs$sca)] <- pheno$GY
hybs$matter[match(pheno$hy, hybs$sca)] <- pheno$GM
# Hybrid names are "dent:flint"; split them back into the parental ids.
hybs$gcad <- as.factor(gsub(":.*","",hybs$sca))
hybs$gcaf <- as.factor(gsub(".*:","",hybs$sca))
head(hybs)
# RUN THE PREDICTION MODEL
y.trn <- hybs
vv1 <- which(!is.na(hybs$yield))
vv2 <- sample(vv1, 100)
y.trn[vv2,"yield"] <- NA
anss2 <- mmer2(yield~1,
               random=~g(gcad) + g(gcaf),
               rcov=~units,
               G=list(gcad=A.dent, gcaf=A.flint),
               method="NR", silent=TRUE, data=y.trn)
summary(anss2)
cor(anss2$fitted.y[vv2], hybs$yield[vv2])
## ------------------------------------------------------------------------
# Spatial model on the CP field trial: genomic effect plus row/column
# factors and a 2-D spline (spl2D) over the field coordinates.
data(CPdata)
head(CPpheno)
CPgeno[1:4,1:4]
#### create the variance-covariance matrix
A <- A.mat(CPgeno) # additive relationship matrix
#### look at the data and fit the model
head(CPpheno)
mix1 <- mmer2(Yield~1,
              random=~g(id)
              + Rowf + Colf
              + spl2D(Row,Col),
              rcov=~units,
              G=list(id=A), silent=TRUE,
              data=CPpheno)
summary(mix1)
## ------------------------------------------------------------------------
#### get the spatial plots (also returns the fitted values)
fittedvals <- spatPlots(mix1,row = "Row", range = "Col")
## ------------------------------------------------------------------------
# Multivariate GBLUP: color and Yield fit jointly with an unstructured
# (us) trait covariance for both the genetic and the residual term.
data(CPdata)
### look at the data
head(CPpheno);CPgeno[1:5,1:4]
## fit a model including additive effects
A <- A.mat(CPgeno) # additive relationship matrix
####================####
#### ADDITIVE MODEL ####
####================####
ans.A <- mmer2(cbind(color,Yield)~1,
               random=~us(trait):g(id),
               rcov=~us(trait):units,
               G=list(id=A),
               data=CPpheno, silent = TRUE)
summary(ans.A)
## ------------------------------------------------------------------------
# Genetic correlations: cor(gi, gi') = cov(gi, gi') / (sd(gi) * sd(gi')),
# computed from the genetic variance-covariance matrix; heritabilities as
# genetic variance over phenotypic variance, per trait.
## genetic variance covariance
gvc <- ans.A$var.comp$`g(id)`
## extract variances (diagonals) and get standard deviations
sd.gvc <- as.matrix(sqrt(diag(gvc)))
## get possible products sd(Vgi) * sd(Vgi')
prod.sd <- sd.gvc %*% t(sd.gvc)
## genetic correlations cov(gi,gi')/[sd(Vgi) * sd(Vgi')]
(gen.cor <- gvc/prod.sd)
## heritabilities
(h2 <- diag(gvc) / diag(cov(CPpheno[,names(diag(gvc))], use = "complete.obs")))
| /inst/doc/sommer.R | no_license | Jaimemosg/sommer | R | false | false | 8,865 | r | ## ------------------------------------------------------------------------
library(sommer)
data(h2example)
head(h2example)
ans1 <- mmer2(y~1,
random = ~Name + Env + Name:Env + Block,
rcov = ~units,
data=h2example, silent = TRUE)
suma <- summary(ans1)
n.env <- length(levels(h2example$Env))
pin(ans1, h2 ~ V1 / ( V1 + (V3/n.env) + (V5/(2*n.env)) ) )
## ------------------------------------------------------------------------
library(sommer)
data(h2example)
head(h2example)
Z1 <- model.matrix(~Name-1, h2example)
Z2 <- model.matrix(~Env-1, h2example)
Z3 <- model.matrix(~Env:Name-1, h2example)
Z4 <- model.matrix(~Block-1, h2example)
ETA <- list(name=list(Z=Z1),env=list(Z=Z2),name.env=list(Z=Z3),block=list(Z=Z4))
y <- h2example$y
ans1 <- mmer(Y=y, Z=ETA, silent = TRUE)
vc <- ans1$var.comp
## ------------------------------------------------------------------------
data(CPdata)
CPpheno$idd <-CPpheno$id; CPpheno$ide <-CPpheno$id
### look at the data
head(CPpheno)
CPgeno[1:5,1:4]
## fit a model including additive and dominance effects
A <- A.mat(CPgeno) # additive relationship matrix
D <- D.mat(CPgeno) # dominance relationship matrix
E <- E.mat(CPgeno) # epistatic relationship matrix
ans.ADE <- mmer2(color~1,
random=~g(id) + g(idd) + g(ide),
rcov=~units,
G=list(id=A,idd=D,ide=E),
silent = TRUE, data=CPpheno)
suma <- summary(ans.ADE)$var.comp.table
(H2 <- sum(suma[1:3,1])/sum(suma[,1]))
(h2 <- sum(suma[1,1])/sum(suma[,1]))
## ------------------------------------------------------------------------
data(CPdata)
### look at the data
head(CPpheno)
CPgeno[1:5,1:4]
## fit a model including additive and dominance effects
Z1 <- model.matrix(~id-1, CPpheno); colnames(Z1) <- gsub("id","",colnames(Z1))
A <- A.mat(CPgeno) # additive relationship matrix
D <- D.mat(CPgeno) # dominance relationship matrix
E <- E.mat(CPgeno) # epistatic relationship matrix
y <- CPpheno$color
ETA <- list(id=list(Z=Z1,K=A),idd=list(Z=Z1,K=D),ide=list(Z=Z1,K=E))
ans.ADE <- mmer(Y=y, Z=ETA, silent = TRUE)
ans.ADE$var.comp
## ---- fig.show='hold'----------------------------------------------------
data(cornHybrid)
hybrid2 <- cornHybrid$hybrid # extract cross data
head(hybrid2)
### fit the model
modFD <- mmer2(Yield~1,
random=~ at(Location,c("3","4")):GCA2,
rcov= ~ at(Location):units,
data=hybrid2, silent = TRUE)
summary(modFD)
## ------------------------------------------------------------------------
data(cornHybrid)
hybrid2 <- cornHybrid$hybrid # extract cross data
## get the covariance structure for GCA2
A <- cornHybrid$K
## fit the model
modFD <- mmer2(Yield~1,
random=~ g(GCA2) + at(Location):g(GCA2),
rcov= ~ at(Location):units,
data=hybrid2, G=list(GCA2=A),
silent = TRUE, draw=FALSE)
summary(modFD)
## ------------------------------------------------------------------------
data(CPdata)
#### create the variance-covariance matrix
A <- A.mat(CPgeno)
#### look at the data and fit the model
head(CPpheno)
mix1 <- mmer2(color~1,
random=~g(id),
rcov=~units,
G=list(id=A), data=CPpheno, silent=TRUE)
summary(mix1)
#### run the pin function
pin(mix1, h2 ~ V1 / ( V1 + V2 ) )
## ------------------------------------------------------------------------
data(cornHybrid)
hybrid2 <- cornHybrid$hybrid # extract cross data
head(hybrid2)
modFD <- mmer2(Yield~Location,
random=~GCA1+GCA2+SCA,
rcov=~units,
data=hybrid2,silent = TRUE, draw=FALSE)
(suma <- summary(modFD))
Vgca <- sum(suma$var.comp.table[1:2,1])
Vsca <- suma$var.comp.table[3,1]
Ve <- suma$var.comp.table[4,1]
Va = 4*Vgca
Vd = 4*Vsca
Vg <- Va + Vd
(H2 <- Vg / (Vg + (Ve)) )
(h2 <- Va / (Vg + (Ve)) )
## ------------------------------------------------------------------------
data(HDdata)
head(HDdata)
HDdata$geno <- as.factor(HDdata$geno)
HDdata$male <- as.factor(HDdata$male)
HDdata$female <- as.factor(HDdata$female)
# Fit the model
modHD <- mmer2(sugar~1,
random=~overlay(male,female) + geno,
rcov=~units,
data=HDdata, silent = TRUE)
summary(modHD)
suma <- summary(modHD)$var.comp.table
Vgca <- suma[1,1]
Vsca <- suma[2,1]
Ve <- suma[3,1]
Va = 4*Vgca
Vd = 4*Vsca
Vg <- Va + Vd
(H2 <- Vg / (Vg + (Ve/2)) ) # 2 technical reps
(h2 <- Va / (Vg + (Ve/2)) )
## ------------------------------------------------------------------------
data(HDdata)
head(HDdata)
#### GCA matrix for half diallel using male and female columns
#### use the 'overlay' function to create the half diallel matrix
Z1 <- overlay(HDdata$female, HDdata$male)
#### Obtain the SCA matrix
Z2 <- model.matrix(~as.factor(geno)-1, data=HDdata)
#### Define the response variable and run
y <- HDdata$sugar
ETA <- list(list(Z=Z1), list(Z=Z2)) # Zu component
modHD <- mmer(Y=y, Z=ETA, draw=FALSE, silent=TRUE)
summary(modHD)
## ------------------------------------------------------------------------
data(wheatLines);
X <- wheatLines$wheatGeno; X[1:5,1:4]; dim(X)
Y <- data.frame(wheatLines$wheatPheno); Y$id <- rownames(Y); head(Y);
rownames(X) <- rownames(Y)
# select environment 1
K <- A.mat(X) # additive relationship matrix
# GBLUP pedigree-based approach
set.seed(12345)
y.trn <- Y
vv <- sample(rownames(Y),round(dim(Y)[1]/5))
y.trn[vv,"X1"] <- NA
## GBLUP
ans <- mmer2(X1~1,
random=~g(id),
rcov=~units,
G=list(id=K),
data=y.trn, silent = TRUE) # kinship based
cor(ans$u.hat$`g(id)`[vv,],Y[vv,"X1"])
## rrBLUP
y.trn$dummy <- paste("dummy",1:nrow(y.trn),sep="_")
ans <- mmer2(X1~1,
random=~dummy + grp(markers),
rcov=~units,
grouping =list(markers=X),
data=y.trn, silent = TRUE) # kinship based
u <- X %*% as.matrix(ans$u.hat$markers[,1]) # BLUPs for individuals
cor(u[vv,],Y[vv,"X1"]) # same correlation
# the same can be applied in multi-response models in GBLUP or rrBLUP
## ------------------------------------------------------------------------
data(Technow_data)
A.flint <- Technow_data$AF # Additive relationship matrix Flint
A.dent <- Technow_data$AD # Additive relationship matrix Dent
pheno <- Technow_data$pheno # phenotypes for 1254 single cross hybrids
head(pheno);dim(pheno)
# CREATE A DATA FRAME WITH ALL POSSIBLE HYBRIDS
DD <- kronecker(A.dent,A.flint,make.dimnames=TRUE)
hybs <- data.frame(sca=rownames(DD),yield=NA,matter=NA,gcad=NA, gcaf=NA)
hybs$yield[match(pheno$hy, hybs$sca)] <- pheno$GY
hybs$matter[match(pheno$hy, hybs$sca)] <- pheno$GM
hybs$gcad <- as.factor(gsub(":.*","",hybs$sca))
hybs$gcaf <- as.factor(gsub(".*:","",hybs$sca))
head(hybs)
# RUN THE PREDICTION MODEL
y.trn <- hybs
vv1 <- which(!is.na(hybs$yield))
vv2 <- sample(vv1, 100)
y.trn[vv2,"yield"] <- NA
anss2 <- mmer2(yield~1,
random=~g(gcad) + g(gcaf),
rcov=~units,
G=list(gcad=A.dent, gcaf=A.flint),
method="NR", silent=TRUE, data=y.trn)
summary(anss2)
cor(anss2$fitted.y[vv2], hybs$yield[vv2])
## ------------------------------------------------------------------------
data(CPdata)
head(CPpheno)
CPgeno[1:4,1:4]
#### create the variance-covariance matrix
A <- A.mat(CPgeno) # additive relationship matrix
#### look at the data and fit the model
head(CPpheno)
mix1 <- mmer2(Yield~1,
random=~g(id)
+ Rowf + Colf
+ spl2D(Row,Col),
rcov=~units,
G=list(id=A), silent=TRUE,
data=CPpheno)
summary(mix1)
## ------------------------------------------------------------------------
#### get the spatial plots
fittedvals <- spatPlots(mix1,row = "Row", range = "Col")
## ------------------------------------------------------------------------
data(CPdata)
### look at the data
head(CPpheno);CPgeno[1:5,1:4]
## fit a model including additive effects
A <- A.mat(CPgeno) # additive relationship matrix
####================####
#### ADDITIVE MODEL ####
####================####
ans.A <- mmer2(cbind(color,Yield)~1,
random=~us(trait):g(id),
rcov=~us(trait):units,
G=list(id=A),
data=CPpheno, silent = TRUE)
summary(ans.A)
## ------------------------------------------------------------------------
## genetic variance covariance
gvc <- ans.A$var.comp$`g(id)`
## extract variances (diagonals) and get standard deviations
sd.gvc <- as.matrix(sqrt(diag(gvc)))
## get possible products sd(Vgi) * sd(Vgi')
prod.sd <- sd.gvc %*% t(sd.gvc)
## genetic correlations cov(gi,gi')/[sd(Vgi) * sd(Vgi')]
(gen.cor <- gvc/prod.sd)
## heritabilities
(h2 <- diag(gvc) / diag(cov(CPpheno[,names(diag(gvc))], use = "complete.obs")))
|
# Network communications analysis #
# @Author: Haoyang Mi
library(ggplot2)
library(reshape2)
library(igraph)
library(tidyr)
# (fix) removed a duplicate library(reshape2) call that appeared twice above.
# NOTE(review): setwd() plus absolute D:/ paths make this script
# machine-specific; consider relative paths or here::here().
setwd("D:/DP/Projects/HCC")
source('D:/DP/Projects/HCC/Functions.r')  # expected to provide cor.network()
# read gct file (community clustering assignments)
gct_file <- data.frame(read.delim("D:/DP/Data/HCC/Community_Clustering.txt"))
# Bulk tumor community: assign the 8 dendrogram clusters by fixed row blocks.
# NOTE(review): these rep() counts assume the rows of Community_Clustering.txt
# are ordered by cluster and that the file never changes size -- confirm
# before reuse.
gct_file$dendrogram_cut <- c(rep(1, 23), rep(2, 33), rep(3, 25), rep(4, 221), rep(5, 23), rep(6, 97), rep(7, 460), rep(8, 88))
# Responder
HCCdata <- readRDS("D:/DP/Data/HCC/hccdataset")
#remove UA Noncell
R_core <- unique(HCCdata[HCCdata$response == 'R',]$Core)
NR_core <- unique(HCCdata[HCCdata$response == 'NR',]$Core)
# Correlation networks for responder and non-responder cores.
Rnet <- cor.network(gct_file, R_core)
NRnet <- cor.network(gct_file, NR_core)
#routes_network <- layout_components(routes_network)
# Build and plot the responder graph from the first two (edge) columns.
g <- graph_from_data_frame(Rnet[,1:2])
plot(g)
# --------- Individual network count in R and NR -------------#
summary_all <- data.frame(matrix(nrow = 0, ncol = 0))
for(core in seq_len(37)){
#core <- 1
summary <- gct_file %>%
filter(id == core) %>%
group_by(dendrogram_cut) %>%
tally() %>%
cbind(core)
summary_all <- rbind(summary_all, summary)
#print(unique(HCCdata[HCCdata$Core == core, 'response']))
}
#
summary_all <- dcast(summary_all, core ~ dendrogram_cut, value.var = 'n')
summary_all[is.na(summary_all)] <- 0
# merge patient
Patient_table <- read.csv('Patient_Table.csv')
colnames(Patient_table)[1] <- 'core'
summary_all_patient <- merge(Patient_table, summary_all, by = 'core')
# R versus NR
R_count <- colSums(summary_all_patient[summary_all_patient$response == 'R', 4:11])
NR_count <- colSums(summary_all_patient[summary_all_patient$response == 'NR', 4:11])
response_count <- rbind(t(R_count), t(NR_count)) %>%
data.frame()
row.names(response_count) <- c('R', 'NR')
require(tidyverse)
response_count <- response_count %>% rownames_to_column('group')
colnames(response_count) <- c('group', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')
require(ggradar)
p <- ggradar(
response_count[1:2, 1:8],
values.radar = c("0", "100", '300'),
grid.min = 0, grid.mid = 100, grid.max = 300,
group.line.width = 2,
group.point.size = 5,
group.colours = c("#ef776d", "#21b7bd"),
# Background and grid lines
background.circle.colour = "white",
gridline.mid.colour = "grey",
legend.position = 'none',
axis.label.size = 7,
grid.label.size = 8,
)
p
ggsave(p, file=paste0("D:/DP/Projects/HCC/Figures/RadarPlot.png"), width = 8, height = 8, units = "in", dpi = 300)
write.csv(summary_all_patient, 'community_count_each_core.csv', row.names = FALSE)
| /Network-analysis/Communications.R | permissive | Shawnmhy/HCC-IMC-processing-pipeline | R | false | false | 2,709 | r | # Network communications analysis #
# @Author: Haoyang Mi
# NOTE(review): verbatim repeat of the Communications.R script earlier in
# this dump (this dataset row's `text` cell duplicates its `content` cell).
library(ggplot2)
library(reshape2)
library(igraph)
# NOTE(review): reshape2 is loaded a second time here (redundant).
library(reshape2)
library(tidyr)
setwd("D:/DP/Projects/HCC")
source('D:/DP/Projects/HCC/Functions.r')
# read gct file
gct_file <- data.frame(read.delim("D:/DP/Data/HCC/Community_Clustering.txt"))
# Bulk tumor community
gct_file$dendrogram_cut <- c(rep(1, 23), rep(2, 33), rep(3, 25), rep(4, 221), rep(5, 23), rep(6, 97), rep(7, 460), rep(8, 88))
# Responder
HCCdata <- readRDS("D:/DP/Data/HCC/hccdataset")
#remove UA Noncell
R_core <- unique(HCCdata[HCCdata$response == 'R',]$Core)
NR_core <- unique(HCCdata[HCCdata$response == 'NR',]$Core)
Rnet <- cor.network(gct_file, R_core)
NRnet <- cor.network(gct_file, NR_core)
#routes_network <- layout_components(routes_network)
g <- graph_from_data_frame(Rnet[,1:2])
plot(g)
# --------- Individual network count in R and NR -------------#
summary_all <- data.frame(matrix(nrow = 0, ncol = 0))
for(core in seq_len(37)){
  #core <- 1
  summary <- gct_file %>%
    filter(id == core) %>%
    group_by(dendrogram_cut) %>%
    tally() %>%
    cbind(core)
  summary_all <- rbind(summary_all, summary)
  #print(unique(HCCdata[HCCdata$Core == core, 'response']))
}
#
summary_all <- dcast(summary_all, core ~ dendrogram_cut, value.var = 'n')
summary_all[is.na(summary_all)] <- 0
# merge patient
Patient_table <- read.csv('Patient_Table.csv')
colnames(Patient_table)[1] <- 'core'
summary_all_patient <- merge(Patient_table, summary_all, by = 'core')
# R versus NR
R_count <- colSums(summary_all_patient[summary_all_patient$response == 'R', 4:11])
NR_count <- colSums(summary_all_patient[summary_all_patient$response == 'NR', 4:11])
response_count <- rbind(t(R_count), t(NR_count)) %>%
  data.frame()
row.names(response_count) <- c('R', 'NR')
require(tidyverse)
response_count <- response_count %>% rownames_to_column('group')
colnames(response_count) <- c('group', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H')
require(ggradar)
p <- ggradar(
  response_count[1:2, 1:8],
  values.radar = c("0", "100", '300'),
  grid.min = 0, grid.mid = 100, grid.max = 300,
  group.line.width = 2,
  group.point.size = 5,
  group.colours = c("#ef776d", "#21b7bd"),
  # Background and grid lines
  background.circle.colour = "white",
  gridline.mid.colour = "grey",
  legend.position = 'none',
  axis.label.size = 7,
  grid.label.size = 8,
)
p
ggsave(p, file=paste0("D:/DP/Projects/HCC/Figures/RadarPlot.png"), width = 8, height = 8, units = "in", dpi = 300)
write.csv(summary_all_patient, 'community_count_each_core.csv', row.names = FALSE)
|
### R code from vignette source 'useProbeInfo.Rnw'
### Purled vignette code (Bioconductor 'annotate'): locate where the
### Affymetrix probes of a probe set match their GenBank target sequence,
### including a case where the match is on the reverse-complement strand.
###################################################
### code chunk number 1: loadlibs
###################################################
library("annotate")
library("rae230a.db")
library("rae230aprobe")
###################################################
### code chunk number 2: selprobe
###################################################
## Pick one probe set, look up its GenBank accession, and pull its probe
## rows from the probe package.
ps = names(as.list(rae230aACCNUM))
myp = ps[1001]
myA = get(myp, rae230aACCNUM)
wp = rae230aprobe$Probe.Set.Name == myp
myPr = rae230aprobe[wp,]
###################################################
### code chunk number 3: getACC
###################################################
## Fetch the GenBank sequence and match the first probe against it; the
## match start should agree with the recorded interrogation position
## (column 5 of the probe table).
myseq = getSEQ(myA)
nchar(myseq)
library("Biostrings")
mybs = DNAString(myseq)
match1 = matchPattern(as.character(myPr[1,1]), mybs)
match1
as.matrix(ranges(match1))
myPr[1,5]
###################################################
### code chunk number 4: getRev
###################################################
## A probe set whose probes match the reverse-complement strand: the
## forward match is empty, the reverse-complement match is not.
myp = ps[100]
myA = get(myp, rae230aACCNUM)
wp = rae230aprobe$Probe.Set.Name == myp
myPr = rae230aprobe[wp,]
myseq = getSEQ(myA)
mybs = DNAString(myseq)
Prstr = as.character(myPr[1,1])
match2 = matchPattern(Prstr, mybs)
## expecting 0 (no match)
length(match2)
match2 = matchPattern(reverseComplement(DNAString(Prstr)), mybs)
nchar(match2)
nchar(myseq) - as.matrix(ranges(match2))
myPr[1,5]
###################################################
### code chunk number 5: useProbeInfo.Rnw:159-160
###################################################
sessionInfo()
| /source/macOS/R-Portable-Mac/library/annotate/doc/useProbeInfo.R | permissive | romanhaa/Cerebro | R | false | false | 1,575 | r | ### R code from vignette source 'useProbeInfo.Rnw'
### NOTE(review): verbatim repeat of the useProbeInfo.Rnw vignette code
### earlier in this dump (dataset `text` cell duplicating `content`).
###################################################
### code chunk number 1: loadlibs
###################################################
library("annotate")
library("rae230a.db")
library("rae230aprobe")
###################################################
### code chunk number 2: selprobe
###################################################
ps = names(as.list(rae230aACCNUM))
myp = ps[1001]
myA = get(myp, rae230aACCNUM)
wp = rae230aprobe$Probe.Set.Name == myp
myPr = rae230aprobe[wp,]
###################################################
### code chunk number 3: getACC
###################################################
myseq = getSEQ(myA)
nchar(myseq)
library("Biostrings")
mybs = DNAString(myseq)
match1 = matchPattern(as.character(myPr[1,1]), mybs)
match1
as.matrix(ranges(match1))
myPr[1,5]
###################################################
### code chunk number 4: getRev
###################################################
myp = ps[100]
myA = get(myp, rae230aACCNUM)
wp = rae230aprobe$Probe.Set.Name == myp
myPr = rae230aprobe[wp,]
myseq = getSEQ(myA)
mybs = DNAString(myseq)
Prstr = as.character(myPr[1,1])
match2 = matchPattern(Prstr, mybs)
## expecting 0 (no match)
length(match2)
match2 = matchPattern(reverseComplement(DNAString(Prstr)), mybs)
nchar(match2)
nchar(myseq) - as.matrix(ranges(match2))
myPr[1,5]
###################################################
### code chunk number 5: useProbeInfo.Rnw:159-160
###################################################
sessionInfo()
|
#'##########################################################################
#' ame1 project - studying the relation between home range size and activity
#' 2019/10/04
#' Adam Kane, Enrico Pirotta & Barry McMahon
#' https://mecoco.github.io/ame1.html
#' applying the amt package on one study to calculate home ranges (MCP & KDE)
############################################################################
# Packages
library(tidyverse)
library(lubridate)
# Activity data----
# Notes from data providers:
# | 1 - vectronics | act_1: number of forward-backward moves |
# | | act_2: number side-to-side moves |
# | | act_3: not used
# | 3 - lotek 3300 | act_1: number of side-to-side moves |
# | | act_2: number of up-down moves |
# | | act_3: percentage of time in head down position (0 to 100)|
# | 5 - e-obs | act_1: number of forward-backward moves |
# | | act_2: number of side-to-side moves |
# | | act_3: number of up-down moves |
# Load and inspect activity data
adat <- read_csv("data/actdata_mecoco.csv", col_names=T)
head(adat)
names(adat)
adat$act_sel <- adat$act_1 #select activity channel of interest (f-b for sensors 1 and 5, s-s for sensor 3; all are in column act_1)
adat$animals_id <- as.factor(adat$animals_id)
adat <- arrange(adat, animals_id, acquisition_time)
# Time variables
# NOTE(review): strptime() returns POSIXlt; the $year extraction below
# relies on that, but POSIXlt columns inside data frames are fragile —
# consider as.POSIXct() + lubridate::year() instead.
adat$acquisition_time <- strptime(adat$acquisition_time, format="%Y-%m-%d %H:%M:%S", tz="UTC")
adat$year <- adat$acquisition_time$year+1900
adat$month <- month(adat$acquisition_time)
# Summarise activity by month
# One row per animal x year x month; `id` is a 1..n counter within each
# animal, used as the x axis of the trajectory plots below.
adat_m <- adat %>%
  group_by(animals_id, year, month) %>%
  summarise(mean_act = mean(act_sel), sd_act=sd(act_sel), max_act=max(act_sel), min_act=min(act_sel), n_obs=n(), activity_sensors_id=unique(activity_sensors_id), sensor_type=unique(activity_sensor_mode_code),study_areas_id=unique(study_areas_id),gps_sensors_id=unique(gps_sensors_id)) %>%
  ungroup() %>%
  group_by(animals_id) %>%
  mutate(id = row_number()) %>%
  ungroup()
dim(adat_m)
names(adat_m)
#plot mean activity:
ggplot(adat_m) +
  geom_path(aes(x=id, y=mean_act, group=animals_id))
#plot sd activity:
ggplot(adat_m) +
  geom_path(aes(x=id, y=sd_act, group=animals_id))
#plot metrics against each other
ggplot(adat_m) +
  geom_point(aes(x=mean_act, y=sd_act))
ggplot(adat_m) +
  geom_point(aes(x=mean_act, y=max_act))
#there seems to be good correlation between mean and sd of activity, and, to some extent, mean and max
#' write.csv(adat_m, "results/Activity_data_byMonth.csv", row.names=F)
| /code/4.activity_analysis.R | no_license | kanead/eurodeer | R | false | false | 2,877 | r | #'##########################################################################
# NOTE(review): verbatim repeat of the 4.activity_analysis.R script earlier
# in this dump (dataset `text` cell duplicating `content`).
#' ame1 project - studying the relation between home range size and activity
#' 2019/10/04
#' Adam Kane, Enrico Pirotta & Barry McMahon
#' https://mecoco.github.io/ame1.html
#' applying the amt package on one study to calculate home ranges (MCP & KDE)
############################################################################
# Packages
library(tidyverse)
library(lubridate)
# Activity data----
# Notes from data providers:
# | 1 - vectronics | act_1: number of forward-backward moves |
# | | act_2: number side-to-side moves |
# | | act_3: not used
# | 3 - lotek 3300 | act_1: number of side-to-side moves |
# | | act_2: number of up-down moves |
# | | act_3: percentage of time in head down position (0 to 100)|
# | 5 - e-obs | act_1: number of forward-backward moves |
# | | act_2: number of side-to-side moves |
# | | act_3: number of up-down moves |
# Load and inspect activity data
adat <- read_csv("data/actdata_mecoco.csv", col_names=T)
head(adat)
names(adat)
adat$act_sel <- adat$act_1 #select activity channel of interest (f-b for sensors 1 and 5, s-s for sensor 3; all are in column act_1)
adat$animals_id <- as.factor(adat$animals_id)
adat <- arrange(adat, animals_id, acquisition_time)
# Time variables
adat$acquisition_time <- strptime(adat$acquisition_time, format="%Y-%m-%d %H:%M:%S", tz="UTC")
adat$year <- adat$acquisition_time$year+1900
adat$month <- month(adat$acquisition_time)
# Summarise activity by month
adat_m <- adat %>%
  group_by(animals_id, year, month) %>%
  summarise(mean_act = mean(act_sel), sd_act=sd(act_sel), max_act=max(act_sel), min_act=min(act_sel), n_obs=n(), activity_sensors_id=unique(activity_sensors_id), sensor_type=unique(activity_sensor_mode_code),study_areas_id=unique(study_areas_id),gps_sensors_id=unique(gps_sensors_id)) %>%
  ungroup() %>%
  group_by(animals_id) %>%
  mutate(id = row_number()) %>%
  ungroup()
dim(adat_m)
names(adat_m)
#plot mean activity:
ggplot(adat_m) +
  geom_path(aes(x=id, y=mean_act, group=animals_id))
#plot sd activity:
ggplot(adat_m) +
  geom_path(aes(x=id, y=sd_act, group=animals_id))
#plot metrics against each other
ggplot(adat_m) +
  geom_point(aes(x=mean_act, y=sd_act))
ggplot(adat_m) +
  geom_point(aes(x=mean_act, y=max_act))
#there seems to be good correlation between mean and sd of activity, and, to some extent, mean and max
#' write.csv(adat_m, "results/Activity_data_byMonth.csv", row.names=F)
|
# Census sizes for the two patches (vector index = patch number).
# NOTE(review): the `2000` suffix presumably marks the generation/time step
# of run 203121 — confirm against the scripts that read this file.
numPerPatch2000 <- c(2502,2498)
| /NatureEE-data-archive/Run203121/JAFSdata/JAFSnumPerPatch2000.R | no_license | flaxmans/NatureEE2017 | R | false | false | 32 | r | numPerPatch2000 <- c(2502,2498)
|
# This function is pretty broken because it doesn't consider the difference in scale between axes.
# It also probably doesn't need to always (ever) shorten both ends at least for margin labels...
shorten <- function (x0, y0, x1, y1, rad) {
  # Euclidean length of each segment (vectorized over all endpoint vectors).
  seg.len <- sqrt((x1 - x0)^2 + (y1 - y0)^2)
  # Fraction of each segment's length that `rad` represents.
  frac <- rad / seg.len
  # Pull both endpoints inward along the segment by `rad`: the returned
  # "1" end sits `rad` away from (x0, y0), and the returned "0" end sits
  # `rad` away from (x1, y1).
  list(
    x1 = x0 + (x1 - x0) * frac,
    x0 = x1 + (x0 - x1) * frac,
    y1 = y0 + (y1 - y0) * frac,
    y0 = y1 + (y0 - y1) * frac
  )
}
#' Label points from the margin
#'
#' Labels are sorted according to the axis along which they are labeled.
#' This could be made a lot better since this often ends up in crossed lines..
#'
#' @examples
#' y <- rnorm(100)
#' x <- runif(100)
#' plot(x, y, pch=20, bty='n')
#' label.pts <- tail(order(y), 10)
#' marginlabels(x[label.pts], y[label.pts], margin=3, lty=3, rad=0.05)
#' @export
marginlabels <- function(x, y = NULL, labels=seq_along(x), margin=4,
                         col='black', lty=1, lwd=1, pch=1, pch.cex=1, las=2,
                         rad=0.15, ...) {
  # Plot markers at (x, y), write `labels` evenly spaced in the margin given
  # by `margin` (1=bottom, 2=left, 3=top, 4=right), and connect each marker
  # to its label with a segment shortened by `rad` at the marker end.
  len <- length(labels)
  if ( missing(y) || is.null(y) ) {
    y <- seq_along(labels)   # default: label index on the y axis
  }
  # Recycle coordinates so every label has a point.
  if ( length(x) != len ) x <- rep(x, len)
  if ( length(y) != len ) y <- rep(y, len)
  if ( margin == 1 || margin == 3 ) {
    # Bottom/top margin: labels are ordered and spread along the x axis.
    new.order <- order(x)
    x <- x[new.order]
    y <- y[new.order]
    labels <- labels[new.order]
    if ( !missing(col) && length(col) == len ) col <- col[new.order]
    # Evenly spaced anchors, excluding the two plot corners.
    label.x <- seq(par('usr')[1], par('usr')[2], length.out=len+2)[-c(1, len+2)]
    # BUG FIX: this previously tested `margin == 2`, which can never be TRUE
    # in this branch, so margin-1 connectors always ran to the TOP edge.
    # Margin 1 (bottom) must anchor at usr[3]; margin 3 (top) at usr[4].
    label.y <- par('usr')[if ( margin == 1) 3 else 4]
    tick.pos <- label.x
  } else {
    # Left/right margin: labels are ordered and spread along the y axis.
    new.order <- order(y)
    x <- x[new.order]
    y <- y[new.order]
    labels <- labels[new.order]
    if ( !missing(col) && length(col) == len ) col <- col[new.order]
    label.y <- seq(par('usr')[3], par('usr')[4], length.out=len+2)[-c(1, len+2)]
    # BUG FIX: this previously tested `margin == 1` (never TRUE here), so
    # margin-2 connectors always ran to the RIGHT edge. Margin 2 (left)
    # must anchor at usr[1]; margin 4 (right) at usr[2].
    label.x <- par('usr')[if ( margin == 2) 1 else 2]
    tick.pos <- label.y
  }
  points(x, y, pch=pch, col=col, cex=pch.cex)
  # Shorten the marker end of each connector, then overwrite the label end
  # with the exact margin anchor before drawing.
  connect.lines <- shorten(x, y, label.x, label.y, pch.cex*rad)
  connect.lines$lty <- lty
  connect.lines$lwd <- lwd
  connect.lines$x0 <- label.x
  connect.lines$y0 <- label.y
  do.call(segments, connect.lines)
  # Draw the labels as axis annotations at the anchor positions.
  axis(margin, at=tick.pos, labels, las=las, lwd=0, lwd.tick=lwd, lty=lty, line=0)
} | /R/margin_labels.r | permissive | tyjo/zoom.plot | R | false | false | 2,344 | r | # This function is pretty broken because it doesn't consider the difference in scale between axes.
# It also probably doesn't need to always (ever) shorten both ends at least for margin labels...
shorten <- function (x0, y0, x1, y1, rad) {
  # Euclidean length of each segment (vectorized over all endpoint vectors).
  seg.len <- sqrt((x1 - x0)^2 + (y1 - y0)^2)
  # Fraction of each segment's length that `rad` represents.
  frac <- rad / seg.len
  # Pull both endpoints inward along the segment by `rad`: the returned
  # "1" end sits `rad` away from (x0, y0), and the returned "0" end sits
  # `rad` away from (x1, y1).
  list(
    x1 = x0 + (x1 - x0) * frac,
    x0 = x1 + (x0 - x1) * frac,
    y1 = y0 + (y1 - y0) * frac,
    y0 = y1 + (y0 - y1) * frac
  )
}
#' Label points from the margin
#'
#' Labels are sorted according to the axis along which they are labeled.
#' This could be made a lot better since this often ends up in crossed lines..
#'
#' @examples
#' y <- rnorm(100)
#' x <- runif(100)
#' plot(x, y, pch=20, bty='n')
#' label.pts <- tail(order(y), 10)
#' marginlabels(x[label.pts], y[label.pts], margin=3, lty=3, rad=0.05)
#' @export
marginlabels <- function(x, y = NULL, labels=seq_along(x), margin=4,
                         col='black', lty=1, lwd=1, pch=1, pch.cex=1, las=2,
                         rad=0.15, ...) {
  # Plot markers at (x, y), write `labels` evenly spaced in the margin given
  # by `margin` (1=bottom, 2=left, 3=top, 4=right), and connect each marker
  # to its label with a segment shortened by `rad` at the marker end.
  len <- length(labels)
  if ( missing(y) || is.null(y) ) {
    y <- seq_along(labels)   # default: label index on the y axis
  }
  # Recycle coordinates so every label has a point.
  if ( length(x) != len ) x <- rep(x, len)
  if ( length(y) != len ) y <- rep(y, len)
  if ( margin == 1 || margin == 3 ) {
    # Bottom/top margin: labels are ordered and spread along the x axis.
    new.order <- order(x)
    x <- x[new.order]
    y <- y[new.order]
    labels <- labels[new.order]
    if ( !missing(col) && length(col) == len ) col <- col[new.order]
    # Evenly spaced anchors, excluding the two plot corners.
    label.x <- seq(par('usr')[1], par('usr')[2], length.out=len+2)[-c(1, len+2)]
    # BUG FIX: this previously tested `margin == 2`, which can never be TRUE
    # in this branch, so margin-1 connectors always ran to the TOP edge.
    # Margin 1 (bottom) must anchor at usr[3]; margin 3 (top) at usr[4].
    label.y <- par('usr')[if ( margin == 1) 3 else 4]
    tick.pos <- label.x
  } else {
    # Left/right margin: labels are ordered and spread along the y axis.
    new.order <- order(y)
    x <- x[new.order]
    y <- y[new.order]
    labels <- labels[new.order]
    if ( !missing(col) && length(col) == len ) col <- col[new.order]
    label.y <- seq(par('usr')[3], par('usr')[4], length.out=len+2)[-c(1, len+2)]
    # BUG FIX: this previously tested `margin == 1` (never TRUE here), so
    # margin-2 connectors always ran to the RIGHT edge. Margin 2 (left)
    # must anchor at usr[1]; margin 4 (right) at usr[2].
    label.x <- par('usr')[if ( margin == 2) 1 else 2]
    tick.pos <- label.y
  }
  points(x, y, pch=pch, col=col, cex=pch.cex)
  # Shorten the marker end of each connector, then overwrite the label end
  # with the exact margin anchor before drawing.
  connect.lines <- shorten(x, y, label.x, label.y, pch.cex*rad)
  connect.lines$lty <- lty
  connect.lines$lwd <- lwd
  connect.lines$x0 <- label.x
  connect.lines$y0 <- label.y
  do.call(segments, connect.lines)
  # Draw the labels as axis annotations at the anchor positions.
  axis(margin, at=tick.pos, labels, las=las, lwd=0, lwd.tick=lwd, lty=lty, line=0)
}
\name{check_smoltification}
\alias{check_smoltification}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
check_smoltification
}
\description{
Invokes the native routine \code{check_smoltification} compiled into the
\pkg{metaIbasam} package (via \code{.C}) and invisibly returns \code{NULL}.
}
\usage{
check_smoltification()
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Cyril Piou
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function ()
{
.C("check_smoltification", PACKAGE = "metaIbasam")
invisible(NULL)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ misc }
\keyword{ utilities }
\keyword{ programming }
| /master/man/check_smoltification.Rd | no_license | Ibasam/MetaIBASAM | R | false | false | 1,261 | rd | \name{check_smoltification}
\alias{check_smoltification}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
check_smoltification
}
\description{
Invokes the native routine \code{check_smoltification} compiled into the
\pkg{metaIbasam} package (via \code{.C}) and invisibly returns \code{NULL}.
}
\usage{
check_smoltification()
}
%- maybe also 'usage' for other objects documented here.
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Cyril Piou
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function ()
{
.C("check_smoltification", PACKAGE = "metaIbasam")
invisible(NULL)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ misc }
\keyword{ utilities }
\keyword{ programming }
|
\name{exp_calcMahalanobisDist}
\alias{exp_calcMahalanobisDist}
\title{Calculate Mahalanobis distance}
\author{Michael Lawrence <mflawren@fhcrc.org>}
\description{
Calculates the Mahalanobis distance between the samples (columns) in the data frame \code{ent_data}.
}
\usage{exp_calcMahalanobisDist(ent_data)}
\arguments{
\item{ent_data}{a data frame of experimental data, according to exploRase conventions}
}
\details{}
\examples{}
\keyword{arith}
| /man/exp-calcMahalanobisDist-r1.Rd | no_license | lawremi/exploRase | R | false | false | 446 | rd | \name{exp_calcMahalanobisDist}
\alias{exp_calcMahalanobisDist}
\title{Calculate Mahalanobis distance}
\author{Michael Lawrence <mflawren@fhcrc.org>}
\description{
Calculates the Mahalanobis distance between the samples (columns) in the data frame \code{ent_data}.
}
\usage{exp_calcMahalanobisDist(ent_data)}
\arguments{
\item{ent_data}{a data frame of experimental data, according to exploRase conventions}
}
\details{}
\examples{}
\keyword{arith}
|
# AUTO GENERATED FILE - DO NOT EDIT
# Constructor for the Dash HTML <ins> component. Collects the supplied
# props (dropping any that are NULL) and wraps them in the 'dash_component'
# structure that the Dash renderer serialises to the React front end.
# `...` accepts only 'data-*' and 'aria-*' wildcard attributes.
htmlIns <- function(children=NULL, id=NULL, n_clicks=NULL, n_clicks_timestamp=NULL, key=NULL, role=NULL, cite=NULL, dateTime=NULL, accessKey=NULL, className=NULL, contentEditable=NULL, contextMenu=NULL, dir=NULL, draggable=NULL, hidden=NULL, lang=NULL, spellCheck=NULL, style=NULL, tabIndex=NULL, title=NULL, loading_state=NULL, ...) {
    # Validate that every extra argument is a data-*/aria-* attribute.
    wildcard_names = names(dash_assert_valid_wildcards(attrib = list('data', 'aria'), ...))
    props <- list(children=children, id=id, n_clicks=n_clicks, n_clicks_timestamp=n_clicks_timestamp, key=key, role=role, cite=cite, dateTime=dateTime, accessKey=accessKey, className=className, contentEditable=contentEditable, contextMenu=contextMenu, dir=dir, draggable=draggable, hidden=hidden, lang=lang, spellCheck=spellCheck, style=style, tabIndex=tabIndex, title=title, loading_state=loading_state, ...)
    # Drop unset (NULL) props so they are not serialised.
    if (length(props) > 0) {
        props <- props[!vapply(props, is.null, logical(1))]
    }
    component <- list(
        props = props,
        type = 'Ins',
        namespace = 'dash_html_components',
        propNames = c('children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'cite', 'dateTime', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state', wildcard_names),
        package = 'dashHtmlComponents'
        )
    structure(component, class = c('dash_component', 'list'))
    }
| /R/htmlIns.R | permissive | noisycomputation/dash-html-components | R | false | false | 1,487 | r | # AUTO GENERATED FILE - DO NOT EDIT
# NOTE(review): verbatim repeat of the auto-generated htmlIns() definition
# earlier in this dump (dataset `text` cell duplicating `content`).
htmlIns <- function(children=NULL, id=NULL, n_clicks=NULL, n_clicks_timestamp=NULL, key=NULL, role=NULL, cite=NULL, dateTime=NULL, accessKey=NULL, className=NULL, contentEditable=NULL, contextMenu=NULL, dir=NULL, draggable=NULL, hidden=NULL, lang=NULL, spellCheck=NULL, style=NULL, tabIndex=NULL, title=NULL, loading_state=NULL, ...) {
    wildcard_names = names(dash_assert_valid_wildcards(attrib = list('data', 'aria'), ...))
    props <- list(children=children, id=id, n_clicks=n_clicks, n_clicks_timestamp=n_clicks_timestamp, key=key, role=role, cite=cite, dateTime=dateTime, accessKey=accessKey, className=className, contentEditable=contentEditable, contextMenu=contextMenu, dir=dir, draggable=draggable, hidden=hidden, lang=lang, spellCheck=spellCheck, style=style, tabIndex=tabIndex, title=title, loading_state=loading_state, ...)
    if (length(props) > 0) {
        props <- props[!vapply(props, is.null, logical(1))]
    }
    component <- list(
        props = props,
        type = 'Ins',
        namespace = 'dash_html_components',
        propNames = c('children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'cite', 'dateTime', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title', 'loading_state', wildcard_names),
        package = 'dashHtmlComponents'
        )
    structure(component, class = c('dash_component', 'list'))
    }
|
# limma differential-expression analysis: TCGA expression versus the
# clustered risk groups (Hier_k3), contrasting low_risk against high_risk.
setwd('E:/cibersort_0104/2.DEG')
dir.create('limma') # output directory
library(GEOquery)
library(limma)
library(dplyr)
# prep exp data
f <- read.csv('TCGA_expression.txt',sep='\t')
dim(f)
View(head(f))
f <- f[-1,]
rownames(f) <- f[,1]
f <- f[,-1]
f <- f[,order(colnames(f))]
View(f[1:5,1:5])
# prep clinical data
clin <- read.csv('../1.clustering/clustered_sample.txt',sep='\t')
View(clin)
clin <- clin[,order(colnames(clin))]
length(colnames(clin))
# trim exp data
# (keep only the samples that also appear in the clinical table)
f <- f[,colnames(clin)]
dim(f)
f <- as.data.frame(f)
sprr3 <- f['SPRR3|6707']
f <- t(f)
clin <- as.data.frame(t(clin))
f[1:5,1:5]
# change value Hier_k3 == 2 into 0 (merge 0,2) : 0 : high risk, 1 : low risk
View(clin)
clin$Hier_k3[clin$Hier_k3==2] <- 0
clin$Hier_k3 <- replace(clin$Hier_k3,grepl(0,clin$Hier_k3),'high_risk')
clin$Hier_k3 <- replace(clin$Hier_k3,grepl(1,clin$Hier_k3),'low_risk')
View(clin)
clin$days <- round((clin$days)*30,0)
#check dimension
dim(clin)
dim(f)
group_H <- clin$Hier_k3
design <- model.matrix(~0+group_H)
colnames(design)
# NOTE(review): the rename below assumes the design columns come out in
# alphabetical level order ('high_risk' before 'low_risk') — true here.
colnames(design) <- c('High_risk','Low_risk')
df1 <- as.data.frame(t(f[1:5,1:5]))
f <- as.data.frame(t(f))
rownames(f)
# Coerce factor columns (a side effect of the transposes) back to numeric.
f[] <- lapply(f, function(x) {
  if(is.factor(x)) as.numeric(as.character(x)) else x
})
#sprr3
# Sanity check on one gene: mean log2(SPRR3) per risk group; note the
# column created by cbind() is literally named "clin$Hier_k3".
sprr3_h3=cbind(sprr3,clin$Hier_k3)
sprr3_h3$`SPRR3|6707` <- log2(sprr3_h3$`SPRR3|6707`)
mean(sprr3_h3$`SPRR3|6707`[sprr3_h3['clin$Hier_k3']=='low_risk'])
mean(sprr3_h3$`SPRR3|6707`[sprr3_h3['clin$Hier_k3']=='high_risk'])
# limma analysis
# NOTE(review): log2() of raw values yields -Inf for zeros; such rows are
# only dropped from the *results* below (is.infinite filter), not from `f`.
fit = lmFit(log2(f),design) # essential for RNA-seq data
cont <- makeContrasts(diff=Low_risk-High_risk,levels=design) ### low risk focused!!!!!
fit.cont <- contrasts.fit(fit,cont)
fit.cont <- eBayes(fit.cont)
res <- topTable(fit.cont,number=Inf)
res <- na.omit(res)
res <- res[!is.infinite(rowSums(res)),]
View(res)
write.table(res,file='limma/Low_vs_High_risk.txt',sep='\t',quote = FALSE)
topT <- as.data.frame(res)
View(topT)
colnames(topT)
# Adjusted P values
# Volcano plot: grey = all genes; red/blue = up/down at |logFC| > 1 and
# adjusted p < 0.001.
with(topT, plot(logFC, -log10(adj.P.Val), pch=20, main="Volcano plot", col='grey', cex=1.0, xlab=bquote(~Log[2]~fold~change), ylab=bquote(~-log[10]~Q~value)))
cut_pvalue <- 0.001
cut_lfc <- 1
with(subset(topT, adj.P.Val<cut_pvalue & logFC>cut_lfc), points(logFC, -log10(adj.P.Val), pch=20, col='red', cex=1.5))
with(subset(topT, adj.P.Val<cut_pvalue & logFC<(-cut_lfc)), points(logFC, -log10(adj.P.Val), pch=20, col='blue', cex=1.5))
## Add lines for FC and P-value cut-off
abline(v=0, col='black', lty=3, lwd=1.0)
abline(v=-cut_lfc, col='black', lty=4, lwd=2.0)
abline(v=cut_lfc, col='black', lty=4, lwd=2.0)
abline(h=-log10(max(topT$adj.P.Val[topT$adj.P.Val<cut_pvalue], na.rm=TRUE)), col='black', lty=4, lwd=2.0)
| /4.DEG/.ipynb_checkpoints/limma_TCGA-checkpoint.R | no_license | wqhf/ORCA | R | false | false | 2,681 | r | setwd('E:/cibersort_0104/2.DEG')
# NOTE(review): verbatim repeat of the limma_TCGA script earlier in this
# dump (dataset `text` cell duplicating `content`).
dir.create('limma') # output directory
library(GEOquery)
library(limma)
library(dplyr)
# prep exp data
f <- read.csv('TCGA_expression.txt',sep='\t')
dim(f)
View(head(f))
f <- f[-1,]
rownames(f) <- f[,1]
f <- f[,-1]
f <- f[,order(colnames(f))]
View(f[1:5,1:5])
# prep clinical data
clin <- read.csv('../1.clustering/clustered_sample.txt',sep='\t')
View(clin)
clin <- clin[,order(colnames(clin))]
length(colnames(clin))
# trim exp data
f <- f[,colnames(clin)]
dim(f)
f <- as.data.frame(f)
sprr3 <- f['SPRR3|6707']
f <- t(f)
clin <- as.data.frame(t(clin))
f[1:5,1:5]
# change value Hier_k3 == 2 into 0 (merge 0,2) : 0 : high risk, 1 : low risk
View(clin)
clin$Hier_k3[clin$Hier_k3==2] <- 0
clin$Hier_k3 <- replace(clin$Hier_k3,grepl(0,clin$Hier_k3),'high_risk')
clin$Hier_k3 <- replace(clin$Hier_k3,grepl(1,clin$Hier_k3),'low_risk')
View(clin)
clin$days <- round((clin$days)*30,0)
#check dimension
dim(clin)
dim(f)
group_H <- clin$Hier_k3
design <- model.matrix(~0+group_H)
colnames(design)
colnames(design) <- c('High_risk','Low_risk')
df1 <- as.data.frame(t(f[1:5,1:5]))
f <- as.data.frame(t(f))
rownames(f)
f[] <- lapply(f, function(x) {
  if(is.factor(x)) as.numeric(as.character(x)) else x
})
#sprr3
sprr3_h3=cbind(sprr3,clin$Hier_k3)
sprr3_h3$`SPRR3|6707` <- log2(sprr3_h3$`SPRR3|6707`)
mean(sprr3_h3$`SPRR3|6707`[sprr3_h3['clin$Hier_k3']=='low_risk'])
mean(sprr3_h3$`SPRR3|6707`[sprr3_h3['clin$Hier_k3']=='high_risk'])
# limma analysis
fit = lmFit(log2(f),design) # essential for RNA-seq data
cont <- makeContrasts(diff=Low_risk-High_risk,levels=design) ### low risk focused!!!!!
fit.cont <- contrasts.fit(fit,cont)
fit.cont <- eBayes(fit.cont)
res <- topTable(fit.cont,number=Inf)
res <- na.omit(res)
res <- res[!is.infinite(rowSums(res)),]
View(res)
write.table(res,file='limma/Low_vs_High_risk.txt',sep='\t',quote = FALSE)
topT <- as.data.frame(res)
View(topT)
colnames(topT)
# Adjusted P values
with(topT, plot(logFC, -log10(adj.P.Val), pch=20, main="Volcano plot", col='grey', cex=1.0, xlab=bquote(~Log[2]~fold~change), ylab=bquote(~-log[10]~Q~value)))
cut_pvalue <- 0.001
cut_lfc <- 1
with(subset(topT, adj.P.Val<cut_pvalue & logFC>cut_lfc), points(logFC, -log10(adj.P.Val), pch=20, col='red', cex=1.5))
with(subset(topT, adj.P.Val<cut_pvalue & logFC<(-cut_lfc)), points(logFC, -log10(adj.P.Val), pch=20, col='blue', cex=1.5))
## Add lines for FC and P-value cut-off
abline(v=0, col='black', lty=3, lwd=1.0)
abline(v=-cut_lfc, col='black', lty=4, lwd=2.0)
abline(v=cut_lfc, col='black', lty=4, lwd=2.0)
abline(h=-log10(max(topT$adj.P.Val[topT$adj.P.Val<cut_pvalue], na.rm=TRUE)), col='black', lty=4, lwd=2.0)
|
# Currently this function could only parse svg files created by the cairo
# graphics library, typically from svg() in the grDevices package (R >= 2.14.0
# required for Windows OS), and CairoSVG() in the Cairo package.
parseSVG = function(file.name) {
svgFile = xmlParse(file.name);
# Don't forget the name space!
newXMLNamespace(xmlRoot(svgFile), "http://www.w3.org/2000/svg", "svg");
# Find the first <g> child of <svg>
pathRoot = getNodeSet(svgFile, "/svg:svg/svg:g");
if(!length(pathRoot)) stop(sprintf("Failed in parsing file '%s'", file.name));
pathRoot = pathRoot[[1]];
# Default style for a <path> node
defaultStyle = c("stroke" = "none",
"stroke-width" = "1",
"stroke-linecap" = "butt",
"stroke-linejoin" = "miter",
"stroke-miterlimit" = "4",
"stroke-opacity" = "1",
"fill" = "rgb(0%,0%,0%)",
"fill-rule" = "nonzero",
"fill-opacity" = "1");
# Handle <path> style in named vector
parseStyle = function(style)
{
if(is.null(style)) return(NULL);
s = unlist(strsplit(style, ";"));
val = strsplit(s, ":");
result = sapply(val, function(x) x[2]);
names(result) = sapply(val, function(x) x[1]);
return(result);
}
# Update the attributes in "old" style with the values in "new"
# "old" must contain "new"
updateStyle = function(old, new)
{
if(is.null(new)) return(old);
result = old;
result[names(new)] = new;
return(result);
}
# Iteratively update the style from parent nodes
updateStyleUpward = function(node)
{
style = xmlAttrs(node)["style"];
if(is.na(style)) style = NULL;
style = parseStyle(style);
style = updateStyle(defaultStyle, style);
parentNode = xmlParent(node);
# Recursively search the parent
while(!is.null(parentNode))
{
parentStyle = xmlAttrs(parentNode)["style"];
if(is.null(parentStyle) || is.na(parentStyle)) parentStyle = NULL;
parentStyle = parseStyle(parentStyle);
style = updateStyle(style, parentStyle);
parentNode = xmlParent(parentNode);
}
return(style);
}
# Parse <path> and <use> nodes into structured lists
#
# <path style="" d=""> =====> style=..., d=..., x=0, y=0
#
# <use xlink:href="#glyph0-0" x="63.046875" y="385.921875"/>
# =====>
# style=..., d=..., x=63.046875, y=385.921875
#
parseNode = function(node)
{
if(xmlName(node) == "use")
{
attrs = xmlAttrs(node);
refID = sub("#", "", attrs["href"]);
refPathNode = getNodeSet(svgFile, sprintf("//*[@id='%s']/svg:path", refID))[[1]];
style = updateStyleUpward(refPathNode);
style = updateStyle(style, updateStyleUpward(node));
d = xmlAttrs(refPathNode)["d"];
x = xmlAttrs(node)["x"];
y = xmlAttrs(node)["y"];
} else if(xmlName(node) == "path") {
style = updateStyleUpward(node);
d = xmlAttrs(node)["d"];
x = y = 0;
} else return(NULL);
xy = as.numeric(c(x, y));
names(d) = NULL;
names(xy) = NULL;
return(list(style = style, d = d, xy = xy));
}
# Flatten nodes
# <g>
# <use />
# <use />
# <use />
# </g>
#
# =====>
#
# <use />
# <use />
# <use />
expandNode = function(node)
{
children = xmlChildren(node);
res = if(!length(children)) node else children;
return(res);
}
nodes = unlist(xmlSApply(pathRoot, expandNode));
names(nodes) = NULL;
paths = lapply(nodes, parseNode);
path.is.null = sapply(paths, is.null);
paths[path.is.null] = NULL;
if(!length(paths)) stop("Unknown child node of '/svg/g'");
return(paths);
}
#' Convert a sequence of SVG files to SWF file
#'
#' Given the file names of a sequence of SVG files, this function could
#' convert them into a Flash file (.swf).
#'
#' This function uses the XML package in R and a subset of librsvg
#' (\url{http://librsvg.sourceforge.net/}) to parse the SVG file, and
#' uses the Ming library (\url{http://www.libming.org/}) to
#' implement the conversion. Currently this function supports SVG files
#' created by \code{\link[grDevices]{svg}()} in the \pkg{grDevices}
#' package, and \code{\link[Cairo]{CairoSVG}()} in the
#' \pkg{Cairo} package.
#' @param input the file names of the SVG files to be converted
#' @param output the name of the output SWF file
#' @param bgColor background color of the output SWF file
#' @param interval the time interval (in seconds) between animation frames
#' @return The name of the generated SWF file if successful.
#' @export
#' @author Yixuan Qiu <\email{yixuan.qiu@@cos.name}>
#' @examples \dontrun{
#' if(capabilities("cairo")) {
#' olddir = setwd(tempdir())
#' svg("Rplot%03d.svg", onefile = FALSE)
#' set.seed(123)
#' x = rnorm(5)
#' y = rnorm(5)
#' for(i in 1:100) {
#' plot(x <- x + 0.1 * rnorm(5), y <- y + 0.1 * rnorm(5),
#' xlim = c(-3, 3), ylim = c(-3, 3), col = "steelblue",
#' pch = 16, cex = 2, xlab = "x", ylab = "y")
#' }
#' dev.off()
#' output = svg2swf(sprintf("Rplot%03d.svg", 1:100), interval = 0.1)
#' swf2html(output)
#' setwd(olddir)
#' }
#' }
#'
svg2swf = function(input, output = "movie.swf", bgColor = "white",
interval = 1) {
# Use XML package
if(!require(XML))
stop("svg2swf() requires XML package");
if(!is.character(input))
stop("'input' must be a character vector naming the input SVG files");
bg = col2rgb(bgColor, alpha = FALSE);
bg = as.integer(bg);
if(!all(file.exists(input))) stop("one or more input files do not exist");
filesData = lapply(input, parseSVG);
firstFile = xmlParse(input[1]);
size = xmlAttrs(xmlRoot(firstFile))["viewBox"];
size = as.numeric(unlist(strsplit(size, " ")));
outfile = normalizePath(output, mustWork = FALSE);
.Call("svg2swf", filesData, outfile, size,
bg, as.numeric(interval), PACKAGE = "R2SWF");
message("SWF file created at ", outfile);
invisible(output);
}
| /R/svg2swf.R | no_license | yixuan/R2SWF-archive | R | false | false | 6,021 | r | # Currently this function could only parse svg files created by the cairo
# graphics library, typically from svg() in the grDevices package (R >= 2.14.0
# required for Windows OS), and CairoSVG() in the Cairo package.
parseSVG = function(file.name) {
svgFile = xmlParse(file.name);
# Don't forget the name space!
newXMLNamespace(xmlRoot(svgFile), "http://www.w3.org/2000/svg", "svg");
# Find the first <g> child of <svg>
pathRoot = getNodeSet(svgFile, "/svg:svg/svg:g");
if(!length(pathRoot)) stop(sprintf("Failed in parsing file '%s'", file.name));
pathRoot = pathRoot[[1]];
# Default style for a <path> node
defaultStyle = c("stroke" = "none",
"stroke-width" = "1",
"stroke-linecap" = "butt",
"stroke-linejoin" = "miter",
"stroke-miterlimit" = "4",
"stroke-opacity" = "1",
"fill" = "rgb(0%,0%,0%)",
"fill-rule" = "nonzero",
"fill-opacity" = "1");
# Handle <path> style in named vector
parseStyle = function(style)
{
if(is.null(style)) return(NULL);
s = unlist(strsplit(style, ";"));
val = strsplit(s, ":");
result = sapply(val, function(x) x[2]);
names(result) = sapply(val, function(x) x[1]);
return(result);
}
# Update the attributes in "old" style with the values in "new"
# "old" must contain "new"
updateStyle = function(old, new)
{
if(is.null(new)) return(old);
result = old;
result[names(new)] = new;
return(result);
}
# Iteratively update the style from parent nodes
updateStyleUpward = function(node)
{
style = xmlAttrs(node)["style"];
if(is.na(style)) style = NULL;
style = parseStyle(style);
style = updateStyle(defaultStyle, style);
parentNode = xmlParent(node);
# Recursively search the parent
while(!is.null(parentNode))
{
parentStyle = xmlAttrs(parentNode)["style"];
if(is.null(parentStyle) || is.na(parentStyle)) parentStyle = NULL;
parentStyle = parseStyle(parentStyle);
style = updateStyle(style, parentStyle);
parentNode = xmlParent(parentNode);
}
return(style);
}
# Parse <path> and <use> nodes into structured lists
#
# <path style="" d=""> =====> style=..., d=..., x=0, y=0
#
# <use xlink:href="#glyph0-0" x="63.046875" y="385.921875"/>
# =====>
# style=..., d=..., x=63.046875, y=385.921875
#
parseNode = function(node)
{
if(xmlName(node) == "use")
{
attrs = xmlAttrs(node);
refID = sub("#", "", attrs["href"]);
refPathNode = getNodeSet(svgFile, sprintf("//*[@id='%s']/svg:path", refID))[[1]];
style = updateStyleUpward(refPathNode);
style = updateStyle(style, updateStyleUpward(node));
d = xmlAttrs(refPathNode)["d"];
x = xmlAttrs(node)["x"];
y = xmlAttrs(node)["y"];
} else if(xmlName(node) == "path") {
style = updateStyleUpward(node);
d = xmlAttrs(node)["d"];
x = y = 0;
} else return(NULL);
xy = as.numeric(c(x, y));
names(d) = NULL;
names(xy) = NULL;
return(list(style = style, d = d, xy = xy));
}
# Flatten nodes
# <g>
# <use />
# <use />
# <use />
# </g>
#
# =====>
#
# <use />
# <use />
# <use />
expandNode = function(node)
{
children = xmlChildren(node);
res = if(!length(children)) node else children;
return(res);
}
nodes = unlist(xmlSApply(pathRoot, expandNode));
names(nodes) = NULL;
paths = lapply(nodes, parseNode);
path.is.null = sapply(paths, is.null);
paths[path.is.null] = NULL;
if(!length(paths)) stop("Unknown child node of '/svg/g'");
return(paths);
}
#' Convert a sequence of SVG files to SWF file
#'
#' Given the file names of a sequence of SVG files, this function could
#' convert them into a Flash file (.swf).
#'
#' This function uses the XML package in R and a subset of librsvg
#' (\url{http://librsvg.sourceforge.net/}) to parse the SVG file, and
#' uses the Ming library (\url{http://www.libming.org/}) to
#' implement the conversion. Currently this function supports SVG files
#' created by \code{\link[grDevices]{svg}()} in the \pkg{grDevices}
#' package, and \code{\link[Cairo]{CairoSVG}()} in the
#' \pkg{Cairo} package.
#' @param input the file names of the SVG files to be converted
#' @param output the name of the output SWF file
#' @param bgColor background color of the output SWF file
#' @param interval the time interval (in seconds) between animation frames
#' @return The name of the generated SWF file if successful.
#' @export
#' @author Yixuan Qiu <\email{yixuan.qiu@@cos.name}>
#' @examples \dontrun{
#' if(capabilities("cairo")) {
#' olddir = setwd(tempdir())
#' svg("Rplot%03d.svg", onefile = FALSE)
#' set.seed(123)
#' x = rnorm(5)
#' y = rnorm(5)
#' for(i in 1:100) {
#' plot(x <- x + 0.1 * rnorm(5), y <- y + 0.1 * rnorm(5),
#' xlim = c(-3, 3), ylim = c(-3, 3), col = "steelblue",
#' pch = 16, cex = 2, xlab = "x", ylab = "y")
#' }
#' dev.off()
#' output = svg2swf(sprintf("Rplot%03d.svg", 1:100), interval = 0.1)
#' swf2html(output)
#' setwd(olddir)
#' }
#' }
#'
svg2swf = function(input, output = "movie.swf", bgColor = "white",
interval = 1) {
# Use XML package
if(!require(XML))
stop("svg2swf() requires XML package");
if(!is.character(input))
stop("'input' must be a character vector naming the input SVG files");
bg = col2rgb(bgColor, alpha = FALSE);
bg = as.integer(bg);
if(!all(file.exists(input))) stop("one or more input files do not exist");
filesData = lapply(input, parseSVG);
firstFile = xmlParse(input[1]);
size = xmlAttrs(xmlRoot(firstFile))["viewBox"];
size = as.numeric(unlist(strsplit(size, " ")));
outfile = normalizePath(output, mustWork = FALSE);
.Call("svg2swf", filesData, outfile, size,
bg, as.numeric(interval), PACKAGE = "R2SWF");
message("SWF file created at ", outfile);
invisible(output);
}
|
task_table = c(
"54" = "Hepatitis",
"37" = "Diabetes",
"31" = "German Credit",
"4534" = "Analcat Halloffame",
"spam" = "Spam",
"168337" = "Guillermo",
"7592" = "Adult",
"168335" = "MiniBooNE",
"albert" = "Albert",
"359994" = "SF Police Incidents")
learner_table = c(
cboost1 = "CWB (no binning)",
cboost_bin1 = "CWB (binning)",
cboost4 = "CWB Cosine Annealing (no binning)",
cboost_bin4 = "CWB Cosine Annealing (binning)",
cboost3 = "ACWB (no binning)",
cboost_bin3 = "ACWB (binning)",
cboost2 = "hCWB (no binning)",
cboost_bin2 = "hCWB (binning)",
ranger = "Random forest",
xgboost = "Boosted trees",
gamboost = "CWB (mboost)",
interpretML = "interpretML")
extractStringBetween = function(str, left, right) {
tmp = sapply(strsplit(str, left), function(x) x[2])
sapply(strsplit(tmp, right), function(x) x[1])
}
getTaskFromFile = function(file_name) {
tsks = extractStringBetween(file_name, "-task", "-classif")
unname(task_table[sapply(tsks, function(ts) which(ts == names(task_table)))])
}
getLearnerFromFile = function(file_name) {
lrns = extractStringBetween(file_name, "-classif_lrn_", "[.]Rda")
lrns_idx = sapply(lrns, function(l) which(l == names(learner_table)))
unname(learner_table[lrns_idx])
}
extractBMRData = function(file_name) {
lapply(file_name, function(file) {
load(file)
tmp = bmr_res[[3]]
idx_select = sapply(
c("classif.auc", "classif.ce", "classif.bbrier", "time_train", "time_predict", "time_both", "n_evals"),
function(m) which(m == names(tmp)))
tmp = tmp[, idx_select]
tmp$task = getTaskFromFile(file)
tmp$learner = getLearnerFromFile(file)
return(tmp)
})
}
base_dir = "~/repos/compboost/benchmark/mlr-bmr/"
files = list.files(paste0(base_dir, "res-results"), full.names = TRUE)
#getTaskFromFile(files)
#getLearnerFromFile(files)
df_bmr = do.call(rbind, extractBMRData(files))
df_bmr$time_per_model = df_bmr$time_train / df_bmr$n_evals
#save(df_bmr, file = paste0(base_dir, "df_bmr.Rda"))
#load("bmr-aggr/df_bmr.Rda")
if (FALSE) {
library(ggplot2)
library(dplyr)
df_bmr %>%
group_by(learner, task) %>%
summarize(med = median(classif.auc[1:3]), sd = sd(classif.auc[1:3]))
summarize(med = median(classif.auc), sd = sd(classif.auc))
ggplot(df_bmr, aes(x = learner, y = classif.auc, color = learner, fill = learner)) +
geom_boxplot(alpha = 0.2) +
facet_wrap(. ~ task, ncol = 3, scales = "free")
}
| /src/summarize-results.R | no_license | schalkdaniel/cacb-benchmark | R | false | false | 2,456 | r | task_table = c(
"54" = "Hepatitis",
"37" = "Diabetes",
"31" = "German Credit",
"4534" = "Analcat Halloffame",
"spam" = "Spam",
"168337" = "Guillermo",
"7592" = "Adult",
"168335" = "MiniBooNE",
"albert" = "Albert",
"359994" = "SF Police Incidents")
learner_table = c(
cboost1 = "CWB (no binning)",
cboost_bin1 = "CWB (binning)",
cboost4 = "CWB Cosine Annealing (no binning)",
cboost_bin4 = "CWB Cosine Annealing (binning)",
cboost3 = "ACWB (no binning)",
cboost_bin3 = "ACWB (binning)",
cboost2 = "hCWB (no binning)",
cboost_bin2 = "hCWB (binning)",
ranger = "Random forest",
xgboost = "Boosted trees",
gamboost = "CWB (mboost)",
interpretML = "interpretML")
extractStringBetween = function(str, left, right) {
tmp = sapply(strsplit(str, left), function(x) x[2])
sapply(strsplit(tmp, right), function(x) x[1])
}
getTaskFromFile = function(file_name) {
tsks = extractStringBetween(file_name, "-task", "-classif")
unname(task_table[sapply(tsks, function(ts) which(ts == names(task_table)))])
}
getLearnerFromFile = function(file_name) {
lrns = extractStringBetween(file_name, "-classif_lrn_", "[.]Rda")
lrns_idx = sapply(lrns, function(l) which(l == names(learner_table)))
unname(learner_table[lrns_idx])
}
extractBMRData = function(file_name) {
lapply(file_name, function(file) {
load(file)
tmp = bmr_res[[3]]
idx_select = sapply(
c("classif.auc", "classif.ce", "classif.bbrier", "time_train", "time_predict", "time_both", "n_evals"),
function(m) which(m == names(tmp)))
tmp = tmp[, idx_select]
tmp$task = getTaskFromFile(file)
tmp$learner = getLearnerFromFile(file)
return(tmp)
})
}
base_dir = "~/repos/compboost/benchmark/mlr-bmr/"
files = list.files(paste0(base_dir, "res-results"), full.names = TRUE)
#getTaskFromFile(files)
#getLearnerFromFile(files)
df_bmr = do.call(rbind, extractBMRData(files))
df_bmr$time_per_model = df_bmr$time_train / df_bmr$n_evals
#save(df_bmr, file = paste0(base_dir, "df_bmr.Rda"))
#load("bmr-aggr/df_bmr.Rda")
if (FALSE) {
library(ggplot2)
library(dplyr)
df_bmr %>%
group_by(learner, task) %>%
summarize(med = median(classif.auc[1:3]), sd = sd(classif.auc[1:3]))
summarize(med = median(classif.auc), sd = sd(classif.auc))
ggplot(df_bmr, aes(x = learner, y = classif.auc, color = learner, fill = learner)) +
geom_boxplot(alpha = 0.2) +
facet_wrap(. ~ task, ncol = 3, scales = "free")
}
|
library(tidyverse)
library(here)
library(sf)
# The root of the data directory
data_dir = readLines(here("data_dir.txt"), n=1)
# Convenience functions, including function datadir() to prepend data directory to a relative path
source(here("scripts/convenience_functions.R"))
locs = read_csv(datadir("grupenhoff_plot_data_orig/HolyGrail_trt_utm.csv"))
locs = locs %>%
mutate(utmzone = str_sub(`UTM Zone`,1,2))
locs_utm11 = locs %>%
filter(X > 435449.1) %>%
filter(!(is.na(X) | is.na(Y)))
locs_utm10 = locs %>%
filter(X < 435449.1)
locs11 = st_as_sf(locs_utm11,coords=c("X","Y"), crs="32611")
st_crs(locs11) = "32611"
locs10 = st_as_sf(locs_utm10,coords=c("X","Y"), crs="32610")
st_crs(locs10) = "32610"
locs = bind_rows(locs11,locs10)
| /scripts/map_plots.R | no_license | youngdjn/fuels-ai | R | false | false | 748 | r | library(tidyverse)
library(here)
library(sf)
# The root of the data directory
data_dir = readLines(here("data_dir.txt"), n=1)
# Convenience functions, including function datadir() to prepend data directory to a relative path
source(here("scripts/convenience_functions.R"))
locs = read_csv(datadir("grupenhoff_plot_data_orig/HolyGrail_trt_utm.csv"))
locs = locs %>%
mutate(utmzone = str_sub(`UTM Zone`,1,2))
locs_utm11 = locs %>%
filter(X > 435449.1) %>%
filter(!(is.na(X) | is.na(Y)))
locs_utm10 = locs %>%
filter(X < 435449.1)
locs11 = st_as_sf(locs_utm11,coords=c("X","Y"), crs="32611")
st_crs(locs11) = "32611"
locs10 = st_as_sf(locs_utm10,coords=c("X","Y"), crs="32610")
st_crs(locs10) = "32610"
locs = bind_rows(locs11,locs10)
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.88387452106224e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615844541-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,232 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.88387452106224e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.9700945594356e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, 
-1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
FRESA.Model <-
function(formula,data,OptType=c("Binary","Residual"),pvalue=0.05,filter.p.value=0.10,loops=32,maxTrainModelSize=20,elimination.bootstrap.steps=100,bootstrap.steps=100,print=FALSE,plots=FALSE,CVfolds=1,repeats=1,nk=0,categorizationType=c("Raw","Categorical","ZCategorical","RawZCategorical","RawTail","RawZTail","Tail","RawRaw"),cateGroups=c(0.1,0.9),raw.dataFrame=NULL,var.description=NULL,testType=c("zIDI","zNRI","Binomial","Wilcox","tStudent","Ftest"),lambda="lambda.1se",equivalent=FALSE,bswimsCycles=20,usrFitFun=NULL)
{
a = as.numeric(Sys.time());
set.seed(a);
categorizationType <- match.arg(categorizationType);
cl <- match.call();
cvObject <- NULL;
univariate <- NULL;
eq=NULL;
bagg=NULL;
type = "LM";
if (class(formula)=="character")
{
formula <- formula(formula);
}
if (class(formula)=="formula")
{
featureSize = ncol(data)-1;
OptType <- match.arg(OptType)
varlist <- attr(terms(formula),"variables")
dependent <- as.character(varlist[[2]])
timeOutcome = NA;
Outcome = NA;
type = "LM";
if (length(dependent)==3)
{
type = "COX"
timeOutcome = dependent[2];
Outcome = dependent[3];
dependentout = paste(dependent[1],"(",dependent[2],",",dependent[3],")");
}
else
{
Outcome = dependent[1];
dependentout = Outcome;
}
setIntersect <- attr(terms(formula),"intercept")
if (setIntersect == 0)
{
covariates = "0";
}
else
{
covariates = "1";
}
termslist <- attr(terms(formula),"term.labels");
acovariates <- covariates[1];
if (length(termslist)>0)
{
for (i in 1:length(termslist))
{
covariates <- paste(covariates,"+",termslist[i]);
acovariates <- append(acovariates,termslist[i]);
}
}
startOffset = length(termslist);
variables <- vector();
descrip <- vector();
pnames <- as.vector(colnames(data));
for (i in 1:length(pnames))
{
detected = 0;
if (length(termslist)>0)
{
for (j in 1:length(termslist))
{
if (termslist[j] == pnames[i]) detected = 1;
}
}
if (Outcome == pnames[i]) detected = 1;
if (!is.na(timeOutcome) )
{
if (timeOutcome == pnames[i]) detected = 1;
}
if (detected == 0)
{
variables <- append(variables,pnames[i]);
if (!is.null(var.description))
{
descrip <- append(descrip,var.description[i]);
}
}
}
if (!is.null(var.description))
{
variables <- cbind(variables,descrip);
}
else
{
variables <- cbind(variables,variables);
}
colnames(variables) <- c("Var","Description");
if (CVfolds>nrow(data))
{
cat("Setting to LOO CV\n");
CVfolds=nrow(data);
}
trainFraction <- 1.0-1.0/CVfolds;
trainRepetition <- repeats*CVfolds;
fraction = 1.0000; # will be working with 1.0000 fraction of the samples for bootstrap training
varMax = nrow(variables);
baseModel <- paste(dependentout,"~",covariates);
cvObject = NULL;
reducedModel = NULL;
bootstrappedModel = NULL;
UpdatedModel = NULL;
filter.z.value <- abs(qnorm(filter.p.value))
cutpvalue <- 3.0*filter.p.value
if (cutpvalue > 0.45) cutpvalue=0.45;
selectionType = match.arg(testType);
testType = match.arg(testType);
theScores <- names(table(data[,Outcome]))
if (((length(theScores)>2)||(min(data[,Outcome])<0))&&(OptType == "Binary"))
{
OptType = "Residual";
}
if (categorizationType=="RawRaw")
{
rownames(variables) <- variables[,1];
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType="Raw",type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Regression",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
univariate <- unirank$orderframe;
featureSize <- nrow(univariate);
unitPvalues <- (1.0-pnorm(univariate$ZUni));
names(unitPvalues) <- univariate$Name;
adjPvalues <- p.adjust(unitPvalues,"BH");
variables <- variables[names(adjPvalues[adjPvalues <= 2*filter.p.value]),];
}
if (OptType == "Binary")
{
if (length(dependent)==1)
{
type = "LOGIT";
}
# elimination.pValue <- pvalue; # To test if the variable is part of the model
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="zIDI",cateGroups,raw.dataFrame,description="Description",uniType="Binary",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome);
univariate <- unirank$orderframe;
featureSize <- nrow(univariate);
unitPvalues <- (1.0-pnorm(univariate$ZUni));
names(unitPvalues) <- univariate$Name;
adjPvalues <- p.adjust(unitPvalues,"BH");
varMax <- sum(univariate$ZUni >= filter.z.value);
if (categorizationType == "Raw")
{
gadjPvalues <- adjPvalues[adjPvalues < 2*filter.p.value]
noncornames <- correlated_Remove(data,names(gadjPvalues),thr=0.99);
if (length(noncornames) > 1) featureSize <- featureSize*length(noncornames)/length(gadjPvalues);
# cat(length(noncornames),":",length(gadjPvalues),":",length(noncornames)/length(gadjPvalues),"\n");
}
pvarMax <- sum(adjPvalues < 2*filter.p.value);
sizeM <- min(c(pvarMax,varMax));
if (sizeM < 5) sizeM = min(c(5,nrow(univariate)));
if (varMax > nrow(univariate)) varMax = nrow(univariate);
if (varMax < 5) varMax = min(c(5,nrow(univariate)));
redlist <- adjPvalues < cutpvalue;
totlist <- min(sum(1*redlist),100);
cat("Unadjusted size:",sum(univariate$ZUni >= filter.z.value)," Adjusted Size:",pvarMax," Cut size:",sum(1*redlist),"\n")
if (totlist<10)
{
redlist <- c(1:min(10,nrow(univariate)))
totlist <- length(totlist);
}
cat("\n Z: ",filter.z.value,", Features to test: ",sizeM,",Adjust Size:",featureSize,"\n");
shortUniv <- univariate[redlist,]
if (CVfolds>1)
{
if (categorizationType!="RawRaw")
{
rownames(variables) <- variables[,1];
# unirank$variableList <- variables[unique(as.character(univariate[redlist,2])),]
}
cvObject <- crossValidationFeatureSelection_Bin(sizeM,fraction,c(pvalue,filter.p.value),loops,acovariates,Outcome,timeOutcome,NULL,data,maxTrainModelSize,type,selectionType,startOffset,elimination.bootstrap.steps,trainFraction,trainRepetition,bootstrap.steps,nk,unirank,print=print,plots=plots,lambda=lambda,equivalent=equivalent,bswimsCycles=bswimsCycles,usrFitFun,featureSize=featureSize);
firstModel <- cvObject$forwardSelection;
UpdatedModel <- cvObject$updateforwardSelection;
reducedModel <- cvObject$BSWiMS;
bootstrappedModel <- cvObject$FullBSWiMS.bootstrapped;
BSWiMS.models <- cvObject$BSWiMS.models;
}
else
{
BSWiMS.models <- BSWiMS.model(formula=formula,data=data,type=type,testType=selectionType,pvalue=pvalue,variableList=shortUniv,size=sizeM,loops=loops,elimination.bootstrap.steps=bootstrap.steps,fraction=1.0,maxTrainModelSize=maxTrainModelSize,maxCycles=bswimsCycles,print=print,plots=plots,featureSize=featureSize,NumberofRepeats=repeats);
firstModel <- BSWiMS.models$forward.model;
UpdatedModel <- BSWiMS.models$update.model;
reducedModel <- BSWiMS.models$BSWiMS.model;
bootstrappedModel <- reducedModel$bootCV;
}
}
if (OptType == "Residual")
{
# elimination.pValue <- pvalue; # To test if the variable is part of the model
if (testType=="zIDI")
{
if ((testType=="zIDI")&&(length(theScores)>10))
{
warning("Switching to Regresion, More than 10 scores");
testType = "Ftest";
}
else
{
cat("Doing a Ordinal Fit with zIDI Selection\n");
cat("Ordinal Fit will be stored in BSWiMS.models$oridinalModels\n");
cat("Use predict(BSWiMS.models$oridinalModels,testSet) to get the ordinal prediction on a new dataset \n");
}
}
if (length(dependent)==1)
{
if ((length(theScores)>2)||(min(data[,Outcome])<0))
{
type = "LM";
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Regression",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
if ((length(theScores)<=10)&&(testType=="zIDI"))
{
type = "LOGIT";
}
}
else
{
if (type == "LM") type = "LOGIT";
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Binary",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
}
}
else
{
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Binary",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
}
univariate <- unirank$orderframe;
featureSize <- nrow(univariate);
unitPvalues <- (1.0-pnorm(univariate$ZUni));
names(unitPvalues) <- univariate$Name;
adjPvalues <- p.adjust(unitPvalues,"BH");
varMax <- sum(univariate$ZUni >= filter.z.value);
if (categorizationType == "Raw")
{
gadjPvalues <- adjPvalues[adjPvalues < 2*filter.p.value]
noncornames <- correlated_Remove(data,names(gadjPvalues),thr=0.99);
if (length(noncornames) > 1) featureSize <- featureSize*length(noncornames)/length(gadjPvalues);
# cat(length(noncornames),":",length(gadjPvalues),":",length(noncornames)/length(gadjPvalues),"\n");
}
pvarMax <- sum(adjPvalues < 2*filter.p.value);
sizeM <- min(c(pvarMax,varMax));
if (sizeM < 5) sizeM = min(c(5,nrow(univariate)));
if (varMax > nrow(univariate)) varMax = nrow(univariate);
if (varMax < 5) varMax = min(c(5,nrow(univariate)));
bootstrappedModel = NULL;
redlist <- adjPvalues < cutpvalue;
totlist <- min(sum(1*redlist),100);
cat("Features to test:",sizeM," Adjusted Size:",featureSize,"\n");
if (totlist<10)
{
redlist <- c(1:min(10,nrow(univariate)))
totlist <- length(totlist);
}
cat("\n Z: ",filter.z.value," Var Max: ",featureSize,"FitType: ",type," Test Type: ",testType,"\n");
shortUniv <- univariate[redlist,]
if (CVfolds>1)
{
if (categorizationType != "RawRaw")
{
rownames(variables) <- variables[,1];
# unirank$variableList <- variables[unique(as.character(univariate[redlist,2])),]
}
cvObject <- crossValidationFeatureSelection_Res(size=sizeM,fraction=fraction,pvalue=c(pvalue,filter.p.value),loops=loops,covariates=acovariates,Outcome=Outcome,timeOutcome=timeOutcome,variableList=unirank$variableList,data=data,maxTrainModelSize=maxTrainModelSize,type=type,testType=testType,startOffset=startOffset,elimination.bootstrap.steps=elimination.bootstrap.steps,trainFraction=trainFraction,trainRepetition=trainRepetition,setIntersect=setIntersect,unirank=unirank,print=print,plots=plots,lambda=lambda,equivalent=equivalent,bswimsCycles=bswimsCycles,usrFitFun=usrFitFun,featureSize=featureSize);
firstModel <- cvObject$forwardSelection;
UpdatedModel <- cvObject$updatedforwardModel;
reducedModel <- cvObject$BSWiMS;
bootstrappedModel <- cvObject$BSWiMS$bootCV;
BSWiMS.models <- cvObject$BSWiMS.models;
}
else
{
BSWiMS.models <- BSWiMS.model(formula=formula,data=data,type=type,testType=testType,pvalue=pvalue,variableList=shortUniv,size=sizeM,loops=loops,elimination.bootstrap.steps=bootstrap.steps,fraction=1.0,maxTrainModelSize=maxTrainModelSize,maxCycles=bswimsCycles,print=print,plots=plots,featureSize=featureSize,NumberofRepeats=repeats);
firstModel <- BSWiMS.models$forward.model;
UpdatedModel <- BSWiMS.models$update.model;
reducedModel <- BSWiMS.models$BSWiMS.model;
bootstrappedModel <- reducedModel$bootCV;
}
}
}
else
{
cat("Expecting a formula object\n");
}
if (is.null(reducedModel))
{
result <- list(BSWiMS.model = NULL,
reducedModel = reducedModel,
univariateAnalysis=univariate,
forwardModel=firstModel,
updatedforwardModel=UpdatedModel,
bootstrappedModel=bootstrappedModel,
cvObject=cvObject,
used.variables=varMax,
# independenSize=adjsize,
call=cl);
}
else
{
eq <- NULL;
bagg <- NULL;
if ((length(reducedModel$back.model$coefficients) > 1 ) && equivalent)
{
collectFormulas <- BSWiMS.models$forward.selection.list;
bagg <- baggedModel(collectFormulas,data,type,Outcome,timeOutcome,univariate=univariate,useFreq=loops);
shortcan <- bagg$frequencyTable[(bagg$frequencyTable >= (loops*0.05))];
modeltems <- attr(terms(reducedModel$back.model),"term.labels");
eshortlist <- unique(c(names(shortcan),str_replace_all(modeltems,":","\\*")));
eshortlist <- eshortlist[!is.na(eshortlist)];
if (length(eshortlist)>0)
{
nameslist <- c(all.vars(BSWiMS.models$bagging$bagged.model$formula),as.character(univariate[eshortlist,2]));
nameslist <- unique(nameslist[!is.na(nameslist)]);
if (categorizationType != "RawRaw")
{
eqdata <- data[,nameslist];
}
else
{
eqdata <- data;
}
eq <- reportEquivalentVariables(reducedModel$back.model,pvalue = 0.25*pvalue,
data=eqdata,
variableList=cbind(eshortlist,eshortlist),
Outcome = Outcome,
timeOutcome=timeOutcome,
type = type,osize=featureSize,
method="BH");
}
}
result <- list(BSWiMS.model = BSWiMS.models$bagging$bagged.model,
reducedModel = reducedModel,
univariateAnalysis=univariate,
forwardModel=firstModel,
updatedforwardModel=UpdatedModel,
bootstrappedModel=bootstrappedModel,
cvObject=cvObject,
used.variables=varMax,
bagging=bagg,
eBSWiMS.model=eq,
BSWiMS.models=BSWiMS.models,
call=cl
);
}
return (result);
}
| /fuzzedpackages/FRESA.CAD/R/FRESA.Model.R | no_license | akhikolla/testpackages | R | false | false | 13,621 | r | FRESA.Model <-
function(formula,data,OptType=c("Binary","Residual"),pvalue=0.05,filter.p.value=0.10,loops=32,maxTrainModelSize=20,elimination.bootstrap.steps=100,bootstrap.steps=100,print=FALSE,plots=FALSE,CVfolds=1,repeats=1,nk=0,categorizationType=c("Raw","Categorical","ZCategorical","RawZCategorical","RawTail","RawZTail","Tail","RawRaw"),cateGroups=c(0.1,0.9),raw.dataFrame=NULL,var.description=NULL,testType=c("zIDI","zNRI","Binomial","Wilcox","tStudent","Ftest"),lambda="lambda.1se",equivalent=FALSE,bswimsCycles=20,usrFitFun=NULL)
{
# FRESA.Model: end-to-end bootstrapped model selection (BSWiMS).
# Pipeline visible below: (1) parse `formula` to find the outcome (and, for
# survival formulas of the form Surv(time,event), a time column), (2) rank
# all remaining columns of `data` univariately via uniRankVar(), (3) filter
# candidates by z-value / BH-adjusted p-value, (4) run either a binary
# (zIDI-driven) or residual (regression-driven) BSWiMS selection, optionally
# cross-validated when CVfolds > 1, and (5) assemble a result list with the
# bagged model, the reduced (backward-eliminated) model, the univariate
# table, and bookkeeping such as the call and the number of used variables.
# NOTE(review): the RNG is seeded from the wall clock, so runs are not
# reproducible by default.
a = as.numeric(Sys.time());
set.seed(a);
categorizationType <- match.arg(categorizationType);
cl <- match.call();
# Defaults for everything the result list references, so the fall-through
# branch (non-formula input) still builds a result without erroring on
# missing names only when reducedModel stays NULL.
cvObject <- NULL;
univariate <- NULL;
eq=NULL;
bagg=NULL;
type = "LM";
# NOTE(review): inherits(formula, "character") would be the idiomatic test.
if (class(formula)=="character")
{
formula <- formula(formula);
}
if (class(formula)=="formula")
{
featureSize = ncol(data)-1;
OptType <- match.arg(OptType)
varlist <- attr(terms(formula),"variables")
dependent <- as.character(varlist[[2]])
timeOutcome = NA;
Outcome = NA;
type = "LM";
# A three-part LHS (e.g. Surv(time, status)) selects a Cox fit; otherwise
# the single LHS symbol is the outcome.
if (length(dependent)==3)
{
type = "COX"
timeOutcome = dependent[2];
Outcome = dependent[3];
dependentout = paste(dependent[1],"(",dependent[2],",",dependent[3],")");
}
else
{
Outcome = dependent[1];
dependentout = Outcome;
}
# Rebuild the RHS as a string ("1"/"0" intercept plus each term) for the
# base model handed to uniRankVar().
setIntersect <- attr(terms(formula),"intercept")
if (setIntersect == 0)
{
covariates = "0";
}
else
{
covariates = "1";
}
termslist <- attr(terms(formula),"term.labels");
acovariates <- covariates[1];
if (length(termslist)>0)
{
for (i in 1:length(termslist))
{
covariates <- paste(covariates,"+",termslist[i]);
acovariates <- append(acovariates,termslist[i]);
}
}
startOffset = length(termslist);
# Candidate variables = every column of `data` that is not the outcome,
# the time column, or an explicit formula term. Second column is a
# human-readable description (defaults to the name itself).
variables <- vector();
descrip <- vector();
pnames <- as.vector(colnames(data));
for (i in 1:length(pnames))
{
detected = 0;
if (length(termslist)>0)
{
for (j in 1:length(termslist))
{
if (termslist[j] == pnames[i]) detected = 1;
}
}
if (Outcome == pnames[i]) detected = 1;
if (!is.na(timeOutcome) )
{
if (timeOutcome == pnames[i]) detected = 1;
}
if (detected == 0)
{
variables <- append(variables,pnames[i]);
if (!is.null(var.description))
{
descrip <- append(descrip,var.description[i]);
}
}
}
if (!is.null(var.description))
{
variables <- cbind(variables,descrip);
}
else
{
variables <- cbind(variables,variables);
}
colnames(variables) <- c("Var","Description");
# More folds than rows degenerates to leave-one-out CV.
if (CVfolds>nrow(data))
{
cat("Setting to LOO CV\n");
CVfolds=nrow(data);
}
trainFraction <- 1.0-1.0/CVfolds;
trainRepetition <- repeats*CVfolds;
fraction = 1.0000; # will be working with 1.0000 fraction of the samples for bootstrap training
varMax = nrow(variables);
baseModel <- paste(dependentout,"~",covariates);
cvObject = NULL;
reducedModel = NULL;
bootstrappedModel = NULL;
UpdatedModel = NULL;
# Filtering thresholds: z cut from filter.p.value, and a looser p cut
# (3x, capped at 0.45) used to build the short candidate list.
filter.z.value <- abs(qnorm(filter.p.value))
cutpvalue <- 3.0*filter.p.value
if (cutpvalue > 0.45) cutpvalue=0.45;
selectionType = match.arg(testType);
testType = match.arg(testType);
theScores <- names(table(data[,Outcome]))
# A non-binary or negative-valued outcome cannot be fit as "Binary";
# silently fall back to the "Residual" path.
if (((length(theScores)>2)||(min(data[,Outcome])<0))&&(OptType == "Binary"))
{
OptType = "Residual";
}
# "RawRaw": pre-filter the candidate list with a plain regression ranking
# before the main selection pass.
if (categorizationType=="RawRaw")
{
rownames(variables) <- variables[,1];
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType="Raw",type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Regression",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
univariate <- unirank$orderframe;
featureSize <- nrow(univariate);
unitPvalues <- (1.0-pnorm(univariate$ZUni));
names(unitPvalues) <- univariate$Name;
adjPvalues <- p.adjust(unitPvalues,"BH");
variables <- variables[names(adjPvalues[adjPvalues <= 2*filter.p.value]),];
}
# ---- Binary path: zIDI-ranked univariate filter, then BSWiMS ----
if (OptType == "Binary")
{
if (length(dependent)==1)
{
type = "LOGIT";
}
# elimination.pValue <- pvalue; # To test if the variable is part of the model
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="zIDI",cateGroups,raw.dataFrame,description="Description",uniType="Binary",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome);
univariate <- unirank$orderframe;
featureSize <- nrow(univariate);
unitPvalues <- (1.0-pnorm(univariate$ZUni));
names(unitPvalues) <- univariate$Name;
adjPvalues <- p.adjust(unitPvalues,"BH");
varMax <- sum(univariate$ZUni >= filter.z.value);
# For "Raw" features, shrink the effective feature count by the fraction
# that survives a 0.99-correlation de-duplication.
if (categorizationType == "Raw")
{
gadjPvalues <- adjPvalues[adjPvalues < 2*filter.p.value]
noncornames <- correlated_Remove(data,names(gadjPvalues),thr=0.99);
if (length(noncornames) > 1) featureSize <- featureSize*length(noncornames)/length(gadjPvalues);
# cat(length(noncornames),":",length(gadjPvalues),":",length(noncornames)/length(gadjPvalues),"\n");
}
pvarMax <- sum(adjPvalues < 2*filter.p.value);
sizeM <- min(c(pvarMax,varMax));
# Always test at least 5 candidates (or all, if fewer exist).
if (sizeM < 5) sizeM = min(c(5,nrow(univariate)));
if (varMax > nrow(univariate)) varMax = nrow(univariate);
if (varMax < 5) varMax = min(c(5,nrow(univariate)));
redlist <- adjPvalues < cutpvalue;
totlist <- min(sum(1*redlist),100);
cat("Unadjusted size:",sum(univariate$ZUni >= filter.z.value)," Adjusted Size:",pvarMax," Cut size:",sum(1*redlist),"\n")
# Too few survivors: fall back to the top-10 ranked candidates.
if (totlist<10)
{
redlist <- c(1:min(10,nrow(univariate)))
totlist <- length(totlist);
}
cat("\n Z: ",filter.z.value,", Features to test: ",sizeM,",Adjust Size:",featureSize,"\n");
shortUniv <- univariate[redlist,]
if (CVfolds>1)
{
if (categorizationType!="RawRaw")
{
rownames(variables) <- variables[,1];
# unirank$variableList <- variables[unique(as.character(univariate[redlist,2])),]
}
# Cross-validated selection; the CV object carries the final models.
cvObject <- crossValidationFeatureSelection_Bin(sizeM,fraction,c(pvalue,filter.p.value),loops,acovariates,Outcome,timeOutcome,NULL,data,maxTrainModelSize,type,selectionType,startOffset,elimination.bootstrap.steps,trainFraction,trainRepetition,bootstrap.steps,nk,unirank,print=print,plots=plots,lambda=lambda,equivalent=equivalent,bswimsCycles=bswimsCycles,usrFitFun,featureSize=featureSize);
firstModel <- cvObject$forwardSelection;
UpdatedModel <- cvObject$updateforwardSelection;
reducedModel <- cvObject$BSWiMS;
bootstrappedModel <- cvObject$FullBSWiMS.bootstrapped;
BSWiMS.models <- cvObject$BSWiMS.models;
}
else
{
# Single-pass BSWiMS on the short candidate list.
BSWiMS.models <- BSWiMS.model(formula=formula,data=data,type=type,testType=selectionType,pvalue=pvalue,variableList=shortUniv,size=sizeM,loops=loops,elimination.bootstrap.steps=bootstrap.steps,fraction=1.0,maxTrainModelSize=maxTrainModelSize,maxCycles=bswimsCycles,print=print,plots=plots,featureSize=featureSize,NumberofRepeats=repeats);
firstModel <- BSWiMS.models$forward.model;
UpdatedModel <- BSWiMS.models$update.model;
reducedModel <- BSWiMS.models$BSWiMS.model;
bootstrappedModel <- reducedModel$bootCV;
}
}
# ---- Residual path: Ztest-ranked univariate filter, then BSWiMS ----
if (OptType == "Residual")
{
# elimination.pValue <- pvalue; # To test if the variable is part of the model
# zIDI only supports an ordinal fit for up to 10 distinct outcome
# scores; beyond that, switch to an F-test selection.
if (testType=="zIDI")
{
if ((testType=="zIDI")&&(length(theScores)>10))
{
warning("Switching to Regresion, More than 10 scores");
testType = "Ftest";
}
else
{
cat("Doing a Ordinal Fit with zIDI Selection\n");
cat("Ordinal Fit will be stored in BSWiMS.models$oridinalModels\n");
cat("Use predict(BSWiMS.models$oridinalModels,testSet) to get the ordinal prediction on a new dataset \n");
}
}
if (length(dependent)==1)
{
if ((length(theScores)>2)||(min(data[,Outcome])<0))
{
type = "LM";
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Regression",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
if ((length(theScores)<=10)&&(testType=="zIDI"))
{
type = "LOGIT";
}
}
else
{
if (type == "LM") type = "LOGIT";
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Binary",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
}
}
else
{
unirank <- uniRankVar(variables,baseModel,Outcome,data,categorizationType,type,rankingTest="Ztest",cateGroups,raw.dataFrame,description="Description",uniType="Binary",FullAnalysis=FALSE,acovariates=acovariates,timeOutcome=timeOutcome)
}
# Same filtering arithmetic as the Binary branch above.
univariate <- unirank$orderframe;
featureSize <- nrow(univariate);
unitPvalues <- (1.0-pnorm(univariate$ZUni));
names(unitPvalues) <- univariate$Name;
adjPvalues <- p.adjust(unitPvalues,"BH");
varMax <- sum(univariate$ZUni >= filter.z.value);
if (categorizationType == "Raw")
{
gadjPvalues <- adjPvalues[adjPvalues < 2*filter.p.value]
noncornames <- correlated_Remove(data,names(gadjPvalues),thr=0.99);
if (length(noncornames) > 1) featureSize <- featureSize*length(noncornames)/length(gadjPvalues);
# cat(length(noncornames),":",length(gadjPvalues),":",length(noncornames)/length(gadjPvalues),"\n");
}
pvarMax <- sum(adjPvalues < 2*filter.p.value);
sizeM <- min(c(pvarMax,varMax));
if (sizeM < 5) sizeM = min(c(5,nrow(univariate)));
if (varMax > nrow(univariate)) varMax = nrow(univariate);
if (varMax < 5) varMax = min(c(5,nrow(univariate)));
bootstrappedModel = NULL;
redlist <- adjPvalues < cutpvalue;
totlist <- min(sum(1*redlist),100);
cat("Features to test:",sizeM," Adjusted Size:",featureSize,"\n");
if (totlist<10)
{
redlist <- c(1:min(10,nrow(univariate)))
totlist <- length(totlist);
}
cat("\n Z: ",filter.z.value," Var Max: ",featureSize,"FitType: ",type," Test Type: ",testType,"\n");
shortUniv <- univariate[redlist,]
if (CVfolds>1)
{
if (categorizationType != "RawRaw")
{
rownames(variables) <- variables[,1];
# unirank$variableList <- variables[unique(as.character(univariate[redlist,2])),]
}
cvObject <- crossValidationFeatureSelection_Res(size=sizeM,fraction=fraction,pvalue=c(pvalue,filter.p.value),loops=loops,covariates=acovariates,Outcome=Outcome,timeOutcome=timeOutcome,variableList=unirank$variableList,data=data,maxTrainModelSize=maxTrainModelSize,type=type,testType=testType,startOffset=startOffset,elimination.bootstrap.steps=elimination.bootstrap.steps,trainFraction=trainFraction,trainRepetition=trainRepetition,setIntersect=setIntersect,unirank=unirank,print=print,plots=plots,lambda=lambda,equivalent=equivalent,bswimsCycles=bswimsCycles,usrFitFun=usrFitFun,featureSize=featureSize);
firstModel <- cvObject$forwardSelection;
UpdatedModel <- cvObject$updatedforwardModel;
reducedModel <- cvObject$BSWiMS;
bootstrappedModel <- cvObject$BSWiMS$bootCV;
BSWiMS.models <- cvObject$BSWiMS.models;
}
else
{
BSWiMS.models <- BSWiMS.model(formula=formula,data=data,type=type,testType=testType,pvalue=pvalue,variableList=shortUniv,size=sizeM,loops=loops,elimination.bootstrap.steps=bootstrap.steps,fraction=1.0,maxTrainModelSize=maxTrainModelSize,maxCycles=bswimsCycles,print=print,plots=plots,featureSize=featureSize,NumberofRepeats=repeats);
firstModel <- BSWiMS.models$forward.model;
UpdatedModel <- BSWiMS.models$update.model;
reducedModel <- BSWiMS.models$BSWiMS.model;
bootstrappedModel <- reducedModel$bootCV;
}
}
}
else
{
cat("Expecting a formula object\n");
}
# ---- Result assembly ----
# When nothing survived (reducedModel NULL) return the partial results;
# otherwise optionally compute equivalent-variable and bagging reports
# before returning the full result list.
if (is.null(reducedModel))
{
result <- list(BSWiMS.model = NULL,
reducedModel = reducedModel,
univariateAnalysis=univariate,
forwardModel=firstModel,
updatedforwardModel=UpdatedModel,
bootstrappedModel=bootstrappedModel,
cvObject=cvObject,
used.variables=varMax,
# independenSize=adjsize,
call=cl);
}
else
{
eq <- NULL;
bagg <- NULL;
if ((length(reducedModel$back.model$coefficients) > 1 ) && equivalent)
{
# Bag the forward-selection formulas, keep terms that occur in at
# least 5% of the loops, and search those for equivalent variables.
collectFormulas <- BSWiMS.models$forward.selection.list;
bagg <- baggedModel(collectFormulas,data,type,Outcome,timeOutcome,univariate=univariate,useFreq=loops);
shortcan <- bagg$frequencyTable[(bagg$frequencyTable >= (loops*0.05))];
modeltems <- attr(terms(reducedModel$back.model),"term.labels");
eshortlist <- unique(c(names(shortcan),str_replace_all(modeltems,":","\\*")));
eshortlist <- eshortlist[!is.na(eshortlist)];
if (length(eshortlist)>0)
{
nameslist <- c(all.vars(BSWiMS.models$bagging$bagged.model$formula),as.character(univariate[eshortlist,2]));
nameslist <- unique(nameslist[!is.na(nameslist)]);
if (categorizationType != "RawRaw")
{
eqdata <- data[,nameslist];
}
else
{
eqdata <- data;
}
eq <- reportEquivalentVariables(reducedModel$back.model,pvalue = 0.25*pvalue,
data=eqdata,
variableList=cbind(eshortlist,eshortlist),
Outcome = Outcome,
timeOutcome=timeOutcome,
type = type,osize=featureSize,
method="BH");
}
}
result <- list(BSWiMS.model = BSWiMS.models$bagging$bagged.model,
reducedModel = reducedModel,
univariateAnalysis=univariate,
forwardModel=firstModel,
updatedforwardModel=UpdatedModel,
bootstrappedModel=bootstrappedModel,
cvObject=cvObject,
used.variables=varMax,
bagging=bagg,
eBSWiMS.model=eq,
BSWiMS.models=BSWiMS.models,
call=cl
);
}
return (result);
}
|
## Kevin McMorrow
# Collect recent tweets about country/folk/americana music from four U.S.
# metro areas and tag each row with a region label for later comparison.
setwd('Desktop')
library(rtweet)
library(httr)
library(httpuv)
library(tm)
library(wordcloud)
appname = "kevdog"
# SECURITY NOTE(review): API credentials are hard-coded in the script.
# They should be moved to environment variables and these keys revoked.
key = "MBL0H3EkRae6B9pbKN88QOZmq"
secret = "aSUxvOgwVsHuWpaPkmrn9gdTsPGsoluvBxlsRqUm60JcyaHwB6"
twitter_token = create_token(
app = appname,
consumer_key = key,
consumer_secret = secret)
q = ('country music OR folk music OR americana music') #change this
# Los Angeles (20-mile radius) -> "Southwest"
dfLA = search_tweets(q, type="recent",geocode="34.029287,-118.262078,20mi",
token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000) #LA
dfLA$region = 'Southwest'
dfLA$num = 1
# New York (20-mile radius) -> "Northeast"
dfNY = search_tweets(q, type="recent",geocode="40.7128,-74.0059,20mi",
token=twitter_token, include_rts = FALSE, usr=TRUE, n=5000) #NY
dfNY$region = 'Northeast'
dfNY$num = 2
# Atlanta (20-mile radius) -> "Southeast"
dfATL = search_tweets(q, type="recent",geocode="33.7490,-84.3880,20mi",
token=twitter_token, include_rts = FALSE, usr=TRUE, n=5000) #ATL
dfATL$region = 'Southeast'
dfATL$num = 3
# Seattle -> "Northwest". NOTE(review): the radius here is 500mi, unlike
# the 20mi used for the other three cities -- confirm this is intentional.
dfSEAT = search_tweets(q, type="recent",geocode="47.6062,-122.3321,500mi",
token=twitter_token, include_rts = FALSE, usr=TRUE, n=5000) #SEAT
dfSEAT$region = 'Northwest'
dfSEAT$num = 4
#Merge the dfs... Might be unneccesary
# Stack the four regional frames into one data set (all rows/columns kept).
n_df = Reduce(function(x, y) merge(x, y, all=TRUE), list(dfLA, dfNY, dfATL, dfSEAT))
# NOTE(review): these three are identical copies of n_df; presumably the
# script is re-run with a different query q for each genre -- confirm.
country_df = n_df
rap_df = n_df
rock_df = n_df
#Wordcloud
# Clean the tweet text, build a binary document-term matrix, and draw one
# wordcloud per region in a 2x2 grid.
x = n_df$text
# Strip non-ASCII/punctuation noise, collapse runs of spaces, and remove
# boilerplate tokens (URL fragments and the query words themselves).
x = gsub("[^A-Za-z0-9 ,.:;!?]", " ", x)
x = gsub("[ ]{2,}", " ", x)
x = gsub("https", " ", x)
x = gsub('music', " ", x)
x = gsub('country', " ", x)
x = gsub('rap', " ", x)
x = gsub('tco', " ", x)
doc = Corpus(VectorSource(x))
# Binary weighting: each term counts at most once per tweet.
dtm = DocumentTermMatrix(x=doc, control=list(removePunctuation=T, removeNumbers=T, tolower=T,
wordLengths=c(3,12), stopwords=T,
weighting= function(x) weightBin(x)))
dtm_mat = as.matrix(dtm)
word_freq = colSums(dtm_mat)
s = colSums(dtm_mat)
k = order(s, decreasing=T)
# Keep the 500 most frequent terms. NOTE(review): if fewer than 500 terms
# exist this indexing yields NA names -- confirm the corpus is large enough.
w = colnames(dtm_mat)[k][1:500] #change
w_mat = dtm_mat[, w]
p = scale(w_mat)
# k now holds each tweet's region number (1..4); one cloud per region.
k = n_df$num
opt = par(mfrow=c(2,2))
for (j in 1:4) {
if (sum(k==j)< 4) {next}
# NOTE(review): wordcloud() has no `main` argument, so the title is
# likely ignored; the print() below is what identifies each panel.
wordcloud(words=colnames(w_mat), freq=colSums(w_mat[k == j, ]),
max.words=50, main=paste("region:", n_df$region[n_df$num == j][1]))
print(n_df$region[n_df$num == j][2])
}
#SW NE
#SE NW
#-------------------------------
#Facial recognition
nrow(country_df) #1444 rows
nrow(rock_df) #2033 rows
nrow(rap_df) #5149 rows
##creates a new df of 500 randomly chosen rows
#these will be used for the facial recognition portion
# NOTE(review): the sampling is unseeded, so each run draws a different
# 500-row subset; add set.seed() if reproducibility is wanted.
rand_countrydf = country_df[sample(nrow(country_df), 500), ]
rand_rock_df = rock_df[sample(nrow(rock_df), 500), ]
rand_rapdf = rap_df[sample(nrow(rap_df), 500), ]
#-------------------------------
#Country facial recognition:
# Look up the unique authors of the sampled country tweets and run each
# profile image through the Kairos /detect face API, recording gender, age,
# and ethnicity-confidence scores for the first detected face.
u_vec = unique(rand_countrydf$screen_name)
length(u_vec)
udf = lookup_users(users=u_vec, token=twitter_token, tw=FALSE)
nrow(udf)
endpoint = "https://api.kairos.com/detect"
# SECURITY NOTE(review): Kairos credentials hard-coded; move to env vars.
app_id = "aa1cc858"
app_key = "f073ee6c5e0154294742ff1d666796a4"
# Drop "_normal" to request the full-resolution profile image.
image_url = gsub("_normal", "", udf$profile_image_url)
# Preallocated results frame; `info` records why a row was skipped.
x = data.frame(id = seq(from=1, to=nrow(udf)),
screen_name = udf$screen_name,
num_faces = rep(0, times=nrow(udf)),
gender = rep("", times=nrow(udf)),
age = rep(0, times=nrow(udf)),
maleConfidence = rep(0, times=nrow(udf)),
femaleConfidence = rep(0, times=nrow(udf)),
asian = rep(0, times=nrow(udf)),
hispanic = rep(0, times=nrow(udf)),
black = rep(0, times=nrow(udf)),
white = rep(0, times=nrow(udf)),
other = rep(0, times=nrow(udf)),
info = rep("", times=nrow(udf)),
stringsAsFactors=F)
for (j in 1:nrow(udf)) {
cat("j is", j, "\n")
json_string = sub("xxx", image_url[j], '{ "image":"xxx"}' )
# Skip any image whose URL does not end in jpg/png.
m = regexpr("[A-Za-z]{3}$", image_url[j])
ext = tolower(regmatches(image_url[j], m))
ext_test = ext %in% c("jpg", "png")
if (!ext_test) {
x$info[j] = "Bad image"
next
}
s = POST(url=endpoint,
add_headers("app_id"= app_id,
"app_key"=app_key),
content_type="application/json",
body=json_string)
# Throttle requests slightly between API calls.
Sys.sleep(0.1)
if (status_code(s) != 200) {
x$info[j] = "Not_OK"
next
}
# Very short payloads are treated as API errors (no usable face data).
if (length(httr::content(s, as="raw")) < 300) {
x$info[j] = "API error"
next
}
w = httr::content(s, as="parsed")
# Only the FIRST detected face's attributes are recorded.
# NOTE(review): if faces is empty, faces[[1]] errors -- presumably the
# size check above filters those responses; confirm.
x$num_faces[j] = length(w$images[[1]]$faces)
x$gender[j] = w$images[[1]]$faces[[1]]$attributes$gender$type
x$age[j] = w$images[[1]]$faces[[1]]$attributes$age
x$maleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$maleConfidence
x$femaleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$femaleConfidence
x$asian[j] = w$images[[1]]$faces[[1]]$attributes$asian
x$hispanic[j] = w$images[[1]]$faces[[1]]$attributes$hispanic
x$black[j] = w$images[[1]]$faces[[1]]$attributes$black
x$white[j] = w$images[[1]]$faces[[1]]$attributes$white
x$other[j] = w$images[[1]]$faces[[1]]$attributes$other
}
# Keep only rows with no skip/error marker, then reattach tweet text.
k = nchar(x$info) > 0
country_x2 = x[!k, ]
cmerge = merge(x=rand_countrydf[, c("screen_name", "text")], y=country_x2, by.x="screen_name", by.y="screen_name", all=FALSE)
write.csv(cmerge, "text_and_face.csv", row.names=F)
#----------------------------
#Rap facial recognition:
# Look up the unique authors of the sampled rap tweets and run each profile
# image through the Kairos /detect face API, recording gender, age, and
# ethnicity-confidence scores for the first detected face.
u_vec = unique(rand_rapdf$screen_name)
length(u_vec)
udf = lookup_users(users=u_vec, token=twitter_token, tw=FALSE)
nrow(udf)
endpoint = "https://api.kairos.com/detect"
# SECURITY NOTE(review): Kairos credentials hard-coded; move to env vars.
app_id = "aa1cc858"
app_key = "f073ee6c5e0154294742ff1d666796a4"
# Drop "_normal" to request the full-resolution profile image.
image_url = gsub("_normal", "", udf$profile_image_url)
# Preallocated results frame; `info` records why a row was skipped.
x = data.frame(id = seq(from=1, to=nrow(udf)),
screen_name = udf$screen_name,
num_faces = rep(0, times=nrow(udf)),
gender = rep("", times=nrow(udf)),
age = rep(0, times=nrow(udf)),
maleConfidence = rep(0, times=nrow(udf)),
femaleConfidence = rep(0, times=nrow(udf)),
asian = rep(0, times=nrow(udf)),
hispanic = rep(0, times=nrow(udf)),
black = rep(0, times=nrow(udf)),
white = rep(0, times=nrow(udf)),
other = rep(0, times=nrow(udf)),
info = rep("", times=nrow(udf)),
stringsAsFactors=F)
for (j in 1:nrow(udf)) {
cat("j is", j, "\n")
json_string = sub("xxx", image_url[j], '{ "image":"xxx"}' )
# Skip any image whose URL does not end in jpg/png.
m = regexpr("[A-Za-z]{3}$", image_url[j])
ext = tolower(regmatches(image_url[j], m))
ext_test = ext %in% c("jpg", "png")
if (!ext_test) {
x$info[j] = "Bad image"
next
}
s = POST(url=endpoint,
add_headers("app_id"= app_id,
"app_key"=app_key),
content_type="application/json",
body=json_string)
# Throttle requests slightly between API calls.
Sys.sleep(0.1)
if (status_code(s) != 200) {
x$info[j] = "Not_OK"
next
}
# Very short payloads are treated as API errors (no usable face data).
if (length(httr::content(s, as="raw")) < 300) {
x$info[j] = "API error"
next
}
w = httr::content(s, as="parsed")
# Only the FIRST detected face's attributes are recorded.
x$num_faces[j] = length(w$images[[1]]$faces)
x$gender[j] = w$images[[1]]$faces[[1]]$attributes$gender$type
x$age[j] = w$images[[1]]$faces[[1]]$attributes$age
x$maleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$maleConfidence
x$femaleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$femaleConfidence
x$asian[j] = w$images[[1]]$faces[[1]]$attributes$asian
x$hispanic[j] = w$images[[1]]$faces[[1]]$attributes$hispanic
x$black[j] = w$images[[1]]$faces[[1]]$attributes$black
x$white[j] = w$images[[1]]$faces[[1]]$attributes$white
x$other[j] = w$images[[1]]$faces[[1]]$attributes$other
}
# Keep only rows with no skip/error marker, then reattach tweet text.
k = nchar(x$info) > 0
rap_x2 = x[!k, ]
# BUG FIX: this merge previously used rand_countrydf, attaching COUNTRY
# tweet text to the rap users; merge against the rap sample instead.
rmerge = merge(x=rand_rapdf[, c("screen_name", "text")], y=rap_x2, by.x="screen_name", by.y="screen_name", all=FALSE)
# BUG FIX: write.csv previously referenced the undefined object `m2`
# (a runtime error), and the shared filename "text_and_face.csv" would
# clobber the country output; write the rap table to its own file.
write.csv(rmerge, "rap_text_and_face.csv", row.names=F)
#-----------------------
#Rock facial recognition
# Look up the unique authors of the sampled rock tweets and run each
# profile image through the Kairos /detect face API, recording gender, age,
# and ethnicity-confidence scores for the first detected face.
u_vec = unique(rand_rock_df$screen_name)
length(u_vec)
udf = lookup_users(users=u_vec, token=twitter_token, tw=FALSE)
nrow(udf)
endpoint = "https://api.kairos.com/detect"
# SECURITY NOTE(review): Kairos credentials hard-coded; move to env vars.
app_id = "aa1cc858"
app_key = "f073ee6c5e0154294742ff1d666796a4"
# Drop "_normal" to request the full-resolution profile image.
image_url = gsub("_normal", "", udf$profile_image_url)
# Preallocated results frame; `info` records why a row was skipped.
x = data.frame(id = seq(from=1, to=nrow(udf)),
screen_name = udf$screen_name,
num_faces = rep(0, times=nrow(udf)),
gender = rep("", times=nrow(udf)),
age = rep(0, times=nrow(udf)),
maleConfidence = rep(0, times=nrow(udf)),
femaleConfidence = rep(0, times=nrow(udf)),
asian = rep(0, times=nrow(udf)),
hispanic = rep(0, times=nrow(udf)),
black = rep(0, times=nrow(udf)),
white = rep(0, times=nrow(udf)),
other = rep(0, times=nrow(udf)),
info = rep("", times=nrow(udf)),
stringsAsFactors=F)
for (j in 1:nrow(udf)) {
cat("j is", j, "\n")
json_string = sub("xxx", image_url[j], '{ "image":"xxx"}' )
# Skip any image whose URL does not end in jpg/png.
m = regexpr("[A-Za-z]{3}$", image_url[j])
ext = tolower(regmatches(image_url[j], m))
ext_test = ext %in% c("jpg", "png")
if (!ext_test) {
x$info[j] = "Bad image"
next
}
s = POST(url=endpoint,
add_headers("app_id"= app_id,
"app_key"=app_key),
content_type="application/json",
body=json_string)
# Throttle requests slightly between API calls.
Sys.sleep(0.1)
if (status_code(s) != 200) {
x$info[j] = "Not_OK"
next
}
# Very short payloads are treated as API errors (no usable face data).
if (length(httr::content(s, as="raw")) < 300) {
x$info[j] = "API error"
next
}
w = httr::content(s, as="parsed")
# Only the FIRST detected face's attributes are recorded.
x$num_faces[j] = length(w$images[[1]]$faces)
x$gender[j] = w$images[[1]]$faces[[1]]$attributes$gender$type
x$age[j] = w$images[[1]]$faces[[1]]$attributes$age
x$maleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$maleConfidence
x$femaleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$femaleConfidence
x$asian[j] = w$images[[1]]$faces[[1]]$attributes$asian
x$hispanic[j] = w$images[[1]]$faces[[1]]$attributes$hispanic
x$black[j] = w$images[[1]]$faces[[1]]$attributes$black
x$white[j] = w$images[[1]]$faces[[1]]$attributes$white
x$other[j] = w$images[[1]]$faces[[1]]$attributes$other
}
# Keep only rows with no skip/error marker, then reattach tweet text.
k = nchar(x$info) > 0
rock_x2 = x[!k, ]
rrmerge = merge(x=rand_rock_df[, c("screen_name", "text")], y=rock_x2, by.x="screen_name", by.y="screen_name", all=FALSE)
# NOTE(review): this overwrites the "text_and_face.csv" written by the
# country section above -- use a distinct filename per genre.
write.csv(rrmerge, "text_and_face.csv", row.names=F)
#----------------------------
# Visual summaries of the face-recognition output: a gender histogram and
# an age histogram (as percentages) for each genre's user sample.
View(country_x2)
View(rap_x2)
View(rock_x2)
# Gender: encode M as 1, anything else as 0, and histogram the 0/1 vector.
# NOTE(review): breaks=2 gives two bars, but bar/legend color alignment
# depends on hist's chosen break points -- verify the colors match labels.
country_x2$male = (ifelse(country_x2$gender == 'M', 1, 0))
hist(x = country_x2$male, xlim=c(0,1), breaks =2, xlab = 'Gender', ylab = 'Frequency', main = 'Gender of Twitter Users (from Country Dataset)',
col = c('red','blue'))
legend(legend = c('Female','Male'), x = 0.7, y =100, lty=c(1,1), lwd = c(5,5), col = c('red','blue'))
rap_x2$male = (ifelse(rap_x2$gender == 'M', 1, 0))
hist(x = rap_x2$male, xlim=c(0,1), breaks =2, xlab = 'Gender', ylab = 'Frequency', main = 'Gender of Twitter Users (from Rap/Hip-Hop Dataset)',
col = c('red','blue'))
legend(legend = c('Female','Male'), x = 0.1, y =40, lty=c(1,1), lwd = c(5,5), col = c('red','blue'))
rock_x2$male = (ifelse(rock_x2$gender == 'M', 1, 0))
hist(x = rock_x2$male, xlim=c(0,1), breaks =2, xlab = 'Gender', ylab = 'Frequency', main = 'Gender of Twitter Users (from Rock Dataset)',
col = c('red','blue'))
legend(legend = c('Female','Male'), x = 0.1, y =65, lty=c(1,1), lwd = c(5,5), col = c('red','blue'))
# Age: flag under-30 users, then re-plot each histogram with bar heights
# converted from counts to percentages of the sample.
country_x2$young = (ifelse(country_x2$age <30, 1,0))
sum(country_x2$young)
l = hist(x = country_x2$age, xlab = 'User Age', main = 'Twitter User Ages (from Country Dataset)')
l$density = l$counts/sum(l$counts)*100
plot(l, freq = FALSE, main = 'Twitter User Ages (from Country Dataset)', ylab = 'Percentage', xlab='User Age')
rap_x2$young = (ifelse(rap_x2$age <30, 1,0))
sum(rap_x2$young)
z = hist(x = rap_x2$age, xlab = 'User Age', main = 'Twitter User Ages (from Rap Dataset)')
z$density = z$counts/sum(z$counts)*100
plot(z, freq = FALSE, main = 'Twitter User Ages (from Rap Dataset)', ylab = 'Percentage', xlab = 'User Age')
rock_x2$young = (ifelse(rock_x2$age <30, 1,0))
sum(rock_x2$young)
z = hist(x = rock_x2$age, xlab = 'User Age', main = 'Twitter User Ages (from Rock Dataset)')
z$density = z$counts/sum(z$counts)*100
plot(z, freq = FALSE, main = 'Twitter User Ages (from Rock Dataset)', ylab = 'Percentage', xlab = 'User Age')
# ---- Ethnicity breakdown per genre ----
# An ethnicity-confidence score > 0.5 is treated as a positive call; each
# genre then gets a 2x5 matrix of ethnicity labels over counts.
# BUG FIX: the original filled the country and rock matrices with
# hand-typed counts (9/18/18/119/0 and 6/10/26/76/2) that silently go
# stale whenever the samples are redrawn; every matrix is now built from
# the computed sums via rbind(names, sums) -- the approach the original
# itself labelled "the proper way" in the rap section.
ethnicity_names = c('Asian','Hispanic','Black','White','Other')
# Country
country_x2$asian1 = ifelse(country_x2$asian > .5, 1, 0)
country_x2$hispanic1 = ifelse(country_x2$hispanic > .5, 1, 0)
country_x2$black1 = ifelse(country_x2$black > .5, 1, 0)
country_x2$white1 = ifelse(country_x2$white > .5, 1, 0)
country_x2$other1 = ifelse(country_x2$other > .5, 1, 0)
hist(x = c(country_x2$asian1,country_x2$hispanic1,country_x2$black1,country_x2$white1,country_x2$other1))
names = ethnicity_names
sums = c(sum(country_x2$asian1), sum(country_x2$hispanic1), sum(country_x2$black1), sum(country_x2$white1), sum(country_x2$other1))
b = rbind(matrix(names, ncol = 5), matrix(sums, ncol = 5))
b
#-------------------------------------
# Rock
rock_x2$asian1 = ifelse(rock_x2$asian > .5, 1, 0)
rock_x2$hispanic1 = ifelse(rock_x2$hispanic > .5, 1, 0)
rock_x2$black1 = ifelse(rock_x2$black > .5, 1, 0)
rock_x2$white1 = ifelse(rock_x2$white > .5, 1, 0)
rock_x2$other1 = ifelse(rock_x2$other > .5, 1, 0)
sums = c(sum(rock_x2$asian1), sum(rock_x2$hispanic1), sum(rock_x2$black1), sum(rock_x2$white1), sum(rock_x2$other1))
sums
names = ethnicity_names
k = rbind(matrix(names, ncol = 5), matrix(sums, ncol = 5))
k
#-----------------------
# Rap
rap_x2$asian1 = ifelse(rap_x2$asian > .5, 1, 0)
rap_x2$hispanic1 = ifelse(rap_x2$hispanic > .5, 1, 0)
rap_x2$black1 = ifelse(rap_x2$black > .5, 1, 0)
rap_x2$white1 = ifelse(rap_x2$white > .5, 1, 0)
rap_x2$other1 = ifelse(rap_x2$other > .5, 1, 0)
names = ethnicity_names
sums = c(sum(rap_x2$asian1), sum(rap_x2$hispanic1), sum(rap_x2$black1), sum(rap_x2$white1), sum(rap_x2$other1))
k = matrix(names, ncol=5)
g = matrix(sums, ncol=5)
k = rbind(k, g)
k
#-----------------------------------------
#Create a dataframe for each artist -- check out the public reception (use pitchfork/needledrop as a keyword??)
# Pull recent tweets mentioning each artist's album, then count how many
# tweets about the Slowdive album contain positive words.
y = 'slowdive album'
s_df = search_tweets(y, type="recent",
token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
yy = 'logic album'
l_df = search_tweets(yy, type="recent",
token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
xy = 'gorillaz album'
g_df = search_tweets(xy, type="recent",
token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
zy = 'harry styles album'
h_df = search_tweets(zy, type="recent",
token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
ky = 'kendrick lamar album'
k_df = search_tweets(ky, type="recent",
token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
#albums in play...
View(g_df) #gorillaz
View(s_df) #slowdive
View(l_df) #logic
View(h_df) #harry styles
# NOTE(review): bwords is defined but never used below -- presumably a
# matching negative-word loop was planned; confirm.
gwords = c('good','incredible','amazing','awesome','best','excellent','strong') #words w/ good connotation
bwords = c('bad','horrible','terrible','awful','worst', 'weak', 'bland') #words w/ bad conn.
counter = 0
#returns the table of words, and total sum
for (s in gwords) {
print(table(grepl(s, s_df$text, ignore.case=T)))
counter = counter + sum((grepl(s, s_df$text, ignore.case=T)))
}
counter
# NOTE(review): a tweet containing several positive words is counted once
# per word, so w_percent can exceed 100; substring matches (e.g. "best"
# inside another word) are also counted.
w_percent = (counter/(nrow(s_df)))*100
w_percent
| /MusicR_Project.R | no_license | kmcmorrow1/QAC211-Twitter-Music-Reception | R | false | false | 16,153 | r | ## Kevin McMorrow
# Setup for the genre-comparison tweet-mining session.
# NOTE(review): setwd() to a machine-specific path and API credentials
# committed in plain text -- both should move out of the script (e.g. into
# environment variables) before sharing.
setwd('Desktop')
library(rtweet)
library(httr)
library(httpuv)
library(tm)
library(wordcloud)
appname = "kevdog"
key = "MBL0H3EkRae6B9pbKN88QOZmq"
secret = "aSUxvOgwVsHuWpaPkmrn9gdTsPGsoluvBxlsRqUm60JcyaHwB6"
# OAuth token used by every search_tweets()/lookup_users() call below.
twitter_token = create_token(
  app = appname,
  consumer_key = key,
  consumer_secret = secret)
q = ('country music OR folk music OR americana music') #change this
# Pull recent tweets matching the genre query around four metro areas, one
# per (rough) US region; `num` is a numeric region id used for facetting.
dfLA = search_tweets(q, type="recent",geocode="34.029287,-118.262078,20mi",
                     token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000) #LA
dfLA$region = 'Southwest'
dfLA$num = 1
dfNY = search_tweets(q, type="recent",geocode="40.7128,-74.0059,20mi",
                     token=twitter_token, include_rts = FALSE, usr=TRUE, n=5000) #NY
dfNY$region = 'Northeast'
dfNY$num = 2
dfATL = search_tweets(q, type="recent",geocode="33.7490,-84.3880,20mi",
                      token=twitter_token, include_rts = FALSE, usr=TRUE, n=5000) #ATL
dfATL$region = 'Southeast'
dfATL$num = 3
dfSEAT = search_tweets(q, type="recent",geocode="47.6062,-122.3321,500mi",
                       token=twitter_token, include_rts = FALSE, usr=TRUE, n=5000) #SEAT
dfSEAT$region = 'Northwest'
dfSEAT$num = 4
#Merge the dfs... Might be unneccesary
n_df = Reduce(function(x, y) merge(x, y, all=TRUE), list(dfLA, dfNY, dfATL, dfSEAT))
# NOTE(review): all three genre frames are copies of the same pull; the
# query `q` above is edited by hand ("#change this") and the script re-run
# per genre, so only the copy matching the current q is meaningful.
country_df = n_df
rap_df = n_df
rock_df = n_df
#Wordcloud
# Clean the tweet text: drop non-ASCII noise, collapse repeated spaces, and
# blank out URL fragments ("https", "tco") plus the query words themselves
# so they don't dominate the clouds.
x = n_df$text
x = gsub("[^A-Za-z0-9 ,.:;!?]", " ", x)
x = gsub("[ ]{2,}", " ", x)
x = gsub("https", " ", x)
x = gsub('music', " ", x)
x = gsub('country', " ", x)
x = gsub('rap', " ", x)
x = gsub('tco', " ", x)
doc = Corpus(VectorSource(x))
# Binary document-term matrix (weightBin): each term counted at most once
# per tweet; 3-12 letter words, stopwords removed.
dtm = DocumentTermMatrix(x=doc, control=list(removePunctuation=T, removeNumbers=T, tolower=T,
                                             wordLengths=c(3,12), stopwords=T,
                                             weighting= function(x) weightBin(x)))
dtm_mat = as.matrix(dtm)
word_freq = colSums(dtm_mat)
s = colSums(dtm_mat)
k = order(s, decreasing=T)
# Keep the 500 most frequent terms.
w = colnames(dtm_mat)[k][1:500] #change
w_mat = dtm_mat[, w]
p = scale(w_mat)
# k is reused here as the per-tweet region id (1..4).
k = n_df$num
opt = par(mfrow=c(2,2))
# One wordcloud per region (2x2 panel), skipping regions with < 4 tweets.
for (j in 1:4) {
  if (sum(k==j)< 4) {next}
  wordcloud(words=colnames(w_mat), freq=colSums(w_mat[k == j, ]),
            max.words=50, main=paste("region:", n_df$region[n_df$num == j][1]))
  print(n_df$region[n_df$num == j][2])
}
#SW NE
#SE NW
#-------------------------------
#Facial recognition
# The inline row counts below record what nrow() returned, presumably in an
# earlier session.
nrow(country_df) #1444 rows
nrow(rock_df) #2033 rows
nrow(rap_df) #5149 rows
##creates a new df of 500 randomly chosen rows
#these will be used for the facial recognition portion
# NOTE(review): sample() is not seeded, so these draws differ between runs.
rand_countrydf = country_df[sample(nrow(country_df), 500), ]
rand_rock_df = rock_df[sample(nrow(rock_df), 500), ]
rand_rapdf = rap_df[sample(nrow(rap_df), 500), ]
#-------------------------------
#Country facial recognition:
# Look up the sampled users' profiles and send each avatar to the Kairos
# face-detection API, collecting gender/age/ethnicity estimates per user.
u_vec = unique(rand_countrydf$screen_name)
length(u_vec)
udf = lookup_users(users=u_vec, token=twitter_token, tw=FALSE)
nrow(udf)
endpoint = "https://api.kairos.com/detect"
app_id = "aa1cc858"
app_key = "f073ee6c5e0154294742ff1d666796a4"
# Strip the "_normal" suffix (presumably to request the full-size avatar).
image_url = gsub("_normal", "", udf$profile_image_url)
# One pre-allocated result row per user; a non-empty `info` marks a failed
# lookup and the row is dropped after the loop.
x = data.frame(id = seq(from=1, to=nrow(udf)),
               screen_name = udf$screen_name,
               num_faces = rep(0, times=nrow(udf)),
               gender = rep("", times=nrow(udf)),
               age = rep(0, times=nrow(udf)),
               maleConfidence = rep(0, times=nrow(udf)),
               femaleConfidence = rep(0, times=nrow(udf)),
               asian = rep(0, times=nrow(udf)),
               hispanic = rep(0, times=nrow(udf)),
               black = rep(0, times=nrow(udf)),
               white = rep(0, times=nrow(udf)),
               other = rep(0, times=nrow(udf)),
               info = rep("", times=nrow(udf)),
               stringsAsFactors=F)
for (j in 1:nrow(udf)) {
  cat("j is", j, "\n")
  # Splice this user's image URL into the JSON request template.
  json_string = sub("xxx", image_url[j], '{ "image":"xxx"}' )
  # Submit only jpg/png avatars; skip anything else.
  m = regexpr("[A-Za-z]{3}$", image_url[j])
  ext = tolower(regmatches(image_url[j], m))
  ext_test = ext %in% c("jpg", "png")
  if (!ext_test) {
    x$info[j] = "Bad image"
    next
  }
  s = POST(url=endpoint,
           add_headers("app_id"= app_id,
                       "app_key"=app_key),
           content_type="application/json",
           body=json_string)
  # Short pause between requests (presumably to respect the API rate limit).
  Sys.sleep(0.1)
  if (status_code(s) != 200) {
    x$info[j] = "Not_OK"
    next
  }
  # Responses under 300 raw bytes are treated as API errors.
  if (length(httr::content(s, as="raw")) < 300) {
    x$info[j] = "API error"
    next
  }
  # Record the attributes of the first detected face only.
  w = httr::content(s, as="parsed")
  x$num_faces[j] = length(w$images[[1]]$faces)
  x$gender[j] = w$images[[1]]$faces[[1]]$attributes$gender$type
  x$age[j] = w$images[[1]]$faces[[1]]$attributes$age
  x$maleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$maleConfidence
  x$femaleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$femaleConfidence
  x$asian[j] = w$images[[1]]$faces[[1]]$attributes$asian
  x$hispanic[j] = w$images[[1]]$faces[[1]]$attributes$hispanic
  x$black[j] = w$images[[1]]$faces[[1]]$attributes$black
  x$white[j] = w$images[[1]]$faces[[1]]$attributes$white
  x$other[j] = w$images[[1]]$faces[[1]]$attributes$other
}
# Keep only users whose lookup succeeded and attach their tweet text.
k = nchar(x$info) > 0
country_x2 = x[!k, ]
cmerge = merge(x=rand_countrydf[, c("screen_name", "text")], y=country_x2, by.x="screen_name", by.y="screen_name", all=FALSE)
write.csv(cmerge, "text_and_face.csv", row.names=F)
#----------------------------
#Rap facial recognition:
# Same Kairos face-detection pipeline as the country section, run on the
# users sampled from the rap/hip-hop tweet pull (rand_rapdf): look up each
# user's profile, submit the avatar image, record gender/age/ethnicity.
u_vec = unique(rand_rapdf$screen_name)
length(u_vec)
udf = lookup_users(users=u_vec, token=twitter_token, tw=FALSE)
nrow(udf)
endpoint = "https://api.kairos.com/detect"
app_id = "aa1cc858"
app_key = "f073ee6c5e0154294742ff1d666796a4"
# Strip the "_normal" suffix (presumably to request the full-size avatar).
image_url = gsub("_normal", "", udf$profile_image_url)
# One pre-allocated result row per user; a non-empty `info` marks a failed
# lookup and the row is dropped after the loop.
x = data.frame(id = seq(from=1, to=nrow(udf)),
               screen_name = udf$screen_name,
               num_faces = rep(0, times=nrow(udf)),
               gender = rep("", times=nrow(udf)),
               age = rep(0, times=nrow(udf)),
               maleConfidence = rep(0, times=nrow(udf)),
               femaleConfidence = rep(0, times=nrow(udf)),
               asian = rep(0, times=nrow(udf)),
               hispanic = rep(0, times=nrow(udf)),
               black = rep(0, times=nrow(udf)),
               white = rep(0, times=nrow(udf)),
               other = rep(0, times=nrow(udf)),
               info = rep("", times=nrow(udf)),
               stringsAsFactors=F)
for (j in 1:nrow(udf)) {
  cat("j is", j, "\n")
  # Splice this user's image URL into the JSON request template.
  json_string = sub("xxx", image_url[j], '{ "image":"xxx"}' )
  # Submit only jpg/png avatars; skip anything else.
  m = regexpr("[A-Za-z]{3}$", image_url[j])
  ext = tolower(regmatches(image_url[j], m))
  ext_test = ext %in% c("jpg", "png")
  if (!ext_test) {
    x$info[j] = "Bad image"
    next
  }
  s = POST(url=endpoint,
           add_headers("app_id"= app_id,
                       "app_key"=app_key),
           content_type="application/json",
           body=json_string)
  # Short pause between requests (presumably to respect the API rate limit).
  Sys.sleep(0.1)
  if (status_code(s) != 200) {
    x$info[j] = "Not_OK"
    next
  }
  # Responses under 300 raw bytes are treated as API errors.
  if (length(httr::content(s, as="raw")) < 300) {
    x$info[j] = "API error"
    next
  }
  # Record the attributes of the first detected face only.
  w = httr::content(s, as="parsed")
  x$num_faces[j] = length(w$images[[1]]$faces)
  x$gender[j] = w$images[[1]]$faces[[1]]$attributes$gender$type
  x$age[j] = w$images[[1]]$faces[[1]]$attributes$age
  x$maleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$maleConfidence
  x$femaleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$femaleConfidence
  x$asian[j] = w$images[[1]]$faces[[1]]$attributes$asian
  x$hispanic[j] = w$images[[1]]$faces[[1]]$attributes$hispanic
  x$black[j] = w$images[[1]]$faces[[1]]$attributes$black
  x$white[j] = w$images[[1]]$faces[[1]]$attributes$white
  x$other[j] = w$images[[1]]$faces[[1]]$attributes$other
}
# Keep only users whose lookup succeeded and attach their tweet text.
k = nchar(x$info) > 0
rap_x2 = x[!k, ]
# FIX: the original merged rand_countrydf here (copy-paste from the country
# section), pairing rap face results with country tweets; merge the rap
# sample instead.
rmerge = merge(x=rand_rapdf[, c("screen_name", "text")], y=rap_x2, by.x="screen_name", by.y="screen_name", all=FALSE)
# FIX: the original wrote the undefined object `m2` (a runtime error) and
# reused the country section's filename; write `rmerge` to a rap-specific
# file so the country output is not overwritten.
write.csv(rmerge, "rap_text_and_face.csv", row.names=F)
#-----------------------
#Rock facial recognition
# Same Kairos face-detection pipeline as the country section, run on the
# users sampled from the rock tweet pull (rand_rock_df).
u_vec = unique(rand_rock_df$screen_name)
length(u_vec)
udf = lookup_users(users=u_vec, token=twitter_token, tw=FALSE)
nrow(udf)
endpoint = "https://api.kairos.com/detect"
app_id = "aa1cc858"
app_key = "f073ee6c5e0154294742ff1d666796a4"
image_url = gsub("_normal", "", udf$profile_image_url)
# One pre-allocated result row per user; a non-empty `info` marks a failure.
x = data.frame(id = seq(from=1, to=nrow(udf)),
               screen_name = udf$screen_name,
               num_faces = rep(0, times=nrow(udf)),
               gender = rep("", times=nrow(udf)),
               age = rep(0, times=nrow(udf)),
               maleConfidence = rep(0, times=nrow(udf)),
               femaleConfidence = rep(0, times=nrow(udf)),
               asian = rep(0, times=nrow(udf)),
               hispanic = rep(0, times=nrow(udf)),
               black = rep(0, times=nrow(udf)),
               white = rep(0, times=nrow(udf)),
               other = rep(0, times=nrow(udf)),
               info = rep("", times=nrow(udf)),
               stringsAsFactors=F)
for (j in 1:nrow(udf)) {
  cat("j is", j, "\n")
  json_string = sub("xxx", image_url[j], '{ "image":"xxx"}' )
  # Submit only jpg/png avatars; skip anything else.
  m = regexpr("[A-Za-z]{3}$", image_url[j])
  ext = tolower(regmatches(image_url[j], m))
  ext_test = ext %in% c("jpg", "png")
  if (!ext_test) {
    x$info[j] = "Bad image"
    next
  }
  s = POST(url=endpoint,
           add_headers("app_id"= app_id,
                       "app_key"=app_key),
           content_type="application/json",
           body=json_string)
  Sys.sleep(0.1)
  if (status_code(s) != 200) {
    x$info[j] = "Not_OK"
    next
  }
  # Responses under 300 raw bytes are treated as API errors.
  if (length(httr::content(s, as="raw")) < 300) {
    x$info[j] = "API error"
    next
  }
  # Record the attributes of the first detected face only.
  w = httr::content(s, as="parsed")
  x$num_faces[j] = length(w$images[[1]]$faces)
  x$gender[j] = w$images[[1]]$faces[[1]]$attributes$gender$type
  x$age[j] = w$images[[1]]$faces[[1]]$attributes$age
  x$maleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$maleConfidence
  x$femaleConfidence[j] = w$images[[1]]$faces[[1]]$attributes$gender$femaleConfidence
  x$asian[j] = w$images[[1]]$faces[[1]]$attributes$asian
  x$hispanic[j] = w$images[[1]]$faces[[1]]$attributes$hispanic
  x$black[j] = w$images[[1]]$faces[[1]]$attributes$black
  x$white[j] = w$images[[1]]$faces[[1]]$attributes$white
  x$other[j] = w$images[[1]]$faces[[1]]$attributes$other
}
# Keep only users whose lookup succeeded and attach their tweet text.
k = nchar(x$info) > 0
rock_x2 = x[!k, ]
rrmerge = merge(x=rand_rock_df[, c("screen_name", "text")], y=rock_x2, by.x="screen_name", by.y="screen_name", all=FALSE)
# NOTE(review): this reuses the country section's filename, so it
# overwrites the "text_and_face.csv" written above.
write.csv(rrmerge, "text_and_face.csv", row.names=F)
#----------------------------
View(country_x2)
View(rap_x2)
View(rock_x2)
# Gender: 1 = male, 0 = female; plotted as a two-bin histogram per genre.
country_x2$male = (ifelse(country_x2$gender == 'M', 1, 0))
hist(x = country_x2$male, xlim=c(0,1), breaks =2, xlab = 'Gender', ylab = 'Frequency', main = 'Gender of Twitter Users (from Country Dataset)',
     col = c('red','blue'))
legend(legend = c('Female','Male'), x = 0.7, y =100, lty=c(1,1), lwd = c(5,5), col = c('red','blue'))
rap_x2$male = (ifelse(rap_x2$gender == 'M', 1, 0))
hist(x = rap_x2$male, xlim=c(0,1), breaks =2, xlab = 'Gender', ylab = 'Frequency', main = 'Gender of Twitter Users (from Rap/Hip-Hop Dataset)',
     col = c('red','blue'))
legend(legend = c('Female','Male'), x = 0.1, y =40, lty=c(1,1), lwd = c(5,5), col = c('red','blue'))
rock_x2$male = (ifelse(rock_x2$gender == 'M', 1, 0))
hist(x = rock_x2$male, xlim=c(0,1), breaks =2, xlab = 'Gender', ylab = 'Frequency', main = 'Gender of Twitter Users (from Rock Dataset)',
     col = c('red','blue'))
legend(legend = c('Female','Male'), x = 0.1, y =65, lty=c(1,1), lwd = c(5,5), col = c('red','blue'))
# Age: flag under-30 users, then re-plot each age histogram with the counts
# rescaled to percentages via the density slot.
country_x2$young = (ifelse(country_x2$age <30, 1,0))
sum(country_x2$young)
l = hist(x = country_x2$age, xlab = 'User Age', main = 'Twitter User Ages (from Country Dataset)')
l$density = l$counts/sum(l$counts)*100
plot(l, freq = FALSE, main = 'Twitter User Ages (from Country Dataset)', ylab = 'Percentage', xlab='User Age')
rap_x2$young = (ifelse(rap_x2$age <30, 1,0))
sum(rap_x2$young)
z = hist(x = rap_x2$age, xlab = 'User Age', main = 'Twitter User Ages (from Rap Dataset)')
z$density = z$counts/sum(z$counts)*100
plot(z, freq = FALSE, main = 'Twitter User Ages (from Rap Dataset)', ylab = 'Percentage', xlab = 'User Age')
rock_x2$young = (ifelse(rock_x2$age <30, 1,0))
sum(rock_x2$young)
z = hist(x = rock_x2$age, xlab = 'User Age', main = 'Twitter User Ages (from Rock Dataset)')
z$density = z$counts/sum(z$counts)*100
plot(z, freq = FALSE, main = 'Twitter User Ages (from Rock Dataset)', ylab = 'Percentage', xlab = 'User Age')
# Country dataset ethnicity: binarize each Kairos confidence at 0.5.
country_x2$asian1 = ifelse(country_x2$asian > .5, 1, 0)
country_x2$hispanic1 = ifelse(country_x2$hispanic > .5, 1, 0)
country_x2$black1 = ifelse(country_x2$black > .5, 1, 0)
country_x2$white1 = ifelse(country_x2$white > .5, 1, 0)
country_x2$other1 = ifelse(country_x2$other > .5, 1, 0)
hist(x = c(country_x2$asian1,country_x2$hispanic1,country_x2$black1,country_x2$white1,country_x2$other1))
names = c('asian','hispanic','black', 'white','other')
sums = c(sum(country_x2$asian1), sum(country_x2$hispanic1), sum(country_x2$black1), sum(country_x2$white1), sum(country_x2$other1))
# NOTE(review): table(names, sums) cross-tabulates labels against counts;
# probably not the intended label -> count summary (see the rbind version
# used for the rap data).
m = table(names,sums)
m
# matrix() fills column-wise, so labels and counts interleave; the patches
# below rebuild row 1 as labels and row 2 as hard-coded counts.
# NOTE(review): b[,2] is never patched, so row 1 ends up ordered Asian,
# Black, Hispanic, White, Other while the hard-coded counts (9, 18, 18,
# 119, 0) were presumably entered in the Asian, Hispanic, Black, ... order
# -- verify which count belongs to which label.
b = matrix(c('Asian','Hispanic','Black','White','Other',sum(country_x2$asian1),sum(country_x2$hispanic1),
             sum(country_x2$black1),sum(country_x2$white1), sum(country_x2$other1)), nrow = 2, ncol = 5)
b[,3] = 'Hispanic'
b[,4] = 'White'
b[,5]= 'Other'
b[2,] = 9
b[2,2] = 18
b[2,3] = 18
b[2,4] = 119
b[2,5]= 0
b
#-------------------------------------
# Rock dataset ethnicity: same 0.5-threshold binarization and tally.
rock_x2$asian1 = ifelse(rock_x2$asian > .5, 1, 0)
rock_x2$hispanic1 = ifelse(rock_x2$hispanic > .5, 1, 0)
rock_x2$black1 = ifelse(rock_x2$black > .5, 1, 0)
rock_x2$white1 = ifelse(rock_x2$white > .5, 1, 0)
rock_x2$other1 = ifelse(rock_x2$other > .5, 1, 0)
sums = c(sum(rock_x2$asian1), sum(rock_x2$hispanic1), sum(rock_x2$black1), sum(rock_x2$white1), sum(rock_x2$other1))
sums
names = c('Asian','Hispanic','Black','White','Other')
# Column-wise fill interleaves labels and counts; the assignments below
# patch row 1 to labels and row 2 to counts.
# NOTE(review): the counts (6, 10, 26, 76, 2) are hard-coded rather than
# taken from `sums` -- verify they still match the sampled data.
k = matrix(c('Asian','Hispanic','Black','White','Other',6, 10, 26, 76,2), ncol=5, nrow = 2)
k[,2]='Hispanic'
k[,3]='Black'
k[,4]='White'
k[,5]='Other'
k[2,1]= 6
k[2,2]= 10
k[2,3]=26
k[2,4]=76
k[2,5]=2
k
#-----------------------
# Rap dataset ethnicity: built cleanly with rbind (labels row, counts row).
rap_x2$asian1 = ifelse(rap_x2$asian > .5, 1, 0)
rap_x2$hispanic1 = ifelse(rap_x2$hispanic > .5, 1, 0)
rap_x2$black1 = ifelse(rap_x2$black > .5, 1, 0)
rap_x2$white1 = ifelse(rap_x2$white > .5, 1, 0)
rap_x2$other1 = ifelse(rap_x2$other > .5, 1, 0)
names = c('Asian','Hispanic','Black','White','Other')
sums = c(sum(rap_x2$asian1), sum(rap_x2$hispanic1), sum(rap_x2$black1), sum(rap_x2$white1), sum(rap_x2$other1))
k=matrix(names,ncol=5)
k
g = matrix(sums, ncol=5)
#**this is the proper way...**
k = rbind(k,g)
k
#-----------------------------------------
#Create a dataframe for each artist -- check out the public reception (use pitchfork/needledrop as a keyword??)
# Pull recent tweets mentioning each artist's album release.
y = 'slowdive album'
s_df = search_tweets(y, type="recent",
                     token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
yy = 'logic album'
l_df = search_tweets(yy, type="recent",
                     token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
xy = 'gorillaz album'
g_df = search_tweets(xy, type="recent",
                     token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
zy = 'harry styles album'
h_df = search_tweets(zy, type="recent",
                     token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
ky = 'kendrick lamar album'
k_df = search_tweets(ky, type="recent",
                     token=twitter_token,include_rts = FALSE, usr=TRUE, n=5000)
#albums in play...
View(g_df) #gorillaz
View(s_df) #slowdive
View(l_df) #logic
View(h_df) #harry styles
gwords = c('good','incredible','amazing','awesome','best','excellent','strong') #words w/ good connotation
bwords = c('bad','horrible','terrible','awful','worst', 'weak', 'bland') #words w/ bad conn.
counter = 0
#returns the table of words, and total sum
# NOTE(review): a tweet with several gwords is counted once per word, so
# w_percent can exceed 100; bwords is unused in this loop.
for (s in gwords) {
  print(table(grepl(s, s_df$text, ignore.case=T)))
  counter = counter + sum((grepl(s, s_df$text, ignore.case=T)))
}
counter
w_percent = (counter/(nrow(s_df)))*100
w_percent
|
# Differential expression of Durio zibethinus (durian) RNA-seq: Musang King
# vs Monthong cultivars, from per-sample HTSeq count files.
library("BiocManager")
library("DESeq2")
library("BiocParallel")
library("vsn")
library("pheatmap")
library("RColorBrewer")
library("ggplot2")
library("ggrepel")
library("EnhancedVolcano")
# Parallel backend used by DESeq2 (12 workers).
register(MulticoreParam(12))
dir <- "~/Genome-Analysis/data/differential_expression/count"
# One HTSeq count file per sample, matched by the "count" substring.
sampleFiles <- grep("count",list.files(dir),value=TRUE)
sampleCondition <-c("Musang", "Musang", "Musang", "Musang", "Musang", "Monthong", "Monthong", "Monthong")
# NOTE(review): the long sampleName vector below is dead -- it is
# immediately overwritten by the short tissue labels on the next line.
sampleName <- c("Durio zibethinus Musang King: leaf", "Durio zibethinus Musang King: root", "Durio zibethinus Musang King: aril 2", "Durio zibethinus Musang King: stem", "Durio zibethinus Musang King: aril 3", "Durio zibethinus Monthong: aril 2", "Durio zibethinus Monthong: aril 3", "Durio zibethinus Monthong: aril 1")
sampleName <- c("leaf", "root", "aril 2", "stem", "aril 3", "aril 2", "aril 3", "aril 1")
# NOTE(review): `sampleType` is never defined in this script, so this
# data.frame() call errors at runtime; a vector must be assigned to
# sampleType before this line -- the "type" column is used by the heatmap
# and PCA code below.
sampleTable <- data.frame(sampleName = sampleName,
                          fileName = sampleFiles,
                          condition = sampleCondition,
                          type = sampleType)
sampleTable$condition <- factor(sampleTable$condition)
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       directory = dir,
                                       design= ~ condition)
# Drop genes with fewer than 10 reads summed over all samples.
keep <- rowSums(counts(ddsHTSeq)) >= 10
ddsHTSeq <- ddsHTSeq[keep,]
dds <- DESeq(ddsHTSeq)
res <- results(dds)
plotMA(res, ylim=c(-10,10))
# log2(n + 1) transform used for the mean-sd diagnostic and count heatmap.
ntd <- normTransform(dds)
meanSdPlot(assay(ntd))
# Heatmap of the 20 genes with the highest mean normalized count.
select <- order(rowMeans(counts(dds,normalized=TRUE)),
                decreasing=TRUE)[1:20]
df <- as.data.frame(colData(dds)[,c("condition", "type")])
pheatmap(assay(ntd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
         cluster_cols=FALSE, annotation_col=df)
# Variance-stabilizing and rlog transforms (NOTE(review): rld is computed
# but not used below).
vsd <- vst(dds, blind=FALSE)
rld <- rlog(dds, blind=FALSE)
# Sample-to-sample Euclidean distances on the VST data.
sampleDists <- dist(t(assay(vsd)))
sampleDistMatrix <- as.matrix(sampleDists)
rownames(sampleDistMatrix) <- paste(vsd$condition, vsd$type, sep="-")
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
pheatmap(sampleDistMatrix,
         clustering_distance_rows=sampleDists,
         clustering_distance_cols=sampleDists,
         col=colors)
# PCA of the samples, colored by cultivar and shaped by tissue type.
plotPCA(vsd, intgroup=c("condition", "type"))
pcaData <- plotPCA(vsd, intgroup=c("condition", "type"), returnData=TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
ggplot(pcaData, aes(PC1, PC2, color=condition, shape=type)) +
  geom_point(size=3) +
  xlab(paste0("PC1: ",percentVar[1],"% variance")) +
  ylab(paste0("PC2: ",percentVar[2],"% variance")) +
  coord_fixed()
EnhancedVolcano(res, lab = rownames(res), x = 'log2FoldChange', y = 'pvalue', xlim = c(-5, 10))
| /code/DEseq.R | no_license | IG-AI/Genome-Analysis-Durio-Zibethinus | R | false | false | 2,836 | r | library("BiocManager")
# Duplicate copy of the durian DESeq2 differential-expression script (see
# the copy above); comments added, code unchanged.
library("DESeq2")
library("BiocParallel")
library("vsn")
library("pheatmap")
library("RColorBrewer")
library("ggplot2")
library("ggrepel")
library("EnhancedVolcano")
register(MulticoreParam(12))
dir <- "~/Genome-Analysis/data/differential_expression/count"
sampleFiles <- grep("count",list.files(dir),value=TRUE)
sampleCondition <-c("Musang", "Musang", "Musang", "Musang", "Musang", "Monthong", "Monthong", "Monthong")
# Dead assignment -- overwritten by the short tissue labels below.
sampleName <- c("Durio zibethinus Musang King: leaf", "Durio zibethinus Musang King: root", "Durio zibethinus Musang King: aril 2", "Durio zibethinus Musang King: stem", "Durio zibethinus Musang King: aril 3", "Durio zibethinus Monthong: aril 2", "Durio zibethinus Monthong: aril 3", "Durio zibethinus Monthong: aril 1")
sampleName <- c("leaf", "root", "aril 2", "stem", "aril 3", "aril 2", "aril 3", "aril 1")
# NOTE(review): `sampleType` is undefined here too; this call errors until
# sampleType is assigned.
sampleTable <- data.frame(sampleName = sampleName,
                          fileName = sampleFiles,
                          condition = sampleCondition,
                          type = sampleType)
sampleTable$condition <- factor(sampleTable$condition)
ddsHTSeq <- DESeqDataSetFromHTSeqCount(sampleTable = sampleTable,
                                       directory = dir,
                                       design= ~ condition)
# Keep genes with at least 10 reads in total.
keep <- rowSums(counts(ddsHTSeq)) >= 10
ddsHTSeq <- ddsHTSeq[keep,]
dds <- DESeq(ddsHTSeq)
res <- results(dds)
plotMA(res, ylim=c(-10,10))
ntd <- normTransform(dds)
meanSdPlot(assay(ntd))
# Top-20 genes by mean normalized count for the heatmap.
select <- order(rowMeans(counts(dds,normalized=TRUE)),
                decreasing=TRUE)[1:20]
df <- as.data.frame(colData(dds)[,c("condition", "type")])
pheatmap(assay(ntd)[select,], cluster_rows=FALSE, show_rownames=FALSE,
         cluster_cols=FALSE, annotation_col=df)
vsd <- vst(dds, blind=FALSE)
rld <- rlog(dds, blind=FALSE)
sampleDists <- dist(t(assay(vsd)))
sampleDistMatrix <- as.matrix(sampleDists)
rownames(sampleDistMatrix) <- paste(vsd$condition, vsd$type, sep="-")
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette( rev(brewer.pal(9, "Blues")) )(255)
pheatmap(sampleDistMatrix,
         clustering_distance_rows=sampleDists,
         clustering_distance_cols=sampleDists,
         col=colors)
plotPCA(vsd, intgroup=c("condition", "type"))
pcaData <- plotPCA(vsd, intgroup=c("condition", "type"), returnData=TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
ggplot(pcaData, aes(PC1, PC2, color=condition, shape=type)) +
  geom_point(size=3) +
  xlab(paste0("PC1: ",percentVar[1],"% variance")) +
  ylab(paste0("PC2: ",percentVar[2],"% variance")) +
  coord_fixed()
EnhancedVolcano(res, lab = rownames(res), x = 'log2FoldChange', y = 'pvalue', xlim = c(-5, 10))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plugins.R
\name{dyUnzoom}
\alias{dyUnzoom}
\title{The dyUnzoom plugin adds an "Unzoom" button to the graph when it's displaying
in a zoomed state (this is a bit more discoverable than the default double-
click gesture for unzooming).}
\usage{
dyUnzoom(dygraph)
}
\arguments{
\item{dygraph}{Dygraph to add plugin to}
}
\value{
Dygraph with Unzoom plugin enabled
}
\description{
The dyUnzoom plugin adds an "Unzoom" button to the graph when it's displaying
in a zoomed state (this is a bit more discoverable than the default double-
click gesture for unzooming).
}
\examples{
library(dygraphs)
dygraph(ldeaths) \%>\%
dyRangeSelector() \%>\%
dyUnzoom()
}
| /man/dyUnzoom.Rd | no_license | kieshin/dygraphs | R | false | true | 736 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plugins.R
\name{dyUnzoom}
\alias{dyUnzoom}
\title{The dyUnzoom plugin adds an "Unzoom" button to the graph when it's displaying
in a zoomed state (this is a bit more discoverable than the default double-
click gesture for unzooming).}
\usage{
dyUnzoom(dygraph)
}
\arguments{
\item{dygraph}{Dygraph to add plugin to}
}
\value{
Dygraph with Unzoom plugin enabled
}
\description{
The dyUnzoom plugin adds an "Unzoom" button to the graph when it's displaying
in a zoomed state (this is a bit more discoverable than the default double-
click gesture for unzooming).
}
\examples{
library(dygraphs)
dygraph(ldeaths) \%>\%
dyRangeSelector() \%>\%
dyUnzoom()
}
|
# Cluster-job preamble: worker core count and the simulation setting passed
# on the command line.
cpus <- 4
print(cpus)
args <- commandArgs(TRUE)
# The first CLI argument is an R expression such as "setting=7"; evaluating
# it defines `setting` in the global environment.
# NOTE(review): eval(parse(...)) on a CLI string is fragile/unsafe in
# general; kept because the batch submission relies on this convention.
eval(parse(text=args[[1]]))
setting <- as.numeric(setting)
# Pre-assignment heuristic for one subject's data.
#
# For each cell subset, flags the subject as a candidate responder when the
# largest stimulated proportion exceeds the smallest unstimulated
# (stim == 0) proportion.  Encoding kept from the original: -1 = candidate
# responder, 0 = not.
#
# NOTE: this shadows base::assign(); the name is kept because the script
# calls it as `by(subsetDat, subsetDat$ptid, assign)`.
#
# Args:
#   x: data.frame for a single ptid with columns count, parentcount,
#      subset and stim (stim == 0 marks the negative-control rows).
# Returns: data.frame(ptid, subset, assign) with one row per subset.
assign <- function(x) {
  x$prop <- x$count / x$parentcount
  flags <- by(x, x$subset, function(y) max(y$prop[y$stim != 0]) > min(y$prop[y$stim == 0]))
  vals <- as.numeric(flags)
  vals[vals == 1] <- -1
  # FIX: use the names attached by by() so labels and values stay aligned.
  # The original paired by() output (sorted by subset level) with
  # unique(x$subset) (appearance order), which misaligns whenever subsets
  # do not first appear in sorted order.
  result <- data.frame(ptid = x$ptid[1], subset = names(flags), assign = vals)
  return(result)
}
# Compact a gated population name down to its expressed markers.
#
# The first 7 characters (e.g. "gag/4+/") name the stim group and parent
# gate and are kept verbatim.  The remainder is scanned marker by marker:
# each run of characters terminated by "+" (an expressed marker) is kept,
# runs terminated by "-" are dropped.
# e.g. "gag/4+/107a-154+IFNg+IL2-TNFa-" -> "gag/4+/154+IFNg+".
#
# Args:
#   str: a single subset name of the form <7-char prefix><marker string>.
# Returns: the prefix followed by the "+"-terminated markers only.
getExpression <- function(str) {
  first <- substr(str, 1, 7)
  second <- substr(str, 8, nchar(str))
  second <- strsplit(second, "")[[1]]
  seperators <- c(0, which(second %in% c("-", "+")))
  expressed <- list()
  # FIX: seq_len() instead of 2:length(seperators).  When the tail contains
  # no "+" or "-" at all, 2:1 iterates backwards and indexes second[0],
  # raising "argument is of length zero".
  for(i in seq_len(length(seperators) - 1) + 1) {
    if(second[seperators[i]] == "+") {
      expressed[[i]] <- paste(second[(seperators[(i - 1)] + 1) : seperators[i]], collapse = '')
    }
  }
  expressed <- paste(unlist(expressed), collapse = '')
  expressed <- paste(first, expressed, sep = '')
  return(expressed)
}
# Loading Data --------------------------------
# hvtn <- read.csv(file = "data/merged_505_stats.csv")
# names(hvtn) <- tolower(names(hvtn))
# hvtn <- subset(hvtn, !is.na(ptid))
# saveRDS(hvtn, file = "data/505_stats.rds")
# Getting marginals -----------------------------
library(flowReMix)
# Pre-processed HVTN 505 ICS cell-count data, one row per sample x subset.
hvtn <- readRDS(file = "data/505_stats.rds")
length(unique(hvtn$name))
length(unique(hvtn$ptid))
length(unique(hvtn$population))
unique(hvtn$population)
unique(hvtn$stim)
# Keep only the fully gated populations: their names are exactly 26
# characters (parent gate plus all five marker flags, e.g.
# "8+/107a-154-IFNg-IL2-TNFa-").
nchars <- nchar(as.character(unique(hvtn$population)))
#marginals <- unique(hvtn$population)[nchars < 26]
marginals <- unique(hvtn$population)[nchars == 26]
marginals <- subset(hvtn, population %in% marginals)
# Restrict to the negative controls and the VRC peptide-pool stimulations.
marginals <- subset(marginals, stim %in% c("negctrl", "VRC ENV A",
                                           "VRC ENV B", "VRC ENV C",
                                           "VRC GAG B", "VRC NEF B",
                                           "VRC POL 1 B", "VRC POL 2 B"))
marginals <- subset(marginals, !(population %in% c("4+", "8+")))
# Drop the all-negative boolean subsets.
marginals <- subset(marginals, !(population %in% c("8+/107a-154-IFNg-IL2-TNFa-", "4+/107a-154-IFNg-IL2-TNFa-")))
marginals$stim <- factor(as.character(marginals$stim))
marginals$population <- factor(as.character(marginals$population))
# Descriptives -------------------------------------
library(ggplot2)
marginals$prop <- marginals$count / marginals$parentcount
# ggplot(marginals) + geom_boxplot(aes(x = population, y = log(prop), col = stim))
require(dplyr)
# Per-subject/population mean proportion in the negative controls, merged
# back as `negprop` for the descriptive plots.
negctrl <- subset(marginals, stim == "negctrl")
negctrl <- summarize(group_by(negctrl, ptid, population), negprop = mean(prop))
negctrl <- as.data.frame(negctrl)
marginals <- merge(marginals, negctrl, all.x = TRUE)
# ggplot(subset(marginals, stim != "negctrl" & parent == "4+")) +
#   geom_point(aes(x = log(negprop), y = log(prop)), size = 0.25) +
#   facet_grid(stim ~ population, scales = "free") +
#   theme_bw() +
#   geom_abline(intercept = 0, slope = 1)
# Setting up data for analysis ---------------------------
unique(marginals$stim)
# Build one stimulation group per antigen: each group pairs its stimulated
# rows with the shared negative controls and prefixes the subset name.
gag <- subset(marginals, stim %in% c("VRC GAG B", "negctrl"))
gag$subset <- factor(paste("gag", gag$population, sep = "/"))
gag$stimGroup <- "gag"
pol <-subset(marginals, stim %in% c("negctrl", "VRC POL 1 B", "VRC POL 2 B"))
pol$subset <- factor(paste("pol", pol$population, sep = "/"))
pol$stimGroup <- "pol"
env <- subset(marginals, stim %in% c("negctrl", "VRC ENV C", "VRC ENV B", "VRC ENV A"))
env$subset <- factor(paste("env", env$population, sep = "/"))
env$stimGroup <- "env"
nef <- subset(marginals, stim %in% c("negctrl", "VRC NEF B"))
nef$subset <- factor(paste("nef", nef$population, sep = "/"))
nef$stimGroup <- "nef"
subsetDat <- rbind(gag, pol, env, nef)
# Recode the negative control as stim level "0".
subsetDat$stim <- as.character(subsetDat$stim)
subsetDat$stim[subsetDat$stim == "negctrl"] <- 0
subsetDat$stim <- factor(subsetDat$stim)
# Converting subset names ------------------
# Shorten subset names to the expressed ("+") markers via getExpression().
subsets <- as.character(unique(subsetDat$subset))
expressed <- sapply(subsets, getExpression)
map <- cbind(subsets, expressed)
subsetDat$subset <- as.character(subsetDat$subset)
for(i in 1:nrow(map)) {
  subsetDat$subset[which(subsetDat$subset == map[i, 1])] <- map[i, 2]
}
subsetDat$subset <- factor(subsetDat$subset)
# Getting outcomes -------------------------------
# treatmentdat <- read.csv(file = "data/rx_v2.csv")
# names(treatmentdat) <- tolower(names(treatmentdat))
# treatmentdat$ptid <- factor(gsub("-", "", (treatmentdat$ptid)))
# treatmentdat <- subset(treatmentdat, ptid %in% unique(subsetDat$ptid))
# Finding problematic subsets?
# Keep only subsets where more than 2% of rows have a count above 1.
keep <- by(subsetDat, list(subsetDat$subset), function(x) mean(x$count > 1) > 0.02)
keep <- names(keep[sapply(keep, function(x) x)])
#result$subsets[result$qvals < 0.1] %in% keep
subsetDat <- subset(subsetDat, subset %in% keep)
subsetDat$subset <- factor(as.character(subsetDat$subset))
# Simulation grid: this cluster job runs one row, selected by `setting`.
configurations <- expand.grid(method = c("SA", "MC"),
                              seed = 1:5,
                              prior = c(0, 2),
                              niter = c(40, 80),
                              includeBatch = FALSE)
config <- configurations[setting, ]
print(config)
niter <- config[["niter"]]
seed <- config[["seed"]]
prior <- config[["prior"]]
method <- config[["method"]]
includeBatch <- config[["includeBatch"]]
# Per-method EM settings: posterior samples kept (npost -> nPosteriors),
# update lag (lag -> updateLag), thinning (keepeach -> keepEach), and
# whether to run the Markov-chain EM variant.
if(method == "MC") {
  npost <- 3
  lag <- 20
  keepeach <- 20
  mcEM <- TRUE
} else if(method == "SA") {
  npost <- 1
  lag <- 10
  keepeach <- 20
  mcEM <- FALSE
} else if(method == "LS") {
  npost <- 1
  lag <- round(niter / 2)
  keepeach <- 20
  mcEM <- FALSE
}
# Optional batch adjustment in the regression formula.
# NOTE(review): `batchstr` and `formula` are not referenced again below --
# the flowReMix() call hard-codes its formula.
if(includeBatch) {
  batchstr <- "batch"
  formula <- formula(cbind(count, parentcount - count) ~ stim + batch)
} else {
  batchstr <- ""
  formula <- formula(cbind(count, parentcount - count) ~ stim)
}
# Fitting the model ------------------------------
library(flowReMix)
# MCMC/EM control parameters; lag/keepeach/npost/mcEM come from the method
# block above, seed and prior from the configuration row.
control <- flowReMix_control(updateLag = lag, nsamp = 200,
                             keepEach = keepeach, initMHcoef = 2.5,
                             nPosteriors = npost, centerCovariance = FALSE,
                             maxDispersion = 10^3, minDispersion = 10^7,
                             randomAssignProb = 10^-8, intSampSize = 100,
                             seed = seed, zeroPosteriorProbs = FALSE,
                             ncores = cpus, preAssignCoefs = 1,
                             prior = prior, isingWprior = FALSE,
                             markovChainEM = mcEM,
                             initMethod = "robust",
                             learningRate = 0.6, keepWeightPercent = 0.9)
subsetDat$batch <- factor(subsetDat$batch..)
subsetDat$stimGroup <- factor(subsetDat$stimGroup)
# Keep, per subject/population/stim combination, only the rows from the
# most recent collection.
subsetDat <- data.frame(subsetDat %>% group_by(ptid,population,stim,stimGroup,parent) %>% filter(collection.num==max(collection.num)))
# preAssign <- by(subsetDat, subsetDat$ptid, assign)
# preAssign <- do.call("rbind", preAssign)
subsetDat$batch <- factor(as.character(subsetDat$batch), levels = unique(as.character(subsetDat$batch)))
# unique(data.frame(subsetDat$ptid, subsetDat$batch))
# by(subsetDat, subsetDat$subset, function(x) table(x$batch))
# Fit the flowReMix model: binomial subset counts with stim as the
# clustering variable and per-subject random effects.
fit <- flowReMix(cbind(count, parentcount - count) ~ stim,
                 subject_id = ptid,
                 cell_type = subset,
                 cluster_variable = stim,
                 data = subsetDat,
                 covariance = "sparse",
                 ising_model = "sparse",
                 regression_method = "robust",
                 iterations = niter,
                 parallel = TRUE, keepSamples = FALSE,
                 cluster_assignment = TRUE,
                 verbose = TRUE, control = control)
file <- paste("results/hvtn_32_niter", niter, "npost", npost, "seed", seed, "prior", prior, method, ".rds", sep = "")
print(file)
# Save once before the stability computation, presumably so a partial
# result survives if stabilityGraph() fails.
saveRDS(object = fit, file = file)
stab <- stabilityGraph(fit, type = "ising", cpus = cpus, AND = TRUE,
                       gamma = 0.25, reps = 200, cv = FALSE)
fit$stabilityGraph <- stab
# Drop the large sample/data components before re-saving.
fit$randomEffectSamp <- NULL
fit$assignmentList <- NULL
fit$data <- NULL
saveRDS(object = fit, file = file)
# Sentinel prints marking completion in the cluster log.
print("WTF?!")
print("WTF?!???????")
| /cluster/hvtn/oldcode/HVTNclusterSA9.R | permissive | RGLab/flowReMix | R | false | false | 8,010 | r | cpus <- 4
# Cluster-job preamble (duplicate copy of the script above).
print(cpus)
args <- commandArgs(TRUE)
# The first CLI argument is an R expression (e.g. "setting=7") defining
# `setting`; eval(parse(...)) is fragile but matches the submission script.
eval(parse(text=args[[1]]))
setting <- as.numeric(setting)
# Pre-assignment heuristic for one subject's data (duplicate copy).
#
# For each cell subset, flags the subject as a candidate responder when the
# largest stimulated proportion exceeds the smallest unstimulated
# (stim == 0) proportion: -1 = candidate responder, 0 = not.
# NOTE: shadows base::assign(); the name is kept for the callers.
#
# Args:
#   x: data.frame for a single ptid with columns count, parentcount,
#      subset and stim (stim == 0 marks the negative-control rows).
# Returns: data.frame(ptid, subset, assign) with one row per subset.
assign <- function(x) {
  x$prop <- x$count / x$parentcount
  flags <- by(x, x$subset, function(y) max(y$prop[y$stim != 0]) > min(y$prop[y$stim == 0]))
  vals <- as.numeric(flags)
  vals[vals == 1] <- -1
  # FIX: take subset labels from the by() result names; the original paired
  # by() output (sorted order) with unique(x$subset) (appearance order),
  # which misaligns labels and values.
  result <- data.frame(ptid = x$ptid[1], subset = names(flags), assign = vals)
  return(result)
}
# Compact a gated population name down to its expressed markers
# (duplicate copy).  The first 7 characters are kept verbatim; runs of
# characters terminated by "+" are kept, runs terminated by "-" dropped.
# e.g. "gag/4+/107a-154+IFNg+IL2-TNFa-" -> "gag/4+/154+IFNg+".
getExpression <- function(str) {
  first <- substr(str, 1, 7)
  second <- substr(str, 8, nchar(str))
  second <- strsplit(second, "")[[1]]
  seperators <- c(0, which(second %in% c("-", "+")))
  expressed <- list()
  # FIX: seq_len() instead of 2:length(seperators), which iterates
  # backwards (c(2, 1)) and errors on second[0] when the marker tail
  # contains no "+" or "-".
  for(i in seq_len(length(seperators) - 1) + 1) {
    if(second[seperators[i]] == "+") {
      expressed[[i]] <- paste(second[(seperators[(i - 1)] + 1) : seperators[i]], collapse = '')
    }
  }
  expressed <- paste(unlist(expressed), collapse = '')
  expressed <- paste(first, expressed, sep = '')
  return(expressed)
}
# Loading Data --------------------------------
# hvtn <- read.csv(file = "data/merged_505_stats.csv")
# names(hvtn) <- tolower(names(hvtn))
# hvtn <- subset(hvtn, !is.na(ptid))
# saveRDS(hvtn, file = "data/505_stats.rds")
# Getting marginals -----------------------------
library(flowReMix)
hvtn <- readRDS(file = "data/505_stats.rds")
length(unique(hvtn$name))
length(unique(hvtn$ptid))
length(unique(hvtn$population))
unique(hvtn$population)
unique(hvtn$stim)
nchars <- nchar(as.character(unique(hvtn$population)))
#marginals <- unique(hvtn$population)[nchars < 26]
marginals <- unique(hvtn$population)[nchars == 26]
marginals <- subset(hvtn, population %in% marginals)
marginals <- subset(marginals, stim %in% c("negctrl", "VRC ENV A",
"VRC ENV B", "VRC ENV C",
"VRC GAG B", "VRC NEF B",
"VRC POL 1 B", "VRC POL 2 B"))
marginals <- subset(marginals, !(population %in% c("4+", "8+")))
marginals <- subset(marginals, !(population %in% c("8+/107a-154-IFNg-IL2-TNFa-", "4+/107a-154-IFNg-IL2-TNFa-")))
marginals$stim <- factor(as.character(marginals$stim))
marginals$population <- factor(as.character(marginals$population))
# Descriptives -------------------------------------
library(ggplot2)
marginals$prop <- marginals$count / marginals$parentcount
# ggplot(marginals) + geom_boxplot(aes(x = population, y = log(prop), col = stim))
require(dplyr)
negctrl <- subset(marginals, stim == "negctrl")
negctrl <- summarize(group_by(negctrl, ptid, population), negprop = mean(prop))
negctrl <- as.data.frame(negctrl)
marginals <- merge(marginals, negctrl, all.x = TRUE)
# ggplot(subset(marginals, stim != "negctrl" & parent == "4+")) +
# geom_point(aes(x = log(negprop), y = log(prop)), size = 0.25) +
# facet_grid(stim ~ population, scales = "free") +
# theme_bw() +
# geom_abline(intercept = 0, slope = 1)
# Setting up data for analysis ---------------------------
unique(marginals$stim)
gag <- subset(marginals, stim %in% c("VRC GAG B", "negctrl"))
gag$subset <- factor(paste("gag", gag$population, sep = "/"))
gag$stimGroup <- "gag"
pol <-subset(marginals, stim %in% c("negctrl", "VRC POL 1 B", "VRC POL 2 B"))
pol$subset <- factor(paste("pol", pol$population, sep = "/"))
pol$stimGroup <- "pol"
env <- subset(marginals, stim %in% c("negctrl", "VRC ENV C", "VRC ENV B", "VRC ENV A"))
env$subset <- factor(paste("env", env$population, sep = "/"))
env$stimGroup <- "env"
nef <- subset(marginals, stim %in% c("negctrl", "VRC NEF B"))
nef$subset <- factor(paste("nef", nef$population, sep = "/"))
nef$stimGroup <- "nef"
subsetDat <- rbind(gag, pol, env, nef)
subsetDat$stim <- as.character(subsetDat$stim)
subsetDat$stim[subsetDat$stim == "negctrl"] <- 0
subsetDat$stim <- factor(subsetDat$stim)
# Converting subset names ------------------
subsets <- as.character(unique(subsetDat$subset))
expressed <- sapply(subsets, getExpression)
map <- cbind(subsets, expressed)
subsetDat$subset <- as.character(subsetDat$subset)
for(i in 1:nrow(map)) {
subsetDat$subset[which(subsetDat$subset == map[i, 1])] <- map[i, 2]
}
subsetDat$subset <- factor(subsetDat$subset)
# Getting outcomes -------------------------------
# treatmentdat <- read.csv(file = "data/rx_v2.csv")
# names(treatmentdat) <- tolower(names(treatmentdat))
# treatmentdat$ptid <- factor(gsub("-", "", (treatmentdat$ptid)))
# treatmentdat <- subset(treatmentdat, ptid %in% unique(subsetDat$ptid))
# Finding problematic subsets?
# Keep only cell subsets where more than 2% of rows have a count above 1;
# subsets that are almost always empty carry no signal for the model.
keep <- by(subsetDat, list(subsetDat$subset), function(x) mean(x$count > 1) > 0.02)
keep <- names(keep[sapply(keep, function(x) x)])
#result$subsets[result$qvals < 0.1] %in% keep
subsetDat <- subset(subsetDat, subset %in% keep)
# Re-factor to drop levels eliminated by the filter above.
subsetDat$subset <- factor(as.character(subsetDat$subset))
# Full grid of run configurations; `setting` (defined upstream of this chunk,
# presumably a batch-job index -- confirm) selects the row actually run.
configurations <- expand.grid(method = c("SA", "MC"),
                              seed = 1:5,
                              prior = c(0, 2),
                              niter = c(40, 80),
                              includeBatch = FALSE)
config <- configurations[setting, ]
print(config)
niter <- config[["niter"]]
seed <- config[["seed"]]
prior <- config[["prior"]]
method <- config[["method"]]
includeBatch <- config[["includeBatch"]]
# Sampler settings per estimation method. "MC" turns on Markov-chain EM
# (see markovChainEM below); "SA"/"LS" run with a single posterior chain.
# NOTE(review): "LS" never appears in `configurations`, so that branch is
# currently dead -- confirm whether it is still needed.
if(method == "MC") {
  npost <- 3
  lag <- 20
  keepeach <- 20
  mcEM <- TRUE
} else if(method == "SA") {
  npost <- 1
  lag <- 10
  keepeach <- 20
  mcEM <- FALSE
} else if(method == "LS") {
  npost <- 1
  lag <- round(niter / 2)
  keepeach <- 20
  mcEM <- FALSE
}
# Optionally adjust the binomial regression for experimental batch.
if(includeBatch) {
  batchstr <- "batch"
  formula <- formula(cbind(count, parentcount - count) ~ stim + batch)
} else {
  batchstr <- ""
  formula <- formula(cbind(count, parentcount - count) ~ stim)
}
# Fitting the model ------------------------------
library(flowReMix)
# Sampler/EM control knobs; values for lag, keepeach, npost, mcEM, seed and
# prior come from the configuration block above.
control <- flowReMix_control(updateLag = lag, nsamp = 200,
                             keepEach = keepeach, initMHcoef = 2.5,
                             nPosteriors = npost, centerCovariance = FALSE,
                             maxDispersion = 10^3, minDispersion = 10^7,
                             randomAssignProb = 10^-8, intSampSize = 100,
                             seed = seed, zeroPosteriorProbs = FALSE,
                             ncores = cpus, preAssignCoefs = 1,
                             prior = prior, isingWprior = FALSE,
                             markovChainEM = mcEM,
                             initMethod = "robust",
                             learningRate = 0.6, keepWeightPercent = 0.9)
# `batch..` is the raw column name carried in from the source data -- confirm.
subsetDat$batch <- factor(subsetDat$batch..)
subsetDat$stimGroup <- factor(subsetDat$stimGroup)
# Keep only the most recent collection per subject/population/stimulation.
subsetDat <- data.frame(subsetDat %>% group_by(ptid,population,stim,stimGroup,parent) %>% filter(collection.num==max(collection.num)))
# preAssign <- by(subsetDat, subsetDat$ptid, assign)
# preAssign <- do.call("rbind", preAssign)
# Re-level batch in order of first appearance.
subsetDat$batch <- factor(as.character(subsetDat$batch), levels = unique(as.character(subsetDat$batch)))
# unique(data.frame(subsetDat$ptid, subsetDat$batch))
# by(subsetDat, subsetDat$subset, function(x) table(x$batch))
# Mixed-effects binomial mixture model of cell counts versus stimulation.
fit <- flowReMix(cbind(count, parentcount - count) ~ stim,
                 subject_id = ptid,
                 cell_type = subset,
                 cluster_variable = stim,
                 data = subsetDat,
                 covariance = "sparse",
                 ising_model = "sparse",
                 regression_method = "robust",
                 iterations = niter,
                 parallel = TRUE, keepSamples = FALSE,
                 cluster_assignment = TRUE,
                 verbose = TRUE, control = control)
# Output path encodes the run configuration; saved once here and re-saved
# after the stability step below.
file <- paste("results/hvtn_32_niter", niter, "npost", npost, "seed", seed, "prior", prior, method, ".rds", sep = "")
print(file)
saveRDS(object = fit, file = file)
# Stability selection for the Ising graph over cell subsets.
stab <- stabilityGraph(fit, type = "ising", cpus = cpus, AND = TRUE,
                       gamma = 0.25, reps = 200, cv = FALSE)
fit$stabilityGraph <- stab
# Strip large intermediate objects before re-saving to keep the .rds small.
fit$randomEffectSamp <- NULL
fit$assignmentList <- NULL
fit$data <- NULL
saveRDS(object = fit, file = file)
# NOTE(review): leftover debug output -- consider removing.
print("WTF?!")
print("WTF?!???????")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SetupProject.R
\name{ProjectFromAsyncUrl}
\alias{ProjectFromAsyncUrl}
\title{Retrieve a project from the project-creation URL}
\usage{
ProjectFromAsyncUrl(asyncUrl, maxWait = 600)
}
\arguments{
\item{asyncUrl}{The temporary status URL}
\item{maxWait}{The maximum time to wait (in seconds) for project creation before aborting.}
}
\description{
If project creation times out, the error message includes a URL corresponding to the project
creation task. That URL can be passed to this function (which will return the completed project
details when finished) to resume waiting for project creation.
}
| /man/ProjectFromAsyncUrl.Rd | no_license | anno526/datarobot | R | false | true | 677 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SetupProject.R
\name{ProjectFromAsyncUrl}
\alias{ProjectFromAsyncUrl}
\title{Retrieve a project from the project-creation URL}
\usage{
ProjectFromAsyncUrl(asyncUrl, maxWait = 600)
}
\arguments{
\item{asyncUrl}{The temporary status URL}
\item{maxWait}{The maximum time to wait (in seconds) for project creation before aborting.}
}
\description{
If project creation times out, the error message includes a URL corresponding to the project
creation task. That URL can be passed to this function (which will return the completed project
details when finished) to resume waiting for project creation.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/startup.R
\name{report}
\alias{report}
\title{Helper to call the report object from .syberiaReport}
\usage{
report()
}
\value{
.syberiaReport$report
}
\description{
Helper to call the report object from .syberiaReport
}
| /man/report.Rd | permissive | christiantillich/syberiaReports | R | false | true | 299 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/startup.R
\name{report}
\alias{report}
\title{Helper to call the report object from .syberiaReport}
\usage{
report()
}
\value{
.syberiaReport$report
}
\description{
Helper to call the report object from .syberiaReport
}
|
# One-tailed t test for a regression coefficient (textbook exercise).
# Test statistic: coefficient estimate divided by its standard error.
# Use `<-` and names that do not mask base::t() or stats::df().
t_stat <- 0.26528 / 0.10127
print(t_stat)
# Degrees of freedom for the reference t distribution.
deg_free <- 17
# One-tailed critical values at alpha = 0.01 and alpha = 0.005.
t1value <- qt(1 - 0.01, deg_free)
t2value <- qt(1 - 0.005, deg_free)
print(t1value)
print(t2value)
# Thus, H0 would be rejected at the alpha= .01 level but not at the alpha= .005 level
# One-tailed p-value of the observed statistic.
pvalue <- pt(-t_stat, deg_free)
print(pvalue) | /An_Introduction_To_Statistical_Methods_And_Data_Analysis_by_R_Lyman_Ott_And_Michael_Longnecker/CH12/EX12.16/Ex12_16.r | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 255 | r | # test statistic
# Observed t statistic (estimate / standard error), critical values,
# and one-tailed p-value for the test with 17 degrees of freedom.
t <- 0.26528 / 0.10127
df <- 17
t1value <- qt(1 - 0.01, df)
t2value <- qt(1 - 0.005, df)
pvalue <- pt(-t, df)
print(t)
print(t1value)
print(t2value)
# Thus, H0 would be rejected at the alpha= .01 level but not at the alpha= .005 level
print(pvalue) |
library("tidyverse")
library("patchwork")
library("plotly")
library("shiny")
library("rsconnect")
library("shinythemes")
library("markdown")
library("reshape2")
# IMPORT DATA
# Heart-disease data set: one row per patient, "Condition" is the outcome.
heart_data <- read_csv('data/heart.csv')
heart_data <- as.data.frame(heart_data)
#Renaming columns.
data_col_names <- c('Age', 'Sex', 'Chest Pain Type', 'Resting Blood Pressure', 'Cholesterol', 'Fasting Blood Sugar', 'Resting ECG', 'Max. Heart Rate',
                    'Exercise Induced Angina', 'Previous Peak', 'Slope', 'No. Major Blood Vessels', 'Thal Rate', 'Condition')
colnames(heart_data) <- data_col_names
# Select numerical and categorical data
Numerical <- heart_data %>% select('Age','Resting Blood Pressure','Cholesterol','Max. Heart Rate','Previous Peak')
Categorical <- heart_data %>% select('Sex','Chest Pain Type','Fasting Blood Sugar','Resting ECG','Exercise Induced Angina','Slope','No. Major Blood Vessels','Thal Rate')
# separate x and y of the dataset
Samples <- heart_data %>% select(!Condition)
Labels <- heart_data %>% select(Condition)
#plot the correlation_matrix
# Pairwise correlations of all columns (cor() requires them numeric),
# rounded to 3 decimals for the heat-map labels.
Correlation_matrix <- cor(heart_data) %>% round(3)
# Blank out the lower triangle of a square matrix so only the upper
# triangle (and diagonal) remain; used to de-duplicate the correlation
# heat map before melting.
get_upper_tri <- function(cormat) {
  is_lower <- lower.tri(cormat)
  cormat[is_lower] <- NA
  cormat
}
# Melt the upper triangle into long (Var1, Var2, value) form for ggplot.
upper_tri <- get_upper_tri(Correlation_matrix)
melted_cormat <- melt(upper_tri,na.rm = TRUE)
# Annotated correlation heat map: blue = -1, white = 0, red = +1.
Correlation_matrix_plot <- ggplot(melted_cormat, aes(Var2, Var1, fill = value))+
  geom_tile(color = "white")+
  geom_text(aes(Var2, Var1, label = value), color = "black", size = 4) +
  scale_fill_gradient2(low = "blue", high = "red", mid = "white",
                       midpoint = 0, limit = c(-1,1), space = "Lab",
                       name="Correlation") +
  theme_minimal()+ # minimal theme
  theme(axis.text.x = element_text(angle = 45, vjust = 1,
                                   size = 8, hjust = 1))+
  theme(
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.grid.major = element_blank(),
    panel.border = element_blank(),
    panel.background = element_blank(),
    axis.ticks = element_blank(),
    legend.justification = c(1, 0),
    legend.position = c(0.6, 0.7),
    legend.direction = "horizontal")+
  guides(fill = guide_colorbar(barwidth = 7, barheight = 1,
                               title.position = "top", title.hjust = 0.5))
#From the above correlation matrix, we can see that the correlation between features is less.
#Chest Pain Type with Condition and Max. Heart Rate with Condition have high correlated features in our dataset; Correlation Coefficient of 0.43 and 0.42 respectively.
#Our features have a lot of negative correlation coefficient indicating that two individual variables have a statistical relationship such that generally move in opposite directions from one another.
# create categorical plots with condition
heart_data_copy <- data.frame(heart_data)
colnames(heart_data_copy) <- data_col_names
heart_data_copy$Slope <- as.factor(heart_data_copy$Slope)
heart_data_copy$`No. Major Blood Vessels`<- as.factor(heart_data_copy$`No. Major Blood Vessels`)
heart_data_copy$`Thal Rate`<- as.factor(heart_data_copy$`Thal Rate`)
heart_data_copy$Condition <-factor(heart_data_copy$Condition,
levels = c(0,1),
labels = c("less chance of heart attack","more chance of heart attack"))
heart_data_copy$Sex <- factor(heart_data_copy$Sex,
levels = c(0,1),
labels = c("female","male"))
heart_data_copy$`Chest Pain Type` <- factor(heart_data_copy$`Chest Pain Type`,
levels =c(0,1,2,3),
labels = c("typical angina","atypical angina","non-anginal pain","asymptomatic"))
heart_data_copy$`Fasting Blood Sugar`<-factor(heart_data_copy$`Fasting Blood Sugar`,
levels = c(0,1),
labels = c("false","true"))
heart_data_copy$`Resting ECG`<- factor(heart_data_copy$`Resting ECG`,
levels = c(0,1,2),
labels = c("normal","having ST-T wave abnormality","showing probable or definite left ventricular hypertrophy"))
heart_data_copy$`Exercise Induced Angina`<-factor(heart_data_copy$`Exercise Induced Angina`,
levels = c(0,1),
labels = c("no","yes"))
Sex_plot <- ggplot(heart_data_copy,aes(x=Sex,fill=Condition))+
geom_bar(position = "dodge")
Chest_plot <- ggplot(heart_data_copy,aes(x=`Chest Pain Type`,fill=Condition))+
geom_bar(position = "dodge")+
theme(axis.text.x = element_text(angle = 45, vjust = 1,
size = 8, hjust = 1))
Sugar_plot <- ggplot(heart_data_copy,aes(x=`Fasting Blood Sugar`,fill=Condition))+
geom_bar(position = "dodge")
ECG_plot <- ggplot(heart_data_copy,aes(x=`Resting ECG`,fill=Condition))+
geom_bar(position = "dodge")+
theme(axis.text.x = element_text(angle = 30, vjust = 1,
size = 8, hjust = 1))
Exercise_plot <- ggplot(heart_data_copy,aes(x=`Exercise Induced Angina`,fill=Condition))+
geom_bar(position = "dodge")
Slope_plot <- ggplot(heart_data_copy,aes(x=Slope,fill=Condition))+
geom_bar(position = "dodge")
Vessels_plot <- ggplot(heart_data_copy,aes(x=`No. Major Blood Vessels`,fill=Condition))+
geom_bar(position = "dodge")
Thal_plot <- ggplot(heart_data_copy,aes(x=`Thal Rate`,fill=Condition))+
geom_bar(position = "dodge")
# create numerical plot with condition
# Coerce the continuous columns to numeric. All conversions now read from
# heart_data_copy; the original read Cholesterol from heart_data (same
# values at this point, but inconsistent and fragile if either is changed).
heart_data_copy$Age <- as.numeric(heart_data_copy$Age)
heart_data_copy$`Resting Blood Pressure` <- as.numeric(heart_data_copy$`Resting Blood Pressure`)
heart_data_copy$Cholesterol <- as.numeric(heart_data_copy$Cholesterol)
heart_data_copy$`Max. Heart Rate` <- as.numeric(heart_data_copy$`Max. Heart Rate`)
heart_data_copy$`Previous Peak` <- as.numeric(heart_data_copy$`Previous Peak`)
# Overlaid density plot per numeric feature, split by outcome group;
# selected by index in the server's renderPlotly calls.
Age_plot <- ggplot(heart_data_copy, aes(x = Age, fill = Condition)) +
  geom_density(alpha = 0.3)
Pressure_plot <- ggplot(heart_data_copy, aes(x = `Resting Blood Pressure`, fill = Condition)) +
  geom_density(alpha = 0.3)
Cholesterol_plot <- ggplot(heart_data_copy, aes(x = Cholesterol, fill = Condition)) +
  geom_density(alpha = 0.3)
HeartRate_plot <- ggplot(heart_data_copy, aes(x = `Max. Heart Rate`, fill = Condition)) +
  geom_density(alpha = 0.3)
Peak_plot <- ggplot(heart_data_copy, aes(x = `Previous Peak`, fill = Condition)) +
  geom_density(alpha = 0.3)
## import trained model
# NOTE(review): load() returns the *names* of the restored objects, not the
# objects themselves, so `logistic_model` is just a character vector here.
# The model actually used below is the freshly fitted `glm.fit`.
logistic_model <- load(file = "model/logistic.rda",.GlobalEnv)
# predict(fit_boost, data)
# 80/20 train/test split, reproducible via the fixed seed.
smp_size <- floor(0.8 * nrow(heart_data))
## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(heart_data)), size = smp_size)
train <- heart_data[train_ind, ]
test <- heart_data[-train_ind, ]
# Logistic regression of outcome on four predictors; the server reads its
# coefficients directly to compute predicted probabilities.
glm.fit <- glm(Condition ~ Age + Sex + `Chest Pain Type` + `Max. Heart Rate`, data = train, family = binomial)
# heart_data$Condition <-as.factor(heart_data$Condition)
# heart_data$Sex<-as.factor(heart_data$Sex)
# heart_data$`Chest Pain Type`<-as.factor(heart_data$`Chest Pain Type`)
# summary(heart_data)
# set.seed(1)
# sample <- sample(c(TRUE,FALSE),nrow(heart_data),replace=TRUE,prob = c(0.7,0.3))
# train <- heart_data[sample,]
# test <- heart_data[!sample,]
#
# model <- glm(Condition~Age+Sex+`Chest Pain Type`+`Max. Heart Rate`, family = "binomial",data=train)
# options(scipen = 999)
# summary(model)
#
# new <- data.frame(Age = 67, Sex = as.factor(1) , `Chest Pain Type` = as.factor(0), `Max. Heart Rate`= 129)
# col_names <- c('Age','Sex','Chest Pain Type','Max. Heart Rate')
# colnames(new) <- col_names
# predict(model,new,type="response")
#
# predicted <- predict(model,test,type="response")
# predicted
# APP UI
# Design UI for app
# Two-page navbar app: a feature-exploration page and a prediction page.
ui <- navbarPage("Heart Attack Prediction",
                 tabPanel("Feature Analysis",
                          # App title
                          titlePanel(strong("Feature Analysis")),
                          # Captions for top of app, explains what is going on
                          h4(p("This page is to visualize our dataset, we display the categorical feature count plots and numerical feature density plots")),
                          h5(p("Here we show the relationship between each feature and the chance of suffering heart attack")),
                          br(),
                          sidebarLayout(
                            sidebarPanel(
                              width = 3,
                              # Values "1".."7" index the categorical plots in the server.
                              fluidRow(selectInput(
                                inputId = "Categorical",
                                label = "Choose one categorical feature to display:",
                                choices = c(
                                  "Sex"= 1,
                                  "Chest Pain Type"= 2 ,
                                  "Fasting Blood Sugar" = 3,
                                  "Resting Electrocardiographic Results"= 4,
                                  "Exercise Induced Angina" = 5,
                                  "Number of Major Blood Vessels" =6,
                                  "Thal Rate" = 7
                                ),
                                selected = 1)
                              ),
                              # Values "1".."5" index the numerical density plots.
                              fluidRow(selectInput(
                                inputId = "Numerical",
                                label = "Choose one numerical feature to display:",
                                choices = c("Age" = 1,
                                            "Resting Blood Pressure" =2,
                                            "Cholesterol" =3,
                                            "Max. Heart Rate"=4,
                                            "Previous Peak" =5),
                                selected = 1
                              )
                              )
                            ),
                            # Display the Plotly plot in the main panel
                            mainPanel(width =9,
                                      tabsetPanel(
                                        tabPanel("Feature Plots",
                                                 fluidRow(plotlyOutput("Categorical_plot",height="300px")),
                                                 fluidRow(plotlyOutput("Numerical_plot",height = "300px"))
                                        ),
                                        tabPanel("Correlation Matrix",
                                                 fluidRow(plotlyOutput("Correlation_Matrix_plot",height = "600px")))
                                      )
                            )
                          )
                 ),
                 tabPanel("Predictions",
                          # App title
                          titlePanel(strong("Prediction")),
                          h4(p("This page is to predict the risk of having heart attack with the information provided")),
                          h5(p("Here we use a logistic regression model trained with the data from the dataset")),
                          br(),
                          sidebarLayout(sidebarPanel(
                            width = 3,
                            # These four inputs feed the logistic-regression
                            # prediction rendered as output$predicted.
                            fluidRow(selectInput(
                              inputId = "sex",
                              label = "Choose the sex ",
                              choices = c(
                                "Male"= 1,
                                "Female"= 0),
                              selected = 1)
                            ),
                            fluidRow(numericInput(
                              inputId = "age",
                              label = "Put in the age",
                              value = 20)
                            ),
                            fluidRow(numericInput(
                              inputId = "mhr",
                              label = "Maximum heart rate",
                              value = 20)
                            ),
                            fluidRow(selectInput(
                              inputId = "chpt",
                              label = "Choose the chest pain type",
                              choices = c("typical angina" = 0,
                                          "atypical angina" = 1,
                                          "non-anginal pain" = 2,
                                          "asymptomatic" = 3),
                              selected = 1)
                            )),
                            mainPanel(width =9,
                                      img(src = 'heart-attack-anatomy.jpg'),
                                      h1(strong("The predicted possibility of heart attack risk for the data input is: "),
                                         style = "font-size:21px;"
                                      ),
                                      textOutput("predicted")
                            )
                          )
                 )
)
# ==============================================================================
# APP SERVER
# Create R code for app functions
server <- function(input, output) {
  # NOTE(review): caret is loaded here but never used below -- confirm
  # before removing.
  library(caret)

  # Lookup tables mapping the UI selectInput values ("1", "2", ...) to the
  # pre-built ggplot objects; replaces the original if/else ladders.
  # (Slope_plot is intentionally absent: the UI exposes no choice for it.)
  categorical_plots <- list(Sex_plot, Chest_plot, Sugar_plot, ECG_plot,
                            Exercise_plot, Vessels_plot, Thal_plot)
  numerical_plots <- list(Age_plot, Pressure_plot, Cholesterol_plot,
                          HeartRate_plot, Peak_plot)

  # Render the selected categorical count plot.
  output$Categorical_plot <- renderPlotly({
    plotly_build(categorical_plots[[as.integer(input$Categorical)]])
  })

  # Render the selected numerical density plot.
  output$Numerical_plot <- renderPlotly({
    plotly_build(numerical_plots[[as.integer(input$Numerical)]])
  })

  # Render the (static) correlation heat map.
  output$Correlation_Matrix_plot <- renderPlotly({
    plotly_build(Correlation_matrix_plot)
  })

  # Predicted probability of heart attack from the logistic regression.
  # Coefficient order in glm.fit matches its fitting formula:
  # (Intercept), Age, Sex, `Chest Pain Type`, `Max. Heart Rate`.
  output$predicted <- renderText({
    age <- as.numeric(input$age)
    sex <- as.numeric(input$sex)
    chest_pain <- as.numeric(input$chpt)
    max_hr <- as.numeric(input$mhr)
    coeffs <- glm.fit$coefficients
    # Linear predictor, then inverse-logit to obtain a probability.
    lp <- coeffs[1] + coeffs[2] * age + coeffs[3] * sex +
      coeffs[4] * chest_pain + coeffs[5] * max_hr
    lp <- as.numeric(lp)
    exp(lp) / (1 + exp(lp))
  })
}
# ==============================================================================
# BUILD APP
# Knit UI and Server to create app
# Launches the Shiny app from the ui/server objects defined above.
shinyApp(ui = ui, server = server)
library("tidyverse")
library("patchwork")
library("plotly")
library("shiny")
library("rsconnect")
library("shinythemes")
library("markdown")
library("reshape2")
# IMPORT DATA
heart_data <- read_csv('data/heart.csv')
heart_data <- as.data.frame(heart_data)
#Renaming columns.
data_col_names <- c('Age', 'Sex', 'Chest Pain Type', 'Resting Blood Pressure', 'Cholesterol', 'Fasting Blood Sugar', 'Resting ECG', 'Max. Heart Rate',
'Exercise Induced Angina', 'Previous Peak', 'Slope', 'No. Major Blood Vessels', 'Thal Rate', 'Condition')
colnames(heart_data) <- data_col_names
# Select numerical and categorical data
Numerical <- heart_data %>% select('Age','Resting Blood Pressure','Cholesterol','Max. Heart Rate','Previous Peak')
Categorical <- heart_data %>% select('Sex','Chest Pain Type','Fasting Blood Sugar','Resting ECG','Exercise Induced Angina','Slope','No. Major Blood Vessels','Thal Rate')
# separate x and y of the dataset
Samples <- heart_data %>% select(!Condition)
Labels <- heart_data %>% select(Condition)
#plot the correlation_matrix
Correlation_matrix <- cor(heart_data) %>% round(3)
# Mask the lower triangle of a square matrix with NA, leaving the upper
# triangle and diagonal for the correlation heat map.
get_upper_tri <- function(cormat) {
  mask <- lower.tri(cormat)
  cormat[mask] <- NA
  cormat
}
upper_tri <- get_upper_tri(Correlation_matrix)
melted_cormat <- melt(upper_tri,na.rm = TRUE)
Correlation_matrix_plot <- ggplot(melted_cormat, aes(Var2, Var1, fill = value))+
geom_tile(color = "white")+
geom_text(aes(Var2, Var1, label = value), color = "black", size = 4) +
scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 0, limit = c(-1,1), space = "Lab",
name="Correlation") +
theme_minimal()+ # minimal theme
theme(axis.text.x = element_text(angle = 45, vjust = 1,
size = 8, hjust = 1))+
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
legend.justification = c(1, 0),
legend.position = c(0.6, 0.7),
legend.direction = "horizontal")+
guides(fill = guide_colorbar(barwidth = 7, barheight = 1,
title.position = "top", title.hjust = 0.5))
#From the above correlation matrix, we can see that the correlation between features is less.
#Chest Pain Type with Condition and Max. Heart Rate with Condition have high correlated features in our dataset; Correlation Coefficient of 0.43 and 0.42 respectively.
#Our features have a lot of negative correlation coefficient indicating that two individual variables have a statistical relationship such that generally move in opposite directions from one another.
##==============================================================================
# create categorical plots with condition
heart_data_copy <- data.frame(heart_data)
colnames(heart_data_copy) <- data_col_names
heart_data_copy$Slope <- as.factor(heart_data_copy$Slope)
heart_data_copy$`No. Major Blood Vessels`<- as.factor(heart_data_copy$`No. Major Blood Vessels`)
heart_data_copy$`Thal Rate`<- as.factor(heart_data_copy$`Thal Rate`)
heart_data_copy$Condition <-factor(heart_data_copy$Condition,
levels = c(0,1),
labels = c("less chance of heart attack","more chance of heart attack"))
heart_data_copy$Sex <- factor(heart_data_copy$Sex,
levels = c(0,1),
labels = c("female","male"))
heart_data_copy$`Chest Pain Type` <- factor(heart_data_copy$`Chest Pain Type`,
levels =c(0,1,2,3),
labels = c("typical angina","atypical angina","non-anginal pain","asymptomatic"))
heart_data_copy$`Fasting Blood Sugar`<-factor(heart_data_copy$`Fasting Blood Sugar`,
levels = c(0,1),
labels = c("false","true"))
heart_data_copy$`Resting ECG`<- factor(heart_data_copy$`Resting ECG`,
levels = c(0,1,2),
labels = c("normal","having ST-T wave abnormality","showing probable or definite left ventricular hypertrophy"))
heart_data_copy$`Exercise Induced Angina`<-factor(heart_data_copy$`Exercise Induced Angina`,
levels = c(0,1),
labels = c("no","yes"))
Sex_plot <- ggplot(heart_data_copy,aes(x=Sex,fill=Condition))+
geom_bar(position = "dodge")
Chest_plot <- ggplot(heart_data_copy,aes(x=`Chest Pain Type`,fill=Condition))+
geom_bar(position = "dodge")+
theme(axis.text.x = element_text(angle = 45, vjust = 1,
size = 8, hjust = 1))
Sugar_plot <- ggplot(heart_data_copy,aes(x=`Fasting Blood Sugar`,fill=Condition))+
geom_bar(position = "dodge")
ECG_plot <- ggplot(heart_data_copy,aes(x=`Resting ECG`,fill=Condition))+
geom_bar(position = "dodge")+
theme(axis.text.x = element_text(angle = 30, vjust = 1,
size = 8, hjust = 1))
Exercise_plot <- ggplot(heart_data_copy,aes(x=`Exercise Induced Angina`,fill=Condition))+
geom_bar(position = "dodge")
Slope_plot <- ggplot(heart_data_copy,aes(x=Slope,fill=Condition))+
geom_bar(position = "dodge")
Vessels_plot <- ggplot(heart_data_copy,aes(x=`No. Major Blood Vessels`,fill=Condition))+
geom_bar(position = "dodge")
Thal_plot <- ggplot(heart_data_copy,aes(x=`Thal Rate`,fill=Condition))+
geom_bar(position = "dodge")
# create numerical plot with condition
# Coerce continuous columns to numeric. All conversions now read from
# heart_data_copy; the original read Cholesterol from heart_data (same
# values here, but inconsistent with the surrounding lines).
heart_data_copy$Age <- as.numeric(heart_data_copy$Age)
heart_data_copy$`Resting Blood Pressure` <- as.numeric(heart_data_copy$`Resting Blood Pressure`)
heart_data_copy$Cholesterol <- as.numeric(heart_data_copy$Cholesterol)
heart_data_copy$`Max. Heart Rate` <- as.numeric(heart_data_copy$`Max. Heart Rate`)
heart_data_copy$`Previous Peak` <- as.numeric(heart_data_copy$`Previous Peak`)
# Overlaid density plot per numeric feature, split by outcome group.
Age_plot <- ggplot(heart_data_copy, aes(x = Age, fill = Condition)) +
  geom_density(alpha = 0.3)
Pressure_plot <- ggplot(heart_data_copy, aes(x = `Resting Blood Pressure`, fill = Condition)) +
  geom_density(alpha = 0.3)
Cholesterol_plot <- ggplot(heart_data_copy, aes(x = Cholesterol, fill = Condition)) +
  geom_density(alpha = 0.3)
HeartRate_plot <- ggplot(heart_data_copy, aes(x = `Max. Heart Rate`, fill = Condition)) +
  geom_density(alpha = 0.3)
Peak_plot <- ggplot(heart_data_copy, aes(x = `Previous Peak`, fill = Condition)) +
  geom_density(alpha = 0.3)
## import trained model
logistic_model <- load(file = "model/logistic.rda",.GlobalEnv)
# predict(fit_boost, data)
smp_size <- floor(0.8 * nrow(heart_data))
## set the seed to make your partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(heart_data)), size = smp_size)
train <- heart_data[train_ind, ]
test <- heart_data[-train_ind, ]
glm.fit <- glm(Condition ~ Age + Sex + `Chest Pain Type` + `Max. Heart Rate`, data = train, family = binomial)
# heart_data$Condition <-as.factor(heart_data$Condition)
# heart_data$Sex<-as.factor(heart_data$Sex)
# heart_data$`Chest Pain Type`<-as.factor(heart_data$`Chest Pain Type`)
# summary(heart_data)
# set.seed(1)
# sample <- sample(c(TRUE,FALSE),nrow(heart_data),replace=TRUE,prob = c(0.7,0.3))
# train <- heart_data[sample,]
# test <- heart_data[!sample,]
#
# model <- glm(Condition~Age+Sex+`Chest Pain Type`+`Max. Heart Rate`, family = "binomial",data=train)
# options(scipen = 999)
# summary(model)
#
# new <- data.frame(Age = 67, Sex = as.factor(1) , `Chest Pain Type` = as.factor(0), `Max. Heart Rate`= 129)
# col_names <- c('Age','Sex','Chest Pain Type','Max. Heart Rate')
# colnames(new) <- col_names
# predict(model,new,type="response")
#
# predicted <- predict(model,test,type="response")
# predicted
# APP UI
# Design UI for app
ui <- navbarPage("Heart Attack Prediction",
tabPanel("Feature Analysis",
# App title
titlePanel(strong("Feature Analysis")),
# Captions for top of app, explains what is going on
h4(p("This page is to visualize our dataset, we display the categorical feature count plots and numerical feature density plots")),
h5(p("Here we show the relationship between each feature and the chance of suffering heart attack")),
br(),
sidebarLayout(
sidebarPanel(
width = 3,
fluidRow(selectInput(
inputId = "Categorical",
label = "Choose one categorical feature to display:",
choices = c(
"Sex"= 1,
"Chest Pain Type"= 2 ,
"Fasting Blood Sugar" = 3,
"Resting Electrocardiographic Results"= 4,
"Exercise Induced Angina" = 5,
"Number of Major Blood Vessels" =6,
"Thal Rate" = 7
),
selected = 1)
),
fluidRow(selectInput(
inputId = "Numerical",
label = "Choose one numerical feature to display:",
choices = c("Age" = 1,
"Resting Blood Pressure" =2,
"Cholesterol" =3,
"Max. Heart Rate"=4,
"Previous Peak" =5),
selected = 1
)
)
),
# Display the Plotly plot in the main panel
mainPanel(width =9,
tabsetPanel(
tabPanel("Feature Plots",
fluidRow(plotlyOutput("Categorical_plot",height="300px")),
fluidRow(plotlyOutput("Numerical_plot",height = "300px"))
),
tabPanel("Correlation Matrix",
fluidRow(plotlyOutput("Correlation_Matrix_plot",height = "600px")))
)
)
)
),
tabPanel("Predictions",
# App title
titlePanel(strong("Prediction")),
h4(p("This page is to predict the risk of having heart attack with the information provided")),
h5(p("Here we use a logistic regression model trained with the data from the dataset")),
br(),
sidebarLayout(sidebarPanel(
width = 3,
fluidRow(selectInput(
inputId = "sex",
label = "Choose the sex ",
choices = c(
"Male"= 1,
"Female"= 0),
selected = 1)
),
fluidRow(numericInput(
inputId = "age",
label = "Put in the age",
value = 20)
),
fluidRow(numericInput(
inputId = "mhr",
label = "Maximum heart rate",
value = 20)
),
fluidRow(selectInput(
inputId = "chpt",
label = "Choose the chest pain type",
choices = c("typical angina" = 0,
"atypical angina" = 1,
"non-anginal pain" = 2,
"asymptomatic" = 3),
selected = 1)
)),
mainPanel(width =9,
img(src = 'heart-attack-anatomy.jpg'),
h1(strong("The predicted possibility of heart attack risk for the data input is: "),
style = "font-size:21px;"
),
textOutput("predicted")
)
)
)
)
# ==============================================================================
# APP SERVER
# Create R code for app functions
server <- function(input, output) {
  # NOTE(review): caret is loaded here but never used below -- confirm
  # before removing.
  library(caret)

  # Lookup tables mapping the UI selectInput values ("1", "2", ...) to the
  # pre-built ggplot objects; replaces the original if/else ladders.
  # (Slope_plot is intentionally absent: the UI exposes no choice for it.)
  categorical_plots <- list(Sex_plot, Chest_plot, Sugar_plot, ECG_plot,
                            Exercise_plot, Vessels_plot, Thal_plot)
  numerical_plots <- list(Age_plot, Pressure_plot, Cholesterol_plot,
                          HeartRate_plot, Peak_plot)

  # Render the selected categorical count plot.
  output$Categorical_plot <- renderPlotly({
    plotly_build(categorical_plots[[as.integer(input$Categorical)]])
  })

  # Render the selected numerical density plot.
  output$Numerical_plot <- renderPlotly({
    plotly_build(numerical_plots[[as.integer(input$Numerical)]])
  })

  # Render the (static) correlation heat map.
  output$Correlation_Matrix_plot <- renderPlotly({
    plotly_build(Correlation_matrix_plot)
  })

  # Predicted probability of heart attack from the logistic regression.
  # Coefficient order in glm.fit matches its fitting formula:
  # (Intercept), Age, Sex, `Chest Pain Type`, `Max. Heart Rate`.
  output$predicted <- renderText({
    age <- as.numeric(input$age)
    sex <- as.numeric(input$sex)
    chest_pain <- as.numeric(input$chpt)
    max_hr <- as.numeric(input$mhr)
    coeffs <- glm.fit$coefficients
    # Linear predictor, then inverse-logit to obtain a probability.
    lp <- coeffs[1] + coeffs[2] * age + coeffs[3] * sex +
      coeffs[4] * chest_pain + coeffs[5] * max_hr
    lp <- as.numeric(lp)
    exp(lp) / (1 + exp(lp))
  })
}
# ==============================================================================
# BUILD APP
# Knit UI and Server to create app
# Launches the Shiny app from the ui/server objects defined above.
shinyApp(ui = ui, server = server)
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{dir2dfList}
\alias{dir2dfList}
\title{Turn a directory of flat files into a list of data.frames}
\usage{
dir2dfList(dfdir, ext = ".txt", exclude = NULL, ...)
}
\arguments{
\item{dfdir}{character string of the directory where you want to load flat files}
\item{ext}{file extension of the type of files to load. Usually \code{.csv} or \code{.txt}}
\item{exclude}{character string of table names to be excluded from app. Needs to be specified to \code{NULL} or a character
vector or else \code{...} arguments will not be handled properly.}
\item{...}{parameters to pass to \code{\link{read.delim}}. Commonly \code{nrow} and \code{sep}.}
}
\value{
list of data.frames
}
\description{
Useful to prepare data for \code{\link{tableNet}}
}
\examples{
\dontrun{
## download some baseball data. NOTE This will download 30MB of data (25 csv files) into a temporary directory
temp <- tempfile()
localDataDir <- paste0(tempdir(), '\\\\lahman2012-csv-onYourComp.zip')
download.file('http://seanlahman.com/files/database/lahman2012-csv.zip', localDataDir)
unzip(localDataDir, exdir=paste0(tempdir(), '\\\\lahman2012-csv-onYourComp')) ## may not be necessary
## create a list of data.frames from .CSVs
dfL <- dir2dfList(paste0(tempdir(), '\\\\lahman2012-csv-onYourComp'), ext='.csv', exclude=NULL, sep=',', stringsAsFactors=F)
}
}
\seealso{
\code{\link{tableNet}} \code{\link{isKey}}
}
| /man/dir2dfList.Rd | permissive | Sunil-Pai-G/Rsenal | R | false | false | 1,434 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{dir2dfList}
\alias{dir2dfList}
\title{Turn a directory of flat files into a list of data.frames}
\usage{
dir2dfList(dfdir, ext = ".txt", exclude = NULL, ...)
}
\arguments{
\item{dfdir}{character string of the directory where you want to load flat files}
\item{ext}{file extension of the type of files to load. Usually \code{.csv} or \code{.txt}}
\item{exclude}{character string of table names to be excluded from app. Needs to be specified to \code{NULL} or a character
vector or else \code{...} arguments will not be handled properly.}
\item{...}{parameters to pass to \code{\link{read.delim}}. Commonly \code{nrow} and \code{sep}.}
}
\value{
list of data.frames
}
\description{
Useful to prepare data for \code{\link{tableNet}}
}
\examples{
\dontrun{
## download some baseball data. NOTE This will download 30MB of data (25 csv files) into a temporary directory
temp <- tempfile()
localDataDir <- paste0(tempdir(), '\\\\lahman2012-csv-onYourComp.zip')
download.file('http://seanlahman.com/files/database/lahman2012-csv.zip', localDataDir)
unzip(localDataDir, exdir=paste0(tempdir(), '\\\\lahman2012-csv-onYourComp')) ## may not be necessary
## create a list of data.frames from .CSVs
dfL <- dir2dfList(paste0(tempdir(), '\\\\lahman2012-csv-onYourComp'), ext='.csv', exclude=NULL, sep=',', stringsAsFactors=F)
}
}
\seealso{
\code{\link{tableNet}} \code{\link{isKey}}
}
|
#' @title Make Template Objects
#' @description Return a `tibble` containing the common set of columns and
#' column types.
#' The following template objects are available:
#' \itemize{
#'  \item \code{data_template}
#'  \item \code{variable_template}
#'  \item \code{indicator_template}
#' }
#' @return a `tibble`
#' @rdname templates
#' @export
make_data_template <- function(){
  # Zero-row tibble whose empty typed vectors fix each column's type
  # (equivalent to building NA placeholders and slicing away the row).
  tibble::tibble(SOURCE = character(),
                 GEOGRAPHY_ID = character(),
                 GEOGRAPHY_ID_TYPE = character(),
                 GEOGRAPHY_NAME = character(),
                 GEOGRAPHY_TYPE = character(),
                 DATE_GROUP_ID = character(),
                 DATE_BEGIN = character(),
                 DATE_END = character(),
                 DATE_RANGE = character(),
                 DATE_RANGE_TYPE = character(),
                 VARIABLE = character(),
                 VARIABLE_SUBTOTAL = character(),
                 VARIABLE_SUBTOTAL_DESC = character(),
                 MEASURE_TYPE = character(),
                 ESTIMATE = numeric(),
                 MOE = numeric())
}
#' @rdname templates
#' @export
make_metadata_template <- function(){

  # Geography and date identifier columns only -- no measure columns.
  schema <- tibble::tibble(
    SOURCE = NA_character_,
    GEOGRAPHY_ID = NA_character_,
    GEOGRAPHY_ID_TYPE = NA_character_,
    GEOGRAPHY_NAME = NA_character_,
    GEOGRAPHY_TYPE = NA_character_,
    DATE_GROUP_ID = NA_character_,
    DATE_BEGIN = NA_character_,
    DATE_END = NA_character_,
    DATE_RANGE = NA_character_,
    DATE_RANGE_TYPE = NA_character_
  )

  # Return the empty (zero-row) template.
  dplyr::slice(schema, 0)
}
#' @rdname templates
#' @export
make_variable_template <- function(){

  # Variable-level template: identifier columns plus the VARIABLE_* fields
  # (subtotal and role descriptors) and numeric ESTIMATE / MOE measures.
  schema <- tibble::tibble(
    SOURCE = NA_character_,
    GEOGRAPHY_ID = NA_character_,
    GEOGRAPHY_ID_TYPE = NA_character_,
    GEOGRAPHY_NAME = NA_character_,
    GEOGRAPHY_TYPE = NA_character_,
    DATE_GROUP_ID = NA_character_,
    DATE_BEGIN = NA_character_,
    DATE_END = NA_character_,
    DATE_RANGE = NA_character_,
    DATE_RANGE_TYPE = NA_character_,
    INDICATOR = NA_character_,
    VARIABLE = NA_character_,
    VARIABLE_DESC = NA_character_,
    VARIABLE_SUBTOTAL = NA_character_,
    VARIABLE_SUBTOTAL_DESC = NA_character_,
    VARIABLE_ROLE = NA_character_,
    MEASURE_TYPE = NA_character_,
    ESTIMATE = NA_real_,
    MOE = NA_real_
  )

  # Strip the prototype row; only the schema remains.
  dplyr::slice(schema, 0)
}
#' @rdname templates
#' @export
make_indicator_template <- function(){

  # Indicator-level template: like the variable template but without the
  # VARIABLE_SUBTOTAL* and VARIABLE_ROLE columns.
  schema <- tibble::tibble(
    SOURCE = NA_character_,
    GEOGRAPHY_ID = NA_character_,
    GEOGRAPHY_ID_TYPE = NA_character_,
    GEOGRAPHY_NAME = NA_character_,
    GEOGRAPHY_TYPE = NA_character_,
    DATE_GROUP_ID = NA_character_,
    DATE_BEGIN = NA_character_,
    DATE_END = NA_character_,
    DATE_RANGE = NA_character_,
    DATE_RANGE_TYPE = NA_character_,
    INDICATOR = NA_character_,
    VARIABLE = NA_character_,
    VARIABLE_DESC = NA_character_,
    MEASURE_TYPE = NA_character_,
    ESTIMATE = NA_real_,
    MOE = NA_real_
  )

  # Strip the prototype row; only the schema remains.
  dplyr::slice(schema, 0)
}
#' @rdname templates
#' @export
make_indicator_dimension_template <- function(){

  # Indicator template extended with a DIMENSION column.
  schema <- tibble::tibble(
    SOURCE = NA_character_,
    GEOGRAPHY_ID = NA_character_,
    GEOGRAPHY_ID_TYPE = NA_character_,
    GEOGRAPHY_NAME = NA_character_,
    GEOGRAPHY_TYPE = NA_character_,
    DATE_GROUP_ID = NA_character_,
    DATE_BEGIN = NA_character_,
    DATE_END = NA_character_,
    DATE_RANGE = NA_character_,
    DATE_RANGE_TYPE = NA_character_,
    DIMENSION = NA_character_,
    INDICATOR = NA_character_,
    VARIABLE = NA_character_,
    VARIABLE_DESC = NA_character_,
    MEASURE_TYPE = NA_character_,
    ESTIMATE = NA_real_,
    MOE = NA_real_
  )

  # Strip the prototype row; only the schema remains.
  dplyr::slice(schema, 0)
}
#' @rdname templates
#' @export
make_indicator_type_template <- function(){
  # Dimension template extended with the INDICATOR_TYPE_* columns
  # (type, thresholds, descriptions, values, model).
  #
  # BUG FIX: a stray `indicator_dimension_template %>%` line used to precede
  # this assignment. `indicator_dimension_template` is not defined in this
  # scope, and piping into the `<-` made the assignment target invalid, so
  # the function errored whenever it was called. The stray line was removed.
  indicator_type_template <- tibble::tibble(SOURCE = NA_character_,
                                 GEOGRAPHY_ID = NA_character_,
                                 GEOGRAPHY_ID_TYPE = NA_character_,
                                 GEOGRAPHY_NAME = NA_character_,
                                 GEOGRAPHY_TYPE = NA_character_,
                                 DATE_GROUP_ID = NA_character_,
                                 DATE_BEGIN = NA_character_,
                                 DATE_END = NA_character_,
                                 DATE_RANGE = NA_character_,
                                 DATE_RANGE_TYPE = NA_character_,
                                 DIMENSION = NA_character_,
                                 INDICATOR = NA_character_,
                                 VARIABLE = NA_character_,
                                 VARIABLE_DESC = NA_character_,
                                 MEASURE_TYPE = NA_character_,
                                 ESTIMATE = NA_real_,
                                 MOE = NA_real_,
                                 INDICATOR_TYPE = NA_character_,
                                 INDICATOR_TYPE_THRESHOLD = NA_character_,
                                 INDICATOR_TYPE_THRESHOLD_VALUE = NA_real_,
                                 INDICATOR_TYPE_DESC = NA_character_,
                                 INDICATOR_TYPE_VALUE = NA_real_,
                                 INDICATOR_TYPE_VALUE_DESC = NA_character_,
                                 INDICATOR_TYPE_MODEL = NA_character_
  ) %>% dplyr::slice(0)

  return(indicator_type_template)
}
#' @rdname templates
#' @export
make_indicator_value_template <- function(){

  # Value-level template: BEGIN/END estimate and MOE columns, differences,
  # RELATIVE* and CHANGE* descriptors with logical flag columns (plain NA,
  # which yields logical columns), and a proximity description.
  schema <- tibble::tibble(
    SOURCE = NA_character_,
    GEOGRAPHY_ID = NA_character_,
    GEOGRAPHY_ID_TYPE = NA_character_,
    GEOGRAPHY_NAME = NA_character_,
    GEOGRAPHY_TYPE = NA_character_,
    DATE_GROUP_ID = NA_character_,
    DATE_BEGIN = NA_character_,
    DATE_END = NA_character_,
    DATE_RANGE = NA_character_,
    DATE_RANGE_TYPE = NA_character_,
    DIMENSION = NA_character_,
    INDICATOR = NA_character_,
    VARIABLE = NA_character_,
    VARIABLE_DESC = NA_character_,
    MEASURE_TYPE = NA_character_,
    ESTIMATE = NA_real_,
    ESTIMATE_BEGIN = NA_real_,
    ESTIMATE_END = NA_real_,
    MOE = NA_real_,
    MOE_BEGIN = NA_real_,
    MOE_END = NA_real_,
    DIFFERENCE = NA_real_ ,
    DIFFERENCE_MOE = NA_real_ ,
    RELATIVE = NA_real_,
    RELATIVE_DESC = NA_character_,
    RELATIVE_THRESHOLD = NA_real_,
    RELATIVE_LGL = NA,
    RELATIVE_BEGIN = NA_real_,
    RELATIVE_DESC_BEGIN = NA_character_,
    RELATIVE_THRESHOLD_BEGIN = NA_real_,
    RELATIVE_LGL_BEGIN = NA,
    RELATIVE_END = NA_real_,
    RELATIVE_DESC_END = NA_character_,
    RELATIVE_THRESHOLD_END = NA_real_,
    RELATIVE_LGL_END = NA,
    CHANGE = NA_real_,
    CHANGE_MOE = NA_real_,
    CHANGE_DESC = NA_character_,
    CHANGE_THRESHOLD = NA_real_,
    CHANGE_LGL = NA,
    RELATIVE_CHANGE_DESC = NA_character_,
    RELATIVE_CHANGE_LGL = NA,
    PROXIMITY_DESC = NA_character_
  )

  # Strip the prototype row; only the schema remains.
  dplyr::slice(schema, 0)
}
| /R/templates.R | permissive | tiernanmartin/NeighborhoodChangeTypology | R | false | false | 9,834 | r | #' @title Make Template Objects
#' @description Return a `tibble` containing the common set of columns and
#' column types.
#' The following template objects are available:
#' \itemize{
#' \item \code{data_template}
#' \item \code{variable_template}
#' \item \code{indicator_template}
#' }
#' @return a `tibble`
#' @rdname templates
#' @export
make_data_template <- function(){
data_template <- tibble::tibble(SOURCE = NA_character_,
GEOGRAPHY_ID = NA_character_,
GEOGRAPHY_ID_TYPE = NA_character_,
GEOGRAPHY_NAME = NA_character_,
GEOGRAPHY_TYPE = NA_character_,
DATE_GROUP_ID = NA_character_,
DATE_BEGIN = NA_character_,
DATE_END = NA_character_,
DATE_RANGE = NA_character_,
DATE_RANGE_TYPE = NA_character_,
VARIABLE = NA_character_,
VARIABLE_SUBTOTAL = NA_character_,
VARIABLE_SUBTOTAL_DESC = NA_character_,
MEASURE_TYPE = NA_character_,
ESTIMATE = NA_real_,
MOE = NA_real_
) %>% dplyr::slice(0)
return(data_template)
}
#' @rdname templates
#' @export
make_metadata_template <- function(){
metadata_template <- tibble::tibble(SOURCE = NA_character_,
GEOGRAPHY_ID = NA_character_,
GEOGRAPHY_ID_TYPE = NA_character_,
GEOGRAPHY_NAME = NA_character_,
GEOGRAPHY_TYPE = NA_character_,
DATE_GROUP_ID = NA_character_,
DATE_BEGIN = NA_character_,
DATE_END = NA_character_,
DATE_RANGE = NA_character_,
DATE_RANGE_TYPE = NA_character_
) %>% dplyr::slice(0)
return(metadata_template)
}
#' @rdname templates
#' @export
make_variable_template <- function(){
variable_template <- tibble::tibble(SOURCE = NA_character_,
GEOGRAPHY_ID = NA_character_,
GEOGRAPHY_ID_TYPE = NA_character_,
GEOGRAPHY_NAME = NA_character_,
GEOGRAPHY_TYPE = NA_character_,
DATE_GROUP_ID = NA_character_,
DATE_BEGIN = NA_character_,
DATE_END = NA_character_,
DATE_RANGE = NA_character_,
DATE_RANGE_TYPE = NA_character_,
INDICATOR = NA_character_,
VARIABLE = NA_character_,
VARIABLE_DESC = NA_character_,
VARIABLE_SUBTOTAL = NA_character_,
VARIABLE_SUBTOTAL_DESC = NA_character_,
VARIABLE_ROLE = NA_character_,
MEASURE_TYPE = NA_character_,
ESTIMATE = NA_real_,
MOE = NA_real_
) %>% dplyr::slice(0)
return(variable_template)
}
#' @rdname templates
#' @export
make_indicator_template <- function(){
indicator_template <- tibble::tibble(SOURCE = NA_character_,
GEOGRAPHY_ID = NA_character_,
GEOGRAPHY_ID_TYPE = NA_character_,
GEOGRAPHY_NAME = NA_character_,
GEOGRAPHY_TYPE = NA_character_,
DATE_GROUP_ID = NA_character_,
DATE_BEGIN = NA_character_,
DATE_END = NA_character_,
DATE_RANGE = NA_character_,
DATE_RANGE_TYPE = NA_character_,
INDICATOR = NA_character_,
VARIABLE = NA_character_,
VARIABLE_DESC = NA_character_,
MEASURE_TYPE = NA_character_,
ESTIMATE = NA_real_,
MOE = NA_real_
) %>% dplyr::slice(0)
return(indicator_template)
}
#' @rdname templates
#' @export
make_indicator_dimension_template <- function(){
indicator_dimension_template <- tibble::tibble(SOURCE = NA_character_,
GEOGRAPHY_ID = NA_character_,
GEOGRAPHY_ID_TYPE = NA_character_,
GEOGRAPHY_NAME = NA_character_,
GEOGRAPHY_TYPE = NA_character_,
DATE_GROUP_ID = NA_character_,
DATE_BEGIN = NA_character_,
DATE_END = NA_character_,
DATE_RANGE = NA_character_,
DATE_RANGE_TYPE = NA_character_,
DIMENSION = NA_character_,
INDICATOR = NA_character_,
VARIABLE = NA_character_,
VARIABLE_DESC = NA_character_,
MEASURE_TYPE = NA_character_,
ESTIMATE = NA_real_,
MOE = NA_real_
) %>% dplyr::slice(0)
return(indicator_dimension_template)
}
#' @rdname templates
#' @export
make_indicator_type_template <- function(){
  # Dimension template extended with the INDICATOR_TYPE_* columns
  # (type, thresholds, descriptions, values, model).
  #
  # BUG FIX: a stray `indicator_dimension_template %>%` line used to precede
  # this assignment. `indicator_dimension_template` is not defined in this
  # scope, and piping into the `<-` made the assignment target invalid, so
  # the function errored whenever it was called. The stray line was removed.
  indicator_type_template <- tibble::tibble(SOURCE = NA_character_,
                                 GEOGRAPHY_ID = NA_character_,
                                 GEOGRAPHY_ID_TYPE = NA_character_,
                                 GEOGRAPHY_NAME = NA_character_,
                                 GEOGRAPHY_TYPE = NA_character_,
                                 DATE_GROUP_ID = NA_character_,
                                 DATE_BEGIN = NA_character_,
                                 DATE_END = NA_character_,
                                 DATE_RANGE = NA_character_,
                                 DATE_RANGE_TYPE = NA_character_,
                                 DIMENSION = NA_character_,
                                 INDICATOR = NA_character_,
                                 VARIABLE = NA_character_,
                                 VARIABLE_DESC = NA_character_,
                                 MEASURE_TYPE = NA_character_,
                                 ESTIMATE = NA_real_,
                                 MOE = NA_real_,
                                 INDICATOR_TYPE = NA_character_,
                                 INDICATOR_TYPE_THRESHOLD = NA_character_,
                                 INDICATOR_TYPE_THRESHOLD_VALUE = NA_real_,
                                 INDICATOR_TYPE_DESC = NA_character_,
                                 INDICATOR_TYPE_VALUE = NA_real_,
                                 INDICATOR_TYPE_VALUE_DESC = NA_character_,
                                 INDICATOR_TYPE_MODEL = NA_character_
  ) %>% dplyr::slice(0)

  return(indicator_type_template)
}
#' @rdname templates
#' @export
make_indicator_value_template <- function(){
indicator_value_template <- tibble::tibble(SOURCE = NA_character_,
GEOGRAPHY_ID = NA_character_,
GEOGRAPHY_ID_TYPE = NA_character_,
GEOGRAPHY_NAME = NA_character_,
GEOGRAPHY_TYPE = NA_character_,
DATE_GROUP_ID = NA_character_,
DATE_BEGIN = NA_character_,
DATE_END = NA_character_,
DATE_RANGE = NA_character_,
DATE_RANGE_TYPE = NA_character_,
DIMENSION = NA_character_,
INDICATOR = NA_character_,
VARIABLE = NA_character_,
VARIABLE_DESC = NA_character_,
MEASURE_TYPE = NA_character_,
ESTIMATE = NA_real_,
ESTIMATE_BEGIN = NA_real_,
ESTIMATE_END = NA_real_,
MOE = NA_real_,
MOE_BEGIN = NA_real_,
MOE_END = NA_real_,
DIFFERENCE = NA_real_ ,
DIFFERENCE_MOE = NA_real_ ,
RELATIVE = NA_real_,
RELATIVE_DESC = NA_character_,
RELATIVE_THRESHOLD = NA_real_,
RELATIVE_LGL = NA,
RELATIVE_BEGIN = NA_real_,
RELATIVE_DESC_BEGIN = NA_character_,
RELATIVE_THRESHOLD_BEGIN = NA_real_,
RELATIVE_LGL_BEGIN = NA,
RELATIVE_END = NA_real_,
RELATIVE_DESC_END = NA_character_,
RELATIVE_THRESHOLD_END = NA_real_,
RELATIVE_LGL_END = NA,
CHANGE = NA_real_,
CHANGE_MOE = NA_real_,
CHANGE_DESC = NA_character_,
CHANGE_THRESHOLD = NA_real_,
CHANGE_LGL = NA,
RELATIVE_CHANGE_DESC = NA_character_,
RELATIVE_CHANGE_LGL = NA,
PROXIMITY_DESC = NA_character_
) %>% dplyr::slice(0)
return(indicator_value_template)
}
|
## run_analysis.R -- build a tidy summary of the UCI HAR data set:
## read the raw train/test files, merge them, keep a subset of the
## acceleration mean/std measurements, and write per-subject/per-activity
## averages to "tidy_data_set.txt" and an Excel file.
library(dplyr)

# Raw measurement matrices and their labels (training and test splits).
X_train <- read.table("C:/Users/CA/Downloads/HAR/1/train/X_train.txt", quote="", comment.char="")
Y_train <- read.table("C:/Users/CA/Downloads/HAR/1/train/y_train.txt", quote="", comment.char="")
X_test <- read.table("C:/Users/CA/Downloads/HAR/1/test/X_test.txt", quote="", comment.char="")
Y_test <- read.table("C:/Users/CA/Downloads/HAR/1/test/y_test.txt", quote="", comment.char="")
Features <- read.table("C:/Users/CA/Downloads/HAR/1/features.txt", quote="", comment.char="")
activity_labels <- read.table("C:/Users/CA/Downloads/HAR/1/activity_labels.txt", quote="", comment.char="")
subject_train <- read.table("C:/Users/CA/Downloads/HAR/1/train/subject_train.txt", quote="", comment.char="")
subject_test <- read.table("C:/Users/CA/Downloads/HAR/1/test/subject_test.txt", quote="", comment.char="")

# Stack training rows on top of test rows for activities, subjects and data.
Activity <- rbind(Y_train, Y_test)
names(Activity) <- "Activity"
View(Activity)
Subject <- rbind(subject_train, subject_test)
names(Subject) <- "Subject"
View(Subject)
Data <- rbind(X_train, X_test)
names(Data) <- Features[, "V2"]
View(Data)

# Keep only the body/gravity acceleration mean and std columns of interest.
Data1 <- cbind.data.frame(Data$`tBodyAcc-mean()-X`, Data$`tBodyAcc-mean()-Y`, Data$`tBodyAcc-std()-X`, Data$`tBodyAcc-std()-Y`, Data$`tGravityAcc-mean()-X`, Data$`tGravityAcc-mean()-Y`, Data$`tGravityAcc-std()-X`, Data$`tGravityAcc-std()-Y`)
View(Data1)

# BUG FIX: `names(activity_labels$V1) <- ...` only set a names attribute on a
# copy of the extracted column and had no effect; rename the column instead.
names(activity_labels)[1] <- "Activity_Code"
names(Activity) <- "Activity_Code"
names(Subject) <- "Subject_Code"
names(Data1)[1] <- "tBodyAcc-mean()-X"
# BUG FIX: the label said "means" although the selected feature is "mean".
names(Data1)[2] <- "tBodyAcc-mean()-Y"
names(Data1)[3] <- "tBodyAcc-std()-X"
names(Data1)[4] <- "tBodyAcc-std()-Y"
names(Data1)[5] <- "tGravityAcc-mean()-X"
names(Data1)[6] <- "tGravityAcc-mean()-Y"
names(Data1)[7] <- "tGravityAcc-std()-X"
names(Data1)[8] <- "tGravityAcc-std()-Y"

Data2 <- cbind(Subject, Activity, Data1)
View(Data2)

# BUG FIX: `tibble(Data2)` packs the whole data frame into a single column
# named Data2, so group_by() could not find Subject_Code/Activity_Code.
df <- as_tibble(Data2)
View(df)
df1 <- group_by(df, Subject_Code, Activity_Code)
# Average every kept measurement per subject and activity.
final_data <- summarise_all(df1, mean)
# BUG FIX: the argument is `row.names` (the code relied on partial matching).
write.table(final_data, file = "tidy_data_set.txt", row.names = FALSE)
View(final_data)
library(writexl)
# BUG FIX: single backslashes ("C:\Users\...") are invalid escape sequences
# in an R string ("\U" without hex digits) and made the whole file fail to
# parse; use forward slashes, which Windows accepts.
write_xlsx(final_data, "C:/Users/CA/Documents/Final_Data.xlsx")
| /run_analysis.R | no_license | capelian4/ResumeGithubRPlotting | R | false | false | 2,121 | r | library(dplyr)
# run_analysis.R (continued after library(dplyr)) -- read the raw UCI HAR
# train/test files, merge them, keep a subset of the acceleration mean/std
# measurements, and write per-subject/per-activity averages.
X_train <- read.table("C:/Users/CA/Downloads/HAR/1/train/X_train.txt", quote="", comment.char="")
Y_train <- read.table("C:/Users/CA/Downloads/HAR/1/train/y_train.txt", quote="", comment.char="")
X_test <- read.table("C:/Users/CA/Downloads/HAR/1/test/X_test.txt", quote="", comment.char="")
Y_test <- read.table("C:/Users/CA/Downloads/HAR/1/test/y_test.txt", quote="", comment.char="")
Features <- read.table("C:/Users/CA/Downloads/HAR/1/features.txt", quote="", comment.char="")
activity_labels <- read.table("C:/Users/CA/Downloads/HAR/1/activity_labels.txt", quote="", comment.char="")
subject_train <- read.table("C:/Users/CA/Downloads/HAR/1/train/subject_train.txt", quote="", comment.char="")
subject_test <- read.table("C:/Users/CA/Downloads/HAR/1/test/subject_test.txt", quote="", comment.char="")

# Stack training rows on top of test rows for activities, subjects and data.
Activity <- rbind(Y_train, Y_test)
names(Activity) <- "Activity"
View(Activity)
Subject <- rbind(subject_train, subject_test)
names(Subject) <- "Subject"
View(Subject)
Data <- rbind(X_train, X_test)
names(Data) <- Features[, "V2"]
View(Data)

# Keep only the body/gravity acceleration mean and std columns of interest.
Data1 <- cbind.data.frame(Data$`tBodyAcc-mean()-X`, Data$`tBodyAcc-mean()-Y`, Data$`tBodyAcc-std()-X`, Data$`tBodyAcc-std()-Y`, Data$`tGravityAcc-mean()-X`, Data$`tGravityAcc-mean()-Y`, Data$`tGravityAcc-std()-X`, Data$`tGravityAcc-std()-Y`)
View(Data1)

# BUG FIX: `names(activity_labels$V1) <- ...` only set a names attribute on a
# copy of the extracted column and had no effect; rename the column instead.
names(activity_labels)[1] <- "Activity_Code"
names(Activity) <- "Activity_Code"
names(Subject) <- "Subject_Code"
names(Data1)[1] <- "tBodyAcc-mean()-X"
# BUG FIX: the label said "means" although the selected feature is "mean".
names(Data1)[2] <- "tBodyAcc-mean()-Y"
names(Data1)[3] <- "tBodyAcc-std()-X"
names(Data1)[4] <- "tBodyAcc-std()-Y"
names(Data1)[5] <- "tGravityAcc-mean()-X"
names(Data1)[6] <- "tGravityAcc-mean()-Y"
names(Data1)[7] <- "tGravityAcc-std()-X"
names(Data1)[8] <- "tGravityAcc-std()-Y"

Data2 <- cbind(Subject, Activity, Data1)
View(Data2)

# BUG FIX: `tibble(Data2)` packs the whole data frame into a single column
# named Data2, so group_by() could not find Subject_Code/Activity_Code.
df <- as_tibble(Data2)
View(df)
df1 <- group_by(df, Subject_Code, Activity_Code)
# Average every kept measurement per subject and activity.
final_data <- summarise_all(df1, mean)
# BUG FIX: the argument is `row.names` (the code relied on partial matching).
write.table(final_data, file = "tidy_data_set.txt", row.names = FALSE)
View(final_data)
library(writexl)
# BUG FIX: single backslashes ("C:\Users\...") are invalid escape sequences
# in an R string ("\U" without hex digits) and made the whole file fail to
# parse; use forward slashes, which Windows accepts.
write_xlsx(final_data, "C:/Users/CA/Documents/Final_Data.xlsx")
|
## The RAINLINK package. Retrieval algorithm for rainfall mapping from microwave links
## in a cellular communication network.
##
## Version 1.11
## Copyright (C) 2017 Aart Overeem
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#' Function to apply filter to remove outliers in path-averaged microwave link attenuations.
#' @description Function to apply filter to remove outliers in link-based rainfall estimates.
#' Malfunctioning link antennas can cause outliers in rainfall retrievals (especially for
#' daily accumulations). These outliers can be removed by using a filter that is based on the
#' assumption that rainfall is correlated in space. The filter discards a time interval of a
#' link for which the cumulative difference between its specific attenuation and that of the
#' surrounding links over the previous 24 h (including the present time interval), F, becomes
#' lower than a threshold value in dB h km\eqn{^{-1}}.
#'
#' Works for a sampling strategy where minimum and maximum received signal powers
#' are provided, and the transmitted power levels are constant.
#'
#' The outlier filter has been extensively tested on minimum received signal powers, i.e.
#' for a sampling strategy where minimum and maximum received signal powers
#' are provided, and the transmitted power levels are constant.
#' This function can also be applied in case of other sampling strategies, because
#' it does not explicitly require minimum and maximum received signal powers.
#' It just applies the selection on all rows in a data frame.
#' Whether the outlier filter will give good results when applied to link data
#' obtained from other sampling strategies would need to be tested.
#' Hence, ''MinMaxRSL'' is kept in this function name to stress that it
#' has been tested for a sampling strategy where minimum and maximum received
#' powers are provided.
#' Update: Now also works for a sampling strategy where instantaneous transmitted and received signal levels are obtained.
#' In case of instantaneous signal levels, it does not matter whether transmitted power levels vary or are constant.
#' The only requirement is that the input data for RAINLINK needs some preprocessing. See ''ManualRAINLINK.pdf''
#' for instructions.
#'
#' Can only be applied when function WetDryNearbyLinkApMinMaxRSL has been executed.
#'
#' @param Data Data frame with microwave link data.
#' @param F Values for filter to remove outliers (dB km\eqn{^{-1}} h).
#' @param FilterThreshold Outlier filter threshold (dB h km\eqn{^{-1}}).
#' @return Data frame with microwave link data.
#' @export OutlierFilterMinMaxRSL
#' @examples
#' OutlierFilterMinMaxRSL(Data=DataPreprocessed,F=WetDry$F,FilterThreshold=-32.5)
#' @author Aart Overeem & Hidde Leijnse
#' @references ''ManualRAINLINK.pdf''
#'
#' Overeem, A., Leijnse, H., and Uijlenhoet, R., 2016: Retrieval algorithm for rainfall mapping from microwave links in a
#' cellular communication network, Atmospheric Measurement Techniques, 9, 2425-2444, https://doi.org/10.5194/amt-9-2425-2016.
OutlierFilterMinMaxRSL <- function(Data,F,FilterThreshold=-32.5)
{
	# A time interval is an outlier when its cumulative attenuation
	# difference F falls to or below the (negative) threshold in dB h km-1.
	outlier <- F <= FilterThreshold
	# Discard those intervals by blanking the minimum received signal level.
	Data$Pmin[outlier] <- NA
	# Hand back the filtered data frame.
	Data
}
| /R/OutlierFilterMinMaxRSL.R | no_license | cvelascof/RAINLINK | R | false | false | 3,858 | r | ## The RAINLINK package. Retrieval algorithm for rainfall mapping from microwave links
## in a cellular communication network.
##
## Version 1.11
## Copyright (C) 2017 Aart Overeem
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#' Function to apply filter to remove outliers in path-averaged microwave link attenuations.
#' @description Function to apply filter to remove outliers in link-based rainfall estimates.
#' Malfunctioning link antennas can cause outliers in rainfall retrievals (especially for
#' daily accumulations). These outliers can be removed by using a filter that is based on the
#' assumption that rainfall is correlated in space. The filter discards a time interval of a
#' link for which the cumulative difference between its specific attenuation and that of the
#' surrounding links over the previous 24 h (including the present time interval), F, becomes
#' lower than a threshold value in dB h km\eqn{^{-1}}.
#'
#' Works for a sampling strategy where minimum and maximum received signal powers
#' are provided, and the transmitted power levels are constant.
#'
#' The outlier filter has been extensively tested on minimum received signal powers, i.e.
#' for a sampling strategy where minimum and maximum received signal powers
#' are provided, and the transmitted power levels are constant.
#' This function can also be applied in case of other sampling strategies, because
#' it does not explicitly require minimum and maximum received signal powers.
#' It just applies the selection on all rows in a data frame.
#' Whether the outlier filter will give good results when applied to link data
#' obtained from other sampling strategies would need to be tested.
#' Hence, ''MinMaxRSL'' is kept in this function name to stress that it
#' has been tested for a sampling strategy where minimum and maximum received
#' powers are provided.
#' Update: Now also works for a sampling strategy where instantaneous transmitted and received signal levels are obtained.
#' In case of instantaneous signal levels, it does not matter whether transmitted power levels vary or are constant.
#' The only requirement is that the input data for RAINLINK needs some preprocessing. See ''ManualRAINLINK.pdf''
#' for instructions.
#'
#' Can only be applied when function WetDryNearbyLinkApMinMaxRSL has been executed.
#'
#' @param Data Data frame with microwave link data.
#' @param F Values for filter to remove outliers (dB km\eqn{^{-1}} h).
#' @param FilterThreshold Outlier filter threshold (dB h km\eqn{^{-1}}).
#' @return Data frame with microwave link data.
#' @export OutlierFilterMinMaxRSL
#' @examples
#' OutlierFilterMinMaxRSL(Data=DataPreprocessed,F=WetDry$F,FilterThreshold=-32.5)
#' @author Aart Overeem & Hidde Leijnse
#' @references ''ManualRAINLINK.pdf''
#'
#' Overeem, A., Leijnse, H., and Uijlenhoet, R., 2016: Retrieval algorithm for rainfall mapping from microwave links in a
#' cellular communication network, Atmospheric Measurement Techniques, 9, 2425-2444, https://doi.org/10.5194/amt-9-2425-2016.
# Discard outlier time intervals by setting the minimum received signal level
# (Pmin) to NA. An interval counts as an outlier when F -- the cumulative
# difference between the link's specific attenuation and that of surrounding
# links over the previous 24 h (dB h km-1) -- is at or below FilterThreshold.
OutlierFilterMinMaxRSL <- function(Data,F,FilterThreshold=-32.5)
{
	# Set Pmin to NA when F is at or below the threshold. (An earlier comment
	# said "exceeds", but the test is `<=` against a negative threshold.)
	Data$Pmin[F <= FilterThreshold] <- NA
	# Return the modified data frame
	return(Data)
}
|
testlist <- list(bytes1 = integer(0), pmutation = 1.38791248479841e-309)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) | /mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612801991-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 136 | r | testlist <- list(bytes1 = integer(0), pmutation = 1.38791248479841e-309)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result) |
testlist <- list(a = 9.98234632759903e-316, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) | /BayesMRA/inst/testfiles/rmvn_arma_scalar/AFL_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1615926095-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 117 | r | testlist <- list(a = 9.98234632759903e-316, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result) |
# Smoke-test script for learnr's quiz()/question()/answer() API.
library(learnr);

# Build a two-question quiz; `correct = TRUE` marks an accepted answer.
# (Question/answer strings are runtime content shown to the learner and are
# intentionally left untranslated.)
bb<-quiz(
  question("下面哪一项不是R语言的最基本数据类型?",
    answer("字符character"),
    answer("数值numeric"),
    answer("日期Date", correct = TRUE),
    answer("整数integer")
  ),
  question("下面关于存货核算,错误的描述是",
    answer("可以先生成凭证,再进行入库核算", correct = TRUE),
    answer("先入库核算,再出库核算"),
    answer("出库成本核算后先检查合法性报告"),
    answer("次月入库,当月出库也可以进行成本成本核算", correct = TRUE)
  )
);
# Auto-print the quiz object.
bb;
# Interactive fuzzy help search for `answer` -- only meaningful at a console.
?? answer;

# A standalone question with custom feedback and retries; the result is
# auto-printed, not stored.
question("What number is the letter A in the alphabet?",
  answer("8"),
  answer("14"),
  answer("1", correct = TRUE),
  answer("23"),
  incorrect = "See [here](https://en.wikipedia.org/wiki/English_alphabet) and try again.",
  allow_retry = TRUE
)
| /data-raw/02-test-quiz.R | permissive | takewiki/learnr | R | false | false | 939 | r | library(learnr);
bb<-quiz(
question("下面哪一项不是R语言的最基本数据类型?",
answer("字符character"),
answer("数值numeric"),
answer("日期Date", correct = TRUE),
answer("整数integer")
),
question("下面关于存货核算,错误的描述是",
answer("可以先生成凭证,再进行入库核算", correct = TRUE),
answer("先入库核算,再出库核算"),
answer("出库成本核算后先检查合法性报告"),
answer("次月入库,当月出库也可以进行成本成本核算", correct = TRUE)
)
);
bb;
?? answer;
question("What number is the letter A in the alphabet?",
answer("8"),
answer("14"),
answer("1", correct = TRUE),
answer("23"),
incorrect = "See [here](https://en.wikipedia.org/wiki/English_alphabet) and try again.",
allow_retry = TRUE
)
|
## Exploratory Data Analysis -- Plot 4: a 2x2 panel of power readings for
## 2007-02-01 and 2007-02-02 from the household power consumption data set.
powerData <- read.table("household_power_consumption.txt", na.strings=c("?",""), header=TRUE, sep=";")
str(powerData)
## make the first column class Date
## BUG FIX: the former `powerData[,Date:= as.Date(...)]` line used the
## data.table `:=` operator, which is undefined for a plain data.frame and
## stopped the script with an error; the base-R conversion below is correct.
powerData$Date <- as.Date(powerData$Date, format="%d/%m/%Y")
str(powerData)
## Combine date and time strings, then parse them into a date-time column.
powerData$timetemp <- paste(powerData$Date, powerData$Time)
str(powerData)
powerData$Time <- strptime(powerData$timetemp, format = "%Y-%m-%d %H:%M:%S")
## subset the two target days
powerSub <- subset(powerData,Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
## Plot 4 -- four panels written to Plot4.png (device closed by dev.off() below)
png("Plot4.png", width=480, height=480)
par(mfrow = c(2,2))
hist(powerSub$Global_active_power, xlab = " Global Active Power (kilowatts)", col= "red", main = "Global Active Power")
plot(powerSub$Time,powerSub$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
plot(powerSub$Time,powerSub$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(powerSub$Time, powerSub$Sub_metering_2, col="red")
lines(powerSub$Time, powerSub$Sub_metering_3, col="blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),lwd=c(2.5,2.5))
plot(powerSub$Time, powerSub$Global_reactive_power, ylab="Global_reactive_power", xlab="datetime", type="l")
dev.off() | /plot4.R | no_license | carolynpearce/ExData_Plotting1 | R | false | false | 1,329 | r | ## Exploratory Data Analysis
## Plot 4: a 2x2 panel of power readings for 2007-02-01 and 2007-02-02.
powerData <- read.table("household_power_consumption.txt", na.strings=c("?",""), header=TRUE, sep=";")
str(powerData)
## make the first column class Date
## BUG FIX: the former `powerData[,Date:= as.Date(...)]` line used the
## data.table `:=` operator, which is undefined for a plain data.frame and
## stopped the script with an error; the base-R conversion below is correct.
powerData$Date <- as.Date(powerData$Date, format="%d/%m/%Y")
str(powerData)
## Combine date and time strings, then parse them into a date-time column.
powerData$timetemp <- paste(powerData$Date, powerData$Time)
str(powerData)
powerData$Time <- strptime(powerData$timetemp, format = "%Y-%m-%d %H:%M:%S")
## subset the two target days
powerSub <- subset(powerData,Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
## Plot 4 -- four panels written to Plot4.png (device closed by dev.off() below)
png("Plot4.png", width=480, height=480)
par(mfrow = c(2,2))
hist(powerSub$Global_active_power, xlab = " Global Active Power (kilowatts)", col= "red", main = "Global Active Power")
plot(powerSub$Time,powerSub$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
plot(powerSub$Time,powerSub$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(powerSub$Time, powerSub$Sub_metering_2, col="red")
lines(powerSub$Time, powerSub$Sub_metering_3, col="blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),lwd=c(2.5,2.5))
plot(powerSub$Time, powerSub$Global_reactive_power, ylab="Global_reactive_power", xlab="datetime", type="l")
dev.off() |
##################PDRs
###APP E
###FS -- packet delivery ratio for application E, scenario "fs"
#Compute Server E received traffic
traceserver_e_fs<-read.table(file = 'result/server_etf_car_fs_tt.txt', sep=' ')
names(traceserver_e_fs)<-c("time", "id", "size", "ori", "dest" )
# BUG FIX: the option name is `digits.secs`; "drigits.secs" was a typo, so
# the call was a silent no-op and sub-second digits were never displayed.
options(digits.secs = 6)
traceserver_e_fs$time <- as.POSIXlt(traceserver_e_fs$time, origin = "1987-10-05 11:00:00")
# Bytes -> bits.
traceserver_e_fs$size<- traceserver_e_fs$size*8
# Bits received per 1-second bin.
sum1segserver_e_fs<-aggregate(list(size = traceserver_e_fs$size), list(segundos = cut(traceserver_e_fs$time, "1 sec")), sum)
mean1segserver_e_fs<-append(list(size = sum1segserver_e_fs$size), list(time = as.numeric(sum1segserver_e_fs$segundos)))
# Per-phase normalisation (divisors 7/11/15 presumably reflect the number of
# active vehicles in each phase -- TODO confirm against the simulation setup).
mean1segserver_e_fs$size[1:150]<- mean1segserver_e_fs$size[1:150]/7
mean1segserver_e_fs$size[151:225]<- mean1segserver_e_fs$size[151:225]/11
mean1segserver_e_fs$size[226:300]<- mean1segserver_e_fs$size[226:300]/15
# Convert bits back to packet counts (1498-byte frames) per 75-second bin.
pd_e_server<-traceserver_e_fs
pd_e_server$size<-pd_e_server$size/8/1498
sumpd75segserver_e_fs<-aggregate(list(size = pd_e_server$size), list(segundos = cut(pd_e_server$time, "75 sec")), sum)
meanpd75segserver_e_fs<-append(list(size = sumpd75segserver_e_fs$size), list(time = as.numeric(sumpd75segserver_e_fs$segundos)))
#Compute Car sent traffic towards Server E (same processing as above)
tracecar_e_fs<-read.table(file = 'result/cartf_fs_5003_tt.txt', sep=' ')
names(tracecar_e_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_e_fs$time <- as.POSIXlt(tracecar_e_fs$time, origin = "1987-10-05 11:00:00")
tracecar_e_fs$size<- tracecar_e_fs$size*8
sum1segcar_e_fs<-aggregate(list(size = tracecar_e_fs$size), list(segundos = cut(tracecar_e_fs$time, "1 sec")), sum)
mean1segcar_e_fs<-append(list(size = sum1segcar_e_fs$size), list(time = as.numeric(sum1segcar_e_fs$segundos)))
mean1segcar_e_fs$size[1:150]<- mean1segcar_e_fs$size[1:150]/7
mean1segcar_e_fs$size[151:225]<- mean1segcar_e_fs$size[151:225]/11
mean1segcar_e_fs$size[226:300]<- mean1segcar_e_fs$size[226:300]/15
pd_e_car<-tracecar_e_fs
pd_e_car$size<-pd_e_car$size/8/1498
sumpd75segcar_e_fs<-aggregate(list(size = pd_e_car$size), list(segundos = cut(pd_e_car$time, "75 sec")), sum)
meanpd75segcar_e_fs<-append(list(size = sumpd75segcar_e_fs$size), list(time = as.numeric(sumpd75segcar_e_fs$segundos)))
#Compute PDR Server E: received / sent, per 75 s window and per second.
pdr75seg_e_fs<-meanpd75segserver_e_fs$size/meanpd75segcar_e_fs$size
pdr1seg_e_fs<-mean1segserver_e_fs$size[1:300]/mean1segcar_e_fs$size[1:300]
# library() fails loudly if Rmisc is missing (require() only warned).
library(Rmisc)
# 95% confidence intervals of the per-second PDR in each 75-second quarter.
w_e_fs<-CI(pdr1seg_e_fs[1:75], ci=0.95)
x_e_fs<-CI(pdr1seg_e_fs[76:150], ci=0.95)
y_e_fs<-CI(pdr1seg_e_fs[151:225], ci=0.95)
# BUG FIX: the last quarter started at 225, overlapping the previous one;
# the quarters are 1:75, 76:150, 151:225 and 226:300.
z_e_fs<-CI(pdr1seg_e_fs[226:300], ci=0.95)
# Upper (CI[1]) and lower (CI[3]) confidence bounds per quarter.
up_e_fs<-c(w_e_fs[1], x_e_fs[1], y_e_fs[1], z_e_fs[1])
lo_e_fs<-c(w_e_fs[3], x_e_fs[3], y_e_fs[3], z_e_fs[3])
###FQ -- packet delivery ratio for application E, scenario "fq"
#Compute Server E received traffic
traceserver_e_fq<-read.table(file = 'result/server_etf_car_fq_tt.txt', sep=' ')
names(traceserver_e_fq)<-c("time", "id", "size", "ori", "dest" )
# BUG FIX: the option name is `digits.secs`; "drigits.secs" was a typo, so
# the call was a silent no-op and sub-second digits were never displayed.
options(digits.secs = 6)
traceserver_e_fq$time <- as.POSIXlt(traceserver_e_fq$time, origin = "1987-10-05 11:00:00")
# Bytes -> bits.
traceserver_e_fq$size<- traceserver_e_fq$size*8
# Bits received per 1-second bin.
sum1segserver_e_fq<-aggregate(list(size = traceserver_e_fq$size), list(segundos = cut(traceserver_e_fq$time, "1 sec")), sum)
mean1segserver_e_fq<-append(list(size = sum1segserver_e_fq$size), list(time = as.numeric(sum1segserver_e_fq$segundos)))
# Per-phase normalisation (divisors 7/11/15 presumably reflect the number of
# active vehicles in each phase -- TODO confirm against the simulation setup).
mean1segserver_e_fq$size[1:150]<- mean1segserver_e_fq$size[1:150]/7
mean1segserver_e_fq$size[151:225]<- mean1segserver_e_fq$size[151:225]/11
mean1segserver_e_fq$size[226:300]<- mean1segserver_e_fq$size[226:300]/15
# Convert bits back to packet counts (1498-byte frames) per 75-second bin.
pd_e_server<-traceserver_e_fq
pd_e_server$size<-pd_e_server$size/8/1498
sumpd75segserver_e_fq<-aggregate(list(size = pd_e_server$size), list(segundos = cut(pd_e_server$time, "75 sec")), sum)
meanpd75segserver_e_fq<-append(list(size = sumpd75segserver_e_fq$size), list(time = as.numeric(sumpd75segserver_e_fq$segundos)))
#Compute Car sent traffic towards Server E (same processing as above)
tracecar_e_fq<-read.table(file = 'result/cartf_fq_5003_tt.txt', sep=' ')
names(tracecar_e_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_e_fq$time <- as.POSIXlt(tracecar_e_fq$time, origin = "1987-10-05 11:00:00")
tracecar_e_fq$size<- tracecar_e_fq$size*8
sum1segcar_e_fq<-aggregate(list(size = tracecar_e_fq$size), list(segundos = cut(tracecar_e_fq$time, "1 sec")), sum)
mean1segcar_e_fq<-append(list(size = sum1segcar_e_fq$size), list(time = as.numeric(sum1segcar_e_fq$segundos)))
mean1segcar_e_fq$size[1:150]<- mean1segcar_e_fq$size[1:150]/7
mean1segcar_e_fq$size[151:225]<- mean1segcar_e_fq$size[151:225]/11
mean1segcar_e_fq$size[226:300]<- mean1segcar_e_fq$size[226:300]/15
pd_e_car<-tracecar_e_fq
pd_e_car$size<-pd_e_car$size/8/1498
sumpd75segcar_e_fq<-aggregate(list(size = pd_e_car$size), list(segundos = cut(pd_e_car$time, "75 sec")), sum)
meanpd75segcar_e_fq<-append(list(size = sumpd75segcar_e_fq$size), list(time = as.numeric(sumpd75segcar_e_fq$segundos)))
#Compute PDR Server E: received / sent, per 75 s window and per second.
pdr75seg_e_fq<-meanpd75segserver_e_fq$size/meanpd75segcar_e_fq$size
pdr1seg_e_fq<-mean1segserver_e_fq$size[1:300]/mean1segcar_e_fq$size[1:300]
# library() fails loudly if Rmisc is missing (require() only warned).
library(Rmisc)
# 95% confidence intervals of the per-second PDR in each 75-second quarter.
w_e_fq<-CI(pdr1seg_e_fq[1:75], ci=0.95)
x_e_fq<-CI(pdr1seg_e_fq[76:150], ci=0.95)
y_e_fq<-CI(pdr1seg_e_fq[151:225], ci=0.95)
# BUG FIX: the last quarter started at 225, overlapping the previous one;
# the quarters are 1:75, 76:150, 151:225 and 226:300.
z_e_fq<-CI(pdr1seg_e_fq[226:300], ci=0.95)
# Upper (CI[1]) and lower (CI[3]) confidence bounds per quarter.
up_e_fq<-c(w_e_fq[1], x_e_fq[1], y_e_fq[1], z_e_fq[1])
lo_e_fq<-c(w_e_fq[3], x_e_fq[3], y_e_fq[3], z_e_fq[3])
####FN
## Best-effort scheme, application E: PDR = received at server / sent by cars.
#Compute Server E received
traceserver_e_fn<-read.table(file = 'result/server_etf_car_fn_tt.txt', sep=' ')
names(traceserver_e_fn)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_e_fn$time <- as.POSIXlt(traceserver_e_fn$time, origin = "1987-10-05 11:00:00")
traceserver_e_fn$size<- traceserver_e_fn$size*8  # bytes -> bits
sum1segserver_e_fn<-aggregate(list(size = traceserver_e_fn$size), list(segundos = cut(traceserver_e_fn$time, "1 sec")), sum)
mean1segserver_e_fn<-append(list(size = sum1segserver_e_fn$size), list(time = as.numeric(sum1segserver_e_fn$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_e_fn$size[1:150]<- mean1segserver_e_fn$size[1:150]/7
mean1segserver_e_fn$size[151:225]<- mean1segserver_e_fn$size[151:225]/11
mean1segserver_e_fn$size[226:300]<- mean1segserver_e_fn$size[226:300]/15
pd_e_server<-traceserver_e_fn
pd_e_server$size<-pd_e_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_e_fn<-aggregate(list(size = pd_e_server$size), list(segundos = cut(pd_e_server$time, "75 sec")), sum)
meanpd75segserver_e_fn<-append(list(size = sumpd75segserver_e_fn$size), list(time = as.numeric(sumpd75segserver_e_fn$segundos)))
#Compute Car sent Server E
tracecar_e_fn<-read.table(file = 'result/cartf_fn_5003_tt.txt', sep=' ')
names(tracecar_e_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_e_fn$time <- as.POSIXlt(tracecar_e_fn$time, origin = "1987-10-05 11:00:00")
tracecar_e_fn$size<- tracecar_e_fn$size*8
sum1segcar_e_fn<-aggregate(list(size = tracecar_e_fn$size), list(segundos = cut(tracecar_e_fn$time, "1 sec")), sum)
mean1segcar_e_fn<-append(list(size = sum1segcar_e_fn$size), list(time = as.numeric(sum1segcar_e_fn$segundos)))
mean1segcar_e_fn$size[1:150]<- mean1segcar_e_fn$size[1:150]/7
mean1segcar_e_fn$size[151:225]<- mean1segcar_e_fn$size[151:225]/11
mean1segcar_e_fn$size[226:300]<- mean1segcar_e_fn$size[226:300]/15
pd_e_car<-tracecar_e_fn
pd_e_car$size<-pd_e_car$size/8/1498
sumpd75segcar_e_fn<-aggregate(list(size = pd_e_car$size), list(segundos = cut(pd_e_car$time, "75 sec")), sum)
meanpd75segcar_e_fn<-append(list(size = sumpd75segcar_e_fn$size), list(time = as.numeric(sumpd75segcar_e_fn$segundos)))
#Compute PDR Server E
pdr75seg_e_fn<-meanpd75segserver_e_fn$size/meanpd75segcar_e_fn$size
pdr1seg_e_fn<-mean1segserver_e_fn$size[1:300]/mean1segcar_e_fn$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_e_fn<-CI(pdr1seg_e_fn[1:75], ci=0.95)
x_e_fn<-CI(pdr1seg_e_fn[76:150], ci=0.95)
y_e_fn<-CI(pdr1seg_e_fn[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_e_fn<-CI(pdr1seg_e_fn[226:300], ci=0.95)
up_e_fn<-c(w_e_fn[1], x_e_fn[1], y_e_fn[1], z_e_fn[1])
lo_e_fn<-c(w_e_fn[3], x_e_fn[3], y_e_fn[3], z_e_fn[3])
# library() errors immediately if plotrix is missing; require() only warns.
library(plotrix)
## PDR per congestion level (C1-C4) for application E, one curve per scheme.
## Style fixes: TRUE/FALSE instead of T/F, numeric lwd instead of "2".
#plotCI(c(1:4), pdr75seg_e_fs[1:4], ui=up_e_fs, li=lo_e_fs, col="red", main="PDR Application E", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.5,1), xaxt="n")
plotCI(c(1:4), pdr75seg_e_fs[1:4], ui=up_e_fs, li=lo_e_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd=2, ylim=c(0.5,1), xaxt="n")
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_e_fs[1:4], type = "l", col="red", lwd=2)
par(new=TRUE)  # overlay next plot on the same axes
plotCI(c(1:4), pdr75seg_e_fq[1:4], ui=up_e_fq, li=lo_e_fq, col="blue", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e_fq[1:4], type = "l", col="blue", lwd=2)
par(new=TRUE)
plotCI(c(1:4), pdr75seg_e_fn[1:4], ui=up_e_fn, li=lo_e_fn, col="orange", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e_fn[1:4], type = "l", col="orange", lwd=2)
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
#################
###APP E2
###FS
## Framework scheme, application E2: PDR = received at server / sent by cars.
#Compute Server E2 received
traceserver_e2_fs<-read.table(file = 'result/server_e2tf_car_fs_tt.txt', sep=' ')
names(traceserver_e2_fs)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_e2_fs$time <- as.POSIXlt(traceserver_e2_fs$time, origin = "1987-10-05 11:00:00")
traceserver_e2_fs$size<- traceserver_e2_fs$size*8  # bytes -> bits
sum1segserver_e2_fs<-aggregate(list(size = traceserver_e2_fs$size), list(segundos = cut(traceserver_e2_fs$time, "1 sec")), sum)
mean1segserver_e2_fs<-append(list(size = sum1segserver_e2_fs$size), list(time = as.numeric(sum1segserver_e2_fs$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_e2_fs$size[1:150]<- mean1segserver_e2_fs$size[1:150]/7
mean1segserver_e2_fs$size[151:225]<- mean1segserver_e2_fs$size[151:225]/11
mean1segserver_e2_fs$size[226:300]<- mean1segserver_e2_fs$size[226:300]/15
pd_e2_server<-traceserver_e2_fs
pd_e2_server$size<-pd_e2_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_e2_fs<-aggregate(list(size = pd_e2_server$size), list(segundos = cut(pd_e2_server$time, "75 sec")), sum)
meanpd75segserver_e2_fs<-append(list(size = sumpd75segserver_e2_fs$size), list(time = as.numeric(sumpd75segserver_e2_fs$segundos)))
#Compute Car sent Server E2
tracecar_e2_fs<-read.table(file = 'result/cartf_fs_5004_tt.txt', sep=' ')
names(tracecar_e2_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_e2_fs$time <- as.POSIXlt(tracecar_e2_fs$time, origin = "1987-10-05 11:00:00")
tracecar_e2_fs$size<- tracecar_e2_fs$size*8
sum1segcar_e2_fs<-aggregate(list(size = tracecar_e2_fs$size), list(segundos = cut(tracecar_e2_fs$time, "1 sec")), sum)
mean1segcar_e2_fs<-append(list(size = sum1segcar_e2_fs$size), list(time = as.numeric(sum1segcar_e2_fs$segundos)))
mean1segcar_e2_fs$size[1:150]<- mean1segcar_e2_fs$size[1:150]/7
mean1segcar_e2_fs$size[151:225]<- mean1segcar_e2_fs$size[151:225]/11
mean1segcar_e2_fs$size[226:300]<- mean1segcar_e2_fs$size[226:300]/15
pd_e2_car<-tracecar_e2_fs
pd_e2_car$size<-pd_e2_car$size/8/1498
sumpd75segcar_e2_fs<-aggregate(list(size = pd_e2_car$size), list(segundos = cut(pd_e2_car$time, "75 sec")), sum)
meanpd75segcar_e2_fs<-append(list(size = sumpd75segcar_e2_fs$size), list(time = as.numeric(sumpd75segcar_e2_fs$segundos)))
#Compute PDR Server E2
pdr75seg_e2_fs<-meanpd75segserver_e2_fs$size/meanpd75segcar_e2_fs$size
pdr1seg_e2_fs<-mean1segserver_e2_fs$size[1:300]/mean1segcar_e2_fs$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_e2_fs<-CI(pdr1seg_e2_fs[1:75], ci=0.95)
x_e2_fs<-CI(pdr1seg_e2_fs[76:150], ci=0.95)
y_e2_fs<-CI(pdr1seg_e2_fs[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_e2_fs<-CI(pdr1seg_e2_fs[226:300], ci=0.95)
up_e2_fs<-c(w_e2_fs[1], x_e2_fs[1], y_e2_fs[1], z_e2_fs[1])
lo_e2_fs<-c(w_e2_fs[3], x_e2_fs[3], y_e2_fs[3], z_e2_fs[3])
###FQ
## QoS scheme, application E2: PDR = received at server / sent by cars.
#Compute Server E2 received
traceserver_e2_fq<-read.table(file = 'result/server_e2tf_car_fq_tt.txt', sep=' ')
names(traceserver_e2_fq)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_e2_fq$time <- as.POSIXlt(traceserver_e2_fq$time, origin = "1987-10-05 11:00:00")
traceserver_e2_fq$size<- traceserver_e2_fq$size*8  # bytes -> bits
sum1segserver_e2_fq<-aggregate(list(size = traceserver_e2_fq$size), list(segundos = cut(traceserver_e2_fq$time, "1 sec")), sum)
mean1segserver_e2_fq<-append(list(size = sum1segserver_e2_fq$size), list(time = as.numeric(sum1segserver_e2_fq$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_e2_fq$size[1:150]<- mean1segserver_e2_fq$size[1:150]/7
mean1segserver_e2_fq$size[151:225]<- mean1segserver_e2_fq$size[151:225]/11
mean1segserver_e2_fq$size[226:300]<- mean1segserver_e2_fq$size[226:300]/15
pd_e2_server<-traceserver_e2_fq
pd_e2_server$size<-pd_e2_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_e2_fq<-aggregate(list(size = pd_e2_server$size), list(segundos = cut(pd_e2_server$time, "75 sec")), sum)
meanpd75segserver_e2_fq<-append(list(size = sumpd75segserver_e2_fq$size), list(time = as.numeric(sumpd75segserver_e2_fq$segundos)))
#Compute Car sent Server E2
tracecar_e2_fq<-read.table(file = 'result/cartf_fq_5004_tt.txt', sep=' ')
names(tracecar_e2_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_e2_fq$time <- as.POSIXlt(tracecar_e2_fq$time, origin = "1987-10-05 11:00:00")
tracecar_e2_fq$size<- tracecar_e2_fq$size*8
sum1segcar_e2_fq<-aggregate(list(size = tracecar_e2_fq$size), list(segundos = cut(tracecar_e2_fq$time, "1 sec")), sum)
mean1segcar_e2_fq<-append(list(size = sum1segcar_e2_fq$size), list(time = as.numeric(sum1segcar_e2_fq$segundos)))
mean1segcar_e2_fq$size[1:150]<- mean1segcar_e2_fq$size[1:150]/7
mean1segcar_e2_fq$size[151:225]<- mean1segcar_e2_fq$size[151:225]/11
mean1segcar_e2_fq$size[226:300]<- mean1segcar_e2_fq$size[226:300]/15
pd_e2_car<-tracecar_e2_fq
pd_e2_car$size<-pd_e2_car$size/8/1498
sumpd75segcar_e2_fq<-aggregate(list(size = pd_e2_car$size), list(segundos = cut(pd_e2_car$time, "75 sec")), sum)
meanpd75segcar_e2_fq<-append(list(size = sumpd75segcar_e2_fq$size), list(time = as.numeric(sumpd75segcar_e2_fq$segundos)))
#Compute PDR Server E2
pdr75seg_e2_fq<-meanpd75segserver_e2_fq$size/meanpd75segcar_e2_fq$size
pdr1seg_e2_fq<-mean1segserver_e2_fq$size[1:300]/mean1segcar_e2_fq$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_e2_fq<-CI(pdr1seg_e2_fq[1:75], ci=0.95)
x_e2_fq<-CI(pdr1seg_e2_fq[76:150], ci=0.95)
y_e2_fq<-CI(pdr1seg_e2_fq[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_e2_fq<-CI(pdr1seg_e2_fq[226:300], ci=0.95)
up_e2_fq<-c(w_e2_fq[1], x_e2_fq[1], y_e2_fq[1], z_e2_fq[1])
lo_e2_fq<-c(w_e2_fq[3], x_e2_fq[3], y_e2_fq[3], z_e2_fq[3])
####FN
## Best-effort scheme, application E2: PDR = received at server / sent by cars.
#Compute Server E2 received
traceserver_e2_fn<-read.table(file = 'result/server_e2tf_car_fn_tt.txt', sep=' ')
names(traceserver_e2_fn)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_e2_fn$time <- as.POSIXlt(traceserver_e2_fn$time, origin = "1987-10-05 11:00:00")
traceserver_e2_fn$size<- traceserver_e2_fn$size*8  # bytes -> bits
sum1segserver_e2_fn<-aggregate(list(size = traceserver_e2_fn$size), list(segundos = cut(traceserver_e2_fn$time, "1 sec")), sum)
mean1segserver_e2_fn<-append(list(size = sum1segserver_e2_fn$size), list(time = as.numeric(sum1segserver_e2_fn$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_e2_fn$size[1:150]<- mean1segserver_e2_fn$size[1:150]/7
mean1segserver_e2_fn$size[151:225]<- mean1segserver_e2_fn$size[151:225]/11
mean1segserver_e2_fn$size[226:300]<- mean1segserver_e2_fn$size[226:300]/15
pd_e2_server<-traceserver_e2_fn
pd_e2_server$size<-pd_e2_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_e2_fn<-aggregate(list(size = pd_e2_server$size), list(segundos = cut(pd_e2_server$time, "75 sec")), sum)
meanpd75segserver_e2_fn<-append(list(size = sumpd75segserver_e2_fn$size), list(time = as.numeric(sumpd75segserver_e2_fn$segundos)))
#Compute Car sent Server E2
tracecar_e2_fn<-read.table(file = 'result/cartf_fn_5004_tt.txt', sep=' ')
names(tracecar_e2_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_e2_fn$time <- as.POSIXlt(tracecar_e2_fn$time, origin = "1987-10-05 11:00:00")
tracecar_e2_fn$size<- tracecar_e2_fn$size*8
sum1segcar_e2_fn<-aggregate(list(size = tracecar_e2_fn$size), list(segundos = cut(tracecar_e2_fn$time, "1 sec")), sum)
mean1segcar_e2_fn<-append(list(size = sum1segcar_e2_fn$size), list(time = as.numeric(sum1segcar_e2_fn$segundos)))
mean1segcar_e2_fn$size[1:150]<- mean1segcar_e2_fn$size[1:150]/7
mean1segcar_e2_fn$size[151:225]<- mean1segcar_e2_fn$size[151:225]/11
mean1segcar_e2_fn$size[226:300]<- mean1segcar_e2_fn$size[226:300]/15
pd_e2_car<-tracecar_e2_fn
pd_e2_car$size<-pd_e2_car$size/8/1498
sumpd75segcar_e2_fn<-aggregate(list(size = pd_e2_car$size), list(segundos = cut(pd_e2_car$time, "75 sec")), sum)
meanpd75segcar_e2_fn<-append(list(size = sumpd75segcar_e2_fn$size), list(time = as.numeric(sumpd75segcar_e2_fn$segundos)))
#Compute PDR Server E2
pdr75seg_e2_fn<-meanpd75segserver_e2_fn$size/meanpd75segcar_e2_fn$size
pdr1seg_e2_fn<-mean1segserver_e2_fn$size[1:300]/mean1segcar_e2_fn$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_e2_fn<-CI(pdr1seg_e2_fn[1:75], ci=0.95)
x_e2_fn<-CI(pdr1seg_e2_fn[76:150], ci=0.95)
y_e2_fn<-CI(pdr1seg_e2_fn[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_e2_fn<-CI(pdr1seg_e2_fn[226:300], ci=0.95)
up_e2_fn<-c(w_e2_fn[1], x_e2_fn[1], y_e2_fn[1], z_e2_fn[1])
lo_e2_fn<-c(w_e2_fn[3], x_e2_fn[3], y_e2_fn[3], z_e2_fn[3])
# library() errors immediately if plotrix is missing; require() only warns.
library(plotrix)
## PDR per congestion level (C1-C4) for application E2, one curve per scheme.
## Style fixes: TRUE/FALSE instead of T/F, numeric lwd instead of "2".
#plotCI(c(1:4), pdr75seg_e2_fs[1:4], ui=up_e2_fs, li=lo_e2_fs, col="red", main="PDR Application E2", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.5,1), xaxt="n")
plotCI(c(1:4), pdr75seg_e2_fs[1:4], ui=up_e2_fs, li=lo_e2_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd=2, ylim=c(0.5,1), xaxt="n")
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_e2_fs[1:4], type = "l", col="red", lwd=2)
par(new=TRUE)  # overlay next plot on the same axes
plotCI(c(1:4), pdr75seg_e2_fq[1:4], ui=up_e2_fq, li=lo_e2_fq, col="blue", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e2_fq[1:4], type = "l", col="blue", lwd=2)
par(new=TRUE)
plotCI(c(1:4), pdr75seg_e2_fn[1:4], ui=up_e2_fn, li=lo_e2_fn, col="orange", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e2_fn[1:4], type = "l", col="orange", lwd=2)
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
######################
#APP G
###FS
## Framework scheme, application G: PDR = received at server / sent by cars.
#Compute Server G received
traceserver_g_fs<-read.table(file = 'result/server_gtf_car_fs_tt.txt', sep=' ')
names(traceserver_g_fs)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_g_fs$time <- as.POSIXlt(traceserver_g_fs$time, origin = "1987-10-05 11:00:00")
traceserver_g_fs$size<- traceserver_g_fs$size*8  # bytes -> bits
sum1segserver_g_fs<-aggregate(list(size = traceserver_g_fs$size), list(segundos = cut(traceserver_g_fs$time, "1 sec")), sum)
mean1segserver_g_fs<-append(list(size = sum1segserver_g_fs$size), list(time = as.numeric(sum1segserver_g_fs$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_g_fs$size[1:150]<- mean1segserver_g_fs$size[1:150]/7
mean1segserver_g_fs$size[151:225]<- mean1segserver_g_fs$size[151:225]/11
mean1segserver_g_fs$size[226:300]<- mean1segserver_g_fs$size[226:300]/15
pd_g_server<-traceserver_g_fs
pd_g_server$size<-pd_g_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_g_fs<-aggregate(list(size = pd_g_server$size), list(segundos = cut(pd_g_server$time, "75 sec")), sum)
meanpd75segserver_g_fs<-append(list(size = sumpd75segserver_g_fs$size), list(time = as.numeric(sumpd75segserver_g_fs$segundos)))
#Compute Car sent Server G
tracecar_g_fs<-read.table(file = 'result/cartf_fs_5005_tt.txt', sep=' ')
names(tracecar_g_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_g_fs$time <- as.POSIXlt(tracecar_g_fs$time, origin = "1987-10-05 11:00:00")
tracecar_g_fs$size<- tracecar_g_fs$size*8
sum1segcar_g_fs<-aggregate(list(size = tracecar_g_fs$size), list(segundos = cut(tracecar_g_fs$time, "1 sec")), sum)
mean1segcar_g_fs<-append(list(size = sum1segcar_g_fs$size), list(time = as.numeric(sum1segcar_g_fs$segundos)))
mean1segcar_g_fs$size[1:150]<- mean1segcar_g_fs$size[1:150]/7
mean1segcar_g_fs$size[151:225]<- mean1segcar_g_fs$size[151:225]/11
mean1segcar_g_fs$size[226:300]<- mean1segcar_g_fs$size[226:300]/15
pd_g_car<-tracecar_g_fs
pd_g_car$size<-pd_g_car$size/8/1498
sumpd75segcar_g_fs<-aggregate(list(size = pd_g_car$size), list(segundos = cut(pd_g_car$time, "75 sec")), sum)
meanpd75segcar_g_fs<-append(list(size = sumpd75segcar_g_fs$size), list(time = as.numeric(sumpd75segcar_g_fs$segundos)))
#Compute PDR Server G
pdr75seg_g_fs<-meanpd75segserver_g_fs$size/meanpd75segcar_g_fs$size
pdr1seg_g_fs<-mean1segserver_g_fs$size[1:300]/mean1segcar_g_fs$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_g_fs<-CI(pdr1seg_g_fs[1:75], ci=0.95)
x_g_fs<-CI(pdr1seg_g_fs[76:150], ci=0.95)
y_g_fs<-CI(pdr1seg_g_fs[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_g_fs<-CI(pdr1seg_g_fs[226:300], ci=0.95)
up_g_fs<-c(w_g_fs[1], x_g_fs[1], y_g_fs[1], z_g_fs[1])
lo_g_fs<-c(w_g_fs[3], x_g_fs[3], y_g_fs[3], z_g_fs[3])
###FQ
## QoS scheme, application G: PDR = received at server / sent by cars.
#Compute Server G received
traceserver_g_fq<-read.table(file = 'result/server_gtf_car_fq_tt.txt', sep=' ')
names(traceserver_g_fq)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_g_fq$time <- as.POSIXlt(traceserver_g_fq$time, origin = "1987-10-05 11:00:00")
traceserver_g_fq$size<- traceserver_g_fq$size*8  # bytes -> bits
sum1segserver_g_fq<-aggregate(list(size = traceserver_g_fq$size), list(segundos = cut(traceserver_g_fq$time, "1 sec")), sum)
mean1segserver_g_fq<-append(list(size = sum1segserver_g_fq$size), list(time = as.numeric(sum1segserver_g_fq$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_g_fq$size[1:150]<- mean1segserver_g_fq$size[1:150]/7
mean1segserver_g_fq$size[151:225]<- mean1segserver_g_fq$size[151:225]/11
mean1segserver_g_fq$size[226:300]<- mean1segserver_g_fq$size[226:300]/15
pd_g_server<-traceserver_g_fq
pd_g_server$size<-pd_g_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_g_fq<-aggregate(list(size = pd_g_server$size), list(segundos = cut(pd_g_server$time, "75 sec")), sum)
meanpd75segserver_g_fq<-append(list(size = sumpd75segserver_g_fq$size), list(time = as.numeric(sumpd75segserver_g_fq$segundos)))
#Compute Car sent Server G
tracecar_g_fq<-read.table(file = 'result/cartf_fq_5005_tt.txt', sep=' ')
names(tracecar_g_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_g_fq$time <- as.POSIXlt(tracecar_g_fq$time, origin = "1987-10-05 11:00:00")
tracecar_g_fq$size<- tracecar_g_fq$size*8
sum1segcar_g_fq<-aggregate(list(size = tracecar_g_fq$size), list(segundos = cut(tracecar_g_fq$time, "1 sec")), sum)
mean1segcar_g_fq<-append(list(size = sum1segcar_g_fq$size), list(time = as.numeric(sum1segcar_g_fq$segundos)))
mean1segcar_g_fq$size[1:150]<- mean1segcar_g_fq$size[1:150]/7
mean1segcar_g_fq$size[151:225]<- mean1segcar_g_fq$size[151:225]/11
mean1segcar_g_fq$size[226:300]<- mean1segcar_g_fq$size[226:300]/15
pd_g_car<-tracecar_g_fq
pd_g_car$size<-pd_g_car$size/8/1498
sumpd75segcar_g_fq<-aggregate(list(size = pd_g_car$size), list(segundos = cut(pd_g_car$time, "75 sec")), sum)
meanpd75segcar_g_fq<-append(list(size = sumpd75segcar_g_fq$size), list(time = as.numeric(sumpd75segcar_g_fq$segundos)))
#Compute PDR Server G
pdr75seg_g_fq<-meanpd75segserver_g_fq$size/meanpd75segcar_g_fq$size
pdr1seg_g_fq<-mean1segserver_g_fq$size[1:300]/mean1segcar_g_fq$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_g_fq<-CI(pdr1seg_g_fq[1:75], ci=0.95)
x_g_fq<-CI(pdr1seg_g_fq[76:150], ci=0.95)
y_g_fq<-CI(pdr1seg_g_fq[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_g_fq<-CI(pdr1seg_g_fq[226:300], ci=0.95)
up_g_fq<-c(w_g_fq[1], x_g_fq[1], y_g_fq[1], z_g_fq[1])
lo_g_fq<-c(w_g_fq[3], x_g_fq[3], y_g_fq[3], z_g_fq[3])
####FN
## Best-effort scheme, application G: PDR = received at server / sent by cars.
#Compute Server G received
traceserver_g_fn<-read.table(file = 'result/server_gtf_car_fn_tt.txt', sep=' ')
names(traceserver_g_fn)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_g_fn$time <- as.POSIXlt(traceserver_g_fn$time, origin = "1987-10-05 11:00:00")
traceserver_g_fn$size<- traceserver_g_fn$size*8  # bytes -> bits
sum1segserver_g_fn<-aggregate(list(size = traceserver_g_fn$size), list(segundos = cut(traceserver_g_fn$time, "1 sec")), sum)
mean1segserver_g_fn<-append(list(size = sum1segserver_g_fn$size), list(time = as.numeric(sum1segserver_g_fn$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_g_fn$size[1:150]<- mean1segserver_g_fn$size[1:150]/7
mean1segserver_g_fn$size[151:225]<- mean1segserver_g_fn$size[151:225]/11
mean1segserver_g_fn$size[226:300]<- mean1segserver_g_fn$size[226:300]/15
pd_g_server<-traceserver_g_fn
pd_g_server$size<-pd_g_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_g_fn<-aggregate(list(size = pd_g_server$size), list(segundos = cut(pd_g_server$time, "75 sec")), sum)
meanpd75segserver_g_fn<-append(list(size = sumpd75segserver_g_fn$size), list(time = as.numeric(sumpd75segserver_g_fn$segundos)))
#Compute Car sent Server G
tracecar_g_fn<-read.table(file = 'result/cartf_fn_5005_tt.txt', sep=' ')
names(tracecar_g_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_g_fn$time <- as.POSIXlt(tracecar_g_fn$time, origin = "1987-10-05 11:00:00")
tracecar_g_fn$size<- tracecar_g_fn$size*8
sum1segcar_g_fn<-aggregate(list(size = tracecar_g_fn$size), list(segundos = cut(tracecar_g_fn$time, "1 sec")), sum)
mean1segcar_g_fn<-append(list(size = sum1segcar_g_fn$size), list(time = as.numeric(sum1segcar_g_fn$segundos)))
mean1segcar_g_fn$size[1:150]<- mean1segcar_g_fn$size[1:150]/7
mean1segcar_g_fn$size[151:225]<- mean1segcar_g_fn$size[151:225]/11
mean1segcar_g_fn$size[226:300]<- mean1segcar_g_fn$size[226:300]/15
pd_g_car<-tracecar_g_fn
pd_g_car$size<-pd_g_car$size/8/1498
sumpd75segcar_g_fn<-aggregate(list(size = pd_g_car$size), list(segundos = cut(pd_g_car$time, "75 sec")), sum)
meanpd75segcar_g_fn<-append(list(size = sumpd75segcar_g_fn$size), list(time = as.numeric(sumpd75segcar_g_fn$segundos)))
#Compute PDR Server G
pdr75seg_g_fn<-meanpd75segserver_g_fn$size/meanpd75segcar_g_fn$size
pdr1seg_g_fn<-mean1segserver_g_fn$size[1:300]/mean1segcar_g_fn$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_g_fn<-CI(pdr1seg_g_fn[1:75], ci=0.95)
x_g_fn<-CI(pdr1seg_g_fn[76:150], ci=0.95)
y_g_fn<-CI(pdr1seg_g_fn[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_g_fn<-CI(pdr1seg_g_fn[226:300], ci=0.95)
up_g_fn<-c(w_g_fn[1], x_g_fn[1], y_g_fn[1], z_g_fn[1])
lo_g_fn<-c(w_g_fn[3], x_g_fn[3], y_g_fn[3], z_g_fn[3])
# library() errors immediately if plotrix is missing; require() only warns.
library(plotrix)
## PDR per congestion level (C1-C4) for application G, one curve per scheme.
## Style fixes: TRUE/FALSE instead of T/F, numeric lwd instead of "2".
#plotCI(c(1:4), pdr75seg_g_fs[1:4], ui=up_g_fs, li=lo_g_fs, col="red", main="PDR Application G", ylab = "PDR", xlab = "Congestion level", lwd="2" , ylim=c(0,1), xaxt="n")
plotCI(c(1:4), pdr75seg_g_fs[1:4], ui=up_g_fs, li=lo_g_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd=2 , ylim=c(0,1), xaxt="n")
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_g_fs[1:4], type = "l", col="red", lwd=2)
par(new=TRUE)  # overlay next plot on the same axes
plotCI(c(1:4), pdr75seg_g_fq[1:4], ui=up_g_fq, li=lo_g_fq, col="blue", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0,1))
lines(c(1:4),pdr75seg_g_fq[1:4], type = "l", col="blue", lwd=2)
par(new=TRUE)
plotCI(c(1:4), pdr75seg_g_fn[1:4], ui=up_g_fn, li=lo_g_fn, col="orange", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0,1) )
lines(c(1:4),pdr75seg_g_fn[1:4], type = "l", col="orange", lwd=2)
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
################################################################################
#APP S
###FS
## Framework scheme, application S: PDR = received at server / sent by cars.
#Compute Server S received
traceserver_s_fs<-read.table(file = 'result/server_stf_car_fs_tt.txt', sep=' ')
names(traceserver_s_fs)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_s_fs$time <- as.POSIXlt(traceserver_s_fs$time, origin = "1987-10-05 11:00:00")
traceserver_s_fs$size<- traceserver_s_fs$size*8  # bytes -> bits
sum1segserver_s_fs<-aggregate(list(size = traceserver_s_fs$size), list(segundos = cut(traceserver_s_fs$time, "1 sec")), sum)
mean1segserver_s_fs<-append(list(size = sum1segserver_s_fs$size), list(time = as.numeric(sum1segserver_s_fs$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_s_fs$size[1:150]<- mean1segserver_s_fs$size[1:150]/7
mean1segserver_s_fs$size[151:225]<- mean1segserver_s_fs$size[151:225]/11
mean1segserver_s_fs$size[226:300]<- mean1segserver_s_fs$size[226:300]/15
pd_s_server<-traceserver_s_fs
pd_s_server$size<-pd_s_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_s_fs<-aggregate(list(size = pd_s_server$size), list(segundos = cut(pd_s_server$time, "75 sec")), sum)
meanpd75segserver_s_fs<-append(list(size = sumpd75segserver_s_fs$size), list(time = as.numeric(sumpd75segserver_s_fs$segundos)))
#Compute Car sent Server S
tracecar_s_fs<-read.table(file = 'result/cartf_fs_5002_tt.txt', sep=' ')
names(tracecar_s_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_s_fs$time <- as.POSIXlt(tracecar_s_fs$time, origin = "1987-10-05 11:00:00")
tracecar_s_fs$size<- tracecar_s_fs$size*8
sum1segcar_s_fs<-aggregate(list(size = tracecar_s_fs$size), list(segundos = cut(tracecar_s_fs$time, "1 sec")), sum)
mean1segcar_s_fs<-append(list(size = sum1segcar_s_fs$size), list(time = as.numeric(sum1segcar_s_fs$segundos)))
mean1segcar_s_fs$size[1:150]<- mean1segcar_s_fs$size[1:150]/7
mean1segcar_s_fs$size[151:225]<- mean1segcar_s_fs$size[151:225]/11
mean1segcar_s_fs$size[226:300]<- mean1segcar_s_fs$size[226:300]/15
pd_s_car<-tracecar_s_fs
pd_s_car$size<-pd_s_car$size/8/1498
sumpd75segcar_s_fs<-aggregate(list(size = pd_s_car$size), list(segundos = cut(pd_s_car$time, "75 sec")), sum)
meanpd75segcar_s_fs<-append(list(size = sumpd75segcar_s_fs$size), list(time = as.numeric(sumpd75segcar_s_fs$segundos)))
#Compute PDR Server S
pdr75seg_s_fs<-meanpd75segserver_s_fs$size/meanpd75segcar_s_fs$size
pdr1seg_s_fs<-mean1segserver_s_fs$size[1:300]/mean1segcar_s_fs$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_s_fs<-CI(pdr1seg_s_fs[1:75], ci=0.95)
x_s_fs<-CI(pdr1seg_s_fs[76:150], ci=0.95)
y_s_fs<-CI(pdr1seg_s_fs[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_s_fs<-CI(pdr1seg_s_fs[226:300], ci=0.95)
up_s_fs<-c(w_s_fs[1], x_s_fs[1], y_s_fs[1], z_s_fs[1])
lo_s_fs<-c(w_s_fs[3], x_s_fs[3], y_s_fs[3], z_s_fs[3])
###FQ
## QoS scheme, application S: PDR = received at server / sent by cars.
#Compute Server S received
traceserver_s_fq<-read.table(file = 'result/server_stf_car_fq_tt.txt', sep=' ')
names(traceserver_s_fq)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_s_fq$time <- as.POSIXlt(traceserver_s_fq$time, origin = "1987-10-05 11:00:00")
traceserver_s_fq$size<- traceserver_s_fq$size*8  # bytes -> bits
sum1segserver_s_fq<-aggregate(list(size = traceserver_s_fq$size), list(segundos = cut(traceserver_s_fq$time, "1 sec")), sum)
mean1segserver_s_fq<-append(list(size = sum1segserver_s_fq$size), list(time = as.numeric(sum1segserver_s_fq$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_s_fq$size[1:150]<- mean1segserver_s_fq$size[1:150]/7
mean1segserver_s_fq$size[151:225]<- mean1segserver_s_fq$size[151:225]/11
mean1segserver_s_fq$size[226:300]<- mean1segserver_s_fq$size[226:300]/15
pd_s_server<-traceserver_s_fq
pd_s_server$size<-pd_s_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_s_fq<-aggregate(list(size = pd_s_server$size), list(segundos = cut(pd_s_server$time, "75 sec")), sum)
meanpd75segserver_s_fq<-append(list(size = sumpd75segserver_s_fq$size), list(time = as.numeric(sumpd75segserver_s_fq$segundos)))
#Compute Car sent Server S
tracecar_s_fq<-read.table(file = 'result/cartf_fq_5002_tt.txt', sep=' ')
names(tracecar_s_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_s_fq$time <- as.POSIXlt(tracecar_s_fq$time, origin = "1987-10-05 11:00:00")
tracecar_s_fq$size<- tracecar_s_fq$size*8
sum1segcar_s_fq<-aggregate(list(size = tracecar_s_fq$size), list(segundos = cut(tracecar_s_fq$time, "1 sec")), sum)
mean1segcar_s_fq<-append(list(size = sum1segcar_s_fq$size), list(time = as.numeric(sum1segcar_s_fq$segundos)))
mean1segcar_s_fq$size[1:150]<- mean1segcar_s_fq$size[1:150]/7
mean1segcar_s_fq$size[151:225]<- mean1segcar_s_fq$size[151:225]/11
mean1segcar_s_fq$size[226:300]<- mean1segcar_s_fq$size[226:300]/15
pd_s_car<-tracecar_s_fq
pd_s_car$size<-pd_s_car$size/8/1498
sumpd75segcar_s_fq<-aggregate(list(size = pd_s_car$size), list(segundos = cut(pd_s_car$time, "75 sec")), sum)
meanpd75segcar_s_fq<-append(list(size = sumpd75segcar_s_fq$size), list(time = as.numeric(sumpd75segcar_s_fq$segundos)))
#Compute PDR Server S
pdr75seg_s_fq<-meanpd75segserver_s_fq$size/meanpd75segcar_s_fq$size
pdr1seg_s_fq<-mean1segserver_s_fq$size[1:300]/mean1segcar_s_fq$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_s_fq<-CI(pdr1seg_s_fq[1:75], ci=0.95)
x_s_fq<-CI(pdr1seg_s_fq[76:150], ci=0.95)
y_s_fq<-CI(pdr1seg_s_fq[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_s_fq<-CI(pdr1seg_s_fq[226:300], ci=0.95)
up_s_fq<-c(w_s_fq[1], x_s_fq[1], y_s_fq[1], z_s_fq[1])
lo_s_fq<-c(w_s_fq[3], x_s_fq[3], y_s_fq[3], z_s_fq[3])
####FN
## Best-effort scheme, application S: PDR = received at server / sent by cars.
#Compute Server S received
traceserver_s_fn<-read.table(file = 'result/server_stf_car_fn_tt.txt', sep=' ')
names(traceserver_s_fn)<-c("time", "id", "size", "ori", "dest" )
# FIX: option name was misspelled "drigits.secs".
options(digits.secs = 6)
traceserver_s_fn$time <- as.POSIXlt(traceserver_s_fn$time, origin = "1987-10-05 11:00:00")
traceserver_s_fn$size<- traceserver_s_fn$size*8  # bytes -> bits
sum1segserver_s_fn<-aggregate(list(size = traceserver_s_fn$size), list(segundos = cut(traceserver_s_fn$time, "1 sec")), sum)
mean1segserver_s_fn<-append(list(size = sum1segserver_s_fn$size), list(time = as.numeric(sum1segserver_s_fn$segundos)))
# Divisors 7/11/15 are presumably the car count per congestion phase -- TODO confirm.
mean1segserver_s_fn$size[1:150]<- mean1segserver_s_fn$size[1:150]/7
mean1segserver_s_fn$size[151:225]<- mean1segserver_s_fn$size[151:225]/11
mean1segserver_s_fn$size[226:300]<- mean1segserver_s_fn$size[226:300]/15
pd_s_server<-traceserver_s_fn
pd_s_server$size<-pd_s_server$size/8/1498  # bits -> count of 1498-byte packets
sumpd75segserver_s_fn<-aggregate(list(size = pd_s_server$size), list(segundos = cut(pd_s_server$time, "75 sec")), sum)
meanpd75segserver_s_fn<-append(list(size = sumpd75segserver_s_fn$size), list(time = as.numeric(sumpd75segserver_s_fn$segundos)))
#Compute Car sent Server S
tracecar_s_fn<-read.table(file = 'result/cartf_fn_5002_tt.txt', sep=' ')
names(tracecar_s_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_s_fn$time <- as.POSIXlt(tracecar_s_fn$time, origin = "1987-10-05 11:00:00")
tracecar_s_fn$size<- tracecar_s_fn$size*8
sum1segcar_s_fn<-aggregate(list(size = tracecar_s_fn$size), list(segundos = cut(tracecar_s_fn$time, "1 sec")), sum)
mean1segcar_s_fn<-append(list(size = sum1segcar_s_fn$size), list(time = as.numeric(sum1segcar_s_fn$segundos)))
mean1segcar_s_fn$size[1:150]<- mean1segcar_s_fn$size[1:150]/7
mean1segcar_s_fn$size[151:225]<- mean1segcar_s_fn$size[151:225]/11
mean1segcar_s_fn$size[226:300]<- mean1segcar_s_fn$size[226:300]/15
pd_s_car<-tracecar_s_fn
pd_s_car$size<-pd_s_car$size/8/1498
sumpd75segcar_s_fn<-aggregate(list(size = pd_s_car$size), list(segundos = cut(pd_s_car$time, "75 sec")), sum)
meanpd75segcar_s_fn<-append(list(size = sumpd75segcar_s_fn$size), list(time = as.numeric(sumpd75segcar_s_fn$segundos)))
#Compute PDR Server S
pdr75seg_s_fn<-meanpd75segserver_s_fn$size/meanpd75segcar_s_fn$size
pdr1seg_s_fn<-mean1segserver_s_fn$size[1:300]/mean1segcar_s_fn$size[1:300]
# library() errors immediately if Rmisc is missing; require() only warns.
library(Rmisc)
# 95% confidence intervals over the four 75-second congestion windows.
w_s_fn<-CI(pdr1seg_s_fn[1:75], ci=0.95)
x_s_fn<-CI(pdr1seg_s_fn[76:150], ci=0.95)
y_s_fn<-CI(pdr1seg_s_fn[151:225], ci=0.95)
# FIX: fourth window is seconds 226-300 (was 225:300, overlapping window 3).
z_s_fn<-CI(pdr1seg_s_fn[226:300], ci=0.95)
up_s_fn<-c(w_s_fn[1], x_s_fn[1], y_s_fn[1], z_s_fn[1])
lo_s_fn<-c(w_s_fn[3], x_s_fn[3], y_s_fn[3], z_s_fn[3])
# library() errors immediately if plotrix is missing; require() only warns.
library(plotrix)
## PDR per congestion level (C1-C4) for application S, one curve per scheme.
## Style fixes: TRUE/FALSE instead of T/F, numeric lwd instead of "2".
#plotCI(c(1:4), pdr75seg_s_fs[1:4], ui=up_s_fs, li=lo_s_fs, col="red", main="PDR Application S", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.6,1.05), xaxt="n")
plotCI(c(1:4), pdr75seg_s_fs[1:4], ui=up_s_fs, li=lo_s_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd=2, ylim=c(0.6,1.05), xaxt="n")
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_s_fs[1:4], type = "l", col="red", lwd=2)
par(new=TRUE)  # overlay next plot on the same axes
plotCI(c(1:4), pdr75seg_s_fq[1:4], ui=up_s_fq, li=lo_s_fq, col="blue", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0.6,1.05))
lines(c(1:4),pdr75seg_s_fq[1:4], type = "l", col="blue", lwd=2)
par(new=TRUE)
plotCI(c(1:4), pdr75seg_s_fn[1:4], ui=up_s_fn, li=lo_s_fn, col="orange", axes=FALSE, xlab=NA, ylab=NA, lwd=2, ylim=c(0.6,1.05))
lines(c(1:4),pdr75seg_s_fn[1:4], type = "l", col="orange", lwd=2)
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
| /comb_pdr.R | no_license | rubiruchi/framework_its_sdn | R | false | false | 35,420 | r | ##################PDRs
###APP E
###FS
## Application E under the framework ("fs") scheme: build per-second and
## per-75 s packet counts at server and cars, then PDR + 95% CIs.
#Compute Server E received
traceserver_e_fs<-read.table(file = 'result/server_etf_car_fs_tt.txt', sep=' ')
names(traceserver_e_fs)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect);
# keeps fractional seconds visible when POSIX times are printed.
options(digits.secs = 6)
traceserver_e_fs$time <- as.POSIXlt(traceserver_e_fs$time, origin = "1987-10-05 11:00:00")
traceserver_e_fs$size<- traceserver_e_fs$size*8  # bytes -> bits
sum1segserver_e_fs<-aggregate(list(size = traceserver_e_fs$size), list(segundos = cut(traceserver_e_fs$time, "1 sec")), sum)
mean1segserver_e_fs<-append(list(size = sum1segserver_e_fs$size), list(time = as.numeric(sum1segserver_e_fs$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15 --
# presumably car counts; TODO confirm against the scenario).
mean1segserver_e_fs$size[1:150]<- mean1segserver_e_fs$size[1:150]/7
mean1segserver_e_fs$size[151:225]<- mean1segserver_e_fs$size[151:225]/11
mean1segserver_e_fs$size[226:300]<- mean1segserver_e_fs$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_e_server<-traceserver_e_fs
pd_e_server$size<-pd_e_server$size/8/1498
sumpd75segserver_e_fs<-aggregate(list(size = pd_e_server$size), list(segundos = cut(pd_e_server$time, "75 sec")), sum)
meanpd75segserver_e_fs<-append(list(size = sumpd75segserver_e_fs$size), list(time = as.numeric(sumpd75segserver_e_fs$segundos)))
#Compute Car sent Server E (application port 5003)
tracecar_e_fs<-read.table(file = 'result/cartf_fs_5003_tt.txt', sep=' ')
names(tracecar_e_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_e_fs$time <- as.POSIXlt(tracecar_e_fs$time, origin = "1987-10-05 11:00:00")
tracecar_e_fs$size<- tracecar_e_fs$size*8
sum1segcar_e_fs<-aggregate(list(size = tracecar_e_fs$size), list(segundos = cut(tracecar_e_fs$time, "1 sec")), sum)
mean1segcar_e_fs<-append(list(size = sum1segcar_e_fs$size), list(time = as.numeric(sum1segcar_e_fs$segundos)))
mean1segcar_e_fs$size[1:150]<- mean1segcar_e_fs$size[1:150]/7
mean1segcar_e_fs$size[151:225]<- mean1segcar_e_fs$size[151:225]/11
mean1segcar_e_fs$size[226:300]<- mean1segcar_e_fs$size[226:300]/15
pd_e_car<-tracecar_e_fs
pd_e_car$size<-pd_e_car$size/8/1498
sumpd75segcar_e_fs<-aggregate(list(size = pd_e_car$size), list(segundos = cut(pd_e_car$time, "75 sec")), sum)
meanpd75segcar_e_fs<-append(list(size = sumpd75segcar_e_fs$size), list(time = as.numeric(sumpd75segcar_e_fs$segundos)))
#Compute PDR Server E: received at server / sent by cars
pdr75seg_e_fs<-meanpd75segserver_e_fs$size/meanpd75segcar_e_fs$size
pdr1seg_e_fs<-mean1segserver_e_fs$size[1:300]/mean1segcar_e_fs$size[1:300]
# library() (not require()) so a missing package fails loudly here.
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_e_fs<-CI(pdr1seg_e_fs[1:75], ci=0.95)
x_e_fs<-CI(pdr1seg_e_fs[76:150], ci=0.95)
y_e_fs<-CI(pdr1seg_e_fs[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_e_fs<-CI(pdr1seg_e_fs[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_e_fs<-c(w_e_fs[1], x_e_fs[1], y_e_fs[1], z_e_fs[1])
lo_e_fs<-c(w_e_fs[3], x_e_fs[3], y_e_fs[3], z_e_fs[3])
###FQ
## Application E under the QoS ("fq") scheme: same pipeline as the fs stanza.
#Compute Server E received
traceserver_e_fq<-read.table(file = 'result/server_etf_car_fq_tt.txt', sep=' ')
names(traceserver_e_fq)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_e_fq$time <- as.POSIXlt(traceserver_e_fq$time, origin = "1987-10-05 11:00:00")
traceserver_e_fq$size<- traceserver_e_fq$size*8  # bytes -> bits
sum1segserver_e_fq<-aggregate(list(size = traceserver_e_fq$size), list(segundos = cut(traceserver_e_fq$time, "1 sec")), sum)
mean1segserver_e_fq<-append(list(size = sum1segserver_e_fq$size), list(time = as.numeric(sum1segserver_e_fq$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_e_fq$size[1:150]<- mean1segserver_e_fq$size[1:150]/7
mean1segserver_e_fq$size[151:225]<- mean1segserver_e_fq$size[151:225]/11
mean1segserver_e_fq$size[226:300]<- mean1segserver_e_fq$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_e_server<-traceserver_e_fq
pd_e_server$size<-pd_e_server$size/8/1498
sumpd75segserver_e_fq<-aggregate(list(size = pd_e_server$size), list(segundos = cut(pd_e_server$time, "75 sec")), sum)
meanpd75segserver_e_fq<-append(list(size = sumpd75segserver_e_fq$size), list(time = as.numeric(sumpd75segserver_e_fq$segundos)))
#Compute Car sent Server E (application port 5003)
tracecar_e_fq<-read.table(file = 'result/cartf_fq_5003_tt.txt', sep=' ')
names(tracecar_e_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_e_fq$time <- as.POSIXlt(tracecar_e_fq$time, origin = "1987-10-05 11:00:00")
tracecar_e_fq$size<- tracecar_e_fq$size*8
sum1segcar_e_fq<-aggregate(list(size = tracecar_e_fq$size), list(segundos = cut(tracecar_e_fq$time, "1 sec")), sum)
mean1segcar_e_fq<-append(list(size = sum1segcar_e_fq$size), list(time = as.numeric(sum1segcar_e_fq$segundos)))
mean1segcar_e_fq$size[1:150]<- mean1segcar_e_fq$size[1:150]/7
mean1segcar_e_fq$size[151:225]<- mean1segcar_e_fq$size[151:225]/11
mean1segcar_e_fq$size[226:300]<- mean1segcar_e_fq$size[226:300]/15
pd_e_car<-tracecar_e_fq
pd_e_car$size<-pd_e_car$size/8/1498
sumpd75segcar_e_fq<-aggregate(list(size = pd_e_car$size), list(segundos = cut(pd_e_car$time, "75 sec")), sum)
meanpd75segcar_e_fq<-append(list(size = sumpd75segcar_e_fq$size), list(time = as.numeric(sumpd75segcar_e_fq$segundos)))
#Compute PDR Server E: received at server / sent by cars
pdr75seg_e_fq<-meanpd75segserver_e_fq$size/meanpd75segcar_e_fq$size
pdr1seg_e_fq<-mean1segserver_e_fq$size[1:300]/mean1segcar_e_fq$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_e_fq<-CI(pdr1seg_e_fq[1:75], ci=0.95)
x_e_fq<-CI(pdr1seg_e_fq[76:150], ci=0.95)
y_e_fq<-CI(pdr1seg_e_fq[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_e_fq<-CI(pdr1seg_e_fq[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_e_fq<-c(w_e_fq[1], x_e_fq[1], y_e_fq[1], z_e_fq[1])
lo_e_fq<-c(w_e_fq[3], x_e_fq[3], y_e_fq[3], z_e_fq[3])
####FN
## Application E under the best-effort ("fn") scheme: same pipeline again.
#Compute Server E received
traceserver_e_fn<-read.table(file = 'result/server_etf_car_fn_tt.txt', sep=' ')
names(traceserver_e_fn)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_e_fn$time <- as.POSIXlt(traceserver_e_fn$time, origin = "1987-10-05 11:00:00")
traceserver_e_fn$size<- traceserver_e_fn$size*8  # bytes -> bits
sum1segserver_e_fn<-aggregate(list(size = traceserver_e_fn$size), list(segundos = cut(traceserver_e_fn$time, "1 sec")), sum)
mean1segserver_e_fn<-append(list(size = sum1segserver_e_fn$size), list(time = as.numeric(sum1segserver_e_fn$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_e_fn$size[1:150]<- mean1segserver_e_fn$size[1:150]/7
mean1segserver_e_fn$size[151:225]<- mean1segserver_e_fn$size[151:225]/11
mean1segserver_e_fn$size[226:300]<- mean1segserver_e_fn$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_e_server<-traceserver_e_fn
pd_e_server$size<-pd_e_server$size/8/1498
sumpd75segserver_e_fn<-aggregate(list(size = pd_e_server$size), list(segundos = cut(pd_e_server$time, "75 sec")), sum)
meanpd75segserver_e_fn<-append(list(size = sumpd75segserver_e_fn$size), list(time = as.numeric(sumpd75segserver_e_fn$segundos)))
#Compute Car sent Server E (application port 5003)
tracecar_e_fn<-read.table(file = 'result/cartf_fn_5003_tt.txt', sep=' ')
names(tracecar_e_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_e_fn$time <- as.POSIXlt(tracecar_e_fn$time, origin = "1987-10-05 11:00:00")
tracecar_e_fn$size<- tracecar_e_fn$size*8
sum1segcar_e_fn<-aggregate(list(size = tracecar_e_fn$size), list(segundos = cut(tracecar_e_fn$time, "1 sec")), sum)
mean1segcar_e_fn<-append(list(size = sum1segcar_e_fn$size), list(time = as.numeric(sum1segcar_e_fn$segundos)))
mean1segcar_e_fn$size[1:150]<- mean1segcar_e_fn$size[1:150]/7
mean1segcar_e_fn$size[151:225]<- mean1segcar_e_fn$size[151:225]/11
mean1segcar_e_fn$size[226:300]<- mean1segcar_e_fn$size[226:300]/15
pd_e_car<-tracecar_e_fn
pd_e_car$size<-pd_e_car$size/8/1498
sumpd75segcar_e_fn<-aggregate(list(size = pd_e_car$size), list(segundos = cut(pd_e_car$time, "75 sec")), sum)
meanpd75segcar_e_fn<-append(list(size = sumpd75segcar_e_fn$size), list(time = as.numeric(sumpd75segcar_e_fn$segundos)))
#Compute PDR Server E: received at server / sent by cars
pdr75seg_e_fn<-meanpd75segserver_e_fn$size/meanpd75segcar_e_fn$size
pdr1seg_e_fn<-mean1segserver_e_fn$size[1:300]/mean1segcar_e_fn$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_e_fn<-CI(pdr1seg_e_fn[1:75], ci=0.95)
x_e_fn<-CI(pdr1seg_e_fn[76:150], ci=0.95)
y_e_fn<-CI(pdr1seg_e_fn[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_e_fn<-CI(pdr1seg_e_fn[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_e_fn<-c(w_e_fn[1], x_e_fn[1], y_e_fn[1], z_e_fn[1])
lo_e_fn<-c(w_e_fn[3], x_e_fn[3], y_e_fn[3], z_e_fn[3])
## PDR-vs-congestion plot for Application E: Framework (red), QoS (blue)
## and best effort (orange), each with its 95% CI per congestion level.
library(plotrix)
#plotCI(c(1:4), pdr75seg_e_fs[1:4], ui=up_e_fs, li=lo_e_fs, col="red", main="PDR Application E", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.5,1), xaxt="n")
plotCI(c(1:4), pdr75seg_e_fs[1:4], ui=up_e_fs, li=lo_e_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.5,1), xaxt="n")
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_e_fs[1:4], type = "l", col="red", lwd="2")
par(new=TRUE)  # overlay next series on the same plot (TRUE, not reassignable T)
plotCI(c(1:4), pdr75seg_e_fq[1:4], ui=up_e_fq, li=lo_e_fq, col="blue", axes=FALSE, xlab=NA, ylab=NA, lwd="2", ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e_fq[1:4], type = "l", col="blue", lwd="2")
par(new=TRUE)
plotCI(c(1:4), pdr75seg_e_fn[1:4], ui=up_e_fn, li=lo_e_fn, col="orange", axes=FALSE, xlab=NA, ylab=NA, lwd="2", ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e_fn[1:4], type = "l", col="orange", lwd="2")
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
#################
###APP E2
###FS
## Application E2 under the framework ("fs") scheme: per-second and per-75 s
## packet counts at server and cars, then PDR + 95% CIs.
#Compute Server E2 received
traceserver_e2_fs<-read.table(file = 'result/server_e2tf_car_fs_tt.txt', sep=' ')
names(traceserver_e2_fs)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_e2_fs$time <- as.POSIXlt(traceserver_e2_fs$time, origin = "1987-10-05 11:00:00")
traceserver_e2_fs$size<- traceserver_e2_fs$size*8  # bytes -> bits
sum1segserver_e2_fs<-aggregate(list(size = traceserver_e2_fs$size), list(segundos = cut(traceserver_e2_fs$time, "1 sec")), sum)
mean1segserver_e2_fs<-append(list(size = sum1segserver_e2_fs$size), list(time = as.numeric(sum1segserver_e2_fs$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_e2_fs$size[1:150]<- mean1segserver_e2_fs$size[1:150]/7
mean1segserver_e2_fs$size[151:225]<- mean1segserver_e2_fs$size[151:225]/11
mean1segserver_e2_fs$size[226:300]<- mean1segserver_e2_fs$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_e2_server<-traceserver_e2_fs
pd_e2_server$size<-pd_e2_server$size/8/1498
sumpd75segserver_e2_fs<-aggregate(list(size = pd_e2_server$size), list(segundos = cut(pd_e2_server$time, "75 sec")), sum)
meanpd75segserver_e2_fs<-append(list(size = sumpd75segserver_e2_fs$size), list(time = as.numeric(sumpd75segserver_e2_fs$segundos)))
#Compute Car sent Server E2 (application port 5004)
tracecar_e2_fs<-read.table(file = 'result/cartf_fs_5004_tt.txt', sep=' ')
names(tracecar_e2_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_e2_fs$time <- as.POSIXlt(tracecar_e2_fs$time, origin = "1987-10-05 11:00:00")
tracecar_e2_fs$size<- tracecar_e2_fs$size*8
sum1segcar_e2_fs<-aggregate(list(size = tracecar_e2_fs$size), list(segundos = cut(tracecar_e2_fs$time, "1 sec")), sum)
mean1segcar_e2_fs<-append(list(size = sum1segcar_e2_fs$size), list(time = as.numeric(sum1segcar_e2_fs$segundos)))
mean1segcar_e2_fs$size[1:150]<- mean1segcar_e2_fs$size[1:150]/7
mean1segcar_e2_fs$size[151:225]<- mean1segcar_e2_fs$size[151:225]/11
mean1segcar_e2_fs$size[226:300]<- mean1segcar_e2_fs$size[226:300]/15
pd_e2_car<-tracecar_e2_fs
pd_e2_car$size<-pd_e2_car$size/8/1498
sumpd75segcar_e2_fs<-aggregate(list(size = pd_e2_car$size), list(segundos = cut(pd_e2_car$time, "75 sec")), sum)
meanpd75segcar_e2_fs<-append(list(size = sumpd75segcar_e2_fs$size), list(time = as.numeric(sumpd75segcar_e2_fs$segundos)))
#Compute PDR Server E2: received at server / sent by cars
pdr75seg_e2_fs<-meanpd75segserver_e2_fs$size/meanpd75segcar_e2_fs$size
pdr1seg_e2_fs<-mean1segserver_e2_fs$size[1:300]/mean1segcar_e2_fs$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_e2_fs<-CI(pdr1seg_e2_fs[1:75], ci=0.95)
x_e2_fs<-CI(pdr1seg_e2_fs[76:150], ci=0.95)
y_e2_fs<-CI(pdr1seg_e2_fs[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_e2_fs<-CI(pdr1seg_e2_fs[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_e2_fs<-c(w_e2_fs[1], x_e2_fs[1], y_e2_fs[1], z_e2_fs[1])
lo_e2_fs<-c(w_e2_fs[3], x_e2_fs[3], y_e2_fs[3], z_e2_fs[3])
###FQ
## Application E2 under the QoS ("fq") scheme: same pipeline as the fs stanza.
#Compute Server E2 received
traceserver_e2_fq<-read.table(file = 'result/server_e2tf_car_fq_tt.txt', sep=' ')
names(traceserver_e2_fq)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_e2_fq$time <- as.POSIXlt(traceserver_e2_fq$time, origin = "1987-10-05 11:00:00")
traceserver_e2_fq$size<- traceserver_e2_fq$size*8  # bytes -> bits
sum1segserver_e2_fq<-aggregate(list(size = traceserver_e2_fq$size), list(segundos = cut(traceserver_e2_fq$time, "1 sec")), sum)
mean1segserver_e2_fq<-append(list(size = sum1segserver_e2_fq$size), list(time = as.numeric(sum1segserver_e2_fq$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_e2_fq$size[1:150]<- mean1segserver_e2_fq$size[1:150]/7
mean1segserver_e2_fq$size[151:225]<- mean1segserver_e2_fq$size[151:225]/11
mean1segserver_e2_fq$size[226:300]<- mean1segserver_e2_fq$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_e2_server<-traceserver_e2_fq
pd_e2_server$size<-pd_e2_server$size/8/1498
sumpd75segserver_e2_fq<-aggregate(list(size = pd_e2_server$size), list(segundos = cut(pd_e2_server$time, "75 sec")), sum)
meanpd75segserver_e2_fq<-append(list(size = sumpd75segserver_e2_fq$size), list(time = as.numeric(sumpd75segserver_e2_fq$segundos)))
#Compute Car sent Server E2 (application port 5004)
tracecar_e2_fq<-read.table(file = 'result/cartf_fq_5004_tt.txt', sep=' ')
names(tracecar_e2_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_e2_fq$time <- as.POSIXlt(tracecar_e2_fq$time, origin = "1987-10-05 11:00:00")
tracecar_e2_fq$size<- tracecar_e2_fq$size*8
sum1segcar_e2_fq<-aggregate(list(size = tracecar_e2_fq$size), list(segundos = cut(tracecar_e2_fq$time, "1 sec")), sum)
mean1segcar_e2_fq<-append(list(size = sum1segcar_e2_fq$size), list(time = as.numeric(sum1segcar_e2_fq$segundos)))
mean1segcar_e2_fq$size[1:150]<- mean1segcar_e2_fq$size[1:150]/7
mean1segcar_e2_fq$size[151:225]<- mean1segcar_e2_fq$size[151:225]/11
mean1segcar_e2_fq$size[226:300]<- mean1segcar_e2_fq$size[226:300]/15
pd_e2_car<-tracecar_e2_fq
pd_e2_car$size<-pd_e2_car$size/8/1498
sumpd75segcar_e2_fq<-aggregate(list(size = pd_e2_car$size), list(segundos = cut(pd_e2_car$time, "75 sec")), sum)
meanpd75segcar_e2_fq<-append(list(size = sumpd75segcar_e2_fq$size), list(time = as.numeric(sumpd75segcar_e2_fq$segundos)))
#Compute PDR Server E2: received at server / sent by cars
pdr75seg_e2_fq<-meanpd75segserver_e2_fq$size/meanpd75segcar_e2_fq$size
pdr1seg_e2_fq<-mean1segserver_e2_fq$size[1:300]/mean1segcar_e2_fq$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_e2_fq<-CI(pdr1seg_e2_fq[1:75], ci=0.95)
x_e2_fq<-CI(pdr1seg_e2_fq[76:150], ci=0.95)
y_e2_fq<-CI(pdr1seg_e2_fq[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_e2_fq<-CI(pdr1seg_e2_fq[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_e2_fq<-c(w_e2_fq[1], x_e2_fq[1], y_e2_fq[1], z_e2_fq[1])
lo_e2_fq<-c(w_e2_fq[3], x_e2_fq[3], y_e2_fq[3], z_e2_fq[3])
####FN
## Application E2 under the best-effort ("fn") scheme: same pipeline again.
#Compute Server E2 received
traceserver_e2_fn<-read.table(file = 'result/server_e2tf_car_fn_tt.txt', sep=' ')
names(traceserver_e2_fn)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_e2_fn$time <- as.POSIXlt(traceserver_e2_fn$time, origin = "1987-10-05 11:00:00")
traceserver_e2_fn$size<- traceserver_e2_fn$size*8  # bytes -> bits
sum1segserver_e2_fn<-aggregate(list(size = traceserver_e2_fn$size), list(segundos = cut(traceserver_e2_fn$time, "1 sec")), sum)
mean1segserver_e2_fn<-append(list(size = sum1segserver_e2_fn$size), list(time = as.numeric(sum1segserver_e2_fn$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_e2_fn$size[1:150]<- mean1segserver_e2_fn$size[1:150]/7
mean1segserver_e2_fn$size[151:225]<- mean1segserver_e2_fn$size[151:225]/11
mean1segserver_e2_fn$size[226:300]<- mean1segserver_e2_fn$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_e2_server<-traceserver_e2_fn
pd_e2_server$size<-pd_e2_server$size/8/1498
sumpd75segserver_e2_fn<-aggregate(list(size = pd_e2_server$size), list(segundos = cut(pd_e2_server$time, "75 sec")), sum)
meanpd75segserver_e2_fn<-append(list(size = sumpd75segserver_e2_fn$size), list(time = as.numeric(sumpd75segserver_e2_fn$segundos)))
#Compute Car sent Server E2 (application port 5004)
tracecar_e2_fn<-read.table(file = 'result/cartf_fn_5004_tt.txt', sep=' ')
names(tracecar_e2_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_e2_fn$time <- as.POSIXlt(tracecar_e2_fn$time, origin = "1987-10-05 11:00:00")
tracecar_e2_fn$size<- tracecar_e2_fn$size*8
sum1segcar_e2_fn<-aggregate(list(size = tracecar_e2_fn$size), list(segundos = cut(tracecar_e2_fn$time, "1 sec")), sum)
mean1segcar_e2_fn<-append(list(size = sum1segcar_e2_fn$size), list(time = as.numeric(sum1segcar_e2_fn$segundos)))
mean1segcar_e2_fn$size[1:150]<- mean1segcar_e2_fn$size[1:150]/7
mean1segcar_e2_fn$size[151:225]<- mean1segcar_e2_fn$size[151:225]/11
mean1segcar_e2_fn$size[226:300]<- mean1segcar_e2_fn$size[226:300]/15
pd_e2_car<-tracecar_e2_fn
pd_e2_car$size<-pd_e2_car$size/8/1498
sumpd75segcar_e2_fn<-aggregate(list(size = pd_e2_car$size), list(segundos = cut(pd_e2_car$time, "75 sec")), sum)
meanpd75segcar_e2_fn<-append(list(size = sumpd75segcar_e2_fn$size), list(time = as.numeric(sumpd75segcar_e2_fn$segundos)))
#Compute PDR Server E2: received at server / sent by cars
pdr75seg_e2_fn<-meanpd75segserver_e2_fn$size/meanpd75segcar_e2_fn$size
pdr1seg_e2_fn<-mean1segserver_e2_fn$size[1:300]/mean1segcar_e2_fn$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_e2_fn<-CI(pdr1seg_e2_fn[1:75], ci=0.95)
x_e2_fn<-CI(pdr1seg_e2_fn[76:150], ci=0.95)
y_e2_fn<-CI(pdr1seg_e2_fn[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_e2_fn<-CI(pdr1seg_e2_fn[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_e2_fn<-c(w_e2_fn[1], x_e2_fn[1], y_e2_fn[1], z_e2_fn[1])
lo_e2_fn<-c(w_e2_fn[3], x_e2_fn[3], y_e2_fn[3], z_e2_fn[3])
## PDR-vs-congestion plot for Application E2: Framework (red), QoS (blue)
## and best effort (orange), each with its 95% CI per congestion level.
library(plotrix)
#plotCI(c(1:4), pdr75seg_e2_fs[1:4], ui=up_e2_fs, li=lo_e2_fs, col="red", main="PDR Application E2", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.5,1), xaxt="n")
plotCI(c(1:4), pdr75seg_e2_fs[1:4], ui=up_e2_fs, li=lo_e2_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.5,1), xaxt="n")
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_e2_fs[1:4], type = "l", col="red", lwd="2")
par(new=TRUE)  # overlay next series on the same plot (TRUE, not reassignable T)
plotCI(c(1:4), pdr75seg_e2_fq[1:4], ui=up_e2_fq, li=lo_e2_fq, col="blue", axes=FALSE, xlab=NA, ylab=NA, lwd="2", ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e2_fq[1:4], type = "l", col="blue", lwd="2")
par(new=TRUE)
plotCI(c(1:4), pdr75seg_e2_fn[1:4], ui=up_e2_fn, li=lo_e2_fn, col="orange", axes=FALSE, xlab=NA, ylab=NA, lwd="2", ylim=c(0.5,1))
lines(c(1:4),pdr75seg_e2_fn[1:4], type = "l", col="orange", lwd="2")
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
######################
#APP G
###FS
## Application G under the framework ("fs") scheme: per-second and per-75 s
## packet counts at server and cars, then PDR + 95% CIs.
#Compute Server G received
traceserver_g_fs<-read.table(file = 'result/server_gtf_car_fs_tt.txt', sep=' ')
names(traceserver_g_fs)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_g_fs$time <- as.POSIXlt(traceserver_g_fs$time, origin = "1987-10-05 11:00:00")
traceserver_g_fs$size<- traceserver_g_fs$size*8  # bytes -> bits
sum1segserver_g_fs<-aggregate(list(size = traceserver_g_fs$size), list(segundos = cut(traceserver_g_fs$time, "1 sec")), sum)
mean1segserver_g_fs<-append(list(size = sum1segserver_g_fs$size), list(time = as.numeric(sum1segserver_g_fs$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_g_fs$size[1:150]<- mean1segserver_g_fs$size[1:150]/7
mean1segserver_g_fs$size[151:225]<- mean1segserver_g_fs$size[151:225]/11
mean1segserver_g_fs$size[226:300]<- mean1segserver_g_fs$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_g_server<-traceserver_g_fs
pd_g_server$size<-pd_g_server$size/8/1498
sumpd75segserver_g_fs<-aggregate(list(size = pd_g_server$size), list(segundos = cut(pd_g_server$time, "75 sec")), sum)
meanpd75segserver_g_fs<-append(list(size = sumpd75segserver_g_fs$size), list(time = as.numeric(sumpd75segserver_g_fs$segundos)))
#Compute Car sent Server G (application port 5005)
tracecar_g_fs<-read.table(file = 'result/cartf_fs_5005_tt.txt', sep=' ')
names(tracecar_g_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_g_fs$time <- as.POSIXlt(tracecar_g_fs$time, origin = "1987-10-05 11:00:00")
tracecar_g_fs$size<- tracecar_g_fs$size*8
sum1segcar_g_fs<-aggregate(list(size = tracecar_g_fs$size), list(segundos = cut(tracecar_g_fs$time, "1 sec")), sum)
mean1segcar_g_fs<-append(list(size = sum1segcar_g_fs$size), list(time = as.numeric(sum1segcar_g_fs$segundos)))
mean1segcar_g_fs$size[1:150]<- mean1segcar_g_fs$size[1:150]/7
mean1segcar_g_fs$size[151:225]<- mean1segcar_g_fs$size[151:225]/11
mean1segcar_g_fs$size[226:300]<- mean1segcar_g_fs$size[226:300]/15
pd_g_car<-tracecar_g_fs
pd_g_car$size<-pd_g_car$size/8/1498
sumpd75segcar_g_fs<-aggregate(list(size = pd_g_car$size), list(segundos = cut(pd_g_car$time, "75 sec")), sum)
meanpd75segcar_g_fs<-append(list(size = sumpd75segcar_g_fs$size), list(time = as.numeric(sumpd75segcar_g_fs$segundos)))
#Compute PDR Server G: received at server / sent by cars
pdr75seg_g_fs<-meanpd75segserver_g_fs$size/meanpd75segcar_g_fs$size
pdr1seg_g_fs<-mean1segserver_g_fs$size[1:300]/mean1segcar_g_fs$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_g_fs<-CI(pdr1seg_g_fs[1:75], ci=0.95)
x_g_fs<-CI(pdr1seg_g_fs[76:150], ci=0.95)
y_g_fs<-CI(pdr1seg_g_fs[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_g_fs<-CI(pdr1seg_g_fs[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_g_fs<-c(w_g_fs[1], x_g_fs[1], y_g_fs[1], z_g_fs[1])
lo_g_fs<-c(w_g_fs[3], x_g_fs[3], y_g_fs[3], z_g_fs[3])
###FQ
## Application G under the QoS ("fq") scheme: same pipeline as the fs stanza.
#Compute Server G received
traceserver_g_fq<-read.table(file = 'result/server_gtf_car_fq_tt.txt', sep=' ')
names(traceserver_g_fq)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_g_fq$time <- as.POSIXlt(traceserver_g_fq$time, origin = "1987-10-05 11:00:00")
traceserver_g_fq$size<- traceserver_g_fq$size*8  # bytes -> bits
sum1segserver_g_fq<-aggregate(list(size = traceserver_g_fq$size), list(segundos = cut(traceserver_g_fq$time, "1 sec")), sum)
mean1segserver_g_fq<-append(list(size = sum1segserver_g_fq$size), list(time = as.numeric(sum1segserver_g_fq$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_g_fq$size[1:150]<- mean1segserver_g_fq$size[1:150]/7
mean1segserver_g_fq$size[151:225]<- mean1segserver_g_fq$size[151:225]/11
mean1segserver_g_fq$size[226:300]<- mean1segserver_g_fq$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_g_server<-traceserver_g_fq
pd_g_server$size<-pd_g_server$size/8/1498
sumpd75segserver_g_fq<-aggregate(list(size = pd_g_server$size), list(segundos = cut(pd_g_server$time, "75 sec")), sum)
meanpd75segserver_g_fq<-append(list(size = sumpd75segserver_g_fq$size), list(time = as.numeric(sumpd75segserver_g_fq$segundos)))
#Compute Car sent Server G (application port 5005)
tracecar_g_fq<-read.table(file = 'result/cartf_fq_5005_tt.txt', sep=' ')
names(tracecar_g_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_g_fq$time <- as.POSIXlt(tracecar_g_fq$time, origin = "1987-10-05 11:00:00")
tracecar_g_fq$size<- tracecar_g_fq$size*8
sum1segcar_g_fq<-aggregate(list(size = tracecar_g_fq$size), list(segundos = cut(tracecar_g_fq$time, "1 sec")), sum)
mean1segcar_g_fq<-append(list(size = sum1segcar_g_fq$size), list(time = as.numeric(sum1segcar_g_fq$segundos)))
mean1segcar_g_fq$size[1:150]<- mean1segcar_g_fq$size[1:150]/7
mean1segcar_g_fq$size[151:225]<- mean1segcar_g_fq$size[151:225]/11
mean1segcar_g_fq$size[226:300]<- mean1segcar_g_fq$size[226:300]/15
pd_g_car<-tracecar_g_fq
pd_g_car$size<-pd_g_car$size/8/1498
sumpd75segcar_g_fq<-aggregate(list(size = pd_g_car$size), list(segundos = cut(pd_g_car$time, "75 sec")), sum)
meanpd75segcar_g_fq<-append(list(size = sumpd75segcar_g_fq$size), list(time = as.numeric(sumpd75segcar_g_fq$segundos)))
#Compute PDR Server G: received at server / sent by cars
pdr75seg_g_fq<-meanpd75segserver_g_fq$size/meanpd75segcar_g_fq$size
pdr1seg_g_fq<-mean1segserver_g_fq$size[1:300]/mean1segcar_g_fq$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_g_fq<-CI(pdr1seg_g_fq[1:75], ci=0.95)
x_g_fq<-CI(pdr1seg_g_fq[76:150], ci=0.95)
y_g_fq<-CI(pdr1seg_g_fq[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_g_fq<-CI(pdr1seg_g_fq[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_g_fq<-c(w_g_fq[1], x_g_fq[1], y_g_fq[1], z_g_fq[1])
lo_g_fq<-c(w_g_fq[3], x_g_fq[3], y_g_fq[3], z_g_fq[3])
####FN
## Application G under the best-effort ("fn") scheme: same pipeline again.
#Compute Server G received
traceserver_g_fn<-read.table(file = 'result/server_gtf_car_fn_tt.txt', sep=' ')
names(traceserver_g_fn)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_g_fn$time <- as.POSIXlt(traceserver_g_fn$time, origin = "1987-10-05 11:00:00")
traceserver_g_fn$size<- traceserver_g_fn$size*8  # bytes -> bits
sum1segserver_g_fn<-aggregate(list(size = traceserver_g_fn$size), list(segundos = cut(traceserver_g_fn$time, "1 sec")), sum)
mean1segserver_g_fn<-append(list(size = sum1segserver_g_fn$size), list(time = as.numeric(sum1segserver_g_fn$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_g_fn$size[1:150]<- mean1segserver_g_fn$size[1:150]/7
mean1segserver_g_fn$size[151:225]<- mean1segserver_g_fn$size[151:225]/11
mean1segserver_g_fn$size[226:300]<- mean1segserver_g_fn$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_g_server<-traceserver_g_fn
pd_g_server$size<-pd_g_server$size/8/1498
sumpd75segserver_g_fn<-aggregate(list(size = pd_g_server$size), list(segundos = cut(pd_g_server$time, "75 sec")), sum)
meanpd75segserver_g_fn<-append(list(size = sumpd75segserver_g_fn$size), list(time = as.numeric(sumpd75segserver_g_fn$segundos)))
#Compute Car sent Server G (application port 5005)
tracecar_g_fn<-read.table(file = 'result/cartf_fn_5005_tt.txt', sep=' ')
names(tracecar_g_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_g_fn$time <- as.POSIXlt(tracecar_g_fn$time, origin = "1987-10-05 11:00:00")
tracecar_g_fn$size<- tracecar_g_fn$size*8
sum1segcar_g_fn<-aggregate(list(size = tracecar_g_fn$size), list(segundos = cut(tracecar_g_fn$time, "1 sec")), sum)
mean1segcar_g_fn<-append(list(size = sum1segcar_g_fn$size), list(time = as.numeric(sum1segcar_g_fn$segundos)))
mean1segcar_g_fn$size[1:150]<- mean1segcar_g_fn$size[1:150]/7
mean1segcar_g_fn$size[151:225]<- mean1segcar_g_fn$size[151:225]/11
mean1segcar_g_fn$size[226:300]<- mean1segcar_g_fn$size[226:300]/15
pd_g_car<-tracecar_g_fn
pd_g_car$size<-pd_g_car$size/8/1498
sumpd75segcar_g_fn<-aggregate(list(size = pd_g_car$size), list(segundos = cut(pd_g_car$time, "75 sec")), sum)
meanpd75segcar_g_fn<-append(list(size = sumpd75segcar_g_fn$size), list(time = as.numeric(sumpd75segcar_g_fn$segundos)))
#Compute PDR Server G: received at server / sent by cars
pdr75seg_g_fn<-meanpd75segserver_g_fn$size/meanpd75segcar_g_fn$size
pdr1seg_g_fn<-mean1segserver_g_fn$size[1:300]/mean1segcar_g_fn$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_g_fn<-CI(pdr1seg_g_fn[1:75], ci=0.95)
x_g_fn<-CI(pdr1seg_g_fn[76:150], ci=0.95)
y_g_fn<-CI(pdr1seg_g_fn[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_g_fn<-CI(pdr1seg_g_fn[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_g_fn<-c(w_g_fn[1], x_g_fn[1], y_g_fn[1], z_g_fn[1])
lo_g_fn<-c(w_g_fn[3], x_g_fn[3], y_g_fn[3], z_g_fn[3])
## PDR-vs-congestion plot for Application G: Framework (red), QoS (blue)
## and best effort (orange), each with its 95% CI per congestion level.
library(plotrix)
#plotCI(c(1:4), pdr75seg_g_fs[1:4], ui=up_g_fs, li=lo_g_fs, col="red", main="PDR Application G", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0,1), xaxt="n")
plotCI(c(1:4), pdr75seg_g_fs[1:4], ui=up_g_fs, li=lo_g_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0,1), xaxt="n")
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_g_fs[1:4], type = "l", col="red", lwd="2")
par(new=TRUE)  # overlay next series on the same plot (TRUE, not reassignable T)
plotCI(c(1:4), pdr75seg_g_fq[1:4], ui=up_g_fq, li=lo_g_fq, col="blue", axes=FALSE, xlab=NA, ylab=NA, lwd="2", ylim=c(0,1))
lines(c(1:4),pdr75seg_g_fq[1:4], type = "l", col="blue", lwd="2")
par(new=TRUE)
plotCI(c(1:4), pdr75seg_g_fn[1:4], ui=up_g_fn, li=lo_g_fn, col="orange", axes=FALSE, xlab=NA, ylab=NA, lwd="2", ylim=c(0,1))
lines(c(1:4),pdr75seg_g_fn[1:4], type = "l", col="orange", lwd="2")
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
################################################################################
#APP S
###FS
## Application S under the framework ("fs") scheme: per-second and per-75 s
## packet counts at server and cars, then PDR + 95% CIs.
#Compute Server S received
traceserver_s_fs<-read.table(file = 'result/server_stf_car_fs_tt.txt', sep=' ')
names(traceserver_s_fs)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_s_fs$time <- as.POSIXlt(traceserver_s_fs$time, origin = "1987-10-05 11:00:00")
traceserver_s_fs$size<- traceserver_s_fs$size*8  # bytes -> bits
sum1segserver_s_fs<-aggregate(list(size = traceserver_s_fs$size), list(segundos = cut(traceserver_s_fs$time, "1 sec")), sum)
mean1segserver_s_fs<-append(list(size = sum1segserver_s_fs$size), list(time = as.numeric(sum1segserver_s_fs$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_s_fs$size[1:150]<- mean1segserver_s_fs$size[1:150]/7
mean1segserver_s_fs$size[151:225]<- mean1segserver_s_fs$size[151:225]/11
mean1segserver_s_fs$size[226:300]<- mean1segserver_s_fs$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_s_server<-traceserver_s_fs
pd_s_server$size<-pd_s_server$size/8/1498
sumpd75segserver_s_fs<-aggregate(list(size = pd_s_server$size), list(segundos = cut(pd_s_server$time, "75 sec")), sum)
meanpd75segserver_s_fs<-append(list(size = sumpd75segserver_s_fs$size), list(time = as.numeric(sumpd75segserver_s_fs$segundos)))
#Compute Car sent Server S (application port 5002)
tracecar_s_fs<-read.table(file = 'result/cartf_fs_5002_tt.txt', sep=' ')
names(tracecar_s_fs)<-c("time", "id", "size", "ori", "dest" )
tracecar_s_fs$time <- as.POSIXlt(tracecar_s_fs$time, origin = "1987-10-05 11:00:00")
tracecar_s_fs$size<- tracecar_s_fs$size*8
sum1segcar_s_fs<-aggregate(list(size = tracecar_s_fs$size), list(segundos = cut(tracecar_s_fs$time, "1 sec")), sum)
mean1segcar_s_fs<-append(list(size = sum1segcar_s_fs$size), list(time = as.numeric(sum1segcar_s_fs$segundos)))
mean1segcar_s_fs$size[1:150]<- mean1segcar_s_fs$size[1:150]/7
mean1segcar_s_fs$size[151:225]<- mean1segcar_s_fs$size[151:225]/11
mean1segcar_s_fs$size[226:300]<- mean1segcar_s_fs$size[226:300]/15
pd_s_car<-tracecar_s_fs
pd_s_car$size<-pd_s_car$size/8/1498
sumpd75segcar_s_fs<-aggregate(list(size = pd_s_car$size), list(segundos = cut(pd_s_car$time, "75 sec")), sum)
meanpd75segcar_s_fs<-append(list(size = sumpd75segcar_s_fs$size), list(time = as.numeric(sumpd75segcar_s_fs$segundos)))
#Compute PDR Server S: received at server / sent by cars
pdr75seg_s_fs<-meanpd75segserver_s_fs$size/meanpd75segcar_s_fs$size
pdr1seg_s_fs<-mean1segserver_s_fs$size[1:300]/mean1segcar_s_fs$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_s_fs<-CI(pdr1seg_s_fs[1:75], ci=0.95)
x_s_fs<-CI(pdr1seg_s_fs[76:150], ci=0.95)
y_s_fs<-CI(pdr1seg_s_fs[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_s_fs<-CI(pdr1seg_s_fs[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_s_fs<-c(w_s_fs[1], x_s_fs[1], y_s_fs[1], z_s_fs[1])
lo_s_fs<-c(w_s_fs[3], x_s_fs[3], y_s_fs[3], z_s_fs[3])
###FQ
## Application S under the QoS ("fq") scheme: same pipeline as the fs stanza.
#Compute Server S received
traceserver_s_fq<-read.table(file = 'result/server_stf_car_fq_tt.txt', sep=' ')
names(traceserver_s_fq)<-c("time", "id", "size", "ori", "dest" )
# FIX: option is "digits.secs" ("drigits.secs" was a typo with no effect).
options(digits.secs = 6)
traceserver_s_fq$time <- as.POSIXlt(traceserver_s_fq$time, origin = "1987-10-05 11:00:00")
traceserver_s_fq$size<- traceserver_s_fq$size*8  # bytes -> bits
sum1segserver_s_fq<-aggregate(list(size = traceserver_s_fq$size), list(segundos = cut(traceserver_s_fq$time, "1 sec")), sum)
mean1segserver_s_fq<-append(list(size = sum1segserver_s_fq$size), list(time = as.numeric(sum1segserver_s_fq$segundos)))
# Average over concurrent senders per congestion window (7 / 11 / 15).
mean1segserver_s_fq$size[1:150]<- mean1segserver_s_fq$size[1:150]/7
mean1segserver_s_fq$size[151:225]<- mean1segserver_s_fq$size[151:225]/11
mean1segserver_s_fq$size[226:300]<- mean1segserver_s_fq$size[226:300]/15
# Bits -> packet counts (1498-byte packets), summed per 75 s window.
pd_s_server<-traceserver_s_fq
pd_s_server$size<-pd_s_server$size/8/1498
sumpd75segserver_s_fq<-aggregate(list(size = pd_s_server$size), list(segundos = cut(pd_s_server$time, "75 sec")), sum)
meanpd75segserver_s_fq<-append(list(size = sumpd75segserver_s_fq$size), list(time = as.numeric(sumpd75segserver_s_fq$segundos)))
#Compute Car sent Server S (application port 5002)
tracecar_s_fq<-read.table(file = 'result/cartf_fq_5002_tt.txt', sep=' ')
names(tracecar_s_fq)<-c("time", "id", "size", "ori", "dest" )
tracecar_s_fq$time <- as.POSIXlt(tracecar_s_fq$time, origin = "1987-10-05 11:00:00")
tracecar_s_fq$size<- tracecar_s_fq$size*8
sum1segcar_s_fq<-aggregate(list(size = tracecar_s_fq$size), list(segundos = cut(tracecar_s_fq$time, "1 sec")), sum)
mean1segcar_s_fq<-append(list(size = sum1segcar_s_fq$size), list(time = as.numeric(sum1segcar_s_fq$segundos)))
mean1segcar_s_fq$size[1:150]<- mean1segcar_s_fq$size[1:150]/7
mean1segcar_s_fq$size[151:225]<- mean1segcar_s_fq$size[151:225]/11
mean1segcar_s_fq$size[226:300]<- mean1segcar_s_fq$size[226:300]/15
pd_s_car<-tracecar_s_fq
pd_s_car$size<-pd_s_car$size/8/1498
sumpd75segcar_s_fq<-aggregate(list(size = pd_s_car$size), list(segundos = cut(pd_s_car$time, "75 sec")), sum)
meanpd75segcar_s_fq<-append(list(size = sumpd75segcar_s_fq$size), list(time = as.numeric(sumpd75segcar_s_fq$segundos)))
#Compute PDR Server S: received at server / sent by cars
pdr75seg_s_fq<-meanpd75segserver_s_fq$size/meanpd75segcar_s_fq$size
pdr1seg_s_fq<-mean1segserver_s_fq$size[1:300]/mean1segcar_s_fq$size[1:300]
library(Rmisc)
# 95% CI of per-second PDR within each 75 s congestion level.
w_s_fq<-CI(pdr1seg_s_fq[1:75], ci=0.95)
x_s_fq<-CI(pdr1seg_s_fq[76:150], ci=0.95)
y_s_fq<-CI(pdr1seg_s_fq[151:225], ci=0.95)
# FIX: was 225:300 (overlapped the previous window at second 225).
z_s_fq<-CI(pdr1seg_s_fq[226:300], ci=0.95)
# CI() returns c(upper, mean, lower): [1] = upper, [3] = lower.
up_s_fq<-c(w_s_fq[1], x_s_fq[1], y_s_fq[1], z_s_fq[1])
lo_s_fq<-c(w_s_fq[3], x_s_fq[3], y_s_fq[3], z_s_fq[3])
####FN
#Compute Server S received
# Best-effort ("fn") scenario: packets received at server S.
traceserver_s_fn<-read.table(file = 'result/server_stf_car_fn_tt.txt', sep=' ')
names(traceserver_s_fn)<-c("time", "id", "size", "ori", "dest" )
# BUG FIX: the option is 'digits.secs' (controls printing of fractional
# seconds); 'drigits.secs' was a typo that silently set a meaningless option.
options(digits.secs = 6)
traceserver_s_fn$time <- as.POSIXlt(traceserver_s_fn$time, origin = "1987-10-05 11:00:00")
# Bytes -> bits.
traceserver_s_fn$size<- traceserver_s_fn$size*8
sum1segserver_s_fn<-aggregate(list(size = traceserver_s_fn$size), list(segundos = cut(traceserver_s_fn$time, "1 sec")), sum)
mean1segserver_s_fn<-append(list(size = sum1segserver_s_fn$size), list(time = as.numeric(sum1segserver_s_fn$segundos)))
# Average per congestion window (7/11/15 presumably = runs per window — TODO confirm).
mean1segserver_s_fn$size[1:150]<- mean1segserver_s_fn$size[1:150]/7
mean1segserver_s_fn$size[151:225]<- mean1segserver_s_fn$size[151:225]/11
mean1segserver_s_fn$size[226:300]<- mean1segserver_s_fn$size[226:300]/15
# Bits -> packet counts (1498-byte frames assumed).
pd_s_server<-traceserver_s_fn
pd_s_server$size<-pd_s_server$size/8/1498
sumpd75segserver_s_fn<-aggregate(list(size = pd_s_server$size), list(segundos = cut(pd_s_server$time, "75 sec")), sum)
meanpd75segserver_s_fn<-append(list(size = sumpd75segserver_s_fn$size), list(time = as.numeric(sumpd75segserver_s_fn$segundos)))
#Compute Car sent Server S
# Same pipeline for packets sent by cars in the best-effort scenario.
tracecar_s_fn<-read.table(file = 'result/cartf_fn_5002_tt.txt', sep=' ')
names(tracecar_s_fn)<-c("time", "id", "size", "ori", "dest" )
tracecar_s_fn$time <- as.POSIXlt(tracecar_s_fn$time, origin = "1987-10-05 11:00:00")
tracecar_s_fn$size<- tracecar_s_fn$size*8
sum1segcar_s_fn<-aggregate(list(size = tracecar_s_fn$size), list(segundos = cut(tracecar_s_fn$time, "1 sec")), sum)
mean1segcar_s_fn<-append(list(size = sum1segcar_s_fn$size), list(time = as.numeric(sum1segcar_s_fn$segundos)))
mean1segcar_s_fn$size[1:150]<- mean1segcar_s_fn$size[1:150]/7
mean1segcar_s_fn$size[151:225]<- mean1segcar_s_fn$size[151:225]/11
mean1segcar_s_fn$size[226:300]<- mean1segcar_s_fn$size[226:300]/15
pd_s_car<-tracecar_s_fn
pd_s_car$size<-pd_s_car$size/8/1498
sumpd75segcar_s_fn<-aggregate(list(size = pd_s_car$size), list(segundos = cut(pd_s_car$time, "75 sec")), sum)
meanpd75segcar_s_fn<-append(list(size = sumpd75segcar_s_fn$size), list(time = as.numeric(sumpd75segcar_s_fn$segundos)))
#Compute PDR Server S
# Packet-delivery ratio for the best-effort scenario, per window and per second.
pdr75seg_s_fn<-meanpd75segserver_s_fn$size/meanpd75segcar_s_fn$size
pdr1seg_s_fn<-mean1segserver_s_fn$size[1:300]/mean1segcar_s_fn$size[1:300]
require(Rmisc)
# 95% confidence intervals over the four congestion windows.
w_s_fn<-CI(pdr1seg_s_fn[1:75], ci=0.95)
x_s_fn<-CI(pdr1seg_s_fn[76:150], ci=0.95)
y_s_fn<-CI(pdr1seg_s_fn[151:225], ci=0.95)
# BUG FIX: window was 225:300, overlapping index 225 with the previous window;
# the fourth window is 226:300 everywhere else in this script.
z_s_fn<-CI(pdr1seg_s_fn[226:300], ci=0.95)
# Upper/lower CI bounds in window order, for plotCI below.
up_s_fn<-c(w_s_fn[1], x_s_fn[1], y_s_fn[1], z_s_fn[1])
lo_s_fn<-c(w_s_fn[3], x_s_fn[3], y_s_fn[3], z_s_fn[3])
require(plotrix)
# Overlay the three scenarios' PDR (with CI error bars) on one axis.
# NOTE(review): the *_fs series (Framework) are computed earlier in the file,
# outside this section — confirm they exist before running this block.
#plotCI(c(1:4), pdr75seg_s_fs[1:4], ui=up_s_fs, li=lo_s_fs, col="red", main="PDR Application S", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.6,1.05), xaxt="n")
plotCI(c(1:4), pdr75seg_s_fs[1:4], ui=up_s_fs, li=lo_s_fs, col="red", ylab = "PDR", xlab = "Congestion level", lwd="2", ylim=c(0.6,1.05), xaxt="n")
# Label congestion levels C1..C4 on the suppressed x axis.
axis(1, at=1:4, labels=c("C1", "C2", "C3", "C4"))
lines(c(1:4),pdr75seg_s_fs[1:4], type = "l", col="red", lwd="2")
# par(new=T) draws the next plot on the same device without clearing it;
# axes=F avoids re-drawing axes, and ylim must match for alignment.
par(new=T)
plotCI(c(1:4), pdr75seg_s_fq[1:4], ui=up_s_fq, li=lo_s_fq, col="blue", axes=F, xlab=NA, ylab=NA, lwd="2", ylim=c(0.6,1.05))
lines(c(1:4),pdr75seg_s_fq[1:4], type = "l", col="blue", lwd="2")
par(new=T)
plotCI(c(1:4), pdr75seg_s_fn[1:4], ui=up_s_fn, li=lo_s_fn, col="orange", axes=F, xlab=NA, ylab=NA, lwd="2", ylim=c(0.6,1.05))
lines(c(1:4),pdr75seg_s_fn[1:4], type = "l", col="orange", lwd="2")
legend("topright", legend=c("Framework", "QoS", "Best effort"), lty=c(1,1,1), col=c("red", "blue", "orange"))
|
# Assign 2 to variable x and print it (typing a name at top level auto-prints)
x <- 2
x
# Assign 5 to variable y and print it
y <- 5
y
| /Week2Demo.R | no_license | iarlaith/Week2Demo | R | false | false | 88 | r | #assing 2 to x variable and print
x <- 2
x
# Assign 5 to variable y and print it
y <- 5
y
|
# Compute three segregation indices for every prepped city file and plot
# the dissimilarity index by city.
source("metric_functions.R")

cities <- c("Baltimore", "Charleston", "Chicago", "Columbus", "Dayton",
            "Denver", "Kansas City", "Memphis", "Milwaukee", "Oklahoma City",
            "Pittsburgh", "St. Louis", "Syracuse", "Wichita")

# One data frame per prepped CSV, in alphabetical (city) order.
filenames <- list.files("data/prepped", pattern="*.csv", full.names=TRUE)
ldf <- lapply(filenames, read.csv)

# Apply each metric (defined in metric_functions.R) to every city.
dissimilarities <- lapply(ldf, dissimilarity)
interactions <- lapply(ldf, interaction)
isolations <- lapply(ldf, isolation)

# Assemble one row per city; name columns and rows at construction time
# instead of patching them afterwards.
df <- data.frame(
  dissimilarity.index = unlist(dissimilarities),
  interaction.index = unlist(interactions),
  isolation.index = unlist(isolations),
  row.names = cities
)

plot(df$dissimilarity.index, xlab='City', ylab='Dissimilarity Index')
cities <- c("Baltimore", "Charleston", "Chicago", "Columbus", "Dayton",
"Denver", "Kansas City", "Memphis", "Milwaukee", "Oklahoma City",
"Pittsburgh", "St. Louis", "Syracuse", "Wichita")
filenames <- list.files("data/prepped", pattern="*.csv", full.names=TRUE)
ldf <- lapply(filenames, read.csv)
dissimilarities <- lapply(ldf, dissimilarity)
interactions <- lapply(ldf, interaction)
isolations <- lapply(ldf, isolation)
df <- data.frame(unlist(dissimilarities), unlist(interactions), unlist(isolations))
rownames(df) <- cities
colnames(df)[1] <- 'dissimilarity.index'
colnames(df)[2] <- 'interaction.index'
colnames(df)[3] <- 'isolation.index'
plot(df$dissimilarity.index, xlab='City', ylab='Dissimilarity Index') |
##' simulateScene generates a matingScene object -- a simulated population
##' in a standard format with individuals randomly assigned a mating schedule,
##' a location, and S-alleles
##'
##' @title Simulate a Mating Scene
##' @param size integer number of plants
##' @param meanSD date mean start date
##' @param sdSD date standard deviation of start date
##' @param skSD skew of the start date of the population
##' @param meanDur numeric duration in days
##' @param sdDur standard deviation of duration in days
##' @param xRange range of spatial extent of individuals along x-axis
##' @param yRange range of spatial extent of individuals along y-axis
##' @param distro unimplemented
##' @param sAlleles integer count of S-Alleles that could be in the population
##'
##' @return matingScene data frame -- see \code{\link{makeScene}}
##' @seealso \code{\link{makeScene}}
##' @author Stuart Wagenius
##' @examples
##' simulateScene()
##' \dontrun{simulateScene(NULL)}
simulateScene <- function(size = 30, meanSD = "2012-07-12", sdSD = 6, meanDur = 11,
                          sdDur = 3, skSD = 0 ,xRange = c(0, 100), yRange = c(0, 100),
                          distro = "unif", sAlleles = 10) {
  # Mean start date as an integer day number (days since 1970-01-01).
  md <- as.integer(as.Date(meanSD, "%Y-%m-%d"))
  # Skew-normal draw around the mean start date (sn::rsn handles the skew).
  sd <- as.integer(md + round(sn::rsn(n = size, 0, omega = sdSD, alpha = skSD), 0))
  # End date = start + non-negative, normally distributed duration.
  ed <- as.integer(sd + abs(round(rnorm(size, meanDur, sdDur), 0)))
  if (distro != "unif")
    warning("distro must be unif")  # only uniform spatial placement is implemented
  xv <- runif(size, min = xRange[1], max = xRange[2])
  yv <- runif(size, min = yRange[1], max = yRange[2])
  # First S-allele drawn uniformly; second forced to differ from the first.
  sM <- sample(x = 1:sAlleles, size = size, replace = TRUE)
  if (sAlleles == 2) {
    sP <- 3 - sM
  } else {
    sP <- sapply(sM, FUN = function(x) sample((1:sAlleles)[-x], 1))
  }
  df <- data.frame(id = 1:size, start = sd, end = ed, x = xv,
                   y = yv, s1 = sM, s2 = sP)
  # BUG FIX: idCol was "pla", a column that does not exist in df, so makeScene
  # silently regenerated the ids; pass the actual "id" column instead.
  # dateFormat here acts as the origin for as.Date() on the integer start/end.
  makeScene(df, startCol = "start", endCol = "end", xCol = "x", yCol = "y",
            idCol = "id", dateFormat = "1970-01-01")
}
##' Turns a data frame with information about temporal, spatial, or
##' genetic mating data into a matingScene object using a standard format.
##'
##' @title Create a matingScene object from a data frame
##' @param df a data frame containing information about a mating scene,
##' namely coordinate of individuals in space, time, and mating type.
##' @param multiYear logical indicating whether or not to split the result into
##' a list by year
##' @param startCol character name of column with start dates
##' @param endCol character name of column with end dates
##' @param xCol character name of column with x or E coordinates
##' @param yCol character name of column with y or N coordinates
##' @param s1Col character name of one column with S-allele
##' @param s2Col character name of another column with S-alleles
##' @param idCol character name for column with unique identifier
##' @param otherCols character vector of column(s) to include besides the
##' necessary ones for the mating scene. If NULL, it will be ignored.
##' @param dateFormat character indicating either (1) the format of the start and end
##' date columns if those columns are characters or (2) the origin for the start
##' and end date columns if those columns are numeric. It is used in as.Date
##' @param split character name for a column with values by which the result should be split
##'
##' @return a matingScene object, either a single dataframe in standard format
##' or a list of dataframes. Attributes of the matingScene object indicate the type of
##' information in the data frame, including the original column names,
##' and the origin of the date columns. If multiYear = TRUE,
##' the return value will be a list of matingScene data frames where each
##' element in the list represents one year. If split is specified, the return value will be a list of matingScene data frames where each element in the list represents a value of the specifed variable. See details for more information
##' on attributes and how to work with multi-year data.
##' @details The input dataframe can contain information about locations of
##' individuals in 1, 2, or 3 dimensions of a mating scenes.
##' The function currently allows two spatial coordinates. The user specifies
##' the names of the columns and they will be saved xCol and yCol in the
##' matingScene object. MatingScene objects currently save temporal
##' coordinates for individuals as start and end date of mating activity
##' within a year. Mating type coordinates are saved as mating type alleles.
##' Columns are named id, start, end, x, y, s1, and s2 for
##' idCol, startCol, endCol, xCol, yCol, s1Col, and s2Col respectively.
##' The attributes "t", "s", and "mt" will be set to TRUE if the data frame
##' has temporal, spatial, or mating type data, respectively and
##' will be FALSE otherwise. The attribute originalNames contains all the
##' names of the columns in the original data frame.\cr
##' The start and end columns will be changed to integers relative to the start
##' day of the population. So the first day of the first individual to become
##' receptive will be 1 and so on. The attribute origin contains the
##' origin that can be used when converting the columns start and end
##' from integers to dates.\cr
##' If no temporal data are available except the year in which it was
##' collected and df is a multi-year data set, put the collection year into the
##' column labelled as startCol and set dateFormat = "%Y" and that will split
##' the data appropriately.
##' @author Danny Hanson
# Build a matingScene data frame (or a list of them) from raw columns.
# Three modes: multiYear recursion, split-by-column recursion, or the base
# case that assembles one standardized data frame with t/s/mt attributes.
makeScene <- function (df, multiYear = FALSE, startCol = "start", endCol = "end",
                       xCol = "x", yCol = "y", s1Col = "s1", s2Col = "s2",
                       idCol = "id", otherCols = NULL, dateFormat = "%Y-%m-%d",
                       split = NULL) {
  if (multiYear) {
    # Derive the calendar year of every row, then recurse once per year,
    # returning a year-named list of single-year matingScenes.
    if (dateFormat == "%Y") {
      # Only a collection year is available; coerce via character first.
      dates <- as.Date(as.character(df[, startCol]), dateFormat)
    } else {
      dates <- as.Date(df[, startCol], dateFormat)
    }
    df$year <- as.numeric(format(dates, "%Y"))
    years <- levels(as.factor(df$year))
    newScene <- list()
    for (i in 1:length(years)) {
      newScene[[as.character(years[i])]] <-
        makeScene(df[df$year %in% years[i],], F, startCol, endCol, xCol, yCol,
                  s1Col, s2Col, idCol, otherCols, dateFormat, split)
    }
  } else if(!is.null(split)){
    # Split by an arbitrary grouping column: recurse once per factor level.
    splitTo <- levels(as.factor(df[,split]))
    newScene <- list()
    for (i in 1:length(splitTo)){
      newScene[[as.character(splitTo[i])]] <-
        makeScene(df[df[,split] %in% splitTo[i],], F, startCol, endCol, xCol, yCol,
                  s1Col, s2Col, idCol, otherCols, dateFormat)
    }
  } else {
    # Base case: assemble the standardized data frame.
    newScene <- data.frame(id = character(nrow(df)))
    if (idCol %in% names(df)) {
      newScene$id <- df[, idCol]
    } else {
      # No id column supplied: fall back to row numbers.
      newScene$id <- 1:nrow(df)
    }
    # Flags advertising which coordinate types are present.
    attr(newScene, "t") <- FALSE
    attr(newScene, "s") <- FALSE
    attr(newScene, "mt") <- FALSE
    attr(newScene, "originalNames") <- names(df)
    if (all(c(startCol, endCol) %in% names(df))) {
      # Temporal coordinates: rebase start/end as integers where day 1 is the
      # first start date in the population; keep the origin as an attribute
      # so the integers can be converted back to dates.
      attr(newScene, "t") <- TRUE
      newScene$start <- as.integer(as.Date(df[, startCol], dateFormat))
      firstDay <- min(newScene$start)
      newScene$start <- newScene$start - firstDay + 1
      newScene$end <- as.integer(as.Date(df[, endCol], dateFormat)) - firstDay + 1
      newScene$duration <- newScene$end - newScene$start + 1
      origin <- as.Date(firstDay-1, "1970-01-01")
      attr(newScene, "origin") <- origin
    }
    if (all(c(xCol, yCol) %in% names(df))) {
      # Spatial coordinates copied through unchanged.
      attr(newScene, "s") <- TRUE
      newScene$x <- df[, xCol]
      newScene$y <- df[, yCol]
    }
    if (all(c(s1Col, s2Col) %in% names(df))) {
      # Mating-type (S-allele) columns stored as factors.
      attr(newScene, "mt") <- TRUE
      newScene$s1 <- as.factor(df[, s1Col])
      newScene$s2 <- as.factor(df[, s2Col])
    }
    if (!is.null(otherCols)) {
      # Carry any extra user-requested columns through unchanged.
      newScene[, otherCols] <- df[, otherCols]
    }
    # not going to add this for now because it's unlikely we'll make our
    # own generics or use oop
    # class(newScene) <- "matingScene"
  }
  newScene
}
| /R/setUpPopulations.R | no_license | swnordstrom/mateable | R | false | false | 8,066 | r | ##' simulateScene generates a matingScene object -- a simulated population
##' in a standard format with individuals randomly assigned a mating schedule,
##' a location, and S-alleles
##'
##' @title Simulate a Mating Scene
##' @param size integer number of plants
##' @param meanSD date mean start date
##' @param sdSD date standard deviation of start date
##' @param skSD skew of the start date of the population
##' @param meanDur numeric duration in days
##' @param sdDur standard deviation of duration in days
##' @param xRange range of spatial extent of individuals along x-axis
##' @param yRange range of spatial extent of individuals along y-axis
##' @param distro unimplemented
##' @param sAlleles integer count of S-Alleles that could be in the population
##'
##' @return matingScene data frame -- see \code{\link{makeScene}}
##' @seealso \code{\link{makeScene}}
##' @author Stuart Wagenius
##' @examples
##' simulateScene()
##' \dontrun{simulateScene(NULL)}
simulateScene <- function(size = 30, meanSD = "2012-07-12", sdSD = 6, meanDur = 11,
                          sdDur = 3, skSD = 0 ,xRange = c(0, 100), yRange = c(0, 100),
                          distro = "unif", sAlleles = 10) {
  # Mean start date as an integer day number (days since 1970-01-01).
  md <- as.integer(as.Date(meanSD, "%Y-%m-%d"))
  # Skew-normal draw around the mean start date (sn::rsn handles the skew).
  sd <- as.integer(md + round(sn::rsn(n = size, 0, omega = sdSD, alpha = skSD), 0))
  # End date = start + non-negative, normally distributed duration.
  ed <- as.integer(sd + abs(round(rnorm(size, meanDur, sdDur), 0)))
  if (distro != "unif")
    warning("distro must be unif")  # only uniform spatial placement is implemented
  xv <- runif(size, min = xRange[1], max = xRange[2])
  yv <- runif(size, min = yRange[1], max = yRange[2])
  # First S-allele drawn uniformly; second forced to differ from the first.
  sM <- sample(x = 1:sAlleles, size = size, replace = TRUE)
  if (sAlleles == 2) {
    sP <- 3 - sM
  } else {
    sP <- sapply(sM, FUN = function(x) sample((1:sAlleles)[-x], 1))
  }
  df <- data.frame(id = 1:size, start = sd, end = ed, x = xv,
                   y = yv, s1 = sM, s2 = sP)
  # BUG FIX: idCol was "pla", a column that does not exist in df, so makeScene
  # silently regenerated the ids; pass the actual "id" column instead.
  # dateFormat here acts as the origin for as.Date() on the integer start/end.
  makeScene(df, startCol = "start", endCol = "end", xCol = "x", yCol = "y",
            idCol = "id", dateFormat = "1970-01-01")
}
##' Turns a data frame with information about temporal, spatial, or
##' genetic mating data into a matingScene object using a standard format.
##'
##' @title Create a matingScene object from a data frame
##' @param df a data frame containing information about a mating scene,
##' namely coordinate of individuals in space, time, and mating type.
##' @param multiYear logical indicating whether or not to split the result into
##' a list by year
##' @param startCol character name of column with start dates
##' @param endCol character name of column with end dates
##' @param xCol character name of column with x or E coordinates
##' @param yCol character name of column with y or N coordinates
##' @param s1Col character name of one column with S-allele
##' @param s2Col character name of another column with S-alleles
##' @param idCol character name for column with unique identifier
##' @param otherCols character vector of column(s) to include besides the
##' necessary ones for the mating scene. If NULL, it will be ignored.
##' @param dateFormat character indicating either (1) the format of the start and end
##' date columns if those columns are characters or (2) the origin for the start
##' and end date columns if those columns are numeric. It is used in as.Date
##' @param split character name for a column with values by which the result should be split
##'
##' @return a matingScene object, either a single dataframe in standard format
##' or a list of dataframes. Attributes of the matingScene object indicate the type of
##' information in the data frame, including the original column names,
##' and the origin of the date columns. If multiYear = TRUE,
##' the return value will be a list of matingScene data frames where each
##' element in the list represents one year. If split is specified, the return value will be a list of matingScene data frames where each element in the list represents a value of the specifed variable. See details for more information
##' on attributes and how to work with multi-year data.
##' @details The input dataframe can contain information about locations of
##' individuals in 1, 2, or 3 dimensions of a mating scenes.
##' The function currently allows two spatial coordinates. The user specifies
##' the names of the columns and they will be saved xCol and yCol in the
##' matingScene object. MatingScene objects currently save temporal
##' coordinates for individuals as start and end date of mating activity
##' within a year. Mating type coordinates are saved as mating type alleles.
##' Columns are named id, start, end, x, y, s1, and s2 for
##' idCol, startCol, endCol, xCol, yCol, s1Col, and s2Col respectively.
##' The attributes "t", "s", and "mt" will be set to TRUE if the data frame
##' has temporal, spatial, or mating type data, respectively and
##' will be FALSE otherwise. The attribute originalNames contains all the
##' names of the columns in the original data frame.\cr
##' The start and end columns will be changed to integers relative to the start
##' day of the population. So the first day of the first individual to become
##' receptive will be 1 and so on. The attribute origin contains the
##' origin that can be used when converting the columns start and end
##' from integers to dates.\cr
##' If no temporal data are available except the year in which it was
##' collected and df is a multi-year data set, put the collection year into the
##' column labelled as startCol and set dateFormat = "%Y" and that will split
##' the data appropriately.
##' @author Danny Hanson
# Build a matingScene data frame (or a list of them) from raw columns.
# Three modes: multiYear recursion, split-by-column recursion, or the base
# case that assembles one standardized data frame with t/s/mt attributes.
makeScene <- function (df, multiYear = FALSE, startCol = "start", endCol = "end",
                       xCol = "x", yCol = "y", s1Col = "s1", s2Col = "s2",
                       idCol = "id", otherCols = NULL, dateFormat = "%Y-%m-%d",
                       split = NULL) {
  if (multiYear) {
    # Derive the calendar year of every row, then recurse once per year,
    # returning a year-named list of single-year matingScenes.
    if (dateFormat == "%Y") {
      # Only a collection year is available; coerce via character first.
      dates <- as.Date(as.character(df[, startCol]), dateFormat)
    } else {
      dates <- as.Date(df[, startCol], dateFormat)
    }
    df$year <- as.numeric(format(dates, "%Y"))
    years <- levels(as.factor(df$year))
    newScene <- list()
    for (i in 1:length(years)) {
      newScene[[as.character(years[i])]] <-
        makeScene(df[df$year %in% years[i],], F, startCol, endCol, xCol, yCol,
                  s1Col, s2Col, idCol, otherCols, dateFormat, split)
    }
  } else if(!is.null(split)){
    # Split by an arbitrary grouping column: recurse once per factor level.
    splitTo <- levels(as.factor(df[,split]))
    newScene <- list()
    for (i in 1:length(splitTo)){
      newScene[[as.character(splitTo[i])]] <-
        makeScene(df[df[,split] %in% splitTo[i],], F, startCol, endCol, xCol, yCol,
                  s1Col, s2Col, idCol, otherCols, dateFormat)
    }
  } else {
    # Base case: assemble the standardized data frame.
    newScene <- data.frame(id = character(nrow(df)))
    if (idCol %in% names(df)) {
      newScene$id <- df[, idCol]
    } else {
      # No id column supplied: fall back to row numbers.
      newScene$id <- 1:nrow(df)
    }
    # Flags advertising which coordinate types are present.
    attr(newScene, "t") <- FALSE
    attr(newScene, "s") <- FALSE
    attr(newScene, "mt") <- FALSE
    attr(newScene, "originalNames") <- names(df)
    if (all(c(startCol, endCol) %in% names(df))) {
      # Temporal coordinates: rebase start/end as integers where day 1 is the
      # first start date in the population; keep the origin as an attribute
      # so the integers can be converted back to dates.
      attr(newScene, "t") <- TRUE
      newScene$start <- as.integer(as.Date(df[, startCol], dateFormat))
      firstDay <- min(newScene$start)
      newScene$start <- newScene$start - firstDay + 1
      newScene$end <- as.integer(as.Date(df[, endCol], dateFormat)) - firstDay + 1
      newScene$duration <- newScene$end - newScene$start + 1
      origin <- as.Date(firstDay-1, "1970-01-01")
      attr(newScene, "origin") <- origin
    }
    if (all(c(xCol, yCol) %in% names(df))) {
      # Spatial coordinates copied through unchanged.
      attr(newScene, "s") <- TRUE
      newScene$x <- df[, xCol]
      newScene$y <- df[, yCol]
    }
    if (all(c(s1Col, s2Col) %in% names(df))) {
      # Mating-type (S-allele) columns stored as factors.
      attr(newScene, "mt") <- TRUE
      newScene$s1 <- as.factor(df[, s1Col])
      newScene$s2 <- as.factor(df[, s2Col])
    }
    if (!is.null(otherCols)) {
      # Carry any extra user-requested columns through unchanged.
      newScene[, otherCols] <- df[, otherCols]
    }
    # not going to add this for now because it's unlikely we'll make our
    # own generics or use oop
    # class(newScene) <- "matingScene"
  }
  newScene
}
|
# NOTE(review): install.packages() inside a script installs on every run;
# usually this belongs in setup documentation, not the script itself.
install.packages("rvest")
library(rvest)
# How to scrape B3 (Brazilian exchange) data - derivatives and stocks
# DERIVATIVES
# Target URL - daily settlement prices for derivatives
url <- "http://www2.bmf.com.br/pages/portal/bmfbovespa/lumis/lum-ajustes-do-pregao-ptBR.asp"
# Read the HTML of the page
site <- read_html(url)
site
# Select which HTML element to use for the settlement data
info_HTML_ajuste <- html_nodes(site,"table") # Finds <table> nodes in the HTML document
info_HTML_ajuste
# Convert the HTML to text
HTML_ajuste <- html_text(info_HTML_ajuste) # Extracts the node text content
HTML_ajuste
# Inspect the raw text
head(HTML_ajuste,20)
# Better way to inspect the nodes
head(info_HTML_ajuste)
# The cleanest way to get tabular data: parse the <table> nodes into a list
# of data frames
lista_tabela <- site %>%
  html_nodes("table") %>%
  html_table(fill = TRUE)
# Inspect
str(lista_tabela)
head(lista_tabela[[1]], 10)
View(lista_tabela[[1]])
# Keep the settlement table
AJUSTE <- (lista_tabela[1])
# STOCKS - ITAUSA
# Target URL - balance sheet
url1 <- "http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM=7617&ViewDoc=1&AnoDoc=2019&VersaoDoc=1&NumSeqDoc=82855#a"
# Read the HTML of the page
site1 <- read_html(url1)
# Select which HTML element to use
info_balanco <- html_nodes(site1, "table")
# Convert the HTML to text
html_texto <- html_text(info_balanco)
# Inspect
head(html_texto,20)
# Better way to inspect the nodes
head(info_balanco)
# Parse tables 3-5 (balance sheet, income statement, cash flow) into data frames
lista_tabela2 <- site1 %>%
  html_nodes("table") %>%
  .[3:5] %>%
  html_table(fill = TRUE)
# Inspect
str(lista_tabela2)
head(lista_tabela2[[1]],10)
head(lista_tabela2[[2]],10)
head(lista_tabela2[[3]],10)
View(lista_tabela2[[1]])
View(lista_tabela2[[2]])
View(lista_tabela2[[3]])
# Balance sheet, income statement, cash-flow statement
BP <- (lista_tabela2[[1]])
DR <- (lista_tabela2[[2]])
DFC <-(lista_tabela2[[3]])
| /WEB CRAWLER.R | no_license | paoliveira7/Vega-Data-Analysis | R | false | false | 2,253 | r | install.packages("rvest")
library(rvest)
# How to scrape B3 (Brazilian exchange) data - derivatives and stocks
# DERIVATIVES
# Target URL - daily settlement prices for derivatives
url <- "http://www2.bmf.com.br/pages/portal/bmfbovespa/lumis/lum-ajustes-do-pregao-ptBR.asp"
# Read the HTML of the page
site <- read_html(url)
site
# Select which HTML element to use for the settlement data
info_HTML_ajuste <- html_nodes(site,"table") # Finds <table> nodes in the HTML document
info_HTML_ajuste
# Convert the HTML to text
HTML_ajuste <- html_text(info_HTML_ajuste) # Extracts the node text content
HTML_ajuste
# Inspect the raw text
head(HTML_ajuste,20)
# Better way to inspect the nodes
head(info_HTML_ajuste)
# The cleanest way to get tabular data: parse the <table> nodes into a list
# of data frames
lista_tabela <- site %>%
  html_nodes("table") %>%
  html_table(fill = TRUE)
# Inspect
str(lista_tabela)
head(lista_tabela[[1]], 10)
View(lista_tabela[[1]])
# Keep the settlement table
AJUSTE <- (lista_tabela[1])
# STOCKS - ITAUSA
# Target URL - balance sheet
url1 <- "http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM=7617&ViewDoc=1&AnoDoc=2019&VersaoDoc=1&NumSeqDoc=82855#a"
# Read the HTML of the page
site1 <- read_html(url1)
# Select which HTML element to use
info_balanco <- html_nodes(site1, "table")
# Convert the HTML to text
html_texto <- html_text(info_balanco)
# Inspect
head(html_texto,20)
# Better way to inspect the nodes
head(info_balanco)
# Parse tables 3-5 (balance sheet, income statement, cash flow) into data frames
lista_tabela2 <- site1 %>%
  html_nodes("table") %>%
  .[3:5] %>%
  html_table(fill = TRUE)
# Inspect
str(lista_tabela2)
head(lista_tabela2[[1]],10)
head(lista_tabela2[[2]],10)
head(lista_tabela2[[3]],10)
View(lista_tabela2[[1]])
View(lista_tabela2[[2]])
View(lista_tabela2[[3]])
# Balance sheet, income statement, cash-flow statement
BP <- (lista_tabela2[[1]])
DR <- (lista_tabela2[[2]])
DFC <-(lista_tabela2[[3]])
# Read the header row for column names, then the two target days
# (2007-02-01/02) directly via skip/nrows to avoid loading the full file.
power <- read.table("household_power_consumption.txt", nrows = 1, sep = ";", header = TRUE, na.strings = "?")
cols <- colnames(power)
power <- read.table("household_power_consumption.txt", nrows = 2880, sep = ";", header = FALSE, col.names = cols, na.strings = "?", skip = 66637)

# Parse the date-time axis once instead of re-parsing it for every plot call.
datetime <- strptime(paste(power[, 1], power[, 2]), format = "%d/%m/%Y %H:%M:%S")

png(filename = "plot4.png",
    width = 480, height = 480, units = "px", pointsize = 12,
    bg = "white", res = NA, family = "", restoreConsole = TRUE,
    type = c("windows", "cairo", "cairo-png"))

# 2x2 panel of time series.
par(mfrow = c(2, 2))

# Top-left: global active power.
plot(datetime, power[, 3], type = "l", xlab = "", ylab = "Global Active Power")

# Top-right: voltage.
plot(datetime, power[, 5], type = "l", xlab = "datetime", ylab = "Voltage")

# Bottom-left: the three sub-metering series overlaid on one frame.
plot(datetime, power[, 7], type = "n", xlab = "", ylab = "Energy sub metering")
lines(datetime, power[, 7], col = "black")
lines(datetime, power[, 8], col = "red")
lines(datetime, power[, 9], col = "blue")
legend("topright", lty = c(1, 1, 1), lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Bottom-right: global reactive power.
plot(datetime, power[, 4], type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off() | /plot4.R | no_license | sreeramkumar/ExData_Plotting1 | R | false | false | 1,879 | r | power <- read.table("household_power_consumption.txt", nrows = 1, sep = ";", header = TRUE, na.strings = "?")
cols <- colnames(power)
power <- read.table("household_power_consumption.txt", nrows = 2880, sep = ";", header = FALSE, col.names = cols, na.strings = "?", skip = 66637)
png(filename = "plot4.png",
width = 480, height = 480, units = "px", pointsize = 12,
bg = "white", res = NA, family = "", restoreConsole = TRUE,
type = c("windows", "cairo", "cairo-png"))
par(mfrow = c(2, 2))
plot(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,3], type = "n", xlab = "", ylab = "Global Active Power")
lines(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,3], type = "l")
plot(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,5], type = "n", xlab = "datetime", ylab = "Voltage")
lines(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,5], type = "l")
plot(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,7], type = "n", xlab = "", ylab = "Energy sub metering")
lines(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,7], type = "l", col = "black")
lines(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,8], type = "l", col = "red")
lines(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,9], type = "l", col = "blue")
legend("topright", lty = c(1, 1, 1), lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,4], type = "n", xlab = "datetime", ylab = "Global_reactive_power")
lines(strptime(paste(power[,1], power[, 2]), format = "%d/%m/%Y %H:%M:%S"), power[,4], type = "l")
dev.off() |
library(RCurl)
library(tidyverse)
library(stringr)
library(sf)
library(magrittr)
library(data.table)
library(parallel)
library(stringi)
#### 0. Download original data sets from IBGE ftp -----------------
ftp <- "ftp://geoftp.ibge.gov.br/recortes_para_fins_estatisticos/malha_de_areas_de_ponderacao/"
######## 1. Unzip original data sets downloaded from IBGE -----------------
# Root directory
# NOTE(review): setwd() to an absolute Windows network path makes the script
# machine-specific; consider here::here() or a config variable.
root_dir <- "L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao"
setwd(root_dir)
# List all zip files for all years
all_zipped_files <- list.files(full.names = T, recursive = T, pattern = ".zip")
#### 1.1. Municipios sem area redefinidas --------------
# Zips NOT under the "municipios_areas_redefinidas" subdirectory
# (%like% comes from data.table, loaded at the top of the file).
files_1st_batch <- all_zipped_files[!all_zipped_files %like% "municipios_areas_redefinidas"]
# function to Unzip files in their original sub-dir
# NOTE(review): substr(f, 2, 24) drops the leading "." and keeps a fixed-width
# slice of the relative path — presumably the year subdirectory; this breaks
# if the directory layout changes. TODO confirm the expected path length.
unzip_fun <- function(f){
  unzip(f, exdir = file.path(root_dir, substr(f, 2, 24)))
}
# create computing clusters
cl <- parallel::makeCluster(detectCores())
parallel::clusterExport(cl=cl, varlist= c("files_1st_batch", "root_dir"), envir=environment())
# apply function in parallel
parallel::parLapply(cl, files_1st_batch, unzip_fun)
stopCluster(cl)
# Keep only the objects needed by the next section.
rm(list=setdiff(ls(), c("root_dir","all_zipped_files")))
gc(reset = T)
#### 1.2. Municipios area redefinidas --------------
# Zips under the "municipios_areas_redefinidas" subdirectory; the longer
# substr width (53) accounts for the deeper path.
files_2st_batch <- all_zipped_files[all_zipped_files %like% "municipios_areas_redefinidas"]
# function to Unzip files in their original sub-dir
unzip_fun <- function(f){
  unzip(f, exdir = file.path(root_dir, substr(f, 2, 53) ))
}
# create computing clusters
cl <- parallel::makeCluster(detectCores())
parallel::clusterExport(cl=cl, varlist= c("files_2st_batch", "root_dir"), envir=environment())
# apply function in parallel
parallel::parLapply(cl, files_2st_batch, unzip_fun)
stopCluster(cl)
rm(list=setdiff(ls(), c("root_dir","all_zipped_files")))
gc(reset = T)
#### 2. Create folders to save sf.rds files -----------------
# create directory to save original shape files in sf format
dir.create(file.path("shapes_in_sf_all_years_original"), showWarnings = FALSE)
# create directory to save cleaned shape files in sf format
dir.create(file.path("shapes_in_sf_all_years_cleaned"), showWarnings = FALSE)
# create a subdirectory area_ponderacao
dir.create(file.path("shapes_in_sf_all_years_original", "area_ponderacao"), showWarnings = FALSE)
dir.create(file.path("shapes_in_sf_all_years_cleaned", "area_ponderacao"), showWarnings = FALSE)
# create a subdirectory of year
dir.create(file.path("shapes_in_sf_all_years_original", "area_ponderacao","2010"), showWarnings = FALSE)
dir.create(file.path("shapes_in_sf_all_years_cleaned", "area_ponderacao","2010"), showWarnings = FALSE)
# create a subdirectory of municipios_areas_redefinidas
dir.create(file.path("shapes_in_sf_all_years_original", "area_ponderacao","2010","municipios_areas_redefinidas"), showWarnings = FALSE)
dir.create(file.path("shapes_in_sf_all_years_cleaned", "area_ponderacao","2010","municipios_areas_redefinidas"), showWarnings = FALSE)
#### 3. Save original data sets downloaded from IBGE in compact .rds format-----------------
# Root directory
root_dir <- "L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao"
setwd(root_dir)
# List shapes for all years
all_shapes <- list.files(full.names = T, recursive = T, pattern = ".shp")
# NOTE(review): this function looks unfinished — it computes dest_dir and
# file_name but never calls saveRDS(), so nothing is written.
shp_to_sf_rds <- function(x){
  # Read the shapefile as sf, forcing the IBGE WINDOWS-1252 encoding.
  shape <- st_read(x, quiet = T, stringsAsFactors=F, options = "ENCODING=WINDOWS-1252")
  dest_dir <- paste0("./shapes_in_sf_all_years_original/area_ponderacao/", "2010")
  # name of the file that will be saved
  # NOTE(review): substr(x, 26, 24) has start > stop and always returns "",
  # so the redefinidas branch produces the file name "_AP.rds".
  if( x %like% "municipios_areas_redefinidas"){ file_name <- paste0(toupper(substr(x, 26, 24)), "_AP", ".rds") }
  if( !x %like% "municipios_areas_redefinidas"){ file_name <- paste0( toupper(substr(x, 26, 27)),"_AP", ".rds") }
  # NOTE(review): leftover debug lines — substr() below is missing its 'stop'
  # argument and would error if this function were ever executed, and
  # all_shapes[1] becomes the (meaningless) return value.
  substr(all_shapes[153], 55 )
  all_shapes[1]
}
###### 0. Create folders to save the data -----------------
# Directory to keep raw zipped files
dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//2010")
dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//2010//municipios_areas_redefinidas")
# # Directory to keep raw sf files
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//shapes_in_sf_all_years_original")
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//shapes_in_sf_all_years_original//2010")
#
# # Directory to keep cleaned sf files
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//grade_estatistica//shapes_in_sf_all_years_cleaned")
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//grade_estatistica//shapes_in_sf_all_years_cleaned//2010")
###### 1. Download 2010 Raw data -----------------
# Root directory
root_dir <- "L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//2010"
setwd(root_dir)
# get files url
# RCurl::getURL returns the FTP directory listing as one string; split on CRLF.
url = "ftp://geoftp.ibge.gov.br/recortes_para_fins_estatisticos/malha_de_areas_de_ponderacao/censo_demografico_2010/"
filenames = getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE)
filenames <- strsplit(filenames, "\r\n")
filenames = unlist(filenames)
# NOTE(review): dropping entry 28 by position is fragile — the listing order
# can change; filtering by name would be safer.
filenames <- filenames[-28] # remove subdirectory 'municipios_areas_redefinidas'
# Download zipped files
for (filename in filenames) {
  download.file(paste(url, filename, sep = ""), paste(filename))
}
###### 1.1 Download municipios_areas_redefinidas
# get files url
url = "ftp://geoftp.ibge.gov.br/recortes_para_fins_estatisticos/malha_de_areas_de_ponderacao/censo_demografico_2010/municipios_areas_redefinidas/"
filenames = getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE)
filenames <- strsplit(filenames, "\r\n")
filenames = unlist(filenames)
# Download zipped files
for (filename in filenames) {
  download.file( url=paste(url, filename, sep = ""), destfile= paste0("./municipios_areas_redefinidas/",filename))
}
###### 2. Unzip Raw data -----------------
# Collect the names of the downloaded zip archives
filenames <- list.files(pattern = ".*\\.zip$")
filenamesred <- list.files(path = "./municipios_areas_redefinidas")
# Extract every state-level archive into the working directory
for (filename in filenames) {
  unzip(filename)
  # NOTE(review): the original comment here said "deleting the .zip files",
  # but nothing is deleted in this loop; the archives stay on disk.
}
# Extract the redefined-areas archives inside their own subdirectory
for (filename in filenamesred) {
  unzip(paste("./municipios_areas_redefinidas",filename,sep="/"),exdir = "./municipios_areas_redefinidas")
}
###### 3. Save original data sets downloaded from IBGE in compact .rds format-----------------
# For every extracted folder (state folders start with digits, plus the
# "municipios_areas_redefinidas" folder), read each shapefile with sf and save
# it next to the source as an .rds file with the same base name.
for (filename in list.files(pattern = "^\\d|mun")) {
  a=list.files(path = paste("./",filename,sep=""),pattern = ".*\\.shp$")
  for (file in a) {
    # gsub('.{0,4}$', '', file) strips the ".shp" extension before
    # appending ".rds".
    saveRDS(st_read(paste(".",filename,file,sep = "/")),
            file = paste(paste(".",filename,gsub('.{0,4}$', '', file),sep="/"),".rds",sep=""))
  }
  # Delete every file in the folder that is not an .rds (the original
  # .shp/.dbf/.prj/... shapefile components).
  b=list.files(path = paste("./",filename,sep=""))[!list.files(path = paste("./",filename,sep="")) %in%
                                                     list.files(path = paste("./",filename,sep=""),pattern = ".*\\.rds$")]
  for (excluir in b) {
    file.remove(paste(".",filename,excluir,sep="/"))
  }
}
# Rename the state folders to their two-digit UF code prefix (e.g.
# "11_RO_..." -> "11") so the later loops can address them uniformly.
auxiliar <- list.files()
for(nome in auxiliar){
  # Only touch entries whose name contains a digit (the state folders).
  # FIX(review): replaces the fragile is.na(as.numeric(str_extract(...))) == F
  # test with a direct regex check; behavior is identical.
  if(str_detect(nome,"\\d")){
    file.rename(paste(".",nome,sep="/"),paste(".",str_sub(nome,1,2),sep="/"))
  }
}
# The IBGE "Tabela de códigos 2010" (2010 code table) must be placed in the
# working directory as CSV; it maps municipality names to their IBGE codes,
# which are used below to replace the names in the file paths.
# The first two lines of the CSV are header notes, hence skip = 2.
tabcod <- read.csv2("./Tabela de códigos 2010.csv",header = TRUE,skip = 2)
# Helper below strips accents so municipality names can be matched by string.
# Strip diacritics from a character vector.
#
# str     - character vector (non-character input is coerced with
#           as.character()).
# pattern - which accent marks to remove. The default "all" (and a few
#           synonyms) removes every supported mark; alternatively pass the
#           marks themselves, e.g. c("´", "^") removes only acute and
#           circumflex accents. Supported marks: "´", "`", "^", "~", "¨", "ç".
#
# Returns `str` with the selected accented characters replaced by their
# unaccented counterparts.
rm_accent <- function(str, pattern = "all") {
  if (!is.character(str)) {
    str <- as.character(str)
  }
  pattern <- unique(pattern)
  # "Ç" is handled through its lower-case form (no-op when absent).
  pattern[pattern == "Ç"] <- "ç"
  accented <- c(
    acute      = "áéíóúÁÉÍÓÚýÝ",
    grave      = "àèìòùÀÈÌÒÙ",
    circunflex = "âêîôûÂÊÎÔÛ",
    tilde      = "ãõÃÕñÑ",
    umlaut     = "äëïöüÄËÏÖÜÿ",
    cedil      = "çÇ"
  )
  plain <- c(
    acute      = "aeiouAEIOUyY",
    grave      = "aeiouAEIOU",
    circunflex = "aeiouAEIOU",
    tilde      = "aoAOnN",
    umlaut     = "aeiouAEIOUy",
    cedil      = "cC"
  )
  marks <- c("´", "`", "^", "~", "¨", "ç")
  # Any of these keywords selects every accent type at once.
  all_keywords <- c("all", "al", "a", "todos", "t", "to", "tod", "todo")
  if (any(all_keywords %in% pattern)) {
    return(chartr(paste(accented, collapse = ""),
                  paste(plain, collapse = ""), str))
  }
  # Otherwise translate only the requested accent groups, one at a time.
  for (k in which(marks %in% pattern)) {
    str <- chartr(accented[k], plain[k], str)
  }
  str
}
# Normalise municipality names in the code table (lower case, accents removed,
# punctuation turned into spaces) so they can be joined against the file names.
# NOTE(review): str_replace() replaces only the FIRST punctuation match per
# name -- presumably enough here, but str_replace_all would be safer; confirm.
tabcod$Nome_Município <- tabcod$Nome_Município %>% as.character(.) %>% str_to_lower(.) %>% rm_accent(.) %>% str_replace(.,"[:punct:]"," ")
# Build a lookup table of (state folder, .rds file path).
a=data.frame(matrix(ncol=2,nrow=0))
colnames(a)<-c("UF","Mun")
for (filename in list.files(pattern = "^\\d|mun")) {
  for (f in list.files(path = paste("./",filename,sep=""),pattern = ".*\\.rds$")){
    # Growing with rbind is O(n^2) but n is small (~152 files).
    a <- rbind(a,data.frame(UF=filename,caminho=f))
  }
}
# Derive the municipality name from the file name: strip the "_area..." tail,
# lower-case, and turn punctuation/underscores into spaces (each str_replace
# handles one occurrence, hence the repeated calls).
a$Mun <- a$caminho %>% str_replace_all(.,"_area.*","") %>% str_to_lower(.) %>%
  str_replace(.,"[:punct:]"," ")%>% str_replace(.,"_"," ") %>% str_replace(.,"_"," ") %>% str_replace(.,"_"," ")
# Manual fixes for names that came out wrong.
# NOTE(review): hard-coded row indices assume a stable list.files() ordering;
# re-check these after any change to the folder contents.
a$Mun[11] <- "sao luis"
a$Mun[7] <- "santarem"
# Make the join keys plain character on both sides.
a$UF <- as.character(a$UF)
tabcod$UF <- as.character(tabcod$UF)
a$Mun <- as.character(a$Mun)
tabcod$Nome_Município <- as.character(tabcod$Nome_Município)
# Join the IBGE municipality codes onto the file list. Rows 1:138 (regular
# state folders) match on UF + name; rows 139:152 (the redefined-areas
# folder) match on name only.
# NOTE(review): the 138/152 split is hard-coded -- confirm it still matches
# the file count if the inputs ever change.
juntos1 <- left_join(a[1:138,],tabcod[,c(1,7,8)],by=c("UF"="UF","Mun"="Nome_Município"))
juntos2 <- left_join(a[139:152,],tabcod[,c(1,7,8)],by=c("Mun"="Nome_Município"))
# Drop the duplicated "santa maria" match and the redundant UF column.
juntos2 <- juntos2[-c(13),-c(4)]
# Rename the first column back to "UF".
colnames(juntos2)[1] <- c("UF")
# Final bind of both halves.
b <- rbind(juntos1,juntos2)
# Rename each .rds file to "<7-digit municipality code>_areaponderacao_2010.rds";
# rows whose join failed (NA code) are left untouched.
for (n in 1:152) {
  if (!is.na(b$Município[n])){
    file.rename(paste(".",b$UF[n],b$caminho[n],sep="/"),paste(paste(".",b$UF[n],b$Município[n],sep="/"),"_areaponderacao_2010.rds",sep=""))
  }
}
#### Parte 2: fold the redefined areas into the state folders #####
# Delete the outdated versions of the redefined municipalities from the state
# folders (the first two characters of each file name are the UF code).
r <- list.files("municipios_areas_redefinidas")
for (i in r) {
  file.remove(paste(substr(i,1,2),i,sep="/"))
}
# Tag the redefined files with a "_redefinida" suffix.
# FIX(review): anchor and escape the extension -- the original gsub('.rds', ...)
# pattern treats '.' as "any character" and could match inside the name.
for (i in r) {
  file.rename(from=paste("municipios_areas_redefinidas",i,sep="/"),to=paste("municipios_areas_redefinidas",gsub('\\.rds$', '_redefinida.rds', i),sep="/"))
}
# Move the redefined files into their state folders.
# FIX(review): only install 'filesstrings' when it is missing, instead of
# reinstalling unconditionally on every run of the script.
if (!requireNamespace("filesstrings", quietly = TRUE)) install.packages("filesstrings")
library(filesstrings)
s <- list.files("municipios_areas_redefinidas")
for (i in s) {
  file.move(paste("municipios_areas_redefinidas",i,sep="/"), substr(i,1,2))
}
# Remove the now-empty "municipios_areas_redefinidas" folder.
unlink("municipios_areas_redefinidas",recursive = TRUE)
## Standardise the per-municipality files: harmonise column names and add
## municipality / state code columns.
t=list.files(pattern = "^\\d")
for (i in t) {
  u=list.files(i)
  for (j in u) {
    d <- as.data.frame(readRDS(paste(i,j,sep = "/")))
    # The IBGE shapefiles are inconsistent about the id column name.
    colnames(d)[colnames(d) %in% c("CD_APONDE","CD_APonde","cd_aponde")] <- "cod_areapond"
    colnames(d)[colnames(d) %in% c("geometry")] <- "geom"
    d <- d[,c("cod_areapond","geom")]
    # First 7 characters of the file name are the 7-digit municipality code.
    # NOTE(review): "cod_mum" looks like a typo for "cod_mun"; kept as is in
    # case downstream code reads this column name -- confirm before renaming.
    d$cod_mum <-substr(j,1,7)
    d$cod_uf <- i
    d <- st_sf(d)
    saveRDS(d,file = paste(".",i,j,sep = "/"))
  }
}
# Bind all weighting areas of each state into a single sf object and save it
# as "<UF>AP.rds" in the working directory; empty state folders are skipped.
dir.proj="."
for (CODE in list.files(pattern = "^\\d")) {
  if (!length(list.files(paste(dir.proj,CODE,sep="/")))==0) {
    files <- list.files(paste(dir.proj,CODE,sep="/"),full.names = T)
    files <- lapply(X=files, FUN= readr::read_rds)
    # Convert each sf to data.frame so rbind stacks them, then restore sf.
    files <- lapply(X=files, FUN= as.data.frame)
    shape <- do.call('rbind', files)
    shape <- st_sf(shape)
    saveRDS(shape,paste0("./",CODE,"AP.rds"))
  }
}
| /prep_data/prep_weighting_area.R | no_license | marionog/geobr | R | false | false | 12,547 | r | library(RCurl)
library(tidyverse)
library(stringr)
library(sf)
library(magrittr)
library(data.table)
library(parallel)
library(stringi)
#### 0. Download original data sets from IBGE ftp -----------------
ftp <- "ftp://geoftp.ibge.gov.br/recortes_para_fins_estatisticos/malha_de_areas_de_ponderacao/"
######## 1. Unzip original data sets downloaded from IBGE -----------------
# Root directory.
# NOTE(review): hard-coded Windows network drive; setwd() makes every later
# relative path depend on this call.
root_dir <- "L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao"
setwd(root_dir)
# List all zip files for all years (recursive, relative paths with "./").
all_zipped_files <- list.files(full.names = T, recursive = T, pattern = ".zip")
#### 1.1. Municipalities WITHOUT redefined areas --------------
files_1st_batch <- all_zipped_files[!all_zipped_files %like% "municipios_areas_redefinidas"]
# Function to unzip each file into its original sub-directory.
# NOTE(review): substr(f, 2, 24) takes a fixed slice of the relative path to
# rebuild the destination folder -- confirm the 24-character bound matches
# the on-disk layout before reusing.
unzip_fun <- function(f){
  unzip(f, exdir = file.path(root_dir, substr(f, 2, 24)))
}
# create computing clusters
cl <- parallel::makeCluster(detectCores())
parallel::clusterExport(cl=cl, varlist= c("files_1st_batch", "root_dir"), envir=environment())
# apply function in parallel
parallel::parLapply(cl, files_1st_batch, unzip_fun)
stopCluster(cl)
# Drop everything except the objects the next section still needs.
rm(list=setdiff(ls(), c("root_dir","all_zipped_files")))
gc(reset = T)
#### 1.2. Municipalities WITH redefined areas --------------
files_2st_batch <- all_zipped_files[all_zipped_files %like% "municipios_areas_redefinidas"]
# Function to unzip each file into its original sub-directory.
# NOTE(review): substr(f, 2, 53) -- same fixed-slice assumption as in 1.1,
# widened to include the "municipios_areas_redefinidas" segment; verify.
unzip_fun <- function(f){
  unzip(f, exdir = file.path(root_dir, substr(f, 2, 53) ))
}
# create computing clusters
cl <- parallel::makeCluster(detectCores())
parallel::clusterExport(cl=cl, varlist= c("files_2st_batch", "root_dir"), envir=environment())
# apply function in parallel
parallel::parLapply(cl, files_2st_batch, unzip_fun)
stopCluster(cl)
# Drop everything except the objects the next section still needs.
rm(list=setdiff(ls(), c("root_dir","all_zipped_files")))
gc(reset = T)
#### 2. Create folders to save sf.rds files -----------------
# showWarnings = FALSE makes these calls idempotent across re-runs.
# create directory to save original shape files in sf format
dir.create(file.path("shapes_in_sf_all_years_original"), showWarnings = FALSE)
# create directory to save cleaned shape files in sf format
dir.create(file.path("shapes_in_sf_all_years_cleaned"), showWarnings = FALSE)
# create a subdirectory area_ponderacao
dir.create(file.path("shapes_in_sf_all_years_original", "area_ponderacao"), showWarnings = FALSE)
dir.create(file.path("shapes_in_sf_all_years_cleaned", "area_ponderacao"), showWarnings = FALSE)
# create a subdirectory of year
dir.create(file.path("shapes_in_sf_all_years_original", "area_ponderacao","2010"), showWarnings = FALSE)
dir.create(file.path("shapes_in_sf_all_years_cleaned", "area_ponderacao","2010"), showWarnings = FALSE)
# create a subdirectory of municipios_areas_redefinidas
dir.create(file.path("shapes_in_sf_all_years_original", "area_ponderacao","2010","municipios_areas_redefinidas"), showWarnings = FALSE)
dir.create(file.path("shapes_in_sf_all_years_cleaned", "area_ponderacao","2010","municipios_areas_redefinidas"), showWarnings = FALSE)
#### 3. Save original data sets downloaded from IBGE in compact .rds format-----------------
# Root directory
root_dir <- "L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao"
setwd(root_dir)
# List shapes for all years
all_shapes <- list.files(full.names = T, recursive = T, pattern = ".shp")
# NOTE(review): this function looks unfinished and is superseded by the
# pipeline below. It reads the shapefile and builds dest_dir/file_name but
# never writes anything; substr(x, 26, 24) (start > stop) always returns "";
# substr(all_shapes[153], 55 ) is missing its `stop` argument and would error
# if the function were called; the trailing expressions are dead code.
# Kept verbatim -- needs completion (or removal) before use.
shp_to_sf_rds <- function(x){
  shape <- st_read(x, quiet = T, stringsAsFactors=F, options = "ENCODING=WINDOWS-1252")
  dest_dir <- paste0("./shapes_in_sf_all_years_original/area_ponderacao/", "2010")
  # name of the file that will be saved
  if( x %like% "municipios_areas_redefinidas"){ file_name <- paste0(toupper(substr(x, 26, 24)), "_AP", ".rds") }
  if( !x %like% "municipios_areas_redefinidas"){ file_name <- paste0( toupper(substr(x, 26, 27)),"_AP", ".rds") }
  substr(all_shapes[153], 55 )
  all_shapes[1]
}
###### 0. Create folders to save the data -----------------
# Directory to keep raw zipped files.
# NOTE(review): hard-coded Windows network drive ("L:"); the script only runs
# on machines where this share is mounted.
dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//2010")
dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//2010//municipios_areas_redefinidas")
# # Directory to keep raw sf files
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//shapes_in_sf_all_years_original")
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//shapes_in_sf_all_years_original//2010")
#
# # Directory to keep cleaned sf files
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//grade_estatistica//shapes_in_sf_all_years_cleaned")
# dir.create("L:////# DIRUR #//ASMEQ//geobr//data-raw//grade_estatistica//shapes_in_sf_all_years_cleaned//2010")
###### 1. Download 2010 Raw data -----------------
# Root directory.
# NOTE(review): setwd() in a script is fragile; every relative path below
# depends on this call having succeeded.
root_dir <- "L:////# DIRUR #//ASMEQ//geobr//data-raw//malha_de_areas_de_ponderacao//2010"
setwd(root_dir)
# List the files available in the IBGE FTP directory for the 2010 census
# weighting areas ("areas de ponderacao").
url = "ftp://geoftp.ibge.gov.br/recortes_para_fins_estatisticos/malha_de_areas_de_ponderacao/censo_demografico_2010/"
filenames = getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE)
filenames <- unlist(strsplit(filenames, "\r\n"))
filenames <- filenames[-28] # remove subdirectory 'municipios_areas_redefinidas'
# Download zipped files into the working directory.
# FIX(review): mode = "wb" is required for binary files on Windows; without it
# download.file() writes the zips in text mode and they arrive corrupted.
for (filename in filenames) {
  download.file(url = paste0(url, filename), destfile = filename, mode = "wb")
}
###### 1.1 Download municipios_areas_redefinidas
# Same listing/download for the subdirectory holding the municipalities whose
# weighting areas were redefined.
url = "ftp://geoftp.ibge.gov.br/recortes_para_fins_estatisticos/malha_de_areas_de_ponderacao/censo_demografico_2010/municipios_areas_redefinidas/"
filenames = getURL(url, ftp.use.epsv = FALSE, dirlistonly = TRUE)
filenames <- unlist(strsplit(filenames, "\r\n"))
# Download zipped files into the subdirectory created in section 0.
for (filename in filenames) {
  download.file(url = paste0(url, filename),
                destfile = paste0("./municipios_areas_redefinidas/", filename),
                mode = "wb")
}
###### 2. Unzip Raw data -----------------
# Collect the names of the downloaded zip archives
filenames <- list.files(pattern = ".*\\.zip$")
filenamesred <- list.files(path = "./municipios_areas_redefinidas")
# Extract every state-level archive into the working directory
for (filename in filenames) {
  unzip(filename)
  # NOTE(review): the original comment here said "deleting the .zip files",
  # but nothing is deleted in this loop; the archives stay on disk.
}
# Extract the redefined-areas archives inside their own subdirectory
for (filename in filenamesred) {
  unzip(paste("./municipios_areas_redefinidas",filename,sep="/"),exdir = "./municipios_areas_redefinidas")
}
###### 3. Save original data sets downloaded from IBGE in compact .rds format-----------------
# For every extracted folder (state folders start with digits, plus the
# "municipios_areas_redefinidas" folder), read each shapefile with sf and save
# it next to the source as an .rds file with the same base name.
for (filename in list.files(pattern = "^\\d|mun")) {
  a=list.files(path = paste("./",filename,sep=""),pattern = ".*\\.shp$")
  for (file in a) {
    # gsub('.{0,4}$', '', file) strips the ".shp" extension before
    # appending ".rds".
    saveRDS(st_read(paste(".",filename,file,sep = "/")),
            file = paste(paste(".",filename,gsub('.{0,4}$', '', file),sep="/"),".rds",sep=""))
  }
  # Delete every file in the folder that is not an .rds (the original
  # .shp/.dbf/.prj/... shapefile components).
  b=list.files(path = paste("./",filename,sep=""))[!list.files(path = paste("./",filename,sep="")) %in%
                                                     list.files(path = paste("./",filename,sep=""),pattern = ".*\\.rds$")]
  for (excluir in b) {
    file.remove(paste(".",filename,excluir,sep="/"))
  }
}
# Rename the state folders to their two-digit UF code prefix (e.g.
# "11_RO_..." -> "11") so the later loops can address them uniformly.
auxiliar <- list.files()
for(nome in auxiliar){
  # Only touch entries whose name contains a digit (the state folders).
  # FIX(review): replaces the fragile is.na(as.numeric(str_extract(...))) == F
  # test with a direct regex check; behavior is identical.
  if(str_detect(nome,"\\d")){
    file.rename(paste(".",nome,sep="/"),paste(".",str_sub(nome,1,2),sep="/"))
  }
}
# The IBGE "Tabela de códigos 2010" (2010 code table) must be placed in the
# working directory as CSV; it maps municipality names to their IBGE codes,
# which are used below to replace the names in the file paths.
# The first two lines of the CSV are header notes, hence skip = 2.
tabcod <- read.csv2("./Tabela de códigos 2010.csv",header = TRUE,skip = 2)
# Helper below strips accents so municipality names can be matched by string.
# Strip diacritics from a character vector.
#
# str     - character vector (non-character input is coerced with
#           as.character()).
# pattern - which accent marks to remove. The default "all" (and a few
#           synonyms) removes every supported mark; alternatively pass the
#           marks themselves, e.g. c("´", "^") removes only acute and
#           circumflex accents. Supported marks: "´", "`", "^", "~", "¨", "ç".
#
# Returns `str` with the selected accented characters replaced by their
# unaccented counterparts.
rm_accent <- function(str, pattern = "all") {
  if (!is.character(str)) {
    str <- as.character(str)
  }
  pattern <- unique(pattern)
  # "Ç" is handled through its lower-case form (no-op when absent).
  pattern[pattern == "Ç"] <- "ç"
  accented <- c(
    acute      = "áéíóúÁÉÍÓÚýÝ",
    grave      = "àèìòùÀÈÌÒÙ",
    circunflex = "âêîôûÂÊÎÔÛ",
    tilde      = "ãõÃÕñÑ",
    umlaut     = "äëïöüÄËÏÖÜÿ",
    cedil      = "çÇ"
  )
  plain <- c(
    acute      = "aeiouAEIOUyY",
    grave      = "aeiouAEIOU",
    circunflex = "aeiouAEIOU",
    tilde      = "aoAOnN",
    umlaut     = "aeiouAEIOUy",
    cedil      = "cC"
  )
  marks <- c("´", "`", "^", "~", "¨", "ç")
  # Any of these keywords selects every accent type at once.
  all_keywords <- c("all", "al", "a", "todos", "t", "to", "tod", "todo")
  if (any(all_keywords %in% pattern)) {
    return(chartr(paste(accented, collapse = ""),
                  paste(plain, collapse = ""), str))
  }
  # Otherwise translate only the requested accent groups, one at a time.
  for (k in which(marks %in% pattern)) {
    str <- chartr(accented[k], plain[k], str)
  }
  str
}
# Normalise municipality names in the code table (lower case, accents removed,
# punctuation turned into spaces) so they can be joined against the file names.
# NOTE(review): str_replace() replaces only the FIRST punctuation match per
# name -- presumably enough here, but str_replace_all would be safer; confirm.
tabcod$Nome_Município <- tabcod$Nome_Município %>% as.character(.) %>% str_to_lower(.) %>% rm_accent(.) %>% str_replace(.,"[:punct:]"," ")
# Build a lookup table of (state folder, .rds file path).
a=data.frame(matrix(ncol=2,nrow=0))
colnames(a)<-c("UF","Mun")
for (filename in list.files(pattern = "^\\d|mun")) {
  for (f in list.files(path = paste("./",filename,sep=""),pattern = ".*\\.rds$")){
    # Growing with rbind is O(n^2) but n is small (~152 files).
    a <- rbind(a,data.frame(UF=filename,caminho=f))
  }
}
# Derive the municipality name from the file name: strip the "_area..." tail,
# lower-case, and turn punctuation/underscores into spaces (each str_replace
# handles one occurrence, hence the repeated calls).
a$Mun <- a$caminho %>% str_replace_all(.,"_area.*","") %>% str_to_lower(.) %>%
  str_replace(.,"[:punct:]"," ")%>% str_replace(.,"_"," ") %>% str_replace(.,"_"," ") %>% str_replace(.,"_"," ")
# Manual fixes for names that came out wrong.
# NOTE(review): hard-coded row indices assume a stable list.files() ordering;
# re-check these after any change to the folder contents.
a$Mun[11] <- "sao luis"
a$Mun[7] <- "santarem"
# Make the join keys plain character on both sides.
a$UF <- as.character(a$UF)
tabcod$UF <- as.character(tabcod$UF)
a$Mun <- as.character(a$Mun)
tabcod$Nome_Município <- as.character(tabcod$Nome_Município)
# Join the IBGE municipality codes onto the file list. Rows 1:138 (regular
# state folders) match on UF + name; rows 139:152 (the redefined-areas
# folder) match on name only.
# NOTE(review): the 138/152 split is hard-coded -- confirm it still matches
# the file count if the inputs ever change.
juntos1 <- left_join(a[1:138,],tabcod[,c(1,7,8)],by=c("UF"="UF","Mun"="Nome_Município"))
juntos2 <- left_join(a[139:152,],tabcod[,c(1,7,8)],by=c("Mun"="Nome_Município"))
# Drop the duplicated "santa maria" match and the redundant UF column.
juntos2 <- juntos2[-c(13),-c(4)]
# Rename the first column back to "UF".
colnames(juntos2)[1] <- c("UF")
# Final bind of both halves.
b <- rbind(juntos1,juntos2)
# Rename each .rds file to "<7-digit municipality code>_areaponderacao_2010.rds";
# rows whose join failed (NA code) are left untouched.
for (n in 1:152) {
  if (!is.na(b$Município[n])){
    file.rename(paste(".",b$UF[n],b$caminho[n],sep="/"),paste(paste(".",b$UF[n],b$Município[n],sep="/"),"_areaponderacao_2010.rds",sep=""))
  }
}
#### Parte 2: fold the redefined areas into the state folders #####
# Delete the outdated versions of the redefined municipalities from the state
# folders (the first two characters of each file name are the UF code).
r <- list.files("municipios_areas_redefinidas")
for (i in r) {
  file.remove(paste(substr(i,1,2),i,sep="/"))
}
# Tag the redefined files with a "_redefinida" suffix.
# FIX(review): anchor and escape the extension -- the original gsub('.rds', ...)
# pattern treats '.' as "any character" and could match inside the name.
for (i in r) {
  file.rename(from=paste("municipios_areas_redefinidas",i,sep="/"),to=paste("municipios_areas_redefinidas",gsub('\\.rds$', '_redefinida.rds', i),sep="/"))
}
# Move the redefined files into their state folders.
# FIX(review): only install 'filesstrings' when it is missing, instead of
# reinstalling unconditionally on every run of the script.
if (!requireNamespace("filesstrings", quietly = TRUE)) install.packages("filesstrings")
library(filesstrings)
s <- list.files("municipios_areas_redefinidas")
for (i in s) {
  file.move(paste("municipios_areas_redefinidas",i,sep="/"), substr(i,1,2))
}
# Remove the now-empty "municipios_areas_redefinidas" folder.
unlink("municipios_areas_redefinidas",recursive = TRUE)
## Standardise the per-municipality files: harmonise column names and add
## municipality / state code columns.
t=list.files(pattern = "^\\d")
for (i in t) {
  u=list.files(i)
  for (j in u) {
    d <- as.data.frame(readRDS(paste(i,j,sep = "/")))
    # The IBGE shapefiles are inconsistent about the id column name.
    colnames(d)[colnames(d) %in% c("CD_APONDE","CD_APonde","cd_aponde")] <- "cod_areapond"
    colnames(d)[colnames(d) %in% c("geometry")] <- "geom"
    d <- d[,c("cod_areapond","geom")]
    # First 7 characters of the file name are the 7-digit municipality code.
    # NOTE(review): "cod_mum" looks like a typo for "cod_mun"; kept as is in
    # case downstream code reads this column name -- confirm before renaming.
    d$cod_mum <-substr(j,1,7)
    d$cod_uf <- i
    d <- st_sf(d)
    saveRDS(d,file = paste(".",i,j,sep = "/"))
  }
}
# Bind all weighting areas of each state into a single sf object and save it
# as "<UF>AP.rds" in the working directory; empty state folders are skipped.
dir.proj="."
for (CODE in list.files(pattern = "^\\d")) {
  if (!length(list.files(paste(dir.proj,CODE,sep="/")))==0) {
    files <- list.files(paste(dir.proj,CODE,sep="/"),full.names = T)
    files <- lapply(X=files, FUN= readr::read_rds)
    # Convert each sf to data.frame so rbind stacks them, then restore sf.
    files <- lapply(X=files, FUN= as.data.frame)
    shape <- do.call('rbind', files)
    shape <- st_sf(shape)
    saveRDS(shape,paste0("./",CODE,"AP.rds"))
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.